from typing import Any, Optional, Tuple, Union, List, Dict
from dataclasses import dataclass
import math
import warnings
from functools import partial, reduce

import numpy as np
from PIL import Image
import torch
import torch.utils.checkpoint
from torch import nn

from transformers.image_processing_utils import BatchFeature
from transformers.image_transforms import (
    convert_to_rgb,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from transformers.image_utils import (
    ChannelDimension,
    PILImageResampling,
    to_numpy_array,
)
from transformers.activations import ACT2FN
from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import ModelOutput

from .configuration_imp import SiglipVisionConfig


def simple_image_processor(
    images,
    image_mean=(0.5, 0.5, 0.5),
    image_std=(0.5, 0.5, 0.5),
    size=(384, 384),
    resample=PILImageResampling.BICUBIC,
    rescale_factor=1 / 255,
    data_format=ChannelDimension.FIRST,
    return_tensors="pt",
):
    """Minimal SigLIP-style preprocessing: RGB conversion, resize, rescale and normalize."""
    if isinstance(images, Image.Image):
        images = [images]
    else:
        assert isinstance(images, list)

    transforms = [
        convert_to_rgb,
        to_numpy_array,
        partial(resize, size=size, resample=resample, data_format=data_format),
        partial(rescale, scale=rescale_factor, data_format=data_format),
        partial(normalize, mean=image_mean, std=image_std, data_format=data_format),
        partial(to_channel_dimension_format, channel_dim=data_format, input_channel_dim=data_format),
    ]

    # Apply each transform to every image in turn.
    images = reduce(lambda x, f: [*map(f, x)], transforms, images)
    data = {"pixel_values": images}

    return BatchFeature(data=data, tensor_type=return_tensors)
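
# Example usage of simple_image_processor (sketch; "example.jpg" is a placeholder path):
#
#   image = Image.open("example.jpg")
#   batch = simple_image_processor(image)
#   batch["pixel_values"].shape  # torch.Size([1, 3, 384, 384]) with the defaults above
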


@dataclass
class SiglipVisionModelOutput(ModelOutput):
    """
    Base class for vision model outputs that also contain image embeddings obtained by pooling the last hidden states.

    Args:
        image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when the model is initialized with `with_projection=True`):
            The image embeddings obtained by applying the projection layer to the pooler_output.
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    image_embeds: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


class SiglipVisionEmbeddings(nn.Module):
    def __init__(self, config: SiglipVisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.image_size = config.image_size
        self.patch_size = config.patch_size

        self.patch_embedding = nn.Conv2d(
            in_channels=config.num_channels,
            out_channels=self.embed_dim,
            kernel_size=self.patch_size,
            stride=self.patch_size,
            padding="valid",
        )

        self.num_patches = (self.image_size // self.patch_size) ** 2
        self.num_positions = self.num_patches
        self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
        self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)

    def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
        # (batch, channels, height, width) -> (batch, embed_dim, h_patches, w_patches)
        patch_embeds = self.patch_embedding(pixel_values)
        # Flatten the spatial grid into a sequence: (batch, num_patches, embed_dim)
        embeddings = patch_embeds.flatten(2).transpose(1, 2)

        embeddings = embeddings + self.position_embedding(self.position_ids)
        return embeddings
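
# Note: with image_size=384 and patch_size=14 (the configuration implied by the 729-token
# assert in VisionTower.forward below), the embedding layer produces
# (384 // 14) ** 2 = 729 patch tokens per image.
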


class SiglipAttention(nn.Module):
    """Multi-headed attention from the 'Attention Is All You Need' paper."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )
        self.scale = self.head_dim**-0.5
        self.dropout = config.attention_dropout

        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Input shape: Batch x Time x Channel"""

        batch_size, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        # (batch, seq_len, embed_dim) -> (batch, num_heads, seq_len, head_dim)
        query_states = query_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)

        k_v_seq_len = key_states.shape[-2]
        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) * self.scale

        if attn_weights.size() != (batch_size, self.num_heads, q_len, k_v_seq_len):
            raise ValueError(
                f"Attention weights should be of size {(batch_size, self.num_heads, q_len, k_v_seq_len)}, but is"
                f" {attn_weights.size()}"
            )

        if attention_mask is not None:
            if attention_mask.size() != (batch_size, 1, q_len, k_v_seq_len):
                raise ValueError(
                    f"Attention mask should be of size {(batch_size, 1, q_len, k_v_seq_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights + attention_mask

        # Upcast to fp32 for a numerically stable softmax, then cast back.
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
        attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
        attn_output = torch.matmul(attn_weights, value_states)

        if attn_output.size() != (batch_size, self.num_heads, q_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(batch_size, self.num_heads, q_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.reshape(batch_size, q_len, self.embed_dim)

        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights


class SiglipMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.activation_fn = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states


class SiglipEncoderLayer(nn.Module):
    def __init__(self, config: SiglipVisionConfig):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.self_attn = SiglipAttention(config)
        self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.mlp = SiglipMLP(config)
        self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.FloatTensor]:
        """
        Args:
            hidden_states (`torch.FloatTensor`):
                Input to the layer of shape `(batch, seq_len, embed_dim)`.
            attention_mask (`torch.FloatTensor`):
                Attention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values.
            output_attentions (`bool`, *optional*, defaults to `False`):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        # Pre-norm self-attention block with a residual connection.
        residual = hidden_states

        hidden_states = self.layer_norm1(hidden_states)
        hidden_states, attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
        )
        hidden_states = residual + hidden_states

        # Pre-norm MLP block with a residual connection.
        residual = hidden_states
        hidden_states = self.layer_norm2(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights,)

        return outputs


class SiglipPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = SiglipVisionConfig
    base_model_prefix = "siglip"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights."""
        # Weights are expected to be loaded from a pretrained checkpoint, so no explicit initialization is done here.
        pass


class SiglipEncoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`SiglipEncoderLayer`].

    Args:
        config: SiglipVisionConfig
    """

    def __init__(self, config: SiglipVisionConfig):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([SiglipEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        inputs_embeds,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
                than the model's internal embedding lookup matrix.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        hidden_states = inputs_embeds
        for encoder_layer in self.layers:
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    encoder_layer.__call__,
                    hidden_states,
                    attention_mask,
                    output_attentions,
                )
            else:
                layer_outputs = encoder_layer(
                    hidden_states,
                    attention_mask,
                    output_attentions=output_attentions,
                )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )


class SiglipVisionTransformer(nn.Module):
    def __init__(self, config: SiglipVisionConfig):
        super().__init__()
        self.config = config
        embed_dim = config.hidden_size

        self.embeddings = SiglipVisionEmbeddings(config)
        self.encoder = SiglipEncoder(config)
        self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
        self.head = SiglipMultiheadAttentionPoolingHead(config)

    def forward(
        self,
        pixel_values,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        hidden_states = self.embeddings(pixel_values)

        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_state = encoder_outputs[0]
        last_hidden_state = self.post_layernorm(last_hidden_state)

        pooled_output = self.head(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )


class SiglipMultiheadAttentionPoolingHead(nn.Module):
    """Multihead Attention Pooling."""

    def __init__(self, config: SiglipVisionConfig):
        super().__init__()

        self.probe = nn.Parameter(torch.randn(1, 1, config.hidden_size))
        self.attention = torch.nn.MultiheadAttention(config.hidden_size, config.num_attention_heads, batch_first=True)
        self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.mlp = SiglipMLP(config)

    def forward(self, hidden_state):
        batch_size = hidden_state.shape[0]
        # A single learned "probe" query attends over all patch tokens,
        # reducing (batch, num_patches, hidden_size) to (batch, hidden_size).
        probe = self.probe.repeat(batch_size, 1, 1)

        hidden_state = self.attention(probe, hidden_state, hidden_state)[0]

        residual = hidden_state
        hidden_state = self.layernorm(hidden_state)
        hidden_state = residual + self.mlp(hidden_state)

        return hidden_state[:, 0]


class SiglipVisionModel(SiglipPreTrainedModel):
    config_class = SiglipVisionConfig
    main_input_name = "pixel_values"
    _no_split_modules = ["SiglipEncoderLayer"]

    def __init__(self, config: SiglipVisionConfig):
        super().__init__(config)

        self.vision_model = SiglipVisionTransformer(config)

        # Initialize weights and apply final processing.
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.vision_model.embeddings.patch_embedding

    def forward(
        self,
        pixel_values,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, SiglipVisionModel

        >>> model = SiglipVisionModel.from_pretrained("google/siglip-base-patch16-224")
        >>> processor = AutoProcessor.from_pretrained("google/siglip-base-patch16-224")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled features
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        return self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
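
# Example usage of SiglipVisionModel with the helpers in this file (sketch; assumes the
# defaults of the local SiglipVisionConfig match the 384x384 processor above):
#
#   config = SiglipVisionConfig()
#   model = SiglipVisionModel(config).eval()
#   batch = simple_image_processor(Image.open("example.jpg"))
#   with torch.no_grad():
#       outputs = model(batch["pixel_values"])
#   outputs.last_hidden_state.shape  # (1, num_patches, config.hidden_size)
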


class VisionTower(nn.Module):
    def __init__(self, vision_tower_cfg, delay_load=False):
        super().__init__()

        self.is_loaded = False

        self.config = vision_tower_cfg
        self.vision_tower_name = vision_tower_cfg.mm_vision_tower
        self.select_layer = vision_tower_cfg.mm_vision_select_layer

        self.image_processor = simple_image_processor

        if not delay_load:
            self.load_model()
        else:
            raise NotImplementedError("delay load is not implemented yet.")

    def load_model(self):
        if self.is_loaded:
            return

        self.vision_tower = SiglipVisionModel(self.config)
        # Drop the encoder layers above the selected feature layer and replace the pooling head,
        # since only the hidden states of the selected layer are used as image features.
        del self.vision_tower.vision_model.encoder.layers[(self.select_layer + 1):]
        self.vision_tower.vision_model.head = nn.Identity()
        self.vision_tower.requires_grad_(False)
        self.vision_tower.eval()

        self.is_loaded = True

    @torch.no_grad()
    def forward(self, images):
        if type(images) is list:
            image_features = []
            for image in images:
                image_forward_out = self.vision_tower(image.to(device=self.device, dtype=self.dtype).unsqueeze(0), output_hidden_states=True)
                image_feature = image_forward_out.hidden_states[-1].to(image.dtype)
                assert image_feature.shape[-2] == 729
                image_features.append(image_feature)
        else:
            image_forward_outs = self.vision_tower(images.to(device=self.device, dtype=self.dtype), output_hidden_states=True)
            image_features = image_forward_outs.hidden_states[-1].to(images.dtype)
            assert image_features.shape[-2] == 729

        return image_features

    @property
    def dummy_feature(self):
        return torch.zeros(1, self.hidden_size, device=self.device, dtype=self.dtype)

    @property
    def dtype(self):
        return next(self.vision_tower.parameters()).dtype

    @property
    def device(self):
        return next(self.vision_tower.parameters()).device

    @property
    def hidden_size(self):
        return self.config.hidden_size

    @property
    def num_patches(self):
        return (self.config.image_size // self.config.patch_size) ** 2
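

# Example usage of VisionTower (sketch; the mm_* attributes below are read by __init__ above,
# and the values shown here are illustrative assumptions, not documented defaults):
#
#   cfg = SiglipVisionConfig()
#   cfg.mm_vision_tower = "siglip-so400m"   # only stored as vision_tower_name
#   cfg.mm_vision_select_layer = -2         # encoder layers above this index are dropped
#   tower = VisionTower(cfg)
#   pixel_values = simple_image_processor(Image.open("example.jpg"))["pixel_values"]
#   features = tower(pixel_values)          # (1, 729, hidden_size) for a 384/14 configuration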