import math
from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from einops import rearrange
from peft import LoraConfig, get_peft_model
from torch import nn
from torch.nn import functional as F
from transformers import PreTrainedModel, add_start_docstrings
from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from transformers.models.clip.modeling_clip import CLIPMLP, CLIPAttention, CLIPTextEmbeddings, CLIPVisionEmbeddings, \
    CLIPVisionModelWithProjection, CLIPTextModelWithProjection, _expand_mask, CLIPOutput, clip_loss
from transformers.utils import add_start_docstrings_to_model_forward, replace_return_docstrings

from .configuration_depth import LanguageBindDepthConfig, CLIPVisionConfig, CLIPTextConfig


class PatchDropout(nn.Module):
    """
    Patch dropout from "Scaling Language-Image Pre-training via Masking"
    (https://arxiv.org/abs/2212.00794): during training, keep a random subset
    of the patch tokens and drop the rest, optionally always keeping the
    first (CLS) token.
    """

    def __init__(self, prob, exclude_first_token=True):
        super().__init__()
        assert 0 <= prob < 1.
        self.prob = prob
        self.exclude_first_token = exclude_first_token  # exclude the CLS token from dropout

    def forward(self, x, B, T):
        # x: (B * T, num_tokens, dim); a no-op at eval time or when prob == 0
        if not self.training or self.prob == 0.:
            return x

        if self.exclude_first_token:
            cls_tokens, x = x[:, :1], x[:, 1:]
        else:
            cls_tokens = torch.jit.annotate(torch.Tensor, x[:, :1])

        batch = x.size()[0]
        num_tokens = x.size()[1]

        batch_indices = torch.arange(batch)
        batch_indices = batch_indices[..., None]

        keep_prob = 1 - self.prob
        num_patches_keep = max(1, int(num_tokens * keep_prob))

        if T == 1:
            # images: sample an independent keep-set per example
            rand = torch.randn(batch, num_tokens)
            patch_indices_keep = rand.topk(num_patches_keep, dim=-1).indices
        else:
            # videos: sample one keep-set per clip and reuse it for all T frames
            rand = torch.randn(B, num_tokens)
            patch_indices_keep = rand.topk(num_patches_keep, dim=-1).indices
            patch_indices_keep = patch_indices_keep.unsqueeze(1).repeat(1, T, 1)
            patch_indices_keep = rearrange(patch_indices_keep, 'b t n -> (b t) n')

        x = x[batch_indices, patch_indices_keep]

        if self.exclude_first_token:
            x = torch.cat((cls_tokens, x), dim=1)

        return x
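

# A minimal usage sketch (hypothetical shapes, assuming a ViT token sequence
# with a leading CLS token; `B` is the clip batch size and `T` the number of
# frames, so the leading dimension of the input is B * T):
#
#     dropout = PatchDropout(prob=0.5)
#     tokens = torch.randn(2 * 8, 1 + 196, 768)    # (B*T, 1+N, D)
#     kept = dropout.train()(tokens, B=2, T=8)     # CLS kept, ~50% of patches dropped -> (16, 99, 768)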


class CLIPEncoderLayer(nn.Module):
    def __init__(self, config: LanguageBindDepthConfig):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.self_attn = CLIPAttention(config)
        self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.mlp = CLIPMLP(config)
        self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)

        self.add_time_attn = config.add_time_attn
        if self.add_time_attn:
            self.t = config.num_frames
            self.temporal_embedding = nn.Parameter(torch.zeros(1, config.num_frames, config.hidden_size))
            nn.init.normal_(self.temporal_embedding, std=config.hidden_size ** -0.5)

            self.temporal_attn = CLIPAttention(config)
            self.temporal_layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
            self.temporal_mlp = CLIPMLP(config)
            self.temporal_layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        causal_attention_mask: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.FloatTensor]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """

        if self.add_time_attn:
            bt, n, d = hidden_states.shape
            t = self.t

            # add temporal position embeddings (only needed for multi-frame inputs)
            if t != 1:
                n = hidden_states.shape[1]
                hidden_states = rearrange(hidden_states, '(b t) n d -> (b n) t d', t=t)
                hidden_states = hidden_states + self.temporal_embedding[:, :t, :]
                hidden_states = rearrange(hidden_states, '(b n) t d -> (b t) n d', n=n)

            # temporal attention: attend over the T frames at each spatial position
            residual = hidden_states
            hidden_states = rearrange(hidden_states, '(b t) n d -> (b n) t d', t=t)
            hidden_states = self.temporal_layer_norm1(hidden_states)
            hidden_states, attn_weights = self.temporal_attn(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                causal_attention_mask=causal_attention_mask,
                output_attentions=output_attentions,
            )
            hidden_states = residual + rearrange(hidden_states, '(b n) t d -> (b t) n d', n=n)

            # temporal MLP
            residual = hidden_states
            hidden_states = rearrange(hidden_states, '(b t) n d -> (b n) t d', t=t)
            hidden_states = self.temporal_layer_norm2(hidden_states)
            hidden_states = self.temporal_mlp(hidden_states)
            hidden_states = residual + rearrange(hidden_states, '(b n) t d -> (b t) n d', n=n)

        # spatial (standard CLIP) attention block
        residual = hidden_states

        hidden_states = self.layer_norm1(hidden_states)
        hidden_states, attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            causal_attention_mask=causal_attention_mask,
            output_attentions=output_attentions,
        )
        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.layer_norm2(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights,)

        return outputs
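

# A sketch of the shape round-trip used by the temporal blocks above
# (hypothetical sizes: B=2 clips, T=8 frames, N=50 tokens, D=768):
#
#     x = torch.randn(2 * 8, 50, 768)                     # (B*T, N, D)
#     x = rearrange(x, '(b t) n d -> (b n) t d', t=8)     # (B*N, T, D): attention runs over frames
#     x = rearrange(x, '(b n) t d -> (b t) n d', n=50)    # back to (B*T, N, D) for spatial attention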


class CLIPPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = LanguageBindDepthConfig
    base_model_prefix = "clip"
    supports_gradient_checkpointing = True
    _keys_to_ignore_on_load_missing = [r"position_ids"]

    def _init_weights(self, module):
        """Initialize the weights"""
        factor = self.config.initializer_factor
        if isinstance(module, CLIPTextEmbeddings):
            module.token_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
            module.position_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
        elif isinstance(module, CLIPVisionEmbeddings):
            factor = self.config.initializer_factor
            nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor)
            nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor)
            nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor)
        elif isinstance(module, CLIPAttention):
            factor = self.config.initializer_factor
            in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
            out_proj_std = (module.embed_dim**-0.5) * factor
            nn.init.normal_(module.q_proj.weight, std=in_proj_std)
            nn.init.normal_(module.k_proj.weight, std=in_proj_std)
            nn.init.normal_(module.v_proj.weight, std=in_proj_std)
            nn.init.normal_(module.out_proj.weight, std=out_proj_std)
        elif isinstance(module, CLIPMLP):
            factor = self.config.initializer_factor
            in_proj_std = (
                (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
            )
            fc_std = (2 * module.config.hidden_size) ** -0.5 * factor
            nn.init.normal_(module.fc1.weight, std=fc_std)
            nn.init.normal_(module.fc2.weight, std=in_proj_std)
        elif isinstance(module, LanguageBindDepth):
            nn.init.normal_(
                module.text_projection.weight,
                std=module.text_embed_dim**-0.5 * self.config.initializer_factor,
            )
            nn.init.normal_(
                module.visual_projection.weight,
                std=module.vision_embed_dim**-0.5 * self.config.initializer_factor,
            )
        elif isinstance(module, CLIPVisionModelWithProjection):
            nn.init.normal_(
                module.visual_projection.weight,
                std=self.config.hidden_size**-0.5 * self.config.initializer_factor,
            )
        elif isinstance(module, CLIPTextModelWithProjection):
            nn.init.normal_(
                module.text_projection.weight,
                std=self.config.hidden_size**-0.5 * self.config.initializer_factor,
            )

        if isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, CLIPEncoder):
            module.gradient_checkpointing = value


CLIP_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.

    Parameters:
        config ([`CLIPConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

CLIP_TEXT_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""

CLIP_VISION_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""

CLIP_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
        return_loss (`bool`, *optional*):
            Whether or not to return the contrastive loss.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


class CLIPEncoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self-attention layers. Each layer is a
    [`CLIPEncoderLayer`].

    Args:
        config: CLIPConfig
    """

    def __init__(self, config: LanguageBindDepthConfig):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([CLIPEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        inputs_embeds,
        attention_mask: Optional[torch.Tensor] = None,
        causal_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated
                vectors than the model's internal embedding lookup matrix.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Causal mask for the text model. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        hidden_states = inputs_embeds
        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs, output_attentions)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(encoder_layer),
                    hidden_states,
                    attention_mask,
                    causal_attention_mask,
                )
            else:
                layer_outputs = encoder_layer(
                    hidden_states,
                    attention_mask,
                    causal_attention_mask,
                    output_attentions=output_attentions,
                )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )
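

# Gradient checkpointing trades compute for memory: when
# `self.gradient_checkpointing and self.training` holds above, each encoder
# layer's activations are recomputed in the backward pass instead of stored.
# A hedged usage sketch (`gradient_checkpointing_enable` is the standard
# `transformers.PreTrainedModel` entry point, which reaches this flag through
# `_set_gradient_checkpointing`):
#
#     model = CLIPTextModel.from_pretrained("openai/clip-vit-base-patch32")
#     model.gradient_checkpointing_enable()  # flips CLIPEncoder.gradient_checkpointing to True
#     model.train()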

# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(
    input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
):
    """
    Make causal mask used for uni-directional self-attention.
    """
    bsz, tgt_len = input_ids_shape
    mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
    mask_cond = torch.arange(mask.size(-1), device=device)
    mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
    mask = mask.to(dtype)

    if past_key_values_length > 0:
        mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
    return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
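
# For example (hypothetical sizes), a (1, 4) input with float32 yields a
# (1, 1, 4, 4) mask whose upper triangle is the large negative fill value:
#
#     mask = _make_causal_mask(torch.Size([1, 4]), torch.float32, device=torch.device("cpu"))
#     # mask[0, 0] ==
#     # [[0, -inf, -inf, -inf],
#     #  [0,    0, -inf, -inf],
#     #  [0,    0,    0, -inf],
#     #  [0,    0,    0,    0]]   # "-inf" here is torch.finfo(torch.float32).min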


class CLIPTextTransformer(nn.Module):
    def __init__(self, config: CLIPTextConfig):
        super().__init__()
        self.config = config
        embed_dim = config.hidden_size
        self.embeddings = CLIPTextEmbeddings(config)
        self.encoder = CLIPEncoder(config)
        self.final_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)

    @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPTextConfig)
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:

        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is None:
            raise ValueError("You have to specify input_ids")

        input_shape = input_ids.size()
        input_ids = input_ids.view(-1, input_shape[-1])

        hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids)

        # CLIP's text model uses a causal mask; prepare it here.
        causal_attention_mask = _make_causal_mask(input_shape, hidden_states.dtype, device=hidden_states.device)
        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_len, src_len]
            attention_mask = _expand_mask(attention_mask, hidden_states.dtype)

        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            attention_mask=attention_mask,
            causal_attention_mask=causal_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_state = encoder_outputs[0]
        last_hidden_state = self.final_layer_norm(last_hidden_state)

        # Pool on the EOS token: take the features at the position of the
        # highest token id in each sequence (EOS has the largest id in CLIP's
        # vocabulary). Cast to torch.int for ONNX compatibility.
        pooled_output = last_hidden_state[
            torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device),
            input_ids.to(dtype=torch.int, device=last_hidden_state.device).argmax(dim=-1),
        ]

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
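

# A tiny sketch of the EOS pooling above (hypothetical ids; 49407 is the EOS
# id in the standard CLIP vocabulary, and the original CLIP pads with zeros):
#
#     input_ids = torch.tensor([[49406, 320, 1125, 49407, 0, 0]])  # BOS, "a", "photo", EOS, pad, pad
#     input_ids.argmax(dim=-1)  # tensor([3]) -> pooled_output = last_hidden_state[:, 3]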


@add_start_docstrings(
    """The text model from CLIP without any head or projection on top.""",
    CLIP_START_DOCSTRING,
)
class CLIPTextModel(CLIPPreTrainedModel):
    config_class = CLIPTextConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPTextConfig):
        super().__init__(config)
        self.text_model = CLIPTextTransformer(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.text_model.embeddings.token_embedding

    def set_input_embeddings(self, value):
        self.text_model.embeddings.token_embedding = value

    @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPTextConfig)
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, CLIPTextModel

        >>> model = CLIPTextModel.from_pretrained("openai/clip-vit-base-patch32")
        >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32")

        >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled (EOS token) states
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        return self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )


class CLIPVisionTransformer(nn.Module):
    def __init__(self, config: CLIPVisionConfig):
        super().__init__()
        self.config = config
        embed_dim = config.hidden_size

        self.embeddings = CLIPVisionEmbeddings(config)
        self.patch_dropout = PatchDropout(config.force_patch_dropout)
        self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)  # (sic) name kept for checkpoint compatibility
        self.encoder = CLIPEncoder(config)
        self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)

    @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPVisionConfig)
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:

        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        # Flatten any frame/pair dimensions into the batch so the encoder sees (B*T, C, H, W).
        if len(pixel_values.shape) == 7:
            # (b, pair, T, bs, C, H, W)
            b_new, pair_new, T, bs_new, channel_new, h_new, w_new = pixel_values.shape
            B = b_new * pair_new * bs_new
            pixel_values = pixel_values.reshape(B * T, channel_new, h_new, w_new)
        elif len(pixel_values.shape) == 5:
            # (b, C, T, H, W)
            B, _, T, _, _ = pixel_values.shape
            pixel_values = rearrange(pixel_values, 'b c t h w -> (b t) c h w')
        else:
            # (b, C, H, W): a single frame per example
            B, _, _, _ = pixel_values.shape
            T = 1

        hidden_states = self.embeddings(pixel_values)

        hidden_states = self.patch_dropout(hidden_states, B, T)

        hidden_states = self.pre_layrnorm(hidden_states)

        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = last_hidden_state[:, 0, :]
        pooled_output = self.post_layernorm(pooled_output)

        # Average the per-frame CLS features over the T frames of each example.
        pooled_output = pooled_output.reshape(B, T, -1).mean(1)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
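

# Input layouts accepted by CLIPVisionTransformer.forward, as a hedged sketch
# (hypothetical sizes; `vt` is an instance built from a LanguageBind-style
# CLIPVisionConfig, and the depth processor is assumed to have already
# replicated the single depth channel to C = 3):
#
#     vt(pixel_values=torch.randn(2, 3, 224, 224))      # (B, C, H, W):    B=2, T=1
#     vt(pixel_values=torch.randn(2, 3, 8, 224, 224))   # (B, C, T, H, W): B=2, T=8
#     # the 7-dim (b, pair, T, bs, C, H, W) layout is likewise flattened to (B*T, C, H, W)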


@add_start_docstrings(
    """The vision model from CLIP without any head or projection on top.""",
    CLIP_START_DOCSTRING,
)
class CLIPVisionModel(CLIPPreTrainedModel):
    config_class = CLIPVisionConfig
    main_input_name = "pixel_values"

    def __init__(self, config: CLIPVisionConfig):
        super().__init__(config)
        self.vision_model = CLIPVisionTransformer(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.vision_model.embeddings.patch_embedding

    @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPVisionConfig)
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, CLIPVisionModel

        >>> model = CLIPVisionModel.from_pretrained("openai/clip-vit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled CLS states
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        return self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )


@add_start_docstrings(CLIP_START_DOCSTRING)
class LanguageBindDepth(CLIPPreTrainedModel):
    config_class = LanguageBindDepthConfig

    def __init__(self, config: LanguageBindDepthConfig):
        super().__init__(config)

        if not isinstance(config.text_config, CLIPTextConfig):
            raise ValueError(
                "config.text_config is expected to be of type CLIPTextConfig but is of type"
                f" {type(config.text_config)}."
            )

        if not isinstance(config.vision_config, CLIPVisionConfig):
            raise ValueError(
                "config.vision_config is expected to be of type CLIPVisionConfig but is of type"
                f" {type(config.vision_config)}."
            )

        text_config = config.text_config
        vision_config = config.vision_config
        self.add_time_attn = vision_config.add_time_attn
        self.lora_r = vision_config.lora_r
        self.lora_alpha = vision_config.lora_alpha
        self.lora_dropout = vision_config.lora_dropout

        self.projection_dim = config.projection_dim
        self.text_embed_dim = text_config.hidden_size
        self.vision_embed_dim = vision_config.hidden_size

        self.text_model = CLIPTextTransformer(text_config)
        self.vision_model = CLIPVisionTransformer(vision_config)

        self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False)
        self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False)
        self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value))

        # Initialize weights and apply final processing
        self.post_init()
        self.convert_to_lora()
        self.resize_pos(self.vision_model.embeddings, vision_config)

    def convert_to_lora(self):
        # Wrap the vision encoder with LoRA adapters; lora_r == 0 disables LoRA.
        if self.lora_r == 0:
            return
        if self.add_time_attn:
            target_modules = ["temporal_attn.k_proj", "temporal_attn.v_proj",
                              "temporal_attn.q_proj", "temporal_attn.out_proj",
                              "temporal_mlp.fc1", "temporal_mlp.fc2"]
        else:
            target_modules = ["k_proj", "v_proj", "q_proj", "out_proj"]
        config = LoraConfig(
            r=self.lora_r,
            lora_alpha=self.lora_alpha,
            target_modules=target_modules,
            lora_dropout=self.lora_dropout,
            bias="none",
            modules_to_save=[],
        )
        self.vision_model.encoder.is_gradient_checkpointing = False
        self.vision_model.encoder = get_peft_model(self.vision_model.encoder, config)
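
    # A hedged sketch of the effect of convert_to_lora (assuming
    # vision_config.lora_r > 0): after the call, only the injected LoRA A/B
    # matrices in the targeted projections require gradients, e.g.
    #
    #     trainable = [n for n, p in model.vision_model.encoder.named_parameters() if p.requires_grad]
    #     assert all("lora_" in n for n in trainable)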

    def resize_pos(self, m, vision_config):
        # num_mel_bins/target_length are carried over from the audio modality;
        # when set, the input is treated as a (num_mel_bins, target_length) grid.
        if vision_config.num_mel_bins != 0 and vision_config.target_length != 0:
            m.image_size = [vision_config.num_mel_bins, vision_config.target_length]
        m.config.image_size = [m.image_size, m.image_size] if isinstance(m.image_size, int) else m.image_size

        # Interpolate the pretrained position embeddings to the new patch grid.
        old_pos_embed_state_dict = m.position_embedding.state_dict()
        old_pos_embed = old_pos_embed_state_dict['weight']
        dtype = old_pos_embed.dtype
        grid_size = [m.config.image_size[0] // m.patch_size, m.config.image_size[1] // m.patch_size]
        extra_tokens = 1  # the CLS token keeps its embedding unchanged
        new_seq_len = grid_size[0] * grid_size[1] + extra_tokens
        if new_seq_len == old_pos_embed.shape[0]:
            # The grid is unchanged; nothing to resize.
            return

        m.num_patches = grid_size[0] * grid_size[1]
        m.num_positions = m.num_patches + 1
        m.register_buffer("position_ids", torch.arange(m.num_positions).expand((1, -1)))
        new_position_embedding = nn.Embedding(m.num_positions, m.embed_dim)

        if extra_tokens:
            pos_emb_tok, pos_emb_img = old_pos_embed[:extra_tokens], old_pos_embed[extra_tokens:]
        else:
            pos_emb_tok, pos_emb_img = None, old_pos_embed
        old_grid_size = [int(math.sqrt(len(pos_emb_img)))] * 2

        # Bicubic interpolation over the 2D grid of patch position embeddings.
        pos_emb_img = pos_emb_img.reshape(1, old_grid_size[0], old_grid_size[1], -1).permute(0, 3, 1, 2)
        pos_emb_img = F.interpolate(
            pos_emb_img,
            size=grid_size,
            mode='bicubic',
            antialias=True,
            align_corners=False,
        )
        pos_emb_img = pos_emb_img.permute(0, 2, 3, 1).reshape(1, grid_size[0] * grid_size[1], -1)[0]
        if pos_emb_tok is not None:
            new_pos_embed = torch.cat([pos_emb_tok, pos_emb_img], dim=0)
        else:
            new_pos_embed = pos_emb_img
        old_pos_embed_state_dict['weight'] = new_pos_embed.to(dtype)
        m.position_embedding = new_position_embedding
        m.position_embedding.load_state_dict(old_pos_embed_state_dict)
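
    # A shape-only sketch of the interpolation above (hypothetical sizes): a
    # pretrained 7x7 grid (1 + 49 positions, dim 768) resized to a 12x14 grid
    # yields 1 + 168 positions:
    #
    #     old = torch.randn(50, 768)                                   # [CLS] + 7*7
    #     img = old[1:].reshape(1, 7, 7, 768).permute(0, 3, 1, 2)      # (1, 768, 7, 7)
    #     img = F.interpolate(img, size=[12, 14], mode='bicubic',
    #                         antialias=True, align_corners=False)     # (1, 768, 12, 14)
    #     new = torch.cat([old[:1], img.permute(0, 2, 3, 1).reshape(168, 768)], dim=0)  # (169, 768)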

    @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING)
    def get_text_features(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> torch.FloatTensor:
        r"""
        Returns:
            text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by
            applying the projection layer to the pooled output of [`CLIPTextModel`].

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, CLIPModel

        >>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32")

        >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
        >>> text_features = model.get_text_features(**inputs)
        ```"""
        # Use CLIP model's config for some fields (if specified) instead of those of vision & text components.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        text_outputs = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = text_outputs[1]
        text_features = self.text_projection(pooled_output)

        return text_features

    @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING)
    def get_image_features(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> torch.FloatTensor:
        r"""
        Returns:
            image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by
            applying the projection layer to the pooled output of [`CLIPVisionModel`].

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, CLIPModel

        >>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> image_features = model.get_image_features(**inputs)
        ```"""
        # Use CLIP model's config for some fields (if specified) instead of those of vision & text components.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        vision_outputs = self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = vision_outputs[1]
        image_features = self.visual_projection(pooled_output)

        return image_features

    @add_start_docstrings_to_model_forward(CLIP_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CLIPOutput, config_class=LanguageBindDepthConfig)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        return_loss: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CLIPOutput]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, CLIPModel

        >>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(
        ...     text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
        ... )

        >>> outputs = model(**inputs)
        >>> logits_per_image = outputs.logits_per_image  # this is the image-text similarity score
        >>> probs = logits_per_image.softmax(dim=1)  # we can take the softmax to get the label probabilities
        ```"""
        # Use CLIP model's config for some fields (if specified) instead of those of vision & text components.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        vision_outputs = self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        text_outputs = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        image_embeds = vision_outputs[1]
        image_embeds = self.visual_projection(image_embeds)

        text_embeds = text_outputs[1]
        text_embeds = self.text_projection(text_embeds)

        # normalized features
        image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)
        text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)

        # cosine similarity as logits
        logit_scale = self.logit_scale.exp()
        logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale
        logits_per_image = logits_per_text.t()

        loss = None
        if return_loss:
            loss = clip_loss(logits_per_text)

        if not return_dict:
            output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
            return ((loss,) + output) if loss is not None else output

        return CLIPOutput(
            loss=loss,
            logits_per_image=logits_per_image,
            logits_per_text=logits_per_text,
            text_embeds=text_embeds,
            image_embeds=image_embeds,
            text_model_output=text_outputs,
            vision_model_output=vision_outputs,
        )
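

# End-to-end usage sketch (hedged: the checkpoint name and preprocessing are
# assumptions, not pinned by this file):
#
#     model = LanguageBindDepth.from_pretrained("LanguageBind/LanguageBind_Depth")
#     text_features = model.get_text_features(input_ids=input_ids)           # (batch, projection_dim)
#     depth_features = model.get_image_features(pixel_values=pixel_values)   # (batch, projection_dim)
#     # forward() returns cosine similarities of the L2-normalised embeddings,
#     # scaled by logit_scale.exp(), as logits_per_text / logits_per_image.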