import inspect
import tempfile
import unittest

import numpy as np

from transformers import CLIPConfig, CLIPTextConfig, CLIPVisionConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_flax_available():
    import jax

    from transformers.models.clip.modeling_flax_clip import (
        FlaxCLIPModel,
        FlaxCLIPTextModel,
        FlaxCLIPTextModelWithProjection,
        FlaxCLIPVisionModel,
    )

class FlaxCLIPVisionModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
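        # pixel_values use the channels-first layout: (batch_size, num_channels, height, width)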
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = CLIPVisionConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxCLIPVisionModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_flax_common.py, as CLIP does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (FlaxCLIPVisionModel,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxCLIPVisionModelTester(self)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
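            # signature.parameters is an OrderedDict => so arg_names order is deterministic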
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs).to_tuple()
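
                # run the same function once with JIT enabled and once with it disabled;
                # both paths should return tuples of matching length and shapes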
                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict)

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict)

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)

            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states

            self.assertEqual(len(hidden_states), self.model_tester.num_hidden_layers + 1)

            # CLIP has a different seq_length: the number of patches + 1 (for the [CLS] token)
            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            patch_size = (self.model_tester.patch_size, self.model_tester.patch_size)
            num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
            seq_length = num_patches + 1

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also works when set via the config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in CLIP, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.model_tester.image_size, self.model_tester.image_size)
        patch_size = (self.model_tester.patch_size, self.model_tester.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_length = num_patches + 1

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also works when set via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            out_len = len(outputs)

            # check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

    # FlaxCLIPVisionModel does not have any base model
    def test_save_load_from_base(self):
        pass

    # FlaxCLIPVisionModel does not have any base model
    def test_save_load_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("openai/clip-vit-base-patch32", from_pt=True)
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)


class FlaxCLIPTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
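
        # overwrite the random mask so every row is contiguous: ones up to a random
        # start index, zeros afterwards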
        if input_mask is not None:
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = CLIPTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_flax
class FlaxCLIPTextModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxCLIPTextModel, FlaxCLIPTextModelWithProjection) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxCLIPTextModelTester(self)

    # FlaxCLIPTextModel does not have any base model
    def test_save_load_from_base(self):
        pass

    # FlaxCLIPTextModel does not have any base model
    def test_save_load_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("openai/clip-vit-base-patch32", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


class FlaxCLIPModelTester:
    def __init__(self, parent, is_training=True):
        self.parent = parent
        self.text_model_tester = FlaxCLIPTextModelTester(parent)
        self.vision_model_tester = FlaxCLIPVisionModelTester(parent)
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs()
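
        # combine the text and vision configs into a single CLIPConfig; projection_dim
        # is the dimensionality of the shared text/image embedding space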
        config = CLIPConfig.from_text_vision_configs(text_config, vision_config, projection_dim=64)

        return config, input_ids, attention_mask, pixel_values

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, pixel_values = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "pixel_values": pixel_values,
        }
        return config, inputs_dict


@require_flax
class FlaxCLIPModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxCLIPModel,) if is_flax_available() else ()
    test_attention_outputs = False

    def setUp(self):
        self.model_tester = FlaxCLIPModelTester(self)

    # hidden_states are tested in the individual model tests above
    def test_hidden_states_output(self):
        pass

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, pixel_values, **kwargs):
                    return model(input_ids=input_ids, pixel_values=pixel_values, **kwargs).to_tuple()

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict)

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict)

                self.assertEqual(len(outputs), len(jitted_outputs))
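                # only the first 4 entries (logits_per_image, logits_per_text, text_embeds,
                # image_embeds) are plain arrays; the remaining entries are nested model
                # outputs, so shapes are only compared on the first 4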
                for jitted_output, output in zip(jitted_outputs[:4], outputs[:4]):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
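            # signature.parameters is an OrderedDict => so arg_names order is deterministic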
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["input_ids", "pixel_values", "attention_mask", "position_ids"]
            self.assertListEqual(arg_names[:4], expected_arg_names)

    def test_get_image_features(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        model = FlaxCLIPModel(config)

        @jax.jit
        def model_jitted(pixel_values):
            return model.get_image_features(pixel_values=pixel_values)

        with self.subTest("JIT Enabled"):
            jitted_output = model_jitted(inputs_dict["pixel_values"])

        with self.subTest("JIT Disabled"):
            with jax.disable_jit():
                output = model_jitted(inputs_dict["pixel_values"])

        self.assertEqual(jitted_output.shape, output.shape)
        self.assertTrue(np.allclose(jitted_output, output, atol=1e-3))

    def test_get_text_features(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        model = FlaxCLIPModel(config)

        @jax.jit
        def model_jitted(input_ids, attention_mask, **kwargs):
            return model.get_text_features(input_ids=input_ids, attention_mask=attention_mask)

        with self.subTest("JIT Enabled"):
            jitted_output = model_jitted(**inputs_dict)

        with self.subTest("JIT Disabled"):
            with jax.disable_jit():
                output = model_jitted(**inputs_dict)

        self.assertEqual(jitted_output.shape, output.shape)
        self.assertTrue(np.allclose(jitted_output, output, atol=1e-3))

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("openai/clip-vit-base-patch32", from_pt=True)
            outputs = model(input_ids=np.ones((1, 1)), pixel_values=np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)

    # overwrite from common since FlaxCLIPModel returns nested output
    # which is not supported in the common test
    def test_from_pretrained_save_pretrained(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)

                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                outputs = model(**prepared_inputs_dict).to_tuple()

                # verify that normal save_pretrained works as expected
                with tempfile.TemporaryDirectory() as tmpdirname:
                    model.save_pretrained(tmpdirname)
                    model_loaded = model_class.from_pretrained(tmpdirname)

                outputs_loaded = model_loaded(**prepared_inputs_dict).to_tuple()[:4]
                for output_loaded, output in zip(outputs_loaded, outputs):
                    self.assert_almost_equals(output_loaded, output, 1e-3)

                # verify that save_pretrained with an explicit `params` argument
                # (as used in distributed training) works as expected
                with tempfile.TemporaryDirectory() as tmpdirname:
                    model.save_pretrained(tmpdirname, params=model.params)
                    model_loaded = model_class.from_pretrained(tmpdirname)

                outputs_loaded = model_loaded(**prepared_inputs_dict).to_tuple()[:4]
                for output_loaded, output in zip(outputs_loaded, outputs):
                    self.assert_almost_equals(output_loaded, output, 1e-3)