| """Testing suite for the PyTorch Bit model.""" |
|
|
| import unittest |
|
|
| from transformers import BitConfig |
| from transformers.testing_utils import require_torch, require_vision, slow, torch_device |
| from transformers.utils import cached_property, is_torch_available, is_vision_available |
|
|
| from ...test_backbone_common import BackboneTesterMixin |
| from ...test_configuration_common import ConfigTester |
| from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor |
| from ...test_pipeline_mixin import PipelineTesterMixin |
|
|
|
|
| if is_torch_available(): |
| import torch |
| from torch import nn |
|
|
| from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel |
|
|
|
|
| if is_vision_available(): |
| from PIL import Image |
|
|
|
|
| class BitModelTester: |
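    # Builds a small BitConfig plus random pixel values (and labels) shared by the model,
    # classification and backbone checks below.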
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
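        # the stem and stages downsample the input by an overall factor of 32, so a 32x32 image
        # should yield a 1x1 final feature map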
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
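        # the backbone exposes one feature map and channel count per requested stage,
        # and falls back to the last stage when out_features is None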
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
| """ |
| Here we also overwrite some of the tests of test_modeling_common.py, as Bit does not use input_ids, inputs_embeds, |
| attention_mask and seq_length. |
| """ |
|
|
| all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () |
| pipeline_model_mapping = ( |
| {"image-feature-extraction": BitModel, "image-classification": BitForImageClassification} |
| if is_torch_available() |
| else {} |
| ) |
|
|
| fx_compatible = False |
| test_pruning = False |
| test_resize_embeddings = False |
| test_head_masking = False |
| has_attentions = False |
| test_torch_exportable = True |
|
|
| def setUp(self): |
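        # ConfigTester runs the shared config checks; Bit is vision-only, so there is no text modality to test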
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=BitConfig, has_text_modality=False, common_properties=["num_channels"]
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_get_set_embeddings(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
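        # normalization layers (batch norm / group norm) are expected to start with weight 1 and bias 0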
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
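            # hidden states should contain the initial embedding output (at 1/4 of the input resolution)
            # plus one entry per stage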
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also works when set in the config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model_name = "google/bit-50"
        model = BitModel.from_pretrained(model_name)
        self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BitImageProcessor.from_pretrained("google/bit-50") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
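        # run the 1000-class google/bit-50 checkpoint on the test image and compare the first few logits
        # against reference values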
        model = BitForImageClassification.from_pretrained("google/bit-50").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.6526, -0.5263, -1.4398]).to(torch_device)

        torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)


@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)