import datasets
from transformers import CLIPProcessor, CLIPModel
import torch
from PIL import Image

# Load the Met Museum Open Access dataset from the Hugging Face Hub
dataset = datasets.load_dataset("metmuseum/openaccess")

# Load the pretrained CLIP model and its processor
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

# Use Apple's MPS backend when available, otherwise fall back to the CPU
device = torch.device("mps" if torch.backends.mps.is_available() else "cpu")
model.to(device)

# Map each Object ID to its image embedding
embeddings_map = {}

def create_embedding(image_pil):
    """Return the CLIP image embedding for a PIL image, or None if processing fails."""
    try:
        inputs = processor(images=image_pil, return_tensors="pt").to(device)
        with torch.no_grad():
            embeddings = model.get_image_features(**inputs)
        return embeddings
    except Exception as e:
        print(f"Error processing image: {e}")
        return None

# Embed every image in the dataset, keyed by its Object ID
for item in dataset['train']:
    object_id = item['Object ID']
    image_pil = item['jpg']
    if image_pil:
        embedding = create_embedding(image_pil)
        if embedding is not None:
            embeddings_map[object_id] = embedding.cpu().numpy()

# Build a Hugging Face Dataset from the collected embeddings
# (tolist()[0] drops the batch dimension, leaving one flat vector per object)
embedding_dataset = datasets.Dataset.from_dict({
    'Object ID': list(embeddings_map.keys()),
    'Embedding': [embedding.tolist()[0] for embedding in embeddings_map.values()]
})

# Save the embeddings dataset to disk
embedding_dataset.save_to_disk('metmuseum_embeddings')
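
# A minimal sketch of reading the saved embeddings back as a sanity check;
# it assumes the 'metmuseum_embeddings' directory produced above exists on disk.
reloaded = datasets.load_from_disk('metmuseum_embeddings')
print(reloaded)
print(len(reloaded['Embedding'][0]))  # embedding dimensionality (512 for this CLIP model)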