from typing import Dict, Any
from PIL import Image
import torch
from io import BytesIO
from transformers import BlipForConditionalGeneration, BlipProcessor
|
|
# Run on GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
|
class EndpointHandler():
    def __init__(self, path=""):
        # Load the BLIP processor and captioning model once at startup,
        # moving the model to the target device in eval mode for inference.
        self.processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
        self.model = BlipForConditionalGeneration.from_pretrained(
            "Salesforce/blip-image-captioning-base"
        ).to(device)
        self.model.eval()
|
|
|
|
    def __call__(self, data: Any) -> Dict[str, Any]:
        """
        Args:
            data (:obj:`dict`):
                Includes the input data and the parameters for the inference.
        Return:
            A :obj:`dict` with one list, e.g. {"captions": ["A hugging face at the office"]}, containing:
            - "captions": A list of strings, one generated caption per input image.
        """
        inputs = data.pop("inputs", data)
        parameters = data.pop("parameters", {})

        # Decode raw image bytes into PIL images before preprocessing.
        if not isinstance(inputs, list):
            inputs = [inputs]
        raw_images = [Image.open(BytesIO(img)) for img in inputs]

        processed_image = self.processor(images=raw_images, return_tensors="pt")
        processed_image["pixel_values"] = processed_image["pixel_values"].to(device)
        # Merge any user-supplied generation parameters (e.g. max_new_tokens).
        processed_image = {**processed_image, **parameters}
| |
        # Generate caption token ids without tracking gradients.
        with torch.no_grad():
            out = self.model.generate(**processed_image)

        # Decode the generated token ids into caption strings.
        captions = self.processor.batch_decode(out, skip_special_tokens=True)

        return {"captions": captions}
|
|