import os
from typing import Any, Dict, List

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

class EndpointHandler:
    """Custom Inference Endpoints handler that loads a fine-tuned causal LM
    from a training checkpoint folder."""

    def __init__(self, path: str = ""):
        checkpoint_path = None

        # Inference Endpoints mounts the model repository at /repository; the
        # checkpoint search below covers that location explicitly, so fall
        # back to the current directory here.
        if not path or path == "/repository":
            base_path = "."
        else:
            base_path = path

        # Candidate checkpoint locations, searched in order.
        possible_paths = [
            os.path.join(base_path, "checkpoint-100"),
            os.path.join(".", "checkpoint-100"),
            os.path.join("/repository", "checkpoint-100"),
            "checkpoint-100",
        ]
| |
| for check_path in possible_paths: |
| if os.path.exists(check_path) and os.path.isdir(check_path): |
| |
| files = os.listdir(check_path) |
| if any(f in files for f in ['config.json', 'pytorch_model.bin', 'model.safetensors']): |
| checkpoint_path = check_path |
| break |
| |
| if checkpoint_path is None: |
| print(f"Available files in base path: {os.listdir(base_path) if os.path.exists(base_path) else 'Path does not exist'}") |
| raise ValueError("Could not find checkpoint-100 folder with model files") |
| |
| print(f"Loading model from: {checkpoint_path}") |
| print(f"Files in checkpoint: {os.listdir(checkpoint_path)}") |

        # Load the tokenizer and model from the resolved checkpoint.
        self.tokenizer = AutoTokenizer.from_pretrained(checkpoint_path, trust_remote_code=True)
        self.model = AutoModelForCausalLM.from_pretrained(
            checkpoint_path,
            device_map="auto",
            torch_dtype=torch.bfloat16,
            trust_remote_code=True,
        )
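        # Note: torch.bfloat16 assumes hardware with bf16 support (e.g. Ampere
        # or newer GPUs). A hedged fallback sketch, not part of the original:
        #   dtype = torch.bfloat16 if torch.cuda.is_bf16_supported() else torch.float32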

        # Many causal LMs ship without a pad token; reuse EOS so padding works.
        if self.tokenizer.pad_token is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
        """
        Args:
            data: request payload with:
                inputs (str): the prompt to generate from
                parameters (dict): optional generation parameters
        Returns:
            A list containing one dict with the generated text; it is
            serialized and returned as the response body.
        """

        # Standard handler contract: the prompt lives under "inputs"; if the
        # key is absent, fall back to the raw payload.
        inputs = data.pop("inputs", data)
        parameters = data.pop("parameters", {})

        # Coerce non-string payloads defensively.
        if isinstance(inputs, str):
            input_text = inputs
        else:
            input_text = str(inputs)

        # Generation defaults; each can be overridden via "parameters".
        max_new_tokens = parameters.get("max_new_tokens", 1000)
        temperature = parameters.get("temperature", 0.1)
        do_sample = parameters.get("do_sample", True)
        top_p = parameters.get("top_p", 0.9)
        return_full_text = parameters.get("return_full_text", False)

        # Tokenize the prompt; the BatchEncoding carries input_ids and the
        # attention mask, so it can be passed to generate() as keyword args.
        model_inputs = self.tokenizer(
            input_text,
            return_tensors="pt",
            padding=True,
            truncation=True,
            max_length=2048,
        ).to(self.model.device)

        with torch.no_grad():
            generated_ids = self.model.generate(
                **model_inputs,
                max_new_tokens=max_new_tokens,
                temperature=temperature,
                do_sample=do_sample,
                top_p=top_p,
                pad_token_id=self.tokenizer.pad_token_id,
                eos_token_id=self.tokenizer.eos_token_id,
            )
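        # Note: when do_sample is False, transformers ignores temperature and
        # top_p (and may warn); a stricter variant would pass them only when
        # sampling is enabled.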

        if return_full_text:
            generated_text = self.tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        else:
            # Strip the prompt tokens so only the completion is returned.
            new_tokens = generated_ids[0][model_inputs["input_ids"].shape[1]:]
            generated_text = self.tokenizer.decode(new_tokens, skip_special_tokens=True)

        return [{"generated_text": generated_text}]
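

# Minimal local smoke test (a sketch, not part of the deployed handler: it
# assumes a "checkpoint-100" folder with model files exists in the working
# directory, as the discovery logic above requires).
if __name__ == "__main__":
    handler = EndpointHandler()
    output = handler({"inputs": "Once upon a time", "parameters": {"max_new_tokens": 32}})
    print(output)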