import torch
from typing import Any, Dict, List

from transformers import AutoModelForCausalLM, AutoTokenizer

class EndpointHandler:
    def __init__(self, path=""):
        # Load the tokenizer and model from the checkpoint directory the
        # endpoint mounts at `path`.
        self.tokenizer = AutoTokenizer.from_pretrained(path)
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        # device_map="auto" lets accelerate place the weights, fp16 halves
        # memory use, and trust_remote_code is needed for checkpoints that
        # ship custom modeling code.
        self.model = AutoModelForCausalLM.from_pretrained(
            path,
            device_map="auto",
            torch_dtype=torch.float16,
            trust_remote_code=True,
        )

    def __call__(self, data: Dict[str, Any]) -> List[Dict[str, str]]:
        # Unpack the standard Inference Endpoints payload:
        # {"inputs": "...", "parameters": {...}}.
        inputs = data.pop("inputs", data)
        parameters = data.pop("parameters", {})
        # return_full_text is handled here, not by generate().
        return_full_text = parameters.pop("return_full_text", True)

        # Tokenize the prompt and move it to the model's device.
        model_inputs = self.tokenizer(
            inputs, return_tensors="pt", return_token_type_ids=False
        ).to(self.device)
        input_len = model_inputs["input_ids"].shape[-1]

        # Remaining parameters (e.g. max_new_tokens) are forwarded to generate().
        outputs = self.model.generate(**model_inputs, **parameters)[0]

        # generate() returns the prompt followed by the completion; drop the
        # prompt tokens when the client asked for the completion only.
        if not return_full_text:
            outputs = outputs[input_len:]

        prediction = self.tokenizer.decode(outputs, skip_special_tokens=True)

        return [{"generated_text": prediction}]
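

# A minimal local smoke test, assuming the handler is invoked outside an
# endpoint. MODEL_PATH is a placeholder, not part of the handler contract;
# substitute any causal LM checkpoint. Note that device_map="auto" assumes
# accelerate is installed, and fp16 weights work best on a GPU.
if __name__ == "__main__":
    MODEL_PATH = "gpt2"  # placeholder checkpoint for local testing
    handler = EndpointHandler(path=MODEL_PATH)
    payload = {
        "inputs": "The quick brown fox",
        "parameters": {"max_new_tokens": 20, "return_full_text": False},
    }
    # Expected shape: [{"generated_text": "..."}]
    print(handler(payload))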