from typing import Dict, Any
from transformers import AutoProcessor, Qwen2VLForConditionalGeneration
from PIL import Image
import io
import base64
import requests
import torch


class EndpointHandler:
    def __init__(self, path=""):
        # Load the processor and model from the repository path provided by the endpoint.
        # device_map="auto" lets accelerate place the weights on the available GPU(s)
        # and falls back to CPU when none is present, so no explicit .to(device) is needed.
        self.processor = AutoProcessor.from_pretrained(path)
        self.model = Qwen2VLForConditionalGeneration.from_pretrained(
            path, device_map="auto"
        )
        self.model.eval()

    def __call__(self, data: Any) -> Dict[str, Any]:
        inputs = data.pop("inputs", data)
        image_input = inputs.get('image')
        text_input = inputs.get('text', "Describe this image.")

        if not image_input:
            return {"error": "No image provided."}

        try:
            if image_input.startswith('http'):
                response = requests.get(image_input, stream=True)
                if response.status_code == 200:
                    image = Image.open(response.raw).convert('RGB')
                else:
                    return {"error": f"Failed to fetch image. Status code: {response.status_code}"}
            else:
                image_data = base64.b64decode(image_input)
                image = Image.open(io.BytesIO(image_data)).convert('RGB')
        except Exception as e:
            return {"error": f"Failed to process the image. Details: {str(e)}"}

        try:
            # Single-turn conversation; the processor inserts the image tokens when
            # the chat template is applied.
            conversation = [
                {
                    "role": "user",
                    "content": [
                        {"type": "image"},
                        {"type": "text", "text": text_input},
                    ],
                }
            ]

            text_prompt = self.processor.apply_chat_template(
                conversation, add_generation_prompt=True
            )

            inputs = self.processor(
                text=[text_prompt],
                images=[image],
                padding=True,
                return_tensors="pt",
            )

            # Move the input tensors to the same device as the model weights.
            inputs = inputs.to(self.model.device)

            with torch.no_grad():
                output_ids = self.model.generate(**inputs, max_new_tokens=128)

            # Drop the prompt tokens so only newly generated tokens are decoded.
            generated_ids = [
                output_id[len(input_id):]
                for input_id, output_id in zip(inputs.input_ids, output_ids)
            ]

            output_text = self.processor.batch_decode(
                generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
            )[0]

            return {"generated_text": output_text}

        except Exception as e:
            return {"error": f"Failed during generation. Details: {str(e)}"}