import torch
import torchvision.transforms as T
from PIL import Image
from torchvision.transforms.functional import InterpolationMode
from transformers import AutoModel, AutoTokenizer
import gradio as gr
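
# Gradio demo for OpenGVLab/InternVL2_5-78B: one tab extracts text from an
# uploaded image, the other holds a text-only chat with the model.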

# Normalization statistics of the ImageNet dataset, used by InternVL's
# image preprocessing.
IMAGENET_MEAN = (0.485, 0.456, 0.406)
IMAGENET_STD = (0.229, 0.224, 0.225)


def build_transform(input_size):
    # Convert to RGB if needed, resize to a square tile, and normalize
    # with the ImageNet statistics above.
    transform = T.Compose([
        T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
        T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
        T.ToTensor(),
        T.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD)
    ])
    return transform


def dynamic_preprocess(image, min_num=1, max_num=12, image_size=448, use_thumbnail=False):
    # Split the image into a grid of image_size x image_size tiles, picking
    # the grid whose aspect ratio best matches the input image.
    orig_width, orig_height = image.size
    aspect_ratio = orig_width / orig_height
    # Candidate (cols, rows) grids with min_num <= cols * rows <= max_num.
    target_ratios = sorted(
        set(
            (i, j)
            for n in range(min_num, max_num + 1)
            for i in range(1, n + 1)
            for j in range(1, n + 1)
            if min_num <= i * j <= max_num
        ),
        key=lambda x: x[0] * x[1]
    )
    # Choose the candidate grid whose aspect ratio is closest to the image's.
    target_aspect_ratio = min(target_ratios, key=lambda r: abs(aspect_ratio - r[0] / r[1]))
    target_width = image_size * target_aspect_ratio[0]
    target_height = image_size * target_aspect_ratio[1]
    blocks = target_aspect_ratio[0] * target_aspect_ratio[1]
    resized_img = image.resize((target_width, target_height))
    cols = target_aspect_ratio[0]  # tiles per row
    processed_images = [
        resized_img.crop((
            (i % cols) * image_size,
            (i // cols) * image_size,
            ((i % cols) + 1) * image_size,
            ((i // cols) + 1) * image_size,
        ))
        for i in range(blocks)
    ]
    # Optionally append a thumbnail of the whole image as global context.
    if use_thumbnail and len(processed_images) != 1:
        thumbnail_img = image.resize((image_size, image_size))
        processed_images.append(thumbnail_img)
    return processed_images


def load_image(image, input_size=448, max_num=12):
    # Preprocess a PIL image into a stacked tensor of normalized tiles.
    transform = build_transform(input_size=input_size)
    images = dynamic_preprocess(image, image_size=input_size, use_thumbnail=True, max_num=max_num)
    pixel_values = [transform(tile) for tile in images]
    pixel_values = torch.stack(pixel_values)
    return pixel_values
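
# The stacked tensor has shape (num_tiles, 3, input_size, input_size); since
# use_thumbnail=True appends a full-image thumbnail whenever more than one
# tile is produced, num_tiles can reach max_num + 1.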


path = 'OpenGVLab/InternVL2_5-78B'
model = AutoModel.from_pretrained(
    path,
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
    device_map="auto"
)
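# Note: device_map="auto" lets Accelerate shard the 78B checkpoint across all
# visible GPUs; in bfloat16 the weights alone take roughly 160 GB. If the
# vision encoder and the language model's first/last layers land on different
# devices, chat() can hit device-mismatch errors; the InternVL model card
# shows a hand-written device_map (split_model) for that case.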


tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True, use_fast=False)
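# InternVL's chat() method lives in custom code on the Hub, so
# trust_remote_code=True is required for the model; the official usage
# snippets pass it (along with use_fast=False) for the tokenizer as well.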


def process_image(image):
    try:
        # Tile the image and move the tiles to the GPU in bfloat16 to match
        # the model weights.
        pixel_values = load_image(image, max_num=12).to(torch.bfloat16).cuda()
        generation_config = dict(max_new_tokens=1024, do_sample=True)
        # The <image> placeholder marks where the image tiles are injected
        # into the prompt.
        question = '<image>\nExtract text from the image, respond with only the extracted text.'
        response = model.chat(tokenizer, pixel_values, question, generation_config)
        return response
    except Exception as e:
        return f"Error: {e}"


def chatbot(input_text, history=None):
    # Text-only turn: InternVL's chat() accepts pixel_values=None and tracks
    # the conversation in `history`, a list of (question, answer) pairs.
    history = history if history is not None else []
    try:
        generation_config = dict(max_new_tokens=1024, do_sample=True)
        response, updated_history = model.chat(
            tokenizer, None, input_text, generation_config,
            history=history, return_history=True
        )
        return response, updated_history
    except Exception as e:
        return f"Error: {e}", history


with gr.Blocks() as demo:
    with gr.Tab("Image Processing"):
        gr.Markdown("Upload an image and the InternVL model will extract the text it contains.")
        image_input = gr.Image(type="pil")
        image_output = gr.Textbox(label="Response")
        image_btn = gr.Button("Process")
        image_btn.click(process_image, inputs=image_input, outputs=image_output)

    with gr.Tab("Chatbot"):
        gr.Markdown("Chat with the model.")
        chatbot_input = gr.Textbox(label="Your Message")
        chatbot_output = gr.Textbox(label="Response")
        # Conversation history persists across turns via session state.
        chatbot_history = gr.State([])
        chatbot_btn = gr.Button("Send")
        chatbot_btn.click(chatbot, inputs=[chatbot_input, chatbot_history], outputs=[chatbot_output, chatbot_history])
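        # Note: this Textbox shows only the latest reply; a gr.Chatbot
        # component could render the full conversation instead.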


if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)
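    # Listens on all interfaces; open http://<host>:7860 in a browser.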