import argparse
import torch

from videollama2.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN, NUM_FRAMES
from videollama2.conversation import conv_templates, SeparatorStyle
from videollama2.model.builder import load_pretrained_model
from videollama2.utils import disable_torch_init
from videollama2.mm_utils import process_images, tokenizer_image_token, get_model_name_from_path, tokenizer_MMODAL_token

import numpy as np
from PIL import Image
from decord import VideoReader, cpu

import requests
from io import BytesIO
from transformers import TextStreamer


def load_image(image_file):
    """Load an RGB image from a local path or an http(s) URL."""
    if image_file.startswith('http://') or image_file.startswith('https://'):
        response = requests.get(image_file)
        image = Image.open(BytesIO(response.content)).convert('RGB')
    else:
        image = Image.open(image_file).convert('RGB')
    return image


def load_video(video_file):
    """Uniformly sample NUM_FRAMES frames from a video file with decord."""
    decord_vr = VideoReader(uri=video_file, ctx=cpu(0))
    duration = len(decord_vr)
    # Evenly spaced frame indices across the whole clip.
    frame_id_list = np.linspace(0, duration-1, NUM_FRAMES, dtype=int)
    video = decord_vr.get_batch(frame_id_list)
    return video


def load_image_or_video(image_or_video_file):
    """Dispatch to load_image or load_video based on the file extension."""
    if image_or_video_file.endswith(('.jpg', '.jpeg', '.png', '.bmp')):
        return load_image(image_file=image_or_video_file)
    elif image_or_video_file.endswith(('.mp4', '.avi', '.mov')):
        return load_video(video_file=image_or_video_file)
    else:
        raise ValueError(f"File type of {image_or_video_file} not supported!")


def main(args):
    """Interactive chat over a single image with a pretrained VideoLLaMA2 checkpoint."""
    disable_torch_init()

    model_name = get_model_name_from_path(args.model_path)
    tokenizer, model, image_processor, context_len = load_pretrained_model(args.model_path, args.model_base, model_name, args.load_8bit, args.load_4bit, device=args.device)

    conv_mode = "llava_v1"

    if args.conv_mode is not None and conv_mode != args.conv_mode:
        print('[WARNING] the default conversation mode is {}, while `--conv-mode` is {}, using {}'.format(conv_mode, args.conv_mode, args.conv_mode))
    else:
        args.conv_mode = conv_mode

    conv = conv_templates[args.conv_mode].copy()
    roles = conv.roles

    image = load_image(args.image_file)
    image_size = image.size
    # Preprocess the image into the tensor format expected by the vision encoder.
    image_tensor = process_images([image], image_processor, model.config)
    if type(image_tensor) is list:
        image_tensor = [image.to(model.device, dtype=torch.float16) for image in image_tensor]
    else:
        image_tensor = image_tensor.to(model.device, dtype=torch.float16)

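    # Interactive loop: read a user message, build the conversation prompt, and stream the model's reply.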
    while True:
        try:
            inp = input(f"{roles[0]}: ")
        except EOFError:
            inp = ""
        if not inp:
            print("exit...")
            break

        print(f"{roles[1]}: ", end="")

        if image is not None:
            # First message: prepend the image token(s) so the visual features enter the prompt.
            if model.config.mm_use_im_start_end:
                inp = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + inp
            else:
                inp = DEFAULT_IMAGE_TOKEN + '\n' + inp
            conv.append_message(conv.roles[0], inp)
            image = None
        else:
            # Later messages are text only; the image has already been consumed.
            conv.append_message(conv.roles[0], inp)
        conv.append_message(conv.roles[1], None)
        prompt = conv.get_prompt()

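        # Tokenize the prompt, mapping the image placeholder to IMAGE_TOKEN_INDEX.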
        input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).to(model.device)
        stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
        keywords = [stop_str]
        # TextStreamer prints newly generated tokens to stdout, skipping the prompt and special tokens.
        streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

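        # Generate the reply; sampling is enabled only when --temperature > 0.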
        with torch.inference_mode():
            output_ids = model.generate(
                input_ids,
                images=image_tensor,
                image_sizes=[image_size],
                do_sample=True if args.temperature > 0 else False,
                temperature=args.temperature,
                max_new_tokens=args.max_new_tokens,
                streamer=streamer,
                use_cache=True)

        outputs = tokenizer.decode(output_ids[0, input_ids.shape[1]:]).strip()
        conv.messages[-1][-1] = outputs

        if args.debug:
            print("\n", {"prompt": prompt, "outputs": outputs}, "\n")


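# Example invocation (script name, model path, and image path below are illustrative):
#   python cli.py --model-path DAMO-NLP-SG/VideoLLaMA2-7B --image-file ./assets/example.jpg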
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model-path", type=str, default="facebook/opt-350m")
    parser.add_argument("--model-base", type=str, default=None)
    parser.add_argument("--image-file", type=str, required=True)
    parser.add_argument("--device", type=str, default="cuda")
    parser.add_argument("--conv-mode", type=str, default=None)
    parser.add_argument("--temperature", type=float, default=0.2)
    parser.add_argument("--max-new-tokens", type=int, default=512)
    parser.add_argument("--load-8bit", action="store_true")
    parser.add_argument("--load-4bit", action="store_true")
    parser.add_argument("--debug", action="store_true")
    args = parser.parse_args()
    main(args)