---
license: apache-2.0
---

### Korean Otter

This model was trained from [Otter](https://huggingface.co/luodian/OTTER-9B-LA-InContext) on the 77k examples of [KoLLaVA-Instruct-150K](https://huggingface.co/datasets/tabtoyou/KoLLaVA-Instruct-150k) that correspond to complex reasoning. After observing in the Otter image [demo](https://github.com/Luodian/Otter) that the model understands Korean questions to some degree but answers in English, the model was taken as-is to test whether it could be fine-tuned on a Korean dataset. Due to GPU memory limits, only the upper layers of Otter's LLM (layer index > 25) were trained, for a single epoch; a rough sketch of this partial-freezing setup is shown after the inference example below. The answer quality of this model is not good, but training on a larger dataset for more epochs should give better results. The model is shared because confirming this possibility seems meaningful in itself.

```python
import mimetypes
from typing import Union

import requests
import transformers
from PIL import Image

from otter.modeling_otter import OtterForConditionalGeneration

# Disable warnings for unverified HTTPS requests (verify=False is used below)
requests.packages.urllib3.disable_warnings()


# ------------------- Utility Functions -------------------


def get_content_type(file_path):
    content_type, _ = mimetypes.guess_type(file_path)
    return content_type


# ------------------- Image and Video Handling Functions -------------------


def get_image(url: str) -> Union[Image.Image, list]:
    if "://" not in url:  # Local file
        content_type = get_content_type(url)
    else:  # Remote URL
        content_type = requests.head(url, stream=True, verify=False).headers.get("Content-Type")

    if "image" in content_type:
        if "://" not in url:  # Local file
            return Image.open(url)
        else:  # Remote URL
            return Image.open(requests.get(url, stream=True, verify=False).raw)
    else:
        raise ValueError("Invalid content type. Expected image or video.")


# ------------------- OTTER Prompt and Response Functions -------------------


def get_formatted_prompt(prompt: str, in_context_prompts: list = []) -> str:
    # Otter uses a Flamingo-style chat format with <image> and <answer> special tokens
    in_context_string = ""
    for in_context_prompt, in_context_answer in in_context_prompts:
        in_context_string += f"<image>User: {in_context_prompt} GPT:<answer> {in_context_answer}<|endofchunk|>"
    return f"{in_context_string}<image>User: {prompt} GPT:<answer>"


def get_response(image_list, prompt: str, model=None, image_processor=None, in_context_prompts: list = []) -> str:
    input_data = image_list

    if isinstance(input_data, Image.Image):
        vision_x = image_processor.preprocess([input_data], return_tensors="pt")["pixel_values"].unsqueeze(1).unsqueeze(0)
    elif isinstance(input_data, list):  # list of images / video frames
        vision_x = image_processor.preprocess(input_data, return_tensors="pt")["pixel_values"].unsqueeze(1).unsqueeze(0)
    else:
        raise ValueError("Invalid input data. Expected PIL Image or list of video frames.")
    lang_x = model.text_tokenizer(
        [
            get_formatted_prompt(prompt, in_context_prompts),
        ],
        return_tensors="pt",
    )

    # Suppress role markers so the model does not continue the dialogue on its own
    bad_words_id = model.text_tokenizer(["User:", "GPT1:", "GFT:", "GPT:"], add_special_tokens=False).input_ids
    generated_text = model.generate(
        vision_x=vision_x.to(model.device),
        lang_x=lang_x["input_ids"].to(model.device),
        attention_mask=lang_x["attention_mask"].to(model.device),
        max_new_tokens=512,
        num_beams=3,
        no_repeat_ngram_size=3,
        bad_words_ids=bad_words_id,
    )
    parsed_output = (
        model.text_tokenizer.decode(generated_text[0])
        .split("<answer>")[-1]
        .lstrip()
        .rstrip()
        .split("<|endofchunk|>")[0]
        .lstrip()
        .rstrip()
        .lstrip('"')
        .rstrip('"')
    )
    return parsed_output


# ------------------- Main Function -------------------

if __name__ == "__main__":
    model = OtterForConditionalGeneration.from_pretrained("tabtoyou/Ko-Otter-9B-LACR-v0", device_map="auto")
    model.text_tokenizer.padding_side = "left"
    image_processor = transformers.CLIPImageProcessor()
    model.eval()

    urls = [
        "https://images.cocodataset.org/train2017/000000339543.jpg",
        "https://images.cocodataset.org/train2017/000000140285.jpg",
    ]

    encoded_frames_list = []
    for url in urls:
        frames = get_image(url)
        encoded_frames_list.append(frames)

    # In-context example: "Please describe the image" -> "A family is taking a picture in front of a snowy mountain."
    in_context_prompts = []
    in_context_examples = [
        "이미지에 대해 묘사해주세요::한 가족이 설산 앞에서 사진을 찍고 있습니다.",
    ]
    for in_context_input in in_context_examples:
        in_context_prompt, in_context_answer = in_context_input.split("::")
        in_context_prompts.append((in_context_prompt.strip(), in_context_answer.strip()))

    # For interactive use, uncomment the line below and prompt for input instead:
    # prompts_input = input("Enter the prompts separated by commas (or type 'quit' to exit): ")
    prompts_input = "이미지에 대해 묘사해주세요"  # "Please describe the image"
    prompts = [prompt.strip() for prompt in prompts_input.split(",")]

    for prompt in prompts:
        print(f"\nPrompt: {prompt}")
        response = get_response(encoded_frames_list, prompt, model, image_processor, in_context_prompts)
        print(f"Response: {response}")
```
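
For reference, the partial fine-tuning mentioned above (updating only the LLM layers above index 25) could be set up roughly as follows. This is a minimal sketch, not the actual training script used for this model; in particular, the attribute path `model.lang_encoder.model.layers` is an assumption based on the OpenFlamingo/Otter architecture and may differ between versions.

```python
from otter.modeling_otter import OtterForConditionalGeneration

# Load the base checkpoint that this model started from
model = OtterForConditionalGeneration.from_pretrained("luodian/OTTER-9B-LA-InContext")

# Freeze every parameter first
for param in model.parameters():
    param.requires_grad = False

# Unfreeze only the upper decoder layers of the language model (index > 25).
# NOTE: `lang_encoder.model.layers` is an assumed layout for the LLaMA-based LLM inside Otter.
for idx, layer in enumerate(model.lang_encoder.model.layers):
    if idx > 25:
        for param in layer.parameters():
            param.requires_grad = True

trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f"Trainable parameters: {trainable:,}")
```

With this kind of setup, only the unfrozen layers receive gradient updates during the single training epoch, which is what kept the run within the available GPU memory.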