import torch

from llava.constants import X_TOKEN_INDEX
from llava.conversation import conv_templates, SeparatorStyle
from llava.mm_utils import get_model_name_from_path, KeywordsStoppingCriteria, tokenizer_X_token
from llava.model.builder import load_pretrained_model
from llava.utils import disable_torch_init

title_markdown = ("""
LanguageBind🚀

Video-LLaVA: Improved LLaVA with United Visual Representation

If you like our project, please give us a star ✨ on Github for the latest update.
""") block_css = """ #buttons button { min-width: min(120px,100%); } """ tos_markdown = (""" ### Terms of use By using this service, users are required to agree to the following terms: The service is a research preview intended for non-commercial use only. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes. The service may collect user dialogue data for future research. Please click the "Flag" button if you get any inappropriate answer! We will collect those to keep improving our moderator. For an optimal experience, please use desktop computers for this demo, as mobile devices may compromise its quality. """) learn_more_markdown = (""" ### License The service is a research preview intended for non-commercial use only, subject to the model [License](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) of LLaMA, [Terms of Use](https://openai.com/policies/terms-of-use) of the data generated by OpenAI, and [Privacy Practices](https://chrome.google.com/webstore/detail/sharegpt-share-your-chatg/daiacboceoaocpibfodeljbdfacokfjb) of ShareGPT. Please contact us if you find any potential violation. """) class Chat: def __init__(self, model_path, conv_mode, model_base=None, load_8bit=False, load_4bit=False, device='cuda'): disable_torch_init() model_name = get_model_name_from_path(model_path) self.tokenizer, self.model, processor, context_len = load_pretrained_model(model_path, model_base, model_name, load_8bit, load_4bit, device=device) self.image_processor = processor['image'] self.video_processor = processor['video'] self.conv_mode = conv_mode self.device = self.model.device print(self.model) def get_prompt(self, qs, state): state.append_message(state.roles[0], qs) state.append_message(state.roles[1], None) return state @torch.inference_mode() def generate(self, images_tensor: list, prompt: str, first_run: bool, state): tokenizer, model, image_processor = self.tokenizer, self.model, self.image_processor state = self.get_prompt(prompt, state) prompt = state.get_prompt() print('\n\n\n') print(prompt) if 'image' in images_tensor[1] and 'video' not in images_tensor[1]: input_ids = tokenizer_X_token(prompt, tokenizer, X_TOKEN_INDEX['IMAGE'], return_tensors='pt').unsqueeze(0).to(self.device) elif 'image' not in images_tensor[1] and 'video' in images_tensor[1]: input_ids = tokenizer_X_token(prompt, tokenizer, X_TOKEN_INDEX['VIDEO'], return_tensors='pt').unsqueeze(0).to(self.device) elif 'image' in images_tensor[1] and 'video' in images_tensor[1]: #