MaziyarPanahi committed on
Commit
29b9322
1 Parent(s): cf67b8a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -3
app.py CHANGED
@@ -28,6 +28,7 @@ model = LlavaForConditionalGeneration.from_pretrained(
28
  )
29
 
30
  model.to("cuda:0")
 
31
 
32
  @spaces.GPU
33
  def bot_streaming(message, history):
@@ -68,8 +69,9 @@ def bot_streaming(message, history):
68
  yield generated_text_without_prompt
69
 
70
 
71
- demo = gr.ChatInterface(fn=bot_streaming, css=CSS, title="LLaVA Llama-3-8B", examples=[{"text": "What is on the flower?", "files":["./bee.jpg"]},
 
72
  {"text": "How to make this pastry?", "files":["./baklava.png"]}],
73
- description="Try [LLaVA Llama-3-8B](https://huggingface.co/xtuner/llava-llama-3-8b-v1_1-transformers). Upload an image and start chatting about it, or simply try one of the examples below. If you don't upload an image, you will receive an error.",
74
- stop_btn="Stop Generation", multimodal=True)
75
  demo.launch(debug=True)
 
28
  )
29
 
30
  model.to("cuda:0")
31
+ model.generation_config.eos_token_id = tokenizer.convert_tokens_to_ids("<|eot_id|>")
32
 
33
  @spaces.GPU
34
  def bot_streaming(message, history):
 
69
  yield generated_text_without_prompt
70
 
71
 
72
+ with gr.Blocks(css=CSS) as demo:
73
+ chatbot = gr.ChatInterface(fn=bot_streaming, title="LLaVA Llama-3-8B", examples=[{"text": "What is on the flower?", "files":["./bee.jpg"]},
74
  {"text": "How to make this pastry?", "files":["./baklava.png"]}],
75
+ description="Try [LLaVA Llama-3-8B](https://huggingface.co/xtuner/llava-llama-3-8b-v1_1-transformers). Upload an image and start chatting about it, or simply try one of the examples below. If you don't upload an image, you will receive an error.",
76
+ stop_btn="Stop Generation", multimodal=True)
77
  demo.launch(debug=True)