tonyassi committed on
Commit
389a29c
β€’
1 Parent(s): 439c8b6

Update app.py

Files changed (1):
  1. app.py  +7  -1
app.py CHANGED

@@ -2,6 +2,7 @@ import gradio as gr
 import requests
 from PIL import Image
 from transformers import BlipProcessor, BlipForConditionalGeneration
+import time
 
 processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
 model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
@@ -15,7 +16,12 @@ def caption(img, min_len, max_len):
     return processor.decode(out[0], skip_special_tokens=True)
 
 def greet(img, min_len, max_len):
-    return caption(img, min_len, max_len)
+    start = time.time()
+    result = caption(img, min_len, max_len)
+    end = time.time()
+    total_time = str(end - start)
+    result = result + '\n' + total_time + ' seconds'
+    return result
 
 iface = gr.Interface(fn=greet,
                      title='Blip Image Captioning Large',
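
For context, here is a minimal sketch of app.py after this commit. Only the lines shown in the diff are confirmed; the body of caption() and the gr.Interface inputs/outputs beyond fn and title are assumptions based on typical BLIP captioning usage, not part of this commit.

import gradio as gr
import requests
import time
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")

def caption(img, min_len, max_len):
    # Assumed body: encode the image and generate a caption within the given length bounds.
    inputs = processor(img, return_tensors="pt")
    out = model.generate(**inputs, min_length=min_len, max_length=max_len)
    return processor.decode(out[0], skip_special_tokens=True)

def greet(img, min_len, max_len):
    # New in this commit: time the captioning call and append the duration to the output.
    start = time.time()
    result = caption(img, min_len, max_len)
    end = time.time()
    total_time = str(end - start)
    result = result + '\n' + total_time + ' seconds'
    return result

# Assumed interface wiring; only fn=greet and the title appear in the diff.
iface = gr.Interface(fn=greet,
                     title='Blip Image Captioning Large',
                     inputs=[gr.Image(type='pil'),
                             gr.Slider(label='Minimum Length', minimum=1, maximum=500, value=30),
                             gr.Slider(label='Maximum Length', minimum=1, maximum=500, value=100)],
                     outputs=gr.Textbox(label='Caption'))
iface.launch()

Note that the timer wraps only the caption() call, so the reported seconds cover model inference for one request and exclude the one-time model loading at startup.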