zesquirrelnator committed on
Commit
9243d24
1 Parent(s): 197f38e

Update handler.py

Browse files
Files changed (1) hide show
  1. handler.py +3 -3
handler.py CHANGED
@@ -10,9 +10,9 @@ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
10
 
11
  class EndpointHandler():
12
  def __init__(self, path=""):
13
- self.processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
14
  self.model = BlipForConditionalGeneration.from_pretrained(
15
- "Salesforce/blip-image-captioning-large"
16
  ).to(device)
17
  self.model.eval()
18
 
@@ -23,7 +23,7 @@ class EndpointHandler():
23
  if not encoded_images:
24
  return {"captions": [], "error": "No images provided"}
25
 
26
- texts = input_data.get("texts", ["a photography of"] * len(encoded_images))
27
 
28
  try:
29
  raw_images = [Image.open(BytesIO(base64.b64decode(img))).convert("RGB") for img in encoded_images]
 
10
 
11
  class EndpointHandler():
12
  def __init__(self, path=""):
13
+ self.processor = BlipProcessor.from_pretrained("zesquirrelnator/idefics2-8b-docvqa-finetuned-tutorial")
14
  self.model = BlipForConditionalGeneration.from_pretrained(
15
+ "zesquirrelnator/idefics2-8b-docvqa-finetuned-tutorial"
16
  ).to(device)
17
  self.model.eval()
18
 
 
23
  if not encoded_images:
24
  return {"captions": [], "error": "No images provided"}
25
 
26
+ texts = input_data.get("texts", ["move to red ball"] * len(encoded_images))
27
 
28
  try:
29
  raw_images = [Image.open(BytesIO(base64.b64decode(img))).convert("RGB") for img in encoded_images]