Getting an error while running this on the GPU

#1
by Shashwath01

code -
from gradio_client import Client

client = Client("https://autotrain-projects-llm-merge-adapter.hf.space/--replicas/uu4v8/")
result = client.predict(
    "HuggingFaceM4/idefics-9b-instruct",  # str in 'Base Model (e.g. meta-llama/Llama-2-7b-chat-hf)' Textbox component
    "Shashwath01/idefics-9b-instruct-IDEFIC_Medical_VQA",  # str in 'Trained Adapter Model (e.g. username/autotrain-my-llama)' Textbox component
    "**********************",  # str in 'Hugging Face Write Token' Textbox component
    fn_index=0,
)
print(result)

error -

Loaded as API: https://autotrain-projects-llm-merge-adapter.hf.space/--replicas/uu4v8/

ValueError Traceback (most recent call last)
in <cell line: 4>()
2
3 client = Client("https://autotrain-projects-llm-merge-adapter.hf.space/--replicas/uu4v8/")
----> 4 result = client.predict(
5 "HuggingFaceM4/idefics-9b-instruct", # str in 'Base Model (e.g. meta-llama/Llama-2-7b-chat-hf)' Textbox component
6 "Shashwath01/idefics-9b-instruct-IDEFIC_Medical_VQA", # str in 'Trained Adapter Model (e.g. username/autotrain-my-llama)' Textbox component

6 frames
/usr/local/lib/python3.10/dist-packages/gradio_client/client.py in predict(self, api_name, fn_index, *args)
402 "Cannot call predict on this function as it may run forever. Use submit instead."
403 )
--> 404 return self.submit(*args, api_name=api_name, fn_index=fn_index).result()
405
406 def new_helper(self, fn_index: int) -> Communicator:

/usr/local/lib/python3.10/dist-packages/gradio_client/client.py in result(self, timeout)
1609 >> 9
1610 """
-> 1611 return super().result(timeout=timeout)
1612
1613 def outputs(self) -> list[tuple | Any]:

/usr/lib/python3.10/concurrent/futures/_base.py in result(self, timeout)
456 raise CancelledError()
457 elif self._state == FINISHED:
--> 458 return self.__get_result()
459 else:
460 raise TimeoutError()

/usr/lib/python3.10/concurrent/futures/_base.py in __get_result(self)
401 if self._exception:
402 try:
--> 403 raise self._exception
404 finally:
405 # Break a reference cycle with the exception in self._exception

/usr/lib/python3.10/concurrent/futures/thread.py in run(self)
56
57 try:
---> 58 result = self.fn(*self.args, **self.kwargs)
59 except BaseException as exc:
60 self.future.set_exception(exc)

/usr/local/lib/python3.10/dist-packages/gradio_client/client.py in _inner(*data)
1277 if self.client.upload_files:
1278 data = self.serialize(*data)
-> 1279 predictions = _predict(*data)
1280 predictions = self.process_predictions(*predictions)
1281 # Append final output only if not already present

/usr/local/lib/python3.10/dist-packages/gradio_client/client.py in _predict(*data)
1307 result = utils.synchronize_async(self._ws_fn, data, hash_data, helper)
1308 if "error" in result:
-> 1309 raise ValueError(result["error"])
1310 else:
1311 response = httpx.post(

ValueError: None

How do I go about solving this?
I need the config.json so I can run inference on my fine-tuned model.

Same. This autotrain thing is a pain in the ass.

Did you find a solution?

Yeah, I used PEFT's merge_and_unload function. You can have a look at:
1. https://huggingface.co/Shashwath01/Idefic_medical_VQA_merged_4bit
2. https://github.com/Shashwathp/Idefic_medical_vqa/blob/main/merge_unload.ipynb (GitHub repo for the same)

There's even an inference.ipynb in the same repo. A rough sketch of the merge step is below.
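
For anyone landing here later, here is a minimal sketch of what merging an adapter with merge_and_unload looks like. The model class, dtype, and output directory are my own assumptions; the notebook linked above has the exact code.

import torch
from peft import PeftModel
from transformers import AutoProcessor, IdeficsForVisionText2Text

base_id = "HuggingFaceM4/idefics-9b-instruct"
adapter_id = "Shashwath01/idefics-9b-instruct-IDEFIC_Medical_VQA"

# Load the base model, then attach the trained LoRA adapter on top of it
base = IdeficsForVisionText2Text.from_pretrained(base_id, torch_dtype=torch.float16)
model = PeftModel.from_pretrained(base, adapter_id)

# Fold the adapter weights into the base weights and drop the PEFT wrappers;
# the result is a plain transformers model that ships its own config.json
merged = model.merge_and_unload()

merged.save_pretrained("idefics-9b-medical-vqa-merged")
AutoProcessor.from_pretrained(base_id).save_pretrained("idefics-9b-medical-vqa-merged")
# or push it to the Hub under a repo of your choice (name is hypothetical):
# merged.push_to_hub("your-username/your-merged-model")

Once the merged folder (or Hub repo) exists, it loads like any regular transformers model, config.json included, so inference no longer depends on the adapter repo.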
