Daniel Marques committed on
Commit
1ae7653
1 Parent(s): 81fca18

feat: add backend

Browse files
Files changed (1) hide show
  1. main.py +33 -9
main.py CHANGED
@@ -32,9 +32,7 @@ class Predict(BaseModel):
32
  class Delete(BaseModel):
33
  filename: str
34
 
35
- class MyCustomSyncHandler(BaseCallbackHandler):
36
- def on_llm_new_token(self, token: str, **kwargs) -> None:
37
- print(f"token: {token}")
38
 
39
  # if torch.backends.mps.is_available():
40
  # DEVICE_TYPE = "mps"
@@ -57,7 +55,31 @@ DB = Chroma(
57
 
58
  RETRIEVER = DB.as_retriever()
59
 
60
- LLM = load_model(device_type=DEVICE_TYPE, model_id=MODEL_ID, model_basename=MODEL_BASENAME, stream=True, callbacks=[MyCustomSyncHandler()])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
61
 
62
  template = """you are a helpful, respectful and honest assistant. You should only use the source documents provided to answer the questions.
63
  You should only respond only topics that contains in documents use to training. Use the following pieces of context to answer the question at the end.
@@ -233,12 +255,14 @@ async def websocket_endpoint(websocket: WebSocket):
233
 
234
  await websocket.accept()
235
  while True:
236
- websocket_state = websocket;
237
 
238
- data = await websocket_state.receive_text()
239
 
240
- res = QA(data)
 
 
 
 
241
 
242
- print(f"${res}")
243
 
244
- await websocket_state.send_text(f"Message text was:")
 
32
  class Delete(BaseModel):
33
  filename: str
34
 
35
+
 
 
36
 
37
  # if torch.backends.mps.is_available():
38
  # DEVICE_TYPE = "mps"
 
55
 
56
  RETRIEVER = DB.as_retriever()
57
 
58
+
59
+ machine = Machine(lump)
60
+ solid = State('solid')
61
+ liquid = State('liquid')
62
+
63
+ class MyCustomSyncHandler(BaseCallbackHandler):
64
+ def __init__(self):
65
+ self.end = False
66
+
67
+ async def on_llm_start(
68
+ self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
69
+ ) -> None:
70
+ self.end = False
71
+
72
+ def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
73
+ self.end = True
74
+
75
+ def on_llm_new_token(self, token: str, **kwargs) -> Any:
76
+ return token
77
+
78
+
79
+ handlerToken = MyCustomSyncHandler()
80
+
81
+
82
+ LLM = load_model(device_type=DEVICE_TYPE, model_id=MODEL_ID, model_basename=MODEL_BASENAME, stream=True, callbacks=[handlerToken])
83
 
84
  template = """you are a helpful, respectful and honest assistant. You should only use the source documents provided to answer the questions.
85
  You should only respond only topics that contains in documents use to training. Use the following pieces of context to answer the question at the end.
 
255
 
256
  await websocket.accept()
257
  while True:
258
+ data = await websocket.receive_text()
259
 
260
+ QA(data)
261
 
262
+ finish = False
263
+ while finish == False:
264
+ finish = handlerToken.end
265
+ token = handlerToken.on_llm_new_token()
266
+ await websocket.send_text(f"result: {token}")
267
 
 
268