mikeee committed on
Commit
e4f4dd2
0 Parent(s):

Duplicate from mikeee/mpt-30b-chat

Files changed (5)
  1. .gitattributes +35 -0
  2. .gitignore +4 -0
  3. README.md +14 -0
  4. app.py +487 -0
  5. requirements.txt +5 -0
.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,4 @@
+ app-.py
+ __pycache__
+ flagged
+ models
README.md ADDED
@@ -0,0 +1,14 @@
+ ---
+ title: mpt-30b-chat-ggml
+ emoji: 🔥
+ colorFrom: purple
+ colorTo: red
+ sdk: gradio
+ sdk_version: 3.35.2
+ app_file: app.py
+ pinned: false
+ duplicated_from: mikeee/mpt-30b-chat
+ ---
+ NB: Running this on a Hugging Face Space requires a CPU Upgrade instance (32 GB RAM); at a minimum, 19 GB+ of disk and 22 GB+ of RAM are needed.
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,487 @@
+ """Refer to https://github.com/abacaj/mpt-30B-inference/blob/main/download_model.py."""
+ # pylint: disable=invalid-name, missing-function-docstring, missing-class-docstring, redefined-outer-name, broad-except
+ import os
+ import time
+ from dataclasses import asdict, dataclass
+ from types import SimpleNamespace
+
+ import gradio as gr
+ from ctransformers import AutoConfig, AutoModelForCausalLM
+
+ from mcli import predict
+ from huggingface_hub import hf_hub_download
+ from loguru import logger
+
+ URL = os.getenv("URL", "")
+ MOSAICML_API_KEY = os.getenv("MOSAICML_API_KEY", "")
+ if not URL:
+     raise ValueError("URL environment variable must be set")
+ if not MOSAICML_API_KEY:
+     raise ValueError("MOSAICML_API_KEY environment variable must be set")
+
+ ns = SimpleNamespace(response="")
+
+ def predict0(prompt, bot):
+     # logger.debug(f"{prompt=}, {bot=}, {timeout=}")
+     logger.debug(f"{prompt=}, {bot=}")
+     ns.response = ""
+     try:
+         user_prompt = prompt
+         generator = generate(llm, generation_config, system_prompt, user_prompt.strip())
+         print(assistant_prefix, end=" ", flush=True)
+
+         response = ""
+         buff.update(value="diggin...")
+         for word in generator:
+             print(word, end="", flush=True)
+             response += word
+             ns.response = response
+             buff.update(value=response)
+         print("")
+         logger.debug(f"{response=}")
+     except Exception as exc:
+         logger.error(exc)
+         response = f"{exc=}"
+     # bot = {"inputs": [response]}
+     bot = [(prompt, response)]
+
+     return prompt, bot
+
+ def predict_api(prompt):
+     logger.debug(f"{prompt=}")
+     ns.response = ""
+     try:
+         user_prompt = prompt
+         generator = generate(llm, generation_config, system_prompt, user_prompt.strip())
+         print(assistant_prefix, end=" ", flush=True)
+
+         response = ""
+         buff.update(value="diggin...")
+         for word in generator:
+             print(word, end="", flush=True)
+             response += word
+             ns.response = response
+             buff.update(value=response)
+         print("")
+         logger.debug(f"{response=}")
+     except Exception as exc:
+         logger.error(exc)
+         response = f"{exc=}"
+     # bot = {"inputs": [response]}
+     # bot = [(prompt, response)]
+
+     return response
+
76
+ local_path = os.path.abspath(destination_folder)
77
+ return hf_hub_download(
78
+ repo_id=repo_id,
79
+ filename=model_filename,
80
+ local_dir=local_path,
81
+ local_dir_use_symlinks=True,
82
+ )
83
+
84
+
85
+ @dataclass
86
+ class GenerationConfig:
87
+ temperature: float
88
+ top_k: int
89
+ top_p: float
90
+ repetition_penalty: float
91
+ max_new_tokens: int
92
+ seed: int
93
+ reset: bool
94
+ stream: bool
95
+ threads: int
96
+ stop: list[str]
97
+
98
+
+ def format_prompt(system_prompt: str, user_prompt: str):
+     """format prompt based on: https://huggingface.co/spaces/mosaicml/mpt-30b-chat/blob/main/app.py"""
+
+     system_prompt = f"<|im_start|>system\n{system_prompt}<|im_end|>\n"
+     user_prompt = f"<|im_start|>user\n{user_prompt}<|im_end|>\n"
+     assistant_prompt = "<|im_start|>assistant\n"
+
+     return f"{system_prompt}{user_prompt}{assistant_prompt}"
+
+
+ def generate(
+     llm: AutoModelForCausalLM,
+     generation_config: GenerationConfig,
+     system_prompt: str,
+     user_prompt: str,
+ ):
+     """run model inference, will return a Generator if streaming is true"""
+
+     return llm(
+         format_prompt(
+             system_prompt,
+             user_prompt,
+         ),
+         **asdict(generation_config),
+     )
+
+
+ class Chat:
+     default_system_prompt = "A conversation between a user and an LLM-based AI assistant. The assistant gives helpful and honest answers."
+     system_format = "<|im_start|>system\n{}<|im_end|>\n"
+
+     def __init__(
+         self, system: str = None, user: str = None, assistant: str = None
+     ) -> None:
+         if system is not None:
+             self.set_system_prompt(system)
+         else:
+             self.reset_system_prompt()
+         self.user = user if user else "<|im_start|>user\n{}<|im_end|>\n"
+         self.assistant = (
+             assistant if assistant else "<|im_start|>assistant\n{}<|im_end|>\n"
+         )
+         self.response_prefix = self.assistant.split("{}", maxsplit=1)[0]
+
+     def set_system_prompt(self, system_prompt):
+         # self.system = self.system_format.format(system_prompt)
+         return system_prompt
+
+     def reset_system_prompt(self):
+         return self.set_system_prompt(self.default_system_prompt)
+
+     def history_as_formatted_str(self, system, history) -> str:
+         system = self.system_format.format(system)
+         text = system + "".join(
+             [
+                 "\n".join(
+                     [
+                         self.user.format(item[0]),
+                         self.assistant.format(item[1]),
+                     ]
+                 )
+                 for item in history[:-1]
+             ]
+         )
+         text += self.user.format(history[-1][0])
+         text += self.response_prefix
+         # stopgap solution to too long sequences
+         if len(text) > 4500:
+             # delete from the middle between <|im_start|> and <|im_end|>
+             # find the middle ones, then expand out
+             start = text.find("<|im_start|>", 139)
+             end = text.find("<|im_end|>", 139)
+             while end < len(text) and len(text) > 4500:
+                 end = text.find("<|im_end|>", end + 1)
+                 text = text[:start] + text[end + 1 :]
+             if len(text) > 4500:
+                 # the nice way didn't work, just truncate
+                 # deleting the beginning
+                 text = text[-4500:]
+
+         return text
+
+     def clear_history(self, history):
+         return []
+
+     def turn(self, user_input: str):
+         self.user_turn(user_input)
+         return self.bot_turn()
+
+     def user_turn(self, user_input: str, history):
+         history.append([user_input, ""])
+         return user_input, history
+
+     def bot_turn(self, system, history):
+         conversation = self.history_as_formatted_str(system, history)
+         assistant_response = call_inf_server(conversation)
+         history[-1][-1] = assistant_response
+         print(system)
+         print(history)
+         return "", history
+
+
+ def call_inf_server(prompt):
+     try:
+         response = predict(
+             URL,
+             {"inputs": [prompt], "temperature": 0.2, "top_p": 0.9, "output_len": 512},
+             timeout=70,
+         )
+         # print(f'prompt: {prompt}')
+         # print(f'len(prompt): {len(prompt)}')
+         response = response["outputs"][0]
+         # print(f'len(response): {len(response)}')
+         # remove spl tokens from prompt
+         spl_tokens = ["<|im_start|>", "<|im_end|>"]
+         clean_prompt = prompt.replace(spl_tokens[0], "").replace(spl_tokens[1], "")
+
+         # return response[len(clean_prompt):]  # remove the prompt
+         try:
+             user_prompt = prompt
+             generator = generate(llm, generation_config, system_prompt, user_prompt.strip())
+             print(assistant_prefix, end=" ", flush=True)
+             for word in generator:
+                 print(word, end="", flush=True)
+             print("")
+             response = word
+         except Exception as exc:
+             logger.error(exc)
+             response = f"{exc=}"
+         return response
+
+     except Exception as e:
+         # assume it is our error
+         # just wait and try one more time
+         print(e)
+         time.sleep(1)
+         response = predict(
+             URL,
+             {"inputs": [prompt], "temperature": 0.2, "top_p": 0.9, "output_len": 512},
+             timeout=70,
+         )
+         # print(response)
+         response = response["outputs"][0]
+         return response[len(prompt):]  # remove the prompt
+
+
+ logger.info("start dl")
+ _ = """full url: https://huggingface.co/TheBloke/mpt-30B-chat-GGML/blob/main/mpt-30b-chat.ggmlv0.q4_1.bin"""
+
+ repo_id = "TheBloke/mpt-30B-chat-GGML"
+
+ # https://huggingface.co/TheBloke/mpt-30B-chat-GGML
+ _ = """
+ name                          quant  bits  size      max RAM required  notes
+ mpt-30b-chat.ggmlv0.q4_0.bin  q4_0   4     16.85 GB  19.35 GB          4-bit.
+ mpt-30b-chat.ggmlv0.q4_1.bin  q4_1   4     18.73 GB  21.23 GB          4-bit. Higher accuracy than q4_0 but not as high as q5_0; quicker inference than the q5 models.
+ """
+ model_filename = "mpt-30b-chat.ggmlv0.q4_1.bin"
+ destination_folder = "models"
+
+ download_mpt_quant(destination_folder, repo_id, model_filename)
+
+ logger.info("done dl")
+
+ config = AutoConfig.from_pretrained("mosaicml/mpt-30b-chat", context_length=8192)
+ llm = AutoModelForCausalLM.from_pretrained(
+     os.path.abspath("models/mpt-30b-chat.ggmlv0.q4_1.bin"),
+     model_type="mpt",
+     config=config,
+ )
+
+ system_prompt = "A conversation between a user and an LLM-based AI assistant named Local Assistant. Local Assistant gives helpful and honest answers."
+
+ generation_config = GenerationConfig(
+     temperature=0.2,
+     top_k=0,
+     top_p=0.9,
+     repetition_penalty=1.0,
+     max_new_tokens=512,  # adjust as needed
+     seed=42,
+     reset=False,  # reset history (cache)
+     stream=True,  # streaming per word/token
+     threads=int(os.cpu_count() / 2),  # adjust for your CPU
+     stop=["<|im_end|>", "|<"],
+ )
+
+ user_prefix = "[user]: "
+ assistant_prefix = "[assistant]: "
+
+
+ css = """
+     .importantButton {
+         background: linear-gradient(45deg, #7e0570,#5d1c99, #6e00ff) !important;
+         border: none !important;
+     }
+     .importantButton:hover {
+         background: linear-gradient(45deg, #ff00e0,#8500ff, #6e00ff) !important;
+         border: none !important;
+     }
+     .disclaimer {font-variant-caps: all-small-caps; font-size: xx-small;}
+     .xsmall {font-size: x-small;}
+ """
+
+ with gr.Blocks(
+     title="mpt-30b-chat-ggml",
+     theme=gr.themes.Soft(text_size="sm"),
+     css=css,
+ ) as block:
+     with gr.Accordion("🎈 Info", open=False):
+         gr.HTML(
+             """<center><a href="https://huggingface.co/spaces/mikeee/mpt-30b-chat?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate"></a> and spin up a CPU UPGRADE instance to avoid the queue</center>"""
+         )
+         gr.Markdown(
+             """<h4><center>mpt-30b-chat-ggml</center></h4>
+
+ This demo runs [TheBloke/mpt-30B-chat-GGML](https://huggingface.co/TheBloke/mpt-30B-chat-GGML).
+
+ A response typically takes 40+ seconds. If the Space has gone to sleep from inactivity, or has crashed, it takes about 5 minutes to restart; refresh the browser to load the restarted Space.
+             """,
+             elem_classes="xsmall"
+         )
+     conversation = Chat()
+     chatbot = gr.Chatbot().style(height=700)  # 500
+     buff = gr.Textbox(show_label=False)
+     with gr.Row():
+         with gr.Column():
+             msg = gr.Textbox(
+                 label="Chat Message Box",
+                 placeholder="Ask me anything (press Enter or click Submit to send)",
+                 show_label=False,
+             ).style(container=False)
+         with gr.Column():
+             with gr.Row():
+                 submit = gr.Button("Submit", elem_classes="xsmall")
+                 stop = gr.Button("Stop", visible=False)
+                 clear = gr.Button("Clear", visible=False)
+     with gr.Row(visible=False):
+         with gr.Accordion("Advanced Options:", open=False):
+             with gr.Row():
+                 with gr.Column(scale=2):
+                     system = gr.Textbox(
+                         label="System Prompt",
+                         value=Chat.default_system_prompt,
+                         show_label=False,
+                     ).style(container=False)
+                 with gr.Column():
+                     with gr.Row():
+                         change = gr.Button("Change System Prompt")
+                         reset = gr.Button("Reset System Prompt")
+
+     with gr.Accordion("Example inputs", open=True):
+         etext = """In America, where cars are an important part of the national psyche, a decade ago people had suddenly started to drive less, which had not happened since the oil shocks of the 1970s. """
+         examples = gr.Examples(
+             examples=[
+                 ["Explain the plot of Cinderella in a sentence."],
+                 [
+                     "How long does it take to become proficient in French, and what are the best methods for retaining information?"
+                 ],
+                 ["What are some common mistakes to avoid when writing code?"],
+                 ["Build a prompt to generate a beautiful portrait of a horse"],
+                 ["Suggest four metaphors to describe the benefits of AI"],
+                 ["Write a pop song about leaving home for the sandy beaches."],
+                 ["Write a summary demonstrating my ability to tame lions"],
+                 ["鲁迅和周树人什么关系"],
+                 ["从前有一头牛,这头牛后面有什么?"],
+                 ["正无穷大加一大于正无穷大吗?"],
+                 ["正无穷大加正无穷大大于正无穷大吗?"],
+                 ["-2的平方根等于什么"],
+                 ["树上有5只鸟,猎人开枪打死了一只。树上还有几只鸟?"],
+                 ["树上有11只鸟,猎人开枪打死了一只。树上还有几只鸟?提示:需考虑鸟可能受惊吓飞走。"],
+                 ["鲁迅和周树人什么关系 用英文回答"],
+                 ["以红楼梦的行文风格写一张委婉的请假条。不少于320字。"],
+                 [f"{etext} 翻成中文,列出3个版本"],
+                 [f"{etext} \n 翻成中文,保留原意,但使用文学性的语言。不要写解释。列出3个版本"],
+                 ["js 判断一个数是不是质数"],
+                 ["js 实现python 的 range(10)"],
+                 ["js 实现python 的 [*(range(10)]"],
+                 ["假定 1 + 2 = 4, 试求 7 + 8"],
+                 ["Erkläre die Handlung von Cinderella in einem Satz."],
+                 ["Erkläre die Handlung von Cinderella in einem Satz. Auf Deutsch"],
+             ],
+             inputs=[msg],
+             examples_per_page=30,
+         )
+
+     # with gr.Row():
+     with gr.Accordion("Disclaimer", open=False):
+         gr.Markdown(
+             "Disclaimer: MPT-30B can produce factually incorrect output, and should not be relied on to produce "
+             "factually accurate information. MPT-30B was trained on various public datasets; while great efforts "
+             "have been taken to clean the pretraining data, it is possible that this model could generate lewd, "
+             "biased, or otherwise offensive outputs.",
+             elem_classes=["disclaimer"],
+         )
+     with gr.Row(visible=False):
+         gr.Markdown(
+             "[Privacy policy](https://gist.github.com/samhavens/c29c68cdcd420a9aa0202d0839876dac)",
+             elem_classes=["disclaimer"],
+         )
+
+     _ = """
+     submit_event = msg.submit(
+         fn=conversation.user_turn,
+         inputs=[msg, chatbot],
+         outputs=[msg, chatbot],
+         queue=False,
+     ).then(
+         fn=conversation.bot_turn,
+         inputs=[system, chatbot],
+         outputs=[msg, chatbot],
+         queue=True,
+     )
+     submit_click_event = submit.click(
+         fn=conversation.user_turn,
+         inputs=[msg, chatbot],
+         outputs=[msg, chatbot],
+         queue=False,
+     ).then(
+         # fn=conversation.bot_turn,
+         inputs=[system, chatbot],
+         outputs=[msg, chatbot],
+         queue=True,
+     )
+
+     stop.click(
+         fn=None,
+         inputs=None,
+         outputs=None,
+         cancels=[submit_event, submit_click_event],
+         queue=False,
+     )
+     clear.click(lambda: None, None, chatbot, queue=False).then(
+         fn=conversation.clear_history,
+         inputs=[chatbot],
+         outputs=[chatbot],
+         queue=False,
+     )
+     change.click(
+         fn=conversation.set_system_prompt,
+         inputs=[system],
+         outputs=[system],
+         queue=False,
+     )
+     reset.click(
+         fn=conversation.reset_system_prompt,
+         inputs=[],
+         outputs=[system],
+         queue=False,
+     )
+     # """
+
+     msg.submit(
+         # fn=conversation.user_turn,
+         fn=predict0,
+         inputs=[msg, chatbot],
+         outputs=[msg, chatbot],
+         queue=True,
+         show_progress="full",
+         api_name="predict",
+     )
+     submit.click(
+         # fn=conversation.user_turn,
+         fn=predict0,
+         inputs=[msg, chatbot],
+         outputs=[msg, chatbot],
+         queue=True,
+         show_progress="full",
+     )
+
+     # update buff Textbox (every: units in seconds)
+     # https://huggingface.co/spaces/julien-c/nvidia-smi/discussions
+     # does not work
+     # AttributeError: 'Blocks' object has no attribute 'run_forever'
+     # block.run_forever(lambda: ns.response, None, [buff], every=1)
+
+     with gr.Accordion("For Chat/Translation API", open=False, visible=False):
+         input_text = gr.Text()
+         api_btn = gr.Button("Go", variant="primary")
+         out_text = gr.Text()
+     api_btn.click(
+         predict_api,
+         input_text,
+         out_text,
+         # show_progress="full",
+         api_name="api",
+     )
+
+     # concurrency_count=5, max_size=20
+     # max_size=36, concurrency_count=14
+ block.queue(concurrency_count=5, max_size=20).launch(debug=True)
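
The msg.submit and api_btn.click handlers above register named endpoints (api_name="predict" and api_name="api"), so the Space can also be queried programmatically. A minimal client-side sketch, not part of this commit, assuming the Space is published under an id such as mikeee/mpt-30b-chat-ggml (hypothetical here) and that the gradio_client package is installed:

```python
from gradio_client import Client

# Space id is an assumption; substitute the actual "user/space" the app is deployed under.
client = Client("mikeee/mpt-30b-chat-ggml")

# The /api endpoint wraps predict_api: one prompt string in, the generated text out.
result = client.predict("Explain the plot of Cinderella in a sentence.", api_name="/api")
print(result)
```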
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ mosaicml-cli
+ ctransformers==0.2.10
+ transformers==4.30.2
+ huggingface_hub
+ loguru
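
The pinned ctransformers and huggingface_hub packages above are also enough to exercise the quantized model outside Gradio. A minimal offline sketch, mirroring the hf_hub_download and ctransformers calls in app.py; the prompt text and token budget are illustrative only:

```python
"""Standalone sanity check: download the GGML file and stream one reply."""
from ctransformers import AutoConfig, AutoModelForCausalLM
from huggingface_hub import hf_hub_download

# Same repo, filename, and destination as app.py's download_mpt_quant().
model_path = hf_hub_download(
    repo_id="TheBloke/mpt-30B-chat-GGML",
    filename="mpt-30b-chat.ggmlv0.q4_1.bin",
    local_dir="models",
    local_dir_use_symlinks=True,
)

# Same config and model_type as app.py.
config = AutoConfig.from_pretrained("mosaicml/mpt-30b-chat", context_length=8192)
llm = AutoModelForCausalLM.from_pretrained(model_path, model_type="mpt", config=config)

# ChatML-style prompt, as built by format_prompt() in app.py; the question is illustrative.
prompt = (
    "<|im_start|>system\nA conversation between a user and an AI assistant.<|im_end|>\n"
    "<|im_start|>user\nExplain the plot of Cinderella in a sentence.<|im_end|>\n"
    "<|im_start|>assistant\n"
)
for word in llm(prompt, max_new_tokens=128, stream=True, stop=["<|im_end|>"]):
    print(word, end="", flush=True)
```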