Spaces:
Dao3
/
Runtime error

Dao3 hanyullai committed on
Commit
6343115
0 Parent(s):

Duplicate from THUDM/GLM-130B

Browse files

Co-authored-by: Hanyu Lai <hanyullai@users.noreply.huggingface.co>

Files changed (3) hide show
  1. .gitattributes +31 -0
  2. README.md +14 -0
  3. app.py +138 -0
.gitattributes ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ftz filter=lfs diff=lfs merge=lfs -text
6
+ *.gz filter=lfs diff=lfs merge=lfs -text
7
+ *.h5 filter=lfs diff=lfs merge=lfs -text
8
+ *.joblib filter=lfs diff=lfs merge=lfs -text
9
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
10
+ *.model filter=lfs diff=lfs merge=lfs -text
11
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
12
+ *.npy filter=lfs diff=lfs merge=lfs -text
13
+ *.npz filter=lfs diff=lfs merge=lfs -text
14
+ *.onnx filter=lfs diff=lfs merge=lfs -text
15
+ *.ot filter=lfs diff=lfs merge=lfs -text
16
+ *.parquet filter=lfs diff=lfs merge=lfs -text
17
+ *.pickle filter=lfs diff=lfs merge=lfs -text
18
+ *.pkl filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pt filter=lfs diff=lfs merge=lfs -text
21
+ *.pth filter=lfs diff=lfs merge=lfs -text
22
+ *.rar filter=lfs diff=lfs merge=lfs -text
23
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
24
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
25
+ *.tflite filter=lfs diff=lfs merge=lfs -text
26
+ *.tgz filter=lfs diff=lfs merge=lfs -text
27
+ *.wasm filter=lfs diff=lfs merge=lfs -text
28
+ *.xz filter=lfs diff=lfs merge=lfs -text
29
+ *.zip filter=lfs diff=lfs merge=lfs -text
30
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
31
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: GLM 130B
3
+ emoji: 📚
4
+ colorFrom: yellow
5
+ colorTo: pink
6
+ sdk: gradio
7
+ sdk_version: 3.1.3
8
+ app_file: app.py
9
+ pinned: false
10
+ license: apache-2.0
11
+ duplicated_from: THUDM/GLM-130B
12
+ ---
13
+
14
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,138 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import requests
3
+
4
+ import json
5
+ import os
6
+
7
+
8
# Credentials for the tianqi.aminer.cn GLM-130B inference API, injected via
# environment variables (Hugging Face Space secrets); None when unset.
APIKEY = os.environ.get("APIKEY")
APISECRET = os.environ.get("APISECRET")
10
+
11
def predict(text, seed, out_seq_length, min_gen_length, sampling_strategy,
            num_beams, length_penalty, no_repeat_ngram_size,
            temperature, topk, topp):
    """Query the remote GLM-130B completion API and return generated text.

    Parameters mirror the Gradio controls: ``text`` is the prompt,
    ``sampling_strategy`` is 'BaseStrategy' or 'BeamSearchStrategy', and the
    remaining arguments are decoding hyper-parameters forwarded verbatim.

    Returns the generated string on success, or a human-readable error
    message (never raises into the Gradio callback).
    """
    # Empty prompt: bail out before doing any network work.
    if not text:
        return 'Input should not be empty!'

    url = 'https://tianqi.aminer.cn/api/v2/completions_130B'

    # APIKEY/APISECRET are module-level constants; no `global` needed for reads.
    payload = json.dumps({
        "apikey": APIKEY,
        "apisecret": APISECRET,
        "model_name": "glm-130b-v1",
        "prompt": text,
        "length_penalty": length_penalty,
        "temperature": temperature,
        "top_k": topk,
        "top_p": topp,
        "min_gen_length": min_gen_length,
        "sampling_strategy": sampling_strategy,
        "num_beams": num_beams,
        "max_tokens": out_seq_length,
        "no_repeat_ngram": no_repeat_ngram_size,
        "quantization": "int4",
        "seed": seed
    })

    headers = {
        'Content-Type': 'application/json'
    }

    try:
        # timeout=(connect, read): generation can take a while to stream back.
        response = requests.post(url, headers=headers, data=payload, timeout=(20, 100)).json()
    except Exception:
        # Connection errors, timeouts, and non-JSON bodies all land here;
        # surface a retry hint instead of crashing the UI callback.
        return 'Timeout! Please wait a few minutes and retry'

    # status == 1 is the API's error signal.
    if response.get('status') == 1:
        return response['message']['errmsg']

    try:
        answer = response['result']['output']['raw']
    except (KeyError, TypeError):
        # Malformed success response — don't let a KeyError escape.
        return 'Timeout! Please wait a few minutes and retry'
    if isinstance(answer, list):
        answer = answer[0]

    # Strip the model's end-of-sequence marker from the visible output.
    return answer.replace('[</s>]', '')
59
+
60
+
61
if __name__ == "__main__":

    # Example prompts for the Examples widget.  [MASK] marks a short blank
    # to fill in; [gMASK] triggers left-to-right long-text generation.
    en_fil = ['The Starry Night is an oil-on-canvas painting by [MASK] in June 1889.']
    en_gen = ['Question: What\'s the best winter resort city? User: A 10-year professional traveler. Answer: [gMASK]'] #['Eight planets in solar system are [gMASK]']
    ch_fil = ['凯旋门位于意大利米兰市古城堡旁。1807年为纪念[MASK]而建,门高25米,顶上矗立两武士青铜古兵车铸像。']
    ch_gen = ['三亚位于海南岛的最南端,是[gMASK]']
    en_to_ch = ['Pencil in Chinese is [MASK].']
    ch_to_en = ['"我思故我在"的英文是"[MASK]"。']

    examples = [en_fil, en_gen, ch_fil, ch_gen, en_to_ch, ch_to_en]

    with gr.Blocks() as demo:
        # Intro banner.  NOTE(review): "Univeristy" is a typo inside this
        # user-facing string; left unchanged to keep the rendered text
        # byte-identical (fixing it is a separate behavior change).
        gr.Markdown(
            """
            Dear friends,

            Nice to meet you here! This is a toy demo of GLM-130B, an open bilingual pre-trained model from Tsinghua Univeristy. GLM-130B uses two different mask tokens: `[MASK]` for short blank filling and `[gMASK]` for left-to-right long text generation. When the input does not contain any MASK token, `[gMASK]` will be automatically appended to the end of the text. We recommend that you use `[MASK]` to try text fill-in-the-blank to reduce wait time (ideally within seconds without queuing).

            This demo is a raw language model **without** instruction fine-tuning (which is applied to FLAN-* series) and RLHF (which is applied to ChatGPT); its ability is roughly between OpenAI `davinci` and `text-davinci-001`. Thus, it is currently worse than ChatGPT and other instruction fine-tuned models :(

            However, we are sparing no effort to improve it, and its updated versions will meet you soon! If you find the open-source effort useful, please star our [GitHub repo](https://github.com/THUDM/GLM-130B) to encourage our following development :)
            """)

        # Input column (prompt + buttons) on the left, output textbox on the right.
        with gr.Row():
            with gr.Column():
                model_input = gr.Textbox(lines=7, placeholder='Input something in English or Chinese', label='Input')
                with gr.Row():
                    gen = gr.Button("Generate")
                    clr = gr.Button("Clear")

            outputs = gr.Textbox(lines=7, label='Output')

        gr.Markdown(
            """
            Generation Parameter
            """)
        # Shared decoding parameters (apply to both strategies).
        with gr.Row():
            with gr.Column():
                seed = gr.Slider(maximum=100000, value=1234, step=1, label='Seed')
                out_seq_length = gr.Slider(maximum=256, value=128, minimum=32, step=1, label='Output Sequence Length')
            with gr.Column():
                min_gen_length = gr.Slider(maximum=64, value=0, step=1, label='Min Generate Length')
                sampling_strategy = gr.Radio(choices=['BeamSearchStrategy', 'BaseStrategy'], value='BaseStrategy', label='Search Strategy')

        # Strategy-specific parameters: beam search (left) vs. sampling (right).
        with gr.Row():
            with gr.Column():
                # beam search
                gr.Markdown(
                    """
                    BeamSearchStrategy
                    """)
                num_beams = gr.Slider(maximum=4, value=2, minimum=1, step=1, label='Number of Beams')
                length_penalty = gr.Slider(maximum=1, value=1, minimum=0, label='Length Penalty')
                no_repeat_ngram_size = gr.Slider(maximum=5, value=3, minimum=1, step=1, label='No Repeat Ngram Size')
            with gr.Column():
                # base search
                gr.Markdown(
                    """
                    BaseStrategy
                    """)
                temperature = gr.Slider(maximum=1, value=1.0, minimum=0, label='Temperature')
                topk = gr.Slider(maximum=40, value=0, minimum=0, step=1, label='Top K')
                topp = gr.Slider(maximum=1, value=0.7, minimum=0, label='Top P')

        # Order must match predict()'s positional signature.
        inputs = [model_input, seed, out_seq_length, min_gen_length, sampling_strategy, num_beams, length_penalty, no_repeat_ngram_size, temperature, topk, topp]
        gen.click(fn=predict, inputs=inputs, outputs=outputs)
        # Clear button: reset the input textbox (the lambda ignores its argument).
        clr.click(fn=lambda value: gr.update(value=""), inputs=clr, outputs=model_input)

        gr_examples = gr.Examples(examples=examples, inputs=model_input)

        gr.Markdown(
            """
            Disclaimer inspired from [BLOOM](https://huggingface.co/spaces/bigscience/bloom-book)

            GLM-130B was trained on web-crawled data, so it's hard to predict how GLM-130B will respond to particular prompts; harmful or otherwise offensive content may occur without warning. We prohibit users from knowingly generating or allowing others to knowingly generate harmful content, including Hateful, Harassment, Violence, Adult, Political, Deception, etc.
            """)

    demo.launch()