TrgTuan10 committed
Commit 406df24
1 Parent(s): 89368d2

add app.py

Files changed (1)
  app.py +21 -1
app.py CHANGED
@@ -1,3 +1,23 @@
+from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
 import gradio as gr
 
-gr.load("models/zenai-org/SmolLM-prompt-generation").launch(share=True)
+# First define a prediction function that takes in a text prompt and returns the text completion
+
+model = pipeline("text-generation", model="zenai-org/SmolLM-prompt-generation")
+
+def predict(prompt):
+    out = model(
+        prompt,
+        max_length=77,    # Max length of the generated sequence
+        min_length=10,    # Minimum length of the generated sequence
+        do_sample=True,   # Enable sampling
+        top_k=50,         # Top-k sampling
+        top_p=0.95,       # Top-p sampling
+        temperature=0.7,  # Control the creativity of the output
+        eos_token_id=0,   # End-of-sequence token
+        # pad_token_id = tokenizer.eos_token_id,
+    )
+    return out[0]['generated_text']
+
+# Now create the interface
+gr.Interface(fn=predict, inputs="text", outputs="text", css=".footer{display:none !important}").launch(share=True)
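
This commit swaps gr.load(), which serves the model through Gradio's hosted-inference integration, for a local transformers pipeline wrapped in gr.Interface, which gives direct control over the sampling parameters. A minimal sketch for smoke-testing the new predict path outside Gradio (the prompt string is only an illustrative input; the first run downloads the model from the Hub):

from transformers import pipeline

# Same model and generation settings as in the new app.py
model = pipeline("text-generation", model="zenai-org/SmolLM-prompt-generation")
out = model(
    "a cat sitting on",  # hypothetical test prompt
    max_length=77,
    min_length=10,
    do_sample=True,
    top_k=50,
    top_p=0.95,
    temperature=0.7,
    eos_token_id=0,
)
print(out[0]["generated_text"])  # prints the sampled completion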