run480 committed on
Commit
919dfe7
1 Parent(s): a533979

Update app.py

Browse files

T5 question generation

Files changed (1) hide show
  1. app.py +43 -9
app.py CHANGED
@@ -152,7 +152,8 @@
152
  # grad.Interface(classify, inputs=[txt,labels], outputs=out).launch()
153
 
154
  #-----------------------------------------------------------------------------------
155
- # 8. Text Generation Task/Models
 
156
  # The earliest text generation models were based on Markov chains . Markov chains are like a state machine wherein
157
  # using only the previous state, the next state is predicted. This is similar also to what we studied in bigrams.
158
 
@@ -205,17 +206,50 @@
205
  #-----------------------------------------------------------------------------------
206
  # 9. Text Generation: different model "distilgpt2"
207
 
208
- from transformers import pipeline, set_seed
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
209
  import gradio as grad
210
 
211
- gpt2_pipe = pipeline('text-generation', model='distilgpt2')
212
- set_seed(42)
213
 
214
- def generate(starting_text):
215
- response= gpt2_pipe(starting_text, max_length=20, num_return_sequences=5)
 
 
 
 
 
216
  return response
217
 
218
- txt=grad.Textbox(lines=1, label="English", placeholder="English Text here")
219
- out=grad.Textbox(lines=1, label="Generated Text")
 
 
 
 
 
220
 
221
- grad.Interface(generate, inputs=txt, outputs=out).launch()
 
152
  # grad.Interface(classify, inputs=[txt,labels], outputs=out).launch()
153
 
154
  #-----------------------------------------------------------------------------------
155
+ # 8. Text Generation Task/Models with GPT2 model
156
+
157
  # The earliest text generation models were based on Markov chains . Markov chains are like a state machine wherein
158
  # using only the previous state, the next state is predicted. This is similar also to what we studied in bigrams.
159
 
 
206
  #-----------------------------------------------------------------------------------
207
  # 9. Text Generation: different model "distilgpt2"
208
 
209
+ # from transformers import pipeline, set_seed
210
+ # import gradio as grad
211
+
212
+ # gpt2_pipe = pipeline('text-generation', model='distilgpt2')
213
+ # set_seed(42)
214
+
215
+ # def generate(starting_text):
216
+ # response= gpt2_pipe(starting_text, max_length=20, num_return_sequences=5)
217
+ # return response
218
+
219
+ # txt=grad.Textbox(lines=1, label="English", placeholder="English Text here")
220
+ # out=grad.Textbox(lines=1, label="Generated Text")
221
+
222
+ # grad.Interface(generate, inputs=txt, outputs=out).launch()
223
+
224
+ #-----------------------------------------------------------------------------------
225
+ # 10. Text-to-Text Generation using the T5 model - first use case generates a question given some context.
226
+
227
+ # A transformer-based architecture that takes a text-to-text approach is referred to as T5, which stands for Text-to-Text Transfer Transformer.
228
+
229
+ # In the text-to-text approach, we take a task like Q&A, classification, summarization, code generation, etc. and turn it into a problem,
230
+ # which provides the model with some form of input and then teaches it to generate some form of target text. This makes it possible to apply
231
+ # the same model, loss function, hyperparameters, and other settings to our varied set of tasks.
232
+
233
# Load the answer-aware question-generation checkpoint once at module import.
# NOTE(review): AutoModelWithLMHead is deprecated in transformers; for a T5
# (encoder-decoder) checkpoint the correct auto class is AutoModelForSeq2SeqLM.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
import gradio as grad

# Tokenizer and model share the same fine-tuned T5 checkpoint.
text2text_tkn = AutoTokenizer.from_pretrained("mrm8488/t5-base-finetuned-question-generation-ap")
mdl = AutoModelForSeq2SeqLM.from_pretrained("mrm8488/t5-base-finetuned-question-generation-ap")
238
 
239
def text2text(context, answer):
    """Generate a question from `context` whose answer is `answer`.

    Builds the "answer: ... context: ..." prompt format that the
    mrm8488/t5-base-finetuned-question-generation-ap checkpoint was
    fine-tuned on, runs generation capped at 64 tokens, and returns
    the decoded question as a plain string.
    """
    input_text = "answer: %s context: %s </s>" % (answer, context)
    features = text2text_tkn([input_text], return_tensors='pt')
    output = mdl.generate(input_ids=features['input_ids'],
                          attention_mask=features['attention_mask'],
                          max_length=64)
    # skip_special_tokens=True keeps <pad>/</s> markers out of the
    # user-visible output (plain decode() includes them for T5).
    response = text2text_tkn.decode(output[0], skip_special_tokens=True)
    return response
247
 
248
# Gradio UI: a multi-line context box and a single-line answer box in,
# the generated question out.
context = grad.Textbox(lines=10, label="English", placeholder="Context")
ans = grad.Textbox(lines=1, label="Answer")
# Fixed user-facing label typo: "Genereated" -> "Generated".
out = grad.Textbox(lines=1, label="Generated Question")

grad.Interface(text2text, inputs=[context, ans], outputs=out).launch()
253
+
254
+
255