import torch
from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
import gradio as gr

# Prepare the tokenizer and the fine-tuned summarization model.
# Tokenizer comes from the base Japanese T5 checkpoint; weights are loaded
# from the local 'models/' directory (assumed to be a fine-tuned copy —
# TODO confirm the checkpoint layout).
tokenizer = AutoTokenizer.from_pretrained('sonoisa/t5-base-japanese')
model = AutoModelForSeq2SeqLM.from_pretrained('models/')
# Inference-only script: switch to eval mode once at startup instead of
# on every request.
model.eval()


def summary(text):
    """Summarize a Japanese text with the fine-tuned T5 model.

    Args:
        text: Input passage to summarize. Inputs longer than 512 tokens
            are truncated.

    Returns:
        The generated summary as a plain string (special tokens removed).
    """
    # Encode the text into a batch of input ids (truncated to the model's
    # 512-token limit).
    input_ids = tokenizer.encode(
        text, return_tensors='pt', max_length=512, truncation=True
    )
    # Generate without tracking gradients — inference only.
    with torch.no_grad():
        summary_ids = model.generate(input_ids)
    # skip_special_tokens=True removes pad/eos/decoder-start tokens robustly,
    # unlike the fragile summary_ids[0][1:-1] slice, which assumes exactly
    # one special token at each end of the sequence.
    return tokenizer.decode(summary_ids[0], skip_special_tokens=True)


descriptions = "T5による文章要約。文章を入力すると、その要約文を出力します。"
demo = gr.Interface(
    fn=summary,
    inputs=gr.Textbox(lines=5, placeholder="文章を入力してください"),
    outputs=gr.Textbox(lines=5),
    title="Sentence Summary",
    description=descriptions,
)
demo.launch()