from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import gradio as gr

# First prompt-generator model (BART fine-tuned to turn a persona into a ChatGPT prompt).
tokenizer = AutoTokenizer.from_pretrained("merve/chatgpt-prompt-generator-v12")
model = AutoModelForSeq2SeqLM.from_pretrained("merve/chatgpt-prompt-generator-v12", from_tf=True)

# Second prompt-generator model.
tokenizer2 = AutoTokenizer.from_pretrained("Kaludi/chatgpt-gpt4-prompts-bart-large-cnn-samsum")
model2 = AutoModelForSeq2SeqLM.from_pretrained("Kaludi/chatgpt-gpt4-prompts-bart-large-cnn-samsum", from_tf=True)


def generate(prompt):
    """Generate a ChatGPT prompt from a persona using the first model."""
    batch = tokenizer(prompt, return_tensors="pt")
    generated_ids = model.generate(batch["input_ids"], max_new_tokens=150)
    output = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
    return output[0]


def generate2(prompt, max_new_tokens):
    """Generate a prompt with the second model, honoring the requested output length."""
    batch = tokenizer2(prompt, return_tensors="pt")
    generated_ids = model2.generate(batch["input_ids"], max_new_tokens=max_new_tokens)
    output = tokenizer2.batch_decode(generated_ids, skip_special_tokens=True)
    return output[0]


def generate2_test(prompt):
    """Fixed-length variant of generate2; this is the function wired into the Gradio UI."""
    return generate2(prompt, max_new_tokens=150)


def generate_prompt(model_choice, prompt, max_new_tokens):
    """Dispatch to one of the two generators (1 = merve model, 2 = Kaludi model)."""
    if model_choice == 1:
        return generate(prompt)
    elif model_choice == 2:
        return generate2(prompt, max_new_tokens)


input_component = gr.Textbox(label="Input a persona, e.g. photographer", value="photographer")
output_component = gr.Textbox(label="Prompt")
examples = [["photographer"], ["developer"]]
description = ""

gr.Interface(
    generate2_test,
    inputs=input_component,
    outputs=output_component,
    examples=examples,
    title="👨🏻‍🎤 ChatGPT Prompt Generator v12 👨🏻‍🎤",
    description=description,
).launch()
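# Usage sketch: a minimal example, assuming the models above loaded successfully, of
# calling the generators directly instead of through the Gradio UI. Kept as comments
# because .launch() blocks when the script runs, so code placed after it would not execute.
#
#   print(generate_prompt(1, "photographer", 150))   # merve model, persona -> prompt
#   print(generate_prompt(2, "developer", 100))      # Kaludi model, shorter output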