Spaces: Runtime error
Update app.py
app.py
CHANGED
@@ -34,22 +34,22 @@ print("Dolly Pipeline Loaded!")
 llm_dolly = HuggingFacePipeline(pipeline=instruct_pipeline)
 
 
-print("Loading Pipeline Alpaca...")
-tokenizer_alpaca = LlamaTokenizer.from_pretrained('minlik/chinese-alpaca-plus-7b-merged')
-model_alpaca = LlamaForCausalLM.from_pretrained('minlik/chinese-alpaca-plus-7b-merged')
-instruct_pipeline_alpaca = pipeline(
-    "text-generation",
-    model=model_alpaca,
-    tokenizer=tokenizer_alpaca,
-    max_length=1024,
-    temperature=0.6,
-    pad_token_id=tokenizer_alpaca.eos_token_id,
-    top_p=0.95,
-    repetition_penalty=1.2,
-    device_map= "auto"
-)
-print("Pipeline Loaded Alpaca!")
-llm_alpaca = HuggingFacePipeline(pipeline=instruct_pipeline_alpaca)
+# print("Loading Pipeline Alpaca...")
+# tokenizer_alpaca = LlamaTokenizer.from_pretrained('minlik/chinese-alpaca-plus-7b-merged')
+# model_alpaca = LlamaForCausalLM.from_pretrained('minlik/chinese-alpaca-plus-7b-merged')
+# instruct_pipeline_alpaca = pipeline(
+#     "text-generation",
+#     model=model_alpaca,
+#     tokenizer=tokenizer_alpaca,
+#     max_length=1024,
+#     temperature=0.6,
+#     pad_token_id=tokenizer_alpaca.eos_token_id,
+#     top_p=0.95,
+#     repetition_penalty=1.2,
+#     device_map= "auto"
+# )
+# print("Pipeline Loaded Alpaca!")
+# llm_alpaca = HuggingFacePipeline(pipeline=instruct_pipeline_alpaca)
 
 def summarize(Model, File, Input_text):
     prompt_template = """Write a concise summary of the following:
@@ -80,7 +80,7 @@ def summarize(Model, File, Input_text):
     if Model=='Dolly':
         chain = load_summarize_chain(llm_dolly, chain_type="refine", question_prompt=PROMPT)
     else:
-        chain = load_summarize_chain(llm_alpaca, chain_type="refine", question_prompt=PROMPT)
+        chain = load_summarize_chain(llm_dolly, chain_type="refine", question_prompt=PROMPT)
     summary_text = chain({"input_documents": docs}, return_only_outputs=True)
     print(summary_text["output_text"])
     return summary_text["output_text"]
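
The Space's "Runtime error" badge and this commit suggest the chinese-alpaca-plus-7b-merged load is what was crashing the app at startup, which is why the whole Alpaca block is commented out. An alternative sketch that keeps the same model id and pipeline arguments from the diff but gates the heavy load behind a flag; the LOAD_ALPACA environment variable and the llm_alpaca = None fallback are illustrative assumptions, not part of this Space:

import os
from transformers import LlamaTokenizer, LlamaForCausalLM, pipeline
from langchain.llms import HuggingFacePipeline

# Illustrative guard: only load the 7B Alpaca model when explicitly requested,
# so the Space can still start on hardware that cannot hold both models.
LOAD_ALPACA = os.environ.get("LOAD_ALPACA", "0") == "1"

llm_alpaca = None
if LOAD_ALPACA:
    print("Loading Pipeline Alpaca...")
    tokenizer_alpaca = LlamaTokenizer.from_pretrained('minlik/chinese-alpaca-plus-7b-merged')
    model_alpaca = LlamaForCausalLM.from_pretrained('minlik/chinese-alpaca-plus-7b-merged')
    instruct_pipeline_alpaca = pipeline(
        "text-generation",
        model=model_alpaca,
        tokenizer=tokenizer_alpaca,
        max_length=1024,
        temperature=0.6,
        pad_token_id=tokenizer_alpaca.eos_token_id,
        top_p=0.95,
        repetition_penalty=1.2,
        device_map="auto",
    )
    llm_alpaca = HuggingFacePipeline(pipeline=instruct_pipeline_alpaca)
    print("Pipeline Loaded Alpaca!")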
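
With llm_alpaca no longer defined, the old else branch would raise a NameError at call time, which is why the commit points both branches of summarize at llm_dolly. A minimal sketch of equivalent selection logic that would also work with the guarded loader above; pick_llm is an illustrative helper, not a function in this Space:

from langchain.chains.summarize import load_summarize_chain

def pick_llm(model_name):
    # Fall back to Dolly whenever the Alpaca pipeline was never loaded.
    if model_name == 'Dolly' or llm_alpaca is None:
        return llm_dolly
    return llm_alpaca

# Inside summarize(), the duplicated call then collapses to one line:
# chain = load_summarize_chain(pick_llm(Model), chain_type="refine", question_prompt=PROMPT)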