seawolf2357 committed on
Commit
b36e692
1 Parent(s): 3616ff0

Delete backup.history.03212024.app.py

Files changed (1)
  1. backup.history.03212024.app.py +0 -235
backup.history.03212024.app.py DELETED
@@ -1,235 +0,0 @@
import torch
import transformers
import gradio as gr
from ragatouille import RAGPretrainedModel
from huggingface_hub import InferenceClient
import re
from datetime import datetime
import json
import os

import arxiv
from utils import get_md_text_abstract, search_cleaner, get_arxiv_live_search

retrieve_results = 10
show_examples = False
llm_models_to_choose = ['mistralai/Mixtral-8x7B-Instruct-v0.1', 'mistralai/Mistral-7B-Instruct-v0.2', 'google/gemma-7b-it', 'None']
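
# Decoding settings forwarded to InferenceClient.text_generation: greedy decoding
# (do_sample=False) capped at 512 new tokens; temperature and top_p stay None so
# the endpoint defaults apply.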
generate_kwargs = dict(
    temperature = None,
    max_new_tokens = 512,
    top_p = None,
    do_sample = False,
)

## RAG Model
RAG = RAGPretrainedModel.from_index("colbert/indexes/arxiv_colbert")

# Smoke-test the retriever once at startup so a broken index surfaces early.
try:
    gr.Info("Setting up retriever, please wait...")
    rag_initial_output = RAG.search("what is Mistral?", k = 1)
    gr.Info("Retriever working successfully!")
except Exception:
    gr.Warning("Retriever not working!")

## Header
mark_text = '# 🩺🔍 Search Results\n'
header_text = "## Arxiv Paper Summary With QA Retrieval Augmented Generation \n"

# Pull the "Index Last Updated" date out of the README, if present, to label the index.
try:
    with open("README.md", "r") as f:
        mdfile = f.read()
    date_pattern = r'Index Last Updated : \d{4}-\d{2}-\d{2}'
    match = re.search(date_pattern, mdfile)
    date = match.group().split(': ')[1]
    formatted_date = datetime.strptime(date, '%Y-%m-%d').strftime('%d %b %Y')
    header_text += f'Index Last Updated: {formatted_date}\n'
    index_info = f"Semantic Search - up to {formatted_date}"
except Exception:
    index_info = "Semantic Search"

# The live arXiv option is removed below if the API check fails.
database_choices = [index_info, 'Arxiv Search - Latest - (EXPERIMENTAL)']

## Arxiv API
arx_client = arxiv.Client()
is_arxiv_available = True
check_arxiv_result = get_arxiv_live_search("What is Self Rewarding AI and how can it be used in Multi-Agent Systems?", arx_client, retrieve_results)
if len(check_arxiv_result) == 0:
    is_arxiv_available = False
    print("Arxiv search not working, switching to default search ...")
    database_choices = [index_info]

## Show examples (disabled)
if show_examples:
    with open("sample_outputs.json", "r") as f:
        sample_outputs = json.load(f)
    output_placeholder = sample_outputs['output_placeholder']
    md_text_initial = sample_outputs['search_placeholder']
else:
    output_placeholder = None
    md_text_initial = ''
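
## Helpers

# Format one retrieval hit (rank, title, date, abstract) for display.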
def rag_cleaner(inp):
    rank = inp['rank']
    title = inp['document_metadata']['title']
    content = inp['content']
    date = inp['document_metadata']['_time']
    return f"{rank}. <b> {title} </b> \n Date : {date} \n Abstract: {content}"
def get_prompt_text(question, context, formatted = True, llm_model_picked = 'mistralai/Mistral-7B-Instruct-v0.2'):
    if formatted:
        sys_instruction = f"Context:\n {context} \n Given the following scientific paper abstracts, take a deep breath and let's think step by step to answer the question. Cite the titles of your sources when answering, do not cite links or dates."
        message = f"Question: {question}"

        if 'mistralai' in llm_model_picked:
            return f"<s>[INST] {sys_instruction} {message}[/INST]"

        elif 'gemma' in llm_model_picked:
            return f"<bos><start_of_turn>user\n{sys_instruction} {message}<end_of_turn>\n"

    return f"Context:\n {context} \n Given the following info, take a deep breath and let's think step by step to answer the question: {question}. Cite the titles of your sources when answering.\n\n"
def get_references(question, retriever, k = retrieve_results):
    rag_out = retriever.search(query=question, k=k)
    return rag_out

def get_rag(message):
    return get_references(message, RAG)
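
# Embed the answer in a small HTML page with a SpeechSynthesis "Read Aloud" button.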
def SaveResponseAndRead(result):
    documentHTML5 = '''
    <!DOCTYPE html>
    <html>
    <head>
        <title>Read It Aloud</title>
        <script type="text/javascript">
            function readAloud() {
                const text = document.getElementById("textArea").value;
                const speech = new SpeechSynthesisUtterance(text);
                window.speechSynthesis.speak(speech);
            }
        </script>
    </head>
    <body>
        <h1>🔊 Read It Aloud</h1>
        <textarea id="textArea" rows="10" cols="80">
    '''
    documentHTML5 = documentHTML5 + result
    documentHTML5 = documentHTML5 + '''
        </textarea>
        <br>
        <button onclick="readAloud()">🔊 Read Aloud</button>
    </body>
    </html>
    '''
    gr.HTML(documentHTML5)
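
# Persist a search to a sanitized .txt file so it shows up under "Past Searches".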
def save_search_results(prompt, results):
    filename = re.sub(r'[^\w\-_\. ]', '_', prompt) + ".txt"
    with open(filename, "w") as f:
        f.write(f"# {prompt}\n\n")
        f.write(results)

def get_past_searches():
    txt_files = [f for f in os.listdir(".") if f.endswith(".txt") and f != "requirements.txt"]
    return txt_files
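
## UI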
with gr.Blocks(theme = gr.themes.Soft()) as demo:
    header = gr.Markdown(header_text)

    with gr.Row():
        with gr.Column():
            msg = gr.Textbox(label = 'Search', placeholder = 'What is Mistral?')

            with gr.Accordion("Advanced Settings", open=False):
                with gr.Row(equal_height = True):
                    llm_model = gr.Dropdown(choices = llm_models_to_choose, value = 'mistralai/Mistral-7B-Instruct-v0.2', label = 'LLM Model')
                    llm_results = gr.Slider(minimum=4, maximum=10, value=5, step=1, interactive=True, label="Top n results as context")
                    database_src = gr.Dropdown(choices = database_choices, value = index_info, label = 'Search Source')
                    stream_results = gr.Checkbox(value = True, label = "Stream output", visible = False)

            output_text = gr.Textbox(show_label = True, container = True, label = 'LLM Answer', visible = True, placeholder = output_placeholder)
            input = gr.Textbox(show_label = False, visible = False)
            gr_md = gr.Markdown(mark_text + md_text_initial)

        with gr.Column():
            past_searches = gr.Dropdown(choices=get_past_searches(), label="Past Searches")
            past_search_content = gr.Textbox(label="Past Search Content", visible=False)

    # Show the chosen past search; gr.update works across Gradio versions,
    # unlike gr.Textbox.update, which Gradio 4 removed.
    def update_past_search_content(past_search):
        if past_search:
            with open(past_search, "r") as f:
                content = f.read()
            return gr.update(value=content, visible=True)
        else:
            return gr.update(visible=False)

    past_searches.change(update_past_search_content, past_searches, past_search_content)
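
    # Run retrieval (semantic index or live arXiv), render the hits as markdown,
    # build the LLM prompt from the top results, and persist the search.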
    def update_with_rag_md(message, llm_results_use = 5, database_choice = index_info, llm_model_picked = 'mistralai/Mistral-7B-Instruct-v0.2'):
        prompt_text_from_data = ""
        database_to_use = database_choice
        if database_choice == index_info:
            rag_out = get_rag(message)
        else:
            arxiv_search_success = True
            try:
                rag_out = get_arxiv_live_search(message, arx_client, retrieve_results)
                if len(rag_out) == 0:
                    arxiv_search_success = False
            except Exception:
                arxiv_search_success = False

            if not arxiv_search_success:
                gr.Warning("Arxiv Search not working, switching to semantic search ...")
                rag_out = get_rag(message)
                database_to_use = index_info

        md_text_updated = mark_text
        for i in range(min(retrieve_results, len(rag_out))):  # guard against short result lists
            rag_answer = rag_out[i]
            if i < llm_results_use:
                md_text_paper, prompt_text = get_md_text_abstract(rag_answer, source = database_to_use, return_prompt_formatting = True)
                prompt_text_from_data += f"{i+1}. {prompt_text}"
            else:
                md_text_paper = get_md_text_abstract(rag_answer, source = database_to_use)
            md_text_updated += md_text_paper
        prompt = get_prompt_text(message, prompt_text_from_data, llm_model_picked = llm_model_picked)
        save_search_results(message, md_text_updated)
        # Refresh the dropdown's choices rather than setting its value.
        return md_text_updated, prompt, gr.update(choices=get_past_searches())
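
    # Query the hosted model; written as a generator so tokens can stream into
    # the answer textbox.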
    def ask_llm(prompt, llm_model_picked = 'mistralai/Mistral-7B-Instruct-v0.2', stream_outputs = False):
        model_disabled_text = "LLM Model is disabled"
        output = ""

        if llm_model_picked == 'None':
            if stream_outputs:
                for out in model_disabled_text:
                    output += out
                    yield output
            else:
                yield model_disabled_text  # a plain `return value` inside a generator would be lost
            return

        client = InferenceClient(llm_model_picked)
        try:
            stream = client.text_generation(prompt, **generate_kwargs, stream=stream_outputs, details=False, return_full_text=False)
        except Exception:
            gr.Warning("LLM Inference rate limit reached, try again later!")
            yield ""
            return

        if stream_outputs:
            for response in stream:
                output += response
                SaveResponseAndRead(response)
                yield output
        else:
            yield stream  # non-streaming call returns the full generated text

    msg.submit(update_with_rag_md, [msg, llm_results, database_src, llm_model], [gr_md, input, past_searches]).success(ask_llm, [input, llm_model, stream_results], output_text)

demo.queue().launch()