seawolf2357 committed on
Commit abb9e73
1 Parent(s): b36e692

Delete backup.412024.app.py

Files changed (1)
  1. backup.412024.app.py +0 -222
backup.412024.app.py DELETED
@@ -1,222 +0,0 @@
- import torch
- import transformers
- import gradio as gr
- from ragatouille import RAGPretrainedModel
- from huggingface_hub import InferenceClient
- import re
- from datetime import datetime
- import json
-
- import arxiv
- from utils import get_md_text_abstract, search_cleaner, get_arxiv_live_search
-
- retrieve_results = 20
- show_examples = True
- llm_models_to_choose = ['mistralai/Mixtral-8x7B-Instruct-v0.1','mistralai/Mistral-7B-Instruct-v0.2', 'google/gemma-7b-it', 'None']
-
- generate_kwargs = dict(
-     temperature = None,
-     max_new_tokens = 512,
-     top_p = None,
-     do_sample = False,
- )
-
- ## RAG Model
- RAG = RAGPretrainedModel.from_index("colbert/indexes/arxiv_colbert")
-
- try:
-     gr.Info("Setting up retriever, please wait...")
-     rag_initial_output = RAG.search("What is Generative AI in Healthcare?", k = 1)
-     gr.Info("Retriever working successfully!")
-
- except:
-     gr.Warning("Retriever not working!")
-
- ## Header
- mark_text = '# 🩺🔍 Search Results\n'
- header_text = "## Arxiv Paper Summary With QA Retrieval Augmented Generation \n"
-
- try:
-     with open("README.md", "r") as f:
-         mdfile = f.read()
-     date_pattern = r'Index Last Updated : \d{4}-\d{2}-\d{2}'
-     match = re.search(date_pattern, mdfile)
-     date = match.group().split(': ')[1]
-     formatted_date = datetime.strptime(date, '%Y-%m-%d').strftime('%d %b %Y')
-     header_text += f'Index Last Updated: {formatted_date}\n'
-     index_info = f"Semantic Search - up to {formatted_date}"
- except:
-     index_info = "Semantic Search"
-
- database_choices = [index_info,'Arxiv Search - Latest - (EXPERIMENTAL)']
-
- ## Arxiv API
- arx_client = arxiv.Client()
- is_arxiv_available = True
- check_arxiv_result = get_arxiv_live_search("What is Self Rewarding AI and how can it be used in Multi-Agent Systems?", arx_client, retrieve_results)
- if len(check_arxiv_result) == 0:
-     is_arxiv_available = False
-     print("Arxiv search not working, switching to default search ...")
-     database_choices = [index_info]
-
-
-
- ## Show examples
- sample_outputs = {
-     'output_placeholder': 'The LLM will provide an answer to your question here...',
-     'search_placeholder': '''
- 1. What is MoE?
- 2. What are Multi Agent Systems?
- 3. What is Self Rewarding AI?
- 4. What is Semantic and Episodic memory?
- 5. What is AutoGen?
- 6. What is ChatDev?
- 7. What is Omniverse?
- 8. What is Lumiere?
- 9. What is SORA?
- '''
- }
-
- output_placeholder = sample_outputs['output_placeholder']
- md_text_initial = sample_outputs['search_placeholder']
-
-
- def rag_cleaner(inp):
-     rank = inp['rank']
-     title = inp['document_metadata']['title']
-     content = inp['content']
-     date = inp['document_metadata']['_time']
-     return f"{rank}. <b> {title} </b> \n Date : {date} \n Abstract: {content}"
-
- def get_prompt_text(question, context, formatted = True, llm_model_picked = 'mistralai/Mistral-7B-Instruct-v0.2'):
-     if formatted:
-         sys_instruction = f"Context:\n {context} \n Given the following scientific paper abstracts, take a deep breath and lets think step by step to answer the question. Cite the titles of your sources when answering, do not cite links or dates."
-         message = f"Question: {question}"
-
-         if 'mistralai' in llm_model_picked:
-             return f"<s>" + f"[INST] {sys_instruction}" + f" {message}[/INST]"
-
-         elif 'gemma' in llm_model_picked:
-             return f"<bos><start_of_turn>user\n{sys_instruction}" + f" {message}<end_of_turn>\n"
-
-     return f"Context:\n {context} \n Given the following info, take a deep breath and lets think step by step to answer the question: {question}. Cite the titles of your sources when answering.\n\n"
-
- def get_references(question, retriever, k = retrieve_results):
-     rag_out = retriever.search(query=question, k=k)
-     return rag_out
-
- def get_rag(message):
-     return get_references(message, RAG)
-
- def SaveResponseAndRead(result):
-     documentHTML5='''
- <!DOCTYPE html>
- <html>
- <head>
-     <title>Read It Aloud</title>
-     <script type="text/javascript">
-         function readAloud() {
-             const text = document.getElementById("textArea").value;
-             const speech = new SpeechSynthesisUtterance(text);
-             window.speechSynthesis.speak(speech);
-         }
-     </script>
- </head>
- <body>
-     <h1>🔊 Read It Aloud</h1>
-     <textarea id="textArea" rows="10" cols="80">
- '''
-     documentHTML5 = documentHTML5 + result
-     documentHTML5 = documentHTML5 + '''
-     </textarea>
-     <br>
-     <button onclick="readAloud()">🔊 Read Aloud</button>
- </body>
- </html>
- '''
-     gr.HTML(documentHTML5)
-
-
- with gr.Blocks(theme = gr.themes.Soft()) as demo:
-     header = gr.Markdown(header_text)
-
-     with gr.Group():
-         msg = gr.Textbox(label = 'Search', placeholder = 'What is Mistral?')
-
-         with gr.Accordion("Advanced Settings", open=False):
-             with gr.Row(equal_height = True):
-                 llm_model = gr.Dropdown(choices = llm_models_to_choose, value = 'mistralai/Mistral-7B-Instruct-v0.2', label = 'LLM Model')
-                 llm_results = gr.Slider(minimum=4, maximum=10, value=5, step=1, interactive=True, label="Top n results as context")
-                 database_src = gr.Dropdown(choices = database_choices, value = index_info, label = 'Search Source')
-                 stream_results = gr.Checkbox(value = True, label = "Stream output", visible = False)
-
-     output_text = gr.Textbox(show_label = True, container = True, label = 'LLM Answer', visible = True, placeholder = output_placeholder)
-     input = gr.Textbox(show_label = False, visible = False)
-     gr_md = gr.Markdown(mark_text + md_text_initial)
-
-     def update_with_rag_md(message, llm_results_use = 5, database_choice = index_info, llm_model_picked = 'mistralai/Mistral-7B-Instruct-v0.2'):
-         prompt_text_from_data = ""
-         database_to_use = database_choice
-         if database_choice == index_info:
-             rag_out = get_rag(message)
-         else:
-             arxiv_search_success = True
-             try:
-                 rag_out = get_arxiv_live_search(message, arx_client, retrieve_results)
-                 if len(rag_out) == 0:
-                     arxiv_search_success = False
-             except:
-                 arxiv_search_success = False
-
-
-             if not arxiv_search_success:
-                 gr.Warning("Arxiv Search not working, switching to semantic search ...")
-                 rag_out = get_rag(message)
-                 database_to_use = index_info
-
-         md_text_updated = mark_text
-         for i in range(retrieve_results):
-             rag_answer = rag_out[i]
-             if i < llm_results_use:
-                 md_text_paper, prompt_text = get_md_text_abstract(rag_answer, source = database_to_use, return_prompt_formatting = True)
-                 prompt_text_from_data += f"{i+1}. {prompt_text}"
-             else:
-                 md_text_paper = get_md_text_abstract(rag_answer, source = database_to_use)
-             md_text_updated += md_text_paper
-         prompt = get_prompt_text(message, prompt_text_from_data, llm_model_picked = llm_model_picked)
-         return md_text_updated, prompt
-
-     def ask_llm(prompt, llm_model_picked = 'mistralai/Mistral-7B-Instruct-v0.2', stream_outputs = False):
-         model_disabled_text = "LLM Model is disabled"
-         output = ""
-
-         if llm_model_picked == 'None':
-             if stream_outputs:
-                 for out in model_disabled_text:
-                     output += out
-                     yield output
-                 return output
-             else:
-                 return model_disabled_text
-
-         client = InferenceClient(llm_model_picked)
-         try:
-             stream = client.text_generation(prompt, **generate_kwargs, stream=stream_outputs, details=False, return_full_text=False)
-
-         except:
-             gr.Warning("LLM Inference rate limit reached, try again later!")
-             return ""
-
-         if stream_outputs:
-             for response in stream:
-                 output += response
-                 SaveResponseAndRead(response)
-                 yield output
-             return output
-         else:
-             return stream
-
-
-     msg.submit(update_with_rag_md, [msg, llm_results, database_src, llm_model], [gr_md, input]).success(ask_llm, [input, llm_model, stream_results], output_text)
-
- demo.queue().launch()