Michelangiolo committed on
Commit
cf38d1a
1 Parent(s): 72872c2
Files changed (6)
  1. _test.ipynb +436 -145
  2. app.py +155 -200
  3. app_old.py +151 -0
  4. create_vectors.ipynb +951 -0
  5. df.parquet +3 -0
  6. df_qa.parquet +3 -0
_test.ipynb CHANGED
@@ -2,134 +2,11 @@
2
  "cells": [
3
  {
4
  "cell_type": "code",
5
- "execution_count": 39,
6
  "metadata": {},
7
- "outputs": [
8
- {
9
- "name": "stdout",
10
- "output_type": "stream",
11
- "text": [
12
- "Our company builds AI Recommendation Systems for Matching Platforms using the latest technology company goliath\n",
13
- "sending request\n",
14
- "<Response [200]>\n",
15
- "Goliath builds AI Recommendation Systems for Matching Platforms using the latest technology.\n",
16
- "Our company builds AI Recommendation Systems for Matching Platforms using the latest technology company we\n",
17
- "sending request\n",
18
- "<Response [200]>\n",
19
- "We build AI Recommendation Systems for Matching Platforms using the latest technology.\n",
20
- "Our company is estabilished and operates in Japan company goliath\n",
21
- "sending request\n",
22
- "<Response [200]>\n",
23
- "Our goliath is established and operates in Japan.\n",
24
- "Our company is estabilished and operates in Japan company we\n",
25
- "sending request\n",
26
- "<Response [200]>\n",
27
- "Our we is established and operates in Japan.\n",
28
- "Our company uses the AWS Cloud to manage Servers company goliath\n",
29
- "sending request\n",
30
- "<Response [200]>\n",
31
- "Goliath uses the AWS Cloud to manage Servers.\n",
32
- "Our company uses the AWS Cloud to manage Servers company we\n",
33
- "sending request\n",
34
- "<Response [200]>\n",
35
- "We use the AWS Cloud to manage Servers.\n",
36
- "Our company can use GPT3 as well company goliath\n",
37
- "sending request\n",
38
- "<Response [200]>\n",
39
- "Goliath can use GPT3 as well.\n",
40
- "Our company can use GPT3 as well company we\n",
41
- "sending request\n",
42
- "<Response [200]>\n",
43
- "We can use GPT3 as well.\n",
44
- "Our company also builds GPT3-based chatbots company goliath\n",
45
- "sending request\n",
46
- "<Response [200]>\n",
47
- "Goliath also builds GPT3-based chatbots.\n",
48
- "Our company also builds GPT3-based chatbots company we\n",
49
- "sending request\n",
50
- "<Response [200]>\n",
51
- "We also build GPT3-based chatbots.\n",
52
- "Our company can use open-source models, if requested company goliath\n",
53
- "sending request\n",
54
- "<Response [200]>\n",
55
- "Our goliath can use open-source models, if requested.\n",
56
- "Our company can use open-source models, if requested company we\n",
57
- "sending request\n",
58
- "<Response [200]>\n",
59
- "If requested, we can use open-source models.\n",
60
- "Our company uses open source models. company goliath\n",
61
- "sending request\n",
62
- "<Response [200]>\n",
63
- "Goliath uses open source models.\n",
64
- "Our company uses open source models. company we\n",
65
- "sending request\n",
66
- "<Response [200]>\n",
67
- "We use open source models.\n"
68
- ]
69
- },
70
- {
71
- "data": {
72
- "text/plain": [
73
- "['Goliath operates in Japan',\n",
74
- " 'Goliath builds AI Recommendation Systems for Matching Platforms using the latest technology.',\n",
75
- " 'Goliath builds AI Recommendation Systems for Matching Platforms using the latest technology.',\n",
76
- " 'We build AI Recommendation Systems for Matching Platforms using the latest technology.',\n",
77
- " 'Our company builds AI Recommendation Systems for Matching Platforms using the latest technology',\n",
78
- " 'Our goliath is established and operates in Japan.',\n",
79
- " 'Our we is established and operates in Japan.',\n",
80
- " 'Our company is estabilished and operates in Japan',\n",
81
- " 'Goliath uses the AWS Cloud to manage Servers.',\n",
82
- " 'We use the AWS Cloud to manage Servers.',\n",
83
- " 'Our company uses the AWS Cloud to manage Servers',\n",
84
- " 'Goliath can use GPT3 as well.',\n",
85
- " 'We can use GPT3 as well.',\n",
86
- " 'Our company can use GPT3 as well',\n",
87
- " 'Goliath also builds GPT3-based chatbots.',\n",
88
- " 'We also build GPT3-based chatbots.',\n",
89
- " 'Our company also builds GPT3-based chatbots',\n",
90
- " 'Our goliath can use open-source models, if requested.',\n",
91
- " 'If requested, we can use open-source models.',\n",
92
- " 'Our company can use open-source models, if requested',\n",
93
- " 'Goliath uses open source models.',\n",
94
- " 'We use open source models.',\n",
95
- " 'Our company uses open source models.',\n",
96
- " 'The price of a recommendation system depends on the amount of complexity that is required to build, as well as the volume of customers. Reach us to get a quotation',\n",
97
- " 'The price of a chatbot depends by its intended usage and complexity, contact us for a quotation.',\n",
98
- " 'If your company wants to recommend products to customers, we can build a recommendation system for you',\n",
99
- " 'GPT3 can be used to build recommendation systems by using embeddings, mapping choices in a mathematical space',\n",
100
- " 'Once the recommendation system has been built, we will manage it in the future as well',\n",
101
- " 'Recommendation system could also be built for startups, though they will be in smaller size',\n",
102
- " 'We use AWS OpenSearch to host recommendation system.',\n",
103
- " 'A matching platform is a business with thousands of users, who could be customers, individuals or companies, who are interacting with one another. For example dating apps, ecommerce platforms, or job recruiting platforms.']"
104
- ]
105
- },
106
- "execution_count": 39,
107
- "metadata": {},
108
- "output_type": "execute_result"
109
- }
110
- ],
111
  "source": [
112
  "import pandas as pd\n",
113
- "import requests\n",
114
- "\n",
115
- "def gpt3_question(prompt):\n",
116
- " api_key = \"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\"\n",
117
- " api_endpoint = \"https://api.openai.com/v1/engines/text-davinci-003/completions\"\n",
118
- " headers = {\n",
119
- " \"Content-Type\": \"application/json\",\n",
120
- " \"Authorization\": f\"Bearer {api_key}\"\n",
121
- " }\n",
122
- " data = {\n",
123
- " \"prompt\": prompt,\n",
124
- " \"max_tokens\": 400,\n",
125
- " \"temperature\": 0.5\n",
126
- " }\n",
127
- " print('sending request')\n",
128
- " response = requests.post(api_endpoint, headers=headers, json=data)\n",
129
- " print(response)\n",
130
- " generated_text = response.json()[\"choices\"][0][\"text\"]\n",
131
- "\n",
132
- " return generated_text\n",
133
  "\n",
134
  "context_dict = {\n",
135
  " \"company; goliath; we\" : \n",
@@ -226,58 +103,472 @@
226
  },
227
  {
228
  "cell_type": "code",
229
- "execution_count": 38,
230
  "metadata": {},
231
  "outputs": [
232
  {
233
- "name": "stdout",
234
- "output_type": "stream",
235
- "text": [
236
- "sending request\n",
237
- "<Response [200]>\n"
238
- ]
239
  },
240
  {
241
  "data": {
242
  "text/plain": [
243
- "'\\n\\nWe build AI Recommendation Systems for Matching Platforms using the latest technology.'"
244
  ]
245
  },
246
- "execution_count": 38,
247
  "metadata": {},
248
  "output_type": "execute_result"
249
  }
250
  ],
251
  "source": [
252
- "s = 'Our company builds AI Recommendation Systems for Matching Platforms using the latest technology'\n",
253
- "key = 'company'\n",
254
- "synonym = 'we'\n",
255
- "\n",
256
- "prompt = f'in the following sentence: {s}. Replace {key} with {synonym} correcting the grammar'\n",
257
- "gpt3_question(prompt).replace('\\n', '')"
258
  ]
259
  },
260
  {
261
  "cell_type": "code",
262
- "execution_count": 17,
263
  "metadata": {},
264
  "outputs": [
265
  {
266
  "data": {
267
  "text/plain": [
268
- "'company'"
269
  ]
270
  },
271
- "execution_count": 17,
272
  "metadata": {},
273
  "output_type": "execute_result"
274
  }
275
  ],
276
  "source": [
277
- "str1 = 'company; goliath; we'\n",
278
  "\n",
279
- "str1.split(';')[0]\n"
280
  ]
281
  }
282
  ],
283
  "metadata": {
 
2
  "cells": [
3
  {
4
  "cell_type": "code",
5
+ "execution_count": null,
6
  "metadata": {},
7
+ "outputs": [],
8
  "source": [
9
  "import pandas as pd\n",
10
  "\n",
11
  "context_dict = {\n",
12
  " \"company; goliath; we\" : \n",
 
103
  },
104
  {
105
  "cell_type": "code",
106
+ "execution_count": null,
107
+ "metadata": {},
108
+ "outputs": [],
109
+ "source": [
110
+ "s = 'Our company builds AI Recommendation Systems for Matching Platforms using the latest technology'\n",
111
+ "key = 'company'\n",
112
+ "synonym = 'we'\n",
113
+ "\n",
114
+ "prompt = f'in the following sentence: {s}. Replace {key} with {synonym} correcting the grammar'\n",
115
+ "gpt3_question(prompt).replace('\\n', '')"
116
+ ]
117
+ },
118
+ {
119
+ "cell_type": "code",
120
+ "execution_count": 110,
121
+ "metadata": {},
122
+ "outputs": [],
123
+ "source": [
124
+ "import requests\n",
125
+ "import os\n",
126
+ "import torch\n",
127
+ "# os.system('pip install openpyxl')\n",
128
+ "# os.system('pip install sentence-transformers==2.2.2')\n",
129
+ "# os.system('pip install torch==1.13.0')\n",
130
+ "\n",
131
+ "def chatgpt3_question(context, question):\n",
132
+ " api_key = \"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\"\n",
133
+ " url = \"https://api.openai.com/v1/chat/completions\"\n",
134
+ "\n",
135
+ " prompt = f\"\"\"\n",
136
+ " based on this context: {context}\n",
137
+ " answer this use question: {question}\n",
138
+ " \"\"\"\n",
139
+ "\n",
140
+ " headers = {\n",
141
+ " \"Content-Type\": \"application/json\",\n",
142
+ " \"Authorization\": f\"Bearer {api_key}\"\n",
143
+ " }\n",
144
+ "\n",
145
+ " data = {\n",
146
+ " \"model\": \"gpt-3.5-turbo\",\n",
147
+ " \"messages\": [{\"role\": \"user\", \"content\": prompt}]\n",
148
+ " }\n",
149
+ "\n",
150
+ " response = requests.post(url, headers=headers, json=data)\n",
151
+ " generated_text = response.json()['choices'][0]['message']['content']\n",
152
+ "\n",
153
+ " return generated_text\n",
154
+ "\n",
155
+ "import os\n",
156
+ "import requests\n",
157
+ "import pandas as pd\n",
158
+ "\n",
159
+ "def text2vec(query):\n",
160
+ " headers = {\n",
161
+ " 'Content-Type': 'application/json',\n",
162
+ " 'Authorization': 'Bearer ' + \"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\",\n",
163
+ " }\n",
164
+ "\n",
165
+ " json_data = {\n",
166
+ " 'input': query,\n",
167
+ " 'model': 'text-embedding-ada-002',\n",
168
+ " }\n",
169
+ "\n",
170
+ " response = requests.post('https://api.openai.com/v1/embeddings', headers=headers, json=json_data)\n",
171
+ " query = response.json()['data'][0]['embedding'] #len=1536 #pricing=0.0004\n",
172
+ " return query\n",
173
+ "\n",
174
+ "import pandas as pd\n",
175
+ "from sentence_transformers import SentenceTransformer, util\n",
176
+ "\n",
177
+ "df = pd.read_parquet('df.parquet')\n",
178
+ "df_qa = pd.read_parquet('df_qa.parquet')\n",
179
+ "\n",
180
+ "df_qa_ = df_qa.copy()\n",
181
+ "df_ = df.copy()\n",
182
+ "\n",
183
+ "def qa(df_, df_qa_, min_qa_score, min_context_score, verbose, query):\n",
184
+ " query_vec = text2vec(query)\n",
185
+ " query_vec = torch.DoubleTensor(query_vec)\n",
186
+ "\n",
187
+ " #first check if there is already a question in df_qa\n",
188
+ " df_qa_['score'] = df_qa_['text_vector_'].apply(lambda x : float(util.cos_sim(x, query_vec)))\n",
189
+ " df_qa_ = df_qa_.sort_values('score', ascending=False)\n",
190
+ " \n",
191
+ " df_qa_ = df_qa_[df_qa_['score']>=min_qa_score]\n",
192
+ " #if we find at least one possible preset answer\n",
193
+ " if len(df_qa_) > 0:\n",
194
+ " if verbose : display(df_qa_)\n",
195
+ " answer = df_qa_[0:1]['answer'].values.tolist()[0]\n",
196
+ " return answer\n",
197
+ " \n",
198
+ " #then check if we can use the context to answer a question\n",
199
+ " df_['score'] = df_['text_vector_'].apply(lambda x : float(util.cos_sim(x, query_vec)))\n",
200
+ " df_ = df_.sort_values('score', ascending=False)\n",
201
+ " df_ = df_[df_['score']>=min_context_score]\n",
202
+ " #if we find at least one possible preset answer\n",
203
+ " if len(df_) > 0:\n",
204
+ " if verbose : display(df_)\n",
205
+ " #in case we might decide to merge multiple context\n",
206
+ " context = ' '.join(df_['description'][0:1].values.tolist())\n",
207
+ " answer = chatgpt3_question(context, query)\n",
208
+ " return answer\n",
209
+ " else:\n",
210
+ " return 'impossible to give an answer'\n",
211
+ "\n",
212
+ "# print(\n",
213
+ "# qa(\n",
214
+ "# df_, \n",
215
+ "# df_qa_, \n",
216
+ "# min_qa_score=0.92, \n",
217
+ "# min_context_score=.75, \n",
218
+ "# verbose=False, \n",
219
+ "# query='What is a recommender system?'\n",
220
+ "# )\n",
221
+ "# )"
222
+ ]
223
+ },
224
+ {
225
+ "cell_type": "code",
226
+ "execution_count": 111,
227
  "metadata": {},
228
  "outputs": [
229
  {
230
+ "data": {
231
+ "text/plain": [
232
+ "'Query after coreference resolution: \"How much does your company charge for recommendation systems?\"'"
233
+ ]
234
+ },
235
+ "execution_count": 111,
236
+ "metadata": {},
237
+ "output_type": "execute_result"
238
+ }
239
+ ],
240
+ "source": [
241
+ "def gpt3_reference(last_context, query):\n",
242
+ " #needs to be referred to the second\n",
243
+ " # last_context = 'you are a company'\n",
244
+ " # query = \"\"\"what do you do\"\"\"\n",
245
+ "\n",
246
+ " prompt = f\"\"\"\n",
247
+ " context : {last_context} \n",
248
+ " query : {query}\n",
249
+ " instructions:\n",
250
+ " apply a coreference resolution on the query and replace the pronoun with no temperature, no adjectives\n",
251
+ " \"\"\"\n",
252
+ " #only if pronoun is unclear, replace query pronoun with its reference\n",
253
+ " answer = chatgpt3_question(prompt)\n",
254
+ "\n",
255
+ " #replacements\n",
256
+ " answer = answer.replace('\\n', '')\n",
257
+ " answer = answer.replace('Answer:', '')\n",
258
+ " answer = answer.replace('answer:', '')\n",
259
+ " answer = answer.replace('answer', '')\n",
260
+ " answer = answer.strip()\n",
261
+ " return answer\n",
262
+ "\n",
263
+ "gpt3_reference(\"you are a company. recommendation systems are expensive\", \"How much do you charge?\")"
264
+ ]
265
+ },
266
+ {
267
+ "cell_type": "code",
268
+ "execution_count": 104,
269
+ "metadata": {},
270
+ "outputs": [
271
+ {
272
+ "data": {
273
+ "text/html": [
274
+ "<div>\n",
275
+ "<style scoped>\n",
276
+ " .dataframe tbody tr th:only-of-type {\n",
277
+ " vertical-align: middle;\n",
278
+ " }\n",
279
+ "\n",
280
+ " .dataframe tbody tr th {\n",
281
+ " vertical-align: top;\n",
282
+ " }\n",
283
+ "\n",
284
+ " .dataframe thead th {\n",
285
+ " text-align: right;\n",
286
+ " }\n",
287
+ "</style>\n",
288
+ "<table border=\"1\" class=\"dataframe\">\n",
289
+ " <thead>\n",
290
+ " <tr style=\"text-align: right;\">\n",
291
+ " <th></th>\n",
292
+ " <th>question</th>\n",
293
+ " <th>answer</th>\n",
294
+ " <th>text_vector_</th>\n",
295
+ " </tr>\n",
296
+ " </thead>\n",
297
+ " <tbody>\n",
298
+ " <tr>\n",
299
+ " <th>0</th>\n",
300
+ " <td>how much does it cost?</td>\n",
301
+ " <td>The price depends by its intended usage and co...</td>\n",
302
+ " <td>[0.02400375, -0.022719184, 0.010704716, -0.012...</td>\n",
303
+ " </tr>\n",
304
+ " <tr>\n",
305
+ " <th>1</th>\n",
306
+ " <td>do you use GPT3 API?</td>\n",
307
+ " <td>yes, we can</td>\n",
308
+ " <td>[0.008728346, -0.0068335673, -0.008393759, -0....</td>\n",
309
+ " </tr>\n",
310
+ " <tr>\n",
311
+ " <th>2</th>\n",
312
+ " <td>do you use GPT3?</td>\n",
313
+ " <td>yes, we can</td>\n",
314
+ " <td>[0.00771756, -0.0016906569, -0.0074193655, -0....</td>\n",
315
+ " </tr>\n",
316
+ " <tr>\n",
317
+ " <th>3</th>\n",
318
+ " <td>do you use GPT4?</td>\n",
319
+ " <td>yes, we can</td>\n",
320
+ " <td>[0.007858252, -0.0008719981, -0.010744374, -0....</td>\n",
321
+ " </tr>\n",
322
+ " <tr>\n",
323
+ " <th>4</th>\n",
324
+ " <td>what do you do?</td>\n",
325
+ " <td>Our company builds AI recommendaion systems</td>\n",
326
+ " <td>[-0.0043235356, -0.018366648, -0.006951173, -0...</td>\n",
327
+ " </tr>\n",
328
+ " <tr>\n",
329
+ " <th>5</th>\n",
330
+ " <td>what does goliath do?</td>\n",
331
+ " <td>Our company builds AI recommendaion systems</td>\n",
332
+ " <td>[-0.027456148, -0.017353108, -0.007992724, 0.0...</td>\n",
333
+ " </tr>\n",
334
+ " <tr>\n",
335
+ " <th>6</th>\n",
336
+ " <td>what does your company do?</td>\n",
337
+ " <td>Our company builds AI recommendaion systems</td>\n",
338
+ " <td>[0.0022247417, -0.012323239, -0.004670404, -0....</td>\n",
339
+ " </tr>\n",
340
+ " </tbody>\n",
341
+ "</table>\n",
342
+ "</div>"
343
+ ],
344
+ "text/plain": [
345
+ " question \\\n",
346
+ "0 how much does it cost? \n",
347
+ "1 do you use GPT3 API? \n",
348
+ "2 do you use GPT3? \n",
349
+ "3 do you use GPT4? \n",
350
+ "4 what do you do? \n",
351
+ "5 what does goliath do? \n",
352
+ "6 what does your company do? \n",
353
+ "\n",
354
+ " answer \\\n",
355
+ "0 The price depends by its intended usage and co... \n",
356
+ "1 yes, we can \n",
357
+ "2 yes, we can \n",
358
+ "3 yes, we can \n",
359
+ "4 Our company builds AI recommendaion systems \n",
360
+ "5 Our company builds AI recommendaion systems \n",
361
+ "6 Our company builds AI recommendaion systems \n",
362
+ "\n",
363
+ " text_vector_ \n",
364
+ "0 [0.02400375, -0.022719184, 0.010704716, -0.012... \n",
365
+ "1 [0.008728346, -0.0068335673, -0.008393759, -0.... \n",
366
+ "2 [0.00771756, -0.0016906569, -0.0074193655, -0.... \n",
367
+ "3 [0.007858252, -0.0008719981, -0.010744374, -0.... \n",
368
+ "4 [-0.0043235356, -0.018366648, -0.006951173, -0... \n",
369
+ "5 [-0.027456148, -0.017353108, -0.007992724, 0.0... \n",
370
+ "6 [0.0022247417, -0.012323239, -0.004670404, -0.... "
371
+ ]
372
+ },
373
+ "execution_count": 104,
374
+ "metadata": {},
375
+ "output_type": "execute_result"
376
+ }
377
+ ],
378
+ "source": [
379
+ "df_qa_"
380
+ ]
381
+ },
382
+ {
383
+ "cell_type": "code",
384
+ "execution_count": 107,
385
+ "metadata": {},
386
+ "outputs": [
387
+ {
388
+ "data": {
389
+ "text/html": [
390
+ "<div>\n",
391
+ "<style scoped>\n",
392
+ " .dataframe tbody tr th:only-of-type {\n",
393
+ " vertical-align: middle;\n",
394
+ " }\n",
395
+ "\n",
396
+ " .dataframe tbody tr th {\n",
397
+ " vertical-align: top;\n",
398
+ " }\n",
399
+ "\n",
400
+ " .dataframe thead th {\n",
401
+ " text-align: right;\n",
402
+ " }\n",
403
+ "</style>\n",
404
+ "<table border=\"1\" class=\"dataframe\">\n",
405
+ " <thead>\n",
406
+ " <tr style=\"text-align: right;\">\n",
407
+ " <th></th>\n",
408
+ " <th>question</th>\n",
409
+ " <th>answer</th>\n",
410
+ " <th>text_vector_</th>\n",
411
+ " <th>score</th>\n",
412
+ " </tr>\n",
413
+ " </thead>\n",
414
+ " <tbody>\n",
415
+ " <tr>\n",
416
+ " <th>5</th>\n",
417
+ " <td>what does goliath do?</td>\n",
418
+ " <td>Our company builds AI recommendaion systems</td>\n",
419
+ " <td>[-0.027456148, -0.017353108, -0.007992724, 0.0...</td>\n",
420
+ " <td>0.902588</td>\n",
421
+ " </tr>\n",
422
+ " </tbody>\n",
423
+ "</table>\n",
424
+ "</div>"
425
+ ],
426
+ "text/plain": [
427
+ " question answer \\\n",
428
+ "5 what does goliath do? Our company builds AI recommendaion systems \n",
429
+ "\n",
430
+ " text_vector_ score \n",
431
+ "5 [-0.027456148, -0.017353108, -0.007992724, 0.0... 0.902588 "
432
+ ]
433
+ },
434
+ "metadata": {},
435
+ "output_type": "display_data"
436
  },
437
  {
438
  "data": {
439
  "text/plain": [
440
+ "'Our company builds AI recommendaion systems'"
441
  ]
442
  },
443
+ "execution_count": 107,
444
  "metadata": {},
445
  "output_type": "execute_result"
446
  }
447
  ],
448
  "source": [
449
+ "bot_answer = qa(\n",
450
+ " df_, \n",
451
+ " df_qa_, \n",
452
+ " min_qa_score=0.88, \n",
453
+ " min_context_score=.75, \n",
454
+ " verbose=True, \n",
455
+ " query='how much does goliath charge?'\n",
456
+ " )\n",
457
+ "bot_answer"
458
  ]
459
  },
460
  {
461
  "cell_type": "code",
462
+ "execution_count": 108,
463
  "metadata": {},
464
  "outputs": [
465
+ {
466
+ "name": "stdout",
467
+ "output_type": "stream",
468
+ "text": [
469
+ "Running on local URL: http://127.0.0.1:7876\n",
470
+ "\n",
471
+ "To create a public link, set `share=True` in `launch()`.\n"
472
+ ]
473
+ },
474
  {
475
  "data": {
476
+ "text/html": [
477
+ "<div><iframe src=\"http://127.0.0.1:7876/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
478
+ ],
479
  "text/plain": [
480
+ "<IPython.core.display.HTML object>"
481
  ]
482
  },
483
+ "metadata": {},
484
+ "output_type": "display_data"
485
+ },
486
+ {
487
+ "data": {
488
+ "text/plain": []
489
+ },
490
+ "execution_count": 108,
491
  "metadata": {},
492
  "output_type": "execute_result"
493
+ },
494
+ {
495
+ "name": "stdout",
496
+ "output_type": "stream",
497
+ "text": [
498
+ "sending request\n",
499
+ "<Response [200]>\n",
500
+ "@@@ How much does Goliath charge?\n"
501
+ ]
502
  }
503
  ],
504
  "source": [
505
+ "import subprocess\n",
506
+ "import random\n",
507
+ "import gradio as gr\n",
508
+ "import requests\n",
509
+ "\n",
510
+ "history = None\n",
511
+ "\n",
512
+ "def predict(input, history, last_context):\n",
513
+ " last_context += 'you are a company'\n",
514
+ "\n",
515
+ " #WE CAN PLAY WITH user_input AND bot_answer, as well as history\n",
516
+ " user_input = input\n",
517
+ "\n",
518
+ " query = gpt3_reference(last_context, user_input)\n",
519
+ " print('@@@', query)\n",
520
+ " bot_answer = qa(\n",
521
+ " df_, \n",
522
+ " df_qa_, \n",
523
+ " min_qa_score=0.92, \n",
524
+ " min_context_score=.75, \n",
525
+ " verbose=False, \n",
526
+ " query=input\n",
527
+ " )\n",
528
  "\n",
529
+ " response = list()\n",
530
+ " response = [(input, bot_answer)]\n",
531
+ " \n",
532
+ " history.append(response[0])\n",
533
+ " response = history\n",
534
+ "\n",
535
+ " last_context = input\n",
536
+ "\n",
537
+ " # print('#history', history)\n",
538
+ " # print('#response', response)\n",
539
+ "\n",
540
+ " return response, history, last_context\n",
541
+ "\n",
542
+ "demo = gr.Blocks()\n",
543
+ "with demo:\n",
544
+ " gr.Markdown(\n",
545
+ " \"\"\"\n",
546
+ " Chatbot\n",
547
+ " \"\"\"\n",
548
+ " )\n",
549
+ " state = gr.Variable(value=[]) #beginning\n",
550
+ " last_context = gr.Variable(value='') #beginning\n",
551
+ " chatbot = gr.Chatbot() #color_map=(\"#00ff7f\", \"#00d5ff\")\n",
552
+ " text = gr.Textbox(\n",
553
+ " label=\"Question\",\n",
554
+ " value=\"What is a recommendation system?\",\n",
555
+ " placeholder=\"\",\n",
556
+ " max_lines=1,\n",
557
+ " )\n",
558
+ " text.submit(predict, [text, state, last_context], [chatbot, state, last_context])\n",
559
+ " text.submit(lambda x: \"\", text, text)\n",
560
+ " # btn = gr.Button(value=\"submit\")\n",
561
+ " # btn.click(chatbot_foo, None, [chatbot, state])\n",
562
+ "\n",
563
+ "demo.launch(share=False)"
564
  ]
565
+ },
566
+ {
567
+ "cell_type": "code",
568
+ "execution_count": null,
569
+ "metadata": {},
570
+ "outputs": [],
571
+ "source": []
572
  }
573
  ],
574
  "metadata": {
app.py CHANGED
@@ -1,120 +1,113 @@
1
  import requests
2
- import os
3
- os.system('pip install openpyxl')
4
- os.system('pip install sentence-transformers')
5
 
6
- def gpt3_question(prompt):
7
- api_key = "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"
8
- api_endpoint = "https://api.openai.com/v1/engines/text-davinci-003/completions"
9
- headers = {
10
- "Content-Type": "application/json",
11
- "Authorization": f"Bearer {api_key}"
12
- }
13
- data = {
14
- "prompt": prompt,
15
- "max_tokens": 400,
16
- "temperature": 0.5
17
- }
18
- print('sending request')
19
- response = requests.post(api_endpoint, headers=headers, json=data)
20
- print(response)
21
- generated_text = response.json()["choices"][0]["text"]
22
-
23
- return generated_text
24
 
25
- def chatgpt3_question(context, question):
26
- api_key = "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"
27
- url = "https://api.openai.com/v1/chat/completions"
28
 
29
- prompt = f"""
30
- based on this context: {context}
31
- answer this use question: {question}
32
- """
33
-
34
- headers = {
35
- "Content-Type": "application/json",
36
- "Authorization": f"Bearer {api_key}"
37
- }
38
-
39
- data = {
40
- "model": "gpt-3.5-turbo",
41
- "messages": [{"role": "user", "content": prompt}]
42
- }
43
-
44
- response = requests.post(url, headers=headers, json=data)
45
- generated_text = response.json()['choices'][0]['message']['content']
46
-
47
- return generated_text
48
-
49
- import os
50
- import requests
51
- import pandas as pd
52
-
53
- def split_paragraph(text, keyword):
54
- list1 = [x.strip() for x in text.split('.')]
55
- list2 = []
56
 
57
- for sentence in list1:
58
- # Check if the sentence contains the phrase "chamber of commerce"
59
- if keyword in sentence.lower():
60
- list2.append(1)
61
- else:
62
- list2.append(0)
63
-
64
- #in case first sentence has no keyword, we add it
65
- if list2[0] == 0:
66
- list1[0] = f'the {keyword}: ' + list1[0]
67
- list2[0] = 1
68
-
69
- # print(list1)
70
- # print(list2)
71
-
72
- list3 = list()
73
- current_string = ''
74
- # Loop through each element of list1 and list2
75
- for i in range(len(list1)):
76
- # If the corresponding element in list2 is 1, add the current string to list3 and reset the current string
77
-
78
- if list2[i] == 1:
79
- list3.append(current_string)
80
- current_string = "" #reset
81
- current_string += list1[i]
82
-
83
- # Otherwise, concatenate the current string with the current element of list1
84
- if list2[i] == 0:
85
- current_string += '. '+list1[i]
86
-
87
- # Add the final concatenated string to list3
88
- list3.append(current_string)
89
-
90
- return [x.strip() for x in list3[1:]]
91
-
92
- def context_dict2context_list(context_dict):
93
- list1 = list()
94
- for all_keys in context_dict:
95
- key = all_keys.split(';')[0]
96
- try:
97
- synonyms = all_keys.split(';')[1:]
98
- except:
99
- pass
100
- # print(key)
101
- str1 = context_dict[all_keys]
102
 
103
- split_list = [x.replace('\n', '').strip() for x in str1.split('\n\n')]
104
- split_list
105
-
106
- for sentence in split_list:
107
- for s in split_paragraph(sentence, key):
108
- #add synonyms
109
- for synonym in synonyms:
110
- #manual replacement causes a wrong grammar
111
- #gpt3 replacement
112
- print(s, key, synonym)
113
- prompt = f'in the following sentence: {s}. Replace {key} with {synonym} correcting the grammar'
114
- answer = gpt3_question(prompt).replace('\n', '')
115
- list1.append(answer)
116
- list1.append(s)
117
- return list1
118
 
119
  def list2vec(list1):
120
  headers = {
@@ -135,135 +128,94 @@ def list2vec(list1):
135
  dict1[json_data['input'][index]] = response.json()['data'][index]['embedding']
136
  return dict1
137
 
138
- def text2vec(query):
139
- headers = {
140
- 'Content-Type': 'application/json',
141
- 'Authorization': 'Bearer ' + "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",
142
- }
143
-
144
- json_data = {
145
- 'input': query,
146
- 'model': 'text-embedding-ada-002',
147
- }
148
 
149
- response = requests.post('https://api.openai.com/v1/embeddings', headers=headers, json=json_data)
150
- query = response.json()['data'][0]['embedding'] #len=1536 #pricing=0.0004
151
- return query
152
 
153
- context_dict = {
154
- "company; goliath; we" :
155
- """
156
- Our company builds AI Recommendation Systems for Matching Platforms using the latest technology. Our company is estabilished and operates in Japan. Our company uses the AWS Cloud to manage Servers. Our company can use GPT3 as well. Our company also builds GPT3-based chatbots. Our company can use open-source models, if requested. Our company uses open source models. Our company operates in Japan. Our company has been operating for 1 year, and we are expanding in Hong Kong. Our company offers other services apart from recommendation systems, like GPT3 chatbots. Our company can also build recommendation systems for mobile apps. You can contact our company at ma@goliath.jp .
157
- """
158
- ,
159
- "price" :
160
- """
161
- The price of a recommendation system depends on the amount of complexity that is required to build, as well as the volume of customers. Reach us to get a quotation. The price of a chatbot depends by its intended usage and complexity, contact us for a quotation.
162
- """
163
- ,
164
- "recommendation system" :
165
- """
166
- If your company wants to recommend products to customers, we can build a recommendation system for you. GPT3 can be used to build recommendation systems by using embeddings, mapping choices in a mathematical space. Once the recommendation system has been built, we will manage it in the future as well. Recommendation system could also be built for startups, though they will be in smaller size. We use AWS OpenSearch to host recommendation system.
167
- """
168
- ,
169
- "a matching platform" :
170
- """
171
- A matching platform is a business with thousands of users, who could be customers, individuals or companies, who are interacting with one another. For example dating apps, ecommerce platforms, or job recruiting platforms.
172
  """
173
- }
 
174
 
175
- import pandas as pd
176
- from sentence_transformers import SentenceTransformer, util
177
 
178
- #prepare context
179
- context_list = context_dict2context_list(context_dict)
180
-
181
- #adding invidivual sentences
182
- context_list += [
183
- 'We can also use GPT3, if requested',
184
- 'Our email is ma@goliath.jp',
185
- 'You can contact us at ma@goliath.jp'
186
- ]
187
-
188
- #create df
189
- df = pd.DataFrame([context_list, list2vec(context_list)]).T
190
- df.columns = ['description', 'text_vector_']
191
- df['description'] = df['description'].apply(lambda x : x.strip())
192
- df
193
-
194
- qa_list = {
195
- 'how long does it take to build a recommendation system?' : 'Usually, from a few weeks to one month',
196
- 'how long does it take to build one' : 'Usually, from a few weeks to one month',
197
- 'how many people are working for goliath?' : '5 people',
198
- 'how many people are working for you?' : '5 people',
199
- 'how much does it cost?' : 'The price depends by its intended usage and complexity, contact us for a quotation.',
200
- 'do you use GPT3 API?' : 'yes, we can',
201
- 'do you use GPT3?' : 'yes, we can',
202
- 'do you use GPT4?' : 'yes, we can',
203
- 'so you build chatbots' : 'yes, we built state-of-the art chatbots with GPT3 technology',
204
- 'what do you do?' : 'our company builds AI recommendaion systems'
205
- }
206
- df_qa = pd.DataFrame([qa_list]).T.reset_index()
207
- df_qa.columns = ['question', 'answer']
208
- df_qa['text_vector_'] = list2vec(df_qa['question'].values.tolist())
209
- df_qa
210
 
211
  df_qa_ = df_qa.copy()
212
  df_ = df.copy()
213
 
214
  def qa(df_, df_qa_, min_qa_score, min_context_score, verbose, query):
215
- query_vec = text2vec(query)
 
216
 
217
  #first check if there is already a question in df_qa
218
  df_qa_['score'] = df_qa_['text_vector_'].apply(lambda x : float(util.cos_sim(x, query_vec)))
219
  df_qa_ = df_qa_.sort_values('score', ascending=False)
220
  df_qa_ = df_qa_[df_qa_['score']>=min_qa_score]
221
  #if we find at least one possible preset answer
222
  if len(df_qa_) > 0:
223
- if verbose : display(df_qa_)
224
  answer = df_qa_[0:1]['answer'].values.tolist()[0]
225
  return answer
226
 
227
  #then check if we can use the context to answer a question
228
  df_['score'] = df_['text_vector_'].apply(lambda x : float(util.cos_sim(x, query_vec)))
229
  df_ = df_.sort_values('score', ascending=False)
 
230
  df_ = df_[df_['score']>=min_context_score]
231
  #if we find at least one possible preset answer
232
  if len(df_) > 0:
233
- if verbose : display(df_)
234
  #in case we might decide to merge multiple context
235
  context = ' '.join(df_['description'][0:1].values.tolist())
236
- answer = chatgpt3_question(context, query)
237
  return answer
238
  else:
239
  return 'impossible to give an answer'
240
 
241
- # print(
242
- # qa(
243
- # df_,
244
- # df_qa_,
245
- # min_qa_score=0.92,
246
- # min_context_score=.75,
247
- # verbose=False,
248
- # query='What is a recommender system?'
249
- # )
250
- # )
251
-
252
  import subprocess
253
  import random
254
  import gradio as gr
255
  import requests
256
 
257
  history = None
258
- history_prompt = None
259
 
260
- def predict(input, history):
261
  #WE CAN PLAY WITH user_input AND bot_answer, as well as history
262
  user_input = input
263
 
264
- global history_prompt
265
- global block_predict
266
-
267
  bot_answer = qa(
268
  df_,
269
  df_qa_,
@@ -279,10 +231,12 @@ def predict(input, history):
279
  history.append(response[0])
280
  response = history
281
 
 
 
282
  # print('#history', history)
283
  # print('#response', response)
284
 
285
- return response, history
286
 
287
  demo = gr.Blocks()
288
  with demo:
@@ -292,6 +246,7 @@ with demo:
292
  """
293
  )
294
  state = gr.Variable(value=[]) #beginning
 
295
  chatbot = gr.Chatbot() #color_map=("#00ff7f", "#00d5ff")
296
  text = gr.Textbox(
297
  label="Question",
@@ -299,7 +254,7 @@ with demo:
299
  placeholder="",
300
  max_lines=1,
301
  )
302
- text.submit(predict, [text, state], [chatbot, state])
303
  text.submit(lambda x: "", text, text)
304
  # btn = gr.Button(value="submit")
305
  # btn.click(chatbot_foo, None, [chatbot, state])
 
1
  import requests
2
 
3
+ #openai
4
+ openai_api_key = "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"
5
 
6
+ #azure
7
+ azure_api_key = "c6d9cc1f487640cc92800d8d177f5f59"
8
+ azure_api_base = "https://openai-619.openai.azure.com/" # your endpoint should look like the following https://YOUR_RESOURCE_NAME.openai.azure.com/
9
+ azure_api_type = 'azure'
10
+ azure_api_version = '2022-12-01' # this may change in the future
11
 
12
+ def gpt3(prompt, model, service, max_tokens=400):
13
 
14
+ if service == 'openai':
15
+ if model == 'gpt-3.5-turbo':
16
+ api_endpoint = "https://api.openai.com/v1/chat/completions"
17
+ data = {
18
+ "model": "gpt-3.5-turbo",
19
+ "messages": [{"role": "user", "content": prompt}]
20
+ }
21
+ headers = {
22
+ "Content-Type": "application/json",
23
+ "Authorization": f"Bearer {openai_api_key}"
24
+ }
25
+ response = requests.post(api_endpoint, headers=headers, json=data)
26
+ return response.json()['choices'][0]['message']['content']
27
+
28
+ elif model == 'gpt-3':
29
+ api_endpoint = "https://api.openai.com/v1/engines/text-davinci-003/completions"
30
+ data = {
31
+ "prompt": prompt,
32
+ "max_tokens": max_tokens,
33
+ "temperature": 0.5
34
+ }
35
+ headers = {
36
+ "Content-Type": "application/json",
37
+ "Authorization": f"Bearer {openai_api_key}"
38
+ }
39
+ response = requests.post(api_endpoint, headers=headers, json=data)
40
+ return response.json()["choices"][0]["text"]
41
+
42
+ elif service == 'azure':
43
 
44
+ if model == 'gpt-3':
45
+ azure_deployment_name='gpt3'
46
+
47
+ api_endpoint = f"""{azure_api_base}openai/deployments/{azure_deployment_name}/completions?api-version={azure_api_version}"""
48
+
49
+ headers = {
50
+ "Content-Type": "application/json",
51
+ "api-key": azure_api_key
52
+ }
53
+
54
+ data = {
55
+ "prompt": prompt,
56
+ "max_tokens": max_tokens
57
+ }
58
+ response = requests.post(api_endpoint, headers=headers, json=data)
59
+
60
+ generated_text = response.json()["choices"][0]["text"]
61
+ return generated_text
62
+
63
+ elif model == 'gpt-3.5-turbo':
64
+ azure_deployment_name='gpt-35-turbo' #cannot be creative with the name
65
+ headers = {
66
+ "Content-Type": "application/json",
67
+ "api-key": azure_api_key
68
+ }
69
+ json_data = {
70
+ 'messages': [
71
+ {
72
+ 'role': 'user',
73
+ 'content': prompt,
74
+ },
75
+ ],
76
+ }
77
+ api_endpoint = f"""{azure_api_base}openai/deployments/{azure_deployment_name}/chat/completions?api-version=2023-03-15-preview"""
78
+ response = requests.post(api_endpoint, headers=headers, json=json_data)
79
+ return response.json()['choices'][0]['message']['content']
80
+
81
+ #azure is much more sensitive to max_tokens
82
+ # gpt3('how are you?', model='gpt-3.5-turbo', service='azure')
83
+
84
+ def text2vec(input, service):
85
+ if service == 'openai':
86
+ api_endpoint = 'https://api.openai.com/v1/embeddings'
87
+ headers = {
88
+ 'Content-Type': 'application/json',
89
+ 'Authorization': 'Bearer ' + "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",
90
+ }
91
+ json_data = {
92
+ 'input': input,
93
+ 'model': 'text-embedding-ada-002',
94
+ }
95
+ # response = requests.post(api_endpoint, headers=headers, json=json_data)
96
+
97
+ elif service == 'azure':
98
+ azure_deployment_name = 'gpt3_embedding'
99
+ api_endpoint = f"""{azure_api_base}openai/deployments/{azure_deployment_name}/embeddings?api-version={azure_api_version}"""
100
+ headers = {
101
+ "Content-Type": "application/json",
102
+ "api-key": azure_api_key
103
+ }
104
+ json_data = {
105
+ "input": input
106
+ }
107
+
108
+ response = requests.post(api_endpoint, headers=headers, json=json_data)
109
+ vec = response.json()['data'][0]['embedding'] #len=1536 #pricing=0.0004
110
+ return vec
111
 
112
  def list2vec(list1):
113
  headers = {
 
128
  dict1[json_data['input'][index]] = response.json()['data'][index]['embedding']
129
  return dict1
130
 
131
+ import requests
132
+ import os
133
+ import torch
134
+ os.system('pip install openpyxl')
135
+ os.system('pip install sentence-transformers==2.2.2')
136
+ os.system('pip install torch==1.13.0')
137
+ import pandas as pd
138
+ from sentence_transformers import SentenceTransformer, util
139
 
140
+ #reference filter
141
+ def gpt3_reference(last_context, query):
142
+ #needs to be referred to the second
143
+ # last_context = 'you are a company'
144
+ # query = """what do you do"""
145
 
146
+ prompt = f"""
147
+ context : {last_context}
148
+ query : {query}
149
+ instructions:
150
+ apply a coreference resolution on the query and replace the pronoun with no temperature, no adjectives
151
  """
152
+ #only if pronoun is unclear, replace query pronoun with its reference
153
+ answer = gpt3(prompt, model='gpt-3.5-turbo', service='azure')
154
 
155
+ #replacements
156
+ answer = answer.replace('\n', '')
157
+ answer = answer.replace('Answer:', '')
158
+ answer = answer.replace('answer:', '')
159
+ answer = answer.replace('answer', '')
160
+ answer = answer.strip()
161
+ return answer
162
 
163
+ # gpt3_reference("you are a company. recommendation systems are expensive", "How much do you charge?")
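+ #the notebook run above shows the kind of rewrite this produces: Query after coreference resolution: "How much does your company charge?"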
164
+
165
+ df = pd.read_parquet('df.parquet')
166
+ df_qa = pd.read_parquet('df_qa.parquet')
167
 
168
  df_qa_ = df_qa.copy()
169
  df_ = df.copy()
170
 
171
  def qa(df_, df_qa_, min_qa_score, min_context_score, verbose, query):
172
+ query_vec = text2vec(query, 'azure')
173
+ query_vec = torch.DoubleTensor(query_vec)
174
 
175
  #first check if there is already a question in df_qa
176
  df_qa_['score'] = df_qa_['text_vector_'].apply(lambda x : float(util.cos_sim(x, query_vec)))
177
  df_qa_ = df_qa_.sort_values('score', ascending=False)
178
+
179
+ if verbose : print(df_qa_[0:5]) #display() only exists in notebooks; print is safe in a script
180
  df_qa_ = df_qa_[df_qa_['score']>=min_qa_score]
181
  #if we find at least one possible preset answer
182
  if len(df_qa_) > 0:
 
183
  answer = df_qa_[0:1]['answer'].values.tolist()[0]
184
  return answer
185
 
186
  #then check if we can use the context to answer a question
187
  df_['score'] = df_['text_vector_'].apply(lambda x : float(util.cos_sim(x, query_vec)))
188
  df_ = df_.sort_values('score', ascending=False)
189
+ if verbose : print(df_[0:5])
190
  df_ = df_[df_['score']>=min_context_score]
191
  #if we find at least one possible preset answer
192
  if len(df_) > 0:
 
193
  #in case we might decide to merge multiple context
194
  context = ' '.join(df_['description'][0:1].values.tolist())
195
+ prompt = f"""
196
+ context: {context}
197
+ query: {query}
198
+ Answer the query using context. Do not justify the answer.
199
+ """
200
+ answer = gpt3(prompt, model='gpt-3.5-turbo', service='azure')
201
  return answer
202
  else:
203
  return 'impossible to give an answer'
204
205
  import subprocess
206
  import random
207
  import gradio as gr
208
  import requests
209
 
210
  history = None
 
211
 
212
+ def predict(input, history, last_context):
213
+ last_context += 'you are a company'
214
+
215
  #WE CAN PLAY WITH user_input AND bot_answer, as well as history
216
  user_input = input
217
 
218
+ query = gpt3_reference(last_context, user_input)
219
  bot_answer = qa(
220
  df_,
221
  df_qa_,
 
231
  history.append(response[0])
232
  response = history
233
 
234
+ last_context = input
235
+
236
  # print('#history', history)
237
  # print('#response', response)
238
 
239
+ return response, history, last_context
240
 
241
  demo = gr.Blocks()
242
  with demo:
 
246
  """
247
  )
248
  state = gr.Variable(value=[]) #beginning
249
+ last_context = gr.Variable(value='') #beginning
250
  chatbot = gr.Chatbot() #color_map=("#00ff7f", "#00d5ff")
251
  text = gr.Textbox(
252
  label="Question",
 
254
  placeholder="",
255
  max_lines=1,
256
  )
257
+ text.submit(predict, [text, state, last_context], [chatbot, state, last_context])
258
  text.submit(lambda x: "", text, text)
259
  # btn = gr.Button(value="submit")
260
  # btn.click(chatbot_foo, None, [chatbot, state])
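The gpt3() wrapper above is a plain REST dispatcher, and the easiest thing to lose in its branches is the URL shape: OpenAI routes by model, Azure by deployment name plus an api-version query parameter. A standalone sketch that only prints the four endpoint shapes used above (YOUR_RESOURCE_NAME is a placeholder; nothing is sent):

azure_api_base = "https://YOUR_RESOURCE_NAME.openai.azure.com/"
azure_api_version = '2022-12-01'

def endpoint(service, model):
    if service == 'openai':
        if model == 'gpt-3.5-turbo':
            return "https://api.openai.com/v1/chat/completions"
        return "https://api.openai.com/v1/engines/text-davinci-003/completions"
    #azure routes by deployment name rather than model name
    if model == 'gpt-3.5-turbo':
        return f"{azure_api_base}openai/deployments/gpt-35-turbo/chat/completions?api-version=2023-03-15-preview"
    return f"{azure_api_base}openai/deployments/gpt3/completions?api-version={azure_api_version}"

for service in ('openai', 'azure'):
    for model in ('gpt-3', 'gpt-3.5-turbo'):
        print(service, model, '->', endpoint(service, model))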
app_old.py ADDED
@@ -0,0 +1,151 @@
1
+ import requests
2
+ import os
3
+ os.system('pip install openpyxl')
4
+ os.system('pip install sentence-transformers')
5
+
6
+ def chatgpt3_question(context, question):
7
+ api_key = "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"
8
+ url = "https://api.openai.com/v1/chat/completions"
9
+
10
+ prompt = f"""
11
+ based on this context: {context}
12
+ answer this user question: {question}
13
+ """
14
+
15
+ headers = {
16
+ "Content-Type": "application/json",
17
+ "Authorization": f"Bearer {api_key}"
18
+ }
19
+
20
+ data = {
21
+ "model": "gpt-3.5-turbo",
22
+ "messages": [{"role": "user", "content": prompt}]
23
+ }
24
+
25
+ response = requests.post(url, headers=headers, json=data)
26
+ generated_text = response.json()['choices'][0]['message']['content']
27
+
28
+ return generated_text
29
+
30
+ import os
31
+ import requests
32
+ import pandas as pd
33
+
34
+ def text2vec(query):
35
+ headers = {
36
+ 'Content-Type': 'application/json',
37
+ 'Authorization': 'Bearer ' + "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",
38
+ }
39
+
40
+ json_data = {
41
+ 'input': query,
42
+ 'model': 'text-embedding-ada-002',
43
+ }
44
+
45
+ response = requests.post('https://api.openai.com/v1/embeddings', headers=headers, json=json_data)
46
+ query = response.json()['data'][0]['embedding'] #len=1536 #pricing=0.0004
47
+ return query
48
+
49
+ import pandas as pd
50
+ from sentence_transformers import SentenceTransformer, util
51
+
52
+ df = pd.read_parquet('df.parquet')
53
+ df_qa = pd.read_parquet('df_qa.parquet')
54
+
55
+ df_qa_ = df_qa.copy()
56
+ df_ = df.copy()
57
+
58
+ def qa(df_, df_qa_, min_qa_score, min_context_score, verbose, query):
59
+ query_vec = text2vec(query)
60
+
61
+ #first check if there is already a question in df_qa
62
+ df_qa_['score'] = df_qa_['text_vector_'].apply(lambda x : float(util.cos_sim(x, query_vec)))
63
+ df_qa_ = df_qa_.sort_values('score', ascending=False)
64
+ df_qa_ = df_qa_[df_qa_['score']>=min_qa_score]
65
+ #if we find at least one possible preset answer
66
+ if len(df_qa_) > 0:
67
+ if verbose : display(df_qa_)
68
+ answer = df_qa_[0:1]['answer'].values.tolist()[0]
69
+ return answer
70
+
71
+ #then check if we can use the context to answer a question
72
+ df_['score'] = df_['text_vector_'].apply(lambda x : float(util.cos_sim(x, query_vec)))
73
+ df_ = df_.sort_values('score', ascending=False)
74
+ df_ = df_[df_['score']>=min_context_score]
75
+ #if we find at least one possible preset answer
76
+ if len(df_) > 0:
77
+ if verbose : display(df_)
78
+ #in case we might decide to merge multiple context
79
+ context = ' '.join(df_['description'][0:1].values.tolist())
80
+ answer = chatgpt3_question(context, query)
81
+ return answer
82
+ else:
83
+ return 'impossible to give an answer'
84
+
85
+ # print(
86
+ # qa(
87
+ # df_,
88
+ # df_qa_,
89
+ # min_qa_score=0.92,
90
+ # min_context_score=.75,
91
+ # verbose=False,
92
+ # query='What is a recommender system?'
93
+ # )
94
+ # )
95
+
96
+ import subprocess
97
+ import random
98
+ import gradio as gr
99
+ import requests
100
+
101
+ history = None
102
+ history_prompt = None
103
+
104
+ def predict(input, history):
105
+ #WE CAN PLAY WITH user_input AND bot_answer, as well as history
106
+ user_input = input
107
+
108
+ global history_prompt
109
+ global block_predict
110
+
111
+ bot_answer = qa(
112
+ df_,
113
+ df_qa_,
114
+ min_qa_score=0.92,
115
+ min_context_score=.75,
116
+ verbose=False,
117
+ query=input
118
+ )
119
+
120
+ response = list()
121
+ response = [(input, bot_answer)]
122
+
123
+ history.append(response[0])
124
+ response = history
125
+
126
+ # print('#history', history)
127
+ # print('#response', response)
128
+
129
+ return response, history
130
+
131
+ demo = gr.Blocks()
132
+ with demo:
133
+ gr.Markdown(
134
+ """
135
+ Chatbot
136
+ """
137
+ )
138
+ state = gr.Variable(value=[]) #beginning
139
+ chatbot = gr.Chatbot() #color_map=("#00ff7f", "#00d5ff")
140
+ text = gr.Textbox(
141
+ label="Question",
142
+ value="What is a recommendation system?",
143
+ placeholder="",
144
+ max_lines=1,
145
+ )
146
+ text.submit(predict, [text, state], [chatbot, state])
147
+ text.submit(lambda x: "", text, text)
148
+ # btn = gr.Button(value="submit")
149
+ # btn.click(chatbot_foo, None, [chatbot, state])
150
+
151
+ demo.launch(share=False)
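app_old.py keeps the original single-state Gradio loop for reference: a gr.Variable accumulates (user, bot) pairs and predict() returns the full transcript to the Chatbot on every submit. A stripped-down sketch of that state round-trip, with a canned reply standing in for qa() (assumes the same gradio version as the repo, where gr.Variable is the state holder; newer releases call it gr.State):

import gradio as gr

def predict(input, history):
    history.append((input, f"echo: {input}")) #canned reply in place of qa()
    return history, history #the same list feeds both the Chatbot and the state

demo = gr.Blocks()
with demo:
    state = gr.Variable(value=[])
    chatbot = gr.Chatbot()
    text = gr.Textbox(label="Question", max_lines=1)
    text.submit(predict, [text, state], [chatbot, state])
    text.submit(lambda x: "", text, text) #clear the box after each submit

demo.launch(share=False)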
create_vectors.ipynb ADDED
@@ -0,0 +1,951 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "attachments": {},
5
+ "cell_type": "markdown",
6
+ "metadata": {},
7
+ "source": [
8
+ "### openai REST API"
9
+ ]
10
+ },
11
+ {
12
+ "cell_type": "code",
13
+ "execution_count": 264,
14
+ "metadata": {},
15
+ "outputs": [],
16
+ "source": [
17
+ "import requests\n",
18
+ "\n",
19
+ "#openai\n",
20
+ "openai_api_key = \"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\"\n",
21
+ "\n",
22
+ "#azure\n",
23
+ "azure_api_key = \"c6d9cc1f487640cc92800d8d177f5f59\"\n",
24
+ "azure_api_base = \"https://openai-619.openai.azure.com/\" # your endpoint should look like the following https://YOUR_RESOURCE_NAME.openai.azure.com/\n",
25
+ "azure_api_type = 'azure'\n",
26
+ "azure_api_version = '2022-12-01' # this may change in the future"
27
+ ]
28
+ },
29
+ {
30
+ "cell_type": "code",
31
+ "execution_count": 265,
32
+ "metadata": {},
33
+ "outputs": [
34
+ {
35
+ "data": {
36
+ "text/plain": [
37
+ "\"\\n\\nAs an AI language model, I don't have feelings like humans, but I'm functioning optimally. How may I help you?\""
38
+ ]
39
+ },
40
+ "execution_count": 265,
41
+ "metadata": {},
42
+ "output_type": "execute_result"
43
+ }
44
+ ],
45
+ "source": [
46
+ "def gpt3(prompt, model, service, max_tokens=400):\n",
47
+ " \n",
48
+ " if service == 'openai':\n",
49
+ " if model == 'gpt-3.5-turbo':\n",
50
+ " api_endpoint = \"https://api.openai.com/v1/chat/completions\"\n",
51
+ " data = {\n",
52
+ " \"model\": \"gpt-3.5-turbo\",\n",
53
+ " \"messages\": [{\"role\": \"user\", \"content\": prompt}]\n",
54
+ " }\n",
55
+ " headers = {\n",
56
+ " \"Content-Type\": \"application/json\",\n",
57
+ " \"Authorization\": f\"Bearer {openai_api_key}\"\n",
58
+ " }\n",
59
+ " response = requests.post(api_endpoint, headers=headers, json=data)\n",
60
+ " return response.json()['choices'][0]['message']['content']\n",
61
+ "\n",
62
+ " elif model == 'gpt-3':\n",
63
+ " api_endpoint = \"https://api.openai.com/v1/engines/text-davinci-003/completions\"\n",
64
+ " data = {\n",
65
+ " \"prompt\": prompt,\n",
66
+ " \"max_tokens\": max_tokens,\n",
67
+ " \"temperature\": 0.5\n",
68
+ " }\n",
69
+ " headers = {\n",
70
+ " \"Content-Type\": \"application/json\",\n",
71
+ " \"Authorization\": f\"Bearer {openai_api_key}\"\n",
72
+ " }\n",
73
+ " response = requests.post(api_endpoint, headers=headers, json=data)\n",
74
+ " return response.json()[\"choices\"][0][\"text\"]\n",
75
+ " \n",
76
+ " elif service == 'azure':\n",
77
+ " \n",
78
+ " if model == 'gpt-3':\n",
79
+ " azure_deployment_name='gpt3'\n",
80
+ "\n",
81
+ " api_endpoint = f\"\"\"{azure_api_base}openai/deployments/{azure_deployment_name}/completions?api-version={azure_api_version}\"\"\"\n",
82
+ "\n",
83
+ " headers = {\n",
84
+ " \"Content-Type\": \"application/json\",\n",
85
+ " \"api-key\": azure_api_key\n",
86
+ " }\n",
87
+ "\n",
88
+ " data = {\n",
89
+ " \"prompt\": prompt,\n",
90
+ " \"max_tokens\": max_tokens\n",
91
+ " }\n",
92
+ " response = requests.post(api_endpoint, headers=headers, json=data)\n",
93
+ "\n",
94
+ " generated_text = response.json()[\"choices\"][0][\"text\"]\n",
95
+ " return generated_text\n",
96
+ "\n",
97
+ " elif model == 'gpt-3.5-turbo':\n",
98
+ " azure_deployment_name='gpt-35-turbo' #cannot be creative with the name\n",
99
+ " headers = {\n",
100
+ " \"Content-Type\": \"application/json\",\n",
101
+ " \"api-key\": azure_api_key\n",
102
+ " }\n",
103
+ " json_data = {\n",
104
+ " 'messages': [\n",
105
+ " {\n",
106
+ " 'role': 'user',\n",
107
+ " 'content': prompt,\n",
108
+ " },\n",
109
+ " ],\n",
110
+ " }\n",
111
+ " api_endpoint = f\"\"\"{azure_api_base}openai/deployments/{azure_deployment_name}/chat/completions?api-version=2023-03-15-preview\"\"\"\n",
112
+ " response = requests.post(api_endpoint, headers=headers, json=json_data)\n",
113
+ " return response.json()['choices'][0]['message']['content']\n",
114
+ "\n",
115
+ "#azure is much more sensible to max_tokens\n",
116
+ "gpt3('how are you?', model='gpt-3.5-turbo', service='azure')"
117
+ ]
118
+ },
119
+ {
120
+ "cell_type": "code",
121
+ "execution_count": 266,
122
+ "metadata": {},
123
+ "outputs": [],
124
+ "source": [
125
+ "def text2vec(input, service):\n",
126
+ " if service == 'openai':\n",
127
+ " api_endpoint = 'https://api.openai.com/v1/embeddings'\n",
128
+ " headers = {\n",
129
+ " 'Content-Type': 'application/json',\n",
130
+ " 'Authorization': 'Bearer ' + \"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\",\n",
131
+ " }\n",
132
+ " json_data = {\n",
133
+ " 'input': input,\n",
134
+ " 'model': 'text-embedding-ada-002',\n",
135
+ " }\n",
136
+ " # response = requests.post(api_endpoint, headers=headers, json=json_data)\n",
137
+ "\n",
138
+ " elif service == 'azure':\n",
139
+ " azure_deployment_name = 'gpt3_embedding'\n",
140
+ " api_endpoint = f\"\"\"{azure_api_base}openai/deployments/{azure_deployment_name}/embeddings?api-version={azure_api_version}\"\"\"\n",
141
+ " headers = {\n",
142
+ " \"Content-Type\": \"application/json\",\n",
143
+ " \"api-key\": azure_api_key\n",
144
+ " }\n",
145
+ " json_data = {\n",
146
+ " \"input\": input\n",
147
+ " }\n",
148
+ "\n",
149
+ " response = requests.post(api_endpoint, headers=headers, json=json_data)\n",
150
+ " vec = response.json()['data'][0]['embedding'] #len=1536 #pricing=0.0004\n",
151
+ " return vec\n",
152
+ "\n",
153
+ "def list2vec(list1):\n",
154
+ " headers = {\n",
155
+ " 'Content-Type': 'application/json',\n",
156
+ " 'Authorization': 'Bearer ' + \"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\",\n",
157
+ " }\n",
158
+ "\n",
159
+ " json_data = {\n",
160
+ " 'input': list1,\n",
161
+ " 'model': 'text-embedding-ada-002',\n",
162
+ " }\n",
163
+ "\n",
164
+ " response = requests.post('https://api.openai.com/v1/embeddings', headers=headers, json=json_data)\n",
165
+ " return [x['embedding'] for x in response.json()['data']]\n",
166
+ "\n",
167
+ " dict1 = dict()\n",
168
+ " for index in range(len(json_data['input'])):\n",
169
+ " dict1[json_data['input'][index]] = response.json()['data'][index]['embedding']\n",
170
+ " return dict1"
171
+ ]
172
+ },
173
+ {
174
+ "attachments": {},
175
+ "cell_type": "markdown",
176
+ "metadata": {},
177
+ "source": [
178
+ "### context generator"
179
+ ]
180
+ },
181
+ {
182
+ "cell_type": "code",
183
+ "execution_count": 343,
184
+ "metadata": {},
185
+ "outputs": [
186
+ {
187
+ "data": {
188
+ "text/plain": [
189
+ "[]"
190
+ ]
191
+ },
192
+ "execution_count": 343,
193
+ "metadata": {},
194
+ "output_type": "execute_result"
195
+ }
196
+ ],
197
+ "source": [
198
+ "import requests\n",
199
+ "import pandas as pd\n",
200
+ "\n",
201
+ "def split_paragraph(text, keyword):\n",
202
+ " list1 = [x.strip() for x in text.split('.')]\n",
203
+ " list2 = []\n",
204
+ " \n",
205
+ " for sentence in list1:\n",
206
+ " # Check if the sentence contains the phrase \"chamber of commerce\"\n",
207
+ " if keyword in sentence.lower():\n",
208
+ " list2.append(1)\n",
209
+ " else:\n",
210
+ " list2.append(0)\n",
211
+ "\n",
212
+ " #in case first sentence has no keyword, we add it\n",
213
+ " if list2[0] == 0:\n",
214
+ " list1[0] = f'the {keyword}: ' + list1[0]\n",
215
+ " list2[0] = 1\n",
216
+ "\n",
217
+ " # print(list1)\n",
218
+ " # print(list2)\n",
219
+ "\n",
220
+ " list3 = list()\n",
221
+ " current_string = ''\n",
222
+ " # Loop through each element of list1 and list2\n",
223
+ " for i in range(len(list1)):\n",
224
+ " # If the corresponding element in list2 is 1, add the current string to list3 and reset the current string\n",
225
+ "\n",
226
+ " if list2[i] == 1:\n",
227
+ " list3.append(current_string)\n",
228
+ " current_string = \"\" #reset\n",
229
+ " current_string += list1[i]\n",
230
+ "\n",
231
+ " # Otherwise, concatenate the current string with the current element of list1\n",
232
+ " if list2[i] == 0:\n",
233
+ " current_string += '. '+list1[i]\n",
234
+ "\n",
235
+ " # Add the final concatenated string to list3\n",
236
+ " list3.append(current_string)\n",
237
+ "\n",
238
+ " return [x.strip() for x in list3[1:]]\n",
239
+ "\n",
240
+ "def context_dict2context_list(context_dict):\n",
241
+ " list1 = list()\n",
242
+ " for all_keys in context_dict:\n",
243
+ " key = all_keys.split(';')[0]\n",
244
+ " try:\n",
245
+ " synonyms = all_keys.split(';')[1:]\n",
246
+ " except:\n",
247
+ " pass\n",
248
+ " # print(key)\n",
249
+ " str1 = context_dict[all_keys]\n",
250
+ " \n",
251
+ " split_list = [x.replace('\\n', '').strip() for x in str1.split('\\n\\n')]\n",
253
+ "\n",
254
+ " for sentence in split_list:\n",
255
+ " for s in split_paragraph(sentence, key):\n",
256
+ " #if s does not exist already:\n",
257
+ " try:\n",
258
+ " if s not in df['description'].values.tolist():\n",
259
+ " #add synonyms\n",
260
+ "\n",
261
+ " #SYNONYM MAY BE AN OBSOLETE METHOD TO AVOID CONTEXT IDENTIFICATION\n",
262
+ " for synonym in synonyms:\n",
263
+ " #manual replacement causes a wrong grammar\n",
264
+ " #gpt3 replacement\n",
265
+ " print(s, key, synonym)\n",
266
+ " prompt = f'in the following sentence: {s}. Replace {key} with {synonym} correcting the grammar'\n",
267
+ " answer = gpt3(prompt, model='gpt-3', service='azure').replace('\\n', '')\n",
268
+ " list1.append(answer)\n",
269
+ " list1.append(s)\n",
270
+ " else:\n",
271
+ " #if duplicate is found\n",
272
+ " pass\n",
273
+ " except:\n",
274
+ " #in case no df is loaded, ignore it\n",
275
+ " list1.append(s)\n",
276
+ "\n",
277
+ " return list1\n",
278
+ "\n",
279
+ "def add_context_list(context_list):\n",
280
+ " list1 = list()\n",
281
+ " for s in context_list:\n",
282
+ " try:\n",
283
+ " if s not in df['description'].values.tolist():\n",
284
+ " list1.append(s)\n",
285
+ " except:\n",
286
+ " #in case no df is loaded, ignore it\n",
287
+ " list1.append(s)\n",
288
+ " return list1\n",
289
+ "\n",
290
+ "context_dict = {\n",
291
+ " \"company; goliath; we\" :\n",
292
+ " \"\"\"\n",
293
+ " This is what we do: our company builds AI Recommendation Systems for Matching Platforms using the latest technology. Our company is estabilished and operates in Japan. Our company uses the AWS Cloud to manage Servers. Our company can use GPT3 as well. Our company also builds GPT3-based chatbots. Our company can use open-source models, if requested. Our company uses open source models. Our company operates in Japan. Our company has been operating for 1 year, and we are expanding in Hong Kong. Our company offers other services apart from recommendation systems, like GPT3 chatbots. Our company can also build recommendation systems for mobile apps. You can contact our company at ma@goliath.jp . There are 5 people working for our company.\n",
294
+ " \"\"\"\n",
295
+ " ,\n",
296
+ " \"price\" :\n",
297
+ " \"\"\"\n",
298
+ " The price of a recommendation system depends on the amount of complexity that is required to build, as well as the volume of customers. Reach us to get a quotation. The price of a chatbot depends by its intended usage and complexity, contact us for a quotation.\n",
299
+ " \"\"\"\n",
300
+ " ,\n",
301
+ " \"recommendation system\" :\n",
302
+ " \"\"\"\n",
303
+ " If your company wants to recommend products to customers, we can build a recommendation system for you. GPT3 can be used to build recommendation systems by using embeddings, mapping choices in a mathematical space. Once the recommendation system has been built, we will manage it in the future as well. Recommendation system could also be built for startups, though they will be in smaller size. We use AWS OpenSearch to host recommendation system. It takes from a few weeks to one month to build a recommendation system.\n",
304
+ " \"\"\"\n",
305
+ " ,\n",
306
+ " \"a matching platform\" :\n",
307
+ " \"\"\"\n",
308
+ " A matching platform is a business with thousands of users, who could be customers, individuals or companies, who are interacting with one another. For example dating apps, ecommerce platforms, or job recruiting platforms. \n",
309
+ " \"\"\"\n",
310
+ "}\n",
311
+ "#adding invidivual sentences\n",
312
+ "context_list_ = [\n",
313
+ " # 'We can also use GPT3, if requested',\n",
314
+ " 'You can contact us at ma@goliath.jp',\n",
315
+ " # 'We operate in the AI sector'\n",
316
+ "]\n",
317
+ "#adding qa\n",
318
+ "qa_list = {\n",
319
+ " 'How much does it cost?' : 'The price depends by its intended usage and complexity, contact us for a quotation.',\n",
320
+ " 'Do you use GPT3 API?' : 'yes, we can',\n",
321
+ " 'Do you use GPT3?' : 'yes, we can',\n",
322
+ " 'Do you use GPT4?' : 'yes, we can',\n",
323
+ " 'What do you do?' : 'Our company builds AI recommendation systems',\n",
324
+ " 'What does goliath do?' : 'Our company builds AI recommendation systems',\n",
325
+ " 'What does your company do?' : 'Our company builds AI recommendation systems',\n",
326
+ " 'How much does Goliath charge?' : 'The price depends by its intended usage and complexity, contact us for a quotation.',\n",
327
+ " 'How much does Goliath charge for a recommendation system?' : 'The price depends by its intended usage and complexity, contact us for a quotation.',\n",
328
+ " 'How much does Goliath charge for a chatbot?' : 'The price depends by its intended usage and complexity, contact us for a quotation.'\n",
329
+ " 'What is your charge?' : 'The price depends by its intended usage and complexity, contact us for a quotation.'\n",
330
+ "}\n",
331
+ "\n",
332
+ "#\n",
333
+ "# df = pd.DataFrame(columns=['description'])\n",
334
+ "df = pd.read_parquet('df.parquet') #if we comment it, it start from scratch\n",
335
+ "df_qa = pd.read_parquet('df_qa.parquet')\n",
336
+ "\n",
337
+ "#prepare context\n",
338
+ "missing_context = context_dict2context_list(context_dict)\n",
339
+ "missing_context += add_context_list(context_list_)\n",
340
+ "missing_context"
341
+ ]
342
+ },
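+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "A minimal offline sketch of what `split_paragraph` does: it cuts a paragraph into chunks, each chunk starting at a sentence that mentions the keyword (toy input, no API calls):"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "split_paragraph('Our price is low. We ship worldwide. The price includes support', 'price')\n",
+ "# -> ['Our price is low. We ship worldwide', 'The price includes support']\n"
+ ]
+ },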
343
+ {
344
+ "cell_type": "code",
345
+ "execution_count": 325,
346
+ "metadata": {},
347
+ "outputs": [
348
+ {
349
+ "data": {
350
+ "text/plain": [
351
+ "{}"
352
+ ]
353
+ },
354
+ "execution_count": 325,
355
+ "metadata": {},
356
+ "output_type": "execute_result"
357
+ }
358
+ ],
359
+ "source": [
360
+ "missing_qa = dict()\n",
361
+ "for question in qa_list:\n",
362
+ " answer = qa_list[question]\n",
363
+ " if question not in df_qa['question'].values.tolist():\n",
364
+ " print(question)\n",
365
+ " missing_qa[question] = answer\n",
366
+ "missing_qa"
367
+ ]
368
+ },
369
+ {
370
+ "cell_type": "code",
371
+ "execution_count": 267,
372
+ "metadata": {},
373
+ "outputs": [
374
+ {
375
+ "data": {
376
+ "text/plain": [
377
+ "'What does the company do?'"
378
+ ]
379
+ },
380
+ "execution_count": 267,
381
+ "metadata": {},
382
+ "output_type": "execute_result"
383
+ }
384
+ ],
385
+ "source": [
386
+ "def gpt3_reference(last_context, query):\n",
387
+ " #needs to be referred to the second\n",
388
+ " # last_context = 'you are a company'\n",
389
+ " # query = \"\"\"what do you do\"\"\"\n",
390
+ "\n",
391
+ " #apply a coreference resolution on the query and replace the pronoun with no temperature, no adjectives\n",
392
+ " prompt = f\"\"\"\n",
393
+ " context : {last_context} \n",
394
+ " query : {query}\n",
395
+ " instructions:\n",
396
+ " only if pronoun is unclear, replace query pronoun with its context reference. Return the edited query.\n",
397
+ " \"\"\" \n",
398
+ " answer = gpt3(prompt, model='gpt-3.5-turbo', service='azure')\n",
399
+ "\n",
400
+ " #replacements\n",
401
+ " answer = answer.replace('\\n', '')\n",
402
+ " answer = answer.replace('Answer:', '')\n",
403
+ " answer = answer.replace('answer:', '')\n",
404
+ " answer = answer.replace('answer', '')\n",
405
+ " answer = answer.strip()\n",
406
+ " return answer\n",
407
+ "\n",
408
+ "gpt3_reference('we are a company. Recommendation systems are expensive.', 'what do you do?')"
409
+ ]
410
+ },
411
+ {
412
+ "attachments": {},
413
+ "cell_type": "markdown",
414
+ "metadata": {},
415
+ "source": [
416
+ "### edit final df"
417
+ ]
418
+ },
419
+ {
420
+ "cell_type": "code",
421
+ "execution_count": 51,
422
+ "metadata": {},
423
+ "outputs": [],
424
+ "source": [
425
+ "#drop\n",
426
+ "df = pd.read_parquet('df.parquet')\n",
427
+ "df = df.drop([9, 10, 11]).reset_index(drop=True)\n",
428
+ "df.to_parquet('df.parquet')"
429
+ ]
430
+ },
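+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The indices passed to `drop` refer to the current row order, so a quick look before dropping helps pick them (a sketch):"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# df = pd.read_parquet('df.parquet')\n",
+ "# df['description'].to_dict() # inspect index -> description before choosing rows to drop\n"
+ ]
+ },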
431
+ {
432
+ "cell_type": "code",
433
+ "execution_count": 53,
434
+ "metadata": {},
435
+ "outputs": [],
436
+ "source": [
437
+ "#create df with vectors\n",
438
+ "# df_new = pd.DataFrame([context_list, list2vec(context_list)]).T #batch embeddings not available with azure\n",
439
+ "df_new = pd.DataFrame(context_list)\n",
440
+ "df_new[1] = df_new[0].apply(lambda x : text2vec(x, 'azure'))\n",
441
+ "\n",
442
+ "df_new.columns = ['description', 'text_vector_']\n",
443
+ "df_new['description'] = df_new['description'].apply(lambda x : x.strip())\n",
444
+ "\n",
445
+ "df_new = pd.concat([df, df_new], axis=0).reset_index(drop=True)\n",
446
+ "df_new.to_parquet('df.parquet')"
447
+ ]
448
+ },
449
+ {
450
+ "cell_type": "code",
451
+ "execution_count": 346,
452
+ "metadata": {},
453
+ "outputs": [
454
+ {
455
+ "data": {
456
+ "text/html": [
457
+ "<div>\n",
458
+ "<style scoped>\n",
459
+ " .dataframe tbody tr th:only-of-type {\n",
460
+ " vertical-align: middle;\n",
461
+ " }\n",
462
+ "\n",
463
+ " .dataframe tbody tr th {\n",
464
+ " vertical-align: top;\n",
465
+ " }\n",
466
+ "\n",
467
+ " .dataframe thead th {\n",
468
+ " text-align: right;\n",
469
+ " }\n",
470
+ "</style>\n",
471
+ "<table border=\"1\" class=\"dataframe\">\n",
472
+ " <thead>\n",
473
+ " <tr style=\"text-align: right;\">\n",
474
+ " <th></th>\n",
475
+ " <th>question</th>\n",
476
+ " <th>answer</th>\n",
477
+ " <th>text_vector_</th>\n",
478
+ " </tr>\n",
479
+ " </thead>\n",
480
+ " <tbody>\n",
481
+ " <tr>\n",
482
+ " <th>0</th>\n",
483
+ " <td>How much does it cost?</td>\n",
484
+ " <td>The price depends by its intended usage and co...</td>\n",
485
+ " <td>[0.028263725, -0.0101905335, 0.008142526, -0.0...</td>\n",
486
+ " </tr>\n",
487
+ " <tr>\n",
488
+ " <th>1</th>\n",
489
+ " <td>Do you use GPT3 API?</td>\n",
490
+ " <td>yes, we can</td>\n",
491
+ " <td>[0.008896397, -0.0057652825, 0.00010452615, -0...</td>\n",
492
+ " </tr>\n",
493
+ " <tr>\n",
494
+ " <th>2</th>\n",
495
+ " <td>Do you use GPT3?</td>\n",
496
+ " <td>yes, we can</td>\n",
497
+ " <td>[0.007887953, -0.0010633436, 6.204963e-05, -0....</td>\n",
498
+ " </tr>\n",
499
+ " <tr>\n",
500
+ " <th>3</th>\n",
501
+ " <td>Do you use GPT4?</td>\n",
502
+ " <td>yes, we can</td>\n",
503
+ " <td>[0.008745, -0.00041013403, -0.001318879, -0.04...</td>\n",
504
+ " </tr>\n",
505
+ " <tr>\n",
506
+ " <th>4</th>\n",
507
+ " <td>What do you do?</td>\n",
508
+ " <td>Our company builds AI recommendation systems</td>\n",
509
+ " <td>[-0.00083139725, -0.017905554, 0.0027184868, -...</td>\n",
510
+ " </tr>\n",
511
+ " <tr>\n",
512
+ " <th>5</th>\n",
513
+ " <td>What does goliath do?</td>\n",
514
+ " <td>Our company builds AI recommendation systems</td>\n",
515
+ " <td>[-0.02096649, -0.01710899, -0.00011881243, 0.0...</td>\n",
516
+ " </tr>\n",
517
+ " <tr>\n",
518
+ " <th>6</th>\n",
519
+ " <td>What does your company do?</td>\n",
520
+ " <td>Our company builds AI recommendation systems</td>\n",
521
+ " <td>[0.0068105333, -0.010677755, -0.00048340266, -...</td>\n",
522
+ " </tr>\n",
523
+ " <tr>\n",
524
+ " <th>7</th>\n",
525
+ " <td>How much does Goliath charge?</td>\n",
526
+ " <td>The price depends by its intended usage and co...</td>\n",
527
+ " <td>[0.0018087317, -0.013888897, -0.00455645, -0.0...</td>\n",
528
+ " </tr>\n",
529
+ " <tr>\n",
530
+ " <th>8</th>\n",
531
+ " <td>How much does Goliath charge for a recommendat...</td>\n",
532
+ " <td>The price depends by its intended usage and co...</td>\n",
533
+ " <td>[0.0006508778, -0.0021186466, -0.022374032, -0...</td>\n",
534
+ " </tr>\n",
535
+ " <tr>\n",
536
+ " <th>9</th>\n",
537
+ " <td>How much does Goliath charge for a chatbot?</td>\n",
538
+ " <td>The price depends by its intended usage and co...</td>\n",
539
+ " <td>[-0.009120062, -0.012517998, -0.0015486096, -0...</td>\n",
540
+ " </tr>\n",
541
+ " </tbody>\n",
542
+ "</table>\n",
543
+ "</div>"
544
+ ],
545
+ "text/plain": [
546
+ " question \\\n",
547
+ "0 How much does it cost? \n",
548
+ "1 Do you use GPT3 API? \n",
549
+ "2 Do you use GPT3? \n",
550
+ "3 Do you use GPT4? \n",
551
+ "4 What do you do? \n",
552
+ "5 What does goliath do? \n",
553
+ "6 What does your company do? \n",
554
+ "7 How much does Goliath charge? \n",
555
+ "8 How much does Goliath charge for a recommendat... \n",
556
+ "9 How much does Goliath charge for a chatbot? \n",
557
+ "\n",
558
+ " answer \\\n",
559
+ "0 The price depends by its intended usage and co... \n",
560
+ "1 yes, we can \n",
561
+ "2 yes, we can \n",
562
+ "3 yes, we can \n",
563
+ "4 Our company builds AI recommendation systems \n",
564
+ "5 Our company builds AI recommendation systems \n",
565
+ "6 Our company builds AI recommendation systems \n",
566
+ "7 The price depends by its intended usage and co... \n",
567
+ "8 The price depends by its intended usage and co... \n",
568
+ "9 The price depends by its intended usage and co... \n",
569
+ "\n",
570
+ " text_vector_ \n",
571
+ "0 [0.028263725, -0.0101905335, 0.008142526, -0.0... \n",
572
+ "1 [0.008896397, -0.0057652825, 0.00010452615, -0... \n",
573
+ "2 [0.007887953, -0.0010633436, 6.204963e-05, -0.... \n",
574
+ "3 [0.008745, -0.00041013403, -0.001318879, -0.04... \n",
575
+ "4 [-0.00083139725, -0.017905554, 0.0027184868, -... \n",
576
+ "5 [-0.02096649, -0.01710899, -0.00011881243, 0.0... \n",
577
+ "6 [0.0068105333, -0.010677755, -0.00048340266, -... \n",
578
+ "7 [0.0018087317, -0.013888897, -0.00455645, -0.0... \n",
579
+ "8 [0.0006508778, -0.0021186466, -0.022374032, -0... \n",
580
+ "9 [-0.009120062, -0.012517998, -0.0015486096, -0... "
581
+ ]
582
+ },
583
+ "metadata": {},
584
+ "output_type": "display_data"
585
+ }
586
+ ],
587
+ "source": [
588
+ "df_qa = pd.DataFrame([qa_list]).T.reset_index()\n",
589
+ "df_qa.columns = [0, 1]\n",
590
+ "df_qa['text_vector_'] = df_qa[0].apply(lambda x : text2vec(x, 'azure'))\n",
591
+ "df_qa.columns = ['question', 'answer', 'text_vector_']\n",
592
+ "display(df_qa)\n",
593
+ "df_qa.to_parquet('df_qa.parquet')"
594
+ ]
595
+ },
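+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Optional (a sketch): with the plain OpenAI endpoint the questions could be embedded in a single batched call via `list2vec` instead of one request per row; the azure deployment used above does not support batch embeddings."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# df_qa['text_vector_'] = list2vec(df_qa['question'].values.tolist())\n"
+ ]
+ },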
596
+ {
597
+ "attachments": {},
598
+ "cell_type": "markdown",
599
+ "metadata": {},
600
+ "source": [
601
+ "### qa function"
602
+ ]
603
+ },
604
+ {
605
+ "cell_type": "code",
606
+ "execution_count": 348,
607
+ "metadata": {},
608
+ "outputs": [],
609
+ "source": [
610
+ "import requests\n",
611
+ "import os\n",
612
+ "import torch\n",
613
+ "# os.system('pip install openpyxl')\n",
614
+ "# os.system('pip install sentence-transformers==2.2.2')\n",
615
+ "# os.system('pip install torch==1.13.0')\n",
616
+ "import pandas as pd\n",
617
+ "from sentence_transformers import SentenceTransformer, util\n",
618
+ "\n",
619
+ "#reference filter\n",
620
+ "def gpt3_reference(last_context, query):\n",
621
+ " #needs to be referred to the second\n",
622
+ " # last_context = 'you are a company'\n",
623
+ " # query = \"\"\"what do you do\"\"\"\n",
624
+ "\n",
625
+ " prompt = f\"\"\"\n",
626
+ " context : {last_context} \n",
627
+ " query : {query}\n",
628
+ " instructions:\n",
629
+ " apply a coreference resolution on the query and replace the pronoun with no temperature, no adjectives\n",
630
+ " \"\"\"\n",
631
+ " #only if pronoun is unclear, replace query pronoun with its reference\n",
632
+ " answer = gpt3(prompt, model='gpt-3.5-turbo', service='azure')\n",
633
+ "\n",
634
+ " #replacements\n",
635
+ " answer = answer.replace('\\n', '')\n",
636
+ " answer = answer.replace('Answer:', '')\n",
637
+ " answer = answer.replace('answer:', '')\n",
638
+ " answer = answer.replace('answer', '')\n",
639
+ " answer = answer.strip()\n",
640
+ " return answer\n",
641
+ "\n",
642
+ "# gpt3_reference(\"you are a company. recommendation systems are expensive\", \"How much do you charge?\")\n",
643
+ "\n",
644
+ "df = pd.read_parquet('df.parquet')\n",
645
+ "df_qa = pd.read_parquet('df_qa.parquet')\n",
646
+ "\n",
647
+ "df_qa_ = df_qa.copy()\n",
648
+ "df_ = df.copy()\n",
649
+ "\n",
650
+ "def qa(df_, df_qa_, min_qa_score, min_context_score, verbose, query):\n",
651
+ " query_vec = text2vec(query, 'azure')\n",
652
+ " query_vec = torch.DoubleTensor(query_vec)\n",
653
+ "\n",
654
+ " #first check if there is already a question in df_qa\n",
655
+ " df_qa_['score'] = df_qa_['text_vector_'].apply(lambda x : float(util.cos_sim(x, query_vec)))\n",
656
+ " df_qa_ = df_qa_.sort_values('score', ascending=False)\n",
657
+ " \n",
658
+ " if verbose : display(df_qa_[0:5])\n",
659
+ " df_qa_ = df_qa_[df_qa_['score']>=min_qa_score]\n",
660
+ " #if we find at least one possible preset answer\n",
661
+ " if len(df_qa_) > 0:\n",
662
+ " answer = df_qa_[0:1]['answer'].values.tolist()[0]\n",
663
+ " return answer\n",
664
+ " \n",
665
+ " #then check if we can use the context to answer a question\n",
666
+ " df_['score'] = df_['text_vector_'].apply(lambda x : float(util.cos_sim(x, query_vec)))\n",
667
+ " df_ = df_.sort_values('score', ascending=False)\n",
668
+ " if verbose : display(df_[0:5])\n",
669
+ " df_ = df_[df_['score']>=min_context_score]\n",
670
+ " #if we find at least one possible preset answer\n",
671
+ " if len(df_) > 0:\n",
672
+ " #in case we might decide to merge multiple context\n",
673
+ " context = ' '.join(df_['description'][0:1].values.tolist())\n",
674
+ " prompt = f\"\"\"\n",
675
+ " context: {context}\n",
676
+ " query: {query}\n",
677
+ " Answer the query using context. Do not justify the answer.\n",
678
+ " \"\"\"\n",
679
+ " answer = gpt3(prompt, model='gpt-3.5-turbo', service='azure')\n",
680
+ " return answer\n",
681
+ " else:\n",
682
+ " return 'impossible to give an answer'\n",
683
+ "\n",
684
+ "# bot_answer = qa(\n",
685
+ "# df_, \n",
686
+ "# df_qa_, \n",
687
+ "# min_qa_score=0.92, \n",
688
+ "# min_context_score=.75, \n",
689
+ "# verbose=False, \n",
690
+ "# query='how much does a recommendation system cost?'\n",
691
+ "# )\n",
692
+ "# bot_answer"
693
+ ]
694
+ },
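+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "`qa` ranks rows by cosine similarity between the query vector and the stored vectors. A local sketch of that scoring (no API calls; the 0.92 / 0.75 thresholds above are empirical choices):"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import torch\n",
+ "from sentence_transformers import util\n",
+ "\n",
+ "a = torch.tensor([1.0, 0.0])\n",
+ "b = torch.tensor([0.6, 0.8])\n",
+ "float(util.cos_sim(a, b)) # cosine similarity in [-1, 1] -> 0.6 here\n"
+ ]
+ },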
695
+ {
696
+ "attachments": {},
697
+ "cell_type": "markdown",
698
+ "metadata": {},
699
+ "source": [
700
+ "### testing"
701
+ ]
702
+ },
703
+ {
704
+ "cell_type": "code",
705
+ "execution_count": 294,
706
+ "metadata": {},
707
+ "outputs": [
708
+ {
709
+ "data": {
710
+ "text/plain": [
711
+ "['what does your company do?',\n",
712
+ " 'how much does your company charge for a recommendation system?',\n",
713
+ " 'how much does your company charge?',\n",
714
+ " 'What does Goliath do?',\n",
715
+ " 'How much does Goliath charge for a recommendation system?',\n",
716
+ " 'How much does Goliath charge?',\n",
717
+ " 'What do you do?',\n",
718
+ " 'How much do you charge for a recommendation system?',\n",
719
+ " 'What is your charge?',\n",
720
+ " 'how much does a recommendation system cost?',\n",
721
+ " 'what is the pricing structure?']"
722
+ ]
723
+ },
724
+ "execution_count": 294,
725
+ "metadata": {},
726
+ "output_type": "execute_result"
727
+ }
728
+ ],
729
+ "source": [
730
+ "testing_questions = {\n",
731
+ " 'company; goliath; you' : #subject is the company\n",
732
+ " [\n",
733
+ " 'what does your company do?',\n",
734
+ " 'how much does your company charge for a recommendation system?',\n",
735
+ " 'how much does your company charge?'\n",
736
+ " \n",
737
+ " ],\n",
738
+ " \"recommendation system\" : \n",
739
+ " [\n",
740
+ " 'how much does a recommendation system cost?'\n",
741
+ " ],\n",
742
+ " \"price\" : \n",
743
+ " [\n",
744
+ " \"what is the pricing structure?\"\n",
745
+ " ]\n",
746
+ "}\n",
747
+ "\n",
748
+ "list1 = list()\n",
749
+ "for key in testing_questions:\n",
750
+ " list2 = testing_questions[key]\n",
751
+ " #we add the original questions\n",
752
+ " if ';' in key:\n",
753
+ " list1 += list2\n",
754
+ " mainkey = key.split(';')[0]\n",
755
+ " for subkey in key.split(';')[1:]:\n",
756
+ " for question in list2:\n",
757
+ " # print(mainkey, subkey.strip())\n",
758
+ " prompt = f\"\"\"\n",
759
+ " question: {question}\n",
760
+ " instructions: replace {mainkey} with {subkey}. Correct the grammar.\n",
761
+ " \"\"\"\n",
762
+ " new_text = '_'\n",
763
+ " new_text = gpt3(prompt, 'gpt-3.5-turbo', 'azure', max_tokens=400)\n",
764
+ " new_text = new_text.replace('\\n', '')\n",
765
+ " new_text = new_text.replace('\"', '')\n",
766
+ " list1.append(new_text)\n",
767
+ " else:\n",
768
+ " list1 += list2\n",
769
+ "list1"
770
+ ]
771
+ },
772
+ {
773
+ "cell_type": "code",
774
+ "execution_count": 341,
775
+ "metadata": {},
776
+ "outputs": [
777
+ {
778
+ "name": "stdout",
779
+ "output_type": "stream",
780
+ "text": [
781
+ "what does your company do? -> Our company builds AI recommendaion systems\n",
782
+ "how much does your company charge for a recommendation system? -> The price depends by its intended usage and complexity, contact us for a quotation.\n",
783
+ "how much does your company charge? -> The price of a recommendation system depends on the amount of complexity that is required to build, as well as the volume of customers. Reach us to get a quotation.\n",
784
+ "What does Goliath do? -> Our company builds AI recommendaion systems\n",
785
+ "How much does Goliath charge for a recommendation system? -> The price depends by its intended usage and complexity, contact us for a quotation.\n",
786
+ "How much does Goliath charge? -> The price depends by its intended usage and complexity, contact us for a quotation.\n",
787
+ "What do you do? -> Our company builds AI recommendaion systems\n",
788
+ "How much do you charge for a recommendation system? -> The price depends by its intended usage and complexity, contact us for a quotation.\n",
789
+ "What is your charge? -> The context provided is unrelated to the query.\n",
790
+ "how much does a recommendation system cost? -> The price depends by its intended usage and complexity, contact us for a quotation.\n",
791
+ "what is the pricing structure? -> The pricing of a recommendation system depends on the complexity of the required build and the volume of customers. Contact the company to receive a quotation.\n"
792
+ ]
793
+ }
794
+ ],
795
+ "source": [
796
+ "for question in list1:\n",
797
+ " bot_answer = qa(\n",
798
+ " df_, \n",
799
+ " df_qa_, \n",
800
+ " min_qa_score=0.92, \n",
801
+ " min_context_score=.75, \n",
802
+ " verbose=False, \n",
803
+ " query=question\n",
804
+ " )\n",
805
+ " print(question, '->', bot_answer)"
806
+ ]
807
+ },
808
+ {
809
+ "cell_type": "code",
810
+ "execution_count": null,
811
+ "metadata": {},
812
+ "outputs": [],
813
+ "source": [
814
+ "qa(\n",
815
+ " df_, \n",
816
+ " df_qa_, \n",
817
+ " min_qa_score=0.92, \n",
818
+ " min_context_score=.75, \n",
819
+ " verbose=False, \n",
820
+ " query='How much for a recommender system?'\n",
821
+ " )"
822
+ ]
823
+ },
824
+ {
825
+ "attachments": {},
826
+ "cell_type": "markdown",
827
+ "metadata": {},
828
+ "source": [
829
+ "### gradio"
830
+ ]
831
+ },
832
+ {
833
+ "cell_type": "code",
834
+ "execution_count": 349,
835
+ "metadata": {},
836
+ "outputs": [
837
+ {
838
+ "name": "stdout",
839
+ "output_type": "stream",
840
+ "text": [
841
+ "Running on local URL: http://127.0.0.1:7878\n",
842
+ "\n",
843
+ "To create a public link, set `share=True` in `launch()`.\n"
844
+ ]
845
+ },
846
+ {
847
+ "data": {
848
+ "text/html": [
849
+ "<div><iframe src=\"http://127.0.0.1:7878/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
850
+ ],
851
+ "text/plain": [
852
+ "<IPython.core.display.HTML object>"
853
+ ]
854
+ },
855
+ "metadata": {},
856
+ "output_type": "display_data"
857
+ },
858
+ {
859
+ "data": {
860
+ "text/plain": []
861
+ },
862
+ "execution_count": 349,
863
+ "metadata": {},
864
+ "output_type": "execute_result"
865
+ }
866
+ ],
867
+ "source": [
868
+ "import subprocess\n",
869
+ "import random\n",
870
+ "import gradio as gr\n",
871
+ "import requests\n",
872
+ "\n",
873
+ "history = None\n",
874
+ "\n",
875
+ "def predict(input, history, last_context):\n",
876
+ " last_context += 'you are a company'\n",
877
+ "\n",
878
+ " #WE CAN PLAY WITH user_input AND bot_answer, as well as history\n",
879
+ " user_input = input\n",
880
+ "\n",
881
+ " query = gpt3_reference(last_context, user_input)\n",
882
+ " bot_answer = qa(\n",
883
+ " df_, \n",
884
+ " df_qa_, \n",
885
+ " min_qa_score=0.92, \n",
886
+ " min_context_score=.75, \n",
887
+ " verbose=False, \n",
888
+ " query=input\n",
889
+ " )\n",
890
+ "\n",
891
+ " response = list()\n",
892
+ " response = [(input, bot_answer)]\n",
893
+ " \n",
894
+ " history.append(response[0])\n",
895
+ " response = history\n",
896
+ "\n",
897
+ " last_context = input\n",
898
+ "\n",
899
+ " # print('#history', history)\n",
900
+ " # print('#response', response)\n",
901
+ "\n",
902
+ " return response, history, last_context\n",
903
+ "\n",
904
+ "demo = gr.Blocks()\n",
905
+ "with demo:\n",
906
+ " gr.Markdown(\n",
907
+ " \"\"\"\n",
908
+ " Chatbot\n",
909
+ " \"\"\"\n",
910
+ " )\n",
911
+ " state = gr.Variable(value=[]) #beginning\n",
912
+ " last_context = gr.Variable(value='') #beginning\n",
913
+ " chatbot = gr.Chatbot() #color_map=(\"#00ff7f\", \"#00d5ff\")\n",
914
+ " text = gr.Textbox(\n",
915
+ " label=\"Question\",\n",
916
+ " value=\"What is a recommendation system?\",\n",
917
+ " placeholder=\"\",\n",
918
+ " max_lines=1,\n",
919
+ " )\n",
920
+ " text.submit(predict, [text, state, last_context], [chatbot, state, last_context])\n",
921
+ " text.submit(lambda x: \"\", text, text)\n",
922
+ " # btn = gr.Button(value=\"submit\")\n",
923
+ " # btn.click(chatbot_foo, None, [chatbot, state])\n",
924
+ "\n",
925
+ "demo.launch(share=False)"
926
+ ]
927
+ }
928
+ ],
929
+ "metadata": {
930
+ "kernelspec": {
931
+ "display_name": "Python 3",
932
+ "language": "python",
933
+ "name": "python3"
934
+ },
935
+ "language_info": {
936
+ "codemirror_mode": {
937
+ "name": "ipython",
938
+ "version": 3
939
+ },
940
+ "file_extension": ".py",
941
+ "mimetype": "text/x-python",
942
+ "name": "python",
943
+ "nbconvert_exporter": "python",
944
+ "pygments_lexer": "ipython3",
945
+ "version": "3.9.13"
946
+ },
947
+ "orig_nbformat": 4
948
+ },
949
+ "nbformat": 4,
950
+ "nbformat_minor": 2
951
+ }
df.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:822e5af50f9704aa3b6a91901782faefc4424342fd75da227ff52316b36ff7c2
3
+ size 848949
df_qa.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2dd615502335060ed7d6c634baad5ba9f0b97ab51331a333a21ea605e38f843d
3
+ size 143679