{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "# os.system('pip install requests')\n",
    "import requests\n",
    "# read the OpenAI key from the environment instead of hard-coding it in the notebook\n",
    "gpt3_key = os.environ['GPT3_API_KEY']\n",
    "\n",
    "def gpt3_question(api_key, prompt):\n",
    "    # single completion request against the text-davinci-003 engine\n",
    "    api_endpoint = \"https://api.openai.com/v1/engines/text-davinci-003/completions\"\n",
    "    headers = {\n",
    "        \"Content-Type\": \"application/json\",\n",
    "        \"Authorization\": f\"Bearer {api_key}\"\n",
    "    }\n",
    "    data = {\n",
    "        \"prompt\": prompt,\n",
    "        \"max_tokens\": 400,\n",
    "        \"temperature\": 0.5\n",
    "    }\n",
    "    print('sending request')\n",
    "    response = requests.post(api_endpoint, headers=headers, json=data)\n",
    "    print(response)\n",
    "    generated_text = response.json()[\"choices\"][0][\"text\"]\n",
    "\n",
    "    return generated_text\n",
    "\n",
    "def chatgpt3_question(api_key, prompt):\n",
    "    # same request, but against the gpt-3.5-turbo chat endpoint\n",
    "    url = \"https://api.openai.com/v1/chat/completions\"\n",
    "\n",
    "    headers = {\n",
    "        \"Content-Type\": \"application/json\",\n",
    "        \"Authorization\": f\"Bearer {api_key}\"\n",
    "    }\n",
    "\n",
    "    data = {\n",
    "        \"model\": \"gpt-3.5-turbo\",\n",
    "        \"messages\": [{\"role\": \"user\", \"content\": prompt}]\n",
    "    }\n",
    "\n",
    "    response = requests.post(url, headers=headers, json=data)\n",
    "    generated_text = response.json()['choices'][0]['message']['content']\n",
    "\n",
    "    return generated_text\n",
    "\n",
    "def history2prompt(history, extra):\n",
    "    # flatten the (user, bot) history plus the latest input into a single prompt string\n",
    "    # history = [('The other day it was raining, and while I was driving I hit a stranger with my car.', 'Did you stop and render aid to the victim after the accident?'), ('True', 'Did you kill the guy?'), ('False', 'Was he part of the Mafia?')]\n",
    "    history_ = [item for tup in history for item in tup]\n",
    "    history_.append(extra)\n",
    "    print(history_)\n",
    "\n",
    "    if len(history_) > 1:\n",
    "        combinations = []\n",
    "        for i in range(1, len(history_)):\n",
    "            if i % 2 == 1:\n",
    "                combinations.append([i, i+2])\n",
    "\n",
    "        history_full = list()\n",
    "        history_full.append(history_[0])\n",
    "        for range_ in combinations:\n",
    "            history_full.append(' - '.join(history_[range_[0]:range_[1]]))\n",
    "\n",
    "        return '\\n'.join(history_full)\n",
    "    else:\n",
    "        return history_[0]\n",
    "\n",
    "# gpt3_keywords('The other day it was raining, and while I was driving I hit a stranger with my car.')\n",
    "\n",
    "import subprocess\n",
    "import random\n",
    "import gradio as gr\n",
    "import requests\n",
    "\n",
    "history = None\n",
    "history_prompt = None\n",
    "history_final = None\n",
    "block_predict = False\n",
    "block_advice = False\n",
    "\n",
    "def predict(input, history):\n",
    "    # WE CAN PLAY WITH user_input AND bot_answer, as well as history\n",
    "    user_input = input\n",
    "\n",
    "    # print('##', [x for x in history], input)\n",
    "    global history_prompt\n",
    "    global history_final\n",
    "    global block_predict\n",
    "\n",
    "    if block_predict == False:\n",
    "        print('@@@', history)\n",
    "        history_prompt = history2prompt(history, input)\n",
    "        print('###', history_prompt)\n",
    "\n",
    "        prompt = f\"\"\"\n",
    "        Imagine being a criminal lawyer being told the following story with the following circumstances: {history_prompt}\n",
    "        Output the first relevant legal question that can result in the highest incrimination for the client (if somebody is hurt, start from fatal injuries), and that can only be answered as Yes or No\n",
    "        \"\"\"\n",
    "        bot_answer = gpt3_question(gpt3_key, prompt)\n",
    "\n",
    "        response = list()\n",
    "        response = [(input, bot_answer)]\n",
    "\n",
    "        history.append(response[0])\n",
    "        response = history\n",
    "        history_final = history\n",
    "\n",
    "        # print('#history', history)\n",
    "        # print('#response', response)\n",
    "\n",
    "        return response, history\n",
    "\n",
    "    # if predictions are blocked, echo the current history unchanged\n",
    "    return history, history\n",
    "\n",
    "def chatbot_foo():\n",
    "    global history_prompt\n",
    "    global history_final\n",
    "    global block_predict\n",
    "    global block_advice\n",
    "\n",
    "    if block_advice == False and history_prompt is not None:\n",
    "        prompt = f\"\"\"\n",
    "        Imagine being an Ohio criminal lawyer being told the following story with the following circumstances: {history_prompt}\n",
    "        Tell the client how much he risks in terms of criminal charges and prison, and cite sources from law books\n",
    "        \"\"\"\n",
    "        bot_answer = gpt3_question(gpt3_key, prompt)\n",
    "\n",
    "        history_final.append(('Consult me on the matter:', bot_answer))\n",
    "\n",
    "        block_predict = True\n",
    "        block_advice = True\n",
    "\n",
    "    return history_final, history_final\n",
    "\n",
    "def reset_interface():\n",
    "    # clear all conversation state so a new case can be started\n",
    "    global history_prompt\n",
    "    global history_final\n",
    "    global block_predict\n",
    "    global block_advice\n",
    "\n",
    "    history_prompt = None\n",
    "    history_final = None\n",
    "    block_predict = False\n",
    "    block_advice = False\n",
    "\n",
    "demo = gr.Blocks()\n",
    "with demo:\n",
    "    gr.Markdown(\n",
    "        \"\"\"\n",
    "        Chat with Morty by typing in the input box below.\n",
    "        \"\"\"\n",
    "    )\n",
    "    state = gr.Variable(value=[]) #beginning\n",
    "    chatbot = gr.Chatbot(color_map=(\"#00ff7f\", \"#00d5ff\"))\n",
    "    text = gr.Textbox(\n",
    "        label=\"Talk to your lawyer (press enter to submit)\",\n",
    "        value=\"The other day it was raining, and while I was driving I hit a stranger with my car.\",\n",
    "        placeholder=\"reply Yes or No\",\n",
    "        max_lines=1,\n",
    "    )\n",
    "    text.submit(predict, [text, state], [chatbot, state])\n",
    "    text.submit(lambda x: \"\", text, text)\n",
    "\n",
    "    btn = gr.Button(value=\"submit\")\n",
    "    btn.click(chatbot_foo, None, [chatbot, state])\n",
    "\n",
    "    btn2 = gr.Button(value=\"reset\")\n",
    "    btn2.click(reset_interface)\n",
    "    # true_false_radio = gr.Radio(choices=[\"True\", \"False\"], label=\"Select True or False\")\n",
    "    # iface = gr.Interface(fn=my_function, inputs=[text, true_false_radio], outputs=chatbot, live=True, capture_session=True)\n",
    "\n",
    "demo.launch(share=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "None\n",
      "a\n"
     ]
    }
   ],
   "source": [
    "print(history_prompt)\n",
    "\n",
    "if history_prompt is not None:\n",
    "    print('a')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Running on local URL: http://127.0.0.1:7862\n",
      "\n",
      "To create a public link, set `share=True` in `launch()`.\n"
     ]
    },
    {
     "data": {
      "text/html": [
       ""
      ],
      "text/plain": [
       ""
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": []
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "@@@ []\n",
      "['The other day it was raining, and while I was driving a hit a stranger with my car.']\n",
      "### The other day it was raining, and while I was driving a hit a stranger with my car.\n",
      "sending request\n",
      "\n",
      "@@@ [('The other day it was raining, and while I was driving a hit a stranger with my car.', '\\nDid the stranger suffer any fatal injuries as a result of the collision?')]\n",
      "['The other day it was raining, and while I was driving a hit a stranger with my car.', '\\nDid the stranger suffer any fatal injuries as a result of the collision?', 'yes']\n",
      "### The other day it was raining, and while I was driving a hit a stranger with my car.\n",
      "\n",
      "Did the stranger suffer any fatal injuries as a result of the collision? - yes\n",
      "sending request\n",
      "\n",
      "sending request\n",
      "\n"
     ]
    }
   ],
   "source": [
    "import os\n",
    "# os.system('pip install requests')\n",
    "import requests\n",
    "# read the OpenAI key from the environment instead of hard-coding it in the notebook\n",
    "gpt3_key = os.environ['GPT3_API_KEY']\n",
    "\n",
    "def gpt3_question(api_key, prompt):\n",
    "    api_endpoint = \"https://api.openai.com/v1/engines/text-davinci-003/completions\"\n",
    "    headers = {\n",
    "        \"Content-Type\": \"application/json\",\n",
    "        \"Authorization\": f\"Bearer {api_key}\"\n",
    "    }\n",
    "    data = {\n",
    "        \"prompt\": prompt,\n",
    "        \"max_tokens\": 400,\n",
    "        \"temperature\": 0.5\n",
    "    }\n",
    "    print('sending request')\n",
    "    response = requests.post(api_endpoint, headers=headers, json=data)\n",
    "    print(response)\n",
    "    generated_text = response.json()[\"choices\"][0][\"text\"]\n",
    "\n",
    "    return generated_text\n",
    "\n",
    "def chatgpt3_question(api_key, prompt):\n",
    "    url = \"https://api.openai.com/v1/chat/completions\"\n",
    "\n",
    "    headers = {\n",
    "        \"Content-Type\": \"application/json\",\n",
    "        \"Authorization\": f\"Bearer {api_key}\"\n",
    "    }\n",
    "\n",
    "    data = {\n",
    "        \"model\": \"gpt-3.5-turbo\",\n",
    "        \"messages\": [{\"role\": \"user\", \"content\": prompt}]\n",
    "    }\n",
    "\n",
    "    response = requests.post(url, headers=headers, json=data)\n",
    "    generated_text = response.json()['choices'][0]['message']['content']\n",
    "\n",
    "    return generated_text\n",
    "\n",
    "def history2prompt(history, extra):\n",
    "    # history = [('The other day it was raining, and while I was driving I hit a stranger with my car.', 'Did you stop and render aid to the victim after the accident?'), ('True', 'Did you kill the guy?'), ('False', 'Was he part of the Mafia?')]\n",
    "    history_ = [item for tup in history for item in tup]\n",
    "    history_.append(extra)\n",
    "    print(history_)\n",
    "\n",
    "    if len(history_) > 1:\n",
    "        combinations = []\n",
    "        for i in range(1, len(history_)):\n",
    "            if i % 2 == 1:\n",
    "                combinations.append([i, i+2])\n",
    "\n",
    "        history_full = list()\n",
    "        history_full.append(history_[0])\n",
    "        for range_ in combinations:\n",
    "            history_full.append(' - '.join(history_[range_[0]:range_[1]]))\n",
    "\n",
    "        return '\\n'.join(history_full)\n",
    "    else:\n",
    "        return history_[0]\n",
    "\n",
    "# gpt3_keywords('The other day it was raining, and while I was driving I hit a stranger with my car.')\n",
    "\n",
    "import subprocess\n",
    "import random\n",
    "import gradio as gr\n",
    "import requests\n",
    "\n",
    "# history = None\n",
    "history_prompt = None\n",
    "history_final = None\n",
    "block_predict = False\n",
    "block_advice = False\n",
    "\n",
    "def predict(input, history, start_var):\n",
    "    # WE CAN PLAY WITH user_input AND bot_answer, as well as history\n",
    "    user_input = input\n",
    "\n",
    "    # print('##', [x for x in history], input)\n",
    "    global history_prompt\n",
    "    global history_final\n",
    "    global block_predict\n",
    "    global block_advice\n",
    "\n",
    "    if start_var == True:\n",
    "        # first call of a fresh session: clear any state left over from a previous run\n",
    "        history_prompt = None\n",
    "        history_final = None\n",
    "        block_predict = False\n",
    "        block_advice = False\n",
    "        start_var = False\n",
    "\n",
    "    if block_predict == False:\n",
    "        print('@@@', history)\n",
    "        history_prompt = history2prompt(history, input)\n",
    "        print('###', history_prompt)\n",
    "\n",
    "        prompt = f\"\"\"\n",
    "        Imagine being a criminal lawyer being told the following story with the following circumstances: {history_prompt}\n",
    "        Output the first relevant legal question that can result in the highest incrimination for the client (if somebody is hurt, start from fatal injuries), and that can only be answered as Yes or No\n",
    "        \"\"\"\n",
    "        bot_answer = gpt3_question(gpt3_key, prompt)\n",
    "\n",
    "        response = list()\n",
    "        response = [(input, bot_answer)]\n",
    "\n",
    "        history.append(response[0])\n",
    "        response = history\n",
    "        history_final = history\n",
    "\n",
    "        # print('#history', history)\n",
    "        # print('#response', response)\n",
    "\n",
    "        return response, history\n",
    "\n",
    "    # if predictions are blocked, echo the current history unchanged\n",
    "    return history, history\n",
    "\n",
    "def chatbot_foo():\n",
    "    global history_prompt\n",
    "    global history_final\n",
    "    global block_predict\n",
    "    global block_advice\n",
    "\n",
    "    if block_advice == False and history_prompt is not None:\n",
    "\n",
    "        prompt = f\"\"\"\n",
    "        Imagine being an Ohio criminal lawyer being told the following story with the following circumstances: {history_prompt}\n",
    "        Tell the client how much he risks in terms of criminal charges and prison, and cite sources from law books\n",
    "        \"\"\"\n",
    "        bot_answer = gpt3_question(gpt3_key, prompt)\n",
    "\n",
    "        history_final.append(('Consult me on the matter:', bot_answer))\n",
    "\n",
    "        block_predict = True\n",
    "        block_advice = True\n",
    "\n",
    "    return history_final, history_final\n",
    "\n",
    "demo = gr.Blocks()\n",
    "with demo:\n",
    "    gr.Markdown(\n",
    "        \"\"\"\n",
    "        Chat with your Lawyer\n",
    "        \"\"\"\n",
    "    )\n",
    "    state = gr.Variable(value=[]) #beginning\n",
    "    start_var = gr.Variable(value=True) #beginning\n",
    "    chatbot = gr.Chatbot(color_map=(\"#00ff7f\", \"#00d5ff\"))\n",
    "    text = gr.Textbox(\n",
    "        label=\"Talk to your lawyer (press enter to submit)\",\n",
    "        value=\"The other day it was raining, and while I was driving I hit a stranger with my car.\",\n",
    "        placeholder=\"reply Yes or No\",\n",
    "        max_lines=1,\n",
    "    )\n",
    "    text.submit(predict, [text, state, start_var], [chatbot, state])\n",
    "    text.submit(lambda x: \"\", text, text)\n",
    "\n",
    "    btn = gr.Button(value=\"submit\")\n",
    "    btn.click(chatbot_foo, None, [chatbot, state])\n",
    "    # true_false_radio = gr.Radio(choices=[\"True\", \"False\"], label=\"Select True or False\")\n",
    "    # iface = gr.Interface(fn=my_function, inputs=[text, true_false_radio], outputs=chatbot, live=True, capture_session=True)\n",
    "\n",
    "demo.launch(share=False)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.13"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}