File size: 5,192 Bytes
4c0a16b
 
 
0aa4b61
4c0a16b
0aa4b61
4c0a16b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0aa4b61
4c0a16b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8feb591
4c0a16b
 
 
 
 
8feb591
4c0a16b
 
 
 
 
 
 
8feb591
 
 
 
 
 
 
 
4c0a16b
 
 
 
 
 
 
 
 
 
0aa4b61
4c0a16b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0aa4b61
4c0a16b
 
 
 
 
0aa4b61
4c0a16b
 
 
 
 
 
 
 
 
 
 
 
d1fbdbe
4c0a16b
 
 
 
8feb591
4c0a16b
 
 
 
0aa4b61
4c0a16b
 
8feb591
4c0a16b
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
# HTTP client + environment access for the OpenAI REST calls below.
import os
# os.system('pip install requests')
import requests
# OpenAI API key; raises KeyError at import time if GPT3_API_KEY is unset.
gpt3_key = os.environ['GPT3_API_KEY']

def gpt3_question(api_key, prompt):
    """Send *prompt* to the OpenAI text-davinci-003 completions endpoint.

    Parameters
    ----------
    api_key : str
        OpenAI API key, sent as a Bearer token.
    prompt : str
        Prompt text to complete.

    Returns
    -------
    str
        The generated completion text.

    Raises
    ------
    requests.HTTPError
        If the API responds with a non-2xx status.
    requests.Timeout
        If the request does not complete within the timeout.
    """
    # NOTE(review): the /v1/engines/* completions route is deprecated
    # upstream; consider migrating to /v1/completions with a "model" field.
    api_endpoint = "https://api.openai.com/v1/engines/text-davinci-003/completions"
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}"
    }
    data = {
        "prompt": prompt,
        "max_tokens": 400,
        "temperature": 0.5
    }
    print('sending request')
    # Bounded wait instead of hanging forever; fail loudly on HTTP errors so
    # the caller does not get an opaque KeyError on the JSON access below.
    response = requests.post(api_endpoint, headers=headers, json=data, timeout=60)
    print(response)
    response.raise_for_status()
    generated_text = response.json()["choices"][0]["text"]

    return generated_text

def chatgpt3_question(api_key, prompt):
    """Send *prompt* as a single user message to the Chat Completions API.

    Parameters
    ----------
    api_key : str
        OpenAI API key, sent as a Bearer token.
    prompt : str
        User message content.

    Returns
    -------
    str
        The assistant's reply text (gpt-3.5-turbo).

    Raises
    ------
    requests.HTTPError
        If the API responds with a non-2xx status.
    requests.Timeout
        If the request does not complete within the timeout.
    """
    url = "https://api.openai.com/v1/chat/completions"

    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}"
    }

    data = {
        "model": "gpt-3.5-turbo",
        "messages": [{"role": "user", "content": prompt}]
    }

    # Bounded wait + explicit HTTP error check (the original would raise an
    # opaque KeyError on any API error payload).
    response = requests.post(url, headers=headers, json=data, timeout=60)
    response.raise_for_status()
    generated_text = response.json()['choices'][0]['message']['content']

    return generated_text

def history2prompt(history, extra):
    """Flatten a chat *history* plus the newest reply into a prompt string.

    ``history`` is a list of ``(user_message, bot_question)`` tuples and
    *extra* is the user's latest answer.  The first flattened item becomes
    the first line; every following question/answer pair is joined with
    ``' - '`` on its own line.

    Example:
        history2prompt([('story', 'Q1'), ('True', 'Q2')], 'False')
        -> 'story\\nQ1 - True\\nQ2 - False'

    Returns
    -------
    str
        The multi-line prompt, or the single item unchanged when there is
        no prior history.
    """
    flat = [item for pair in history for item in pair]
    flat.append(extra)

    if len(flat) == 1:
        return flat[0]

    # First item stands alone; remaining items pair up two at a time
    # (bot question followed by the user's answer).
    lines = [flat[0]]
    lines.extend(' - '.join(flat[i:i + 2]) for i in range(1, len(flat), 2))
    return '\n'.join(lines)

# gpt3_keywords('The other day it was raining, and while I was driving a hit a stranger with my car.')

import subprocess
import random
import gradio as gr
import requests

# history = None
# Module-level session state shared by predict() and chatbot_foo().
history_prompt = None   # last prompt string built from the chat history
history_final = None    # full transcript as a list of (user, bot) pairs
block_predict = False   # True once advice is given: stops further Q&A
block_advice = False    # True once advice is given: prevents repeat advice

def predict(input, history, start_var):
    """Gradio submit handler: ask GPT-3 for the next yes/no legal question.

    Builds a prompt from the running conversation, requests the next most
    incriminating question, and appends the (user, bot) exchange to the
    chat history.

    Parameters
    ----------
    input : str
        The user's latest message.
    history : list
        Chat history as (user, bot) pairs (Gradio state).
    start_var : bool
        True on the first submit of a session; triggers a state reset.

    Returns
    -------
    tuple
        (chatbot pairs, updated state history).
    """
    global history_prompt
    global history_final
    global block_predict
    global block_advice

    # Fresh session: clear any state left over from a previous run.
    if start_var == True:
        history_prompt = None
        history_final = None
        block_predict = False
        block_advice = False
        start_var = False

    if block_predict == False:
        print('@@@', history)
        history_prompt = history2prompt(history, input)
        print('###', history_prompt)

        prompt = f"""
        Imagine being a criminal lawyer being told the following story with the following circumstances: {history_prompt}
        Output the first relevant legal question that can result in the highest incrimination for the client (if somebody is hurt, start from fatal injuries), and that can only be answered as Yes or No
        """
        bot_answer = gpt3_question(gpt3_key, prompt)

        response = [(input, bot_answer)]

        history.append(response[0])
        response = history
        history_final = history

        return response, history

    # BUG FIX: the original fell through and returned None once predictions
    # were blocked, which breaks Gradio's output binding. Echo the current
    # state unchanged instead.
    return history, history

def chatbot_foo():
    """Gradio click handler: produce the final legal advice.

    Builds an advice prompt from the accumulated ``history_prompt``,
    appends the answer to the transcript, and freezes both the Q&A loop
    and further advice requests (one-shot).

    Returns
    -------
    tuple
        (chatbot pairs, state) — the current transcript, unchanged when
        advice is unavailable or was already given.
    """
    global history_prompt
    global history_final
    global block_predict
    global block_advice

    if block_advice == False and history_prompt is not None:

        prompt = f"""
        Imagine being an Ohio criminal lawyer being told the following story with the following circumstances: {history_prompt}
        Tell the client how much does he risk in terms of criminal charges, prison, and cite sources from law books
        """
        bot_answer = gpt3_question(gpt3_key, prompt)

        history_final.append(('Consult me on the matter:', bot_answer))

        # One-shot: freeze both the Q&A loop and repeat advice requests.
        block_predict = True
        block_advice = True

    # BUG FIX: the original returned None when advice was blocked or no
    # history existed yet, which breaks Gradio's output binding. Always
    # return a valid (possibly empty) transcript.
    current = history_final if history_final is not None else []
    return current, current

# --- Gradio UI wiring -----------------------------------------------------
demo = gr.Blocks()
with demo:
    gr.Markdown(
        """
    <center>    
        Chat with your Lawyer
    </center>
    """
    )
    # Per-session state: the chat history (list of (user, bot) pairs) and a
    # first-submit flag so predict() can reset the module-level globals.
    state = gr.Variable(value=[]) #beginning
    start_var = gr.Variable(value=True) #beginning
    chatbot = gr.Chatbot(color_map=("#00ff7f", "#00d5ff"))
    text = gr.Textbox(
        label="Talk to your lawyer (press enter to submit)",
        value="The other day it was raining, and while I was driving a hit a stranger with my car.",
        placeholder="reply Yes or No",
        max_lines=1,
    )
    # Enter submits the reply to predict(); a second handler then clears
    # the textbox for the next answer.
    text.submit(predict, [text, state, start_var], [chatbot, state])
    text.submit(lambda x: "", text, text)

    # The "submit" button requests the final legal advice from chatbot_foo().
    btn = gr.Button(value="submit")
    btn.click(chatbot_foo, None, [chatbot, state])
    # true_false_radio = gr.Radio(choices=["True", "False"], label="Select True or False")
    # iface = gr.Interface(fn=my_function, inputs=[text, true_false_radio], outputs=chatbot, live=True, capture_session=True)

# share=False keeps the app local (no public gradio.live tunnel).
demo.launch(share=False)