File size: 2,397 Bytes
cc2101c
 
 
 
 
 
 
 
 
 
 
ea4e88a
db73536
cc2101c
1ccb976
 
0f0ea0c
 
1ccb976
0f0ea0c
 
44ca16b
cc2101c
087e3b8
cc2101c
 
087e3b8
cc2101c
087e3b8
cc2101c
 
 
 
db73536
cc2101c
 
 
 
 
db73536
 
 
cc2101c
496d856
b802fa4
cc2101c
 
 
 
478d560
a3a731c
a668eef
5af3e9f
478d560
 
 
 
 
 
 
 
b720019
cc2101c
 
 
44ca16b
 
 
 
 
db73536
aa12e2e
 
 
 
 
 
cc2101c
db73536
5f3ef5b
 
44ca16b
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
import argparse
import itertools
import math
import os
from pathlib import Path
from typing import Optional
import subprocess
import sys

import torch


from spanish_medica_llm import run_training, run_training_process, run_finnetuning_process, generate_response

import gradio as gr

#def greet(name):
#    return "Hello " + name + "!!"

#iface = gr.Interface(fn=greet, inputs="text", outputs="text")
#iface.launch()

def generate_model(name):
    """Return a greeting string that embeds the HG_FACE_TOKEN env value.

    NOTE(review): the *name* parameter is unused — the message echoes the
    token read from the environment instead; confirm this is intentional.
    """
    token = os.environ.get('HG_FACE_TOKEN')
    return f"Welcome to Gradio HF_ACCES_TOKEN, {token}!"
    
def generate(prompt):
    """Generate an image for *prompt* with the local diffusion pipeline.

    The pipeline construction is currently disabled (see the commented-out
    StableDiffusionPipeline lines below), so the original body crashed with
    an UnboundLocalError on the undefined ``pipe``. Raise a clear, explicit
    error instead so callers see why generation is unavailable.

    Raises:
        RuntimeError: always, until the pipeline construction is re-enabled.
    """
    #from diffusers import StableDiffusionPipeline

    #pipe = StableDiffusionPipeline.from_pretrained("./output_model", torch_dtype=torch.float16)
    #pipe = pipe.to("cuda")
    #image = pipe(prompt).images[0]
    #return(image)
    raise RuntimeError(
        "Image generation pipeline is not initialized: the "
        "StableDiffusionPipeline construction is commented out."
    )
    
def evaluate_model(input):
    """Return the model's generated response for the given *input* prompt.

    Thin wrapper around ``generate_response`` imported from
    spanish_medica_llm; forwards the prompt unchanged.
    """
    return generate_response(input)




def train_model(*inputs):
    """Kick off the full training run and return a status message.

    Args:
        *inputs: ignored; accepted so the function can be wired to a
            Gradio button with arbitrary inputs.

    Raises:
        gr.Error: when running inside a shared Space (IS_SHARED_UI set),
            since training requires a dedicated, duplicated instance.
    """
    if "IS_SHARED_UI" in os.environ:
        raise gr.Error("This Space only works in duplicated instances")

    run_training_process()

    # Fixed typo in the user-facing status message ("Sucessful").
    return "Train Model Successful!!!"

def finnetuning_model(*inputs):
    """Kick off the fine-tuning run and return a status message.

    Args:
        *inputs: ignored; accepted so the function can be wired to a
            Gradio button with arbitrary inputs.

    Raises:
        gr.Error: when running inside a shared Space (IS_SHARED_UI set),
            since fine-tuning requires a dedicated, duplicated instance.
    """
    if "IS_SHARED_UI" in os.environ:
        raise gr.Error("This Space only works in duplicated instances")

    run_finnetuning_process()

    # Fixed typo in the user-facing status message ("Sucessful").
    return "Finnetuning Model Successful!!!"


def stop_model(*input):
    """Placeholder stop handler: ignores its arguments and returns a
    static status message."""
    message = "Model with Gradio!"
    return message

# Assemble the Gradio UI: one input textbox, one output textbox, and an
# "Evaluate Model" button that routes the input through evaluate_model.
# The other buttons (response/train/finetune/stop) are currently disabled.
with gr.Blocks() as demo:
    gr.Markdown("Start typing below and then click **Run** to see the output.")
    with gr.Row():
        inp = gr.Textbox(placeholder="What is your name?")
        out = gr.Textbox()

    # btn_response = gr.Button("Generate Response")
    # btn_response.click(fn=generate_model, inputs=inp, outputs=out)
    # btn_train = gr.Button("Train Model")
    # btn_train.click(fn=train_model, inputs=[], outputs=out)
    # btn_finnetuning = gr.Button("Finnetuning Model")
    # btn_finnetuning.click(fn=finnetuning_model, inputs=[], outputs=out)
    btn_evaluate = gr.Button("Evaluate Model")
    # Clicking sends the text in `inp` through evaluate_model and shows
    # the returned text in `out`.
    btn_evaluate.click(fn=evaluate_model, inputs=inp, outputs=out)
    # btn_stop = gr.Button("Stop Model")
    # btn_stop.click(fn=stop_model, inputs=[], outputs=out)

# Start the Gradio server (blocking call).
demo.launch()