File size: 3,950 Bytes
32a0c88
 
 
 
 
af091f0
666b18f
32a0c88
0a84bc5
308bbdb
 
3692424
dd6acae
308bbdb
 
8788308
 
308bbdb
fa449b5
 
0a84bc5
fa449b5
32a0c88
250da89
8788308
023be41
d7dcddc
3692424
32a0c88
 
3692424
c7e3a64
32a0c88
c7e3a64
32a0c88
3692424
 
 
32a0c88
eb3778e
32a0c88
 
 
eb3778e
32a0c88
 
 
3692424
 
32a0c88
f626afb
32a0c88
8788308
50bf796
 
68fdef8
3692424
68fdef8
32a0c88
8788308
32a0c88
165b6ba
8788308
32a0c88
 
e462381
7778534
3692424
8bf463f
0e9a343
8bf463f
32a0c88
 
8788308
 
 
 
32a0c88
3692424
 
8788308
 
421c741
3692424
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
import os
import torch
from PIL import Image
from torchvision import transforms
import gradio as gr
# Load the TorchScript model exported from training.
# Loaded on CPU by default; inference() moves it to GPU when one is available.
model = torch.jit.load('Net2_Flip_jit.pt', map_location = torch.device('cpu'))
model.eval()

# Class labels in the order of the model's output logits.
# TODO(review): verify this order matches the label encoding used at training time.
model_categories = ["cat","dog"] # verify order
n_categories = len(model_categories)

# Example images for the Gradio demo, downloaded once at startup.
# Keys are the local filenames referenced by the `examples` list below.
_EXAMPLE_IMAGE_URLS = {
    "dog1.jpg": "https://upload.wikimedia.org/wikipedia/commons/5/5b/Dog_%28Canis_lupus_familiaris%29_%281%29.jpg",
    "dog2.jpg": "https://upload.wikimedia.org/wikipedia/commons/thumb/6/6e/Golde33443.jpg/640px-Golde33443.jpg",
    "cat1.jpg": "https://upload.wikimedia.org/wikipedia/commons/c/c7/Tabby_cat_with_blue_eyes-3336579.jpg",
    "cat2.jpg": "https://upload.wikimedia.org/wikipedia/commons/9/9e/Domestic_cat.jpg",
}
for _filename, _url in _EXAMPLE_IMAGE_URLS.items():
    torch.hub.download_url_to_file(_url, _filename)

def inference(input_image):
    """Classify a PIL image as "cat" or "dog".

    Args:
        input_image: a PIL.Image supplied by the Gradio image input.

    Returns:
        dict mapping each category name to its softmax probability,
        in the shape expected by gr.Label.
    """
    preprocess = transforms.Compose([
        # The model expects 256x256 inputs; resizing both dimensions here
        # (transforms.Resize(256) would only fix the shorter side).
        transforms.Resize(size = (256, 256)),
        transforms.ToTensor(),
    ])

    input_tensor = preprocess(input_image)
    input_batch = input_tensor.unsqueeze(0)  # model expects a batch dimension

    # Move the input and model to GPU for speed if available.
    # NOTE: model.to('cuda') mutates the module-level model in place.
    if torch.cuda.is_available():
        input_batch = input_batch.to('cuda')
        model.to('cuda')

    with torch.no_grad():
        output = model(input_batch)

    # output[0] is the 1-D logit vector for the single image; pass dim
    # explicitly — softmax without `dim` is deprecated and ambiguous.
    probabilities = torch.nn.functional.softmax(output[0], dim = 0)

    # Rank every category by probability for the label widget.
    top_prob, top_catid = torch.topk(probabilities, n_categories)
    return {
        model_categories[top_catid[i]]: top_prob[i].item()
        for i in range(top_prob.size(0))
    }

# --- Gradio UI wiring -------------------------------------------------------
image_input = gr.Image(type='pil')
label_output = gr.Label(num_top_classes=n_categories)

app_title = "STAT 430 Final Project App -- Made by Group DHZ"
app_description = "This is our Cat & Dog Classifier for the final project, and the model we use is generated by our second neural network augmented by the flipping technique, which is would give the best accuracy. To use it, simply upload your image, or click one of the examples to load them. The authors are Xiongjie Dai (xdai12), Yu He (yuhe6), Mengjia Zeng (mengjia6)."

# Local files fetched at startup; each inner list is one example row.
example_rows = [
  ['dog1.jpg'], 
  ['cat1.jpg'], 
  ['dog2.jpg'], 
  ['cat2.jpg']
]

demo = gr.Interface(
  fn = inference,
  inputs = image_input,
  outputs = label_output,
  title = app_title,
  description = app_description,
  examples = example_rows,
  analytics_enabled = False,
)
demo.launch()