File size: 1,110 Bytes
7d51abb
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
from transformers import AutoImageProcessor, AutoModelForImageClassification
from PIL import Image
import torch
import numpy as np
from io import BytesIO  # Add this import statement

# Load the image processor and classifier once at module import so every call
# reuses the same weights. NOTE(review): `from_pretrained` will fetch the
# checkpoint from the HuggingFace Hub on first run — this import can block on
# network I/O; confirm that is acceptable for the serving setup.
processor = AutoImageProcessor.from_pretrained("dima806/medicinal_plants_image_detection")
model = AutoModelForImageClassification.from_pretrained("dima806/medicinal_plants_image_detection")

def read_image(file) -> Image.Image:
    """Decode raw image bytes into a PIL image.

    Args:
        file: Raw bytes of an encoded image (e.g. an uploaded file body).

    Returns:
        The decoded ``PIL.Image.Image`` (lazy — pixel data is read on first
        access).
    """
    buffer = BytesIO(file)
    return Image.open(buffer)

def transformacao(file: Image.Image, top_k: int = 3):
    """Classify a plant image and return the most likely species.

    Args:
        file: Input image to classify.
        top_k: Number of highest-probability classes to return. Defaults to 3
            (the original behavior) and is clamped to the number of classes
            the model actually has, so ``torch.topk`` cannot fail.

    Returns:
        A list of dicts ordered from most to least likely, each of the form
        ``{"class": <label>, "confidence": "NN.NN %"}``.
    """
    inputs = processor(images=file, return_tensors="pt", padding=True)

    # Inference only — no gradient tracking needed.
    with torch.no_grad():
        outputs = model(**inputs)

    # (1, num_classes) logits -> 1-D probability vector.
    probabilities = outputs.logits.softmax(dim=1).squeeze()

    # Clamp so a model with fewer than `top_k` classes does not raise.
    k = min(top_k, probabilities.shape[-1])
    top_probabilities, top_indices = torch.topk(probabilities, k)

    labels = model.config.id2label
    return [
        {
            "class": labels[idx.item()],
            "confidence": f"{prob.item()*100:0.2f} %",
        }
        for prob, idx in zip(top_probabilities, top_indices)
    ]