riabayonaor committed
Commit 9f33da8
1 Parent(s): 5616571

Update app.py

Files changed (1)
  1. app.py +35 -26
app.py CHANGED
@@ -3,43 +3,52 @@ import requests
 from PIL import Image
 from io import BytesIO
 
-st.title("Cucumber Disease Prediction")
-uploaded_file = st.file_uploader("Upload a photo of a cucumber plant or cucumber", type=["jpg", "jpeg", "png"])
+# API configuration
+API_URL = "https://api-inference.huggingface.co/models/riabayonaor/modelo_prediccion_enfermedades_pepinos"
+META_LLAMA_API_URL = "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-8B-Instruct"
+headers = {"Authorization": "Bearer YOUR_HUGGINGFACE_API_KEY"}  # Replace with your API key
+
+def query(image_bytes):
+    response = requests.post(API_URL, headers=headers, data=image_bytes)
+    return response.json()
+
+def llama_query(prompt):
+    response = requests.post(META_LLAMA_API_URL, headers=headers, json={"inputs": prompt})
+    return response.json()
+
+# Streamlit interface
+st.title("Predicción de Enfermedades en Pepinos")
+uploaded_file = st.file_uploader("Sube una foto de una planta de pepino o un pepino", type=["jpg", "jpeg", "png"])
 
 if uploaded_file is not None:
     image = Image.open(uploaded_file)
-    st.image(image, caption='Uploaded Image.', use_column_width=True)
-    st.write("")
-    st.write("Classifying...")
+    st.image(image, caption='Imagen subida.', use_column_width=True)
+    st.write("Clasificando...")
 
-    # Convert image to bytes
+    # Convert the image to bytes
     img_byte_arr = BytesIO()
    image.save(img_byte_arr, format='PNG')
     img_byte_arr = img_byte_arr.getvalue()
 
-    # Send image to Hugging Face model
-    headers = {"Authorization": "Bearer YOUR_HUGGINGFACE_API_KEY"}
-    api_url = "https://api-inference.huggingface.co/models/riabayonaor/modelo_prediccion_enfermedades_pepinos"
-    response = requests.post(api_url, headers=headers, files={"file": img_byte_arr})
-
-    if response.status_code == 200:
-        predictions = response.json()
-        # Assuming the predictions are in the format [{label: "label1", score: 0.95}, {label: "label2", score: 0.05}]
+    # Send the image to the Hugging Face model
+    predictions = query(img_byte_arr)
+
+    if "error" not in predictions:
+        # Assuming the predictions are in the format [{label: "label1", score: 0.95}, {label: "label2", score: 0.05}]
         top_prediction = max(predictions, key=lambda x: x["score"])
-        st.write(f"Top prediction: {top_prediction['label']} with confidence {top_prediction['score']:.2f}")
+        st.write(f"Predicción principal: {top_prediction['label']} con confianza {top_prediction['score']:.2f}")
 
-        # Use top label for Meta Llama model
-        prompt = f"This disease is {top_prediction['label']}. Explain what it is and suggest possible insecticides or solutions."
-
-        # Call Meta Llama model
-        meta_llama_url = "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-8B-Instruct"
-        llama_response = requests.post(meta_llama_url, headers=headers, json={"inputs": prompt})
+        # Use the top label for the Meta Llama model
+        prompt = f"Esta enfermedad es {top_prediction['label']}. Explica qué es y sugiere posibles insecticidas o soluciones."
+
+        # Call the Meta Llama model
+        llama_response = llama_query(prompt)
 
-        if llama_response.status_code == 200:
-            explanation = llama_response.json()[0]["generated_text"]
-            st.write("Explanation and Possible Solutions:")
+        if "error" not in llama_response:
+            explanation = llama_response[0]["generated_text"]
+            st.write("Explicación y posibles soluciones:")
             st.write(explanation)
         else:
-            st.write("Failed to get a response from Meta Llama model.")
+            st.write("No se pudo obtener una respuesta del modelo Meta Llama.")
     else:
-        st.write("Failed to classify the image.")
+        st.write("No se pudo clasificar la imagen.")