Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,31 +1,36 @@
|
|
| 1 |
import os

import gradio as gr
from azure.ai.inference import ChatCompletionsClient
from azure.ai.inference.models import (
    SystemMessage,
    ImageDetailLevel,
)
from azure.core.credentials import AzureKeyCredential
|
|
|
|
|
|
|
|
|
|
| 9 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 10 |
|
|
|
|
|
|
|
|
|
|
| 11 |
|
| 12 |
-
|
| 13 |
-
# Azure API credentials — loaded from the environment, never hard-coded.
# SECURITY: a live GitHub token ("ghp_...") was committed here in plain text;
# it must be revoked immediately and supplied via an environment variable /
# Hugging Face secret instead.
token = os.environ["AZURE_API_KEY"]
endpoint = "https://models.inference.ai.azure.com"
model_name = "gpt-4o"

# Initialize the ChatCompletionsClient.
# NOTE(review): the original omitted `endpoint=`, so the client had no target
# URL; pass it explicitly.
client = ChatCompletionsClient(
    endpoint=endpoint,
    credential=AzureKeyCredential(token),
)
|
| 22 |
|
| 23 |
-
#
|
| 24 |
def analyze_leaf_disease(image_path, leaf_type):
|
| 25 |
-
|
| 26 |
-
|
| 27 |
try:
|
| 28 |
-
# Prepare and send the request to the Azure API
|
| 29 |
response = client.complete(
|
| 30 |
messages=[
|
| 31 |
SystemMessage(
|
|
@@ -37,44 +42,67 @@ def analyze_leaf_disease(image_path, leaf_type):
|
|
| 37 |
ImageContentItem(
|
| 38 |
image_url=ImageUrl.load(
|
| 39 |
image_file=image_path,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 40 |
except Exception as e:
|
| 41 |
-
return f"
|
| 42 |
|
| 43 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 44 |
def handle_proceed(image_path, leaf_type):
|
| 45 |
-
|
| 46 |
-
detecting_status = "Detecting..."
|
| 47 |
-
result = analyze_leaf_disease(image_path, leaf_type)
|
| 48 |
-
# Clear detecting status after processing
|
| 49 |
-
return "", result
|
| 50 |
|
|
|
|
| 51 |
with gr.Blocks() as interface:
|
| 52 |
-
|
| 53 |
-
gr.Markdown("""
|
| 54 |
-
# Leaf Disease Detector
|
| 55 |
-
Upload a leaf image, select the leaf type, and let the AI analyze the disease.
|
| 56 |
-
""")
|
| 57 |
|
| 58 |
with gr.Row():
|
| 59 |
-
image_input = gr.Image(type="filepath", label="Upload
|
| 60 |
leaf_type = gr.Dropdown(
|
| 61 |
choices=["Tomato", "Tobacco", "Corn", "Paddy", "Maze", "Potato", "Wheat"],
|
| 62 |
-
label="Select Leaf Type",
|
| 63 |
)
|
| 64 |
-
proceed_button = gr.Button("
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
|
| 71 |
|
| 72 |
with gr.Row():
|
| 73 |
detecting_label = gr.Label("Detecting...", visible=False)
|
| 74 |
-
output_box = gr.Textbox(label="
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 75 |
|
| 76 |
-
#
|
| 77 |
proceed_button.click(handle_proceed, inputs=[image_input, leaf_type], outputs=[detecting_label, output_box])
|
|
|
|
|
|
|
| 78 |
|
| 79 |
if __name__ == "__main__":
|
| 80 |
-
interface.launch()
|
|
|
|
| 1 |
import gradio as gr
|
|
|
|
| 2 |
from azure.ai.inference import ChatCompletionsClient
|
| 3 |
from azure.ai.inference.models import (
|
| 4 |
SystemMessage,
|
| 5 |
+
UserMessage,
|
| 6 |
+
TextContentItem,
|
| 7 |
+
ImageContentItem,
|
| 8 |
+
ImageUrl,
|
| 9 |
ImageDetailLevel,
|
| 10 |
)
|
| 11 |
from azure.core.credentials import AzureKeyCredential
|
| 12 |
+
from gtts import gTTS
|
| 13 |
+
from deep_translator import GoogleTranslator
|
| 14 |
+
import os
|
| 15 |
|
| 16 |
+
# Securely load Azure credentials from environment variables.
# NOTE(review): the original called os.getenv() with the secret value and the
# endpoint URL themselves; os.getenv() takes the variable *name*, so both
# lookups always returned None and the validation below always raised. The
# names used here are exactly the ones cited in the error message.
token = os.getenv("AZURE_API_KEY")
endpoint = os.getenv("AZURE_ENDPOINT", "https://models.inference.ai.azure.com")
model_name = os.getenv("AZURE_MODEL_NAME", "gpt-4o")  # Optional: use secret or default to gpt-4o

# Validate credentials — fail fast with an actionable message.
if not (isinstance(token, str) and token.strip()) or not (isinstance(endpoint, str) and endpoint.strip()):
    raise ValueError("Azure API credentials are missing. Please set AZURE_API_KEY and AZURE_ENDPOINT in Hugging Face secrets.")
|
| 24 |
|
| 25 |
+
# β
Azure Client
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 26 |
# Azure chat-completions client, authenticated with the API key loaded above.
client = ChatCompletionsClient(
    credential=AzureKeyCredential(token),
    endpoint=endpoint,
)
|
| 30 |
|
| 31 |
+
# π Analyze disease
|
| 32 |
def analyze_leaf_disease(image_path, leaf_type):
|
|
|
|
|
|
|
| 33 |
try:
|
|
|
|
| 34 |
response = client.complete(
|
| 35 |
messages=[
|
| 36 |
SystemMessage(
|
|
|
|
| 42 |
ImageContentItem(
|
| 43 |
image_url=ImageUrl.load(
|
| 44 |
image_file=image_path,
|
| 45 |
+
image_format="jpg",
|
| 46 |
+
detail=ImageDetailLevel.LOW,
|
| 47 |
+
)
|
| 48 |
+
),
|
| 49 |
+
],
|
| 50 |
+
),
|
| 51 |
+
],
|
| 52 |
+
model=model_name,
|
| 53 |
+
)
|
| 54 |
+
return response.choices[0].message.content
|
| 55 |
+
except Exception as e:
|
| 56 |
+
return f"β Error: {e}"
|
| 57 |
+
|
| 58 |
+
# π Translate the analysis text into Bangla via Google Translate.
def translate_to_bangla(text):
    try:
        translator = GoogleTranslator(source="auto", target="bn")
        translated = translator.translate(text)
    except Exception as e:
        # Surface the failure in the output box instead of crashing the UI.
        return f"β Translation error: {e}"
    return translated
|
| 64 |
|
| 65 |
+
# π Render the result text as speech and return the path of the saved mp3.
def text_to_speech(text):
    output_path = "tts_output.mp3"
    try:
        gTTS(text).save(output_path)
    except Exception as e:
        # Surface the failure in the UI instead of raising.
        return f"β TTS error: {e}"
    return output_path
|
| 74 |
+
|
| 75 |
+
# π Main action: run the disease analysis for the proceed button.
def handle_proceed(image_path, leaf_type):
    analysis = analyze_leaf_disease(image_path, leaf_type)
    # First output ("") clears the "Detecting..." status label.
    return "", analysis
|
|
|
|
|
|
|
|
|
|
|
|
|
| 78 |
|
| 79 |
+
# πΏ Gradio App
|
| 80 |
with gr.Blocks() as interface:
|
| 81 |
+
gr.Markdown("# π Leaf Disease Detector\nUpload an image, select the leaf type, and analyze the disease. Listen or translate the result.")
|
|
|
|
|
|
|
|
|
|
|
|
|
| 82 |
|
| 83 |
with gr.Row():
|
| 84 |
+
image_input = gr.Image(type="filepath", label="πΈ Upload Leaf Image")
|
| 85 |
leaf_type = gr.Dropdown(
|
| 86 |
choices=["Tomato", "Tobacco", "Corn", "Paddy", "Maze", "Potato", "Wheat"],
|
| 87 |
+
label="πΏ Select Leaf Type",
|
| 88 |
)
|
| 89 |
+
proceed_button = gr.Button("π Analyze")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 90 |
|
| 91 |
with gr.Row():
|
| 92 |
detecting_label = gr.Label("Detecting...", visible=False)
|
| 93 |
+
output_box = gr.Textbox(label="π Result", placeholder="Analysis will appear here", lines=10)
|
| 94 |
+
|
| 95 |
+
with gr.Row():
|
| 96 |
+
tts_button = gr.Button("π Read Aloud")
|
| 97 |
+
tts_audio = gr.Audio(label="π§ Audio", autoplay=True)
|
| 98 |
+
|
| 99 |
+
translate_button = gr.Button("π Translate to Bangla")
|
| 100 |
+
translated_output = gr.Textbox(label="π Bangla Translation", placeholder="Translation will appear here", lines=10)
|
| 101 |
|
| 102 |
+
# Button logic
|
| 103 |
proceed_button.click(handle_proceed, inputs=[image_input, leaf_type], outputs=[detecting_label, output_box])
|
| 104 |
+
tts_button.click(text_to_speech, inputs=[output_box], outputs=[tts_audio])
|
| 105 |
+
translate_button.click(translate_to_bangla, inputs=[output_box], outputs=[translated_output])
|
| 106 |
|
| 107 |
if __name__ == "__main__":
|
| 108 |
+
interface.launch()
|