arjunvankani committed
Commit 7774178 · verified · 1 Parent(s): aeac287

Update app.py

Files changed (1)
  1. app.py +0 -17
app.py CHANGED
@@ -18,9 +18,6 @@ seg = pipeline("image-segmentation", model="facebook/mask2former-swin-base-coco-
 sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
 sd_pipe = sd_pipe.to("cuda" if torch.cuda.is_available() else "cpu")
 
-# --- Speech ---
-asr = pipeline("automatic-speech-recognition", model="openai/whisper-small")
-tts = pipeline("text-to-speech", model="espnet/kan-bayashi_ljspeech_vits")
 
 # --- Functions ---
 def classify_text(text):
@@ -48,12 +45,7 @@ def generate_image(prompt):
     image = sd_pipe(prompt).images[0]
     return image
 
-def transcribe(audio):
-    return asr(audio)["text"]
 
-def speak_text(text):
-    audio = tts(text)
-    return (audio["sample_rate"], audio["audio"])
 
 # --- Gradio Interface ---
 with gr.Blocks() as demo:
@@ -100,14 +92,5 @@ with gr.Blocks() as demo:
         gen_out = gr.Image(label="Generated Image")
         gr.Button("Generate").click(generate_image, gen_in, gen_out)
 
-    with gr.Tab("Speech Recognition"):
-        audio_in = gr.Audio(type="filepath")
-        audio_out = gr.Textbox(label="Transcription")
-        audio_in.change(transcribe, audio_in, audio_out)
-
-    with gr.Tab("Text-to-Speech"):
-        tts_in = gr.Textbox(label="Text to Speak")
-        tts_out = gr.Audio(label="Generated Speech")
-        gr.Button("Speak").click(speak_text, tts_in, tts_out)
 
 demo.launch()
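For orientation, the image-generation path that this commit keeps can be assembled from the diff context above into a small, self-contained sketch. The imports, the tab label, and the gen_in prompt textbox are assumptions filled in for the example; only sd_pipe, generate_image(), gen_out, and the Generate button come from the diff itself, and the full app.py is not shown here.

# Minimal sketch of the surviving image-generation tab, assuming the imports
# and input widget below; only sd_pipe and generate_image() appear in the diff.
import torch
import gradio as gr
from diffusers import StableDiffusionPipeline

sd_pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
)
sd_pipe = sd_pipe.to("cuda" if torch.cuda.is_available() else "cpu")

def generate_image(prompt):
    # Run the Stable Diffusion pipeline and return the first generated image.
    image = sd_pipe(prompt).images[0]
    return image

with gr.Blocks() as demo:
    with gr.Tab("Image Generation"):          # tab label assumed
        gen_in = gr.Textbox(label="Prompt")   # input widget assumed
        gen_out = gr.Image(label="Generated Image")
        gr.Button("Generate").click(generate_image, gen_in, gen_out)

demo.launch()

Note that the file loads the pipeline in torch.float16 regardless of device, so on a CPU-only machine the dtype would likely need to be switched to float32 for inference to work.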
 