fffiloni committed
Commit a59f564 • 1 Parent(s): 9f2757b

added title and description

Files changed (1)
  1. app.py +8 -2
app.py CHANGED

```diff
@@ -29,7 +29,13 @@ diffuser_pipeline = diffuser_pipeline.to(device)
 
 #————————————————————————————————————————————————
 # GRADIO SETUP
-
+title = "Speech to Diffusion • Community Pipeline"
+description = """
+<p style='text-align: center;'>This demo can generate an image from an audio sample using pre-trained OpenAI whisper-small and Stable Diffusion.
+Community examples consist of both inference and training examples that have been added by the community.
+<a href='https://github.com/huggingface/diffusers/tree/main/examples/community#speech-to-image' target='_blank'> Click here for more information about community pipelines </a>
+</p>
+"""
 audio_input = gr.Audio(source="microphone", type="numpy")
 image_output = gr.Image()
 
@@ -45,5 +51,5 @@ def speech_to_text(audio_sample):
 
     return output.images[0]
 
-demo = gr.Interface(fn=speech_to_text, inputs=audio_input, outputs=image_output)
+demo = gr.Interface(fn=speech_to_text, inputs=audio_input, outputs=image_output, title=title, description=description)
 demo.launch()
```
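For context, the first hunk header shows that app.py builds a `diffuser_pipeline` and moves it to `device` before the Gradio setup touched by this commit, and the new `title=` / `description=` keyword arguments are rendered by Gradio above the audio input and image output (the description string may contain HTML, as the `<p>` tag shows). Below is a minimal sketch of how such a pipeline is typically assembled with the diffusers speech-to-image community pipeline linked in the description; the model IDs and keyword arguments follow the diffusers community examples README and are assumptions, not code taken from this commit.

```python
# Minimal sketch, assuming the app wires Whisper and Stable Diffusion together via the
# "speech_to_image_diffusion" community pipeline linked in the description above.
# Model IDs and keyword arguments are illustrative, not taken from this repository.
import torch
from diffusers import DiffusionPipeline
from transformers import WhisperForConditionalGeneration, WhisperProcessor

device = "cuda" if torch.cuda.is_available() else "cpu"

# Whisper transcribes the microphone audio; Stable Diffusion renders the transcript.
speech_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small")
speech_processor = WhisperProcessor.from_pretrained("openai/whisper-small")

# The community pipeline receives the speech model and processor as extra kwargs.
diffuser_pipeline = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="speech_to_image_diffusion",
    speech_model=speech_model,
    speech_processor=speech_processor,
)
diffuser_pipeline = diffuser_pipeline.to(device)
```

With the pipeline built this way, the `speech_to_text` callback shown in the diff can pass the recorded audio to `diffuser_pipeline` and return `output.images[0]` to the `gr.Image` output.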