# speech-to-image / app.py
import gradio as gr
import torch
from diffusers import DiffusionPipeline
from transformers import (
    WhisperForConditionalGeneration,
    WhisperProcessor,
)
import os

# Hugging Face access token used to download the Stable Diffusion weights
MY_SECRET_TOKEN = os.environ.get("HF_TOKEN_SD")

device = "cuda" if torch.cuda.is_available() else "cpu"
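
# Whisper handles the speech-to-text half of the pipeline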
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small").to(device)
processor = WhisperProcessor.from_pretrained("openai/whisper-small")
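
# Community "speech_to_image_diffusion" pipeline: Whisper transcribes the recording,
# and the transcript is used as the Stable Diffusion prompt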
diffuser_pipeline = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="speech_to_image_diffusion",
    speech_model=model,
    speech_processor=processor,
    use_auth_token=MY_SECRET_TOKEN,
    # revision="fp16",
    # torch_dtype=torch.float16,
)
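
# Attention slicing trades a little speed for a lower peak VRAM footprint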
diffuser_pipeline.enable_attention_slicing()
diffuser_pipeline = diffuser_pipeline.to(device)

# --------------------------------------------------
# GRADIO SETUP
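
# Microphone recording is delivered to the callback as raw numpy audio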
audio_input = gr.Audio(source="microphone", type="numpy")
image_output = gr.Image()

def speech_to_text(audio_sample):
    # The custom pipeline transcribes the recording and generates an image from it
    print(audio_sample)
    output = diffuser_pipeline(audio_sample)
    return output.images[0]

demo = gr.Interface(fn=speech_to_text, inputs=audio_input, outputs=image_output)
demo.launch()