adi-123's picture
Update app.py
424c4c2 verified
raw
history blame
7.04 kB
import os
import streamlit as st
import requests
from transformers import pipeline
from typing import Dict
from together import Together
# Image-to-text
def img2txt(url: str, max_new_tokens: int = 20) -> str:
    """Generate a short caption for the image at *url* using BLIP.

    Args:
        url: Path or URL of the image to caption.
        max_new_tokens: Cap on caption length; default 20 preserves the
            original hard-coded limit.

    Returns:
        The generated caption text.
    """
    print("Initializing captioning model...")
    # NOTE(review): the pipeline is rebuilt on every call; if this becomes a
    # bottleneck, consider caching it (e.g. st.cache_resource).
    captioning_model = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
    print("Generating text from the image...")
    text = captioning_model(url, max_new_tokens=max_new_tokens)[0]["generated_text"]
    print(text)
    return text
# Text-to-story generation with LLM model
def txt2story(prompt: str, top_k: int, top_p: float, temperature: float) -> str:
    """Generate a short (<=250 word) story from *prompt* via the Together API.

    Args:
        prompt: User-assembled story prompt.
        top_k: Top-K sampling parameter.
        top_p: Nucleus (top-p) sampling parameter.
        temperature: Sampling temperature.

    Returns:
        The complete generated story text.
    """
    # Requires TOGETHER_API_KEY in the environment.
    client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
    # Reinforce the 250-word limit in the user message as well as the system prompt.
    story_prompt = f"Write a short story of no more than 250 words based on the following prompt: {prompt}"
    stream = client.chat.completions.create(
        model="meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
        messages=[
            {"role": "system", "content": '''As an experienced short story writer, write a meaningful story influenced by the provided prompt.
Ensure the story is full of positive inspiration & enthusiasm and concludes with a happy ending.
Ensure the story does not exceed 250 words.'''},
            {"role": "user", "content": story_prompt}
        ],
        top_k=top_k,
        top_p=top_p,
        temperature=temperature,
        stream=True
    )
    # Accumulate streamed chunks. Guard against None deltas: streaming APIs
    # emit role/finish chunks whose .content is None, which would raise
    # TypeError under plain string concatenation. Use join to avoid
    # quadratic += string building.
    parts = []
    for chunk in stream:
        delta = chunk.choices[0].delta.content if chunk.choices else None
        if delta:
            parts.append(delta)
    return ''.join(parts)
# Text-to-speech
def txt2speech(text: str) -> None:
    """Convert *text* to speech via the HF Inference API.

    Writes the resulting audio bytes to 'audio_story.mp3' in the working
    directory (the filename other parts of the app read from).

    Raises:
        requests.HTTPError: if the inference API returns an error status.
        KeyError: if HUGGINGFACEHUB_API_TOKEN is not set in the environment.
    """
    print("Initializing text-to-speech conversion...")
    API_URL = "https://api-inference.huggingface.co/models/espnet/kan-bayashi_ljspeech_vits"
    headers = {"Authorization": f"Bearer {os.environ['HUGGINGFACEHUB_API_TOKEN']}"}
    payloads = {'inputs': text}
    # Bound the request so a hung API call cannot freeze the app indefinitely.
    response = requests.post(API_URL, headers=headers, json=payloads, timeout=120)
    # Fail loudly instead of silently writing an error payload (JSON bytes)
    # into the audio file, which would make st.audio fail downstream.
    response.raise_for_status()
    with open('audio_story.mp3', 'wb') as file:
        file.write(response.content)
# Story translation function
# The translation model is mBART-50-based and expects target-language codes
# such as "hi_IN"/"fr_XX" for tgt_lang, not plain English names. Map the
# UI's language names to the codes the model understands.
_MBART_LANG_CODES = {
    "English": "en_XX",
    "Hindi": "hi_IN",
    "Spanish": "es_XX",
    "French": "fr_XX",
    "German": "de_DE",
}

def translate_story(story: str, target_language: str) -> str:
    """Translate *story* into *target_language*.

    Args:
        story: The English story text to translate.
        target_language: Human-readable language name from the UI (e.g.
            "Hindi"); mapped to the model's tgt_lang code. Unknown names
            are passed through unchanged.

    Returns:
        The translated story text.
    """
    # NOTE(review): rebuilt per call; cache if translation is used often.
    translation_model = pipeline("translation", model="SnypzZz/Llama2-13b-Language-translate")
    print(f"Translating the story to {target_language}...")
    tgt_lang = _MBART_LANG_CODES.get(target_language, target_language)
    translated_story = translation_model(story, max_length=400, tgt_lang=tgt_lang)[0]['translation_text']
    return translated_story
# Get user preferences for the story
def get_user_preferences() -> Dict[str, str]:
    """Render the story-preference selectboxes and collect the choices.

    Returns:
        Mapping of preference key (e.g. 'genre', 'tone') to the option the
        user selected for it.
    """
    # (key, widget label, options) — rendered in this exact order.
    selections = [
        ("continent", "Continent", ["North America", "Europe", "Asia", "Africa", "Australia"]),
        ("language", "Language", ["English", "Hindi", "Spanish", "French", "German"]),
        ("genre", "Genre", ["Science Fiction", "Fantasy", "Mystery", "Romance"]),
        ("setting", "Setting", ["Future", "Medieval times", "Modern day", "Alternate reality"]),
        ("plot", "Plot", ["Hero's journey", "Solving a mystery", "Love story", "Survival"]),
        ("tone", "Tone", ["Serious", "Light-hearted", "Humorous", "Dark"]),
        ("theme", "Theme", ["Self-discovery", "Redemption", "Love", "Justice"]),
        ("conflict", "Conflict Type", ["Person vs. Society", "Internal struggle", "Person vs. Nature", "Person vs. Person"]),
        ("magic_tech", "Magic/Technology", ["Advanced technology", "Magic system", "Supernatural abilities", "Alien technology"]),
        ("twist", "Mystery/Twist", ["Plot twist", "Hidden identity", "Unexpected ally/enemy", "Time paradox"]),
        ("ending", "Ending", ["Bittersweet", "Happy", "Open-ended", "Tragic"]),
    ]
    return {key: st.selectbox(label, options) for key, label, options in selections}
# Main function
def main():
    """Streamlit entry point.

    Flow: upload image -> BLIP caption -> preference-driven prompt ->
    LLM story -> optional translation -> TTS audio, with the caption,
    story, and audio rendered on the page.
    """
    st.set_page_config(page_title="🎨 Image-to-Audio Story 🎧", page_icon="πŸ–ΌοΈ")
    st.title("Turn the Image into Audio Story")

    # Image upload (jpg/jpeg/png).
    uploaded_file = st.file_uploader("# πŸ“· Upload an image...", type=["jpg", "jpeg", "png"])

    # LLM sampling parameters (sidebar).
    st.sidebar.markdown("# LLM Inference Configuration Parameters")
    top_k = st.sidebar.number_input("Top-K", min_value=1, max_value=100, value=5)
    top_p = st.sidebar.number_input("Top-P", min_value=0.0, max_value=1.0, value=0.8)
    temperature = st.sidebar.number_input("Temperature", min_value=0.1, max_value=2.0, value=1.5)

    # Story preference widgets.
    st.markdown("## Story Preferences")
    preferences = get_user_preferences()

    if uploaded_file is not None:
        # Persist the upload so the captioning pipeline can read it from disk.
        # NOTE(review): the file is always saved as .jpg even for PNG uploads;
        # the captioning model reads by content so this works, but the name
        # is misleading — confirm before relying on the extension elsewhere.
        bytes_data = uploaded_file.read()
        with open("uploaded_image.jpg", "wb") as file:
            file.write(bytes_data)
        st.image(uploaded_file, caption='πŸ–ΌοΈ Uploaded Image', use_column_width=True)

        with st.spinner("## πŸ€– AI is at Work! "):
            scenario = img2txt("uploaded_image.jpg")  # Extracts caption text from the image

            # Build the story prompt from the caption and user preferences.
            # Fix: the 'plot' preference was collected but never used in the
            # prompt; it is now included.
            prompt = f"Based on the image description: '{scenario}', create a {preferences['genre']} story set in {preferences['setting']}. " \
                     f"The story should have a {preferences['tone']} tone and explore the theme of {preferences['theme']}. " \
                     f"The plot should follow {preferences['plot']}. " \
                     f"The main conflict should be {preferences['conflict']}. " \
                     f"Include {preferences['magic_tech']} as a key element. " \
                     f"The story should have a {preferences['twist']} and end with a {preferences['ending']} ending."

            story = txt2story(prompt, top_k, top_p, temperature)

            # Translate only when the user chose a non-English language.
            if preferences['language'] != "English":
                story = translate_story(story, preferences['language'])

            txt2speech(story)  # Writes audio_story.mp3

        st.markdown("---")
        st.markdown("## πŸ“œ Image Caption")
        st.write(scenario)
        st.markdown("---")
        st.markdown("## πŸ“– Story")
        st.write(story)
        st.markdown("---")
        st.markdown("## 🎧 Audio Story")
        st.audio("audio_story.mp3")
# Entry point: Streamlit executes the module top-to-bottom on each rerun,
# so this guard fires on every run of the app.
if __name__ == '__main__':
    main()

# Credits footer — runs at module level, so it is rendered on every rerun
# regardless of whether an image was uploaded.
st.markdown("### Credits")
st.caption('''
Made with ❀️ by @Aditya-Neural-Net-Ninja\n
Utilizes Image-to-Text, Text Generation, Text-to-Speech Transformer Models\n
Gratitude to Streamlit, πŸ€— Spaces for Deployment & Hosting
''')