adi-123 committed
Commit e7bd9fb • 1 Parent(s): 3e74eb0

Update app.py

Files changed (1)
  1. app.py +100 -45
app.py CHANGED
@@ -1,48 +1,103 @@
  import os
- import requests
  import streamlit as st
  
- # API details
- API_URL = "https://api-inference.huggingface.co/models/openai-community/gpt2-xl"
- API_TOKEN = os.environ.get('HUGGINGFACEHUB_API_TOKEN')
- HEADERS = {"Authorization": f"Bearer {API_TOKEN}"}
-
- # Streamlit UI
- st.title("GPT-2 Movie Sentiment Analysis")
-
- # Input text for sentiment analysis
- input_text = st.text_area("Enter movie review:", "")
-
- # Choose analysis type
- analysis_type = st.radio("Select analysis type:", ["Zero-shot", "One-shot", "Few-shot"])
-
- if st.button("Analyze Sentiment"):
-     # Prepare payload for API request
-     if analysis_type == "Zero-shot":
-         payload = {"inputs": f"Label the text as either 'positive', 'negative', or 'mixed' related to a movie:\n\n{input_text}"}
-     elif analysis_type == "One-shot":
-         prompt = "Label the sentence as either 'positive', 'negative', or 'mixed' related to a movie:\n\n" \
-                  "Sentence: This movie exceeded my expectations.\nLabel: positive"
-         payload = {"inputs": f"{prompt} {input_text}"}
-     elif analysis_type == "Few-shot":
-         examples = [
-             "Sentence: The cinematography in this movie is outstanding.\nLabel: positive",
-             "Sentence: I didn't enjoy the plot twists in the movie.\nLabel: negative",
-             "Sentence: The acting was great, but the pacing felt off.\nLabel: mixed",
-             "Sentence: This movie didn't live up to the hype.\nLabel: negative",
-         ]
-         prompt = "Label the sentences as either 'positive', 'negative', or 'mixed' related to a movie:\n\n" + "\n".join(examples)
-         payload = {"inputs": f"{prompt}\n\n{input_text}"}
-
-     # Make API request
-     response = requests.post(API_URL, headers=HEADERS, json=payload)
-
-     # Print entire response for debugging
-     st.write("API Response:", response.json())
-
-     # Display results
-     if response.status_code == 200:
-         result = response.json()[0]  # Assuming the sentiment is in the first item of the list
-         st.write("Sentiment:", result.get('generated_text', 'N/A'))
-     else:
-         st.write("Error:", response.status_code, response.text)
+ # Imports
  import os
  import streamlit as st
+ import requests
+ from transformers import pipeline
+ import openai
+
+ # Suppress all warnings
+ import warnings
+ warnings.filterwarnings("ignore")
+
+ # Image-to-text
+ def img2txt(url):
+     print("Initializing captioning model...")
+     captioning_model = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
+
+     print("Generating text from the image...")
+     text = captioning_model(url, max_new_tokens=20)[0]["generated_text"]
+
+     print(text)
+     return text
+
+ # Text-to-story
+ def txt2story(img_text):
+     print("Initializing client...")
+     client = openai.OpenAI(
+         api_key=os.environ["TOGETHER_API_KEY"],
+         base_url='https://api.together.xyz',
+     )
+
+     messages = [
+         {"role": "system", "content": '''As an experienced short story writer, write a story title and then create a meaningful story influenced by the provided words.
+ Ensure the story concludes positively within 100 words.'''},
+         {"role": "user", "content": f"Here is the input set of words: {img_text}"},
+     ]
+
+     print("Generating story...")
+     chat_completion = client.chat.completions.create(
+         messages=messages,
+         model="togethercomputer/llama-2-70b-chat",
+         temperature=1.8,  # request-level sampling parameter; not valid inside message dicts
+     )
+
+     print(chat_completion.choices[0].message.content)
+     return chat_completion.choices[0].message.content
+
+
+ # Text-to-speech
+ def txt2speech(text):
+     print("Initializing text-to-speech conversion...")
+     API_URL = "https://api-inference.huggingface.co/models/espnet/kan-bayashi_ljspeech_vits"
+     headers = {"Authorization": f"Bearer {os.environ['HUGGINGFACEHUB_API_TOKEN']}"}
+     payloads = {'inputs': text}
+
+     response = requests.post(API_URL, headers=headers, json=payloads)
+     response.raise_for_status()  # surface API errors instead of writing an error body to disk
+
+     with open('audio_story.mp3', 'wb') as file:
+         file.write(response.content)
+
+
+ # Streamlit web app main function
+ def main():
+     st.set_page_config(page_title="🎨 Image-to-Audio Story 🎧", page_icon="🖼️")
+     st.title("Turn the Image into Audio Story")
+
+     # Allows users to upload an image file
+     uploaded_file = st.file_uploader("📷 Upload an image...", type=["jpg", "jpeg", "png"])
+
+     if uploaded_file is not None:
+         # Reads and saves the uploaded image file
+         bytes_data = uploaded_file.read()
+         with open("uploaded_image.jpg", "wb") as file:
+             file.write(bytes_data)
+
+         st.image(uploaded_file, caption='🖼️ Uploaded Image', use_column_width=True)
+
+         # Initiates AI processing and story generation
+         with st.spinner("🤖 AI is at work!"):
+             scenario = img2txt("uploaded_image.jpg")  # Extracts a caption from the image
+             story = txt2story(scenario)  # Generates a story based on the caption
+             txt2speech(story)  # Converts the story to audio
+
+         st.markdown("---")
+         st.markdown("## 📜 Image Caption")
+         st.write(scenario)
+
+         st.markdown("---")
+         st.markdown("## 📖 Story")
+         st.write(story)
+
+         st.markdown("---")
+         st.markdown("## 🎧 Audio Story")
+         st.audio("audio_story.mp3")
+
+ if __name__ == '__main__':
+     main()
+
+ # Credits
+ st.markdown("### Credits")
+ st.caption('''
+ Made with ❤️ by @Aditya-Neural-Net-Ninja\n
+ Utilizes Image-to-text, Text Generation, Text-to-speech Transformer Models\n
+ Gratitude to Streamlit, 🤗 Spaces for Deployment & Hosting
+ ''')
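
A minimal smoke-test sketch for the new pipeline, run outside of Streamlit. It assumes app.py is importable from the working directory, that TOGETHER_API_KEY and HUGGINGFACEHUB_API_TOKEN are exported in the environment, and that "sample.jpg" stands in for any local image; the helper name and image path are hypothetical, not part of the commit. Note that importing app.py runs its module-level credit calls, which may log bare-mode warnings outside streamlit run.

    # smoke_test.py (hypothetical helper, not part of the commit)
    from app import img2txt, txt2story, txt2speech

    caption = img2txt("sample.jpg")   # BLIP caption for a local image
    story = txt2story(caption)        # short story via the Together endpoint
    txt2speech(story)                 # writes audio_story.mp3 to the working directory
    print("Wrote audio_story.mp3")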