import streamlit as st
import torch
from typing import Optional
from urllib.parse import urlparse, parse_qs
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
# https://pypi.org/project/youtube-transcript-api/
from youtube_transcript_api import YouTubeTranscriptApi
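

# App overview: the page below takes a YouTube URL, pulls the Russian
# subtitle track, and summarizes it with a locally stored seq2seq model.
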
def get_video_id(url: str) -> Optional[str]:
    """
    Extract the video id from a YouTube URL, e.g.:
    - http://youtu.be/SA2iWivDJiE
    - http://www.youtube.com/watch?v=_oPAwA_Udwc&feature=feedu
    - http://www.youtube.com/embed/SA2iWivDJiE
    - http://www.youtube.com/v/SA2iWivDJiE?version=3&hl=en_US
    Returns None if the URL is not recognized.
    """
    query = urlparse(url.strip())
    if query.hostname == 'youtu.be':
        # Short links carry the id as the whole path: /<id>
        return query.path[1:]
    if query.hostname in ('www.youtube.com', 'youtube.com'):
        if query.path == '/watch':
            # Standard links carry the id in the ?v= query parameter
            p = parse_qs(query.query)
            return p.get('v', [None])[0]
        if query.path.startswith('/embed/'):
            return query.path.split('/')[2]
        if query.path.startswith('/v/'):
            return query.path.split('/')[2]
    return None
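

# Download the Russian transcript with youtube-transcript-api (no API key
# needed) and join the cues into one plain-text string.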
def get_youtube_subtitle(video_id: str) -> Optional[str]:
    try:
        # Raises if subtitles are disabled or no Russian track exists
        parse = YouTubeTranscriptApi.get_transcript(video_id, languages=['ru'])
        result = ''
        for i in parse:
            # Skip non-speech cues such as "[музыка]" or "[аплодисменты]"
            if i['text'].startswith('[') and i['text'].endswith(']'):
                continue
            result += ' ' + i['text']
        result = result.strip()
        if not result:
            return None
        # Capitalize the first letter of the joined transcript
        return result[0].upper() + result[1:]
    except Exception:
        return None
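

# Model setup: pick the GPU when available and load the summarization
# checkpoint stored locally with the app.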
device = "cuda" if torch.cuda.is_available() else "cpu"
m_name = '/home/user/app/model'
#m_name = '../model'
tokenizer = AutoTokenizer.from_pretrained(m_name)
model = AutoModelForSeq2SeqLM.from_pretrained(m_name)
model.to(device)
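

# Streamlit UI: show the subtitles for the given URL and generate a summary
# when the button is pressed.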
if __name__ == "__main__":
    st.header("Annotation of subtitles from YouTube")
    url = st.text_input('Enter the URL of the Youtube video', 'https://www.youtube.com/watch?v=HGSVsK32rKA')
    st.text("""
    Examples:
    https://www.youtube.com/watch?v=HGSVsK32rKA
    https://www.youtube.com/watch?v=fSpARfZ3I50
    https://www.youtube.com/watch?v=3lEMopaRSjw
    """)
    video_id = get_video_id(url)
    if video_id is not None:
        subtitle = get_youtube_subtitle(video_id)
        if subtitle is not None:
            st.subheader('Subtitles')
            st.markdown(subtitle)
            # Tokenize the full transcript, truncating to the model's input limit
            inputs = tokenizer(
                [subtitle],
                max_length=1000,
                padding="max_length",
                truncation=True,
                return_tensors="pt",
            )["input_ids"]
            if st.button('Compute summary', help='Click me'):
                # Greedy decoding; the summary is capped at 100 new tokens
                outputs = model.generate(inputs.to(device), max_new_tokens=100, do_sample=False)
                summary = tokenizer.decode(outputs[0], skip_special_tokens=True)
                st.subheader('Summary')
                st.markdown(summary)
        else:
            st.markdown(':red[Subtitles are disabled for this video]')
    else:
        st.markdown(':red[Video clip is not detected]')