import streamlit as st
import torch
#from urllib.parse import urlparse, parse_qs
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
#from youtube_transcript_api import YouTubeTranscriptApi
from util import get_video_id, get_youtube_subtitle

# Run inference on the GPU when one is available.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Path to the fine-tuned seq2seq summarization checkpoint inside the Space.
m_name = '/home/user/app/model'
#m_name = '../model'
tokenizer = AutoTokenizer.from_pretrained(m_name)
model = AutoModelForSeq2SeqLM.from_pretrained(m_name)
model.to(device)
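
# `get_video_id` and `get_youtube_subtitle` come from util.py, which is not shown
# here. A minimal sketch of what they could look like, assuming they are built on
# urllib.parse and youtube_transcript_api (both hinted at by the commented-out
# imports above); the real helpers may differ:
#
#   def get_video_id(url):
#       """Return the video id from a YouTube URL, or None if it cannot be parsed."""
#       parsed = urlparse(url)
#       if parsed.hostname == 'youtu.be':
#           return parsed.path.lstrip('/') or None
#       if parsed.hostname in ('www.youtube.com', 'youtube.com'):
#           return parse_qs(parsed.query).get('v', [None])[0]
#       return None
#
#   def get_youtube_subtitle(video_id):
#       """Return the transcript as a single string, or None if unavailable."""
#       try:
#           segments = YouTubeTranscriptApi.get_transcript(video_id)
#       except Exception:
#           return None
#       return ' '.join(segment['text'] for segment in segments)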
if __name__ == "__main__":
    st.header("Annotation of subtitles from YouTube")

    # Offer a few example videos and let the user paste their own URL.
    option = st.selectbox(
        'Example videos:',
        ('https://www.youtube.com/watch?v=HGSVsK32rKA',
         'https://www.youtube.com/watch?v=fSpARfZ3I50',
         'https://www.youtube.com/watch?v=3lEMopaRSjw')
    )
    url = st.text_input(':green[Enter the URL of a YouTube video] 👇', option)

    video_id = get_video_id(url)
    if video_id is not None:
        subtitle = get_youtube_subtitle(video_id)
        if subtitle is not None:
            st.subheader('Subtitles')
            st.markdown(subtitle)

            # Tokenize the subtitles, padding/truncating to the model's input length.
            inputs = tokenizer(
                [subtitle],
                max_length=1000,
                padding="max_length",
                truncation=True,
                return_tensors="pt",
            )["input_ids"]

            if st.button('Compute summary', help='Click me'):
                # Greedy decoding of a short summary.
                outputs = model.generate(inputs.to(device), max_new_tokens=100, do_sample=False)
                summary = tokenizer.decode(outputs[0], skip_special_tokens=True)
                st.subheader('Summary')
                st.markdown(summary)
        else:
            st.markdown(':red[Subtitles are disabled for this video]')
    else:
        st.markdown(':red[No video detected in this URL]')
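
# To try this locally (assuming the script is saved as app.py and a compatible
# seq2seq checkpoint is available at m_name):
#   pip install streamlit torch transformers youtube-transcript-api
#   streamlit run app.py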