kxx-kkk committed on
Commit
569f8f6
1 Parent(s): 7af941b

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -8
app.py CHANGED
@@ -2,14 +2,14 @@ import streamlit as st
2
  from transformers import pipeline
3
  from transformers import AutoModelForQuestionAnswering, AutoTokenizer
4
 
5
- # set page title
6
- st.set_page_config(page_title="Automated Question Answering System")
7
 
8
- # heading and description
9
- st.markdown("<h2 style='text-align: center;'>Question Answering on Academic Essays</h2>", unsafe_allow_html=True)
10
- st.markdown("<h3 style='text-align: left; color:#F63366; font-size:18px;'><b>What is extractive question answering about?<b></h3>", unsafe_allow_html=True)
11
- st.write("Extractive question answering is a Natural Language Processing task where text is provided for a model so that the model can refer to it and make predictions about where the answer to a question is.")
12
 
 
 
 
 
 
13
 
14
  # store the model in cache resources to enhance efficiency (ref: https://docs.streamlit.io/library/advanced-features/caching)
15
  @st.cache_resource(show_spinner=True)
@@ -55,8 +55,9 @@ with tab1:
55
  with st.spinner(text="Getting answer..."):
56
  answer = question_answerer(context=context, question=question)
57
  answer = answer["answer"]
 
58
  container = st.container(border=True)
59
- container.write("<h5><b>Answer:</b></h5>" + answer, unsafe_allow_html=True)
60
 
61
 
62
  # if upload file as input
@@ -78,5 +79,9 @@ with tab2:
78
  with st.spinner(text="Getting answer..."):
79
  answer = question_answerer(context=context, question=question)
80
  answer = answer["answer"]
81
- st.success(answer)
 
 
 
 
82
 
 
2
  from transformers import pipeline
3
  from transformers import AutoModelForQuestionAnswering, AutoTokenizer
4
 
 
 
5
 
6
+ st.set_page_config(page_title="Automated Question Answering System") # set page title
 
 
 
7
 
8
+ # heading
9
+ st.markdown("<h2 style='text-align: center;'>Question Answering on Academic Essays</h2>", unsafe_allow_html=True)
10
+ # description
11
+ st.markdown("<h3 style='text-align: left; color:#F63366; font-size:18px;'><b>What is extractive question answering about?<b></h3>", unsafe_allow_html=True)
12
+ st.write("Extractive question answering is a Natural Language Processing task where text is provided for a model so that the model can refer to it and make predictions about where the answer to a question is.")
13
 
14
  # store the model in cache resources to enhance efficiency (ref: https://docs.streamlit.io/library/advanced-features/caching)
15
  @st.cache_resource(show_spinner=True)
 
55
  with st.spinner(text="Getting answer..."):
56
  answer = question_answerer(context=context, question=question)
57
  answer = answer["answer"]
58
+ # display the result in container
59
  container = st.container(border=True)
60
+ container.write("<h5><b>Answer:</b></h5>" + answer + "<br>", unsafe_allow_html=True)
61
 
62
 
63
  # if upload file as input
 
79
  with st.spinner(text="Getting answer..."):
80
  answer = question_answerer(context=context, question=question)
81
  answer = answer["answer"]
82
+ # display the result in container
83
+ container = st.container(border=True)
84
+ container.write("<h5><b>Answer:</b></h5>" + answer + "<br>", unsafe_allow_html=True)
85
+
86
+ st.markdown("<br><br><br><br><br>", unsafe_allow_html=True)
87