import streamlit as st
from PIL import Image
from surya.ocr import run_ocr
from surya.model.detection.model import load_model as load_det_model, load_processor as load_det_processor
from surya.model.recognition.model import load_model as load_rec_model
from surya.model.recognition.processor import load_processor as load_rec_processor
import re
from transformers import AutoModel, AutoTokenizer
import torch
import tempfile
import os

# os.environ["CUDA_VISIBLE_DEVICES"] = ""

st.set_page_config(page_title="OCR Application", page_icon="🖼️", layout="wide")

# device = "cuda" if torch.cuda.is_available() else "cpu"
device = "cpu"

# Load the Surya detection and recognition models at module level
# (the @st.cache_resource wrappers are left disabled).
# @st.cache_resource
# def load_surya_models():
det_processor, det_model = load_det_processor(), load_det_model()
det_model.to(device)
rec_model, rec_processor = load_rec_model(), load_rec_processor()
rec_model.to(device)
# return det_processor, det_model, rec_model, rec_processor

# Load the GOT-OCR2 model and tokenizer (CPU build).
# @st.cache_resource
# def load_got_ocr_model():
# tokenizer = AutoTokenizer.from_pretrained('aarishshahmohsin/got_ocr_cpu', trust_remote_code=True, device_map='cpu')
# model = AutoModel.from_pretrained('aarishshahmohsin/got_ocr_cpu', trust_remote_code=True, low_cpu_mem_usage=True, device_map='cpu', use_safetensors=True, pad_token_id=tokenizer.eos_token_id)
# tokenizer = AutoTokenizer.from_pretrained('RufusRubin777/GOT-OCR2_0_CPU', trust_remote_code=True, device_map='cpu')
# got_model = AutoModel.from_pretrained('RufusRubin777/GOT-OCR2_0_CPU', trust_remote_code=True, low_cpu_mem_usage=True, device_map='cpu', use_safetensors=True)
tokenizer = AutoTokenizer.from_pretrained('aarishshahmohsin/got_ocr_2', trust_remote_code=True, device_map='cpu')
got_model = AutoModel.from_pretrained('aarishshahmohsin/got_ocr_2', trust_remote_code=True, low_cpu_mem_usage=True, device_map='cpu', use_safetensors=True)
# got_model = got_model.to_empty()
got_model = got_model.eval().to(device)
# return tokenizer, model

# det_processor, det_model, rec_model, rec_processor = load_surya_models()
# tokenizer, got_model = load_got_ocr_model()

st.title("OCR Application (Aarish Shah Mohsin)")
st.write("Upload an image for OCR processing. GOT-OCR is used for English; the Surya OCR model is used for English + Hindi.")
st.sidebar.header("Configuration")
model_choice = st.sidebar.selectbox("Select OCR Model:", ("For English + Hindi", "For English (GOT-OCR)"))

# Store the uploaded image and extracted text in session state
if 'uploaded_image' not in st.session_state:
    st.session_state.uploaded_image = None
if 'extracted_text' not in st.session_state:
    st.session_state.extracted_text = ""

uploaded_file = st.sidebar.file_uploader("Choose an image...", type=["png", "jpg", "jpeg"])

# Update the session state if a new file is uploaded
if uploaded_file is not None:
    st.session_state.uploaded_image = uploaded_file

predict_button = st.sidebar.button("Predict", key="predict")

col1, col2 = st.columns([2, 1])

# Display the image preview if it's already uploaded
if st.session_state.uploaded_image:
    image = Image.open(st.session_state.uploaded_image)
    with col1:
        # Display a smaller preview of the uploaded image (set width to 300px)
        col1.image(image, caption='Uploaded Image', use_column_width=False, width=300)

# Handle predictions
if predict_button and st.session_state.uploaded_image:
    # with col2:
    with st.spinner("Processing..."):
        # Save the uploaded file temporarily
        with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as temp_file:
            temp_file.write(st.session_state.uploaded_image.getvalue())
            temp_file_path = temp_file.name

        image = Image.open(temp_file_path)
        image = image.convert("RGB")

        if model_choice == "For English + Hindi":
            langs = ["en", "hi"]
            predictions = run_ocr([image], [langs], det_model, det_processor, rec_model, rec_processor)
            text_list = re.findall(r"text='(.*?)'", str(predictions[0]))
            extracted_text = ' '.join(text_list)
            st.session_state.extracted_text = extracted_text  # Save extracted text in session state
            # with col2:
            #     st.subheader("Extracted Text (Surya):")
            #     st.write(extracted_text)

        elif model_choice == "For English (GOT-OCR)":
            image_file = temp_file_path
            res = got_model.chat(tokenizer, image_file, ocr_type='ocr')
            st.session_state.extracted_text = res  # Save extracted text in session state
            # with col2:
            #     st.subheader("Extracted Text (GOT-OCR):")
            #     st.write(res)

        # Delete the temporary file after processing
        if os.path.exists(temp_file_path):
            os.remove(temp_file_path)

# Search functionality
if st.session_state.extracted_text:
    search_query = st.text_input("Search in extracted text:", key="search_query", placeholder="Type to search...")

    # Create a pattern to find the search query in a case-insensitive way
    if search_query:
        pattern = re.compile(re.escape(search_query), re.IGNORECASE)
        highlighted_text = st.session_state.extracted_text

        # Wrap each match in a bright-green span so it is highlighted when rendered as HTML
        highlighted_text = pattern.sub(
            lambda m: f"<span style='background-color: #00FF00;'>{m.group(0)}</span>",
            highlighted_text,
        )

        st.markdown("### Highlighted Search Results:")
        st.markdown(highlighted_text, unsafe_allow_html=True)
    else:
        # If no search query, show the original extracted text
        st.markdown("### Extracted Text:")
        st.markdown(st.session_state.extracted_text, unsafe_allow_html=True)
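
# Usage note (assumes this script is saved as a standalone file and the packages
# imported above are installed): launch the app with
#   streamlit run <path-to-this-file>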