"""Flask app: upload resume images, extract structured data with a Groq VLM
pipeline, fall back to a PaddleOCR + local NER backup model, show results."""

# libraries
import logging
import os

from flask import (
    Flask,
    flash,
    redirect,
    render_template,
    request,
    send_from_directory,
    session,
    url_for,
)

from utility.utils import (
    extract_text_from_images,
    process_extracted_text,
    process_resume_data,
)
from backup.backup import NER_Model
from paddleocr import PaddleOCR  # noqa: F401 -- kept for side effects / availability

# Configure logging: console only (no FileHandler).
logging.basicConfig(
    level=logging.INFO,
    handlers=[logging.StreamHandler()],
)

# Flask App
app = Flask(__name__)
# NOTE(review): hard-coded secret key -- load from an environment variable in production.
app.secret_key = 'your_secret_key'


@app.template_filter('basename')
def basename_filter(path):
    """Jinja filter: return the final component of *path*."""
    return os.path.basename(path)


app.config['UPLOAD_FOLDER'] = 'uploads/'
app.config['RESULT_FOLDER'] = 'results/'

# Static-side folders referenced by templates; created alongside the working folders.
UPLOAD_FOLDER = 'static/uploads/'
RESULT_FOLDER = 'static/results/'
for _folder in (
    UPLOAD_FOLDER,
    RESULT_FOLDER,
    app.config['UPLOAD_FOLDER'],
    app.config['RESULT_FOLDER'],
):
    os.makedirs(_folder, exist_ok=True)

# Set the PaddleOCR home directory to a writable location.
os.environ['PADDLEOCR_HOME'] = '/tmp/.paddleocr'
if not os.path.exists('/tmp/.paddleocr'):
    os.makedirs('/tmp/.paddleocr', exist_ok=True)
    logging.info("Created PaddleOCR home directory.")
else:
    logging.info("PaddleOCR home directory exists.")


def json_to_llm_str(extracted_text):
    """Flatten the per-image extracted-text mapping into one text blob for the
    NER backup model.

    NOTE(review): this name was called but never imported/defined in the
    original file (NameError on the backup path). Presumably it lived in
    ``utility.utils`` -- confirm and import from there if so.
    """
    if isinstance(extracted_text, dict):
        return "\n".join(str(value) for value in extracted_text.values())
    return str(extracted_text)


def _remove_uploaded_files():
    """Delete every file recorded in session['uploaded_files'] and clear the key.

    Returns True when files were present (and removal was attempted),
    False when there was nothing to remove. Shared by /remove_file and
    /reset_upload, whose original bodies were exact duplicates and both
    tested the undefined name ``uploaded_file`` (the view function) instead
    of the local ``uploaded_files`` list.
    """
    uploaded_files = session.get('uploaded_files', [])
    if not uploaded_files:
        return False
    for filename in uploaded_files:
        file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
        if os.path.exists(file_path):
            os.remove(file_path)
            logging.info(f"Removed file: {filename}")
        else:
            logging.warning(f"File not found for removal: {file_path}")  # More specific log
    session.pop('uploaded_files', None)
    return True


@app.route('/')
def index():
    """Render the landing page with any files already uploaded this session."""
    uploaded_files = session.get('uploaded_files', [])
    logging.info(f"Accessed index page, uploaded files: {uploaded_files}")
    return render_template('index.html', uploaded_files=uploaded_files)


@app.route('/upload', methods=['POST'])
def upload_file():
    """Save the posted files into UPLOAD_FOLDER, record them in the session,
    then immediately run processing."""
    if 'files' not in request.files:
        flash('No file part')
        logging.warning("No file part found in the request")
        return redirect(request.url)
    files = request.files.getlist('files')
    if not files or all(file.filename == '' for file in files):
        flash('No selected files')
        logging.warning("No files selected for upload")
        return redirect(request.url)
    uploaded_files = session.get('uploaded_files', [])
    for file in files:
        if file:
            # NOTE(review): filename is used as-is; consider
            # werkzeug.utils.secure_filename() to prevent path traversal.
            filename = file.filename
            file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
            file.save(file_path)
            uploaded_files.append(filename)
            logging.info(f"Uploaded file: {filename} at {file_path}")
    session['uploaded_files'] = uploaded_files
    flash('Files successfully uploaded')
    logging.info(f"Files successfully uploaded: {uploaded_files}")
    return process_file()


@app.route('/remove_file', methods=['POST'])
def remove_file():
    """Remove every uploaded file for this session."""
    if _remove_uploaded_files():
        flash('Files successfully removed')
        logging.info("All uploaded files removed")
    else:
        flash('No file to remove.')
        logging.warning("File not found for removal")
    return redirect(url_for('index'))


@app.route('/reset_upload')
def reset_upload():
    """Reset the uploaded file and the processed data."""
    if _remove_uploaded_files():
        flash('Files successfully removed')
        logging.info("All uploaded files removed")
    else:
        flash('No file to remove.')
        logging.warning("File not found for removal")
    return redirect(url_for('index'))


@app.route('/process', methods=['GET', 'POST'])
def process_file():
    """Primary: single Groq VLM pass over the uploaded images.
    Fallback: PaddleOCR text + local NER model when the primary raises or
    returns effectively empty data. Results land in the session for /result."""
    uploaded_files = session.get('uploaded_files', [])
    if not uploaded_files:
        flash('No files selected for processing')
        logging.warning("No files selected for processing")
        return redirect(url_for('index'))

    file_paths = [os.path.join(app.config['UPLOAD_FOLDER'], filename)
                  for filename in uploaded_files]
    logging.info(f"Processing files: {file_paths}")

    # Pre-bind so the except path can safely use both names even when
    # extract_text_from_images raised before assignment (the original only
    # probed locals() for extracted_text, leaving processed_Img unbound).
    extracted_text = {}
    processed_Img = {}
    try:
        # Single Groq VLM pass on each image.
        LLMdata, extracted_text, processed_Img = extract_text_from_images(file_paths)
        LLMdata['meta'] = "Primary: Groq VLM Extraction"
        logging.info(f"Groq VLM structured data: {LLMdata}")
        logging.info(f"Extracted text blobs: {extracted_text}")
        logging.info(f"Processed images: {processed_Img}")

        # If every field (other than the raw text) came back empty, the
        # primary pass effectively failed -- divert to the backup model.
        is_empty = all(len(v) == 0 for k, v in LLMdata.items() if k != 'extracted_text')
        if is_empty:
            logging.info("Groq VLM returned empty data. Trying backup model...")
            raise ValueError("Empty data from Groq VLM")

        # Regex fallback / augmentation from model text.
        cont_data = process_extracted_text(extracted_text)
        logging.info(f"Contextual data: {cont_data}")
        processed_data = process_resume_data(LLMdata, cont_data, extracted_text)
        logging.info(f"Processed data: {processed_data}")
        session['processed_data'] = processed_data
        session['processed_Img'] = processed_Img
        flash('Data processed and analyzed successfully')
        return redirect(url_for('result'))
    except Exception as e:
        logging.exception(f"Error during primary processing: {e}")
        flash('Primary processing failed, attempting backup model...')

        # Extraction already ran above -- do not call it again. If it produced
        # no text at all there is nothing the backup model can work with.
        if not extracted_text:
            flash('Critical failure: Could not extract text from image.')
            return redirect(url_for('index'))

        LLMdata = {}
        try:
            text = json_to_llm_str(extracted_text)
            LLMdata = NER_Model(text)
            LLMdata['meta'] = "Backup: PaddleOCR + Local NER"
            logging.info(f"NER model data: {LLMdata}")
        except Exception as backup_e:
            logging.exception(f"Error during backup processing: {backup_e}")
            flash('Backup processing also failed')
            return redirect(url_for('index'))

        # Final merge using backup data if we reached here.
        cont_data = process_extracted_text(extracted_text)
        processed_data = process_resume_data(LLMdata, cont_data, extracted_text)
        logging.info(f"Final merged data: {processed_data}")
        session['processed_data'] = processed_data
        session['processed_Img'] = processed_Img
        flash('Data processed using backup model')
        logging.info("Data processed using backup model")
        return redirect(url_for('result'))


@app.route('/result')
def result():
    """Render the results page from session-stored processed data/images."""
    processed_data = session.get('processed_data', {})
    processed_Img = session.get('processed_Img', {})
    logging.info(f"Displaying results: Data - {processed_data}, Images - {processed_Img}")
    return render_template('result.html', data=processed_data, Img=processed_Img)


@app.route('/uploads/<filename>')
def uploaded_file(filename):
    """Serve an uploaded file by name.

    Fix: the original rule was '/uploads/' with no <filename> converter, so
    Flask could never bind the required argument (500 on every request).
    """
    logging.info(f"Serving file: {filename}")
    return send_from_directory(app.config['UPLOAD_FOLDER'], filename)


if __name__ == '__main__':
    logging.info("Starting Flask app")
    app.run(debug=True)