| | import os |
| | import cv2 |
| | import numpy as np |
| | import pandas as pd |
| | import base64 |
| | from flask import Flask, render_template, request, jsonify |
| | from werkzeug.utils import secure_filename |
| | from io import BytesIO |
| | from PIL import Image |
| | import tensorflow as tf |
| | from tensorflow.keras.models import load_model |
| |
|
| | |
# Silence TensorFlow's C++ log spam and point matplotlib's config cache at a
# writable location (needed on read-only container filesystems).
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ['MPLCONFIGDIR'] = '/tmp/matplotlib'

app = Flask(__name__)
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024  # reject uploads > 16 MB
app.config['UPLOAD_FOLDER'] = 'uploads'
app.config['ALLOWED_EXTENSIONS'] = {'png', 'jpg', 'jpeg'}

# NOTE(review): mode=0o777 makes the upload dir world-writable — presumably
# required by the hosting environment's unprivileged user; confirm before
# tightening the mode.
os.makedirs(app.config['UPLOAD_FOLDER'], mode=0o777, exist_ok=True)

# Load the CNN once at import time so every request reuses the same model.
# compile=False skips restoring the training config; a fresh compile follows
# (only needed for the Keras API, not for inference math itself).
print("Loading traffic sign classification model...")
model = load_model('tabela_tespit.h5', compile=False)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
print("✓ Model loaded successfully!")

# Class-id -> human-readable sign name table; rows are indexed by class id
# via labels_df.loc[class_id, 'Name'] in the routes below.
labels_df = pd.read_csv('labels.csv')
print(f"✓ Loaded {len(labels_df)} traffic sign classes")
| |
|
def allowed_file(filename):
    """Return True if *filename* carries one of the whitelisted extensions."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in app.config['ALLOWED_EXTENSIONS']
| |
|
def preprocess_image(image_path):
    """Load an image and prepare it for the CNN (32x32 grayscale).

    Args:
        image_path: filesystem path to a decodable image file.

    Returns:
        (img_input, img_display) where img_input is a float array of shape
        (1, 32, 32, 1) with values in [0, 1], and img_display is the full
        original image converted to RGB for the client-side preview.

    Raises:
        ValueError: if the file cannot be read or any processing step fails.
    """
    try:
        # Decode from disk exactly once; the preview and the model input are
        # both derived from this decode (the original read the file twice).
        img_original = cv2.imread(image_path)
        if img_original is None:
            raise ValueError("Could not read image")

        # OpenCV decodes BGR; browsers/PIL expect RGB for the preview.
        img_display = cv2.cvtColor(img_original, cv2.COLOR_BGR2RGB)

        # Deterministic BGR->gray conversion (replaces a second imread with
        # IMREAD_GRAYSCALE, which may use codec-dependent conversion).
        img = cv2.cvtColor(img_original, cv2.COLOR_BGR2GRAY)
        print(f"Original image shape: {img.shape}")

        # Model expects 32x32, histogram-equalized, normalized to [0, 1].
        img_resized = cv2.resize(img, (32, 32))
        img_equalized = cv2.equalizeHist(img_resized)
        img_normalized = img_equalized / 255.0

        # Add batch and channel axes: (32, 32) -> (1, 32, 32, 1).
        img_input = img_normalized.reshape(1, 32, 32, 1)
        print(f"Model input shape: {img_input.shape}")

        return img_input, img_display

    except ValueError:
        # Already a clean, user-facing message — don't double-wrap it.
        raise
    except Exception as e:
        # Preserve the original exception chain for debugging.
        raise ValueError(f"Failed to preprocess image: {e}") from e
| |
|
def img_to_base64(img):
    """Encode a numpy RGB image as a PNG data-URI string."""
    buffer = BytesIO()
    Image.fromarray(img.astype('uint8')).save(buffer, format='PNG')
    encoded = base64.b64encode(buffer.getvalue()).decode('utf-8')
    return f'data:image/png;base64,{encoded}'
| |
|
@app.route('/')
def index():
    """Serve the single-page upload UI."""
    return render_template('index.html')
| |
|
@app.route('/predict', methods=['POST'])
def predict():
    """Classify an uploaded traffic-sign image.

    Expects a multipart form with a 'file' field (PNG/JPG/JPEG). Responds
    with JSON containing the best class, its confidence, the top-5
    candidates, and a base64 data-URI preview of the image; on failure,
    a JSON error object with a 4xx/5xx status.
    """
    # Defined before the try so the finally-cleanup can never hit a
    # NameError when an exception fires before the file is saved
    # (the original referenced an unbound `filepath` in its handler).
    filepath = None
    try:
        if 'file' not in request.files:
            return jsonify({'error': 'No file uploaded'}), 400

        file = request.files['file']

        if file.filename == '':
            return jsonify({'error': 'No file selected'}), 400

        if not allowed_file(file.filename):
            return jsonify({'error': 'Invalid file type. Please upload PNG, JPG, or JPEG'}), 400

        # Sanitize the client-supplied name before touching the filesystem;
        # secure_filename can legitimately return '' for hostile names.
        filename = secure_filename(file.filename)
        if not filename:
            return jsonify({'error': 'Invalid file type. Please upload PNG, JPG, or JPEG'}), 400
        filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
        file.save(filepath)

        # Fixed: original printed the literal placeholder "(unknown)".
        print(f"Processing: {filename}")

        img_input, img_display = preprocess_image(filepath)

        print("Making prediction...")
        predictions = model.predict(img_input, verbose=0)

        # Best class plus its human-readable label.
        class_id = np.argmax(predictions)
        confidence = np.max(predictions)
        class_name = labels_df.loc[class_id, 'Name']

        # Five highest-scoring classes, best first.
        top5_indices = np.argsort(predictions[0])[-5:][::-1]
        top5_predictions = [
            {
                'class_id': int(idx),
                'class_name': labels_df.loc[idx, 'Name'],
                'confidence': float(predictions[0][idx]),
            }
            for idx in top5_indices
        ]

        result = {
            'predicted_class': class_name,
            'class_id': int(class_id),
            'confidence': float(confidence),
            'top5_predictions': top5_predictions,
            'image': img_to_base64(img_display),
        }

        print(f"✓ Prediction: {class_name} (Confidence: {confidence:.2%})")

        return jsonify(result)

    except Exception as e:
        print(f"Error during prediction: {e}")
        import traceback
        traceback.print_exc()
        return jsonify({'error': str(e)}), 500
    finally:
        # The upload is a temporary working file — remove it on success
        # and failure alike.
        if filepath and os.path.exists(filepath):
            os.remove(filepath)
| |
|
@app.route('/test-example', methods=['POST'])
def test_example():
    """Test with example image"""
    try:
        example_path = 'image.jpg'

        if not os.path.exists(example_path):
            return jsonify({'error': 'Example image not found'}), 404

        print(f"Testing with example: {example_path}")

        img_input, img_display = preprocess_image(example_path)

        print("Making prediction on example...")
        predictions = model.predict(img_input, verbose=0)
        scores = predictions[0]

        # Best class plus its human-readable label.
        class_id = np.argmax(predictions)
        confidence = np.max(predictions)
        class_name = labels_df.loc[class_id, 'Name']

        # Five highest-scoring classes, best first.
        top5_indices = np.argsort(scores)[-5:][::-1]
        top5_predictions = [
            {
                'class_id': int(idx),
                'class_name': labels_df.loc[idx, 'Name'],
                'confidence': float(scores[idx]),
            }
            for idx in top5_indices
        ]

        result = {
            'predicted_class': class_name,
            'class_id': int(class_id),
            'confidence': float(confidence),
            'top5_predictions': top5_predictions,
            'image': img_to_base64(img_display),
        }

        print(f"✓ Example prediction: {class_name} (Confidence: {confidence:.2%})")

        return jsonify(result)

    except Exception as e:
        print(f"Error during example prediction: {e}")
        import traceback
        traceback.print_exc()
        return jsonify({'error': str(e)}), 500
| |
|
if __name__ == '__main__':
    # Bind to all interfaces; 7860 is presumably the hosting platform's
    # expected port — confirm against the deployment config.
    app.run(host='0.0.0.0', port=7860, debug=False)
| |
|