import streamlit as st
import pandas as pd
import numpy as np
from PIL import Image  # image loading / saving
import cv2 as cv2
from matplotlib import pyplot as plt
import scipy.misc
import scipy.ndimage
import skimage.filters
import sklearn.metrics
import imageio
import torch
from model import AESc
from postprocessing import postprocessing, final_prediction
import torchvision.transforms as TT
from matplotlib.backends.backend_agg import FigureCanvasAgg

st.title('Loacker Scanner')

st.markdown("""
The purpose of this application is to estimate the amount of fat bloom in a picture of a
Tortina. It works best when the border of the Tortina has high contrast with the background
and there are no shadows around. To use the application, simply upload an image of the
Tortina. The application will then detect the Tortina and highlight the predicted bloom zone
in green. You can use the slider to adjust the threshold for the fat/chocolate ratio. The
default value is set to 53, but you can move the slider to increase or decrease the
threshold. The threshold value is used to determine whether a particular region of the
Tortina has fat bloom or not. If the fat/chocolate ratio of a region exceeds the threshold,
that region is classified as having fat bloom and is highlighted in the predicted bloom zone.
Please note that this application is intended to provide an estimate only and may not be
accurate in all cases.
""")

# Image uploader widget; returns None until the user uploads a file.
image = st.file_uploader(label="Upload your image here", type=['png', 'jpg', 'jpeg'])


def write_area_prediction(fat_ratio):
    """Write the fat/chocolate area percentages and a discrete class to the page.

    Args:
        fat_ratio: fraction of the analysed pixels classified as fat, in [0, 1].
    """
    fat_percentage = round(fat_ratio * 100, ndigits=2)
    choco_percentage = 100 - fat_percentage
    st.write("Area Covered by Fat (%):", fat_percentage)
    st.write("Area Covered by Chocolate (%):", choco_percentage)
    # Map the continuous ratio onto one of five classes (0 = no bloom at all).
    # The original exclusive-range if-chain is equivalent to this elif ladder.
    if fat_ratio == 0:
        st.write("Predicted Area Class:", 0)
    elif fat_ratio < 0.25:
        st.write("Predicted Area Class:", 1)
    elif fat_ratio < 0.50:
        st.write("Predicted Area Class:", 2)
    elif fat_ratio < 0.75:
        st.write("Predicted Area Class:", 3)
    else:
        st.write("Predicted Area Class:", 4)


def fat_choco_rateo(image, input_image, threshold):
    """Histogram-based fat-bloom estimate.

    Segments the Tortina from the background, then classifies every remaining
    pixel as fat (> threshold) or chocolate (<= threshold) and renders the
    original, detection, prediction overlay, and intensity histogram.

    Args:
        image: path on disk of the uploaded picture (read with cv2).
        input_image: PIL image of the upload, shown as "Original".
        threshold: gray-level cut between chocolate and fat pixels (1-255).
    """
    img = cv2.imread(image)
    # Convert to grayscale, then black out everything at or above gray 160 —
    # that removes the bright background and border around the Tortina.
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(gray, 160, 255, cv2.THRESH_BINARY)
    img[thresh == 255] = 0
    # Morphological erosion shaves off residual boundary pixels of the
    # foreground object (like soil erosion eating away at edges).
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    erosion = cv2.erode(img, kernel, iterations=1)
    # Flatten and drop the black (masked-out) pixels before counting.
    hist_analisy = [i for i in erosion.ravel() if i != 0]
    # Partition the remaining pixels into chocolate (<= threshold) and fat.
    fat, choco = [], []
    for x in hist_analisy:
        (choco, fat)[int(x > threshold)].append(x)
    # Per-pixel prediction map: median filter to denoise, then threshold.
    median_filtered = scipy.ndimage.median_filter(img, size=3)
    predicted = np.uint8(median_filtered > threshold) * 255

    # Display results in Streamlit.
    col1, col2, col3, col4 = st.columns([1, 1, 1, 1])
    with col1:
        st.image(input_image, caption="Original", use_column_width=True)
    with col2:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        st.image(img, caption="Detected Tortina", use_column_width=True)
    with col3:
        predicted = predicted.min(-1)  # take minimum of thresholded channels
        # Copy before painting so the "Detected Tortina" array is not
        # mutated in place (the original aliased `img` here).
        overlay = img.copy()
        overlay[predicted == 255] = np.array([0, 0, 255])
        st.image(overlay, caption="Prediction", use_column_width=True)
    with col4:
        fig, ax = plt.subplots(figsize=(6, 4))
        ax.hist(erosion.ravel(), 256, [1, 256])  # bin 0 excluded (masked)
        ax.axvline(x=threshold, color='red')
        ax.set_title("Histogram")
        # Render the matplotlib figure to an RGBA buffer for st.image.
        canvas = FigureCanvasAgg(fig)
        canvas.draw()
        buf = canvas.buffer_rgba()
        hist_image = np.asarray(buf)
        hist_image = cv2.resize(hist_image, (600, 600))
        st.image(hist_image, caption="Histogram", use_column_width=True)

    # Guard against a fully masked image (empty histogram) to avoid
    # ZeroDivisionError; report 0% fat in that case.
    fat_ratio = len(fat) / len(hist_analisy) if hist_analisy else 0.0
    write_area_prediction(fat_ratio)


def auto_encoder_prediction(image, thresh):
    """Deep-learning fat-bloom estimate via reconstruction error.

    Loads a pretrained autoencoder (AESc), computes the absolute
    reconstruction error as an anomaly map, thresholds and post-processes it,
    and renders the original, detection, predictions, and error histogram.

    Args:
        image: path on disk of the uploaded picture (read with cv2).
        thresh: anomaly-score threshold for classifying a pixel as fat.
    """
    img = cv2.imread(image)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = cv2.resize(img, (256, 256))
    img = np.expand_dims(img, (0))  # add batch dim -> (1, H, W, C)
    # Renamed from `input` to avoid shadowing the builtin.
    input_tensor = torch.tensor(img)
    X = input_tensor.permute((0, 3, 1, 2))
    X = TT.Grayscale()(X).float() / 255  # grayscale, scaled to [0, 1]
    gray = X.permute((0, 2, 3, 1)).numpy()[0]

    checkpoint = torch.load(
        "./model/20230302-021129_tortina.pt", map_location="cpu")
    model = AESc(cmap="gray")
    model.load_state_dict(checkpoint["model_state_dict"])
    model.eval()

    # Anomaly map = absolute reconstruction error of the autoencoder.
    Y = model(X)
    anomaly_maps = (Y - X).abs()
    anomaly_maps_np = anomaly_maps.permute((0, 2, 3, 1)).detach().numpy()
    thresholded_anomaly_maps = (
        anomaly_maps_np > thresh).astype(np.uint8).max(-1)
    postprocessed_anomaly_maps = postprocessing(
        gray, thresholded_anomaly_maps[0])
    # Kept for its computation (result currently unused in the UI; the
    # st.write of the final class below is commented out upstream).
    predicted_area = final_prediction(
        anomaly_maps_np[0], postprocessed_anomaly_maps)[0]
    final_predictions = predicted_area

    # Display results in Streamlit.
    col1, col2, col3, col4, col5 = st.columns([1, 1, 1, 1, 1])
    with col1:
        st.image(img, caption="Original", use_column_width=True)
    with col2:
        detected = img[0].copy()
        detected[postprocessed_anomaly_maps == 0] = 0
        st.image(detected, caption="Detected Tortina", use_column_width=True)
    with col3:
        # Copy before painting so "Detected Tortina" is not mutated in place
        # (the original aliased `detected` here).
        predicted = detected.copy()
        predicted[(thresholded_anomaly_maps[0] == 1) &
                  (detected.sum(-1) != 0)] = np.array([0, 0, 255])
        st.image(predicted, caption="Prediction", use_column_width=True)
    with col4:
        # Post-processed map: 1 = chocolate (keep original pixel),
        # 2 = fat bloom (painted blue in RGB order -> shown as blue).
        predicted = np.zeros(img[0].shape, dtype=np.uint8)
        predicted[postprocessed_anomaly_maps == 1] = \
            img[0][postprocessed_anomaly_maps == 1]
        predicted[postprocessed_anomaly_maps == 2] = np.array([0, 0, 255])
        st.image(predicted, caption="Post-processed Prediction",
                 use_column_width=True)
    with col5:
        # Histogram of anomaly scores inside the detected Tortina only.
        hist_data = np.ravel(
            anomaly_maps_np[0][postprocessed_anomaly_maps != 0])
        fig, ax = plt.subplots(figsize=(6, 4))
        ax.hist(hist_data, 256 // 10, (0, hist_data.max()))
        ax.axvline(x=thresh, color='red')
        ax.set_title("Histogram")
        # Render the matplotlib figure to an RGBA buffer for st.image.
        canvas = FigureCanvasAgg(fig)
        canvas.draw()
        buf = canvas.buffer_rgba()
        hist_image = np.asarray(buf)
        hist_image = cv2.resize(hist_image, (600, 600))
        st.image(hist_image, caption="Histogram", use_column_width=True)

    fat_pixels = (postprocessed_anomaly_maps == 2).sum()
    choco_pixels = (postprocessed_anomaly_maps == 1).sum()
    total_pixels = fat_pixels + choco_pixels
    # Guard against an empty detection mask to avoid ZeroDivisionError.
    fat_ratio = fat_pixels / total_pixels if total_pixels else 0.0
    write_area_prediction(fat_ratio)


if image is not None:
    input_image = Image.open(image)  # read the uploaded image
    # Persist the upload to disk so cv2.imread can consume it. Convert to RGB
    # first: RGBA/paletted PNG uploads cannot be saved directly as JPEG.
    input_image.convert("RGB").save("img.jpg")

    st.title('Histogram Approach')
    # Slider for the gray-level fat/chocolate threshold.
    threshold = st.slider(
        'Threshold (53 is the optimal value for Dark Tortinas , 100 is the optimal value for Brown Tortinas)',
        1, 255, 53)
    fat_choco_rateo("img.jpg", input_image, threshold)

    st.title('Deep Learning Approach')
    # Slider for the autoencoder anomaly-score threshold.
    threshold2 = st.slider(
        'Threshold (0.033 is the optimal value evaluated by the model)',
        0.001, 1.000, 0.033)
    auto_encoder_prediction("img.jpg", threshold2)
else:
    st.write("Upload an Image")

st.caption("Made with ❤️ by @Sottovia Alessandro and @Jonas Rabensteiner​")