eho69 committed on
Commit
dfdf6f0
·
verified ·
1 Parent(s): b1b0648

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +643 -0
app.py ADDED
@@ -0,0 +1,643 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import cv2
3
+ import numpy as np
4
+ from PIL import Image
5
+ import matplotlib.pyplot as plt
6
+ import io
7
+
8
def create_histogram(image, title="Histogram"):
    """Render an intensity histogram for *image* and return it as an array.

    Args:
        image: 2-D (grayscale) or 3-D channel-last (RGB) uint8 array.
        title: Title drawn above the plot.

    Returns:
        np.ndarray: The rendered plot decoded back into an image array.
    """
    fig, ax = plt.subplots(figsize=(8, 4))

    if len(image.shape) == 2:  # Grayscale: single intensity curve
        hist = cv2.calcHist([image], [0], None, [256], [0, 256])
        ax.plot(hist, color='black')
        ax.set_xlim([0, 256])
        ax.set_xlabel('Pixel Intensity')
        ax.set_ylabel('Frequency')
        ax.set_title(title)
        ax.grid(True, alpha=0.3)
    else:  # RGB: one curve per channel
        colors = ('r', 'g', 'b')
        for i, color in enumerate(colors):
            hist = cv2.calcHist([image], [i], None, [256], [0, 256])
            ax.plot(hist, color=color, label=color.upper())
        ax.set_xlim([0, 256])
        ax.set_xlabel('Pixel Intensity')
        ax.set_ylabel('Frequency')
        ax.set_title(title)
        ax.legend()
        ax.grid(True, alpha=0.3)

    # Render the figure into an in-memory PNG, then decode it to an array.
    buf = io.BytesIO()
    # Operate on *this* figure explicitly: the original used the pyplot
    # module-level calls (tight_layout/savefig/close), which act on the
    # global "current figure" and can target the wrong one when several
    # Gradio handlers build plots concurrently.
    fig.tight_layout()
    fig.savefig(buf, format='png', dpi=100, bbox_inches='tight')
    buf.seek(0)
    plot_image = Image.open(buf)
    # np.array() forces a full decode so buf can be released afterwards.
    result = np.array(plot_image)
    plt.close(fig)

    return result
41
+
42
+
43
def get_pixel_info(image, x, y):
    """Describe the pixel at (x, y) as a Markdown string.

    Args:
        image: 2-D grayscale or channel-last RGB(A) numpy array, or None.
        x: Pixel column.
        y: Pixel row.

    Returns:
        str: Markdown-formatted pixel details, or a short message when no
        image is loaded or the coordinates fall outside the image.
    """
    if image is None:
        return "No image loaded"

    h, w = image.shape[:2]
    if x < 0 or x >= w or y < 0 or y >= h:
        return "Click within image bounds"

    if len(image.shape) == 2:  # Grayscale
        pixel_value = image[y, x]
        info = f"""
**Pixel Information at ({x}, {y})**
- **Gray Value**: {pixel_value}
- **Image Size**: {w} x {h}
"""
    else:  # Color
        # Images arrive through PIL/Gradio in RGB channel order, so unpack
        # as r, g, b. The original unpacked ``b, g, r`` (the OpenCV BGR
        # convention), which swapped red and blue in the readout. The [:3]
        # slice also tolerates an alpha channel.
        r, g, b = image[y, x][:3]
        info = f"""
**Pixel Information at ({x}, {y})**
- **RGB**: ({r}, {g}, {b})
- **Hex**: #{r:02x}{g:02x}{b:02x}
- **Image Size**: {w} x {h}
"""
    return info
68
+
69
+
70
def apply_clahe(image, clip_limit, tile_size):
    """Run CLAHE on (the grayscale version of) the input image.

    Args:
        image: PIL image or numpy array.
        clip_limit: Contrast-limiting threshold for histogram clipping.
        tile_size: Side length of the square equalization tiles.

    Returns:
        Tuple of (enhanced RGB image, before-histogram plot,
        after-histogram plot, Markdown summary).
    """
    if isinstance(image, Image.Image):
        image = np.array(image)

    # CLAHE operates on a single channel, so reduce RGB input to grayscale.
    if len(image.shape) == 3:
        gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    else:
        gray = image

    # Apply CLAHE
    clahe = cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=(tile_size, tile_size))
    result = clahe.apply(gray)

    # Histograms before/after for side-by-side comparison in the UI.
    hist_before = create_histogram(gray, "Histogram - Before CLAHE")
    hist_after = create_histogram(result, "Histogram - After CLAHE")

    # Convert back to RGB for display. (The original also built an unused
    # RGB copy of the *input* grayscale image; that dead code is removed.)
    result_rgb = cv2.cvtColor(result, cv2.COLOR_GRAY2RGB)

    info = f"""
### CLAHE Applied
- **Clip Limit**: {clip_limit}
- **Tile Size**: {tile_size}x{tile_size}
- **Effect**: Enhances local contrast by equalizing histograms in small tiles
- **Use Case**: Improves visibility in shadowed or low-contrast regions
"""

    return result_rgb, hist_before, hist_after, info
102
+
103
+
104
def apply_gaussian_blur(image, kernel_size, sigma):
    """Blur the image with a Gaussian kernel.

    Args:
        image: PIL image or numpy array.
        kernel_size: Side length of the square kernel (forced odd).
        sigma: Gaussian standard deviation; 0 lets OpenCV derive it.

    Returns:
        Tuple of (blurred image, Markdown summary).
    """
    if isinstance(image, Image.Image):
        image = np.array(image)

    # Gradio sliders deliver floats; cv2.GaussianBlur requires an integer,
    # odd kernel size, so cast first and then force oddness.
    kernel_size = int(kernel_size)
    if kernel_size % 2 == 0:
        kernel_size += 1

    result = cv2.GaussianBlur(image, (kernel_size, kernel_size), sigma)

    info = f"""
### Gaussian Blur Applied
- **Kernel Size**: {kernel_size}x{kernel_size}
- **Sigma**: {sigma}
- **Effect**: Smooths image by averaging pixels with Gaussian weights
- **Use Case**: Noise reduction, preparing for edge detection
"""

    return result, info
124
+
125
+
126
def apply_bilateral_filter(image, diameter, sigma_color, sigma_space):
    """Apply an edge-preserving bilateral filter to the grayscale image.

    Args:
        image: PIL image or numpy array.
        diameter: Pixel-neighborhood diameter used during filtering.
        sigma_color: Filter sigma in the color/intensity domain.
        sigma_space: Filter sigma in the coordinate (spatial) domain.

    Returns:
        Tuple of (filtered RGB image, Markdown summary).
    """
    if isinstance(image, Image.Image):
        image = np.array(image)

    # Convert to grayscale for processing
    if len(image.shape) == 3:
        gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    else:
        gray = image

    # Gradio sliders deliver floats; cv2.bilateralFilter's diameter
    # parameter must be an integer.
    diameter = int(diameter)
    result = cv2.bilateralFilter(gray, diameter, sigma_color, sigma_space)

    # Convert back to RGB for display in the UI.
    result_rgb = cv2.cvtColor(result, cv2.COLOR_GRAY2RGB)

    info = f"""
### Bilateral Filter Applied
- **Diameter**: {diameter}
- **Sigma Color**: {sigma_color}
- **Sigma Space**: {sigma_space}
- **Effect**: Edge-preserving smoothing filter
- **Use Case**: Noise reduction while keeping edges sharp
"""

    return result_rgb, info
152
+
153
+
154
def apply_median_filter(image, kernel_size):
    """Apply a median filter (good for salt-and-pepper noise).

    Args:
        image: PIL image or numpy array.
        kernel_size: Side length of the square kernel (forced odd).

    Returns:
        Tuple of (filtered image, Markdown summary).
    """
    if isinstance(image, Image.Image):
        image = np.array(image)

    # Gradio sliders deliver floats; cv2.medianBlur requires an integer,
    # odd kernel size, so cast first and then force oddness.
    kernel_size = int(kernel_size)
    if kernel_size % 2 == 0:
        kernel_size += 1

    result = cv2.medianBlur(image, kernel_size)

    info = f"""
### Median Filter Applied
- **Kernel Size**: {kernel_size}x{kernel_size}
- **Effect**: Replaces each pixel with median of surrounding pixels
- **Use Case**: Excellent for removing salt-and-pepper noise
"""

    return result, info
173
+
174
+
175
def apply_morphology(image, operation, kernel_size, iterations):
    """Apply a morphological operation to the (grayscale) image.

    Args:
        image: PIL image or numpy array.
        operation: One of "Erosion", "Dilation", "Opening", "Closing",
            "Gradient"; anything else is a no-op.
        kernel_size: Side length of the square structuring element.
        iterations: Repetition count ("Gradient" ignores it, as before).

    Returns:
        Tuple of (RGB result image, Markdown summary).
    """
    if isinstance(image, Image.Image):
        image = np.array(image)

    # Convert to grayscale
    if len(image.shape) == 3:
        gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    else:
        gray = image

    # Gradio sliders deliver floats; array shapes and OpenCV iteration
    # counts must be integers.
    kernel_size = int(kernel_size)
    iterations = int(iterations)

    # Square structuring element of ones.
    kernel = np.ones((kernel_size, kernel_size), np.uint8)

    # Apply operation
    if operation == "Erosion":
        result = cv2.erode(gray, kernel, iterations=iterations)
        desc = "Shrinks white regions, removes small white noise"
    elif operation == "Dilation":
        result = cv2.dilate(gray, kernel, iterations=iterations)
        desc = "Expands white regions, fills small holes"
    elif operation == "Opening":
        result = cv2.morphologyEx(gray, cv2.MORPH_OPEN, kernel, iterations=iterations)
        desc = "Erosion followed by dilation, removes small white noise"
    elif operation == "Closing":
        result = cv2.morphologyEx(gray, cv2.MORPH_CLOSE, kernel, iterations=iterations)
        desc = "Dilation followed by erosion, fills small holes"
    elif operation == "Gradient":
        # Single-pass gradient, deliberately without iterations (matches
        # the original behavior).
        result = cv2.morphologyEx(gray, cv2.MORPH_GRADIENT, kernel)
        desc = "Difference between dilation and erosion, shows outlines"
    else:
        result = gray
        desc = "No operation"

    # Convert back to RGB for display.
    result_rgb = cv2.cvtColor(result, cv2.COLOR_GRAY2RGB)

    info = f"""
### Morphological Operation: {operation}
- **Kernel Size**: {kernel_size}x{kernel_size}
- **Iterations**: {iterations}
- **Effect**: {desc}
"""

    return result_rgb, info
220
+
221
+
222
def apply_edge_detection(image, method, threshold1, threshold2):
    """Detect edges with Canny, Sobel, or Laplacian.

    Args:
        image: PIL image or numpy array.
        method: "Canny", "Sobel", or "Laplacian"; anything else is a no-op.
        threshold1: Lower hysteresis threshold (Canny only).
        threshold2: Upper hysteresis threshold (Canny only).

    Returns:
        Tuple of (RGB edge image, Markdown summary).
    """
    if isinstance(image, Image.Image):
        image = np.array(image)

    # Convert to grayscale
    if len(image.shape) == 3:
        gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    else:
        gray = image

    if method == "Canny":
        edges = cv2.Canny(gray, threshold1, threshold2)
        desc = "Multi-stage edge detection algorithm"
    elif method == "Sobel":
        sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=5)
        sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=5)
        magnitude = np.sqrt(sobelx**2 + sobely**2)
        # Guard the normalization: on a perfectly flat image the gradient
        # peak is 0 and the original `edges / edges.max()` divided by zero.
        peak = magnitude.max()
        if peak > 0:
            edges = np.uint8(magnitude / peak * 255)
        else:
            edges = np.zeros_like(gray)
        desc = "Gradient-based edge detection"
    elif method == "Laplacian":
        edges = cv2.Laplacian(gray, cv2.CV_64F)
        # Clip before the uint8 cast: a bare np.uint8() wraps magnitudes
        # above 255 (modulo 256), producing speckle artifacts.
        edges = np.uint8(np.clip(np.abs(edges), 0, 255))
        desc = "Second derivative edge detection"
    else:
        edges = gray
        desc = "No operation"

    # Convert to RGB for display.
    edges_rgb = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)

    info = f"""
### Edge Detection: {method}
- **Method**: {desc}
- **Threshold 1**: {threshold1}
- **Threshold 2**: {threshold2}
"""

    return edges_rgb, info
261
+
262
+
263
def apply_color_space(image, color_space):
    """Convert the image into the requested color space.

    Args:
        image: PIL image or numpy array (assumed RGB when 3-channel).
        color_space: "RGB", "HSV", "LAB", "Grayscale", or "YCrCb";
            anything else returns the input unchanged.

    Returns:
        Tuple of (converted image, histogram plot, Markdown summary).
    """
    if isinstance(image, Image.Image):
        image = np.array(image)

    if color_space == "RGB":
        converted = image
        desc = "Standard Red-Green-Blue color space"
    elif color_space == "HSV":
        converted = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
        desc = "Hue-Saturation-Value: Separates color from intensity"
    elif color_space == "LAB":
        converted = cv2.cvtColor(image, cv2.COLOR_RGB2LAB)
        desc = "Perceptually uniform color space"
    elif color_space == "Grayscale":
        single_channel = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
        converted = cv2.cvtColor(single_channel, cv2.COLOR_GRAY2RGB)
        desc = "Single channel intensity image"
    elif color_space == "YCrCb":
        converted = cv2.cvtColor(image, cv2.COLOR_RGB2YCrCb)
        desc = "Luma and chroma components"
    else:
        converted = image
        desc = "No conversion"

    # Plot the true single-channel data for grayscale rather than the
    # 3-channel display copy; all other spaces plot the converted image.
    hist_source = single_channel if color_space == "Grayscale" else converted
    hist = create_histogram(hist_source, f"Histogram - {color_space}")

    info = f"""
### Color Space: {color_space}
- **Description**: {desc}
"""

    return converted, hist, info
297
+
298
+
299
def apply_thresholding(image, method, threshold_value, max_value):
    """Binarize the (grayscale) image with the chosen thresholding method.

    Args:
        image: PIL image or numpy array.
        method: One of "Binary", "Binary Inverse", "Truncate", "To Zero",
            "Otsu", "Adaptive Mean", "Adaptive Gaussian"; anything else
            is a no-op.
        threshold_value: Cutoff for the simple (global) methods.
        max_value: Value assigned to pixels that pass the test.

    Returns:
        Tuple of (RGB result image, Markdown summary).
    """
    if isinstance(image, Image.Image):
        image = np.array(image)

    # Reduce color input to a single channel before thresholding.
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) if len(image.shape) == 3 else image

    # The four plain global methods differ only in the OpenCV flag, so
    # dispatch them through a table instead of repeating the call.
    simple_modes = {
        "Binary": (cv2.THRESH_BINARY,
                   "Pixels > threshold become max_value, others become 0"),
        "Binary Inverse": (cv2.THRESH_BINARY_INV,
                           "Inverse of binary threshold"),
        "Truncate": (cv2.THRESH_TRUNC,
                     "Pixels > threshold become threshold value"),
        "To Zero": (cv2.THRESH_TOZERO,
                    "Pixels < threshold become 0"),
    }

    if method in simple_modes:
        flag, desc = simple_modes[method]
        _, result = cv2.threshold(gray, threshold_value, max_value, flag)
    elif method == "Otsu":
        # Otsu picks the threshold itself, so the slider value is unused.
        _, result = cv2.threshold(gray, 0, max_value, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        desc = "Automatic threshold calculation using Otsu's method"
    elif method == "Adaptive Mean":
        result = cv2.adaptiveThreshold(gray, max_value, cv2.ADAPTIVE_THRESH_MEAN_C,
                                       cv2.THRESH_BINARY, 11, 2)
        desc = "Threshold calculated for small regions using mean"
    elif method == "Adaptive Gaussian":
        result = cv2.adaptiveThreshold(gray, max_value, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                       cv2.THRESH_BINARY, 11, 2)
        desc = "Threshold calculated for small regions using Gaussian weights"
    else:
        result = gray
        desc = "No thresholding"

    # RGB copy for display in the UI.
    result_rgb = cv2.cvtColor(result, cv2.COLOR_GRAY2RGB)

    info = f"""
### Thresholding: {method}
- **Threshold Value**: {threshold_value}
- **Max Value**: {max_value}
- **Effect**: {desc}
"""

    return result_rgb, info
348
+
349
+
350
def _brightness_label(mean_value):
    """Coarse one-word brightness bucket for a mean intensity in [0, 255]."""
    return "Dark" if mean_value < 85 else "Medium" if mean_value < 170 else "Bright"


def _contrast_label(std_value):
    """Coarse one-word contrast bucket for an intensity standard deviation."""
    return "Low" if std_value < 30 else "Medium" if std_value < 60 else "High"


def _channel_line(name, channel):
    """Markdown bullet summarizing one color channel's basic moments."""
    return (f"- **{name}**: Mean={np.mean(channel):.2f}, Std={np.std(channel):.2f}, "
            f"Min={np.min(channel)}, Max={np.max(channel)}")


def analyze_image_stats(image):
    """Return a Markdown statistical summary of the image.

    Covers dimensions, per-channel moments (for RGB input), grayscale
    moments, and coarse brightness/contrast assessments.

    The original duplicated the whole ~20-line report template across the
    RGB and grayscale branches; this version builds one template and only
    varies the RGB-specific section.
    """
    if isinstance(image, Image.Image):
        image = np.array(image)

    if len(image.shape) == 3:
        gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
        channels = cv2.split(image)
        rgb_section = f"""
**RGB Channel Statistics**:
{_channel_line('Red', channels[0])}
{_channel_line('Green', channels[1])}
{_channel_line('Blue', channels[2])}
"""
    else:
        gray = image
        rgb_section = ""

    stats = f"""
### Image Statistics

**Dimensions**: {image.shape[1]} x {image.shape[0]} pixels
{rgb_section}
**Grayscale Statistics**:
- **Mean Intensity**: {np.mean(gray):.2f}
- **Standard Deviation**: {np.std(gray):.2f}
- **Min Value**: {np.min(gray)}
- **Max Value**: {np.max(gray)}
- **Median**: {np.median(gray):.2f}

**Brightness Assessment**: {_brightness_label(np.mean(gray))}
**Contrast Assessment**: {_contrast_label(np.std(gray))}
"""

    return stats
397
+
398
+
399
# Create Gradio Interface
# Declarative UI layout: components are bound to the processing functions
# defined above. Component creation order inside each `with` context
# determines on-screen order, so statements are kept in their original order.
with gr.Blocks(title="Image Preprocessing Analyzer", theme=gr.themes.Soft()) as demo:
    # Header copy shown above all controls.
    gr.Markdown("""
# 🔬 Image Preprocessing Analyzer
### Understand Image Processing at Pixel Level

Upload an image and explore various preprocessing techniques with real-time parameter adjustments.
See histograms, pixel-level information, and understand how each filter affects your image.
""")

    # Shared input image and its histogram, visible above the tool tabs.
    with gr.Row():
        input_image = gr.Image(label="Upload Image", type="pil", height=400)
        original_hist = gr.Image(label="Original Histogram")

    with gr.Row():
        stats_output = gr.Markdown(label="Image Statistics")

    # Update stats when image is loaded. The lambda truth-tests the PIL
    # image and clears the outputs when the image is removed.
    input_image.change(
        fn=lambda img: (analyze_image_stats(img), create_histogram(np.array(img), "Original Histogram")) if img else ("No image", None),
        inputs=[input_image],
        outputs=[stats_output, original_hist]
    )

    with gr.Tabs():
        # CLAHE Tab
        with gr.TabItem("🎨 CLAHE (Contrast Enhancement)"):
            gr.Markdown("""
**CLAHE** (Contrast Limited Adaptive Histogram Equalization) enhances local contrast.
Adjust parameters to see how it affects different image regions.
""")

            with gr.Row():
                clahe_clip = gr.Slider(0.5, 10.0, value=2.0, step=0.5, label="Clip Limit")
                clahe_tile = gr.Slider(2, 32, value=8, step=2, label="Tile Size")

            clahe_btn = gr.Button("Apply CLAHE", variant="primary")

            with gr.Row():
                clahe_output = gr.Image(label="Result")
                clahe_hist_before = gr.Image(label="Histogram - Before")

            with gr.Row():
                clahe_hist_after = gr.Image(label="Histogram - After")
                clahe_info = gr.Markdown()

            clahe_btn.click(
                fn=apply_clahe,
                inputs=[input_image, clahe_clip, clahe_tile],
                outputs=[clahe_output, clahe_hist_before, clahe_hist_after, clahe_info]
            )

        # Smoothing Filters Tab: one tab hosts three filters; the radio
        # selects which one runs, and all parameter sliders stay visible.
        with gr.TabItem("🌊 Smoothing Filters"):
            filter_type = gr.Radio(
                ["Gaussian Blur", "Bilateral Filter", "Median Filter"],
                value="Gaussian Blur",
                label="Filter Type"
            )

            with gr.Row():
                with gr.Column():
                    # Gaussian parameters
                    gauss_kernel = gr.Slider(1, 31, value=5, step=2, label="Kernel Size (Gaussian)")
                    gauss_sigma = gr.Slider(0, 10, value=0, step=0.5, label="Sigma (Gaussian)")

                with gr.Column():
                    # Bilateral parameters
                    bilat_diameter = gr.Slider(1, 15, value=9, step=2, label="Diameter (Bilateral)")
                    bilat_sigma_color = gr.Slider(1, 150, value=75, step=5, label="Sigma Color (Bilateral)")
                    bilat_sigma_space = gr.Slider(1, 150, value=75, step=5, label="Sigma Space (Bilateral)")

                with gr.Column():
                    # Median parameters
                    median_kernel = gr.Slider(1, 31, value=5, step=2, label="Kernel Size (Median)")

            smooth_btn = gr.Button("Apply Filter", variant="primary")

            with gr.Row():
                smooth_output = gr.Image(label="Result")
                smooth_info = gr.Markdown()

            def apply_smoothing(image, filter_type, gk, gs, bd, bsc, bss, mk):
                """Dispatch to the filter selected in the radio group,
                forwarding only that filter's slider values."""
                if filter_type == "Gaussian Blur":
                    return apply_gaussian_blur(image, gk, gs)
                elif filter_type == "Bilateral Filter":
                    return apply_bilateral_filter(image, bd, bsc, bss)
                else:
                    return apply_median_filter(image, mk)

            smooth_btn.click(
                fn=apply_smoothing,
                inputs=[input_image, filter_type, gauss_kernel, gauss_sigma,
                        bilat_diameter, bilat_sigma_color, bilat_sigma_space, median_kernel],
                outputs=[smooth_output, smooth_info]
            )

        # Edge Detection Tab
        with gr.TabItem("📐 Edge Detection"):
            edge_method = gr.Radio(
                ["Canny", "Sobel", "Laplacian"],
                value="Canny",
                label="Edge Detection Method"
            )

            # NOTE(review): the thresholds only affect Canny; Sobel and
            # Laplacian ignore them (see apply_edge_detection).
            with gr.Row():
                edge_thresh1 = gr.Slider(0, 255, value=50, step=5, label="Threshold 1")
                edge_thresh2 = gr.Slider(0, 255, value=150, step=5, label="Threshold 2")

            edge_btn = gr.Button("Detect Edges", variant="primary")

            with gr.Row():
                edge_output = gr.Image(label="Result")
                edge_info = gr.Markdown()

            edge_btn.click(
                fn=apply_edge_detection,
                inputs=[input_image, edge_method, edge_thresh1, edge_thresh2],
                outputs=[edge_output, edge_info]
            )

        # Morphological Operations Tab
        with gr.TabItem("🔲 Morphological Operations"):
            morph_op = gr.Radio(
                ["Erosion", "Dilation", "Opening", "Closing", "Gradient"],
                value="Closing",
                label="Operation"
            )

            with gr.Row():
                morph_kernel = gr.Slider(1, 21, value=3, step=2, label="Kernel Size")
                morph_iter = gr.Slider(1, 5, value=1, step=1, label="Iterations")

            morph_btn = gr.Button("Apply Operation", variant="primary")

            with gr.Row():
                morph_output = gr.Image(label="Result")
                morph_info = gr.Markdown()

            morph_btn.click(
                fn=apply_morphology,
                inputs=[input_image, morph_op, morph_kernel, morph_iter],
                outputs=[morph_output, morph_info]
            )

        # Color Spaces Tab
        with gr.TabItem("🎨 Color Spaces"):
            color_space = gr.Radio(
                ["RGB", "HSV", "LAB", "YCrCb", "Grayscale"],
                value="RGB",
                label="Color Space"
            )

            color_btn = gr.Button("Convert Color Space", variant="primary")

            with gr.Row():
                color_output = gr.Image(label="Result")
                color_hist = gr.Image(label="Histogram")

            color_info = gr.Markdown()

            color_btn.click(
                fn=apply_color_space,
                inputs=[input_image, color_space],
                outputs=[color_output, color_hist, color_info]
            )

        # Thresholding Tab
        with gr.TabItem("⚫⚪ Thresholding"):
            thresh_method = gr.Radio(
                ["Binary", "Binary Inverse", "Truncate", "To Zero", "Otsu",
                 "Adaptive Mean", "Adaptive Gaussian"],
                value="Binary",
                label="Thresholding Method"
            )

            # NOTE(review): the threshold slider is unused by Otsu and the
            # adaptive methods (see apply_thresholding).
            with gr.Row():
                thresh_value = gr.Slider(0, 255, value=127, step=1, label="Threshold Value")
                thresh_max = gr.Slider(0, 255, value=255, step=1, label="Max Value")

            thresh_btn = gr.Button("Apply Threshold", variant="primary")

            with gr.Row():
                thresh_output = gr.Image(label="Result")
                thresh_info = gr.Markdown()

            thresh_btn.click(
                fn=apply_thresholding,
                inputs=[input_image, thresh_method, thresh_value, thresh_max],
                outputs=[thresh_output, thresh_info]
            )

    # Documentation: static reference text, collapsed by default.
    with gr.Accordion("📚 Filter Documentation", open=False):
        gr.Markdown("""
### Filter Explanations

#### CLAHE (Contrast Limited Adaptive Histogram Equalization)
- **Purpose**: Enhance local contrast in images
- **How it works**: Divides image into tiles and equalizes histogram in each tile
- **Clip Limit**: Controls contrast enhancement (higher = more enhancement)
- **Tile Size**: Size of local regions (smaller = more local adaptation)
- **Use Case**: Medical imaging, underwater images, shadowed regions

#### Gaussian Blur
- **Purpose**: Smooth images and reduce noise
- **How it works**: Weighted average of neighboring pixels using Gaussian function
- **Kernel Size**: Larger = more blur
- **Sigma**: Standard deviation of Gaussian (0 = auto-calculated)
- **Use Case**: Preprocessing for edge detection, noise reduction

#### Bilateral Filter
- **Purpose**: Edge-preserving smoothing
- **How it works**: Averages pixels but preserves edges by considering both spatial and color distance
- **Diameter**: Size of pixel neighborhood
- **Sigma Color**: How much color difference matters
- **Sigma Space**: How much spatial distance matters
- **Use Case**: Noise reduction while keeping edges sharp

#### Median Filter
- **Purpose**: Remove salt-and-pepper noise
- **How it works**: Replaces each pixel with median of surrounding pixels
- **Kernel Size**: Size of neighborhood
- **Use Case**: Impulse noise removal

#### Morphological Operations
- **Erosion**: Shrinks white regions, removes small noise
- **Dilation**: Expands white regions, fills small holes
- **Opening**: Erosion then dilation, removes small objects
- **Closing**: Dilation then erosion, fills small holes
- **Gradient**: Difference between dilation and erosion, shows boundaries

#### Edge Detection
- **Canny**: Multi-stage algorithm, best overall edge detector
- **Sobel**: Gradient-based, sensitive to horizontal/vertical edges
- **Laplacian**: Second derivative, sensitive to rapid intensity changes

#### Thresholding
- **Binary**: Simple cutoff threshold
- **Otsu**: Automatically finds optimal threshold
- **Adaptive**: Different thresholds for different regions
""")

# Launch the app only when executed as a script (not when imported).
if __name__ == "__main__":
    demo.launch()