AdarshDRC committed on
Commit
4d03437
·
1 Parent(s): e5a7bbe

fix: face not detecting

Browse files
Files changed (1) hide show
  1. src/models.py +20 -18
src/models.py CHANGED
@@ -109,23 +109,30 @@ class AIModelManager:
109
 
110
  # ── Face Lane: InsightFace (YuNet + ArcFace) ─────────────
111
  self.face_app = None
 
112
  if INSIGHTFACE_AVAILABLE:
113
  try:
114
- # buffalo_sc = small+fast model (CPU optimised)
115
- # buffalo_l = large+accurate (use if GPU available)
116
  model_name = "buffalo_l" if self.device == "cuda" else "buffalo_sc"
 
117
  self.face_app = FaceAnalysis(name=model_name)
118
- # det_size controls detection resolution — larger = finds smaller faces
119
  self.face_app.prepare(
120
  ctx_id=0 if self.device == "cuda" else -1,
121
  det_size=(640, 640),
122
  )
123
- print(f"✅ InsightFace ({model_name}) loaded — ArcFace face lane active")
 
 
 
 
124
  except Exception as e:
125
- print(f"⚠️ InsightFace init failed: {e} — face lane disabled")
 
 
126
  self.face_app = None
127
  else:
128
- print("⚠️ InsightFace not available — install: pip install insightface onnxruntime")
129
 
130
  self._cache = {}
131
  self._cache_maxsize = 128
@@ -176,9 +183,11 @@ class AIModelManager:
176
  }
177
  """
178
  if self.face_app is None:
 
179
  return []
180
 
181
  try:
 
182
  # InsightFace expects BGR numpy array
183
  if img_np.shape[2] == 3 and img_np.dtype == np.uint8:
184
  bgr = img_np[..., ::-1].copy() # RGB → BGR
@@ -187,6 +196,7 @@ class AIModelManager:
187
 
188
  with self._face_lock:
189
  faces = self.face_app.get(bgr)
 
190
  results = []
191
 
192
  for idx, face in enumerate(faces):
@@ -281,23 +291,16 @@ class AIModelManager:
281
  extracted.append(fr)
282
 
283
  # ── OBJECT LANE ──────────────────────────────────────────
284
- # ALWAYS runs — stores full image + YOLO crops in enterprise-objects
285
- # This is critical: even when faces found, the full image must be
286
- # stored in enterprise-objects so face search can retrieve it!
287
- #
288
- # Strategy (matches cloud_db.py):
289
- # - full image ALWAYS included as first crop
290
- # - YOLO person crops SKIPPED when faces found (avoid duplication)
291
- # - other object crops always included
292
- crops_pil = [_resize_pil(original_pil, MAX_IMAGE_SIZE)] # ALWAYS include full image
293
  yolo_results = self.yolo(image_path, conf=0.5, verbose=False)
294
 
295
  for r in yolo_results:
296
  if r.masks is not None:
297
  for seg_idx, mask_xy in enumerate(r.masks.xy):
298
  cls_id = int(r.boxes.cls[seg_idx].item())
299
- # Skip YOLO person crops when face lane handled faces
300
- # (avoids storing low-quality person crops redundantly)
301
  if faces_found and cls_id == YOLO_PERSON_CLASS_ID:
302
  print("🔵 PERSON crop skipped — face lane active")
303
  continue
@@ -324,7 +327,6 @@ class AIModelManager:
324
  if len(crops_pil) >= MAX_CROPS + 1:
325
  break
326
 
327
- # Embed all crops — full image is ALWAYS first in the list
328
  crops = [_resize_pil(c, MAX_IMAGE_SIZE) for c in crops_pil]
329
  print(f"🧠 Embedding {len(crops)} object crop(s) in one batch …")
330
  obj_vecs = self._embed_crops_batch(crops)
 
109
 
110
  # ── Face Lane: InsightFace (YuNet + ArcFace) ─────────────
111
  self.face_app = None
112
+ print(f"🔍 INSIGHTFACE_AVAILABLE = {INSIGHTFACE_AVAILABLE}")
113
  if INSIGHTFACE_AVAILABLE:
114
  try:
115
+ import insightface
116
+ print(f"🔍 InsightFace version: {insightface.__version__}")
117
  model_name = "buffalo_l" if self.device == "cuda" else "buffalo_sc"
118
+ print(f"🔍 Loading InsightFace model: {model_name}")
119
  self.face_app = FaceAnalysis(name=model_name)
 
120
  self.face_app.prepare(
121
  ctx_id=0 if self.device == "cuda" else -1,
122
  det_size=(640, 640),
123
  )
124
+ # Test with a blank image to confirm models loaded
125
+ import numpy as _np
126
+ test_img = _np.zeros((112, 112, 3), dtype=_np.uint8)
127
+ _ = self.face_app.get(test_img)
128
+ print(f"✅ InsightFace ({model_name}) loaded — ArcFace face lane ACTIVE")
129
  except Exception as e:
130
+ import traceback
131
+ print(f"❌ InsightFace init FAILED: {e}")
132
+ print(traceback.format_exc())
133
  self.face_app = None
134
  else:
135
+ print("InsightFace NOT installed — run: pip install insightface onnxruntime")
136
 
137
  self._cache = {}
138
  self._cache_maxsize = 128
 
183
  }
184
  """
185
  if self.face_app is None:
186
+ print("⚠️ face_app is None — InsightFace not loaded!")
187
  return []
188
 
189
  try:
190
+ print(f"🔍 Running InsightFace on image shape: {img_np.shape}")
191
  # InsightFace expects BGR numpy array
192
  if img_np.shape[2] == 3 and img_np.dtype == np.uint8:
193
  bgr = img_np[..., ::-1].copy() # RGB → BGR
 
196
 
197
  with self._face_lock:
198
  faces = self.face_app.get(bgr)
199
+ print(f"🔍 InsightFace raw detection: {len(faces)} faces found")
200
  results = []
201
 
202
  for idx, face in enumerate(faces):
 
291
  extracted.append(fr)
292
 
293
  # ── OBJECT LANE ──────────────────────────────────────────
294
+ # Always run object lane even if faces found
295
+ # (image may contain both people and objects)
296
+ crops_pil = [_resize_pil(original_pil, MAX_IMAGE_SIZE)] # full-image always
 
 
 
 
 
 
297
  yolo_results = self.yolo(image_path, conf=0.5, verbose=False)
298
 
299
  for r in yolo_results:
300
  if r.masks is not None:
301
  for seg_idx, mask_xy in enumerate(r.masks.xy):
302
  cls_id = int(r.boxes.cls[seg_idx].item())
303
+ # Skip person crops if face lane already handled them
 
304
  if faces_found and cls_id == YOLO_PERSON_CLASS_ID:
305
  print("🔵 PERSON crop skipped — face lane active")
306
  continue
 
327
  if len(crops_pil) >= MAX_CROPS + 1:
328
  break
329
 
 
330
  crops = [_resize_pil(c, MAX_IMAGE_SIZE) for c in crops_pil]
331
  print(f"🧠 Embedding {len(crops)} object crop(s) in one batch …")
332
  obj_vecs = self._embed_crops_batch(crops)