""" FomoFeed - Timing Optimizer AI v3 Ensemble: Circular KDE + Bayesian Prior + Recency Decay + Day-Aware No third-party API needed — pure scikit-learn + numpy """ from fastapi import FastAPI, HTTPException from pydantic import BaseModel import numpy as np from datetime import datetime, timedelta from scipy.stats import vonmises # Circular distribution from sklearn.neighbors import KernelDensity import uvicorn import math app = FastAPI(title="FomoFeed Timing Optimizer", version="3.0.0") # ════════════════════════════════════════════════════ # MODELS # ════════════════════════════════════════════════════ class UserEngagementHistory(BaseModel): user_id: int engagement_hours: list[int] # Saat (0-23) engagement_weights: list[float] # Ağırlık (view=1, like=3, comment=5, save=7) engagement_days: list[int] = [] # Haftanın günü (0=Pazartesi, 6=Pazar) - opsiyonel engagement_ages: list[float] = [] # Kaç gün önce oldu - opsiyonel content_type: str = "post" timezone_offset: int = 3 clip_score: float = 0.0 has_video: bool = False class TimingRecommendation(BaseModel): optimal_hour: int confidence: float alternative_hours: list[int] reasoning: dict # ════════════════════════════════════════════════════ # GLOBAL PRIOR (Tüm sosyal medya platformlarının ortalaması) # Yeni kullanıcılar için başlangıç bilgisi # ════════════════════════════════════════════════════ GLOBAL_PRIOR = np.array([ 0.02, 0.01, 0.01, 0.01, 0.01, 0.02, # 00-05: Gece (düşük) 0.03, 0.05, 0.06, 0.05, 0.04, 0.04, # 06-11: Sabah (yükseliş) 0.07, 0.07, 0.05, 0.04, 0.04, 0.05, # 12-17: Öğle-Akşamüstü 0.07, 0.08, 0.08, 0.07, 0.05, 0.03, # 18-23: Akşam (zirve) ]) GLOBAL_PRIOR = GLOBAL_PRIOR / GLOBAL_PRIOR.sum() # Normalize # Hafta sonu farklı pattern WEEKEND_BOOST = np.array([ 1.3, 1.2, 1.1, 1.0, 1.0, 0.9, # 00-05: Gece geç yatma 0.7, 0.6, 0.7, 0.9, 1.1, 1.2, # 06-11: Geç kalkma 1.2, 1.2, 1.1, 1.1, 1.0, 1.0, # 12-17: Gündüz aktif 1.0, 1.0, 1.1, 1.2, 1.3, 1.4, # 18-23: Akşam daha aktif ]) # ════════════════════════════════════════════════════ # CORE ALGORITHM # ════════════════════════════════════════════════════ def hour_to_radian(hour: int) -> float: """Saati radyana çevir (circular representation)""" return (hour / 24.0) * 2 * math.pi def radian_to_hour(rad: float) -> int: """Radyanı saate çevir""" hour = (rad / (2 * math.pi)) * 24 return int(round(hour)) % 24 def recency_weight(age_days: float, half_life: float = 14.0) -> float: """ Exponential decay: 14 gün önce olan veri yarı ağırlık alır Yeni veri daha değerli """ return math.exp(-0.693 * age_days / half_life) def circular_kde_scores(hours: list[float], weights: list[float]) -> np.ndarray: """Von Mises KDE — circular data için doğru yöntem""" if len(hours) < 2: return GLOBAL_PRIOR.copy() radians = np.array([hour_to_radian(h) for h in hours]) expanded = [] for rad, w in zip(radians, weights): expanded.extend([rad] * max(1, int(round(w)))) expanded = np.array(expanded) n = len(expanded) kappa = max(1.0, min(8.0, 0.5 * math.sqrt(n))) test_hours = np.linspace(0, 2 * math.pi, 24, endpoint=False) scores = np.zeros(24) for obs in expanded: scores += vonmises.pdf(test_hours, kappa, loc=obs) if scores.sum() > 0: scores /= scores.sum() return scores def bayesian_update(prior: np.ndarray, likelihood: np.ndarray, data_strength: float) -> np.ndarray: """ Bayesian güncelleme: - Az veri → prior'a (genel pattern) yakın - Çok veri → likelihood'a (kullanıcı pattern) yakın data_strength: 0-1 arası, verinin güvenilirliği """ # Weighted geometric mean posterior = prior ** (1 - data_strength) * likelihood ** 
def bayesian_update(prior: np.ndarray, likelihood: np.ndarray, data_strength: float) -> np.ndarray:
    """
    Bayesian update:
    - Little data → stay close to the prior (general pattern)
    - Lots of data → move toward the likelihood (user pattern)
    data_strength: between 0 and 1, how trustworthy the user data is
    """
    # Weighted geometric mean
    posterior = prior ** (1 - data_strength) * likelihood ** data_strength
    if posterior.sum() > 0:
        posterior = posterior / posterior.sum()
    else:
        return prior
    return posterior


def calculate_confidence(n_events: int, consistency: float, has_boost: bool = False) -> float:
    """
    Compute a confidence score from:
    - Data volume (more data = higher confidence)
    - Consistency (concentration on specific hours = higher confidence)
    - Boost factors
    """
    # Data-volume factor (logarithmic growth)
    data_factor = min(0.5, 0.1 * math.log2(max(1, n_events)))
    # Consistency factor (entropy-based)
    consistency_factor = consistency * 0.35
    # Boost
    boost = 0.05 if has_boost else 0
    # Total
    confidence = min(0.95, 0.25 + data_factor + consistency_factor + boost)
    return round(confidence, 2)


def calculate_consistency(scores: np.ndarray) -> float:
    """
    Measure how concentrated the distribution is.
    Low entropy → concentrated on specific hours → consistent.
    """
    # Normalized entropy (0 = a single hour, 1 = completely flat)
    scores_clean = scores[scores > 0]
    if len(scores_clean) <= 1:
        return 1.0
    entropy = -np.sum(scores_clean * np.log2(scores_clean))
    max_entropy = math.log2(24)
    # Invert: low entropy = high consistency
    consistency = 1 - (entropy / max_entropy)
    return max(0, min(1, consistency))


# ════════════════════════════════════════════════════
# MAIN PREDICTION
# ════════════════════════════════════════════════════

def calculate_optimal_time(history: UserEngagementHistory) -> dict:
    """
    Ensemble prediction:
    1. Extract the user's pattern with a circular KDE
    2. Apply recency decay (recent data is more valuable)
    3. Separate weekday vs weekend behaviour
    4. Blend with the global prior via a Bayesian update
    5. Apply content-type and CLIP/video boosts
    """
    n_events = len(history.engagement_hours)

    # ── COLD START ──
    if n_events < 3:
        # Too little data, fall back to the prior
        is_weekend = datetime.now().weekday() >= 5
        prior = GLOBAL_PRIOR.copy()
        if is_weekend:
            prior *= WEEKEND_BOOST
            prior /= prior.sum()
        optimal = int(np.argmax(prior))
        sorted_hours = np.argsort(-prior)
        alternatives = [int(h) for h in sorted_hours[1:5] if h != optimal]
        return {
            "optimal_hour": optimal,
            "confidence": 0.25,
            "alternative_hours": alternatives[:4],
            "reasoning": {
                "method": "global_prior",
                "note": f"Only {n_events} data points, using platform average",
                "is_weekend": is_weekend,
                "data_points": n_events
            }
        }

    # ── RECENCY DECAY ──
    weighted_hours = []
    weighted_weights = []
    if history.engagement_ages and len(history.engagement_ages) == n_events:
        for hour, weight, age in zip(
            history.engagement_hours, history.engagement_weights, history.engagement_ages
        ):
            decay = recency_weight(age)
            weighted_hours.append(hour)
            weighted_weights.append(weight * decay)
    else:
        weighted_hours = history.engagement_hours
        weighted_weights = history.engagement_weights

    # ── CIRCULAR KDE ──
    user_pattern = circular_kde_scores(weighted_hours, weighted_weights)

    # ── DAY-OF-WEEK ADJUSTMENT ──
    is_weekend = datetime.now().weekday() >= 5
    if history.engagement_days and len(history.engagement_days) == n_events:
        # Separate the user's weekday vs weekend pattern
        if is_weekend:
            day_hours = [h for h, d in zip(weighted_hours, history.engagement_days) if d >= 5]
            day_weights = [w for w, d in zip(weighted_weights, history.engagement_days) if d >= 5]
        else:
            day_hours = [h for h, d in zip(weighted_hours, history.engagement_days) if d < 5]
            day_weights = [w for w, d in zip(weighted_weights, history.engagement_days) if d < 5]
        if len(day_hours) >= 3:
            user_pattern = circular_kde_scores(day_hours, day_weights)
    elif is_weekend:
        # No day-of-week data: apply the generic weekend boost
        user_pattern *= WEEKEND_BOOST
        user_pattern /= user_pattern.sum()
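
    # Log-linear pooling note (illustrative numbers, not measured from traffic):
    # the update below computes posterior[h] ∝ prior[h]**(1 - s) * user_pattern[h]**s
    # with s = data_strength. At 30 events s = 0.3, so the platform-wide prior
    # still carries most of the weight; from 90 events onward s is capped at 0.9
    # and the user's own pattern dominates.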
    # ── BAYESIAN UPDATE ──
    # Balance between prior and user data, driven by data volume
    data_strength = min(0.9, n_events / 100)  # 90% user data at 100 events
    prior = GLOBAL_PRIOR.copy()
    if is_weekend:
        prior *= WEEKEND_BOOST
        prior /= prior.sum()
    final_scores = bayesian_update(prior, user_pattern, data_strength)

    # ── CONTENT-TYPE & CLIP/VIDEO BOOST ──
    boost_applied = False
    if history.clip_score > 5:
        # High-quality visual → prime-time bonus
        prime_mask = np.ones(24)
        for h in [12, 13, 18, 19, 20, 21]:
            prime_mask[h] = 1 + (history.clip_score / 10) * 0.2
        final_scores *= prime_mask
        final_scores /= final_scores.sum()
        boost_applied = True

    if history.has_video:
        # Video → evening-hours bonus
        video_mask = np.ones(24)
        for h in [18, 19, 20, 21, 22]:
            video_mask[h] = 1.15
        final_scores *= video_mask
        final_scores /= final_scores.sum()
        boost_applied = True

    if history.content_type == "moment":
        # Moment → morning and noon bonus
        moment_mask = np.ones(24)
        for h in [7, 8, 9, 12, 13]:
            moment_mask[h] = 1.1
        final_scores *= moment_mask
        final_scores /= final_scores.sum()

    # ── RESULT ──
    optimal_hour = int(np.argmax(final_scores))

    # Consistency and confidence
    consistency = calculate_consistency(final_scores)
    confidence = calculate_confidence(n_events, consistency, boost_applied)

    # Alternative hours
    sorted_hours = np.argsort(-final_scores)
    alternatives = [int(h) for h in sorted_hours[1:5] if h != optimal_hour]

    # Top 3 hours and their scores (for debugging)
    top3 = [
        (int(sorted_hours[i]), round(float(final_scores[sorted_hours[i]]), 4))
        for i in range(min(3, len(sorted_hours)))
    ]

    return {
        "optimal_hour": optimal_hour,
        "confidence": confidence,
        "alternative_hours": alternatives[:4],
        "reasoning": {
            "method": "ensemble_v3",
            "components": ["circular_kde", "bayesian_prior", "recency_decay", "day_aware"],
            "data_points": n_events,
            "data_strength": round(data_strength, 2),
            "consistency": round(consistency, 2),
            "is_weekend": is_weekend,
            "clip_boost": history.clip_score > 5,
            "video_boost": history.has_video,
            "content_type": history.content_type,
            "top_hours": top3
        }
    }
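
# Illustrative sketch (values invented for demonstration, not real user data):
# a user who mostly engages in the evening should come out with an evening
# optimal_hour once there is enough history for the ensemble path.
def _demo_optimal_time() -> dict:
    demo_history = UserEngagementHistory(
        user_id=0,
        engagement_hours=[20, 21, 21, 22, 20, 9, 19, 21],
        engagement_weights=[1, 3, 5, 1, 3, 1, 1, 7],
    )
    return calculate_optimal_time(demo_history)
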
# ════════════════════════════════════════════════════
# NEXT OPPORTUNITIES
# ════════════════════════════════════════════════════

def calculate_next_optimal_times(history: UserEngagementHistory, count: int = 3) -> list[dict]:
    """Compute the next N optimal posting times (within the next 48 hours)."""
    result = calculate_optimal_time(history)
    optimal = result["optimal_hour"]
    alts = set(result["alternative_hours"])

    now = datetime.now()
    opportunities = []
    for hours_ahead in range(2, 48):
        future = now + timedelta(hours=hours_ahead)
        h = future.hour
        if h == optimal:
            score = 100
        elif h in alts:
            score = 75
        else:
            # Hours outside the top picks get a flat fallback score
            score = 30

        # Prime-time bonus
        if h in [12, 13, 18, 19, 20, 21]:
            score = min(100, score + 10)

        opportunities.append({
            "datetime": future.isoformat(),
            "hour": h,
            "day": future.strftime("%A"),
            "score": score,
            "hours_from_now": hours_ahead,
            "is_optimal": h == optimal
        })

    opportunities.sort(key=lambda x: x["score"], reverse=True)
    return opportunities[:count]


# ════════════════════════════════════════════════════
# API ENDPOINTS
# ════════════════════════════════════════════════════

@app.get("/")
def root():
    return {
        "service": "FomoFeed Timing Optimizer",
        "status": "active",
        "version": "3.0.0",
        "method": "ensemble",
        "components": [
            "circular_kde", "bayesian_prior", "recency_decay",
            "day_aware", "content_boost"
        ]
    }


@app.get("/health")
def health():
    return {
        "status": "healthy",
        "timestamp": datetime.now().isoformat()
    }


@app.post("/predict", response_model=TimingRecommendation)
def predict_optimal_time(history: UserEngagementHistory):
    try:
        result = calculate_optimal_time(history)
        return TimingRecommendation(**result)
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))


@app.post("/next_opportunities")
def get_next_opportunities(history: UserEngagementHistory, count: int = 3):
    try:
        opportunities = calculate_next_optimal_times(history, count)
        return {
            "opportunities": opportunities,
            "generated_at": datetime.now().isoformat()
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))


@app.post("/batch_predict")
def batch_predict(histories: list[UserEngagementHistory]):
    """For batch calls from the PHP pn_decision_engine."""
    results = []
    for h in histories:
        try:
            r = calculate_optimal_time(h)
            results.append({"user_id": h.user_id, **r})
        except Exception as e:
            results.append({"user_id": h.user_id, "error": str(e),
                            "optimal_hour": 19, "confidence": 0.25})
    return {"predictions": results}


if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)
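
# Example request (illustrative; assumes the service was started via the
# __main__ block above, so it listens on http://localhost:7860):
#
#   curl -X POST http://localhost:7860/predict \
#        -H "Content-Type: application/json" \
#        -d '{"user_id": 1,
#             "engagement_hours": [20, 21, 21, 22, 9, 19],
#             "engagement_weights": [1, 3, 5, 1, 1, 7]}'
#
# The response mirrors TimingRecommendation: optimal_hour, confidence,
# alternative_hours, and a reasoning dict listing the ensemble components used.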