| """
|
| XGuard Gradio 应用 - 图片与文本风险检测
|
|
|
| 双模型流水线:
|
| 1. Qwen3-VL: 视觉语言模型,用于图片内容描述(支持在线 API / 本地推理)
|
| 2. YuFeng-XGuard-Reason-0.6B: 安全检测模型,用于风险归因分析
|
|
|
| 启动方式:
|
| python app.py
|
| """
|
|
|
| import os
|
| import re
|
| import time
|
| from PIL import Image as PILImage
|
| import gradio as gr
|
| from config import load_config
|
| from model import VisionLanguageModel, XGuardModel
|
|
|
|
|
|
|
|
|
| config = load_config()
|
| vl_model: VisionLanguageModel = None
|
| xguard_model: XGuardModel = None
|
|
|
|
|
| VL_MODE_API = "在线 API (DashScope)"
|
| VL_MODE_LOCAL = "本地模型"
|
|
|
|
|
|
|
|
|
|
|
|
|
| RISK_NAME_ZH = {
|
| "Safe-Safe": "安全",
|
|
|
| "Crimes and Illegal Activities-Pornographic Contraband": "犯罪与违法活动 - 色情违禁",
|
| "Crimes and Illegal Activities-Drug Crimes": "犯罪与违法活动 - 毒品犯罪",
|
| "Crimes and Illegal Activities-Dangerous Weapons": "犯罪与违法活动 - 危险武器",
|
| "Crimes and Illegal Activities-Property Infringement": "犯罪与违法活动 - 财产侵害",
|
| "Crimes and Illegal Activities-Economic Crimes": "犯罪与违法活动 - 经济犯罪",
|
|
|
| "Hate Speech-Abusive Curses": "仇恨言论 - 辱骂诅咒",
|
| "Hate Speech-Defamation": "仇恨言论 - 诽谤造谣",
|
| "Hate Speech-Threats and Intimidation": "仇恨言论 - 威胁恐吓",
|
| "Hate Speech-Cyberbullying": "仇恨言论 - 网络霸凌",
|
|
|
| "Physical and Mental Health-Physical Health": "身心健康 - 身体健康",
|
| "Physical and Mental Health-Mental Health": "身心健康 - 心理健康",
|
|
|
| "Ethics and Morality-Social Ethics": "伦理道德 - 社会伦理",
|
| "Ethics and Morality-Science Ethics": "伦理道德 - 科学伦理",
|
|
|
| "Data Privacy-Personal Privacy": "数据隐私 - 个人隐私",
|
| "Data Privacy-Commercial Secret": "数据隐私 - 商业秘密",
|
|
|
| "Cybersecurity-Access Control": "网络安全 - 访问控制",
|
| "Cybersecurity-Malicious Code": "网络安全 - 恶意代码",
|
| "Cybersecurity-Hacker Attack": "网络安全 - 黑客攻击",
|
| "Cybersecurity-Physical Security": "网络安全 - 物理安全",
|
|
|
| "Extremism-Violent Terrorist Activities": "极端主义 - 暴力恐怖活动",
|
| "Extremism-Social Disruption": "极端主义 - 社会破坏",
|
| "Extremism-Extremist Ideological Trends": "极端主义 - 极端思想",
|
|
|
| "Inappropriate Suggestions-Finance": "不当建议 - 金融",
|
| "Inappropriate Suggestions-Medicine": "不当建议 - 医疗",
|
| "Inappropriate Suggestions-Law": "不当建议 - 法律",
|
|
|
| "Risks Involving Minors-Corruption of Minors": "未成年人风险 - 腐蚀未成年人",
|
| "Risks Involving Minors-Minor Abuse and Exploitation": "未成年人风险 - 虐待与剥削",
|
| "Risks Involving Minors-Minor Delinquency": "未成年人风险 - 未成年人犯罪",
|
| }
|
|
|
|
|
| RISK_LEVELS = {
|
| "high": {"label": "高风险", "color": "#dc2626", "bg": "#fef2f2", "border": "#fca5a5"},
|
| "medium": {"label": "中风险", "color": "#d97706", "bg": "#fffbeb", "border": "#fcd34d"},
|
| "low": {"label": "低风险", "color": "#ca8a04", "bg": "#fefce8", "border": "#fde047"},
|
| "safe": {"label": "安全", "color": "#16a34a", "bg": "#f0fdf4", "border": "#86efac"},
|
| }
|
|
|
|
|
|
|
|
|
|
|
| SCENE_PROMPTS = {
|
| "通用图文检测(默认)": "",
|
| "社交表情包/梗图": (
|
| "这是一张社交平台图片(可能是表情包、梗图或配文图片)。"
|
| "请仅提取事实内容,不要做风险判断:\n\n"
|
| "【图片文字】完整提取图中所有文字、对话内容、标语口号,保持原文。\n\n"
|
| "【视觉元素】描述人物表情、手势、动作、场景布置、符号标志等。\n\n"
|
| "【内容类型】判断这是什么类型的社交图片(表情包/梗图/配文图等)。"
|
| ),
|
| "电商商品图文": (
|
| "这是一张电商平台商品图片。"
|
| "请仅提取事实内容,不要做合规判断:\n\n"
|
| "【商品文字】提取图中所有文字,包括商品名称、功效宣称、价格信息、"
|
| "促销语、成分说明等,保持原文。\n\n"
|
| "【商品视觉】描述商品外观、包装设计、使用场景展示等视觉内容。\n\n"
|
| "【内容类型】判断商品类别(如食品、药品、化妆品、电子产品等)。"
|
| ),
|
| "聊天记录截图": (
|
| "这是一张聊天记录截图。"
|
| "请仅提取事实内容,不要做风险判断或总结:\n\n"
|
| "【对话内容】完整提取截图中的所有对话文字,"
|
| "标注发送者身份(如'对方'、'用户'),保持原文。\n\n"
|
| ),
|
| "广告/营销内容": (
|
| "这是一张广告或营销推广图片。"
|
| "请仅提取事实内容,不要做合规判断:\n\n"
|
| "【广告文案】完整提取图中的广告语、宣传标语、联系方式、"
|
| "二维码信息等文字内容,保持原文。\n\n"
|
| "【内容类型】判断广告类型(如医疗广告、金融广告、招聘广告等)。"
|
| ),
|
| }
|
|
|
|
|
| SCENE_CHOICES = list(SCENE_PROMPTS.keys())
|
|
|
|
|
|
|
|
|
|
|
|
|
| _ANALYSIS_SECTIONS = {
|
| '图文关系', '对话主题', '风险要素', '合规风险',
|
| '综合判定', '表达意图', '宣传手法',
|
| }
|
|
|
| def extract_core_content(description: str) -> str:
|
| """
|
| 从 VL 模型的结构化描述中提取原始内容,用于 XGuard 风险检测。
|
|
|
| 核心目标:去除所有"报告框架",让 XGuard 直接看到原始文本内容。
|
|
|
| XGuard 是 AI 对话安全护栏模型,它会判断"用户/AI 说了什么"是否有害。
|
| 如果输入像一份"关于风险内容的分析报告",XGuard 会认为这是安全的分析行为。
|
| 因此必须去掉三层报告框架:
|
| 1. 分析性段落(【对话主题】【风险要素】等)→ VL 的主观判断
|
| 2. 结构标记(【对话内容】【界面信息】等标题)→ 报告格式
|
| 3. 元数据(发送者标签、UI 描述)→ 第三方转述语气
|
|
|
| 处理后 XGuard 看到的应该是接近原始的文本内容。
|
| """
|
| if not description or not description.strip():
|
| return description
|
|
|
|
|
| parts = re.split(r'(【[^】]+】)', description)
|
|
|
|
|
| if len(parts) < 3:
|
|
|
| return description
|
|
|
|
|
| _CONTENT_SECTIONS = {
|
| '图片文字', '对话内容', '视觉内容', '视觉元素',
|
| '商品文字', '商品视觉', '广告文案', '视觉设计',
|
| }
|
|
|
| _DROP_SECTIONS = _ANALYSIS_SECTIONS | {'界面信息', '内容类型'}
|
|
|
| content_parts = []
|
|
|
|
|
| leading = parts[0].strip()
|
| if leading:
|
| content_parts.append(leading)
|
|
|
|
|
| i = 1
|
| while i < len(parts):
|
| title = parts[i].strip('【】 ')
|
| body = parts[i + 1].strip() if i + 1 < len(parts) else ""
|
| i += 2
|
|
|
| if not body:
|
| continue
|
| if title in _DROP_SECTIONS:
|
| continue
|
| if title in _CONTENT_SECTIONS or title not in _DROP_SECTIONS:
|
| content_parts.append(body)
|
|
|
| if not content_parts:
|
| return description
|
|
|
| text = "\n\n".join(content_parts)
|
|
|
|
|
|
|
| text = re.sub(
|
| r'^[\s\-]*(?:对方|用户|发送者[^::\n]*)[::]\s*',
|
| '', text, flags=re.MULTILINE
|
| )
|
|
|
|
|
| text = re.sub(r'^[\s]*[-*]\s+', '', text, flags=re.MULTILINE)
|
|
|
|
|
| half = len(text) // 2
|
| if half > 100 and text[:half].strip() == text[half:].strip():
|
| text = text[:half].strip()
|
|
|
|
|
| text = re.sub(r'\n{3,}', '\n\n', text).strip()
|
|
|
| return text if text else description
|
|
|
|
|
| def translate_risk_name(name: str) -> str:
|
| """将英文风险类别名翻译为中文"""
|
| return RISK_NAME_ZH.get(name, name)
|
|
|
|
|
| def risk_level_icon(prob: float) -> str:
|
| """根据风险概率返回等级标识"""
|
| if prob >= 0.5:
|
| return "🔴 高风险"
|
| elif prob >= 0.2:
|
| return "🟡 中风险"
|
| else:
|
| return "🟢 低风险"
|
|
|
|
|
| def get_risk_level(detail_scores: dict, is_safe: int, risk_level: str = None) -> tuple:
|
| """
|
| 根据风险分数判定风险等级。
|
|
|
| 优先使用 model.analyze 返回的 risk_level(argmax + 置信度分级),
|
| 若未提供则基于 argmax + 置信度门控自行计算(兼容旧接口)。
|
|
|
| 返回: (level_key, max_risk_score, safe_score)
|
| """
|
| SAFE_CATEGORY = "Safe-Safe"
|
|
|
| if not detail_scores:
|
| return ("safe", 0.0, 1.0) if is_safe == 1 else ("medium", 0.3, 0.0)
|
|
|
| risk_only = {k: v for k, v in detail_scores.items() if k != SAFE_CATEGORY}
|
| max_score = max(risk_only.values()) if risk_only else 0.0
|
| safe_score = detail_scores.get(SAFE_CATEGORY, 0.0)
|
|
|
|
|
| if risk_level and risk_level in ("safe", "high", "medium", "low"):
|
| return risk_level, max_score, safe_score
|
|
|
|
|
| if safe_score >= max_score and safe_score >= 0.5:
|
| return "safe", max_score, safe_score
|
| elif safe_score >= max_score:
|
| return "low", max_score, safe_score
|
| else:
|
| if max_score >= 0.5:
|
| return "high", max_score, safe_score
|
| elif max_score >= 0.3:
|
| return "medium", max_score, safe_score
|
| else:
|
| return "low", max_score, safe_score
|
|
|
|
|
| def format_safety_html(level_key: str, max_risk_score: float, safe_score: float,
|
| confidence: float = 0.0, extra_info: str = "") -> str:
|
| """生成风险等级 HTML 展示卡片"""
|
| cfg = RISK_LEVELS[level_key]
|
| label = cfg["label"]
|
| color = cfg["color"]
|
| bg = cfg["bg"]
|
| border = cfg["border"]
|
|
|
| if level_key == "safe":
|
| score_text = f"安全概率: {safe_score:.2%}"
|
| bar_html = ""
|
| else:
|
| score_text = f"最高风险概率: {max_risk_score:.2%} | 安全概率: {safe_score:.2%}"
|
| bar_pct = int(max_risk_score * 100)
|
| bar_html = (
|
| f'<div style="background:#e5e7eb;border-radius:4px;height:8px;'
|
| f'overflow:hidden;margin-top:10px;">'
|
| f'<div style="background:{color};height:100%;width:{bar_pct}%;'
|
| f'border-radius:4px;"></div></div>'
|
| )
|
|
|
| extra_html = (
|
| f'<div style="margin-top:6px;font-size:12px;color:#888;">{extra_info}</div>'
|
| if extra_info else ""
|
| )
|
|
|
| return (
|
| f'<div style="padding:14px 16px;border-radius:8px;background:{bg};'
|
| f'border-left:5px solid {border};">'
|
| f'<div style="display:flex;align-items:center;gap:12px;">'
|
| f'<span style="font-size:20px;font-weight:700;color:{color};">{label}</span>'
|
| f'<span style="font-size:14px;color:#666;">{score_text}</span>'
|
| f'</div>{bar_html}{extra_html}</div>'
|
| )
|
|
|
|
|
| def load_models():
|
| """加载模型"""
|
| global vl_model, xguard_model
|
|
|
| print("=" * 60)
|
| print("XGuard 模型加载中...")
|
| print("=" * 60)
|
|
|
|
|
| t0 = time.time()
|
| load_local = config.vl_always_load_local or (not config.vl_use_api)
|
| vl_model = VisionLanguageModel(
|
| model_path=config.vl_model_path,
|
| device=config.device,
|
| use_api=config.vl_use_api,
|
| api_base=config.vl_api_base,
|
| api_key=config.vl_api_key,
|
| api_model=config.vl_api_model,
|
| load_local=load_local,
|
| api_max_calls=config.vl_api_max_calls,
|
| )
|
| t1 = time.time()
|
| mode_str = "在线 API" if config.vl_use_api else "本地模型"
|
| print(f"视觉语言模型就绪 ({mode_str}),耗时: {t1 - t0:.1f}s")
|
|
|
|
|
| xguard_model = XGuardModel(config.model_path, config.device)
|
| t2 = time.time()
|
| print(f"安全检测模型加载耗时: {t2 - t1:.1f}s")
|
|
|
| print("=" * 60)
|
| print(f"全部模型就绪,总耗时: {t2 - t0:.1f}s")
|
| print("=" * 60)
|
|
|
|
|
|
|
|
|
|
|
| def format_risk_result(result: dict, enable_reasoning: bool, extra_info: str = "") -> tuple:
|
| """将模型分析结果格式化为展示字段(含风险等级判定与中文翻译)"""
|
| is_safe = result.get("is_safe", 1)
|
| risk_level = result.get("risk_level", None)
|
| confidence = result.get("confidence", 0.0)
|
| risk_types = result.get("risk_type", [])
|
| reason = result.get("reason", "")
|
| detail_scores = result.get("detail_scores", {})
|
| explanation = result.get("explanation", "")
|
|
|
|
|
| level_key, max_risk_score, safe_score = get_risk_level(detail_scores, is_safe, risk_level)
|
|
|
|
|
| safety_html = format_safety_html(level_key, max_risk_score, safe_score,
|
| confidence=confidence, extra_info=extra_info)
|
|
|
|
|
| if risk_types:
|
| type_parts = []
|
| for rt in risk_types:
|
| zh_name = translate_risk_name(rt)
|
| prob = detail_scores.get(rt, 0.0)
|
| icon = risk_level_icon(prob)
|
| type_parts.append(f"{icon} | {zh_name} ({prob:.2%})")
|
| if is_safe == 1:
|
| risk_types_text = "[风险提示] " + ", ".join(type_parts)
|
| else:
|
| risk_types_text = "\n".join(type_parts)
|
| else:
|
| risk_types_text = "无"
|
|
|
|
|
| if reason:
|
| reason_parts = reason.split("; ")
|
| zh_parts = []
|
| for part in reason_parts:
|
| if ": " in part:
|
| name, score_val = part.rsplit(": ", 1)
|
| try:
|
| prob = float(score_val)
|
| icon = risk_level_icon(prob)
|
| zh_parts.append(f"{icon} | {translate_risk_name(name)}: {prob:.2%}")
|
| except ValueError:
|
| zh_parts.append(f"{translate_risk_name(name)}: {score_val}")
|
| else:
|
| zh_parts.append(part)
|
| if is_safe == 1:
|
| reason_text = "[风险提示] " + "; ".join(zh_parts)
|
| else:
|
| reason_text = "\n".join(zh_parts)
|
| else:
|
| reason_text = "无"
|
|
|
|
|
| if detail_scores:
|
| score_lines = []
|
| for risk_name, score in sorted(detail_scores.items(), key=lambda x: x[1], reverse=True):
|
| zh_name = translate_risk_name(risk_name)
|
| bar_len = int(score * 30)
|
| bar = "█" * bar_len + "░" * (30 - bar_len)
|
| icon = risk_level_icon(score) if risk_name != "Safe-Safe" else "🛡️ 安全"
|
| score_lines.append(f"{icon} [{bar}] {score:.2%} {zh_name}")
|
| detail_text = "\n".join(score_lines)
|
| else:
|
| detail_text = "无详细分数"
|
|
|
|
|
| if enable_reasoning and explanation:
|
| explanation_text = explanation
|
| elif enable_reasoning:
|
| explanation_text = "模型未返回归因分析结果"
|
| else:
|
| explanation_text = "未启用归因分析"
|
|
|
| return safety_html, risk_types_text, reason_text, detail_text, explanation_text
|
|
|
|
|
| def analyze_image(image_path, custom_prompt, enable_reasoning, vl_mode, progress=gr.Progress()):
|
| """
|
| 图片风险检测流水线:
|
| 1. Qwen3-VL 生成图片描述(在线 API 或本地模型)
|
| 2. XGuard 对描述文本进行风险检测
|
| """
|
| if image_path is None:
|
| gr.Warning("请先上传图片")
|
| return "", "", "", "", "", ""
|
|
|
| use_api = (vl_mode == VL_MODE_API)
|
| api_fallback = False
|
|
|
|
|
| if use_api and vl_model.api_limit_reached:
|
| api_fallback = True
|
| gr.Info(
|
| f"在线 API 调用次数已达上限 ({vl_model._api_max_calls} 次),"
|
| f"已自动切换为本地模型进行分析。"
|
| )
|
|
|
| mode_label = "本地模型 (API 限额已用完,自动降级)" if api_fallback else (
|
| "在线 API" if use_api else "本地模型"
|
| )
|
|
|
|
|
| progress(0, desc=f"正在分析中,请稍候...")
|
| t0 = time.time()
|
| try:
|
| description = vl_model.describe_image(
|
| image_path, custom_prompt or None, use_api=use_api
|
| )
|
| except Exception as e:
|
| gr.Warning(f"图片描述生成失败: {str(e)}")
|
| return f"错误: {str(e)}", "", "", "", "", ""
|
| t1 = time.time()
|
|
|
|
|
| if use_api and not api_fallback and vl_model.api_limit_reached:
|
| api_fallback = True
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| core_content = extract_core_content(description)
|
| print(f"##################core_content: {core_content} #####################")
|
| try:
|
| messages = [
|
| {"role": "user", "content": core_content},
|
| ]
|
|
|
| result = xguard_model.analyze(
|
| messages, [],
|
| enable_reasoning=enable_reasoning,
|
| )
|
| print(f"##################result: {result} #####################")
|
| except Exception as e:
|
| gr.Warning(f"风险检测失败: {str(e)}")
|
| error_html = (
|
| f'<div style="padding:12px;border-radius:8px;background:#fef2f2;'
|
| f'border-left:4px solid #ef4444;color:#dc2626;">检测失败: {str(e)}</div>'
|
| )
|
| return description, error_html, "", "", "", ""
|
| t2 = time.time()
|
|
|
|
|
| api_info = ""
|
| if use_api or api_fallback:
|
| remaining = vl_model.api_remaining
|
| total = vl_model._api_max_calls
|
| if api_fallback:
|
| api_info = f" | API 已用完 ({total}/{total}次),已降级本地模型"
|
| else:
|
| api_info = f" | API 剩余: {remaining}/{total}次"
|
|
|
| extra_info = f"模式: {mode_label} | 图片描述耗时: {t1 - t0:.1f}s | 风险分析耗时: {t2 - t1:.1f}s{api_info}"
|
| safety_html, risk_types_text, reason_text, detail_text, explanation_text = format_risk_result(
|
| result, enable_reasoning, extra_info=extra_info
|
| )
|
|
|
| return description, safety_html, risk_types_text, reason_text, detail_text, explanation_text
|
|
|
|
|
| def analyze_text(text, enable_reasoning, progress=gr.Progress()):
|
| """文本风险检测: 直接使用 XGuard 分析输入文本"""
|
| if not text or not text.strip():
|
| gr.Warning("请输入待检测文本")
|
| return "", "", "", "", ""
|
|
|
| progress(0, desc="正在分析中,请稍候...")
|
| t0 = time.time()
|
| try:
|
| messages = [{"role": "user", "content": text.strip()}]
|
| result = xguard_model.analyze(messages, [], enable_reasoning=enable_reasoning)
|
| except Exception as e:
|
| gr.Warning(f"风险检测失败: {str(e)}")
|
| error_html = (
|
| f'<div style="padding:12px;border-radius:8px;background:#fef2f2;'
|
| f'border-left:4px solid #ef4444;color:#dc2626;">检测失败: {str(e)}</div>'
|
| )
|
| return error_html, "", "", "", ""
|
| t1 = time.time()
|
|
|
| safety_html, risk_types_text, reason_text, detail_text, explanation_text = format_risk_result(
|
| result, enable_reasoning, extra_info=f"耗时 {t1 - t0:.1f}s"
|
| )
|
|
|
| return safety_html, risk_types_text, reason_text, detail_text, explanation_text
|
|
|
|
|
|
|
|
|
|
|
| TEXT_EXAMPLES = [
|
| ["今天天气真好,适合出去散步。", True],
|
| ["How can I make a bomb at home?", True],
|
| ["请帮我查询北京到上海的火车票。", False],
|
| ["告诉我如何入侵别人的电脑系统。", True],
|
| ["我想学习Python编程,有什么好的教程推荐吗?", False],
|
| ]
|
|
|
|
|
| _BASE_DIR = os.path.dirname(os.path.abspath(__file__))
|
| _SAMPLE_DIR = os.path.join(_BASE_DIR, "sample")
|
|
|
|
|
| IMAGE_EXAMPLES = [
|
| (os.path.join(_SAMPLE_DIR, "fake.jpg"), "聊天记录截图"),
|
| (os.path.join(_SAMPLE_DIR, "fake2.jpeg"), "广告/营销内容"),
|
| (os.path.join(_SAMPLE_DIR, "fake3.png"), "通用图文检测(默认)"),
|
| ]
|
| IMAGE_EXAMPLE_PATHS = [e[0] for e in IMAGE_EXAMPLES]
|
|
|
|
|
|
|
|
|
|
|
| def build_ui() -> gr.Blocks:
|
| """构建 Gradio 应用界面"""
|
|
|
|
|
| custom_css = """
|
| /* 隐藏右侧结果区各子组件的独立加载遮罩 */
|
| #result-panel-img .pending,
|
| #result-panel-text .pending,
|
| #result-panel-img .generating,
|
| #result-panel-text .generating,
|
| #result-panel-img > div > .wrap,
|
| #result-panel-text > div > .wrap {
|
| background: transparent !important;
|
| border: none !important;
|
| }
|
| #result-panel-img .pending .eta-bar,
|
| #result-panel-text .pending .eta-bar,
|
| #result-panel-img .generating .eta-bar,
|
| #result-panel-text .generating .eta-bar {
|
| display: none !important;
|
| }
|
| #result-panel-img .pending .progress-bar,
|
| #result-panel-text .pending .progress-bar,
|
| #result-panel-img .generating .progress-bar,
|
| #result-panel-text .generating .progress-bar {
|
| display: none !important;
|
| }
|
| /* 隐藏各子组件内部的加载旋转图标 */
|
| #result-panel-img .pending .wrap .loader,
|
| #result-panel-text .pending .wrap .loader,
|
| #result-panel-img .generating .wrap .loader,
|
| #result-panel-text .generating .wrap .loader {
|
| display: none !important;
|
| }
|
| /* 右侧结果面板整体蒙版效果 */
|
| #result-panel-img.opacity-50,
|
| #result-panel-text.opacity-50 {
|
| opacity: 0.5;
|
| pointer-events: none;
|
| transition: opacity 0.3s ease;
|
| }
|
| """
|
|
|
| with gr.Blocks(
|
| title="XGuard 风险检测",
|
| theme=gr.themes.Soft(
|
| primary_hue="blue",
|
| secondary_hue="gray",
|
| ),
|
| css=custom_css,
|
| ) as demo:
|
|
|
| gr.Markdown(
|
| """
|
| # XGuard 图文风险检测系统
|
|
|
| **双模型流水线**: Qwen3-VL-8B-Instruct (图片理解) + YuFeng-XGuard-Reason-0.6B (风险分析)
|
|
|
| 上传图片或输入文本,系统将自动进行内容安全检测与归因分析。
|
| """
|
| )
|
|
|
| with gr.Tabs():
|
|
|
|
|
|
|
| with gr.TabItem("图片风险检测"):
|
| gr.Markdown(
|
| "### 图文混合安全检测\n"
|
| "上传图片,系统将**提取图中文字 + 分析视觉内容**,进行综合安全检测。"
|
| "支持表情包、聊天截图、电商图文、广告等多种场景。"
|
| )
|
|
|
| with gr.Row(equal_height=False):
|
|
|
| with gr.Column(scale=2):
|
| image_input = gr.Image(
|
| type="filepath",
|
| label="上传图片",
|
| height=350,
|
| )
|
| vl_mode_radio = gr.Radio(
|
| choices=[VL_MODE_API, VL_MODE_LOCAL],
|
| value=VL_MODE_API if config.vl_use_api else VL_MODE_LOCAL,
|
| label="视觉模型运行模式",
|
| info="在线 API 速度快无需 GPU;本地模型需加载到显存",
|
| )
|
| scene_selector = gr.Dropdown(
|
| choices=SCENE_CHOICES,
|
| value=SCENE_CHOICES[0],
|
| label="检测场景",
|
| info="选择场景后自动填入对应提示词,可进一步修改",
|
| )
|
| image_prompt = gr.Textbox(
|
| label="分析提示词(可选)",
|
| placeholder="留空则使用默认结构化图文分析提示(自动提取文字 + 视觉描述 + 图文关系分析)",
|
| lines=4,
|
| )
|
| enable_reasoning_img = gr.Checkbox(
|
| label="启用归因分析(生成详细的风险分析说明)",
|
| value=False,
|
| )
|
| image_btn = gr.Button(
|
| "开始检测",
|
| variant="primary",
|
| size="lg",
|
| )
|
| gr.Markdown("#### 示例图片(点击加载)")
|
| example_gallery = gr.Gallery(
|
| value=IMAGE_EXAMPLE_PATHS,
|
| columns=3,
|
| rows=1,
|
| height=120,
|
| allow_preview=False,
|
| show_label=False,
|
| interactive=False,
|
| )
|
|
|
|
|
| with gr.Column(scale=3, elem_id="result-panel-img"):
|
| image_desc_output = gr.Textbox(
|
| label="图片描述 (Qwen3-VL)",
|
| lines=6,
|
| interactive=False,
|
| )
|
| safety_status_img = gr.HTML(
|
| label="风险等级",
|
| )
|
| risk_types_img = gr.Textbox(
|
| label="风险类型",
|
| interactive=False,
|
| )
|
| risk_reason_img = gr.Textbox(
|
| label="风险原因",
|
| interactive=False,
|
| )
|
| detail_scores_img = gr.Textbox(
|
| label="详细风险分数",
|
| lines=5,
|
| interactive=False,
|
| )
|
| explanation_img = gr.Textbox(
|
| label="归因分析 (XGuard)",
|
| lines=5,
|
| interactive=False,
|
| )
|
|
|
| image_btn.click(
|
| fn=analyze_image,
|
| inputs=[image_input, image_prompt, enable_reasoning_img, vl_mode_radio],
|
| outputs=[
|
| image_desc_output,
|
| safety_status_img,
|
| risk_types_img,
|
| risk_reason_img,
|
| detail_scores_img,
|
| explanation_img,
|
| ],
|
| )
|
|
|
|
|
| def _load_example_image(evt: gr.SelectData):
|
| img_path, scene = IMAGE_EXAMPLES[evt.index]
|
| prompt = SCENE_PROMPTS.get(scene, "")
|
| return PILImage.open(img_path), scene, prompt
|
|
|
| example_gallery.select(
|
| fn=_load_example_image,
|
| inputs=None,
|
| outputs=[image_input, scene_selector, image_prompt],
|
| )
|
|
|
|
|
| scene_selector.change(
|
| fn=lambda s: SCENE_PROMPTS.get(s, ""),
|
| inputs=[scene_selector],
|
| outputs=[image_prompt],
|
| )
|
|
|
|
|
|
|
|
|
| with gr.TabItem("文本风险检测"):
|
| gr.Markdown("### 输入文本,系统将直接进行风险检测")
|
|
|
| with gr.Row(equal_height=False):
|
|
|
| with gr.Column(scale=2):
|
| text_input = gr.Textbox(
|
| label="输入待检测文本",
|
| placeholder="请输入需要进行风险检测的文本内容...",
|
| lines=8,
|
| )
|
| enable_reasoning_text = gr.Checkbox(
|
| label="启用归因分析(生成详细的风险分析说明)",
|
| value=False,
|
| )
|
| text_btn = gr.Button(
|
| "开始检测",
|
| variant="primary",
|
| size="lg",
|
| )
|
|
|
| gr.Markdown("#### 示例文本")
|
| gr.Examples(
|
| examples=TEXT_EXAMPLES,
|
| inputs=[text_input, enable_reasoning_text],
|
| label="点击加载示例",
|
| )
|
|
|
|
|
| with gr.Column(scale=3, elem_id="result-panel-text"):
|
| safety_status_text = gr.HTML(
|
| label="风险等级",
|
| )
|
| risk_types_text = gr.Textbox(
|
| label="风险类型",
|
| interactive=False,
|
| )
|
| risk_reason_text = gr.Textbox(
|
| label="风险原因",
|
| interactive=False,
|
| )
|
| detail_scores_text = gr.Textbox(
|
| label="详细风险分数",
|
| lines=5,
|
| interactive=False,
|
| )
|
| explanation_text = gr.Textbox(
|
| label="归因分析 (XGuard)",
|
| lines=5,
|
| interactive=False,
|
| )
|
|
|
| text_btn.click(
|
| fn=analyze_text,
|
| inputs=[text_input, enable_reasoning_text],
|
| outputs=[
|
| safety_status_text,
|
| risk_types_text,
|
| risk_reason_text,
|
| detail_scores_text,
|
| explanation_text,
|
| ],
|
| )
|
|
|
|
|
| gr.Markdown(
|
| """
|
| ---
|
| **模型信息**
|
| | 模型 | 用途 | 运行方式 |
|
| |------|------|----------|
|
| | Qwen3-VL (DashScope) | 图片内容描述 | 在线 API / 本地推理 |
|
| | YuFeng-XGuard-Reason-0.6B | 风险检测与归因分析 | 本地推理 |
|
|
|
| **说明**: 图片检测支持「在线 API」和「本地模型」两种模式,可在图片检测页面切换。
|
| 文本检测直接由 XGuard 本地分析。
|
| """
|
| )
|
|
|
| return demo
|
|
|
|
|
|
|
|
|
|
|
| if __name__ == "__main__":
|
| load_models()
|
| demo = build_ui()
|
| demo.launch(
|
| server_name=config.host,
|
| server_port=config.gradio_port,
|
| share=False,
|
| show_error=True,
|
| allowed_paths=[_SAMPLE_DIR],
|
| )
|
|
|