child-psycho-companion/src/psycho_screener/screener.py

233 lines
8.3 KiB
Python
Raw Blame History

This file contains ambiguous Unicode characters!

This file contains ambiguous Unicode characters that may be confused with others in your current locale. If your use case is intentional and legitimate, you can safely ignore this warning. Use the Escape button to highlight these characters.

"""
儿童心理陪伴 - 筛查器
基于 MiniMax API对儿童对话进行心理问题筛查
"""
from __future__ import annotations

import json
import os
import re
from enum import Enum
from typing import Literal

import requests
from pydantic import BaseModel, Field
# ============================================================================
# 数据模型
# ============================================================================
class ConcernCategory(str, Enum):
    """Closed set of mental-health concern categories (心理问题类别).

    Members mix in ``str`` so each one compares and serializes equal to the
    plain label the model is prompted to return (e.g.
    ``ConcernCategory.BULLYING == "bullying"``), keeping this backward
    compatible with code that handles the categories as bare strings, while
    gaining proper enum semantics (iteration, value lookup, no accidental
    free-form instances).
    """
    NONE = "none"
    BULLYING = "bullying"                   # school bullying / peer conflict
    DEPRESSION = "depression"               # depressive mood
    ANXIETY = "anxiety"                     # anxiety / fear
    FAMILY_CONFLICT = "family_conflict"     # family conflict
    SELF_ESTEEM = "self_esteem"             # low self-esteem / self-denial
    TRAUMA = "trauma"                       # traumatic event
    SOCIAL_ISOLATION = "social_isolation"   # social isolation
    OTHER = "other"                         # anything else worth attention
class ScreeningResult(BaseModel):
    """Screening verdict produced by PsychoScreener.screen() (筛查结果)."""
    # Whether any mental-health concern was detected in the conversation.
    detected: bool = Field(description="是否检测到心理问题")
    # One of the ConcernCategory string labels; "none" when nothing detected.
    category: str = Field(default=ConcernCategory.NONE, description="问题类别")
    # Coarse severity grade, used by callers to decide escalation.
    severity: Literal["none", "low", "medium", "high"] = Field(
        default="none", description="严重程度"
    )
    # One-sentence human-readable description of the detected issue.
    summary: str = Field(default="", description="简要描述检测到的问题")
    # Suggested follow-up action; empty when nothing was detected.
    suggestion: str = Field(default="", description="建议行动")
    # Raw model output (or error text), kept for debugging/auditing only.
    raw_response: str = Field(default="", description="模型原始响应(调试用)")
# ============================================================================
# 筛查系统提示词
# ============================================================================
# System prompt sent verbatim to the model. It defines the eight concern
# categories and the strict JSON output contract that screen() parses;
# keep it in sync with ConcernCategory and ScreeningResult if either changes.
SYSTEM_PROMPT = """你是一个专业的儿童心理咨询师助手专注于分析3-8岁儿童的对话内容识别潜在的心理需求或问题。
## 你的任务
分析给定的儿童对话上下文,判断是否存在以下心理问题类别:
1. **bullying** - 霸凌/同伴冲突:孩子表达被欺负、被嘲笑、被孤立、被人威胁等
2. **depression** - 抑郁情绪:孩子表达悲伤、绝望、无助、对事物失去兴趣、提到"不想活了"
3. **anxiety** - 焦虑/恐惧:孩子表达担心、害怕、做噩梦、回避某些情境等
4. **family_conflict** - 家庭矛盾:孩子表达父母争吵、离婚担心、被忽视、被严厉惩罚等
5. **self_esteem** - 自卑/自我否定:孩子表达"我不行""没人喜欢我""我太笨了"
6. **trauma** - 创伤事件:孩子描述意外事故、暴力事件、亲人离世等创伤性经历
7. **social_isolation** - 社交孤立:孩子表达没有朋友、被排斥、孤独感等
8. **other** - 其他值得关注的心理需求
## 输出格式
请严格按以下JSON格式返回不要添加任何额外内容
{
"detected": true/false,
"category": "具体类别",
"severity": "none/low/medium/high",
"summary": "一句话描述检测到的问题",
"suggestion": "建议的应对方式简短1-2句话"
}
## 判断标准
- **low**: 轻微迹象,需要关注但无需立即介入
- **medium**: 中度迹象,建议与家长沟通
- **high**: 严重迹象,需要专业干预
如果对话内容完全正常,没有任何心理问题迹象,返回:
{
"detected": false,
"category": "none",
"severity": "none",
"summary": "未检测到心理问题",
"suggestion": ""
}
注意:
- 只关注确实存在问题的迹象,不要过度解读
- 儿童的语言表达可能不精确,需要结合上下文判断
- 正常的情绪表达(偶尔哭、发脾气)不构成问题"""
# ============================================================================
# 筛查器
# ============================================================================
class PsychoScreener:
    """
    Mental-health screener for children's conversations (儿童心理问题筛查器).

    Sends conversation text to the MiniMax chat API together with a screening
    system prompt, then parses the model's JSON verdict into a ScreeningResult.
    ``screen()`` is designed to never raise: API and parsing failures are
    reported as a non-detected result carrying diagnostic text.

    Usage:
        screener = PsychoScreener(api_key="your-api-key")
        result = screener.screen("今天小明打我了,我很伤心")
        if result.detected:
            print(f"检测到问题:{result.summary}")
    """

    def __init__(
        self,
        api_key: str | None = None,
        model: str = "MiniMax-M2.5",
        base_url: str = "https://api.minimaxi.com/v1/text/chatcompletion_v2",
    ):
        """
        Args:
            api_key: MiniMax API key; falls back to the MINIMAX_API_KEY env var.
            model: Model identifier passed to the chat-completion endpoint.
            base_url: Chat-completion endpoint URL.

        Raises:
            ValueError: if no API key is supplied or found in the environment.
        """
        self.api_key = api_key or os.environ.get("MINIMAX_API_KEY", "")
        self.model = model
        self.base_url = base_url
        if not self.api_key:
            raise ValueError(
                "MiniMax API key is required. "
                "Set MINIMAX_API_KEY env var or pass api_key parameter."
            )

    def _call_minimax(self, messages: list[dict]) -> str:
        """Call the MiniMax chat API and return the assistant's text content.

        Args:
            messages: Chat messages in OpenAI format ({"role", "content"} dicts).

        Returns:
            The assistant message text; falls back to the stringified payload
            when the response shape is unrecognized.

        Raises:
            requests.HTTPError: on non-2xx responses (via raise_for_status).
            requests.RequestException: on network failure or the 30s timeout.
        """
        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
        }
        payload = {
            "model": self.model,
            "messages": messages,
        }
        response = requests.post(
            self.base_url,
            headers=headers,
            json=payload,
            timeout=30,
        )
        response.raise_for_status()
        data = response.json()
        # Tolerate both OpenAI-style ("choices") and legacy ("output") shapes.
        if "choices" in data:
            return data["choices"][0]["message"]["content"]
        elif "output" in data:
            return data["output"]
        return str(data)

    @staticmethod
    def _extract_json(raw: str) -> str:
        """Best-effort extraction of the JSON object embedded in a model reply.

        Tries, in order: a fenced ```json ... ``` markdown block, then the
        outermost ``{...}`` span. Returns the stripped input unchanged when
        neither is found (json.loads will then fail and be handled upstream).
        """
        content = raw.strip()
        # Strategy 1: fenced ```json ... ``` markdown code block.
        md_match = re.search(r"```json\s*(.*?)\s*```", content, re.DOTALL)
        if md_match:
            return md_match.group(1).strip()
        # Strategy 2: outermost brace span. The previous '{"' / '"}' anchors
        # missed valid objects ending in a non-string value (e.g.
        # {"detected": false}); taking first '{' to last '}' covers those and
        # also subsumes the old "last '{' onward" fallback for reasoning text.
        start = content.find("{")
        end = content.rfind("}")
        if start != -1 and end > start:
            return content[start:end + 1]
        return content

    def screen(self, context: str) -> ScreeningResult:
        """
        Screen a child's conversation context for mental-health concerns.

        Args:
            context: Conversation text (may aggregate several dialogue turns).

        Returns:
            ScreeningResult: never raises — API and parsing failures come back
            as detected=False results with diagnostic text in summary/raw_response.
        """
        messages = [
            {"role": "system", "content": SYSTEM_PROMPT},
            {"role": "user", "content": f"请分析以下儿童对话内容:\n\n{context}"},
        ]
        try:
            raw_response = self._call_minimax(messages)
        except Exception as e:
            # Boundary soft-fail: screening must never crash the caller.
            return ScreeningResult(
                detected=False,
                category=ConcernCategory.OTHER,
                severity="none",
                summary=f"API调用失败: {str(e)}",
                suggestion="",
                raw_response=str(e),
            )
        try:
            parsed = json.loads(self._extract_json(raw_response))
            if not isinstance(parsed, dict):
                # e.g. the model returned a bare JSON list or string;
                # route it to the same soft-fail branch as malformed JSON.
                raise ValueError("model response is not a JSON object")
            return ScreeningResult(
                detected=parsed.get("detected", False),
                category=parsed.get("category", ConcernCategory.NONE),
                severity=parsed.get("severity", "none"),
                summary=parsed.get("summary", ""),
                suggestion=parsed.get("suggestion", ""),
                raw_response=raw_response,
            )
        except ValueError:
            # Covers json.JSONDecodeError AND pydantic's ValidationError (both
            # are ValueError subclasses): previously an out-of-range "severity"
            # from the model crashed screen() despite its no-raise contract.
            return ScreeningResult(
                detected=False,
                category=ConcernCategory.OTHER,
                severity="none",
                summary="无法解析模型响应",
                suggestion="",
                raw_response=raw_response,
            )

    def build_response_prefix(self, result: ScreeningResult) -> str:
        """
        Build a response prefix describing the screening outcome.

        Args:
            result: A screening result from screen().

        Returns:
            str: a descriptive prefix when a concern was detected, else "".
        """
        if not result.detected:
            return ""
        return f"【已发现特定心理问题】类别:{result.category},严重程度:{result.severity},描述:{result.summary}"