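"""DashscopeClient: LLM routing layer for the massage-robot voice assistant.

Overview (derived from the code below): the client classifies a spoken request
with two Qwen models running in parallel (a fast 7B pass and a slower 72B
correction pass), then dispatches the classified operation -- massage
adjustments, weather/news/date/time queries, volume control, music control, or
Dify-backed massage-plan recommendation -- and streams the reply back through
the local Flask endpoints on 127.0.0.1:5000 and the WebSocket on localhost:8766.
"""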
import random
from pathlib import Path
import time
import sys
import os

sys.path.append("./")
from tools.yaml_operator import read_yaml
from tools.log import CustomLogger
import requests
import queue

current_file_path = os.path.abspath(__file__)

# Walk three levels up from this file to get the Language package directory.
Language_Path = os.path.dirname(os.path.dirname(os.path.dirname(current_file_path)))
# print("Language_Path:", Language_Path)
# One level further up is the project root (MassageRobot_Dobot).
MassageRobot_Dobot_Path = os.path.dirname(Language_Path)
# print("MassageRobot_Dobot_Path:", MassageRobot_Dobot_Path)

# Add the project root to the import path.
sys.path.append(MassageRobot_Dobot_Path)

try:
    # Direct import when this file is run as a script.
    from LLM_dify import DifyClient
except ImportError:
    # Relative import when this module is imported as part of the package.
    from .LLM_dify import DifyClient

from datetime import datetime
import json
from openai import OpenAI
import re
import threading
import subprocess
from VortXDB.client import VTXClient
import asyncio
import websockets

class DashscopeClient:
    def __init__(self):
        # Fetch API keys and endpoints from the parameter server.
        self.weather_info = None
        self.vtxdb = VTXClient()
        self.Ali_OpenAI_api_key = self.vtxdb.get("robot_config", "Language.LLM.Ali_OpenAI_api_key")
        # print("self.Sillcon_OpenAI_api_key:", self.Sillcon_OpenAI_api_key)
        self.Ali_OpenAI_BaseUrl = self.vtxdb.get("robot_config", "Language.LLM.Ali_OpenAI_BaseUrl")
        self.client = OpenAI(api_key=self.Ali_OpenAI_api_key, base_url=self.Ali_OpenAI_BaseUrl)
        self.client_deep = OpenAI(
            api_key=self.vtxdb.get("robot_config", "Language.LLM.Sillcon_OpenAI_api_key"),
            base_url=self.vtxdb.get("robot_config", "Language.LLM.Sillcon_OpenAI_BaseUrl")
        )
        # self.client_search=DifyClient(base_url='http://124.71.62.243/v1',api_key='app-9vrC0QkaVFbj1g2rKiwkyzv3')
        # self.client_search_deep=DifyClient(base_url='http://124.71.62.243/v1',api_key='app-KFatoFKPMwjIt2za2paXvVA7')
        # self.massage_method_suggestion=DifyClient(base_url='http://124.71.62.243/v1',api_key='app-Kcm2KEaWmAIS5FiMmsX5hcyd')
        self.client_search = DifyClient(base_url='https://robotstorm.tech/dify/v1', api_key='app-9vrC0QkaVFbj1g2rKiwkyzv3')
        self.client_search_deep = DifyClient(base_url='https://robotstorm.tech/dify/v1', api_key='app-KFatoFKPMwjIt2za2paXvVA7')
        self.massage_method_suggestion = DifyClient(base_url='https://robotstorm.tech/dify/v1', api_key='app-WrJeh2wGNVcKoJtZPEK2EC27')
        # self.massage_method_suggestion=DifyClient(base_url='http://124.71.62.243/v1',api_key='app-RzktoHvGfosZmrWW5hwQyT5z')
        self.logger = CustomLogger()
        self.massage_status_languge = {
            'task_time': '3', 'progress': '30', 'force': 0, 'press': 0, 'frequency': 0,
            'temperature': 0, 'gear': 0, 'body_part': 'back', 'current_head': 'thermotherapy_head',
            'shake': 5, 'speed': 1, 'direction': 1, 'start_pos': '滑肉左', 'end_pos': '滑肉右',
            'massage_path': 'line'
        }
        self.deep_thinking_flag = False
        self.search_flag = False
        self.suggestion_flag = False
        self.suggestion_deekseek_flag = False
        self.stop_event = threading.Event()
        self.thread = None
        self.suggestion_mode_flag = False
        self.correct_flag = False
        # self.massage_status_languge=None
        # Initialize weather and news
        # self.weather_and_news=Weather_AND_News(api_keys)

        # Load prompts
        with open('LLM/config/classify_prompts_new.txt', 'r') as f:
            self.classify_prompts = f.read().strip()
        self.classify_message = [{'role': 'system', 'content': self.classify_prompts}]
        with open('LLM/config/auto_correction_prompts.txt', 'r') as f:
            self.chat_prompts1 = f.read().strip()
        self.chat_message1 = [{'role': 'system', 'content': self.chat_prompts1}]
        with open('LLM/config/classify_prompts_new.txt', 'r') as f:
            self.classify_prompts2 = f.read().strip()
        self.classify_message2 = [{'role': 'system', 'content': self.classify_prompts2}]
        with open('LLM/config/chat_prompts_new.txt', 'r') as f:
            self.chat_prompts = f.read().strip()
        self.chat_message = [{'role': 'system', 'content': self.chat_prompts}]
        # Note: this is an alias, not a copy -- weather_message and chat_message share the same list.
        self.weather_message = self.chat_message
        with open('LLM/config/retrieve_prompts.txt', 'r') as f:
            self.retrieve_prompts = f.read().strip()
        self.retrieve_message = [{'role': 'system', 'content': self.retrieve_prompts}]
        with open('LLM/config/adjust_volumn_prompts.txt', 'r') as f:
            self.adjust_volumn_prompts = f.read().strip()
        self.adjust_volumn_message = [{'role': 'system', 'content': self.adjust_volumn_prompts}]
        # with open('LLM/config/trajectory_retrieval_prompt.txt', 'r') as f:
        #     self.trajectory_retrieval_prompt = f.read().strip()
        # self.trajectory_retrieval_message = [{'role': 'system', 'content': self.trajectory_retrieval_prompt}]
        with open('LLM/config/trajectory_config/message_retrieval_prompt.txt', 'r') as f:
            self.message_retrieval_prompt = f.read().strip()
        self.message_retrieval_message = [{'role': 'system', 'content': self.message_retrieval_prompt}]
        with open('LLM/config/trajectory_config/body_part_prompts.txt', 'r') as f:
            self.body_part_prompts = f.read().strip()
        self.body_part_message = [{'role': 'system', 'content': self.body_part_prompts}]

        # Per-body-part constraint prompts for trajectory generation.
        with open('LLM/config/trajectory_config/shoulder_limit_prompts.txt', 'r') as f:
            self.shoulder_limit_prompts = f.read().strip()
        with open('LLM/config/trajectory_config/back_limit_prompts.txt', 'r') as f:
            self.back_limit_prompts = f.read().strip()
        with open('LLM/config/trajectory_config/waist_limit_prompts.txt', 'r') as f:
            self.waist_limit_prompts = f.read().strip()
        with open('LLM/config/trajectory_config/belly_limit_prompts.txt', 'r') as f:
            self.belly_limit_prompts = f.read().strip()
        with open('LLM/config/trajectory_config/leg_limit_prompts.txt', 'r') as f:
            self.leg_limit_prompts = f.read().strip()
        self.model = {
            'classify_model2': 'qwen2.5-72b-instruct',
            'classify_model1': 'qwen2-7b-instruct',
            'chat_model': 'qwen2-7b-instruct',
            'chat_model1': 'qwen2-7b-instruct',
            'retrieve_model': 'qwen2-7b-instruct',
            'adjust_volumn_model': 'qwen2-7b-instruct',
            'deep_thinking_model': 'deepseek-ai/DeepSeek-R1-Distill-Qwen-32B'
        }

        # self.location = self.get_current_position()[3:]

        # Operation dictionaries: map classifier codes to instruction strings (small adjustment step).
        self.operations = {
            'ksam': '按摩过程调整:开始,无',
            'tzam': '按摩过程调整:停止,无',
            'ldzd': '按摩过程调整:变重,一点',
            'ldjx': '按摩过程调整:变轻,一点',
            'wdzj': '按摩过程调整:升温,一点',
            'wdjx': '按摩过程调整:降温,一点',

            'dlzd': '按摩过程调整:增电,一点',
            'dljx': '按摩过程调整:减电,一点',
            'pljk': '按摩过程调整:增频,一点',
            'pljd': '按摩过程调整:减频,一点',
            'cjlz': '按摩过程调整:增冲,一点',
            'cjlj': '按摩过程调整:减冲,一点',
            'zszj': '按摩过程调整:增速,一点',
            'zsjx': '按摩过程调整:减速,一点',
            'gbzx': '按摩过程调整:换向,一点',
            'tsgd': '按摩过程调整:升高,一点',
            'jdgd': '按摩过程调整:降高,一点',
            'dayh': '按摩过程调整:时间,加长',
            'tgdw': '按摩过程调整:时间,跳过',

            'jtrq': '',
            'xzsj': '',
            'jttq': 'weather',
            'jtxw': 'news',
            'qthd': '',
            'bfyy': '音乐播放调整:播放,无',
            'tzbf': '音乐播放调整:停播,无',
            'ssyy': '音乐播放调整:上首',
            'xsyy': '音乐播放调整:下首',
        }

        # Same mapping with a larger adjustment step (used by the correction pass).
        self.operations1 = {
            'ksam': '按摩过程调整:开始,无',
            'tzam': '按摩过程调整:停止,无',
            'ldzd': '按摩过程调整:变重,较大幅度',
            'ldjx': '按摩过程调整:变轻,较大幅度',
            'wdzj': '按摩过程调整:升温,较大幅度',
            'wdjx': '按摩过程调整:降温,较大幅度',

            'dlzd': '按摩过程调整:增电,较大幅度',
            'dljx': '按摩过程调整:减电,较大幅度',
            'pljk': '按摩过程调整:增频,较大幅度',
            'pljd': '按摩过程调整:减频,较大幅度',
            'cjlz': '按摩过程调整:增冲,较大幅度',
            'cjlj': '按摩过程调整:减冲,较大幅度',
            'zszj': '按摩过程调整:增速,较大幅度',
            'zsjx': '按摩过程调整:减速,较大幅度',
            'gbzx': '按摩过程调整:换向,较大幅度',
            'dayh': '按摩过程调整:时间,加长',
            'tgdw': '按摩过程调整:时间,跳过',

            'jtrq': '',
            'xzsj': '',
            'jttq': 'weather',
            'jtxw': 'news',
            'qthd': '',
            'bfyy': '音乐播放调整:播放,无',
            'tzbf': '音乐播放调整:停播,无',
            'ssyy': '音乐播放调整:上首',
            'xsyy': '音乐播放调整:下首',
        }

        # Massage-head code -> display name.
        self.task_mapping = {
            "finger": "指疗通络",
            "shockwave": "点阵按摩",
            "roller": "滚滚刺疗",
            "thermotherapy": "深部热疗",
            "stone": "温砭舒揉",
            "ball": "全能滚珠"
        }

        # Body-part code -> display name.
        self.body_mapping = {
            "back": "背部",
            "belly": "腹部",
            "waist": "腰部",
            "shoulder": "肩颈",
            "leg": "腿部"
        }

        # Trajectory-type code -> display name.
        self.pathTypeMap = {
            'line': "循经直推法",
            'in_spiral': "螺旋内揉法",
            'out_spiral': "螺旋外散法",
            'ellipse': "周天环摩法",
            'lemniscate': "双环疏经法",
            'cycloid': "摆浪通络法",
            'point': "定穴点按法",
            'point_rub': "定点揉摩法"
        }
    # Fetch location info (disabled)
    # def get_current_position(self, **kwargs):
    #     """ 调用API获取位置 """
    #     try:
    #         location = get_location()['城市']
    #         return '位置:' + location
    #     except Exception as e:
    #         self.logger.log_error("获取位置信息失败")

    # Current time
    def xzsj(self, **kwargs):
        time_result = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        time_result = time_result.split(' ')[1]
        return '当前时间:' + time_result

    # Current date
    def jtrq(self, **kwargs):
        time_result = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        date_result = time_result.split(' ')[0]
        print('当前日期:' + date_result)
        return '当前日期:' + date_result

    def get_volume(self):
        """
        Query the current master volume and return the raw amixer output.
        """
        try:
            # Run the command and capture its output.
            result = subprocess.run(
                ["/usr/bin/amixer", "-D", "pulse", "get", "Master"],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                text=True
            )
            if result.returncode == 0:
                output = result.stdout
                return output
            else:
                print("查询音量失败:", result.stderr)
                return None
        except Exception as e:
            print("发生错误:", e)
            return None

    # Generic request handler.
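    # Given a classified operation code and the raw user utterance, this method
    # enriches the utterance with the current robot state (force, temperature,
    # gear, progress, head, body part, ...), performs side effects for a few
    # codes (volume adjustment, UI restart, massage-plan recommendation), and
    # returns either a plain instruction string from self.operations or a dict
    # with a streaming 'response_generator' plus the 'chat_message' to report.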
    def handle_request(self, operation, **kwargs):
        self.stop_thread()
        # Raw user utterance.
        human_input = kwargs.get('human_input')
        # Current robot state value relevant to each status-query operation.
        function_to_status = {
            'lddx': str(self.massage_status_languge['force']),
            'wddx': str(self.massage_status_languge['temperature']),
            'dldw': str(self.massage_status_languge['gear']),
            # 'pldw': str(self.massage_status_languge['frequency']),
            'pldw': str(self.massage_status_languge['frequency']) if str(self.massage_status_languge['current_head']) == 'shockwave_head' else
                    str(self.massage_status_languge['shake']) if str(self.massage_status_languge['current_head']) == 'thermotherapy_head' else
                    str(self.massage_status_languge['frequency']),  # default
            'cjdx': str(self.massage_status_languge['press']),
            'amjd': str(self.massage_status_languge['progress']),
            'amdt': str(self.massage_status_languge['current_head']),
            'ambw': str(self.massage_status_languge['body_part']),
            'gnjs': str(self.massage_status_languge['current_head']),
            'zsdw': str(self.massage_status_languge['speed'])
        }
        status_result = function_to_status.get(operation, None)
        print('status_result:', status_result)

        # Append the current value and its unit to the utterance for status queries.
        if operation in ['lddx', 'wddx', 'dldw', 'pldw', 'cjdx', 'zsdw']:
            if operation in ['lddx', 'cjdx']:
                self.unit_status = "牛"
            elif operation == 'wddx':
                self.unit_status = "档"
            elif operation in ['dldw', 'pldw', 'zsdw']:
                self.unit_status = "档"
            human_input = f"{human_input} {status_result}{self.unit_status} "

        if operation == 'ambw':
            if status_result == 'back':
                human_input = f"{human_input} 目前按摩部位:{'背部'}"
            elif status_result == 'shoulder':
                human_input = f"{human_input} 目前按摩部位:{'肩颈'}"
            elif status_result == 'belly':
                human_input = f"{human_input} 目前按摩部位:{'腹部'}"
            elif status_result == 'back_shoulder':
                human_input = f"{human_input} 目前按摩部位:{'肩颈和背部'}"

        if operation in ['amdt', 'gnjs']:
            if status_result == 'thermotherapy_head':
                human_input = f"{human_input} 目前使用按摩头:{'热疗按摩头'}"
            elif status_result == 'shockwave_head':
                human_input = f"{human_input} 目前使用按摩头:{'冲击波按摩头'}"
            elif status_result == 'ball_head':
                human_input = f"{human_input} 目前使用按摩头:{'全能滚珠按摩头'}"
            elif status_result == 'finger_head':
                human_input = f"{human_input} 目前使用按摩头:{'指疗通络按摩头'}"
            elif status_result == 'roller_head':
                human_input = f"{human_input} 目前使用按摩头:{'狼牙滚珠按摩头'}"

        if operation == 'amjd':
            massage_progress = int(self.massage_status_languge['progress'])
            massage_task_time = int(self.massage_status_languge['task_time'])
            # Remaining time: total (task_time * 12) minus the completed portion.
            remaining_time = massage_task_time * 12 - (round((massage_task_time * 12) * massage_progress / 100))
            print("massage_progress:", massage_progress)
            print("massage_task_time:", massage_task_time)
            print("remaining_time:", remaining_time)
            human_input = f"{human_input} 目前按摩进度:{massage_progress},按摩剩余时间:{remaining_time}"
        # For jtrq / xzsj, replace human_input with the corresponding helper's return value.
        if operation == 'jtrq':
            human_input = self.jtrq()
        elif operation == 'xzsj':
            human_input = self.xzsj()

        if operation == 'cqjm':
            try:
                # Pass the password to sudo via stdin.
                result = subprocess.run(
                    ['/bin/sudo', '-S', 'systemctl', 'restart', 'ui_next_app'],
                    input='jsfb\n',
                    text=True,
                    check=True
                )
                print("服务 ui_next_app 已成功重启")
            except subprocess.CalledProcessError as e:
                print(f"重启服务失败,错误信息: {e}")

        if operation == 'yltj':
            adjust_volumn_result = None
            try:
                self.adjust_volumn_message = [{'role': 'system', 'content': self.adjust_volumn_prompts}]
                input_prompt = {
                    "role": "user",
                    "content": human_input
                }
                self.adjust_volumn_message.append(input_prompt)
                response = self.client.chat.completions.create(
                    model=self.model['adjust_volumn_model'],
                    messages=self.adjust_volumn_message,
                    stream=False,
                    stop=["}"],
                    timeout=10
                ).json()

                # Parse the reply and extract the volume-adjustment instruction.
                response = json.loads(response)
                adjust_volumn_result = response['choices'][0]['message']['content']
                adjust_volumn_result = adjust_volumn_result + "}"
                adjust_volumn_result = re.findall(r'\{(.*?)\}', adjust_volumn_result)[0]
                print("adjust_volumn_result:", adjust_volumn_result)
            except Exception as e:
                self.logger.log_error("音量调节模型有问题")
            try:
                if adjust_volumn_result is not None:
                    # response = requests.post('http://127.0.0.1:5000/adjust_volumn', data=adjust_volumn_result)
                    response = requests.post(
                        'http://127.0.0.1:5000/adjust_volume',
                        json={'adjust_volumn_result': adjust_volumn_result}
                    )
                # # Sign-based local fallback (disabled):
                # if "+" in adjust_volumn_result:
                #     volume_change = int(re.search(r'\+(\d+)', adjust_volumn_result).group(1))
                #     print(f"增加音量: {volume_change}%")
                #     os.system(f"/usr/bin/amixer -D pulse sset Master {volume_change}%+")
                # elif "-" in adjust_volumn_result:
                #     volume_change = int(re.search(r'-(\d+)', adjust_volumn_result).group(1))
                #     print(f"减少音量: {volume_change}%")
                #     os.system(f"/usr/bin/amixer -D pulse sset Master {volume_change}%-")
                # elif "=" in adjust_volumn_result:
                #     volume_set = int(re.search(r'=(\d+)', adjust_volumn_result).group(1))
                #     print(f"设置音量为: {volume_set}%")
                #     os.system(f"/usr/bin/amixer -D pulse sset Master {volume_set}%")
                # else:
                #     print("无法识别的音量调节指令")
            except Exception as e:
                print(f"处理音量调节指令时发生错误: {e}")
        if operation == 'ylcx':
            volumn_result = self.get_volume()
            print("volumn_result:", volumn_result)
            try:
                # Extract the volume percentage with a regex.
                match = re.search(r'(\d+)%', volumn_result)
                if match:
                    print("int(match.group(1)):", int(match.group(1)))
                    match = int(match.group(1))
                    human_input = f"{human_input} {match}"
                    print("human_input:", human_input)
                else:
                    self.logger.log_error("无法提取出音量信息")  # no match found
            except Exception as e:
                print(f"提取音量时发生错误: {e}")

        if operation == 'jttq':
            self.weather_info = self.get_info('/get_weather')
            print("weather_info", self.weather_info)
            time_result = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            time_result = time_result.split(' ')[1]
            human_input = f"天气提问:{human_input} 天气数据:{self.weather_info} (其中daily.0.fxDate为今日日期, 当前时间为{time_result}) 你需要结合geo_info和时间来分析当前用户提问的是接下来几天daily还是接下来几小时hourly还是当前天气now, 你需要综合分析并准确根据数据回答,不需要给分析过程,给出分析后的信息就可以,回复用户询问的相应时间点的天气就可以,回复需要简短,一句话说完"

        if operation == 'dayh':
            massage_path = self.massage_status_languge['massage_path']
            if massage_path in self.pathTypeMap:
                massage_path = self.pathTypeMap[massage_path]
                print(massage_path)  # e.g. 循经直推法 when the path type is 'line'
            else:
                massage_path = None
            human_input = f"{human_input} 目前按摩穴位:从{self.massage_status_languge['start_pos']}到{self.massage_status_languge['end_pos']} 目前按摩手法:{massage_path}"

        if operation == 'tgdw':
            massage_path = self.massage_status_languge['massage_path']
            if massage_path in self.pathTypeMap:
                massage_path = self.pathTypeMap[massage_path]
                print(massage_path)  # e.g. 循经直推法 when the path type is 'line'
            else:
                massage_path = None
            human_input = f"{human_input} 目前按摩穴位:从{self.massage_status_languge['start_pos']}到{self.massage_status_languge['end_pos']} 目前按摩手法:{massage_path} 需要跳过该区域"
        if operation == 'sftj':
            # Massage-plan recommendation pipeline.
            asyncio.run(self.send_message("massage_plan_start"))
            self.logger.log_info("进入手法推荐模块")

            def model_output(messages, model='qwen2.5-7b-instruct', stream=False, timeout=10):
                try:
                    # Call the LLM endpoint.
                    completion = self.client.chat.completions.create(
                        model=model,
                        messages=messages,
                        stream=stream,
                        timeout=timeout
                    )
                    return completion
                except Exception as e:
                    asyncio.run(self.send_message("JSON_error"))
                    print(f"错误信息:{e}")
                    return {"error": f"调用大模型时出错:{e}"}

            # Step 1: infer the target body part and massage head from the utterance.
            messages1 = [
                {'role': 'system', 'content': self.body_part_prompts},
                {'role': 'user', 'content': f"提问:{human_input}"},
                {'role': 'user', 'content': "请根据用户提问推断出用户想要按摩的部位及按摩头选择"}]
            # completion1=model_output(messages1)
            # print("completion1:",completion1)
            try:
                completion1 = model_output(messages1, model='qwen2.5-72b-instruct', stream=False)
                print(completion1.model_dump_json())
                response_json = json.loads(completion1.model_dump_json())
                content_str = response_json['choices'][0]['message']['content']
                # print("content_str:",content_str)
                json_data = content_str.strip().strip('```json').strip()
                body_and_header = json.loads(json_data)
                self.logger.log_blue(f"body_and_header:{body_and_header}")
            except Exception as e:
                asyncio.run(self.send_message("JSON_error"))
                print("解析JSON格式部位失败")
                return "解析JSON格式部位失败"
            # Step 2: stream the reasoning for the recommendation to the UI.
            messages0 = [
                {'role': 'system', 'content': "你是中医智能理疗分析师,请从用户输入给出理由为什么需要推荐按摩手法"},
                {'role': 'user', 'content': f"提问:{human_input}"},
                {'role': 'user', 'content': f"中医推荐按摩部位:{body_and_header.get('body_part')}"},
                {'role': 'user', 'content': "请根据用户提问给出需要推荐按摩手法的原因,原因分析请根据推荐部位进行"}]
            try:
                completion0 = model_output(messages0, stream=True)
                full_content1 = ''
                for chunk in completion0:
                    if chunk.choices:
                        result = getattr(chunk.choices[0].delta, "content", None)
                        if result:
                            full_content1 += result
                            instruction = {'message': result, 'isReasoning': True}
                            requests.post('http://127.0.0.1:5000/ai_respon', data=instruction)

            except Exception as e:
                print("生成原因失败")
                return "生成原因失败"

            # Which body parts each massage head supports.
            massage_heads = {
                "指疗通络": ["背部", "肩颈", "腿部", "腰部"],
                "点阵按摩": ["背部", "肩颈", "腰部"],
                "滚滚刺疗": ["背部", "腿部"],
                "全能滚珠": ["背部"],
                "深部热疗": ["背部", "腹部", "腰部"],
                "温砭舒揉": ["背部", "腹部", "腰部"]
            }
            # Body part and head chosen by the model.
            body_part = body_and_header.get('body_part')
            task = body_and_header.get('choose_task')

            # Reject the combination if this head cannot be used on this body part.
            if body_part not in massage_heads.get(task, []):
                print(f"{task} 不可以在 {body_part} 上使用。")
                asyncio.run(self.send_message("header_not_use"))
                return f"{task} 不可以在 {body_part} 上使用。"
            # Step 3: stream the personalized therapy plan text to the UI.
            messages2 = [
                {'role': 'system', 'content': self.message_retrieval_prompt},
                {'role': 'user', 'content': f"提问:{human_input}"},
                {'role': 'user', 'content': f"部位、按摩头:{body_and_header}"},
                {'role': 'user', 'content': "请根据用户提问及提供的部位、按摩头,生成该部位的个性化中医理疗方案"}]
            try:
                completion2 = model_output(messages2, stream=True)
                if isinstance(completion2, dict) and 'error' in completion2:
                    print(f"模型调用失败:{completion2['error']}")
                    return "无法获取理疗轨迹信息检索后消息"

                full_content = ''
                for chunk in completion2:
                    if chunk.choices:
                        result = getattr(chunk.choices[0].delta, "content", None)
                        if result:
                            full_content += result
                            instruction = {'message': result, 'isReasoning': False}
                            requests.post('http://127.0.0.1:5000/ai_respon', data=instruction)
                asyncio.run(self.send_message("trajectory_generate"))
                # After the streamed output finishes, notify that trajectory generation has started.
                status_payload = {
                    'status': 'start',  # other values: 'fail' or 'done'
                    'phase_message': '轨迹生成流程已启动'
                }
                requests.post('http://127.0.0.1:5000/trajectory_status', json=status_payload)

            except Exception as e:
                asyncio.run(self.send_message("JSON_error"))
                print("推荐方案失败")
                return "推荐方案失败"

            # Map the display name back to the internal body-part code and pick its constraint prompt.
            plan_body_part = body_and_header.get('body_part')
            print("plan_body_part:", plan_body_part)
            if plan_body_part == '肩颈':
                body_and_header['body_part'] = 'shoulder'
                body_part_prompts = self.shoulder_limit_prompts
            elif plan_body_part == '背部':
                body_and_header['body_part'] = 'back'
                body_part_prompts = self.back_limit_prompts
            elif plan_body_part == '腰部':
                body_and_header['body_part'] = 'waist'
                body_part_prompts = self.waist_limit_prompts
            elif plan_body_part == '腹部':
                body_and_header['body_part'] = 'belly'
                body_part_prompts = self.belly_limit_prompts
            elif plan_body_part == '腿部':
                body_and_header['body_part'] = 'leg'
                body_part_prompts = self.leg_limit_prompts
            else:
                print("获取到的部位有误")
                status_payload = {
                    'status': 'fail',
                    'phase_message': '轨迹生成流程已失败'
                }
                requests.post('http://127.0.0.1:5000/trajectory_status', json=status_payload)
                return "获取部位有误"
            # Step 4: generate the concrete trajectory JSON for the chosen body part.
            messages3 = [
                {'role': 'system', 'content': body_part_prompts},
                {'role': 'user', 'content': f"提问:{human_input}"},
                {'role': 'user', 'content': f"部位、按摩头:{body_and_header}"},
                {'role': 'user', 'content': "请根据用户提问及提供的部位、按摩头,生成该部位的中医理疗按摩手法轨迹"}]

            try:
                completion3 = model_output(messages3, model='qwen-plus', stream=False, timeout=60)
                print(completion3.model_dump_json())
                response_json = json.loads(completion3.model_dump_json())
                content_str = response_json['choices'][0]['message']['content']
                # print("content_str:",content_str)
                json_data = content_str.strip().strip('```json').strip()
                massage_plan = json.loads(json_data)
                self.logger.log_blue(f"massage_plan:{massage_plan}")
            except Exception as e:
                status_payload = {
                    'status': 'fail',
                    'phase_message': '轨迹生成流程已失败'
                }
                requests.post('http://127.0.0.1:5000/trajectory_status', json=status_payload)
                asyncio.run(self.send_message("JSON_error"))
                print("解析JSON格式轨迹失败")
                return "解析JSON格式轨迹失败"

            # Step 5: persist the plan and report the result to the UI.
            try:
                plan_timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
                plan_name = f"{self.task_mapping[massage_plan['choose_task']]}-{self.body_mapping[massage_plan['body_part']]}-{massage_plan['title']}{plan_timestamp}"
                plan_selection = f"按摩头以及部位选择:{self.task_mapping[massage_plan['choose_task']]}-{self.body_mapping[massage_plan['body_part']]}"
                massage_plan['can_delete'] = True
                self.vtxdb.set("massage_plan", plan_name, massage_plan)
                instruction = {'message': f"____\n{plan_selection}\n"}
                requests.post('http://127.0.0.1:5000/ai_respon', data=instruction)
                instruction = {'message': f"____\n按摩方案名称: {plan_name}"}
                requests.post('http://127.0.0.1:5000/ai_respon', data=instruction)
                asyncio.run(self.send_message("massage_plan_finish"))
                status_payload = {
                    'status': 'done',
                    'phase_message': '轨迹生成流程已完成'
                }
                requests.post('http://127.0.0.1:5000/trajectory_status', json=status_payload)
            except Exception as e:
                status_payload = {
                    'status': 'fail',
                    'phase_message': '轨迹生成流程已失败'
                }
                requests.post('http://127.0.0.1:5000/trajectory_status', json=status_payload)
                asyncio.run(self.send_message("JSON_error"))
                print(f"检查参数服务器key格式或者没写进参数服务器{e}")
                return "按摩手法记录失败,请重新生成"
        # For these operations the reply comes from a chat model over the enriched utterance.
        if operation in ['jtrq', 'xzsj', 'qthd', 'lddx', 'wddx', 'dldw', 'pldw', 'cjdx', 'amjd', 'amdt', 'ambw', 'gnjs', 'cqjm', 'ylcx', 'zsdw', 'jtxw', 'dayh', 'tgdw']:
            # Keep only the system prompt, then append the current user turn.
            self.chat_message = [msg for msg in self.chat_message if msg.get('role') != 'user']
            input_prompt = {
                "role": "user",
                "content": human_input
            }
            self.chat_message.append(input_prompt)
            # print("self.chat_message:",self.chat_message)
            if operation == 'qthd' or operation == 'jtxw':
                # Route to the deep-thinking / search clients according to the current flags.
                if self.deep_thinking_flag and not self.search_flag:
                    response_generator = self.client_deep.chat.completions.create(
                        model=self.model['deep_thinking_model'],
                        messages=self.chat_message,
                        stream=True,
                        timeout=10
                    )
                elif not self.deep_thinking_flag and self.search_flag:
                    user_id = os.uname()[1]
                    response_generator = self.client_search.chat_completion(user_id=user_id, query=human_input, callback=self.finished_callback)
                elif self.deep_thinking_flag and self.search_flag:
                    user_id = os.uname()[1]
                    response_generator = self.client_search_deep.chat_completion(user_id=user_id, query=human_input, callback=self.finished_callback)
                else:
                    response_generator = self.client.chat.completions.create(
                        model=self.model['chat_model'],
                        messages=self.chat_message,
                        stream=True,
                        timeout=10
                    )
            else:
                response_generator = self.client.chat.completions.create(
                    model=self.model['chat_model'],
                    messages=self.chat_message,
                    stream=True,
                    timeout=10
                )
            if operation == 'dayh':
                human_input = f"{human_input} 按摩过程调整:时间,加长"
            if operation == 'tgdw':
                human_input = f"{human_input} 按摩过程调整:时间,跳过"
            return {
                'chat_message': human_input,
                'response_generator': response_generator
            }

        if operation == 'jttq':
            input_prompt = {
                "role": "user",
                "content": human_input
            }
            self.weather_message.append(input_prompt)
            if self.deep_thinking_flag:
                response_generator = self.client_deep.chat.completions.create(
                    model=self.model['deep_thinking_model'],
                    messages=self.weather_message,
                    stream=True,
                    timeout=10
                )
            else:
                response_generator = self.client.chat.completions.create(
                    model=self.model['chat_model'],
                    messages=self.weather_message,
                    stream=True,
                    timeout=10
                )
            self.weather_message.pop()
            return {
                'chat_message': "询问天气",
                'response_generator': response_generator
            }

        # For any other operation, return the mapped instruction string (default: 'qthd').
        return self.operations.get(operation, self.operations['qthd'])
    async def send_message(self, data):
        """Send a short status string to the local UI WebSocket server."""
        uri = "ws://localhost:8766"  # or replace with the target server address
        try:
            async with websockets.connect(uri) as websocket:
                await websocket.send(data)  # send the message
                print("消息已发送:", data)
        except websockets.exceptions.ConnectionClosedOK as e:
            # The connection closed normally.
            print(f"连接已关闭:{e}")
        except websockets.exceptions.WebSocketException as e:
            # Any other WebSocket error.
            print(f"WebSocket 错误: {e}")
        except Exception as e:
            # Catch-all for everything else.
            print(f"发生错误: {e}")

    def stop_thread(self):
        # Signal the previous worker thread (if any) to stop and wait briefly for it.
        if hasattr(self, 'thread') and self.thread and self.thread.is_alive():
            self.stop_event.set()
            self.thread.join(timeout=1)
            print("旧线程安全终止")
    def finished_callback(self, event_type, event_title):
        # Workflow-node callback passed to the Dify clients; most branches are currently disabled.
        # if event_type=="node_finished" and event_title=="massage_method_suggestion_llm":
        #     self.suggestion_flag=True
        #     print("self.suggestion_flag:",self.suggestion_flag)
        #     print("执行到播报结束...")
        # if event_type=="node_started" and event_title=="吹水":
        #     self.suggestion_flag=False
        #     print("self.suggestion_flag:",self.suggestion_flag)
        #     print("执行到开始json...")

        if event_type == "node_started" and event_title == "深度思考轨迹":
            # self.suggestion_deekseek_flag=True
            # print("self.suggestion_deekseek_flag:",self.suggestion_deekseek_flag)
            print("执行到开始深度思考轨迹...")
        # if event_type=="node_finished" and event_title=="深度思考轨迹":
        #     self.suggestion_deekseek_flag=False
        #     print("self.suggestion_flag:",self.suggestion_deekseek_flag)
        #     print("执行到结束深度思考轨迹...")
    def classify(self, human_input):
        """Classify the utterance with the fast model, return the operation code, and time the call."""
        start_time = time.time()
        self.classify_message = [{'role': 'system', 'content': self.classify_prompts}]
        input_prompt = {
            "role": "user",
            "content": human_input
        }
        self.classify_message.append(input_prompt)
        try:
            # Query the classifier model.
            response = self.client.chat.completions.create(
                model=self.model['classify_model1'],
                messages=self.classify_message,
                stream=False,
                stop=["}"],
                timeout=10
            ).json()
            # Parse the reply and extract question_function (the text inside the braces).
            response = json.loads(response)
            question_function = response['choices'][0]['message']['content']
            question_function = question_function + "}"
            question_function = re.findall(r'\{(.*?)\}', question_function)[0]

            print(f"classifyProcessed question_function: {question_function}")

        except Exception as e:
            self.logger.log_error(f"Error processing question_function: {e}")
            return "cwsb"

        total_time = time.time() - start_time
        print(f"Total time for processing: {total_time:.2f} seconds")

        return question_function
    def correct_classify(self, human_input):
        """Classify the utterance with the larger model, return the operation code, and time the call."""
        # time.sleep(1)
        start_time = time.time()
        self.classify_message2 = [{'role': 'system', 'content': self.classify_prompts2}]
        input_prompt = {
            "role": "user",
            "content": human_input
        }
        self.classify_message2.append(input_prompt)
        try:
            # Query the correction classifier model.
            response = self.client.chat.completions.create(
                model=self.model['classify_model2'],
                messages=self.classify_message2,
                stream=False,
                stop=["}"],
                timeout=10
            ).json()
            # Parse the reply and extract question_function (the text inside the braces).
            response = json.loads(response)
            question_function = response['choices'][0]['message']['content']
            question_function = question_function + "}"
            question_function = re.findall(r'\{(.*?)\}', question_function)[0]

            print(f"correct_classifyProcessed question_function: {question_function}")

        except Exception as e:
            self.logger.log_error(f"Error processing question_function: {e}")
            return "cwsb"

        total_time = time.time() - start_time
        print(f"Total time for processing: {total_time:.2f} seconds")

        return question_function
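    # Racing pattern: classify() (fast 7B model) and correct_classify() (slower
    # 72B model) run in parallel threads. The generator below yields whichever
    # result arrives first so the caller can respond immediately, then yields
    # both results once the second thread finishes so the caller can correct
    # itself if the two classifiers disagree.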
    def classify_or_correct(self, human_input):
        """
        Generator: first yield the earliest result, then yield all results.
        """

        result_queue = queue.Queue()
        final_results = {}
        lock = threading.Lock()
        first_result_sent = threading.Event()

        def wrapper(func, name):
            def inner():
                try:
                    result = func(human_input)
                except Exception as e:
                    result = "cwsb"
                    self.logger.log_error(f"[{name}] Error: {e}")

                with lock:
                    final_results[name] = result
                    if not first_result_sent.is_set():
                        result_queue.put((name, result))
                        first_result_sent.set()
            return inner

        # Start both classifier threads.
        thread1 = threading.Thread(target=wrapper(self.classify, "classify"))
        thread2 = threading.Thread(target=wrapper(self.correct_classify, "correct_classify"))
        thread1.start()
        thread2.start()

        # Yield the first result (whichever thread finishes first).
        first_name, first_result = result_queue.get(timeout=12)
        yield {
            "stage": "first",
            "function": first_name,
            "result": first_result
        }

        # Wait for both threads to finish.
        thread1.join()
        thread2.join()

        # Yield all results.
        yield {
            "stage": "all",
            "results": final_results
        }
    def music_keyword_retrieve(self, human_input):
        # Extract the song / keyword the user asked for.
        try:
            self.retrieve_message = [{'role': 'system', 'content': self.retrieve_prompts}]
            input_prompt = {
                "role": "user",
                "content": human_input
            }
            self.retrieve_message.append(input_prompt)
            response = self.client.chat.completions.create(
                model=self.model['retrieve_model'],
                messages=self.retrieve_message,
                stream=False,
                stop=["}"],
                timeout=10
            ).json()

            # Parse the reply and extract the keyword.
            response = json.loads(response)
            question_retrieve = response['choices'][0]['message']['content']
            print(question_retrieve)
            return question_retrieve
        except Exception as e:
            self.logger.log_error("获得音乐关键词失败")

    def get_info(self, endpoint, params=None):
        base_url = 'http://127.0.0.1:5000'
        url = f"{base_url}{endpoint}"

        try:
            # Send a GET request.
            response = requests.get(url, params=params)

            # Return the JSON payload on success.
            if response.status_code == 200:
                return response.json()
            else:
                print(f"Error: {response.status_code}, {response.text}")
                return None
        except requests.RequestException as e:
            print(f"请求失败: {e}")
            return None
    def error_answer(self, **kwargs):
        # Streamed reply used when the correction pass overrides the first classification.
        human_input = kwargs.get('human_input')
        self.chat_message1 = [{'role': 'system', 'content': self.chat_prompts1}]
        input_prompt = {
            "role": "user",
            "content": human_input
        }
        self.chat_message1.append(input_prompt)
        response_generator = self.client.chat.completions.create(
            model=self.model['chat_model1'],
            messages=self.chat_message1,
            stream=True,
            timeout=10
        )
        return {
            'response_generator': response_generator
        }
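    # chat() is a two-stage generator:
    #   1) It consumes the first classification from classify_or_correct(),
    #      dispatches it through handle_request(), and yields a dict with
    #      'question_function', 'chat_message' and (optionally) a streaming
    #      'response_generator'.
    #   2) If the fast classifier produced the first result and the 72B model
    #      later disagrees, it yields a second, corrected dict tagged with
    #      'source': 'correct_classify'.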
    def chat(self, human_input):
        self.logger.log_yellow("-----------------------------------------------------")
        try:
            return_dict = {}
            gen = self.classify_or_correct(human_input)
            # Stage 1: take whichever classification finishes first.
            first_finish = next(gen)
            self.logger.log_info(f"先完成返回:{first_finish}")
            question_function = first_finish['result']
            first_source = first_finish['function']

            # print("提取函数后值:",question_function)
            self.logger.log_blue(f"提取函数后值:{question_function}")
            valid_functions = [
                'ksam', 'tzam', 'ldzd', 'ldjx', 'jtrq', 'xzsj', 'jttq', 'jtxw',
                'dlzd', 'dljx', 'pljk', 'pljd', 'cjlz', 'cjlj', 'zszj', 'zsjx', 'gbzx', 'tsgd', 'jdgd', 'dayh', 'tgdw',
                'lddx', 'wddx', 'dldw', 'pldw', 'cjdx', 'amjd', 'amdt', 'ambw', 'gnjs', 'cqjm', 'ylcx', 'yltj', 'zsdw',
                'wdzj', 'wdjx', 'qthd', 'cwsb', 'bfyy', 'tzbf', 'ssyy', 'xsyy', 'sftj']
            if question_function not in valid_functions:
                question_function = 'cwsb'

            return_dict.update({'question_function': question_function})
            # print("return_dict:",return_dict)
            time4 = time.time()
            # self.logger.log_blue(f"一层大模型分类时间:{time4-time3}")
            kwargs = {'human_input': human_input}
            if question_function != 'cwsb':
                time5 = time.time()
                chat_response = self.handle_request(question_function, **kwargs)
                print("chat_response:", chat_response)
                if question_function == 'bfyy':
                    question_retrieve = self.music_keyword_retrieve(human_input)
                    chat_response = f"音乐播放调整:{question_retrieve}"
                time6 = time.time()
                self.logger.log_blue(f"二层时间:{time6-time5}")
                # Unpack the handler result (plain string or dict with a stream).
                if isinstance(chat_response, dict):
                    chat_message = chat_response.get('chat_message', '')
                    response_generator = chat_response.get('response_generator', '')
                    return_dict.update({'chat_message': chat_message, 'response_generator': response_generator})
                else:
                    return_dict.update({'chat_message': chat_response})
            else:
                return_dict.update({'chat_message': '我没有理解您的意思,请重新提问。'})
            self.logger.log_yellow(f"改正前的return_dict:{return_dict}")
            yield return_dict

            # Stage 2: if the fast classifier answered first, check it against the 72B result.
            if first_source == "classify":
                try:
                    all_finish = next(gen)
                    classify_result = all_finish['results'].get('classify')
                    correct_result = all_finish['results'].get('correct_classify')
                    self.logger.log_info(f"最后完成返回:{all_finish}")

                    if classify_result != correct_result:
                        self.logger.log_info("两个模型结果不一致,使用correct_classify更新返回")
                        question_function = correct_result
                        # Re-handle the request with the corrected classification.
                        chat_response = self.error_answer(**kwargs)
                        print("修正后chat_response:", chat_response)
                        # Update return_dict with the corrected result.
                        if correct_result == 'bfyy':
                            question_retrieve = self.music_keyword_retrieve(human_input)
                            chat_response = f"音乐播放调整:{question_retrieve}"

                        if isinstance(chat_response, dict):
                            if question_function in ['ksam', 'tzam', 'ldzd', 'ldjx', 'wdzj', 'wdjx', 'dlzd', 'dljx', 'pljk', 'pljd', 'cjlz', 'cjlj', 'zszj', 'zsjx', 'gbzx', 'dayh', 'tgdw', 'jtrq', 'xzsj', 'jttq', 'jtxw', 'qthd', 'bfyy', 'tzbf', 'ssyy', 'xsyy']:
                                return_dict.update({
                                    'question_function': question_function,
                                    'chat_message': self.operations1.get(question_function),
                                    'response_generator': chat_response.get('response_generator', ''),
                                    'source': 'correct_classify'
                                })
                            else:
                                return_dict.update({
                                    'question_function': question_function,
                                    'chat_message': kwargs.get('human_input'),
                                    'response_generator': chat_response.get('response_generator', ''),
                                    'source': 'correct_classify'
                                })
                        else:
                            return_dict.update({'chat_message': chat_response})

                        # Second yield: the corrected return_dict.
                        self.logger.log_yellow(f"改正后的return_dict:{return_dict}")
                        yield return_dict

                except Exception as e:
                    self.logger.log_error(f"二阶段处理出错: {e}")
        except Exception as e:
            self.logger.log_error(f"二层模型问题: {e}")
if __name__ == '__main__':
    dashscopeclient = DashscopeClient()
    while True:
        # Read human_input from the terminal.
        human_input = input("请输入问题:")

        if human_input.lower() in ['exit', 'quit', 'q']:
            print("程序结束")
            break

        time1 = time.time()
        return_gen = dashscopeclient.chat(human_input)

        # First yield: the initial classification and response.
        return_dict = next(return_gen)
        print("return_dict:", return_dict)

        def handle_all_finish():
            # Consume the optional second (corrected) yield in the background.
            try:
                update_dict = next(return_gen)
                print("update_dict:", update_dict)
                if 'source' in update_dict and update_dict['source'] is not None:
                    flag1 = True
            except StopIteration:
                pass  # generator already finished
            except Exception as e:
                print("处理最终结果时出错:", e)

        threading.Thread(target=handle_all_finish).start()

        ###### Streamed model output: print sentence by sentence ########
        if isinstance(return_dict, str):
            pass
        else:
            response_generator = return_dict.get('response_generator', '')
            punctuation_marks_regex = r'[。,,;!?]'
            full_content = ''
            last_index = 0
            full_reasoning_content = ''
            for response in response_generator:
                # if dashscopeclient.suggestion_flag==False:
                # print(response)
                if response.choices:
                    reasoning_result = getattr(response.choices[0].delta, "reasoning_content", None)
                    # print("reasoning_content:",reasoning_result)
                    result = getattr(response.choices[0].delta, "content", None)
                    # print("content:",result)
                    if reasoning_result:
                        full_reasoning_content += reasoning_result
                        print(reasoning_result)

                    if result:
                        full_content += result
                        # Flush complete sentences whenever a punctuation mark appears.
                        punctuation_indices = [m.start() for m in re.finditer(punctuation_marks_regex, full_content)]
                        for index in punctuation_indices:
                            if index > last_index:
                                accumulated_text = full_content[last_index:index + 1]
                                last_index = index + 1
                                if accumulated_text.strip():
                                    print(accumulated_text.strip())
                # else:
                #     print("结束了")
                #     break
        #################################################################

        time2 = time.time()
        print(time2 - time1)