import argparse
from tools.yaml_operator import read_yaml
import sys
import os
import threading
import signal
import time
import re
import websockets
import asyncio
import requests
import atexit
from Speech_processor import Speechprocessor
from Speech_processor import SpeechRecognizer
from Speech_processor import SpeechAudio
from Hotword_awaker import Awaker
from LLM import DashscopeClient
# from LLM import DifyClient
from tools.log import CustomLogger
from pathlib import Path
import random
import subprocess
import datetime
import shutil
import json
from ask_summarize import extract_data_from_log
import socket
import copy
current_file_path = os.path.abspath(__file__)
Language_Path = os.path.dirname(os.path.dirname(os.path.dirname(current_file_path)))
MassageRobot_Dobot_Path = os.path.dirname(Language_Path)
print("MassageRobot_Dobot_Path:",MassageRobot_Dobot_Path)
sys.path.append(MassageRobot_Dobot_Path)
from VortXDB.client import VTXClient
# 自定义输出类,将输出同时发送到终端和日志文件
class MultiWriter:
def __init__(self, *writers):
self.writers = writers
def write(self, message):
for writer in self.writers:
writer.write(message)
writer.flush() # 确保及时输出
def flush(self):
for writer in self.writers:
writer.flush()
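# Usage sketch (illustrative only): MultiWriter tees every write to all wrapped streams,
# so assigning it to sys.stdout duplicates prints to both the terminal and the log, e.g.:
#   sys.stdout = MultiWriter(sys.__stdout__, open('run.log', 'w'))
#   print("hello")   # appears on the terminal and in run.log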
# 重定向标准输出和标准错误到文件和终端
def redirect_output(log_file):
# 保存当前的标准输出和标准错误(终端输出)
original_stdout_fd = sys.stdout.fileno()
original_stderr_fd = sys.stderr.fileno()
# 打开日志文件用于写入
log_fd = os.open(log_file, os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
# 保留原始的标准输出和错误输出,创建文件对象
original_stdout = os.fdopen(os.dup(original_stdout_fd), 'w')
original_stderr = os.fdopen(os.dup(original_stderr_fd), 'w')
# 将 stdout 和 stderr 分别指向日志文件
os.dup2(log_fd, original_stdout_fd)
os.dup2(log_fd, original_stderr_fd)
# 将输出同时发送到终端和日志文件
sys.stdout = MultiWriter(original_stdout, os.fdopen(log_fd, 'w'))
sys.stderr = MultiWriter(original_stderr, os.fdopen(log_fd, 'w'))
# 获取当前时间并格式化为适合文件名的字符串(例如:'2024-11-13_154530')
timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H%M%S")
# 目标目录和目标文件路径(将日志文件复制到新的位置)
target_dir = '/home/jsfb/jsfb_ws/LanguageLog/'
os.makedirs(target_dir, exist_ok=True) # 确保目标目录存在
ask_summarize_dir = '/home/jsfb/jsfb_ws/LanguageLog/ask_summarize'
os.makedirs(ask_summarize_dir, exist_ok=True) # 确保汇总目录存在
source_log_file = '../log/Language.log'
# 如果源文件不存在,则创建一个空文件
if not os.path.exists(source_log_file):
with open(source_log_file, 'w'):
pass # 创建空文件
# 定义复制的目标文件路径(带时间戳)
target_file = os.path.join(target_dir, f'Language_{timestamp}.log')
# 复制文件到新位置
shutil.copy(source_log_file, target_file)
try:
log_file_path = target_file # 替换为实际的日志文件路径
output_file_path = os.path.join(ask_summarize_dir, f'Language_{timestamp}.txt')
extract_data_from_log(log_file_path, output_file_path)
except Exception as e:
print(f"保存上次聊天记录出错了: {e}")
log_file = '../log/Language.log'
redirect_output(log_file)
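# Note: the redirection above runs at import time, so everything printed by the rest of
# this module (and by the classes defined below) is also mirrored into ../log/Language.log.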
# 获取资源文件的正确路径
def get_resource_path(relative_path):
"""获取资源文件的路径打包后的应用会把资源放到_MEIPASS目录"""
try:
# PyInstaller会把资源放到_MEIPASS路径下
base_path = sys._MEIPASS
except Exception:
# 没有打包时,使用脚本的当前目录
base_path = os.path.abspath(".")
return os.path.join(base_path, relative_path)
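# Example (sketch): when frozen by PyInstaller, sys._MEIPASS points at the unpacked bundle,
# so get_resource_path('pre_mp3/hello.mp3') resolves inside the bundle; during development
# it resolves relative to the current working directory instead.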
def parse_args():
parser = argparse.ArgumentParser(description='Speech processor')
parser.add_argument('--keyword_path', type=str, default=get_resource_path('keyword.yaml'))
args = parser.parse_args()
return args
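# Typical invocation (entry-point filename assumed for illustration):
#   python Language_controller.py --keyword_path keyword.yaml
# --keyword_path defaults to the bundled keyword.yaml resolved via get_resource_path().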
class RoboStorm():
def __init__(self,args):
vtxdb = VTXClient()
self.logger = CustomLogger()
self.Recognize_initialization_successful = True
self.Synthesizer_initialization_successful = True
self.LLM_initialization_successful = True
self.Awaken_initialization_successful = True
self.ui_speech_interrupted=False
self.ui_speech_end_flag = False
self.ui_speech_path=None
self.ui_speech_cancel = False
self.ksam_flag=False
self.player_flag=False
self.wait_1_second_flag=False
self.massage_status=None
self.speech_audio=SpeechAudio()
keyword_config: dict = read_yaml(args.keyword_path)
retries1 = 0
max_retries1 = 10 # 最大重试次数
retry_delay1 = 5 # 每次重试之间的延迟时间(秒)
while retries1 < max_retries1:
try:
self.recognizer = SpeechRecognizer()
self.recognizer.test_token()
if self.recognizer.token_success==True:
break
else:
self.logger.log_error(f"参数服务器token_HW有误尝试自动获取{retries1}")
self.recognizer.get_Speech_Recognize_token()
self.recognizer.token=self.recognizer.get_Speech_Recognize_token()
vtxdb.set("robot_config", "Language.Speech_processor.huaweiyun_recognize_config.token_HW",self.recognizer.token)
self.logger.log_info("设置参数服务器成功")
self.Recognize_initialization_successful=True
break
except Exception as e:
# self.logger.logger(f"语音识别初始化问题{e}")
retries1 += 1
self.logger.log_error(f"语音初始化失败: {e}, 尝试重试 {retries1}/{max_retries1}")
if retries1 < max_retries1:
instruction = {'message': f"语音初始化失败,正在重试第 {retries1} 次..."}
self.send_instruction(instruction, 'on_message')
time.sleep(retry_delay1) # 等待 retry_delay 秒后再重试
else:
instruction = {'message': "无法聊天,请检查网络连接!"}
self.send_instruction(instruction, 'on_message')
# sys.exit(1) # 视情况决定是否退出程序
self.Recognize_initialization_successful = False
self.logger.log_error("达到最大重试次数,初始化失败,退出循环")
break # 达到最大重试次数后,退出循环
try:
self.hotword_detector = Awaker()
self.Awaken_initialization_successful=True
except Exception as e:
self.logger.log_error(f"{e},Please cheack path or app_id")
instruction = {'message':"唤醒失败!!!"}
self.send_instruction(instruction,'on_message')
# sys.exit(1)
self.Awaken_initialization_successful = False
return
retries = 0
max_retries = 10 # 最大重试次数
retry_delay = 5 # 每次重试之间的延迟时间(秒)
while retries < max_retries:
try:
self.speech_processor = Speechprocessor()
self.Synthesizer_initialization_successful = True
self.logger.log_info("语音初始化成功")
break # 初始化成功后,跳出循环
except Exception as e:
retries += 1
self.logger.log_error(f"语音初始化失败: {e}, 尝试重试 {retries}/{max_retries}")
if retries < max_retries:
instruction = {'message': f"语音初始化失败,正在重试第 {retries} 次..."}
self.send_instruction(instruction, 'on_message')
time.sleep(retry_delay) # 等待 retry_delay 秒后再重试
else:
instruction = {'message': "无法聊天,请检查网络连接!"}
self.send_instruction(instruction, 'on_message')
# sys.exit(1) # 视情况决定是否退出程序
self.Synthesizer_initialization_successful = False
self.logger.log_error("达到最大重试次数,初始化失败,退出循环")
break # 达到最大重试次数后,退出循环
self.speech_thread_stop_event = threading.Event()
self.correct_stop_event = threading.Event()
try:
self.llm = DashscopeClient()
self.LLM_initialization_successful=True
# self.llm_dify = DifyClient()
except Exception as e:
self.logger.log_error(f"大模型初始化{e}")
instruction = {'message':"请检查网络连接!!!"}
self.send_instruction(instruction,'on_message')
# sys.exit(1)
self.LLM_initialization_successful = False
return
self.keyword = keyword_config
self.hotword_interrupted = False # 检测是否退出主程序
self.running_task = None
self.running_zone = None
self.audio_map = {
# 'jttq': 'pre_mp3/weather.mp3',
# 'jtxw': 'pre_mp3/news.mp3',
'ksam': 'pre_mp3/not_begin',
'tzam': 'pre_mp3/amstop',
'ldzd': 'pre_mp3/move_deeper/', # 指向文件夹路径
'ldjx': 'pre_mp3/lift_up/', # 指向文件夹路径
'wdzj': 'pre_mp3/tem_up/', # 指向文件夹路径
'wdjx': 'pre_mp3/tem_down/', # 指向文件夹路径
'dlzd': 'pre_mp3/gear_up/', # 指向文件夹路径
'dljx': 'pre_mp3/gear_down/', # 指向文件夹路径
'pljk': 'pre_mp3/shockwave_frequency_up/', # 指向文件夹路径
'pljd': 'pre_mp3/shockwave_frequency_down/', # 指向文件夹路径
'cjlz': 'pre_mp3/shockwave_press_up/', # 指向文件夹路径
'cjlj': 'pre_mp3/shockwave_press_down/', # 指向文件夹路径
'tsgd':'pre_mp3/increase_high/',
'jdgd':'pre_mp3/decrease_high/',
'zszj': 'pre_mp3/stone_speed_up/',
'zsjx': 'pre_mp3/stone_speed_down/',
'gbzx': 'pre_mp3/stone_direction_change/',
'bfyy': '',
'tzbf': '',
'ssyy': '',
'xsyy': '',
'yltj': ''
}
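# The keys above appear to be pinyin initialisms for the supported voice commands, e.g.
# 'ksam'≈开始按摩, 'tzam'≈停止按摩, 'ldzd'/'ldjx'≈力度增大/减小, 'wdzj'/'wdjx'≈温度增加/减小;
# each non-empty value is a folder of pre-recorded mp3 replies from which one file is picked at random.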
self.speech_thread = threading.Thread(target=self.hello)
self.speech_thread.start()
self.start_awaken()
self.stop_event = asyncio.Event() # 用于停止事件
self.loop = None
self.server_thread = None
self.client_thread = None
self.correct_thread = None
def start_awaken(self):
base_path = os.path.dirname(os.path.abspath(__file__))
script_path = os.path.join(base_path, "Hotword_awaker", "run.sh")
script_dir = os.path.join(base_path, "Hotword_awaker")
log_path = os.path.join(base_path, "Hotword_awaker", "awaken.log") # 指定日志文件路径
print("script_dir:", script_dir)
if not os.path.exists(script_path):
raise FileNotFoundError(f"找不到脚本文件:{script_path}")
# 打开日志文件,用于写入脚本输出
with open(log_path, "w") as log_file:
# 使用 subprocess 后台运行脚本并将输出重定向到日志文件
self.process = subprocess.Popen(
["/bin/bash", script_path],
cwd=script_dir,
stdout=log_file, # 重定向标准输出到日志文件
stderr=log_file # 重定向标准错误到日志文件
)
print(f"Script started, output redirected to {log_path}")
def get_random_mp3_file(self,folder_path):
"""
从指定文件夹中随机选择一个 .mp3 文件并返回其完整路径。
:param folder_path: 文件夹路径
:return: 选定的 .mp3 文件的完整路径,如果文件夹不存在或没有 .mp3 文件则返回 None
"""
if os.path.isdir(folder_path) and any(f.endswith('.mp3') for f in os.listdir(folder_path)):
mp3_files = [f for f in os.listdir(folder_path) if f.endswith('.mp3')]
mp3_file = random.choice(mp3_files)
full_path = os.path.join(folder_path, mp3_file)
return full_path
else:
return None
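# Usage sketch: get_random_mp3_file(get_resource_path('pre_mp3/move_deeper/')) returns the
# full path of one random .mp3 in that folder, or None if the folder is missing or empty.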
def hello(self):
self.speech_audio.player.play(get_resource_path('pre_mp3/' + 'hello.mp3'),remove_file=False)
self.speech_audio.player.play(get_resource_path('pre_mp3/' + 'successfully_start.mp3'),remove_file=False)
def detected_callback(self):
'''检测到热词就打断语音识别过程并重新运行'''
# self.logger.log_info("检测到小悠小悠、小悠师傅")
self.send_instruction("get_status",'massage')
if self.speech_thread.is_alive():
self.logger.log_error(f"self.ui_speech_interrupted状态{self.ui_speech_interrupted}")
self.logger.log_error(f"self.ui_speech_end_flag状态{self.ui_speech_end_flag}")
self.logger.log_error(f"self.ui_speech_cancel状态{self.ui_speech_cancel}")
print("正在播放中打断")
self.speech_thread_stop_event.set()
if self.ui_speech_interrupted == False:
self.logger.log_info("检测到小悠小悠、小悠师傅")
folder_path = get_resource_path(f'pre_mp3/我在/')
if os.path.isdir(folder_path) and any(f.endswith('.mp3') for f in os.listdir(folder_path)):
mp3_files = [f for f in os.listdir(folder_path) if f.endswith('.mp3')]
mp3_files = random.choice(mp3_files)
path=folder_path+mp3_files
self.send_instruction({'path': path}, 'ui_lip_sync')
self.speech_audio.player.play(path,remove_file=False,priority=10)
print('folder_path:',folder_path)
else:
self.logger.log_info("UI打断")
folder_path = get_resource_path(f'pre_mp3/empty_audio/')
if os.path.isdir(folder_path) and any(f.endswith('.mp3') for f in os.listdir(folder_path)):
mp3_files = [f for f in os.listdir(folder_path) if f.endswith('.mp3')]
mp3_files = random.choice(mp3_files)
path=folder_path+mp3_files
self.speech_audio.player.play(path,remove_file=False,priority=10)
print('folder_path:',folder_path)
self.speech_thread.join()
print("78787878")
self.speech_thread_stop_event.clear()
self.speech_thread = threading.Thread(target=self.run_speech)
self.speech_thread.start()
else:
self.logger.log_error(f"self.ui_speech_interrupted状态{self.ui_speech_interrupted}")
self.logger.log_error(f"self.ui_speech_end_flag状态{self.ui_speech_end_flag}")
self.logger.log_error(f"self.ui_speech_cancel状态{self.ui_speech_cancel}")
if self.correct_thread is not None and self.correct_thread.is_alive():
self.speech_thread_stop_event.set()
print("correct_thread还活着")
else:
print("111111111111111")
print("没有在播放中打断")
if self.ui_speech_interrupted == False:
folder_path = get_resource_path(f'pre_mp3/我在/')
if os.path.isdir(folder_path) and any(f.endswith('.mp3') for f in os.listdir(folder_path)):
mp3_files = [f for f in os.listdir(folder_path) if f.endswith('.mp3')]
mp3_files = random.choice(mp3_files)
path=folder_path+mp3_files
self.send_instruction({'path': path}, 'ui_lip_sync')
self.speech_audio.player.play(path,remove_file=False)
else:
folder_path = get_resource_path(f'pre_mp3/empty_audio/')
if os.path.isdir(folder_path) and any(f.endswith('.mp3') for f in os.listdir(folder_path)):
mp3_files = [f for f in os.listdir(folder_path) if f.endswith('.mp3')]
mp3_files = random.choice(mp3_files)
path=folder_path+mp3_files
self.send_instruction({'path': path}, 'ui_lip_sync')
self.speech_audio.player.play(path,remove_file=False,priority=10)
print('folder_path:',folder_path)
self.speech_thread_stop_event.clear()
self.speech_thread = threading.Thread(target=self.run_speech)
self.speech_thread.start()
def signal_handler(self, signal, frame):
'''捕获Ctrl+C信号停止热词检测'''
self.stop_event.set()
if self.loop is not None:
self.loop.call_soon_threadsafe(self.loop.stop) # 停止事件循环
self.hotword_detector.interrupted = True
sys.exit(0)
def cleanup(self):
'''程序退出时的清理工作'''
print("程序退出,正在清理资源...")
if self.server_thread is not None:
self.stop_event.set()
self.server_thread.join()
if self.client_thread is not None:
self.client_thread.join()
print("资源清理完毕")
def run_hotword_detect(self):
'''启动热词检测线程'''
self.logger.log_info('Listening... please say wake-up word.')
self.hotword_detector.start_server(self.detected_callback)
def run_speech(self):
self.logger.log_blue("进入run_speech")
'''进行一次语音处理'''
while not self.speech_thread_stop_event.is_set():
self.logger.log_blue("run_speech stop没set")
self.logger.log_blue(f"run_speech这里ui_speech_interrupted{self.ui_speech_interrupted}")
try:
if self.ui_speech_interrupted == True:
print("111111111111111111")
while not self.ui_speech_end_flag:
self.logger.log_blue("self.ui_speech_end_flag还在True,退不出去了")
time.sleep(0.1)
if self.speech_thread_stop_event.is_set():
return
if self.ui_speech_cancel:
self.ui_speech_interrupted = False
self.ui_speech_end_flag = False
self.send_message(message="UI_not_recognizing")
return
print("1222222222222222222")
text = self.recognizer.speech_recognize_UI(self.ui_speech_path)
instruction = {'message': text}
self.send_instruction(instruction, 'user_input')
self.ui_speech_interrupted = False
self.ui_speech_end_flag = False
self.send_message(message="UI_not_recognizing")
else:
print("33333")
if self.speech_thread_stop_event.is_set():
return
text, if_timeout, remaining_time = self.recognizer.speech_recognize(timeout=7)
if self.speech_thread_stop_event.is_set():
return
# 如果识别到的文本为空且还有剩余时间,则继续识别
while not text.strip() and remaining_time > 0:
text, if_timeout, remaining_time = self.recognizer.speech_recognize(timeout=remaining_time)
if self.speech_thread_stop_event.is_set():
return
instruction = {'message': text}
self.send_instruction(instruction, 'user_input')
if self.speech_thread_stop_event.is_set():
return
if if_timeout:
path = get_resource_path('pre_mp3/timeout.mp3')
self.send_instruction({'path': path}, 'ui_lip_sync')
self.speech_audio.player.play(path, remove_file=False)
return
if self.speech_thread_stop_event.is_set():
return
self.logger.log_info("Recognized text: " + text)
self.logger.log_error("--------------------------")
text_status = self.run_text(text)
if self.speech_thread_stop_event.is_set():
return
self.logger.log_blue(f"text_status: {text_status}")
if text_status == 2:
return
if text_status==1:
return
except Exception as e:
instruction = {'message':"失败,请检查网络连接"}
self.logger.log_error(f"失败{e}")
self.send_instruction(instruction,'on_error')
return
def run_text(self,text):
'''进行一次文本处理:返回0代表输入为空,返回1代表指令已处理或被用户打断,返回2代表机械臂状态异常、超时或出错'''
# 获取机械臂现在的状态给LLM进行处理包括力度、电流这些
try:
response = requests.get('http://127.0.0.1:5000/get_deep_thought')
self.logger.log_info("获取思考状态 ")
print(response.json()) # 输出返回的深度思考状态
self.llm.deep_thinking_flag=response.json().get('deep_thought_active')
self.logger.log_info(f"获取到的深度思考状态为:{self.llm.deep_thinking_flag}")
except Exception as e:
self.logger.log_error("获取不到深度思考状态")
try:
response = requests.get('http://127.0.0.1:5000/get_ai_search')
self.logger.log_info("获取思考状态 ")
print(response.json()) # 输出返回的深度思考状态
self.llm.search_flag=response.json().get('ai_search_active')
self.logger.log_info(f"获取到的联网搜索状态为:{self.llm.search_flag}")
except Exception as e:
self.logger.log_error("获取不到联网搜索状态")
try:
self.send_instruction("get_status",'massage')
self.llm.massage_status_languge = {key: self.massage_status[key] for key in ['task_time','progress','force','press','frequency','temperature','gear','body_part','current_head','shake','speed','direction','start_pos','end_pos','massage_path']}
except Exception as e:
self.logger.log_error(f"机械臂还未开启,无法获取状态:{e}")
if not self.speech_thread_stop_event.is_set():
if text.strip(): # 识别到有效文本则正常运行,否则语音提醒
# 语料库匹配
for key, value in self.keyword.items():
if key in text:
mp3_folder, instruction = value
path = get_resource_path(f'pre_mp3/{mp3_folder}')
if key == "开始按摩":
if self.ksam_flag==False:
try:
self.send_instruction("get_status",'massage')
if self.massage_status['massage_service_started'] == True and self.massage_status['is_massaging'] == False:
path = self.get_random_mp3_file(path)
# 发送按摩和UI指令
self.send_instruction(instruction, 'massage')
self.send_instruction({'path': path}, 'ui_lip_sync')
# 播放选定的mp3文件
self.speech_audio.player.play(path, remove_file=False)
return 1
else:
self.logger.log_error("机械臂状态不对,请确保机械臂已连接且不在按摩状态")
path = get_resource_path('pre_mp3/connect_and_status.mp3')
threading.Thread(target=self.speech_audio.player.play, args=(path,False)).start()
return 2
except Exception as e:
self.logger.log_error(f"获取机械臂状态失败:{e}")
path = get_resource_path('pre_mp3/connect_and_status.mp3')
threading.Thread(target=self.speech_audio.player.play, args=(path,False)).start()
return 2
else:
if self.massage_status['massage_service_started'] == True and self.massage_status['is_massaging'] == False:
path = get_resource_path('pre_mp3/begin.mp3')
threading.Thread(target=self.speech_audio.player.play, args=(path,False)).start()
else:
self.logger.log_error("请确保机械臂已连接且不在按摩中")
path = get_resource_path('pre_mp3/connect_and_status.mp3')
threading.Thread(target=self.speech_audio.player.play, args=(path,False)).start()
return 2
else:
try:
self.send_instruction("get_status",'massage')
if self.massage_status['massage_service_started'] == True:
path = self.get_random_mp3_file(path)
# 发送按摩和UI指令
self.send_instruction(instruction, 'massage')
self.send_instruction({'path': path}, 'ui_lip_sync')
# 播放选定的mp3文件
self.speech_audio.player.play(path, remove_file=False)
return 1
else:
self.logger.log_error("机械臂状态不对,请确保机械臂已连接且正在按摩")
path = get_resource_path('pre_mp3/connect_and_status.mp3')
threading.Thread(target=self.speech_audio.player.play, args=(path,False)).start()
return 2
except Exception as e:
self.logger.log_error(f"获取机械臂状态失败:{e}")
path = get_resource_path('pre_mp3/connect_and_status.mp3')
threading.Thread(target=self.speech_audio.player.play, args=(path,False)).start()
return 2
# 调用大模型
# 先获取一下机械臂以及按摩头状态
try:
self.send_instruction("get_status",'massage')
except Exception as e:
self.logger.log_info(f"无法获取机械臂状态:{e}")
punctuation_marks_regex = r'[。,]'
# return_info = self.llm.chat(text)
if self.speech_thread_stop_event.is_set():
return 1 # 立即退出
return_info = None # 存储 `llm.chat(text)` 结果
update_info = None
return_gen = None
chat_thread_done = threading.Event() # 标志线程是否完成
def chat_task():
"""运行 LLM 处理任务"""
nonlocal return_info
nonlocal return_gen
return_gen = self.llm.chat(text)
return_info=next(return_gen)
print("first return_info:",return_info)
chat_thread_done.set() # 标记任务完成
print("完成")
# **启动 `self.llm.chat(text)` 线程**
chat_thread = threading.Thread(target=chat_task, daemon=True)
chat_thread.start()
timeout = 60 # 最长等待时间(秒)
start_time = time.time()
self.logger.log_info("现在时间是:",start_time)
while not chat_thread_done.is_set(): # 等待线程完成
if self.speech_thread_stop_event.is_set():
self.logger.log_error("Stop event detected! Exiting immediately.")
return 1 # 立即退出
if time.time() - start_time > timeout:
self.logger.log_error("LLM chat request timed out. Exiting.")
try:
path = get_resource_path(f'pre_mp3/check_network.mp3')
self.send_instruction({'path': path}, 'ui_lip_sync')
threading.Thread(target=self.speech_audio.player.play, args=(path, False)).start()
except Exception as e:
self.logger.log_error("请检查message和mp3是否对应上")
return 2 # 超时返回
time.sleep(0.1) # 避免高频率 CPU 轮询
def handle_all_finish():
nonlocal update_info
if return_info is None:
print("return_info 为 None无法进行深拷贝")
else:
try:
return_info_copy = return_info.copy() # 浅拷贝
if 'response_generator' in return_info_copy:
del return_info_copy['response_generator']
return_info1 = copy.deepcopy(return_info_copy)
except Exception as e:
print(f"深拷贝失败: {e}")
print("return_info1:",return_info1)
try:
update_info = next(return_gen)
if return_info1['question_function']!=update_info['question_function']:
print("前后两次分类结果不一样")
self.wait_1_second_flag=True
correct_response_generator=update_info['response_generator']
correct_chat_message=update_info['chat_message']
self.command_process(update_info)
print("correct_chat_message:",correct_chat_message)
full_content = ''
last_index = 0
full_reasoning_content = ''
self.play_recored_file_thread = None
if self.massage_status['massage_service_started'] == True and self.massage_status['is_massaging'] == True:
self.logger.log_info(f"correct_response_generator{correct_response_generator}")
for response in correct_response_generator:
if response.choices:
reasoning_result = getattr(response.choices[0].delta, "reasoning_content", None)
result = getattr(response.choices[0].delta, "content", None)
if reasoning_result:
full_reasoning_content += reasoning_result
instruction = {'message':reasoning_result,'isReasoning':True}
self.send_instruction(instruction,'ui_ai_respon')
if self.speech_thread_stop_event.is_set():
self.logger.log_error("未在播报打断成功----------------------------------------------------------")
self.speech_thread_stop_event.clear()
return 1
if result:
# print("response:",response)
full_content += result
punctuation_indices = [m.start() for m in re.finditer(punctuation_marks_regex, full_content)] # 标点符号分隔断句位置
for index in punctuation_indices:
if self.speech_thread_stop_event.is_set():
self.logger.log_error("correct_current收到speech_thread_stop_event结束标志退出")
self.speech_thread_stop_event.clear()
return 1
if index > last_index:
accumulated_text = full_content[last_index:index+1]
last_index = index + 1
if accumulated_text.strip():
print("accumulated_text:",accumulated_text)
instruction = {'message':accumulated_text.strip()}
self.send_instruction(instruction,'ui_ai_respon')
filename = get_resource_path(self.speech_processor.synthesizer.speech_synthesize(accumulated_text.strip()))
if self.play_recored_file_thread is not None and self.play_recored_file_thread.is_alive(): # 用户打断则等待本句语音播放完成再返回
self.play_recored_file_thread.join()
if self.speech_thread_stop_event.is_set():
self.logger.log_error("正在播报打断成功----------------------------------------------------------")
self.speech_thread_stop_event.clear()
return 1
# time.sleep(4)
instruction = {'path':filename}
self.send_instruction(instruction,'ui_lip_sync')
if self.speech_thread_stop_event.is_set():
self.logger.log_error("未在播报打断成功----------------------------------------------------------")
self.speech_thread_stop_event.clear()
return 1
# 等待 player_flag 为 False 再进行播放
self.logger.log_info(f"self.player_flag:{self.player_flag}")
while self.player_flag:
self.logger.log_info("还在player_flag")
if self.speech_thread_stop_event.is_set():
self.logger.log_error("由于wait_1_second_flag没有置为True导致player_flag没有变为Flase所以再次唤醒就退出卡住的这里----------------------------------------------------------")
self.speech_thread_stop_event.clear()
return 1
time.sleep(0.1)
self.play_recored_file_thread = threading.Thread(target=self.speech_audio.player.play, args=(filename,True,10))
self.play_recored_file_thread.start()
else:
print("机械臂未连接或者不再按摩")
self.logger.log_yellow("-----------------------------------------------------")
self.wait_1_second_flag=False
else:
print("分类结果一致")
if self.play_recored_file_thread is not None and self.play_recored_file_thread.is_alive():
self.logger.log_blue("222222222222222222222222222222222")
self.play_recored_file_thread.join()
self.logger.log_blue("33333333333333333333")
self.logger.log_blue("11111111111111111111111111111111111")
print("return_info1/1:",return_info1)
print("update_dict:",update_info)
print("获取所有分类结果结束")
self.logger.log_blue("11111111111111111111111111111111111")
if self.speech_thread_stop_event.is_set():
self.logger.log_blue("handle_all_finishm没清除speech_thread_stop_event")
self.speech_thread_stop_event.clear()
except StopIteration as s:
print(f"生成器已结束了{s}")
pass # 生成器已结束
except Exception as e:
print("处理最终结果时出错:",e)
self.correct_thread=threading.Thread(target=handle_all_finish)
self.correct_thread.start()
if self.speech_thread_stop_event.is_set():
self.logger.log_blue(f"handle_all_finis后停止speech_thread_stop_event没更新")
self.speech_thread_stop_event.clear()
return 1 # 终止
self.logger.log_blue(return_info)
if isinstance(return_info, str):
path = get_resource_path('pre_mp3/error_input.mp3')
self.send_instruction({'path': path}, 'ui_lip_sync')
self.speech_audio.player.play(path,remove_file=False)
return 2
self.command_process(return_info)
question_function=return_info['question_function']
if question_function in self.audio_map:
if question_function in ['ldzd', 'ldjx', 'wdzj', 'wdjx','dlzd','dljx','pljk','pljd','cjlz','cjlj','zszj','zsjx','gbzx','tsgd','jdgd']:
try:
if self.massage_status['massage_service_started'] == True:
# if question_function=='dayh':
# self.logger.log_error("多按一会成功")
# path = get_resource_path('pre_mp3/massage_time_longer.mp3')
# threading.Thread(target=self.speech_audio.player.play, args=(path,False)).start()
# return 1
if self.massage_status['massage_head'] == 'thermotherapy_head' and question_function in ['cjlz','cjlj','zszj','zsjx','gbzx']:
self.logger.log_error("获取到是热疗头,无该控制指令")
path = get_resource_path('pre_mp3/thermotherapy_not_adjust.mp3')
threading.Thread(target=self.speech_audio.player.play, args=(path,False)).start()
return 2
elif self.massage_status['massage_head'] == 'shockwave_head' and question_function in ['wdzj', 'wdjx','dlzd','dljx','zszj','zsjx','gbzx']:
self.logger.log_error("获取到是冲击波,无该控制指令")
path = get_resource_path('pre_mp3/shockwave_not_adjust.mp3')
threading.Thread(target=self.speech_audio.player.play, args=(path,False)).start()
return 2
elif self.massage_status['massage_head'] == 'ball_head' and question_function in ['wdzj', 'wdjx','dlzd','dljx','pljk','pljd','cjlz','cjlj','zszj','zsjx','gbzx']:
self.logger.log_error("获取到是滚珠,无该控制指令")
path = get_resource_path('pre_mp3/ball_not_adjust.mp3')
threading.Thread(target=self.speech_audio.player.play, args=(path,False)).start()
return 2
elif self.massage_status['massage_head'] == 'finger_head' and question_function in ['wdzj', 'wdjx','dlzd','dljx','pljk','pljd','cjlz','cjlj','zszj','zsjx','gbzx']:
self.logger.log_error("获取到是一指禅,无该控制指令")
path = get_resource_path('pre_mp3/finger_not_adjust.mp3')
threading.Thread(target=self.speech_audio.player.play, args=(path,False)).start()
return 2
elif self.massage_status['massage_head'] == 'roller_head' and question_function in ['wdzj', 'wdjx','dlzd','dljx','pljk','pljd','cjlz','cjlj','zszj','zsjx','gbzx']:
self.logger.log_error("获取到是狼牙滚珠按摩头,无该控制指令")
path = get_resource_path('pre_mp3/roller_not_adjust.mp3')
threading.Thread(target=self.speech_audio.player.play, args=(path,False)).start()
return 2
elif self.massage_status['massage_head'] == 'stone_head' and question_function in ['dlzd','dljx','pljk','pljd','cjlz','cjlj']:
self.logger.log_error("获取到是砭石按摩头,无该控制指令")
path = get_resource_path('pre_mp3/stone_not_adjust.mp3')
threading.Thread(target=self.speech_audio.player.play, args=(path,False)).start()
return 2
elif self.massage_status['massage_head'] == 'heat_head' and question_function in ['dlzd','dljx','pljk','pljd','cjlz','cjlj','zszj','zsjx','gbzx']:
self.logger.log_error("获取到是能量热疗按摩头,无该控制指令")
path = get_resource_path('pre_mp3/heat_not_adjust.mp3')
threading.Thread(target=self.speech_audio.player.play, args=(path,False)).start()
return 2
elif self.massage_status['massage_head'] == 'ion_head' and question_function in ['dlzd','dljx','pljk','pljd','cjlz','cjlj','ldzd','ldjx','zszj','zsjx','gbzx']:
self.logger.log_error("获取到是ion按摩头无该控制指令")
path = get_resource_path('pre_mp3/ion_not_adjust.mp3')
threading.Thread(target=self.speech_audio.player.play, args=(path,False)).start()
return 2
elif self.massage_status['massage_head'] == 'spheres_head' and question_function in ['wdzj', 'wdjx','dlzd','dljx','pljk','pljd','cjlz','cjlj','zszj','zsjx','gbzx']:
self.logger.log_error("获取到是天球滚捏按摩头,无该控制指令")
path = get_resource_path('pre_mp3/spheres_not_adjust.mp3')
threading.Thread(target=self.speech_audio.player.play, args=(path,False)).start()
return 2
else:
folder_path = get_resource_path(self.audio_map[question_function])
random_audio_path = self.get_random_mp3_file(folder_path)
# random_audio_path = self.play_random_audio_from_folder(folder_path)
if random_audio_path:
self.player_flag=True
self.send_instruction({'path': random_audio_path}, 'ui_lip_sync')
self.speech_audio.player.play(random_audio_path, remove_file=False)
# 注意:若此处self.wait_1_second_flag的状态尚未更新,可能会卡死
self.logger.log_error(f"self.wait_1_second_flag:{self.wait_1_second_flag}")
if self.wait_1_second_flag==True:
time.sleep(1)
# while True:
# print("测试有没有更新player_flag 111111")
self.player_flag=False
self.logger.log_blue("已完成标志位变为flase")
return 1
else:
self.logger.log_error("请确保机械臂已连接且正在按摩中")
path = get_resource_path('pre_mp3/connect_and_status.mp3')
threading.Thread(target=self.speech_audio.player.play, args=(path,False)).start()
return 2
except Exception as e:
self.logger.log_error(f"请确保机械臂已连接且正在按摩中{e}1")
path = get_resource_path('pre_mp3/connect_and_status.mp3')
threading.Thread(target=self.speech_audio.player.play, args=(path,False)).start()
return 2
elif question_function=='yltj':
path = get_resource_path('pre_mp3/music_control')
path = self.get_random_mp3_file(path)
threading.Thread(target=self.speech_audio.player.play, args=(path,False)).start()
return 1
elif question_function == 'bfyy':
music_chat_message=return_info['chat_message']
retrieve_message = music_chat_message.split("音乐播放调整:")[-1]
self.logger.log_blue(f"retrieve_message:{retrieve_message}")
if 'None' in retrieve_message:
path = get_resource_path('pre_mp3/music_control')
path = self.get_random_mp3_file(path)
threading.Thread(target=self.speech_audio.player.play, args=(path,False)).start()
self.send_instruction(target='resume_music')
else:
response=self.send_instruction(instruction=retrieve_message,target='search')
if response.status_code == 200:
# 获取返回的数据(JSON格式)
data = response.json()
if isinstance(data, list) and not data:
self.logger.log_yellow("音乐为空列表,无法播放")
path = get_resource_path('pre_mp3/music_vip')
path = self.get_random_mp3_file(path)
threading.Thread(target=self.speech_audio.player.play, args=(path,False)).start()
else:
path = get_resource_path('pre_mp3/music_control')
path = self.get_random_mp3_file(path)
threading.Thread(target=self.speech_audio.player.play, args=(path,False)).start()
music_result = data[0]
print("Response received:", data)
print("music_result:",music_result)
music_result={"music_name":music_result[0],
"singer_name":music_result[1],
"songmid":music_result[2],
"album_img_url":(
f"http://y.gtimg.cn/music/photo_new/T002R180x180M000{music_result[3]}.jpg"
if music_result[3]
else ""
)}
# music_result=json.dumps(music_result, ensure_ascii=False, indent=4)
print("music_result:",music_result)
print(type(music_result))
# instruction=music_result
self.send_instruction(instruction=music_result,target='song_clicked')
else:
print(f"Request failed with status code: {response.status_code}")
return 1
elif question_function in ['tzbf', 'ssyy', 'xsyy']:
path = get_resource_path('pre_mp3/music_control')
path = self.get_random_mp3_file(path)
threading.Thread(target=self.speech_audio.player.play, args=(path,False)).start()
if question_function == 'tzbf':
self.send_instruction(target='pause_music')
elif question_function == 'ssyy':
self.send_instruction(target='play_last_song')
elif question_function == 'xsyy':
self.send_instruction(target='play_next_song')
# print("音乐")
return 1
elif question_function=='ksam':
if self.ksam_flag==False:
try:
if self.massage_status['massage_service_started'] == True and self.massage_status['is_massaging'] == False:
path = get_resource_path(self.audio_map[question_function])
path = self.get_random_mp3_file(path)
self.send_instruction({'path': path}, 'ui_lip_sync')
self.speech_audio.player.play(path, remove_file=False)
return 1
else:
self.logger.log_error("请确保机械臂已连接且不在按摩中")
path = get_resource_path('pre_mp3/connect_and_status.mp3')
threading.Thread(target=self.speech_audio.player.play, args=(path,False)).start()
return 2
except Exception as e:
self.logger.log_error(f"开始按摩失败:{e}")
path = get_resource_path('pre_mp3/connect_and_status.mp3')
threading.Thread(target=self.speech_audio.player.play, args=(path,False)).start()
return 2
else:
if self.massage_status['massage_service_started'] == True and self.massage_status['is_massaging'] == False:
path = get_resource_path('pre_mp3/begin.mp3')
threading.Thread(target=self.speech_audio.player.play, args=(path,False)).start()
else:
self.logger.log_error("请确保机械臂已连接且不在按摩中")
path = get_resource_path('pre_mp3/connect_and_status.mp3')
threading.Thread(target=self.speech_audio.player.play, args=(path,False)).start()
return 2
elif question_function=='tzam':
try:
if self.massage_status['massage_service_started'] == True and self.massage_status['is_massaging'] == True:
path = get_resource_path(self.audio_map[question_function])
path = self.get_random_mp3_file(path)
self.send_instruction({'path': path}, 'ui_lip_sync')
self.speech_audio.player.play(path, remove_file=False)
return 1
else:
self.logger.log_error("请确保机械臂已连接且正在按摩中")
path = get_resource_path('pre_mp3/connect_and_status.mp3')
threading.Thread(target=self.speech_audio.player.play, args=(path,False)).start()
return 2
except Exception as e:
self.logger.log_error(f"停止按摩失败:{e}")
path = get_resource_path('pre_mp3/connect_and_status.mp3')
threading.Thread(target=self.speech_audio.player.play, args=(path,False)).start()
return 2
else:
path = get_resource_path(self.audio_map[question_function])
path = self.get_random_mp3_file(path)
self.send_instruction({'path': path}, 'ui_lip_sync')
self.speech_audio.player.play(path, remove_file=False)
return 1
else:
response_generator = return_info['response_generator']
full_content = ''
last_index = 0
full_reasoning_content = ''
self.play_recored_file_thread = None
for response in response_generator:
if self.speech_thread_stop_event.is_set():
return 1
if response.choices:
reasoning_result = getattr(response.choices[0].delta, "reasoning_content", None)
result = getattr(response.choices[0].delta, "content", None)
if reasoning_result:
full_reasoning_content += reasoning_result
instruction = {'message':reasoning_result,'isReasoning':True}
self.send_instruction(instruction,'ui_ai_respon')
if self.speech_thread_stop_event.is_set():
self.logger.log_error("未在播报打断成功----------------------------------------------------------")
return 1
if result:
# print("response:",response)
full_content += result
punctuation_indices = [m.start() for m in re.finditer(punctuation_marks_regex, full_content)] # 标点符号分隔断句位置
for index in punctuation_indices:
if index > last_index:
accumulated_text = full_content[last_index:index+1]
last_index = index + 1
if accumulated_text.strip():
print("accumulated_text:",accumulated_text)
instruction = {'message':accumulated_text.strip()}
self.send_instruction(instruction,'ui_ai_respon')
filename = get_resource_path(self.speech_processor.synthesizer.speech_synthesize(accumulated_text.strip()))
if self.play_recored_file_thread is not None and self.play_recored_file_thread.is_alive(): # 用户打断则等待本句语音播放完成再返回
self.play_recored_file_thread.join()
if self.speech_thread_stop_event.is_set():
self.logger.log_error("正在播报打断成功----------------------------------------------------------")
return 1
# time.sleep(4)
instruction = {'path':filename}
self.send_instruction(instruction,'ui_lip_sync')
if self.speech_thread_stop_event.is_set():
self.logger.log_error("未在播报打断成功----------------------------------------------------------")
return 1
self.play_recored_file_thread = threading.Thread(target=self.speech_audio.player.play, args=(filename,True,8))
self.play_recored_file_thread.start()
self.logger.log_yellow("-----------------------------------------------------")
if self.play_recored_file_thread is not None and self.play_recored_file_thread.is_alive():
self.play_recored_file_thread.join()
self.llm.chat_message.append({'role': 'assistant', 'content': full_content})
return 1
else:
self.logger.log_info("empty input")
# self.speech_audio.player.play('pre_mp3/empty_input.mp3',remove_file=False)
return 0
else:
return 1
def command_process(self,info):
'''处理大模型指令并发送给硬件模块'''
command = info['chat_message']
instruction = ''
if '停止' in command: instruction = 'stop'
elif '加长' in command: instruction = 'insert_queue'
elif '跳过' in command: instruction = 'skip_queue'
else:
if '变重' in command: instruction = 'adjust:force:increase'
if '变轻' in command: instruction = 'adjust:force:decrease'
if '升温' in command: instruction = 'adjust:temperature:increase'
if '降温' in command: instruction = 'adjust:temperature:decrease'
if '增电' in command: instruction = 'adjust:gear:increase'
if '减电' in command: instruction = 'adjust:gear:decrease'
if '增频' in command:
if self.massage_status['massage_head'] == 'thermotherapy_head':
instruction = 'adjust:shake:increase'
elif self.massage_status['massage_head'] == 'shockwave_head':
instruction = 'adjust:frequency:increase'
else:
instruction = 'adjust:frequency:increase'
if '减频' in command:
if self.massage_status['massage_head'] == 'thermotherapy_head':
instruction = 'adjust:shake:decrease'
elif self.massage_status['massage_head'] == 'shockwave_head':
instruction = 'adjust:frequency:decrease'
else:
instruction = 'adjust:frequency:decrease'
if '增冲' in command: instruction = 'adjust:press:increase'
if '减冲' in command: instruction = 'adjust:press:decrease'
if '增速' in command: instruction = 'adjust:speed:increase'
if '减速' in command: instruction = 'adjust:speed:decrease'
if '换向' in command: instruction = 'adjust:direction:null:null'
if '升高' in command: instruction = 'adjust:high:increase:low'
if '降高' in command: instruction = 'adjust:high:decrease:low'
if '一点点' in command: instruction+= ':low'
elif '较大幅度' in command: instruction+= ':high'
elif '一点' in command: instruction+= ':mid'
if instruction != '':
self.send_instruction(instruction,'massage')
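# Instruction format sketch derived from the branches above: the base command
# 'adjust:<param>:<direction>' may get an amplitude suffix, e.g. an LLM reply containing
# "变重" plus "一点点" yields 'adjust:force:increase:low', which is then sent to the
# massage module over the websocket client.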
def start_client(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
self.loop.run_forever()
async def client(self,instruction,uri="ws://localhost:8765"):
async with websockets.connect(uri) as websocket:
self.logger.log_info("begin to send")
await websocket.send(instruction)
self.logger.log_info("Message " + instruction + " sent to the server")
def send_instruction(self,instruction = None,target='massage'):
'''todo:之后改成三个进程用ros通信,每个进程都将自己的指令pub出去需要的自己sub就行'''
try:
if target == 'massage':
uri = "ws://localhost:8765"
asyncio.run_coroutine_threadsafe(self.client(instruction,uri),self.loop)
self.logger.log_info("Instruction " + str(instruction) + " sent to massage")
elif target == 'ui':
requests.post('http://127.0.0.1:5000/update_massage_status', data=instruction)
self.logger.log_info("Instruction " + str(instruction) + " sent to ui")
elif target == 'ui_lip_sync':
requests.post('http://127.0.0.1:5000/lip_sync', data=instruction)
self.logger.log_info("Instruction " + str(instruction) + " sent to ui_lip_sync")
elif target == 'ui_ai_respon':
requests.post('http://127.0.0.1:5000/ai_respon', data=instruction)
# self.logger.log_info("Instruction " + str(instruction) + " sent to ui_ai_respon")
elif target == 'user_input':
requests.post('http://127.0.0.1:5000/user_input', data=instruction)
self.logger.log_info("Instruction " + str(instruction) + " sent to ui user_input")
elif target == 'on_message':
requests.post('http://127.0.0.1:5000/on_message', data=instruction)
self.logger.log_info("Instruction " + str(instruction) + " sent to ui on_message")
# 音乐请求
elif target == 'resume_music':
requests.post('http://127.0.0.1:5000/resume_music')
self.logger.log_info(" sent to ui resume_music")
elif target == 'search':
response=requests.post('http://127.0.0.1:5000/search',json={'term': instruction})
self.logger.log_info(" sent to ui search")
return response
elif target == 'song_clicked':
requests.post('http://127.0.0.1:5000/song_clicked',json=instruction)
self.logger.log_info(" sent to ui song_clicked")
elif target == 'pause_music':
requests.post('http://127.0.0.1:5000/pause_music')
self.logger.log_info(" sent to ui pause_music")
elif target == 'play_last_song':
requests.post('http://127.0.0.1:5000/play_last_song')
self.logger.log_info(" sent to ui play_last_song")
elif target == 'play_next_song':
requests.post('http://127.0.0.1:5000/play_next_song')
self.logger.log_info(" sent to ui play_next_song")
except Exception as e:
self.logger.log_info(f"请求失败: {e}")
def send_message(self,host="127.0.0.1", port=8767, message="UI_recognizing"):
"""
向指定的主机和端口发送消息。
:param host: 目标主机地址 (默认: 127.0.0.1)
:param port: 目标端口 (默认: 8767)
:param message: 要发送的消息内容 (默认: "UI_recognizing")
"""
# 创建一个客户端 socket
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
# 连接到服务器
client_socket.connect((host, port))
# 发送消息
client_socket.sendall(message.encode('utf-8'))
print(f"Message sent: {message}")
except BrokenPipeError:
print(f"Error: Broken pipe, could not send message.")
except Exception as e:
print(f"Error sending message: {e}")
finally:
# 关闭连接
client_socket.close()
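# Usage sketch: send_message() pushes the default "UI_recognizing" notification to the local
# UI socket on 127.0.0.1:8767; send_message(message="UI_not_recognizing") clears that state.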
def start_server(self):
async def server():
# 使用 asyncio.Future() 使服务器保持运行
async with websockets.serve(self.websocket_handler, "0.0.0.0", 8766):
print("WebSocket server started on ws://0.0.0.0:8766")
await asyncio.Future() # 保持服务器运行,直到手动停止
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
try:
self.loop.run_until_complete(server()) # 启动 WebSocket 服务器
except asyncio.CancelledError:
pass
finally:
print("WebSocket server closed")
self.loop.close()
async def websocket_handler(self, websocket, path):
async for message in websocket:
self.logger.log_info(f"Received: {message}")
if 'user_input' in message:
user_input = message.split(':')[1]
self.logger.log_info(user_input)
if self.speech_thread.is_alive():
self.speech_thread_stop_event.set()
self.speech_thread.join()
self.speech_thread_stop_event.clear()
self.speech_thread = threading.Thread(target=self.run_text,args=(user_input,))
self.speech_thread.start()
elif 'running_zone' in message:
running_zone = message.split(':')[1]
self.logger.log_blue(f"Running zone: {running_zone}")
if self.running_zone != running_zone:
self.running_zone = running_zone
filename = re.sub(r'(_left|_right)', '', running_zone)
folder_path = get_resource_path(f'pre_mp3/{filename}')
full_path = self.get_random_mp3_file(folder_path)
self.send_instruction({'path': full_path}, 'ui_lip_sync')
threading.Thread(target=self.speech_audio.player.play, args=(full_path,False, 5)).start()
elif 'speech_audio_start' in message:
print("监听到按住识别开始")
self.ui_speech_interrupted=True
self.ui_speech_cancel=False
self.ui_speech_end_flag = False
self.send_message()
# self.detected_callback()
self.callback_thread = threading.Thread(target=self.detected_callback)
self.callback_thread.start()
elif 'speech_audio_stop' in message:
self.send_message(message="UI_not_recognizing")
print("监听到按住识别结束")
self.ui_speech_end_flag = True
self.ui_speech_interrupted=False
_,self.ui_speech_path = message.split(":", 1)
print("self.ui_speech_path",self.ui_speech_path)
elif 'speech_audio_cancel' in message:
self.ui_speech_interrupted=False
self.ui_speech_end_flag = True
self.ui_speech_cancel = True
self.send_message(message="UI_not_recognizing")
print("UI取消语音")
elif 'change_awaken' in message:
new_awaken_word = message.split(':')[1]
print(new_awaken_word)
print(type(new_awaken_word))
file_path = os.path.join('Hotword_awaker', 'resource', 'keyword-nhxd.txt')
prompts_path = os.path.join('LLM', 'config', 'chat_prompts_new.txt')
# 打开文件并处理
with open(prompts_path, 'r+', encoding='utf-8') as file:
# 读取文件内容
content = file.read()
# 使用正则表达式匹配并替换 '你的名字:悠悠' 中的名字部分
updated_content = re.sub(r'你的名字:.*?。', f'你的名字:{new_awaken_word}。', content)
# 将文件指针移到文件开头
file.seek(0)
# 写入更新后的内容
file.write(updated_content)
file.truncate()
print("替换完成!")
# 打开文件并处理
with open(file_path, 'r+', encoding='utf-8') as file:
# 读取文件内容
content = file.read()
# 使用正则表达式匹配 `任意字符;nCM:30;` 并替换
updated_content = re.sub(r'.*?;nCM:30;', f"{new_awaken_word};nCM:30;", content)
# 将文件指针移到文件开头
file.seek(0)
# 写入更新后的内容
file.write(updated_content)
file.truncate()
print("替换完成!")
# 打开文件并进行内容替换
cpp_file_path = os.path.join('Hotword_awaker', 'ivw_demo.cpp')
with open(cpp_file_path, 'r+', encoding='utf-8') as file:
# 读取文件内容
content = file.read()
# 替换 `temp.find("悠悠")` 中的关键词
content = re.sub(r'temp\.find\(".*?"\) != string::npos', f'temp.find("{new_awaken_word}") != string::npos', content)
# 替换 `printf("----悠悠,拦截----");` 中的关键词
content = re.sub(r'printf\("----.*?,拦截----"\);', f'printf("----{new_awaken_word},拦截----");', content)
# 将修改后的内容写回文件
file.seek(0)
file.write(content)
file.truncate()
print("C++ 文件修改完成!")
try:
# 使用 subprocess 执行命令
process = subprocess.Popen(
['/bin/sudo', '-S', 'systemctl', 'restart', 'language'], # 命令
stdin=subprocess.PIPE, # 管道传递密码
stdout=subprocess.PIPE, # 捕获输出
stderr=subprocess.PIPE, # 捕获错误
text=True # 以文本模式传递
)
# 写入密码到 stdin
stdout, stderr = process.communicate(input='jsfb\n')
# 打印输出(可选)
print(stdout)
if stderr:
print("Error:", stderr)
print("服务重启完成!")
except Exception as e:
print("执行命令时出错:", e)
elif 'massage_status' in message:
self.massage_status = eval(message[message.find("{"):message.rfind("}")+1])
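# Note: the status payload is apparently a Python-dict-style string (True/False literals)
# sent by the local massage process, so it is parsed with eval(); json.loads would reject
# it unless the sender switched to JSON. Assumes the sender is trusted.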
# print(massage_status)
elif 'massage_plan_finish' in message:
path = get_resource_path(f'pre_mp3/massage_plan_finish.mp3')
self.send_instruction({'path': path}, 'ui_lip_sync')
self.speech_audio.player.play(file_path=path,remove_file=False)
# threading.Thread(target=self.speech_audio.player.play, args=(path, False)).start()
else:
try:
path = get_resource_path(f'pre_mp3/{message}.mp3')
self.send_instruction({'path': path}, 'ui_lip_sync')
threading.Thread(target=self.speech_audio.player.play, args=(path, False)).start()
except Exception as e:
self.logger.log_error("请检查message和mp3是否对应上")
await websocket.send(f"success")
def run(self):
'''启动服务器和客户端的线程'''
self.server_thread = threading.Thread(target=self.start_server)
self.server_thread.start()
self.client_thread = threading.Thread(target=self.start_client)
self.client_thread.start()
if __name__ == "__main__":
args = parse_args()
robostorm = RoboStorm(args)
# print(robostorm.llm_port_config)
# 捕获SIGINT和SIGTERM信号
signal.signal(signal.SIGINT, robostorm.signal_handler) # 捕获 Ctrl+C
signal.signal(signal.SIGTERM, robostorm.signal_handler) # 捕获 systemctl stop
# threading.Thread(target=robostorm.start_server).start()
# threading.Thread(target=robostorm.start_client).start()
# 使用atexit在程序退出时清理
atexit.register(robostorm.cleanup)
# 启动服务
robostorm.run()
if robostorm.Recognize_initialization_successful and robostorm.Synthesizer_initialization_successful and robostorm.Awaken_initialization_successful and robostorm.LLM_initialization_successful:
print("1234567")
threading.Thread(target=robostorm.run_hotword_detect).start()
# 阻止主线程退出
robostorm.server_thread.join()
robostorm.client_thread.join()
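# Startup order (for reference): module import redirects stdout/stderr into the log file,
# RoboStorm.__init__ brings up recognizer/wake-word/TTS/LLM with retries, run() starts the
# websocket server (port 8766) and the client event loop, and hotword detection only starts
# when every initialization flag is True.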