390 lines
19 KiB
Python
390 lines
19 KiB
Python
import logging,json
|
||
from datetime import datetime
|
||
from flask import Blueprint, request, Response,session
|
||
from app.services.zhipu_service import ZhipuService
|
||
from app.services.zhipu_alltool_service import ZhipuAlltoolService
|
||
from app.services.zhipu_file_service import ZhipuFileService
|
||
from app.services.zhipu_kb_service import ZhipuKbService
|
||
from app.services.openai_service import OpenaiService
|
||
from app.utils.prompt_repository import PromptRepository # Add this import
|
||
from app.utils.sessions import init_session
|
||
import os
|
||
import re
|
||
|
||
# Path of the JSON file persisting which LLM backend is active ('zhipu' or 'openai').
CONFIG_FILE = 'llm_service_config.json'
# Path of the JSON file listing document titles/URLs used by fix_doc_links().
DOC_LINKS_FILE = 'doc_links.json'
|
||
|
||
def get_current_service():
    """Return the configured LLM backend name ('zhipu' or 'openai').

    Reads CONFIG_FILE when present. Falls back to 'zhipu' when the file is
    missing, unreadable, or contains invalid JSON, so a corrupt config file
    can never take the endpoints down.
    """
    if os.path.exists(CONFIG_FILE):
        try:
            with open(CONFIG_FILE, 'r', encoding='utf-8') as f:
                config = json.load(f)
            return config.get('llm_service', 'zhipu')
        except (OSError, json.JSONDecodeError):
            # A broken config file should degrade to the default, not 500.
            logger.warning('failed to read %s; defaulting to zhipu', CONFIG_FILE, exc_info=True)
    return 'zhipu'  # Default to zhipu if file doesn't exist or is invalid
|
||
|
||
def set_current_service(service):
    """Persist the chosen LLM backend name to CONFIG_FILE.

    Parameters:
        service: backend identifier string, expected 'zhipu' or 'openai'
                 (not validated here — callers pass literals).
    """
    config = {'llm_service': service}
    # Explicit encoding keeps the file readable on platforms whose default
    # locale encoding is not UTF-8 (mirrors get_current_service()).
    with open(CONFIG_FILE, 'w', encoding='utf-8') as f:
        json.dump(config, f)
|
||
|
||
def get_doc_links():
    """Return the list of document-link dicts from DOC_LINKS_FILE.

    Each entry is expected to carry 'title' and 'url' keys (consumed by
    fix_doc_links). Returns [] when the file is absent, unreadable, or
    contains invalid JSON so link fixing degrades gracefully.
    """
    if os.path.exists(DOC_LINKS_FILE):
        try:
            with open(DOC_LINKS_FILE, 'r', encoding='utf-8') as f:
                return json.load(f).get('documents', [])
        except (OSError, json.JSONDecodeError):
            # Missing or corrupt link catalog just means "no links to fix".
            logger.warning('failed to read %s; returning no doc links', DOC_LINKS_FILE, exc_info=True)
    return []
|
||
|
||
|
||
|
||
# Blueprint for the v2 Zhipu endpoints; registered by the app factory.
zhipu_controller_v2 = Blueprint('zhipu_controller_v2', __name__)

# Module-level service singletons shared by all requests to this blueprint.
zhipu_service = ZhipuService()
openai_service = OpenaiService()
zhipu_alltool_service = ZhipuAlltoolService()
zhipu_file_service = ZhipuFileService()
zhipu_kb_service = ZhipuKbService()

# Headers for server-sent-event responses: disable caching and nginx
# proxy buffering ('X-Accel-Buffering: no') so chunks flush immediately.
response_headers = {'Content-Type': 'text/event-stream',
                    'Cache-Control': 'no-cache',
                    'X-Accel-Buffering': 'no'}

logger = logging.getLogger(__name__)
|
||
|
||
def format_chunk(chunk, follow_up, follow_up_after):
    """Build one SSE payload: a text delta plus optional follow-up hints.

    Parameters:
        chunk: the streamed text fragment ("" marks end-of-stream by convention).
        follow_up: optional follow-up suggestion shown to the user.
        follow_up_after: optional follow-up prompt used after the suggestion.
    Returns a plain dict ready for json.dumps.
    """
    payload = dict(
        content=chunk,
        follow_up=follow_up,
        follow_up_after=follow_up_after,
    )
    return payload
|
||
|
||
@zhipu_controller_v2.route('/zhipu/retrive/stream', methods=['POST'])
def retrive_stream():
    """Stream a knowledge-base / web-search / report answer over SSE.

    Expects JSON body: {message, knowledge_id, prompt_template}. The message
    is first classified into one of web_search / retrive_knowledge /
    generate_report / update_report / clear_report, and each category gets
    its own streaming generator. Each SSE line is a JSON-encoded
    format_chunk payload terminated by '\\n'.
    """
    data = request.json
    message = data.get('message', '')
    knowledge_id = data.get('knowledge_id', '')
    prompt_template = data.get('prompt_template', '')  # NOTE(review): read but never used below
    logger.info(f'/zhipu/retrive/stream v2: {message}')

    # Bias the classifier: only return web_search when the input explicitly
    # asks to search the internet (rule text is consumed by the LLM in Chinese).
    classify_rule = "只有当输入明确表示需要搜索互联网时,才返回web_search"
    classification_result_str = zhipu_alltool_service.func_call_classify_with_voting(message, additional_desc=classify_rule)
    print(f'classification_result: {classification_result_str}')
    # classification_result = json.loads(classification_result_str)
    # The voting helper already returns a dict (see .get() usage below), so no json.loads.
    classification_result = classification_result_str

    if classification_result.get('category') == 'web_search':
        logger.info(f'question classify: web_search')

        def event_stream_websearch_sse():
            # Relay web-search chunks as they arrive; the trailing empty
            # chunk signals end-of-stream to the client.
            for chunk in zhipu_alltool_service.web_search_sse(message):
                if chunk:
                    chunk_out = format_chunk(chunk, None, None)
                    print(f'chunk_out: {chunk_out}')
                    yield json.dumps(chunk_out) + '\n'
            yield json.dumps(format_chunk("", None, None)) + '\n'

        return Response(event_stream_websearch_sse(),mimetype='text/event-stream', headers=response_headers)

    elif classification_result.get('category')== 'retrive_knowledge':
        logger.info(f'question classify: retrive_knowledge')
        # Rewrite first person so the KB lookup matches the named salesperson.
        message = message.replace("我", "我(徐春峰)")

        # If the user is asking about the projects they own, force a markdown
        # table output with a fixed column set by appending an instruction.
        ask_for_project = zhipu_alltool_service.func_call_yes_or_no_with_voting(message, "输入问题是否在查询负责的项目信息")
        # ask_for_project = json.loads(ask_for_project)
        if ask_for_project.get('answer') == 'yes':
            message += " ,用markdown表格形式输出,输出字段:客户名称, 商机名称, Sales stage, 预估 ACV, 预计签单时间"
        else:
            # Second check: questions about sales stage/info get a wider column set.
            ask_for_project = zhipu_alltool_service.func_call_yes_or_no_with_voting(message, "输入问题是否在询问项目的销售阶段或销售信息")
            # ask_for_project = json.loads(ask_for_project)
            if ask_for_project.get('answer') == 'yes':
                message += " ,用markdown表格形式输出,输出字段:客户名称, 商机名称, Sales stage, 预测类型,Timing 风险, 预估 ACV, 预计签单时间"

        def event_stream_retrive():
            accumulated_result = ""
            for chunk in zhipu_kb_service.retrive_sse(message, knowledge_id, system_prompt="你是一个销售助理,语言对话请以第一人称你我进行"):
                if chunk:
                    accumulated_result += chunk
                    chunk_out = format_chunk(chunk, None, None)
                    yield json.dumps(chunk_out) + '\n'
            # Empty chunk terminates the stream; log the full answer once done.
            yield json.dumps(format_chunk("", None, None)) + '\n'
            logger.info(f'accumulated_result: {accumulated_result}')

        return Response(event_stream_retrive(), mimetype='text/event-stream', headers=response_headers)

    elif classification_result.get('category')== 'generate_report':

        def event_stream_generate_report():
            # HACK: demo shortcuts below.
            # 1. get the current date
            # 2. append the project info
            # contain_project_info = zhipu_alltool_service.func_call_yes_or_no(message, "是否包含项目信息")
            # logger.info(f'contain_project_info: {contain_project_info}')
            # contain_project_info = json.loads(contain_project_info)
            # if contain_project_info.get('answer') == 'yes':
            #     logger.info(f'contain_project_info: {contain_project_info}')
            #     pass
            # else:
            #     return format_chunk("请指定生成日报的项目信息",None,None)
            prompt_date = datetime.now().strftime("%Y-%m-%d")  # NOTE(review): computed but never used
            prompt_report_template = PromptRepository().get_prompt("report_template")
            prompt_report_title = f"根据用户提问中\"\"\" {message} \"\"\" 中提到的项目信息 在知识库中查找该项目的销售日志。如果销售日志中缺乏模板中的要点(时间,参与人,事件,获得信息,信息来源,项目进展描述)信息,则该要点内容留空,不要填充信息 日报模板: \"\"\" {prompt_report_template} \"\"\"。输出: 日志报告"
            generated_report = ""
            for chunk in zhipu_kb_service.retrive_sse(prompt_report_title + message, knowledge_id, None):
                if chunk:
                    print(chunk)
                    generated_report += chunk
                    chunk_out = format_chunk(chunk, None, None)
                    yield json.dumps(chunk_out) + '\n'
            logger.info(generated_report)
            # Hard-coded per-customer "missing info" hints for the demo data set.
            absense_info = ''
            if '视睿电子' in message or '联特科技' in message :
                absense_info = "项目进展描述"
            elif '深圳麦克韦尔' in message:
                absense_info = "信息来源及项目进展描述"
            elif '芝麻地网科' in message:
                absense_info = "获得信息及项目进展描述"
            # Final chunk carries follow-up prompts instead of content.
            yield json.dumps(format_chunk("", f"日报缺乏{absense_info}信息,请补充", f"请补充日报缺乏的{absense_info}信息")) + '\n'

        return Response(event_stream_generate_report(), mimetype='text/event-stream', headers=response_headers)
    elif classification_result.get('category')== 'update_report':
        # 1. submit report
        # submit_result = zhipu_file_service.submit_file(prefix="销售日志",project_name="数字电网项目", file_content=message)

        # 2. regenerate the daily report combining the user-supplied update
        def event_stream_generate_report_updated():
            # HACK: demo shortcuts below.
            # 1. get the current date
            # 2. append the project info

            prompt_date = datetime.now().strftime("%Y-%m-%d")  # NOTE(review): computed but never used
            prompt_report_template = PromptRepository().get_prompt("report_template")
            prompt_report_title = f"根据用户提问中\"\"\" {message} \"\"\" 中提到的项目信息 在知识库中找该项目的销售日志并结合用户提供的新的日志信息 \"\"\"{message} \"\"\"生成日报。如果销售日志中缺乏模板中的要点(时间,参与人,事件,获得信息,信息来源,项目进展描述)信息,则该要点内容留空,不要填充信息 日报模板: \"\"\" {prompt_report_template} \"\"\"。输出: 日志报告"
            generated_report = ""
            for chunk in zhipu_kb_service.retrive_sse(prompt_report_title + message, knowledge_id, None):
                if chunk:
                    print(chunk)
                    generated_report += chunk
                    chunk_out = format_chunk(chunk, None, None)
                    yield json.dumps(chunk_out) + '\n'
            logger.info(generated_report)
            # yield json.dumps(format_chunk("", None, None)) + '\n'

        return Response(event_stream_generate_report_updated(), mimetype='text/event-stream', headers=response_headers)
    elif classification_result.get('category')== 'clear_report':
        deleted_files = zhipu_file_service.delete_file_by_prefix(prefix="销售日志", project_name="数字电网项目")
        # NOTE(review): this branch returns a plain dict (Flask JSON), not an
        # SSE stream like the other branches — confirm clients handle both.
        return format_chunk(deleted_files, None, None)
    else:
        return format_chunk( "输入意图判断不明,请明确意图", None, None)
|
||
|
||
|
||
|
||
@zhipu_controller_v2.route('/zhipu/analysis/stream', methods=['POST'])
def analysis_stream():
    """Stream a sales-stage analysis for the project named in the message.

    Flow: optional backend-switch command -> intent classification
    (analyze_sales vs provide_sales_update_info) -> knowledge-base lookup of
    the opportunity -> prompt assembly -> SSE stream of the LLM analysis,
    followed by chunked follow-up suggestions in the follow_up field.
    """
    init_session()
    data = request.json
    message = data.get('message', '')
    knowledge_id = data.get('knowledge_id', '')
    # Rewrite first person so KB lookups match the named salesperson.
    message = message.replace("我", "我(徐春峰)")

    # Backend-switch commands short-circuit the request entirely.
    if 'zhipu' in message.lower() or '智谱' in message:
        logger.info(f'switch to zhipu service, save to session')
        session['llm_service'] = 'zhipu'
        set_current_service('zhipu')
        return format_chunk("切换到智谱AI服务", None, None)
    # NOTE(review): the second clause duplicates the first ('openai' is already
    # lowercase), so it can never change the outcome.
    if 'openai' in message.lower() or 'openai' in message:
        logger.info(f'switch to openai service, save to session')
        session['llm_service'] = 'openai'
        set_current_service('openai')
        return format_chunk("切换到openai服务", None, None)
    # Default to the Zhipu AI service.
    llm_service = zhipu_service
    # NOTE(review): raises KeyError when the session has no 'llm_service' key —
    # confirm init_session() always seeds it, or use session.get().
    logger.info(f'llm_service: {session["llm_service"]}')
    # current_service = session.get('llm_service', 'zhipu') # Default to 'zhipu' if not set
    # The file-persisted choice wins over the session value.
    current_service = get_current_service()

    if current_service == 'openai':
        logger.info('Using OpenAI service')
        llm_service = openai_service
    else:
        logger.info('Using Zhipu service')
        llm_service = zhipu_service

    logger.info(f'/zhipu/analysis/stream v2: {message}')

    intent_categories =["analyze_sales","provide_sales_update_info"]

    classification_result_str = zhipu_alltool_service.func_call_classify_with_voting(message, intent_categories)
    logger.info(f'classification_result: {classification_result_str}')
    # The voting helper already returns a dict, so no json.loads.
    classification_result = classification_result_str
    # classification_result = json.loads(classification_result_str)
    # Keyword override: messages matching all four keywords are always
    # treated as a sales update regardless of the classifier's answer.
    if '更新' in message and '我负责' in message and '项目' in message and '最新' in message:
        classification_result = {"category":"provide_sales_update_info"}

    # if 'openai' in message.lower():
    #     logger.info(f'switch to openai service')
    #     llm_service = openai_service
    #     message = message.replace("openai", "")

    additional_business_info = ""
    if classification_result.get('category') == 'analyze_sales':
        logger.info(f'question classify: analyze_sales')
        # do analyze sales as before
        pass
    elif classification_result.get('category') == 'provide_sales_update_info':
        logger.info(f'question classify: provide_sales_update_info')
        # Updates must name the project they refer to; otherwise ask the user.
        contain_project_info = zhipu_alltool_service.func_call_yes_or_no_with_voting(message, "是否包含项目信息")
        logger.info(f'contain_project_info: {contain_project_info}')
        # contain_project_info = json.loads(contain_project_info)
        if contain_project_info.get('answer') == 'yes':
            additional_business_info = message
        else:
            return format_chunk( "请在补充信息中包含项目信息", None, None)
    else:
        return format_chunk( "输入意图判断不明,请明确意图", None, None)

    # Fetch the opportunity's business info from the knowledge base.
    prompt_get_business_info = f"""
    请根据用户提供的如下信息,查找相关的 '当前详细状态及已完成工作','Sales stage' 信息,并返回给用户:
    {message}
    """
    business_info = zhipu_kb_service.retrive(prompt_get_business_info, knowledge_id, None)
    logger.info(f'business_info: {business_info}')

    analysis_rule = PromptRepository().get_prompt('sales_analysis')
    print(f'analysis_rule: {analysis_rule}')

    # Analyze against the stage rules using the current detailed status,
    # close cadence, and Sales stage.
    prompt_analysis = f"""
    请根据查询到的上述商机信息:
    {business_info}
    """

    if additional_business_info and additional_business_info != "":
        logger.info(f'append additional business info: {additional_business_info}')
        prompt_analysis += f"""
        同时,请考虑以下额外的销售进展信息
        {additional_business_info}
        """

    prompt_analysis += f"""
    根据如下各销售阶段的销售阶段任务、销售关键动作、阶段转化标准:
    {analysis_rule}
    结合上述商机信息的对应阶段,按照下面的要点和格式,请着重分析新增的销售进展是否会改变上述分析结果,如果会改变,请给出分析结果,如果不会改变,请给出分析结果
    销售阶段分析细节部分的内容使用markdown引用块

    输出模版、内容和样式:

    ** 一、下一步行动建议**
    (根据上面各销售阶段所定义的销售关键动作,结合目前已经达成的工作和额外的销售进展,给出下一步的的行动建议,其行动建议尽可能采用定义的销售动作,每一个行动建议使用一个标号)

    ** 二、销售阶段分析细节**
    > 1. ***销售阶段分析***
    > 2. ***销售动作日志分析***
    > 3. ***销售动作与销售阶段的关系***
    > 4. ***判断结果***
    > 5. ***销售阶段分析报告***

    如果用户在下面的输入指令中指定了只需要上面所列的某个或某几个分析,请只输出指定分析的结果,如果未指定,请输出所有分析结果
    {message}
    """

    def event_stream():
        accumulated_result = ""
        for chunk in llm_service.generate_response_sse(prompt_analysis):
            if chunk:
                accumulated_result += chunk
                chunk_out = format_chunk(chunk, None, None)
                yield json.dumps(chunk_out) + '\n'

        logger.info(f'accumulated_result: {accumulated_result}')

        # Follow-up suggestions (contacts / action guide) are appended after
        # the analysis, streamed in small pieces via the follow_up field.
        followup_info = get_analysis_followup_info(message, accumulated_result, "1843318172036575232")
        followup_info += "如有新的销售进展,请补充相关信息,我会为您做进一步分析"
        logger.info(f'followup_info: {followup_info}')
        # Split followup_info into chunks of approximately 15 characters
        chunk_size = 15
        chunks = [followup_info[i:i+chunk_size] for i in range(0, len(followup_info), chunk_size)]

        # Yield each chunk separately
        for chunk in chunks:
            yield json.dumps(format_chunk("", chunk, "")) + '\n'

    return Response(event_stream(), mimetype='text/event-stream', headers=response_headers)
|
||
|
||
|
||
def get_analysis_followup_info(origin_message, analysis_text , knowledge_id):
    """Build follow-up text keyed off the customer named in the question.

    '汇理' gets a knowledge-base-backed detailed action guide; four other
    known customers get a hard-coded contact card plus reference links;
    anything else yields an empty string (no follow-up).
    """
    # Special case: derive action items from the analysis and look up their
    # detailed guide in a fixed knowledge base.
    if '汇理' in origin_message:
        # followup_info += "### 联系人信息\n"
        # followup_info += "- **陈明宇** (技术总监)\n - 📱 186-2155-7823\n"
        # followup_info += "- **王雪梅** (项目经理)\n - 📱 135-8867-4932\n"
        next_action_items = get_detailed_action_items(analysis_text, knowledge_id)
        guide = get_detailed_action_guide(next_action_items, "1858506435089068032")
        return guide + "\n\n"

    # Static demo contact cards; checked in the original elif order so a
    # message mentioning several customers resolves the same way.
    contact_cards = [
        ('芝麻地网科', "- **刘建华** (产品总监)\n - 📱 138-9876-5432\n"
                       "- **张婷婷** (商务经理)\n - 📱 159-2468-1357\n"),
        ('联特科技', "- **郭志强** (研发经理)\n - 📱 177-3344-5566\n"
                     "- **林小华** (项目主管)\n - 📱 136-9988-7766\n"),
        ('视睿电子', "- **黄伟东** (技术经理)\n - 📱 158-1122-3344\n"
                     "- **赵敏** (销售总监)\n - 📱 139-5544-3322\n"),
        ('深圳麦克韦尔', "- **吴晓峰** (部门主管)\n - 📱 182-8899-7766\n"
                         "- **孙丽华** (商务代表)\n - 📱 137-6655-4433\n"),
    ]
    for keyword, card in contact_cards:
        if keyword in origin_message:
            followup_info = "### 联系人信息\n" + card
            break
    else:
        # Unknown customer: no follow-up at all.
        return ""

    # Shared reference links appended to every static contact card.
    action_guide_url = "https://fiq58om72ph.feishu.cn/wiki/GbV3wXWu2igPoRkXSP8cMDzvnkd?from=from_copylink"
    followup_info += "\n### 参考资料\n"
    followup_info += f"- [📚 行动指南]({action_guide_url})\n"

    sales_stage_definition_url = "https://fiq58om72ph.feishu.cn/wiki/I8Raw0MVKizZTvkR4UWcRmy4n7f?from=from_copylink"
    followup_info += f"- [📖 销售阶段手册]({sales_stage_definition_url})\n"

    followup_info += "\n\n"

    return followup_info
|
||
|
||
def get_detailed_action_items(message, knowledge_id):
    """Extract the verbatim next-step action items from an analysis report.

    Parameters:
        message: the full analysis/recommendation text to mine.
        knowledge_id: accepted for signature symmetry with the other helpers
                      but unused here — extraction goes through the OpenAI model.
    Returns the model's extraction as a string.
    """
    extraction_prompt = f"""
    以下是销售行动建议及分析报告
    {message}
    请提取上述文本中下一步行动建议的原始文本并返回
    """
    logger.info(f'prompt_detailed_action_items: {extraction_prompt}')
    extracted_items = openai_service.generate_response(extraction_prompt)
    logger.info(f'detailed_action_items: {extracted_items}')
    return extracted_items
|
||
|
||
def get_detailed_action_guide(message, knowledge_id):
    """Look up the detailed action guide for the given action suggestions.

    Queries the knowledge base for the full guide text (tables, markdown,
    mail templates preserved), then rewrites bare document titles into
    markdown links via fix_doc_links.
    """
    guide_prompt = f"""
    以下是销售行动建议及分析报告
    {message}
    请根据上文中所提到的行动建议,查询知识库,给出对应行动建议所对应的详细的行动指南,尽可能保持知识库中答案的格式,如答案中有表格,请保持表格的格式输出,如果答案中包含markdown格式及链接,请保持markdown格式及链接。请完整的输出知识库中的答案内容,不要遗漏信息,不要省略信息,如有邮件模版也请完整输出邮件模版。
    """
    guide_text = zhipu_kb_service.retrive(guide_prompt, knowledge_id, None)
    logger.info(f'detailed_action_guide: {guide_text}')

    # Linkify known document titles mentioned in the answer.
    guide_text = fix_doc_links(guide_text, get_doc_links())
    logger.info(f'detailed_action_guide fixed links: {guide_text}')
    return guide_text
|
||
|
||
def fix_doc_links(message, doc_links):
    """Turn bare document titles in *message* into markdown links.

    Parameters:
        message: text possibly mentioning known document titles.
        doc_links: list of dicts with 'title' and 'url' keys (see get_doc_links).
    Returns the message with each title replaced by "[title](url)", skipping
    titles already rendered as markdown links (i.e. followed by "](...)").

    Fix: the title is regex-escaped before being embedded in the pattern —
    previously a title containing metacharacters (e.g. "C++", "(draft)")
    raised re.error or matched the wrong text — and the replacement is a
    callable so backslashes in the URL are inserted literally.
    """
    for doc in doc_links:
        title = doc.get('title')
        url = doc.get('url')
        if not title or not url:
            # Skip malformed catalog entries rather than raising.
            continue
        # Replace only if the title is not already the text of a markdown link.
        pattern = re.escape(title) + r'(?!\]\([^)]*\))'
        message = re.sub(pattern, lambda m: f'[{title}]({url})', message)
    return message