Compare commits

19 Commits

| Author | SHA1 | Date |
|---|---|---|
| | 7c21c497cd | |
| | 62c996a788 | |
| | eb0cd9ce46 | |
| | 92f93d00d9 | |
| | d9caf9acce | |
| | 2179d4e71e | |
| | 7a076b595c | |
| | 5544546d77 | |
| | 74cb795120 | |
| | e5c6e76ad2 | |
| | ade2f4d8c2 | |
| | 96815b76a8 | |
| | 996de663ae | |
| | 2f268f73e1 | |
| | 2c6e0c5f91 | |
| | 20c00dbb08 | |
| | d8e4fc2a38 | |
| | d37921099d | |
| | 21db91ee71 | |
@@ -48,4 +48,41 @@ curl -N -X POST http://127.0.0.1:5002/api/v2/zhipu/retrive/stream \
  -d '{
    "message":"视睿电子教学课件系统续费项目更新日志:**项目进展描述**:了解到客户需求和降本要求后,与项目经理杨建线下沟通。客户同意在剩余2套系统上增加模块,但要求降价,具体数量待内部讨论。",
    "knowledge_id":"1843318172036575232"
  }'

curl -N -X POST http://127.0.0.1:5002/api/v2/zhipu/analysis/stream \
  -H "Content-Type: application/json" \
  -d '{
    "message":"分析商机广汽汇理汽车金融有限公司的商机建议",
    "knowledge_id":"1843318172036575232"
  }'

curl -N -X POST http://127.0.0.1:5002/api/v2/zhipu/analysis/stream \
  -H "Content-Type: application/json" \
  -d '{
    "message":"2. 更新我负责的广汽汇理汽车金融有限公司项目的最新动作:1,今日和客户有做了一次技术交流,他们最近和Ocean Base和Gold DB也做了交流,以及内部也做了沟通,接下来他们希望能够拿出一个业务场景做测试,已确定哪个产品更适合他们。",
    "knowledge_id":"1843318172036575232"
  }'

curl -N -X POST http://127.0.0.1:5002/api/v2/zhipu/analysis/stream \
  -H "Content-Type: application/json" \
  -d '{
    "message":"更新我负责的广汽汇理汽车金融有限公司项目的最新动作:已经完成了POC,客户对POC效果表示满意",
    "knowledge_id":"1843318172036575232"
  }'

curl -N -X POST http://127.0.0.1:5002/api/v2/zhipu/analysis/stream \
  -H "Content-Type: application/json" \
  -d '{
    "message":"openai",
    "knowledge_id":"1843318172036575232"
  }'

curl -N -X POST http://127.0.0.1:5002/api/v2/zhipu/analysis/stream \
  -H "Content-Type: application/json" \
  -d '{
    "message":"zhipu",
    "knowledge_id":"1843318172036575232"
  }'
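For reference only (not part of the change set): a minimal Python sketch of a client for the streaming endpoint exercised above. It assumes the `requests` package is installed and relies on the controller below yielding one JSON object per line (`json.dumps(chunk_out) + '\n'`).

```python
# Minimal consumer sketch for /api/v2/zhipu/analysis/stream; payload values are
# copied from the curl examples above. Each non-empty line is one JSON chunk.
import json

import requests

payload = {
    "message": "分析商机广汽汇理汽车金融有限公司的商机建议",
    "knowledge_id": "1843318172036575232",
}

with requests.post(
    "http://127.0.0.1:5002/api/v2/zhipu/analysis/stream",
    json=payload,
    stream=True,
) as resp:
    for line in resp.iter_lines(decode_unicode=True):
        if not line:
            continue
        chunk = json.loads(line)  # one format_chunk(...) dict per line
        print(chunk)
```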
@@ -19,7 +19,7 @@ def stream_sse():
    message = data.get('message', '')

    def event_stream():
-        for chunk in zhipu_service.talk_to_zhipu_sse(message):
+        for chunk in zhipu_service.generate_response_sse(message):
            if chunk:
                yield chunk
@@ -30,7 +30,7 @@ def non_stream():
    data = request.json
    message = data.get('message', '')

-    response = zhipu_service.talk_to_zhipu(message)
+    response = zhipu_service.generate_response(message)
    print(f'response: {response}')
    return response
@@ -188,7 +188,7 @@ def analysis_stream():
    """

    def event_stream():
-        for chunk in zhipu_service.talk_to_zhipu_sse(prompt_analysis):
+        for chunk in zhipu_service.generate_response_sse(prompt_analysis):
            if chunk:
                yield chunk
@@ -4,12 +4,43 @@ from flask import Blueprint, request, Response,session
from app.services.zhipu_service import ZhipuService
from app.services.zhipu_alltool_service import ZhipuAlltoolService
from app.services.zhipu_file_service import ZhipuFileService
from app.services.zhipu_kb_service import ZhipuKbService
from app.services.openai_service import OpenaiService
from app.utils.prompt_repository import PromptRepository  # Add this import
from app.utils.sessions import init_session
import os
import re

CONFIG_FILE = 'llm_service_config.json'
DOC_LINKS_FILE = 'doc_links.json'

def get_current_service():
    if os.path.exists(CONFIG_FILE):
        with open(CONFIG_FILE, 'r') as f:
            config = json.load(f)
            return config.get('llm_service', 'zhipu')
    return 'zhipu'  # Default to zhipu if file doesn't exist

def set_current_service(service):
    config = {'llm_service': service}
    with open(CONFIG_FILE, 'w') as f:
        json.dump(config, f)

def get_doc_links():
    if os.path.exists(DOC_LINKS_FILE):
        with open(DOC_LINKS_FILE, 'r') as f:
            return json.load(f).get('documents', [])
    return []


zhipu_controller_v2 = Blueprint('zhipu_controller_v2', __name__)

zhipu_service = ZhipuService()
openai_service = OpenaiService()
zhipu_alltool_service = ZhipuAlltoolService()
zhipu_file_service = ZhipuFileService()
zhipu_kb_service = ZhipuKbService()

response_headers = {'Content-Type': 'text/event-stream',
                    'Cache-Control': 'no-cache',
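For illustration (not part of the diff): the round trip that `set_current_service()` / `get_current_service()` above perform against `llm_service_config.json`, written out as a standalone sketch. The 'openai' value is just an example.

```python
# Standalone sketch of the config round trip used by the controller helpers above.
import json

CONFIG_FILE = 'llm_service_config.json'  # same file name as in the controller

with open(CONFIG_FILE, 'w') as f:
    json.dump({'llm_service': 'openai'}, f)          # what set_current_service('openai') writes

with open(CONFIG_FILE, 'r') as f:
    print(json.load(f).get('llm_service', 'zhipu'))  # -> 'openai'
```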
@@ -66,7 +97,7 @@ def retrive_stream():

    def event_stream_retrive():
        accumulated_result = ""
-        for chunk in zhipu_service.retrive_sse(message, knowledge_id, system_prompt="你是一个销售助理,语言对话请以第一人称你我进行"):
+        for chunk in zhipu_kb_service.retrive_sse(message, knowledge_id, system_prompt="你是一个销售助理,语言对话请以第一人称你我进行"):
            if chunk:
                accumulated_result += chunk
                chunk_out = format_chunk(chunk, None, None)
@@ -92,7 +123,7 @@ def retrive_stream():
        prompt_report_template = PromptRepository().get_prompt("report_template")
        prompt_report_title = f"根据用户提问中\"\"\" {message} \"\"\" 中提到的项目信息 在知识库中查找该项目的销售日志。如果销售日志中缺乏模板中的要点(时间,参与人,事件,获得信息,信息来源,项目进展描述)信息,则该要点内容留空,不要填充信息 日报模板: \"\"\" {prompt_report_template} \"\"\"。输出: 日志报告"
        generated_report = ""
-        for chunk in zhipu_service.retrive_sse(prompt_report_title + message, knowledge_id, None):
+        for chunk in zhipu_kb_service.retrive_sse(prompt_report_title + message, knowledge_id, None):
            if chunk:
                print(chunk)
                generated_report += chunk
@@ -121,9 +152,9 @@ def retrive_stream():

        prompt_date = datetime.now().strftime("%Y-%m-%d")
        prompt_report_template = PromptRepository().get_prompt("report_template")
-        prompt_report_title = f"根据用户提问中\"\"\" {message} \"\"\" 中提到的项目信息 在知识库中查找该项目的销售日志并结合用户提供的新的日志信息 \"\"\"{message} \"\"\"生成日报。如果销售日志中缺乏模板中的要点(时间,参与人,事件,获得信息,信息来源,项目进展描述)信息,则该要点内容留空,不要填充信息 日报模板: \"\"\" {prompt_report_template} \"\"\"。输出: 日志报告"
+        prompt_report_title = f"根据用户提问中\"\"\" {message} \"\"\" 中提到的项目信息 在知识库中找该项目的销售日志并结合用户提供的新的日志信息 \"\"\"{message} \"\"\"生成日报。如果销售日志中缺乏模板中的要点(时间,参与人,事件,获得信息,信息来源,项目进展描述)信息,则该要点内容留空,不要填充信息 日报模板: \"\"\" {prompt_report_template} \"\"\"。输出: 日志报告"
        generated_report = ""
-        for chunk in zhipu_service.retrive_sse(prompt_report_title + message, knowledge_id, None):
+        for chunk in zhipu_kb_service.retrive_sse(prompt_report_title + message, knowledge_id, None):
            if chunk:
                print(chunk)
                generated_report += chunk
@@ -142,11 +173,35 @@ def retrive_stream():

@zhipu_controller_v2.route('/zhipu/analysis/stream', methods=['POST'])
def analysis_stream():
    init_session()
    data = request.json
    message = data.get('message', '')
    knowledge_id = data.get('knowledge_id', '')
    message = message.replace("我", "我(徐春峰)")

    if 'zhipu' in message.lower() or '智谱' in message:
        logger.info(f'switch to zhipu service, save to session')
        session['llm_service'] = 'zhipu'
        set_current_service('zhipu')
        return format_chunk("切换到智谱AI服务", None, None)
    if 'openai' in message.lower() or 'openai' in message:
        logger.info(f'switch to openai service, save to session')
        session['llm_service'] = 'openai'
        set_current_service('openai')
        return format_chunk("切换到openai服务", None, None)
    # 默认使用智谱AI服务
    llm_service = zhipu_service
    logger.info(f'llm_service: {session["llm_service"]}')
    # current_service = session.get('llm_service', 'zhipu') # Default to 'zhipu' if not set
    current_service = get_current_service()

    if current_service == 'openai':
        logger.info('Using OpenAI service')
        llm_service = openai_service
    else:
        logger.info('Using Zhipu service')
        llm_service = zhipu_service

    logger.info(f'/zhipu/analysis/stream v2: {message}')

    intent_categories =["analyze_sales","provide_sales_update_info"]
@@ -158,6 +213,11 @@ def analysis_stream():
    if '更新' in message and '我负责' in message and '项目' in message and '最新' in message:
        classification_result = {"category":"provide_sales_update_info"}

    # if 'openai' in message.lower():
    #     logger.info(f'switch to openai service')
    #     llm_service = openai_service
    #     message = message.replace("openai", "")

    additional_business_info = ""
    if classification_result.get('category') == 'analyze_sales':
@@ -182,7 +242,7 @@ def analysis_stream():
请根据用户提供的如下信息,查找相关的 '当前详细状态及已完成工作','Sales stage' 信息,并返回给用户:
{message}
        """
-        business_info = zhipu_service.retrive(prompt_get_business_info, knowledge_id, None)
+        business_info = zhipu_kb_service.retrive(prompt_get_business_info, knowledge_id, None)
        logger.info(f'business_info: {business_info}')

        analysis_rule = PromptRepository().get_prompt('sales_analysis')
@@ -204,15 +264,20 @@ def analysis_stream():
    prompt_analysis += f"""
根据如下各销售阶段的销售阶段任务、销售关键动作、阶段转化标准:
{analysis_rule}
-结合上述商机信息的对应阶段,按照下面的要点,请着重分析新增的销售进展是否会改变上述分析结果,如果会改变,请给出分析结果,如果不会改变,请给出分析结果
-1. **销售阶段分析**
-2. **销售动作日志分析**
-3. **销售动作与销售阶段的关系**
-4. **判断结果**
-5. **销售阶段分析报告**
-6. **下一步行动建议**
-根据上面各销售阶段所定义的销售关键动作,结合目前已经达成的工作和额外的销售进展,给出下一步的的行动建议,其行动建议尽可能采用定义的销售动作
+结合上述商机信息的对应阶段,按照下面的要点和格式,请着重分析新增的销售进展是否会改变上述分析结果,如果会改变,请给出分析结果,如果不会改变,请给出分析结果
+销售阶段分析细节部分的内容使用markdown引用块

+输出模版、内容和样式:

+** 一、下一步行动建议**
+(根据上面各销售阶段所定义的销售关键动作,结合目前已经达成的工作和额外的销售进展,给出下一步的的行动建议,其行动建议尽可能采用定义的销售动作,每一个行动建议使用一个标号)

+** 二、销售阶段分析细节**
+> 1. ***销售阶段分析***
+> 2. ***销售动作日志分析***
+> 3. ***销售动作与销售阶段的关系***
+> 4. ***判断结果***
+> 5. ***销售阶段分析报告***

如果用户在下面的输入指令中指定了只需要上面所列的某个或某几个分析,请只输出指定分析的结果,如果未指定,请输出所有分析结果
{message}
@@ -220,14 +285,105 @@ def analysis_stream():

    def event_stream():
        accumulated_result = ""
-        for chunk in zhipu_service.talk_to_zhipu_sse(prompt_analysis):
+        for chunk in llm_service.generate_response_sse(prompt_analysis):
            if chunk:
                accumulated_result += chunk
                chunk_out = format_chunk(chunk, None, None)
                yield json.dumps(chunk_out) + '\n'

        yield json.dumps(format_chunk("", "如有新的销售进展,请补充相关信息,我会为您做进一步分析", "")) + '\n'
        logger.info(f'accumulated_result: {accumulated_result}')

        followup_info = get_analysis_followup_info(message, accumulated_result, "1843318172036575232")
        followup_info += "如有新的销售进展,请补充相关信息,我会为您做进一步分析"
        logger.info(f'followup_info: {followup_info}')
        # Split followup_info into chunks of approximately 15 characters
        chunk_size = 15
        chunks = [followup_info[i:i+chunk_size] for i in range(0, len(followup_info), chunk_size)]

        # Yield each chunk separately
        for chunk in chunks:
            yield json.dumps(format_chunk("", chunk, "")) + '\n'

    return Response(event_stream(), mimetype='text/event-stream', headers=response_headers)


def get_analysis_followup_info(origin_message, analysis_text, knowledge_id):
    followup_info = ""
    # 1. Get business contact info
    if '汇理' in origin_message:
        # followup_info += "### 联系人信息\n"
        # followup_info += "- **陈明宇** (技术总监)\n - 📱 186-2155-7823\n"
        # followup_info += "- **王雪梅** (项目经理)\n - 📱 135-8867-4932\n"
        next_action_items = get_detailed_action_items(analysis_text, knowledge_id)
        followup_info = get_detailed_action_guide(next_action_items, "1858506435089068032")
        followup_info += "\n\n"
        return followup_info

    elif '芝麻地网科' in origin_message:
        followup_info += "### 联系人信息\n"
        followup_info += "- **刘建华** (产品总监)\n - 📱 138-9876-5432\n"
        followup_info += "- **张婷婷** (商务经理)\n - 📱 159-2468-1357\n"
    elif '联特科技' in origin_message:
        followup_info += "### 联系人信息\n"
        followup_info += "- **郭志强** (研发经理)\n - 📱 177-3344-5566\n"
        followup_info += "- **林小华** (项目主管)\n - 📱 136-9988-7766\n"
    elif '视睿电子' in origin_message:
        followup_info += "### 联系人信息\n"
        followup_info += "- **黄伟东** (技术经理)\n - 📱 158-1122-3344\n"
        followup_info += "- **赵敏** (销售总监)\n - 📱 139-5544-3322\n"
    elif '深圳麦克韦尔' in origin_message:
        followup_info += "### 联系人信息\n"
        followup_info += "- **吴晓峰** (部门主管)\n - 📱 182-8899-7766\n"
        followup_info += "- **孙丽华** (商务代表)\n - 📱 137-6655-4433\n"
    else:
        followup_info = ""
        return followup_info

    action_guide_url = "https://fiq58om72ph.feishu.cn/wiki/GbV3wXWu2igPoRkXSP8cMDzvnkd?from=from_copylink"
    followup_info += "\n### 参考资料\n"
    followup_info += f"- [📚 行动指南]({action_guide_url})\n"

    sales_stage_definition_url = "https://fiq58om72ph.feishu.cn/wiki/I8Raw0MVKizZTvkR4UWcRmy4n7f?from=from_copylink"
    followup_info += f"- [📖 销售阶段手册]({sales_stage_definition_url})\n"

    followup_info += "\n\n"

    return followup_info

def get_detailed_action_items(message, knowledge_id):
    prompt_detailed_action_items = f"""
以下是销售行动建议及分析报告
{message}
请提取上述文本中下一步行动建议的原始文本并返回
"""
    logger.info(f'prompt_detailed_action_items: {prompt_detailed_action_items}')
    detailed_action_items = openai_service.generate_response(prompt_detailed_action_items)
    logger.info(f'detailed_action_items: {detailed_action_items}')
    return detailed_action_items

def get_detailed_action_guide(message, knowledge_id):
    prompt_detailed_action_guide = f"""
以下是销售行动建议及分析报告
{message}
请根据上文中所提到的行动建议,查询知识库,给出对应行动建议所对应的详细的行动指南,尽可能保持知识库中答案的格式,如答案中有表格,请保持表格的格式输出,如果答案中包含markdown格式及链接,请保持markdown格式及链接。请完整的输出知识库中的答案内容,不要遗漏信息,不要省略信息,如有邮件模版也请完整输出邮件模版。
"""
    detailed_action_guide = zhipu_kb_service.retrive(prompt_detailed_action_guide, knowledge_id, None)
    logger.info(f'detailed_action_guide: {detailed_action_guide}')

    doc_links = get_doc_links()

    detailed_action_guide = fix_doc_links(detailed_action_guide, doc_links)
    logger.info(f'detailed_action_guide fixed links: {detailed_action_guide}')
    return detailed_action_guide

def fix_doc_links(message, doc_links):
    for doc in doc_links:
        title = doc.get('title')
        url = doc.get('url')
        # Replace only if the title is not inside markdown link syntax
        message = re.sub(
            f'{title}(?!\]\\([^)]*\\))',
            f'[{title}]({url})',
            message
        )
    return message
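For illustration (not part of the diff): how `fix_doc_links` behaves. The function is restated (with normalized escaping) so the snippet runs standalone; the title/URL pair mirrors an entry in the `doc_links.json` file added later in this compare.

```python
import re

# fix_doc_links restated from the controller above so this snippet is self-contained.
def fix_doc_links(message, doc_links):
    for doc in doc_links:
        title = doc.get('title')
        url = doc.get('url')
        # Replace only when the title is not already inside markdown link syntax.
        message = re.sub(f'{title}(?!\\]\\([^)]*\\))', f'[{title}]({url})', message)
    return message

doc_links = [{
    "title": "《金融行业测试方案》",
    "url": "https://fiq58om72ph.feishu.cn/wiki/I8Raw0MVKizZTvkR4UWcRmy4n7f",
}]

text = "建议参考《金融行业测试方案》准备测试。"
print(fix_doc_links(text, doc_links))
# A bare title becomes a markdown link; a title already wrapped as [title](url) is left unchanged.
```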
@@ -0,0 +1,11 @@
from abc import ABC, abstractmethod

class AIServiceInterface(ABC):
    @abstractmethod
    def generate_response(self, prompt):
        pass

    @abstractmethod
    def generate_response_sse(self, prompt):
        pass
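For illustration (not part of the diff): a trivial implementation of `AIServiceInterface`, showing the contract the controller relies on — `generate_response` returns a complete string, `generate_response_sse` yields text chunks. `EchoService` is hypothetical; the import path matches the other services in this diff and assumes the `app` package is importable.

```python
from app.services.ai_service_interface import AIServiceInterface

class EchoService(AIServiceInterface):
    """Hypothetical stand-in used only to illustrate the interface contract."""

    def generate_response(self, prompt):
        # Non-streaming: return the full answer as one string.
        return f"echo: {prompt}"

    def generate_response_sse(self, prompt):
        # Streaming: yield the answer in chunks, like the real services do.
        for word in prompt.split():
            yield word + " "

svc = EchoService()
print(svc.generate_response("hello world"))
print("".join(svc.generate_response_sse("hello world")))
```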
@@ -0,0 +1,61 @@
import logging
import time
from openai import OpenAI
from app.services.ai_service_interface import AIServiceInterface
# from app.utils.prompt_repository import PromptRepository

logger = logging.getLogger(__name__)

class OpenaiService(AIServiceInterface):
    def __init__(self):
        self.client = OpenAI(base_url="https://ai.xorbit.link:8443/e5b2a5e5-b41d-4715-9d50-d4a3b0c1a85f/v1", api_key="sk-proj-e5b2a5e5b41d47159d50d4a3b0c1a85f")

    def generate_response(self, prompt):
        try:
            response = self.client.chat.completions.create(
                model="gpt-4o-mini",
                messages=[{"role": "system", "content": prompt}],
                temperature=0.9
            )
            return response.choices[0].message.content
        except Exception as e:
            logger.error(f"Error generating response: {e}")
            return "An error occurred while generating the response."

    def generate_response_sse(self, prompt):
        try:
            response = self.client.chat.completions.create(
                model="gpt-4o-mini",
                messages=[{"role": "system", "content": prompt}],
                temperature=0.7,
                stream=True
            )
            for chunk in response:
                if chunk.choices[0].delta.content is not None:
                    yield chunk.choices[0].delta.content
            yield "answer provided by openai"
        except Exception as e:
            logger.error(f"Error generating SSE response: {e}")
            yield "An error occurred while generating the SSE response."


if __name__ == "__main__":
    # Set up logging
    logging.basicConfig(level=logging.INFO)

    # Create an instance of OpenaiService
    openai_service = OpenaiService()

    # Test the generate_response method
    test_prompt = "What is the capital of France?"
    response = openai_service.generate_response(test_prompt)
    print(f"Response to '{test_prompt}': {response}")

    # Test the generate_response_sse method
    print("\nTesting generate_response_sse:")
    sse_prompt = "Count from 1 to 5 slowly."
    for chunk in openai_service.generate_response_sse(sse_prompt):
        print(chunk, end='', flush=True)
        time.sleep(0.1)  # Add a small delay to simulate streaming
    print("\nSSE response complete.")
@@ -0,0 +1,85 @@
import logging
import time
from zhipuai import ZhipuAI
from app.utils.prompt_repository import PromptRepository

logger = logging.getLogger(__name__)

class ZhipuKbService:
    def __init__(self):
        self.model_name = "glm-4-plus"
        self.app_secret_key = "d54f764a1d67c17d857bd3983b772016.GRjowY0fyiMNurLc"
        logger.info("ZhipuKbService initialized with model: %s", self.model_name)


    def retrive(self, message, knowledge_id, prompt_template):
        logger.info("Starting retrive call with knowledge_id: %s", knowledge_id)
        start_time = time.time()
        client = ZhipuAI(api_key=self.app_secret_key)
        default_prompt = "从文档\n\"\"\"\n{{knowledge}}\n\"\"\"\n中找问题\n\"\"\"\n{{question}}\n\"\"\"\n的答案,找到答案就仅使用文档语句回答问题,找不到答案就用自身知识回答并且告诉用户该信息不是来自文档。\n不要复述问题,直接开始回答。你是一个智能助理,请用该角色的语言风格和对话方式回答问题。"

        if prompt_template is None or prompt_template == "":
            prompt_template = default_prompt
        try:
            response = client.chat.completions.create(
                model=self.model_name,
                messages=[
                    {"role": "user", "content": message},
                ],
                tools=[
                    {
                        "type": "retrieval",
                        "retrieval": {
                            "knowledge_id": knowledge_id,
                            "prompt_template": prompt_template
                        }
                    }
                ],
                stream=False,
                max_tokens=4095,
                temperature=0.01,  # default=0.01
                top_p=0.1,  # default=0.1
            )
            result = response.choices[0].message.content
            end_time = time.time()
            logger.info("retrive call completed in %.2f seconds", end_time - start_time)
            return result
        except Exception as e:
            logger.error("Error in retrive: %s", str(e))
            raise

    def retrive_sse(self, message, knowledge_id, prompt_template=None, system_prompt=None):
        logger.info("Starting retrive_sse call with knowledge_id: %s, message:%s", knowledge_id, message)
        start_time = time.time()
        client = ZhipuAI(api_key=self.app_secret_key)
        default_prompt = "从文档\n\"\"\"\n{{knowledge}}\n\"\"\"\n中找问题\n\"\"\"\n{{question}}\n\"\"\"\n的答案,找到答案就仅使用文档语句回答问题,找不到答案就告诉用户知识库中没有该信息。\n不要复述问题,直接开始回答。"
        messages = [{"role": "user", "content": message}]
        # if system_prompt != None:
        #     messages.append({"role": "system", "content": system_prompt})
        if prompt_template is None or prompt_template == "":
            prompt_template = default_prompt
        try:
            response = client.chat.completions.create(
                model=self.model_name,
                messages=messages,
                tools=[
                    {
                        "type": "retrieval",
                        "retrieval": {
                            "knowledge_id": knowledge_id,
                            "prompt_template": prompt_template
                        }
                    }
                ],
                stream=True,
                max_tokens=4095,
                temperature=0.01,  # default=0.01
                top_p=0.1,  # default=0.1
            )
            for chunk in response:
                yield chunk.choices[0].delta.content
            end_time = time.time()
            logger.info("retrive_sse call completed in %.2f seconds", end_time - start_time)
        except Exception as e:
            logger.error("Error in retrive_sse: %s", str(e))
            raise
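For illustration (not part of the diff): a minimal usage sketch of `ZhipuKbService`. The calls need the `app` package on the path, network access, and a valid knowledge base; the `knowledge_id` is copied from the curl examples above, and the question text is just an example.

```python
# Illustrative calls only; signatures match the class above.
from app.services.zhipu_kb_service import ZhipuKbService

kb = ZhipuKbService()

# Non-streaming retrieval with the built-in default prompt_template.
answer = kb.retrive("广汽汇理汽车金融有限公司当前的销售阶段是什么?", "1843318172036575232", None)
print(answer)

# Streaming retrieval; chunks are text deltas and may be empty.
for chunk in kb.retrive_sse("广汽汇理汽车金融有限公司当前的销售阶段是什么?", "1843318172036575232"):
    if chunk:
        print(chunk, end="", flush=True)
```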
@@ -1,47 +1,47 @@
import logging
import time
from zhipuai import ZhipuAI
+from app.services.ai_service_interface import AIServiceInterface
from app.utils.prompt_repository import PromptRepository

logger = logging.getLogger(__name__)

-class ZhipuService:
+class ZhipuService(AIServiceInterface):
    def __init__(self):
        self.model_name = "glm-4"
        self.app_secret_key = "d54f764a1d67c17d857bd3983b772016.GRjowY0fyiMNurLc"
+        self.client = ZhipuAI(api_key=self.app_secret_key)
        logger.info("ZhipuService initialized with model: %s", self.model_name)

-    def talk_to_zhipu(self, message):
-        logger.info("Starting talk_to_zhipu call")
+    def generate_response(self, prompt):
+        logger.info("Starting generate_response call")
        start_time = time.time()
-        client = ZhipuAI(api_key=self.app_secret_key)
        try:
-            response = client.chat.completions.create(
+            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=[
-                    {"role": "user", "content": message},
+                    {"role": "user", "content": prompt},
                ],
                stream=False,
                temperature=0.01,
                top_p=0.1,
            )
-            accum_resp = response.choices[0].message.content
+            result = response.choices[0].message.content
            end_time = time.time()
-            logger.info("talk_to_zhipu call completed in %.2f seconds", end_time - start_time)
-            return accum_resp
+            logger.info("generate_response call completed in %.2f seconds", end_time - start_time)
+            return result
        except Exception as e:
-            logger.error("Error in talk_to_zhipu: %s", str(e))
+            logger.error("Error in generate_response: %s", str(e))
            raise

-    def talk_to_zhipu_sse(self, message):
-        logger.info("Starting talk_to_zhipu_sse call")
+    def generate_response_sse(self, prompt):
+        logger.info("Starting generate_response_sse call")
        start_time = time.time()
-        client = ZhipuAI(api_key=self.app_secret_key)
        try:
-            response = client.chat.completions.create(
+            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=[
-                    {"role": "user", "content": message},
+                    {"role": "user", "content": prompt},
                ],
                stream=True,
                temperature=0.01,
@@ -50,21 +50,20 @@ class ZhipuService:
            for chunk in response:
                yield chunk.choices[0].delta.content
            end_time = time.time()
-            logger.info("talk_to_zhipu_sse call completed in %.2f seconds", end_time - start_time)
+            logger.info("generate_response_sse call completed in %.2f seconds", end_time - start_time)
        except Exception as e:
-            logger.error("Error in talk_to_zhipu_sse: %s", str(e))
+            logger.error("Error in generate_response_sse: %s", str(e))
            raise

    def retrive(self, message, knowledge_id, prompt_template):
        logger.info("Starting retrive call with knowledge_id: %s", knowledge_id)
        start_time = time.time()
-        client = ZhipuAI(api_key=self.app_secret_key)
        default_prompt = "从文档\n\"\"\"\n{{knowledge}}\n\"\"\"\n中找问题\n\"\"\"\n{{question}}\n\"\"\"\n的答案,找到答案就仅使用文档语句回答问题,找不到答案就用自身知识回答并且告诉用户该信息不是来自文档。\n不要复述问题,直接开始回答。"

        if prompt_template is None or prompt_template == "":
            prompt_template = default_prompt
        try:
-            response = client.chat.completions.create(
+            response = self.client.chat.completions.create(
                model="glm-4",
                messages=[
                    {"role": "user", "content": message},
@@ -93,7 +92,6 @@ class ZhipuService:
    def retrive_sse(self, message, knowledge_id, prompt_template=None, system_prompt=None):
        logger.info("Starting retrive_sse call with knowledge_id: %s, message:%s", knowledge_id, message)
        start_time = time.time()
-        client = ZhipuAI(api_key=self.app_secret_key)
        default_prompt = "从文档\n\"\"\"\n{{knowledge}}\n\"\"\"\n中找问题\n\"\"\"\n{{question}}\n\"\"\"\n的答案,找到答案就仅使用文档语句回答问题,找不到答案就告诉用户知识库中没有该信息。\n不要复述问题,直接开始回答。"
        messages = [{"role": "user", "content": message}]
        # if system_prompt != None:
@@ -101,7 +99,7 @@ class ZhipuService:
        if prompt_template is None or prompt_template == "":
            prompt_template = default_prompt
        try:
-            response = client.chat.completions.create(
+            response = self.client.chat.completions.create(
                model="glm-4",
                messages=messages,
                tools=[
@@ -132,11 +130,10 @@ class ZhipuService:
        prompt_report_template = PromptRepository().get_prompt("report_template")

        prompt_report_missing_check = f"""{prompt_report_template}
-请检查以下日志信息是否完整,如果信息缺失则提示要求用户需要补充的信息要点,如果信息完整请直接返回“上述日志信息为全部信息”。日志信息如下:\n\"\"\"\n{message}\n\"\"\"\n"""
+请检查以下日志信息是否完整,如果信息缺失则提示要求用户需要补充的信息要点,如果信息完整请直接返回"上述日志信息为全部信息"。日志信息如下:\n\"\"\"\n{message}\n\"\"\"\n"""

-        client = ZhipuAI(api_key=self.app_secret_key)
        try:
-            response = client.chat.completions.create(
+            response = self.client.chat.completions.create(
                model="glm-4-flash",
                messages=[
                    {"role": "user", "content": prompt_report_missing_check},
@@ -63,11 +63,16 @@ class PromptRepository:

    """

+    DEFAULT_DETAILED_ACTION_GUIDE = """
+请根据实际需求定义详细行动指南内容
+"""

    PROMPTS = {
        "default_retrieval": DEFAULT_RETRIEVAL_PROMPT,
        "sales_analysis": DEFAUT_SALES_ANALYSIS_PROMPT,
        "report_template": DEFAULT_REPORT_TEMPLATE,
-        # Add more prompts here as needed
+        "detailed_action_guide": DEFAULT_DETAILED_ACTION_GUIDE,
+        # Add more prompts here as needed
    }

    @classmethod
@@ -9,3 +9,5 @@ def init_session():
    if 'history' not in session:
        session['history'] = []  # 初始化会话历史
        session['session_info'] = {}
    if 'llm_service' not in session:
        session['llm_service'] = 'zhipu'
@@ -0,0 +1,154 @@
问题:如何整理并确认客户需求文档
相似问:如何整理并确认已经收集的客户需求文档,以便在后续的技术交流中使用,提升沟通效率
答案:
为了避免收集客户需求时有遗漏,请参考下表需求调研表中的已知信息和未知信息,请及时收集并补充未知信息。

#### [客户需求调研表](https://fiq58om72ph.feishu.cn/wiki/NaWtwOXKriZ6gckAIvgcVCAgntc?from=from_copylink)


问题:如何识别项目的关键人,以便在后续的沟通中能够得到更多信息
答案:
建议与项目有关的部门都做沟通,涉及部门:信息技术部,业务部,风险合规部及管理层。

#### 现在已知联系人
- **信息技术部**
  - 联系人:卢先生
  - 职位:IT经理
  - 电话:18911210038

- **业务部**
  - 联系人:王灿
  - 职位:业务员
  - 电话:13913455783

- **风险合规部**
  - 待建立联系
> 注:该部门和高层还需要进一步建立联系,识别关键人。


问题:如何根据客户的初步反馈,准备相应的产品演示材料.增加客户的信任度。
答案:
#### 1. 产品解决方案参考材料
客户属于互金行业,建议参考:
- [《02-金融领域场景分析》](https://fiq58om72ph.feishu.cn/wiki/I8Raw0MVKizZTvkR4UWcRmy4n7f)
- [《01-行业理解及竞争策略》](https://fiq58om72ph.feishu.cn/wiki/I8Raw0MVKizZTvkR4UWcRmy4n7f)
- [《多业务融合方案-完整篇》](https://fiq58om72ph.feishu.cn/wiki/I8Raw0MVKizZTvkR4UWcRmy4n7f)

#### 2. 竞品分析材料
客户可能已经和其他竞争对手沟通过,建议详细学习:
- [《XX与常见的几个过程数据库比较分析_v1.docx》](https://fiq58om72ph.feishu.cn/wiki/I8Raw0MVKizZTvkR4UWcRmy4n7f)

#### 3. 成功案例参考
互金行业中易宝支付已与我公司达成合作:
- [《易宝支付分布式系统采购案例说明》](https://fiq58om72ph.feishu.cn/wiki/I8Raw0MVKizZTvkR4UWcRmy4n7f)

#### 4. 沟通建议
建议与以下部门分别做产品、技术及解决方案的价值沟通:
- 信息技术部
  - 联系人:卢先生
  - 职位:IT经理
  - 电话:18911210038
- 业务部
  - 联系人:王灿
  - 职位:业务员
  - 电话:13913455783
- 风险合规部
  - 待建立联系

问题:如何着手准备业务场景测试方案,确保方案能够充分体现产品优势。
答案:
#### 1. 项目负责人信息
- **信息技术部**
  - 联系人:卢先生
  - 职位:IT经理
  - 电话:18911210038

#### 2. 参考资料
- [《金融行业测试方案》](https://fiq58om72ph.feishu.cn/wiki/I8Raw0MVKizZTvkR4UWcRmy4n7f)
- 金融行业测试案例可以参考
  - [《吉致汽车金融金融租赁测试方案》](https://fiq58om72ph.feishu.cn/wiki/I8Raw0MVKizZTvkR4UWcRmy4n7f)
  - [《吉致汽车金融实时数仓测试方案》](https://fiq58om72ph.feishu.cn/wiki/I8Raw0MVKizZTvkR4UWcRmy4n7f)
- 协作人员:
  - 内部售前:@曹亚文
  - PTS:@唐超

#### 3. 重点关注事项
客户特别关注:
- 产品性能稳定性
- 迁移周期
> 请与售前和PTS同学沟通清楚,在测试方案中突出以上两点优势。

#### 4. 实施建议
为确保测试方案顺利实施,请注意:
- 与客户内部产品部门充分沟通
- 与实施部门充分沟通
- 充分了解需求
- 协调具体测试时间


问题:如何明确客户内部决策链和决策人,并在CRM系统中更新组织架构信息。
相似问:识别客户内部决策链和决策人
答案:
- 系统检测到该客户项目联系人, 卢*,信息不全,请更新如下信息:

#### [客户项目联系人](https://fiq58om72ph.feishu.cn/wiki/C77pwAaoWiNQSJk0tnGccpF5nEh?from=from_copylink)


问题:如何整理POC结果,准备正式的测试报告,并与客户进行确认。
答案:
#### 1. 测试报告准备
- 协助PTS完成客户测试报告模板填写
- 内部对齐测试效果
- 进行阶段复盘

#### 2. 客户确认流程
通过邮件方式与客户确认测试结果:
- 主要接收人:信息技术部 卢先生
- 是否需要抄送其他人待确认

#### 3. 确认邮件模板
**主题**:关于 [《广汽汇理汽车金融有限公司测试结果报告》](https://fiq58om72ph.feishu.cn/wiki/I8Raw0MVKizZTvkR4UWcRmy4n7f) 的确认

尊敬的卢总:

您好!

首先,感谢您对我们产品的关注与支持。我们已经完成了测试工作,并根据测试结果编制了一份详细的测试报告。以下是本次测试的主要发现和结论:

##### 测试概要
- **测试周期**:2023年11月1日至2023年11月30日
- **测试范围**:涵盖了系统的主要功能模块,包括:
  - 车辆监控
  - 数据分析
  - 报表生成等
- **测试环境**:模拟了贵公司的实际工作环境,确保测试结果的适用性

##### 主要测试结果
- **功能测试**:所有功能点均按预期工作,系统稳定性良好
- **性能测试**:系统响应时间满足预设标准,处理效率达到预期要求
- **兼容性测试**:系统在各主流浏览器和操作系统上均表现良好,无兼容性问题
- **用户体验测试**:用户界面友好,操作流程简便,得到了测试用户的好评

##### 问题与建议
在测试过程中,我们发现了3个轻微问题,主要集中在报表生成模块的个性化设置上,已详细记录在报告中,并提出了相应的优化建议。

我们非常重视贵公司的反馈,为确保报告的准确性和完整性,请您在3个工作日内审阅附件中的测试报告,并给予确认。如有任何疑问或需要进一步讨论的问题,请随时与我联系。

**附件**:[《广汽汇理汽车金融有限公司测试结果报告》](https://fiq58om72ph.feishu.cn/wiki/I8Raw0MVKizZTvkR4UWcRmy4n7f)

感谢您的配合与支持,期待您的回复。

祝好,

徐春锋
平凯星辰客户经理
联系电话:138-xxxx-xxxx
电子邮箱:xushunfeng@xxtech.com


问题:如果客户对POC结果满意,如何进一步了解客户内部的项目立项情况,确认客户的项目预算和预期采购数量。
相似问:如果客户对POC结果满意,如何进行下一步
答案:
根据立项要求,请检查系统中商机信息并完善缺失项:
#### [商机信息表](https://fiq58om72ph.feishu.cn/wiki/Rpq5w4mFriCsSWk52elcgvbAneg?from=from_copylink)
@@ -0,0 +1,32 @@
{
    "documents": [
        {
            "title": "《02-金融领域场景分析》",
            "url": "https://fiq58om72ph.feishu.cn/wiki/I8Raw0MVKizZTvkR4UWcRmy4n7f"
        },
        {
            "title": "《01-行业理解及竞争策略》",
            "url": "https://fiq58om72ph.feishu.cn/wiki/I8Raw0MVKizZTvkR4UWcRmy4n7f"
        },
        {
            "title": "《多业务融合方案-完整篇》",
            "url": "https://fiq58om72ph.feishu.cn/wiki/I8Raw0MVKizZTvkR4UWcRmy4n7f"
        },
        {
            "title": "《XX与常见的几个过程数据库比较分析_v1.docx》",
            "url": "https://fiq58om72ph.feishu.cn/wiki/I8Raw0MVKizZTvkR4UWcRmy4n7f"
        },
        {
            "title": "《易宝支付分布式系统采购案例说明》",
            "url": "https://fiq58om72ph.feishu.cn/wiki/I8Raw0MVKizZTvkR4UWcRmy4n7f"
        },
        {
            "title": "《金融行业测试方案》",
            "url": "https://fiq58om72ph.feishu.cn/wiki/I8Raw0MVKizZTvkR4UWcRmy4n7f"
        },
        {
            "title": "《广汽汇理汽车金融有限公司测试结果报告》",
            "url": "https://fiq58om72ph.feishu.cn/wiki/I8Raw0MVKizZTvkR4UWcRmy4n7f"
        }
    ]
}
@@ -0,0 +1,7 @@
- [《02-金融领域场景分析》](https://fiq58om72ph.feishu.cn/wiki/I8Raw0MVKizZTvkR4UWcRmy4n7f)
- [《01-行业理解及竞争策略》](https://fiq58om72ph.feishu.cn/wiki/I8Raw0MVKizZTvkR4UWcRmy4n7f)
- [《多业务融合方案-完整篇》](https://fiq58om72ph.feishu.cn/wiki/I8Raw0MVKizZTvkR4UWcRmy4n7f)
- [《XX与常见的几个过程数据库比较分析_v1.docx》](https://fiq58om72ph.feishu.cn/wiki/I8Raw0MVKizZTvkR4UWcRmy4n7f)
- [《易宝支付分布式系统采购案例说明》](https://fiq58om72ph.feishu.cn/wiki/I8Raw0MVKizZTvkR4UWcRmy4n7f)
- [《金融行业测试方案》](https://fiq58om72ph.feishu.cn/wiki/I8Raw0MVKizZTvkR4UWcRmy4n7f)
[《广汽汇理汽车金融有限公司测试结果报告》](https://fiq58om72ph.feishu.cn/wiki/I8Raw0MVKizZTvkR4UWcRmy4n7f)
@@ -17,4 +17,5 @@ Werkzeug==3.0.4
zipp==3.20.2
zhipuai==2.1.5.20230904
pytz==2024.2
-flask-debug==0.4.3
+flask-debug==0.4.3
+openai==1.52.2