增加了日志,记录调用zhipu api的时间
This commit is contained in:
parent
fcd3c4f62a
commit
24d4ffbce5
|
|
@ -1,6 +1,6 @@
|
|||
from zhipuai import ZhipuAI
|
||||
import logging
|
||||
|
||||
import time
|
||||
from zhipuai import ZhipuAI
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
|
@ -8,87 +8,117 @@ class ZhipuService:
|
|||
def __init__(self):
    """Configure the model name and API credential for the Zhipu AI client.

    The credential can be supplied via the ``ZHIPU_API_KEY`` environment
    variable; the previous hardcoded value is kept only as a
    backward-compatible fallback.
    """
    import os  # function-scope import: keeps the module import block untouched

    self.model_name = "glm-4"
    # NOTE(security): a credential was committed to source control here.
    # Prefer setting ZHIPU_API_KEY in the environment; the literal below is
    # retained solely so existing deployments keep working. Rotate this key.
    self.app_secret_key = os.environ.get(
        "ZHIPU_API_KEY",
        "d54f764a1d67c17d857bd3983b772016.GRjowY0fyiMNurLc",
    )
    logger.info("ZhipuService initialized with model: %s", self.model_name)
def talk_to_zhipu(self, message):
    """Send one user message to the Zhipu chat API and return the reply text.

    Blocking (non-streaming) call; logs the wall-clock duration of the request.

    Args:
        message: User prompt string, forwarded as the single chat message.

    Returns:
        str: The assistant's reply content.

    Raises:
        Exception: any API/client error is logged (with traceback) and re-raised.
    """
    logger.info("Starting talk_to_zhipu call")
    # perf_counter is monotonic, so the measured duration is immune to
    # system clock adjustments (time.time is not).
    start_time = time.perf_counter()
    client = ZhipuAI(api_key=self.app_secret_key)
    try:
        response = client.chat.completions.create(
            model=self.model_name,
            messages=[
                {"role": "user", "content": message},
            ],
            stream=False,  # blocking call: full response returned at once
            # Low temperature/top_p -> near-deterministic, stable output.
            temperature=0.01,
            top_p=0.1,
        )
        accum_resp = response.choices[0].message.content
        elapsed = time.perf_counter() - start_time
        logger.info("talk_to_zhipu call completed in %.2f seconds", elapsed)
        return accum_resp
    except Exception as e:
        # logger.exception records the stack trace in addition to the message.
        logger.exception("Error in talk_to_zhipu: %s", str(e))
        raise
def talk_to_zhipu_sse(self, message):
    """Stream a chat completion from the Zhipu API, yielding text deltas.

    Generator variant of :meth:`talk_to_zhipu` using server-sent events;
    logs the total wall-clock duration once the stream is exhausted.

    Args:
        message: User prompt string, forwarded as the single chat message.

    Yields:
        str: Incremental content chunks of the assistant reply.

    Raises:
        Exception: any API/client error is logged (with traceback) and re-raised.
    """
    logger.info("Starting talk_to_zhipu_sse call")
    start_time = time.perf_counter()  # monotonic clock for duration measurement
    client = ZhipuAI(api_key=self.app_secret_key)
    try:
        response = client.chat.completions.create(
            model=self.model_name,
            messages=[
                {"role": "user", "content": message},
            ],
            stream=True,  # streamed (SSE) output
            # Low temperature/top_p -> near-deterministic, stable output.
            temperature=0.01,
            top_p=0.1,
        )
        for chunk in response:
            yield chunk.choices[0].delta.content
        # Only logged when the consumer drains the stream to completion.
        elapsed = time.perf_counter() - start_time
        logger.info("talk_to_zhipu_sse call completed in %.2f seconds", elapsed)
    except Exception as e:
        logger.exception("Error in talk_to_zhipu_sse: %s", str(e))
        raise
def retrive(self, message, knowledge_id, prompt_template):
    """Answer a question using the Zhipu retrieval tool over a knowledge base.

    Blocking call; logs the wall-clock duration of the request.

    Args:
        message: User question string.
        knowledge_id: Identifier of the knowledge base to search.
        prompt_template: Retrieval prompt template; when ``None`` or empty,
            a default template is used (it instructs the model to answer from
            the document when possible, and otherwise to say the answer is not
            from the document).

    Returns:
        str: The assistant's reply content.

    Raises:
        Exception: any API/client error is logged (with traceback) and re-raised.
    """
    logger.info("Starting retrive call with knowledge_id: %s", knowledge_id)
    start_time = time.perf_counter()  # monotonic clock for duration measurement
    client = ZhipuAI(api_key=self.app_secret_key)
    default_prompt = "从文档\n\"\"\"\n{{knowledge}}\n\"\"\"\n中找问题\n\"\"\"\n{{question}}\n\"\"\"\n的答案,找到答案就仅使用文档语句回答问题,找不到答案就用自身知识回答并且告诉用户该信息不是来自文档。\n不要复述问题,直接开始回答。"

    if prompt_template is None or prompt_template == "":
        prompt_template = default_prompt
    try:
        response = client.chat.completions.create(
            # Use the configured model name (same value as the previous
            # hardcoded "glm-4") for consistency with the other methods.
            model=self.model_name,
            messages=[
                {"role": "user", "content": message},
            ],
            tools=[
                {
                    "type": "retrieval",
                    "retrieval": {
                        "knowledge_id": knowledge_id,
                        "prompt_template": prompt_template,
                    },
                }
            ],
            stream=False,
            # Low temperature/top_p -> near-deterministic, stable output.
            temperature=0.01,
            top_p=0.1,
        )
        result = response.choices[0].message.content
        elapsed = time.perf_counter() - start_time
        logger.info("retrive call completed in %.2f seconds", elapsed)
        return result
    except Exception as e:
        logger.exception("Error in retrive: %s", str(e))
        raise
def retrive_sse(self, message, knowledge_id, prompt_template):
    """Stream a retrieval-augmented answer from the Zhipu API.

    Generator variant of :meth:`retrive` using server-sent events; logs the
    total wall-clock duration once the stream is exhausted.

    Args:
        message: User question string.
        knowledge_id: Identifier of the knowledge base to search.
        prompt_template: Retrieval prompt template; when ``None`` or empty,
            a default template is used.

    Yields:
        str: Incremental content chunks of the assistant reply.

    Raises:
        Exception: any API/client error is logged (with traceback) and re-raised.
    """
    logger.info("Starting retrive_sse call with knowledge_id: %s", knowledge_id)
    start_time = time.perf_counter()  # monotonic clock for duration measurement
    client = ZhipuAI(api_key=self.app_secret_key)
    default_prompt = "从文档\n\"\"\"\n{{knowledge}}\n\"\"\"\n中找问题\n\"\"\"\n{{question}}\n\"\"\"\n的答案,找到答案就仅使用文档语句回答问题,找不到答案就用自身知识回答并且告诉用户该信息不是来自文档。\n不要复述问题,直接开始回答。"
    if prompt_template is None or prompt_template == "":
        prompt_template = default_prompt
    try:
        response = client.chat.completions.create(
            # Use the configured model name (same value as the previous
            # hardcoded "glm-4") for consistency with the other methods.
            model=self.model_name,
            messages=[
                {"role": "user", "content": message},
            ],
            tools=[
                {
                    "type": "retrieval",
                    "retrieval": {
                        "knowledge_id": knowledge_id,
                        "prompt_template": prompt_template,
                    },
                }
            ],
            stream=True,  # streamed (SSE) output
            temperature=0.01,
            top_p=0.1,
        )
        for chunk in response:
            yield chunk.choices[0].delta.content
        # Only logged when the consumer drains the stream to completion.
        elapsed = time.perf_counter() - start_time
        logger.info("retrive_sse call completed in %.2f seconds", elapsed)
    except Exception as e:
        logger.exception("Error in retrive_sse: %s", str(e))
        raise
Loading…
Reference in New Issue