"""Service wrapper around the ZhipuAI (GLM-4) chat-completions API."""

from zhipuai import ZhipuAI
import logging

# Module-level logger, keyed to this module's import path.
logger = logging.getLogger(__name__)
class ZhipuService:
    """Thin client for the ZhipuAI GLM-4 chat-completions API.

    Offers plain and streaming (SSE) chat calls, plus knowledge-base
    retrieval variants of both.  Sampling parameters are pinned low
    (temperature=0.01, top_p=0.1) for near-deterministic answers.
    """

    # Default retrieval prompt.  The {{knowledge}} / {{question}} placeholders
    # are substituted server-side by the ZhipuAI retrieval tool.
    # (Translation: answer the question strictly from the document if the
    # answer is found there; otherwise answer from own knowledge and say the
    # information is not from the document; do not restate the question.)
    _DEFAULT_RETRIEVAL_PROMPT = (
        "从文档\n\"\"\"\n{{knowledge}}\n\"\"\"\n中找问题\n\"\"\"\n{{question}}\n\"\"\"\n"
        "的答案,找到答案就仅使用文档语句回答问题,找不到答案就用自身知识回答并且告诉用户该信息不是来自文档。\n"
        "不要复述问题,直接开始回答。"
    )

    def __init__(self):
        # Model name used for every chat/retrieval request.
        self.model_name = "glm-4"
        # SECURITY NOTE(review): API key is hard-coded in source; it should be
        # loaded from configuration or an environment variable and rotated.
        self.app_secret_key = "d54f764a1d67c17d857bd3983b772016.GRjowY0fyiMNurLc"

    def _client(self):
        """Build a ZhipuAI client authenticated with this service's key."""
        return ZhipuAI(api_key=self.app_secret_key)

    def _retrieval_tools(self, knowledge_id, prompt_template):
        """Build the retrieval tool spec for *knowledge_id*.

        An empty or ``None`` *prompt_template* falls back to the default
        retrieval prompt.
        """
        if not prompt_template:
            prompt_template = self._DEFAULT_RETRIEVAL_PROMPT
        return [
            {
                "type": "retrieval",
                "retrieval": {
                    "knowledge_id": knowledge_id,
                    "prompt_template": prompt_template,
                },
            }
        ]

    def _chat(self, message, stream, tools=None):
        """Issue one chat-completions request and return the raw response.

        *stream* selects SSE streaming; *tools* optionally attaches the
        retrieval tool spec.
        """
        kwargs = dict(
            model=self.model_name,
            messages=[{"role": "user", "content": message}],
            stream=stream,
            temperature=0.01,  # low randomness -> stable, reproducible output
            top_p=0.1,         # sample only the top 10% probability mass
        )
        if tools is not None:
            kwargs["tools"] = tools
        return self._client().chat.completions.create(**kwargs)

    def talk_to_zhipu(self, message):
        """Send *message* and return the complete answer as a string."""
        response = self._chat(message, stream=False)
        return response.choices[0].message.content

    def talk_to_zhipu_sse(self, message):
        """Send *message* and yield answer fragments as they stream in."""
        for chunk in self._chat(message, stream=True):
            piece = chunk.choices[0].delta.content
            logger.debug(piece)  # was print(); route through the module logger
            yield piece

    def retrive(self, message, knowledge_id, prompt_template):
        """Answer *message* against knowledge base *knowledge_id*; return full text.

        Empty/None *prompt_template* falls back to the default retrieval
        prompt.  (Method name kept as-is -- "retrive" [sic] -- for caller
        compatibility.)
        """
        response = self._chat(
            message,
            stream=False,
            tools=self._retrieval_tools(knowledge_id, prompt_template),
        )
        return response.choices[0].message.content

    def retrive_sse(self, message, knowledge_id, prompt_template):
        """Streaming variant of ``retrive``; yields answer fragments."""
        stream = self._chat(
            message,
            stream=True,
            tools=self._retrieval_tools(knowledge_id, prompt_template),
        )
        for chunk in stream:
            piece = chunk.choices[0].delta.content
            logger.debug(piece)  # was print(); route through the module logger
            yield piece