API documentation
openai
https://blog.51cto.com/u_11866025/8005293
vllm
python -m vllm.entrypoints.openai.api_server --model facebook/opt-125m
https://github.com/vllm-project/vllm/blob/main/vllm/entrypoints/api_server.py
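Once the server above is up, it exposes an OpenAI-compatible API; a minimal sketch of querying it with the openai client (localhost:8000 and the dummy api_key are assumptions based on vLLM's defaults):
from openai import OpenAI

# Sketch: query the locally started vLLM server. "EMPTY" is a dummy key since vLLM does not
# check it by default, and localhost:8000 is the default host/port.
client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")
resp = client.completions.create(
    model="facebook/opt-125m",
    prompt="San Francisco is a",
    max_tokens=32,
)
print(resp.choices[0].text)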
Library
https://blog.csdn.net/m0_67431719/article/details/135260066
import os
import openai
openai.api_key = os.getenv("OPENAI_API_KEY")
# Chat model (legacy openai<1.0 SDK interface)
completion = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Who won the world series in 2020?"},
        {"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."},
        {"role": "user", "content": "Where was it played?"}
    ]
)
print(completion.choices[0].message)
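For reference, the openai>=1.0 SDK replaces this module-level interface with a client object; a minimal sketch of the equivalent call:
# Equivalent call with the openai>=1.0 SDK (client-based interface).
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment
completion = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Where was the 2020 World Series played?"},
    ],
)
print(completion.choices[0].message.content)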
# Text completion
completion = openai.Completion.create(
    model="text-davinci-003",
    prompt="你好,chatGPT",
    max_tokens=1024,
    temperature=0.5
)
# List available models
openai.Model.list()
# Embeddings
import numpy as np
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment
text = '父亲'
embedding = client.embeddings.create(input=text, model='text-embedding-3-small').data[0].embedding
vector = np.array(embedding)
print(vector.shape)
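Follow-up sketch: embeddings are plain vectors, so the closeness of two words can be measured with cosine similarity (reuses client and np from above; the second word is an arbitrary example):
# Cosine similarity between two embedded words.
def embed(text):
    resp = client.embeddings.create(input=text, model='text-embedding-3-small')
    return np.array(resp.data[0].embedding)

a, b = embed('父亲'), embed('母亲')
print(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))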
json
Prompt
Output according to the following JSON format template:
```json
{
"object1": {
"key1": "{{value1}}",
"key2": "{{value2}}",
"key3": "{{value3}}"
}
}
```
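A minimal sketch of using this template as the system prompt and parsing the reply; the client, model, file name, and user task below are assumptions, not fixed by these notes:
import json
import re
from openai import OpenAI

client = OpenAI(api_key="<your api key>", base_url="https://api.deepseek.com")
json_template = open("json_template.txt", encoding="utf-8").read()  # hypothetical file holding the template above

resp = client.chat.completions.create(
    model="deepseek-chat",
    messages=[
        {"role": "system", "content": json_template},
        {"role": "user", "content": "Fill the template for the story 桃园三结义."},
    ],
)
reply = resp.choices[0].message.content
# The reply is often wrapped in a Markdown json fence; grab the JSON object itself before parsing.
m = re.search(r"\{.*\}", reply, re.S)
obj = json.loads(m.group(0)) if m else None
print(obj)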
JSON implementation 1
import json
import openai
key = "sk-77274694b69741d98f92ecd2902cb43c"
client = openai.Client(api_key=key, base_url="https://api.deepseek.com/")
leading_text = "{"
# 在kimi中partial
resp = client.chat.completions.create(
model="deepseek-chat",
messages = [
{"role":"system","content":"你是 Kimi,由 Moonshot AI 提供的人工智能助手,你更擅长中文和英文的对话。你会为用户提供安全,有帮助,准确的回答。同时,你会拒绝一些涉及恐怖主义,种族歧视,黄色暴力等问题的回答。Moonshot AI 为专有名词,不可翻译成其他语言。"},
{"role":"user","content":"你是一个情景分析专家,用来分析一句短语中的主语故事中的人物、人物心理和下一步行为预测。你需要阅读完所有的故事,然后输出一个 JSON 格式便于后续的处理。请严格参考 #sample output 格式输出一个合法 JSON 字符\n\n#example:\nsentence: You can look up the word in the dictionary.\n动词短语: look up\n\n#pos_list\n\"副词\", \"连词\", \"动词短语\"\n#sample output:\n[\n {\n \"index\": \"copy [sentence index] value from sentence\", \n \"sentence\": \"*\", \n \"keywords\": \"*\", \n \"POS_tagging\": \"*\", \n }\n]\n\n#input:\n桃园三结义#output:\n "},
# Not all models support this. The partial parameter requires the chat template to skip
# end-of-turn markers such as |im_end| for this message, so the model continues writing from it.
{"role": "assistant", "content": leading_text, "partial": True},
],
temperature=0.3,
)
assert len(resp.choices) > 0
content = resp.choices[0].message.content
assert content
content = leading_text + content
obj = json.loads(content)
obj
import json
import openai
key = "sk-77274694b69741d98f92ecd2902cb43c"
client = openai.Client(api_key=key, base_url="https://api.deepseek.com/beta")
leading_text = '{'
# prefix mode (DeepSeek beta API)
messages = [
{"role":"system","content":"你是 Kimi,由 Moonshot AI 提供的人工智能助手,你更擅长中文和英文的对话。你会为用户提供安全,有帮助,准确的回答。同时,你会拒绝一些涉及恐怖主义,种族歧视,黄色暴力等问题的回答。Moonshot AI 为专有名词,不可翻译成其他语言。"},
{"role":"user","content":"你是一个情景分析专家,用来分析一句短语中的主语故事中的人物、人物心理和下一步行为预测。你需要阅读完所有的故事,然后输出一个 JSON 格式便于后续的处理。请严格参考 #sample output 格式输出一个合法 JSON 字符\n\n#example:\nsentence: You can look up the word in the dictionary.\n动词短语: look up\n\n#pos_list\n\"副词\", \"连词\", \"动词短语\"\n#sample output:\n[\n {\n \"index\": \"copy [sentence index] value from sentence\", \n \"sentence\": \"*\", \n \"keywords\": \"*\", \n \"POS_tagging\": \"*\", \n }\n]\n\n#input:\n桃园三结义#output:\n "},
# The prefix parameter likewise requires the chat template to omit end-of-turn markers such as
# |im_end| for this message, so the model continues writing from it.
{"role": "assistant", "content": leading_text, "prefix": True},
]
response = client.chat.completions.create(
    model="deepseek-coder",
    messages=messages,
    stop=["```"],
)
assert len(response.choices) > 0
content = response.choices[0].message.content
print(content)
content = leading_text + content
obj = json.loads(content)
obj
## For other self-trained models, the same effect can be achieved by modifying the chat template
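For illustration, a hand-rolled ChatML-style renderer (a sketch only; the <|im_start|>/<|im_end|> token names are assumptions, not any specific model's real template) shows the idea: the final assistant message flagged prefix/partial is emitted without an end-of-turn token, so generation continues from its content:
# Sketch: render messages to a prompt string, leaving the last assistant message "open"
# (no end-of-turn token) when it carries a prefix/partial flag.
def render_chatml(messages):
    parts = []
    open_turn = False
    last = len(messages) - 1
    for i, m in enumerate(messages):
        open_turn = (
            i == last
            and m["role"] == "assistant"
            and bool(m.get("prefix") or m.get("partial"))
        )
        parts.append(f"<|im_start|>{m['role']}\n{m['content']}")
        if not open_turn:
            parts.append("<|im_end|>\n")
    if not open_turn:
        # Normal case: cue the model to start a fresh assistant turn.
        parts.append("<|im_start|>assistant\n")
    return "".join(parts)

# The rendered prompt ends with '{', so the model's first generated token continues
# the JSON object instead of opening a new message.
print(render_chatml([
    {"role": "user", "content": "Output a JSON object."},
    {"role": "assistant", "content": "{", "prefix": True},
]))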
JSON implementation 2
import json
from openai import OpenAI
client = OpenAI(
    api_key="<your api key>",
    base_url="https://api.deepseek.com",
)
system_prompt = """
The user will provide some exam text. Please parse the "question" and "answer" and output them in JSON format.
EXAMPLE INPUT:
Which is the highest mountain in the world? Mount Everest.
EXAMPLE JSON OUTPUT:
{
"question": "Which is the highest mountain in the world?",
"answer": "Mount Everest"
}
"""
user_prompt = "Which is the longest river in the world? The Nile River."
messages = [{"role": "system", "content": system_prompt},
{"role": "user", "content": user_prompt}]
response = client.chat.completions.create(
model="deepseek-coder",
messages=messages,
response_format={
'type': 'json_object'
}
)
print(json.loads(response.choices[0].message.content))
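Note: with response_format={'type': 'json_object'}, OpenAI-style APIs (DeepSeek included) generally also require the messages themselves to ask for JSON output, as the system prompt above does; otherwise the request may fail or the model may pad with whitespace until max_tokens.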
Continuation (prefix completion)
import json
import openai
key = "sk-77274694b69741d98f92ecd2902cb43c"
client = openai.Client(api_key=key, base_url="https://api.deepseek.com/beta")
leading_text = '{'
messages = [
    {"role": "user", "content": "Please write quick sort code"},
    {"role": "assistant", "content": "```python\n", "prefix": True}
]
response = client.chat.completions.create(
    model="deepseek-coder",
    messages=messages,
    stop=["```"],
)
response.choices[0].message.content
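The assistant prefix only forces the model to start a code block and stop=["```"] trims the closing fence, so the returned content is bare Python source; a small follow-up sketch:
# Sketch: the continuation alone is the generated program; no fence stripping is needed.
code = response.choices[0].message.content
print(code)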
Function calling
from openai import OpenAI

def send_messages(messages):
    response = client.chat.completions.create(
        model="deepseek-coder",
        messages=messages,
        tools=tools
    )
    return response.choices[0].message
client = OpenAI(
    api_key="<your api key>",
    base_url="https://api.deepseek.com",
)
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get weather of a location; the user should supply a location first",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g. San Francisco, CA",
                    }
                },
                "required": ["location"]
            },
        }
    },
]
messages = [{"role": "user", "content": "How's the weather in Hangzhou?"}]
message = send_messages(messages)
print(f"User>\t {messages[0]['content']}")
tool = message.tool_calls[0]
messages.append(message)
messages.append({"role": "tool", "tool_call_id": tool.id, "content": "24℃"})
message = send_messages(messages)
print(f"Model>\t {message.content}")