modified code, add another file for gradio non-stream
This commit is contained in:
parent 8856813907
commit 36529085b3

testGenius.py (130 lines changed)
@@ -8,48 +8,54 @@ import gradio as gr
dashscope.api_key = 'sk-83b8ed0ead0849ae9e63a2ae5bdbde0d'  # Rayman's API key


def respond(prompt, chat_history, instruction, model, if_stream='Stream'):
    """
    Converse with the AI assistant and return the conversation history.

    Parameters:
    - prompt: the text entered by the user.
    - chat_history: the previous chat history; a list whose elements are
      two-element pairs of user input and AI response.
    - instruction: the system instruction used as the opening message of
      the conversation.

    Returns:
    - A generator that yields, on each step, a pair of an empty string and
      the updated chat history.
    """
    if if_stream == 'Stream':
        # Build the conversation message structure
        messages = [{'role': 'system',
                     'content': instruction},
                    {'role': 'user',
                     'content': prompt}
                    ]
        full_response = ""  # Initialize an empty string to aggregate the response
        # ------- Streaming output -------
        responses = Generation.call(model=model,
                                    messages=messages,
@@ -88,31 +94,45 @@ def respond(prompt, chat_history, instruction, model, if_stream):
            yield "", chat_history
            break  # stop the loop when an error occurs


def respond_nonStream(prompt, chat_history, instruction, model):
    # Build the conversation message structure
    messages = [{'role': 'system', 'content': instruction},
                {'role': 'user', 'content': prompt}]
    print(f"Messages: {messages}")

    # Initialize an empty string to aggregate the response
    full_response = ""

    # Call the AI model to generate a response
    try:
        response = Generation.call(model=model,
                                   messages=messages,
                                   # Set the random seed; if unset, it defaults to 1234
                                   seed=1234,
                                   # Return the output in "message" format
                                   result_format='message',
                                   stream=False,              # non-streaming output
                                   incremental_output=False)  # non-incremental output
        print(f"Response: {response}")

        if not chat_history or chat_history[-1][0] != prompt:
            chat_history.append([prompt, ""])
        print(f"old chat history: {chat_history}")
        if response.status_code == HTTPStatus.OK:
            # Extract the message content from the response
            message = response.output.choices[0]['message']['content']
            print(f"Generated message: {message}")

            # Update the last entry in the chat history
            chat_history[-1] = [prompt, message]
            print(f"Updated chat_history: {chat_history}")
            return "", chat_history
        else:
            print(f"Error: Received response status {response.status_code}")
            return "Error: Could not generate response", chat_history

    except Exception as e:
        print(f"Exception occurred: {e}")
        return f"Exception occurred: {e}", chat_history
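The loop body between the two hunks is elided by the diff view; only its trailing `yield`/`break` lines appear as context. A minimal sketch of what the elided streaming-consumption loop conventionally looks like with `incremental_output=True`, reconstructed from those context lines rather than taken from the commit itself:

    # Sketch only: the diff shows the yield/break tail, not the loop itself.
    for response in responses:
        if response.status_code == HTTPStatus.OK:
            # With incremental_output=True each chunk carries only the new
            # tokens, so they are accumulated into full_response.
            full_response += response.output.choices[0]['message']['content']
            chat_history[-1] = [prompt, full_response]
            yield "", chat_history
        else:
            print(f"Error: Received response status {response.status_code}")
            yield "", chat_history
            break  # stop the loop when an error occurs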
llm_model_list = ['qwen-turbo', 'qwen-plus', 'qwen-max']
init_llm = llm_model_list[0]

# Create the Gradio interface
@@ -122,17 +142,20 @@ with gr.Blocks() as demo:
    # AI TestGenius
    A simple LLM app for generating test cases from function design.
    """)
    history = [["Hello", "Hello, how can I help you?"]]
    chatbot = gr.Chatbot(history)
    msg = gr.Textbox(label="Prompt")
    with gr.Accordion(label="Advanced options", open=False):
        system = gr.Textbox(label="System prompts", lines=2,
                            value="A conversation between a user and an LLM-based AI assistant. The assistant gives helpful and honest answers.")
        # Choose the model
        llm = gr.Dropdown(
            llm_model_list,
            label='Choose LLM Model',
            value=init_llm,
            interactive=True
        )
        # Choose whether to stream the output
        if_stream = gr.Dropdown(
            ["Stream", "Non-Stream"],
            label='Choose Streaming',
@@ -145,7 +168,6 @@ with gr.Blocks() as demo:
    btn.click(respond, inputs=[msg, chatbot, system, llm, if_stream], outputs=[msg, chatbot])    # click to submit
    msg.submit(respond, inputs=[msg, chatbot, system, llm, if_stream], outputs=[msg, chatbot])   # press Enter to submit


# Run the interface
if __name__ == "__main__":
    gr.close_all()
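The hunk ends before the launch call. A plausible completion of the main block, under the assumption that this app targets Gradio 3.x, where a generator callback such as respond only streams to the UI if the app is queued first; both `demo.queue()` and the bare `demo.launch()` below are assumptions, not lines from the commit:

    if __name__ == "__main__":
        gr.close_all()
        demo.queue()   # assumption: Gradio 3.x needs queueing for yield-based callbacks
        demo.launch()  # assumption: default launch, port and share flags unspecified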
testGradio.py (new file, 59 lines)

@@ -0,0 +1,59 @@
import gradio as gr
import random
import time
from http import HTTPStatus
import dashscope
from dashscope import Generation

dashscope.api_key = 'sk-83b8ed0ead0849ae9e63a2ae5bdbde0d'  # Rayman's API key

with gr.Blocks() as demo:
    history = [["Hello", "Hello, how can I help you?"]]
    chatbot = gr.Chatbot(history)
    msg = gr.Textbox()
    clear = gr.ClearButton([msg, chatbot])

    def respond_nonStream(prompt, chat_history):
        # Build the conversation message structure
        messages = [{'role': 'system', 'content': "A conversation between a user and an LLM-based AI assistant. The assistant gives helpful and honest answers."},
                    {'role': 'user', 'content': prompt}]
        print(f"Messages: {messages}")

        # Initialize an empty string to aggregate the response
        full_response = ""

        # Call the AI model to generate a response
        try:
            response = Generation.call(model='qwen-turbo',
                                       messages=messages,
                                       seed=1234,
                                       result_format='message',
                                       stream=False,
                                       incremental_output=False)
            print(f"Response: {response}")

            if not chat_history or chat_history[-1][0] != prompt:
                chat_history.append([prompt, ""])
            print(f"old chat history: {chat_history}")
            if response.status_code == HTTPStatus.OK:
                # Extract the message content from the response
                message = response.output.choices[0]['message']['content']
                print(f"Generated message: {message}")

                # Update the last entry in the chat history
                chat_history[-1] = [prompt, message]
                print(f"Updated chat_history: {chat_history}")
                return "", chat_history
            else:
                print(f"Error: Received response status {response.status_code}")
                return "Error: Could not generate response", chat_history

        except Exception as e:
            print(f"Exception occurred: {e}")
            return f"Exception occurred: {e}", chat_history

    msg.submit(respond_nonStream, inputs=[msg, chatbot], outputs=[msg, chatbot])

if __name__ == "__main__":
    demo.launch()
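Because respond_nonStream returns a plain `("", chat_history)` pair rather than yielding, it can be smoke-tested without the UI. A hypothetical REPL check, assuming a valid DashScope API key and network access (the generated text will vary):

    # Not part of the new file; illustrates the handler's return contract.
    msg_out, history = respond_nonStream("Hello", [])
    print(msg_out)          # "" on success, or an error string
    print(history[-1][1])   # the model's reply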