# ------------------------------------------
import os
import datetime
import time
import random
from http import HTTPStatus

import numpy as np
import altair as alt
import pandas as pd
import streamlit as st
import dashscope
from dashscope import Generation
from streamlit_option_menu import option_menu
from PIL import Image

dashscope.api_key = os.getenv("DASHSCOPE_API_KEY")  # get api key from environment variable

st.set_page_config(layout="wide", page_title='TestGenius')

default_title = 'New Chat'
default_messages = [('user', 'Hello'),
                    ('assistant', 'Hello, how can I help you?')]

conversations = [{
    'id': 1,
    'title': 'Hello',
    'messages': default_messages
}]


def chat(user, message):
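    """Render a single chat message in the Streamlit chat area and echo it to stdout."""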
    with st.chat_message(user):
        print(user, ':', message)
        st.markdown(message)


# Keep the conversation list in session state so it survives Streamlit reruns
if 'conversations' not in st.session_state:
    st.session_state.conversations = conversations
conversations = st.session_state.conversations

# Index of the currently selected conversation
if 'index' not in st.session_state:
    st.session_state.index = 0

AVAILABLE_MODELS = [
    "qwen-max",
    "qwen-turbo",
]

with st.sidebar:
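    # Sidebar: model picker, conversation list, image upload, and chat-history controls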
    st.image('logo.png')
    st.subheader('', divider='rainbow')
    st.write('')

    llm = st.selectbox('Choose your Model', AVAILABLE_MODELS, index=0)

    # if st.button('New Chat'):
    #     conversations.append({'title': default_title, 'messages': []})
    #     st.session_state.index = len(conversations) - 1

    titles = [conversation['title'] for conversation in conversations]

    option = option_menu(
        'Conversations',
        titles,
        default_index=st.session_state.index
    )
    # Keep the selected conversation in sync with the menu choice
    st.session_state.index = titles.index(option)

    uploaded_file = st.file_uploader("Choose an image file", type="jpg")

    if st.button("Clear Chat History"):
        st.session_state.messages.clear()

    if uploaded_file:
        # Save the upload to disk and reference it via a local file:// path for the multimodal API
        image_uploaded = Image.open(uploaded_file)
        local_image = os.path.abspath(uploaded_file.name)
        image_uploaded.save(local_image)
        image_path = 'file://' + local_image


def respond(prompt):
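    """Stream a reply from the Generation API, yielding one text chunk at a time."""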
    messages = [
        {'role': 'user', 'content': prompt}]
    responses = Generation.call(model=llm,  # use the model selected in the sidebar
                                messages=messages,
                                result_format='message',  # return the output in 'message' format
                                stream=True,  # stream the response
                                incremental_output=True  # emit incremental chunks
                                )
    for response in responses:
        if response.status_code == HTTPStatus.OK:
            yield response.output.choices[0]['message']['content'] + " "
        else:
            yield 'Request id: %s, Status code: %s, error code: %s, error message: %s' % (
                response.request_id, response.status_code,
                response.code, response.message
            )


def respond_nonStream(prompt, instruction):
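    """Return a single (non-streaming) reply to `prompt`, using `instruction` as the system message."""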
    messages = [
        {'role': 'system', 'content': instruction},
        {'role': 'user', 'content': prompt}]
    response = Generation.call(model=llm,  # use the model selected in the sidebar
                               messages=messages,
                               result_format='message',  # return the output in 'message' format
                               temperature=1,
                               top_p=0.8,
                               top_k=50)
    if response.status_code == HTTPStatus.OK:
        return response.output.choices[0]['message']['content']
    else:
        return 'Request id: %s, Status code: %s, error code: %s, error message: %s' % (
            response.request_id, response.status_code,
            response.code, response.message
        )


def respond_image(prompt, image):
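    """Answer `prompt` about a local image with the qwen-vl-max multimodal model."""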
    messages = [
        {'role': 'user',
         "content": [
             {"image": image},
             {"text": prompt}
         ]
         }
    ]
    response = dashscope.MultiModalConversation.call(model="qwen-vl-max",
                                                     messages=messages,
                                                     result_format='message',  # return the output in 'message' format
                                                     )
    if response.status_code == HTTPStatus.OK:
        return response.output.choices[0]['message']['content']
    else:
        return 'Request id: %s, Status code: %s, error code: %s, error message: %s' % (
            response.request_id, response.status_code,
            response.code, response.message
        )


prompt = st.chat_input("Enter your question")

st.session_state.messages = conversations[st.session_state.index]['messages']

if prompt:
    if conversations[st.session_state.index]['title'] == default_title:
        conversations[st.session_state.index]['title'] = prompt[:12]
    for user, message in st.session_state.messages:
        chat(user, message)
    chat('user', prompt)
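
    # System prompt that steers the model to reply with Markdown test-case tables in the user's language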
    instruction = """
    # Role definition
    You are a multilingual test-engineer AI. You first determine the language of the user's input and tell the user, then, from the product feature description the user provides, you generate detailed and accurate test cases
    and make sure the test cases are written in the same language as the user's input. Whatever (supported) language the user describes the product feature in, you output the test cases in that same language.

    # Task requirements
    - Carefully analyse the one or more product feature requirements the user submits.
    - Generate language-consistent test cases from the requirement text or by interpreting the description in an image.

    # Input handling
    - The input may be a text description in any language, an image, or a combination of both.
    - For multiple requirements, identify each one and generate the corresponding test cases in the language and order of the input.

    # Format and conventions
    - Show test cases as a Markdown table with three columns: `Case Number`, `Test Steps` and `Expected Results`.
    - Stay in the language of the user's input and make sure languages are never mixed.
    - If a requirement is given as an image, explain the image content and then treat it like a text requirement in the same language.

    # Output rules
    - Output one separate Markdown-table test case for each product feature requirement.
    - For multiple requirements, generate and number each test case in the order the requirements were submitted, keeping the output sequential and accurate.
    - For input unrelated to product features, reply professionally and appropriately: "As a language-adaptive test-engineer AI, I focus on generating test cases. Please provide a concrete product feature requirement so that I can answer in the corresponding language."

    # Example 1 (Chinese input)
    user:“当用户点击登录按钮时,如果已正确输入用户名和密码,系统应跳转到首页。”
    assistant:
    您输入了中文的功能描述。
    | 用例编号 | 测试步骤 | 预期结果 |
    | -------- | ------------------------------------------------------ | ---------------- |
    | 1 | 1. 输入正确的用户名和密码<br>2. 点击登录按钮 | 系统跳转到首页 |

    # Example 2 (English input)
    user: Clicking the registration button submits the data.
    assistant:
    You entered a requirement in English.
    Test cases:
    | Case Number | Test Steps | Expected Results |
    | -------- | ------------------------------------------------------ | ---------------- |
    | 1 | 1. Click the registration button | The data is submitted |
    """
    if uploaded_file:
        answer = respond_image(prompt, image_path)
    else:
        answer = respond_nonStream(prompt, instruction)

    st.session_state.messages.append(('user', prompt))
    st.session_state.messages.append(('assistant', answer))
    chat('assistant', answer)
else:
    for user, message in st.session_state.messages:
        chat(user, message)