Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,16 +1,30 @@
|
|
1 |
import gradio as gr
|
2 |
from openai import OpenAI
|
3 |
import os
|
4 |
-
from typing import List, Tuple
|
5 |
import time
|
|
|
6 |
|
7 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
8 |
client = OpenAI(
|
9 |
base_url="https://api-inference.huggingface.co/v1/",
|
10 |
-
api_key=
|
11 |
)
|
12 |
|
|
|
13 |
MODEL_ID = "Qwen/Qwen2.5-Coder-32B-Instruct"
|
|
|
|
|
|
|
|
|
14 |
|
15 |
DEFAULT_SYSTEM_PROMPT = """
|
16 |
You are an expert software testing agent specializing in designing comprehensive test strategies and writing high-quality automated test scripts. Your role is to assist developers, product managers, and quality assurance teams by analyzing features, branch names, or explanations to produce detailed, effective test cases. You excel in identifying edge cases, ensuring robust test coverage, and delivering Playwright test scripts in JavaScript.
|
@@ -70,8 +84,7 @@ A concise summary of the feature or inferred functionality based on the user inp
|
|
70 |
Manual Test Cases:
|
71 |
|
72 |
vbnet
|
73 |
-
|
74 |
-
Edit
|
75 |
TC001: Verify successful login with valid credentials
|
76 |
Precondition(s): The user must have a valid account.
|
77 |
Steps:
|
@@ -81,9 +94,10 @@ Steps:
|
|
81 |
Expected Result: The user is redirected to the dashboard.
|
82 |
Automated Playwright Test Case (JavaScript):
|
83 |
|
|
|
|
|
84 |
javascript
|
85 |
-
|
86 |
-
Edit
|
87 |
const { test, expect } = require('@playwright/test');
|
88 |
|
89 |
test.describe('Login Feature Tests', () => {
|
@@ -118,100 +132,145 @@ test.describe('Login Feature Tests', () => {
|
|
118 |
await expect(errorMessage).toHaveText('Invalid username or password.');
|
119 |
});
|
120 |
});
|
|
|
|
|
121 |
With this structure, you’ll provide detailed, high-quality test plans that are both actionable and easy to implement. Let me know if you'd like additional examples or refinements!
|
122 |
|
123 |
"""
|
124 |
|
125 |
-
|
126 |
-
|
127 |
-
|
128 |
-
|
129 |
-
""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
130 |
|
131 |
def format_response(text: str) -> str:
|
132 |
-
"""
|
133 |
-
|
134 |
-
|
135 |
-
|
136 |
-
|
137 |
-
|
|
|
138 |
|
139 |
-
def
|
140 |
-
|
141 |
chat_history: List[Tuple[str, str]],
|
142 |
-
system_prompt: str
|
143 |
-
|
144 |
-
|
145 |
-
):
|
146 |
-
"""Generate a response using the OpenAI-compatible Hugging Face API."""
|
147 |
-
# Create conversation history
|
148 |
messages = [{"role": "system", "content": system_prompt}]
|
|
|
149 |
for user_msg, bot_msg in chat_history:
|
150 |
messages.extend([
|
151 |
{"role": "user", "content": user_msg},
|
152 |
{"role": "assistant", "content": bot_msg}
|
153 |
])
|
154 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
155 |
|
156 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
157 |
new_history = chat_history + [(message, "")]
|
158 |
partial_message = ""
|
159 |
-
|
160 |
try:
|
161 |
-
|
|
|
162 |
stream = client.chat.completions.create(
|
163 |
model=MODEL_ID,
|
164 |
messages=messages,
|
165 |
temperature=temperature,
|
166 |
-
max_tokens=max_tokens,
|
167 |
stream=True
|
168 |
)
|
169 |
|
170 |
-
# Process the stream
|
171 |
for chunk in stream:
|
172 |
-
if chunk.choices[0].delta.content
|
173 |
partial_message += chunk.choices[0].delta.content
|
174 |
-
|
175 |
-
new_history[-1] = (message,
|
176 |
yield new_history
|
177 |
-
time.sleep(
|
178 |
|
179 |
-
# Final update without cursor
|
180 |
new_history[-1] = (message, format_response(partial_message))
|
181 |
-
|
182 |
-
|
183 |
except Exception as e:
|
184 |
-
|
185 |
-
new_history[-1] = (message,
|
186 |
-
yield new_history
|
187 |
-
|
188 |
-
# Create Gradio interface
|
189 |
-
with gr.Blocks(css=CSS, theme=gr.themes.Soft()) as demo:
|
190 |
-
gr.Markdown("""
|
191 |
-
<h1 align="center">🧠 AI Reasoning Assistant</h1>
|
192 |
-
<p align="center">Ask me Hard questions</p>
|
193 |
-
""")
|
194 |
|
195 |
-
|
196 |
-
msg = gr.Textbox(label="Your Question", placeholder="Type your question...")
|
197 |
-
|
198 |
-
with gr.Accordion("⚙️ Settings", open=False):
|
199 |
-
system_prompt = gr.TextArea(value=DEFAULT_SYSTEM_PROMPT, label="System Instructions")
|
200 |
-
temperature = gr.Slider(0, 1, value=0.5, label="Creativity")
|
201 |
-
max_tokens = gr.Slider(128, 4096, value=2048, label="Max Response Length")
|
202 |
|
203 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
204 |
|
205 |
-
|
206 |
-
|
207 |
-
|
208 |
-
|
209 |
-
|
210 |
-
|
211 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
212 |
|
213 |
if __name__ == "__main__":
|
214 |
-
if not
|
215 |
-
|
216 |
-
|
217 |
-
|
|
|
|
import os
import time
from typing import Generator, List, Tuple

import gradio as gr
from openai import OpenAI

# Load the Hugging Face API token from the environment and fail fast when it
# is absent, so the app never starts with an unusable client.
# (Fixed: removed a duplicate os.getenv assignment and duplicate imports of
# `os` / `OpenAI`; the error message now names the env var actually read.)
HF_API_TOKEN = os.getenv("HF_TOKEN")
if not HF_API_TOKEN:
    raise ValueError("Missing HF_TOKEN environment variable")

# Module-level client. NOTE(review): generate_response builds its own client
# via initialize_client(); this one appears unused by the visible code but is
# kept for backward compatibility with any external importers.
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1/",
    api_key=HF_API_TOKEN,
)

# Constants
MODEL_ID = "Qwen/Qwen2.5-Coder-32B-Instruct"
DEFAULT_TEMPERATURE = 0.2
MAX_TOKENS_LIMIT = 8192      # hard cap applied to the user-selected max_tokens
STREAM_DELAY = 0.015         # seconds slept between streamed chunks (UI pacing)
|
29 |
DEFAULT_SYSTEM_PROMPT = """
|
30 |
You are an expert software testing agent specializing in designing comprehensive test strategies and writing high-quality automated test scripts. Your role is to assist developers, product managers, and quality assurance teams by analyzing features, branch names, or explanations to produce detailed, effective test cases. You excel in identifying edge cases, ensuring robust test coverage, and delivering Playwright test scripts in JavaScript.
|
|
|
84 |
Manual Test Cases:
|
85 |
|
86 |
vbnet
|
87 |
+
```
|
|
|
88 |
TC001: Verify successful login with valid credentials
|
89 |
Precondition(s): The user must have a valid account.
|
90 |
Steps:
|
|
|
94 |
Expected Result: The user is redirected to the dashboard.
|
95 |
Automated Playwright Test Case (JavaScript):
|
96 |
|
97 |
+
```
|
98 |
+
|
99 |
javascript
|
100 |
+
```
|
|
|
101 |
const { test, expect } = require('@playwright/test');
|
102 |
|
103 |
test.describe('Login Feature Tests', () => {
|
|
|
132 |
await expect(errorMessage).toHaveText('Invalid username or password.');
|
133 |
});
|
134 |
});
|
135 |
+
|
136 |
+
```
|
137 |
With this structure, you’ll provide detailed, high-quality test plans that are both actionable and easy to implement. Let me know if you'd like additional examples or refinements!
|
138 |
|
139 |
"""
|
140 |
|
141 |
+
# Section markers that get <strong> highlighting in rendered responses.
FORMATTING_TAGS = [
    "[Understand]",
    "[Plan]",
    "[Conclude]",
    "[Reason]",
    "[Verify]",
    "[Capabilities]",
    "[Response Guidelines]",
]
|
147 |
+
|
148 |
+
def initialize_client() -> OpenAI:
    """Create a new OpenAI-compatible client for the Hugging Face inference API.

    Returns a fresh client on each call, configured from the module-level
    HF_API_TOKEN.
    """
    return OpenAI(
        api_key=HF_API_TOKEN,
        base_url="https://api-inference.huggingface.co/v1/",
    )
|
154 |
|
155 |
def format_response(text: str) -> str:
    """Wrap every known formatting tag in *text* in an emphasis element.

    Each occurrence of a FORMATTING_TAGS marker is replaced with a
    `<strong class="special-tag">` wrapper; all other text passes through.
    """
    highlighted = text
    for marker in FORMATTING_TAGS:
        replacement = f'<strong class="special-tag">{marker}</strong>'
        highlighted = highlighted.replace(marker, replacement)
    return highlighted
|
163 |
|
164 |
+
def construct_messages(
    user_input: str,
    chat_history: List[Tuple[str, str]],
    system_prompt: str
) -> List[dict]:
    """Build the full message list for the API: system prompt, prior turns, new turn.

    Each (user, assistant) pair in chat_history expands to two messages,
    preserving order; the new user_input is appended last.
    """
    history_turns = [
        {"role": role, "content": content}
        for user_msg, bot_msg in chat_history
        for role, content in (("user", user_msg), ("assistant", bot_msg))
    ]
    return (
        [{"role": "system", "content": system_prompt}]
        + history_turns
        + [{"role": "user", "content": user_input}]
    )
|
180 |
+
|
181 |
+
def handle_api_error(e: Exception) -> str:
    """Map an exception to a short, user-facing error string.

    Matches on the exception text for authentication/timeout cases and falls
    back to a generic message naming the exception type.
    """
    details = str(e)
    if "Authentication" in details:
        return "🔒 Authentication Error: Check your API token"
    if "Timeout" in details:
        return "⏳ Request Timeout: Try again later"
    return f"⚠️ Error ({type(e).__name__}): {details}"
|
189 |
|
190 |
+
def generate_response(
    message: str,
    chat_history: List[Tuple[str, str]],
    system_prompt: str,
    temperature: float,
    max_tokens: int
) -> Generator[List[Tuple[str, str]], None, None]:
    """Stream a chat completion, yielding the updated history after each chunk.

    While streaming, the last turn carries a "▌" cursor glyph; the final yield
    drops the cursor. On any exception the last turn is replaced with a
    user-friendly error message instead of the answer.
    """
    api = initialize_client()
    updated = chat_history + [(message, "")]
    accumulated = ""

    try:
        request_messages = construct_messages(message, chat_history, system_prompt)
        token_stream = api.chat.completions.create(
            model=MODEL_ID,
            messages=request_messages,
            temperature=temperature,
            # Clamp the user-selected length to the hard limit.
            max_tokens=min(max_tokens, MAX_TOKENS_LIMIT),
            stream=True
        )

        for event in token_stream:
            delta = event.choices[0].delta.content
            if delta:
                accumulated += delta
                # Render with a trailing cursor while the answer is in flight.
                updated[-1] = (message, format_response(accumulated + "▌"))
                yield updated
                time.sleep(STREAM_DELAY)

        # Final render without the cursor.
        updated[-1] = (message, format_response(accumulated))

    except Exception as exc:
        updated[-1] = (message, handle_api_error(exc))

    yield updated
|
|
|
|
|
|
|
|
|
|
|
|
|
228 |
|
229 |
+
def create_interface() -> gr.Blocks:
    """Create and configure the Gradio interface.

    Returns the assembled Blocks app; the caller is responsible for launching
    it (see the __main__ guard).
    """
    css = """
    .gr-chatbot { min-height: 500px; border-radius: 15px; }
    .special-tag { color: #2ecc71; font-weight: 600; }
    footer { visibility: hidden; }
    """

    with gr.Blocks(css=css, theme=gr.themes.Soft()) as interface:
        gr.Markdown("""
        # 🧠 AI Test Engineering Assistant
        ## Specialized in Automated Testing Strategies
        """)

        chatbot = gr.Chatbot(label="Testing Discussion", elem_classes="gr-chatbot")
        user_input = gr.Textbox(label="Feature Description", placeholder="Describe feature or paste branch name...")

        with gr.Accordion("Engine Parameters", open=False):
            system_prompt = gr.TextArea(value=DEFAULT_SYSTEM_PROMPT, label="System Instructions")
            temperature = gr.Slider(0, 1, value=DEFAULT_TEMPERATURE, label="Creativity Level")
            max_tokens = gr.Slider(128, MAX_TOKENS_LIMIT, value=2048, label="Response Length")

        # BUG FIX: original read `controls = gr.Row():`, which is a
        # SyntaxError — a Row is a layout context manager and must be
        # entered with `with`.
        with gr.Row():
            clear_btn = gr.Button("🧹 Clear History")
            submit_btn = gr.Button("🚀 Generate Tests")

        # Enter-in-textbox and the submit button trigger the same generator.
        user_input.submit(
            generate_response,
            [user_input, chatbot, system_prompt, temperature, max_tokens],
            [chatbot]
        )

        submit_btn.click(
            generate_response,
            [user_input, chatbot, system_prompt, temperature, max_tokens],
            [chatbot]
        )

        # Reset the conversation pane to an empty history.
        clear_btn.click(lambda: [], None, chatbot)

    return interface
|
270 |
|
271 |
if __name__ == "__main__":
    # Defensive re-check; module import already validates the token.
    if not HF_API_TOKEN:
        raise ValueError("HF_API_TOKEN environment variable not set")

    app = create_interface()
    app.launch(server_port=7860, show_error=True)
|