Daemontatox committed
Commit 5229d3d · verified · 1 Parent(s): e4b8765

Update app.py

Files changed (1)
app.py +5 -131
app.py CHANGED
@@ -4,141 +4,15 @@ import os
  import time
  from typing import List, Tuple, Generator

-
- import os
- from openai import OpenAI
-
- # Load API key from environment
- HF_API_TOKEN = os.getenv("HF_TOKEN")
- if not HF_API_TOKEN:
-     raise ValueError("Missing HF_API_TOKEN environment variable")
-
- # Initialize client with environment-based config
- client = OpenAI(
-     base_url="https://api-inference.huggingface.co/v1/",
-     api_key=HF_API_TOKEN  # Uses env variable
- )
-
  # Constants
  MODEL_ID = "Qwen/Qwen2.5-Coder-32B-Instruct"
  HF_API_TOKEN = os.getenv("HF_TOKEN")
- DEFAULT_TEMPERATURE = 0.2
+ DEFAULT_TEMPERATURE = 0.5
  MAX_TOKENS_LIMIT = 8192
  STREAM_DELAY = 0.015

- DEFAULT_SYSTEM_PROMPT = """
- You are an expert software testing agent specializing in designing comprehensive test strategies and writing high-quality automated test scripts. Your role is to assist developers, product managers, and quality assurance teams by analyzing features, branch names, or explanations to produce detailed, effective test cases. You excel in identifying edge cases, ensuring robust test coverage, and delivering Playwright test scripts in JavaScript.
-
- Capabilities:
- Feature Understanding:
-
- Analyze the feature description, branch name, or user explanation to extract its purpose, expected behavior, and key functionality.
- Infer implicit requirements and edge cases that might not be explicitly mentioned.
- Test Case Generation:
-
- Design manual test cases for functional, non-functional, and exploratory testing. These should include:
- Positive test cases (expected behavior).
- Negative test cases (handling invalid inputs or unexpected conditions).
- Edge cases (extreme or boundary conditions).
- Performance and security-related scenarios, if applicable.
- Write automated test cases in Playwright using JavaScript that adhere to modern testing standards.
- Playwright Expertise:
-
- Generate Playwright test scripts with modular, reusable code that follows best practices for maintainability and readability.
- Use robust selectors (data attributes preferred) and implement techniques like handling asynchronous operations, mocking API responses, and parameterized testing where applicable.
- Write test scripts with proper comments, error handling, and clear structure.
- Coverage Prioritization:
-
- Focus on high-priority areas like critical user flows, core functionality, and areas prone to failure.
- Ensure comprehensive coverage for edge cases to make the system resilient.
- Response Guidelines:
- Context Analysis:
-
- If the user provides a branch name, infer the feature or functionality it relates to and proceed to generate test cases.
- If the user provides a feature explanation, ensure your test cases align with the described functionality and its goals.
- Ask clarifying questions if necessary to improve your understanding before generating test cases.
- Structured Output:
-
- Start with a brief summary of the feature or inferred functionality based on the input.
- Present manual test cases first, with a clear numbering format and detailed steps for testers to follow.
- Follow with automated Playwright test scripts, formatted with proper indentation and ready for execution.
- Test Cases Format:
-
- Manual Test Cases:
- ID: Test case identifier (e.g., TC001).
- Title: Clear and descriptive title.
- Precondition(s): Any setup required before execution.
- Steps: Step-by-step instructions for execution.
- Expected Result: The expected outcome of the test.
- Playwright Automated Test Cases:
- Include setup (browser context and page), reusable utility functions, and parameterized test cases where applicable.
- Ensure clear commenting for each section of the script.
- Best Practices:
-
- Recommend improvements to testability if the input feature is unclear or incomplete.
- Provide tips for maintaining the test suite, such as organizing tests by feature or tagging tests for easy execution.
- Sample Output Template:
- Feature Summary:
-
- A concise summary of the feature or inferred functionality based on the user input.
- Manual Test Cases:
-
- vbnet
- ```
- TC001: Verify successful login with valid credentials
- Precondition(s): The user must have a valid account.
- Steps:
- 1. Navigate to the login page.
- 2. Enter valid username and password.
- 3. Click on the "Login" button.
- Expected Result: The user is redirected to the dashboard.
- Automated Playwright Test Case (JavaScript):
-
- ```
-
- javascript
- ```
- const { test, expect } = require('@playwright/test');
-
- test.describe('Login Feature Tests', () => {
-   test('Verify successful login with valid credentials', async ({ page }) => {
-     // Navigate to the login page
-     await page.goto('https://example.com/login');
-
-     // Enter credentials
-     await page.fill('#username', 'testuser');
-     await page.fill('#password', 'password123');
-
-     // Click the login button
-     await page.click('button#login');
-
-     // Assert redirection to dashboard
-     await expect(page).toHaveURL('https://example.com/dashboard');
-   });
-
-   test('Verify login fails with invalid credentials', async ({ page }) => {
-     // Navigate to the login page
-     await page.goto('https://example.com/login');
-
-     // Enter invalid credentials
-     await page.fill('#username', 'invaliduser');
-     await page.fill('#password', 'wrongpassword');
-
-     // Click the login button
-     await page.click('button#login');
-
-     // Assert error message is displayed
-     const errorMessage = await page.locator('.error-message');
-     await expect(errorMessage).toHaveText('Invalid username or password.');
-   });
- });
-
- ```
- With this structure, you’ll provide detailed, high-quality test plans that are both actionable and easy to implement. Let me know if you'd like additional examples or refinements!
-
- """
+ DEFAULT_SYSTEM_PROMPT = """You are an expert software testing agent specializing in designing comprehensive test strategies and writing high-quality automated test scripts. Your role is to assist developers, product managers, and quality assurance teams by analyzing features, branch names, or explanations to produce detailed, effective test cases."""

- # HTML formatting tags to emphasize
  FORMATTING_TAGS = [
      "[Understand]", "[Plan]", "[Conclude]",
      "[Reason]", "[Verify]", "[Capabilities]",
@@ -248,7 +122,7 @@ def create_interface() -> gr.Blocks:
          temperature = gr.Slider(0, 1, value=DEFAULT_TEMPERATURE, label="Creativity Level")
          max_tokens = gr.Slider(128, MAX_TOKENS_LIMIT, value=2048, label="Response Length")

-         controls = gr.Row():
+         with gr.Row():
              clear_btn = gr.Button("🧹 Clear History")
              submit_btn = gr.Button("🚀 Generate Tests")

@@ -270,7 +144,7 @@ def create_interface() -> gr.Blocks:

  if __name__ == "__main__":
      if not HF_API_TOKEN:
-         raise ValueError("HF_API_TOKEN environment variable not set")
+         raise ValueError("HF_API_TOKEN environment variable not set - add it in Spaces settings!")

      interface = create_interface()
-     interface.launch(server_port=7860, show_error=True)
+     interface.launch(server_name="0.0.0.0", server_port=7860)
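For reference, the block removed from the top of app.py was the OpenAI-compatible client wired to the Hugging Face Inference API. Below is a minimal sketch of that setup, assuming the app keeps an equivalent client elsewhere; the streaming request, the message contents, and the print loop are illustrative and not taken from this commit.

```python
import os

from openai import OpenAI

# Read the token from the environment, as app.py does with HF_TOKEN.
HF_API_TOKEN = os.getenv("HF_TOKEN")
if not HF_API_TOKEN:
    raise ValueError("Missing HF_TOKEN environment variable")

# OpenAI-compatible client pointed at the Hugging Face Inference API,
# mirroring the block this commit removes from the top of app.py.
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1/",
    api_key=HF_API_TOKEN,
)

# Illustrative streaming request against the model used by the Space;
# the actual call site in app.py is not shown in this diff.
stream = client.chat.completions.create(
    model="Qwen/Qwen2.5-Coder-32B-Instruct",
    messages=[
        {"role": "system", "content": "You are an expert software testing agent."},
        {"role": "user", "content": "Write Playwright tests for the login page."},
    ],
    temperature=0.5,
    max_tokens=2048,
    stream=True,
)

for chunk in stream:
    delta = chunk.choices[0].delta.content
    if delta:
        print(delta, end="", flush=True)
```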
 
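The remaining hunks fix a Gradio layout bug and adjust startup: `controls = gr.Row():` is not valid Python, since gr.Row is meant to be used as a context manager, and binding to 0.0.0.0 lets the server be reached from outside a Spaces container. A stripped-down sketch of the corrected pattern follows, assuming the rest of create_interface() (chat components and event wiring) stays as before and is omitted here.

```python
import gradio as gr

DEFAULT_TEMPERATURE = 0.5
MAX_TOKENS_LIMIT = 8192


def create_interface() -> gr.Blocks:
    with gr.Blocks() as interface:
        temperature = gr.Slider(0, 1, value=DEFAULT_TEMPERATURE, label="Creativity Level")
        max_tokens = gr.Slider(128, MAX_TOKENS_LIMIT, value=2048, label="Response Length")

        # gr.Row is a context manager: components created inside the
        # "with" block are laid out side by side and registered with
        # the surrounding Blocks, so no explicit "controls" variable
        # is needed.
        with gr.Row():
            clear_btn = gr.Button("🧹 Clear History")
            submit_btn = gr.Button("🚀 Generate Tests")
    return interface


if __name__ == "__main__":
    interface = create_interface()
    # 0.0.0.0 exposes the server outside the container, as Spaces expects.
    interface.launch(server_name="0.0.0.0", server_port=7860)
```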