Upload backend.py
backend.py +122 -0
backend.py
ADDED
@@ -0,0 +1,122 @@
import json
import csv
import io
import os
import html  # For unescaping HTML entities
from bs4 import BeautifulSoup
from openai import OpenAI

# Initialize the OpenAI client against Nvidia's API endpoint (Mistral NeMo model)
client = OpenAI(
    base_url="https://integrate.api.nvidia.com/v1",
    api_key=os.getenv("NVIDIA_API_KEY")  # Read the key from the environment rather than hardcoding it
)

def clean_test_case_output(text):
    """
    Cleans the model output by unescaping HTML entities and removing unwanted tags.
    """
    # Unescape HTML entities (convert &lt; back to < and &gt; back to >)
    text = html.unescape(text)

    # Use BeautifulSoup to handle HTML tags more comprehensively
    soup = BeautifulSoup(text, 'html.parser')

    # Convert <br> tags to newlines and strip all other tags
    cleaned_text = soup.get_text(separator="\n").strip()

    return cleaned_text

def generate_testcases(user_story):
    """
    Generates QA test cases for a user story by calling Nvidia's Mistral NeMo model.
    The streamed response is accumulated, cleaned, and parsed as JSON when possible.

    :param user_story: A string representing the user story for which to generate test cases.
    :return: A list of test cases in the form of dictionaries.
    """
    try:
        # Ask the model to generate test cases for the given user story
        completion = client.chat.completions.create(
            model="nv-mistralai/mistral-nemo-12b-instruct",  # Nvidia-hosted Mistral NeMo model
            messages=[
                {"role": "user", "content": f"Generate QA test cases for the following user story: {user_story}"}
            ],
            temperature=0.06,  # Low temperature for precise, near-deterministic output
            top_p=0.5,         # Prioritize high-probability tokens
            max_tokens=2048,   # Allow longer responses
            stream=True        # Stream the response for faster retrieval
        )

        # Accumulate the streamed chunks into a single string
        test_cases_text = ""
        for chunk in completion:
            if chunk.choices[0].delta.content is not None:
                test_cases_text += chunk.choices[0].delta.content

        # Handle an empty response before cleaning
        if test_cases_text.strip() == "":
            return [{"test_case": "No test cases generated or output was empty."}]

        # Clean the output by unescaping HTML entities and stripping tags such as <br>
        test_cases_text = clean_test_case_output(test_cases_text)

        try:
            # Try to parse the output as JSON, in case the model returned structured test cases
            test_cases = json.loads(test_cases_text)
            if isinstance(test_cases, list):
                return test_cases  # Return structured test cases
            else:
                return [{"test_case": test_cases_text}]  # Wrap the text in a single-item list
        except json.JSONDecodeError:
            # Fallback: return the raw cleaned text if JSON parsing fails
            return [{"test_case": test_cases_text}]

    except Exception as e:
        # The OpenAI client raises its own exception types, so catch broadly here
        print(f"API request failed: {str(e)}")
        return []

# Export test cases in multiple formats
def export_test_cases(test_cases, format='json'):
    if not test_cases:
        return "No test cases to export."

    if format == 'json':
        # Pretty-print the JSON so it is readable line by line
        return json.dumps(test_cases, indent=4, separators=(',', ': '))
    elif format == 'csv':
        # CSV export requires a list of dictionaries
        if isinstance(test_cases, list) and isinstance(test_cases[0], dict):
            output = io.StringIO()
            csv_writer = csv.DictWriter(output, fieldnames=test_cases[0].keys(), quoting=csv.QUOTE_ALL)
            csv_writer.writeheader()
            csv_writer.writerows(test_cases)
            return output.getvalue()
        else:
            raise ValueError("Test cases must be a list of dictionaries for CSV export.")
    else:
        return f"Unsupported format: {format}"

# Save test cases as a downloadable file
def save_test_cases_as_file(test_cases, format='json'):
    if not test_cases:
        return "No test cases to save."

    if format == 'json':
        with open('test_cases.json', 'w') as f:
            json.dump(test_cases, f)
    elif format == 'csv':
        with open('test_cases.csv', 'w', newline='') as file:
            dict_writer = csv.DictWriter(file, fieldnames=test_cases[0].keys())
            dict_writer.writeheader()
            dict_writer.writerows(test_cases)
    else:
        return f"Unsupported format: {format}"
    return f'{format} file saved'
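
Not part of the uploaded file: a minimal usage sketch showing how a caller might exercise these helpers. It assumes NVIDIA_API_KEY is set in the environment, and the sample user story is invented purely for illustration.

if __name__ == "__main__":
    # Hypothetical example story; any user story string works here
    story = "As a user, I want to reset my password so that I can regain access to my account."

    cases = generate_testcases(story)                     # list of dicts, e.g. [{"test_case": "..."}]
    print(export_test_cases(cases, format='json'))        # pretty-printed JSON string
    print(save_test_cases_as_file(cases, format='csv'))   # writes test_cases.csv, returns "csv file saved"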