-
Notifications
You must be signed in to change notification settings - Fork 162
/
test_cases.py
144 lines (113 loc) · 4.85 KB
/
test_cases.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
import requests
from anthropic import Anthropic
from mistralai import Mistral
from openai import OpenAI, AzureOpenAI
import google.generativeai as genai
# Function to build the LLM prompt that requests Gherkin test cases.
def create_test_cases_prompt(threats):
    """Return a prompt asking the model for Gherkin test cases covering *threats*.

    The threat list is interpolated verbatim; the prompt instructs the model to
    reference the threat descriptions in its 'Given' steps and to wrap output in
    a ```gherkin fenced block so it renders as Markdown.
    """
    return f"""
Act as a cyber security expert with more than 20 years experience of using the STRIDE threat modelling methodology.
Your task is to provide Gherkin test cases for the threats identified in a threat model. It is very important that
your responses are tailored to reflect the details of the threats.
Below is the list of identified threats:
{threats}
Use the threat descriptions in the 'Given' steps so that the test cases are specific to the threats identified.
Put the Gherkin syntax inside triple backticks (```) to format the test cases in Markdown. Add a title for each test case.
For example:
```gherkin
Given a user with a valid account
When the user logs in
Then the user should be able to access the system
```
YOUR RESPONSE (do not add introductory text, just provide the Gherkin test cases):
"""
# Function to get test cases from the GPT response.
def get_test_cases(api_key, model_name, prompt):
    """Send *prompt* to an OpenAI chat model and return the reply text.

    The system message fixes the assistant's role (Gherkin test cases in
    Markdown); the caller's prompt is passed through unchanged.
    """
    messages = [
        {"role": "system", "content": "You are a helpful assistant that provides Gherkin test cases in Markdown format."},
        {"role": "user", "content": prompt},
    ]
    client = OpenAI(api_key=api_key)
    completion = client.chat.completions.create(model=model_name, messages=messages)
    # The reply is plain text in the first choice's message content.
    return completion.choices[0].message.content
# Function to get test cases from the Azure OpenAI response.
def get_test_cases_azure(azure_api_endpoint, azure_api_key, azure_api_version, azure_deployment_name, prompt):
    """Send *prompt* to an Azure OpenAI deployment and return the reply text.

    Azure routes by deployment name rather than model name; otherwise this
    mirrors the plain OpenAI variant above.
    """
    client = AzureOpenAI(
        azure_endpoint=azure_api_endpoint,
        api_key=azure_api_key,
        api_version=azure_api_version,
    )
    messages = [
        {"role": "system", "content": "You are a helpful assistant that provides Gherkin test cases in Markdown format."},
        {"role": "user", "content": prompt},
    ]
    completion = client.chat.completions.create(model=azure_deployment_name, messages=messages)
    # The reply is plain text in the first choice's message content.
    return completion.choices[0].message.content
# Function to get test cases from the Google model's response.
def get_test_cases_google(google_api_key, google_model, prompt):
    """Send *prompt* to a Google Gemini model and return the reply text."""
    genai.configure(api_key=google_api_key)
    system_msg = "You are a helpful assistant that provides Gherkin test cases in Markdown format."
    gemini = genai.GenerativeModel(google_model, system_instruction=system_msg)
    result = gemini.generate_content(prompt)
    # Pull the text out of the first candidate's first content part.
    return result.candidates[0].content.parts[0].text
# Function to get test cases from the Mistral model's response.
def get_test_cases_mistral(mistral_api_key, mistral_model, prompt):
    """Send *prompt* to a Mistral chat model and return the reply text."""
    messages = [
        {"role": "system", "content": "You are a helpful assistant that provides Gherkin test cases in Markdown format."},
        {"role": "user", "content": prompt},
    ]
    client = Mistral(api_key=mistral_api_key)
    completion = client.chat.complete(model=mistral_model, messages=messages)
    # The reply is plain text in the first choice's message content.
    return completion.choices[0].message.content
# Function to get test cases from an Ollama hosted LLM.
def get_test_cases_ollama(ollama_model, prompt):
    """Send *prompt* to a locally hosted Ollama model and return the reply text.

    Calls the Ollama chat endpoint on localhost with streaming disabled, so the
    whole reply arrives in a single JSON object.

    Raises:
        requests.HTTPError: if the Ollama server returns a non-2xx status.
    """
    url = "http://localhost:11434/api/chat"
    data = {
        "model": ollama_model,
        "stream": False,
        "messages": [
            {
                "role": "system",
                "content": "You are a helpful assistant that provides Gherkin test cases in Markdown format."},
            {
                "role": "user",
                "content": prompt
            }
        ]
    }
    response = requests.post(url, json=data)
    # Fail with a clear HTTP error rather than an opaque KeyError below.
    response.raise_for_status()
    outer_json = response.json()
    # Access the 'content' attribute of the 'message' dictionary.
    # (Renamed from 'mitigations' — a copy-paste leftover from the
    # mitigations module; this function returns test cases.)
    test_cases = outer_json["message"]["content"]
    return test_cases
# Function to get test cases from the Anthropic model's response.
def get_test_cases_anthropic(anthropic_api_key, anthropic_model, prompt):
    """Send *prompt* to an Anthropic model and return the reply text.

    Anthropic takes the system instruction as a dedicated parameter rather
    than a message, and requires an explicit max_tokens budget.
    """
    client = Anthropic(api_key=anthropic_api_key)
    result = client.messages.create(
        model=anthropic_model,
        max_tokens=4096,
        system="You are a helpful assistant that provides Gherkin test cases in Markdown format.",
        messages=[{"role": "user", "content": prompt}],
    )
    # The reply text lives in the first content block.
    return result.content[0].text