#!/bin/bash
function show_help_and_exit()
{
echo "Usage ${SCRIPT} [options]"
echo " options with (*) must be provided"
echo " -h -? : get this help"
echo " -a <True|False>: specify if auto-recover is allowed (default: True)"
echo " -b <master_id> : specify name of k8s master group used in k8s inventory, format: k8s_vms{msetnumber}_{servernumber}"
echo " -c <testcases> : specify test cases to execute (default: none, executed all matched)"
echo " -d <dut name> : specify comma-separated DUT names (default: DUT name associated with testbed in testbed file)"
echo " -e <parameters>: specify extra parameter(s) (default: none)"
echo " -E : exit for any error (default: False)"
echo " -f <tb file> : specify testbed file (default testbed.csv)"
echo " -i <inventory> : specify inventory name"
echo " -I <folders> : specify list of test folders, filter out test cases not in the folders (default: none)"
echo " -k <file log> : specify file log level: error|warning|info|debug (default debug)"
echo " -l <cli log> : specify cli log level: error|warning|info|debug (default warning)"
echo " -m <method> : specify test method group|individual|debug (default group)"
echo " -n <testbed> : specify testbed name (*)"
echo " -o : omit the file logs"
echo " -O : run tests in input order rather than alphabetical order"
echo " -p <path> : specify log path (default: logs)"
echo " -q <n> : test will stop after <n> failures (default: not stop on failure)"
echo " -r : retain individual file log for suceeded tests (default: remove)"
echo " -s <tests> : specify list of tests to skip (default: none)"
echo " -S <folders> : specify list of test folders to skip (default: none)"
echo " -t <topology> : specify toplogy: t0|t1|any|combo like t0,any (*)"
echo " -u : bypass util group"
echo " -x : print commands and their arguments as they are executed"
exit $1
}
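# Example invocation (testbed, DUT and test script names below are illustrative;
# substitute values from your own testbed file and inventory):
#   ./run_tests.sh -n vms-kvm-t0 -d vlab-01 -t t0 -c "bgp/test_bgp_fact.py"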
function get_dut_from_testbed_file() {
if [[ -z ${DUT_NAME} ]]; then
if [[ $TESTBED_FILE == *.csv ]]; then
# Match the conf-name column exactly; the trailing comma avoids prefix collisions
LINE=$(grep "^${TESTBED_NAME}," "$TESTBED_FILE")
IFS=',' read -ra ARRAY <<< "$LINE"
DUT_NAME=${ARRAY[9]}
elif [[ $TESTBED_FILE == *.yaml ]]; then
content=$(python -c "from __future__ import print_function; import yaml; print('+'.join(str(tb) for tb in yaml.safe_load(open('$TESTBED_FILE')) if '$TESTBED_NAME'==tb['conf-name']))")
IFS=$'+' read -r -a tb_lines <<< "$content"
tb_line=${tb_lines[0]}
DUT_NAME=$(python -c "from __future__ import print_function; tb=eval(\"$tb_line\"); print(\",\".join(tb[\"dut\"]))")
fi
fi
}
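# Illustrative testbed.csv row layout (assumed from the ARRAY[9] index above: the
# dut field is the 10th comma-separated column):
#   conf-name,group-name,topo,ptf_image_name,ptf,ptf_ip,ptf_ipv6,server,vm_base,dut,comment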
function validate_parameters()
{
RET=0
if [[ -z ${DUT_NAME} ]]; then
echo "DUT name (-d) is not set.."
RET=1
fi
if [[ -z ${TESTBED_NAME} ]]; then
echo "Testbed name (-n) is not set.."
RET=2
fi
if [[ -z ${TOPOLOGY} && -z ${TEST_CASES} ]]; then
echo "Neither TOPOLOGY (-t) nor test case list (-c) is set.."
RET=3
fi
if [[ ${RET} != 0 ]]; then
show_help_and_exit ${RET}
fi
}
function setup_environment()
{
SCRIPT=$0
FULL_PATH=$(realpath ${SCRIPT})
SCRIPT_PATH=$(dirname ${FULL_PATH})
BASE_PATH=$(dirname ${SCRIPT_PATH})
LOG_PATH="logs"
AUTO_RECOVER="True"
BYPASS_UTIL="False"
CLI_LOG_LEVEL='warning'
EXTRA_PARAMETERS=""
FILE_LOG_LEVEL='debug'
INCLUDE_FOLDERS=""
INVENTORY="${BASE_PATH}/ansible/lab,${BASE_PATH}/ansible/veos"
KUBE_MASTER_ID="unset"
OMIT_FILE_LOG="False"
RETAIN_SUCCESS_LOG="False"
SKIP_SCRIPTS=""
SKIP_FOLDERS="ptftests acstests saitests scripts k8s"
TESTBED_FILE="${BASE_PATH}/ansible/testbed.csv"
TEST_CASES=""
TEST_INPUT_ORDER="False"
TEST_METHOD='group'
TEST_MAX_FAIL=0
export ANSIBLE_CONFIG=${BASE_PATH}/ansible
export ANSIBLE_LIBRARY=${BASE_PATH}/ansible/library/
export ANSIBLE_CONNECTION_PLUGINS=${BASE_PATH}/ansible/plugins/connection
rm -fr ${BASE_PATH}/tests/_cache
}
function setup_test_options()
{
# If a test script is explicitly specified on the pytest command line, passing `--ignore` for it has no effect.
# The logic below ensures that SKIP_FOLDERS and SKIP_SCRIPTS take precedence over the specified TEST_CASES:
# if a test script appears in both ${TEST_CASES} and ${SKIP_SCRIPTS}, it will not be executed. This design
# supports specifying test scripts with a pattern like `subfolder/test_*.py`, which bash expands to the
# matching scripts; among the expanded scripts we may want to skip a few, which we can do by listing them
# explicitly, as in the example below.
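# For example (hypothetical paths): with TEST_CASES="acl/test_acl.py acl/test_stress_acl.py" and
# SKIP_SCRIPTS="acl/test_stress_acl.py", only acl/test_acl.py is executed.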
ignores=$(python -c "print('|'.join('''$SKIP_FOLDERS'''.split()))")
if [[ -z ${TEST_CASES} ]]; then
# When TEST_CASES is not specified, find all the possible scripts, ignore the scripts under $SKIP_FOLDERS
all_scripts=$(find ./ -name 'test_*.py' | sed s:^./:: | grep -vE "^(${ignores})")
else
# When TEST_CASES is specified, ignore the scripts under $SKIP_FOLDERS
all_scripts=""
for test_script in ${TEST_CASES}; do
all_scripts="${all_scripts} $(echo ${test_script} | sed s:^./:: | grep -vE "^(${ignores})")"
done
fi
# Ignore the scripts specified in $SKIP_SCRIPTS
if [[ x"${TEST_INPUT_ORDER}" == x"True" ]]; then
TEST_CASES=$(python -c "print('\n'.join([testcase for testcase in list('''$all_scripts'''.split()) if testcase not in set('''$SKIP_SCRIPTS'''.split())]))")
else
TEST_CASES=$(python -c "print('\n'.join(set('''$all_scripts'''.split()) - set('''$SKIP_SCRIPTS'''.split())))" | sort)
fi
# Check against $INCLUDE_FOLDERS, filter out test cases not in the specified folders
FINAL_CASES=""
includes=$(python -c "print('|'.join('''$INCLUDE_FOLDERS'''.split()))")
for test_case in ${TEST_CASES}; do
FINAL_CASES="${FINAL_CASES} $(echo ${test_case} | grep -E "^(${includes})")"
done
TEST_CASES=$(python -c "print('\n'.join('''${FINAL_CASES}'''.split()))")
PYTEST_COMMON_OPTS="--inventory ${INVENTORY} \
--host-pattern ${DUT_NAME} \
--testbed ${TESTBED_NAME} \
--testbed_file ${TESTBED_FILE} \
--log-cli-level ${CLI_LOG_LEVEL} \
--log-file-level ${FILE_LOG_LEVEL} \
--kube_master ${KUBE_MASTER_ID} \
--showlocals \
--assert plain \
--show-capture no \
-rav"
if [[ x"${AUTO_RECOVER}" == x"True" ]]; then
PYTEST_COMMON_OPTS="${PYTEST_COMMON_OPTS} --allow_recover"
fi
for skip in ${SKIP_SCRIPTS} ${SKIP_FOLDERS}; do
PYTEST_COMMON_OPTS="${PYTEST_COMMON_OPTS} --ignore=${skip}"
done
if [[ -d ${LOG_PATH} ]]; then
rm -rf ${LOG_PATH}
fi
if [[ x"${OMIT_FILE_LOG}" == x"True" ]]; then
PRET_LOGGING_OPTIONS=""
POST_LOGGING_OPTIONS=""
TEST_LOGGING_OPTIONS=""
else
mkdir -p ${LOG_PATH}
PRET_LOGGING_OPTIONS="--junit-xml=${LOG_PATH}/pretest.xml --log-file=${LOG_PATH}/pretest.log"
POST_LOGGING_OPTIONS="--junit-xml=${LOG_PATH}/posttest.xml --log-file=${LOG_PATH}/posttest.log"
TEST_LOGGING_OPTIONS="--junit-xml=${LOG_PATH}/tr.xml --log-file=${LOG_PATH}/test.log"
fi
UTIL_TOPOLOGY_OPTIONS="--topology util"
if [[ -z ${TOPOLOGY} ]]; then
TEST_TOPOLOGY_OPTIONS=""
else
TEST_TOPOLOGY_OPTIONS="--topology ${TOPOLOGY}"
fi
PYTEST_UTIL_OPTS=${PYTEST_COMMON_OPTS}
# The max-failure limit applies only to the test session, not to the preparation and cleanup sessions.
if [[ ${TEST_MAX_FAIL} != 0 ]]; then
PYTEST_COMMON_OPTS="${PYTEST_COMMON_OPTS} --maxfail=${TEST_MAX_FAIL}"
fi
}
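# With the defaults above, a group-mode run is roughly equivalent to the following
# (illustrative sketch; <testbed>, <dut> and the test list come from -n, -d and -c/-t):
#   pytest <test scripts> \
#       --inventory ${BASE_PATH}/ansible/lab,${BASE_PATH}/ansible/veos \
#       --host-pattern <dut> --testbed <testbed> --testbed_file ${BASE_PATH}/ansible/testbed.csv \
#       --log-cli-level warning --log-file-level debug --kube_master unset \
#       --showlocals --assert plain --show-capture no -rav --allow_recover \
#       --ignore=ptftests --ignore=acstests --ignore=saitests --ignore=scripts --ignore=k8s \
#       --junit-xml=logs/tr.xml --log-file=logs/test.log --topology <topology>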
function run_debug_tests()
{
echo "=== Show test settings ==="
echo "SCRIPT: ${SCRIPT}"
echo "FULL_PATH: ${FULL_PATH}"
echo "SCRIPT_PATH: ${SCRIPT_PATH}"
echo "BASE_PATH: ${BASE_PATH}"
echo "ANSIBLE_CONFIG: ${ANSIBLE_CONFIG}"
echo "ANSIBLE_LIBRARY: ${ANSIBLE_LIBRARY}"
echo "AUTO_RECOVER: ${AUTO_RECOVER}"
echo "BYPASS_UTIL: ${BYPASS_UTIL}"
echo "CLI_LOG_LEVEL: ${CLI_LOG_LEVEL}"
echo "EXTRA_PARAMETERS: ${EXTRA_PARAMETERS}"
echo "FILE_LOG_LEVEL: ${FILE_LOG_LEVEL}"
echo "INCLUDE_FOLDERS: ${INCLUDE_FOLDERS}"
echo "INVENTORY: ${INVENTORY}"
echo "LOG_PATH: ${LOG_PATH}"
echo "OMIT_FILE_LOG: ${OMIT_FILE_LOG}"
echo "RETAIN_SUCCESS_LOG: ${RETAIN_SUCCESS_LOG}"
echo "SKIP_SCRIPTS: ${SKIP_SCRIPTS}"
echo "SKIP_FOLDERS: ${SKIP_FOLDERS}"
echo "TEST_CASES: ${TEST_CASES}"
echo "TEST_INPUT_ORDER: ${TEST_INPUT_ORDER}"
echo "TEST_MAX_FAIL: ${TEST_MAX_FAIL}"
echo "TEST_METHOD: ${TEST_METHOD}"
echo "TESTBED_FILE: ${TESTBED_FILE}"
echo "TEST_LOGGING_OPTIONS: ${TEST_LOGGING_OPTIONS}"
echo "TEST_TOPOLOGY_OPTIONS: ${TEST_TOPOLOGY_OPTIONS}"
echo "PRET_LOGGING_OPTIONS: ${PRET_LOGGING_OPTIONS}"
echo "POST_LOGGING_OPTIONS: ${POST_LOGGING_OPTIONS}"
echo "UTIL_TOPOLOGY_OPTIONS: ${UTIL_TOPOLOGY_OPTIONS}"
echo "PYTEST_COMMON_OPTS: ${PYTEST_COMMON_OPTS}"
}
function prepare_dut()
{
echo "=== Preparing DUT for subsequent tests ==="
pytest ${PYTEST_UTIL_OPTS} ${PRET_LOGGING_OPTIONS} ${UTIL_TOPOLOGY_OPTIONS} ${EXTRA_PARAMETERS} -m pretest
}
function cleanup_dut()
{
echo "=== Cleaning up DUT after tests ==="
pytest ${PYTEST_UTIL_OPTS} ${POST_LOGGING_OPTIONS} ${UTIL_TOPOLOGY_OPTIONS} ${EXTRA_PARAMETERS} -m posttest
}
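# Both utility sessions reuse PYTEST_UTIL_OPTS, which is captured before --maxfail is
# appended, so preparation and cleanup always run to completion; the pretest/posttest
# pytest markers select the corresponding utility test cases.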
function run_group_tests()
{
echo "=== Running tests in groups ==="
pytest ${TEST_CASES} ${PYTEST_COMMON_OPTS} ${TEST_LOGGING_OPTIONS} ${TEST_TOPOLOGY_OPTIONS} ${EXTRA_PARAMETERS}
}
function run_individual_tests()
{
EXIT_CODE=0
echo "=== Running tests individually ==="
for test_script in ${TEST_CASES}; do
if [[ x"${OMIT_FILE_LOG}" != x"True" ]]; then
test_dir=$(dirname ${test_script})
script_name=$(basename ${test_script})
test_name=${script_name%.py}
if [[ ${test_dir} != "." ]]; then
mkdir -p ${LOG_PATH}/${test_dir}
fi
TEST_LOGGING_OPTIONS="--log-file ${LOG_PATH}/${test_dir}/${test_name}.log --junitxml=${LOG_PATH}/${test_dir}/${test_name}.xml"
fi
pytest ${test_script} ${PYTEST_COMMON_OPTS} ${TEST_LOGGING_OPTIONS} ${TEST_TOPOLOGY_OPTIONS} ${EXTRA_PARAMETERS}
ret_code=$?
# If test passed, no need to keep its log.
if [ ${ret_code} -eq 0 ]; then
if [[ x"${OMIT_FILE_LOG}" != x"True" && x"${RETAIN_SUCCESS_LOG}" == x"False" ]]; then
rm -f ${LOG_PATH}/${test_dir}/${test_name}.log
fi
else
EXIT_CODE=1
if [[ ${TEST_MAX_FAIL} != 0 ]]; then
return ${EXIT_CODE}
fi
fi
done
return ${EXIT_CODE}
}
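# In individual mode each script writes its own log pair mirroring the test tree under
# ${LOG_PATH}, e.g. (illustrative) logs/bgp/test_bgp_fact.log and logs/bgp/test_bgp_fact.xml;
# the .log of a passing test is removed unless -r (RETAIN_SUCCESS_LOG) is given.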
setup_environment
while getopts "h?a:b:c:d:e:Ef:i:I:k:l:m:n:oOp:q:rs:S:t:ux" opt; do
case ${opt} in
h|\? )
show_help_and_exit 0
;;
a )
AUTO_RECOVER=${OPTARG}
;;
b )
KUBE_MASTER_ID=${OPTARG}
SKIP_FOLDERS=${SKIP_FOLDERS//k8s/}
;;
c )
TEST_CASES="${TEST_CASES} ${OPTARG}"
;;
d )
DUT_NAME=${OPTARG}
;;
e )
EXTRA_PARAMETERS="${EXTRA_PARAMETERS} ${OPTARG}"
;;
E )
set -e
;;
f )
TESTBED_FILE=${OPTARG}
;;
i )
INVENTORY=${OPTARG}
;;
I )
INCLUDE_FOLDERS="${INCLUDE_FOLDERS} ${OPTARG}"
;;
k )
FILE_LOG_LEVEL=${OPTARG}
;;
l )
CLI_LOG_LEVEL=${OPTARG}
;;
m )
TEST_METHOD=${OPTARG}
;;
n )
TESTBED_NAME=${OPTARG}
;;
o )
OMIT_FILE_LOG="True"
;;
O )
TEST_INPUT_ORDER="True"
;;
p )
LOG_PATH=${OPTARG}
;;
q )
TEST_MAX_FAIL=${OPTARG}
;;
r )
RETAIN_SUCCESS_LOG="True"
;;
s )
SKIP_SCRIPTS="${SKIP_SCRIPTS} ${OPTARG}"
;;
S )
SKIP_FOLDERS="${SKIP_FOLDERS} ${OPTARG}"
;;
t )
TOPOLOGY=${OPTARG}
;;
u )
BYPASS_UTIL="True"
;;
x )
set -x
;;
esac
done
get_dut_from_testbed_file
if [[ x"${TEST_METHOD}" != x"debug" ]]; then
validate_parameters
fi
setup_test_options
if [[ x"${TEST_METHOD}" != x"debug" && x"${BYPASS_UTIL}" == x"False" ]]; then
prepare_dut
fi
RC=0
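# Dispatch on the -m option: TEST_METHOD is one of group|individual|debug, so this
# resolves to run_group_tests, run_individual_tests or run_debug_tests.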
run_${TEST_METHOD}_tests || RC=$?
if [[ x"${TEST_METHOD}" != x"debug" && x"${BYPASS_UTIL}" == x"False" ]]; then
cleanup_dut
fi
exit ${RC}