Skip to content

Commit

Permalink
twister: Add support for Cpputest
Browse files Browse the repository at this point in the history
Similar to gTest, CppuTest is a CPP framework for unit tests.
This commit adds support based on the console output of
a cpputest test suite application.

Signed-off-by: Victor Chavez <vchavezb@protonmail.com>
  • Loading branch information
vChavezB committed Mar 4, 2025
1 parent 5736aed commit 119df95
Show file tree
Hide file tree
Showing 9 changed files with 315 additions and 3 deletions.
25 changes: 23 additions & 2 deletions doc/develop/test/twister.rst
Original file line number Diff line number Diff line change
Expand Up @@ -561,6 +561,7 @@ harness: <string>
- robot
- ctest
- shell
- cpputest

See :ref:`twister_harnesses` for more information.

Expand Down Expand Up @@ -755,8 +756,8 @@ Most everyday users will run with no arguments.
Harnesses
*********

Harnesses ``ztest``, ``gtest`` and ``console`` are based on parsing of the
output and matching certain phrases. ``ztest`` and ``gtest`` harnesses look
Harnesses ``ztest``, ``gtest``, ``console`` and ``cpputest`` are based on parsing of the
output and matching certain phrases. ``ztest``, ``gtest`` and ``cpputest`` harnesses look
for pass/fail/etc. frames defined in those frameworks.

Some widely used harnesses that are not supported yet:
Expand Down Expand Up @@ -802,6 +803,26 @@ Gtest
Use ``gtest`` harness if you've already got tests written in the gTest
framework and do not wish to update them to zTest.

Cpputest
========

``cpputest`` does not output information for each test case result by default. As this
harness is based on parsing the output, it is necessary to enable verbose mode within ``cpputest``:

.. code-block:: cpp

   #include <CppUTest/CommandLineTestRunner.h>
   #include <posix_board_if.h>

   int main(void)
   {
       const char *cppu_test_args[] = {__FILE__, "-v"};
       int num_args = std::size(cppu_test_args);
       int test_Res = CommandLineTestRunner::RunAllTests(num_args, cppu_test_args);
       posix_exit(test_Res);
       return 0;
   }

Pytest
======

Expand Down
80 changes: 80 additions & 0 deletions scripts/pylib/twister/twisterlib/harness.py
Original file line number Diff line number Diff line change
Expand Up @@ -1133,6 +1133,86 @@ def _parse_report_file(self, report):
else:
tc.status = TwisterStatus.PASS

class Cpputest(Harness):
    """Harness for parsing CppUTest console output.

    Expects the application to run the CppUTest runner in verbose mode
    ("-v"), which prints one ``TEST(suite, name)`` frame per test case,
    ``Failure in TEST(suite, name)`` for each failure, and a final
    ``OK (...)`` / ``Errors (...)`` summary line.
    """

    # A test-start frame. The negative lookbehind prevents failure frames
    # ("Failure in TEST(...)") from also matching as a test start.
    TEST_START_PATTERN = r".*(?<!Failure in )TEST\((?P<suite_name>[^,]+), (?P<test_name>[^\)]+)\)"
    TEST_FAIL_PATTERN = r".*Failure in TEST\((?P<suite_name>[^,]+), (?P<test_name>[^\)]+)\).*"
    # Final summary line emitted when the whole run finishes.
    # (Split to satisfy the 100-column limit flagged by ruff E501.)
    FINISHED_PATTERN = (
        r".*(OK|Errors) "
        r"\(\d+ tests, \d+ ran, \d+ checks, \d+ ignored, \d+ filtered out, \d+ ms\)"
    )

    def __init__(self):
        super().__init__()
        self.tc = None            # test case currently being collected
        self.has_failures = False
        self.started = False      # True once the first output line was seen

    def handle(self, line):
        """Process a single line of console output."""
        if not self.started:
            # Discard any pre-declared test cases; they are re-created
            # from the actual console output below.
            self.instance.testcases = []
            self.started = True
        if self.status != TwisterStatus.NONE:
            return

        # Check if a new test starts
        test_start_match = re.search(self.TEST_START_PATTERN, line)
        if test_start_match:
            # If a new test starts and there is an unfinished test, mark it as passed
            if self.tc is not None:
                self.tc.status = TwisterStatus.PASS
                self.tc.output = self.testcase_output
                self.testcase_output = ""
                self.tc = None

            suite_name = test_start_match.group("suite_name")
            test_name = test_start_match.group("test_name")
            if suite_name not in self.detected_suite_names:
                self.detected_suite_names.append(suite_name)

            # f-strings instead of str.format (ruff UP032)
            name = f"{self.id}.{suite_name}.{test_name}"

            tc = self.instance.get_case_by_name(name)
            assert tc is None, f"CppUTest error, {name} running twice"

            tc = self.instance.get_case_or_create(name)
            self.tc = tc
            self.tc.status = TwisterStatus.STARTED
            self.testcase_output += line + "\n"
            self._match = True

        # Check if a test failure occurred
        test_fail_match = re.search(self.TEST_FAIL_PATTERN, line)
        if test_fail_match:
            suite_name = test_fail_match.group("suite_name")
            test_name = test_fail_match.group("test_name")
            name = f"{self.id}.{suite_name}.{test_name}"

            tc = self.instance.get_case_by_name(name)
            if tc is not None:
                tc.status = TwisterStatus.FAIL
                self.has_failures = True
                tc.output = self.testcase_output
                self.testcase_output = ""
                self.tc = None
            return

        # Check if the test run finished
        finished_match = re.search(self.FINISHED_PATTERN, line)
        if finished_match:
            # No need to check result if previously there was a failure
            # or no tests were run
            if self.has_failures or self.tc is None:
                return

            tc = self.instance.get_case_or_create(self.tc.name)

            finish_result = finished_match.group(1)
            if finish_result == "OK":
                self.status = TwisterStatus.PASS
                tc.status = TwisterStatus.PASS
            else:
                self.status = TwisterStatus.FAIL
                tc.status = TwisterStatus.FAIL
            return


class HarnessImporter:

@staticmethod
Expand Down
3 changes: 2 additions & 1 deletion scripts/pylib/twister/twisterlib/testinstance.py
Original file line number Diff line number Diff line change
Expand Up @@ -226,7 +226,8 @@ def testsuite_runnable(testsuite, fixtures):
'gtest',
'robot',
'ctest',
'shell'
'shell',
'cpputest'
]:
can_run = True
# if we have a fixture that is also being supplied on the
Expand Down
130 changes: 130 additions & 0 deletions scripts/tests/twister/test_harness.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@
Bsim,
Console,
Gtest,
Cpputest,
Harness,
HarnessImporter,
Pytest,
Expand Down Expand Up @@ -54,6 +55,17 @@
"[00:00:00.000,000] <inf> label: [----------] Global test environment tear-down"
)

# Console-output fixtures emulating a CppUTest run in verbose ("-v") mode.
SAMPLE_CPPUTEST_NO_TESTS = (
    "Errors (ran nothing, 0 tests, 0 ran, 0 checks, 0 ignored, 0 filtered out, 0 ms)")
SAMPLE_CPPUTEST_START_FMT = "[00:00:00.000,000] <inf> label: TEST({suite}, {test})"
SAMPLE_CPPUTEST_END_PASS_FMT = "[00:00:00.000,000] <inf> label: OK ({tests} tests" \
    ", {ran} ran, {checks} checks, {ignored} ignored," \
    " {filtered} filtered out, {time} ms)"
SAMPLE_CPPUTEST_FAIL_FMT = "[00:00:00.000,000] <inf> label: Failure in TEST({suite}, {test})"
# Fix: real CppUTest prints "Errors (N failures, ..." with a space after
# "Errors", matching FINISHED_PATTERN's "Errors \(" — the sample previously
# said "Errors(" and so did not emulate actual runner output.
SAMPLE_CPPUTEST_END_FAIL_FMT = "[00:00:00.000,000] <inf> label: Errors ({failures} failures" \
    ", {tests} tests, {ran} ran, {checks} checks, {ignored} ignored," \
    " {filtered} filtered out, {time} ms)"


def process_logs(harness, logs):
for line in logs:
Expand Down Expand Up @@ -1209,6 +1221,124 @@ def test_gtest_repeated_run(gtest):
],
)

@pytest.fixture
def cpputest(tmp_path):
    """Provide a ``Cpputest`` harness configured against mock objects."""
    platform = mock.Mock()
    platform.name = "mock_platform"
    platform.normalized_name = "mock_platform"

    testsuite = mock.Mock()
    testsuite.name = "mock_testsuite"
    testsuite.detailed_test_id = True
    testsuite.id = "id"
    testsuite.testcases = []
    testsuite.harness_config = {}

    out_dir = tmp_path / 'cpputest_out'
    out_dir.mkdir()

    instance = TestInstance(
        testsuite=testsuite, platform=platform, toolchain='zephyr', outdir=out_dir
    )

    harness = Cpputest()
    harness.configure(instance)
    return harness


def test_cpputest_start_test_no_suites_detected(cpputest):
    """Only the 'ran nothing' summary appears: no suites, no overall status."""
    process_logs(cpputest, [SAMPLE_CPPUTEST_NO_TESTS])
    assert not cpputest.detected_suite_names
    assert cpputest.status == TwisterStatus.NONE


def test_cpputest_start_test(cpputest):
    """A single TEST(...) frame registers the suite and a STARTED test case."""
    start_line = SAMPLE_CPPUTEST_START_FMT.format(suite="suite_name", test="test_name")
    process_logs(cpputest, [start_line])

    assert cpputest.status == TwisterStatus.NONE
    assert cpputest.detected_suite_names == ["suite_name"]
    started_tc = cpputest.instance.get_case_by_name("id.suite_name.test_name")
    assert started_tc is not None
    assert started_tc.status == TwisterStatus.STARTED


def test_cpputest_one_test_passed(cpputest):
    """An OK summary after a started test marks that test case PASS."""
    process_logs(
        cpputest,
        [
            SAMPLE_CPPUTEST_START_FMT.format(
                suite="suite_name", test="test_name"
            ),
            SAMPLE_CPPUTEST_END_PASS_FMT.format(
                tests=1, ran=1, checks=5, ignored=0, filtered=0, time=10
            )
        ],
    )
    assert len(cpputest.detected_suite_names) == 1
    assert cpputest.detected_suite_names[0] == "suite_name"
    tc = cpputest.instance.get_case_by_name("id.suite_name.test_name")
    # Bug fix: the original compared the test-case object against
    # TwisterStatus.NONE (always true); check for existence instead.
    assert tc is not None
    assert tc.status == TwisterStatus.PASS


def test_cpputest_multiple_test_passed(cpputest):
    """Several sequential TEST frames followed by OK are all marked PASS."""
    total_passed_tests = 5
    # f-strings instead of percent formatting (ruff UP031)
    logs = [
        SAMPLE_CPPUTEST_START_FMT.format(suite="suite_name", test=f"test_name_{i}")
        for i in range(total_passed_tests)
    ]
    logs.append(SAMPLE_CPPUTEST_END_PASS_FMT.format(
        tests=total_passed_tests, ran=total_passed_tests, checks=5, ignored=0, filtered=0, time=10
    ))
    process_logs(cpputest, logs)
    assert len(cpputest.detected_suite_names) == 1
    assert cpputest.detected_suite_names[0] == "suite_name"
    for i in range(total_passed_tests):
        tc = cpputest.instance.get_case_by_name(f"id.suite_name.test_name_{i}")
        # Bug fix: original compared the test-case object against
        # TwisterStatus.NONE (always true); check for existence instead.
        assert tc is not None
        assert tc.status == TwisterStatus.PASS


def test_cpputest_test_failed(cpputest):
    """A Failure frame marks the test FAIL; overall status stays NONE."""
    process_logs(
        cpputest,
        [
            SAMPLE_CPPUTEST_START_FMT.format(
                suite="suite_name", test="test_name"
            ),
            SAMPLE_CPPUTEST_FAIL_FMT.format(
                suite="suite_name", test="test_name"
            )
        ],
    )
    assert cpputest.status == TwisterStatus.NONE
    assert len(cpputest.detected_suite_names) == 1
    assert cpputest.detected_suite_names[0] == "suite_name"
    tc = cpputest.instance.get_case_by_name("id.suite_name.test_name")
    # Bug fix: the original compared the test-case object against
    # TwisterStatus.NONE (always true); check for existence instead.
    assert tc is not None
    assert tc.status == TwisterStatus.FAIL


def test_cpputest_test_repeated(cpputest):
    """Seeing the same TEST(suite, name) frame twice is a harness error."""
    start_line = SAMPLE_CPPUTEST_START_FMT.format(suite="suite_name", test="test_name")
    with pytest.raises(
        AssertionError,
        match=r"CppUTest error, id.suite_name.test_name running twice",
    ):
        process_logs(cpputest, [start_line, start_line])

def test_bsim_build(monkeypatch, tmp_path):
mocked_instance = mock.Mock()
Expand Down
25 changes: 25 additions & 0 deletions tests/cpputest/base/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
# Copyright (c) 2025, Victor Chavez (vchavezb@protonmail.com)
# SPDX-License-Identifier: Apache-2.0

# Twister integration test application exercising the "cpputest" harness.
cmake_minimum_required(VERSION 3.20.0)
find_package(Zephyr REQUIRED HINTS $ENV{ZEPHYR_BASE})
project(cpputest_sample)

# Fetch CppUTest v4.0 from upstream at configure time.
include(FetchContent)
FetchContent_Declare(
CppUTest
GIT_REPOSITORY https://github.com/cpputest/cpputest.git
GIT_TAG v4.0
)
# Do not build CppUTest's own self-test suite.
set(TESTS OFF CACHE BOOL "Switch off CppUTest Test build")

FetchContent_MakeAvailable(CppUTest)


target_sources(app PRIVATE src/main.cpp
src/test_suite.cpp
)

# Link the CppUTest core and extensions (mocking) libraries.
target_link_libraries(app PRIVATE
CppUTest
CppUTestExt)
4 changes: 4 additions & 0 deletions tests/cpputest/base/prj.conf
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
# C++ support with a full standard library (required by CppUTest).
CONFIG_CPP=y
CONFIG_LOG=y
CONFIG_STD_CPP17=y
CONFIG_REQUIRES_FULL_LIBCPP=y
20 changes: 20 additions & 0 deletions tests/cpputest/base/src/main.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
/*
* Copyright (c) 2025, Victor Chavez (vchavezb@protonmail.com)
* SPDX-License-Identifier: Apache-2.0
*/
#include <CppUTest/CommandLineTestRunner.h>
#include <posix_board_if.h> // posix_exit

int main(void)
{
	/* Run the CppUTest runner in verbose mode (-v) so that every
	 * test name is printed; by default only a dot appears per test,
	 * which the twister harness cannot parse. -c enables colored
	 * output. __FILE__ stands in as argv[0].
	 */
	const char *runner_argv[] = {__FILE__, "-v", "-c"};
	const int runner_argc = std::size(runner_argv);
	const int result = CommandLineTestRunner::RunAllTests(runner_argc, runner_argv);
	/* Terminate the simulated board immediately; otherwise the Zephyr
	 * idle thread keeps running after main returns.
	 */
	posix_exit(result);
	return 0;
}
22 changes: 22 additions & 0 deletions tests/cpputest/base/src/test_suite.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
/*
* Copyright (c) 2025, Victor Chavez (vchavezb@protonmail.com)
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr/logging/log.h>
#include <CppUTest/CommandLineTestRunner.h>
#include <CppUTestExt/MockSupport.h>

LOG_MODULE_REGISTER(test_suite, CONFIG_LOG_DEFAULT_LEVEL);

/* Minimal CppUTest group used to exercise the twister "cpputest" harness. */
TEST_GROUP(my_test_group) {
/* Per-test setup/teardown hooks; intentionally empty. */
void setup() final
{
}
void teardown() final
{
}
};

/* Intentionally empty test body: a test that starts and records no
 * failures is reported as passed by the harness.
 */
TEST(my_test_group, test_1) {

}
9 changes: 9 additions & 0 deletions tests/cpputest/base/testcase.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
# Twister test definition for the CppUTest harness integration test.
common:
  tags:
    - test_framework

tests:
  base.my_test_group:
    # Console parsing relies on the POSIX (native_sim) exit path in main().
    platform_allow:
      - native_sim/native/64
    harness: "cpputest"

0 comments on commit 119df95

Please sign in to comment.