
Commit f69da1d

Author: Kernel Patches Daemon
Message: adding ci files
Parent: 7625645

62 files changed: +5123, -18 lines

@@ -0,0 +1,49 @@
name: 'run-veristat'
description: 'Run veristat benchmark'
inputs:
  veristat_output:
    description: 'Veristat output filepath'
    required: true
  baseline_name:
    description: 'Veristat baseline cache name'
    required: true
runs:
  using: "composite"
  steps:
    - uses: actions/upload-artifact@v4
      with:
        name: ${{ inputs.baseline_name }}
        if-no-files-found: error
        path: ${{ github.workspace }}/${{ inputs.veristat_output }}

    # For pull requests:
    # - get the baseline log from the cache
    # - compare it to the current run
    - if: ${{ github.event_name == 'pull_request' }}
      uses: actions/cache/restore@v4
      with:
        key: ${{ inputs.baseline_name }}-${{ github.base_ref }}
        restore-keys: |
          ${{ inputs.baseline_name }}-
        path: '${{ github.workspace }}/${{ inputs.baseline_name }}'

    - if: ${{ github.event_name == 'pull_request' }}
      name: Show veristat comparison
      shell: bash
      run: ./.github/scripts/compare-veristat-results.sh
      env:
        BASELINE_PATH: ${{ github.workspace }}/${{ inputs.baseline_name }}
        VERISTAT_OUTPUT: ${{ inputs.veristat_output }}

    # For pushes: just put the baseline log into the cache
    - if: ${{ github.event_name == 'push' }}
      shell: bash
      run: |
        mv "${{ github.workspace }}/${{ inputs.veristat_output }}" \
           "${{ github.workspace }}/${{ inputs.baseline_name }}"

    - if: ${{ github.event_name == 'push' }}
      uses: actions/cache/save@v4
      with:
        key: ${{ inputs.baseline_name }}-${{ github.ref_name }}-${{ github.run_id }}
        path: '${{ github.workspace }}/${{ inputs.baseline_name }}'
.github/scripts/compare-veristat-results.sh

+18
@@ -0,0 +1,18 @@
#!/bin/bash

if [[ ! -f "${BASELINE_PATH}" ]]; then
    echo "# No ${BASELINE_PATH} available" >> "${GITHUB_STEP_SUMMARY}"

    echo "No ${BASELINE_PATH} available"
    echo "Printing veristat results"
    cat "${VERISTAT_OUTPUT}"

    exit
fi

selftests/bpf/veristat \
    --output-format csv \
    --emit file,prog,verdict,states \
    --compare "${BASELINE_PATH}" "${VERISTAT_OUTPUT}" > compare.csv

python3 ./.github/scripts/veristat_compare.py compare.csv
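For local debugging, the comparison script can be exercised outside CI by exporting the same environment variables the run-veristat action sets. A minimal sketch with placeholder file names, assuming a built selftests/bpf/veristat binary relative to the working directory:

# Hypothetical manual run; the run-veristat action normally sets
# BASELINE_PATH and VERISTAT_OUTPUT, and GitHub Actions provides
# GITHUB_STEP_SUMMARY (redirected to stdout here for local use).
export BASELINE_PATH="$PWD/baseline-veristat.log"    # placeholder path
export VERISTAT_OUTPUT="$PWD/veristat-output.log"    # placeholder path
export GITHUB_STEP_SUMMARY=/dev/stdout
./.github/scripts/compare-veristat-results.sh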

.github/scripts/download-gcc-bpf.sh

+30
@@ -0,0 +1,30 @@
#!/bin/bash

set -euo pipefail

GCC_BPF_RELEASE_GH_REPO=$1
INSTALL_DIR=$(realpath $2)

cd /tmp

tag=$(gh release list -L 1 -R ${GCC_BPF_RELEASE_GH_REPO} --json tagName -q .[].tagName)
if [[ -z "$tag" ]]; then
    echo "Could not find latest GCC BPF release at ${GCC_BPF_RELEASE_GH_REPO}"
    exit 1
fi

url="https://github.com/${GCC_BPF_RELEASE_GH_REPO}/releases/download/${tag}/${tag}.tar.zst"
echo "Downloading $url"
wget -q "$url"

tarball=${tag}.tar.zst
dir=$(tar tf $tarball | head -1 || true)

echo "Extracting $tarball ..."
tar -I zstd -xf $tarball && rm -f $tarball

rm -rf $INSTALL_DIR
mv -v $dir $INSTALL_DIR

cd -
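
The script takes the source GitHub repository and an install directory as positional arguments. A hypothetical invocation (the repository slug is a placeholder), assuming an authenticated gh CLI and zstd are installed:

# Download the newest release tarball from the given repo and unpack it
# into /tmp/gcc-bpf; "example-org/gcc-bpf-builds" is a placeholder slug.
./.github/scripts/download-gcc-bpf.sh example-org/gcc-bpf-builds /tmp/gcc-bpf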

.github/scripts/matrix.py

+278
@@ -0,0 +1,278 @@
#!/usr/bin/env python3

import dataclasses
import json
import os

from enum import Enum
from typing import Any, Dict, Final, List, Optional, Set, Union

import requests

MANAGED_OWNER: Final[str] = "kernel-patches"
MANAGED_REPOS: Final[Set[str]] = {
    f"{MANAGED_OWNER}/bpf",
    f"{MANAGED_OWNER}/vmtest",
}

DEFAULT_SELF_HOSTED_RUNNER_TAGS: Final[List[str]] = ["self-hosted", "docker-noble-main"]
DEFAULT_GITHUB_HOSTED_RUNNER: Final[str] = "ubuntu-24.04"
DEFAULT_GCC_VERSION: Final[int] = 14
DEFAULT_LLVM_VERSION: Final[int] = 20

RUNNERS_BUSY_THRESHOLD: Final[float] = 0.8


class Arch(str, Enum):
    """CPU architecture supported by CI."""

    AARCH64 = "aarch64"
    S390X = "s390x"
    X86_64 = "x86_64"


class Compiler(str, Enum):
    GCC = "gcc"
    LLVM = "llvm"


def query_runners_from_github() -> List[Dict[str, Any]]:
    if "GITHUB_TOKEN" not in os.environ:
        return []
    token = os.environ["GITHUB_TOKEN"]
    headers = {
        "Authorization": f"token {token}",
        "Accept": "application/vnd.github.v3+json",
    }
    owner = os.environ["GITHUB_REPOSITORY_OWNER"]
    url: Optional[str] = f"https://api.github.com/orgs/{owner}/actions/runners"
    # GitHub returns 30 runners per page; fetch all of them
    all_runners = []
    try:
        while url is not None:
            response = requests.get(url, headers=headers)
            if response.status_code != 200:
                print(f"Failed to query runners: {response.status_code}")
                print(f"response: {response.text}")
                return []
            data = response.json()
            all_runners.extend(data.get("runners", []))
            # Check for a next-page URL in the Link header
            url = None
            if "Link" in response.headers:
                links = requests.utils.parse_header_links(response.headers["Link"])
                for link in links:
                    if link["rel"] == "next":
                        url = link["url"]
                        break
        return all_runners
    except Exception as e:
        print(f"Warning: Failed to query runner status due to exception: {e}")
        return []


all_runners_cached: Optional[List[Dict[str, Any]]] = None


def all_runners() -> List[Dict[str, Any]]:
    global all_runners_cached
    if all_runners_cached is None:
        print("Querying runners from GitHub...")
        all_runners_cached = query_runners_from_github()
        print(f"GitHub returned {len(all_runners_cached)} runners")
        counts = count_by_status(all_runners_cached)
        print(
            f"Busy: {counts['busy']}, Idle: {counts['idle']}, Offline: {counts['offline']}"
        )
    return all_runners_cached


def runner_labels(runner: Dict[str, Any]) -> List[str]:
    return [label["name"] for label in runner["labels"]]


def is_self_hosted_runner(runner: Dict[str, Any]) -> bool:
    labels = runner_labels(runner)
    for label in DEFAULT_SELF_HOSTED_RUNNER_TAGS:
        if label not in labels:
            return False
    return True


def self_hosted_runners() -> List[Dict[str, Any]]:
    runners = all_runners()
    return [r for r in runners if is_self_hosted_runner(r)]


def runners_by_arch(arch: Arch) -> List[Dict[str, Any]]:
    runners = self_hosted_runners()
    return [r for r in runners if arch.value in runner_labels(r)]


def count_by_status(runners: List[Dict[str, Any]]) -> Dict[str, int]:
    result = {"busy": 0, "idle": 0, "offline": 0}
    for runner in runners:
        if runner["status"] == "online":
            if runner["busy"]:
                result["busy"] += 1
            else:
                result["idle"] += 1
        else:
            result["offline"] += 1
    return result


@dataclasses.dataclass
class BuildConfig:
    arch: Arch
    kernel_compiler: Compiler = Compiler.GCC
    gcc_version: int = DEFAULT_GCC_VERSION
    llvm_version: int = DEFAULT_LLVM_VERSION
    kernel: str = "LATEST"
    run_veristat: bool = False
    parallel_tests: bool = False
    build_release: bool = False

    @property
    def runs_on(self) -> List[str]:
        if is_managed_repo():
            return DEFAULT_SELF_HOSTED_RUNNER_TAGS + [self.arch.value]
        else:
            return [DEFAULT_GITHUB_HOSTED_RUNNER]

    @property
    def build_runs_on(self) -> List[str]:
        if not is_managed_repo():
            return [DEFAULT_GITHUB_HOSTED_RUNNER]

        # @Temporary: disable codebuild runners for cross-compilation jobs
        match self.arch:
            case Arch.S390X:
                return DEFAULT_SELF_HOSTED_RUNNER_TAGS + [Arch.X86_64.value]
            case Arch.AARCH64:
                return DEFAULT_SELF_HOSTED_RUNNER_TAGS + [Arch.AARCH64.value]

        # For managed repos, check the busyness of the relevant self-hosted
        # runners. If they are too busy, use codebuild.
        runner_arch = self.arch
        # We don't build the s390x kernel on s390x runners, because it's too
        # slow; cross-compiling on x86_64 is faster.
        if runner_arch == Arch.S390X:
            runner_arch = Arch.X86_64
        runners = runners_by_arch(runner_arch)
        counts = count_by_status(runners)
        online = counts["idle"] + counts["busy"]
        busy = counts["busy"]
        # if online <= 0, then something is wrong, don't use codebuild
        if online > 0 and busy / online > RUNNERS_BUSY_THRESHOLD:
            return ["codebuild"]
        else:
            return DEFAULT_SELF_HOSTED_RUNNER_TAGS + [runner_arch.value]

    @property
    def tests(self) -> Dict[str, Any]:
        tests_list = [
            "test_progs",
            "test_progs_parallel",
            "test_progs_no_alu32",
            "test_progs_no_alu32_parallel",
            "test_verifier",
        ]

        if self.arch.value != "s390x":
            tests_list.append("test_maps")

        if self.llvm_version >= 18:
            tests_list.append("test_progs_cpuv4")

        # if self.arch in [Arch.X86_64, Arch.AARCH64]:
        #     tests_list.append("sched_ext")

        # Don't run the GCC BPF runner, because too many tests are failing
        # See: https://lore.kernel.org/bpf/87bjw6qpje.fsf@oracle.com/
        # if self.arch == Arch.X86_64:
        #     tests_list.append("test_progs-bpf_gcc")

        if not self.parallel_tests:
            tests_list = [test for test in tests_list if not test.endswith("parallel")]

        return {"include": [generate_test_config(test) for test in tests_list]}

    def to_dict(self) -> Dict[str, Any]:
        return {
            "arch": self.arch.value,
            "kernel_compiler": self.kernel_compiler.value,
            "gcc_version": self.gcc_version,
            "llvm_version": self.llvm_version,
            "kernel": self.kernel,
            "run_veristat": self.run_veristat,
            "parallel_tests": self.parallel_tests,
            "build_release": self.build_release,
            "runs_on": self.runs_on,
            "tests": self.tests,
            "build_runs_on": self.build_runs_on,
        }


def is_managed_repo() -> bool:
    return (
        os.environ["GITHUB_REPOSITORY_OWNER"] == MANAGED_OWNER
        and os.environ["GITHUB_REPOSITORY"] in MANAGED_REPOS
    )


def set_output(name, value):
    """Write an output variable to the GitHub output file."""
    with open(os.getenv("GITHUB_OUTPUT"), "a", encoding="utf-8") as file:
        file.write(f"{name}={value}\n")


def generate_test_config(test: str) -> Dict[str, Union[str, int]]:
    """Create the configuration for the provided test."""
    is_parallel = test.endswith("_parallel")
    config = {
        "test": test,
        "continue_on_error": is_parallel,
        # While in experimental mode, parallel jobs may get stuck
        # anywhere, including in user space where the kernel won't detect
        # a problem and panic. We add a second layer of (smaller) timeouts
        # here such that if we get stuck in a parallel run, we hit this
        # timeout and fail without affecting the overall job success (as
        # would be the case if we hit the job-wide timeout). For
        # non-experimental jobs, 360 is the default which will be
        # superseded by the overall workflow timeout (but we need to
        # specify something).
        "timeout_minutes": 30 if is_parallel else 360,
    }
    return config


if __name__ == "__main__":
    matrix = [
        BuildConfig(
            arch=Arch.X86_64,
            run_veristat=True,
            parallel_tests=True,
        ),
        BuildConfig(
            arch=Arch.X86_64,
            kernel_compiler=Compiler.LLVM,
            build_release=True,
        ),
        BuildConfig(
            arch=Arch.AARCH64,
        ),
        BuildConfig(
            arch=Arch.S390X,
        ),
    ]

    # Outside of managed repositories, only run on x86_64
    if not is_managed_repo():
        matrix = [config for config in matrix if config.arch == Arch.X86_64]

    json_matrix = json.dumps({"include": [config.to_dict() for config in matrix]})
    print(json.dumps(json.loads(json_matrix), indent=4))
    set_output("build_matrix", json_matrix)
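
Because the script only reads a few GitHub-provided environment variables, the generated matrix can be previewed locally. A minimal sketch with placeholder values: an owner other than kernel-patches exercises the unmanaged-repo path, and without GITHUB_TOKEN the runner query is skipped entirely:

# Hypothetical local dry run; all values below are placeholders.
export GITHUB_REPOSITORY_OWNER=example-user
export GITHUB_REPOSITORY=example-user/bpf
export GITHUB_OUTPUT=/tmp/gh_output
python3 .github/scripts/matrix.py    # prints the matrix as indented JSON
cat "$GITHUB_OUTPUT"                 # build_matrix=<one-line JSON>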
