forked from kernel-patches/bpf
Showing 31 changed files with 2,252 additions and 18 deletions.
veristat-compare.py (new file):
@@ -0,0 +1,202 @@
#!/usr/bin/env python3

# This script reads a CSV file produced by the following invocation:
#
#     veristat --emit file,prog,verdict,states \
#              --output-format csv \
#              --compare ...
#
# and produces a markdown summary for the file.
# The summary is printed to standard output and appended to a file
# pointed to by the GITHUB_STEP_SUMMARY environment variable.
#
# The script exits with return code 1 if there are new failures in
# the veristat results.
#
# For testing purposes invoke as follows:
#
#     GITHUB_STEP_SUMMARY=/dev/null python3 veristat-compare.py test.csv
#
# File format (columns):
# 0. file_name
# 1. prog_name
# 2. verdict_base
# 3. verdict_comp
# 4. verdict_diff
# 5. total_states_base
# 6. total_states_comp
# 7. total_states_diff
#
# Sample records:
#
#     file-a,a,success,failure,MISMATCH,12,12,+0 (+0.00%)
#     file-b,b,success,success,MATCH,67,67,+0 (+0.00%)
#
# For better readability the suffixes '_OLD' and '_NEW' are used in
# variable names etc. instead of '_base' and '_comp'.

import io
import os
import sys
import csv
import logging
import argparse
from functools import reduce
from dataclasses import dataclass

# Relative change (in percent) in the number of processed states above
# which a row is included in the summary table.
THRESHOLD_PCT = 0

HEADERS = ['file_name', 'prog_name', 'verdict_base', 'verdict_comp',
           'verdict_diff', 'total_states_base', 'total_states_comp',
           'total_states_diff']

# Column indices within a CSV record.
FILE = 0
PROG = 1
VERDICT_OLD = 2
VERDICT_NEW = 3
STATES_OLD = 5
STATES_NEW = 6

# Given a table row, compute the relative increase in the number of
# processed states.
def compute_diff(v):
    old = int(v[STATES_OLD]) if v[STATES_OLD] != 'N/A' else 0
    new = int(v[STATES_NEW]) if v[STATES_NEW] != 'N/A' else 0
    if old == 0:
        return 1
    return (new - old) / old
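# For example, 12 -> 18 processed states yields 0.5 (reported as +50.0 %),
# while a program with no baseline data ('N/A' -> anything) falls into the
# old == 0 case and is reported as a +100.0 % increase.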

@dataclass
class VeristatInfo:
    table: list
    changes: bool
    new_failures: bool

# Read a CSV table in the format described above.
# Return a VeristatInfo instance.
def parse_table(csv_filename):
    new_failures = False
    changes = False
    table = []

    with open(csv_filename, newline='') as file:
        reader = csv.reader(file)
        headers = next(reader)
        if headers != HEADERS:
            raise Exception(f'Unexpected table header for {csv_filename}: {headers}')

        for v in reader:
            add = False
            verdict = v[VERDICT_NEW]
            diff = compute_diff(v)

            if v[VERDICT_OLD] != v[VERDICT_NEW]:
                changes = True
                add = True
                verdict = f'{v[VERDICT_OLD]} -> {v[VERDICT_NEW]}'
                if v[VERDICT_NEW] == 'failure':
                    new_failures = True
                    verdict += ' (!!)'

            if abs(diff * 100) > THRESHOLD_PCT:
                changes = True
                add = True

            if not add:
                continue

            diff_txt = '{:+.1f} %'.format(diff * 100)
            table.append([v[FILE], v[PROG], verdict, diff_txt])

    return VeristatInfo(table=table,
                        changes=changes,
                        new_failures=new_failures)

def format_table(headers, rows, html_mode):
    def decorate(val, width):
        s = str(val)
        if html_mode:
            s = s.replace(' -> ', ' → ')
            s = s.replace(' (!!)', ' :bangbang: ')
        return s.ljust(width)

    column_widths = list(reduce(lambda acc, row: map(max, map(len, row), acc),
                                rows,
                                map(len, headers)))

    with io.StringIO() as out:
        def print_row(row):
            out.write('| ')
            out.write(' | '.join(map(decorate, row, column_widths)))
            out.write(' |\n')

        print_row(headers)

        out.write('|')
        out.write('|'.join(map(lambda w: '-' * (w + 2), column_widths)))
        out.write('|\n')

        for row in rows:
            print_row(row)

        return out.getvalue()

def format_section_name(info):
    if info.new_failures:
        return 'There are new veristat failures'
    if info.changes:
        return 'There are changes in verification performance'
    return 'No changes in verification performance'
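# For reference, format_table renders rows in markdown pipe-table form,
# e.g. (illustrative):
#
#   | File   | Program | Verdict                 | States Diff (%) |
#   |--------|---------|-------------------------|-----------------|
#   | file-a | a       | success -> failure (!!) | +0.0 %          |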

SUMMARY_HEADERS = ['File', 'Program', 'Verdict', 'States Diff (%)']

def format_html_summary(info):
    section_name = format_section_name(info)
    if not info.table:
        return f'# {section_name}\n'

    table = format_table(SUMMARY_HEADERS, info.table, True)
    return f'''
# {section_name}
<details>
<summary>Click to expand</summary>
{table}
</details>
'''.lstrip()

def format_text_summary(info):
    section_name = format_section_name(info)
    if not info.table:
        return f'# {section_name}\n'

    table = format_table(SUMMARY_HEADERS, info.table, False)
    return f'''
# {section_name}
{table}
'''.lstrip()

def main(compare_csv_filename, summary_filename):
    info = parse_table(compare_csv_filename)
    sys.stdout.write(format_text_summary(info))
    with open(summary_filename, 'a') as f:
        f.write(format_html_summary(info))

    if info.new_failures:
        return 1

    return 0

if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Print veristat comparison output as a markdown step summary'
    )
    parser.add_argument('filename')
    args = parser.parse_args()
    summary_filename = os.getenv('GITHUB_STEP_SUMMARY')
    if not summary_filename:
        logging.error('GITHUB_STEP_SUMMARY environment variable is not set')
        sys.exit(1)
    sys.exit(main(args.filename, summary_filename))
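For a quick end-to-end check, the script can be driven the same way the
test invocation in the header comment describes. A minimal sketch (the
CSV rows are the sample records from the header comment; the subprocess
harness is illustrative, not part of the commit):

    import os
    import subprocess

    # Build a small input in the documented CSV format.
    with open('test.csv', 'w') as f:
        f.write('file_name,prog_name,verdict_base,verdict_comp,verdict_diff,'
                'total_states_base,total_states_comp,total_states_diff\n')
        f.write('file-a,a,success,failure,MISMATCH,12,12,+0 (+0.00%)\n')
        f.write('file-b,b,success,success,MATCH,67,67,+0 (+0.00%)\n')

    # The MISMATCH row is a new failure, so the script should print the
    # summary and exit with return code 1.
    result = subprocess.run(
        ['python3', 'veristat-compare.py', 'test.csv'],
        env={**os.environ, 'GITHUB_STEP_SUMMARY': '/dev/null'})
    assert result.returncode == 1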
lint workflow (new file):
@@ -0,0 +1,22 @@
name: "lint" | ||
|
||
on: | ||
pull_request: | ||
push: | ||
branches: | ||
- master | ||
|
||
jobs: | ||
shellcheck: | ||
# This workflow gets injected into other Linux repositories, but we don't | ||
# want it to run there. | ||
if: ${{ github.repository == 'kernel-patches/vmtest' }} | ||
name: ShellCheck | ||
runs-on: ubuntu-latest | ||
steps: | ||
- name: Checkout repository | ||
uses: actions/checkout@v3 | ||
- name: Run ShellCheck | ||
uses: ludeeus/action-shellcheck@master | ||
env: | ||
SHELLCHECK_OPTS: --severity=warning --exclude=SC1091 |
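The SHELLCHECK_OPTS environment variable limits findings to warning severity and above and suppresses SC1091 ("Not following"), which would otherwise fire wherever a sourced file is not available for analysis.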
bpf-ci workflow (new file):
@@ -0,0 +1,169 @@
name: bpf-ci

on:
  pull_request:
  push:
    branches:
      - bpf_base
      - bpf-next_base

env:
  veristat_arch: x86_64
  veristat_toolchain: gcc

concurrency:
  group: ci-test-${{ github.ref_name }}
  cancel-in-progress: true

jobs:

  set-matrix:
    # FIXME: set-matrix is lightweight; run it on any self-hosted machine in
    # the kernel-patches org so we do not wait for GH hosted runners when they
    # are potentially all busy, e.g. because of the bpf-rc repo.
    # Long term this could be fixed by making this action/workflow reusable
    # and letting the caller specify what to run on.
    runs-on: ${{ github.repository_owner == 'kernel-patches' && 'x86_64' || 'ubuntu-latest' }}
    outputs:
      build-matrix: ${{ steps.set-matrix-impl.outputs.build_matrix }}
      test-matrix: ${{ steps.set-matrix-impl.outputs.test_matrix }}
      veristat-runs-on: ${{ steps.set-matrix-impl.outputs.veristat_runs_on }}
    steps:
      - id: set-matrix-impl
        shell: python3 -I {0}
        run: |
          from json import dumps
          from enum import Enum
          import os
          class Arch(Enum):
              """
              CPU architecture supported by CI.
              """
              aarch64 = "aarch64"
              s390x = "s390x"
              x86_64 = "x86_64"

          def set_output(name, value):
              """Write an output variable to the GitHub output file."""
              with open(os.getenv("GITHUB_OUTPUT"), "a") as f:
                  f.write(f"{name}={value}\n")

          def generate_test_config(test):
              """Create the configuration for the provided test."""
              experimental = test.endswith("_parallel")
              config = {
                  "test": test,
                  "continue_on_error": experimental,
                  # While in experimental mode, parallel jobs may get stuck
                  # anywhere, including in user space, where the kernel won't
                  # detect a problem and panic. We add a second layer of
                  # (smaller) timeouts here such that if we get stuck in a
                  # parallel run, we hit this timeout and fail without
                  # affecting the overall job success (as would be the case
                  # if we hit the job-wide timeout). For non-experimental
                  # jobs, 360 is the default, which will be superseded by
                  # the overall workflow timeout (but we need to specify
                  # something).
                  "timeout_minutes": 30 if experimental else 360,
              }
              return config

          matrix = [
              {"kernel": "LATEST", "runs_on": [], "arch": Arch.x86_64.value, "toolchain": "gcc", "llvm-version": "16"},
              {"kernel": "LATEST", "runs_on": [], "arch": Arch.x86_64.value, "toolchain": "llvm", "llvm-version": "16"},
              {"kernel": "LATEST", "runs_on": [], "arch": Arch.aarch64.value, "toolchain": "gcc", "llvm-version": "16"},
              # {"kernel": "LATEST", "runs_on": [], "arch": Arch.aarch64.value, "toolchain": "llvm", "llvm-version": "16"},
          ]
          self_hosted_repos = [
              "kernel-patches/bpf",
              "kernel-patches/vmtest",
          ]
          for idx in range(len(matrix) - 1, -1, -1):
              if matrix[idx]['toolchain'] == 'gcc':
                  matrix[idx]['toolchain_full'] = 'gcc'
              else:
                  matrix[idx]['toolchain_full'] = 'llvm-' + matrix[idx]['llvm-version']
          # Only a few repositories within "kernel-patches" use self-hosted runners.
          if "${{ github.repository_owner }}" != "kernel-patches" or "${{ github.repository }}" not in self_hosted_repos:
              # Outside of those repositories we only run on x86_64 GH hosted
              # runners (ubuntu-latest).
              for idx in range(len(matrix) - 1, -1, -1):
                  if matrix[idx]["arch"] != Arch.x86_64.value:
                      del matrix[idx]
                  else:
                      matrix[idx]["runs_on"] = ["ubuntu-latest"]
          else:
              # Otherwise, run on (self-hosted, arch) runners.
              for idx in range(len(matrix) - 1, -1, -1):
                  matrix[idx]["runs_on"].extend(["self-hosted", matrix[idx]["arch"]])
          build_matrix = {"include": matrix}
          set_output("build_matrix", dumps(build_matrix))

          def get_tests(config):
              tests = [
                  "test_progs",
                  "test_progs_parallel",
                  "test_progs_no_alu32",
                  "test_progs_no_alu32_parallel",
                  "test_maps",
                  "test_verifier",
              ]
              if config.get("parallel_tests", True):
                  return tests
              return [test for test in tests if not test.endswith("parallel")]

          test_matrix = {"include": [{**config, **generate_test_config(test)}
                                     for config in matrix
                                     for test in get_tests(config)]}
          set_output("test_matrix", dumps(test_matrix))

          veristat_runs_on = next(x['runs_on']
                                  for x in matrix
                                  if x['arch'] == "${{env.veristat_arch}}" and
                                     x['toolchain'] == "${{env.veristat_toolchain}}")
          # Serialize as JSON so fromJSON() in the veristat job can parse it.
          set_output("veristat_runs_on", dumps(veristat_runs_on))
  build:
    name: build for ${{ matrix.arch }} with ${{ matrix.toolchain_full }}
    needs: set-matrix
    runs-on: ${{ matrix.runs_on }}
    timeout-minutes: 100
    strategy:
      fail-fast: false
      matrix: ${{ fromJSON(needs.set-matrix.outputs.build-matrix) }}
    env:
      KERNEL: ${{ matrix.kernel }}
      REPO_ROOT: ${{ github.workspace }}
      REPO_PATH: ""
      KBUILD_OUTPUT: kbuild-output/
    steps:
      - run: echo "would build now"

  test:
    if: ${{ github.event_name != 'push' }}
    name: ${{ matrix.test }} on ${{ matrix.arch }} with ${{ matrix.toolchain_full }}
    needs: [set-matrix, build]
    strategy:
      fail-fast: false
      matrix: ${{ fromJSON(needs.set-matrix.outputs.test-matrix) }}
    runs-on: ${{ matrix.runs_on }}
    timeout-minutes: 100
    env:
      KERNEL: ${{ matrix.kernel }}
      REPO_ROOT: ${{ github.workspace }}
      REPO_PATH: ""
      KBUILD_OUTPUT: kbuild-output/
    steps:
      - run: echo "would test now"

  veristat:
    name: veristat
    needs: [set-matrix, build]
    runs-on: ${{ fromJSON(needs.set-matrix.outputs.veristat-runs-on) }}
    timeout-minutes: 100
    env:
      KERNEL: LATEST
      REPO_ROOT: ${{ github.workspace }}
      REPO_PATH: ""
      KBUILD_OUTPUT: kbuild-output/
    steps:
      - run: echo "would run veristat now"
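As an illustration of what set-matrix emits (not part of the commit): for a
repository outside self_hosted_repos, the pruning loop keeps only the x86_64
entries and pins them to GH hosted runners, so the GITHUB_OUTPUT line comes
out roughly as:

    build_matrix={"include": [
      {"kernel": "LATEST", "runs_on": ["ubuntu-latest"], "arch": "x86_64",
       "toolchain": "gcc", "llvm-version": "16", "toolchain_full": "gcc"},
      {"kernel": "LATEST", "runs_on": ["ubuntu-latest"], "arch": "x86_64",
       "toolchain": "llvm", "llvm-version": "16", "toolchain_full": "llvm-16"}]}

test_matrix then expands each surviving entry with one generate_test_config(test)
record per test name, and veristat_runs_on picks the runs_on list of the
x86_64/gcc entry.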