fix/enable rust tests, make build faster

This commit fixes the Rust tests and configures CI to
run them on every commit. It also keeps CI runs fast
by caching dependencies and splitting jobs.
This commit is contained in:
patso 2024-09-06 06:00:17 -04:00
parent fb35fdb6f2
commit 082bccb557
No known key found for this signature in database
GPG Key ID: 1C2CAA6553D55978
10 changed files with 245 additions and 36 deletions

View File

@ -0,0 +1,52 @@
name: install-deps
# `description` is a required top-level field in action metadata; without it
# GitHub rejects the action file.
description: >
  Install build/test dependencies (Ubuntu packages, clang 17, meson,
  virtme-ng) and enable unprivileged KVM access on the runner.
runs:
  using: 'composite'
  steps:
    ### OTHER REPOS ####
    # Hard turn-off interactive mode
    - run: echo 'debconf debconf/frontend select Noninteractive' | sudo debconf-set-selections
      shell: bash
    # Refresh packages list
    - run: sudo apt update
      shell: bash
    ### DOWNLOAD AND INSTALL DEPENDENCIES ###
    # Download dependencies packaged by Ubuntu
    - run: sudo apt -y install bison busybox-static cargo cmake coreutils cpio elfutils file flex gcc gcc-multilib git iproute2 jq kbd kmod libcap-dev libelf-dev libunwind-dev libvirt-clients libzstd-dev linux-headers-generic linux-tools-common linux-tools-generic make ninja-build pahole pkg-config python3-dev python3-pip python3-requests qemu-kvm rsync rustc stress-ng udev zstd
      shell: bash
    # clang 17
    # Use a custom llvm.sh script which includes the -y flag for
    # add-apt-repository. Otherwise, the CI job will hang. If and when
    # https://github.com/opencollab/llvm-jenkins.debian.net/pull/26 is
    # merged, we can go back to using https://apt.llvm.org/llvm.sh.
    - run: wget https://raw.githubusercontent.com/Decave/llvm-jenkins.debian.net/fix_llvmsh/llvm.sh
      shell: bash
    - run: chmod +x llvm.sh
      shell: bash
    - run: sudo ./llvm.sh all
      shell: bash
    - run: sudo ln -sf /usr/bin/clang-17 /usr/bin/clang
      shell: bash
    - run: sudo ln -sf /usr/bin/llvm-strip-17 /usr/bin/llvm-strip
      shell: bash
    # meson
    - run: pip install meson
      shell: bash
    # Install virtme-ng
    - run: pip install virtme-ng
      shell: bash
    # Setup KVM support: make /dev/kvm world-accessible so vng can boot the
    # test kernel without the runner user being in the kvm group.
    - run: |
        echo 'KERNEL=="kvm", GROUP="kvm", MODE="0666", OPTIONS+="static_node=kvm"' | sudo tee /etc/udev/rules.d/99-kvm4all.rules
        sudo udevadm control --reload-rules
        sudo udevadm trigger --name-match=kvm
      shell: bash
    ### END DEPENDENCIES ###

138
.github/workflows/caching-build.yml vendored Normal file
View File

@ -0,0 +1,138 @@
name: caching-build
on:
  # only runs on main, hourly cache update used by all branches
  # (scheduled workflows execute on the default branch only)
  schedule:
    - cron: "0 * * * *"
  push:

jobs:
  # Build (or restore from cache) a sched-ext enabled kernel. The two cache
  # entries share the same key but have different paths, so actions/cache
  # stores them as distinct entries (the cache version hashes the paths).
  build-kernel:
    runs-on: ubuntu-22.04
    steps:
      # redundancy to exit fast
      - run: echo 'debconf debconf/frontend select Noninteractive' | sudo debconf-set-selections
      - run: sudo apt update
      - run: sudo apt install -y git --no-install-recommends
      # get latest head commit of sched_ext for-next
      - run: echo "SCHED_EXT_KERNEL_COMMIT=$(git ls-remote https://git.kernel.org/pub/scm/linux/kernel/git/tj/sched_ext.git heads/for-next | awk '{print $1}')" >> $GITHUB_ENV
      - uses: actions/checkout@v4
      # use cached kernel if available, create after job if not
      - name: Cache Kernel
        id: cache-kernel
        uses: actions/cache@v4
        with:
          path: |
            linux
          key: ${{ env.SCHED_EXT_KERNEL_COMMIT }}
      - if: ${{ steps.cache-kernel.outputs.cache-hit != 'true' }}
        uses: ./.github/actions/install-deps-action
      # cache bzImage alone for rust tests (disk space limit workaround)
      - name: Cache bzImage
        id: cache-bzImage
        uses: actions/cache@v4
        with:
          path: |
            linux/arch/x86/boot/bzImage
          key: ${{ env.SCHED_EXT_KERNEL_COMMIT }}
      - if: ${{ steps.cache-kernel.outputs.cache-hit != 'true' }}
        name: Clone Kernel
        # Get the latest sched-ext enabled kernel directly from the korg
        # for-next branch
        run: git clone --single-branch -b for-next --depth 1 https://git.kernel.org/pub/scm/linux/kernel/git/tj/sched_ext.git linux
      # guard rail because we are caching
      - if: ${{ steps.cache-kernel.outputs.cache-hit != 'true' }}
        run: cd linux && git checkout ${{ env.SCHED_EXT_KERNEL_COMMIT }}
      - if: ${{ steps.cache-kernel.outputs.cache-hit != 'true' }}
        # Print the latest commit of the checked out sched-ext kernel
        run: cd linux && git log -1 --pretty=format:"%h %ad %s" --date=short
      - if: ${{ steps.cache-kernel.outputs.cache-hit != 'true' }}
        # Build a minimal kernel (with sched-ext enabled) using virtme-ng
        run: cd linux && vng -v --build --config ../.github/workflows/sched-ext.config
      - if: ${{ steps.cache-kernel.outputs.cache-hit != 'true' }}
        # Generate kernel headers
        run: cd linux && make headers

  integration-test:
    runs-on: ubuntu-22.04
    needs: build-kernel
    steps:
      - uses: actions/checkout@v4
      - uses: ./.github/actions/install-deps-action
      # cache rust deps, invalidate when kernel commit changes
      # get latest head commit of sched_ext for-next
      - run: echo "SCHED_EXT_KERNEL_COMMIT=$(git ls-remote https://git.kernel.org/pub/scm/linux/kernel/git/tj/sched_ext.git heads/for-next | awk '{print $1}')" >> $GITHUB_ENV
      # use cached kernel if available, create after job if not
      - name: Cache Kernel
        id: cache-kernel
        uses: actions/cache@v4
        with:
          path: |
            linux
          key: ${{ env.SCHED_EXT_KERNEL_COMMIT }}
      # need to re-run job when kernel head changes between build and test running.
      - if: ${{ steps.cache-kernel.outputs.cache-hit != 'true' }}
        name: exit if cache stale
        # `exit -1` is not portable: POSIX `exit` takes 0-255 (bash maps -1
        # to 255). Any non-zero status fails the job, so use 1.
        run: exit 1
      # veristat
      - run: wget https://github.com/libbpf/veristat/releases/download/v0.3.2/veristat-v0.3.2-amd64.tar.gz
      - run: tar -xvf veristat-v0.3.2-amd64.tar.gz && sudo cp veristat /usr/bin/
      - run: sudo chmod +x /usr/bin/veristat && sudo chmod 755 /usr/bin/veristat
      # The actual build:
      - run: meson setup build -Dkernel=$(pwd)/linux -Dkernel_headers=./linux/usr/include -Denable_stress=true
      - run: meson compile -C build
      # Print CPU model before running the tests (this can be useful for
      # debugging purposes)
      - run: grep 'model name' /proc/cpuinfo | head -1
      # Test schedulers
      - run: meson compile -C build test_sched
      # Stress schedulers
      - run: meson compile -C build stress_tests
      - run: meson compile -C build veristat

  rust-test:
    runs-on: ubuntu-22.04
    needs: build-kernel
    steps:
      - uses: actions/checkout@v4
      - uses: ./.github/actions/install-deps-action
      # cache rust deps, invalidate when kernel commit changes
      # get latest head commit of sched_ext for-next
      - run: echo "SCHED_EXT_KERNEL_COMMIT=$(git ls-remote https://git.kernel.org/pub/scm/linux/kernel/git/tj/sched_ext.git heads/for-next | awk '{print $1}')" >> $GITHUB_ENV
      # cache bzImage alone for rust tests
      - name: Cache bzImage
        id: cache-bzImage
        uses: actions/cache@v4
        with:
          path: |
            linux/arch/x86/boot/bzImage
          key: ${{ env.SCHED_EXT_KERNEL_COMMIT }}
      # need to re-run job when kernel head changes between build and test running.
      - if: ${{ steps.cache-bzImage.outputs.cache-hit != 'true' }}
        name: exit if cache stale
        # `exit -1` is not portable: POSIX `exit` takes 0-255 (bash maps -1
        # to 255). Any non-zero status fails the job, so use 1.
        run: exit 1
      - uses: Swatinem/rust-cache@v2
        with:
          shared-key: ${{ env.SCHED_EXT_KERNEL_COMMIT }}
          workspaces: rust
          prefix-key: "1"
      # Build first, then pre-compile the tests, so the vng step below only
      # runs already-built test binaries inside the guest.
      - run: cargo build --manifest-path rust/Cargo.toml
      - run: cargo test --manifest-path rust/Cargo.toml --no-run
      - run: vng -m10G --force-9p -r linux/arch/x86/boot/bzImage --net user --exec "cargo test --manifest-path rust/Cargo.toml"

View File

@ -10,11 +10,11 @@ option('cargo', type: 'string', value: 'cargo',
description: 'cargo to use when building rust sub-projects')
option('cargo_home', type: 'string',
description: 'CARGO_HOME env to use when invoking cargo')
option('offline', type: 'boolean', value: 'false',
option('offline', type: 'boolean', value: false,
description: 'Compilation step should not access the internet')
option('enable_rust', type: 'boolean', value: 'true',
option('enable_rust', type: 'boolean', value: true,
description: 'Enable rust sub-projects')
option('enable_stress', type: 'boolean', value: 'true',
option('enable_stress', type: 'boolean', value: true,
description: 'Enable stress tests')
option('kernel', type: 'string', value: 'vmlinuz',
description: 'kernel image used to test schedulers')

View File

@ -6,6 +6,11 @@ authors = ["Andrea Righi <andrea.righi@linux.dev>"]
license = "GPL-2.0-only"
repository = "https://github.com/sched-ext/scx"
description = "Framework to implement sched_ext schedulers running in user space"
include = [
"assets/bpf/intf.h",
"assets/bpf/main.bpf.c",
"assets/bpf.rs",
]
[dependencies]
anyhow = "1.0.65"
@ -23,8 +28,3 @@ scx_utils = { path = "../scx_utils", version = "1.0.4" }
name = "scx_rustland_core"
path = "src/lib.rs"
include = [
"assets/bpf/intf.h",
"assets/bpf/main.bpf.c",
"assets/bpf.rs",
]

View File

@ -591,7 +591,7 @@ mod tests {
println!("vmlinux.h: ver={:?} sha1={:?}", &ver, &sha1,);
assert!(
regex::Regex::new(r"^[1-9][0-9]*\.[1-9][0-9]*(\.[1-9][0-9]*)?$")
regex::Regex::new(r"^([1-9][0-9]*\.[1-9][0-9][a-z0-9-]*)$")
.unwrap()
.is_match(&ver)
);

View File

@ -50,12 +50,12 @@ mod tests {
#[test]
fn test_cargo_ver() {
//assert_eq!(super::*SCX_CARGO_VERSION, 1);
println!("{}", super::*SCX_CARGO_VERSION);
println!("{}", *super::SCX_CARGO_VERSION);
}
#[test]
fn test_full_ver() {
//assert_eq!(super::*SCX_CARGO_VERSION, 1);
println!("{}", super::*SCX_FULL_VERSION);
println!("{}", *super::SCX_FULL_VERSION);
}
}

View File

@ -17,36 +17,42 @@
//! hexadecimal string:
//!
//!```
//! let all_zeroes = cpumask::new();
//! let str = "0xff00ff00";
//! let from_str_mask = cpumask::from_string(str);
//! use scx_utils::Cpumask;
//! let all_zeroes = Cpumask::new();
//! let str = String::from("0xff00ff00");
//! let from_str_mask = Cpumask::from_str(&str);
//!```
//!
//! The hexadecimal string also supports the special values "none" and "all",
//! respectively to specify no CPU (empty mask) or all CPUs (full mask):
//!
//!```
//! let str = "none";
//! let all_zeroes = cpumask::from_string(str);
//! use scx_utils::Cpumask;
//! let str = String::from("none");
//! let all_zeroes = Cpumask::from_str(&str);
//!
//! let str = "all";
//! let all_ones = cpumask::from_string(str);
//! let str = String::from("all");
//! let all_ones = Cpumask::from_str(&str);
//!```
//!
//! A Cpumask can be queried and updated using its helper functions:
//!
//!```
//! info!("{}", mask); // 32:<11111111000000001111111100000000>
//!```rust
//! use log::info;
//! use scx_utils::Cpumask;
//! let str = String::from("none");
//! let mut mask = Cpumask::from_str(&str).unwrap();
//! info!("{:#?}", mask); // 32:<11111111000000001111111100000000>
//! assert!(!mask.test_cpu(0));
//! mask.set_cpu(0);
//! assert!(mask.test_cpu(0));
//!
//! mask.clear();
//! info!("{}", mask); // 32:<00000000000000000000000000000000>
//! info!("{:#?}", mask); // 32:<00000000000000000000000000000000>
//! assert!(!mask.test_cpu(0));
//!
//! mask.setall();
//! info!("{}", mask); // 32:<11111111111111111111111111111111>
//! info!("{:#?}", mask); // 32:<11111111111111111111111111111111>
//! assert!(mask.test_cpu(0));
//!```
@ -335,8 +341,11 @@ pub struct CpumaskIntoIterator {
///
/// # Examples
///
/// ```
/// let mask = Cpumask::from_str(cpumask_str)?;
/// ```rust
/// use log::info;
/// use scx_utils::Cpumask;
/// let str = String::from("all");
/// let mask = Cpumask::from_str(&str).unwrap();
/// for cpu in mask.clone().into_iter() {
/// info!("cpu {} was set", cpu);
/// }

View File

@ -109,7 +109,11 @@
//!
//! Assume we're on a 16-core (32-CPU) host with two core complexes:
//!
//!```
//!```rust
//! use scx_utils::LoadAggregator;
//! use log::info;
//!
//! let mut aggregator = LoadAggregator::new(32, false);
//! // Create a LoadAggregator object, specifying the number of CPUs on the
//! // system, and whether it should only aggregate duty cycle.
//! let mut aggregator = LoadAggregator::new(32, false);
@ -145,7 +149,7 @@
//! example, if we had two tasks with weight 1 in domain 0, and an additional
//! task with weight 100 in domain 1, we would record their loads as follows:
//!
//!```
//!```rust,ignore
//! // Assume the same aggregator as above.
//!
//! // In this version, domain 0 has 2 tasks with weight 1.0 and duty cycle
@ -175,7 +179,9 @@
//! infeasibility) for the whole system, or the sum of duty cycle for the whole
//! system, or the sum of load for each domain (adjusted for infeasibility):
//!
//! ```
//! ```rust
//! use scx_utils::LoadAggregator;
//! use log::info;
//! let mut aggregator = LoadAggregator::new(32, false);
//! aggregator.record_dom_load(0, 1, 1.0);
//! // ...

View File

@ -30,10 +30,12 @@ use metrics_util::registry::Registry;
///
/// Example:
///
/// ```rust
/// ```
/// use std::time::Duration;
/// use scx_utils::LogRecorderBuilder;
/// LogRecorderBuilder::new()
/// .with_reporting_interval(Duration::from_secs(3))
/// .install()?;
/// .install().unwrap();
/// ```
pub struct LogRecorderBuilder {
reporting_interval: Duration,
@ -389,7 +391,7 @@ mod tests {
#[test]
fn test_default_format_counter_for_group() {
let formatter = DefaultMetricFormatter;
let label = Label::new("test_label", "value");
let label = Label::new("test_counter", "test_label");
let key = Key::from_parts("test_counter", vec![label]);
let value = 100;
let rate_per_sec = 10.5;

View File

@ -10,7 +10,7 @@
//!
//! A Topology is comprised of one or more Node objects, which themselves are
//! comprised hierarchically of Cache -> Core -> Cpu objects respectively:
//!
//!```rust,ignore
//! Topology
//! |
//! o---------------------o---------------------o
@ -47,7 +47,7 @@
//! | min_freq 400000 | | min_freq 400000 |
| max_freq 5881000 | | max_freq 5881000 |
//! o--------------------------------o o---------------------------------o
//!
//!```
//! Every object contains a Cpumask that spans all CPUs in that point in the
//! topological hierarchy.
//!
@ -56,8 +56,9 @@
//!
//! Topology objects are created using the static new function:
//!
//!```
//! let top = Topology::new()?;
//!```
//! use scx_utils::Topology;
//! let top = Topology::new().unwrap();
//!```
//!
//! Querying Topology
@ -386,8 +387,9 @@ impl Topology {
/// # Example
///
/// ```
/// let topo = Topology::new()?;
/// let topo_map = TopologyMap::new(topo)?;
/// use scx_utils::{TopologyMap, Topology};
/// let topo = Topology::new().unwrap();
/// let topo_map = TopologyMap::new(&topo).unwrap();
///
/// for (core_id, core) in topo_map.iter().enumerate() {
/// for cpu in core {