diff --git a/CHANGELOG.md b/CHANGELOG.md index 17bb285c5..9987cf676 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,11 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## Unreleased +### Added +- Introduced load profile configuration for logrotate FS. This is a breaking + configuration change. + ## [0.23.5] ### Fixed - Target pid watcher will not report 0 for containers. diff --git a/lading/proptest-regressions/generator/file_gen/logrotate_fs/model.txt b/lading/proptest-regressions/generator/file_gen/logrotate_fs/model.txt index 1a1bb0805..f668b4923 100644 --- a/lading/proptest-regressions/generator/file_gen/logrotate_fs/model.txt +++ b/lading/proptest-regressions/generator/file_gen/logrotate_fs/model.txt @@ -6,3 +6,4 @@ # everyone who runs the test benefits from these saved cases. cc 14b817f8f064c889dabecae41294027ede294a5e51f613ddba46d8bb352f47f5 # shrinks to seed = 18077722532471589563, state = State { nodes: {2: Directory { name: "BUndwQB7", dir: Directory { children: {3}, parent: Some(1) } }, 1: Directory { name: "/", dir: Directory { children: {2}, parent: None } }, 3: File { file: File { parent: 2, bytes_written: 0, bytes_read: 0, access_tick: 0, modified_tick: 0, status_tick: 0, bytes_per_tick: 72, read_only: false, peer: None, ordinal: 0, group_id: 0, open_handles: 0, unlinked: false } }}, root_inode: 1, now: 0, max_rotations: 8, max_bytes_per_file: 32273, group_names: [["77sYYEM2_0.log", "77sYYEM2_0.log.1", "77sYYEM2_0.log.2", "77sYYEM2_0.log.3", "77sYYEM2_0.log.4", "77sYYEM2_0.log.5", "77sYYEM2_0.log.6", "77sYYEM2_0.log.7", "77sYYEM2_0.log.8"]], next_inode: 4, .. 
}, operations = [GetAttr, Read { offset: 873, size: 578 }, GetAttr, Wait { ticks: 67 }, Close, Open, Close, GetAttr, GetAttr, Open, GetAttr, Wait { ticks: 76 }, Wait { ticks: 90 }, Open, Wait { ticks: 73 }, Wait { ticks: 94 }, Close, Lookup { name: Some("¥'?*:𐠸𝜒A£⳾6?𐢬_Ѩ%ౝ0?{`.𑌂dM") }, Wait { ticks: 63 }, Open, Open, Open, Lookup { name: Some(";0$Ѩ¥𐖘Ⱥ.${*_:gn`Ⱥ\u{113d8}*Z") }, Open, Read { offset: 596, size: 401 }, Read { offset: 314, size: 229 }, Read { offset: 876, size: 934 }, Read { offset: 899, size: 782 }, Read { offset: 871, size: 732 }, Wait { ticks: 3 }, Read { offset: 986, size: 52 }, Lookup { name: None }, Lookup { name: None }, Read { offset: 586, size: 180 }, Close, Open, Lookup { name: Some("𞹾&Ⱥ<") }, GetAttr, Read { offset: 499, size: 626 }, Lookup { name: Some("𑥗%@�^ೋ𝄓") }, Read { offset: 625, size: 519 }, Open, Read { offset: 26, size: 857 }, GetAttr, Read { offset: 530, size: 378 }, Read { offset: 95, size: 717 }, GetAttr, Close, Read { offset: 119, size: 956 }, Open, GetAttr, Read { offset: 760, size: 956 }, Close, Wait { ticks: 98 }, Wait { ticks: 12 }, Read { offset: 138, size: 227 }, Wait { ticks: 41 }, GetAttr] cc 84a14bb361e5846589558e1fc52c5dee33d22e789034ef13c61f30ca4856d5da # shrinks to seed = 1512443422463708349, state = State { nodes: {1: Directory { name: "/", dir: Directory { children: {2}, parent: None } }, 2: Directory { name: "eKZTyj4p", dir: Directory { children: {3}, parent: Some(1) } }, 3: File { file: File { parent: 2, bytes_written: 0, bytes_read: 0, access_tick: 0, modified_tick: 0, status_tick: 0, bytes_per_tick: 4, read_only: false, peer: None, ordinal: 0, group_id: 0, open_handles: 0, unlinked: false } }}, root_inode: 1, now: 0, max_rotations: 2, max_bytes_per_file: 196227, group_names: [["F5Anm0dg_0.log", "F5Anm0dg_0.log.1", "F5Anm0dg_0.log.2"]], next_inode: 4, .. 
}, operations = [Wait { ticks: 40 }, Lookup { name: Some("𑌷C&𞺡\"?\"$<&%{$౿ோ") }, GetAttr, GetAttr, GetAttr, GetAttr, GetAttr, GetAttr, Wait { ticks: 17 }, Close, Read { offset: 225, size: 373 }, Wait { ticks: 34 }, Lookup { name: Some("ኻࠕN?¥ model::LoadProfile { + // For now, one tick is one second. + match self { + LoadProfile::Constant(bpt) => model::LoadProfile::Constant(bpt.get_bytes() as u64), + LoadProfile::Linear { + initial_bytes_per_second, + rate, + } => model::LoadProfile::Linear { + start: initial_bytes_per_second.get_bytes() as u64, + rate: rate.get_bytes() as u64, + }, + } + } } #[derive(thiserror::Error, Debug)] @@ -117,15 +148,16 @@ impl Server { let start_time = std::time::Instant::now(); let start_time_system = std::time::SystemTime::now(); + let state = model::State::new( &mut rng, start_time.elapsed().as_secs(), - config.bytes_per_second.get_bytes() as u64, config.total_rotations, config.maximum_bytes_per_log.get_bytes() as u64, block_cache, config.max_depth, config.concurrent_logs, + config.load_profile.to_model(), ); info!( diff --git a/lading/src/generator/file_gen/logrotate_fs/model.rs b/lading/src/generator/file_gen/logrotate_fs/model.rs index 87f77f638..0164a7fa5 100644 --- a/lading/src/generator/file_gen/logrotate_fs/model.rs +++ b/lading/src/generator/file_gen/logrotate_fs/model.rs @@ -102,8 +102,8 @@ impl File { /// Create a new instance of `File` pub(crate) fn new( parent: Inode, - bytes_per_tick: u64, group_id: u16, + bytes_per_tick: u64, now: Tick, peer: Option, ) -> Self { @@ -236,15 +236,6 @@ impl File { pub(crate) fn size(&self) -> u64 { self.bytes_written } - - /// Calculate the expected bytes written based on writable duration. 
- #[cfg(test)] - pub(crate) fn expected_bytes_written(&self, now: Tick) -> u64 { - let start_tick = self.created_tick; - let end_tick = self.read_only_since.unwrap_or(now); - let writable_duration = end_tick.saturating_sub(start_tick); - self.bytes_per_tick.saturating_mul(writable_duration) - } } /// Model representation of a `Directory`. Contains children are `Directory` @@ -281,6 +272,20 @@ pub(crate) enum Node { }, } +/// Profile for load in this filesystem. +#[derive(Debug, Clone, Copy)] +pub(crate) enum LoadProfile { + /// Constant bytes per tick + Constant(u64), + /// Linear growth of bytes per tick + Linear { + /// Starting point for bytes per tick + start: u64, + /// Amount to increase per tick + rate: u64, + }, +} + /// The state of the filesystem /// /// This structure is responsible for maintenance of the structure of the @@ -290,6 +295,7 @@ pub(crate) struct State { nodes: FxHashMap, root_inode: Inode, now: Tick, + initial_tick: Tick, block_cache: block::Cache, max_bytes_per_file: u64, max_rotations: u8, @@ -298,6 +304,7 @@ pub(crate) struct State { next_inode: Inode, next_file_handle: u64, inode_scratch: Vec, + load_profile: LoadProfile, } impl std::fmt::Debug for State { @@ -351,12 +358,12 @@ impl State { pub(crate) fn new( rng: &mut R, initial_tick: Tick, - bytes_per_tick: u64, max_rotations: u8, max_bytes_per_file: u64, block_cache: block::Cache, max_depth: u8, concurrent_logs: u16, + load_profile: LoadProfile, ) -> State where R: Rng, @@ -376,6 +383,7 @@ impl State { let mut state = State { nodes, root_inode, + initial_tick, now: initial_tick, block_cache, max_bytes_per_file, @@ -384,6 +392,7 @@ impl State { next_inode: 2, next_file_handle: 0, inode_scratch: Vec::with_capacity(concurrent_logs as usize), + load_profile, }; if concurrent_logs == 0 { @@ -476,7 +485,7 @@ impl State { let file_inode = state.next_inode; state.next_inode += 1; - let file = File::new(current_inode, bytes_per_tick, group_id, state.now, None); + let file = 
File::new(current_inode, group_id, 0, state.now, None); state.nodes.insert(file_inode, Node::File { file }); // Add the file to the directory's children @@ -544,12 +553,31 @@ impl State { fn advance_time_inner(&mut self, now: Tick) { assert!(now >= self.now); + // Compute new global bytes_per_tick, at now - 1. + let elapsed_ticks = now.saturating_sub(self.initial_tick).saturating_sub(1); + let bytes_per_tick = match &self.load_profile { + LoadProfile::Constant(bytes) => *bytes, + LoadProfile::Linear { start, rate } => { + start.saturating_add(rate.saturating_mul(elapsed_ticks)) + } + }; + + // Update each File's bytes_per_tick but do not advance time, as that is + // done later. + for node in self.nodes.values_mut() { + if let Node::File { file } = node { + if !file.read_only && !file.unlinked { + file.bytes_per_tick = bytes_per_tick; + } + } + } + for inode in self.nodes.keys() { self.inode_scratch.push(*inode); } for inode in self.inode_scratch.drain(..) { - let (rotated_inode, parent_inode, bytes_per_tick, group_id, ordinal) = { + let (rotated_inode, parent_inode, group_id, ordinal) = { // If the node pointed to by inode doesn't exist, that's a // catastrophic programming error. We just copied all inode to node // pairs. @@ -587,26 +615,24 @@ impl State { file.set_read_only(now); // Rotation data needed below. - ( - inode, - file.parent, - file.bytes_per_tick, - file.group_id, - file.ordinal, - ) + (inode, file.parent, file.group_id, file.ordinal) }; // Create our new file, called, well, `new_file`. This will // become the 0th ordinal in the `group_id` and may -- although // we don't know yet -- cause `rotated_inode` to be deleted. + // + // Set bytes_per_tick to current and now to now-1 else we'll never + // ramp properly. 
let new_file_inode = self.next_inode; let mut new_file = File::new( parent_inode, - bytes_per_tick, group_id, - self.now, + bytes_per_tick, + self.now.saturating_sub(1), Some(rotated_inode), ); + new_file.advance_time(now); self.next_inode = self.next_inode.saturating_add(1); @@ -695,7 +721,6 @@ impl State { } self.gc(); - self.now = now; } // Garbage collect unlinked files with no open handles, calculating the bytes @@ -903,7 +928,7 @@ mod test { num::NonZeroU32, }; - use super::{FileHandle, Inode, Node, State}; + use super::{FileHandle, Inode, LoadProfile, Node, State, Tick}; use lading_payload::block; use proptest::collection::vec; use proptest::prelude::*; @@ -947,6 +972,18 @@ mod test { } } + impl Arbitrary for LoadProfile { + type Parameters = (); + type Strategy = BoxedStrategy; + + fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy { + let constant_strategy = (1u64..=10_000u64).prop_map(LoadProfile::Constant); + let linear_strategy = (1u64..=1_000u64, 1u64..=100u64) + .prop_map(|(start, rate)| LoadProfile::Linear { start, rate }); + prop_oneof![constant_strategy, linear_strategy].boxed() + } + } + impl Arbitrary for State { type Parameters = (); type Strategy = BoxedStrategy; @@ -954,22 +991,22 @@ mod test { fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy { ( any::(), // seed - 1u64..=5_000u64, // bytes_per_tick 1u8..=8u8, // max_rotations 1024u64..=500_000u64, // max_bytes_per_file 1u8..=4u8, // max_depth 1u16..=16u16, // concurrent_logs 1u64..=1000u64, // initial_tick + any::(), // load_profile ) .prop_map( |( seed, - bytes_per_tick, max_rotations, max_bytes_per_file, max_depth, concurrent_logs, initial_tick, + load_profile, )| { let mut rng = StdRng::seed_from_u64(seed); let block_cache = block::Cache::fixed( @@ -983,12 +1020,12 @@ mod test { State::new( &mut rng, initial_tick, - bytes_per_tick, max_rotations, max_bytes_per_file, block_cache, max_depth, concurrent_logs, + load_profile, ) }, ) @@ -1161,56 +1198,91 @@ mod test { } // 
Property 7: bytes_written are tick accurate - for node in state.nodes.values() { + for (&inode, node) in &state.nodes { if let Node::File { file } = node { - let expected_bytes = file.expected_bytes_written(state.now); + let end_tick = file.read_only_since.unwrap_or(state.now); + let expected_bytes = compute_expected_bytes_written( + &state.load_profile, + state.initial_tick, + file.created_tick, + end_tick, + ); assert_eq!( file.bytes_written, expected_bytes, - "bytes_written ({}) does not match expected_bytes_written ({}) for file with inode {}", + "bytes_written ({}) does not match expected_bytes_written ({expected_bytes}) for file with inode {inode}", file.bytes_written, - expected_bytes, - file.parent ); } } - // Property 8: max(bytes_written) <= max_bytes_per_file + bytes_per_second - // - // If just prior to a rollover the file is within bytes_per_second of - // max_bytes_per_file on the next tick that the rollover happens the - // file will be larger than max_bytes_per_file but to a limited degree. - for node in state.nodes.values() { - if let Node::File { file } = node { - if file.unlinked { - continue; - } - let max_size = state.max_bytes_per_file + file.bytes_per_tick; - assert!( - file.size() <= max_size, - "File size {sz} exceeds max allowed size {max_size}", - sz = file.size() - ); - } - } + // // Property 8: max(bytes_written) <= max_bytes_per_file + bytes_per_second + // // + // // If just prior to a rollover the file is within bytes_per_second of + // // max_bytes_per_file on the next tick that the rollover happens the + // // file will be larger than max_bytes_per_file but to a limited degree. 
+ // for node in state.nodes.values() { + // if let Node::File { file } = node { + // if file.unlinked { + // continue; + // } + // let max_size = state.max_bytes_per_file + file.bytes_per_tick; + // assert!( + // file.size() <= max_size, + // "File size {sz} exceeds max allowed size {max_size}", + // sz = file.size() + // ); + // } + // } + + // // Property 9: Rotated files have bytes_written within acceptable range + // // + // // For a rotated file (read_only == true), bytes_written should be + // // within (max_bytes_per_file - bytes_per_tick) <= bytes_written < + // // (max_bytes_per_file + bytes_per_tick). + // for node in state.nodes.values() { + // if let Node::File { file } = node { + // if !file.read_only { + // continue; + // } + // let min_size = state.max_bytes_per_file.saturating_sub(file.bytes_per_tick); + // let max_size = state.max_bytes_per_file.saturating_add(file.bytes_per_tick); + // assert!( + // file.bytes_written >= min_size && file.bytes_written < max_size, + // "Rotated file size {bytes_written} not in expected range [{min_size}, {max_size})", + // bytes_written = file.bytes_written + // ); + // } + // } + } - // Property 9: Rotated files have bytes_written within acceptable range - // - // For a rotated file (read_only == true), bytes_written should be - // within (max_bytes_per_file - bytes_per_tick) <= bytes_written < - // (max_bytes_per_file + bytes_per_tick). 
- for node in state.nodes.values() { - if let Node::File { file } = node { - if !file.read_only { - continue; - } - let min_size = state.max_bytes_per_file.saturating_sub(file.bytes_per_tick); - let max_size = state.max_bytes_per_file.saturating_add(file.bytes_per_tick); - assert!( - file.bytes_written >= min_size && file.bytes_written < max_size, - "Rotated file size {bytes_written} not in expected range [{min_size}, {max_size})", - bytes_written = file.bytes_written - ); + fn compute_expected_bytes_written( + load_profile: &LoadProfile, + initial_tick: Tick, + created_tick: Tick, + end_tick: Tick, + ) -> u64 { + let start_tick = created_tick.max(initial_tick); + let end_tick = end_tick.max(start_tick); + let duration = end_tick - start_tick; + + match load_profile { + LoadProfile::Constant(bytes_per_tick) => bytes_per_tick.saturating_mul(duration), + LoadProfile::Linear { start, rate } => { + // bytes_per_tick at time t is start + rate * (t - initial_tick) + // total_bytes = sum_{t = start_tick}^{end_tick - 1} bytes_per_tick at t + // Simplify the sum: + // total_bytes = duration * start + rate * sum_{t = start_tick}^{end_tick - 1} (t - initial_tick) + // sum_{t = start_tick}^{end_tick - 1} (t - initial_tick) = sum_{k = start_tick - initial_tick}^{end_tick - 1 - initial_tick} k + let a = start_tick.saturating_sub(initial_tick); + let b = end_tick.saturating_sub(1).saturating_sub(initial_tick); + let num_terms = b.saturating_sub(a).saturating_add(1); + let sum_of_terms = num_terms.saturating_mul(a + b) / 2; + + let total_bytes = duration + .saturating_mul(*start) + .saturating_add(rate.saturating_mul(sum_of_terms)); + total_bytes } } }