use std::hint::black_box;
use std::sync::Arc;
use std::sync::Barrier;
use std::sync::atomic::AtomicBool;
use std::thread::{self};

use criterion::Criterion;
use rand::distr::Bernoulli;
use rand::distr::Distribution;

/// How many lock operations each worker thread performs per measured batch.
pub const NUM_ITERATIONS: usize = 100_000;
/// The thread counts at which every benchmark is run.
pub const THREADS: [usize; 6] = [1, 2, 4, 8, 16, 20];
/// The (average) number of read operations per write operation, for each
/// benchmarked workload mix.
pub const READ_RATIOS: [u32; 6] = [1, 10, 100, 1_000, 10_000, 100_000];

18
/// Execute the benchmarks for a given readers-writer lock implementation.
19
#[allow(clippy::too_many_arguments)]
20
pub fn benchmark<T, R, W>(
21
    c: &mut Criterion,
22
    name: &str,
23
    shared: T,
24
    read: R,
25
    write: W,
26
    num_threads: usize,
27
    num_iterations: usize,
28
    read_ratio: u32,
29
) where
30
    T: Clone + Send + 'static,
31
    R: FnOnce(&T) + Send + Copy + 'static,
32
    W: FnOnce(&T) + Send + Copy + 'static,
33
{
34
    // Share threads to avoid overhead.
35
    let mut threads = vec![];
36

            
37
    #[derive(Clone)]
38
    struct ThreadInfo<T> {
39
        busy: Arc<AtomicBool>,
40
        begin_barrier: Arc<Barrier>,
41
        end_barrier: Arc<Barrier>,
42
        dist: Bernoulli,
43
        shared: T,
44
    }
45

            
46
    let info = ThreadInfo {
47
        busy: Arc::new(AtomicBool::new(true)),
48
        begin_barrier: Arc::new(Barrier::new(num_threads + 1)),
49
        end_barrier: Arc::new(Barrier::new(num_threads + 1)),
50
        dist: Bernoulli::from_ratio(1, read_ratio).unwrap(),
51
        shared,
52
    };
53

            
54
    for _ in 0..num_threads {
55
        let info = info.clone();
56
        threads.push(thread::spawn(move || {
57
            let mut rng = rand::rng();
58

            
59
            loop {
60
                info.begin_barrier.wait();
61

            
62
                if !info.busy.load(std::sync::atomic::Ordering::SeqCst) {
63
                    // Quit the thread.
64
                    break;
65
                }
66

            
67
                // We execute it a fixed number of times.
68
                for _ in 0..num_iterations {
69
                    if info.dist.sample(&mut rng) {
70
                        write(&info.shared);
71
                        black_box(());
72
                    } else {
73
                        read(&info.shared);
74
                        black_box(());
75
                    }
76
                }
77

            
78
                info.end_barrier.wait();
79
            }
80
        }));
81
    }
82

            
83
    c.bench_function(
84
        format!("{name} {num_threads} {num_iterations} {read_ratio}").as_str(),
85
        |bencher| {
86
            bencher.iter(|| {
87
                info.begin_barrier.wait();
88

            
89
                info.end_barrier.wait();
90
            });
91
        },
92
    );
93

            
94
    // Tell the threads to quit and wait for them to join.
95
    info.busy.store(false, std::sync::atomic::Ordering::SeqCst);
96
    info.begin_barrier.wait();
97

            
98
    for thread in threads {
99
        thread.join().unwrap();
100
    }
101
}