drb.rs
// Copyright (c) 2021-2024 Espresso Systems (espressosys.com)
// This file is part of the HotShot repository.
// You should have received a copy of the MIT License
// along with the HotShot repository. If not, see <https://mit-license.org/>.
use std::collections::BTreeMap;
use sha2::{Digest, Sha256};
use tokio::task::JoinHandle;
use crate::traits::node_implementation::{ConsensusTime, NodeType};
// TODO: Add the following consts once we bench the hash time.
// <https://github.com/EspressoSystems/HotShot/issues/3880>
// /// Highest number of hashes that the hardware can complete in a second.
// const `HASHES_PER_SECOND`
// /// Time a DRB calculation will take, in terms of number of views.
// const `DRB_CALCULATION_NUM_VIEW`: u64 = 300;
// TODO: Replace this with an accurate number calculated by `fn difficulty_level()` once we bench
// the hash time.
// <https://github.com/EspressoSystems/HotShot/issues/3880>
/// Arbitrary number of times the hash function will be repeatedly called.
const DIFFICULTY_LEVEL: u64 = 10;
/// DRB seed input for epochs 1 and 2.
pub const INITIAL_DRB_SEED_INPUT: [u8; 32] = [0; 32];
/// DRB result for epochs 1 and 2.
pub const INITIAL_DRB_RESULT: [u8; 32] = [0; 32];
/// Alias for DRB seed input for `compute_drb_result`, serialized from the QC signature.
pub type DrbSeedInput = [u8; 32];
/// Alias for DRB result from `compute_drb_result`.
pub type DrbResult = [u8; 32];
/// Number of previous results and seeds to keep.
pub const KEEP_PREVIOUS_RESULT_COUNT: u64 = 8;
// TODO: Use `HASHES_PER_SECOND` * `VIEW_TIMEOUT` * `DRB_CALCULATION_NUM_VIEW` to calculate this
// once we bench the hash time.
// <https://github.com/EspressoSystems/HotShot/issues/3880>
/// Difficulty level of the DRB calculation.
///
/// Represents the number of times the hash function will be repeatedly called.
#[must_use]
pub fn difficulty_level() -> u64 {
unimplemented!("Use an arbitrary `DIFFICULTY_LEVEL` for now before we bench the hash time.");
}
/// Compute the DRB result for the leader rotation.
///
/// This is to be started two epochs in advance and spawned as a separate task so that it does not block consensus.
///
/// # Arguments
/// * `drb_seed_input` - Serialized QC signature.
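///
/// # Example
///
/// A minimal sketch of running the computation off the critical path; `MyTypes` is a
/// hypothetical `NodeType` implementation and the surrounding async context is assumed.
///
/// ```ignore
/// let seed: DrbSeedInput = [1u8; 32];
/// // Run the hash chain on a blocking worker so the consensus task is not stalled.
/// let handle = tokio::task::spawn_blocking(move || compute_drb_result::<MyTypes>(seed));
/// let result: DrbResult = handle.await.expect("DRB task panicked");
/// ```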
#[must_use]
pub fn compute_drb_result<TYPES: NodeType>(drb_seed_input: DrbSeedInput) -> DrbResult {
let mut hash = drb_seed_input.to_vec();
for _iter in 0..DIFFICULTY_LEVEL {
// TODO: This may be optimized to avoid memcopies after we bench the hash time.
// <https://github.com/EspressoSystems/HotShot/issues/3880>
hash = Sha256::digest(hash).to_vec();
}
// Convert the hash to the DRB result.
let mut drb_result = [0u8; 32];
drb_result.copy_from_slice(&hash);
drb_result
}
/// Alias for the in-progress DRB computation task, if any.
pub type DrbComputation<TYPES> = Option<(<TYPES as NodeType>::Epoch, JoinHandle<DrbResult>)>;
/// Seeds for DRB computation and computed results.
#[derive(Clone, Debug)]
pub struct DrbSeedsAndResults<TYPES: NodeType> {
/// Stored inputs to computations
pub seeds: BTreeMap<TYPES::Epoch, DrbSeedInput>,
/// Stored results from computations
pub results: BTreeMap<TYPES::Epoch, DrbResult>,
}
impl<TYPES: NodeType> DrbSeedsAndResults<TYPES> {
/// Constructor with initial values for epochs 1 and 2.
#[must_use]
pub fn new() -> Self {
Self {
seeds: BTreeMap::from([
(TYPES::Epoch::new(1), INITIAL_DRB_SEED_INPUT),
(TYPES::Epoch::new(2), INITIAL_DRB_SEED_INPUT),
]),
results: BTreeMap::from([
(TYPES::Epoch::new(1), INITIAL_DRB_RESULT),
(TYPES::Epoch::new(2), INITIAL_DRB_RESULT),
]),
}
}
/// Stores a seed for a particular epoch for later use by `start_drb_task`.
pub fn store_seed(&mut self, epoch: TYPES::Epoch, drb_seed_input: DrbSeedInput) {
self.seeds.insert(epoch, drb_seed_input);
}
/// Garbage collects the internal data structures.
pub fn garbage_collect(&mut self, epoch: TYPES::Epoch) {
if epoch.u64() < KEEP_PREVIOUS_RESULT_COUNT {
return;
}
let retain_epoch = epoch - KEEP_PREVIOUS_RESULT_COUNT;
// N.B. x.split_off(y) returns the part of the map where key >= y
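// For example, with results stored for epochs 1 through 10 and retain_epoch = 3,
// split_off(&retain_epoch) keeps epochs 3 through 10 and drops epochs 1 and 2.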
// Remove result entries older than retain_epoch
self.results = self.results.split_off(&retain_epoch);
// Remove seed entries older than retain_epoch + 1
self.seeds = self.seeds.split_off(&(retain_epoch + 1));
}
}
impl<TYPES: NodeType> Default for DrbSeedsAndResults<TYPES> {
fn default() -> Self {
Self::new()
}
}
/// Functions for leader selection based on the DRB.
///
/// The algorithm we use is:
///
/// Initialization:
/// - obtain `drb: [u8; 32]` from the DRB calculation
/// - sort the stake table for a given epoch by `xor(drb, public_key)`
/// - generate a cdf of the cumulative stake using this newly-sorted table,
/// along with a hash of the stake table entries
///
/// Selecting a leader:
/// - calculate the SHA512 hash of the `drb_result`, `view_number` and `stake_table_hash`
/// - find the first index in the cdf for which the remainder of this hash modulo the `total_stake`
/// is strictly smaller than the cdf entry
/// - return the corresponding node as the leader for that view
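///
/// A minimal usage sketch; the stake table entries, DRB result, and view number are assumed to
/// be provided by the caller, and the concrete `StakeTableEntryType` implementation is left
/// unspecified:
///
/// ```ignore
/// // `stake_table` is the epoch's stake table and `drb` is its [u8; 32] DRB result.
/// let randomized_committee = generate_stake_cdf(stake_table, drb);
/// // The same randomized committee is reused for every view in the epoch.
/// let leader = select_randomized_leader(&randomized_committee, view_number);
/// ```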
pub mod election {
use primitive_types::{U256, U512};
use sha2::{Digest, Sha256, Sha512};
use crate::traits::signature_key::{SignatureKey, StakeTableEntryType};
/// Calculate `xor(drb.cycle(), public_key)`, returning the result as a vector of bytes
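///
/// For example, `cyclic_xor([0x01; 32], vec![0x02, 0x03])` yields `vec![0x03, 0x02]`: each
/// public key byte is XORed with the corresponding byte of the cycled DRB result.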
fn cyclic_xor(drb: [u8; 32], public_key: Vec<u8>) -> Vec<u8> {
let drb: Vec<u8> = drb.to_vec();
let mut result: Vec<u8> = vec![];
for (public_key_byte, drb_byte) in public_key.iter().zip(drb.iter().cycle()) {
result.push(drb_byte ^ public_key_byte);
}
result
}
/// Generate the stake table CDF, as well as a hash of the resulting stake table
pub fn generate_stake_cdf<Key: SignatureKey, Entry: StakeTableEntryType<Key>>(
mut stake_table: Vec<Entry>,
drb: [u8; 32],
) -> RandomizedCommittee<Entry> {
// sort by xor(public_key, drb_result)
stake_table.sort_by(|a, b| {
cyclic_xor(drb, a.public_key().to_bytes())
.cmp(&cyclic_xor(drb, b.public_key().to_bytes()))
});
let mut hasher = Sha256::new();
let mut cumulative_stake = U256::from(0);
let mut cdf = vec![];
for entry in stake_table {
cumulative_stake += entry.stake();
hasher.update(entry.public_key().to_bytes());
cdf.push((entry, cumulative_stake));
}
RandomizedCommittee {
cdf,
stake_table_hash: hasher.finalize().into(),
drb,
}
}
/// Select the leader for a view.
///
/// # Panics
/// Panics if `cdf` is empty. Behavior is unspecified if `cdf` is not sorted by cumulative stake.
///
/// Note that we downcast a U512 to a U256. This should never panic: the remainder is taken
/// modulo the total stake, which itself fits in a U256.
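///
/// For intuition, with made-up numbers: if the total stake is 100 and the SHA512 hash, read as
/// a little-endian integer, is 1234, then the breakpoint is `1234 % 100 = 34` and the leader is
/// the first entry whose cumulative stake is strictly greater than 34.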
pub fn select_randomized_leader<
SignatureKey,
Entry: StakeTableEntryType<SignatureKey> + Clone,
>(
randomized_committee: &RandomizedCommittee<Entry>,
view: u64,
) -> Entry {
let RandomizedCommittee {
cdf,
stake_table_hash,
drb,
} = randomized_committee;
// We hash the concatenated drb, view and stake table hash.
let mut hasher = Sha512::new();
hasher.update(drb);
hasher.update(view.to_le_bytes());
hasher.update(stake_table_hash);
let raw_breakpoint: [u8; 64] = hasher.finalize().into();
// then calculate the remainder modulo the total stake as a U512
let remainder: U512 =
U512::from_little_endian(&raw_breakpoint) % U512::from(cdf.last().unwrap().1);
// and drop the top 32 bytes, downcasting to a U256
let breakpoint: U256 = U256::try_from(remainder).unwrap();
// now find the first index where the breakpoint is strictly smaller than the cdf
//
// in principle, this may result in an index larger than `cdf.len()`.
// however, we have ensured by construction that `breakpoint < total_stake`
// and so the largest index we can actually return is `cdf.len() - 1`
let index = cdf.partition_point(|(_, cumulative_stake)| breakpoint >= *cumulative_stake);
// and return the corresponding entry
cdf[index].0.clone()
}
/// A stake table reordered by the DRB result, together with the cumulative-stake CDF and the
/// stake table hash used for leader selection.
#[derive(Clone, Debug)]
pub struct RandomizedCommittee<Entry> {
/// cdf of nodes by cumulative stake
cdf: Vec<(Entry, U256)>,
/// Hash of the stake table
stake_table_hash: [u8; 32],
/// DRB result
drb: [u8; 32],
}
}