3 The yarrow pseudo-randomness generator.
5 Copyright (C) 2001, 2008, 2013 Niels Möller
7 This file is part of GNU Nettle.
9 GNU Nettle is free software: you can redistribute it and/or
10 modify it under the terms of either:
12 * the GNU Lesser General Public License as published by the Free
13 Software Foundation; either version 3 of the License, or (at your
14 option) any later version.
18 * the GNU General Public License as published by the Free
19 Software Foundation; either version 2 of the License, or (at your
20 option) any later version.
22 or both in parallel, as here.
24 GNU Nettle is distributed in the hope that it will be useful,
25 but WITHOUT ANY WARRANTY; without even the implied warranty of
26 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
27 General Public License for more details.
29 You should have received copies of the GNU General Public License and
30 the GNU Lesser General Public License along with this program. If
31 not, see http://www.gnu.org/licenses/.
#define YARROW_DEBUG 0

/* An upper limit on the entropy (in bits) in one octet of sample
 * data. */
#define YARROW_MULTIPLIER 4

/* Entropy threshold for reseeding from the fast pool */
#define YARROW_FAST_THRESHOLD 100

/* Entropy threshold for reseeding from the slow pool */
#define YARROW_SLOW_THRESHOLD 160

/* Number of sources that must exceed the threshold for slow reseed */
#define YARROW_SLOW_K 2

/* The number of iterations when reseeding, P_t in the yarrow paper.
 * Should be chosen so that reseeding takes on the order of 0.1-1
 * seconds. */
#define YARROW_RESEED_ITERATIONS 1500

/* Entropy estimates stick to this value; it is treated as infinity
 * in calculations. It should fit comfortably in an uint32_t, to
 * avoid overflows. */
#define YARROW_MAX_ENTROPY 0x100000

/* Forward declarations */
static void
yarrow_gate(struct yarrow256_ctx *ctx);
84 yarrow256_init(struct yarrow256_ctx *ctx,
86 struct yarrow_source *s)
90 sha256_init(&ctx->pools[0]);
91 sha256_init(&ctx->pools[1]);
95 /* Not strictly necessary, but it makes it easier to see if the
97 memset(ctx->counter, 0, sizeof(ctx->counter));
102 for (i = 0; i<n; i++)
104 ctx->sources[i].estimate[YARROW_FAST] = 0;
105 ctx->sources[i].estimate[YARROW_SLOW] = 0;
106 ctx->sources[i].next = YARROW_FAST;
111 yarrow256_seed(struct yarrow256_ctx *ctx,
113 const uint8_t *seed_file)
117 sha256_update(&ctx->pools[YARROW_FAST], length, seed_file);
118 yarrow256_fast_reseed(ctx);
121 /* FIXME: Generalize so that it generates a few more blocks at a
124 yarrow_generate_block(struct yarrow256_ctx *ctx,
129 aes256_encrypt(&ctx->key, sizeof(ctx->counter), block, ctx->counter);
131 /* Increment counter, treating it as a big-endian number. This is
132 * machine independent, and follows appendix B of the NIST
133 * specification of cipher modes of operation.
135 * We could keep a representation of the counter as 4 32-bit values,
136 * and write entire words (in big-endian byteorder) into the counter
137 * block, whenever they change. */
138 for (i = sizeof(ctx->counter); i--; )
140 if (++ctx->counter[i])
146 yarrow_iterate(uint8_t *digest)
148 uint8_t v0[SHA256_DIGEST_SIZE];
151 memcpy(v0, digest, SHA256_DIGEST_SIZE);
153 /* When hashed inside the loop, i should run from 1 to
154 * YARROW_RESEED_ITERATIONS */
155 for (i = 0; ++i < YARROW_RESEED_ITERATIONS; )
158 struct sha256_ctx hash;
162 /* Hash v_i | v_0 | i */
163 WRITE_UINT32(count, i);
164 sha256_update(&hash, SHA256_DIGEST_SIZE, digest);
165 sha256_update(&hash, sizeof(v0), v0);
166 sha256_update(&hash, sizeof(count), count);
168 sha256_digest(&hash, SHA256_DIGEST_SIZE, digest);
172 /* NOTE: The SHA-256 digest size equals the AES key size, so we need
173 * no "size adaptor". */
176 yarrow256_fast_reseed(struct yarrow256_ctx *ctx)
178 uint8_t digest[SHA256_DIGEST_SIZE];
182 fprintf(stderr, "yarrow256_fast_reseed\n");
185 /* We feed two block of output using the current key into the pool
186 * before emptying it. */
189 uint8_t blocks[AES_BLOCK_SIZE * 2];
191 yarrow_generate_block(ctx, blocks);
192 yarrow_generate_block(ctx, blocks + AES_BLOCK_SIZE);
193 sha256_update(&ctx->pools[YARROW_FAST], sizeof(blocks), blocks);
196 sha256_digest(&ctx->pools[YARROW_FAST], sizeof(digest), digest);
199 yarrow_iterate(digest);
201 aes256_set_encrypt_key(&ctx->key, digest);
204 /* Derive new counter value */
205 memset(ctx->counter, 0, sizeof(ctx->counter));
206 aes256_encrypt(&ctx->key, sizeof(ctx->counter), ctx->counter, ctx->counter);
208 /* Reset estimates. */
209 for (i = 0; i<ctx->nsources; i++)
210 ctx->sources[i].estimate[YARROW_FAST] = 0;
214 yarrow256_slow_reseed(struct yarrow256_ctx *ctx)
216 uint8_t digest[SHA256_DIGEST_SIZE];
220 fprintf(stderr, "yarrow256_slow_reseed\n");
223 /* Get digest of the slow pool*/
224 sha256_digest(&ctx->pools[YARROW_SLOW], sizeof(digest), digest);
226 /* Feed it into the fast pool */
227 sha256_update(&ctx->pools[YARROW_FAST], sizeof(digest), digest);
229 yarrow256_fast_reseed(ctx);
231 /* Reset estimates. */
232 for (i = 0; i<ctx->nsources; i++)
233 ctx->sources[i].estimate[YARROW_SLOW] = 0;
237 yarrow256_update(struct yarrow256_ctx *ctx,
238 unsigned source_index, unsigned entropy,
239 size_t length, const uint8_t *data)
241 enum yarrow_pool_id current;
242 struct yarrow_source *source;
244 assert(source_index < ctx->nsources);
247 /* Nothing happens */
250 source = &ctx->sources[source_index];
253 /* While seeding, use the slow pool */
254 current = YARROW_SLOW;
257 current = source->next;
258 source->next = !source->next;
261 sha256_update(&ctx->pools[current], length, data);
263 /* NOTE: We should be careful to avoid overflows in the estimates. */
264 if (source->estimate[current] < YARROW_MAX_ENTROPY)
266 if (entropy > YARROW_MAX_ENTROPY)
267 entropy = YARROW_MAX_ENTROPY;
269 if ( (length < (YARROW_MAX_ENTROPY / YARROW_MULTIPLIER))
270 && (entropy > YARROW_MULTIPLIER * length) )
271 entropy = YARROW_MULTIPLIER * length;
273 entropy += source->estimate[current];
274 if (entropy > YARROW_MAX_ENTROPY)
275 entropy = YARROW_MAX_ENTROPY;
277 source->estimate[current] = entropy;
280 /* Check for seed/reseed */
286 "yarrow256_update: source_index = %d,\n"
287 " fast pool estimate = %d\n",
288 source_index, source->estimate[YARROW_FAST]);
290 if (source->estimate[YARROW_FAST] >= YARROW_FAST_THRESHOLD)
292 yarrow256_fast_reseed(ctx);
300 if (!yarrow256_needed_sources(ctx))
302 yarrow256_slow_reseed(ctx);
314 yarrow_gate(struct yarrow256_ctx *ctx)
316 uint8_t key[AES256_KEY_SIZE];
319 for (i = 0; i < sizeof(key); i+= AES_BLOCK_SIZE)
320 yarrow_generate_block(ctx, key + i);
322 aes256_set_encrypt_key(&ctx->key, key);
326 yarrow256_random(struct yarrow256_ctx *ctx, size_t length, uint8_t *dst)
330 while (length >= AES_BLOCK_SIZE)
332 yarrow_generate_block(ctx, dst);
333 dst += AES_BLOCK_SIZE;
334 length -= AES_BLOCK_SIZE;
338 uint8_t buffer[AES_BLOCK_SIZE];
340 assert(length < AES_BLOCK_SIZE);
341 yarrow_generate_block(ctx, buffer);
342 memcpy(dst, buffer, length);
348 yarrow256_is_seeded(struct yarrow256_ctx *ctx)
354 yarrow256_needed_sources(struct yarrow256_ctx *ctx)
356 /* FIXME: This is somewhat inefficient. It would be better to
357 * either maintain the count, or do this loop only if the
358 * current source just crossed the threshold. */
361 for (i = k = 0; i < ctx->nsources; i++)
362 if (ctx->sources[i].estimate[YARROW_SLOW] >= YARROW_SLOW_THRESHOLD)
367 "yarrow256_needed_sources: source_index = %d,\n"
368 " slow pool estimate = %d,\n"
369 " number of sources above threshold = %d\n",
370 source_index, source->estimate[YARROW_SLOW], k);
373 return (k < YARROW_SLOW_K) ? (YARROW_SLOW_K - k) : 0;