random: use a different mixing algorithm for add_device_randomness()

add_device_randomness()'s use of crng_fast_load() was highly
problematic. Some callers of add_device_randomness() can pass in a
large amount of static information. This would immediately promote
the crng_init state from 0 to 1, without really doing much to
initialize the primary_crng's internal state with something even
vaguely unpredictable.

Since we don't have the speed constraints of add_interrupt_randomness(),
we can do a better job mixing in whatever unpredictability a device
driver or architecture maintainer might see fit to give us, and do it
in a way which does not bump the crng_init_cnt variable.

Also, since add_device_randomness() doesn't bump any entropy
accounting in crng_init state 0, mix the device randomness into the
input_pool entropy pool as well. This is related to CVE-2018-1108.

Reported-by: Jann Horn <jannh@google.com>
Fixes: ee7998c50c ("random: do not ignore early device randomness")
Cc: stable@kernel.org # 4.13+
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
commit dc12baacb9
parent 43838a23a0
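
To make the failure mode concrete: a hypothetical caller along the lines
sketched below (the struct, field, and function names are invented for
illustration, not taken from the kernel tree) passes an identifier that is
unique to the machine but identical on every boot. Before this patch,
crng_fast_load() would have credited those bytes toward crng_init_cnt; with
crng_slow_load() they are mixed into the primary_crng state without
advancing crng_init_cnt.

/*
 * Illustration only -- not an in-tree driver.  "struct example_dev",
 * its "serial" field, and example_probe() are invented names.
 */
#include <linux/random.h>

struct example_dev {
	char serial[32];	/* burned-in serial number, constant across boots */
};

static int example_probe(struct example_dev *dev)
{
	/*
	 * Machine-unique but unvarying data: useful to mix in, but it
	 * must not be treated as progress toward CRNG initialization.
	 */
	add_device_randomness(dev->serial, sizeof(dev->serial));
	return 0;
}
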
@@ -787,6 +787,10 @@ static void crng_initialize(struct crng_state *crng)
 	crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
 }
 
+/*
+ * crng_fast_load() can be called by code in the interrupt service
+ * path. So we can't afford to dilly-dally.
+ */
 static int crng_fast_load(const char *cp, size_t len)
 {
 	unsigned long flags;
@@ -813,6 +817,51 @@ static int crng_fast_load(const char *cp, size_t len)
 	return 1;
 }
 
+/*
+ * crng_slow_load() is called by add_device_randomness, which has two
+ * attributes. (1) We can't trust the buffer passed to it is
+ * guaranteed to be unpredictable (so it might not have any entropy at
+ * all), and (2) it doesn't have the performance constraints of
+ * crng_fast_load().
+ *
+ * So we do something more comprehensive which is guaranteed to touch
+ * all of the primary_crng's state, and which uses a LFSR with a
+ * period of 255 as part of the mixing algorithm. Finally, we do
+ * *not* advance crng_init_cnt since buffer we may get may be something
+ * like a fixed DMI table (for example), which might very well be
+ * unique to the machine, but is otherwise unvarying.
+ */
+static int crng_slow_load(const char *cp, size_t len)
+{
+	unsigned long flags;
+	static unsigned char lfsr = 1;
+	unsigned char tmp;
+	unsigned i, max = CHACHA20_KEY_SIZE;
+	const char *src_buf = cp;
+	char *dest_buf = (char *) &primary_crng.state[4];
+
+	if (!spin_trylock_irqsave(&primary_crng.lock, flags))
+		return 0;
+	if (crng_init != 0) {
+		spin_unlock_irqrestore(&primary_crng.lock, flags);
+		return 0;
+	}
+	if (len > max)
+		max = len;
+
+	for (i = 0; i < max ; i++) {
+		tmp = lfsr;
+		lfsr >>= 1;
+		if (tmp & 1)
+			lfsr ^= 0xE1;
+		tmp = dest_buf[i % CHACHA20_KEY_SIZE];
+		dest_buf[i % CHACHA20_KEY_SIZE] ^= src_buf[i % len] ^ lfsr;
+		lfsr += (tmp << 3) | (tmp >> 5);
+	}
+	spin_unlock_irqrestore(&primary_crng.lock, flags);
+	return 1;
+}
+
 static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
 {
 	unsigned long flags;
@@ -981,10 +1030,8 @@ void add_device_randomness(const void *buf, unsigned int size)
 	unsigned long time = random_get_entropy() ^ jiffies;
 	unsigned long flags;
 
-	if (!crng_ready()) {
-		crng_fast_load(buf, size);
-		return;
-	}
+	if (!crng_ready() && size)
+		crng_slow_load(buf, size);
 
 	trace_add_device_randomness(size, _RET_IP_);
 	spin_lock_irqsave(&input_pool.lock, flags);
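
As a cross-check on the period-255 claim in the new comment, the short
user-space sketch below (not part of the patch) steps the same
shift-right/XOR-0xE1 update used in crng_slow_load(), with the
data-dependent "lfsr +=" perturbation left out, and counts the steps
needed to return to the starting state.

/* Standalone check of the Galois LFSR step from crng_slow_load(). */
#include <stdio.h>

int main(void)
{
	unsigned char lfsr = 1;
	unsigned int period = 0;

	do {
		unsigned char tmp = lfsr;

		lfsr >>= 1;
		if (tmp & 1)
			lfsr ^= 0xE1;
		period++;
	} while (lfsr != 1);

	/* Per the comment in the patch, this should report 255. */
	printf("lfsr period = %u\n", period);
	return 0;
}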