commit 84c37c168c

Change get_random_{int,long} to use the CRNG used by /dev/urandom and
getrandom(2). It's faster and arguably more secure than the cut-down MD5
we had been using. Also do some code cleanup.

-----BEGIN PGP SIGNATURE-----

iQEzBAABCAAdFiEEK2m5VNv+CHkogTfJ8vlZVpUNgaMFAljCENEACgkQ8vlZVpUN
gaP8lwf7BFtF52mKQcsVYxxZtRPH5dQwJCh3rohQ0WEJi5hHyZPZNz24dPHgc8Xl
GDq7v7o10dL3aeK6P51lYNcDb9xwYakCXm5sw46c5juca/VAVaxHb/kSDPSPUCNj
7n7mNSM61UhYAN10AXi9FGJo/Rdr0U5F1VfoWVYqaHYsItYLCjlSk6ob7vKxCPUd
458qaGBvK8luwQgFPQftJ20j81zXNuRe5JHjCQ2LtaRWM8kNI/wmyNSokD73BkZl
k8B7VqG4YpKp+4xgThp12GpXHrKB9kzQfmM4dZQQiGai9Ni59+iNqEcumv0Jb5MG
gY/m5Wc1Q45/5FosPXQYHzMPHrSJ3A==
=g1OD
-----END PGP SIGNATURE-----

Merge tag 'random_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/random

Pull random updates from Ted Ts'o:
 "Change get_random_{int,long} to use the CRNG used by /dev/urandom and
  getrandom(2). It's faster and arguably more secure than the cut-down
  MD5 we had been using. Also do some code cleanup"

* tag 'random_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/random:
  random: move random_min_urandom_seed into CONFIG_SYSCTL ifdef block
  random: convert get_random_int/long into get_random_u32/u64
  random: use chacha20 for get_random_int/long
  random: fix comment for unused random_min_urandom_seed
  random: remove variable limit
  random: remove stale urandom_init_wait
  random: remove stale maybe_reseed_primary_crng
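To make the interface change concrete, here is a minimal caller-side sketch. The helper name and its body are invented for illustration; only get_random_int(), get_random_long(), get_random_u32() and get_random_u64() come from this merge. Existing callers keep compiling because the old names become static inline wrappers (see the include/linux/random.h hunk below), while new code can ask for an explicit width.

#include <linux/random.h>

/* Hypothetical example helper, not part of the merge. */
static u32 example_pick_seed(void)
{
	/* old-style call, still valid: now routed through get_random_u32() */
	unsigned int legacy = get_random_int();

	/* new-style call: explicit width, served from a per-CPU ChaCha20 batch */
	u64 wide = get_random_u64();

	return legacy ^ (u32)wide;
}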
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -312,13 +312,6 @@ static int random_read_wakeup_bits = 64;
  */
 static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;
 
-/*
- * The minimum number of seconds between urandom pool reseeding. We
- * do this to limit the amount of entropy that can be drained from the
- * input pool even if there are heavy demands on /dev/urandom.
- */
-static int random_min_urandom_seed = 60;
-
 /*
  * Originally, we used a primitive polynomial of degree .poolwords
  * over GF(2). The taps for various sizes are defined below. They
@@ -409,7 +402,6 @@ static struct poolinfo {
  */
 static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
 static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
-static DECLARE_WAIT_QUEUE_HEAD(urandom_init_wait);
 static struct fasync_struct *fasync;
 
 static DEFINE_SPINLOCK(random_ready_list_lock);
@@ -467,7 +459,6 @@ struct entropy_store {
 	int entropy_count;
 	int entropy_total;
 	unsigned int initialized:1;
-	unsigned int limit:1;
 	unsigned int last_data_init:1;
 	__u8 last_data[EXTRACT_SIZE];
 };
@@ -485,7 +476,6 @@ static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
 static struct entropy_store input_pool = {
 	.poolinfo = &poolinfo_table[0],
 	.name = "input",
-	.limit = 1,
 	.lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
 	.pool = input_pool_data
 };
@@ -493,7 +483,6 @@ static struct entropy_store input_pool = {
 static struct entropy_store blocking_pool = {
 	.poolinfo = &poolinfo_table[1],
 	.name = "blocking",
-	.limit = 1,
 	.pull = &input_pool,
 	.lock = __SPIN_LOCK_UNLOCKED(blocking_pool.lock),
 	.pool = blocking_pool_data,
@@ -855,13 +844,6 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
 	spin_unlock_irqrestore(&primary_crng.lock, flags);
 }
 
-static inline void maybe_reseed_primary_crng(void)
-{
-	if (crng_init > 2 &&
-	    time_after(jiffies, primary_crng.init_time + CRNG_RESEED_INTERVAL))
-		crng_reseed(&primary_crng, &input_pool);
-}
-
 static inline void crng_wait_ready(void)
 {
 	wait_event_interruptible(crng_init_wait, crng_ready());
@@ -1220,15 +1202,6 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
 	    r->entropy_count > r->poolinfo->poolfracbits)
 		return;
 
-	if (r->limit == 0 && random_min_urandom_seed) {
-		unsigned long now = jiffies;
-
-		if (time_before(now,
-				r->last_pulled + random_min_urandom_seed * HZ))
-			return;
-		r->last_pulled = now;
-	}
-
 	_xfer_secondary_pool(r, nbytes);
 }
 
@@ -1236,8 +1209,6 @@ static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
 {
 	__u32	tmp[OUTPUT_POOL_WORDS];
 
-	/* For /dev/random's pool, always leave two wakeups' worth */
-	int rsvd_bytes = r->limit ? 0 : random_read_wakeup_bits / 4;
 	int bytes = nbytes;
 
 	/* pull at least as much as a wakeup */
@@ -1248,7 +1219,7 @@ static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
 	trace_xfer_secondary_pool(r->name, bytes * 8, nbytes * 8,
 				  ENTROPY_BITS(r), ENTROPY_BITS(r->pull));
 	bytes = extract_entropy(r->pull, tmp, bytes,
-				random_read_wakeup_bits / 8, rsvd_bytes);
+				random_read_wakeup_bits / 8, 0);
 	mix_pool_bytes(r, tmp, bytes);
 	credit_entropy_bits(r, bytes*8);
 }
@@ -1276,7 +1247,7 @@ static void push_to_pool(struct work_struct *work)
 static size_t account(struct entropy_store *r, size_t nbytes, int min,
 		      int reserved)
 {
-	int entropy_count, orig;
+	int entropy_count, orig, have_bytes;
 	size_t ibytes, nfrac;
 
 	BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);
@@ -1285,14 +1256,12 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
 retry:
 	entropy_count = orig = ACCESS_ONCE(r->entropy_count);
 	ibytes = nbytes;
-	/* If limited, never pull more than available */
-	if (r->limit) {
-		int have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
+	/* never pull more than available */
+	have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
 
-		if ((have_bytes -= reserved) < 0)
-			have_bytes = 0;
-		ibytes = min_t(size_t, ibytes, have_bytes);
-	}
+	if ((have_bytes -= reserved) < 0)
+		have_bytes = 0;
+	ibytes = min_t(size_t, ibytes, have_bytes);
 	if (ibytes < min)
 		ibytes = 0;
 
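The shift in the hunk above relies on the driver's fixed-point entropy bookkeeping: with ENTROPY_SHIFT == 3, entropy_count is kept in 1/8-bit units, so shifting right by ENTROPY_SHIFT + 3 converts it to whole bytes. A small stand-alone arithmetic check (ordinary user-space C, not kernel code):

#include <stdio.h>

#define ENTROPY_SHIFT 3	/* same value the driver uses */

int main(void)
{
	int entropy_count = 512 << ENTROPY_SHIFT;		/* 512 bits credited */
	int have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);	/* fractional bits -> bytes */

	printf("%d fractional units -> %d bytes\n", entropy_count, have_bytes);
	/* prints: 4096 fractional units -> 64 bytes */
	return 0;
}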
@@ -1912,6 +1881,7 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
 static int min_read_thresh = 8, min_write_thresh;
 static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
 static int max_write_thresh = INPUT_POOL_WORDS * 32;
+static int random_min_urandom_seed = 60;
 static char sysctl_bootid[16];
 
 /*
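The variable only survives because it is still exposed as the urandom_min_reseed_secs sysctl, so moving it inside the CONFIG_SYSCTL block keeps it next to its sole remaining user. For orientation, the kind of random_table[] entry that references it looks roughly like this (a sketch recalled for illustration, not part of this diff):

	{
		.procname	= "urandom_min_reseed_secs",
		.data		= &random_min_urandom_seed,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},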
@@ -2042,63 +2012,64 @@ struct ctl_table random_table[] = {
 };
 #endif	/* CONFIG_SYSCTL */
 
-static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
-
-int random_int_secret_init(void)
-{
-	get_random_bytes(random_int_secret, sizeof(random_int_secret));
-	return 0;
-}
-
-static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash)
-		__aligned(sizeof(unsigned long));
+struct batched_entropy {
+	union {
+		u64 entropy_u64[CHACHA20_BLOCK_SIZE / sizeof(u64)];
+		u32 entropy_u32[CHACHA20_BLOCK_SIZE / sizeof(u32)];
+	};
+	unsigned int position;
+};
 
 /*
- * Get a random word for internal kernel use only. Similar to urandom but
- * with the goal of minimal entropy pool depletion. As a result, the random
- * value is not cryptographically secure but for several uses the cost of
- * depleting entropy is too high
+ * Get a random word for internal kernel use only. The quality of the random
+ * number is either as good as RDRAND or as good as /dev/urandom, with the
+ * goal of being quite fast and not depleting entropy.
  */
-unsigned int get_random_int(void)
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
+u64 get_random_u64(void)
 {
-	__u32 *hash;
-	unsigned int ret;
+	u64 ret;
+	struct batched_entropy *batch;
 
+#if BITS_PER_LONG == 64
+	if (arch_get_random_long((unsigned long *)&ret))
+		return ret;
+#else
+	if (arch_get_random_long((unsigned long *)&ret) &&
+	    arch_get_random_long((unsigned long *)&ret + 1))
+		return ret;
+#endif
+
+	batch = &get_cpu_var(batched_entropy_u64);
+	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
+		extract_crng((u8 *)batch->entropy_u64);
+		batch->position = 0;
+	}
+	ret = batch->entropy_u64[batch->position++];
+	put_cpu_var(batched_entropy_u64);
+	return ret;
+}
+EXPORT_SYMBOL(get_random_u64);
+
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
+u32 get_random_u32(void)
+{
+	u32 ret;
+	struct batched_entropy *batch;
+
 	if (arch_get_random_int(&ret))
 		return ret;
 
-	hash = get_cpu_var(get_random_int_hash);
-
-	hash[0] += current->pid + jiffies + random_get_entropy();
-	md5_transform(hash, random_int_secret);
-	ret = hash[0];
-	put_cpu_var(get_random_int_hash);
-
+	batch = &get_cpu_var(batched_entropy_u32);
+	if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
+		extract_crng((u8 *)batch->entropy_u32);
+		batch->position = 0;
+	}
+	ret = batch->entropy_u32[batch->position++];
+	put_cpu_var(batched_entropy_u32);
 	return ret;
 }
-EXPORT_SYMBOL(get_random_int);
-
-/*
- * Same as get_random_int(), but returns unsigned long.
- */
-unsigned long get_random_long(void)
-{
-	__u32 *hash;
-	unsigned long ret;
-
-	if (arch_get_random_long(&ret))
-		return ret;
-
-	hash = get_cpu_var(get_random_int_hash);
-
-	hash[0] += current->pid + jiffies + random_get_entropy();
-	md5_transform(hash, random_int_secret);
-	ret = *(unsigned long *)hash;
-	put_cpu_var(get_random_int_hash);
-
-	return ret;
-}
-EXPORT_SYMBOL(get_random_long);
+EXPORT_SYMBOL(get_random_u32);
 
 /**
  * randomize_page - Generate a random, page aligned address
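The core idea of the new implementation above is visible in the batched_entropy struct: each CPU keeps one ChaCha20 output block and hands out 4- or 8-byte slices until the block is exhausted, so extract_crng() runs once per 64 bytes rather than once per call. A freestanding sketch of that pattern follows (plain user-space C, not kernel code; refill_block() is a dummy stand-in for extract_crng(), and all names are invented for the example):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 64	/* CHACHA20_BLOCK_SIZE in the driver */

struct batched_entropy {
	uint64_t entropy_u64[BLOCK_SIZE / sizeof(uint64_t)];
	unsigned int position;
};

/* Placeholder generator: NOT random, just something to fill the block. */
static void refill_block(uint8_t *out)
{
	static uint8_t counter;
	memset(out, counter++, BLOCK_SIZE);
}

static uint64_t batched_random_u64(struct batched_entropy *batch)
{
	/* Refill once the previous block is exhausted, then restart at slot 0. */
	if (batch->position % (BLOCK_SIZE / sizeof(uint64_t)) == 0) {
		refill_block((uint8_t *)batch->entropy_u64);
		batch->position = 0;
	}
	return batch->entropy_u64[batch->position++];
}

int main(void)
{
	struct batched_entropy batch = { .position = 0 };

	for (int i = 0; i < 10; i++)	/* second refill happens at i == 8 */
		printf("%016llx\n", (unsigned long long)batched_random_u64(&batch));
	return 0;
}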
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -37,14 +37,26 @@ extern void get_random_bytes(void *buf, int nbytes);
 extern int add_random_ready_callback(struct random_ready_callback *rdy);
 extern void del_random_ready_callback(struct random_ready_callback *rdy);
 extern void get_random_bytes_arch(void *buf, int nbytes);
-extern int random_int_secret_init(void);
 
 #ifndef MODULE
 extern const struct file_operations random_fops, urandom_fops;
 #endif
 
-unsigned int get_random_int(void);
-unsigned long get_random_long(void);
+u32 get_random_u32(void);
+u64 get_random_u64(void);
+static inline unsigned int get_random_int(void)
+{
+	return get_random_u32();
+}
+static inline unsigned long get_random_long(void)
+{
+#if BITS_PER_LONG == 64
+	return get_random_u64();
+#else
+	return get_random_u32();
+#endif
+}
+
 unsigned long randomize_page(unsigned long start, unsigned long range);
 
 u32 prandom_u32(void);
--- a/init/main.c
+++ b/init/main.c
@@ -882,7 +882,6 @@ static void __init do_basic_setup(void)
 	do_ctors();
 	usermodehelper_enable();
 	do_initcalls();
-	random_int_secret_init();
 }
 
 static void __init do_pre_smp_initcalls(void)