@@ -523,7 +523,6 @@ EXPORT_SYMBOL(get_random_bytes);
 
static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes)
{
- bool large_request = nbytes > 256;
ssize_t ret = 0;
size_t len;
u32 chacha_state[CHACHA_STATE_WORDS];
@@ -540,15 +539,6 @@ static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes)
crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE);
 
do {
- if (large_request) {
- if (signal_pending(current)) {
- if (!ret)
- ret = -ERESTARTSYS;
- break;
- }
- cond_resched();
- }
-
chacha20_block(chacha_state, output);
if (unlikely(chacha_state[12] == 0))
++chacha_state[13];
@@ -562,6 +552,13 @@ static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes)
nbytes -= len;
buf += len;
ret += len;
+
+ BUILD_BUG_ON(PAGE_SIZE % CHACHA_BLOCK_SIZE != 0);
+ if (!(ret % PAGE_SIZE) && nbytes) {
+ if (signal_pending(current))
+ break;
+ cond_resched();
+ }
} while (nbytes);
 
memzero_explicit(chacha_state, sizeof(chacha_state));
In a651a7ba294c ("random: check for signal_pending() outside of
need_resched() check"), Jann pointed out that we previously were only
checking the TIF_NOTIFY_SIGNAL and TIF_SIGPENDING flags if the process
had TIF_NEED_RESCHED set, which meant that, in practice, super long
reads from /dev/[u]random would delay signal handling by a long time. I
tried this, and indeed I wasn't able to interrupt a /dev/urandom read
until after several megabytes had been read.

The bug he fixed has always been there, and so code that reads from
/dev/urandom without checking the return value of read() has mostly
worked for a long time, for most sizes, not just for sizes <= 256
bytes. Maybe it makes sense to keep that code working.

The reason the threshold was so small before (ignoring the fact that it
didn't work anyway) is likely that /dev/random used to block, and
blocking could last quite a long time while entropy was gathered. But
now a read is just a chacha20 call, which is extremely fast and
operates on pure data, without having to wait for some external event.
In that sense, /dev/[u]random is a lot more like /dev/zero.

Taking a page out of /dev/zero's read_zero() function: it always
returns at least one chunk, and then checks for signals after each
chunk, with chunks there being PAGE_SIZE in length. Let's do the same
for /dev/[u]random, and check for signals and cond_resched() after
every PAGE_SIZE of data. This makes the behavior more consistent with
expectations, and should mitigate the impact of Jann's fix for the
age-old signal check bug. (The BUILD_BUG_ON documents the assumption
the new check relies on: ret advances in CHACHA_BLOCK_SIZE steps, so it
only lands on PAGE_SIZE boundaries because PAGE_SIZE is a multiple of
CHACHA_BLOCK_SIZE.)

Cc: Jann Horn <jannh@google.com>
Cc: Theodore Ts'o <tytso@mit.edu>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
---
 drivers/char/random.c | 17 +++++++----------
 1 file changed, 7 insertions(+), 10 deletions(-)
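
For reference, the read_zero() pattern being copied here looks roughly
like the following. This is a paraphrase of drivers/char/mem.c rather
than a verbatim quote, and the details vary by kernel version:

	static ssize_t read_zero(struct file *file, char __user *buf,
				 size_t count, loff_t *ppos)
	{
		size_t cleared = 0;

		while (count) {
			size_t chunk = min_t(size_t, count, PAGE_SIZE);
			size_t left = clear_user(buf + cleared, chunk);

			if (unlikely(left)) {
				cleared += chunk - left;
				if (!cleared)
					return -EFAULT;
				break;
			}
			cleared += chunk;
			count -= chunk;

			/* At least one chunk has been returned by this
			 * point; only then may a pending signal cut the
			 * read short, with a cond_resched() otherwise. */
			if (signal_pending(current))
				break;
			cond_resched();
		}

		return cleared;
	}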
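
And for illustration, here is a minimal userspace sketch of the kind of
test described above. It is not part of the patch; with this change
applied, the read() below returns a short count (a multiple of
PAGE_SIZE) soon after the alarm fires, rather than only after the full
buffer has been filled:

	#include <fcntl.h>
	#include <signal.h>
	#include <stdio.h>
	#include <unistd.h>

	static void on_alarm(int sig)
	{
		(void)sig; /* having a handler is enough to interrupt read() */
	}

	int main(void)
	{
		/* 1 GiB: large enough that the read takes a noticeable while. */
		static char buf[1UL << 30];
		/* sigaction() without SA_RESTART, so read() is not restarted. */
		struct sigaction sa = { .sa_handler = on_alarm };
		int fd = open("/dev/urandom", O_RDONLY);
		ssize_t ret;

		if (fd < 0 || sigaction(SIGALRM, &sa, NULL) < 0)
			return 1;
		alarm(1); /* deliver SIGALRM one second into the read */
		ret = read(fd, buf, sizeof(buf));
		printf("read() returned %zd\n", ret);
		close(fd);
		return 0;
	}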