Merge remote-tracking branch 'random/dev'
author Thierry Reding <treding@nvidia.com>
Thu, 24 Oct 2013 13:02:45 +0000 (15:02 +0200)
committer Thierry Reding <treding@nvidia.com>
Thu, 24 Oct 2013 13:02:45 +0000 (15:02 +0200)
Conflicts:
drivers/char/random.c

drivers/char/random.c
include/trace/events/random.h

index 7a744d39175638a381835a8cadce7039f58ee3bf..7cfc146bf7ba44452994d055af2afac65515ec9e 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
 #include <linux/ptrace.h>
 #include <linux/kmemcheck.h>
 #include <linux/irq.h>
+#include <linux/workqueue.h>
 
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 /*
  * Configuration information
  */
-#define INPUT_POOL_WORDS 128
-#define OUTPUT_POOL_WORDS 32
-#define SEC_XFER_SIZE 512
-#define EXTRACT_SIZE 10
+#define INPUT_POOL_SHIFT       12
+#define INPUT_POOL_WORDS       (1 << (INPUT_POOL_SHIFT-5))
+#define OUTPUT_POOL_SHIFT      10
+#define OUTPUT_POOL_WORDS      (1 << (OUTPUT_POOL_SHIFT-5))
+#define SEC_XFER_SIZE          512
+#define EXTRACT_SIZE           10
 
 #define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))
 
+/*
+ * To allow fractional bits to be tracked, the entropy_count field is
+ * denominated in units of 1/8th bits.
+ *
+ * 2*(ENTROPY_SHIFT + log2(poolbits)) must be <= 31, or the multiply in
+ * credit_entropy_bits() needs to be 64 bits wide.
+ */
+#define ENTROPY_SHIFT 3
+#define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
+
 /*
  * The minimum number of bits of entropy before we wake up a read on
  * /dev/random.  Should be enough to do a significant reseed.
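
A minimal userspace sketch of the fractional-bit accounting introduced above: entropy_count is now kept in 1/8th-bit units, so credits shift left by ENTROPY_SHIFT and ENTROPY_BITS() shifts back down to whole bits. The program and the sample values are illustrative only, not part of the patch.

#include <stdio.h>

#define ENTROPY_SHIFT 3                         /* entropy_count is in 1/8th bits */
#define ENTROPY_BITS(count) ((count) >> ENTROPY_SHIFT)

int main(void)
{
        int entropy_count = 0;

        /* crediting 100 bits stores 100 << 3 == 800 fractional units */
        entropy_count += 100 << ENTROPY_SHIFT;

        /* a debit of 100 fractional units is only 12.5 bits */
        entropy_count -= 100;

        /* prints 87: 800 - 100 = 700 units, 700 >> 3 = 87 whole bits */
        printf("whole bits available: %d\n", ENTROPY_BITS(entropy_count));
        return 0;
}
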
@@ -287,108 +300,100 @@ static int random_read_wakeup_thresh = 64;
  * should wake up processes which are selecting or polling on write
  * access to /dev/random.
  */
-static int random_write_wakeup_thresh = 128;
+static int random_write_wakeup_thresh = 28 * OUTPUT_POOL_WORDS;
 
 /*
- * When the input pool goes over trickle_thresh, start dropping most
- * samples to avoid wasting CPU time and reduce lock contention.
+ * The minimum number of seconds between urandom pool reseeding.  We
+ * do this to limit the amount of entropy that can be drained from the
+ * input pool even if there are heavy demands on /dev/urandom.
  */
-
-static int trickle_thresh __read_mostly = INPUT_POOL_WORDS * 28;
-
-static DEFINE_PER_CPU(int, trickle_count);
+static int random_min_urandom_seed = 60;
 
 /*
- * A pool of size .poolwords is stirred with a primitive polynomial
- * of degree .poolwords over GF(2).  The taps for various sizes are
- * defined below.  They are chosen to be evenly spaced (minimum RMS
- * distance from evenly spaced; the numbers in the comments are a
- * scaled squared error sum) except for the last tap, which is 1 to
- * get the twisting happening as fast as possible.
+ * Originally, we used a primitive polynomial of degree .poolwords
+ * over GF(2).  The taps for various sizes are defined below.  They
+ * were chosen to be evenly spaced except for the last tap, which is 1
+ * to get the twisting happening as fast as possible.
+ *
+ * For the purposes of better mixing, we use the CRC-32 polynomial as
+ * well to make a (modified) twisted Generalized Feedback Shift
+ * Register.  (See M. Matsumoto & Y. Kurita, 1992.  Twisted GFSR
+ * generators.  ACM Transactions on Modeling and Computer Simulation
+ * 2(3):179-194.  Also see M. Matsumoto & Y. Kurita, 1994.  Twisted
+ * GFSR generators II.  ACM Transactions on Modeling and Computer
+ * Simulation 4:254-266)
+ *
+ * Thanks to Colin Plumb for suggesting this.
+ *
+ * The mixing operation is much less sensitive than the output hash,
+ * where we use SHA-1.  All that we want of the mixing operation is that
+ * it be a good non-cryptographic hash; i.e. it not produce collisions
+ * when fed "random" data of the sort we expect to see.  As long as
+ * the pool state differs for different inputs, we have preserved the
+ * input entropy and done a good job.  The fact that an intelligent
+ * attacker can construct inputs that will produce controlled
+ * alterations to the pool's state is not important because we don't
+ * consider such inputs to contribute any randomness.  The only
+ * property we need with respect to them is that the attacker can't
+ * increase his/her knowledge of the pool's state.  Since all
+ * additions are reversible (knowing the final state and the input,
+ * you can reconstruct the initial state), if an attacker has any
+ * uncertainty about the initial state, he/she can only shuffle that
+ * uncertainty about, but never cause any collisions (which would
+ * decrease the uncertainty).
+ *
+ * Our mixing functions were analyzed by Lacharme, Roeck, Strubel, and
+ * Videau in their paper, "The Linux Pseudorandom Number Generator
+ * Revisited" (see: http://eprint.iacr.org/2012/251.pdf).  In their
+ * paper, they point out that we are not using a true Twisted GFSR,
+ * since Matsumoto & Kurita used a trinomial feedback polynomial (that
+ * is, with only three taps, instead of the six that we are using).
+ * As a result, the polynomial is neither primitive nor
+ * irreducible, and hence does not have a maximal period over
+ * GF(2**32).  They suggest a slight change to the generator
+ * polynomial which makes the resulting TGFSR polynomial
+ * irreducible, a change we have adopted here.
  */
 static struct poolinfo {
-       int poolwords;
+       int poolbitshift, poolwords, poolbytes, poolbits, poolfracbits;
+#define S(x) ilog2(x)+5, (x), (x)*4, (x)*32, (x) << (ENTROPY_SHIFT+5)
        int tap1, tap2, tap3, tap4, tap5;
 } poolinfo_table[] = {
-       /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
-       { 128,  103,    76,     51,     25,     1 },
-       /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
-       { 32,   26,     20,     14,     7,      1 },
+       /* was: x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 */
+       /* x^128 + x^104 + x^76 + x^51 +x^25 + x + 1 */
+       { S(128),       104,    76,     51,     25,     1 },
+       /* was: x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 */
+       /* x^32 + x^26 + x^19 + x^14 + x^7 + x + 1 */
+       { S(32),        26,     19,     14,     7,      1 },
 #if 0
        /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1  -- 115 */
-       { 2048, 1638,   1231,   819,    411,    1 },
+       { S(2048),      1638,   1231,   819,    411,    1 },
 
        /* x^1024 + x^817 + x^615 + x^412 + x^204 + x + 1 -- 290 */
-       { 1024, 817,    615,    412,    204,    1 },
+       { S(1024),      817,    615,    412,    204,    1 },
 
        /* x^1024 + x^819 + x^616 + x^410 + x^207 + x^2 + 1 -- 115 */
-       { 1024, 819,    616,    410,    207,    2 },
+       { S(1024),      819,    616,    410,    207,    2 },
 
        /* x^512 + x^411 + x^308 + x^208 + x^104 + x + 1 -- 225 */
-       { 512,  411,    308,    208,    104,    1 },
+       { S(512),       411,    308,    208,    104,    1 },
 
        /* x^512 + x^409 + x^307 + x^206 + x^102 + x^2 + 1 -- 95 */
-       { 512,  409,    307,    206,    102,    2 },
+       { S(512),       409,    307,    206,    102,    2 },
        /* x^512 + x^409 + x^309 + x^205 + x^103 + x^2 + 1 -- 95 */
-       { 512,  409,    309,    205,    103,    2 },
+       { S(512),       409,    309,    205,    103,    2 },
 
        /* x^256 + x^205 + x^155 + x^101 + x^52 + x + 1 -- 125 */
-       { 256,  205,    155,    101,    52,     1 },
+       { S(256),       205,    155,    101,    52,     1 },
 
        /* x^128 + x^103 + x^78 + x^51 + x^27 + x^2 + 1 -- 70 */
-       { 128,  103,    78,     51,     27,     2 },
+       { S(128),       103,    78,     51,     27,     2 },
 
        /* x^64 + x^52 + x^39 + x^26 + x^14 + x + 1 -- 15 */
-       { 64,   52,     39,     26,     14,     1 },
+       { S(64),        52,     39,     26,     14,     1 },
 #endif
 };
 
-#define POOLBITS       poolwords*32
-#define POOLBYTES      poolwords*4
-
-/*
- * For the purposes of better mixing, we use the CRC-32 polynomial as
- * well to make a twisted Generalized Feedback Shift Reigster
- *
- * (See M. Matsumoto & Y. Kurita, 1992.  Twisted GFSR generators.  ACM
- * Transactions on Modeling and Computer Simulation 2(3):179-194.
- * Also see M. Matsumoto & Y. Kurita, 1994.  Twisted GFSR generators
- * II.  ACM Transactions on Mdeling and Computer Simulation 4:254-266)
- *
- * Thanks to Colin Plumb for suggesting this.
- *
- * We have not analyzed the resultant polynomial to prove it primitive;
- * in fact it almost certainly isn't.  Nonetheless, the irreducible factors
- * of a random large-degree polynomial over GF(2) are more than large enough
- * that periodicity is not a concern.
- *
- * The input hash is much less sensitive than the output hash.  All
- * that we want of it is that it be a good non-cryptographic hash;
- * i.e. it not produce collisions when fed "random" data of the sort
- * we expect to see.  As long as the pool state differs for different
- * inputs, we have preserved the input entropy and done a good job.
- * The fact that an intelligent attacker can construct inputs that
- * will produce controlled alterations to the pool's state is not
- * important because we don't consider such inputs to contribute any
- * randomness.  The only property we need with respect to them is that
- * the attacker can't increase his/her knowledge of the pool's state.
- * Since all additions are reversible (knowing the final state and the
- * input, you can reconstruct the initial state), if an attacker has
- * any uncertainty about the initial state, he/she can only shuffle
- * that uncertainty about, but never cause any collisions (which would
- * decrease the uncertainty).
- *
- * The chosen system lets the state of the pool be (essentially) the input
- * modulo the generator polymnomial.  Now, for random primitive polynomials,
- * this is a universal class of hash functions, meaning that the chance
- * of a collision is limited by the attacker's knowledge of the generator
- * polynomail, so if it is chosen at random, an attacker can never force
- * a collision.  Here, we use a fixed polynomial, but we *can* assume that
- * ###--> it is unknown to the processes generating the input entropy. <-###
- * Because of this important property, this is a good, collision-resistant
- * hash; hash collisions will occur no more often than chance.
- */
-
 /*
  * Static global variables
  */
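
A quick sketch of what the new S() initializer expands to for the 128-word input pool; ENTROPY_SHIFT is copied from the defines above, ilog2() is stood in with a compiler builtin, and the rest of the program is illustrative.

#include <stdio.h>

#define ENTROPY_SHIFT 3
#define ilog2(x) (31 - __builtin_clz(x))        /* stand-in for the kernel helper */

#define S(x) ilog2(x)+5, (x), (x)*4, (x)*32, (x) << (ENTROPY_SHIFT+5)

struct poolinfo {
        int poolbitshift, poolwords, poolbytes, poolbits, poolfracbits;
        int tap1, tap2, tap3, tap4, tap5;
};

int main(void)
{
        /* x^128 + x^104 + x^76 + x^51 + x^25 + x + 1 */
        struct poolinfo input = { S(128), 104, 76, 51, 25, 1 };

        /* prints: shift=12 words=128 bytes=512 bits=4096 fracbits=32768 */
        printf("shift=%d words=%d bytes=%d bits=%d fracbits=%d\n",
               input.poolbitshift, input.poolwords, input.poolbytes,
               input.poolbits, input.poolfracbits);
        return 0;
}
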
@@ -396,17 +401,6 @@ static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
 static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
 static struct fasync_struct *fasync;
 
-static bool debug;
-module_param(debug, bool, 0644);
-#define DEBUG_ENT(fmt, arg...) do { \
-       if (debug) \
-               printk(KERN_DEBUG "random %04d %04d %04d: " \
-               fmt,\
-               input_pool.entropy_count,\
-               blocking_pool.entropy_count,\
-               nonblocking_pool.entropy_count,\
-               ## arg); } while (0)
-
 /**********************************************************************
  *
  * OS independent entropy store.   Here are the functions which handle
@@ -417,23 +411,26 @@ module_param(debug, bool, 0644);
 struct entropy_store;
 struct entropy_store {
        /* read-only data: */
-       struct poolinfo *poolinfo;
+       const struct poolinfo *poolinfo;
        __u32 *pool;
        const char *name;
        struct entropy_store *pull;
-       int limit;
+       struct work_struct push_work;
 
        /* read-write data: */
+       unsigned long last_pulled;
        spinlock_t lock;
-       unsigned add_ptr;
-       unsigned input_rotate;
+       unsigned short add_ptr;
+       unsigned short input_rotate;
        int entropy_count;
        int entropy_total;
        unsigned int initialized:1;
-       bool last_data_init;
+       unsigned int limit:1;
+       unsigned int last_data_init:1;
        __u8 last_data[EXTRACT_SIZE];
 };
 
+static void push_to_pool(struct work_struct *work);
 static __u32 input_pool_data[INPUT_POOL_WORDS];
 static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
 static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS];
@@ -452,7 +449,9 @@ static struct entropy_store blocking_pool = {
        .limit = 1,
        .pull = &input_pool,
        .lock = __SPIN_LOCK_UNLOCKED(blocking_pool.lock),
-       .pool = blocking_pool_data
+       .pool = blocking_pool_data,
+       .push_work = __WORK_INITIALIZER(blocking_pool.push_work,
+                                       push_to_pool),
 };
 
 static struct entropy_store nonblocking_pool = {
@@ -460,7 +459,9 @@ static struct entropy_store nonblocking_pool = {
        .name = "nonblocking",
        .pull = &input_pool,
        .lock = __SPIN_LOCK_UNLOCKED(nonblocking_pool.lock),
-       .pool = nonblocking_pool_data
+       .pool = nonblocking_pool_data,
+       .push_work = __WORK_INITIALIZER(nonblocking_pool.push_work,
+                                       push_to_pool),
 };
 
 static __u32 const twist_table[8] = {
@@ -498,7 +499,7 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
 
        /* mix one byte at a time to simplify size handling and churn faster */
        while (nbytes--) {
-               w = rol32(*bytes++, input_rotate & 31);
+               w = rol32(*bytes++, input_rotate);
                i = (i - 1) & wordmask;
 
                /* XOR in the various taps */
@@ -518,7 +519,7 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
                 * rotation, so that successive passes spread the
                 * input bits across the pool evenly.
                 */
-               input_rotate += i ? 7 : 14;
+               input_rotate = (input_rotate + (i ? 7 : 14)) & 31;
        }
 
        ACCESS_ONCE(r->input_rotate) = input_rotate;
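
The loop above is the (modified) twisted GFSR described in the comment block earlier in this series. A standalone sketch of a single step for the 32-word pool, with the taps from the poolinfo table and the driver's twist table; locking, the add_ptr/input_rotate bookkeeping, and the real calling convention are all omitted, so treat it as an illustration rather than a drop-in _mix_pool_bytes().

#include <inttypes.h>
#include <stdio.h>
#include <string.h>

/* Twist table from the driver (multiples of the CRC-32 polynomial). */
static const uint32_t twist_table[8] = {
        0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
        0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };

static inline uint32_t rol32(uint32_t w, unsigned s)
{
        s &= 31;
        return s ? (w << s) | (w >> (32 - s)) : w;
}

/* One input byte mixed into a 32-word pool using the x^32 + x^26 + x^19 +
 * x^14 + x^7 + x + 1 taps. */
static void mix_one_byte(uint32_t pool[32], unsigned *i, unsigned rotate,
                         uint8_t byte)
{
        uint32_t w = rol32(byte, rotate);

        *i = (*i - 1) & 31;             /* walk backwards through the pool */
        w ^= pool[*i];
        w ^= pool[(*i + 26) & 31];
        w ^= pool[(*i + 19) & 31];
        w ^= pool[(*i + 14) & 31];
        w ^= pool[(*i +  7) & 31];
        w ^= pool[(*i +  1) & 31];

        /* twist: drop three bits, fold them back in via the CRC-32 table */
        pool[*i] = (w >> 3) ^ twist_table[w & 7];
}

int main(void)
{
        uint32_t pool[32];
        unsigned i = 0;
        const char *sample = "timestamp:12345";         /* made-up input */

        memset(pool, 0, sizeof(pool));
        for (const char *p = sample; *p; p++)
                mix_one_byte(pool, &i, 14, (uint8_t)*p);

        printf("pool[31] after mixing: %08" PRIx32 "\n", pool[31]);
        return 0;
}
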
@@ -561,62 +562,149 @@ struct fast_pool {
  * collector.  It's hardcoded for an 128 bit pool and assumes that any
  * locks that might be needed are taken by the caller.
  */
-static void fast_mix(struct fast_pool *f, const void *in, int nbytes)
+static void fast_mix(struct fast_pool *f, __u32 input[4])
 {
-       const char      *bytes = in;
        __u32           w;
-       unsigned        i = f->count;
        unsigned        input_rotate = f->rotate;
 
-       while (nbytes--) {
-               w = rol32(*bytes++, input_rotate & 31) ^ f->pool[i & 3] ^
-                       f->pool[(i + 1) & 3];
-               f->pool[i & 3] = (w >> 3) ^ twist_table[w & 7];
-               input_rotate += (i++ & 3) ? 7 : 14;
-       }
-       f->count = i;
+       w = rol32(input[0], input_rotate) ^ f->pool[0] ^ f->pool[3];
+       f->pool[0] = (w >> 3) ^ twist_table[w & 7];
+       input_rotate = (input_rotate + 14) & 31;
+       w = rol32(input[1], input_rotate) ^ f->pool[1] ^ f->pool[0];
+       f->pool[1] = (w >> 3) ^ twist_table[w & 7];
+       input_rotate = (input_rotate + 7) & 31;
+       w = rol32(input[2], input_rotate) ^ f->pool[2] ^ f->pool[1];
+       f->pool[2] = (w >> 3) ^ twist_table[w & 7];
+       input_rotate = (input_rotate + 7) & 31;
+       w = rol32(input[3], input_rotate) ^ f->pool[3] ^ f->pool[2];
+       f->pool[3] = (w >> 3) ^ twist_table[w & 7];
+       input_rotate = (input_rotate + 7) & 31;
+
        f->rotate = input_rotate;
+       f->count++;
 }
 
 /*
- * Credit (or debit) the entropy store with n bits of entropy
+ * Credit (or debit) the entropy store with n bits of entropy.
+ * Use credit_entropy_bits_safe() if the value comes from userspace
+ * or otherwise should be checked for extreme values.
  */
 static void credit_entropy_bits(struct entropy_store *r, int nbits)
 {
        int entropy_count, orig;
+       const int pool_size = r->poolinfo->poolfracbits;
+       int nfrac = nbits << ENTROPY_SHIFT;
 
        if (!nbits)
                return;
 
-       DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name);
 retry:
        entropy_count = orig = ACCESS_ONCE(r->entropy_count);
-       entropy_count += nbits;
+       if (nfrac < 0) {
+               /* Debit */
+               entropy_count += nfrac;
+       } else {
+               /*
+                * Credit: we have to account for the possibility of
+                * overwriting already present entropy.  Even in the
+                * ideal case of pure Shannon entropy, new contributions
+                * approach the full value asymptotically:
+                *
+                * entropy <- entropy + (pool_size - entropy) *
+                *      (1 - exp(-add_entropy/pool_size))
+                *
+                * If add_entropy <= pool_size/2, then
+                * (1 - exp(-add_entropy/pool_size)) >=
+                *    (add_entropy/pool_size)*0.7869...
+                * so we can approximate the exponential with
+                * 3/4*add_entropy/pool_size and still be on the
+                * safe side by adding at most pool_size/2 at a time.
+                *
+                * The use of pool_size-2 in the while statement is to
+                * prevent rounding artifacts from making the loop
+                * arbitrarily long; this limits the loop to log2(pool_size)*2
+                * turns no matter how large nbits is.
+                */
+               int pnfrac = nfrac;
+               const int s = r->poolinfo->poolbitshift + ENTROPY_SHIFT + 2;
+               /* The +2 corresponds to the /4 in the denominator */
+
+               do {
+                       unsigned int anfrac = min(pnfrac, pool_size/2);
+                       unsigned int add =
+                               ((pool_size - entropy_count)*anfrac*3) >> s;
+
+                       entropy_count += add;
+                       pnfrac -= anfrac;
+               } while (unlikely(entropy_count < pool_size-2 && pnfrac));
+       }
 
        if (entropy_count < 0) {
-               DEBUG_ENT("negative entropy/overflow\n");
+               pr_warn("random: negative entropy/overflow: pool %s count %d\n",
+                       r->name, entropy_count);
+               WARN_ON(1);
                entropy_count = 0;
-       } else if (entropy_count > r->poolinfo->POOLBITS)
-               entropy_count = r->poolinfo->POOLBITS;
+       } else if (entropy_count > pool_size)
+               entropy_count = pool_size;
        if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
                goto retry;
 
+       r->entropy_total += nbits;
        if (!r->initialized && nbits > 0) {
-               r->entropy_total += nbits;
-               if (r->entropy_total > 128)
+               if (r->entropy_total > 128) {
                        r->initialized = 1;
+                       r->entropy_total = 0;
+               }
        }
 
-       trace_credit_entropy_bits(r->name, nbits, entropy_count,
+       trace_credit_entropy_bits(r->name, nbits,
+                                 entropy_count >> ENTROPY_SHIFT,
                                  r->entropy_total, _RET_IP_);
 
-       /* should we wake readers? */
-       if (r == &input_pool && entropy_count >= random_read_wakeup_thresh) {
-               wake_up_interruptible(&random_read_wait);
-               kill_fasync(&fasync, SIGIO, POLL_IN);
+       if (r == &input_pool) {
+               int entropy_bytes = entropy_count >> ENTROPY_SHIFT;
+
+               /* should we wake readers? */
+               if (entropy_bytes >= random_read_wakeup_thresh) {
+                       wake_up_interruptible(&random_read_wait);
+                       kill_fasync(&fasync, SIGIO, POLL_IN);
+               }
+               /* If the input pool is getting full, send some
+                * entropy to the two output pools, flipping back and
+                * forth between them, until the output pools are 75%
+                * full.
+                */
+               if (entropy_bytes > random_write_wakeup_thresh &&
+                   r->initialized &&
+                   r->entropy_total >= 2*random_read_wakeup_thresh) {
+                       static struct entropy_store *last = &blocking_pool;
+                       struct entropy_store *other = &blocking_pool;
+
+                       if (last == &blocking_pool)
+                               other = &nonblocking_pool;
+                       if (other->entropy_count <=
+                           3 * other->poolinfo->poolfracbits / 4)
+                               last = other;
+                       if (last->entropy_count <=
+                           3 * last->poolinfo->poolfracbits / 4) {
+                               schedule_work(&last->push_work);
+                               r->entropy_total = 0;
+                       }
+               }
        }
 }
 
+static void credit_entropy_bits_safe(struct entropy_store *r, int nbits)
+{
+       const int nbits_max = (int)(~0U >> (ENTROPY_SHIFT + 1));
+
+       /* Cap the value to avoid overflows */
+       nbits = min(nbits,  nbits_max);
+       nbits = max(nbits, -nbits_max);
+
+       credit_entropy_bits(r, nbits);
+}
+
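
The crediting path above approximates the Shannon-style update entropy <- entropy + (pool_size - entropy) * (1 - exp(-add/pool_size)) with the 3/4 linear factor explained in the comment. A standalone sketch that replays that arithmetic for the 4096-bit input pool and compares it with the exact exponential; the pool constants are copied from the defines above, the rest is illustrative (build with -lm).

#include <math.h>
#include <stdio.h>

#define ENTROPY_SHIFT   3
#define POOL_BITS       4096                    /* input pool */
#define POOL_FRACBITS   (POOL_BITS << ENTROPY_SHIFT)
#define POOL_BITSHIFT   12                      /* log2(POOL_BITS) */

/* The 3/4 linear approximation used by credit_entropy_bits() above. */
static int credit(int entropy_count, int nbits)
{
        int nfrac = nbits << ENTROPY_SHIFT;
        const int s = POOL_BITSHIFT + ENTROPY_SHIFT + 2;        /* +2 is the /4 */

        do {
                unsigned int anfrac = nfrac < POOL_FRACBITS / 2 ?
                                      (unsigned int)nfrac : POOL_FRACBITS / 2;
                unsigned int add =
                        ((POOL_FRACBITS - entropy_count) * anfrac * 3) >> s;

                entropy_count += add;
                nfrac -= anfrac;
        } while (entropy_count < POOL_FRACBITS - 2 && nfrac);

        return entropy_count;
}

int main(void)
{
        int count = 0;

        /* credit 512 bits eight times; the approximation stays below the
         * exact asymptotic value, which is the "safe side" the comment wants */
        for (int i = 1; i <= 8; i++) {
                count = credit(count, 512);
                double exact = POOL_BITS * (1.0 - exp(-512.0 * i / POOL_BITS));
                printf("after %d credits: approx %4d bits, exact %7.1f bits\n",
                       i, count >> ENTROPY_SHIFT, exact);
        }
        return 0;
}
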
 /*********************************************************************
  *
  * Entropy input management
@@ -641,11 +729,18 @@ struct timer_rand_state {
 void add_device_randomness(const void *buf, unsigned int size)
 {
        unsigned long time = random_get_entropy() ^ jiffies;
+       unsigned long flags;
 
-       mix_pool_bytes(&input_pool, buf, size, NULL);
-       mix_pool_bytes(&input_pool, &time, sizeof(time), NULL);
-       mix_pool_bytes(&nonblocking_pool, buf, size, NULL);
-       mix_pool_bytes(&nonblocking_pool, &time, sizeof(time), NULL);
+       trace_add_device_randomness(size, _RET_IP_);
+       spin_lock_irqsave(&input_pool.lock, flags);
+       _mix_pool_bytes(&input_pool, buf, size, NULL);
+       _mix_pool_bytes(&input_pool, &time, sizeof(time), NULL);
+       spin_unlock_irqrestore(&input_pool.lock, flags);
+
+       spin_lock_irqsave(&nonblocking_pool.lock, flags);
+       _mix_pool_bytes(&nonblocking_pool, buf, size, NULL);
+       _mix_pool_bytes(&nonblocking_pool, &time, sizeof(time), NULL);
+       spin_unlock_irqrestore(&nonblocking_pool.lock, flags);
 }
 EXPORT_SYMBOL(add_device_randomness);
 
@@ -671,10 +766,6 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
        long delta, delta2, delta3;
 
        preempt_disable();
-       /* if over the trickle threshold, use only 1 in 4096 samples */
-       if (input_pool.entropy_count > trickle_thresh &&
-           ((__this_cpu_inc_return(trickle_count) - 1) & 0xfff))
-               goto out;
 
        sample.jiffies = jiffies;
        sample.cycles = random_get_entropy();
@@ -716,7 +807,6 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
                credit_entropy_bits(&input_pool,
                                    min_t(int, fls(delta>>1), 11));
        }
-out:
        preempt_enable();
 }
 
@@ -729,10 +819,10 @@ void add_input_randomness(unsigned int type, unsigned int code,
        if (value == last_value)
                return;
 
-       DEBUG_ENT("input event\n");
        last_value = value;
        add_timer_randomness(&input_timer_state,
                             (type << 4) ^ code ^ (code >> 4) ^ value);
+       trace_add_input_randomness(ENTROPY_BITS(&input_pool));
 }
 EXPORT_SYMBOL_GPL(add_input_randomness);
 
@@ -744,20 +834,21 @@ void add_interrupt_randomness(int irq, int irq_flags)
        struct fast_pool        *fast_pool = &__get_cpu_var(irq_randomness);
        struct pt_regs          *regs = get_irq_regs();
        unsigned long           now = jiffies;
-       __u32                   input[4], cycles = random_get_entropy();
-
-       input[0] = cycles ^ jiffies;
-       input[1] = irq;
-       if (regs) {
-               __u64 ip = instruction_pointer(regs);
-               input[2] = ip;
-               input[3] = ip >> 32;
-       }
+       cycles_t                cycles = random_get_entropy();
+       __u32                   input[4], c_high, j_high;
+       __u64                   ip;
 
-       fast_mix(fast_pool, input, sizeof(input));
+       c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
+       j_high = (sizeof(now) > 4) ? now >> 32 : 0;
+       input[0] = cycles ^ j_high ^ irq;
+       input[1] = now ^ c_high;
+       ip = regs ? instruction_pointer(regs) : _RET_IP_;
+       input[2] = ip;
+       input[3] = ip >> 32;
 
-       if ((fast_pool->count & 1023) &&
-           !time_after(now, fast_pool->last + HZ))
+       fast_mix(fast_pool, input);
+
+       if ((fast_pool->count & 63) && !time_after(now, fast_pool->last + HZ))
                return;
 
        fast_pool->last = now;
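
A small sketch of the new input[] packing for fast_mix(): on 64-bit builds the high halves of the cycle counter and jiffies are folded into the low words, while on 32-bit builds c_high/j_high collapse to zero. The fixed-width types and sample values below are illustrative; in the driver cycles_t and unsigned long vary with the architecture.

#include <inttypes.h>
#include <stdio.h>

static void pack_irq_input(uint32_t input[4], uint64_t cycles,
                           unsigned long now, int irq, uint64_t ip)
{
        uint32_t c_high = (sizeof(cycles) > 4) ? (uint32_t)(cycles >> 32) : 0;
        uint32_t j_high = (sizeof(now) > 4) ? (uint32_t)((uint64_t)now >> 32) : 0;

        input[0] = (uint32_t)cycles ^ j_high ^ (uint32_t)irq;
        input[1] = (uint32_t)now ^ c_high;
        input[2] = (uint32_t)ip;
        input[3] = (uint32_t)(ip >> 32);
}

int main(void)
{
        uint32_t input[4];

        pack_irq_input(input, 0x123456789abcdef0ULL,
                       (unsigned long)0x1122334455667788ULL, 29,
                       0xffffffff81234567ULL);
        printf("%08" PRIx32 " %08" PRIx32 " %08" PRIx32 " %08" PRIx32 "\n",
               input[0], input[1], input[2], input[3]);
        return 0;
}
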
@@ -786,10 +877,8 @@ void add_disk_randomness(struct gendisk *disk)
        if (!disk || !disk->random)
                return;
        /* first major is 1, so we get >= 0x200 here */
-       DEBUG_ENT("disk event %d:%d\n",
-                 MAJOR(disk_devt(disk)), MINOR(disk_devt(disk)));
-
        add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
+       trace_add_disk_randomness(disk_devt(disk), ENTROPY_BITS(&input_pool));
 }
 #endif
 
@@ -807,30 +896,58 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
  * from the primary pool to the secondary extraction pool. We make
  * sure we pull enough for a 'catastrophic reseed'.
  */
+static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes);
 static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
 {
-       __u32   tmp[OUTPUT_POOL_WORDS];
+       if (r->limit == 0 && random_min_urandom_seed) {
+               unsigned long now = jiffies;
 
-       if (r->pull && r->entropy_count < nbytes * 8 &&
-           r->entropy_count < r->poolinfo->POOLBITS) {
-               /* If we're limited, always leave two wakeup worth's BITS */
-               int rsvd = r->limit ? 0 : random_read_wakeup_thresh/4;
-               int bytes = nbytes;
-
-               /* pull at least as many as BYTES as wakeup BITS */
-               bytes = max_t(int, bytes, random_read_wakeup_thresh / 8);
-               /* but never more than the buffer size */
-               bytes = min_t(int, bytes, sizeof(tmp));
-
-               DEBUG_ENT("going to reseed %s with %d bits "
-                         "(%zu of %d requested)\n",
-                         r->name, bytes * 8, nbytes * 8, r->entropy_count);
-
-               bytes = extract_entropy(r->pull, tmp, bytes,
-                                       random_read_wakeup_thresh / 8, rsvd);
-               mix_pool_bytes(r, tmp, bytes, NULL);
-               credit_entropy_bits(r, bytes*8);
+               if (time_before(now,
+                               r->last_pulled + random_min_urandom_seed * HZ))
+                       return;
+               r->last_pulled = now;
        }
+       if (r->pull &&
+           r->entropy_count < (nbytes << (ENTROPY_SHIFT + 3)) &&
+           r->entropy_count < r->poolinfo->poolfracbits)
+               _xfer_secondary_pool(r, nbytes);
+}
+
+static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
+{
+       __u32   tmp[OUTPUT_POOL_WORDS];
+
+       /* For /dev/random's pool, always leave two wakeups' worth of bits */
+       int rsvd = r->limit ? 0 : random_read_wakeup_thresh/4;
+       int bytes = nbytes;
+
+       /* pull at least as many bytes as the wakeup threshold requires */
+       bytes = max_t(int, bytes, random_read_wakeup_thresh / 8);
+       /* but never more than the buffer size */
+       bytes = min_t(int, bytes, sizeof(tmp));
+
+       trace_xfer_secondary_pool(r->name, bytes * 8, nbytes * 8,
+                                 ENTROPY_BITS(r), ENTROPY_BITS(r->pull));
+       bytes = extract_entropy(r->pull, tmp, bytes,
+                               random_read_wakeup_thresh / 8, rsvd);
+       mix_pool_bytes(r, tmp, bytes, NULL);
+       credit_entropy_bits(r, bytes*8);
+}
+
+/*
+ * Used as a workqueue function so that when the input pool is getting
+ * full, we can "spill over" some entropy to the output pools.  That
+ * way the output pools can store some of the excess entropy instead
+ * of letting it go to waste.
+ */
+static void push_to_pool(struct work_struct *work)
+{
+       struct entropy_store *r = container_of(work, struct entropy_store,
+                                             push_work);
+       BUG_ON(!r);
+       _xfer_secondary_pool(r, random_read_wakeup_thresh/8);
+       trace_push_to_pool(r->name, r->entropy_count >> ENTROPY_SHIFT,
+                          r->pull->entropy_count >> ENTROPY_SHIFT);
 }
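
The gate at the top of xfer_secondary_pool() above rate-limits how often /dev/urandom's pool may pull a reseed from the input pool. A userspace sketch of the same jiffies arithmetic; HZ, the tick values, and the helper names are illustrative.

#include <stdbool.h>
#include <stdio.h>

#define HZ 100                                  /* illustrative tick rate */

/* Wraparound-safe stand-in for the kernel's time_before(). */
static bool time_before(unsigned long a, unsigned long b)
{
        return (long)(a - b) < 0;
}

/* Allow a pull only if min_seed_secs have elapsed since the last one. */
static bool may_pull(unsigned long now, unsigned long *last_pulled,
                     int min_seed_secs)
{
        if (time_before(now, *last_pulled + min_seed_secs * HZ))
                return false;
        *last_pulled = now;
        return true;
}

int main(void)
{
        unsigned long last = 1000;

        printf("%d\n", may_pull(1000 + 30 * HZ, &last, 60));    /* 0: too soon */
        printf("%d\n", may_pull(1000 + 61 * HZ, &last, 60));    /* 1: allowed */
        return 0;
}
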
 
 /*
@@ -850,50 +967,48 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
 {
        unsigned long flags;
        int wakeup_write = 0;
+       int have_bytes;
+       int entropy_count, orig;
+       size_t ibytes;
 
        /* Hold lock while accounting */
        spin_lock_irqsave(&r->lock, flags);
 
-       BUG_ON(r->entropy_count > r->poolinfo->POOLBITS);
-       DEBUG_ENT("trying to extract %zu bits from %s\n",
-                 nbytes * 8, r->name);
+       BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);
 
        /* Can we pull enough? */
-       if (r->entropy_count / 8 < min + reserved) {
-               nbytes = 0;
-       } else {
-               int entropy_count, orig;
 retry:
-               entropy_count = orig = ACCESS_ONCE(r->entropy_count);
+       entropy_count = orig = ACCESS_ONCE(r->entropy_count);
+       have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
+       ibytes = nbytes;
+       if (have_bytes < min + reserved) {
+               ibytes = 0;
+       } else {
                /* If limited, never pull more than available */
-               if (r->limit && nbytes + reserved >= entropy_count / 8)
-                       nbytes = entropy_count/8 - reserved;
-
-               if (entropy_count / 8 >= nbytes + reserved) {
-                       entropy_count -= nbytes*8;
-                       if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
-                               goto retry;
-               } else {
-                       entropy_count = reserved;
-                       if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
-                               goto retry;
-               }
+               if (r->limit && ibytes + reserved >= have_bytes)
+                       ibytes = have_bytes - reserved;
 
-               if (entropy_count < random_write_wakeup_thresh)
-                       wakeup_write = 1;
-       }
+               if (have_bytes >= ibytes + reserved)
+                       entropy_count -= ibytes << (ENTROPY_SHIFT + 3);
+               else
+                       entropy_count = reserved << (ENTROPY_SHIFT + 3);
 
-       DEBUG_ENT("debiting %zu entropy credits from %s%s\n",
-                 nbytes * 8, r->name, r->limit ? "" : " (unlimited)");
+               if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
+                       goto retry;
 
+               if ((r->entropy_count >> ENTROPY_SHIFT)
+                   < random_write_wakeup_thresh)
+                       wakeup_write = 1;
+       }
        spin_unlock_irqrestore(&r->lock, flags);
 
+       trace_debit_entropy(r->name, 8 * ibytes);
        if (wakeup_write) {
                wake_up_interruptible(&random_write_wait);
                kill_fasync(&fasync, SIGIO, POLL_OUT);
        }
 
-       return nbytes;
+       return ibytes;
 }
 
 static void extract_buf(struct entropy_store *r, __u8 *out)
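
Both credit_entropy_bits() and the reworked account() update entropy_count optimistically: snapshot the counter, compute the new value, and cmpxchg it back, retrying if another CPU changed it in the meantime. A userspace sketch of the debit side, with the kernel primitives stood in by GCC builtins and the numbers purely illustrative.

#include <stdio.h>

/* Userspace stand-ins for the kernel primitives. */
#define cmpxchg(ptr, old, new)  __sync_val_compare_and_swap(ptr, old, new)
#define ACCESS_ONCE(x)          (*(volatile typeof(x) *)&(x))

static int entropy_count;               /* shared counter, in 1/8th-bit units */

/* Debit nbytes' worth of entropy, or nothing if not enough is available. */
static int debit_bytes(int nbytes)
{
        int entropy, orig;

retry:
        entropy = orig = ACCESS_ONCE(entropy_count);
        if ((entropy >> 6) < nbytes)            /* >> (ENTROPY_SHIFT + 3) */
                return 0;
        entropy -= nbytes << 6;
        if (cmpxchg(&entropy_count, orig, entropy) != orig)
                goto retry;                     /* lost a race, recompute */
        return nbytes;
}

int main(void)
{
        entropy_count = 128 << 6;               /* 128 bytes' worth */
        printf("pulled %d bytes\n", debit_bytes(48));   /* 48 */
        printf("pulled %d bytes\n", debit_bytes(100));  /* 0: only 80 left */
        return 0;
}
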
@@ -901,7 +1016,7 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
        int i;
        union {
                __u32 w[5];
-               unsigned long l[LONGS(EXTRACT_SIZE)];
+               unsigned long l[LONGS(20)];
        } hash;
        __u32 workspace[SHA_WORKSPACE_WORDS];
        __u8 extract[64];
@@ -913,6 +1028,17 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
        for (i = 0; i < r->poolinfo->poolwords; i += 16)
                sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);
 
+       /*
+        * If we have an architectural hardware random number
+        * generator, mix that in, too.
+        */
+       for (i = 0; i < LONGS(20); i++) {
+               unsigned long v;
+               if (!arch_get_random_long(&v))
+                       break;
+               hash.l[i] ^= v;
+       }
+
        /*
         * We mix the hash back into the pool to prevent backtracking
         * attacks (where the attacker knows the state of the pool
@@ -942,17 +1068,6 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
        hash.w[1] ^= hash.w[4];
        hash.w[2] ^= rol32(hash.w[2], 16);
 
-       /*
-        * If we have a architectural hardware random number
-        * generator, mix that in, too.
-        */
-       for (i = 0; i < LONGS(EXTRACT_SIZE); i++) {
-               unsigned long v;
-               if (!arch_get_random_long(&v))
-                       break;
-               hash.l[i] ^= v;
-       }
-
        memcpy(out, &hash, EXTRACT_SIZE);
        memset(&hash, 0, sizeof(hash));
 }
@@ -968,10 +1083,10 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
        if (fips_enabled) {
                spin_lock_irqsave(&r->lock, flags);
                if (!r->last_data_init) {
-                       r->last_data_init = true;
+                       r->last_data_init = 1;
                        spin_unlock_irqrestore(&r->lock, flags);
                        trace_extract_entropy(r->name, EXTRACT_SIZE,
-                                             r->entropy_count, _RET_IP_);
+                                             ENTROPY_BITS(r), _RET_IP_);
                        xfer_secondary_pool(r, EXTRACT_SIZE);
                        extract_buf(r, tmp);
                        spin_lock_irqsave(&r->lock, flags);
@@ -980,7 +1095,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
                spin_unlock_irqrestore(&r->lock, flags);
        }
 
-       trace_extract_entropy(r->name, nbytes, r->entropy_count, _RET_IP_);
+       trace_extract_entropy(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
        xfer_secondary_pool(r, nbytes);
        nbytes = account(r, nbytes, min, reserved);
 
@@ -1013,7 +1128,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
        ssize_t ret = 0, i;
        __u8 tmp[EXTRACT_SIZE];
 
-       trace_extract_entropy_user(r->name, nbytes, r->entropy_count, _RET_IP_);
+       trace_extract_entropy_user(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
        xfer_secondary_pool(r, nbytes);
        nbytes = account(r, nbytes, 0, 0);
 
@@ -1053,6 +1168,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
  */
 void get_random_bytes(void *buf, int nbytes)
 {
+       trace_get_random_bytes(nbytes, _RET_IP_);
        extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0);
 }
 EXPORT_SYMBOL(get_random_bytes);
@@ -1071,7 +1187,7 @@ void get_random_bytes_arch(void *buf, int nbytes)
 {
        char *p = buf;
 
-       trace_get_random_bytes(nbytes, _RET_IP_);
+       trace_get_random_bytes_arch(nbytes, _RET_IP_);
        while (nbytes) {
                unsigned long v;
                int chunk = min(nbytes, (int)sizeof(unsigned long));
@@ -1107,9 +1223,10 @@ static void init_std_data(struct entropy_store *r)
 
        r->entropy_count = 0;
        r->entropy_total = 0;
-       r->last_data_init = false;
+       r->last_data_init = 0;
+       r->last_pulled = jiffies;
        mix_pool_bytes(r, &now, sizeof(now), NULL);
-       for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof(rv)) {
+       for (i = r->poolinfo->poolbytes; i > 0; i -= sizeof(rv)) {
                if (!arch_get_random_long(&rv))
                        break;
                mix_pool_bytes(r, &rv, sizeof(rv), NULL);
@@ -1164,8 +1281,6 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
                if (n > SEC_XFER_SIZE)
                        n = SEC_XFER_SIZE;
 
-               DEBUG_ENT("reading %zu bits\n", n*8);
-
                n = extract_entropy_user(&blocking_pool, buf, n);
 
                if (n < 0) {
@@ -1173,8 +1288,9 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
                        break;
                }
 
-               DEBUG_ENT("read got %zd bits (%zd still needed)\n",
-                         n*8, (nbytes-n)*8);
+               trace_random_read(n*8, (nbytes-n)*8,
+                                 ENTROPY_BITS(&blocking_pool),
+                                 ENTROPY_BITS(&input_pool));
 
                if (n == 0) {
                        if (file->f_flags & O_NONBLOCK) {
@@ -1182,13 +1298,9 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
                                break;
                        }
 
-                       DEBUG_ENT("sleeping?\n");
-
                        wait_event_interruptible(random_read_wait,
-                               input_pool.entropy_count >=
-                                                random_read_wakeup_thresh);
-
-                       DEBUG_ENT("awake\n");
+                               ENTROPY_BITS(&input_pool) >=
+                               random_read_wakeup_thresh);
 
                        if (signal_pending(current)) {
                                retval = -ERESTARTSYS;
@@ -1211,7 +1323,11 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
 static ssize_t
 urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
 {
-       return extract_entropy_user(&nonblocking_pool, buf, nbytes);
+       int ret = extract_entropy_user(&nonblocking_pool, buf, nbytes);
+
+       trace_urandom_read(8 * nbytes, ENTROPY_BITS(&nonblocking_pool),
+                          ENTROPY_BITS(&input_pool));
+       return ret;
 }
 
 static unsigned int
@@ -1222,9 +1338,9 @@ random_poll(struct file *file, poll_table * wait)
        poll_wait(file, &random_read_wait, wait);
        poll_wait(file, &random_write_wait, wait);
        mask = 0;
-       if (input_pool.entropy_count >= random_read_wakeup_thresh)
+       if (ENTROPY_BITS(&input_pool) >= random_read_wakeup_thresh)
                mask |= POLLIN | POLLRDNORM;
-       if (input_pool.entropy_count < random_write_wakeup_thresh)
+       if (ENTROPY_BITS(&input_pool) < random_write_wakeup_thresh)
                mask |= POLLOUT | POLLWRNORM;
        return mask;
 }
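
random_poll() above reports POLLIN once the input pool's estimated entropy reaches random_read_wakeup_thresh bits, and POLLOUT once it falls below random_write_wakeup_thresh. A minimal consumer-side sketch of the readable half:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        struct pollfd pfd;
        int fd = open("/dev/random", O_RDONLY);

        if (fd < 0) {
                perror("open /dev/random");
                return 1;
        }
        pfd.fd = fd;
        pfd.events = POLLIN;

        /* blocks until the input pool holds at least
         * random_read_wakeup_thresh bits of estimated entropy */
        if (poll(&pfd, 1, -1) == 1 && (pfd.revents & POLLIN))
                printf("/dev/random is readable\n");

        close(fd);
        return 0;
}
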
@@ -1275,7 +1391,8 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
        switch (cmd) {
        case RNDGETENTCNT:
                /* inherently racy, no point locking */
-               if (put_user(input_pool.entropy_count, p))
+               ent_count = ENTROPY_BITS(&input_pool);
+               if (put_user(ent_count, p))
                        return -EFAULT;
                return 0;
        case RNDADDTOENTCNT:
@@ -1283,7 +1400,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
                        return -EPERM;
                if (get_user(ent_count, p))
                        return -EFAULT;
-               credit_entropy_bits(&input_pool, ent_count);
+               credit_entropy_bits_safe(&input_pool, ent_count);
                return 0;
        case RNDADDENTROPY:
                if (!capable(CAP_SYS_ADMIN))
@@ -1298,7 +1415,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
                                    size);
                if (retval < 0)
                        return retval;
-               credit_entropy_bits(&input_pool, ent_count);
+               credit_entropy_bits_safe(&input_pool, ent_count);
                return 0;
        case RNDZAPENTCNT:
        case RNDCLEARPOOL:
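
With credit_entropy_bits_safe() in place, entropy counts supplied through these ioctls are clamped before they are credited. A minimal userspace sketch of RNDADDENTROPY; the payload words and the 32-bit credit are illustrative, and the call needs CAP_SYS_ADMIN.

#include <fcntl.h>
#include <linux/random.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
        size_t payload = 4 * sizeof(__u32);
        struct rand_pool_info *req = calloc(1, sizeof(*req) + payload);
        int fd = open("/dev/random", O_WRONLY);

        if (!req || fd < 0) {
                perror("setup");
                return 1;
        }

        req->buf[0] = 0x12345678;       /* illustrative payload, not real entropy */
        req->buf[1] = 0x9abcdef0;
        req->buf_size = payload;
        req->entropy_count = 32;        /* bits claimed; clamped by
                                         * credit_entropy_bits_safe() */

        if (ioctl(fd, RNDADDENTROPY, req) < 0)
                perror("RNDADDENTROPY");

        free(req);
        close(fd);
        return 0;
}
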
@@ -1405,6 +1522,23 @@ static int proc_do_uuid(struct ctl_table *table, int write,
        return proc_dostring(&fake_table, write, buffer, lenp, ppos);
 }
 
+/*
+ * Return entropy available scaled to integral bits
+ */
+static int proc_do_entropy(ctl_table *table, int write,
+                          void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+       ctl_table fake_table;
+       int entropy_count;
+
+       entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
+
+       fake_table.data = &entropy_count;
+       fake_table.maxlen = sizeof(entropy_count);
+
+       return proc_dointvec(&fake_table, write, buffer, lenp, ppos);
+}
+
 static int sysctl_poolsize = INPUT_POOL_WORDS * 32;
 extern struct ctl_table random_table[];
 struct ctl_table random_table[] = {
@@ -1419,7 +1553,7 @@ struct ctl_table random_table[] = {
                .procname       = "entropy_avail",
                .maxlen         = sizeof(int),
                .mode           = 0444,
-               .proc_handler   = proc_dointvec,
+               .proc_handler   = proc_do_entropy,
                .data           = &input_pool.entropy_count,
        },
        {
@@ -1440,6 +1574,13 @@ struct ctl_table random_table[] = {
                .extra1         = &min_write_thresh,
                .extra2         = &max_write_thresh,
        },
+       {
+               .procname       = "urandom_min_reseed_secs",
+               .data           = &random_min_urandom_seed,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
        {
                .procname       = "boot_id",
                .data           = &sysctl_bootid,
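
The entropy_avail handler now rescales the fractional count back to whole bits via proc_do_entropy(), and urandom_min_reseed_secs exposes the new reseed interval. A small sketch that reads both through procfs, using the paths exported by the random_table above:

#include <stdio.h>

static int read_int(const char *path, int *val)
{
        FILE *f = fopen(path, "r");

        if (!f)
                return -1;
        if (fscanf(f, "%d", val) != 1)
                *val = -1;
        fclose(f);
        return 0;
}

int main(void)
{
        int avail = -1, reseed = -1;

        read_int("/proc/sys/kernel/random/entropy_avail", &avail);
        read_int("/proc/sys/kernel/random/urandom_min_reseed_secs", &reseed);

        printf("entropy_avail=%d bits, urandom_min_reseed_secs=%d s\n",
               avail, reseed);
        return 0;
}
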
index 422df19de732b687da6fbce4a54f90e48a6aae44..805af6db41cc6091a877d8ddf58742cb2e3fce55 100644
--- a/include/trace/events/random.h
+++ b/include/trace/events/random.h
@@ -7,6 +7,25 @@
 #include <linux/writeback.h>
 #include <linux/tracepoint.h>
 
+TRACE_EVENT(add_device_randomness,
+       TP_PROTO(int bytes, unsigned long IP),
+
+       TP_ARGS(bytes, IP),
+
+       TP_STRUCT__entry(
+               __field(          int,  bytes                   )
+               __field(unsigned long,  IP                      )
+       ),
+
+       TP_fast_assign(
+               __entry->bytes          = bytes;
+               __entry->IP             = IP;
+       ),
+
+       TP_printk("bytes %d caller %pF",
+               __entry->bytes, (void *)__entry->IP)
+);
+
 DECLARE_EVENT_CLASS(random__mix_pool_bytes,
        TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
 
@@ -68,7 +87,112 @@ TRACE_EVENT(credit_entropy_bits,
                  (void *)__entry->IP)
 );
 
-TRACE_EVENT(get_random_bytes,
+TRACE_EVENT(push_to_pool,
+       TP_PROTO(const char *pool_name, int pool_bits, int input_bits),
+
+       TP_ARGS(pool_name, pool_bits, input_bits),
+
+       TP_STRUCT__entry(
+               __field( const char *,  pool_name               )
+               __field(          int,  pool_bits               )
+               __field(          int,  input_bits              )
+       ),
+
+       TP_fast_assign(
+               __entry->pool_name      = pool_name;
+               __entry->pool_bits      = pool_bits;
+               __entry->input_bits     = input_bits;
+       ),
+
+       TP_printk("%s: pool_bits %d input_pool_bits %d",
+                 __entry->pool_name, __entry->pool_bits,
+                 __entry->input_bits)
+);
+
+TRACE_EVENT(debit_entropy,
+       TP_PROTO(const char *pool_name, int debit_bits),
+
+       TP_ARGS(pool_name, debit_bits),
+
+       TP_STRUCT__entry(
+               __field( const char *,  pool_name               )
+               __field(          int,  debit_bits              )
+       ),
+
+       TP_fast_assign(
+               __entry->pool_name      = pool_name;
+               __entry->debit_bits     = debit_bits;
+       ),
+
+       TP_printk("%s: debit_bits %d", __entry->pool_name,
+                 __entry->debit_bits)
+);
+
+TRACE_EVENT(add_input_randomness,
+       TP_PROTO(int input_bits),
+
+       TP_ARGS(input_bits),
+
+       TP_STRUCT__entry(
+               __field(          int,  input_bits              )
+       ),
+
+       TP_fast_assign(
+               __entry->input_bits     = input_bits;
+       ),
+
+       TP_printk("input_pool_bits %d", __entry->input_bits)
+);
+
+TRACE_EVENT(add_disk_randomness,
+       TP_PROTO(dev_t dev, int input_bits),
+
+       TP_ARGS(dev, input_bits),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(          int,  input_bits              )
+       ),
+
+       TP_fast_assign(
+               __entry->dev            = dev;
+               __entry->input_bits     = input_bits;
+       ),
+
+       TP_printk("dev %d,%d input_pool_bits %d", MAJOR(__entry->dev),
+                 MINOR(__entry->dev), __entry->input_bits)
+);
+
+TRACE_EVENT(xfer_secondary_pool,
+       TP_PROTO(const char *pool_name, int xfer_bits, int request_bits,
+                int pool_entropy, int input_entropy),
+
+       TP_ARGS(pool_name, xfer_bits, request_bits, pool_entropy,
+               input_entropy),
+
+       TP_STRUCT__entry(
+               __field( const char *,  pool_name               )
+               __field(          int,  xfer_bits               )
+               __field(          int,  request_bits            )
+               __field(          int,  pool_entropy            )
+               __field(          int,  input_entropy           )
+       ),
+
+       TP_fast_assign(
+               __entry->pool_name      = pool_name;
+               __entry->xfer_bits      = xfer_bits;
+               __entry->request_bits   = request_bits;
+               __entry->pool_entropy   = pool_entropy;
+               __entry->input_entropy  = input_entropy;
+       ),
+
+       TP_printk("pool %s xfer_bits %d request_bits %d pool_entropy %d "
+                 "input_entropy %d", __entry->pool_name, __entry->xfer_bits,
+                 __entry->request_bits, __entry->pool_entropy,
+                 __entry->input_entropy)
+);
+
+DECLARE_EVENT_CLASS(random__get_random_bytes,
        TP_PROTO(int nbytes, unsigned long IP),
 
        TP_ARGS(nbytes, IP),
@@ -86,6 +210,18 @@ TRACE_EVENT(get_random_bytes,
        TP_printk("nbytes %d caller %pF", __entry->nbytes, (void *)__entry->IP)
 );
 
+DEFINE_EVENT(random__get_random_bytes, get_random_bytes,
+       TP_PROTO(int nbytes, unsigned long IP),
+
+       TP_ARGS(nbytes, IP)
+);
+
+DEFINE_EVENT(random__get_random_bytes, get_random_bytes_arch,
+       TP_PROTO(int nbytes, unsigned long IP),
+
+       TP_ARGS(nbytes, IP)
+);
+
 DECLARE_EVENT_CLASS(random__extract_entropy,
        TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
                 unsigned long IP),
@@ -126,7 +262,52 @@ DEFINE_EVENT(random__extract_entropy, extract_entropy_user,
        TP_ARGS(pool_name, nbytes, entropy_count, IP)
 );
 
+TRACE_EVENT(random_read,
+       TP_PROTO(int got_bits, int need_bits, int pool_left, int input_left),
+
+       TP_ARGS(got_bits, need_bits, pool_left, input_left),
+
+       TP_STRUCT__entry(
+               __field(          int,  got_bits                )
+               __field(          int,  need_bits               )
+               __field(          int,  pool_left               )
+               __field(          int,  input_left              )
+       ),
+
+       TP_fast_assign(
+               __entry->got_bits       = got_bits;
+               __entry->need_bits      = need_bits;
+               __entry->pool_left      = pool_left;
+               __entry->input_left     = input_left;
+       ),
+
+       TP_printk("got_bits %d still_needed_bits %d "
+                 "blocking_pool_entropy_left %d input_entropy_left %d",
+                 __entry->got_bits, __entry->need_bits, __entry->pool_left,
+                 __entry->input_left)
+);
+
+TRACE_EVENT(urandom_read,
+       TP_PROTO(int got_bits, int pool_left, int input_left),
+
+       TP_ARGS(got_bits, pool_left, input_left),
+
+       TP_STRUCT__entry(
+               __field(          int,  got_bits                )
+               __field(          int,  pool_left               )
+               __field(          int,  input_left              )
+       ),
+
+       TP_fast_assign(
+               __entry->got_bits       = got_bits;
+               __entry->pool_left      = pool_left;
+               __entry->input_left     = input_left;
+       ),
+
+       TP_printk("got_bits %d nonblocking_pool_entropy_left %d "
+                 "input_entropy_left %d", __entry->got_bits,
+                 __entry->pool_left, __entry->input_left)
+);
 
 #endif /* _TRACE_RANDOM_H */