From f9e20b8a1d93dec3b6389ca9a7575765c9cc733d Mon Sep 17 00:00:00 2001 From: Leah Rowe Date: Mon, 6 Mar 2023 19:21:46 +0000 Subject: util/nvmutil: optimise rhex() further reduce the number of calls to read() by using bit shifts. when rnum is zero, read again. in most cases, a nibble will not be zero, so this will usually result in about 13-15 of 16 nibbles being used. this is in comparison to 8 nibbles being used before, which means that the number of calls to read() is roughly halved. at the same time, the extra amount of logic is minimal (and probably less) when compiled, outside of calls to read(), because shifting is better optimised (on 64-bit machines, the uint64_t will be shifted with just a single instruction, if the compiler is decent), whereas the alternative would be to always precisely use exactly 16 nibbles by counting up to 16, which would involve the use of an and mask and still need a shift, plus... you get the point. this is probably the most efficient code ever written, for generating random numbers between the value of 0 and 15 --- util/nvmutil/nvmutil.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'util/nvmutil') diff --git a/util/nvmutil/nvmutil.c b/util/nvmutil/nvmutil.c index 1a873b2a..479b1c2b 100644 --- a/util/nvmutil/nvmutil.c +++ b/util/nvmutil/nvmutil.c @@ -261,10 +261,9 @@ rhex(void) { static int rfd = -1; static uint64_t rnum = 0; - static size_t rindex = 8; + uint8_t rval; - if (rindex == 8) { - rindex = 0; + if (rnum == 0) { if (rfd == -1) if ((rfd = open("/dev/urandom", O_RDONLY)) == -1) err(errno, "/dev/urandom"); err(errno, "/dev/urandom"); } - return ((uint8_t *) &rnum)[rindex++] & 0xf; + rval = (uint8_t) (rnum & 0xf); + rnum >>= 4; + + return rval; } void -- cgit v1.2.1