author     Leah Rowe <leah@libreboot.org>    2023-03-06 19:21:46 +0000
committer  Leah Rowe <leah@libreboot.org>    2023-03-06 21:30:33 +0000
commit     f9e20b8a1d93dec3b6389ca9a7575765c9cc733d (patch)
tree       d47929611a974f07b34d45557061122f084a1770 /util/nvmutil
parent     f04855c29d076431300965bce0c9eefff5a20f1b (diff)
util/nvmutil: optimise rhex() further
reduce the number of calls to read() by using bit shifts. extract the lowest nibble of rnum on each call, then shift rnum right by 4; when rnum reaches zero, read again.

in most cases a nibble will not be zero, so this usually results in about 13-15 of the 16 nibbles being used, compared to 8 nibbles being used before; the number of calls to read() is therefore roughly halved.

at the same time, the extra logic is minimal (and probably less) when compiled, outside of the calls to read(), because shifting is well optimised: on 64-bit machines, a decent compiler will shift the uint64_t with a single instruction. the alternative would be to always use exactly 16 nibbles by counting up to 16, which would involve an AND mask and still need a shift, plus... you get the point.

this is probably the most efficient code ever written for generating random numbers between 0 and 15
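A minimal, self-contained sketch of the approach described above (the nibble-at-a-time generator). It mirrors the patched rhex() shown in the diff below, but the names rhex_sketch() and main(), and the use of plain read() for the refill, are illustrative assumptions rather than the exact nvmutil code:

/*
 * sketch of the patched approach: pull one nibble per call from a
 * 64-bit pool, refill from /dev/urandom only when the pool runs out.
 * illustrative only; not the exact nvmutil.c code.
 */
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static uint8_t
rhex_sketch(void)
{
	static int rfd = -1;
	static uint64_t rnum = 0;
	uint8_t rval;

	if (rnum == 0) {	/* refill once every remaining nibble is zero */
		if (rfd == -1)
			if ((rfd = open("/dev/urandom", O_RDONLY)) == -1)
				err(errno, "/dev/urandom");
		if (read(rfd, &rnum, sizeof(rnum)) == -1)	/* assumption: plain read() */
			err(errno, "/dev/urandom");
	}

	rval = (uint8_t) (rnum & 0xf);	/* lowest nibble: a value from 0 to 15 */
	rnum >>= 4;			/* one shift instruction on 64-bit machines */

	return rval;
}

int
main(void)
{
	for (int i = 0; i < 32; i++)	/* print 32 random hex digits */
		printf("%x", rhex_sketch());
	printf("\n");
	return 0;
}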
Diffstat (limited to 'util/nvmutil')
-rw-r--r--    util/nvmutil/nvmutil.c    10
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/util/nvmutil/nvmutil.c b/util/nvmutil/nvmutil.c
index 1a873b2a..479b1c2b 100644
--- a/util/nvmutil/nvmutil.c
+++ b/util/nvmutil/nvmutil.c
@@ -261,10 +261,9 @@ rhex(void)
 {
         static int rfd = -1;
         static uint64_t rnum = 0;
-        static size_t rindex = 8;
+        uint8_t rval;
 
-        if (rindex == 8) {
-                rindex = 0;
+        if (rnum == 0) {
                 if (rfd == -1)
                         if ((rfd = open("/dev/urandom", O_RDONLY)) == -1)
                                 err(errno, "/dev/urandom");
@@ -272,7 +271,10 @@ rhex(void)
                         err(errno, "/dev/urandom");
         }
 
-        return ((uint8_t *) &rnum)[rindex++] & 0xf;
+        rval = (uint8_t) (rnum & 0xf);
+        rnum >>= 4;
+
+        return rval;
 }
 
 void
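For comparison, the pre-patch logic (per the removed lines above) indexed the 8 bytes of rnum one at a time and masked each byte with 0xf, discarding the high nibble of every byte; that is why only 8 of the 16 nibbles were used per read() before this change. A sketch of that older version, reconstructed from the removed lines, is below; it can be dropped into the earlier sketch in place of rhex_sketch(), and the read() call is again an assumption:

/*
 * pre-patch approach, reconstructed from the removed lines: one nibble
 * per byte of rnum, so the high nibble of each byte is wasted and a
 * refill is needed after only 8 calls.
 */
static uint8_t
rhex_old_sketch(void)
{
	static int rfd = -1;
	static uint64_t rnum = 0;
	static size_t rindex = 8;	/* byte index into rnum; 8 forces a refill */

	if (rindex == 8) {
		rindex = 0;
		if (rfd == -1)
			if ((rfd = open("/dev/urandom", O_RDONLY)) == -1)
				err(errno, "/dev/urandom");
		if (read(rfd, &rnum, sizeof(rnum)) == -1)	/* assumption: plain read() */
			err(errno, "/dev/urandom");
	}

	return ((uint8_t *) &rnum)[rindex++] & 0xf;	/* low nibble of byte rindex */
}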