author     Stefano Brivio <sbrivio@redhat.com>  2019-11-19 00:18:17 (GMT)
committer  Stefano Brivio <sbrivio@redhat.com>  2019-11-19 00:18:17 (GMT)
commit     a724e8dbd67ce3d9bf5a24bd836dea4ad3a5516f (patch)
tree       8575f185b5f2e773a7334ffe1dd5891a70bb2151 /util.h

pipapo: Initial import (HEAD, master)
Signed-off-by: Stefano Brivio <sbrivio@redhat.com>
Diffstat (limited to 'util.h')
-rw-r--r--  util.h  110
1 file changed, 110 insertions, 0 deletions
diff --git a/util.h b/util.h
new file mode 100644
index 0000000..94fc0f2
--- /dev/null
+++ b/util.h
@@ -0,0 +1,110 @@
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#define __round_mask(x, y) ((__typeof__(x))((y)-1))
+#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
+
+#define min(x, y) (((x) < (y)) ? (x) : (y))
+
+#define likely(x) __builtin_expect(!!(x), 1)
+#define unlikely(x) __builtin_expect(!!(x), 0)
+
+#ifdef VERBOSE
+#define verbose(...) fprintf(stdout, __VA_ARGS__)
+#else
+#define verbose(...) do { } while (0)
+#endif
+
+void set_bit(uint8_t *data, int n, int len);
+void bit_sum(uint8_t *v, int n, int len);
+void bit_sub(uint8_t *v, int n, int len);
+void andmem(uint8_t *dst, uint8_t *src, int len);
+void ormem(uint8_t *dst, uint8_t *src, int len);
+int ffs_clear(uint8_t *data, int len, int byte_offset);
+int test_bit(uint8_t *data, int n);
+void fold_bits(uint8_t *data, int bit, int n, int len);
+
+/**
+ * fill() - Fill bits in given region
+ * @data: Memory region
+ * @start: First bit to set
+ * @len: Number of bits to set
+ *
+ * Equivalent to bitmap_set() in the kernel.
+ */
+__always_inline void fill(uint8_t *data, int start, int len)
+{
+ uint8_t mask;
+
+ data += start / 8;
+ if (likely(start % 8 || len < 8)) {
+ if (len + start % 8 < 8) {
+ mask = 0xff >> (8 - len);
+ *data |= mask << (start % 8);
+ return;
+ }
+
+ *data |= 0xff << (start % 8);
+ len -= 8 - start % 8;
+ data++;
+
+ if (len < 8) {
+ mask = 0xff >> (8 - len);
+ *data |= mask;
+ return;
+ }
+ }
+ memset(data, 0xff, len / 8);
+ data += len / 8;
+
+ if (len %= 8)
+ *data |= 0xff >> (8 - len);
+}
+
+/**
+ * ffs_and_fill() - For each set bit, set bits from selected mapping table item
+ * @map: Bitmap to be scanned for set bits
+ * @offset: Optional, initial offset, bytes
+ * @len: Length of bitmap in 64-bit words
+ * @dst: Destination bitmap
+ * @mt: Mapping table containing bit set specifiers
+ * @one: Find a single bit and return, don't fill
+ *
+ * Original implementation by Daniel Lemire, public domain.
+ *
+ * For each bit set in map, select the mapping table bucket whose index
+ * corresponds to the position of that bit. Use the start bit and number of
+ * bits specified in the bucket to fill a region in dst.
+ *
+ * If __builtin_ctzl() is available, this is vastly more efficient than
+ * looking for bits with ffs64(), as we can iterate directly without having to
+ * return the bit position and clear or cache it for each set bit.
+ *
+ * If __builtin_ctzl() is not available, ffs_clear() will be used instead.
+ *
+ * Return: 0 if at least one bit was set and filled, the position of the first
+ * set bit found if 'one' is passed, -1 if no bits are set.
+ */
+__always_inline int ffs_and_fill(uint8_t *map, int offset, int len,
+ uint8_t *dst, union map_bucket *mt, int one)
+{
+ uint64_t bitset;
+ int k, ret = -1;
+
+ for (k = offset / 8; k < len; k++) {
+ bitset = ((uint64_t *)map)[k];
+ while (bitset) {
+ uint64_t t = bitset & -bitset;
+ int r = __builtin_ctzl(bitset);
+
+ if (unlikely(one))
+ return k * 64 + r;
+
+ ret = 0;
+
+ fill(dst, mt[k * 64 + r].to, mt[k * 64 + r].n);
+
+ bitset ^= t;
+ }
+ ((uint64_t *)map)[k] = 0;
+ }
+
+ return ret;
+}
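
Two short usage sketches (editorial additions, not part of this commit) may help illustrate the two inline helpers above. They assume a single example file built with GCC or Clang: union map_bucket is really defined elsewhere in the pipapo sources, so the layout below is only a stand-in exposing the 'to' and 'n' members that ffs_and_fill() uses, and the __always_inline shim likewise just fills in for whatever the rest of the tree provides. The names fill_example() and refill_example() are invented for the sketch.

        #include <assert.h>
        #include <stdint.h>
        #include <stdio.h>
        #include <string.h>

        /* Minimal shim for this sketch; the tree presumably defines this itself */
        #ifndef __always_inline
        #define __always_inline static inline __attribute__((always_inline))
        #endif

        /* Hypothetical stand-in: only the 'to' and 'n' members used by
         * ffs_and_fill() are assumed here.
         */
        union map_bucket {
                struct {
                        uint32_t to;    /* first destination bit to set */
                        uint32_t n;     /* number of destination bits to set */
                };
        };

        #include "util.h"

        /* fill() sets a contiguous run of bits, LSB first within each byte:
         * five bits starting at bit 6 touch bits 6-7 of byte 0 and bits 0-2
         * of byte 1.
         */
        static void fill_example(void)
        {
                uint8_t buf[4] = { 0 };

                fill(buf, 6, 5);

                assert(buf[0] == 0xc0);
                assert(buf[1] == 0x07);
                assert(buf[2] == 0x00);
        }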
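Continuing the same sketch, ffs_and_fill() scans the source bitmap one 64-bit word at a time and, for each set bit, fills the destination region described by the mapping table bucket with the same index, clearing scanned words along the way:

        /* Bits 1 and 3 of the source word select buckets 1 and 3 of the
         * mapping table, filling dst bits 0..3 and 8..9 respectively.
         */
        static void refill_example(void)
        {
                uint64_t map[1] = { (1ULL << 1) | (1ULL << 3) };
                union map_bucket mt[64] = { 0 };
                uint8_t dst[16] = { 0 };
                int ret;

                mt[1].to = 0;
                mt[1].n = 4;
                mt[3].to = 8;
                mt[3].n = 2;

                ret = ffs_and_fill((uint8_t *)map, 0, 1, dst, mt, 0);

                assert(ret == 0);
                assert(dst[0] == 0x0f);
                assert(dst[1] == 0x03);
                assert(map[0] == 0);
        }

        int main(void)
        {
                fill_example();
                refill_example();

                return 0;
        }

Passing a nonzero 'one' instead makes the call return the position of the first set bit (1 for the map above) without filling dst.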