commit a724e8dbd67ce3d9bf5a24bd836dea4ad3a5516f (HEAD, master)
tree   8575f185b5f2e773a7334ffe1dd5891a70bb2151
Author:    Stefano Brivio <sbrivio@redhat.com>  2019-11-19 00:18:17 (GMT)
Committer: Stefano Brivio <sbrivio@redhat.com>  2019-11-19 00:18:17 (GMT)

    pipapo: Initial import

    Signed-off-by: Stefano Brivio <sbrivio@redhat.com>
Diffstat (limited to 'set.c')
-rw-r--r--  set.c  902
1 file changed, 902 insertions, 0 deletions
diff --git a/set.c b/set.c
new file mode 100644
index 0000000..6943b36
--- /dev/null
+++ b/set.c
@@ -0,0 +1,902 @@
+/* PIPAPO - PIle PAcket POlicies
+ *
+ * set.c - Insertion, listing, deletion
+ *
+ * Author: Stefano Brivio <sbrivio@redhat.com>
+ * License: GPLv2
+ */
+
+#include <arpa/inet.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "pipapo.h"
+#include "set.h"
+#include "util.h"
+
+/**
+ * struct rule_map - Internal convenience only: number of first rule and amount of rules
+ * @x: Number of first rule
+ * @n: Rule count
+ */
+struct rule_map {
+ int x, n;
+};
+
+/**
+ * base_step_diff() - Check if setting 'step' bit in mask changes it
+ * @base: Mask we are expanding
+ * @step: Step bit for given expansion step
+ * @len: Total length of mask space (set and unset bits), bytes
+ *
+ * Convenience function for mask expansion.
+ *
+ * Return: non-zero if given step bit changes mask base, 0 otherwise.
+ */
+static int base_step_diff(uint8_t *base, int step, int len)
+{
+ uint8_t tmp[16];
+
+ memcpy(tmp, base, len);
+ set_bit(tmp + len - 1 - step / 8, step % 8, len);
+
+ return memcmp(tmp, base, len);
+}
+
+/**
+ * base_step_after_end() - Check if mask exceeds range end with given step bit set
+ * @base: Mask we are expanding
+ * @end: End of range
+ * @step: Step bit for given expansion step
+ * @len: Total length of mask space (set and unset bits), bytes
+ *
+ * Convenience function for mask expansion.
+ *
+ * Return: non-zero if mask exceeds range with step bit, 0 otherwise.
+ */
+static int base_step_after_end(uint8_t *base, uint8_t *end, int step, int len)
+{
+ uint8_t tmp[16];
+ int i;
+
+ memcpy(tmp, base, len);
+
+ for (i = 0; i <= step; i++)
+ set_bit(tmp + len - 1 - i / 8, i % 8, len);
+
+ return memcmp(tmp, end, len) > 0;
+}
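+
+/* Illustrative sketch, not part of the algorithm: with a 2-byte field,
+ * base 0x0400 and end 0x07ff (ports 1024-2047), base_step_diff() is
+ * non-zero for steps 0..9 (those bits are clear in base) and zero at step
+ * 10 (bit 10 is already set), while base_step_after_end() stays false up
+ * to step 9, as setting bits 0..9 yields exactly 0x07ff. expand() below
+ * turns this range into a single mask of length 16 - 10 = 6 bits.
+ */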
+
+/**
+ * resize() - Resize lookup and mapping tables according to new number of rules
+ * @f: Field containing lookup and mapping tables
+ * @old_rules: Previous amount of rules in field
+ * @rules: New amount of rules
+ *
+ * Grow, shrink or keep table sizes depending on the new amount of rules,
+ * and copy data over. If the new size is smaller, data for the
+ * highest-numbered rules is thrown away.
+ *
+ * Return: 0 on success, -ENOMEM on allocation failure.
+ */
+static int resize(struct field *f, int old_rules, int rules)
+{
+ uint8_t *new_lt = NULL, *new_p, *old_lt = f->lt, *old_p;
+ union map_bucket *new_mt, *old_mt = f->mt;
+ ssize_t new_bucket_size, copy;
+ int group, bucket;
+
+ new_bucket_size = DIV_ROUND_UP(rules, 8);
+#if defined(MATCH_AVX2) || defined(MATCH_CTZL)
+ new_bucket_size = round_up(new_bucket_size, 32);
+#endif
+
+ if (new_bucket_size == f->bsize)
+ goto mt;
+
+ if (new_bucket_size > f->bsize)
+ copy = f->bsize;
+ else
+ copy = new_bucket_size;
+
+ new_p = new_lt = calloc(new_bucket_size, f->groups * BUCKETS);
+ if (!new_lt)
+ return -ENOMEM;
+
+ old_p = old_lt;
+ for (group = 0; group < f->groups; group++) {
+ for (bucket = 0; bucket < BUCKETS; bucket++) {
+ memcpy(new_p, old_p, copy);
+ new_p += copy;
+ old_p += copy;
+
+ if (new_bucket_size > f->bsize)
+ new_p += new_bucket_size - f->bsize;
+ else
+ old_p += f->bsize - new_bucket_size;
+ }
+ }
+mt:
+ new_mt = calloc(rules, sizeof(*new_mt));
+ if (!new_mt) {
+ free(new_lt);
+ return -ENOMEM;
+ }
+
+ if (f->mt)
+ memcpy(new_mt, f->mt, min(old_rules, rules) * sizeof(*new_mt));
+
+ f->bsize = new_bucket_size;
+
+ if (new_lt) {
+ f->lt = new_lt;
+ free(old_lt);
+ }
+
+ f->mt = new_mt;
+ free(old_mt);
+
+ return 0;
+}
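+
+/* Sizing sketch, for illustration only: without vector matching,
+ * DIV_ROUND_UP(9, 8) gives 2-byte buckets when growing from 8 to 9 rules;
+ * with MATCH_AVX2 or MATCH_CTZL the size is rounded up to 32 bytes, so
+ * buckets only grow when crossing multiples of 256 rules. Existing bucket
+ * data is copied over, and the added tail stays zeroed thanks to calloc().
+ */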
+
+/**
+ * bucket_set_bit() - Set rule bit in lookup bucket according to group value
+ * @f: Field containing lookup table
+ * @rule: Rule bit number to be set
+ * @group: Bit group in field
+ * @v: Value of 4-bit group
+ */
+static void bucket_set_bit(struct field *f, int rule, int group, int v)
+{
+ uint8_t *pos;
+
+ pos = f->lt + f->bsize * BUCKETS * group;
+ pos += f->bsize * v;
+
+ set_bit(pos, rule, f->bsize);
+}
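+
+/* For example, for illustration only: rule 5 with value 0xa in group 2
+ * sets bit 5 of the bucket at offset f->bsize * (2 * BUCKETS + 10) in
+ * f->lt.
+ */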
+
+/**
+ * insert() - Insert new rule in field given binary data and mask length
+ * @f: Field containing lookup table
+ * @data: Base value of classifying entry
+ * @mask_len: Length of mask, matches field length for non-ranged entry
+ *
+ * Insert a new rule reference in lookup buckets corresponding to data and
+ * mask_len. This implements algorithm step 3.4.
+ *
+ * Return: 1 on success, negative error code on failure.
+ */
+static int insert(struct field *f, uint8_t *data, int mask_len)
+{
+ int rule = f->rules++, group, ret, i, v;
+ uint8_t mask;
+
+ ret = resize(f, f->rules - 1, f->rules);
+ if (ret)
+ return ret;
+
+ for (group = 0; group < f->groups; group++) {
+ if (group % 2)
+ v = data[group / 2] & 0x0f;
+ else
+ v = (data[group / 2] & 0xf0) >> 4;
+
+ if (mask_len >= (group + 1) * 4) {
+ /* Not masked */
+ bucket_set_bit(f, rule, group, v);
+ } else if (mask_len <= group * 4) {
+ /* Completely masked */
+ for (i = 0; i < (4 << 2); i++)
+ bucket_set_bit(f, rule, group, i);
+ } else {
+ /* The mask limit falls on this group */
+ mask = 0x0f >> (mask_len - group * 4);
+ for (i = 0; i < (4 << 2); i++) {
+ if ((i & ~mask) == (v & ~mask))
+ bucket_set_bit(f, rule, group, i);
+ }
+ }
+ }
+
+ return 1;
+}
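+
+/* Worked example, for illustration only: inserting 10.0.0.0/11 as rule r
+ * in an IPv4 field (8 groups):
+ * - groups 0 and 1 (first byte, 0x0a) are not masked: bit r is set in
+ *   buckets 0 and 10 only
+ * - the /11 limit falls in group 2: mask is 0x0f >> (11 - 8) = 0x01, so
+ *   bit r is set in buckets 0 and 1
+ * - groups 3 to 7 are completely masked: bit r is set in all 16 buckets
+ */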
+
+/**
+ * expand() - Expand range to composing netmasks and insert into lookup table
+ * @f: Field containing lookup table
+ * @start: Start of range
+ * @end: End of range
+ * @len: Length of start and end values, bytes
+ *
+ * Expand range to composing netmasks and insert corresponding rule references
+ * in lookup buckets. This implements algorithm steps 3.3 - 3.4.
+ *
+ * Return: number of inserted rules on success, negative error code on failure.
+ */
+static int expand(struct field *f, uint8_t *start, uint8_t *end, int len)
+{
+ int step, masks = 0, err;
+ uint8_t base[16];
+
+ memcpy(base, start, len);
+ while (memcmp(base, end, len) <= 0) {
+ step = 0;
+ while (base_step_diff(base, step, len)) {
+ if (base_step_after_end(base, end, step, len))
+ break;
+ step++;
+
+ if (step >= len * 8)
+ goto out;
+ }
+
+ err = insert(f, base, len * 8 - step);
+ if (err < 0)
+ return err;
+
+ masks++;
+ bit_sum(base, step, len);
+ }
+
+out:
+ return masks;
+}
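+
+/* Worked example, for illustration only: expanding the port range 10-20
+ * (0x000a to 0x0014) inserts four rules, one per composing mask:
+ * 10-11 (step 1, mask length 15), 12-15 (step 2, mask length 14),
+ * 16-19 (step 2, mask length 14) and 20 (step 0, mask length 16).
+ */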
+
+/**
+ * map() - Insert references in mapping tables, mapping rules between fields
+ * @s: Set data
+ * @layout: Set layout
+ * @rmap: Table of rule maps, arrays of first rule and amount of rules
+ * in next field a given rule maps to, for each field
+ * @key: Verdict key the inserted rules finally map to, in last field
+ *
+ * This implements algorithm steps 3.5 - 3.6.
+ */
+static void map(struct set *s, struct desc_spec **layout,
+ struct rule_map rmap[16], uint32_t key)
+{
+ struct field *f;
+ int i, j;
+
+ for (i = 0, f = s->fields; layout[i + 1]->type != KEY; i++, f++) {
+ for (j = 0; j < rmap[i].n; j++) {
+ f->mt[rmap[i].x + j].to = rmap[i + 1].x;
+ f->mt[rmap[i].x + j].n = rmap[i + 1].n;
+ }
+ }
+
+ for (j = 0; j < rmap[i].n; j++)
+ f->mt[rmap[i].x + j].key = key;
+}
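+
+/* For example, for illustration only: if an entry expanded to rules 2-3 in
+ * the first field and to rules 5-6 in the second (and last non-key) field,
+ * with verdict key 42, this sets mt[2].to = mt[3].to = 5 and
+ * mt[2].n = mt[3].n = 2 in the first field, and mt[5].key = mt[6].key = 42
+ * in the second.
+ */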
+
+/**
+ * add() - Insert one entry in set data
+ * @s: Set data
+ * @layout: Set layout
+ * @data: Concatenation of structs with parsed values for each field item
+ *
+ * This is the entry point for all algorithm steps in section 3.
+ */
+int add(struct set *s, struct desc_spec **layout, uint8_t *data)
+{
+ uint8_t zero_mac[MAC_LEN] = { 0 };
+ struct rule_map rmap[16] = { 0 };
+ char buf[BUFSIZ], buf2[BUFSIZ];
+ struct field *f;
+ struct addr6 *a6;
+ struct addr *a;
+ struct port *p;
+ struct mac *m;
+ int i, ret = 0;
+
+ (void)buf;
+ (void)buf2;
+
+ for_each_field(f, s, layout) {
+ /* See union map_bucket */
+ if (f->rules >= (1 << (24 - 1)) - 256)
+ return -ENOSPC;
+ }
+
+ verbose("Adding entry:\n");
+
+ for_each_field(f, s, layout) {
+ rmap[i].x = f->rules;
+
+ verbose(" inserting %s, ", layout[i]->label);
+
+ switch (layout[i]->type) {
+ case ADDR:
+ a = (struct addr *)data;
+
+ if (a->cidr == 0) {
+ verbose("start: %s, end: %s, ",
+ inet_ntop(AF_INET, &a->start, buf,
+ BUFSIZ),
+ inet_ntop(AF_INET, &a->end, buf2,
+ BUFSIZ));
+ } else if (a->cidr == 32) {
+ verbose("address: %s, ",
+ inet_ntop(AF_INET, &a->start, buf,
+ BUFSIZ));
+ } else {
+ verbose("address: %s/%i, ",
+ inet_ntop(AF_INET, &a->start, buf,
+ BUFSIZ),
+ a->cidr);
+ }
+
+ if (a->cidr)
+ ret = insert(f, (uint8_t *)&a->start, a->cidr);
+ else
+ ret = expand(f, (uint8_t *)&a->start,
+ (uint8_t *)&a->end, ADDR_LEN);
+ if (ret < 0)
+ return ret;
+
+ data += sizeof(*a);
+ break;
+ case ADDR6:
+ a6 = (struct addr6 *)data;
+
+ if (a6->cidr == 0) {
+ verbose("start: %s, end: %s, ",
+ inet_ntop(AF_INET6, &a6->start, buf,
+ BUFSIZ),
+ inet_ntop(AF_INET6, &a6->end, buf2,
+ BUFSIZ));
+ } else if (a6->cidr == 128) {
+ verbose("address: %s, ",
+ inet_ntop(AF_INET6, &a6->start, buf,
+ BUFSIZ));
+ } else {
+ verbose("address: %s/%i, ",
+ inet_ntop(AF_INET6, &a6->start, buf,
+ BUFSIZ),
+ a6->cidr);
+ }
+
+ if (a6->cidr)
+ ret = insert(f, (uint8_t *)&a6->start,
+ a6->cidr);
+ else
+ ret = expand(f, (uint8_t *)&a6->start,
+ (uint8_t *)&a6->end, ADDR6_LEN);
+ if (ret < 0)
+ return ret;
+
+ data += sizeof(*a6);
+ break;
+ case PORT:
+ p = (struct port *)data;
+
+ if (p->end)
+ verbose("start: %i, end: %i, ",
+ ntohs(p->start), ntohs(p->end));
+ else
+ verbose("port: %i, ", ntohs(p->start));
+
+ if (p->end)
+ ret = expand(f, (uint8_t *)&p->start,
+ (uint8_t *)&p->end, PORT_LEN);
+ else
+ ret = insert(f, (uint8_t *)&p->start,
+ PORT_LEN * 8);
+ if (ret < 0)
+ return ret;
+
+ data += sizeof(*p);
+ break;
+ case MAC:
+ m = (struct mac *)data;
+
+ if (memcmp(&m->end, zero_mac, MAC_LEN))
+ verbose("start: %02x:%02x:%02x:%02x:%02x:%02x, "
+ "end: %02x:%02x:%02x:%02x:%02x:%02x, ",
+ m->start[0], m->start[1], m->start[2],
+ m->start[3], m->start[4], m->start[5],
+ m->end[0], m->end[1], m->end[2],
+ m->end[3], m->end[4], m->end[5]);
+ else
+ verbose("mac: %02x:%02x:%02x:%02x:%02x:%02x, ",
+ m->start[0], m->start[1], m->start[2],
+ m->start[3], m->start[4], m->start[5]);
+
+ if (memcmp(&m->end, zero_mac, MAC_LEN))
+ ret = expand(f, m->start, m->end, MAC_LEN);
+ else
+ ret = insert(f, m->start, MAC_LEN * 8);
+ if (ret < 0)
+ return ret;
+
+ data += sizeof(*m);
+ break;
+ case KEY:
+ break;
+ }
+
+ if (ret > 1)
+ verbose("rules %i-%i\n", rmap[i].x,
+ rmap[i].x + ret - 1);
+ else
+ verbose("rule %i\n", rmap[i].x);
+ rmap[i].n = ret;
+ }
+
+ map(s, layout, rmap, *(uint32_t *)data);
+
+ return 0;
+}
+
+/**
+ * rules_same_key() - Find amount of rules mapping to the same rules/key
+ * @f: Field containing mapping table
+ * @start: First rule to be checked against subsequent ones
+ *
+ * This is used to find out how many rules were created as part of the same set
+ * entry, for listing and deletion.
+ *
+ * Return: amount of rules mapping to the same rules or key given starting one
+ */
+static int rules_same_key(struct field *f, int start)
+{
+ uint32_t key;
+ int r;
+
+ for (r = start; r < f->rules; r++) {
+ if (r != start && key != f->mt[r].key)
+ return r - start;
+
+ key = f->mt[r].key;
+ }
+
+ if (r != start)
+ return r - start;
+
+ return 0;
+}
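+
+/* For instance, for illustration only: with keys 42 42 33 33 44 in the
+ * last field, rules_same_key(f, 0) returns 2, rules_same_key(f, 2) returns
+ * 2, and rules_same_key(f, 4) returns 1.
+ */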
+
+/**
+ * aggregate() - Aggregate rules back to originating entries, print or match
+ * @f: Field containing lookup and mapping tables
+ * @type: Field type
+ * @start: First rule for entry
+ * @len: Amount of rules originated from same entry
+ * @match: Optional (used for deletion), struct with field value
+ *
+ * This is used in listing, to print originating entries for rules found in
+ * lookup tables, and in deletion, to check a group of rules against the values
+ * we want to delete from a given set field.
+ *
+ * Return: non-zero on match, 0 on mismatch; 0 if no match is requested.
+ */
+static int aggregate(struct field *f, enum desc_type type, int start, int len,
+ uint8_t *match)
+{
+ uint8_t left[16] = { 0 }, *l = left, right[16] = { 0 }, *r = right;
+ uint8_t zero_mac[MAC_LEN] = { 0 };
+ char buf_l[BUFSIZ], buf_r[BUFSIZ];
+ int g, b, x0, x1, mask_len = 0;
+ struct addr6 *a6;
+ struct addr *a;
+ struct port *p;
+ struct mac *m;
+
+ for (g = 0; g < f->groups; g++) {
+ x0 = x1 = -1;
+ for (b = 0; b < BUCKETS; b++) {
+ if (test_bit(f->lt + (g * BUCKETS + b) * f->bsize,
+ start))
+ if (x0 == -1)
+ x0 = b;
+ if (test_bit(f->lt + (g * BUCKETS + b) * f->bsize,
+ start + len - 1))
+ x1 = b;
+ }
+
+ if (g % 2) {
+ *(l++) |= x0 & 0x0f;
+ *(r++) |= x1 & 0x0f;
+ } else {
+ *l |= x0 << 4;
+ *r |= x1 << 4;
+ }
+
+ if (x1 - x0 == 0)
+ mask_len += 4;
+ else if (x1 - x0 == 1)
+ mask_len += 3;
+ else if (x1 - x0 == 3)
+ mask_len += 2;
+ else if (x1 - x0 == 7)
+ mask_len += 1;
+ }
+
+ l = left;
+ r = right;
+
+ switch (type) {
+ case ADDR:
+ if (match) {
+ a = (struct addr *)match;
+ if (!a->cidr)
+ return !memcmp(&a->start, l, ADDR_LEN) &&
+ !memcmp(&a->end, r, ADDR_LEN);
+ return !memcmp(&a->start, l, ADDR_LEN) &&
+ (a->cidr == mask_len);
+ }
+
+ inet_ntop(AF_INET, l, buf_l, BUFSIZ);
+ inet_ntop(AF_INET, r, buf_r, BUFSIZ);
+ if (mask_len == 32)
+ fprintf(stdout, "%s ", buf_l);
+ else if (len == 1)
+ fprintf(stdout, "%s/%i ", buf_l, mask_len);
+ else
+ fprintf(stdout, "%s-%s ", buf_l, buf_r);
+ break;
+ case ADDR6:
+ if (match) {
+ a6 = (struct addr6 *)match;
+ if (!a6->cidr)
+ return !memcmp(&a6->start, l, ADDR6_LEN) &&
+ !memcmp(&a6->end, r, ADDR6_LEN);
+ return !memcmp(&a6->start, l, ADDR6_LEN) &&
+ (a6->cidr == mask_len);
+ }
+
+ inet_ntop(AF_INET6, l, buf_l, BUFSIZ);
+ inet_ntop(AF_INET6, r, buf_r, BUFSIZ);
+ if (mask_len == 128)
+ fprintf(stdout, "%s ", buf_l);
+ else if (len == 1)
+ fprintf(stdout, "%s/%i ", buf_l, mask_len);
+ else
+ fprintf(stdout, "%s-%s ", buf_l, buf_r);
+ break;
+ case PORT:
+ if (match) {
+ p = (struct port *)match;
+ return p->start == *(uint16_t *)l &&
+ (!p->end || p->end == *(uint16_t *)r);
+ }
+
+ if (mask_len == 16 && len == 1)
+ fprintf(stdout, "%u ", ntohs(*(uint16_t *)l));
+ else
+ fprintf(stdout, "%u-%u ", ntohs(*(uint16_t *)l),
+ ntohs(*(uint16_t *)r));
+ break;
+ case MAC:
+ if (match) {
+ m = (struct mac *)match;
+ return !memcmp(&m->start, l, MAC_LEN) &&
+ (!memcmp(zero_mac, &m->end, MAC_LEN) ||
+ !memcmp(&m->end, r, MAC_LEN));
+ }
+
+ if (mask_len == 48 && len == 1)
+ fprintf(stdout, "%02x:%02x:%02x:%02x:%02x:%02x ",
+ l[0], l[1], l[2], l[3], l[4], l[5]);
+ else
+ fprintf(stdout, "%02x:%02x:%02x:%02x:%02x:%02x-"
+ "%02x:%02x:%02x:%02x:%02x:%02x ",
+ l[0], l[1], l[2], l[3], l[4], l[5],
+ r[0], r[1], r[2], r[3], r[4], r[5]);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
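+
+/* Reconstruction sketch, for illustration only: for a 10.0.0.0/30 entry
+ * inserted as a single rule, groups 0 to 6 have both the start and end
+ * bits in the same bucket (x1 - x0 == 0, 4 mask bits each), while group 7
+ * has them in buckets 0 and 3 (x1 - x0 == 3, 2 mask bits), giving
+ * mask_len 30 and left/right values 10.0.0.0 and 10.0.0.3.
+ */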
+
+/**
+ * unmap() - Delete group of rules from mapping table, renumber remaining ones
+ * @mt: Mapping table
+ * @rules: Original amount of rules in mapping table
+ * @start: First rule to be deleted
+ * @n: Amount of rules originated from same entry
+ * @to_offset: Amount of rules the deleted group maps to in the next field,
+ * used to renumber remaining mappings
+ * @is_key: Non-zero if this is the last field, mapping rules to verdict
+ * keys (no renumbering needed)
+ *
+ * This is used to unmap rules from the mapping table for a single field,
+ * maintaining consistency and compactness for the existing ones. In pictures:
+ * let's assume that we want to delete rules 2 and 3 from the following mapping
+ * table:
+ *
+ * rules
+ * 0 1 2 3 4
+ * map to: 4-10 4-10 11-15 11-15 16-18
+ *
+ * the result will be:
+ *
+ * rules
+ * 0 1 2
+ * map to: 4-10 4-10 11-13
+ *
+ * for fields before the last one. In case this is the mapping table for the
+ * last field in a set, and its rules map to verdict keys:
+ *
+ * rules
+ * 0 1 2 3 4
+ * key: 42 42 33 33 44
+ *
+ * the result will be:
+ *
+ * rules
+ * 0 1 2
+ * key: 42 42 44
+ */
+void unmap(union map_bucket *mt, int rules, int start, int n, int to_offset,
+ int is_key)
+{
+ int i;
+
+ memmove(mt + start, mt + start + n, (rules - start - n) * sizeof(*mt));
+ memset(mt + rules - n, 0, n * sizeof(*mt));
+
+ if (is_key)
+ return;
+
+ for (i = start; i < rules - n; i++)
+ mt[i].to -= to_offset;
+}
+
+/**
+ * drop() - Delete entry from lookup and mapping tables, given rule mapping
+ * @s: Set data
+ * @layout: Set layout
+ * @rmap: Table of rule maps, arrays of first rule and amount of rules
+ * in next field a given entry maps to, for each field
+ *
+ * For each rule in lookup table buckets mapping to this set of rules, drop
+ * all bits set in lookup table mapping. In pictures, assuming we want to drop
+ * rules 0 and 1 from this lookup table:
+ *
+ * bucket
+ * group 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+ * 0 0 1,2
+ * 1 1,2 0
+ * 2 0 1,2
+ * 3 0 1,2
+ * 4 0,1,2
+ * 5 0 1 2
+ * 6 0,1,2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
+ * 7 1,2 1,2 1 1 1 0,1 1 1 1 1 1 1 1 1 1 1
+ *
+ * rule 2 becomes rule 0, and the result will be:
+ *
+ * bucket
+ * group 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+ * 0 0
+ * 1 0
+ * 2 0
+ * 3 0
+ * 4 0
+ * 5 0
+ * 6 0
+ * 7 0 0
+ *
+ * once this is done, call unmap() to drop all the corresponding rule references
+ * from mapping tables.
+ */
+static void drop(struct set *s, struct desc_spec **layout,
+ struct rule_map rmap[16])
+{
+ struct field *f;
+ int i, g, b;
+
+ for_each_field(f, s, layout) {
+ for (g = 0; g < f->groups; g++) {
+ for (b = 0; b < BUCKETS; b++)
+ fold_bits(f->lt + (g * BUCKETS + b) * f->bsize,
+ rmap[i].x, rmap[i].n, f->bsize);
+ }
+ unmap(f->mt, f->rules, rmap[i].x, rmap[i].n, rmap[i + 1].n,
+ layout[i + 1]->type == KEY);
+
+ resize(f, f->rules, f->rules - rmap[i].n);
+ f->rules -= rmap[i].n;
+ }
+}
+
+/**
+ * list_or_del() - List set entries, or delete entry if del_match is passed
+ * @s: Set data
+ * @layout: Set layout
+ * @del_match: Optional, delete matching entry (concatenation of values)
+ *
+ * Return: 0 on listing and successful deletion, -ENOENT on match failure.
+ */
+int list_or_del(struct set *s, struct desc_spec **layout, uint8_t *del_match)
+{
+ int f0_rules, fx_rules, start, first_rule = 0, i, l, found;
+ struct rule_map rmap[16] = { 0 };
+ uint8_t *p = del_match;
+ struct field *f;
+
+ if (!del_match) {
+ fprintf(stdout, "List:");
+ for (l = 0; layout[l]->type != KEY; l++)
+ fprintf(stdout, " %s", layout[l]->label);
+ fprintf(stdout, "\n");
+ }
+
+ while ((f0_rules = rules_same_key(s->fields + 0, first_rule))) {
+ start = first_rule;
+ fx_rules = f0_rules;
+
+ p = del_match;
+ for (i = 0, f = s->fields; layout[i]->type != KEY; i++, f++) {
+ found = aggregate(f, layout[i]->type, start, fx_rules,
+ p);
+
+ rmap[i].x = start;
+ rmap[i].n = fx_rules;
+
+ if (del_match) {
+ if (!found)
+ break;
+ if (layout[i]->type == ADDR)
+ p += sizeof(struct addr);
+ else if (layout[i]->type == ADDR6)
+ p += sizeof(struct addr6);
+ else if (layout[i]->type == PORT)
+ p += sizeof(struct port);
+ else if (layout[i]->type == MAC)
+ p += sizeof(struct mac);
+
+ if (layout[i + 1]->type == KEY &&
+ *(uint32_t *)p == f->mt[start].key) {
+ drop(s, layout, rmap);
+ return 0;
+ }
+ }
+
+ if (layout[i + 1]->type == KEY) {
+ fprintf(stdout, "%u\n", f->mt[start].key);
+ } else {
+ fx_rules = f->mt[start].n;
+ start = f->mt[start].to;
+ }
+ }
+
+ first_rule += f0_rules;
+ }
+
+ if (del_match)
+ return -ENOENT;
+
+ return 0;
+}
+
+/**
+ * init() - Initialise set data
+ * @set: Empty set, array of fields
+ * @layout: Parsed set layout
+ *
+ * Return: 0 on success, -ENOMEM on allocation failure.
+ */
+int init(struct set *s, struct desc_spec **layout)
+{
+ struct field *f;
+ int i;
+
+ for (i = 0; layout[i]->type != KEY; i++);
+
+ s->fields = malloc(sizeof(struct field) * i);
+ if (!s->fields)
+ return -ENOMEM;
+
+ s->fields[i - 1].groups = 0;
+
+ for_each_field(f, s, layout) {
+ f->groups = layout[i]->len * 2; /* 4-bit groups */
+ f->offset = layout[i]->offset;
+ f->bsize = 0;
+ f->rules = 0;
+ f->lt = NULL;
+ f->mt = NULL;
+ }
+
+ return 0;
+}
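+
+/* Layout sketch, for illustration only: for a layout of one IPv6 address
+ * and one port, init() allocates two fields with 32 and 4 four-bit groups
+ * respectively; lookup and mapping tables start out empty and are
+ * allocated by resize() on the first insertion.
+ */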
+
+#ifdef VERBOSE
+/**
+ * show_lookup() - Print lookup table for given field
+ * @f: Field containing lookup table
+ *
+ * For documentation purposes only.
+ */
+void show_lookup(struct field *f)
+{
+ int bucket, group, in_group = 0, v;
+ uint8_t *copy, *p;
+
+ copy = malloc(f->groups * BUCKETS * f->bsize);
+ memcpy(copy, f->lt, f->groups * BUCKETS * f->bsize);
+
+ fprintf(stdout, "%20s\n", "bucket");
+ fprintf(stdout, "group ");
+ for (bucket = 0; bucket < BUCKETS; bucket++)
+ fprintf(stdout, "%5i", bucket);
+ fprintf(stdout, "\n");
+
+ p = copy;
+ for (group = 0; group < f->groups; group++) {
+ if (in_group)
+ fprintf(stdout, " ");
+ else
+ fprintf(stdout, "%3i ", group);
+
+ in_group = 0;
+ for (bucket = 0; bucket < BUCKETS; bucket++) {
+ v = ffs_clear(p, f->bsize, 0);
+ if (v >= 0) {
+ in_group = 1;
+ fprintf(stdout, "%5i", v);
+ } else {
+ fprintf(stdout, " ");
+ }
+ p += f->bsize;
+ }
+
+ if (in_group) {
+ p -= BUCKETS * f->bsize;
+ group--;
+ }
+ fprintf(stdout, "\n");
+ }
+
+ free(copy);
+}
+
+/**
+ * show_lookup() - Print mapping table for given field
+ * @field: Field containing mapping table
+ * @to_key: Last field: print verdict keys instead of mapping to next field
+ *
+ * For documentation purposes only.
+ */
+void show_mapping(struct field *f, int to_key)
+{
+ uint32_t prev_key;
+ int r, prev_r;
+
+ prev_key = f->mt[0].key;
+ prev_r = 0;
+ for (r = 1; r < f->rules + 1; r++) {
+ if (r < f->rules && f->mt[r].key == prev_key)
+ continue;
+
+ if (prev_r != r - 1)
+ fprintf(stdout, "( %i-%i => ", prev_r, r - 1);
+ else
+ fprintf(stdout, "( %i => ", r - 1);
+
+ if (to_key) {
+ fprintf(stdout, "%u ) ", prev_key);
+ } else {
+ if (f->mt[r - 1].n > 1) {
+ fprintf(stdout, "%i-%i ) ", f->mt[r - 1].to,
+ f->mt[r - 1].to + f->mt[r - 1].n - 1);
+ } else {
+ fprintf(stdout, "%i ) ", f->mt[r - 1].to);
+ }
+ }
+
+ if (r < f->rules) {
+ prev_key = f->mt[r].key;
+ prev_r = r;
+ }
+ }
+
+ fprintf(stdout, "\n");
+}
+#endif