author    Pablo Neira Ayuso <pablo@netfilter.org>  2021-05-10 18:52:45 +0200
committer Pablo Neira Ayuso <pablo@netfilter.org>  2021-05-11 21:39:01 +0200
commit    419d196886889e9b37f76f8c803cb08dcbc05505 (patch)
tree      eed1e42837a4a55d8cf16e65914b9ae961adddcd /src/segtree.c
parent    62b02808594d962f83e8b76f4da32da0673c7cfe (diff)
src: add set element catch-all support
Add a catchall expression (EXPR_SET_ELEM_CATCHALL). Use the asterisk (*)
to represent the catch-all set element, e.g.

	table x {
		set y {
			type ipv4_addr
			counter
			elements = { 1.2.3.4 counter packets 0 bytes 0,
				     * counter packets 0 bytes 0 }
		}
	}

Special handling for segtree: zap the catch-all element from the set
element list and re-add it after processing.

Remove wildcard_expr deadcode in src/parser_bison.y

This patch also adds several tests for the tests/py and tests/shell
infrastructures.

Acked-by: Phil Sutter <phil@nwl.cc>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
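For orientation, the element-key check that both set_to_intervals() and interval_map_decompose() open-code in the hunks below can be read as a single predicate. The helper here is only an illustrative sketch distilled from this patch, not code from the tree; it assumes the struct expr fields (etype, key, left) exactly as they appear in the diff:

	/* Hypothetical helper, distilled from the hunks below: a mapping
	 * carries its key on the left-hand side, a plain set element
	 * carries it directly. Returns true for the catch-all (*) element. */
	static bool elem_is_catchall(const struct expr *i)
	{
		const struct expr *key;

		if (i->etype == EXPR_MAPPING)
			key = i->left->key;
		else if (i->etype == EXPR_SET_ELEM)
			key = i->key;
		else
			return false;

		return key->etype == EXPR_SET_ELEM_CATCHALL;
	}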
Diffstat (limited to 'src/segtree.c')
-rw-r--r--  src/segtree.c  41
1 file changed, 40 insertions(+), 1 deletion(-)
diff --git a/src/segtree.c b/src/segtree.c
index 353a0053..a4e047e7 100644
--- a/src/segtree.c
+++ b/src/segtree.c
@@ -618,10 +618,27 @@ int set_to_intervals(struct list_head *errs, struct set *set,
 		     struct expr *init, bool add, unsigned int debug_mask,
 		     bool merge, struct output_ctx *octx)
 {
+	struct expr *catchall = NULL, *i, *in, *key;
 	struct elementary_interval *ei, *next;
 	struct seg_tree tree;
 	LIST_HEAD(list);
 
+	list_for_each_entry_safe(i, in, &init->expressions, list) {
+		if (i->etype == EXPR_MAPPING)
+			key = i->left->key;
+		else if (i->etype == EXPR_SET_ELEM)
+			key = i->key;
+		else
+			continue;
+
+		if (key->etype == EXPR_SET_ELEM_CATCHALL) {
+			init->size--;
+			catchall = i;
+			list_del(&i->list);
+			break;
+		}
+	}
+
 	seg_tree_init(&tree, set, init, debug_mask);
 	if (set_to_segtree(errs, set, init, &tree, add, merge) < 0)
 		return -1;
@@ -643,6 +660,11 @@ int set_to_intervals(struct list_head *errs, struct set *set,
 		pr_gmp_debug("\n");
 	}
 
+	if (catchall) {
+		list_add_tail(&catchall->list, &init->expressions);
+		init->size++;
+	}
+
 	return 0;
 }
@@ -682,6 +704,9 @@ struct expr *get_set_intervals(const struct set *set, const struct expr *init)
 			i->flags |= EXPR_F_INTERVAL_END;
 			compound_expr_add(new_init, expr_clone(i));
 			break;
+		case EXPR_SET_ELEM_CATCHALL:
+			compound_expr_add(new_init, expr_clone(i));
+			break;
 		default:
 			range_expr_value_low(low, i);
 			set_elem_add(set, new_init, low, 0, i->byteorder);
@@ -941,8 +966,8 @@ next:
 
 void interval_map_decompose(struct expr *set)
 {
+	struct expr *i, *next, *low = NULL, *end, *catchall = NULL, *key;
 	struct expr **elements, **ranges;
-	struct expr *i, *next, *low = NULL, *end;
 	unsigned int n, m, size;
 	mpz_t range, p;
 	bool interval;
@@ -959,6 +984,17 @@ void interval_map_decompose(struct expr *set)
 	/* Sort elements */
 	n = 0;
 	list_for_each_entry_safe(i, next, &set->expressions, list) {
+		key = NULL;
+		if (i->etype == EXPR_SET_ELEM)
+			key = i->key;
+		else if (i->etype == EXPR_MAPPING)
+			key = i->left->key;
+
+		if (key && key->etype == EXPR_SET_ELEM_CATCHALL) {
+			list_del(&i->list);
+			catchall = i;
+			continue;
+		}
 		compound_expr_remove(set, i);
 		elements[n++] = i;
 	}
@@ -1094,6 +1130,9 @@ void interval_map_decompose(struct expr *set)
 		compound_expr_add(set, i);
 out:
+	if (catchall)
+		compound_expr_add(set, catchall);
+
 	mpz_clear(range);
 	mpz_clear(p);
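As a closing illustration, the zap-and-re-add handling added by this patch amounts to unlinking one node from an intrusive list before a processing step and appending it again afterwards. The standalone sketch below shows that pattern under simplified assumptions: struct node, its catchall flag and the demo values are hypothetical stand-ins, and the local list helpers merely mimic the list_del()/list_add_tail() semantics used in the hunks above.

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	/* Minimal intrusive list, mimicking the kernel-style helpers used above. */
	struct list_head { struct list_head *prev, *next; };

	static void list_init(struct list_head *h) { h->prev = h->next = h; }

	static void list_add_tail(struct list_head *n, struct list_head *h)
	{
		n->prev = h->prev;
		n->next = h;
		h->prev->next = n;
		h->prev = n;
	}

	static void list_del(struct list_head *n)
	{
		n->prev->next = n->next;
		n->next->prev = n->prev;
	}

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	/* Stand-in for a set element; "catchall" plays the role of
	 * EXPR_SET_ELEM_CATCHALL. */
	struct node {
		struct list_head list;
		bool catchall;
		int value;
	};

	int main(void)
	{
		struct node a = { .value = 1 }, b = { .catchall = true }, c = { .value = 3 };
		struct list_head init, *pos, *tmp;
		struct node *catchall = NULL;

		list_init(&init);
		list_add_tail(&a.list, &init);
		list_add_tail(&b.list, &init);
		list_add_tail(&c.list, &init);

		/* Zap the catch-all element before interval processing ... */
		for (pos = init.next; pos != &init; pos = tmp) {
			struct node *n = container_of(pos, struct node, list);

			tmp = pos->next;
			if (n->catchall) {
				catchall = n;
				list_del(pos);
				break;
			}
		}

		/* ... the segtree/decompose work would run on the remaining
		 * elements here ... */

		/* ... then re-add the catch-all, as the hunks above do. */
		if (catchall)
			list_add_tail(&catchall->list, &init);

		for (pos = init.next; pos != &init; pos = pos->next)
			printf("value=%d catchall=%d\n",
			       container_of(pos, struct node, list)->value,
			       container_of(pos, struct node, list)->catchall);
		return 0;
	}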