author		Phil Sutter <phil@nwl.cc>	2018-01-10 21:32:04 +0100
committer	Pablo Neira Ayuso <pablo@netfilter.org>	2018-01-11 13:20:38 +0100
commit		9a4b513014cfdeaad6d247b72a7924b3a536cfe9 (patch)
tree		4bf67caa47b85400ec036a2562e17f439c3007e4 /src
parent		0b3ccd27e12d1df442aa3eac40a2ccb63d6c6407 (diff)
src: Don't merge adjacent/overlapping ranges
Previously, when adding multiple ranges to a set, they were merged if they were overlapping or adjacent. This could be inconvenient, since afterwards it is no longer easily possible to remove one of the merged ranges while keeping the others in place.

Since overlapping ranges are not allowed, this patch adds a check for newly added ranges to make sure they don't overlap if merging is turned off.

Note that it is not (yet?) possible to enable range merging using the nft tool.

The testsuite had to be adjusted as well: one test in tests/py was changed to avoid adding overlapping ranges, and the test in tests/shell which explicitly tests for this feature was dropped.

Signed-off-by: Phil Sutter <phil@nwl.cc>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
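To illustrate the behaviour change, consider adding two overlapping ranges in a single command (hypothetical table and set names; assume "myset" is an interval set):

    # nft add element ip filter myset '{ 10.0.0.1-10.0.0.50, 10.0.0.40-10.0.0.60 }'

Before this patch, the two ranges were silently merged into a single element 10.0.0.1-10.0.0.60. With this patch, the command is rejected with the "interval overlaps with previous one" error introduced in segtree.c below.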
Diffstat (limited to 'src')
-rw-r--r--   src/libnftables.c |  1
-rw-r--r--   src/rule.c        |  8
-rw-r--r--   src/segtree.c     | 38
3 files changed, 38 insertions(+), 9 deletions(-)
diff --git a/src/libnftables.c b/src/libnftables.c
index c86d8947..8a18bb78 100644
--- a/src/libnftables.c
+++ b/src/libnftables.c
@@ -43,6 +43,7 @@ static int nft_netlink(struct nft_ctx *nft,
 	ctx.nf_sock = nf_sock;
 	ctx.cache = &nft->cache;
 	ctx.debug_mask = nft->debug_mask;
+	ctx.range_merge = nft->range_merge;
 	init_list_head(&ctx.list);
 	ret = do_command(&ctx, cmd);
 	if (ret < 0)
diff --git a/src/rule.c b/src/rule.c
index bb9add07..edd0ff6f 100644
--- a/src/rule.c
+++ b/src/rule.c
@@ -997,7 +997,8 @@ static int do_add_setelems(struct netlink_ctx *ctx, const struct handle *h,
 	set = set_lookup(table, h->set);
 
 	if (set->flags & NFT_SET_INTERVAL &&
-	    set_to_intervals(ctx->msgs, set, init, true, ctx->debug_mask) < 0)
+	    set_to_intervals(ctx->msgs, set, init, true,
+			     ctx->debug_mask, ctx->range_merge) < 0)
 		return -1;
 
 	return __do_add_setelems(ctx, h, set, init, flags);
@@ -1009,7 +1010,7 @@ static int do_add_set(struct netlink_ctx *ctx, const struct handle *h,
 	if (set->init != NULL) {
 		if (set->flags & NFT_SET_INTERVAL &&
 		    set_to_intervals(ctx->msgs, set, set->init, true,
-				     ctx->debug_mask) < 0)
+				     ctx->debug_mask, ctx->range_merge) < 0)
 			return -1;
 	}
 	if (netlink_add_set(ctx, h, set, flags) < 0)
@@ -1108,7 +1109,8 @@ static int do_delete_setelems(struct netlink_ctx *ctx, const struct handle *h,
 	set = set_lookup(table, h->set);
 
 	if (set->flags & NFT_SET_INTERVAL &&
-	    set_to_intervals(ctx->msgs, set, expr, false, ctx->debug_mask) < 0)
+	    set_to_intervals(ctx->msgs, set, expr, false,
+			     ctx->debug_mask, ctx->range_merge) < 0)
 		return -1;
 
 	if (netlink_delete_setelems(ctx, h, expr) < 0)
diff --git a/src/segtree.c b/src/segtree.c
index 8d36cc9b..d2c4efaa 100644
--- a/src/segtree.c
+++ b/src/segtree.c
@@ -375,8 +375,26 @@ static int set_overlap(struct list_head *msgs, const struct set *set,
 	return 0;
 }
 
+static int intervals_overlap(struct list_head *msgs,
+			     struct elementary_interval **intervals,
+			     unsigned int keylen)
+{
+	unsigned int i, j;
+
+	for (i = 0; i < keylen - 1; i++) {
+		for (j = i + 1; j < keylen; j++) {
+			if (interval_overlap(intervals[i], intervals[j]))
+				return expr_error(msgs, intervals[j]->expr,
+						  "interval overlaps with previous one");
+		}
+	}
+
+	return 0;
+}
+
 static int set_to_segtree(struct list_head *msgs, struct set *set,
-			  struct expr *init, struct seg_tree *tree, bool add)
+			  struct expr *init, struct seg_tree *tree,
+			  bool add, bool merge)
 {
 	struct elementary_interval *intervals[init->size];
 	struct expr *i, *next;
@@ -394,6 +412,12 @@ static int set_to_segtree(struct list_head *msgs, struct set *set,
 
 	n = expr_to_intervals(init, tree->keylen, intervals);
 
+	if (add && !merge) {
+		err = intervals_overlap(msgs, intervals, n);
+		if (err < 0)
+			return err;
+	}
+
 	list_for_each_entry_safe(i, next, &init->expressions, list) {
 		list_del(&i->list);
 		expr_free(i);
@@ -450,7 +474,7 @@ static bool segtree_needs_first_segment(const struct set *set,
 
 static void segtree_linearize(struct list_head *list, const struct set *set,
 			      const struct expr *init, struct seg_tree *tree,
-			      bool add)
+			      bool add, bool merge)
 {
 	bool needs_first_segment = segtree_needs_first_segment(set, init, add);
 	struct elementary_interval *ei, *nei, *prev = NULL;
@@ -489,7 +513,8 @@ static void segtree_linearize(struct list_head *list, const struct set *set,
 			mpz_sub_ui(q, ei->left, 1);
 			nei = ei_alloc(p, q, NULL, EI_F_INTERVAL_END);
 			list_add_tail(&nei->list, list);
-		} else if (add && ei->expr->ops->type != EXPR_MAPPING) {
+		} else if (add && merge &&
+			   ei->expr->ops->type != EXPR_MAPPING) {
 			/* Merge contiguous segments only in case of
 			 * new additions.
 			 */
@@ -550,16 +575,17 @@ static void set_insert_interval(struct expr *set, struct seg_tree *tree,
 }
 
 int set_to_intervals(struct list_head *errs, struct set *set,
-		     struct expr *init, bool add, unsigned int debug_mask)
+		     struct expr *init, bool add, unsigned int debug_mask,
+		     bool merge)
 {
 	struct elementary_interval *ei, *next;
 	struct seg_tree tree;
 	LIST_HEAD(list);
 
 	seg_tree_init(&tree, set, init, debug_mask);
-	if (set_to_segtree(errs, set, init, &tree, add) < 0)
+	if (set_to_segtree(errs, set, init, &tree, add, merge) < 0)
 		return -1;
-	segtree_linearize(&list, set, init, &tree, add);
+	segtree_linearize(&list, set, init, &tree, add, merge);
 
 	init->size = 0;
 	list_for_each_entry_safe(ei, next, &list, list) {
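
The pairwise check added in intervals_overlap() above reduces to the classic closed-interval overlap test: two intervals [a, b] and [c, d] overlap iff a <= d and c <= b. The following standalone sketch mirrors that logic; plain uint64_t bounds stand in for nftables' GMP-backed struct elementary_interval, and this interval_overlap() is an assumed simplification of the helper that segtree.c already provides:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* simplified stand-in for struct elementary_interval's [left, right] bounds */
struct interval {
	uint64_t left;
	uint64_t right;
};

/* closed intervals overlap iff each one starts no later than the other ends */
static bool interval_overlap(const struct interval *a, const struct interval *b)
{
	return a->left <= b->right && b->left <= a->right;
}

/* mirrors the patch's intervals_overlap(): compare every pair once and
 * flag the later interval of the first offending pair */
static int intervals_overlap(const struct interval *intervals, unsigned int n)
{
	unsigned int i, j;

	for (i = 0; i + 1 < n; i++) {	/* i + 1 < n also copes with n == 0 */
		for (j = i + 1; j < n; j++) {
			if (interval_overlap(&intervals[i], &intervals[j])) {
				fprintf(stderr,
					"interval %u overlaps with previous one\n",
					j);
				return -1;
			}
		}
	}
	return 0;
}

int main(void)
{
	/* 10.0.0.1-10.0.0.50 and 10.0.0.40-10.0.0.60 as host-order integers */
	struct interval elems[] = {
		{ 0x0a000001UL, 0x0a000032UL },
		{ 0x0a000028UL, 0x0a00003cUL },
	};

	return intervals_overlap(elems, 2) < 0 ? 1 : 0;
}

The check is quadratic in the number of new elements, which is acceptable here since it only runs when elements are added with merging disabled.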