From 62a3d29539aa109fed1c8a20d63ef95948b13842 Mon Sep 17 00:00:00 2001
From: Jozsef Kadlecsik
Date: Mon, 23 Aug 2010 16:48:14 +0200
Subject: Cleanup, compatibility

- Use is_vmalloc_addr when freeing vmalloc or kmalloc-ed areas. Thus
  we can get rid of a flag and simplify some functions.
- When checking "same" sets, ignore hash size, because resizing
  changes it.
- 2.6.35 compatibility added.
- Discuss backward/forward compatibilities in the README file.
---
 kernel/include/linux/netfilter/ip_set.h       | 28 ++++++++++------------------
 kernel/include/linux/netfilter/ip_set_chash.h | 24 +++++++++---------------
 2 files changed, 19 insertions(+), 33 deletions(-)

(limited to 'kernel/include/linux')

diff --git a/kernel/include/linux/netfilter/ip_set.h b/kernel/include/linux/netfilter/ip_set.h
index b83454a..1c41396 100644
--- a/kernel/include/linux/netfilter/ip_set.h
+++ b/kernel/include/linux/netfilter/ip_set.h
@@ -245,13 +245,6 @@ struct ip_set_type_variant {
 	bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
 };
 
-/* Flags for the set type variants */
-enum ip_set_type_flags {
-	/* Set members created by kmalloc */
-	IP_SET_FLAG_KMALLOC_BIT = 0,
-	IP_SET_FLAG_KMALLOC = (1 << IP_SET_FLAG_KMALLOC_BIT),
-};
-
 /* The core set type structure */
 struct ip_set_type {
 	struct list_head list;
@@ -294,8 +287,6 @@ struct ip_set {
 	const struct ip_set_type_variant *variant;
 	/* The actual INET family */
 	u8 family;
-	/* Set type flags, filled/modified by create/resize */
-	u8 flags;
 	/* The type specific data */
 	void *data;
 };
@@ -318,12 +309,14 @@ extern int ip_set_test(ip_set_id_t id, const struct sk_buff *skb,
 
 /* Allocate members */
 static inline void *
-ip_set_alloc(size_t size, gfp_t gfp_mask, u8 *flags)
+ip_set_alloc(size_t size, gfp_t gfp_mask)
 {
-	void *members = kzalloc(size, gfp_mask | __GFP_NOWARN);
+	void *members = NULL;
+
+	if (size < KMALLOC_MAX_SIZE)
+		members = kzalloc(size, gfp_mask | __GFP_NOWARN);
 
 	if (members) {
-		*flags |= IP_SET_FLAG_KMALLOC;
 		pr_debug("%p: allocated with kmalloc", members);
 		return members;
 	}
@@ -331,21 +324,20 @@ ip_set_alloc(size_t size, gfp_t gfp_mask, u8 *flags)
 	members = __vmalloc(size, gfp_mask | __GFP_ZERO, PAGE_KERNEL);
 	if (!members)
 		return NULL;
-	*flags &= ~IP_SET_FLAG_KMALLOC;
 	pr_debug("%p: allocated with vmalloc", members);
 
 	return members;
 }
 
 static inline void
-ip_set_free(void *members, u8 flags)
+ip_set_free(void *members)
 {
 	pr_debug("%p: free with %s", members,
-		 flags & IP_SET_FLAG_KMALLOC ? "kmalloc" : "vmalloc");
-	if (flags & IP_SET_FLAG_KMALLOC)
-		kfree(members);
-	else
+		 is_vmalloc_addr(members) ? "vfree" : "kfree");
+	if (is_vmalloc_addr(members))
 		vfree(members);
+	else
+		kfree(members);
 }
 
 static inline bool
diff --git a/kernel/include/linux/netfilter/ip_set_chash.h b/kernel/include/linux/netfilter/ip_set_chash.h
index e0e16bd..5e615e4 100644
--- a/kernel/include/linux/netfilter/ip_set_chash.h
+++ b/kernel/include/linux/netfilter/ip_set_chash.h
@@ -86,7 +86,7 @@ del_cidr(struct chash_nets *nets, u8 host_mask, u8 cidr)
 }
 
 static void
-chash_destroy(struct slist *t, u8 htable_bits, u8 flags)
+chash_destroy(struct slist *t, u8 htable_bits)
 {
 	struct slist *n, *tmp;
 	u32 i;
@@ -96,7 +96,7 @@ chash_destroy(struct slist *t, u8 htable_bits, u8 flags)
 			/* FIXME: slab cache */
 			kfree(n);
 
-	ip_set_free(t, flags);
+	ip_set_free(t);
 }
 
 static size_t
@@ -146,7 +146,7 @@ ip_set_hash_destroy(struct ip_set *set)
 	if (with_timeout(h->timeout))
 		del_timer_sync(&h->gc);
 
-	chash_destroy(h->htable, h->htable_bits, set->flags);
+	chash_destroy(h->htable, h->htable_bits);
 	kfree(h);
 
 	set->data = NULL;
@@ -296,7 +296,6 @@ type_pf_resize(struct ip_set *set, gfp_t gfp_flags, bool retried)
 	struct slist *t, *n;
 	const struct type_pf_elem *data;
 	u32 i, j;
-	u8 oflags, flags;
 	int ret;
 
 retry:
@@ -306,12 +305,11 @@ retry:
 		/* In case we have plenty of memory :-) */
 		return -IPSET_ERR_HASH_FULL;
 	t = ip_set_alloc(jhash_size(htable_bits) * sizeof(struct slist),
-			 gfp_flags, &flags);
+			 gfp_flags);
 	if (!t)
 		return -ENOMEM;
 
 	write_lock_bh(&set->lock);
-	flags = oflags = set->flags;
 	for (i = 0; i < jhash_size(h->htable_bits); i++) {
 next_slot:
 		slist_for_each(n, &h->htable[i]) {
@@ -325,7 +323,7 @@ next_slot:
 					       data, gfp_flags);
 			if (ret < 0) {
 				write_unlock_bh(&set->lock);
-				chash_destroy(t, htable_bits, flags);
+				chash_destroy(t, htable_bits);
 				if (ret == -EAGAIN)
 					goto retry;
 				return ret;
@@ -339,10 +337,9 @@ next_slot:
 
 	h->htable = t;
 	h->htable_bits = htable_bits;
-	set->flags = flags;
 	write_unlock_bh(&set->lock);
 
-	chash_destroy(n, i, oflags);
+	chash_destroy(n, i);
 
 	return 0;
 }
@@ -750,7 +747,6 @@ type_pf_tresize(struct ip_set *set, gfp_t gfp_flags, bool retried)
 	struct slist *t, *n;
 	const struct type_pf_elem *data;
 	u32 i, j;
-	u8 oflags, flags;
 	int ret;
 
 	/* Try to cleanup once */
@@ -770,12 +766,11 @@ retry:
 		/* In case we have plenty of memory :-) */
 		return -IPSET_ERR_HASH_FULL;
 	t = ip_set_alloc(jhash_size(htable_bits) * sizeof(struct slist),
-			 gfp_flags, &flags);
+			 gfp_flags);
 	if (!t)
 		return -ENOMEM;
 
 	write_lock_bh(&set->lock);
-	flags = oflags = set->flags;
 	for (i = 0; i < jhash_size(h->htable_bits); i++) {
 next_slot:
 		slist_for_each(n, &h->htable[i]) {
@@ -790,7 +785,7 @@ next_slot:
 					type_pf_data_timeout(data));
 			if (ret < 0) {
 				write_unlock_bh(&set->lock);
-				chash_destroy(t, htable_bits, flags);
+				chash_destroy(t, htable_bits);
 				if (ret == -EAGAIN)
 					goto retry;
 				return ret;
@@ -804,10 +799,9 @@ next_slot:
 
 	h->htable = t;
 	h->htable_bits = htable_bits;
-	set->flags = flags;
 	write_unlock_bh(&set->lock);
 
-	chash_destroy(n, i, oflags);
+	chash_destroy(n, i);
 
 	return 0;
 }
--
cgit v1.2.3