author    Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>    2010-12-10 13:54:51 +0100
committer Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>    2010-12-10 13:54:51 +0100
commit    5a5c5896d89670a853c5b6557c1480ac0e239f86 (patch)
tree      8f0074dd5e986d2c1813e410bfaa278e19bda1ac /kernel/ip_set_hash_net.c
parent    4150bd9b04e91e6e5b24b918f5d1c4c338a0755c (diff)
Convert hash types from chash to ahash.
Instead of the cache-friendly hashing, use the array-based hashing. According to my tests the latter uses less memory, is faster at lookup and deletion, and is only slower at insertion.
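For illustration, a minimal userspace sketch of the two bucket layouts follows; the names below (chain_elem, array_bucket, array_bucket_test) are simplified stand-ins for this note only, not the actual definitions from ip_set_chash.h or ip_set_ahash.h:

#include <stdbool.h>
#include <stdint.h>

/* chash style: each hash slot anchors a singly linked chain, so every
 * step of a lookup may chase a pointer into a different cache line. */
struct chain_elem {
	uint32_t key;
	struct chain_elem *next;
};

/* ahash style: each hash slot holds a small per-slot array, so a lookup
 * is a linear scan over contiguous memory and a deletion can overwrite
 * the victim with the last used entry instead of relinking pointers. */
struct array_bucket {
	uint32_t *elem;		/* contiguous element storage */
	uint8_t size;		/* number of allocated entries */
	uint8_t pos;		/* index of the first free entry */
};

static bool array_bucket_test(const struct array_bucket *b, uint32_t key)
{
	for (uint8_t i = 0; i < b->pos; i++)
		if (b->elem[i] == key)
			return true;
	return false;
}

Insertion is the operation that can get more expensive with this layout, since a full per-slot array has to be grown and copied before a new element fits, which matches the trade-off described above.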
Diffstat (limited to 'kernel/ip_set_hash_net.c')
-rw-r--r-- kernel/ip_set_hash_net.c | 28
1 file changed, 12 insertions, 16 deletions
diff --git a/kernel/ip_set_hash_net.c b/kernel/ip_set_hash_net.c
index d252760..97cdb83 100644
--- a/kernel/ip_set_hash_net.c
+++ b/kernel/ip_set_hash_net.c
@@ -134,13 +134,13 @@ nla_put_failure:
#define PF 4
#define HOST_MASK 32
-#include <linux/netfilter/ipset/ip_set_chash.h>
+#include <linux/netfilter/ipset/ip_set_ahash.h>
static int
hash_net4_kadt(struct ip_set *set, const struct sk_buff *skb,
enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
{
- struct chash *h = set->data;
+ struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_net4_elem data = { .cidr = h->nets[0].cidr || HOST_MASK };
@@ -166,7 +166,7 @@ static int
hash_net4_uadt(struct ip_set *set, struct nlattr *head, int len,
enum ipset_adt adt, u32 *lineno, u32 flags)
{
- struct chash *h = set->data;
+ struct ip_set_hash *h = set->data;
struct nlattr *tb[IPSET_ATTR_ADT_MAX+1];
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_net4_elem data = { .cidr = HOST_MASK };
@@ -206,14 +206,12 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *head, int len,
static bool
hash_net_same_set(const struct ip_set *a, const struct ip_set *b)
{
- struct chash *x = a->data;
- struct chash *y = b->data;
+ struct ip_set_hash *x = a->data;
+ struct ip_set_hash *y = b->data;
/* Resizing changes htable_bits, so we ignore it */
return x->maxelem == y->maxelem
- && x->timeout == y->timeout
- && x->array_size == y->array_size
- && x->chain_limit == y->chain_limit;
+ && x->timeout == y->timeout;
}
/* The type variant functions: IPv6 */
@@ -319,13 +317,13 @@ nla_put_failure:
#define PF 6
#define HOST_MASK 128
-#include <linux/netfilter/ipset/ip_set_chash.h>
+#include <linux/netfilter/ipset/ip_set_ahash.h>
static int
hash_net6_kadt(struct ip_set *set, const struct sk_buff *skb,
enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
{
- struct chash *h = set->data;
+ struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_net6_elem data = { .cidr = h->nets[0].cidr || HOST_MASK };
@@ -344,7 +342,7 @@ static int
hash_net6_uadt(struct ip_set *set, struct nlattr *head, int len,
enum ipset_adt adt, u32 *lineno, u32 flags)
{
- struct chash *h = set->data;
+ struct ip_set_hash *h = set->data;
struct nlattr *tb[IPSET_ATTR_ADT_MAX+1];
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_net6_elem data = { .cidr = HOST_MASK };
@@ -397,7 +395,7 @@ hash_net_create(struct ip_set *set, struct nlattr *head, int len, u32 flags)
{
struct nlattr *tb[IPSET_ATTR_CREATE_MAX+1];
u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
- struct chash *h;
+ struct ip_set_hash *h;
u8 hbits;
if (!(set->family == AF_INET || set->family == AF_INET6))
@@ -417,21 +415,19 @@ hash_net_create(struct ip_set *set, struct nlattr *head, int len, u32 flags)
maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
h = kzalloc(sizeof(*h)
- + sizeof(struct chash_nets)
+ + sizeof(struct ip_set_hash_nets)
* (set->family == AF_INET ? 32 : 128), GFP_KERNEL);
if (!h)
return -ENOMEM;
h->maxelem = maxelem;
- h->array_size = CHASH_DEFAULT_ARRAY_SIZE;
- h->chain_limit = CHASH_DEFAULT_CHAIN_LIMIT;
get_random_bytes(&h->initval, sizeof(h->initval));
h->timeout = IPSET_NO_TIMEOUT;
hbits = htable_bits(hashsize);
h->table = ip_set_alloc(
sizeof(struct htable)
- + jhash_size(hbits) * sizeof(struct slist),
+ + jhash_size(hbits) * sizeof(struct hbucket),
GFP_KERNEL);
if (!h->table) {
kfree(h);