author    Bart De Schuymer <bdschuym@pandora.be>  2002-06-01 19:23:47 +0000
committer Bart De Schuymer <bdschuym@pandora.be>  2002-06-01 19:23:47 +0000
commit    d891e9e5bc309d5aeb2ab774c76b34a92085b3e7 (patch)
tree      94b68fc1c01f90cad62a171c27007ff317031008
Initial revision
-rw-r--r--  br-nf-bds/README | 11
-rw-r--r--  br-nf-bds/linux/include/linux/netfilter.h | 189
-rw-r--r--  br-nf-bds/linux/include/linux/netfilter_ipv4.h | 80
-rw-r--r--  br-nf-bds/linux/include/linux/skbuff.h | 1152
-rw-r--r--  br-nf-bds/linux/net/Config.in | 95
-rw-r--r--  br-nf-bds/linux/net/bridge/Makefile | 18
-rw-r--r--  br-nf-bds/linux/net/bridge/br.c | 89
-rw-r--r--  br-nf-bds/linux/net/bridge/br_forward.c | 152
-rw-r--r--  br-nf-bds/linux/net/bridge/br_input.c | 168
-rw-r--r--  br-nf-bds/linux/net/bridge/br_netfilter.c | 567
-rw-r--r--  br-nf-bds/linux/net/bridge/br_private.h | 212
-rw-r--r--  br-nf-bds/linux/net/ipv4/ip_output.c | 1016
-rw-r--r--  br-nf-bds/linux/net/ipv4/netfilter/ip_tables.c | 1811
-rw-r--r--  br-nf-bds/linux/net/ipv4/netfilter/ipt_LOG.c | 363
-rw-r--r--  br-nf-bds/patches/bridge-nf-0.0.7-bds-against-2.4.18.diff | 975
-rw-r--r--  br-nf-bds/patches/bridge-nf-0.0.8-bds-against-2.4.18.diff | 983
-rw-r--r--  kernel/README | 6
-rw-r--r--  kernel/linux/include/linux/br_db.h | 53
-rw-r--r--  kernel/linux/include/linux/if_bridge.h | 114
-rw-r--r--  kernel/linux/include/linux/netfilter_bridge.h | 36
-rw-r--r--  kernel/linux/include/linux/netfilter_bridge/ebt_arp.h | 26
-rw-r--r--  kernel/linux/include/linux/netfilter_bridge/ebt_ip.h | 24
-rw-r--r--  kernel/linux/include/linux/netfilter_bridge/ebt_log.h | 17
-rw-r--r--  kernel/linux/include/linux/netfilter_bridge/ebt_nat.h | 13
-rw-r--r--  kernel/linux/include/linux/netfilter_bridge/ebt_redirect.h | 11
-rw-r--r--  kernel/linux/include/linux/netfilter_bridge/ebt_vlan.h | 18
-rw-r--r--  kernel/linux/include/linux/netfilter_bridge/ebtables.h | 332
-rw-r--r--  kernel/linux/net/Config.in | 98
-rw-r--r--  kernel/linux/net/Makefile | 63
-rw-r--r--  kernel/linux/net/bridge/br_input.c | 178
-rw-r--r--  kernel/linux/net/bridge/br_private.h | 212
-rw-r--r--  kernel/linux/net/bridge/netfilter/Config.in | 15
-rw-r--r--  kernel/linux/net/bridge/netfilter/Makefile | 25
-rw-r--r--  kernel/linux/net/bridge/netfilter/br_db.c | 357
-rw-r--r--  kernel/linux/net/bridge/netfilter/ebt_arp.c | 107
-rw-r--r--  kernel/linux/net/bridge/netfilter/ebt_ip.c | 81
-rw-r--r--  kernel/linux/net/bridge/netfilter/ebt_log.c | 111
-rw-r--r--  kernel/linux/net/bridge/netfilter/ebt_nat.c | 106
-rw-r--r--  kernel/linux/net/bridge/netfilter/ebt_redirect.c | 65
-rw-r--r--  kernel/linux/net/bridge/netfilter/ebt_vlan.c | 124
-rw-r--r--  kernel/linux/net/bridge/netfilter/ebtable_broute.c | 80
-rw-r--r--  kernel/linux/net/bridge/netfilter/ebtable_filter.c | 93
-rw-r--r--  kernel/linux/net/bridge/netfilter/ebtable_nat.c | 156
-rw-r--r--  kernel/linux/net/bridge/netfilter/ebtables.c | 1189
-rw-r--r--  kernel/linux/net/netsyms.c | 595
-rw-r--r--  kernel/patches/base-patches/ebtables-v2.0pre1_vs_2.4.18.diff | 2621
-rw-r--r--  kernel/patches/base-patches/ebtables-v2.0pre2_vs_2.4.18.diff | 2752
-rw-r--r--  kernel/patches/base-patches/ebtables-v2.0pre3_vs_2.4.18.diff | 3108
-rw-r--r--  kernel/patches/base-patches/ebtables-v2.0pre4_vs_2.4.18.diff | 3122
-rw-r--r--  kernel/patches/base-patches/ebtables-v2.0pre5_vs_2.4.18.diff | 3133
-rw-r--r--  kernel/patches/base-patches/ebtables-v2.0pre6_vs_2.4.18.diff | 3135
-rw-r--r--  kernel/patches/base-patches/ebtables-v2.0pre7_vs_2.4.18.diff | 3285
-rw-r--r--  kernel/patches/incremental-patches/ebtables-v2.0_vs_2.4.18.pre2.001.diff | 11
-rw-r--r--  kernel/patches/incremental-patches/ebtables-v2.0_vs_2.4.18.pre3.001.diff | 166
-rw-r--r--  kernel/patches/incremental-patches/ebtables-v2.0_vs_2.4.18.pre3.002.diff | 66
-rw-r--r--  kernel/patches/incremental-patches/ebtables-v2.0_vs_2.4.18.pre3.003.diff | 367
-rw-r--r--  kernel/patches/incremental-patches/ebtables-v2.0_vs_2.4.18.pre3.004.diff | 252
-rw-r--r--  kernel/patches/incremental-patches/ebtables-v2.0_vs_2.4.18.pre3.005.diff | 22
-rw-r--r--  kernel/patches/incremental-patches/ebtables-v2.0_vs_2.4.18.pre4.001.diff | 14
-rw-r--r--  kernel/patches/incremental-patches/ebtables-v2.0_vs_2.4.18.pre5.001.diff | 51
-rw-r--r--  kernel/patches/incremental-patches/ebtables-v2.0_vs_2.4.18.pre6.001.diff | 12
-rw-r--r--  kernel/patches/incremental-patches/ebtables-v2.0_vs_2.4.18.pre7.001.diff | 172
-rw-r--r--  userspace/ebtables2/COPYING | 339
-rw-r--r--  userspace/ebtables2/ChangeLog | 51
-rw-r--r--  userspace/ebtables2/INSTALL | 27
-rw-r--r--  userspace/ebtables2/Makefile | 58
-rw-r--r--  userspace/ebtables2/THANKS | 9
-rw-r--r--  userspace/ebtables2/communication.c | 454
-rw-r--r--  userspace/ebtables2/ebtables.8 | 434
-rw-r--r--  userspace/ebtables2/ebtables.c | 1655
-rw-r--r--  userspace/ebtables2/ethertypes | 34
-rw-r--r--  userspace/ebtables2/extensions/Makefile | 12
-rw-r--r--  userspace/ebtables2/extensions/ebt_arp.c | 289
-rw-r--r--  userspace/ebtables2/extensions/ebt_ip.c | 318
-rw-r--r--  userspace/ebtables2/extensions/ebt_log.c | 197
-rw-r--r--  userspace/ebtables2/extensions/ebt_nat.c | 222
-rw-r--r--  userspace/ebtables2/extensions/ebt_redirect.c | 109
-rw-r--r--  userspace/ebtables2/extensions/ebt_standard.c | 70
-rw-r--r--  userspace/ebtables2/extensions/ebt_vlan.c | 231
-rw-r--r--  userspace/ebtables2/extensions/ebtable_broute.c | 25
-rw-r--r--  userspace/ebtables2/extensions/ebtable_filter.c | 32
-rw-r--r--  userspace/ebtables2/extensions/ebtable_nat.c | 32
-rw-r--r--  userspace/ebtables2/include/ebtables_u.h | 206
-rw-r--r--  userspace/patches/incremental-patches/ebtables-v2.0pre2.001.diff | 121
-rw-r--r--  userspace/patches/incremental-patches/ebtables-v2.0pre2.002.diff | 2204
-rw-r--r--  userspace/patches/incremental-patches/ebtables-v2.0pre2.003.diff | 40
-rw-r--r--  userspace/patches/incremental-patches/ebtables-v2.0pre2.004.diff | 50
-rw-r--r--  userspace/patches/incremental-patches/ebtables-v2.0pre3.001.diff | 245
-rw-r--r--  userspace/patches/incremental-patches/ebtables-v2.0pre3.002.diff | 194
-rw-r--r--  userspace/patches/incremental-patches/ebtables-v2.0pre3.003.diff | 66
-rw-r--r--  userspace/patches/incremental-patches/ebtables-v2.0pre3.004.diff | 483
-rw-r--r--  userspace/patches/incremental-patches/ebtables-v2.0pre4.001.diff | 522
-rw-r--r--  userspace/patches/incremental-patches/ebtables-v2.0pre5.001.diff | 50
-rw-r--r--  userspace/patches/incremental-patches/ebtables-v2.0pre5.002.diff | 61
-rw-r--r--  userspace/patches/incremental-patches/ebtables-v2.0pre6.001.diff | 314
-rw-r--r--  userspace/patches/zipped/ebtables-v2.0pre1.tar.gz | bin 0 -> 32529 bytes
-rw-r--r--  userspace/patches/zipped/ebtables-v2.0pre2.tar.gz | bin 0 -> 49417 bytes
-rw-r--r--  userspace/patches/zipped/ebtables-v2.0pre3.tar.gz | bin 0 -> 51235 bytes
-rw-r--r--  userspace/patches/zipped/ebtables-v2.0pre4.tar.gz | bin 0 -> 52135 bytes
-rw-r--r--  userspace/patches/zipped/ebtables-v2.0pre5.tar.gz | bin 0 -> 35558 bytes
-rw-r--r--  userspace/patches/zipped/ebtables-v2.0pre6.tar.gz | bin 0 -> 36908 bytes
101 files changed, 43629 insertions, 0 deletions
diff --git a/br-nf-bds/README b/br-nf-bds/README
new file mode 100644
index 0000000..235524d
--- /dev/null
+++ b/br-nf-bds/README
@@ -0,0 +1,11 @@
+---
+These patches differ from Lennert's patches because I don't agree with
+Lennert's. Don't worry, mine are better ;)
+
+Date of first branch: April 27, 2002
+
+Changes in the policy will be mentioned in this file.
+
+Bart De Schuymer,
+June 1, 2002
+---
diff --git a/br-nf-bds/linux/include/linux/netfilter.h b/br-nf-bds/linux/include/linux/netfilter.h
new file mode 100644
index 0000000..eb1adc6
--- /dev/null
+++ b/br-nf-bds/linux/include/linux/netfilter.h
@@ -0,0 +1,189 @@
+#ifndef __LINUX_NETFILTER_H
+#define __LINUX_NETFILTER_H
+
+#ifdef __KERNEL__
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/net.h>
+#include <linux/if.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#endif
+
+/* Responses from hook functions. */
+#define NF_DROP 0
+#define NF_ACCEPT 1
+#define NF_STOLEN 2
+#define NF_QUEUE 3
+#define NF_REPEAT 4
+#define NF_MAX_VERDICT NF_REPEAT
+
+/* Generic cache responses from hook functions. */
+#define NFC_ALTERED 0x8000
+#define NFC_UNKNOWN 0x4000
+
+#ifdef __KERNEL__
+#include <linux/config.h>
+#ifdef CONFIG_NETFILTER
+
+extern void netfilter_init(void);
+
+/* Largest hook number + 1 */
+#define NF_MAX_HOOKS 8
+
+struct sk_buff;
+struct net_device;
+
+typedef unsigned int nf_hookfn(unsigned int hooknum,
+ struct sk_buff **skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *));
+
+struct nf_hook_ops
+{
+ struct list_head list;
+
+ /* User fills in from here down. */
+ nf_hookfn *hook;
+ int pf;
+ int hooknum;
+ /* Hooks are ordered in ascending priority. */
+ int priority;
+};
+
+struct nf_sockopt_ops
+{
+ struct list_head list;
+
+ int pf;
+
+ /* Non-inclusive ranges: use 0/0/NULL to never get called. */
+ int set_optmin;
+ int set_optmax;
+ int (*set)(struct sock *sk, int optval, void *user, unsigned int len);
+
+ int get_optmin;
+ int get_optmax;
+ int (*get)(struct sock *sk, int optval, void *user, int *len);
+
+ /* Number of users inside set() or get(). */
+ unsigned int use;
+ struct task_struct *cleanup_task;
+};
+
+/* Each queued (to userspace) skbuff has one of these. */
+struct nf_info
+{
+ /* The ops struct which sent us to userspace. */
+ struct nf_hook_ops *elem;
+
+ /* If we're sent to userspace, this keeps housekeeping info */
+ int pf;
+ unsigned int hook;
+ struct net_device *indev, *outdev;
+ int (*okfn)(struct sk_buff *);
+};
+
+/* Function to register/unregister hook points. */
+int nf_register_hook(struct nf_hook_ops *reg);
+void nf_unregister_hook(struct nf_hook_ops *reg);
+
+/* Functions to register get/setsockopt ranges (non-inclusive). You
+ need to check permissions yourself! */
+int nf_register_sockopt(struct nf_sockopt_ops *reg);
+void nf_unregister_sockopt(struct nf_sockopt_ops *reg);
+
+extern struct list_head nf_hooks[NPROTO][NF_MAX_HOOKS];
+
+/* Activate hook; either okfn or kfree_skb called, unless a hook
+ returns NF_STOLEN (in which case, it's up to the hook to deal with
+ the consequences).
+
+ Returns -ERRNO if packet dropped. Zero means queued, stolen or
+ accepted.
+*/
+
+/* RR:
+ > I don't want nf_hook to return anything because people might forget
+ > about async and trust the return value to mean "packet was ok".
+
+ AK:
+ Just document it clearly, then you can expect some sense from kernel
+ coders :)
+*/
+
+/* This is gross, but inline doesn't cut it for avoiding the function
+ call in fast path: gcc doesn't inline (needs value tracking?). --RR */
+#ifdef CONFIG_NETFILTER_DEBUG
+#define NF_HOOK(pf, hook, skb, indev, outdev, okfn) \
+ nf_hook_slow((pf), (hook), (skb), (indev), (outdev), (okfn), INT_MIN)
+#define NF_HOOK_THRESH nf_hook_slow
+#else
+#define NF_HOOK(pf, hook, skb, indev, outdev, okfn) \
+(list_empty(&nf_hooks[(pf)][(hook)]) \
+ ? (okfn)(skb) \
+ : nf_hook_slow((pf), (hook), (skb), (indev), (outdev), (okfn), INT_MIN))
+#define NF_HOOK_THRESH(pf, hook, skb, indev, outdev, okfn, thresh) \
+(list_empty(&nf_hooks[(pf)][(hook)]) \
+ ? (okfn)(skb) \
+ : nf_hook_slow((pf), (hook), (skb), (indev), (outdev), (okfn), (thresh)))
+#endif
+
+int nf_hook_slow(int pf, unsigned int hook, struct sk_buff *skb,
+ struct net_device *indev, struct net_device *outdev,
+ int (*okfn)(struct sk_buff *), int thresh);
+
+/* Call setsockopt() */
+int nf_setsockopt(struct sock *sk, int pf, int optval, char *opt,
+ int len);
+int nf_getsockopt(struct sock *sk, int pf, int optval, char *opt,
+ int *len);
+
+/* Packet queuing */
+typedef int (*nf_queue_outfn_t)(struct sk_buff *skb,
+ struct nf_info *info, void *data);
+extern int nf_register_queue_handler(int pf,
+ nf_queue_outfn_t outfn, void *data);
+extern int nf_unregister_queue_handler(int pf);
+extern void nf_reinject(struct sk_buff *skb,
+ struct nf_info *info,
+ unsigned int verdict);
+
+extern void (*ip_ct_attach)(struct sk_buff *, struct nf_ct_info *);
+
+#ifdef CONFIG_NETFILTER_DEBUG
+extern void nf_dump_skb(int pf, struct sk_buff *skb);
+#endif
+
+/* FIXME: Before cache is ever used, this must be implemented for real. */
+extern void nf_invalidate_cache(int pf);
+
+#else /* !CONFIG_NETFILTER */
+#define NF_HOOK(pf, hook, skb, indev, outdev, okfn) (okfn)(skb)
+#endif /*CONFIG_NETFILTER*/
+
+/* From arch/i386/kernel/smp.c:
+ *
+ * Why isn't this somewhere standard ??
+ *
+ * Maybe because this procedure is horribly buggy, and does
+ * not deserve to live. Think about signedness issues for five
+ * seconds to see why. - Linus
+ */
+
+/* Two signed, return a signed. */
+#define SMAX(a,b) ((ssize_t)(a)>(ssize_t)(b) ? (ssize_t)(a) : (ssize_t)(b))
+#define SMIN(a,b) ((ssize_t)(a)<(ssize_t)(b) ? (ssize_t)(a) : (ssize_t)(b))
+
+/* Two unsigned, return an unsigned. */
+#define UMAX(a,b) ((size_t)(a)>(size_t)(b) ? (size_t)(a) : (size_t)(b))
+#define UMIN(a,b) ((size_t)(a)<(size_t)(b) ? (size_t)(a) : (size_t)(b))
+
+/* Two unsigned, return a signed. */
+#define SUMAX(a,b) ((size_t)(a)>(size_t)(b) ? (ssize_t)(a) : (ssize_t)(b))
+#define SUMIN(a,b) ((size_t)(a)<(size_t)(b) ? (ssize_t)(a) : (ssize_t)(b))
+#endif /*__KERNEL__*/
+
+#endif /*__LINUX_NETFILTER_H*/
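
The header above defines the generic hook interface: a module supplies a function with the nf_hookfn signature, wraps it in an nf_hook_ops structure, and attaches it with nf_register_hook(); hooks on the same hook point run in ascending priority order and return one of the NF_* verdicts listed at the top of the file. A minimal sketch of such a module against this 2.4-era API, assuming the usual module boilerplate and with invented names (example_hook, example_ops), not taken from this commit:

static unsigned int example_hook(unsigned int hooknum,
                                 struct sk_buff **pskb,
                                 const struct net_device *in,
                                 const struct net_device *out,
                                 int (*okfn)(struct sk_buff *))
{
        /* Inspect or modify *pskb here; any NF_* verdict defined
         * above may be returned. */
        return NF_ACCEPT;
}

static struct nf_hook_ops example_ops = {
        { NULL, NULL },         /* list head, owned by the netfilter core */
        example_hook,           /* hook function */
        PF_INET,                /* protocol family */
        NF_IP_PRE_ROUTING,      /* hook point (see netfilter_ipv4.h below) */
        0                       /* priority: lower values run first */
};

/* nf_register_hook(&example_ops) in the module init routine,
 * nf_unregister_hook(&example_ops) on cleanup. */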
diff --git a/br-nf-bds/linux/include/linux/netfilter_ipv4.h b/br-nf-bds/linux/include/linux/netfilter_ipv4.h
new file mode 100644
index 0000000..946190a
--- /dev/null
+++ b/br-nf-bds/linux/include/linux/netfilter_ipv4.h
@@ -0,0 +1,80 @@
+#ifndef __LINUX_IP_NETFILTER_H
+#define __LINUX_IP_NETFILTER_H
+
+/* IPv4-specific defines for netfilter.
+ * (C)1998 Rusty Russell -- This code is GPL.
+ */
+
+#include <linux/config.h>
+#include <linux/netfilter.h>
+
+/* IP Cache bits. */
+/* Src IP address. */
+#define NFC_IP_SRC 0x0001
+/* Dest IP address. */
+#define NFC_IP_DST 0x0002
+/* Input device. */
+#define NFC_IP_IF_IN 0x0004
+/* Output device. */
+#define NFC_IP_IF_OUT 0x0008
+/* TOS. */
+#define NFC_IP_TOS 0x0010
+/* Protocol. */
+#define NFC_IP_PROTO 0x0020
+/* IP options. */
+#define NFC_IP_OPTIONS 0x0040
+/* Frag & flags. */
+#define NFC_IP_FRAG 0x0080
+
+/* Per-protocol information: only matters if proto match. */
+/* TCP flags. */
+#define NFC_IP_TCPFLAGS 0x0100
+/* Source port. */
+#define NFC_IP_SRC_PT 0x0200
+/* Dest port. */
+#define NFC_IP_DST_PT 0x0400
+/* Something else about the proto */
+#define NFC_IP_PROTO_UNKNOWN 0x2000
+
+/* IP Hooks */
+/* After promisc drops, checksum checks. */
+#define NF_IP_PRE_ROUTING 0
+/* If the packet is destined for this box. */
+#define NF_IP_LOCAL_IN 1
+/* If the packet is destined for another interface. */
+#define NF_IP_FORWARD 2
+/* Packets coming from a local process. */
+#define NF_IP_LOCAL_OUT 3
+/* Packets about to hit the wire. */
+#define NF_IP_POST_ROUTING 4
+#define NF_IP_NUMHOOKS 5
+
+enum nf_ip_hook_priorities {
+ NF_IP_PRI_FIRST = INT_MIN,
+ NF_IP_PRI_CONNTRACK = -200,
+ NF_IP_PRI_BRIDGE_SABOTAGE_FORWARD = -175,
+ NF_IP_PRI_MANGLE = -150,
+ NF_IP_PRI_NAT_DST = -100,
+ NF_IP_PRI_BRIDGE_SABOTAGE = -50,
+ NF_IP_PRI_FILTER = 0,
+ NF_IP_PRI_NAT_SRC = 100,
+ NF_IP_PRI_LAST = INT_MAX,
+};
+
+/* Arguments for setsockopt SOL_IP: */
+/* 2.0 firewalling went from 64 through 71 (and +256, +512, etc). */
+/* 2.2 firewalling (+ masq) went from 64 through 76 */
+/* 2.4 firewalling went 64 through 67. */
+#define SO_ORIGINAL_DST 80
+
+#ifdef __KERNEL__
+#ifdef CONFIG_NETFILTER_DEBUG
+void nf_debug_ip_local_deliver(struct sk_buff *skb);
+void nf_debug_ip_loopback_xmit(struct sk_buff *newskb);
+void nf_debug_ip_finish_output2(struct sk_buff *skb);
+#endif /*CONFIG_NETFILTER_DEBUG*/
+
+extern int ip_route_me_harder(struct sk_buff **pskb);
+#endif /*__KERNEL__*/
+
+#endif /*__LINUX_IP_NETFILTER_H*/
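
netfilter_ipv4.h enumerates the five IPv4 hook points and the priorities at which the built-in users attach; the two NF_IP_PRI_BRIDGE_SABOTAGE* entries are what this patch adds so the bridge code can step in around mangle, NAT and filtering. The comment block in netfilter.h describes the caller side of this contract: NF_HOOK() either invokes the continuation function or frees the skb. A short sketch of that caller side, with invented names (example_finish_output, example_xmit) and not taken from this commit:

/* Hand an outgoing packet to the NF_IP_LOCAL_OUT hook.  If every hook
 * returns NF_ACCEPT the core calls example_finish_output(); on NF_DROP
 * the skb is freed and the continuation never runs. */
static int example_finish_output(struct sk_buff *skb)
{
        dev_queue_xmit(skb);            /* pass the frame to the device */
        return 0;
}

static int example_xmit(struct sk_buff *skb)
{
        return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb,
                       NULL,            /* no input device on output */
                       skb->dev,        /* output device */
                       example_finish_output);
}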
diff --git a/br-nf-bds/linux/include/linux/skbuff.h b/br-nf-bds/linux/include/linux/skbuff.h
new file mode 100644
index 0000000..257b586
--- /dev/null
+++ b/br-nf-bds/linux/include/linux/skbuff.h
@@ -0,0 +1,1152 @@
+/*
+ * Definitions for the 'struct sk_buff' memory handlers.
+ *
+ * Authors:
+ * Alan Cox, <gw4pts@gw4pts.ampr.org>
+ * Florian La Roche, <rzsfl@rz.uni-sb.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_SKBUFF_H
+#define _LINUX_SKBUFF_H
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/cache.h>
+
+#include <asm/atomic.h>
+#include <asm/types.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+
+#define HAVE_ALLOC_SKB /* For the drivers to know */
+#define HAVE_ALIGNABLE_SKB /* Ditto 8) */
+#define SLAB_SKB /* Slabified skbuffs */
+
+#define CHECKSUM_NONE 0
+#define CHECKSUM_HW 1
+#define CHECKSUM_UNNECESSARY 2
+
+#define SKB_DATA_ALIGN(X) (((X) + (SMP_CACHE_BYTES-1)) & ~(SMP_CACHE_BYTES-1))
+#define SKB_MAX_ORDER(X,ORDER) (((PAGE_SIZE<<(ORDER)) - (X) - sizeof(struct skb_shared_info))&~(SMP_CACHE_BYTES-1))
+#define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X),0))
+#define SKB_MAX_ALLOC (SKB_MAX_ORDER(0,2))
+
+/* A. Checksumming of received packets by device.
+ *
+ * NONE: device failed to checksum this packet.
+ * skb->csum is undefined.
+ *
+ * UNNECESSARY: the device parsed the packet and verified the checksum.
+ * skb->csum is undefined.
+ * It is a bad option, but, unfortunately, many vendors do this.
+ * Apparently with the secret goal of selling you a new device when
+ * you add a new protocol to your host. E.g. IPv6. 8)
+ *
+ * HW: the most generic way. The device supplied the checksum of the
+ * _whole_ packet, as seen by netif_rx, in skb->csum.
+ * NOTE: Even if the device supports only some protocols but
+ * is able to produce some skb->csum, it MUST use HW,
+ * not UNNECESSARY.
+ *
+ * B. Checksumming on output.
+ *
+ * NONE: skb is checksummed by protocol or csum is not required.
+ *
+ * HW: device is required to csum packet as seen by hard_start_xmit
+ * from skb->h.raw to the end and to record the checksum
+ * at skb->h.raw+skb->csum.
+ *
+ * Device must show its capabilities in dev->features, set
+ * at device setup time.
+ * NETIF_F_HW_CSUM - a clever device, able to checksum
+ * everything.
+ * NETIF_F_NO_CSUM - loopback or reliable single-hop media.
+ * NETIF_F_IP_CSUM - a dumb device, able to csum only
+ * TCP/UDP over IPv4. Sigh. Vendors like it this
+ * way for some unknown reason. Though, see the comment above
+ * about CHECKSUM_UNNECESSARY. 8)
+ *
+ * Any questions? No questions, good. --ANK
+ */
+
+#ifdef __i386__
+#define NET_CALLER(arg) (*(((void**)&arg)-1))
+#else
+#define NET_CALLER(arg) __builtin_return_address(0)
+#endif
+
+#ifdef CONFIG_NETFILTER
+struct nf_conntrack {
+ atomic_t use;
+ void (*destroy)(struct nf_conntrack *);
+};
+
+struct nf_ct_info {
+ struct nf_conntrack *master;
+};
+#endif
+
+struct sk_buff_head {
+ /* These two members must be first. */
+ struct sk_buff * next;
+ struct sk_buff * prev;
+
+ __u32 qlen;
+ spinlock_t lock;
+};
+
+struct sk_buff;
+
+#define MAX_SKB_FRAGS 6
+
+typedef struct skb_frag_struct skb_frag_t;
+
+struct skb_frag_struct
+{
+ struct page *page;
+ __u16 page_offset;
+ __u16 size;
+};
+
+/* This data is invariant across clones and lives at
+ * the end of the header data, ie. at skb->end.
+ */
+struct skb_shared_info {
+ atomic_t dataref;
+ unsigned int nr_frags;
+ struct sk_buff *frag_list;
+ skb_frag_t frags[MAX_SKB_FRAGS];
+};
+
+struct sk_buff {
+ /* These two members must be first. */
+ struct sk_buff * next; /* Next buffer in list */
+ struct sk_buff * prev; /* Previous buffer in list */
+
+ struct sk_buff_head * list; /* List we are on */
+ struct sock *sk; /* Socket we are owned by */
+ struct timeval stamp; /* Time we arrived */
+ struct net_device *dev; /* Device we arrived on/are leaving by */
+ struct net_device *physindev; /* Physical device we arrived on */
+ struct net_device *physoutdev; /* Physical device we will leave by */
+
+ /* Transport layer header */
+ union
+ {
+ struct tcphdr *th;
+ struct udphdr *uh;
+ struct icmphdr *icmph;
+ struct igmphdr *igmph;
+ struct iphdr *ipiph;
+ struct spxhdr *spxh;
+ unsigned char *raw;
+ } h;
+
+ /* Network layer header */
+ union
+ {
+ struct iphdr *iph;
+ struct ipv6hdr *ipv6h;
+ struct arphdr *arph;
+ struct ipxhdr *ipxh;
+ unsigned char *raw;
+ } nh;
+
+ /* Link layer header */
+ union
+ {
+ struct ethhdr *ethernet;
+ unsigned char *raw;
+ } mac;
+
+ struct dst_entry *dst;
+
+ /*
+ * This is the control buffer. It is free to use for every
+ * layer. Please put your private variables there. If you
+ * want to keep them across layers you have to do a skb_clone()
+ * first. This is owned by whoever has the skb queued ATM.
+ */
+ char cb[48];
+
+ unsigned int len; /* Length of actual data */
+ unsigned int data_len;
+ unsigned int csum; /* Checksum */
+ unsigned char __unused, /* Dead field, may be reused */
+ cloned, /* head may be cloned (check refcnt to be sure). */
+ pkt_type, /* Packet class */
+ ip_summed; /* Driver fed us an IP checksum */
+ __u32 priority; /* Packet queueing priority */
+ atomic_t users; /* User count - see datagram.c,tcp.c */
+ unsigned short protocol; /* Packet protocol from driver. */
+ unsigned short security; /* Security level of packet */
+ unsigned int truesize; /* Buffer size */
+
+ unsigned char *head; /* Head of buffer */
+ unsigned char *data; /* Data head pointer */
+ unsigned char *tail; /* Tail pointer */
+ unsigned char *end; /* End pointer */
+
+ void (*destructor)(struct sk_buff *); /* Destruct function */
+#ifdef CONFIG_NETFILTER
+ /* Can be used for communication between hooks. */
+ unsigned long nfmark;
+ /* Cache info */
+ __u32 nfcache;
+ /* Associated connection, if any */
+ struct nf_ct_info *nfct;
+#ifdef CONFIG_NETFILTER_DEBUG
+ unsigned int nf_debug;
+#endif
+#endif /*CONFIG_NETFILTER*/
+
+#if defined(CONFIG_HIPPI)
+ union{
+ __u32 ifield;
+ } private;
+#endif
+
+#ifdef CONFIG_NET_SCHED
+ __u32 tc_index; /* traffic control index */
+#endif
+};
+
+#define SK_WMEM_MAX 65535
+#define SK_RMEM_MAX 65535
+
+#ifdef __KERNEL__
+/*
+ * Handling routines are only of interest to the kernel
+ */
+#include <linux/slab.h>
+
+#include <asm/system.h>
+
+extern void __kfree_skb(struct sk_buff *skb);
+extern struct sk_buff * alloc_skb(unsigned int size, int priority);
+extern void kfree_skbmem(struct sk_buff *skb);
+extern struct sk_buff * skb_clone(struct sk_buff *skb, int priority);
+extern struct sk_buff * skb_copy(const struct sk_buff *skb, int priority);
+extern struct sk_buff * pskb_copy(struct sk_buff *skb, int gfp_mask);
+extern int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask);
+extern struct sk_buff * skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom);
+extern struct sk_buff * skb_copy_expand(const struct sk_buff *skb,
+ int newheadroom,
+ int newtailroom,
+ int priority);
+#define dev_kfree_skb(a) kfree_skb(a)
+extern void skb_over_panic(struct sk_buff *skb, int len, void *here);
+extern void skb_under_panic(struct sk_buff *skb, int len, void *here);
+
+/* Internal */
+#define skb_shinfo(SKB) ((struct skb_shared_info *)((SKB)->end))
+
+/**
+ * skb_queue_empty - check if a queue is empty
+ * @list: queue head
+ *
+ * Returns true if the queue is empty, false otherwise.
+ */
+
+static inline int skb_queue_empty(struct sk_buff_head *list)
+{
+ return (list->next == (struct sk_buff *) list);
+}
+
+/**
+ * skb_get - reference buffer
+ * @skb: buffer to reference
+ *
+ * Makes another reference to a socket buffer and returns a pointer
+ * to the buffer.
+ */
+
+static inline struct sk_buff *skb_get(struct sk_buff *skb)
+{
+ atomic_inc(&skb->users);
+ return skb;
+}
+
+/*
+ * If users==1, we are the only owner and can avoid a redundant
+ * atomic change.
+ */
+
+/**
+ * kfree_skb - free an sk_buff
+ * @skb: buffer to free
+ *
+ * Drop a reference to the buffer and free it if the usage count has
+ * hit zero.
+ */
+
+static inline void kfree_skb(struct sk_buff *skb)
+{
+ if (atomic_read(&skb->users) == 1 || atomic_dec_and_test(&skb->users))
+ __kfree_skb(skb);
+}
+
+/* Use this if you didn't touch the skb state [for fast switching] */
+static inline void kfree_skb_fast(struct sk_buff *skb)
+{
+ if (atomic_read(&skb->users) == 1 || atomic_dec_and_test(&skb->users))
+ kfree_skbmem(skb);
+}
+
+/**
+ * skb_cloned - is the buffer a clone
+ * @skb: buffer to check
+ *
+ * Returns true if the buffer was generated with skb_clone() and is
+ * one of multiple shared copies of the buffer. Cloned buffers are
+ * shared data so must not be written to under normal circumstances.
+ */
+
+static inline int skb_cloned(struct sk_buff *skb)
+{
+ return skb->cloned && atomic_read(&skb_shinfo(skb)->dataref) != 1;
+}
+
+/**
+ * skb_shared - is the buffer shared
+ * @skb: buffer to check
+ *
+ * Returns true if more than one person has a reference to this
+ * buffer.
+ */
+
+static inline int skb_shared(struct sk_buff *skb)
+{
+ return (atomic_read(&skb->users) != 1);
+}
+
+/**
+ * skb_share_check - check if buffer is shared and if so clone it
+ * @skb: buffer to check
+ * @pri: priority for memory allocation
+ *
+ * If the buffer is shared the buffer is cloned and the old copy
+ * drops a reference. A new clone with a single reference is returned.
+ * If the buffer is not shared the original buffer is returned. When
+ * called from interrupt context or with spinlocks held, @pri must
+ * be %GFP_ATOMIC.
+ *
+ * NULL is returned on a memory allocation failure.
+ */
+
+static inline struct sk_buff *skb_share_check(struct sk_buff *skb, int pri)
+{
+ if (skb_shared(skb)) {
+ struct sk_buff *nskb;
+ nskb = skb_clone(skb, pri);
+ kfree_skb(skb);
+ return nskb;
+ }
+ return skb;
+}
+
+
+/*
+ * Copy shared buffers into a new sk_buff. We effectively do COW on
+ * packets to handle cases where we have a local reader and forward
+ * and a couple of other messy ones. The normal one is tcpdumping
+ * a packet thats being forwarded.
+ */
+
+/**
+ * skb_unshare - make a copy of a shared buffer
+ * @skb: buffer to check
+ * @pri: priority for memory allocation
+ *
+ * If the socket buffer is a clone then this function creates a new
+ * copy of the data, drops a reference count on the old copy and returns
+ * the new copy with the reference count at 1. If the buffer is not a clone
+ * the original buffer is returned. When called with a spinlock held or
+ * from interrupt context, @pri must be %GFP_ATOMIC.
+ *
+ * %NULL is returned on a memory allocation failure.
+ */
+
+static inline struct sk_buff *skb_unshare(struct sk_buff *skb, int pri)
+{
+ struct sk_buff *nskb;
+ if(!skb_cloned(skb))
+ return skb;
+ nskb=skb_copy(skb, pri);
+ kfree_skb(skb); /* Free our shared copy */
+ return nskb;
+}
+
+/**
+ * skb_peek
+ * @list_: list to peek at
+ *
+ * Peek an &sk_buff. Unlike most other operations you _MUST_
+ * be careful with this one. A peek leaves the buffer on the
+ * list and someone else may run off with it. You must hold
+ * the appropriate locks or have a private queue to do this.
+ *
+ * Returns %NULL for an empty list or a pointer to the head element.
+ * The reference count is not incremented and the reference is therefore
+ * volatile. Use with caution.
+ */
+
+static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
+{
+ struct sk_buff *list = ((struct sk_buff *)list_)->next;
+ if (list == (struct sk_buff *)list_)
+ list = NULL;
+ return list;
+}
+
+/**
+ * skb_peek_tail
+ * @list_: list to peek at
+ *
+ * Peek an &sk_buff. Unlike most other operations you _MUST_
+ * be careful with this one. A peek leaves the buffer on the
+ * list and someone else may run off with it. You must hold
+ * the appropriate locks or have a private queue to do this.
+ *
+ * Returns %NULL for an empty list or a pointer to the tail element.
+ * The reference count is not incremented and the reference is therefore
+ * volatile. Use with caution.
+ */
+
+static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
+{
+ struct sk_buff *list = ((struct sk_buff *)list_)->prev;
+ if (list == (struct sk_buff *)list_)
+ list = NULL;
+ return list;
+}
+
+/**
+ * skb_queue_len - get queue length
+ * @list_: list to measure
+ *
+ * Return the length of an &sk_buff queue.
+ */
+
+static inline __u32 skb_queue_len(struct sk_buff_head *list_)
+{
+ return(list_->qlen);
+}
+
+static inline void skb_queue_head_init(struct sk_buff_head *list)
+{
+ spin_lock_init(&list->lock);
+ list->prev = (struct sk_buff *)list;
+ list->next = (struct sk_buff *)list;
+ list->qlen = 0;
+}
+
+/*
+ * Insert an sk_buff at the start of a list.
+ *
+ * The "__skb_xxxx()" functions are the non-atomic ones that
+ * can only be called with interrupts disabled.
+ */
+
+/**
+ * __skb_queue_head - queue a buffer at the list head
+ * @list: list to use
+ * @newsk: buffer to queue
+ *
+ * Queue a buffer at the start of a list. This function takes no locks
+ * and you must therefore hold required locks before calling it.
+ *
+ * A buffer cannot be placed on two lists at the same time.
+ */
+
+static inline void __skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
+{
+ struct sk_buff *prev, *next;
+
+ newsk->list = list;
+ list->qlen++;
+ prev = (struct sk_buff *)list;
+ next = prev->next;
+ newsk->next = next;
+ newsk->prev = prev;
+ next->prev = newsk;
+ prev->next = newsk;
+}
+
+
+/**
+ * skb_queue_head - queue a buffer at the list head
+ * @list: list to use
+ * @newsk: buffer to queue
+ *
+ * Queue a buffer at the start of the list. This function takes the
+ * list lock and can be used safely with other locking &sk_buff
+ * functions.
+ *
+ * A buffer cannot be placed on two lists at the same time.
+ */
+
+static inline void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&list->lock, flags);
+ __skb_queue_head(list, newsk);
+ spin_unlock_irqrestore(&list->lock, flags);
+}
+
+/**
+ * __skb_queue_tail - queue a buffer at the list tail
+ * @list: list to use
+ * @newsk: buffer to queue
+ *
+ * Queue a buffer at the end of a list. This function takes no locks
+ * and you must therefore hold required locks before calling it.
+ *
+ * A buffer cannot be placed on two lists at the same time.
+ */
+
+
+static inline void __skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
+{
+ struct sk_buff *prev, *next;
+
+ newsk->list = list;
+ list->qlen++;
+ next = (struct sk_buff *)list;
+ prev = next->prev;
+ newsk->next = next;
+ newsk->prev = prev;
+ next->prev = newsk;
+ prev->next = newsk;
+}
+
+/**
+ * skb_queue_tail - queue a buffer at the list tail
+ * @list: list to use
+ * @newsk: buffer to queue
+ *
+ * Queue a buffer at the tail of the list. This function takes the
+ * list lock and can be used safely with other locking &sk_buff
+ * functions.
+ *
+ * A buffer cannot be placed on two lists at the same time.
+ */
+
+static inline void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&list->lock, flags);
+ __skb_queue_tail(list, newsk);
+ spin_unlock_irqrestore(&list->lock, flags);
+}
+
+/**
+ * __skb_dequeue - remove from the head of the queue
+ * @list: list to dequeue from
+ *
+ * Remove the head of the list. This function does not take any locks
+ * so must be used with appropriate locks held only. The head item is
+ * returned or %NULL if the list is empty.
+ */
+
+static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
+{
+ struct sk_buff *next, *prev, *result;
+
+ prev = (struct sk_buff *) list;
+ next = prev->next;
+ result = NULL;
+ if (next != prev) {
+ result = next;
+ next = next->next;
+ list->qlen--;
+ next->prev = prev;
+ prev->next = next;
+ result->next = NULL;
+ result->prev = NULL;
+ result->list = NULL;
+ }
+ return result;
+}
+
+/**
+ * skb_dequeue - remove from the head of the queue
+ * @list: list to dequeue from
+ *
+ * Remove the head of the list. The list lock is taken so the function
+ * may be used safely with other locking list functions. The head item is
+ * returned or %NULL if the list is empty.
+ */
+
+static inline struct sk_buff *skb_dequeue(struct sk_buff_head *list)
+{
+ long flags;
+ struct sk_buff *result;
+
+ spin_lock_irqsave(&list->lock, flags);
+ result = __skb_dequeue(list);
+ spin_unlock_irqrestore(&list->lock, flags);
+ return result;
+}
+
+/*
+ * Insert a packet on a list.
+ */
+
+static inline void __skb_insert(struct sk_buff *newsk,
+ struct sk_buff * prev, struct sk_buff *next,
+ struct sk_buff_head * list)
+{
+ newsk->next = next;
+ newsk->prev = prev;
+ next->prev = newsk;
+ prev->next = newsk;
+ newsk->list = list;
+ list->qlen++;
+}
+
+/**
+ * skb_insert - insert a buffer
+ * @old: buffer to insert before
+ * @newsk: buffer to insert
+ *
+ * Place a packet before a given packet in a list. The list locks are taken
+ * and this function is atomic with respect to other list locked calls.
+ * A buffer cannot be placed on two lists at the same time.
+ */
+
+static inline void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&old->list->lock, flags);
+ __skb_insert(newsk, old->prev, old, old->list);
+ spin_unlock_irqrestore(&old->list->lock, flags);
+}
+
+/*
+ * Place a packet after a given packet in a list.
+ */
+
+static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk)
+{
+ __skb_insert(newsk, old, old->next, old->list);
+}
+
+/**
+ * skb_append - append a buffer
+ * @old: buffer to insert after
+ * @newsk: buffer to insert
+ *
+ * Place a packet after a given packet in a list. The list locks are taken
+ * and this function is atomic with respect to other list locked calls.
+ * A buffer cannot be placed on two lists at the same time.
+ */
+
+
+static inline void skb_append(struct sk_buff *old, struct sk_buff *newsk)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&old->list->lock, flags);
+ __skb_append(old, newsk);
+ spin_unlock_irqrestore(&old->list->lock, flags);
+}
+
+/*
+ * remove sk_buff from list. _Must_ be called atomically, and with
+ * the list known..
+ */
+
+static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
+{
+ struct sk_buff * next, * prev;
+
+ list->qlen--;
+ next = skb->next;
+ prev = skb->prev;
+ skb->next = NULL;
+ skb->prev = NULL;
+ skb->list = NULL;
+ next->prev = prev;
+ prev->next = next;
+}
+
+/**
+ * skb_unlink - remove a buffer from a list
+ * @skb: buffer to remove
+ *
+ * Remove a packet from its list. The list lock is taken and this
+ * function is atomic with respect to other list locked calls.
+ *
+ * Works even without knowing the list it is sitting on, which can be
+ * handy at times. It also means that THE LIST MUST EXIST when you
+ * unlink. Thus a list must have its contents unlinked before it is
+ * destroyed.
+ */
+
+static inline void skb_unlink(struct sk_buff *skb)
+{
+ struct sk_buff_head *list = skb->list;
+
+ if(list) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&list->lock, flags);
+ if(skb->list == list)
+ __skb_unlink(skb, skb->list);
+ spin_unlock_irqrestore(&list->lock, flags);
+ }
+}
+
+/* XXX: more streamlined implementation */
+
+/**
+ * __skb_dequeue_tail - remove from the tail of the queue
+ * @list: list to dequeue from
+ *
+ * Remove the tail of the list. This function does not take any locks
+ * so must be used with appropriate locks held only. The tail item is
+ * returned or %NULL if the list is empty.
+ */
+
+static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
+{
+ struct sk_buff *skb = skb_peek_tail(list);
+ if (skb)
+ __skb_unlink(skb, list);
+ return skb;
+}
+
+/**
+ * skb_dequeue_tail - remove from the tail of the queue
+ * @list: list to dequeue from
+ *
+ * Remove the tail of the list. The list lock is taken so the function
+ * may be used safely with other locking list functions. The tail item is
+ * returned or %NULL if the list is empty.
+ */
+
+static inline struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
+{
+ long flags;
+ struct sk_buff *result;
+
+ spin_lock_irqsave(&list->lock, flags);
+ result = __skb_dequeue_tail(list);
+ spin_unlock_irqrestore(&list->lock, flags);
+ return result;
+}
+
+static inline int skb_is_nonlinear(const struct sk_buff *skb)
+{
+ return skb->data_len;
+}
+
+static inline int skb_headlen(const struct sk_buff *skb)
+{
+ return skb->len - skb->data_len;
+}
+
+#define SKB_PAGE_ASSERT(skb) do { if (skb_shinfo(skb)->nr_frags) BUG(); } while (0)
+#define SKB_FRAG_ASSERT(skb) do { if (skb_shinfo(skb)->frag_list) BUG(); } while (0)
+#define SKB_LINEAR_ASSERT(skb) do { if (skb_is_nonlinear(skb)) BUG(); } while (0)
+
+/*
+ * Add data to an sk_buff
+ */
+
+static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
+{
+ unsigned char *tmp=skb->tail;
+ SKB_LINEAR_ASSERT(skb);
+ skb->tail+=len;
+ skb->len+=len;
+ return tmp;
+}
+
+/**
+ * skb_put - add data to a buffer
+ * @skb: buffer to use
+ * @len: amount of data to add
+ *
+ * This function extends the used data area of the buffer. If this would
+ * exceed the total buffer size the kernel will panic. A pointer to the
+ * first byte of the extra data is returned.
+ */
+
+static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
+{
+ unsigned char *tmp=skb->tail;
+ SKB_LINEAR_ASSERT(skb);
+ skb->tail+=len;
+ skb->len+=len;
+ if(skb->tail>skb->end) {
+ skb_over_panic(skb, len, current_text_addr());
+ }
+ return tmp;
+}
+
+static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
+{
+ skb->data-=len;
+ skb->len+=len;
+ return skb->data;
+}
+
+/**
+ * skb_push - add data to the start of a buffer
+ * @skb: buffer to use
+ * @len: amount of data to add
+ *
+ * This function extends the used data area of the buffer at the buffer
+ * start. If this would exceed the total buffer headroom the kernel will
+ * panic. A pointer to the first byte of the extra data is returned.
+ */
+
+static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
+{
+ skb->data-=len;
+ skb->len+=len;
+ if(skb->data<skb->head) {
+ skb_under_panic(skb, len, current_text_addr());
+ }
+ return skb->data;
+}
+
+static inline char *__skb_pull(struct sk_buff *skb, unsigned int len)
+{
+ skb->len-=len;
+ if (skb->len < skb->data_len)
+ BUG();
+ return skb->data+=len;
+}
+
+/**
+ * skb_pull - remove data from the start of a buffer
+ * @skb: buffer to use
+ * @len: amount of data to remove
+ *
+ * This function removes data from the start of a buffer, returning
+ * the memory to the headroom. A pointer to the next data in the buffer
+ * is returned. Once the data has been pulled future pushes will overwrite
+ * the old data.
+ */
+
+static inline unsigned char * skb_pull(struct sk_buff *skb, unsigned int len)
+{
+ if (len > skb->len)
+ return NULL;
+ return __skb_pull(skb,len);
+}
+
+extern unsigned char * __pskb_pull_tail(struct sk_buff *skb, int delta);
+
+static inline char *__pskb_pull(struct sk_buff *skb, unsigned int len)
+{
+ if (len > skb_headlen(skb) &&
+ __pskb_pull_tail(skb, len-skb_headlen(skb)) == NULL)
+ return NULL;
+ skb->len -= len;
+ return skb->data += len;
+}
+
+static inline unsigned char * pskb_pull(struct sk_buff *skb, unsigned int len)
+{
+ if (len > skb->len)
+ return NULL;
+ return __pskb_pull(skb,len);
+}
+
+static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
+{
+ if (len <= skb_headlen(skb))
+ return 1;
+ if (len > skb->len)
+ return 0;
+ return (__pskb_pull_tail(skb, len-skb_headlen(skb)) != NULL);
+}
+
+/**
+ * skb_headroom - bytes at buffer head
+ * @skb: buffer to check
+ *
+ * Return the number of bytes of free space at the head of an &sk_buff.
+ */
+
+static inline int skb_headroom(const struct sk_buff *skb)
+{
+ return skb->data-skb->head;
+}
+
+/**
+ * skb_tailroom - bytes at buffer end
+ * @skb: buffer to check
+ *
+ * Return the number of bytes of free space at the tail of an sk_buff
+ */
+
+static inline int skb_tailroom(const struct sk_buff *skb)
+{
+ return skb_is_nonlinear(skb) ? 0 : skb->end-skb->tail;
+}
+
+/**
+ * skb_reserve - adjust headroom
+ * @skb: buffer to alter
+ * @len: bytes to move
+ *
+ * Increase the headroom of an empty &sk_buff by reducing the tail
+ * room. This is only allowed for an empty buffer.
+ */
+
+static inline void skb_reserve(struct sk_buff *skb, unsigned int len)
+{
+ skb->data+=len;
+ skb->tail+=len;
+}
+
+extern int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc);
+
+static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
+{
+ if (!skb->data_len) {
+ skb->len = len;
+ skb->tail = skb->data+len;
+ } else {
+ ___pskb_trim(skb, len, 0);
+ }
+}
+
+/**
+ * skb_trim - remove end from a buffer
+ * @skb: buffer to alter
+ * @len: new length
+ *
+ * Cut the length of a buffer down by removing data from the tail. If
+ * the buffer is already under the length specified it is not modified.
+ */
+
+static inline void skb_trim(struct sk_buff *skb, unsigned int len)
+{
+ if (skb->len > len) {
+ __skb_trim(skb, len);
+ }
+}
+
+
+static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
+{
+ if (!skb->data_len) {
+ skb->len = len;
+ skb->tail = skb->data+len;
+ return 0;
+ } else {
+ return ___pskb_trim(skb, len, 1);
+ }
+}
+
+static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
+{
+ if (len < skb->len)
+ return __pskb_trim(skb, len);
+ return 0;
+}
+
+/**
+ * skb_orphan - orphan a buffer
+ * @skb: buffer to orphan
+ *
+ * If a buffer currently has an owner then we call the owner's
+ * destructor function and make the @skb unowned. The buffer continues
+ * to exist but is no longer charged to its former owner.
+ */
+
+
+static inline void skb_orphan(struct sk_buff *skb)
+{
+ if (skb->destructor)
+ skb->destructor(skb);
+ skb->destructor = NULL;
+ skb->sk = NULL;
+}
+
+/**
+ * skb_queue_purge - empty a list
+ * @list: list to empty
+ *
+ * Delete all buffers on an &sk_buff list. Each buffer is removed from
+ * the list and one reference dropped. This function takes the list
+ * lock and is atomic with respect to other list locking functions.
+ */
+
+
+static inline void skb_queue_purge(struct sk_buff_head *list)
+{
+ struct sk_buff *skb;
+ while ((skb=skb_dequeue(list))!=NULL)
+ kfree_skb(skb);
+}
+
+/**
+ * __skb_queue_purge - empty a list
+ * @list: list to empty
+ *
+ * Delete all buffers on an &sk_buff list. Each buffer is removed from
+ * the list and one reference dropped. This function does not take the
+ * list lock and the caller must hold the relevant locks to use it.
+ */
+
+
+static inline void __skb_queue_purge(struct sk_buff_head *list)
+{
+ struct sk_buff *skb;
+ while ((skb=__skb_dequeue(list))!=NULL)
+ kfree_skb(skb);
+}
+
+/**
+ * __dev_alloc_skb - allocate an skbuff for sending
+ * @length: length to allocate
+ * @gfp_mask: get_free_pages mask, passed to alloc_skb
+ *
+ * Allocate a new &sk_buff and assign it a usage count of one. The
+ * buffer has unspecified headroom built in. Users should allocate
+ * the headroom they think they need without accounting for the
+ * built in space. The built in space is used for optimisations.
+ *
+ * %NULL is returned if there is no free memory.
+ */
+
+static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
+ int gfp_mask)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_skb(length+16, gfp_mask);
+ if (skb)
+ skb_reserve(skb,16);
+ return skb;
+}
+
+/**
+ * dev_alloc_skb - allocate an skbuff for sending
+ * @length: length to allocate
+ *
+ * Allocate a new &sk_buff and assign it a usage count of one. The
+ * buffer has unspecified headroom built in. Users should allocate
+ * the headroom they think they need without accounting for the
+ * built in space. The built in space is used for optimisations.
+ *
+ * %NULL is returned if there is no free memory. Although this function
+ * allocates memory it can be called from an interrupt.
+ */
+
+static inline struct sk_buff *dev_alloc_skb(unsigned int length)
+{
+ return __dev_alloc_skb(length, GFP_ATOMIC);
+}
+
+/**
+ * skb_cow - copy header of skb when it is required
+ * @skb: buffer to cow
+ * @headroom: needed headroom
+ *
+ * If the skb passed lacks sufficient headroom or its data part
+ * is shared, data is reallocated. If reallocation fails, an error
+ * is returned and the original skb is not changed.
+ *
+ * The result is skb with writable area skb->head...skb->tail
+ * and at least @headroom of space at head.
+ */
+
+static inline int
+skb_cow(struct sk_buff *skb, unsigned int headroom)
+{
+ int delta = (headroom > 16 ? headroom : 16) - skb_headroom(skb);
+
+ if (delta < 0)
+ delta = 0;
+
+ if (delta || skb_cloned(skb))
+ return pskb_expand_head(skb, (delta+15)&~15, 0, GFP_ATOMIC);
+ return 0;
+}
+
+/**
+ * skb_linearize - convert paged skb to linear one
+ * @skb: buffer to linearize
+ * @gfp: allocation mode
+ *
+ * If there is no free memory -ENOMEM is returned, otherwise zero
+ * is returned and the old skb data released. */
+int skb_linearize(struct sk_buff *skb, int gfp);
+
+static inline void *kmap_skb_frag(const skb_frag_t *frag)
+{
+#ifdef CONFIG_HIGHMEM
+ if (in_irq())
+ BUG();
+
+ local_bh_disable();
+#endif
+ return kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ);
+}
+
+static inline void kunmap_skb_frag(void *vaddr)
+{
+ kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
+#ifdef CONFIG_HIGHMEM
+ local_bh_enable();
+#endif
+}
+
+#define skb_queue_walk(queue, skb) \
+ for (skb = (queue)->next; \
+ (skb != (struct sk_buff *)(queue)); \
+ skb=skb->next)
+
+
+extern struct sk_buff * skb_recv_datagram(struct sock *sk,unsigned flags,int noblock, int *err);
+extern unsigned int datagram_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait);
+extern int skb_copy_datagram(const struct sk_buff *from, int offset, char *to,int size);
+extern int skb_copy_datagram_iovec(const struct sk_buff *from, int offset, struct iovec *to,int size);
+extern int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int *csump);
+extern int skb_copy_and_csum_datagram_iovec(const struct sk_buff *skb, int hlen, struct iovec *iov);
+extern void skb_free_datagram(struct sock * sk, struct sk_buff *skb);
+
+extern unsigned int skb_checksum(const struct sk_buff *skb, int offset, int len, unsigned int csum);
+extern int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
+extern unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int csum);
+extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
+
+extern void skb_init(void);
+extern void skb_add_mtu(int mtu);
+
+#ifdef CONFIG_NETFILTER
+static inline void
+nf_conntrack_put(struct nf_ct_info *nfct)
+{
+ if (nfct && atomic_dec_and_test(&nfct->master->use))
+ nfct->master->destroy(nfct->master);
+}
+static inline void
+nf_conntrack_get(struct nf_ct_info *nfct)
+{
+ if (nfct)
+ atomic_inc(&nfct->master->use);
+}
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _LINUX_SKBUFF_H */
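
For the bridge code, this copy of skbuff.h matters mainly because it adds the physindev and physoutdev fields, which let the bridge-netfilter code remember the physical ports a frame crossed. The header also documents the head/data/tail/end pointer layout and the helpers that move those pointers (skb_reserve, skb_put, skb_push, skb_pull). A small sketch of the usual allocation pattern with this API, assuming the standard kernel string and ethernet headers and using an invented name (example_build_skb), not taken from this commit:

/* Allocate a buffer, keep headroom for a MAC header, then append the
 * payload.  skb_reserve() only moves pointers on an empty skb;
 * skb_put() grows the data area at the tail; skb_push() reclaims
 * headroom so the caller can fill the header at skb->data. */
static struct sk_buff *example_build_skb(const void *payload, unsigned int len)
{
        struct sk_buff *skb;

        skb = alloc_skb(ETH_HLEN + len, GFP_ATOMIC);
        if (skb == NULL)
                return NULL;

        skb_reserve(skb, ETH_HLEN);              /* headroom for the header */
        memcpy(skb_put(skb, len), payload, len); /* tail and len grow by len */
        skb_push(skb, ETH_HLEN);                 /* data now points at the header */

        return skb;
}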
diff --git a/br-nf-bds/linux/net/Config.in b/br-nf-bds/linux/net/Config.in
new file mode 100644
index 0000000..946679f
--- /dev/null
+++ b/br-nf-bds/linux/net/Config.in
@@ -0,0 +1,95 @@
+#
+# Network configuration
+#
+mainmenu_option next_comment
+comment 'Networking options'
+tristate 'Packet socket' CONFIG_PACKET
+if [ "$CONFIG_PACKET" != "n" ]; then
+ bool ' Packet socket: mmapped IO' CONFIG_PACKET_MMAP
+fi
+
+tristate 'Netlink device emulation' CONFIG_NETLINK_DEV
+
+bool 'Network packet filtering (replaces ipchains)' CONFIG_NETFILTER
+if [ "$CONFIG_NETFILTER" = "y" ]; then
+ bool ' Network packet filtering debugging' CONFIG_NETFILTER_DEBUG
+fi
+bool 'Socket Filtering' CONFIG_FILTER
+tristate 'Unix domain sockets' CONFIG_UNIX
+bool 'TCP/IP networking' CONFIG_INET
+if [ "$CONFIG_INET" = "y" ]; then
+ source net/ipv4/Config.in
+ if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
+# IPv6 as module will cause a CRASH if you try to unload it
+ tristate ' The IPv6 protocol (EXPERIMENTAL)' CONFIG_IPV6
+ if [ "$CONFIG_IPV6" != "n" ]; then
+ source net/ipv6/Config.in
+ fi
+ fi
+ if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
+ source net/khttpd/Config.in
+ fi
+fi
+if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
+ bool 'Asynchronous Transfer Mode (ATM) (EXPERIMENTAL)' CONFIG_ATM
+ if [ "$CONFIG_ATM" = "y" ]; then
+ if [ "$CONFIG_INET" = "y" ]; then
+ bool ' Classical IP over ATM' CONFIG_ATM_CLIP
+ if [ "$CONFIG_ATM_CLIP" = "y" ]; then
+ bool ' Do NOT send ICMP if no neighbour' CONFIG_ATM_CLIP_NO_ICMP
+ fi
+ fi
+ tristate ' LAN Emulation (LANE) support' CONFIG_ATM_LANE
+ if [ "$CONFIG_INET" = "y" -a "$CONFIG_ATM_LANE" != "n" ]; then
+ tristate ' Multi-Protocol Over ATM (MPOA) support' CONFIG_ATM_MPOA
+ fi
+ fi
+
+ dep_tristate '802.1Q VLAN Support (EXPERIMENTAL)' CONFIG_VLAN_8021Q $CONFIG_EXPERIMENTAL
+
+fi
+
+comment ' '
+tristate 'The IPX protocol' CONFIG_IPX
+if [ "$CONFIG_IPX" != "n" ]; then
+ source net/ipx/Config.in
+fi
+tristate 'Appletalk protocol support' CONFIG_ATALK
+tristate 'DECnet Support' CONFIG_DECNET
+if [ "$CONFIG_DECNET" != "n" ]; then
+ source net/decnet/Config.in
+fi
+dep_tristate '802.1d Ethernet Bridging' CONFIG_BRIDGE $CONFIG_INET
+if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
+ if [ "$CONFIG_BRIDGE" != "n" -a "$CONFIG_NETFILTER" != "n" ]; then
+ bool ' netfilter (firewalling) support' CONFIG_BRIDGE_NF
+ fi
+ tristate 'CCITT X.25 Packet Layer (EXPERIMENTAL)' CONFIG_X25
+ tristate 'LAPB Data Link Driver (EXPERIMENTAL)' CONFIG_LAPB
+ bool '802.2 LLC (EXPERIMENTAL)' CONFIG_LLC
+ bool 'Frame Diverter (EXPERIMENTAL)' CONFIG_NET_DIVERT
+# if [ "$CONFIG_LLC" = "y" ]; then
+# bool ' Netbeui (EXPERIMENTAL)' CONFIG_NETBEUI
+# fi
+ if [ "$CONFIG_INET" = "y" ]; then
+ tristate 'Acorn Econet/AUN protocols (EXPERIMENTAL)' CONFIG_ECONET
+ fi
+ if [ "$CONFIG_ECONET" != "n" ]; then
+ bool ' AUN over UDP' CONFIG_ECONET_AUNUDP
+ bool ' Native Econet' CONFIG_ECONET_NATIVE
+ fi
+ tristate 'WAN router' CONFIG_WAN_ROUTER
+ bool 'Fast switching (read help!)' CONFIG_NET_FASTROUTE
+ bool 'Forwarding between high speed interfaces' CONFIG_NET_HW_FLOWCONTROL
+fi
+
+mainmenu_option next_comment
+comment 'QoS and/or fair queueing'
+bool 'QoS and/or fair queueing' CONFIG_NET_SCHED
+if [ "$CONFIG_NET_SCHED" = "y" ]; then
+ source net/sched/Config.in
+fi
+#bool 'Network code profiler' CONFIG_NET_PROFILE
+endmenu
+
+endmenu
diff --git a/br-nf-bds/linux/net/bridge/Makefile b/br-nf-bds/linux/net/bridge/Makefile
new file mode 100644
index 0000000..25c2314
--- /dev/null
+++ b/br-nf-bds/linux/net/bridge/Makefile
@@ -0,0 +1,18 @@
+#
+# Makefile for the IEEE 802.1d ethernet bridging layer.
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+#
+# Note 2! The CFLAGS definition is now in the main makefile...
+
+O_TARGET := bridge.o
+obj-y := br.o br_device.o br_fdb.o br_forward.o br_if.o br_input.o \
+ br_ioctl.o br_notify.o br_stp.o br_stp_bpdu.o \
+ br_stp_if.o br_stp_timer.o
+obj-m := $(O_TARGET)
+
+obj-$(CONFIG_BRIDGE_NF) += br_netfilter.o
+
+include $(TOPDIR)/Rules.make
diff --git a/br-nf-bds/linux/net/bridge/br.c b/br-nf-bds/linux/net/bridge/br.c
new file mode 100644
index 0000000..2ef8028
--- /dev/null
+++ b/br-nf-bds/linux/net/bridge/br.c
@@ -0,0 +1,89 @@
+/*
+ * Generic parts
+ * Linux ethernet bridge
+ *
+ * Authors:
+ * Lennert Buytenhek <buytenh@gnu.org>
+ *
+ * $Id: br.c,v 1.1 2002/06/01 19:23:52 bdschuym Exp $
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/if_bridge.h>
+#include <asm/uaccess.h>
+#include "br_private.h"
+
+#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
+#include "../atm/lec.h"
+#endif
+
+void br_dec_use_count()
+{
+ MOD_DEC_USE_COUNT;
+}
+
+void br_inc_use_count()
+{
+ MOD_INC_USE_COUNT;
+}
+
+static int __init br_init(void)
+{
+ printk(KERN_INFO "NET4: Ethernet Bridge 008 for NET4.0\n");
+
+#ifdef CONFIG_BRIDGE_NF
+ if (br_netfilter_init())
+ return 1;
+#endif
+
+ br_handle_frame_hook = br_handle_frame;
+ br_ioctl_hook = br_ioctl_deviceless_stub;
+#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
+ br_fdb_get_hook = br_fdb_get;
+ br_fdb_put_hook = br_fdb_put;
+#endif
+ register_netdevice_notifier(&br_device_notifier);
+
+ return 0;
+}
+
+static void __br_clear_frame_hook(void)
+{
+ br_handle_frame_hook = NULL;
+}
+
+static void __br_clear_ioctl_hook(void)
+{
+ br_ioctl_hook = NULL;
+}
+
+static void __exit br_deinit(void)
+{
+#ifdef CONFIG_BRIDGE_NF
+ br_netfilter_fini();
+#endif
+ unregister_netdevice_notifier(&br_device_notifier);
+ br_call_ioctl_atomic(__br_clear_ioctl_hook);
+ net_call_rx_atomic(__br_clear_frame_hook);
+#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
+ br_fdb_get_hook = NULL;
+ br_fdb_put_hook = NULL;
+#endif
+}
+
+EXPORT_NO_SYMBOLS;
+
+module_init(br_init)
+module_exit(br_deinit)
+MODULE_LICENSE("GPL");
diff --git a/br-nf-bds/linux/net/bridge/br_forward.c b/br-nf-bds/linux/net/bridge/br_forward.c
new file mode 100644
index 0000000..05d1587
--- /dev/null
+++ b/br-nf-bds/linux/net/bridge/br_forward.c
@@ -0,0 +1,152 @@
+/*
+ * Forwarding decision
+ * Linux ethernet bridge
+ *
+ * Authors:
+ * Lennert Buytenhek <buytenh@gnu.org>
+ *
+ * $Id: br_forward.c,v 1.1 2002/06/01 19:23:53 bdschuym Exp $
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_bridge.h>
+#include <linux/netfilter_bridge.h>
+#include "br_private.h"
+
+static inline int should_deliver(struct net_bridge_port *p, struct sk_buff *skb)
+{
+ if (skb->dev == p->dev ||
+ p->state != BR_STATE_FORWARDING)
+ return 0;
+
+ return 1;
+}
+
+int br_dev_queue_push_xmit(struct sk_buff *skb)
+{
+ skb_push(skb, ETH_HLEN);
+ dev_queue_xmit(skb);
+
+ return 0;
+}
+
+int br_forward_finish(struct sk_buff *skb)
+{
+ NF_HOOK(PF_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev,
+ br_dev_queue_push_xmit);
+
+ return 0;
+}
+
+static void __br_deliver(struct net_bridge_port *to, struct sk_buff *skb)
+{
+ struct net_device *indev;
+
+ indev = skb->dev;
+ skb->dev = to->dev;
+
+ NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, indev, skb->dev,
+ br_forward_finish);
+}
+
+static void __br_forward(struct net_bridge_port *to, struct sk_buff *skb)
+{
+ struct net_device *indev;
+
+ indev = skb->dev;
+ skb->dev = to->dev;
+
+ NF_HOOK(PF_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev,
+ br_forward_finish);
+}
+
+/* called under bridge lock */
+void br_deliver(struct net_bridge_port *to, struct sk_buff *skb)
+{
+ if (should_deliver(to, skb)) {
+ __br_deliver(to, skb);
+ return;
+ }
+
+ kfree_skb(skb);
+}
+
+/* called under bridge lock */
+void br_forward(struct net_bridge_port *to, struct sk_buff *skb)
+{
+ if (should_deliver(to, skb)) {
+ __br_forward(to, skb);
+ return;
+ }
+
+ kfree_skb(skb);
+}
+
+/* called under bridge lock */
+static void br_flood(struct net_bridge *br, struct sk_buff *skb, int clone,
+ void (*__packet_hook)(struct net_bridge_port *p, struct sk_buff *skb))
+{
+ struct net_bridge_port *p;
+ struct net_bridge_port *prev;
+
+ if (clone) {
+ struct sk_buff *skb2;
+
+ if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL) {
+ br->statistics.tx_dropped++;
+ return;
+ }
+
+ skb = skb2;
+ }
+
+ prev = NULL;
+
+ p = br->port_list;
+ while (p != NULL) {
+ if (should_deliver(p, skb)) {
+ if (prev != NULL) {
+ struct sk_buff *skb2;
+
+ if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL) {
+ br->statistics.tx_dropped++;
+ kfree_skb(skb);
+ return;
+ }
+
+ __packet_hook(prev, skb2);
+ }
+
+ prev = p;
+ }
+
+ p = p->next;
+ }
+
+ if (prev != NULL) {
+ __packet_hook(prev, skb);
+ return;
+ }
+
+ kfree_skb(skb);
+}
+
+/* called under bridge lock */
+void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb, int clone)
+{
+ br_flood(br, skb, clone, __br_deliver);
+}
+
+/* called under bridge lock */
+void br_flood_forward(struct net_bridge *br, struct sk_buff *skb, int clone)
+{
+ br_flood(br, skb, clone, __br_forward);
+}
diff --git a/br-nf-bds/linux/net/bridge/br_input.c b/br-nf-bds/linux/net/bridge/br_input.c
new file mode 100644
index 0000000..b9487dc
--- /dev/null
+++ b/br-nf-bds/linux/net/bridge/br_input.c
@@ -0,0 +1,168 @@
+/*
+ * Handle incoming frames
+ * Linux ethernet bridge
+ *
+ * Authors:
+ * Lennert Buytenhek <buytenh@gnu.org>
+ *
+ * $Id: br_input.c,v 1.1 2002/06/01 19:23:53 bdschuym Exp $
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/netfilter_bridge.h>
+#include "br_private.h"
+
+unsigned char bridge_ula[6] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
+
+static int br_pass_frame_up_finish(struct sk_buff *skb)
+{
+ netif_rx(skb);
+
+ return 0;
+}
+
+static void br_pass_frame_up(struct net_bridge *br, struct sk_buff *skb)
+{
+ struct net_device *indev;
+
+ br->statistics.rx_packets++;
+ br->statistics.rx_bytes += skb->len;
+
+ indev = skb->dev;
+ skb->dev = &br->dev;
+ skb->pkt_type = PACKET_HOST;
+ skb_push(skb, ETH_HLEN);
+ skb->protocol = eth_type_trans(skb, &br->dev);
+
+ NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL,
+ br_pass_frame_up_finish);
+}
+
+int br_handle_frame_finish(struct sk_buff *skb)
+{
+ struct net_bridge *br;
+ unsigned char *dest;
+ struct net_bridge_fdb_entry *dst;
+ struct net_bridge_port *p;
+ int passedup;
+
+ dest = skb->mac.ethernet->h_dest;
+
+ p = skb->dev->br_port;
+ if (p == NULL)
+ goto err_nolock;
+
+ br = p->br;
+ read_lock(&br->lock);
+ if (skb->dev->br_port == NULL)
+ goto err;
+
+ passedup = 0;
+ if (br->dev.flags & IFF_PROMISC) {
+ struct sk_buff *skb2;
+
+ skb2 = skb_clone(skb, GFP_ATOMIC);
+ if (skb2 != NULL) {
+ passedup = 1;
+ br_pass_frame_up(br, skb2);
+ }
+ }
+
+ if (dest[0] & 1) {
+ br_flood_forward(br, skb, !passedup);
+ if (!passedup)
+ br_pass_frame_up(br, skb);
+ goto out;
+ }
+
+ dst = br_fdb_get(br, dest);
+ if (dst != NULL && dst->is_local) {
+ if (!passedup)
+ br_pass_frame_up(br, skb);
+ else
+ kfree_skb(skb);
+ br_fdb_put(dst);
+ goto out;
+ }
+
+ if (dst != NULL) {
+ br_forward(dst->dst, skb);
+ br_fdb_put(dst);
+ goto out;
+ }
+
+ br_flood_forward(br, skb, 0);
+
+out:
+ read_unlock(&br->lock);
+ return 0;
+
+err:
+ read_unlock(&br->lock);
+err_nolock:
+ kfree_skb(skb);
+ return 0;
+}
+
+void br_handle_frame(struct sk_buff *skb)
+{
+ struct net_bridge *br;
+ unsigned char *dest;
+ struct net_bridge_port *p;
+
+ dest = skb->mac.ethernet->h_dest;
+
+ p = skb->dev->br_port;
+ if (p == NULL)
+ goto err_nolock;
+
+ br = p->br;
+ read_lock(&br->lock);
+ if (skb->dev->br_port == NULL)
+ goto err;
+
+ if (!(br->dev.flags & IFF_UP) ||
+ p->state == BR_STATE_DISABLED)
+ goto err;
+
+ if (skb->mac.ethernet->h_source[0] & 1)
+ goto err;
+
+ if (p->state == BR_STATE_LEARNING ||
+ p->state == BR_STATE_FORWARDING)
+ br_fdb_insert(br, p, skb->mac.ethernet->h_source, 0);
+
+ if (br->stp_enabled &&
+ !memcmp(dest, bridge_ula, 5) &&
+ !(dest[5] & 0xF0))
+ goto handle_special_frame;
+
+ if (p->state == BR_STATE_FORWARDING) {
+ NF_HOOK(PF_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
+ br_handle_frame_finish);
+ read_unlock(&br->lock);
+ return;
+ }
+
+err:
+ read_unlock(&br->lock);
+err_nolock:
+ kfree_skb(skb);
+ return;
+
+handle_special_frame:
+ if (!dest[5]) {
+ br_stp_handle_bpdu(skb);
+ return;
+ }
+
+ kfree_skb(skb);
+}
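/*
 * Editor's illustrative sketch -- not part of the original patch.  It spells
 * out what the bridge_ula test in br_handle_frame() above accepts: any
 * destination in the reserved range 01:80:c2:00:00:00 - 01:80:c2:00:00:0f is
 * treated as special, and only ...:00 (the STP group address, dest[5] == 0)
 * is then handed to br_stp_handle_bpdu().  Names below are hypothetical.
 */
#include <stdio.h>
#include <string.h>

static const unsigned char bridge_ula[6] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };

static int is_reserved_group_addr(const unsigned char *dest)
{
	return memcmp(dest, bridge_ula, 5) == 0 && (dest[5] & 0xF0) == 0;
}

int main(void)
{
	unsigned char stp[6]   = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
	unsigned char pause[6] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x01 };
	unsigned char other[6] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x10 };

	printf("%d %d %d\n",
	       is_reserved_group_addr(stp),	/* 1: reserved, and a BPDU     */
	       is_reserved_group_addr(pause),	/* 1: reserved, but not a BPDU */
	       is_reserved_group_addr(other));	/* 0: outside the 00..0f range */
	return 0;
}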
diff --git a/br-nf-bds/linux/net/bridge/br_netfilter.c b/br-nf-bds/linux/net/bridge/br_netfilter.c
new file mode 100644
index 0000000..6f0981d
--- /dev/null
+++ b/br-nf-bds/linux/net/bridge/br_netfilter.c
@@ -0,0 +1,567 @@
+/*
+ * Handle firewalling
+ * Linux ethernet bridge
+ *
+ * Authors:
+ * Lennert Buytenhek <buytenh@gnu.org>
+ * Bart De Schuymer <bart.de.schuymer@pandora.be>
+ *
+ * $Id: br_netfilter.c,v 1.1 2002/06/01 19:23:54 bdschuym Exp $
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Lennert dedicates this file to Kerstin Wurdinger.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/ip.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_ether.h>
+#include <linux/netfilter_bridge.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/in_route.h>
+#include <net/ip.h>
+#include <net/tcp.h>
+#include <asm/uaccess.h>
+#include <asm/checksum.h>
+#include "br_private.h"
+
+
+#ifndef WE_REALLY_INSIST_ON_NOT_HAVING_NAT_SUPPORT
+/* As the original source/destination addresses are variables private to this
+ * file, we store them in unused space at the end of the control buffer.
+ * On 64-bit platforms the TCP control buffer size still leaves us 8 bytes
+ * of space at the end, so that fits. Usage of the original source address
+ * and the original destination address never overlaps (daddr is needed
+ * around PRE_ROUTING, and saddr around POST_ROUTING), so that's okay as
+ * well.
+ */
+#define skb_origaddr(skb) (*((u32 *)((skb)->cb + sizeof((skb)->cb) - 4)))
+
+#define store_orig_dstaddr(skb) (skb_origaddr(skb) = (skb)->nh.iph->daddr)
+#define store_orig_srcaddr(skb) (skb_origaddr(skb) = (skb)->nh.iph->saddr)
+#define dnat_took_place(skb) (skb_origaddr(skb) != (skb)->nh.iph->daddr)
+#define snat_took_place(skb) (skb_origaddr(skb) != (skb)->nh.iph->saddr)
+#else
+#define store_orig_dstaddr(skb)
+#define store_orig_srcaddr(skb)
+#define dnat_took_place(skb) (0)
+#define snat_took_place(skb) (0)
+#endif
+
+
+#define has_bridge_parent(device) ((device)->br_port != NULL)
+#define bridge_parent(device) (&((device)->br_port->br->dev))
+
+
+/* As opposed to the DNAT case, for the SNAT case it's not quite
+ * clear what we should do with ethernet addresses in NAT'ed
+ * packets. Use this heuristic for now.
+ */
+static inline void __maybe_fixup_src_address(struct sk_buff *skb)
+{
+ if (snat_took_place(skb) &&
+ inet_addr_type(skb->nh.iph->saddr) == RTN_LOCAL) {
+ memcpy(skb->mac.ethernet->h_source,
+ bridge_parent(skb->dev)->dev_addr,
+ ETH_ALEN);
+ }
+}
+
+
+/* We need these fake structures to make netfilter happy --
+ * lots of places assume that skb->dst != NULL, which isn't
+ * all that unreasonable.
+ *
+ * Currently, we fill in the PMTU entry because netfilter
+ * refragmentation needs it, and the rt_flags entry because
+ * ipt_REJECT needs it. Future netfilter modules might
+ * require us to fill additional fields.
+ */
+static struct net_device __fake_net_device = {
+ hard_header_len: ETH_HLEN
+};
+
+static struct rtable __fake_rtable = {
+ u: {
+ dst: {
+ __refcnt: ATOMIC_INIT(1),
+ dev: &__fake_net_device,
+ pmtu: 1500
+ }
+ },
+
+ rt_flags: 0
+};
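/*
 * Editor's illustrative sketch -- not part of the original patch.  The fake
 * structures above exist because downstream code dereferences skb->dst
 * without a NULL check; the miniature structures below are hypothetical and
 * only mimic that access pattern, to show why a bridged packet must carry a
 * dst whose pmtu (and rt_flags) fields are filled in.
 */
#include <stdio.h>

struct toy_dst { unsigned int pmtu; };
struct toy_skb { struct toy_dst *dst; };

/* A consumer in the style of the refragmentation code: no NULL check. */
static int needs_frag(const struct toy_skb *skb, unsigned int len)
{
	return len > skb->dst->pmtu;	/* would oops if skb->dst were NULL */
}

static struct toy_dst toy_fake_dst = { .pmtu = 1500 };

int main(void)
{
	/* Attach the fake dst, as the bridge code attaches its fake rtable. */
	struct toy_skb skb = { .dst = &toy_fake_dst };

	printf("1400 bytes: %d, 1600 bytes: %d\n",
	       needs_frag(&skb, 1400), needs_frag(&skb, 1600));
	return 0;
}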
+
+
+/* PF_BRIDGE/PRE_ROUTING *********************************************/
+static void __br_dnat_complain(void)
+{
+ static unsigned long last_complaint = 0;
+
+ if (jiffies - last_complaint >= 5 * HZ) {
+ printk(KERN_WARNING "Performing cross-bridge DNAT requires IP "
+ "forwarding to be enabled\n");
+ last_complaint = jiffies;
+ }
+}
+
+
+static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
+{
+ skb->dev = bridge_parent(skb->dev);
+ skb->dst->output(skb);
+ return 0;
+}
+
+/* This requires some explaining. If DNAT has taken place,
+ * we will need to fix up the destination ethernet address,
+ * and this is a tricky process.
+ *
+ * There are two cases to consider:
+ * 1. The packet was DNAT'ed to a device in the same bridge
+ * port group as it was received on. We can still bridge
+ * the packet.
+ * 2. The packet was DNAT'ed to a different device, either
+ * a non-bridged device or another bridge port group.
+ * The packet will need to be routed.
+ *
+ * The way to distinguish between the two is by calling ip_route_input()
+ * and looking at skb->dst->dev, which ip_route_input() sets to the
+ * destination device when it succeeds.
+ *
+ * Let us first consider the case where ip_route_input() succeeds:
+ *
+ * If skb->dst->dev equals the logical bridge device the packet came in on,
+ * we can consider this bridging. We then call skb->dst->output() which will
+ * make the packet enter br_nf_local_out() not much later. In that function
+ * it is assured that the iptables FORWARD chain is traversed for the packet.
+ *
+ * Else, the packet is considered to be routed and we just change the
+ * destination MAC address so that the packet will later be passed up to the ip
+ * stack to be routed.
+ *
+ * Let us now consider the case where ip_route_input() fails:
+ *
+ * After an "echo '0' > /proc/sys/net/ipv4/ip_forward", ip_route_input() will
+ * fail, while ip_route_output() will still succeed. The source address passed
+ * to ip_route_output() is set to zero, so ip_route_output()
+ * thinks we're handling a locally generated packet and won't care whether
+ * IP forwarding is enabled. We send a warning message to the user's log
+ * telling them to enable IP forwarding.
+ *
+ * ip_route_input() will also fail if there is no route available. Then we just
+ * drop the packet.
+ *
+ * The other special thing happening here is setting skb->physoutdev to
+ * &__fake_net_device (resp. NULL) for bridged (resp. routed) packets. This is
+ * needed so that br_nf_local_out() knows it has to give the packets to
+ * the NF_BR_FORWARD (resp. NF_BR_LOCAL_OUT) bridge hook. See that function.
+ * --Lennert, 20020411
+ * --Bart, 20020416 (updated)
+ */
+
+static int br_nf_pre_routing_finish(struct sk_buff *skb)
+{
+ struct net_device *dev = skb->dev;
+ struct iphdr *iph = skb->nh.iph;
+
+ if (dnat_took_place(skb)) {
+ if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev)) {
+ struct rtable *rt;
+
+ if (!ip_route_output(&rt, iph->daddr, 0, iph->tos, 0)) {
+ // bridged DNAT'ed traffic doesn't depend on
+ // ip_forwarding being enabled
+ if (((struct dst_entry *)rt)->dev == dev) {
+ skb->dst = (struct dst_entry *)rt;
+ goto bridged_dnat;
+ }
+ __br_dnat_complain();
+ dst_release((struct dst_entry *)rt);
+ }
+ kfree_skb(skb);
+ return 0;
+ } else {
+ if (skb->dst->dev == dev) {
+bridged_dnat:
+ // tell br_nf_local_out this is a bridged frame
+ skb->physoutdev = &__fake_net_device;
+ skb->dev = skb->physindev;
+ NF_HOOK_THRESH(PF_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
+ br_nf_pre_routing_finish_bridge, 1);
+ return 0;
+ }
+ // tell br_nf_local_out this is a routed frame
+ skb->physoutdev = NULL;
+ memcpy(skb->mac.ethernet->h_dest, dev->dev_addr, ETH_ALEN);
+ }
+ } else {
+ skb->dst = (struct dst_entry *)&__fake_rtable;
+ dst_hold(skb->dst);
+ }
+
+ skb->dev = skb->physindev;
+ NF_HOOK_THRESH(PF_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
+ br_handle_frame_finish, 1);
+
+ return 0;
+}
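/*
 * Editor's illustrative sketch -- not part of the original patch.  It reduces
 * the decision explained in the comment above br_nf_pre_routing_finish() to
 * its three outcomes.  route_lookup() is a hypothetical stand-in for
 * ip_route_input()/ip_route_output(), and the device names and addresses are
 * made up for the example.
 */
#include <stdio.h>
#include <string.h>

enum verdict { BRIDGE_IT, ROUTE_IT, DROP_IT };

/* Pretend routing table: returns the output device for daddr, or NULL. */
static const char *route_lookup(const char *daddr)
{
	if (strcmp(daddr, "10.0.0.2") == 0)
		return "br0";		/* reachable through the same bridge */
	if (strcmp(daddr, "192.168.1.9") == 0)
		return "eth2";		/* reachable through another device  */
	return NULL;			/* no route (or forwarding disabled) */
}

static enum verdict after_pre_routing(int dnat_took_place,
				      const char *bridge_dev,
				      const char *new_daddr)
{
	const char *outdev;

	if (!dnat_took_place)
		return BRIDGE_IT;	/* destination untouched: keep bridging */

	outdev = route_lookup(new_daddr);
	if (outdev == NULL)
		return DROP_IT;		/* complain and drop                    */
	if (strcmp(outdev, bridge_dev) == 0)
		return BRIDGE_IT;	/* case 1: stays within the bridge      */
	return ROUTE_IT;		/* case 2: fix the MAC, let IP route it */
}

int main(void)
{
	printf("%d %d %d %d\n",
	       after_pre_routing(0, "br0", "10.0.0.2"),		/* BRIDGE_IT */
	       after_pre_routing(1, "br0", "10.0.0.2"),		/* BRIDGE_IT */
	       after_pre_routing(1, "br0", "192.168.1.9"),	/* ROUTE_IT  */
	       after_pre_routing(1, "br0", "172.16.0.1"));	/* DROP_IT   */
	return 0;
}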
+
+/* Replicate the checks that IPv4 does on packet reception.
+ * Set skb->dev to the bridge device (i.e. parent of the
+ * receiving device) to make netfilter happy, the REDIRECT
+ * target in particular. Save the original destination IP
+ * address to be able to detect DNAT afterwards.
+ */
+static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *))
+{
+ struct iphdr *iph;
+ __u32 len;
+ struct sk_buff *skb;
+
+ if ((*pskb)->protocol != __constant_htons(ETH_P_IP))
+ return NF_ACCEPT;
+
+ if ((skb = skb_share_check(*pskb, GFP_ATOMIC)) == NULL)
+ goto out;
+
+ if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+ goto inhdr_error;
+
+ iph = skb->nh.iph;
+ if (iph->ihl < 5 || iph->version != 4)
+ goto inhdr_error;
+
+ if (!pskb_may_pull(skb, 4*iph->ihl))
+ goto inhdr_error;
+
+ iph = skb->nh.iph;
+ if (ip_fast_csum((__u8 *)iph, iph->ihl) != 0)
+ goto inhdr_error;
+
+ len = ntohs(iph->tot_len);
+ if (skb->len < len || len < 4*iph->ihl)
+ goto inhdr_error;
+
+ if (skb->len > len) {
+ __pskb_trim(skb, len);
+ if (skb->ip_summed == CHECKSUM_HW)
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+
+ skb->physindev = skb->dev;
+ skb->dev = bridge_parent(skb->dev);
+ if (skb->pkt_type == PACKET_OTHERHOST)
+ skb->pkt_type = PACKET_HOST;
+ store_orig_dstaddr(skb);
+ NF_HOOK(PF_INET, NF_IP_PRE_ROUTING, skb, skb->dev, NULL,
+ br_nf_pre_routing_finish);
+
+ return NF_STOLEN;
+
+inhdr_error:
+// IP_INC_STATS_BH(IpInHdrErrors);
+out:
+ return NF_DROP;
+}
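/*
 * Editor's illustrative sketch -- not part of the original patch.  These are
 * the same IPv4 sanity checks br_nf_pre_routing() replicates above, applied
 * to a plain byte buffer, with the standard 16-bit one's complement header
 * checksum written out.  Function names and the sample header are made up.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

static uint16_t ip_hdr_csum(const uint8_t *hdr, size_t words)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i < words; i++)
		sum += (uint32_t)((hdr[2 * i] << 8) | hdr[2 * i + 1]);
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

/* Returns 0 if buf starts with a plausible IPv4 header, -1 otherwise. */
static int ipv4_sane(const uint8_t *buf, size_t buflen)
{
	unsigned int version, ihl, tot_len;

	if (buflen < 20)
		return -1;			/* too short for a minimal header   */
	version = buf[0] >> 4;
	ihl = buf[0] & 0x0f;
	if (version != 4 || ihl < 5)
		return -1;			/* not IPv4, or bogus header length */
	if (buflen < 4u * ihl)
		return -1;			/* options claimed but missing      */
	if (ip_hdr_csum(buf, 2 * ihl) != 0)
		return -1;			/* header checksum must verify      */
	tot_len = (buf[2] << 8) | buf[3];
	if (tot_len < 4u * ihl || tot_len > buflen)
		return -1;			/* total length inconsistent        */
	return 0;				/* anything longer gets trimmed     */
}

int main(void)
{
	uint8_t h[20] = { 0x45, 0, 0, 20, 0, 0, 0x40, 0, 64, 6, 0, 0,
			  192, 168, 0, 1, 192, 168, 0, 2 };
	uint16_t c = ip_hdr_csum(h, 10);	/* checksum field still zero */

	h[10] = c >> 8;
	h[11] = c & 0xff;
	printf("sane: %d\n", ipv4_sane(h, sizeof(h)) == 0);
	return 0;
}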
+
+
+/* PF_BRIDGE/LOCAL_IN ************************************************/
+/* The packet is locally destined, which requires a real
+ * dst_entry, so detach the fake one. On the way up, the
+ * packet would pass through PRE_ROUTING again (which already
+ * took place when the packet entered the bridge), but we
+ * register an IPv4 PRE_ROUTING 'sabotage' hook that will
+ * prevent this from happening.
+ */
+static unsigned int br_nf_local_in(unsigned int hook, struct sk_buff **pskb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *))
+{
+ struct sk_buff *skb = *pskb;
+
+ if (skb->protocol != __constant_htons(ETH_P_IP))
+ return NF_ACCEPT;
+
+ if (skb->dst == (struct dst_entry *)&__fake_rtable) {
+ dst_release(skb->dst);
+ skb->dst = NULL;
+ }
+
+ return NF_ACCEPT;
+}
+
+
+/* PF_BRIDGE/FORWARD *************************************************/
+static int br_nf_forward_finish(struct sk_buff *skb)
+{
+ NF_HOOK_THRESH(PF_BRIDGE, NF_BR_FORWARD, skb, skb->physindev,
+ skb->dev, br_forward_finish, 1);
+
+ return 0;
+}
+
+/* This is the 'purely bridged' case. We pass the packet to
+ * netfilter with indev and outdev set to the bridge device,
+ * but we are still able to filter on the 'real' indev/outdev
+ * because another bit of the bridge-nf patch overloads the
+ * '-i' and '-o' iptables interface checks to take
+ * skb->phys{in,out}dev into account as well (so both the real
+ * device and the bridge device will match).
+ */
+static unsigned int br_nf_forward(unsigned int hook, struct sk_buff **pskb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *))
+{
+ struct sk_buff *skb = *pskb;
+
+ // don't mess with non-IP frames; also don't mess with IP packets
+ // when br_nf_local_out_finish explicitly says so.
+ if (skb->protocol != __constant_htons(ETH_P_IP) || skb->physindev == NULL)
+ return NF_ACCEPT;
+
+ skb->physoutdev = skb->dev;
+ NF_HOOK(PF_INET, NF_IP_FORWARD, skb, bridge_parent(skb->physindev),
+ bridge_parent(skb->dev), br_nf_forward_finish);
+
+ return NF_STOLEN;
+}
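/*
 * Editor's illustrative sketch -- not part of the original patch.  It shows
 * the effect of the overloaded '-i'/'-o' checks described above with plain
 * string compares; the real mask-based version is ip_packet_match() in
 * ip_tables.c further down in this patch.  Names below are hypothetical.
 */
#include <stdio.h>
#include <string.h>

static int iface_matches(const char *rule_if,
			 const char *bridge_dev,
			 const char *phys_dev)
{
	if (rule_if[0] == '\0')
		return 1;			/* rule has no interface restriction */
	return strcmp(rule_if, bridge_dev) == 0 ||
	       strcmp(rule_if, phys_dev) == 0;	/* either device satisfies it */
}

int main(void)
{
	/* A frame bridged in on eth0, which is a port of br0. */
	printf("%d %d %d\n",
	       iface_matches("br0",  "br0", "eth0"),	/* 1: bridge device matches */
	       iface_matches("eth0", "br0", "eth0"),	/* 1: physical port matches */
	       iface_matches("eth1", "br0", "eth0"));	/* 0: neither matches       */
	return 0;
}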
+
+
+/* PF_BRIDGE/LOCAL_OUT ***********************************************/
+static int br_nf_local_out_finish_forward(struct sk_buff *skb)
+{
+ struct net_device *dev;
+
+ dev = skb->physindev;
+ // tell br_nf_forward to stay away
+ skb->physindev = NULL;
+ NF_HOOK(PF_BRIDGE, NF_BR_FORWARD, skb, dev, skb->dev,
+ br_forward_finish);
+
+ return 0;
+}
+
+static int br_nf_local_out_finish(struct sk_buff *skb)
+{
+ NF_HOOK_THRESH(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
+ br_forward_finish, INT_MIN + 1);
+
+ return 0;
+}
+
+
+/* This hook sees both locally originated IP packets and forwarded
+ * IP packets (in both cases the destination device is a bridge
+ * device). For the sake of interface transparency (i.e. properly
+ * overloading the '-o' option), we steal packets destined to
+ * a bridge device away from the IPv4 FORWARD and OUTPUT hooks,
+ * and reinject them later, when we have determined the real
+ * output device. This reinjecting happens here.
+ *
+ * If skb->physindev is NULL, the bridge-nf code never touched
+ * this packet before, and so the packet was locally originated.
+ * We call the IPv4 LOCAL_OUT hook.
+ *
+ * If skb->physindev isn't NULL, there are two cases:
+ * 1. The packet was IP routed.
+ * 2. The packet was cross-bridge DNAT'ed (see the comment near
+ * PF_BRIDGE/PRE_ROUTING).
+ * In both cases, we call the IPv4 FORWARD hook. In case 1,
+ * if the packet originally came from a bridge device, and in
+ * case 2, skb->physindev will have a bridge device as parent,
+ * so we use that parent device as indev. Otherwise, we just
+ * use physindev.
+ *
+ * If skb->physoutdev == NULL the bridge code never touched the
+ * packet or the packet was routed in br_nf_pre_routing_finish().
+ * We give the packet to the bridge NF_BR_LOCAL_OUT hook.
+ * If not, the packet is actually a bridged one so we give it to
+ * the NF_BR_FORWARD hook.
+ */
+
+static unsigned int br_nf_local_out(unsigned int hook, struct sk_buff **pskb, const struct net_device *in, const struct net_device *out, int (*_okfn)(struct sk_buff *))
+{
+ int hookno, prio;
+ int (*okfn)(struct sk_buff *skb);
+ struct net_device *realindev;
+ struct sk_buff *skb = *pskb;
+
+ if (skb->protocol != __constant_htons(ETH_P_IP))
+ return NF_ACCEPT;
+
+ /* Sometimes we get packets with NULL ->dst here (for example,
+ * running a dhcp client daemon triggers this).
+ */
+ if (skb->dst == NULL)
+ return NF_ACCEPT;
+
+ // bridged, take forward
+ // (see big note in front of br_nf_pre_routing_finish)
+ if (skb->physoutdev == &__fake_net_device) {
+ okfn = br_nf_local_out_finish_forward;
+ } else if (skb->physoutdev == NULL) {
+ // non-bridged: routed or locally generated traffic, take local_out
+ // (see big note in front of br_nf_pre_routing_finish)
+ okfn = br_nf_local_out_finish;
+ } else {
+ printk("ARGH: bridge_or_routed hack doesn't work\n");
+ okfn = br_nf_local_out_finish;
+ }
+
+ skb->physoutdev = skb->dev;
+
+ hookno = NF_IP_LOCAL_OUT;
+ prio = NF_IP_PRI_BRIDGE_SABOTAGE;
+ if ((realindev = skb->physindev) != NULL) {
+ hookno = NF_IP_FORWARD;
+ // there is an iptables mangle table FORWARD chain with
+ // priority -150. This chain should see the physical out-dev.
+ prio = NF_IP_PRI_BRIDGE_SABOTAGE_FORWARD;
+ if (has_bridge_parent(realindev))
+ realindev = bridge_parent(realindev);
+ }
+
+ NF_HOOK_THRESH(PF_INET, hookno, skb, realindev,
+ bridge_parent(skb->dev), okfn, prio + 1);
+
+ return NF_STOLEN;
+}
+
+
+/* PF_BRIDGE/POST_ROUTING ********************************************/
+static int br_nf_post_routing_finish(struct sk_buff *skb)
+{
+ __maybe_fixup_src_address(skb);
+ NF_HOOK_THRESH(PF_BRIDGE, NF_BR_POST_ROUTING, skb, NULL,
+ bridge_parent(skb->dev), br_dev_queue_push_xmit, 1);
+
+ return 0;
+}
+
+static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff **pskb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *))
+{
+ struct sk_buff *skb = *pskb;
+
+ /* Be very paranoid. */
+ if (skb->mac.raw < skb->head || skb->mac.raw + ETH_HLEN > skb->data) {
+ printk(KERN_CRIT "Argh!! Fuck me harder with a chainsaw. ");
+ if (skb->dev != NULL) {
+ printk("[%s]", skb->dev->name);
+ if (has_bridge_parent(skb->dev))
+ printk("[%s]", bridge_parent(skb->dev)->name);
+ }
+ printk("\n");
+ return NF_ACCEPT;
+ }
+
+ if (skb->protocol != __constant_htons(ETH_P_IP))
+ return NF_ACCEPT;
+
+ /* Sometimes we get packets with NULL ->dst here (for example,
+ * running a dhcp client daemon triggers this).
+ */
+ if (skb->dst == NULL)
+ return NF_ACCEPT;
+
+ store_orig_srcaddr(skb);
+ NF_HOOK(PF_INET, NF_IP_POST_ROUTING, skb, NULL,
+ bridge_parent(skb->dev), br_nf_post_routing_finish);
+
+ return NF_STOLEN;
+}
+
+
+/* IPv4/SABOTAGE *****************************************************/
+/* Don't hand locally destined packets to PF_INET/PRE_ROUTING
+ * for the second time. */
+static unsigned int ipv4_sabotage_in(unsigned int hook, struct sk_buff **pskb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *))
+{
+ if (in->hard_start_xmit == br_dev_xmit &&
+ okfn != br_nf_pre_routing_finish) {
+ okfn(*pskb);
+ return NF_STOLEN;
+ }
+
+ return NF_ACCEPT;
+}
+
+/* Postpone execution of PF_INET/FORWARD, PF_INET/LOCAL_OUT
+ * and PF_INET/POST_ROUTING until we have done the forwarding
+ * decision in the bridge code and have determined skb->physoutdev.
+ */
+static unsigned int ipv4_sabotage_out(unsigned int hook, struct sk_buff **pskb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *))
+{
+ if (out->hard_start_xmit == br_dev_xmit &&
+ okfn != br_nf_forward_finish &&
+ okfn != br_nf_local_out_finish &&
+ okfn != br_nf_post_routing_finish) {
+ struct sk_buff *skb = *pskb;
+
+ if (hook == NF_IP_FORWARD && skb->physindev == NULL)
+ skb->physindev = (struct net_device *)in;
+ okfn(skb);
+ return NF_STOLEN;
+ }
+
+ return NF_ACCEPT;
+}
+
+
+static struct nf_hook_ops br_nf_ops[] = {
+ { { NULL, NULL }, br_nf_pre_routing, PF_BRIDGE, NF_BR_PRE_ROUTING, 0 },
+ { { NULL, NULL }, br_nf_local_in, PF_BRIDGE, NF_BR_LOCAL_IN, 0 },
+ { { NULL, NULL }, br_nf_forward, PF_BRIDGE, NF_BR_FORWARD, 0 },
+ // we need INT_MIN, so innocent NF_BR_LOCAL_OUT functions don't
+ // get bridged traffic as input
+ { { NULL, NULL }, br_nf_local_out, PF_BRIDGE, NF_BR_LOCAL_OUT, INT_MIN },
+ { { NULL, NULL }, br_nf_post_routing, PF_BRIDGE, NF_BR_POST_ROUTING, 0 },
+
+ { { NULL, NULL }, ipv4_sabotage_in, PF_INET, NF_IP_PRE_ROUTING, NF_IP_PRI_FIRST },
+
+ { { NULL, NULL }, ipv4_sabotage_out, PF_INET, NF_IP_FORWARD, NF_IP_PRI_BRIDGE_SABOTAGE_FORWARD },
+ { { NULL, NULL }, ipv4_sabotage_out, PF_INET, NF_IP_LOCAL_OUT, NF_IP_PRI_BRIDGE_SABOTAGE },
+ { { NULL, NULL }, ipv4_sabotage_out, PF_INET, NF_IP_POST_ROUTING, NF_IP_PRI_FIRST },
+};
+
+#define NUMHOOKS (sizeof(br_nf_ops)/sizeof(br_nf_ops[0]))
+
+
+int br_netfilter_init(void)
+{
+ int i;
+
+#ifndef WE_REALLY_INSIST_ON_NOT_HAVING_NAT_SUPPORT
+ if (sizeof(struct tcp_skb_cb) + 4 >= sizeof(((struct sk_buff *)NULL)->cb)) {
+ extern int __too_little_space_in_control_buffer(void);
+ __too_little_space_in_control_buffer();
+ }
+#endif
+
+ for (i=0;i<NUMHOOKS;i++) {
+ int ret;
+
+ if ((ret = nf_register_hook(&br_nf_ops[i])) >= 0)
+ continue;
+
+ while (i--)
+ nf_unregister_hook(&br_nf_ops[i]);
+
+ return ret;
+ }
+
+ printk(KERN_NOTICE "Bridge firewalling registered\n");
+
+ return 0;
+}
+
+void br_netfilter_fini(void)
+{
+ int i;
+
+ for (i=NUMHOOKS-1;i>=0;i--)
+ nf_unregister_hook(&br_nf_ops[i]);
+}
diff --git a/br-nf-bds/linux/net/bridge/br_private.h b/br-nf-bds/linux/net/bridge/br_private.h
new file mode 100644
index 0000000..155afc9
--- /dev/null
+++ b/br-nf-bds/linux/net/bridge/br_private.h
@@ -0,0 +1,212 @@
+/*
+ * Linux ethernet bridge
+ *
+ * Authors:
+ * Lennert Buytenhek <buytenh@gnu.org>
+ *
+ * $Id: br_private.h,v 1.1 2002/06/01 19:23:55 bdschuym Exp $
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _BR_PRIVATE_H
+#define _BR_PRIVATE_H
+
+#include <linux/netdevice.h>
+#include <linux/miscdevice.h>
+#include <linux/if_bridge.h>
+#include "br_private_timer.h"
+
+#define BR_HASH_BITS 8
+#define BR_HASH_SIZE (1 << BR_HASH_BITS)
+
+#define BR_HOLD_TIME (1*HZ)
+
+typedef struct bridge_id bridge_id;
+typedef struct mac_addr mac_addr;
+typedef __u16 port_id;
+
+struct bridge_id
+{
+ unsigned char prio[2];
+ unsigned char addr[6];
+};
+
+struct mac_addr
+{
+ unsigned char addr[6];
+ unsigned char pad[2];
+};
+
+struct net_bridge_fdb_entry
+{
+ struct net_bridge_fdb_entry *next_hash;
+ struct net_bridge_fdb_entry **pprev_hash;
+ atomic_t use_count;
+ mac_addr addr;
+ struct net_bridge_port *dst;
+ unsigned long ageing_timer;
+ unsigned is_local:1;
+ unsigned is_static:1;
+};
+
+struct net_bridge_port
+{
+ struct net_bridge_port *next;
+ struct net_bridge *br;
+ struct net_device *dev;
+ int port_no;
+
+ /* STP */
+ port_id port_id;
+ int state;
+ int path_cost;
+ bridge_id designated_root;
+ int designated_cost;
+ bridge_id designated_bridge;
+ port_id designated_port;
+ unsigned topology_change_ack:1;
+ unsigned config_pending:1;
+ int priority;
+
+ struct br_timer forward_delay_timer;
+ struct br_timer hold_timer;
+ struct br_timer message_age_timer;
+};
+
+struct net_bridge
+{
+ struct net_bridge *next;
+ rwlock_t lock;
+ struct net_bridge_port *port_list;
+ struct net_device dev;
+ struct net_device_stats statistics;
+ rwlock_t hash_lock;
+ struct net_bridge_fdb_entry *hash[BR_HASH_SIZE];
+ struct timer_list tick;
+
+ /* STP */
+ bridge_id designated_root;
+ int root_path_cost;
+ int root_port;
+ int max_age;
+ int hello_time;
+ int forward_delay;
+ bridge_id bridge_id;
+ int bridge_max_age;
+ int bridge_hello_time;
+ int bridge_forward_delay;
+ unsigned stp_enabled:1;
+ unsigned topology_change:1;
+ unsigned topology_change_detected:1;
+
+ struct br_timer hello_timer;
+ struct br_timer tcn_timer;
+ struct br_timer topology_change_timer;
+ struct br_timer gc_timer;
+
+ int ageing_time;
+ int gc_interval;
+};
+
+extern struct notifier_block br_device_notifier;
+extern unsigned char bridge_ula[6];
+
+/* br.c */
+extern void br_dec_use_count(void);
+extern void br_inc_use_count(void);
+
+/* br_device.c */
+extern int br_dev_xmit(struct sk_buff *skb, struct net_device *dev);
+extern void br_dev_setup(struct net_device *dev);
+
+/* br_fdb.c */
+extern void br_fdb_changeaddr(struct net_bridge_port *p,
+ unsigned char *newaddr);
+extern void br_fdb_cleanup(struct net_bridge *br);
+extern void br_fdb_delete_by_port(struct net_bridge *br,
+ struct net_bridge_port *p);
+extern struct net_bridge_fdb_entry *br_fdb_get(struct net_bridge *br,
+ unsigned char *addr);
+extern void br_fdb_put(struct net_bridge_fdb_entry *ent);
+extern int br_fdb_get_entries(struct net_bridge *br,
+ unsigned char *_buf,
+ int maxnum,
+ int offset);
+extern void br_fdb_insert(struct net_bridge *br,
+ struct net_bridge_port *source,
+ unsigned char *addr,
+ int is_local);
+
+/* br_forward.c */
+extern void br_deliver(struct net_bridge_port *to,
+ struct sk_buff *skb);
+extern int br_dev_queue_push_xmit(struct sk_buff *skb);
+extern void br_forward(struct net_bridge_port *to,
+ struct sk_buff *skb);
+extern int br_forward_finish(struct sk_buff *skb);
+extern void br_flood_deliver(struct net_bridge *br,
+ struct sk_buff *skb,
+ int clone);
+extern void br_flood_forward(struct net_bridge *br,
+ struct sk_buff *skb,
+ int clone);
+
+/* br_if.c */
+extern int br_add_bridge(char *name);
+extern int br_del_bridge(char *name);
+extern int br_add_if(struct net_bridge *br,
+ struct net_device *dev);
+extern int br_del_if(struct net_bridge *br,
+ struct net_device *dev);
+extern int br_get_bridge_ifindices(int *indices,
+ int num);
+extern void br_get_port_ifindices(struct net_bridge *br,
+ int *ifindices);
+
+/* br_input.c */
+extern int br_handle_frame_finish(struct sk_buff *skb);
+extern void br_handle_frame(struct sk_buff *skb);
+
+/* br_ioctl.c */
+extern void br_call_ioctl_atomic(void (*fn)(void));
+extern int br_ioctl(struct net_bridge *br,
+ unsigned int cmd,
+ unsigned long arg0,
+ unsigned long arg1,
+ unsigned long arg2);
+extern int br_ioctl_deviceless_stub(unsigned long arg);
+
+/* br_netfilter.c */
+extern int br_netfilter_init(void);
+extern void br_netfilter_fini(void);
+
+/* br_stp.c */
+extern int br_is_root_bridge(struct net_bridge *br);
+extern struct net_bridge_port *br_get_port(struct net_bridge *br,
+ int port_no);
+extern void br_init_port(struct net_bridge_port *p);
+extern port_id br_make_port_id(struct net_bridge_port *p);
+extern void br_become_designated_port(struct net_bridge_port *p);
+
+/* br_stp_if.c */
+extern void br_stp_enable_bridge(struct net_bridge *br);
+extern void br_stp_disable_bridge(struct net_bridge *br);
+extern void br_stp_enable_port(struct net_bridge_port *p);
+extern void br_stp_disable_port(struct net_bridge_port *p);
+extern void br_stp_recalculate_bridge_id(struct net_bridge *br);
+extern void br_stp_set_bridge_priority(struct net_bridge *br,
+ int newprio);
+extern void br_stp_set_port_priority(struct net_bridge_port *p,
+ int newprio);
+extern void br_stp_set_path_cost(struct net_bridge_port *p,
+ int path_cost);
+
+/* br_stp_bpdu.c */
+extern void br_stp_handle_bpdu(struct sk_buff *skb);
+
+#endif
diff --git a/br-nf-bds/linux/net/ipv4/ip_output.c b/br-nf-bds/linux/net/ipv4/ip_output.c
new file mode 100644
index 0000000..c8d6fc4
--- /dev/null
+++ b/br-nf-bds/linux/net/ipv4/ip_output.c
@@ -0,0 +1,1016 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * The Internet Protocol (IP) output module.
+ *
+ * Version: $Id: ip_output.c,v 1.1 2002/06/01 19:23:49 bdschuym Exp $
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Donald Becker, <becker@super.org>
+ * Alan Cox, <Alan.Cox@linux.org>
+ * Richard Underwood
+ * Stefan Becker, <stefanb@yello.ping.de>
+ * Jorge Cwik, <jorge@laser.satlink.net>
+ * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
+ *
+ * See ip_input.c for original log
+ *
+ * Fixes:
+ * Alan Cox : Missing nonblock feature in ip_build_xmit.
+ * Mike Kilburn : htons() missing in ip_build_xmit.
+ * Bradford Johnson: Fix faulty handling of some frames when
+ * no route is found.
+ * Alexander Demenshin: Missing sk/skb free in ip_queue_xmit
+ * (in case if packet not accepted by
+ * output firewall rules)
+ * Mike McLagan : Routing by source
+ * Alexey Kuznetsov: use new route cache
+ * Andi Kleen: Fix broken PMTU recovery and remove
+ * some redundant tests.
+ * Vitaly E. Lavrov : Transparent proxy revived after year coma.
+ * Andi Kleen : Replace ip_reply with ip_send_reply.
+ * Andi Kleen : Split fast and slow ip_build_xmit path
+ * for decreased register pressure on x86
+ * and more readibility.
+ * Marc Boucher : When call_out_firewall returns FW_QUEUE,
+ * silently drop skb instead of failing with -EPERM.
+ * Detlev Wengorz : Copy protocol for fragments.
+ */
+
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/config.h>
+
+#include <linux/socket.h>
+#include <linux/sockios.h>
+#include <linux/in.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/proc_fs.h>
+#include <linux/stat.h>
+#include <linux/init.h>
+
+#include <net/snmp.h>
+#include <net/ip.h>
+#include <net/protocol.h>
+#include <net/route.h>
+#include <net/tcp.h>
+#include <net/udp.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+#include <net/arp.h>
+#include <net/icmp.h>
+#include <net/raw.h>
+#include <net/checksum.h>
+#include <net/inetpeer.h>
+#include <linux/igmp.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/mroute.h>
+#include <linux/netlink.h>
+
+/*
+ * Shall we try to damage output packets if routing dev changes?
+ */
+
+int sysctl_ip_dynaddr = 0;
+int sysctl_ip_default_ttl = IPDEFTTL;
+
+/* Generate a checksum for an outgoing IP datagram. */
+__inline__ void ip_send_check(struct iphdr *iph)
+{
+ iph->check = 0;
+ iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+}
+
+/* dev_loopback_xmit for use with netfilter. */
+static int ip_dev_loopback_xmit(struct sk_buff *newskb)
+{
+ newskb->mac.raw = newskb->data;
+ __skb_pull(newskb, newskb->nh.raw - newskb->data);
+ newskb->pkt_type = PACKET_LOOPBACK;
+ newskb->ip_summed = CHECKSUM_UNNECESSARY;
+ BUG_TRAP(newskb->dst);
+
+#ifdef CONFIG_NETFILTER_DEBUG
+ nf_debug_ip_loopback_xmit(newskb);
+#endif
+ netif_rx(newskb);
+ return 0;
+}
+
+/* Don't just hand NF_HOOK skb->dst->output, in case netfilter hook
+ changes route */
+static inline int
+output_maybe_reroute(struct sk_buff *skb)
+{
+ return skb->dst->output(skb);
+}
+
+/*
+ * Add an ip header to a skbuff and send it out.
+ */
+int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
+ u32 saddr, u32 daddr, struct ip_options *opt)
+{
+ struct rtable *rt = (struct rtable *)skb->dst;
+ struct iphdr *iph;
+
+ /* Build the IP header. */
+ if (opt)
+ iph=(struct iphdr *)skb_push(skb,sizeof(struct iphdr) + opt->optlen);
+ else
+ iph=(struct iphdr *)skb_push(skb,sizeof(struct iphdr));
+
+ iph->version = 4;
+ iph->ihl = 5;
+ iph->tos = sk->protinfo.af_inet.tos;
+ iph->frag_off = 0;
+ if (ip_dont_fragment(sk, &rt->u.dst))
+ iph->frag_off |= htons(IP_DF);
+ iph->ttl = sk->protinfo.af_inet.ttl;
+ iph->daddr = rt->rt_dst;
+ iph->saddr = rt->rt_src;
+ iph->protocol = sk->protocol;
+ iph->tot_len = htons(skb->len);
+ ip_select_ident(iph, &rt->u.dst, sk);
+ skb->nh.iph = iph;
+
+ if (opt && opt->optlen) {
+ iph->ihl += opt->optlen>>2;
+ ip_options_build(skb, opt, daddr, rt, 0);
+ }
+ ip_send_check(iph);
+
+ /* Send it out. */
+ return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
+ output_maybe_reroute);
+}
+
+static inline int ip_finish_output2(struct sk_buff *skb)
+{
+ struct dst_entry *dst = skb->dst;
+ struct hh_cache *hh = dst->hh;
+
+#ifdef CONFIG_NETFILTER_DEBUG
+ nf_debug_ip_finish_output2(skb);
+#endif /*CONFIG_NETFILTER_DEBUG*/
+
+ if (hh) {
+ read_lock_bh(&hh->hh_lock);
+ memcpy(skb->data - 16, hh->hh_data, 16);
+ read_unlock_bh(&hh->hh_lock);
+ skb_push(skb, hh->hh_len);
+ return hh->hh_output(skb);
+ } else if (dst->neighbour)
+ return dst->neighbour->output(skb);
+
+ if (net_ratelimit())
+ printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
+ kfree_skb(skb);
+ return -EINVAL;
+}
+
+__inline__ int ip_finish_output(struct sk_buff *skb)
+{
+ struct net_device *dev = skb->dst->dev;
+
+ skb->dev = dev;
+ skb->protocol = __constant_htons(ETH_P_IP);
+
+ return NF_HOOK(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev,
+ ip_finish_output2);
+}
+
+int ip_mc_output(struct sk_buff *skb)
+{
+ struct sock *sk = skb->sk;
+ struct rtable *rt = (struct rtable*)skb->dst;
+ struct net_device *dev = rt->u.dst.dev;
+
+ /*
+ * If the indicated interface is up and running, send the packet.
+ */
+ IP_INC_STATS(IpOutRequests);
+#ifdef CONFIG_IP_ROUTE_NAT
+ if (rt->rt_flags & RTCF_NAT)
+ ip_do_nat(skb);
+#endif
+
+ skb->dev = dev;
+ skb->protocol = __constant_htons(ETH_P_IP);
+
+ /*
+ * Multicasts are looped back for other local users
+ */
+
+ if (rt->rt_flags&RTCF_MULTICAST) {
+ if ((!sk || sk->protinfo.af_inet.mc_loop)
+#ifdef CONFIG_IP_MROUTE
+ /* Small optimization: do not loop back non-local frames
+ that came back after forwarding; they will be dropped
+ by ip_mr_input in any case.
+ Note that local frames are looped back to be delivered
+ to local recipients.
+
+ This check is duplicated in ip_mr_input at the moment.
+ */
+ && ((rt->rt_flags&RTCF_LOCAL) || !(IPCB(skb)->flags&IPSKB_FORWARDED))
+#endif
+ ) {
+ struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
+ if (newskb)
+ NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
+ newskb->dev,
+ ip_dev_loopback_xmit);
+ }
+
+ /* Multicasts with ttl 0 must not go beyond the host */
+
+ if (skb->nh.iph->ttl == 0) {
+ kfree_skb(skb);
+ return 0;
+ }
+ }
+
+ if (rt->rt_flags&RTCF_BROADCAST) {
+ struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
+ if (newskb)
+ NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
+ newskb->dev, ip_dev_loopback_xmit);
+ }
+
+ return ip_finish_output(skb);
+}
+
+int ip_output(struct sk_buff *skb)
+{
+#ifdef CONFIG_IP_ROUTE_NAT
+ struct rtable *rt = (struct rtable*)skb->dst;
+#endif
+
+ IP_INC_STATS(IpOutRequests);
+
+#ifdef CONFIG_IP_ROUTE_NAT
+ if (rt->rt_flags&RTCF_NAT)
+ ip_do_nat(skb);
+#endif
+
+ return ip_finish_output(skb);
+}
+
+/* Queues a packet to be sent, and starts the transmitter if necessary.
+ * This routine also needs to put in the total length and compute the
+ * checksum. We used to do this in two stages, ip_build_header() then
+ * this, but that scheme created a mess when routes disappeared etc.
+ * So we do it all here, and the TCP send engine has been changed to
+ * match. (No more unroutable FIN disasters, etc. wheee...) This will
+ * most likely make other reliable transport layers above IP easier
+ * to implement under Linux.
+ */
+static inline int ip_queue_xmit2(struct sk_buff *skb)
+{
+ struct sock *sk = skb->sk;
+ struct rtable *rt = (struct rtable *)skb->dst;
+ struct net_device *dev;
+ struct iphdr *iph = skb->nh.iph;
+
+ dev = rt->u.dst.dev;
+
+ /* This can happen when the transport layer has segments queued
+ * with a cached route, and by the time we get here things are
+ * re-routed to a device with a different MTU than the original
+ * device. Sick, but we must cover it.
+ */
+ if (skb_headroom(skb) < dev->hard_header_len && dev->hard_header) {
+ struct sk_buff *skb2;
+
+ skb2 = skb_realloc_headroom(skb, (dev->hard_header_len + 15) & ~15);
+ kfree_skb(skb);
+ if (skb2 == NULL)
+ return -ENOMEM;
+ if (sk)
+ skb_set_owner_w(skb2, sk);
+ skb = skb2;
+ iph = skb->nh.iph;
+ }
+
+ if (skb->len > rt->u.dst.pmtu)
+ goto fragment;
+
+ if (ip_dont_fragment(sk, &rt->u.dst))
+ iph->frag_off |= __constant_htons(IP_DF);
+
+ ip_select_ident(iph, &rt->u.dst, sk);
+
+ /* Add an IP checksum. */
+ ip_send_check(iph);
+
+ skb->priority = sk->priority;
+ return skb->dst->output(skb);
+
+fragment:
+ if (ip_dont_fragment(sk, &rt->u.dst)) {
+ /* Reject packet ONLY if TCP might fragment
+ * it itself, if we're careful enough.
+ */
+ iph->frag_off |= __constant_htons(IP_DF);
+ NETDEBUG(printk(KERN_DEBUG "sending pkt_too_big to self\n"));
+
+ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+ htonl(rt->u.dst.pmtu));
+ kfree_skb(skb);
+ return -EMSGSIZE;
+ }
+ ip_select_ident(iph, &rt->u.dst, sk);
+ if (skb->ip_summed == CHECKSUM_HW &&
+ (skb = skb_checksum_help(skb)) == NULL)
+ return -ENOMEM;
+ return ip_fragment(skb, skb->dst->output);
+}
+
+int ip_queue_xmit(struct sk_buff *skb)
+{
+ struct sock *sk = skb->sk;
+ struct ip_options *opt = sk->protinfo.af_inet.opt;
+ struct rtable *rt;
+ struct iphdr *iph;
+
+ /* Skip all of this if the packet is already routed,
+ * f.e. by something like SCTP.
+ */
+ rt = (struct rtable *) skb->dst;
+ if (rt != NULL)
+ goto packet_routed;
+
+ /* Make sure we can route this packet. */
+ rt = (struct rtable *)__sk_dst_check(sk, 0);
+ if (rt == NULL) {
+ u32 daddr;
+
+ /* Use correct destination address if we have options. */
+ daddr = sk->daddr;
+ if(opt && opt->srr)
+ daddr = opt->faddr;
+
+ /* If this fails, retransmit mechanism of transport layer will
+ * keep trying until route appears or the connection times itself
+ * out.
+ */
+ if (ip_route_output(&rt, daddr, sk->saddr,
+ RT_CONN_FLAGS(sk),
+ sk->bound_dev_if))
+ goto no_route;
+ __sk_dst_set(sk, &rt->u.dst);
+ sk->route_caps = rt->u.dst.dev->features;
+ }
+ skb->dst = dst_clone(&rt->u.dst);
+
+packet_routed:
+ if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
+ goto no_route;
+
+ /* OK, we know where to send it, allocate and build IP header. */
+ iph = (struct iphdr *) skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
+ *((__u16 *)iph) = htons((4 << 12) | (5 << 8) | (sk->protinfo.af_inet.tos & 0xff));
+ iph->tot_len = htons(skb->len);
+ iph->frag_off = 0;
+ iph->ttl = sk->protinfo.af_inet.ttl;
+ iph->protocol = sk->protocol;
+ iph->saddr = rt->rt_src;
+ iph->daddr = rt->rt_dst;
+ skb->nh.iph = iph;
+ /* Transport layer set skb->h.foo itself. */
+
+ if(opt && opt->optlen) {
+ iph->ihl += opt->optlen >> 2;
+ ip_options_build(skb, opt, sk->daddr, rt, 0);
+ }
+
+ return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
+ ip_queue_xmit2);
+
+no_route:
+ IP_INC_STATS(IpOutNoRoutes);
+ kfree_skb(skb);
+ return -EHOSTUNREACH;
+}
+
+/*
+ * Build and send a packet, with as little as one copy
+ *
+ * Doesn't care much about ip options... option length can be
+ * different for fragment at 0 and other fragments.
+ *
+ * Note that the fragment at the highest offset is sent first,
+ * so the getfrag routine can fill in the TCP/UDP checksum header
+ * field in the last fragment it sends... actually it also helps
+ * the reassemblers, they can put most packets in at the head of
+ * the fragment queue, and they know the total size in advance. This
+ * last feature will measurably improve the Linux fragment handler one
+ * day.
+ *
+ * The callback takes four args: an arbitrary pointer (a copy of frag),
+ * the destination buffer (char *), the offset to copy from, and the
+ * length to be copied.
+ */
+
+static int ip_build_xmit_slow(struct sock *sk,
+ int getfrag (const void *,
+ char *,
+ unsigned int,
+ unsigned int),
+ const void *frag,
+ unsigned length,
+ struct ipcm_cookie *ipc,
+ struct rtable *rt,
+ int flags)
+{
+ unsigned int fraglen, maxfraglen, fragheaderlen;
+ int err;
+ int offset, mf;
+ int mtu;
+ u16 id;
+
+ int hh_len = (rt->u.dst.dev->hard_header_len + 15)&~15;
+ int nfrags=0;
+ struct ip_options *opt = ipc->opt;
+ int df = 0;
+
+ mtu = rt->u.dst.pmtu;
+ if (ip_dont_fragment(sk, &rt->u.dst))
+ df = htons(IP_DF);
+
+ length -= sizeof(struct iphdr);
+
+ if (opt) {
+ fragheaderlen = sizeof(struct iphdr) + opt->optlen;
+ maxfraglen = ((mtu-sizeof(struct iphdr)-opt->optlen) & ~7) + fragheaderlen;
+ } else {
+ fragheaderlen = sizeof(struct iphdr);
+
+ /*
+ * Fragheaderlen is the size of 'overhead' on each buffer. Now work
+ * out the size of the frames to send.
+ */
+
+ maxfraglen = ((mtu-sizeof(struct iphdr)) & ~7) + fragheaderlen;
+ }
+
+ if (length + fragheaderlen > 0xFFFF) {
+ ip_local_error(sk, EMSGSIZE, rt->rt_dst, sk->dport, mtu);
+ return -EMSGSIZE;
+ }
+
+ /*
+ * Start at the end of the frame by handling the remainder.
+ */
+
+ offset = length - (length % (maxfraglen - fragheaderlen));
+
+ /*
+ * Amount of memory to allocate for final fragment.
+ */
+
+ fraglen = length - offset + fragheaderlen;
+
+ if (length-offset==0) {
+ fraglen = maxfraglen;
+ offset -= maxfraglen-fragheaderlen;
+ }
+
+ /*
+ * The last fragment will not have MF (more fragments) set.
+ */
+
+ mf = 0;
+
+ /*
+ * Don't fragment packets for path mtu discovery.
+ */
+
+ if (offset > 0 && sk->protinfo.af_inet.pmtudisc==IP_PMTUDISC_DO) {
+ ip_local_error(sk, EMSGSIZE, rt->rt_dst, sk->dport, mtu);
+ return -EMSGSIZE;
+ }
+ if (flags&MSG_PROBE)
+ goto out;
+
+ /*
+ * Begin outputting the bytes.
+ */
+
+ id = sk->protinfo.af_inet.id++;
+
+ do {
+ char *data;
+ struct sk_buff * skb;
+
+ /*
+ * Get the memory we require with some space left for alignment.
+ */
+
+ skb = sock_alloc_send_skb(sk, fraglen+hh_len+15, flags&MSG_DONTWAIT, &err);
+ if (skb == NULL)
+ goto error;
+
+ /*
+ * Fill in the control structures
+ */
+
+ skb->priority = sk->priority;
+ skb->dst = dst_clone(&rt->u.dst);
+ skb_reserve(skb, hh_len);
+
+ /*
+ * Find where to start putting bytes.
+ */
+
+ data = skb_put(skb, fraglen);
+ skb->nh.iph = (struct iphdr *)data;
+
+ /*
+ * Only write IP header onto non-raw packets
+ */
+
+ {
+ struct iphdr *iph = (struct iphdr *)data;
+
+ iph->version = 4;
+ iph->ihl = 5;
+ if (opt) {
+ iph->ihl += opt->optlen>>2;
+ ip_options_build(skb, opt,
+ ipc->addr, rt, offset);
+ }
+ iph->tos = sk->protinfo.af_inet.tos;
+ iph->tot_len = htons(fraglen - fragheaderlen + iph->ihl*4);
+ iph->frag_off = htons(offset>>3)|mf|df;
+ iph->id = id;
+ if (!mf) {
+ if (offset || !df) {
+ /* Select an unpredictable ident only
+ * for packets without DF or having
+ * been fragmented.
+ */
+ __ip_select_ident(iph, &rt->u.dst);
+ id = iph->id;
+ }
+
+ /*
+ * Any further fragments will have MF set.
+ */
+ mf = htons(IP_MF);
+ }
+ if (rt->rt_type == RTN_MULTICAST)
+ iph->ttl = sk->protinfo.af_inet.mc_ttl;
+ else
+ iph->ttl = sk->protinfo.af_inet.ttl;
+ iph->protocol = sk->protocol;
+ iph->check = 0;
+ iph->saddr = rt->rt_src;
+ iph->daddr = rt->rt_dst;
+ iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+ data += iph->ihl*4;
+ }
+
+ /*
+ * User data callback
+ */
+
+ if (getfrag(frag, data, offset, fraglen-fragheaderlen)) {
+ err = -EFAULT;
+ kfree_skb(skb);
+ goto error;
+ }
+
+ offset -= (maxfraglen-fragheaderlen);
+ fraglen = maxfraglen;
+
+ nfrags++;
+
+ err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL,
+ skb->dst->dev, output_maybe_reroute);
+ if (err) {
+ if (err > 0)
+ err = sk->protinfo.af_inet.recverr ? net_xmit_errno(err) : 0;
+ if (err)
+ goto error;
+ }
+ } while (offset >= 0);
+
+ if (nfrags>1)
+ ip_statistics[smp_processor_id()*2 + !in_softirq()].IpFragCreates += nfrags;
+out:
+ return 0;
+
+error:
+ IP_INC_STATS(IpOutDiscards);
+ if (nfrags>1)
+ ip_statistics[smp_processor_id()*2 + !in_softirq()].IpFragCreates += nfrags;
+ return err;
+}
+
+/*
+ * Fast path for unfragmented packets.
+ */
+int ip_build_xmit(struct sock *sk,
+ int getfrag (const void *,
+ char *,
+ unsigned int,
+ unsigned int),
+ const void *frag,
+ unsigned length,
+ struct ipcm_cookie *ipc,
+ struct rtable *rt,
+ int flags)
+{
+ int err;
+ struct sk_buff *skb;
+ int df;
+ struct iphdr *iph;
+
+ /*
+ * Try the simple case first. This leaves fragmented frames, and by
+ * choice RAW frames within 20 bytes of maximum size (rare) to the long path
+ */
+
+ if (!sk->protinfo.af_inet.hdrincl) {
+ length += sizeof(struct iphdr);
+
+ /*
+ * Check for slow path.
+ */
+ if (length > rt->u.dst.pmtu || ipc->opt != NULL)
+ return ip_build_xmit_slow(sk,getfrag,frag,length,ipc,rt,flags);
+ } else {
+ if (length > rt->u.dst.dev->mtu) {
+ ip_local_error(sk, EMSGSIZE, rt->rt_dst, sk->dport, rt->u.dst.dev->mtu);
+ return -EMSGSIZE;
+ }
+ }
+ if (flags&MSG_PROBE)
+ goto out;
+
+ /*
+ * Do path mtu discovery if needed.
+ */
+ df = 0;
+ if (ip_dont_fragment(sk, &rt->u.dst))
+ df = htons(IP_DF);
+
+ /*
+ * Fast path for unfragmented frames without options.
+ */
+ {
+ int hh_len = (rt->u.dst.dev->hard_header_len + 15)&~15;
+
+ skb = sock_alloc_send_skb(sk, length+hh_len+15,
+ flags&MSG_DONTWAIT, &err);
+ if(skb==NULL)
+ goto error;
+ skb_reserve(skb, hh_len);
+ }
+
+ skb->priority = sk->priority;
+ skb->dst = dst_clone(&rt->u.dst);
+
+ skb->nh.iph = iph = (struct iphdr *)skb_put(skb, length);
+
+ if(!sk->protinfo.af_inet.hdrincl) {
+ iph->version=4;
+ iph->ihl=5;
+ iph->tos=sk->protinfo.af_inet.tos;
+ iph->tot_len = htons(length);
+ iph->frag_off = df;
+ iph->ttl=sk->protinfo.af_inet.mc_ttl;
+ ip_select_ident(iph, &rt->u.dst, sk);
+ if (rt->rt_type != RTN_MULTICAST)
+ iph->ttl=sk->protinfo.af_inet.ttl;
+ iph->protocol=sk->protocol;
+ iph->saddr=rt->rt_src;
+ iph->daddr=rt->rt_dst;
+ iph->check=0;
+ iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+ err = getfrag(frag, ((char *)iph)+iph->ihl*4,0, length-iph->ihl*4);
+ }
+ else
+ err = getfrag(frag, (void *)iph, 0, length);
+
+ if (err)
+ goto error_fault;
+
+ err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
+ output_maybe_reroute);
+ if (err > 0)
+ err = sk->protinfo.af_inet.recverr ? net_xmit_errno(err) : 0;
+ if (err)
+ goto error;
+out:
+ return 0;
+
+error_fault:
+ err = -EFAULT;
+ kfree_skb(skb);
+error:
+ IP_INC_STATS(IpOutDiscards);
+ return err;
+}
+
+/*
+ * This IP datagram is too large to be sent in one piece. Break it up into
+ * smaller pieces (each of size equal to IP header plus
+ * a block of the data of the original IP data part) that will yet fit in a
+ * single device frame, and queue such a frame for sending.
+ *
+ * Yes this is inefficient, feel free to submit a quicker one.
+ */
+
+int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
+{
+ struct iphdr *iph;
+ int raw = 0;
+ int ptr;
+ struct net_device *dev;
+ struct sk_buff *skb2;
+ unsigned int mtu, hlen, left, len;
+ int offset;
+ int not_last_frag;
+ struct rtable *rt = (struct rtable*)skb->dst;
+ int err = 0;
+
+ dev = rt->u.dst.dev;
+
+ /*
+ * Point into the IP datagram header.
+ */
+
+ iph = skb->nh.iph;
+
+ /*
+ * Setup starting values.
+ */
+
+ hlen = iph->ihl * 4;
+ left = skb->len - hlen; /* Space per frame */
+ mtu = rt->u.dst.pmtu - hlen; /* Size of data space */
+ ptr = raw + hlen; /* Where to start from */
+
+ /*
+ * Fragment the datagram.
+ */
+
+ offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
+ not_last_frag = iph->frag_off & htons(IP_MF);
+
+ /*
+ * Keep copying data until we run out.
+ */
+
+ while(left > 0) {
+ len = left;
+ /* IF: it doesn't fit, use 'mtu' - the data space left */
+ if (len > mtu)
+ len = mtu;
+ /* IF: we are not sending up to and including the packet end,
+ then align the next start on an eight-byte boundary */
+ if (len < left) {
+ len &= ~7;
+ }
+ /*
+ * Allocate buffer.
+ */
+
+ if ((skb2 = alloc_skb(len+hlen+dev->hard_header_len+15,GFP_ATOMIC)) == NULL) {
+ NETDEBUG(printk(KERN_INFO "IP: frag: no memory for new fragment!\n"));
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ /*
+ * Set up data on packet
+ */
+
+ skb2->pkt_type = skb->pkt_type;
+ skb2->priority = skb->priority;
+ skb_reserve(skb2, (dev->hard_header_len+15)&~15);
+ skb_put(skb2, len + hlen);
+ skb2->nh.raw = skb2->data;
+ skb2->h.raw = skb2->data + hlen;
+ skb2->protocol = skb->protocol;
+ skb2->security = skb->security;
+
+ /*
+ * Charge the memory for the fragment to any owner
+ * it might possess
+ */
+
+ if (skb->sk)
+ skb_set_owner_w(skb2, skb->sk);
+ skb2->dst = dst_clone(skb->dst);
+ skb2->dev = skb->dev;
+ skb2->physindev = skb->physindev;
+ skb2->physoutdev = skb->physoutdev;
+
+ /*
+ * Copy the packet header into the new buffer.
+ */
+
+ memcpy(skb2->nh.raw, skb->data, hlen);
+
+ /*
+ * Copy a block of the IP datagram.
+ */
+ if (skb_copy_bits(skb, ptr, skb2->h.raw, len))
+ BUG();
+ left -= len;
+
+ /*
+ * Fill in the new header fields.
+ */
+ iph = skb2->nh.iph;
+ iph->frag_off = htons((offset >> 3));
+
+ /* ANK: dirty, but effective trick. Upgrade options only if
+ * the segment to be fragmented was THE FIRST (otherwise,
+ * options are already fixed) and make it ONCE
+ * on the initial skb, so that all the following fragments
+ * will inherit fixed options.
+ */
+ if (offset == 0)
+ ip_options_fragment(skb);
+
+ /* Copy the flags to each fragment. */
+ IPCB(skb2)->flags = IPCB(skb)->flags;
+
+ /*
+ * Added AC : If we are fragmenting a fragment that's not the
+ * last fragment then keep MF set on each fragment
+ */
+ if (left > 0 || not_last_frag)
+ iph->frag_off |= htons(IP_MF);
+ ptr += len;
+ offset += len;
+
+#ifdef CONFIG_NET_SCHED
+ skb2->tc_index = skb->tc_index;
+#endif
+#ifdef CONFIG_NETFILTER
+ skb2->nfmark = skb->nfmark;
+ /* Connection association is same as pre-frag packet */
+ skb2->nfct = skb->nfct;
+ nf_conntrack_get(skb2->nfct);
+#ifdef CONFIG_NETFILTER_DEBUG
+ skb2->nf_debug = skb->nf_debug;
+#endif
+#endif
+
+ /*
+ * Put this fragment into the sending queue.
+ */
+
+ IP_INC_STATS(IpFragCreates);
+
+ iph->tot_len = htons(len + hlen);
+
+ ip_send_check(iph);
+ memcpy(skb2->data - 16, skb->data - 16, 16);
+
+ err = output(skb2);
+ if (err)
+ goto fail;
+ }
+ kfree_skb(skb);
+ IP_INC_STATS(IpFragOKs);
+ return err;
+
+fail:
+ kfree_skb(skb);
+ IP_INC_STATS(IpFragFails);
+ return err;
+}
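/*
 * Editor's illustrative sketch -- not part of the original patch.  It prints
 * the arithmetic ip_fragment() above performs for one concrete datagram:
 * every fragment except the last carries a multiple of 8 payload bytes, the
 * offset field counts 8-byte units, and MF stays set on all but the last
 * piece (of an unfragmented original).  Names and numbers are made up.
 */
#include <stdio.h>

static void show_fragments(unsigned int tot_len, unsigned int hlen,
			   unsigned int pmtu)
{
	unsigned int left = tot_len - hlen;	/* payload bytes still to send */
	unsigned int space = pmtu - hlen;	/* payload room per fragment   */
	unsigned int offset = 0;

	while (left > 0) {
		unsigned int len = left;

		if (len > space)
			len = space & ~7u;	/* not last: keep 8-byte aligned */
		printf("frag: data offset=%u (field %u), frame len=%u, MF=%d\n",
		       offset, offset >> 3, len + hlen, left - len > 0);
		offset += len;
		left -= len;
	}
}

int main(void)
{
	/* 4000-byte datagram, 20-byte header, PMTU 1500:
	 * frames of 1500, 1500 and 1040 bytes. */
	show_fragments(4000, 20, 1500);
	return 0;
}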
+
+/*
+ * Fetch data from kernel space and fill in checksum if needed.
+ */
+static int ip_reply_glue_bits(const void *dptr, char *to, unsigned int offset,
+ unsigned int fraglen)
+{
+ struct ip_reply_arg *dp = (struct ip_reply_arg*)dptr;
+ u16 *pktp = (u16 *)to;
+ struct iovec *iov;
+ int len;
+ int hdrflag = 1;
+
+ iov = &dp->iov[0];
+ if (offset >= iov->iov_len) {
+ offset -= iov->iov_len;
+ iov++;
+ hdrflag = 0;
+ }
+ len = iov->iov_len - offset;
+ if (fraglen > len) { /* overlapping. */
+ dp->csum = csum_partial_copy_nocheck(iov->iov_base+offset, to, len,
+ dp->csum);
+ offset = 0;
+ fraglen -= len;
+ to += len;
+ iov++;
+ }
+
+ dp->csum = csum_partial_copy_nocheck(iov->iov_base+offset, to, fraglen,
+ dp->csum);
+
+ if (hdrflag && dp->csumoffset)
+ *(pktp + dp->csumoffset) = csum_fold(dp->csum); /* fill in checksum */
+ return 0;
+}
+
+/*
+ * Generic function to send a packet as reply to another packet.
+ * Used to send TCP resets so far. ICMP should use this function too.
+ *
+ * Should run single threaded per socket because it uses the sock
+ * structure to pass arguments.
+ */
+void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
+ unsigned int len)
+{
+ struct {
+ struct ip_options opt;
+ char data[40];
+ } replyopts;
+ struct ipcm_cookie ipc;
+ u32 daddr;
+ struct rtable *rt = (struct rtable*)skb->dst;
+
+ if (ip_options_echo(&replyopts.opt, skb))
+ return;
+
+ daddr = ipc.addr = rt->rt_src;
+ ipc.opt = NULL;
+
+ if (replyopts.opt.optlen) {
+ ipc.opt = &replyopts.opt;
+
+ if (ipc.opt->srr)
+ daddr = replyopts.opt.faddr;
+ }
+
+ if (ip_route_output(&rt, daddr, rt->rt_spec_dst, RT_TOS(skb->nh.iph->tos), 0))
+ return;
+
+ /* And let IP do all the hard work.
+
+ This chunk is not reentrant, hence the spinlock.
+ Note that it relies on the fact that this function is called
+ with BH locally disabled and that sk cannot already be spinlocked.
+ */
+ bh_lock_sock(sk);
+ sk->protinfo.af_inet.tos = skb->nh.iph->tos;
+ sk->priority = skb->priority;
+ sk->protocol = skb->nh.iph->protocol;
+ ip_build_xmit(sk, ip_reply_glue_bits, arg, len, &ipc, rt, MSG_DONTWAIT);
+ bh_unlock_sock(sk);
+
+ ip_rt_put(rt);
+}
+
+/*
+ * IP protocol layer initialiser
+ */
+
+static struct packet_type ip_packet_type =
+{
+ __constant_htons(ETH_P_IP),
+ NULL, /* All devices */
+ ip_rcv,
+ (void*)1,
+ NULL,
+};
+
+/*
+ * IP registers the packet type and then calls the subprotocol initialisers
+ */
+
+void __init ip_init(void)
+{
+ dev_add_pack(&ip_packet_type);
+
+ ip_rt_init();
+ inet_initpeers();
+
+#ifdef CONFIG_IP_MULTICAST
+ proc_net_create("igmp", 0, ip_mc_procinfo);
+#endif
+}
diff --git a/br-nf-bds/linux/net/ipv4/netfilter/ip_tables.c b/br-nf-bds/linux/net/ipv4/netfilter/ip_tables.c
new file mode 100644
index 0000000..a840f66
--- /dev/null
+++ b/br-nf-bds/linux/net/ipv4/netfilter/ip_tables.c
@@ -0,0 +1,1811 @@
+/*
+ * Packet matching code.
+ *
+ * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
+ * Copyright (C) 2000-2002 Netfilter core team <coreteam@netfilter.org>
+ *
+ * 19 Jan 2002 Harald Welte <laforge@gnumonks.org>
+ * - increase module usage count as soon as we have rules inside
+ * a table
+ */
+#include <linux/config.h>
+#include <linux/skbuff.h>
+#include <linux/kmod.h>
+#include <linux/vmalloc.h>
+#include <linux/netdevice.h>
+#include <linux/module.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/icmp.h>
+#include <net/ip.h>
+#include <asm/uaccess.h>
+#include <asm/semaphore.h>
+#include <linux/proc_fs.h>
+
+#include <linux/netfilter_ipv4/ip_tables.h>
+
+/*#define DEBUG_IP_FIREWALL*/
+/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
+/*#define DEBUG_IP_FIREWALL_USER*/
+
+#ifdef DEBUG_IP_FIREWALL
+#define dprintf(format, args...) printk(format , ## args)
+#else
+#define dprintf(format, args...)
+#endif
+
+#ifdef DEBUG_IP_FIREWALL_USER
+#define duprintf(format, args...) printk(format , ## args)
+#else
+#define duprintf(format, args...)
+#endif
+
+#ifdef CONFIG_NETFILTER_DEBUG
+#define IP_NF_ASSERT(x) \
+do { \
+ if (!(x)) \
+ printk("IP_NF_ASSERT: %s:%s:%u\n", \
+ __FUNCTION__, __FILE__, __LINE__); \
+} while(0)
+#else
+#define IP_NF_ASSERT(x)
+#endif
+#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
+
+/* Mutex protects lists (only traversed in user context). */
+static DECLARE_MUTEX(ipt_mutex);
+
+/* Must have mutex */
+#define ASSERT_READ_LOCK(x) IP_NF_ASSERT(down_trylock(&ipt_mutex) != 0)
+#define ASSERT_WRITE_LOCK(x) IP_NF_ASSERT(down_trylock(&ipt_mutex) != 0)
+#include <linux/netfilter_ipv4/lockhelp.h>
+#include <linux/netfilter_ipv4/listhelp.h>
+
+#if 0
+/* All the better to debug you with... */
+#define static
+#define inline
+#endif
+
+/* Locking is simple: we assume at worst case there will be one packet
+ in user context and one from bottom halves (or soft irq if Alexey's
+ softnet patch was applied).
+
+ We keep a set of rules for each CPU, so we can avoid write-locking
+ them; doing a readlock_bh() stops packets coming through if we're
+ in user context.
+
+ To be cache friendly on SMP, we arrange them like so:
+ [ n-entries ]
+ ... cache-align padding ...
+ [ n-entries ]
+
+ Hence the start of any table is given by get_table() below. */
+
+/* The table itself */
+struct ipt_table_info
+{
+ /* Size per table */
+ unsigned int size;
+ /* Number of entries: FIXME. --RR */
+ unsigned int number;
+ /* Initial number of entries. Needed for module usage count */
+ unsigned int initial_entries;
+
+ /* Entry points and underflows */
+ unsigned int hook_entry[NF_IP_NUMHOOKS];
+ unsigned int underflow[NF_IP_NUMHOOKS];
+
+ /* ipt_entry tables: one per CPU */
+ char entries[0] __attribute__((aligned(SMP_CACHE_BYTES)));
+};
+
+static LIST_HEAD(ipt_target);
+static LIST_HEAD(ipt_match);
+static LIST_HEAD(ipt_tables);
+#define ADD_COUNTER(c,b,p) do { (c).bcnt += (b); (c).pcnt += (p); } while(0)
+
+#ifdef CONFIG_SMP
+#define TABLE_OFFSET(t,p) (SMP_ALIGN((t)->size)*(p))
+#else
+#define TABLE_OFFSET(t,p) 0
+#endif
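/*
 * Editor's illustrative sketch -- not part of the original patch.  It shows
 * how TABLE_OFFSET above locates a CPU's private copy of the ruleset: each
 * copy is padded to a cache-line multiple so CPUs never share a line.  The
 * 64-byte line size and the 1000-byte table are hypothetical.
 */
#include <stdio.h>

#define TOY_CACHE_BYTES 64
#define TOY_SMP_ALIGN(x) (((x) + TOY_CACHE_BYTES - 1) & ~(TOY_CACHE_BYTES - 1))

static unsigned int toy_table_offset(unsigned int table_size, unsigned int cpu)
{
	return TOY_SMP_ALIGN(table_size) * cpu;	/* start of this CPU's entries */
}

int main(void)
{
	unsigned int size = 1000;		/* bytes of ipt_entry data per copy */
	unsigned int cpu;

	for (cpu = 0; cpu < 4; cpu++)
		printf("cpu %u starts at byte %u\n", cpu, toy_table_offset(size, cpu));
	return 0;				/* prints 0, 1024, 2048, 3072 */
}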
+
+#if 0
+#define down(x) do { printk("DOWN:%u:" #x "\n", __LINE__); down(x); } while(0)
+#define down_interruptible(x) ({ int __r; printk("DOWNi:%u:" #x "\n", __LINE__); __r = down_interruptible(x); if (__r != 0) printk("ABORT-DOWNi:%u\n", __LINE__); __r; })
+#define up(x) do { printk("UP:%u:" #x "\n", __LINE__); up(x); } while(0)
+#endif
+
+/* Returns whether the packet matches the rule or not. */
+static inline int
+ip_packet_match(const struct iphdr *ip,
+ const char *indev,
+ const char *physindev,
+ const char *outdev,
+ const char *physoutdev,
+ const struct ipt_ip *ipinfo,
+ int isfrag)
+{
+ size_t i;
+ unsigned long ret;
+ unsigned long ret2;
+
+#define FWINV(bool,invflg) ((bool) ^ !!(ipinfo->invflags & invflg))
+
+ if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
+ IPT_INV_SRCIP)
+ || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
+ IPT_INV_DSTIP)) {
+ dprintf("Source or dest mismatch.\n");
+
+ dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n",
+ NIPQUAD(ip->saddr),
+ NIPQUAD(ipinfo->smsk.s_addr),
+ NIPQUAD(ipinfo->src.s_addr),
+ ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
+ dprintf("DST: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n",
+ NIPQUAD(ip->daddr),
+ NIPQUAD(ipinfo->dmsk.s_addr),
+ NIPQUAD(ipinfo->dst.s_addr),
+ ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
+ return 0;
+ }
+
+ /* Look for ifname matches; this should unroll nicely. */
+ for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
+ ret |= (((const unsigned long *)indev)[i]
+ ^ ((const unsigned long *)ipinfo->iniface)[i])
+ & ((const unsigned long *)ipinfo->iniface_mask)[i];
+ }
+
+ for (i = 0, ret2 = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
+ ret2 |= (((const unsigned long *)physindev)[i]
+ ^ ((const unsigned long *)ipinfo->iniface)[i])
+ & ((const unsigned long *)ipinfo->iniface_mask)[i];
+ }
+
+ if (FWINV(ret != 0 && ret2 != 0, IPT_INV_VIA_IN)) {
+ dprintf("VIA in mismatch (%s vs %s).%s\n",
+ indev, ipinfo->iniface,
+ ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
+ return 0;
+ }
+
+ for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
+ ret |= (((const unsigned long *)outdev)[i]
+ ^ ((const unsigned long *)ipinfo->outiface)[i])
+ & ((const unsigned long *)ipinfo->outiface_mask)[i];
+ }
+
+ for (i = 0, ret2 = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
+ ret2 |= (((const unsigned long *)physoutdev)[i]
+ ^ ((const unsigned long *)ipinfo->outiface)[i])
+ & ((const unsigned long *)ipinfo->outiface_mask)[i];
+ }
+
+ if (FWINV(ret != 0 && ret2 != 0, IPT_INV_VIA_OUT)) {
+ dprintf("VIA out mismatch (%s vs %s).%s\n",
+ outdev, ipinfo->outiface,
+ ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
+ return 0;
+ }
+
+ /* Check specific protocol */
+ if (ipinfo->proto
+ && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
+ dprintf("Packet protocol %hi does not match %hi.%s\n",
+ ip->protocol, ipinfo->proto,
+ ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
+ return 0;
+ }
+
+ /* If we have a fragment rule but the packet is not a fragment
+ * then we return zero */
+ if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
+ dprintf("Fragment rule but not fragment.%s\n",
+ ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
+ return 0;
+ }
+
+ return 1;
+}
+
+static inline int
+ip_checkentry(const struct ipt_ip *ip)
+{
+ if (ip->flags & ~IPT_F_MASK) {
+ duprintf("Unknown flag bits set: %08X\n",
+ ip->flags & ~IPT_F_MASK);
+ return 0;
+ }
+ if (ip->invflags & ~IPT_INV_MASK) {
+ duprintf("Unknown invflag bits set: %08X\n",
+ ip->invflags & ~IPT_INV_MASK);
+ return 0;
+ }
+ return 1;
+}
+
+static unsigned int
+ipt_error(struct sk_buff **pskb,
+ unsigned int hooknum,
+ const struct net_device *in,
+ const struct net_device *out,
+ const void *targinfo,
+ void *userinfo)
+{
+ if (net_ratelimit())
+ printk("ip_tables: error: `%s'\n", (char *)targinfo);
+
+ return NF_DROP;
+}
+
+static inline
+int do_match(struct ipt_entry_match *m,
+ const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int offset,
+ const void *hdr,
+ u_int16_t datalen,
+ int *hotdrop)
+{
+ /* Stop iteration if it doesn't match */
+ if (!m->u.kernel.match->match(skb, in, out, m->data,
+ offset, hdr, datalen, hotdrop))
+ return 1;
+ else
+ return 0;
+}
+
+static inline struct ipt_entry *
+get_entry(void *base, unsigned int offset)
+{
+ return (struct ipt_entry *)(base + offset);
+}
+
+/* Returns one of the generic firewall policies, like NF_ACCEPT. */
+unsigned int
+ipt_do_table(struct sk_buff **pskb,
+ unsigned int hook,
+ const struct net_device *in,
+ const struct net_device *out,
+ struct ipt_table *table,
+ void *userdata)
+{
+ static const char nulldevname[IFNAMSIZ] = { 0 };
+ u_int16_t offset;
+ struct iphdr *ip;
+ void *protohdr;
+ u_int16_t datalen;
+ int hotdrop = 0;
+ /* Initializing verdict to NF_DROP keeps gcc happy. */
+ unsigned int verdict = NF_DROP;
+ const char *indev, *outdev;
+ const char *physindev, *physoutdev;
+ void *table_base;
+ struct ipt_entry *e, *back;
+
+ /* Initialization */
+ ip = (*pskb)->nh.iph;
+ protohdr = (u_int32_t *)ip + ip->ihl;
+ datalen = (*pskb)->len - ip->ihl * 4;
+ indev = in ? in->name : nulldevname;
+ outdev = out ? out->name : nulldevname;
+ physindev = (*pskb)->physindev ? (*pskb)->physindev->name : nulldevname;
+ physoutdev = (*pskb)->physoutdev ? (*pskb)->physoutdev->name : nulldevname;
+
+	/* We handle fragments by dealing with the first fragment as
+	 * if it was a normal packet. All other fragments are treated
+	 * normally, except that they will NEVER match rules that ask
+	 * about things we don't know (e.g. the TCP SYN flag or ports).
+	 * If the rule is also a fragment-specific rule, non-fragments
+	 * won't match it. */
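+	/* Example: a rule matching "-p tcp --dport 80" can never match a
+	   non-first fragment, because tcp_match() below refuses to match
+	   when offset != 0; only a rule flagged IPT_F_FRAG, or one that
+	   asks nothing about the transport header, will see such fragments. */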
+ offset = ntohs(ip->frag_off) & IP_OFFSET;
+
+ read_lock_bh(&table->lock);
+ IP_NF_ASSERT(table->valid_hooks & (1 << hook));
+ table_base = (void *)table->private->entries
+ + TABLE_OFFSET(table->private,
+ cpu_number_map(smp_processor_id()));
+ e = get_entry(table_base, table->private->hook_entry[hook]);
+
+#ifdef CONFIG_NETFILTER_DEBUG
+	/* Check no one else is using our table */
+ if (((struct ipt_entry *)table_base)->comefrom != 0xdead57ac
+ && ((struct ipt_entry *)table_base)->comefrom != 0xeeeeeeec) {
+ printk("ASSERT: CPU #%u, %s comefrom(%p) = %X\n",
+ smp_processor_id(),
+ table->name,
+ &((struct ipt_entry *)table_base)->comefrom,
+ ((struct ipt_entry *)table_base)->comefrom);
+ }
+ ((struct ipt_entry *)table_base)->comefrom = 0x57acc001;
+#endif
+
+ /* For return from builtin chain */
+ back = get_entry(table_base, table->private->underflow[hook]);
+
+ do {
+ IP_NF_ASSERT(e);
+ IP_NF_ASSERT(back);
+ (*pskb)->nfcache |= e->nfcache;
+ if (ip_packet_match(ip, indev, physindev, outdev, physoutdev, &e->ip, offset)) {
+ struct ipt_entry_target *t;
+
+ if (IPT_MATCH_ITERATE(e, do_match,
+ *pskb, in, out,
+ offset, protohdr,
+ datalen, &hotdrop) != 0)
+ goto no_match;
+
+ ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);
+
+ t = ipt_get_target(e);
+ IP_NF_ASSERT(t->u.kernel.target);
+ /* Standard target? */
+ if (!t->u.kernel.target->target) {
+ int v;
+
+ v = ((struct ipt_standard_target *)t)->verdict;
+ if (v < 0) {
+ /* Pop from stack? */
+ if (v != IPT_RETURN) {
+ verdict = (unsigned)(-v) - 1;
+ break;
+ }
+ e = back;
+ back = get_entry(table_base,
+ back->comefrom);
+ continue;
+ }
+ if (table_base + v
+ != (void *)e + e->next_offset) {
+ /* Save old back ptr in next entry */
+ struct ipt_entry *next
+ = (void *)e + e->next_offset;
+ next->comefrom
+ = (void *)back - table_base;
+ /* set back pointer to next entry */
+ back = next;
+ }
+
+ e = get_entry(table_base, v);
+ } else {
+ /* Targets which reenter must return
+ abs. verdicts */
+#ifdef CONFIG_NETFILTER_DEBUG
+ ((struct ipt_entry *)table_base)->comefrom
+ = 0xeeeeeeec;
+#endif
+ verdict = t->u.kernel.target->target(pskb,
+ hook,
+ in, out,
+ t->data,
+ userdata);
+
+#ifdef CONFIG_NETFILTER_DEBUG
+ if (((struct ipt_entry *)table_base)->comefrom
+ != 0xeeeeeeec
+ && verdict == IPT_CONTINUE) {
+ printk("Target %s reentered!\n",
+ t->u.kernel.target->name);
+ verdict = NF_DROP;
+ }
+ ((struct ipt_entry *)table_base)->comefrom
+ = 0x57acc001;
+#endif
+ /* Target might have changed stuff. */
+ ip = (*pskb)->nh.iph;
+ protohdr = (u_int32_t *)ip + ip->ihl;
+ datalen = (*pskb)->len - ip->ihl * 4;
+
+ if (verdict == IPT_CONTINUE)
+ e = (void *)e + e->next_offset;
+ else
+ /* Verdict */
+ break;
+ }
+ } else {
+
+ no_match:
+ e = (void *)e + e->next_offset;
+ }
+ } while (!hotdrop);
+
+#ifdef CONFIG_NETFILTER_DEBUG
+ ((struct ipt_entry *)table_base)->comefrom = 0xdead57ac;
+#endif
+ read_unlock_bh(&table->lock);
+
+#ifdef DEBUG_ALLOW_ALL
+ return NF_ACCEPT;
+#else
+ if (hotdrop)
+ return NF_DROP;
+ else return verdict;
+#endif
+}
+
+/* If it succeeds, returns element and locks mutex */
+static inline void *
+find_inlist_lock_noload(struct list_head *head,
+ const char *name,
+ int *error,
+ struct semaphore *mutex)
+{
+ void *ret;
+
+#if 0
+ duprintf("find_inlist: searching for `%s' in %s.\n",
+ name, head == &ipt_target ? "ipt_target"
+ : head == &ipt_match ? "ipt_match"
+ : head == &ipt_tables ? "ipt_tables" : "UNKNOWN");
+#endif
+
+ *error = down_interruptible(mutex);
+ if (*error != 0)
+ return NULL;
+
+ ret = list_named_find(head, name);
+ if (!ret) {
+ *error = -ENOENT;
+ up(mutex);
+ }
+ return ret;
+}
+
+#ifndef CONFIG_KMOD
+#define find_inlist_lock(h,n,p,e,m) find_inlist_lock_noload((h),(n),(e),(m))
+#else
+static void *
+find_inlist_lock(struct list_head *head,
+ const char *name,
+ const char *prefix,
+ int *error,
+ struct semaphore *mutex)
+{
+ void *ret;
+
+ ret = find_inlist_lock_noload(head, name, error, mutex);
+ if (!ret) {
+ char modulename[IPT_FUNCTION_MAXNAMELEN + strlen(prefix) + 1];
+ strcpy(modulename, prefix);
+ strcat(modulename, name);
+ duprintf("find_inlist: loading `%s'.\n", modulename);
+ request_module(modulename);
+ ret = find_inlist_lock_noload(head, name, error, mutex);
+ }
+
+ return ret;
+}
+#endif
+
+static inline struct ipt_table *
+find_table_lock(const char *name, int *error, struct semaphore *mutex)
+{
+ return find_inlist_lock(&ipt_tables, name, "iptable_", error, mutex);
+}
+
+static inline struct ipt_match *
+find_match_lock(const char *name, int *error, struct semaphore *mutex)
+{
+ return find_inlist_lock(&ipt_match, name, "ipt_", error, mutex);
+}
+
+static inline struct ipt_target *
+find_target_lock(const char *name, int *error, struct semaphore *mutex)
+{
+ return find_inlist_lock(&ipt_target, name, "ipt_", error, mutex);
+}
+
+/* All zeroes == unconditional rule. */
+static inline int
+unconditional(const struct ipt_ip *ip)
+{
+ unsigned int i;
+
+ for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++)
+ if (((__u32 *)ip)[i])
+ return 0;
+
+ return 1;
+}
+
+/* Figures out from what hook each rule can be called: returns 0 if
+ there are loops. Puts hook bitmask in comefrom. */
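+/* During the walk, bit NF_IP_NUMHOOKS in comefrom marks "currently on this
+   path": reaching an entry that still has it set means we have looped back
+   onto ourselves, and translate_table() turns that into -ELOOP. */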
+static int
+mark_source_chains(struct ipt_table_info *newinfo, unsigned int valid_hooks)
+{
+ unsigned int hook;
+
+ /* No recursion; use packet counter to save back ptrs (reset
+ to 0 as we leave), and comefrom to save source hook bitmask */
+ for (hook = 0; hook < NF_IP_NUMHOOKS; hook++) {
+ unsigned int pos = newinfo->hook_entry[hook];
+ struct ipt_entry *e
+ = (struct ipt_entry *)(newinfo->entries + pos);
+
+ if (!(valid_hooks & (1 << hook)))
+ continue;
+
+ /* Set initial back pointer. */
+ e->counters.pcnt = pos;
+
+ for (;;) {
+ struct ipt_standard_target *t
+ = (void *)ipt_get_target(e);
+
+ if (e->comefrom & (1 << NF_IP_NUMHOOKS)) {
+ printk("iptables: loop hook %u pos %u %08X.\n",
+ hook, pos, e->comefrom);
+ return 0;
+ }
+ e->comefrom
+ |= ((1 << hook) | (1 << NF_IP_NUMHOOKS));
+
+ /* Unconditional return/END. */
+ if (e->target_offset == sizeof(struct ipt_entry)
+ && (strcmp(t->target.u.user.name,
+ IPT_STANDARD_TARGET) == 0)
+ && t->verdict < 0
+ && unconditional(&e->ip)) {
+ unsigned int oldpos, size;
+
+ /* Return: backtrack through the last
+ big jump. */
+ do {
+ e->comefrom ^= (1<<NF_IP_NUMHOOKS);
+#ifdef DEBUG_IP_FIREWALL_USER
+ if (e->comefrom
+ & (1 << NF_IP_NUMHOOKS)) {
+ duprintf("Back unset "
+ "on hook %u "
+ "rule %u\n",
+ hook, pos);
+ }
+#endif
+ oldpos = pos;
+ pos = e->counters.pcnt;
+ e->counters.pcnt = 0;
+
+ /* We're at the start. */
+ if (pos == oldpos)
+ goto next;
+
+ e = (struct ipt_entry *)
+ (newinfo->entries + pos);
+ } while (oldpos == pos + e->next_offset);
+
+ /* Move along one */
+ size = e->next_offset;
+ e = (struct ipt_entry *)
+ (newinfo->entries + pos + size);
+ e->counters.pcnt = pos;
+ pos += size;
+ } else {
+ int newpos = t->verdict;
+
+ if (strcmp(t->target.u.user.name,
+ IPT_STANDARD_TARGET) == 0
+ && newpos >= 0) {
+ /* This a jump; chase it. */
+ duprintf("Jump rule %u -> %u\n",
+ pos, newpos);
+ } else {
+ /* ... this is a fallthru */
+ newpos = pos + e->next_offset;
+ }
+ e = (struct ipt_entry *)
+ (newinfo->entries + newpos);
+ e->counters.pcnt = pos;
+ pos = newpos;
+ }
+ }
+ next:
+ duprintf("Finished chain %u\n", hook);
+ }
+ return 1;
+}
+
+static inline int
+cleanup_match(struct ipt_entry_match *m, unsigned int *i)
+{
+ if (i && (*i)-- == 0)
+ return 1;
+
+ if (m->u.kernel.match->destroy)
+ m->u.kernel.match->destroy(m->data,
+ m->u.match_size - sizeof(*m));
+
+ if (m->u.kernel.match->me)
+ __MOD_DEC_USE_COUNT(m->u.kernel.match->me);
+
+ return 0;
+}
+
+static inline int
+standard_check(const struct ipt_entry_target *t,
+ unsigned int max_offset)
+{
+ struct ipt_standard_target *targ = (void *)t;
+
+ /* Check standard info. */
+ if (t->u.target_size
+ != IPT_ALIGN(sizeof(struct ipt_standard_target))) {
+ duprintf("standard_check: target size %u != %u\n",
+ t->u.target_size,
+ IPT_ALIGN(sizeof(struct ipt_standard_target)));
+ return 0;
+ }
+
+ if (targ->verdict >= 0
+ && targ->verdict > max_offset - sizeof(struct ipt_entry)) {
+ duprintf("ipt_standard_check: bad verdict (%i)\n",
+ targ->verdict);
+ return 0;
+ }
+
+ if (targ->verdict < -NF_MAX_VERDICT - 1) {
+ duprintf("ipt_standard_check: bad negative verdict (%i)\n",
+ targ->verdict);
+ return 0;
+ }
+ return 1;
+}
+
+static inline int
+check_match(struct ipt_entry_match *m,
+ const char *name,
+ const struct ipt_ip *ip,
+ unsigned int hookmask,
+ unsigned int *i)
+{
+ int ret;
+ struct ipt_match *match;
+
+ match = find_match_lock(m->u.user.name, &ret, &ipt_mutex);
+ if (!match) {
+ duprintf("check_match: `%s' not found\n", m->u.user.name);
+ return ret;
+ }
+ if (match->me)
+ __MOD_INC_USE_COUNT(match->me);
+ m->u.kernel.match = match;
+ up(&ipt_mutex);
+
+ if (m->u.kernel.match->checkentry
+ && !m->u.kernel.match->checkentry(name, ip, m->data,
+ m->u.match_size - sizeof(*m),
+ hookmask)) {
+ if (m->u.kernel.match->me)
+ __MOD_DEC_USE_COUNT(m->u.kernel.match->me);
+ duprintf("ip_tables: check failed for `%s'.\n",
+ m->u.kernel.match->name);
+ return -EINVAL;
+ }
+
+ (*i)++;
+ return 0;
+}
+
+static struct ipt_target ipt_standard_target;
+
+static inline int
+check_entry(struct ipt_entry *e, const char *name, unsigned int size,
+ unsigned int *i)
+{
+ struct ipt_entry_target *t;
+ struct ipt_target *target;
+ int ret;
+ unsigned int j;
+
+ if (!ip_checkentry(&e->ip)) {
+ duprintf("ip_tables: ip check failed %p %s.\n", e, name);
+ return -EINVAL;
+ }
+
+ j = 0;
+ ret = IPT_MATCH_ITERATE(e, check_match, name, &e->ip, e->comefrom, &j);
+ if (ret != 0)
+ goto cleanup_matches;
+
+ t = ipt_get_target(e);
+ target = find_target_lock(t->u.user.name, &ret, &ipt_mutex);
+ if (!target) {
+ duprintf("check_entry: `%s' not found\n", t->u.user.name);
+ goto cleanup_matches;
+ }
+ if (target->me)
+ __MOD_INC_USE_COUNT(target->me);
+ t->u.kernel.target = target;
+ up(&ipt_mutex);
+
+ if (t->u.kernel.target == &ipt_standard_target) {
+ if (!standard_check(t, size)) {
+ ret = -EINVAL;
+ goto cleanup_matches;
+ }
+ } else if (t->u.kernel.target->checkentry
+ && !t->u.kernel.target->checkentry(name, e, t->data,
+ t->u.target_size
+ - sizeof(*t),
+ e->comefrom)) {
+ if (t->u.kernel.target->me)
+ __MOD_DEC_USE_COUNT(t->u.kernel.target->me);
+ duprintf("ip_tables: check failed for `%s'.\n",
+ t->u.kernel.target->name);
+ ret = -EINVAL;
+ goto cleanup_matches;
+ }
+
+ (*i)++;
+ return 0;
+
+ cleanup_matches:
+ IPT_MATCH_ITERATE(e, cleanup_match, &j);
+ return ret;
+}
+
+static inline int
+check_entry_size_and_hooks(struct ipt_entry *e,
+ struct ipt_table_info *newinfo,
+ unsigned char *base,
+ unsigned char *limit,
+ const unsigned int *hook_entries,
+ const unsigned int *underflows,
+ unsigned int *i)
+{
+ unsigned int h;
+
+ if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
+ || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
+ duprintf("Bad offset %p\n", e);
+ return -EINVAL;
+ }
+
+ if (e->next_offset
+ < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
+ duprintf("checking: element %p size %u\n",
+ e, e->next_offset);
+ return -EINVAL;
+ }
+
+ /* Check hooks & underflows */
+ for (h = 0; h < NF_IP_NUMHOOKS; h++) {
+ if ((unsigned char *)e - base == hook_entries[h])
+ newinfo->hook_entry[h] = hook_entries[h];
+ if ((unsigned char *)e - base == underflows[h])
+ newinfo->underflow[h] = underflows[h];
+ }
+
+ /* FIXME: underflows must be unconditional, standard verdicts
+ < 0 (not IPT_RETURN). --RR */
+
+ /* Clear counters and comefrom */
+ e->counters = ((struct ipt_counters) { 0, 0 });
+ e->comefrom = 0;
+
+ (*i)++;
+ return 0;
+}
+
+static inline int
+cleanup_entry(struct ipt_entry *e, unsigned int *i)
+{
+ struct ipt_entry_target *t;
+
+ if (i && (*i)-- == 0)
+ return 1;
+
+ /* Cleanup all matches */
+ IPT_MATCH_ITERATE(e, cleanup_match, NULL);
+ t = ipt_get_target(e);
+ if (t->u.kernel.target->destroy)
+ t->u.kernel.target->destroy(t->data,
+ t->u.target_size - sizeof(*t));
+ if (t->u.kernel.target->me)
+ __MOD_DEC_USE_COUNT(t->u.kernel.target->me);
+
+ return 0;
+}
+
+/* Checks and translates the user-supplied table segment (held in
+ newinfo) */
+static int
+translate_table(const char *name,
+ unsigned int valid_hooks,
+ struct ipt_table_info *newinfo,
+ unsigned int size,
+ unsigned int number,
+ const unsigned int *hook_entries,
+ const unsigned int *underflows)
+{
+ unsigned int i;
+ int ret;
+
+ newinfo->size = size;
+ newinfo->number = number;
+
+ /* Init all hooks to impossible value. */
+ for (i = 0; i < NF_IP_NUMHOOKS; i++) {
+ newinfo->hook_entry[i] = 0xFFFFFFFF;
+ newinfo->underflow[i] = 0xFFFFFFFF;
+ }
+
+ duprintf("translate_table: size %u\n", newinfo->size);
+ i = 0;
+ /* Walk through entries, checking offsets. */
+ ret = IPT_ENTRY_ITERATE(newinfo->entries, newinfo->size,
+ check_entry_size_and_hooks,
+ newinfo,
+ newinfo->entries,
+ newinfo->entries + size,
+ hook_entries, underflows, &i);
+ if (ret != 0)
+ return ret;
+
+ if (i != number) {
+ duprintf("translate_table: %u not %u entries\n",
+ i, number);
+ return -EINVAL;
+ }
+
+ /* Check hooks all assigned */
+ for (i = 0; i < NF_IP_NUMHOOKS; i++) {
+ /* Only hooks which are valid */
+ if (!(valid_hooks & (1 << i)))
+ continue;
+ if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
+ duprintf("Invalid hook entry %u %u\n",
+ i, hook_entries[i]);
+ return -EINVAL;
+ }
+ if (newinfo->underflow[i] == 0xFFFFFFFF) {
+ duprintf("Invalid underflow %u %u\n",
+ i, underflows[i]);
+ return -EINVAL;
+ }
+ }
+
+ if (!mark_source_chains(newinfo, valid_hooks))
+ return -ELOOP;
+
+ /* Finally, each sanity check must pass */
+ i = 0;
+ ret = IPT_ENTRY_ITERATE(newinfo->entries, newinfo->size,
+ check_entry, name, size, &i);
+
+ if (ret != 0) {
+ IPT_ENTRY_ITERATE(newinfo->entries, newinfo->size,
+ cleanup_entry, &i);
+ return ret;
+ }
+
+ /* And one copy for every other CPU */
+ for (i = 1; i < smp_num_cpus; i++) {
+ memcpy(newinfo->entries + SMP_ALIGN(newinfo->size)*i,
+ newinfo->entries,
+ SMP_ALIGN(newinfo->size));
+ }
+
+ return ret;
+}
+
+static struct ipt_table_info *
+replace_table(struct ipt_table *table,
+ unsigned int num_counters,
+ struct ipt_table_info *newinfo,
+ int *error)
+{
+ struct ipt_table_info *oldinfo;
+
+#ifdef CONFIG_NETFILTER_DEBUG
+ {
+ struct ipt_entry *table_base;
+ unsigned int i;
+
+ for (i = 0; i < smp_num_cpus; i++) {
+ table_base =
+ (void *)newinfo->entries
+ + TABLE_OFFSET(newinfo, i);
+
+ table_base->comefrom = 0xdead57ac;
+ }
+ }
+#endif
+
+ /* Do the substitution. */
+ write_lock_bh(&table->lock);
+ /* Check inside lock: is the old number correct? */
+ if (num_counters != table->private->number) {
+ duprintf("num_counters != table->private->number (%u/%u)\n",
+ num_counters, table->private->number);
+ write_unlock_bh(&table->lock);
+ *error = -EAGAIN;
+ return NULL;
+ }
+ oldinfo = table->private;
+ table->private = newinfo;
+ newinfo->initial_entries = oldinfo->initial_entries;
+ write_unlock_bh(&table->lock);
+
+ return oldinfo;
+}
+
+/* Gets counters. */
+static inline int
+add_entry_to_counter(const struct ipt_entry *e,
+ struct ipt_counters total[],
+ unsigned int *i)
+{
+ ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
+
+ (*i)++;
+ return 0;
+}
+
+static void
+get_counters(const struct ipt_table_info *t,
+ struct ipt_counters counters[])
+{
+ unsigned int cpu;
+ unsigned int i;
+
+ for (cpu = 0; cpu < smp_num_cpus; cpu++) {
+ i = 0;
+ IPT_ENTRY_ITERATE(t->entries + TABLE_OFFSET(t, cpu),
+ t->size,
+ add_entry_to_counter,
+ counters,
+ &i);
+ }
+}
+
+static int
+copy_entries_to_user(unsigned int total_size,
+ struct ipt_table *table,
+ void *userptr)
+{
+ unsigned int off, num, countersize;
+ struct ipt_entry *e;
+ struct ipt_counters *counters;
+ int ret = 0;
+
+ /* We need atomic snapshot of counters: rest doesn't change
+ (other than comefrom, which userspace doesn't care
+ about). */
+ countersize = sizeof(struct ipt_counters) * table->private->number;
+ counters = vmalloc(countersize);
+
+ if (counters == NULL)
+ return -ENOMEM;
+
+ /* First, sum counters... */
+ memset(counters, 0, countersize);
+ write_lock_bh(&table->lock);
+ get_counters(table->private, counters);
+ write_unlock_bh(&table->lock);
+
+ /* ... then copy entire thing from CPU 0... */
+ if (copy_to_user(userptr, table->private->entries, total_size) != 0) {
+ ret = -EFAULT;
+ goto free_counters;
+ }
+
+ /* FIXME: use iterator macros --RR */
+ /* ... then go back and fix counters and names */
+ for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
+ unsigned int i;
+ struct ipt_entry_match *m;
+ struct ipt_entry_target *t;
+
+ e = (struct ipt_entry *)(table->private->entries + off);
+ if (copy_to_user(userptr + off
+ + offsetof(struct ipt_entry, counters),
+ &counters[num],
+ sizeof(counters[num])) != 0) {
+ ret = -EFAULT;
+ goto free_counters;
+ }
+
+ for (i = sizeof(struct ipt_entry);
+ i < e->target_offset;
+ i += m->u.match_size) {
+ m = (void *)e + i;
+
+ if (copy_to_user(userptr + off + i
+ + offsetof(struct ipt_entry_match,
+ u.user.name),
+ m->u.kernel.match->name,
+ strlen(m->u.kernel.match->name)+1)
+ != 0) {
+ ret = -EFAULT;
+ goto free_counters;
+ }
+ }
+
+ t = ipt_get_target(e);
+ if (copy_to_user(userptr + off + e->target_offset
+ + offsetof(struct ipt_entry_target,
+ u.user.name),
+ t->u.kernel.target->name,
+ strlen(t->u.kernel.target->name)+1) != 0) {
+ ret = -EFAULT;
+ goto free_counters;
+ }
+ }
+
+ free_counters:
+ vfree(counters);
+ return ret;
+}
+
+static int
+get_entries(const struct ipt_get_entries *entries,
+ struct ipt_get_entries *uptr)
+{
+ int ret;
+ struct ipt_table *t;
+
+ t = find_table_lock(entries->name, &ret, &ipt_mutex);
+ if (t) {
+ duprintf("t->private->number = %u\n",
+ t->private->number);
+ if (entries->size == t->private->size)
+ ret = copy_entries_to_user(t->private->size,
+ t, uptr->entrytable);
+ else {
+ duprintf("get_entries: I've got %u not %u!\n",
+ t->private->size,
+ entries->size);
+ ret = -EINVAL;
+ }
+ up(&ipt_mutex);
+ } else
+ duprintf("get_entries: Can't find %s!\n",
+ entries->name);
+
+ return ret;
+}
+
+static int
+do_replace(void *user, unsigned int len)
+{
+ int ret;
+ struct ipt_replace tmp;
+ struct ipt_table *t;
+ struct ipt_table_info *newinfo, *oldinfo;
+ struct ipt_counters *counters;
+
+ if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
+ return -EFAULT;
+
+ /* Hack: Causes ipchains to give correct error msg --RR */
+ if (len != sizeof(tmp) + tmp.size)
+ return -ENOPROTOOPT;
+
+ /* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
+ if ((SMP_ALIGN(tmp.size) >> PAGE_SHIFT) + 2 > num_physpages)
+ return -ENOMEM;
+
+ newinfo = vmalloc(sizeof(struct ipt_table_info)
+ + SMP_ALIGN(tmp.size) * smp_num_cpus);
+ if (!newinfo)
+ return -ENOMEM;
+
+ if (copy_from_user(newinfo->entries, user + sizeof(tmp),
+ tmp.size) != 0) {
+ ret = -EFAULT;
+ goto free_newinfo;
+ }
+
+ counters = vmalloc(tmp.num_counters * sizeof(struct ipt_counters));
+ if (!counters) {
+ ret = -ENOMEM;
+ goto free_newinfo;
+ }
+ memset(counters, 0, tmp.num_counters * sizeof(struct ipt_counters));
+
+ ret = translate_table(tmp.name, tmp.valid_hooks,
+ newinfo, tmp.size, tmp.num_entries,
+ tmp.hook_entry, tmp.underflow);
+ if (ret != 0)
+ goto free_newinfo_counters;
+
+ duprintf("ip_tables: Translated table\n");
+
+ t = find_table_lock(tmp.name, &ret, &ipt_mutex);
+ if (!t)
+ goto free_newinfo_counters_untrans;
+
+ /* You lied! */
+ if (tmp.valid_hooks != t->valid_hooks) {
+ duprintf("Valid hook crap: %08X vs %08X\n",
+ tmp.valid_hooks, t->valid_hooks);
+ ret = -EINVAL;
+ goto free_newinfo_counters_untrans_unlock;
+ }
+
+ oldinfo = replace_table(t, tmp.num_counters, newinfo, &ret);
+ if (!oldinfo)
+ goto free_newinfo_counters_untrans_unlock;
+
+ /* Update module usage count based on number of rules */
+ duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
+ oldinfo->number, oldinfo->initial_entries, newinfo->number);
+ if (t->me && (oldinfo->number <= oldinfo->initial_entries) &&
+ (newinfo->number > oldinfo->initial_entries))
+ __MOD_INC_USE_COUNT(t->me);
+ else if (t->me && (oldinfo->number > oldinfo->initial_entries) &&
+ (newinfo->number <= oldinfo->initial_entries))
+ __MOD_DEC_USE_COUNT(t->me);
+
+ /* Get the old counters. */
+ get_counters(oldinfo, counters);
+ /* Decrease module usage counts and free resource */
+ IPT_ENTRY_ITERATE(oldinfo->entries, oldinfo->size, cleanup_entry,NULL);
+ vfree(oldinfo);
+ /* Silent error: too late now. */
+ copy_to_user(tmp.counters, counters,
+ sizeof(struct ipt_counters) * tmp.num_counters);
+ vfree(counters);
+ up(&ipt_mutex);
+ return 0;
+
+ free_newinfo_counters_untrans_unlock:
+ up(&ipt_mutex);
+ free_newinfo_counters_untrans:
+ IPT_ENTRY_ITERATE(newinfo->entries, newinfo->size, cleanup_entry,NULL);
+ free_newinfo_counters:
+ vfree(counters);
+ free_newinfo:
+ vfree(newinfo);
+ return ret;
+}
+
+/* We're lazy, and add to the first CPU; overflow works its fey magic
+ * and everything is OK. */
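+/* That is: do_add_counters() below only touches CPU 0's copy of the table,
+   but get_counters() sums every CPU's copy, so the totals reported to user
+   space still come out right. */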
+static inline int
+add_counter_to_entry(struct ipt_entry *e,
+ const struct ipt_counters addme[],
+ unsigned int *i)
+{
+#if 0
+ duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
+ *i,
+ (long unsigned int)e->counters.pcnt,
+ (long unsigned int)e->counters.bcnt,
+ (long unsigned int)addme[*i].pcnt,
+ (long unsigned int)addme[*i].bcnt);
+#endif
+
+ ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
+
+ (*i)++;
+ return 0;
+}
+
+static int
+do_add_counters(void *user, unsigned int len)
+{
+ unsigned int i;
+ struct ipt_counters_info tmp, *paddc;
+ struct ipt_table *t;
+ int ret;
+
+ if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
+ return -EFAULT;
+
+ if (len != sizeof(tmp) + tmp.num_counters*sizeof(struct ipt_counters))
+ return -EINVAL;
+
+ paddc = vmalloc(len);
+ if (!paddc)
+ return -ENOMEM;
+
+ if (copy_from_user(paddc, user, len) != 0) {
+ ret = -EFAULT;
+ goto free;
+ }
+
+ t = find_table_lock(tmp.name, &ret, &ipt_mutex);
+ if (!t)
+ goto free;
+
+ write_lock_bh(&t->lock);
+ if (t->private->number != paddc->num_counters) {
+ ret = -EINVAL;
+ goto unlock_up_free;
+ }
+
+ i = 0;
+ IPT_ENTRY_ITERATE(t->private->entries,
+ t->private->size,
+ add_counter_to_entry,
+ paddc->counters,
+ &i);
+ unlock_up_free:
+ write_unlock_bh(&t->lock);
+ up(&ipt_mutex);
+ free:
+ vfree(paddc);
+
+ return ret;
+}
+
+static int
+do_ipt_set_ctl(struct sock *sk, int cmd, void *user, unsigned int len)
+{
+ int ret;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ switch (cmd) {
+ case IPT_SO_SET_REPLACE:
+ ret = do_replace(user, len);
+ break;
+
+ case IPT_SO_SET_ADD_COUNTERS:
+ ret = do_add_counters(user, len);
+ break;
+
+ default:
+ duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int
+do_ipt_get_ctl(struct sock *sk, int cmd, void *user, int *len)
+{
+ int ret;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ switch (cmd) {
+ case IPT_SO_GET_INFO: {
+ char name[IPT_TABLE_MAXNAMELEN];
+ struct ipt_table *t;
+
+ if (*len != sizeof(struct ipt_getinfo)) {
+ duprintf("length %u != %u\n", *len,
+ sizeof(struct ipt_getinfo));
+ ret = -EINVAL;
+ break;
+ }
+
+ if (copy_from_user(name, user, sizeof(name)) != 0) {
+ ret = -EFAULT;
+ break;
+ }
+ name[IPT_TABLE_MAXNAMELEN-1] = '\0';
+ t = find_table_lock(name, &ret, &ipt_mutex);
+ if (t) {
+ struct ipt_getinfo info;
+
+ info.valid_hooks = t->valid_hooks;
+ memcpy(info.hook_entry, t->private->hook_entry,
+ sizeof(info.hook_entry));
+ memcpy(info.underflow, t->private->underflow,
+ sizeof(info.underflow));
+ info.num_entries = t->private->number;
+ info.size = t->private->size;
+ strcpy(info.name, name);
+
+ if (copy_to_user(user, &info, *len) != 0)
+ ret = -EFAULT;
+ else
+ ret = 0;
+
+ up(&ipt_mutex);
+ }
+ }
+ break;
+
+ case IPT_SO_GET_ENTRIES: {
+ struct ipt_get_entries get;
+
+ if (*len < sizeof(get)) {
+ duprintf("get_entries: %u < %u\n", *len, sizeof(get));
+ ret = -EINVAL;
+ } else if (copy_from_user(&get, user, sizeof(get)) != 0) {
+ ret = -EFAULT;
+ } else if (*len != sizeof(struct ipt_get_entries) + get.size) {
+ duprintf("get_entries: %u != %u\n", *len,
+ sizeof(struct ipt_get_entries) + get.size);
+ ret = -EINVAL;
+ } else
+ ret = get_entries(&get, user);
+ break;
+ }
+
+ default:
+ duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/* Registration hooks for targets. */
+int
+ipt_register_target(struct ipt_target *target)
+{
+ int ret;
+
+ MOD_INC_USE_COUNT;
+ ret = down_interruptible(&ipt_mutex);
+ if (ret != 0) {
+ MOD_DEC_USE_COUNT;
+ return ret;
+ }
+ if (!list_named_insert(&ipt_target, target)) {
+ duprintf("ipt_register_target: `%s' already in list!\n",
+ target->name);
+ ret = -EINVAL;
+ MOD_DEC_USE_COUNT;
+ }
+ up(&ipt_mutex);
+ return ret;
+}
+
+void
+ipt_unregister_target(struct ipt_target *target)
+{
+ down(&ipt_mutex);
+ LIST_DELETE(&ipt_target, target);
+ up(&ipt_mutex);
+ MOD_DEC_USE_COUNT;
+}
+
+int
+ipt_register_match(struct ipt_match *match)
+{
+ int ret;
+
+ MOD_INC_USE_COUNT;
+ ret = down_interruptible(&ipt_mutex);
+ if (ret != 0) {
+ MOD_DEC_USE_COUNT;
+ return ret;
+ }
+ if (!list_named_insert(&ipt_match, match)) {
+ duprintf("ipt_register_match: `%s' already in list!\n",
+ match->name);
+ MOD_DEC_USE_COUNT;
+ ret = -EINVAL;
+ }
+ up(&ipt_mutex);
+
+ return ret;
+}
+
+void
+ipt_unregister_match(struct ipt_match *match)
+{
+ down(&ipt_mutex);
+ LIST_DELETE(&ipt_match, match);
+ up(&ipt_mutex);
+ MOD_DEC_USE_COUNT;
+}
+
+int ipt_register_table(struct ipt_table *table)
+{
+ int ret;
+ struct ipt_table_info *newinfo;
+ static struct ipt_table_info bootstrap
+ = { 0, 0, 0, { 0 }, { 0 }, { } };
+
+ MOD_INC_USE_COUNT;
+ newinfo = vmalloc(sizeof(struct ipt_table_info)
+ + SMP_ALIGN(table->table->size) * smp_num_cpus);
+ if (!newinfo) {
+ ret = -ENOMEM;
+ MOD_DEC_USE_COUNT;
+ return ret;
+ }
+ memcpy(newinfo->entries, table->table->entries, table->table->size);
+
+ ret = translate_table(table->name, table->valid_hooks,
+ newinfo, table->table->size,
+ table->table->num_entries,
+ table->table->hook_entry,
+ table->table->underflow);
+ if (ret != 0) {
+ vfree(newinfo);
+ MOD_DEC_USE_COUNT;
+ return ret;
+ }
+
+ ret = down_interruptible(&ipt_mutex);
+ if (ret != 0) {
+ vfree(newinfo);
+ MOD_DEC_USE_COUNT;
+ return ret;
+ }
+
+ /* Don't autoload: we'd eat our tail... */
+ if (list_named_find(&ipt_tables, table->name)) {
+ ret = -EEXIST;
+ goto free_unlock;
+ }
+
+ /* Simplifies replace_table code. */
+ table->private = &bootstrap;
+ if (!replace_table(table, 0, newinfo, &ret))
+ goto free_unlock;
+
+ duprintf("table->private->number = %u\n",
+ table->private->number);
+
+ /* save number of initial entries */
+ table->private->initial_entries = table->private->number;
+
+ table->lock = RW_LOCK_UNLOCKED;
+ list_prepend(&ipt_tables, table);
+
+ unlock:
+ up(&ipt_mutex);
+ return ret;
+
+ free_unlock:
+ vfree(newinfo);
+ MOD_DEC_USE_COUNT;
+ goto unlock;
+}
+
+void ipt_unregister_table(struct ipt_table *table)
+{
+ down(&ipt_mutex);
+ LIST_DELETE(&ipt_tables, table);
+ up(&ipt_mutex);
+
+ /* Decrease module usage counts and free resources */
+ IPT_ENTRY_ITERATE(table->private->entries, table->private->size,
+ cleanup_entry, NULL);
+ vfree(table->private);
+ MOD_DEC_USE_COUNT;
+}
+
+/* Returns 1 if the port is matched by the range, 0 otherwise */
+static inline int
+port_match(u_int16_t min, u_int16_t max, u_int16_t port, int invert)
+{
+ int ret;
+
+ ret = (port >= min && port <= max) ^ invert;
+ return ret;
+}
+
+static int
+tcp_find_option(u_int8_t option,
+ const struct tcphdr *tcp,
+ u_int16_t datalen,
+ int invert,
+ int *hotdrop)
+{
+ unsigned int i = sizeof(struct tcphdr);
+ const u_int8_t *opt = (u_int8_t *)tcp;
+
+ duprintf("tcp_match: finding option\n");
+ /* If we don't have the whole header, drop packet. */
+ if (tcp->doff * 4 > datalen) {
+ *hotdrop = 1;
+ return 0;
+ }
+
+ while (i < tcp->doff * 4) {
+ if (opt[i] == option) return !invert;
+ if (opt[i] < 2) i++;
+ else i += opt[i+1]?:1;
+ }
+
+ return invert;
+}
+
+static int
+tcp_match(const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const void *matchinfo,
+ int offset,
+ const void *hdr,
+ u_int16_t datalen,
+ int *hotdrop)
+{
+ const struct tcphdr *tcp = hdr;
+ const struct ipt_tcp *tcpinfo = matchinfo;
+
+ /* To quote Alan:
+
+ Don't allow a fragment of TCP 8 bytes in. Nobody normal
+ causes this. Its a cracker trying to break in by doing a
+ flag overwrite to pass the direction checks.
+ */
+
+ if (offset == 1) {
+ duprintf("Dropping evil TCP offset=1 frag.\n");
+ *hotdrop = 1;
+ return 0;
+ } else if (offset == 0 && datalen < sizeof(struct tcphdr)) {
+ /* We've been asked to examine this packet, and we
+ can't. Hence, no choice but to drop. */
+ duprintf("Dropping evil TCP offset=0 tinygram.\n");
+ *hotdrop = 1;
+ return 0;
+ }
+
+ /* FIXME: Try tcp doff >> packet len against various stacks --RR */
+
+#define FWINVTCP(bool,invflg) ((bool) ^ !!(tcpinfo->invflags & invflg))
+
+ /* Must not be a fragment. */
+ return !offset
+ && port_match(tcpinfo->spts[0], tcpinfo->spts[1],
+ ntohs(tcp->source),
+ !!(tcpinfo->invflags & IPT_TCP_INV_SRCPT))
+ && port_match(tcpinfo->dpts[0], tcpinfo->dpts[1],
+ ntohs(tcp->dest),
+ !!(tcpinfo->invflags & IPT_TCP_INV_DSTPT))
+ && FWINVTCP((((unsigned char *)tcp)[13]
+ & tcpinfo->flg_mask)
+ == tcpinfo->flg_cmp,
+ IPT_TCP_INV_FLAGS)
+ && (!tcpinfo->option
+ || tcp_find_option(tcpinfo->option, tcp, datalen,
+ tcpinfo->invflags
+ & IPT_TCP_INV_OPTION,
+ hotdrop));
+}
+
+/* Called when user tries to insert an entry of this type. */
+static int
+tcp_checkentry(const char *tablename,
+ const struct ipt_ip *ip,
+ void *matchinfo,
+ unsigned int matchsize,
+ unsigned int hook_mask)
+{
+ const struct ipt_tcp *tcpinfo = matchinfo;
+
+ /* Must specify proto == TCP, and no unknown invflags */
+ return ip->proto == IPPROTO_TCP
+ && !(ip->invflags & IPT_INV_PROTO)
+ && matchsize == IPT_ALIGN(sizeof(struct ipt_tcp))
+ && !(tcpinfo->invflags & ~IPT_TCP_INV_MASK);
+}
+
+static int
+udp_match(const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const void *matchinfo,
+ int offset,
+ const void *hdr,
+ u_int16_t datalen,
+ int *hotdrop)
+{
+ const struct udphdr *udp = hdr;
+ const struct ipt_udp *udpinfo = matchinfo;
+
+ if (offset == 0 && datalen < sizeof(struct udphdr)) {
+ /* We've been asked to examine this packet, and we
+ can't. Hence, no choice but to drop. */
+ duprintf("Dropping evil UDP tinygram.\n");
+ *hotdrop = 1;
+ return 0;
+ }
+
+ /* Must not be a fragment. */
+ return !offset
+ && port_match(udpinfo->spts[0], udpinfo->spts[1],
+ ntohs(udp->source),
+ !!(udpinfo->invflags & IPT_UDP_INV_SRCPT))
+ && port_match(udpinfo->dpts[0], udpinfo->dpts[1],
+ ntohs(udp->dest),
+ !!(udpinfo->invflags & IPT_UDP_INV_DSTPT));
+}
+
+/* Called when user tries to insert an entry of this type. */
+static int
+udp_checkentry(const char *tablename,
+ const struct ipt_ip *ip,
+ void *matchinfo,
+ unsigned int matchinfosize,
+ unsigned int hook_mask)
+{
+ const struct ipt_udp *udpinfo = matchinfo;
+
+ /* Must specify proto == UDP, and no unknown invflags */
+ if (ip->proto != IPPROTO_UDP || (ip->invflags & IPT_INV_PROTO)) {
+ duprintf("ipt_udp: Protocol %u != %u\n", ip->proto,
+ IPPROTO_UDP);
+ return 0;
+ }
+ if (matchinfosize != IPT_ALIGN(sizeof(struct ipt_udp))) {
+ duprintf("ipt_udp: matchsize %u != %u\n",
+ matchinfosize, IPT_ALIGN(sizeof(struct ipt_udp)));
+ return 0;
+ }
+ if (udpinfo->invflags & ~IPT_UDP_INV_MASK) {
+ duprintf("ipt_udp: unknown flags %X\n",
+ udpinfo->invflags);
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Returns 1 if the type and code is matched by the range, 0 otherwise */
+static inline int
+icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
+ u_int8_t type, u_int8_t code,
+ int invert)
+{
+ return (type == test_type && code >= min_code && code <= max_code)
+ ^ invert;
+}
+
+static int
+icmp_match(const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const void *matchinfo,
+ int offset,
+ const void *hdr,
+ u_int16_t datalen,
+ int *hotdrop)
+{
+ const struct icmphdr *icmp = hdr;
+ const struct ipt_icmp *icmpinfo = matchinfo;
+
+ if (offset == 0 && datalen < 2) {
+ /* We've been asked to examine this packet, and we
+ can't. Hence, no choice but to drop. */
+ duprintf("Dropping evil ICMP tinygram.\n");
+ *hotdrop = 1;
+ return 0;
+ }
+
+ /* Must not be a fragment. */
+ return !offset
+ && icmp_type_code_match(icmpinfo->type,
+ icmpinfo->code[0],
+ icmpinfo->code[1],
+ icmp->type, icmp->code,
+ !!(icmpinfo->invflags&IPT_ICMP_INV));
+}
+
+/* Called when user tries to insert an entry of this type. */
+static int
+icmp_checkentry(const char *tablename,
+ const struct ipt_ip *ip,
+ void *matchinfo,
+ unsigned int matchsize,
+ unsigned int hook_mask)
+{
+ const struct ipt_icmp *icmpinfo = matchinfo;
+
+ /* Must specify proto == ICMP, and no unknown invflags */
+ return ip->proto == IPPROTO_ICMP
+ && !(ip->invflags & IPT_INV_PROTO)
+ && matchsize == IPT_ALIGN(sizeof(struct ipt_icmp))
+ && !(icmpinfo->invflags & ~IPT_ICMP_INV);
+}
+
+/* The built-in targets: standard (NULL) and error. */
+static struct ipt_target ipt_standard_target
+= { { NULL, NULL }, IPT_STANDARD_TARGET, NULL, NULL, NULL };
+static struct ipt_target ipt_error_target
+= { { NULL, NULL }, IPT_ERROR_TARGET, ipt_error, NULL, NULL };
+
+static struct nf_sockopt_ops ipt_sockopts
+= { { NULL, NULL }, PF_INET, IPT_BASE_CTL, IPT_SO_SET_MAX+1, do_ipt_set_ctl,
+ IPT_BASE_CTL, IPT_SO_GET_MAX+1, do_ipt_get_ctl, 0, NULL };
+
+static struct ipt_match tcp_matchstruct
+= { { NULL, NULL }, "tcp", &tcp_match, &tcp_checkentry, NULL };
+static struct ipt_match udp_matchstruct
+= { { NULL, NULL }, "udp", &udp_match, &udp_checkentry, NULL };
+static struct ipt_match icmp_matchstruct
+= { { NULL, NULL }, "icmp", &icmp_match, &icmp_checkentry, NULL };
+
+#ifdef CONFIG_PROC_FS
+static inline int print_name(const struct ipt_table *t,
+ off_t start_offset, char *buffer, int length,
+ off_t *pos, unsigned int *count)
+{
+ if ((*count)++ >= start_offset) {
+ unsigned int namelen;
+
+ namelen = sprintf(buffer + *pos, "%s\n", t->name);
+ if (*pos + namelen > length) {
+ /* Stop iterating */
+ return 1;
+ }
+ *pos += namelen;
+ }
+ return 0;
+}
+
+static int ipt_get_tables(char *buffer, char **start, off_t offset, int length)
+{
+ off_t pos = 0;
+ unsigned int count = 0;
+
+ if (down_interruptible(&ipt_mutex) != 0)
+ return 0;
+
+ LIST_FIND(&ipt_tables, print_name, struct ipt_table *,
+ offset, buffer, length, &pos, &count);
+
+ up(&ipt_mutex);
+
+ /* `start' hack - see fs/proc/generic.c line ~105 */
+ *start=(char *)((unsigned long)count-offset);
+ return pos;
+}
+#endif /*CONFIG_PROC_FS*/
+
+static int __init init(void)
+{
+ int ret;
+
+	/* No one else will be downing the sem now, so we won't sleep */
+ down(&ipt_mutex);
+ list_append(&ipt_target, &ipt_standard_target);
+ list_append(&ipt_target, &ipt_error_target);
+ list_append(&ipt_match, &tcp_matchstruct);
+ list_append(&ipt_match, &udp_matchstruct);
+ list_append(&ipt_match, &icmp_matchstruct);
+ up(&ipt_mutex);
+
+ /* Register setsockopt */
+ ret = nf_register_sockopt(&ipt_sockopts);
+ if (ret < 0) {
+ duprintf("Unable to register sockopts.\n");
+ return ret;
+ }
+
+#ifdef CONFIG_PROC_FS
+ {
+ struct proc_dir_entry *proc;
+
+ proc = proc_net_create("ip_tables_names", 0, ipt_get_tables);
+ if (!proc) {
+ nf_unregister_sockopt(&ipt_sockopts);
+ return -ENOMEM;
+ }
+ proc->owner = THIS_MODULE;
+ }
+#endif
+
+ printk("ip_tables: (C) 2000-2002 Netfilter core team\n");
+ return 0;
+}
+
+static void __exit fini(void)
+{
+ nf_unregister_sockopt(&ipt_sockopts);
+#ifdef CONFIG_PROC_FS
+ proc_net_remove("ip_tables_names");
+#endif
+}
+
+EXPORT_SYMBOL(ipt_register_table);
+EXPORT_SYMBOL(ipt_unregister_table);
+EXPORT_SYMBOL(ipt_register_match);
+EXPORT_SYMBOL(ipt_unregister_match);
+EXPORT_SYMBOL(ipt_do_table);
+EXPORT_SYMBOL(ipt_register_target);
+EXPORT_SYMBOL(ipt_unregister_target);
+
+module_init(init);
+module_exit(fini);
+MODULE_LICENSE("GPL");
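
The set/get handlers above are reached through netfilter's generic sockopt
interface rather than a dedicated syscall. As a rough sketch of the userspace
side -- this follows the raw-socket/IPPROTO_IP convention used by iptables(8)
and libiptc, it is not code from this patch, and it needs CAP_NET_ADMIN --
querying the "filter" table looks like:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/netfilter_ipv4/ip_tables.h>

int main(void)
{
	struct ipt_getinfo info;
	socklen_t len = sizeof(info);
	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);

	if (fd < 0)
		return 1;

	memset(&info, 0, sizeof(info));
	strcpy(info.name, "filter");	/* table registered by iptable_filter */

	/* Lands in do_ipt_get_ctl(), case IPT_SO_GET_INFO */
	if (getsockopt(fd, IPPROTO_IP, IPT_SO_GET_INFO, &info, &len) != 0) {
		perror("getsockopt");
		return 1;
	}

	printf("%s: %u entries, %u bytes, valid_hooks=0x%08x\n",
	       info.name, info.num_entries, info.size, info.valid_hooks);
	return 0;
}
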
diff --git a/br-nf-bds/linux/net/ipv4/netfilter/ipt_LOG.c b/br-nf-bds/linux/net/ipv4/netfilter/ipt_LOG.c
new file mode 100644
index 0000000..48bb12f
--- /dev/null
+++ b/br-nf-bds/linux/net/ipv4/netfilter/ipt_LOG.c
@@ -0,0 +1,363 @@
+/*
+ * This is a module which is used for logging packets.
+ */
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/spinlock.h>
+#include <net/icmp.h>
+#include <net/udp.h>
+#include <net/tcp.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+
+struct in_device;
+#include <net/route.h>
+#include <linux/netfilter_ipv4/ipt_LOG.h>
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(format, args...)
+#endif
+
+struct esphdr {
+ __u32 spi;
+}; /* FIXME evil kludge */
+
+/* Use lock to serialize, so printks don't overlap */
+static spinlock_t log_lock = SPIN_LOCK_UNLOCKED;
+
+/* One level of recursion won't kill us */
+static void dump_packet(const struct ipt_log_info *info,
+ struct iphdr *iph, unsigned int len, int recurse)
+{
+ void *protoh = (u_int32_t *)iph + iph->ihl;
+ unsigned int datalen = len - iph->ihl * 4;
+
+ /* Important fields:
+ * TOS, len, DF/MF, fragment offset, TTL, src, dst, options. */
+ /* Max length: 40 "SRC=255.255.255.255 DST=255.255.255.255 " */
+ printk("SRC=%u.%u.%u.%u DST=%u.%u.%u.%u ",
+ NIPQUAD(iph->saddr), NIPQUAD(iph->daddr));
+
+ /* Max length: 46 "LEN=65535 TOS=0xFF PREC=0xFF TTL=255 ID=65535 " */
+ printk("LEN=%u TOS=0x%02X PREC=0x%02X TTL=%u ID=%u ",
+ ntohs(iph->tot_len), iph->tos & IPTOS_TOS_MASK,
+ iph->tos & IPTOS_PREC_MASK, iph->ttl, ntohs(iph->id));
+
+ /* Max length: 6 "CE DF MF " */
+ if (ntohs(iph->frag_off) & IP_CE)
+ printk("CE ");
+ if (ntohs(iph->frag_off) & IP_DF)
+ printk("DF ");
+ if (ntohs(iph->frag_off) & IP_MF)
+ printk("MF ");
+
+ /* Max length: 11 "FRAG:65535 " */
+ if (ntohs(iph->frag_off) & IP_OFFSET)
+ printk("FRAG:%u ", ntohs(iph->frag_off) & IP_OFFSET);
+
+ if ((info->logflags & IPT_LOG_IPOPT)
+ && iph->ihl * 4 != sizeof(struct iphdr)) {
+ unsigned int i;
+
+ /* Max length: 127 "OPT (" 15*4*2chars ") " */
+ printk("OPT (");
+ for (i = sizeof(struct iphdr); i < iph->ihl * 4; i++)
+ printk("%02X", ((u_int8_t *)iph)[i]);
+ printk(") ");
+ }
+
+ switch (iph->protocol) {
+ case IPPROTO_TCP: {
+ struct tcphdr *tcph = protoh;
+
+ /* Max length: 10 "PROTO=TCP " */
+ printk("PROTO=TCP ");
+
+ if (ntohs(iph->frag_off) & IP_OFFSET)
+ break;
+
+ /* Max length: 25 "INCOMPLETE [65535 bytes] " */
+ if (datalen < sizeof (*tcph)) {
+ printk("INCOMPLETE [%u bytes] ", datalen);
+ break;
+ }
+
+ /* Max length: 20 "SPT=65535 DPT=65535 " */
+ printk("SPT=%u DPT=%u ",
+ ntohs(tcph->source), ntohs(tcph->dest));
+ /* Max length: 30 "SEQ=4294967295 ACK=4294967295 " */
+ if (info->logflags & IPT_LOG_TCPSEQ)
+ printk("SEQ=%u ACK=%u ",
+ ntohl(tcph->seq), ntohl(tcph->ack_seq));
+ /* Max length: 13 "WINDOW=65535 " */
+ printk("WINDOW=%u ", ntohs(tcph->window));
+ /* Max length: 9 "RES=0x3F " */
+ printk("RES=0x%02x ", (u_int8_t)(ntohl(tcp_flag_word(tcph) & TCP_RESERVED_BITS) >> 22));
+ /* Max length: 32 "CWR ECE URG ACK PSH RST SYN FIN " */
+ if (tcph->cwr)
+ printk("CWR ");
+ if (tcph->ece)
+ printk("ECE ");
+ if (tcph->urg)
+ printk("URG ");
+ if (tcph->ack)
+ printk("ACK ");
+ if (tcph->psh)
+ printk("PSH ");
+ if (tcph->rst)
+ printk("RST ");
+ if (tcph->syn)
+ printk("SYN ");
+ if (tcph->fin)
+ printk("FIN ");
+ /* Max length: 11 "URGP=65535 " */
+ printk("URGP=%u ", ntohs(tcph->urg_ptr));
+
+ if ((info->logflags & IPT_LOG_TCPOPT)
+ && tcph->doff * 4 != sizeof(struct tcphdr)) {
+ unsigned int i;
+
+ /* Max length: 127 "OPT (" 15*4*2chars ") " */
+ printk("OPT (");
+ for (i =sizeof(struct tcphdr); i < tcph->doff * 4; i++)
+ printk("%02X", ((u_int8_t *)tcph)[i]);
+ printk(") ");
+ }
+ break;
+ }
+ case IPPROTO_UDP: {
+ struct udphdr *udph = protoh;
+
+ /* Max length: 10 "PROTO=UDP " */
+ printk("PROTO=UDP ");
+
+ if (ntohs(iph->frag_off) & IP_OFFSET)
+ break;
+
+ /* Max length: 25 "INCOMPLETE [65535 bytes] " */
+ if (datalen < sizeof (*udph)) {
+ printk("INCOMPLETE [%u bytes] ", datalen);
+ break;
+ }
+
+ /* Max length: 20 "SPT=65535 DPT=65535 " */
+ printk("SPT=%u DPT=%u LEN=%u ",
+ ntohs(udph->source), ntohs(udph->dest),
+ ntohs(udph->len));
+ break;
+ }
+ case IPPROTO_ICMP: {
+ struct icmphdr *icmph = protoh;
+ static size_t required_len[NR_ICMP_TYPES+1]
+ = { [ICMP_ECHOREPLY] = 4,
+ [ICMP_DEST_UNREACH]
+ = 8 + sizeof(struct iphdr) + 8,
+ [ICMP_SOURCE_QUENCH]
+ = 8 + sizeof(struct iphdr) + 8,
+ [ICMP_REDIRECT]
+ = 8 + sizeof(struct iphdr) + 8,
+ [ICMP_ECHO] = 4,
+ [ICMP_TIME_EXCEEDED]
+ = 8 + sizeof(struct iphdr) + 8,
+ [ICMP_PARAMETERPROB]
+ = 8 + sizeof(struct iphdr) + 8,
+ [ICMP_TIMESTAMP] = 20,
+ [ICMP_TIMESTAMPREPLY] = 20,
+ [ICMP_ADDRESS] = 12,
+ [ICMP_ADDRESSREPLY] = 12 };
+
+ /* Max length: 11 "PROTO=ICMP " */
+ printk("PROTO=ICMP ");
+
+ if (ntohs(iph->frag_off) & IP_OFFSET)
+ break;
+
+ /* Max length: 25 "INCOMPLETE [65535 bytes] " */
+ if (datalen < 4) {
+ printk("INCOMPLETE [%u bytes] ", datalen);
+ break;
+ }
+
+ /* Max length: 18 "TYPE=255 CODE=255 " */
+ printk("TYPE=%u CODE=%u ", icmph->type, icmph->code);
+
+ /* Max length: 25 "INCOMPLETE [65535 bytes] " */
+ if (icmph->type <= NR_ICMP_TYPES
+ && required_len[icmph->type]
+ && datalen < required_len[icmph->type]) {
+ printk("INCOMPLETE [%u bytes] ", datalen);
+ break;
+ }
+
+ switch (icmph->type) {
+ case ICMP_ECHOREPLY:
+ case ICMP_ECHO:
+ /* Max length: 19 "ID=65535 SEQ=65535 " */
+ printk("ID=%u SEQ=%u ",
+ ntohs(icmph->un.echo.id),
+ ntohs(icmph->un.echo.sequence));
+ break;
+
+ case ICMP_PARAMETERPROB:
+ /* Max length: 14 "PARAMETER=255 " */
+ printk("PARAMETER=%u ",
+ ntohl(icmph->un.gateway) >> 24);
+ break;
+ case ICMP_REDIRECT:
+ /* Max length: 24 "GATEWAY=255.255.255.255 " */
+ printk("GATEWAY=%u.%u.%u.%u ", NIPQUAD(icmph->un.gateway));
+ /* Fall through */
+ case ICMP_DEST_UNREACH:
+ case ICMP_SOURCE_QUENCH:
+ case ICMP_TIME_EXCEEDED:
+ /* Max length: 3+maxlen */
+ if (recurse) {
+ printk("[");
+ dump_packet(info,
+ (struct iphdr *)(icmph + 1),
+ datalen-sizeof(struct icmphdr),
+ 0);
+ printk("] ");
+ }
+
+ /* Max length: 10 "MTU=65535 " */
+ if (icmph->type == ICMP_DEST_UNREACH
+ && icmph->code == ICMP_FRAG_NEEDED)
+ printk("MTU=%u ", ntohs(icmph->un.frag.mtu));
+ }
+ break;
+ }
+ /* Max Length */
+ case IPPROTO_AH:
+ case IPPROTO_ESP: {
+ struct esphdr *esph = protoh;
+ int esp= (iph->protocol==IPPROTO_ESP);
+
+ /* Max length: 10 "PROTO=ESP " */
+ printk("PROTO=%s ",esp? "ESP" : "AH");
+
+ if (ntohs(iph->frag_off) & IP_OFFSET)
+ break;
+
+ /* Max length: 25 "INCOMPLETE [65535 bytes] " */
+ if (datalen < sizeof (*esph)) {
+ printk("INCOMPLETE [%u bytes] ", datalen);
+ break;
+ }
+
+ /* Length: 15 "SPI=0xF1234567 " */
+ printk("SPI=0x%x ", ntohl(esph->spi) );
+ break;
+ }
+ /* Max length: 10 "PROTO 255 " */
+ default:
+ printk("PROTO=%u ", iph->protocol);
+ }
+
+ /* Proto Max log string length */
+ /* IP: 40+46+6+11+127 = 230 */
+ /* TCP: 10+max(25,20+30+13+9+32+11+127) = 252 */
+ /* UDP: 10+max(25,20) = 35 */
+ /* ICMP: 11+max(25, 18+25+max(19,14,24+3+n+10,3+n+10)) = 91+n */
+ /* ESP: 10+max(25)+15 = 50 */
+ /* AH: 9+max(25)+15 = 49 */
+ /* unknown: 10 */
+
+ /* (ICMP allows recursion one level deep) */
+ /* maxlen = IP + ICMP + IP + max(TCP,UDP,ICMP,unknown) */
+ /* maxlen = 230+ 91 + 230 + 252 = 803 */
+}
+
+static unsigned int
+ipt_log_target(struct sk_buff **pskb,
+ unsigned int hooknum,
+ const struct net_device *in,
+ const struct net_device *out,
+ const void *targinfo,
+ void *userinfo)
+{
+ struct iphdr *iph = (*pskb)->nh.iph;
+ const struct ipt_log_info *loginfo = targinfo;
+ char level_string[4] = "< >";
+
+ level_string[1] = '0' + (loginfo->level % 8);
+ spin_lock_bh(&log_lock);
+ printk(level_string);
+ printk("%sIN=%s ", loginfo->prefix, in ? in->name : "");
+ if ((*pskb)->physindev && in != (*pskb)->physindev)
+ printk("PHYSIN=%s ", (*pskb)->physindev->name);
+ printk("OUT=%s ", out ? out->name : "");
+ if ((*pskb)->physoutdev && out != (*pskb)->physoutdev)
+ printk("PHYSOUT=%s ", (*pskb)->physoutdev->name);
+
+ if (in && !out) {
+ /* MAC logging for input chain only. */
+ printk("MAC=");
+ if ((*pskb)->dev && (*pskb)->dev->hard_header_len && (*pskb)->mac.raw != (void*)iph) {
+ int i;
+ unsigned char *p = (*pskb)->mac.raw;
+ for (i = 0; i < (*pskb)->dev->hard_header_len; i++,p++)
+ printk("%02x%c", *p,
+ i==(*pskb)->dev->hard_header_len - 1
+ ? ' ':':');
+ } else
+ printk(" ");
+ }
+
+ dump_packet(loginfo, iph, (*pskb)->len, 1);
+ printk("\n");
+ spin_unlock_bh(&log_lock);
+
+ return IPT_CONTINUE;
+}
+
+static int ipt_log_checkentry(const char *tablename,
+ const struct ipt_entry *e,
+ void *targinfo,
+ unsigned int targinfosize,
+ unsigned int hook_mask)
+{
+ const struct ipt_log_info *loginfo = targinfo;
+
+ if (targinfosize != IPT_ALIGN(sizeof(struct ipt_log_info))) {
+ DEBUGP("LOG: targinfosize %u != %u\n",
+ targinfosize, IPT_ALIGN(sizeof(struct ipt_log_info)));
+ return 0;
+ }
+
+ if (loginfo->level >= 8) {
+ DEBUGP("LOG: level %u >= 8\n", loginfo->level);
+ return 0;
+ }
+
+ if (loginfo->prefix[sizeof(loginfo->prefix)-1] != '\0') {
+ DEBUGP("LOG: prefix term %i\n",
+ loginfo->prefix[sizeof(loginfo->prefix)-1]);
+ return 0;
+ }
+
+ return 1;
+}
+
+static struct ipt_target ipt_log_reg
+= { { NULL, NULL }, "LOG", ipt_log_target, ipt_log_checkentry, NULL,
+ THIS_MODULE };
+
+static int __init init(void)
+{
+ if (ipt_register_target(&ipt_log_reg))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void __exit fini(void)
+{
+ ipt_unregister_target(&ipt_log_reg);
+}
+
+module_init(init);
+module_exit(fini);
+MODULE_LICENSE("GPL");
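
The registration boilerplate above is essentially all an ip_tables target
module needs. A minimal, hypothetical example -- the "NOP" name and the whole
module are made up for illustration and are not part of this patch -- would
look like:

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter_ipv4/ip_tables.h>

/* Do-nothing target: never alters the packet, just continues rule traversal */
static unsigned int
ipt_nop_target(struct sk_buff **pskb, unsigned int hooknum,
	       const struct net_device *in, const struct net_device *out,
	       const void *targinfo, void *userinfo)
{
	return IPT_CONTINUE;
}

static int
ipt_nop_checkentry(const char *tablename, const struct ipt_entry *e,
		   void *targinfo, unsigned int targinfosize,
		   unsigned int hook_mask)
{
	return 1;	/* no target data to validate in this sketch */
}

static struct ipt_target ipt_nop_reg
= { { NULL, NULL }, "NOP", ipt_nop_target, ipt_nop_checkentry, NULL,
    THIS_MODULE };

static int __init init(void)
{
	return ipt_register_target(&ipt_nop_reg) ? -EINVAL : 0;
}

static void __exit fini(void)
{
	ipt_unregister_target(&ipt_nop_reg);
}

module_init(init);
module_exit(fini);
MODULE_LICENSE("GPL");
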
diff --git a/br-nf-bds/patches/bridge-nf-0.0.7-bds-against-2.4.18.diff b/br-nf-bds/patches/bridge-nf-0.0.7-bds-against-2.4.18.diff
new file mode 100644
index 0000000..eb33426
--- /dev/null
+++ b/br-nf-bds/patches/bridge-nf-0.0.7-bds-against-2.4.18.diff
@@ -0,0 +1,975 @@
+All patches are the same as Lennert's 0.0.6 except the last patch
+(br_netfilter.c).
+
+--- linux-2.4.18/include/linux/netfilter.h Thu Nov 22 20:47:48 2001
++++ linux-2.4.18-brnf0.0.6/include/linux/netfilter.h Tue Feb 26 04:53:26 2002
+@@ -117,17 +117,23 @@
+ /* This is gross, but inline doesn't cut it for avoiding the function
+ call in fast path: gcc doesn't inline (needs value tracking?). --RR */
+ #ifdef CONFIG_NETFILTER_DEBUG
+-#define NF_HOOK nf_hook_slow
++#define NF_HOOK(pf, hook, skb, indev, outdev, okfn) \
++ nf_hook_slow((pf), (hook), (skb), (indev), (outdev), (okfn), INT_MIN)
++#define NF_HOOK_THRESH nf_hook_slow
+ #else
+ #define NF_HOOK(pf, hook, skb, indev, outdev, okfn) \
+ (list_empty(&nf_hooks[(pf)][(hook)]) \
+ ? (okfn)(skb) \
+- : nf_hook_slow((pf), (hook), (skb), (indev), (outdev), (okfn)))
++ : nf_hook_slow((pf), (hook), (skb), (indev), (outdev), (okfn), INT_MIN))
++#define NF_HOOK_THRESH(pf, hook, skb, indev, outdev, okfn, thresh) \
++(list_empty(&nf_hooks[(pf)][(hook)]) \
++ ? (okfn)(skb) \
++ : nf_hook_slow((pf), (hook), (skb), (indev), (outdev), (okfn), (thresh)))
+ #endif
+
+ int nf_hook_slow(int pf, unsigned int hook, struct sk_buff *skb,
+ struct net_device *indev, struct net_device *outdev,
+- int (*okfn)(struct sk_buff *));
++ int (*okfn)(struct sk_buff *), int thresh);
+
+ /* Call setsockopt() */
+ int nf_setsockopt(struct sock *sk, int pf, int optval, char *opt,
+--- linux-2.4.18/include/linux/netfilter_ipv4.h Mon Feb 25 20:38:13 2002
++++ linux-2.4.18-brnf0.0.6/include/linux/netfilter_ipv4.h Tue Feb 26 04:53:57 2002
+@@ -54,6 +54,7 @@
+ NF_IP_PRI_CONNTRACK = -200,
+ NF_IP_PRI_MANGLE = -150,
+ NF_IP_PRI_NAT_DST = -100,
++ NF_IP_PRI_BRIDGE_SABOTAGE = -50,
+ NF_IP_PRI_FILTER = 0,
+ NF_IP_PRI_NAT_SRC = 100,
+ NF_IP_PRI_LAST = INT_MAX,
+--- linux-2.4.18/include/linux/skbuff.h Thu Nov 22 20:46:26 2001
++++ linux-2.4.18-brnf0.0.6/include/linux/skbuff.h Tue Feb 26 04:53:47 2002
+@@ -135,6 +135,8 @@
+ struct sock *sk; /* Socket we are owned by */
+ struct timeval stamp; /* Time we arrived */
+ struct net_device *dev; /* Device we arrived on/are leaving by */
++ struct net_device *physindev; /* Physical device we arrived on */
++ struct net_device *physoutdev; /* Physical device we will leave by */
+
+ /* Transport layer header */
+ union
+--- linux-2.4.18/net/bridge/br.c Mon Feb 25 20:38:14 2002
++++ linux-2.4.18-brnf0.0.6/net/bridge/br.c Tue Feb 26 04:54:47 2002
+@@ -42,6 +42,11 @@
+ {
+ printk(KERN_INFO "NET4: Ethernet Bridge 008 for NET4.0\n");
+
++#ifdef CONFIG_BRIDGE_NF
++ if (br_netfilter_init())
++ return 1;
++#endif
++
+ br_handle_frame_hook = br_handle_frame;
+ br_ioctl_hook = br_ioctl_deviceless_stub;
+ #if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
+@@ -65,6 +70,9 @@
+
+ static void __exit br_deinit(void)
+ {
++#ifdef CONFIG_BRIDGE_NF
++ br_netfilter_fini();
++#endif
+ unregister_netdevice_notifier(&br_device_notifier);
+ br_call_ioctl_atomic(__br_clear_ioctl_hook);
+ net_call_rx_atomic(__br_clear_frame_hook);
+--- linux-2.4.18/net/bridge/br_forward.c Wed Aug 15 10:54:35 2001
++++ linux-2.4.18-brnf0.0.6/net/bridge/br_forward.c Tue Feb 26 04:54:15 2002
+@@ -30,7 +30,7 @@
+ return 1;
+ }
+
+-static int __dev_queue_push_xmit(struct sk_buff *skb)
++int br_dev_queue_push_xmit(struct sk_buff *skb)
+ {
+ skb_push(skb, ETH_HLEN);
+ dev_queue_xmit(skb);
+@@ -38,10 +38,10 @@
+ return 0;
+ }
+
+-static int __br_forward_finish(struct sk_buff *skb)
++int br_forward_finish(struct sk_buff *skb)
+ {
+ NF_HOOK(PF_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev,
+- __dev_queue_push_xmit);
++ br_dev_queue_push_xmit);
+
+ return 0;
+ }
+@@ -54,7 +54,7 @@
+ skb->dev = to->dev;
+
+ NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, indev, skb->dev,
+- __br_forward_finish);
++ br_forward_finish);
+ }
+
+ static void __br_forward(struct net_bridge_port *to, struct sk_buff *skb)
+@@ -65,7 +65,7 @@
+ skb->dev = to->dev;
+
+ NF_HOOK(PF_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev,
+- __br_forward_finish);
++ br_forward_finish);
+ }
+
+ /* called under bridge lock */
+--- linux-2.4.18/net/bridge/br_input.c Mon Feb 25 20:38:14 2002
++++ linux-2.4.18-brnf0.0.6/net/bridge/br_input.c Tue Feb 26 04:54:24 2002
+@@ -46,7 +46,7 @@
+ br_pass_frame_up_finish);
+ }
+
+-static int br_handle_frame_finish(struct sk_buff *skb)
++int br_handle_frame_finish(struct sk_buff *skb)
+ {
+ struct net_bridge *br;
+ unsigned char *dest;
+--- linux-2.4.18/net/bridge/br_private.h Mon Feb 25 20:38:14 2002
++++ linux-2.4.18-brnf0.0.6/net/bridge/br_private.h Tue Feb 26 04:54:47 2002
+@@ -120,6 +120,7 @@
+ extern void br_inc_use_count(void);
+
+ /* br_device.c */
++extern int br_dev_xmit(struct sk_buff *skb, struct net_device *dev);
+ extern void br_dev_setup(struct net_device *dev);
+ extern int br_dev_xmit(struct sk_buff *skb, struct net_device *dev);
+
+@@ -144,8 +145,10 @@
+ /* br_forward.c */
+ extern void br_deliver(struct net_bridge_port *to,
+ struct sk_buff *skb);
++extern int br_dev_queue_push_xmit(struct sk_buff *skb);
+ extern void br_forward(struct net_bridge_port *to,
+ struct sk_buff *skb);
++extern int br_forward_finish(struct sk_buff *skb);
+ extern void br_flood_deliver(struct net_bridge *br,
+ struct sk_buff *skb,
+ int clone);
+@@ -166,6 +169,7 @@
+ int *ifindices);
+
+ /* br_input.c */
++extern int br_handle_frame_finish(struct sk_buff *skb);
+ extern void br_handle_frame(struct sk_buff *skb);
+
+ /* br_ioctl.c */
+@@ -177,6 +181,10 @@
+ unsigned long arg2);
+ extern int br_ioctl_deviceless_stub(unsigned long arg);
+
++/* br_netfilter.c */
++extern int br_netfilter_init(void);
++extern void br_netfilter_fini(void);
++
+ /* br_stp.c */
+ extern int br_is_root_bridge(struct net_bridge *br);
+ extern struct net_bridge_port *br_get_port(struct net_bridge *br,
+--- linux-2.4.18/net/bridge/Makefile Fri Dec 29 23:07:24 2000
++++ linux-2.4.18-brnf0.0.6/net/bridge/Makefile Tue Feb 26 04:54:47 2002
+@@ -13,4 +13,6 @@
+ br_stp_if.o br_stp_timer.o
+ obj-m := $(O_TARGET)
+
++obj-$(CONFIG_BRIDGE_NF) += br_netfilter.o
++
+ include $(TOPDIR)/Rules.make
+--- linux-2.4.18/net/Config.in Mon Feb 25 20:38:14 2002
++++ linux-2.4.18-brnf0.0.6/net/Config.in Tue Feb 26 04:54:47 2002
+@@ -61,6 +61,9 @@
+ fi
+ dep_tristate '802.1d Ethernet Bridging' CONFIG_BRIDGE $CONFIG_INET
+ if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
++ if [ "$CONFIG_BRIDGE" != "n" -a "$CONFIG_NETFILTER" != "n" ]; then
++ bool ' netfilter (firewalling) support' CONFIG_BRIDGE_NF
++ fi
+ tristate 'CCITT X.25 Packet Layer (EXPERIMENTAL)' CONFIG_X25
+ tristate 'LAPB Data Link Driver (EXPERIMENTAL)' CONFIG_LAPB
+ bool '802.2 LLC (EXPERIMENTAL)' CONFIG_LLC
+--- linux-2.4.18/net/core/netfilter.c Mon Feb 25 20:38:14 2002
++++ linux-2.4.18-brnf0.0.6/net/core/netfilter.c Tue Feb 26 04:53:47 2002
+@@ -343,10 +343,15 @@
+ const struct net_device *indev,
+ const struct net_device *outdev,
+ struct list_head **i,
+- int (*okfn)(struct sk_buff *))
++ int (*okfn)(struct sk_buff *),
++ int hook_thresh)
+ {
+ for (*i = (*i)->next; *i != head; *i = (*i)->next) {
+ struct nf_hook_ops *elem = (struct nf_hook_ops *)*i;
++
++ if (hook_thresh > elem->priority)
++ continue;
++
+ switch (elem->hook(hook, skb, indev, outdev, okfn)) {
+ case NF_QUEUE:
+ return NF_QUEUE;
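The hunk above is the core of the patch's re-entry trick: nf_iterate() now skips every hook whose priority is below hook_thresh, so a packet can be fed back into a chain part-way through without re-running the callbacks that already saw it. A minimal user-space sketch of that skip rule, using made-up hook entries rather than the real nf_hook_ops list:

#include <limits.h>
#include <stdio.h>

/* Stand-in for struct nf_hook_ops: only a name and a priority. */
struct fake_hook {
        const char *name;
        int priority;
};

/* Walk the chain in order; entries below the threshold are skipped,
 * which is the same test the patched nf_iterate() adds. */
static void run_hooks(const struct fake_hook *h, int n, int thresh)
{
        int i;

        for (i = 0; i < n; i++) {
                if (thresh > h[i].priority)
                        continue;
                printf("  calling %s (prio %d)\n", h[i].name, h[i].priority);
        }
}

int main(void)
{
        struct fake_hook chain[] = {
                { "conntrack", -200 }, { "mangle", -150 }, { "filter", 0 },
        };

        printf("NF_HOOK, thresh INT_MIN:\n");
        run_hooks(chain, 3, INT_MIN);
        printf("re-entry with thresh 1 (everything below prio 1 is skipped):\n");
        run_hooks(chain, 3, 1);
        return 0;
}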
+@@ -414,6 +419,8 @@
+ {
+ int status;
+ struct nf_info *info;
++ struct net_device *physindev;
++ struct net_device *physoutdev;
+
+ if (!queue_handler[pf].outfn) {
+ kfree_skb(skb);
+@@ -436,11 +443,16 @@
+ if (indev) dev_hold(indev);
+ if (outdev) dev_hold(outdev);
+
++ if ((physindev = skb->physindev)) dev_hold(physindev);
++ if ((physoutdev = skb->physoutdev)) dev_hold(physoutdev);
++
+ status = queue_handler[pf].outfn(skb, info, queue_handler[pf].data);
+ if (status < 0) {
+ /* James M doesn't say fuck enough. */
+ if (indev) dev_put(indev);
+ if (outdev) dev_put(outdev);
++ if (physindev) dev_put(physindev);
++ if (physoutdev) dev_put(physoutdev);
+ kfree(info);
+ kfree_skb(skb);
+ return;
+@@ -450,7 +462,8 @@
+ int nf_hook_slow(int pf, unsigned int hook, struct sk_buff *skb,
+ struct net_device *indev,
+ struct net_device *outdev,
+- int (*okfn)(struct sk_buff *))
++ int (*okfn)(struct sk_buff *),
++ int hook_thresh)
+ {
+ struct list_head *elem;
+ unsigned int verdict;
+@@ -482,7 +495,7 @@
+
+ elem = &nf_hooks[pf][hook];
+ verdict = nf_iterate(&nf_hooks[pf][hook], &skb, hook, indev,
+- outdev, &elem, okfn);
++ outdev, &elem, okfn, hook_thresh);
+ if (verdict == NF_QUEUE) {
+ NFDEBUG("nf_hook: Verdict = QUEUE.\n");
+ nf_queue(skb, elem, pf, hook, indev, outdev, okfn);
+@@ -531,7 +544,7 @@
+ verdict = nf_iterate(&nf_hooks[info->pf][info->hook],
+ &skb, info->hook,
+ info->indev, info->outdev, &elem,
+- info->okfn);
++ info->okfn, INT_MIN);
+ }
+
+ switch (verdict) {
+--- linux-2.4.18/net/core/skbuff.c Fri Dec 21 18:42:05 2001
++++ linux-2.4.18-brnf0.0.6/net/core/skbuff.c Tue Feb 26 04:53:47 2002
+@@ -231,6 +231,8 @@
+ skb->sk = NULL;
+ skb->stamp.tv_sec=0; /* No idea about time */
+ skb->dev = NULL;
++ skb->physindev = NULL;
++ skb->physoutdev = NULL;
+ skb->dst = NULL;
+ memset(skb->cb, 0, sizeof(skb->cb));
+ skb->pkt_type = PACKET_HOST; /* Default type */
+@@ -362,6 +364,8 @@
+ n->sk = NULL;
+ C(stamp);
+ C(dev);
++ C(physindev);
++ C(physoutdev);
+ C(h);
+ C(nh);
+ C(mac);
+@@ -417,6 +421,8 @@
+ new->list=NULL;
+ new->sk=NULL;
+ new->dev=old->dev;
++ new->physindev=old->physindev;
++ new->physoutdev=old->physoutdev;
+ new->priority=old->priority;
+ new->protocol=old->protocol;
+ new->dst=dst_clone(old->dst);
+--- linux-2.4.18/net/ipv4/ip_output.c Tue Feb 26 04:52:58 2002
++++ linux-2.4.18-brnf0.0.6/net/ipv4/ip_output.c Tue Feb 26 04:53:47 2002
+@@ -819,6 +819,8 @@
+ skb_set_owner_w(skb2, skb->sk);
+ skb2->dst = dst_clone(skb->dst);
+ skb2->dev = skb->dev;
++ skb2->physindev = skb->physindev;
++ skb2->physoutdev = skb->physoutdev;
+
+ /*
+ * Copy the packet header into the new buffer.
+@@ -882,6 +884,7 @@
+ iph->tot_len = htons(len + hlen);
+
+ ip_send_check(iph);
++ memcpy(skb2->data - 16, skb->data - 16, 16);
+
+ err = output(skb2);
+ if (err)
+--- linux-2.4.18/net/ipv4/netfilter/ip_tables.c Mon Feb 25 20:38:14 2002
++++ linux-2.4.18-brnf0.0.6/net/ipv4/netfilter/ip_tables.c Tue Feb 26 04:53:52 2002
+@@ -121,12 +121,15 @@
+ static inline int
+ ip_packet_match(const struct iphdr *ip,
+ const char *indev,
++ const char *physindev,
+ const char *outdev,
++ const char *physoutdev,
+ const struct ipt_ip *ipinfo,
+ int isfrag)
+ {
+ size_t i;
+ unsigned long ret;
++ unsigned long ret2;
+
+ #define FWINV(bool,invflg) ((bool) ^ !!(ipinfo->invflags & invflg))
+
+@@ -156,7 +159,13 @@
+ & ((const unsigned long *)ipinfo->iniface_mask)[i];
+ }
+
+- if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
++ for (i = 0, ret2 = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
++ ret2 |= (((const unsigned long *)physindev)[i]
++ ^ ((const unsigned long *)ipinfo->iniface)[i])
++ & ((const unsigned long *)ipinfo->iniface_mask)[i];
++ }
++
++ if (FWINV(ret != 0 && ret2 != 0, IPT_INV_VIA_IN)) {
+ dprintf("VIA in mismatch (%s vs %s).%s\n",
+ indev, ipinfo->iniface,
+ ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
+@@ -169,7 +178,13 @@
+ & ((const unsigned long *)ipinfo->outiface_mask)[i];
+ }
+
+- if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
++ for (i = 0, ret2 = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
++ ret2 |= (((const unsigned long *)physoutdev)[i]
++ ^ ((const unsigned long *)ipinfo->outiface)[i])
++ & ((const unsigned long *)ipinfo->outiface_mask)[i];
++ }
++
++ if (FWINV(ret != 0 && ret2 != 0, IPT_INV_VIA_OUT)) {
+ dprintf("VIA out mismatch (%s vs %s).%s\n",
+ outdev, ipinfo->outiface,
+ ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
+@@ -268,6 +283,7 @@
+ /* Initializing verdict to NF_DROP keeps gcc happy. */
+ unsigned int verdict = NF_DROP;
+ const char *indev, *outdev;
++ const char *physindev, *physoutdev;
+ void *table_base;
+ struct ipt_entry *e, *back;
+
+@@ -277,6 +293,9 @@
+ datalen = (*pskb)->len - ip->ihl * 4;
+ indev = in ? in->name : nulldevname;
+ outdev = out ? out->name : nulldevname;
++ physindev = (*pskb)->physindev ? (*pskb)->physindev->name : nulldevname;
++ physoutdev = (*pskb)->physoutdev ? (*pskb)->physoutdev->name : nulldevname;
++
+ /* We handle fragments by dealing with the first fragment as
+ * if it was a normal packet. All other fragments are treated
+ * normally, except that they will NEVER match rules that ask
+@@ -312,7 +331,7 @@
+ IP_NF_ASSERT(e);
+ IP_NF_ASSERT(back);
+ (*pskb)->nfcache |= e->nfcache;
+- if (ip_packet_match(ip, indev, outdev, &e->ip, offset)) {
++ if (ip_packet_match(ip, indev, physindev, outdev, physoutdev, &e->ip, offset)) {
+ struct ipt_entry_target *t;
+
+ if (IPT_MATCH_ITERATE(e, do_match,
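The widened ip_packet_match() above reports a VIA mismatch only when both the logical device name (the bridge) and the physical port name fail the masked comparison, which is why rules written against either "br0" or "eth0" can match bridged traffic. A small user-space sketch of that dual check, done byte-wise instead of the kernel's word-wise loop, with invented device names:

#include <stdio.h>
#include <string.h>

#define IFNAMSIZ 16     /* assumed to match linux/if.h */

/* Masked name comparison: non-zero means mismatch. */
static unsigned long iface_mismatch(const char *dev, const char *rule,
                                    const unsigned char *mask)
{
        unsigned long ret = 0;
        int i;

        for (i = 0; i < IFNAMSIZ; i++)
                ret |= (dev[i] ^ rule[i]) & mask[i];
        return ret;
}

int main(void)
{
        /* Rule says "-i br0"; the frame really arrived on port eth0. */
        char rule[IFNAMSIZ] = "br0";
        char indev[IFNAMSIZ] = "br0", physindev[IFNAMSIZ] = "eth0";
        unsigned char mask[IFNAMSIZ];

        memset(mask, 0xff, sizeof(mask)); /* all bytes significant; both
                                             buffers are zero padded */

        /* Patched logic: only a mismatch if *both* names fail. */
        if (iface_mismatch(indev, rule, mask) &&
            iface_mismatch(physindev, rule, mask))
                printf("packet does not match -i br0\n");
        else
                printf("packet matches -i br0\n");
        return 0;
}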
+--- linux-2.4.18/net/ipv4/netfilter/ipt_LOG.c Mon Feb 25 20:38:14 2002
++++ linux-2.4.18-brnf0.0.6/net/ipv4/netfilter/ipt_LOG.c Tue Feb 26 04:54:03 2002
+@@ -285,10 +285,13 @@
+ level_string[1] = '0' + (loginfo->level % 8);
+ spin_lock_bh(&log_lock);
+ printk(level_string);
+- printk("%sIN=%s OUT=%s ",
+- loginfo->prefix,
+- in ? in->name : "",
+- out ? out->name : "");
++ printk("%sIN=%s ", loginfo->prefix, in ? in->name : "");
++ if ((*pskb)->physindev && in != (*pskb)->physindev)
++ printk("PHYSIN=%s ", (*pskb)->physindev->name);
++ printk("OUT=%s ", out ? out->name : "");
++ if ((*pskb)->physoutdev && out != (*pskb)->physoutdev)
++ printk("PHYSOUT=%s ", (*pskb)->physoutdev->name);
++
+ if (in && !out) {
+ /* MAC logging for input chain only. */
+ printk("MAC=");
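The ipt_LOG change above keeps IN=/OUT= pointing at the logical devices and only adds PHYSIN=/PHYSOUT= when a physical bridge port differs from them. A tiny user-space reproduction of just that prefix logic (device names are invented, and a string comparison stands in for the kernel's pointer check):

#include <stdio.h>
#include <string.h>

/* Mirror of the patched prefix printing in ipt_LOG.c. */
static void log_prefix(const char *prefix,
                       const char *in, const char *physin,
                       const char *out, const char *physout)
{
        printf("%sIN=%s ", prefix, in ? in : "");
        if (physin && (!in || strcmp(in, physin)))
                printf("PHYSIN=%s ", physin);
        printf("OUT=%s ", out ? out : "");
        if (physout && (!out || strcmp(out, physout)))
                printf("PHYSOUT=%s ", physout);
        printf("\n");
}

int main(void)
{
        /* Purely bridged frame: iptables sees br0, the ports go to PHYS*. */
        log_prefix("bridge-fw: ", "br0", "eth0", "br0", "eth1");
        /* Ordinary routed packet: no physical devices, output unchanged. */
        log_prefix("routed: ", "eth2", NULL, "eth3", NULL);
        return 0;
}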
+--- /dev/null Thu Aug 24 11:00:32 2000
++++ bridge-nf-0.0.7-bds/net/bridge/br_netfilter.c Fri Apr 26 22:56:26 2002
+@@ -0,0 +1,565 @@
++/*
++ * Handle firewalling
++ * Linux ethernet bridge
++ *
++ * Authors:
++ * Lennert Buytenhek <buytenh@gnu.org>
++ * Bart De Schuymer <bart.de.schuymer@pandora.be>
++ *
++ * $Id: bridge-nf-0.0.7-bds-against-2.4.18.diff,v 1.1 2002/06/01 19:23:59 bdschuym Exp $
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ *
++ * Lennert dedicates this file to Kerstin Wurdinger.
++ */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/ip.h>
++#include <linux/netdevice.h>
++#include <linux/skbuff.h>
++#include <linux/if_ether.h>
++#include <linux/netfilter_bridge.h>
++#include <linux/netfilter_ipv4.h>
++#include <linux/in_route.h>
++#include <net/ip.h>
++#include <net/tcp.h>
++#include <asm/uaccess.h>
++#include <asm/checksum.h>
++#include "br_private.h"
++
++
++#ifndef WE_REALLY_INSIST_ON_NOT_HAVING_NAT_SUPPORT
++/* As the original source/destination addresses are variables private to this
++ * file, we store them in unused space at the end of the control buffer.
++ * On 64-bit platforms the TCP control buffer size still leaves us 8 bytes
++ * of space at the end, so that fits. Usage of the original source address
++ * and the original destination address never overlaps (daddr is needed
++ * around PRE_ROUTING, and saddr around POST_ROUTING), so that's okay as
++ * well.
++ */
++#define skb_origaddr(skb) (*((u32 *)((skb)->cb + sizeof((skb)->cb) - 4)))
++
++#define store_orig_dstaddr(skb) (skb_origaddr(skb) = (skb)->nh.iph->daddr)
++#define store_orig_srcaddr(skb) (skb_origaddr(skb) = (skb)->nh.iph->saddr)
++#define dnat_took_place(skb) (skb_origaddr(skb) != (skb)->nh.iph->daddr)
++#define snat_took_place(skb) (skb_origaddr(skb) != (skb)->nh.iph->saddr)
++#else
++#define store_orig_dstaddr(skb)
++#define store_orig_srcaddr(skb)
++#define dnat_took_place(skb) (0)
++#define snat_took_place(skb) (0)
++#endif
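The macros above give the patch NAT detection without adding fields to struct sk_buff: the original destination (or source) address is parked in the last four bytes of skb->cb before the IPv4 hook runs and compared with the header afterwards. A stripped-down sketch of the same trick against a toy structure (the 48-byte buffer and the field names are stand-ins, not the real sk_buff layout):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy packet: a control buffer and an IPv4 destination address. */
struct toy_skb {
        unsigned char cb[48];
        uint32_t daddr;
};

#define skb_origaddr(skb) \
        (*(uint32_t *)((skb)->cb + sizeof((skb)->cb) - sizeof(uint32_t)))
#define store_orig_dstaddr(skb) (skb_origaddr(skb) = (skb)->daddr)
#define dnat_took_place(skb)    (skb_origaddr(skb) != (skb)->daddr)

int main(void)
{
        struct toy_skb skb;

        memset(&skb, 0, sizeof(skb));
        skb.daddr = 0x0a000001;         /* destination before PRE_ROUTING */
        store_orig_dstaddr(&skb);

        skb.daddr = 0xc0a80001;         /* a DNAT rule rewrote it */
        printf("DNAT detected: %s\n", dnat_took_place(&skb) ? "yes" : "no");
        return 0;
}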
++
++
++#define has_bridge_parent(device) ((device)->br_port != NULL)
++#define bridge_parent(device) (&((device)->br_port->br->dev))
++
++
++/* As opposed to the DNAT case, for the SNAT case it's not quite
++ * clear what we should do with ethernet addresses in NAT'ed
++ * packets. Use this heuristic for now.
++ */
++static inline void __maybe_fixup_src_address(struct sk_buff *skb)
++{
++ if (snat_took_place(skb) &&
++ inet_addr_type(skb->nh.iph->saddr) == RTN_LOCAL) {
++ memcpy(skb->mac.ethernet->h_source,
++ bridge_parent(skb->dev)->dev_addr,
++ ETH_ALEN);
++ }
++}
++
++
++/* We need these fake structures to make netfilter happy --
++ * lots of places assume that skb->dst != NULL, which isn't
++ * all that unreasonable.
++ *
++ * Currently, we fill in the PMTU entry because netfilter
++ * refragmentation needs it, and the rt_flags entry because
++ * ipt_REJECT needs it. Future netfilter modules might
++ * require us to fill additional fields.
++ */
++static struct net_device __fake_net_device = {
++ hard_header_len: ETH_HLEN
++};
++
++static struct rtable __fake_rtable = {
++ u: {
++ dst: {
++ __refcnt: ATOMIC_INIT(1),
++ dev: &__fake_net_device,
++ pmtu: 1500
++ }
++ },
++
++ rt_flags: 0
++};
++
++
++/* PF_BRIDGE/PRE_ROUTING *********************************************/
++static void __br_dnat_complain(void)
++{
++ static unsigned long last_complaint = 0;
++
++ if (jiffies - last_complaint >= 5 * HZ) {
++ printk(KERN_WARNING "Performing cross-bridge DNAT requires IP "
++ "forwarding to be enabled\n");
++ last_complaint = jiffies;
++ }
++}
++
++
++static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
++{
++ skb->dev = bridge_parent(skb->dev);
++ skb->dst->output(skb);
++ return 0;
++}
++
++/* This requires some explaining. If DNAT has taken place,
++ * we will need to fix up the destination ethernet address,
++ * and this is a tricky process.
++ *
++ * There are two cases to consider:
++ * 1. The packet was DNAT'ed to a device in the same bridge
++ * port group as it was received on. We can still bridge
++ * the packet.
++ * 2. The packet was DNAT'ed to a different device, either
++ * a non-bridged device or another bridge port group.
++ * The packet will need to be routed.
++ *
++ * The way to distinguish between the two is by calling ip_route_input()
++ * and looking at skb->dst->dev, which ip_route_input() sets to the
++ * destination device when it succeeds.
++ *
++ * Let us first consider ip_route_input() succeeds:
++ *
++ * If skb->dst->dev equals the logical bridge device the packet came in on,
++ * we can consider this bridging. We then call skb->dst->output() which will
++ * make the packet enter br_nf_local_out() not much later. In that function
++ * it is assured that the iptables FORWARD chain is traversed for the packet.
++ *
++ * Else, the packet is considered to be routed and we just change the
++ * destination MAC address so that the packet will later be passed up to the ip
++ * stack to be routed.
++ *
++ * Let us now consider ip_route_input() fails:
++ *
++ * After an "echo '0' > /proc/sys/net/ipv4/ip_forward" ip_route_input() will
++ * fail, while ip_route_output() will return success. The source address
++ * for ip_route_output() is set to zero, so ip_route_output()
++ * thinks we're handling a locally generated packet and won't care if
++ * ip forwarding is allowed. We send a warning message to the user's log
++ * telling her to put ip forwarding on.
++ *
++ * ip_route_input() will also fail if there is no route available. Then we just
++ * drop the packet.
++ *
++ * The other special thing happening here is putting skb->physoutdev on
++ * &__fake_net_device (resp. NULL) for bridged (resp. routed) packets. This is
++ * needed so that br_nf_local_out() can know that it has to give the packets to
++ * the BR_NF_FORWARD (resp. BR_NF_LOCAL_OUT) bridge hook. See that function.
++ * --Lennert, 20020411
++ * --Bart, 20020416 (updated)
++ */
++
++static int br_nf_pre_routing_finish(struct sk_buff *skb)
++{
++ struct net_device *dev = skb->dev;
++ struct iphdr *iph = skb->nh.iph;
++
++ if (dnat_took_place(skb)) {
++ if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev)) {
++ struct rtable *rt;
++
++ if (!ip_route_output(&rt, iph->daddr, 0, iph->tos, 0)) {
++ // bridged dnated traffic isn't dependent on
++ // disabled ip_forwarding
++ if (((struct dst_entry *)rt)->dev == dev) {
++ skb->dst = (struct dst_entry *)rt;
++ goto bridged_dnat;
++ }
++ __br_dnat_complain();
++ dst_release((struct dst_entry *)rt);
++ }
++ kfree_skb(skb);
++ return 0;
++ } else {
++ if (skb->dst->dev == dev) {
++bridged_dnat:
++ // tell br_nf_local_out this is a bridged frame
++ skb->physoutdev = &__fake_net_device;
++ skb->dev = skb->physindev;
++ NF_HOOK_THRESH(PF_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
++ br_nf_pre_routing_finish_bridge, 1);
++ return 0;
++ }
++ // tell br_nf_local_out this is a routed frame
++ skb->physoutdev = NULL;
++ memcpy(skb->mac.ethernet->h_dest, dev->dev_addr, ETH_ALEN);
++ }
++ } else {
++ skb->dst = (struct dst_entry *)&__fake_rtable;
++ dst_hold(skb->dst);
++ }
++
++ skb->dev = skb->physindev;
++ NF_HOOK_THRESH(PF_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
++ br_handle_frame_finish, 1);
++
++ return 0;
++}
++
++/* Replicate the checks that IPv4 does on packet reception.
++ * Set skb->dev to the bridge device (i.e. parent of the
++ * receiving device) to make netfilter happy, the REDIRECT
++ * target in particular. Save the original destination IP
++ * address to be able to detect DNAT afterwards.
++ */
++static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *))
++{
++ struct iphdr *iph;
++ __u32 len;
++ struct sk_buff *skb;
++
++ if ((*pskb)->protocol != __constant_htons(ETH_P_IP))
++ return NF_ACCEPT;
++
++ if ((skb = skb_share_check(*pskb, GFP_ATOMIC)) == NULL)
++ goto out;
++
++ if (!pskb_may_pull(skb, sizeof(struct iphdr)))
++ goto inhdr_error;
++
++ iph = skb->nh.iph;
++ if (iph->ihl < 5 || iph->version != 4)
++ goto inhdr_error;
++
++ if (!pskb_may_pull(skb, 4*iph->ihl))
++ goto inhdr_error;
++
++ iph = skb->nh.iph;
++ if (ip_fast_csum((__u8 *)iph, iph->ihl) != 0)
++ goto inhdr_error;
++
++ len = ntohs(iph->tot_len);
++ if (skb->len < len || len < 4*iph->ihl)
++ goto inhdr_error;
++
++ if (skb->len > len) {
++ __pskb_trim(skb, len);
++ if (skb->ip_summed == CHECKSUM_HW)
++ skb->ip_summed = CHECKSUM_NONE;
++ }
++
++ skb->physindev = skb->dev;
++ skb->dev = bridge_parent(skb->dev);
++ if (skb->pkt_type == PACKET_OTHERHOST)
++ skb->pkt_type = PACKET_HOST;
++ store_orig_dstaddr(skb);
++ NF_HOOK(PF_INET, NF_IP_PRE_ROUTING, skb, skb->dev, NULL,
++ br_nf_pre_routing_finish);
++
++ return NF_STOLEN;
++
++inhdr_error:
++// IP_INC_STATS_BH(IpInHdrErrors);
++out:
++ return NF_DROP;
++}
++
++
++/* PF_BRIDGE/LOCAL_IN ************************************************/
++/* The packet is locally destined, which requires a real
++ * dst_entry, so detach the fake one. On the way up, the
++ * packet would pass through PRE_ROUTING again (which already
++ * took place when the packet entered the bridge), but we
++ * register an IPv4 PRE_ROUTING 'sabotage' hook that will
++ * prevent this from happening.
++ */
++static unsigned int br_nf_local_in(unsigned int hook, struct sk_buff **pskb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *))
++{
++ struct sk_buff *skb = *pskb;
++
++ if (skb->protocol != __constant_htons(ETH_P_IP))
++ return NF_ACCEPT;
++
++ if (skb->dst == (struct dst_entry *)&__fake_rtable) {
++ dst_release(skb->dst);
++ skb->dst = NULL;
++ }
++
++ return NF_ACCEPT;
++}
++
++
++/* PF_BRIDGE/FORWARD *************************************************/
++static int br_nf_forward_finish(struct sk_buff *skb)
++{
++ NF_HOOK_THRESH(PF_BRIDGE, NF_BR_FORWARD, skb, skb->physindev,
++ skb->dev, br_forward_finish, 1);
++
++ return 0;
++}
++
++/* This is the 'purely bridged' case. We pass the packet to
++ * netfilter with indev and outdev set to the bridge device,
++ * but we are still able to filter on the 'real' indev/outdev
++ * because another bit of the bridge-nf patch overloads the
++ * '-i' and '-o' iptables interface checks to take
++ * skb->phys{in,out}dev into account as well (so both the real
++ * device and the bridge device will match).
++ */
++static unsigned int br_nf_forward(unsigned int hook, struct sk_buff **pskb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *))
++{
++ struct sk_buff *skb = *pskb;
++
++ // don't mess with non-ip frames, also don't mess with the ip-packets
++ // when br_nf_local_out_finish explicitly says so.
++ if (skb->protocol != __constant_htons(ETH_P_IP) || skb->physindev == NULL)
++ return NF_ACCEPT;
++
++ skb->physoutdev = skb->dev;
++ NF_HOOK(PF_INET, NF_IP_FORWARD, skb, bridge_parent(skb->physindev),
++ bridge_parent(skb->dev), br_nf_forward_finish);
++
++ return NF_STOLEN;
++}
++
++
++/* PF_BRIDGE/LOCAL_OUT ***********************************************/
++static int br_nf_local_out_finish_forward(struct sk_buff *skb)
++{
++ struct net_device *dev;
++
++ dev = skb->physindev;
++ // tell br_nf_forward to stay away
++ skb->physindev = NULL;
++ NF_HOOK(PF_BRIDGE, NF_BR_FORWARD, skb, dev, skb->dev,
++ br_forward_finish);
++
++ return 0;
++}
++
++static int br_nf_local_out_finish(struct sk_buff *skb)
++{
++ NF_HOOK_THRESH(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
++ br_forward_finish, INT_MIN + 1);
++
++ return 0;
++}
++
++
++/* This hook sees both locally originated IP packets and forwarded
++ * IP packets (in both cases the destination device is a bridge
++ * device). For the sake of interface transparency (i.e. properly
++ * overloading the '-o' option), we steal packets destined to
++ * a bridge device away from the IPv4 FORWARD and OUTPUT hooks,
++ * and reinject them later, when we have determined the real
++ * output device. This reinjecting happens here.
++ *
++ * If skb->physindev is NULL, the bridge-nf code never touched
++ * this packet before, and so the packet was locally originated.
++ * We call the IPv4 LOCAL_OUT hook.
++ *
++ * If skb->physindev isn't NULL, there are two cases:
++ * 1. The packet was IP routed.
++ * 2. The packet was cross-bridge DNAT'ed (see the comment near
++ * PF_BRIDGE/PRE_ROUTING).
++ * In both cases, we call the IPv4 FORWARD hook. In case 1,
++ * if the packet originally came from a bridge device, and in
++ * case 2, skb->physindev will have a bridge device as parent,
++ * so we use that parent device as indev. Otherwise, we just
++ * use physindev.
++ *
++ * If skb->physoutdev == NULL the bridge code never touched the
++ * packet or the packet was routed in br_nf_pre_routing_finish().
++ * We give the packet to the bridge NF_BR_LOCAL_OUT hook.
++ * If not, the packet is actually a bridged one so we give it to
++ * the NF_BR_FORWARD hook.
++ */
++
++static unsigned int br_nf_local_out(unsigned int hook, struct sk_buff **pskb, const struct net_device *in, const struct net_device *out, int (*_okfn)(struct sk_buff *))
++{
++ int hookno;
++ int (*okfn)(struct sk_buff *skb);
++ struct net_device *realindev;
++ struct sk_buff *skb = *pskb;
++
++ if (skb->protocol != __constant_htons(ETH_P_IP))
++ return NF_ACCEPT;
++
++ /* Sometimes we get packets with NULL ->dst here (for example,
++ * running a dhcp client daemon triggers this).
++ */
++ if (skb->dst == NULL)
++ return NF_ACCEPT;
++
++ // bridged, take forward
++ // (see big note in front of br_nf_pre_routing_finish)
++ if (skb->physoutdev == &__fake_net_device) {
++ okfn = br_nf_local_out_finish_forward;
++ } else if (skb->physoutdev == NULL) {
++ // non-bridged: routed or locally generated traffic, take local_out
++ // (see big note in front of br_nf_pre_routing_finish)
++ okfn = br_nf_local_out_finish;
++ } else {
++ printk("ARGH: bridge_or_routed hack doesn't work\n");
++ okfn = br_nf_local_out_finish;
++ }
++
++ skb->physoutdev = skb->dev;
++
++ hookno = NF_IP_LOCAL_OUT;
++ if ((realindev = skb->physindev) != NULL) {
++ hookno = NF_IP_FORWARD;
++ if (has_bridge_parent(realindev))
++ realindev = bridge_parent(realindev);
++ }
++
++ NF_HOOK_THRESH(PF_INET, hookno, skb, realindev,
++ bridge_parent(skb->dev),
++ okfn,
++ NF_IP_PRI_BRIDGE_SABOTAGE + 1);
++
++ return NF_STOLEN;
++}
++
++
++/* PF_BRIDGE/POST_ROUTING ********************************************/
++static int br_nf_post_routing_finish(struct sk_buff *skb)
++{
++ __maybe_fixup_src_address(skb);
++ NF_HOOK_THRESH(PF_BRIDGE, NF_BR_POST_ROUTING, skb, NULL,
++ bridge_parent(skb->dev), br_dev_queue_push_xmit, 1);
++
++ return 0;
++}
++
++static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff **pskb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *))
++{
++ struct sk_buff *skb = *pskb;
++
++ /* Be very paranoid. */
++ if (skb->mac.raw < skb->head || skb->mac.raw + ETH_HLEN > skb->data) {
++ printk(KERN_CRIT "Argh!! Fuck me harder with a chainsaw. ");
++ if (skb->dev != NULL) {
++ printk("[%s]", skb->dev->name);
++ if (has_bridge_parent(skb->dev))
++ printk("[%s]", bridge_parent(skb->dev)->name);
++ }
++ printk("\n");
++ return NF_ACCEPT;
++ }
++
++ if (skb->protocol != __constant_htons(ETH_P_IP))
++ return NF_ACCEPT;
++
++ /* Sometimes we get packets with NULL ->dst here (for example,
++ * running a dhcp client daemon triggers this).
++ */
++ if (skb->dst == NULL)
++ return NF_ACCEPT;
++
++ store_orig_srcaddr(skb);
++ NF_HOOK(PF_INET, NF_IP_POST_ROUTING, skb, NULL,
++ bridge_parent(skb->dev), br_nf_post_routing_finish);
++
++ return NF_STOLEN;
++}
++
++
++/* IPv4/SABOTAGE *****************************************************/
++/* Don't hand locally destined packets to PF_INET/PRE_ROUTING
++ * for the second time. */
++static unsigned int ipv4_sabotage_in(unsigned int hook, struct sk_buff **pskb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *))
++{
++ if (in->hard_start_xmit == br_dev_xmit &&
++ okfn != br_nf_pre_routing_finish) {
++ okfn(*pskb);
++ return NF_STOLEN;
++ }
++
++ return NF_ACCEPT;
++}
++
++/* Postpone execution of PF_INET/FORWARD, PF_INET/LOCAL_OUT
++ * and PF_INET/POST_ROUTING until we have done the forwarding
++ * decision in the bridge code and have determined skb->physoutdev.
++ */
++static unsigned int ipv4_sabotage_out(unsigned int hook, struct sk_buff **pskb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *))
++{
++ if (out->hard_start_xmit == br_dev_xmit &&
++ okfn != br_nf_forward_finish &&
++ okfn != br_nf_local_out_finish &&
++ okfn != br_nf_post_routing_finish) {
++ struct sk_buff *skb = *pskb;
++
++ if (hook == NF_IP_FORWARD && skb->physindev == NULL)
++ skb->physindev = (struct net_device *)in;
++ okfn(skb);
++ return NF_STOLEN;
++ }
++
++ return NF_ACCEPT;
++}
++
++
++static struct nf_hook_ops br_nf_ops[] = {
++ { { NULL, NULL }, br_nf_pre_routing, PF_BRIDGE, NF_BR_PRE_ROUTING, 0 },
++ { { NULL, NULL }, br_nf_local_in, PF_BRIDGE, NF_BR_LOCAL_IN, 0 },
++ { { NULL, NULL }, br_nf_forward, PF_BRIDGE, NF_BR_FORWARD, 0 },
++ // we need INT_MIN, so innocent NF_BR_LOCAL_OUT functions don't
++ // get bridged traffic as input
++ { { NULL, NULL }, br_nf_local_out, PF_BRIDGE, NF_BR_LOCAL_OUT, INT_MIN },
++ { { NULL, NULL }, br_nf_post_routing, PF_BRIDGE, NF_BR_POST_ROUTING, 0 },
++
++ { { NULL, NULL }, ipv4_sabotage_in, PF_INET, NF_IP_PRE_ROUTING, NF_IP_PRI_FIRST },
++
++ { { NULL, NULL }, ipv4_sabotage_out, PF_INET, NF_IP_FORWARD, NF_IP_PRI_BRIDGE_SABOTAGE },
++ { { NULL, NULL }, ipv4_sabotage_out, PF_INET, NF_IP_LOCAL_OUT, NF_IP_PRI_BRIDGE_SABOTAGE },
++ { { NULL, NULL }, ipv4_sabotage_out, PF_INET, NF_IP_POST_ROUTING, NF_IP_PRI_FIRST },
++};
++
++#define NUMHOOKS (sizeof(br_nf_ops)/sizeof(br_nf_ops[0]))
++
++
++int br_netfilter_init(void)
++{
++ int i;
++
++#ifndef WE_REALLY_INSIST_ON_NOT_HAVING_NAT_SUPPORT
++ if (sizeof(struct tcp_skb_cb) + 4 >= sizeof(((struct sk_buff *)NULL)->cb)) {
++ extern int __too_little_space_in_control_buffer(void);
++ __too_little_space_in_control_buffer();
++ }
++#endif
++
++ for (i=0;i<NUMHOOKS;i++) {
++ int ret;
++
++ if ((ret = nf_register_hook(&br_nf_ops[i])) >= 0)
++ continue;
++
++ while (i--)
++ nf_unregister_hook(&br_nf_ops[i]);
++
++ return ret;
++ }
++
++ printk(KERN_NOTICE "Bridge firewalling registered\n");
++
++ return 0;
++}
++
++void br_netfilter_fini(void)
++{
++ int i;
++
++ for (i=NUMHOOKS-1;i>=0;i--)
++ nf_unregister_hook(&br_nf_ops[i]);
++}
diff --git a/br-nf-bds/patches/bridge-nf-0.0.8-bds-against-2.4.18.diff b/br-nf-bds/patches/bridge-nf-0.0.8-bds-against-2.4.18.diff
new file mode 100644
index 0000000..ad742c7
--- /dev/null
+++ b/br-nf-bds/patches/bridge-nf-0.0.8-bds-against-2.4.18.diff
@@ -0,0 +1,983 @@
+bridge-nf-0.0.8-bds - 26 May
+
+difference between 0.0.7 and 0.0.8:
+
+let iptables mangle table FORWARD chain see the physical out-dev
+
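The change is carried by a new hook priority, NF_IP_PRI_BRIDGE_SABOTAGE_FORWARD (-175), and by br_nf_local_out() reinjecting forwarded packets with a threshold of that priority plus one. In 0.0.7 the forwarded case reinjected at NF_IP_PRI_BRIDGE_SABOTAGE + 1, which sits above the mangle table's -150, so mangle FORWARD never saw the physical out-device; with a threshold of -174 the mangle and filter FORWARD chains are traversed again once skb->physoutdev is known, while conntrack at -200 and the sabotage hook itself are not re-run. A small sketch of which priorities survive the new reinjection threshold (priority values taken from the patched netfilter_ipv4.h; the table itself is only illustrative):

#include <stdio.h>

/* A few FORWARD-path hooks and their priorities from the patched enum. */
struct prio {
        const char *chain;
        int priority;
};

static const struct prio forward_hooks[] = {
        { "conntrack",                 -200 },
        { "bridge sabotage (FORWARD)", -175 },
        { "mangle FORWARD",            -150 },
        { "filter FORWARD",               0 },
};

int main(void)
{
        /* br_nf_local_out() reinjects bridged traffic with
         * thresh = NF_IP_PRI_BRIDGE_SABOTAGE_FORWARD + 1. */
        int thresh = -175 + 1;
        unsigned int i;

        for (i = 0; i < sizeof(forward_hooks) / sizeof(forward_hooks[0]); i++)
                printf("%-26s %s\n", forward_hooks[i].chain,
                       thresh > forward_hooks[i].priority ? "skipped" : "runs");
        return 0;
}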
+--- linux/include/linux/netfilter.h Thu Nov 22 20:47:48 2001
++++ bridge-nf-0.0.8-bds/include/linux/netfilter.h Sun May 26 12:49:04 2002
+@@ -117,17 +117,23 @@
+ /* This is gross, but inline doesn't cut it for avoiding the function
+ call in fast path: gcc doesn't inline (needs value tracking?). --RR */
+ #ifdef CONFIG_NETFILTER_DEBUG
+-#define NF_HOOK nf_hook_slow
++#define NF_HOOK(pf, hook, skb, indev, outdev, okfn) \
++ nf_hook_slow((pf), (hook), (skb), (indev), (outdev), (okfn), INT_MIN)
++#define NF_HOOK_THRESH nf_hook_slow
+ #else
+ #define NF_HOOK(pf, hook, skb, indev, outdev, okfn) \
+ (list_empty(&nf_hooks[(pf)][(hook)]) \
+ ? (okfn)(skb) \
+- : nf_hook_slow((pf), (hook), (skb), (indev), (outdev), (okfn)))
++ : nf_hook_slow((pf), (hook), (skb), (indev), (outdev), (okfn), INT_MIN))
++#define NF_HOOK_THRESH(pf, hook, skb, indev, outdev, okfn, thresh) \
++(list_empty(&nf_hooks[(pf)][(hook)]) \
++ ? (okfn)(skb) \
++ : nf_hook_slow((pf), (hook), (skb), (indev), (outdev), (okfn), (thresh)))
+ #endif
+
+ int nf_hook_slow(int pf, unsigned int hook, struct sk_buff *skb,
+ struct net_device *indev, struct net_device *outdev,
+- int (*okfn)(struct sk_buff *));
++ int (*okfn)(struct sk_buff *), int thresh);
+
+ /* Call setsockopt() */
+ int nf_setsockopt(struct sock *sk, int pf, int optval, char *opt,
+--- linux/include/linux/netfilter_ipv4.h Mon Feb 25 20:38:13 2002
++++ bridge-nf-0.0.8-bds/include/linux/netfilter_ipv4.h Sun May 26 12:52:30 2002
+@@ -52,8 +52,10 @@
+ enum nf_ip_hook_priorities {
+ NF_IP_PRI_FIRST = INT_MIN,
+ NF_IP_PRI_CONNTRACK = -200,
++ NF_IP_PRI_BRIDGE_SABOTAGE_FORWARD = -175,
+ NF_IP_PRI_MANGLE = -150,
+ NF_IP_PRI_NAT_DST = -100,
++ NF_IP_PRI_BRIDGE_SABOTAGE = -50,
+ NF_IP_PRI_FILTER = 0,
+ NF_IP_PRI_NAT_SRC = 100,
+ NF_IP_PRI_LAST = INT_MAX,
+--- linux/include/linux/skbuff.h Thu Nov 22 20:46:26 2001
++++ bridge-nf-0.0.8-bds/include/linux/skbuff.h Sun May 26 12:49:04 2002
+@@ -135,6 +135,8 @@
+ struct sock *sk; /* Socket we are owned by */
+ struct timeval stamp; /* Time we arrived */
+ struct net_device *dev; /* Device we arrived on/are leaving by */
++ struct net_device *physindev; /* Physical device we arrived on */
++ struct net_device *physoutdev; /* Physical device we will leave by */
+
+ /* Transport layer header */
+ union
+--- linux/net/bridge/br.c Mon Feb 25 20:38:14 2002
++++ bridge-nf-0.0.8-bds/net/bridge/br.c Sun May 26 12:49:04 2002
+@@ -42,6 +42,11 @@
+ {
+ printk(KERN_INFO "NET4: Ethernet Bridge 008 for NET4.0\n");
+
++#ifdef CONFIG_BRIDGE_NF
++ if (br_netfilter_init())
++ return 1;
++#endif
++
+ br_handle_frame_hook = br_handle_frame;
+ br_ioctl_hook = br_ioctl_deviceless_stub;
+ #if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
+@@ -65,6 +70,9 @@
+
+ static void __exit br_deinit(void)
+ {
++#ifdef CONFIG_BRIDGE_NF
++ br_netfilter_fini();
++#endif
+ unregister_netdevice_notifier(&br_device_notifier);
+ br_call_ioctl_atomic(__br_clear_ioctl_hook);
+ net_call_rx_atomic(__br_clear_frame_hook);
+--- linux/net/bridge/br_forward.c Wed Aug 15 10:54:35 2001
++++ bridge-nf-0.0.8-bds/net/bridge/br_forward.c Sun May 26 12:49:04 2002
+@@ -30,7 +30,7 @@
+ return 1;
+ }
+
+-static int __dev_queue_push_xmit(struct sk_buff *skb)
++int br_dev_queue_push_xmit(struct sk_buff *skb)
+ {
+ skb_push(skb, ETH_HLEN);
+ dev_queue_xmit(skb);
+@@ -38,10 +38,10 @@
+ return 0;
+ }
+
+-static int __br_forward_finish(struct sk_buff *skb)
++int br_forward_finish(struct sk_buff *skb)
+ {
+ NF_HOOK(PF_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev,
+- __dev_queue_push_xmit);
++ br_dev_queue_push_xmit);
+
+ return 0;
+ }
+@@ -54,7 +54,7 @@
+ skb->dev = to->dev;
+
+ NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, indev, skb->dev,
+- __br_forward_finish);
++ br_forward_finish);
+ }
+
+ static void __br_forward(struct net_bridge_port *to, struct sk_buff *skb)
+@@ -65,7 +65,7 @@
+ skb->dev = to->dev;
+
+ NF_HOOK(PF_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev,
+- __br_forward_finish);
++ br_forward_finish);
+ }
+
+ /* called under bridge lock */
+--- linux/net/bridge/br_input.c Mon Feb 25 20:38:14 2002
++++ bridge-nf-0.0.8-bds/net/bridge/br_input.c Sun May 26 12:49:04 2002
+@@ -46,7 +46,7 @@
+ br_pass_frame_up_finish);
+ }
+
+-static int br_handle_frame_finish(struct sk_buff *skb)
++int br_handle_frame_finish(struct sk_buff *skb)
+ {
+ struct net_bridge *br;
+ unsigned char *dest;
+--- linux/net/bridge/br_private.h Mon Feb 25 20:38:14 2002
++++ bridge-nf-0.0.8-bds/net/bridge/br_private.h Sun May 26 12:49:04 2002
+@@ -120,6 +120,7 @@
+ extern void br_inc_use_count(void);
+
+ /* br_device.c */
++extern int br_dev_xmit(struct sk_buff *skb, struct net_device *dev);
+ extern void br_dev_setup(struct net_device *dev);
+ extern int br_dev_xmit(struct sk_buff *skb, struct net_device *dev);
+
+@@ -144,8 +145,10 @@
+ /* br_forward.c */
+ extern void br_deliver(struct net_bridge_port *to,
+ struct sk_buff *skb);
++extern int br_dev_queue_push_xmit(struct sk_buff *skb);
+ extern void br_forward(struct net_bridge_port *to,
+ struct sk_buff *skb);
++extern int br_forward_finish(struct sk_buff *skb);
+ extern void br_flood_deliver(struct net_bridge *br,
+ struct sk_buff *skb,
+ int clone);
+@@ -166,6 +169,7 @@
+ int *ifindices);
+
+ /* br_input.c */
++extern int br_handle_frame_finish(struct sk_buff *skb);
+ extern void br_handle_frame(struct sk_buff *skb);
+
+ /* br_ioctl.c */
+@@ -177,6 +181,10 @@
+ unsigned long arg2);
+ extern int br_ioctl_deviceless_stub(unsigned long arg);
+
++/* br_netfilter.c */
++extern int br_netfilter_init(void);
++extern void br_netfilter_fini(void);
++
+ /* br_stp.c */
+ extern int br_is_root_bridge(struct net_bridge *br);
+ extern struct net_bridge_port *br_get_port(struct net_bridge *br,
+--- linux/net/bridge/Makefile Fri Dec 29 23:07:24 2000
++++ bridge-nf-0.0.8-bds/net/bridge/Makefile Sun May 26 12:49:04 2002
+@@ -13,4 +13,6 @@
+ br_stp_if.o br_stp_timer.o
+ obj-m := $(O_TARGET)
+
++obj-$(CONFIG_BRIDGE_NF) += br_netfilter.o
++
+ include $(TOPDIR)/Rules.make
+--- linux/net/Config.in Mon Feb 25 20:38:14 2002
++++ bridge-nf-0.0.8-bds/net/Config.in Sun May 26 12:49:04 2002
+@@ -61,6 +61,9 @@
+ fi
+ dep_tristate '802.1d Ethernet Bridging' CONFIG_BRIDGE $CONFIG_INET
+ if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
++ if [ "$CONFIG_BRIDGE" != "n" -a "$CONFIG_NETFILTER" != "n" ]; then
++ bool ' netfilter (firewalling) support' CONFIG_BRIDGE_NF
++ fi
+ tristate 'CCITT X.25 Packet Layer (EXPERIMENTAL)' CONFIG_X25
+ tristate 'LAPB Data Link Driver (EXPERIMENTAL)' CONFIG_LAPB
+ bool '802.2 LLC (EXPERIMENTAL)' CONFIG_LLC
+--- linux/net/core/netfilter.c Mon Feb 25 20:38:14 2002
++++ bridge-nf-0.0.8-bds/net/core/netfilter.c Sun May 26 12:49:04 2002
+@@ -343,10 +343,15 @@
+ const struct net_device *indev,
+ const struct net_device *outdev,
+ struct list_head **i,
+- int (*okfn)(struct sk_buff *))
++ int (*okfn)(struct sk_buff *),
++ int hook_thresh)
+ {
+ for (*i = (*i)->next; *i != head; *i = (*i)->next) {
+ struct nf_hook_ops *elem = (struct nf_hook_ops *)*i;
++
++ if (hook_thresh > elem->priority)
++ continue;
++
+ switch (elem->hook(hook, skb, indev, outdev, okfn)) {
+ case NF_QUEUE:
+ return NF_QUEUE;
+@@ -414,6 +419,8 @@
+ {
+ int status;
+ struct nf_info *info;
++ struct net_device *physindev;
++ struct net_device *physoutdev;
+
+ if (!queue_handler[pf].outfn) {
+ kfree_skb(skb);
+@@ -436,11 +443,16 @@
+ if (indev) dev_hold(indev);
+ if (outdev) dev_hold(outdev);
+
++ if ((physindev = skb->physindev)) dev_hold(physindev);
++ if ((physoutdev = skb->physoutdev)) dev_hold(physoutdev);
++
+ status = queue_handler[pf].outfn(skb, info, queue_handler[pf].data);
+ if (status < 0) {
+ /* James M doesn't say fuck enough. */
+ if (indev) dev_put(indev);
+ if (outdev) dev_put(outdev);
++ if (physindev) dev_put(physindev);
++ if (physoutdev) dev_put(physoutdev);
+ kfree(info);
+ kfree_skb(skb);
+ return;
+@@ -450,7 +462,8 @@
+ int nf_hook_slow(int pf, unsigned int hook, struct sk_buff *skb,
+ struct net_device *indev,
+ struct net_device *outdev,
+- int (*okfn)(struct sk_buff *))
++ int (*okfn)(struct sk_buff *),
++ int hook_thresh)
+ {
+ struct list_head *elem;
+ unsigned int verdict;
+@@ -482,7 +495,7 @@
+
+ elem = &nf_hooks[pf][hook];
+ verdict = nf_iterate(&nf_hooks[pf][hook], &skb, hook, indev,
+- outdev, &elem, okfn);
++ outdev, &elem, okfn, hook_thresh);
+ if (verdict == NF_QUEUE) {
+ NFDEBUG("nf_hook: Verdict = QUEUE.\n");
+ nf_queue(skb, elem, pf, hook, indev, outdev, okfn);
+@@ -531,7 +544,7 @@
+ verdict = nf_iterate(&nf_hooks[info->pf][info->hook],
+ &skb, info->hook,
+ info->indev, info->outdev, &elem,
+- info->okfn);
++ info->okfn, INT_MIN);
+ }
+
+ switch (verdict) {
+--- linux/net/core/skbuff.c Fri Dec 21 18:42:05 2001
++++ bridge-nf-0.0.8-bds/net/core/skbuff.c Sun May 26 12:49:04 2002
+@@ -231,6 +231,8 @@
+ skb->sk = NULL;
+ skb->stamp.tv_sec=0; /* No idea about time */
+ skb->dev = NULL;
++ skb->physindev = NULL;
++ skb->physoutdev = NULL;
+ skb->dst = NULL;
+ memset(skb->cb, 0, sizeof(skb->cb));
+ skb->pkt_type = PACKET_HOST; /* Default type */
+@@ -362,6 +364,8 @@
+ n->sk = NULL;
+ C(stamp);
+ C(dev);
++ C(physindev);
++ C(physoutdev);
+ C(h);
+ C(nh);
+ C(mac);
+@@ -417,6 +421,8 @@
+ new->list=NULL;
+ new->sk=NULL;
+ new->dev=old->dev;
++ new->physindev=old->physindev;
++ new->physoutdev=old->physoutdev;
+ new->priority=old->priority;
+ new->protocol=old->protocol;
+ new->dst=dst_clone(old->dst);
+--- linux/net/ipv4/ip_output.c Wed Oct 17 23:16:39 2001
++++ bridge-nf-0.0.8-bds/net/ipv4/ip_output.c Sun May 26 12:49:04 2002
+@@ -819,6 +819,8 @@
+ skb_set_owner_w(skb2, skb->sk);
+ skb2->dst = dst_clone(skb->dst);
+ skb2->dev = skb->dev;
++ skb2->physindev = skb->physindev;
++ skb2->physoutdev = skb->physoutdev;
+
+ /*
+ * Copy the packet header into the new buffer.
+@@ -882,6 +884,7 @@
+ iph->tot_len = htons(len + hlen);
+
+ ip_send_check(iph);
++ memcpy(skb2->data - 16, skb->data - 16, 16);
+
+ err = output(skb2);
+ if (err)
+--- linux/net/ipv4/netfilter/ip_tables.c Mon Feb 25 20:38:14 2002
++++ bridge-nf-0.0.8-bds/net/ipv4/netfilter/ip_tables.c Sun May 26 12:49:04 2002
+@@ -121,12 +121,15 @@
+ static inline int
+ ip_packet_match(const struct iphdr *ip,
+ const char *indev,
++ const char *physindev,
+ const char *outdev,
++ const char *physoutdev,
+ const struct ipt_ip *ipinfo,
+ int isfrag)
+ {
+ size_t i;
+ unsigned long ret;
++ unsigned long ret2;
+
+ #define FWINV(bool,invflg) ((bool) ^ !!(ipinfo->invflags & invflg))
+
+@@ -156,7 +159,13 @@
+ & ((const unsigned long *)ipinfo->iniface_mask)[i];
+ }
+
+- if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
++ for (i = 0, ret2 = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
++ ret2 |= (((const unsigned long *)physindev)[i]
++ ^ ((const unsigned long *)ipinfo->iniface)[i])
++ & ((const unsigned long *)ipinfo->iniface_mask)[i];
++ }
++
++ if (FWINV(ret != 0 && ret2 != 0, IPT_INV_VIA_IN)) {
+ dprintf("VIA in mismatch (%s vs %s).%s\n",
+ indev, ipinfo->iniface,
+ ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
+@@ -169,7 +178,13 @@
+ & ((const unsigned long *)ipinfo->outiface_mask)[i];
+ }
+
+- if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
++ for (i = 0, ret2 = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
++ ret2 |= (((const unsigned long *)physoutdev)[i]
++ ^ ((const unsigned long *)ipinfo->outiface)[i])
++ & ((const unsigned long *)ipinfo->outiface_mask)[i];
++ }
++
++ if (FWINV(ret != 0 && ret2 != 0, IPT_INV_VIA_OUT)) {
+ dprintf("VIA out mismatch (%s vs %s).%s\n",
+ outdev, ipinfo->outiface,
+ ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
+@@ -268,6 +283,7 @@
+ /* Initializing verdict to NF_DROP keeps gcc happy. */
+ unsigned int verdict = NF_DROP;
+ const char *indev, *outdev;
++ const char *physindev, *physoutdev;
+ void *table_base;
+ struct ipt_entry *e, *back;
+
+@@ -277,6 +293,9 @@
+ datalen = (*pskb)->len - ip->ihl * 4;
+ indev = in ? in->name : nulldevname;
+ outdev = out ? out->name : nulldevname;
++ physindev = (*pskb)->physindev ? (*pskb)->physindev->name : nulldevname;
++ physoutdev = (*pskb)->physoutdev ? (*pskb)->physoutdev->name : nulldevname;
++
+ /* We handle fragments by dealing with the first fragment as
+ * if it was a normal packet. All other fragments are treated
+ * normally, except that they will NEVER match rules that ask
+@@ -312,7 +331,7 @@
+ IP_NF_ASSERT(e);
+ IP_NF_ASSERT(back);
+ (*pskb)->nfcache |= e->nfcache;
+- if (ip_packet_match(ip, indev, outdev, &e->ip, offset)) {
++ if (ip_packet_match(ip, indev, physindev, outdev, physoutdev, &e->ip, offset)) {
+ struct ipt_entry_target *t;
+
+ if (IPT_MATCH_ITERATE(e, do_match,
+--- linux/net/ipv4/netfilter/ipt_LOG.c Mon Feb 25 20:38:14 2002
++++ bridge-nf-0.0.8-bds/net/ipv4/netfilter/ipt_LOG.c Sun May 26 12:49:04 2002
+@@ -285,10 +285,13 @@
+ level_string[1] = '0' + (loginfo->level % 8);
+ spin_lock_bh(&log_lock);
+ printk(level_string);
+- printk("%sIN=%s OUT=%s ",
+- loginfo->prefix,
+- in ? in->name : "",
+- out ? out->name : "");
++ printk("%sIN=%s ", loginfo->prefix, in ? in->name : "");
++ if ((*pskb)->physindev && in != (*pskb)->physindev)
++ printk("PHYSIN=%s ", (*pskb)->physindev->name);
++ printk("OUT=%s ", out ? out->name : "");
++ if ((*pskb)->physoutdev && out != (*pskb)->physoutdev)
++ printk("PHYSOUT=%s ", (*pskb)->physoutdev->name);
++
+ if (in && !out) {
+ /* MAC logging for input chain only. */
+ printk("MAC=");
+--- /dev/null Sat May 18 12:04:21 2002
++++ bridge-nf-0.0.8-bds/net/bridge/br_netfilter.c Sun May 26 12:59:00 2002
+@@ -0,0 +1,567 @@
++/*
++ * Handle firewalling
++ * Linux ethernet bridge
++ *
++ * Authors:
++ * Lennert Buytenhek <buytenh@gnu.org>
++ * Bart De Schuymer <bart.de.schuymer@pandora.be>
++ *
++ * $Id: bridge-nf-0.0.8-bds-against-2.4.18.diff,v 1.1 2002/06/01 19:24:01 bdschuym Exp $
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ *
++ * Lennert dedicates this file to Kerstin Wurdinger.
++ */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/ip.h>
++#include <linux/netdevice.h>
++#include <linux/skbuff.h>
++#include <linux/if_ether.h>
++#include <linux/netfilter_bridge.h>
++#include <linux/netfilter_ipv4.h>
++#include <linux/in_route.h>
++#include <net/ip.h>
++#include <net/tcp.h>
++#include <asm/uaccess.h>
++#include <asm/checksum.h>
++#include "br_private.h"
++
++
++#ifndef WE_REALLY_INSIST_ON_NOT_HAVING_NAT_SUPPORT
++/* As the original source/destination addresses are variables private to this
++ * file, we store them in unused space at the end of the control buffer.
++ * On 64-bit platforms the TCP control buffer size still leaves us 8 bytes
++ * of space at the end, so that fits. Usage of the original source address
++ * and the original destination address never overlaps (daddr is needed
++ * around PRE_ROUTING, and saddr around POST_ROUTING), so that's okay as
++ * well.
++ */
++#define skb_origaddr(skb) (*((u32 *)((skb)->cb + sizeof((skb)->cb) - 4)))
++
++#define store_orig_dstaddr(skb) (skb_origaddr(skb) = (skb)->nh.iph->daddr)
++#define store_orig_srcaddr(skb) (skb_origaddr(skb) = (skb)->nh.iph->saddr)
++#define dnat_took_place(skb) (skb_origaddr(skb) != (skb)->nh.iph->daddr)
++#define snat_took_place(skb) (skb_origaddr(skb) != (skb)->nh.iph->saddr)
++#else
++#define store_orig_dstaddr(skb)
++#define store_orig_srcaddr(skb)
++#define dnat_took_place(skb) (0)
++#define snat_took_place(skb) (0)
++#endif
++
++
++#define has_bridge_parent(device) ((device)->br_port != NULL)
++#define bridge_parent(device) (&((device)->br_port->br->dev))
++
++
++/* As opposed to the DNAT case, for the SNAT case it's not quite
++ * clear what we should do with ethernet addresses in NAT'ed
++ * packets. Use this heuristic for now.
++ */
++static inline void __maybe_fixup_src_address(struct sk_buff *skb)
++{
++ if (snat_took_place(skb) &&
++ inet_addr_type(skb->nh.iph->saddr) == RTN_LOCAL) {
++ memcpy(skb->mac.ethernet->h_source,
++ bridge_parent(skb->dev)->dev_addr,
++ ETH_ALEN);
++ }
++}
++
++
++/* We need these fake structures to make netfilter happy --
++ * lots of places assume that skb->dst != NULL, which isn't
++ * all that unreasonable.
++ *
++ * Currently, we fill in the PMTU entry because netfilter
++ * refragmentation needs it, and the rt_flags entry because
++ * ipt_REJECT needs it. Future netfilter modules might
++ * require us to fill additional fields.
++ */
++static struct net_device __fake_net_device = {
++ hard_header_len: ETH_HLEN
++};
++
++static struct rtable __fake_rtable = {
++ u: {
++ dst: {
++ __refcnt: ATOMIC_INIT(1),
++ dev: &__fake_net_device,
++ pmtu: 1500
++ }
++ },
++
++ rt_flags: 0
++};
++
++
++/* PF_BRIDGE/PRE_ROUTING *********************************************/
++static void __br_dnat_complain(void)
++{
++ static unsigned long last_complaint = 0;
++
++ if (jiffies - last_complaint >= 5 * HZ) {
++ printk(KERN_WARNING "Performing cross-bridge DNAT requires IP "
++ "forwarding to be enabled\n");
++ last_complaint = jiffies;
++ }
++}
++
++
++static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
++{
++ skb->dev = bridge_parent(skb->dev);
++ skb->dst->output(skb);
++ return 0;
++}
++
++/* This requires some explaining. If DNAT has taken place,
++ * we will need to fix up the destination ethernet address,
++ * and this is a tricky process.
++ *
++ * There are two cases to consider:
++ * 1. The packet was DNAT'ed to a device in the same bridge
++ * port group as it was received on. We can still bridge
++ * the packet.
++ * 2. The packet was DNAT'ed to a different device, either
++ * a non-bridged device or another bridge port group.
++ * The packet will need to be routed.
++ *
++ * The way to distinguish between the two is by calling ip_route_input()
++ * and looking at skb->dst->dev, which ip_route_input() sets to the
++ * destination device when it succeeds.
++ *
++ * Let us first consider ip_route_input() succeeds:
++ *
++ * If skb->dst->dev equals the logical bridge device the packet came in on,
++ * we can consider this bridging. We then call skb->dst->output() which will
++ * make the packet enter br_nf_local_out() not much later. In that function
++ * it is assured that the iptables FORWARD chain is traversed for the packet.
++ *
++ * Else, the packet is considered to be routed and we just change the
++ * destination MAC address so that the packet will later be passed up to the ip
++ * stack to be routed.
++ *
++ * Let us now consider ip_route_input() fails:
++ *
++ * After an "echo '0' > /proc/sys/net/ipv4/ip_forward" ip_route_input() will
++ * fail, while ip_route_output() will return success. The source address
++ * for ip_route_output() is set to zero, so ip_route_output()
++ * thinks we're handling a locally generated packet and won't care if
++ * ip forwarding is allowed. We send a warning message to the user's log
++ * telling her to put ip forwarding on.
++ *
++ * ip_route_input() will also fail if there is no route available. Then we just
++ * drop the packet.
++ *
++ * The other special thing happening here is putting skb->physoutdev on
++ * &__fake_net_device (resp. NULL) for bridged (resp. routed) packets. This is
++ * needed so that br_nf_local_out() can know that it has to give the packets to
++ * the BR_NF_FORWARD (resp. BR_NF_LOCAL_OUT) bridge hook. See that function.
++ * --Lennert, 20020411
++ * --Bart, 20020416 (updated)
++ */
++
++static int br_nf_pre_routing_finish(struct sk_buff *skb)
++{
++ struct net_device *dev = skb->dev;
++ struct iphdr *iph = skb->nh.iph;
++
++ if (dnat_took_place(skb)) {
++ if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev)) {
++ struct rtable *rt;
++
++ if (!ip_route_output(&rt, iph->daddr, 0, iph->tos, 0)) {
++ // bridged dnated traffic isn't dependent on
++ // disabled ip_forwarding
++ if (((struct dst_entry *)rt)->dev == dev) {
++ skb->dst = (struct dst_entry *)rt;
++ goto bridged_dnat;
++ }
++ __br_dnat_complain();
++ dst_release((struct dst_entry *)rt);
++ }
++ kfree_skb(skb);
++ return 0;
++ } else {
++ if (skb->dst->dev == dev) {
++bridged_dnat:
++ // tell br_nf_local_out this is a bridged frame
++ skb->physoutdev = &__fake_net_device;
++ skb->dev = skb->physindev;
++ NF_HOOK_THRESH(PF_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
++ br_nf_pre_routing_finish_bridge, 1);
++ return 0;
++ }
++ // tell br_nf_local_out this is a routed frame
++ skb->physoutdev = NULL;
++ memcpy(skb->mac.ethernet->h_dest, dev->dev_addr, ETH_ALEN);
++ }
++ } else {
++ skb->dst = (struct dst_entry *)&__fake_rtable;
++ dst_hold(skb->dst);
++ }
++
++ skb->dev = skb->physindev;
++ NF_HOOK_THRESH(PF_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
++ br_handle_frame_finish, 1);
++
++ return 0;
++}
++
++/* Replicate the checks that IPv4 does on packet reception.
++ * Set skb->dev to the bridge device (i.e. parent of the
++ * receiving device) to make netfilter happy, the REDIRECT
++ * target in particular. Save the original destination IP
++ * address to be able to detect DNAT afterwards.
++ */
++static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *))
++{
++ struct iphdr *iph;
++ __u32 len;
++ struct sk_buff *skb;
++
++ if ((*pskb)->protocol != __constant_htons(ETH_P_IP))
++ return NF_ACCEPT;
++
++ if ((skb = skb_share_check(*pskb, GFP_ATOMIC)) == NULL)
++ goto out;
++
++ if (!pskb_may_pull(skb, sizeof(struct iphdr)))
++ goto inhdr_error;
++
++ iph = skb->nh.iph;
++ if (iph->ihl < 5 || iph->version != 4)
++ goto inhdr_error;
++
++ if (!pskb_may_pull(skb, 4*iph->ihl))
++ goto inhdr_error;
++
++ iph = skb->nh.iph;
++ if (ip_fast_csum((__u8 *)iph, iph->ihl) != 0)
++ goto inhdr_error;
++
++ len = ntohs(iph->tot_len);
++ if (skb->len < len || len < 4*iph->ihl)
++ goto inhdr_error;
++
++ if (skb->len > len) {
++ __pskb_trim(skb, len);
++ if (skb->ip_summed == CHECKSUM_HW)
++ skb->ip_summed = CHECKSUM_NONE;
++ }
++
++ skb->physindev = skb->dev;
++ skb->dev = bridge_parent(skb->dev);
++ if (skb->pkt_type == PACKET_OTHERHOST)
++ skb->pkt_type = PACKET_HOST;
++ store_orig_dstaddr(skb);
++ NF_HOOK(PF_INET, NF_IP_PRE_ROUTING, skb, skb->dev, NULL,
++ br_nf_pre_routing_finish);
++
++ return NF_STOLEN;
++
++inhdr_error:
++// IP_INC_STATS_BH(IpInHdrErrors);
++out:
++ return NF_DROP;
++}
++
++
++/* PF_BRIDGE/LOCAL_IN ************************************************/
++/* The packet is locally destined, which requires a real
++ * dst_entry, so detach the fake one. On the way up, the
++ * packet would pass through PRE_ROUTING again (which already
++ * took place when the packet entered the bridge), but we
++ * register an IPv4 PRE_ROUTING 'sabotage' hook that will
++ * prevent this from happening.
++ */
++static unsigned int br_nf_local_in(unsigned int hook, struct sk_buff **pskb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *))
++{
++ struct sk_buff *skb = *pskb;
++
++ if (skb->protocol != __constant_htons(ETH_P_IP))
++ return NF_ACCEPT;
++
++ if (skb->dst == (struct dst_entry *)&__fake_rtable) {
++ dst_release(skb->dst);
++ skb->dst = NULL;
++ }
++
++ return NF_ACCEPT;
++}
++
++
++/* PF_BRIDGE/FORWARD *************************************************/
++static int br_nf_forward_finish(struct sk_buff *skb)
++{
++ NF_HOOK_THRESH(PF_BRIDGE, NF_BR_FORWARD, skb, skb->physindev,
++ skb->dev, br_forward_finish, 1);
++
++ return 0;
++}
++
++/* This is the 'purely bridged' case. We pass the packet to
++ * netfilter with indev and outdev set to the bridge device,
++ * but we are still able to filter on the 'real' indev/outdev
++ * because another bit of the bridge-nf patch overloads the
++ * '-i' and '-o' iptables interface checks to take
++ * skb->phys{in,out}dev into account as well (so both the real
++ * device and the bridge device will match).
++ */
++static unsigned int br_nf_forward(unsigned int hook, struct sk_buff **pskb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *))
++{
++ struct sk_buff *skb = *pskb;
++
++ // don't mess with non-ip frames, also don't mess with the ip-packets
++ // when br_nf_local_out_finish explicitly says so.
++ if (skb->protocol != __constant_htons(ETH_P_IP) || skb->physindev == NULL)
++ return NF_ACCEPT;
++
++ skb->physoutdev = skb->dev;
++ NF_HOOK(PF_INET, NF_IP_FORWARD, skb, bridge_parent(skb->physindev),
++ bridge_parent(skb->dev), br_nf_forward_finish);
++
++ return NF_STOLEN;
++}
++
++
++/* PF_BRIDGE/LOCAL_OUT ***********************************************/
++static int br_nf_local_out_finish_forward(struct sk_buff *skb)
++{
++ struct net_device *dev;
++
++ dev = skb->physindev;
++ // tell br_nf_forward to stay away
++ skb->physindev = NULL;
++ NF_HOOK(PF_BRIDGE, NF_BR_FORWARD, skb, dev, skb->dev,
++ br_forward_finish);
++
++ return 0;
++}
++
++static int br_nf_local_out_finish(struct sk_buff *skb)
++{
++ NF_HOOK_THRESH(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
++ br_forward_finish, INT_MIN + 1);
++
++ return 0;
++}
++
++
++/* This hook sees both locally originated IP packets and forwarded
++ * IP packets (in both cases the destination device is a bridge
++ * device). For the sake of interface transparency (i.e. properly
++ * overloading the '-o' option), we steal packets destined to
++ * a bridge device away from the IPv4 FORWARD and OUTPUT hooks,
++ * and reinject them later, when we have determined the real
++ * output device. This reinjecting happens here.
++ *
++ * If skb->physindev is NULL, the bridge-nf code never touched
++ * this packet before, and so the packet was locally originated.
++ * We call the IPv4 LOCAL_OUT hook.
++ *
++ * If skb->physindev isn't NULL, there are two cases:
++ * 1. The packet was IP routed.
++ * 2. The packet was cross-bridge DNAT'ed (see the comment near
++ * PF_BRIDGE/PRE_ROUTING).
++ * In both cases, we call the IPv4 FORWARD hook. In case 1,
++ * if the packet originally came from a bridge device, and in
++ * case 2, skb->physindev will have a bridge device as parent,
++ * so we use that parent device as indev. Otherwise, we just
++ * use physindev.
++ *
++ * If skb->physoutdev == NULL the bridge code never touched the
++ * packet or the packet was routed in br_nf_pre_routing_finish().
++ * We give the packet to the bridge NF_BR_LOCAL_OUT hook.
++ * If not, the packet is actually a bridged one so we give it to
++ * the NF_BR_FORWARD hook.
++ */
++
++static unsigned int br_nf_local_out(unsigned int hook, struct sk_buff **pskb, const struct net_device *in, const struct net_device *out, int (*_okfn)(struct sk_buff *))
++{
++ int hookno, prio;
++ int (*okfn)(struct sk_buff *skb);
++ struct net_device *realindev;
++ struct sk_buff *skb = *pskb;
++
++ if (skb->protocol != __constant_htons(ETH_P_IP))
++ return NF_ACCEPT;
++
++ /* Sometimes we get packets with NULL ->dst here (for example,
++ * running a dhcp client daemon triggers this).
++ */
++ if (skb->dst == NULL)
++ return NF_ACCEPT;
++
++ // bridged, take forward
++ // (see big note in front of br_nf_pre_routing_finish)
++ if (skb->physoutdev == &__fake_net_device) {
++ okfn = br_nf_local_out_finish_forward;
++ } else if (skb->physoutdev == NULL) {
++ // non-bridged: routed or locally generated traffic, take local_out
++ // (see big note in front of br_nf_pre_routing_finish)
++ okfn = br_nf_local_out_finish;
++ } else {
++ printk("ARGH: bridge_or_routed hack doesn't work\n");
++ okfn = br_nf_local_out_finish;
++ }
++
++ skb->physoutdev = skb->dev;
++
++ hookno = NF_IP_LOCAL_OUT;
++ prio = NF_IP_PRI_BRIDGE_SABOTAGE;
++ if ((realindev = skb->physindev) != NULL) {
++ hookno = NF_IP_FORWARD;
++ // there is an iptables mangle table FORWARD chain with
++ // priority -150. This chain should see the physical out-dev.
++ prio = NF_IP_PRI_BRIDGE_SABOTAGE_FORWARD;
++ if (has_bridge_parent(realindev))
++ realindev = bridge_parent(realindev);
++ }
++
++ NF_HOOK_THRESH(PF_INET, hookno, skb, realindev,
++ bridge_parent(skb->dev), okfn, prio + 1);
++
++ return NF_STOLEN;
++}
++
++
++/* PF_BRIDGE/POST_ROUTING ********************************************/
++static int br_nf_post_routing_finish(struct sk_buff *skb)
++{
++ __maybe_fixup_src_address(skb);
++ NF_HOOK_THRESH(PF_BRIDGE, NF_BR_POST_ROUTING, skb, NULL,
++ bridge_parent(skb->dev), br_dev_queue_push_xmit, 1);
++
++ return 0;
++}
++
++static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff **pskb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *))
++{
++ struct sk_buff *skb = *pskb;
++
++ /* Be very paranoid. */
++ if (skb->mac.raw < skb->head || skb->mac.raw + ETH_HLEN > skb->data) {
++ printk(KERN_CRIT "Argh!! Fuck me harder with a chainsaw. ");
++ if (skb->dev != NULL) {
++ printk("[%s]", skb->dev->name);
++ if (has_bridge_parent(skb->dev))
++ printk("[%s]", bridge_parent(skb->dev)->name);
++ }
++ printk("\n");
++ return NF_ACCEPT;
++ }
++
++ if (skb->protocol != __constant_htons(ETH_P_IP))
++ return NF_ACCEPT;
++
++ /* Sometimes we get packets with NULL ->dst here (for example,
++ * running a dhcp client daemon triggers this).
++ */
++ if (skb->dst == NULL)
++ return NF_ACCEPT;
++
++ store_orig_srcaddr(skb);
++ NF_HOOK(PF_INET, NF_IP_POST_ROUTING, skb, NULL,
++ bridge_parent(skb->dev), br_nf_post_routing_finish);
++
++ return NF_STOLEN;
++}
++
++
++/* IPv4/SABOTAGE *****************************************************/
++/* Don't hand locally destined packets to PF_INET/PRE_ROUTING
++ * for the second time. */
++static unsigned int ipv4_sabotage_in(unsigned int hook, struct sk_buff **pskb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *))
++{
++ if (in->hard_start_xmit == br_dev_xmit &&
++ okfn != br_nf_pre_routing_finish) {
++ okfn(*pskb);
++ return NF_STOLEN;
++ }
++
++ return NF_ACCEPT;
++}
++
++/* Postpone execution of PF_INET/FORWARD, PF_INET/LOCAL_OUT
++ * and PF_INET/POST_ROUTING until we have done the forwarding
++ * decision in the bridge code and have determined skb->physoutdev.
++ */
++static unsigned int ipv4_sabotage_out(unsigned int hook, struct sk_buff **pskb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *))
++{
++ if (out->hard_start_xmit == br_dev_xmit &&
++ okfn != br_nf_forward_finish &&
++ okfn != br_nf_local_out_finish &&
++ okfn != br_nf_post_routing_finish) {
++ struct sk_buff *skb = *pskb;
++
++ if (hook == NF_IP_FORWARD && skb->physindev == NULL)
++ skb->physindev = (struct net_device *)in;
++ okfn(skb);
++ return NF_STOLEN;
++ }
++
++ return NF_ACCEPT;
++}
++
++
++static struct nf_hook_ops br_nf_ops[] = {
++ { { NULL, NULL }, br_nf_pre_routing, PF_BRIDGE, NF_BR_PRE_ROUTING, 0 },
++ { { NULL, NULL }, br_nf_local_in, PF_BRIDGE, NF_BR_LOCAL_IN, 0 },
++ { { NULL, NULL }, br_nf_forward, PF_BRIDGE, NF_BR_FORWARD, 0 },
++ // we need INT_MIN, so innocent NF_BR_LOCAL_OUT functions don't
++ // get bridged traffic as input
++ { { NULL, NULL }, br_nf_local_out, PF_BRIDGE, NF_BR_LOCAL_OUT, INT_MIN },
++ { { NULL, NULL }, br_nf_post_routing, PF_BRIDGE, NF_BR_POST_ROUTING, 0 },
++
++ { { NULL, NULL }, ipv4_sabotage_in, PF_INET, NF_IP_PRE_ROUTING, NF_IP_PRI_FIRST },
++
++ { { NULL, NULL }, ipv4_sabotage_out, PF_INET, NF_IP_FORWARD, NF_IP_PRI_BRIDGE_SABOTAGE_FORWARD },
++ { { NULL, NULL }, ipv4_sabotage_out, PF_INET, NF_IP_LOCAL_OUT, NF_IP_PRI_BRIDGE_SABOTAGE },
++ { { NULL, NULL }, ipv4_sabotage_out, PF_INET, NF_IP_POST_ROUTING, NF_IP_PRI_FIRST },
++};
++
++#define NUMHOOKS (sizeof(br_nf_ops)/sizeof(br_nf_ops[0]))
++
++
++int br_netfilter_init(void)
++{
++ int i;
++
++#ifndef WE_REALLY_INSIST_ON_NOT_HAVING_NAT_SUPPORT
++ if (sizeof(struct tcp_skb_cb) + 4 >= sizeof(((struct sk_buff *)NULL)->cb)) {
++ extern int __too_little_space_in_control_buffer(void);
++ __too_little_space_in_control_buffer();
++ }
++#endif
++
++ for (i=0;i<NUMHOOKS;i++) {
++ int ret;
++
++ if ((ret = nf_register_hook(&br_nf_ops[i])) >= 0)
++ continue;
++
++ while (i--)
++ nf_unregister_hook(&br_nf_ops[i]);
++
++ return ret;
++ }
++
++ printk(KERN_NOTICE "Bridge firewalling registered\n");
++
++ return 0;
++}
++
++void br_netfilter_fini(void)
++{
++ int i;
++
++ for (i=NUMHOOKS-1;i>=0;i--)
++ nf_unregister_hook(&br_nf_ops[i]);
++}
diff --git a/kernel/README b/kernel/README
new file mode 100644
index 0000000..617a201
--- /dev/null
+++ b/kernel/README
@@ -0,0 +1,6 @@
+Here are the source code and patches for the kernel files that ebtables
+changes or creates.
+
+Bart De Schuymer,
+June 1, 2002
+
diff --git a/kernel/linux/include/linux/br_db.h b/kernel/linux/include/linux/br_db.h
new file mode 100644
index 0000000..fae1279
--- /dev/null
+++ b/kernel/linux/include/linux/br_db.h
@@ -0,0 +1,53 @@
+/*
+ * bridge ethernet protocol filter
+ *
+ * Authors:
+ * Bart De Schuymer <bart.de.schuymer@pandora.be>
+ *
+ * br_db.h,v 1.1 2001/04/16
+ *
+ * This code is strongly inspired by the iptables code, which is
+ * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __LINUX_BRIDGE_DB_H
+#define __LINUX_BRIDGE_DB_H
+#include <linux/if.h> /* IFNAMSIZ */
+#ifdef __KERNEL__
+#include <linux/if_bridge.h>
+#include <linux/netfilter_bridge.h>
+#else
+#include <linux/netfilter_bridge.h>
+#endif
+#define BRDB_BASE_CTL 135
+
+#define BRDB_SO_SET_ALLOWDB (BRDB_BASE_CTL)
+#define BRDB_SO_SET_MAX (BRDB_SO_SET_ALLOWDB+1)
+
+#define BRDB_SO_GET_DBINFO (BRDB_BASE_CTL)
+#define BRDB_SO_GET_DB (BRDB_SO_GET_DBINFO+1)
+#define BRDB_SO_GET_MAX (BRDB_SO_GET_DB+1)
+
+#define BRDB_NODB 0
+#define BRDB_DB 1
+
+#define INITIAL_DBSIZE 10
+#define IDENTIFY802_3 46
+
+struct brdb_dbinfo {
+ __u32 nentries;
+};
+
+struct brdb_dbentry {
+ __u8 in[IFNAMSIZ];
+ __u8 out[IFNAMSIZ];
+ __u16 ethproto;
+ __u32 hook;
+};
+
+#endif
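A minimal userspace sketch of how the database behind these sockopt numbers can be queried. The raw AF_INET socket and the IPPROTO_IP level are assumptions, mirroring how iptables reaches its own nf_sockopt range (br_db.c registers these handlers for PF_INET); the include is assumed to resolve against the patched headers, and error handling is trimmed.

    /* print the number of entries in the brdb database */
    #include <stdio.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <linux/br_db.h>

    int main(void)
    {
        struct brdb_dbinfo info;
        socklen_t len = sizeof(info);
        int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);

        if (fd < 0)
            return 1;
        /* nentries: 0 = database disabled, otherwise number of entries + 1 */
        if (getsockopt(fd, IPPROTO_IP, BRDB_SO_GET_DBINFO, &info, &len) == 0)
            printf("brdb entries: %u\n", info.nentries ? info.nentries - 1 : 0);
        return 0;
    }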
diff --git a/kernel/linux/include/linux/if_bridge.h b/kernel/linux/include/linux/if_bridge.h
new file mode 100644
index 0000000..926b45f
--- /dev/null
+++ b/kernel/linux/include/linux/if_bridge.h
@@ -0,0 +1,114 @@
+/*
+ * Linux ethernet bridge
+ *
+ * Authors:
+ * Lennert Buytenhek <buytenh@gnu.org>
+ *
+ * $Id: if_bridge.h,v 1.1 2002/06/01 19:24:08 bdschuym Exp $
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_IF_BRIDGE_H
+#define _LINUX_IF_BRIDGE_H
+
+#include <linux/types.h>
+
+#define BRCTL_VERSION 1
+
+#define BRCTL_GET_VERSION 0
+#define BRCTL_GET_BRIDGES 1
+#define BRCTL_ADD_BRIDGE 2
+#define BRCTL_DEL_BRIDGE 3
+#define BRCTL_ADD_IF 4
+#define BRCTL_DEL_IF 5
+#define BRCTL_GET_BRIDGE_INFO 6
+#define BRCTL_GET_PORT_LIST 7
+#define BRCTL_SET_BRIDGE_FORWARD_DELAY 8
+#define BRCTL_SET_BRIDGE_HELLO_TIME 9
+#define BRCTL_SET_BRIDGE_MAX_AGE 10
+#define BRCTL_SET_AGEING_TIME 11
+#define BRCTL_SET_GC_INTERVAL 12
+#define BRCTL_GET_PORT_INFO 13
+#define BRCTL_SET_BRIDGE_STP_STATE 14
+#define BRCTL_SET_BRIDGE_PRIORITY 15
+#define BRCTL_SET_PORT_PRIORITY 16
+#define BRCTL_SET_PATH_COST 17
+#define BRCTL_GET_FDB_ENTRIES 18
+
+#define BR_STATE_DISABLED 0
+#define BR_STATE_LISTENING 1
+#define BR_STATE_LEARNING 2
+#define BR_STATE_FORWARDING 3
+#define BR_STATE_BLOCKING 4
+
+struct __bridge_info
+{
+ __u64 designated_root;
+ __u64 bridge_id;
+ __u32 root_path_cost;
+ __u32 max_age;
+ __u32 hello_time;
+ __u32 forward_delay;
+ __u32 bridge_max_age;
+ __u32 bridge_hello_time;
+ __u32 bridge_forward_delay;
+ __u8 topology_change;
+ __u8 topology_change_detected;
+ __u8 root_port;
+ __u8 stp_enabled;
+ __u32 ageing_time;
+ __u32 gc_interval;
+ __u32 hello_timer_value;
+ __u32 tcn_timer_value;
+ __u32 topology_change_timer_value;
+ __u32 gc_timer_value;
+};
+
+struct __port_info
+{
+ __u64 designated_root;
+ __u64 designated_bridge;
+ __u16 port_id;
+ __u16 designated_port;
+ __u32 path_cost;
+ __u32 designated_cost;
+ __u8 state;
+ __u8 top_change_ack;
+ __u8 config_pending;
+ __u8 unused0;
+ __u32 message_age_timer_value;
+ __u32 forward_delay_timer_value;
+ __u32 hold_timer_value;
+};
+
+struct __fdb_entry
+{
+ __u8 mac_addr[6];
+ __u8 port_no;
+ __u8 is_local;
+ __u32 ageing_timer_value;
+ __u32 unused;
+};
+
+#ifdef __KERNEL__
+
+#include <linux/netdevice.h>
+
+struct net_bridge;
+struct net_bridge_port;
+
+extern int (*br_ioctl_hook)(unsigned long arg);
+extern int (*br_handle_frame_hook)(struct sk_buff *skb);
+#if defined(CONFIG_BRIDGE_EBT_BROUTE) || \
+ defined(CONFIG_BRIDGE_EBT_BROUTE_MODULE)
+extern unsigned int (*broute_decision) (unsigned int hook, struct sk_buff **pskb,
+ const struct net_device *in, const struct net_device *out,
+ int (*okfn)(struct sk_buff *));
+#endif
+#endif
+
+#endif
diff --git a/kernel/linux/include/linux/netfilter_bridge.h b/kernel/linux/include/linux/netfilter_bridge.h
new file mode 100644
index 0000000..2c950c2
--- /dev/null
+++ b/kernel/linux/include/linux/netfilter_bridge.h
@@ -0,0 +1,36 @@
+#ifndef __LINUX_BRIDGE_NETFILTER_H
+#define __LINUX_BRIDGE_NETFILTER_H
+
+/* bridge-specific defines for netfilter.
+ */
+
+#include <linux/config.h>
+#include <linux/netfilter.h>
+
+/* Bridge Hooks */
+/* After promisc drops, checksum checks. */
+#define NF_BR_PRE_ROUTING 0
+/* If the packet is destined for this box. */
+#define NF_BR_LOCAL_IN 1
+/* If the packet is destined for another interface. */
+#define NF_BR_FORWARD 2
+/* Packets coming from a local process. */
+#define NF_BR_LOCAL_OUT 3
+/* Packets about to hit the wire. */
+#define NF_BR_POST_ROUTING 4
+/* Not really a hook, but used for the ebtables broute table */
+#define NF_BR_BROUTING 5
+#define NF_BR_NUMHOOKS 6
+
+enum nf_br_hook_priorities {
+ NF_BR_PRI_FIRST = INT_MIN,
+ NF_BR_PRI_FILTER_BRIDGED = -200,
+ NF_BR_PRI_FILTER_OTHER = 200,
+ NF_BR_PRI_NAT_DST_BRIDGED = -300,
+ NF_BR_PRI_NAT_DST_OTHER = 100,
+ NF_BR_PRI_NAT_SRC_BRIDGED = -100,
+ NF_BR_PRI_NAT_SRC_OTHER = 300,
+ NF_BR_PRI_LAST = INT_MAX,
+};
+
+#endif
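For reference, a minimal sketch (names are illustrative, not part of the patch) of a module attaching a callback at one of the bridge hook points above, using the same 2.4-style positional nf_hook_ops initializer found elsewhere in this patch:

    /* accept-everything callback at bridge PRE_ROUTING; the priority places it
     * among the "bridged traffic" filters from the enum above */
    static unsigned int my_br_hook(unsigned int hook, struct sk_buff **pskb,
       const struct net_device *in, const struct net_device *out,
       int (*okfn)(struct sk_buff *))
    {
        return NF_ACCEPT;
    }

    static struct nf_hook_ops my_br_ops =
    { { NULL, NULL }, my_br_hook, PF_BRIDGE, NF_BR_PRE_ROUTING,
      NF_BR_PRI_FILTER_BRIDGED };

    /* nf_register_hook(&my_br_ops) in the module's init function,
     * nf_unregister_hook(&my_br_ops) on unload */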
diff --git a/kernel/linux/include/linux/netfilter_bridge/ebt_arp.h b/kernel/linux/include/linux/netfilter_bridge/ebt_arp.h
new file mode 100644
index 0000000..a29f926
--- /dev/null
+++ b/kernel/linux/include/linux/netfilter_bridge/ebt_arp.h
@@ -0,0 +1,26 @@
+#ifndef __LINUX_BRIDGE_EBT_ARP_H
+#define __LINUX_BRIDGE_EBT_ARP_H
+
+#define EBT_ARP_OPCODE 0x01
+#define EBT_ARP_HTYPE 0x02
+#define EBT_ARP_PTYPE 0x04
+#define EBT_ARP_SRC_IP 0x08
+#define EBT_ARP_DST_IP 0x10
+#define EBT_ARP_MASK (EBT_ARP_OPCODE | EBT_ARP_HTYPE | EBT_ARP_PTYPE | \
+ EBT_ARP_SRC_IP | EBT_ARP_DST_IP)
+#define EBT_ARP_MATCH "arp"
+
+struct ebt_arp_info
+{
+ __u16 htype;
+ __u16 ptype;
+ __u16 opcode;
+ __u32 saddr;
+ __u32 smsk;
+ __u32 daddr;
+ __u32 dmsk;
+ __u8 bitmask;
+ __u8 invflags;
+};
+
+#endif
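A sketch of how userspace is expected to fill this structure; the values are made up, designated initializers are used for brevity, and ARPOP_REPLY is assumed to come from <linux/if_arp.h>. Per ebt_arp.c, the enclosing rule must also set its ethproto to ARP or RARP:

    /* match ARP replies whose target IP lies in 192.168.1.0/24;
     * addresses are in network byte order and already masked */
    struct ebt_arp_info arpinfo = {
        .opcode   = __constant_htons(ARPOP_REPLY),
        .daddr    = __constant_htonl(0xc0a80100),    /* 192.168.1.0 */
        .dmsk     = __constant_htonl(0xffffff00),    /* /24 */
        .bitmask  = EBT_ARP_OPCODE | EBT_ARP_DST_IP,
        .invflags = 0,
    };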
diff --git a/kernel/linux/include/linux/netfilter_bridge/ebt_ip.h b/kernel/linux/include/linux/netfilter_bridge/ebt_ip.h
new file mode 100644
index 0000000..f4f9ed1
--- /dev/null
+++ b/kernel/linux/include/linux/netfilter_bridge/ebt_ip.h
@@ -0,0 +1,24 @@
+#ifndef __LINUX_BRIDGE_EBT_IP_H
+#define __LINUX_BRIDGE_EBT_IP_H
+
+#define EBT_IP_SOURCE 0x01
+#define EBT_IP_DEST 0x02
+#define EBT_IP_TOS 0x04
+#define EBT_IP_PROTO 0x08
+#define EBT_IP_MASK (EBT_IP_SOURCE | EBT_IP_DEST | EBT_IP_TOS | EBT_IP_PROTO)
+#define EBT_IP_MATCH "ip"
+
+// the same values are used for the invflags
+struct ebt_ip_info
+{
+ __u32 saddr;
+ __u32 daddr;
+ __u32 smsk;
+ __u32 dmsk;
+ __u8 tos;
+ __u8 protocol;
+ __u8 bitmask;
+ __u8 invflags;
+};
+
+#endif
diff --git a/kernel/linux/include/linux/netfilter_bridge/ebt_log.h b/kernel/linux/include/linux/netfilter_bridge/ebt_log.h
new file mode 100644
index 0000000..9343d11
--- /dev/null
+++ b/kernel/linux/include/linux/netfilter_bridge/ebt_log.h
@@ -0,0 +1,17 @@
+#ifndef __LINUX_BRIDGE_EBT_LOG_H
+#define __LINUX_BRIDGE_EBT_LOG_H
+
+#define EBT_LOG_IP 0x01 // if the frame is an IP packet, log the IP information
+#define EBT_LOG_ARP 0x02
+#define EBT_LOG_MASK (EBT_LOG_IP | EBT_LOG_ARP)
+#define EBT_LOG_PREFIX_SIZE 30
+#define EBT_LOG_WATCHER "log"
+
+struct ebt_log_info
+{
+ __u8 loglevel;
+ __u8 prefix[EBT_LOG_PREFIX_SIZE];
+ __u32 bitmask;
+};
+
+#endif
diff --git a/kernel/linux/include/linux/netfilter_bridge/ebt_nat.h b/kernel/linux/include/linux/netfilter_bridge/ebt_nat.h
new file mode 100644
index 0000000..53c81d2
--- /dev/null
+++ b/kernel/linux/include/linux/netfilter_bridge/ebt_nat.h
@@ -0,0 +1,13 @@
+#ifndef __LINUX_BRIDGE_EBT_NAT_H
+#define __LINUX_BRIDGE_EBT_NAT_H
+
+struct ebt_nat_info
+{
+ unsigned char mac[ETH_ALEN];
+ // EBT_ACCEPT, EBT_DROP or EBT_CONTINUE
+ __u8 target;
+};
+#define EBT_SNAT_TARGET "snat"
+#define EBT_DNAT_TARGET "dnat"
+
+#endif
diff --git a/kernel/linux/include/linux/netfilter_bridge/ebt_redirect.h b/kernel/linux/include/linux/netfilter_bridge/ebt_redirect.h
new file mode 100644
index 0000000..82cd309
--- /dev/null
+++ b/kernel/linux/include/linux/netfilter_bridge/ebt_redirect.h
@@ -0,0 +1,11 @@
+#ifndef __LINUX_BRIDGE_EBT_REDIRECT_H
+#define __LINUX_BRIDGE_EBT_REDIRECT_H
+
+struct ebt_redirect_info
+{
+ // EBT_ACCEPT, EBT_DROP or EBT_CONTINUE
+ __u8 target;
+};
+#define EBT_REDIRECT_TARGET "redirect"
+
+#endif
diff --git a/kernel/linux/include/linux/netfilter_bridge/ebt_vlan.h b/kernel/linux/include/linux/netfilter_bridge/ebt_vlan.h
new file mode 100644
index 0000000..079112b
--- /dev/null
+++ b/kernel/linux/include/linux/netfilter_bridge/ebt_vlan.h
@@ -0,0 +1,18 @@
+#ifndef __LINUX_BRIDGE_EBT_VLAN_H
+#define __LINUX_BRIDGE_EBT_VLAN_H
+
+#define EBT_VLAN_ID 0x01
+#define EBT_VLAN_PRIO 0x02
+#define EBT_VLAN_MASK (EBT_VLAN_ID | EBT_VLAN_PRIO)
+#define EBT_VLAN_MATCH "vlan"
+
+struct ebt_vlan_info {
+ __u16 id; /* VLAN ID {1-4095} */
+ __u16 prio; /* VLAN Priority {0-7} */
+ __u8 bitmask; /* Args bitmask: bit 1=1 - ID arg,
+ bit 2=1 - Priority arg */
+ __u8 invflags; /* Inverse bitmask: bit 1=1 - inverted ID arg,
+ bit 2=1 - inverted Priority arg */
+};
+
+#endif
diff --git a/kernel/linux/include/linux/netfilter_bridge/ebtables.h b/kernel/linux/include/linux/netfilter_bridge/ebtables.h
new file mode 100644
index 0000000..f4f9e90
--- /dev/null
+++ b/kernel/linux/include/linux/netfilter_bridge/ebtables.h
@@ -0,0 +1,332 @@
+/*
+ * ebtables
+ *
+ * Authors:
+ * Bart De Schuymer <bart.de.schuymer@pandora.be>
+ *
+ * ebtables.c,v 2.0, April, 2002
+ *
+ * This code is strongly inspired by the iptables code, which is
+ * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
+ */
+
+#ifndef __LINUX_BRIDGE_EFF_H
+#define __LINUX_BRIDGE_EFF_H
+#include <linux/if.h> // IFNAMSIZ
+#include <linux/netfilter_bridge.h>
+#include <linux/if_ether.h> // ETH_ALEN
+
+#define EBT_TABLE_MAXNAMELEN 32
+#define EBT_FUNCTION_MAXNAMELEN EBT_TABLE_MAXNAMELEN
+
+// [gs]etsockopt numbers
+#define EBT_BASE_CTL 128
+
+#define EBT_SO_SET_ENTRIES (EBT_BASE_CTL)
+#define EBT_SO_SET_COUNTERS (EBT_SO_SET_ENTRIES+1)
+#define EBT_SO_SET_MAX (EBT_SO_SET_COUNTERS+1)
+
+#define EBT_SO_GET_INFO (EBT_BASE_CTL)
+#define EBT_SO_GET_ENTRIES (EBT_SO_GET_INFO+1)
+#define EBT_SO_GET_MAX (EBT_SO_GET_ENTRIES+1)
+
+#define EBT_ACCEPT 0
+#define EBT_DROP 1
+#define EBT_CONTINUE 2
+#define NUM_STANDARD_TARGETS 3
+
+struct ebt_entries {
+ // this field is always set to zero (including userspace).
+ // See EBT_ENTRY_OR_ENTRIES.
+ // Must be same size as ebt_entry.bitmask
+ __u32 distinguisher;
+ // one standard (accept or drop) per hook
+ __u8 policy;
+ // nr. of entries
+ __u32 nentries;
+ // entry list
+ __u8 data[0];
+};
+
+// used for the bitmask of struct ebt_entry
+
+// This is a hack to distinguish an ebt_entry struct from an
+// ebt_entries struct when traversing the entries from start to end.
+// Using this simplifies the code a lot, while still being able to use
+// ebt_entries.
+// In contrast, iptables doesn't use something like ebt_entries and therefore
+// uses different techniques for naming the policy and such. So, iptables
+// doesn't need a hack like this.
+#define EBT_ENTRY_OR_ENTRIES 0x01
+// these are the normal masks
+#define EBT_NOPROTO 0x02
+#define EBT_802_3 0x04
+#define EBT_SOURCEMAC 0x08
+#define EBT_DESTMAC 0x10
+#define EBT_F_MASK (EBT_NOPROTO | EBT_802_3 | EBT_SOURCEMAC | EBT_DESTMAC \
+ | EBT_ENTRY_OR_ENTRIES)
+
+#define EBT_IPROTO 0x01
+#define EBT_IIN 0x02
+#define EBT_IOUT 0x04
+#define EBT_ISOURCE 0x8
+#define EBT_IDEST 0x10
+#define EBT_ILOGICALIN 0x20
+#define EBT_ILOGICALOUT 0x40
+#define EBT_INV_MASK (EBT_IPROTO | EBT_IIN | EBT_IOUT | EBT_ILOGICALIN \
+ | EBT_ILOGICALOUT | EBT_ISOURCE | EBT_IDEST)
+
+struct ebt_counter
+{
+ __u64 pcnt;
+};
+
+struct ebt_entry_match
+{
+ union {
+ char name[EBT_FUNCTION_MAXNAMELEN];
+ struct ebt_match *match;
+ } u;
+ // size of data
+ unsigned int match_size;
+ unsigned char data[0];
+};
+
+struct ebt_entry_watcher
+{
+ union {
+ char name[EBT_FUNCTION_MAXNAMELEN];
+ struct ebt_watcher *watcher;
+ } u;
+ // size of data
+ unsigned int watcher_size;
+ unsigned char data[0];
+};
+
+struct ebt_entry_target
+{
+ union {
+ char name[EBT_FUNCTION_MAXNAMELEN];
+ struct ebt_target *target;
+ } u;
+ // size of data
+ unsigned int target_size;
+ unsigned char data[0];
+};
+
+#define EBT_STANDARD_TARGET "standard"
+struct ebt_standard_target
+{
+ struct ebt_entry_target target;
+ __u8 verdict;
+};
+
+// one entry
+struct ebt_entry {
+ // this needs to be the first field
+ __u32 bitmask;
+ __u32 invflags;
+ __u16 ethproto;
+ // the physical in-dev
+ __u8 in[IFNAMSIZ];
+ // the logical in-dev
+ __u8 logical_in[IFNAMSIZ];
+ // the physical out-dev
+ __u8 out[IFNAMSIZ];
+ // the logical out-dev
+ __u8 logical_out[IFNAMSIZ];
+ __u8 sourcemac[ETH_ALEN];
+ __u8 sourcemsk[ETH_ALEN];
+ __u8 destmac[ETH_ALEN];
+ __u8 destmsk[ETH_ALEN];
+ // sizeof ebt_entry + matches
+ __u16 watchers_offset;
+ // sizeof ebt_entry + matches + watchers
+ __u16 target_offset;
+ // sizeof ebt_entry + matches + watchers + target
+ __u16 next_offset;
+ unsigned char elems[0];
+};
+
+struct ebt_replace
+{
+ char name[EBT_TABLE_MAXNAMELEN];
+ unsigned int valid_hooks;
+ // nr of rules in the table
+ unsigned int nentries;
+ // total size of the entries
+ unsigned int entries_size;
+ // start of the chains
+ struct ebt_entries *hook_entry[NF_BR_NUMHOOKS];
+ // how many counters in front of it?
+ unsigned int counter_entry[NF_BR_NUMHOOKS];
+ // nr of counters userspace expects back
+ unsigned int num_counters;
+ // where the kernel will put the old counters
+ struct ebt_counter *counters;
+ char *entries;
+};
+
+#ifdef __KERNEL__
+
+struct ebt_match
+{
+ struct list_head list;
+ const char name[EBT_FUNCTION_MAXNAMELEN];
+ // 0 == it matches
+ int (*match)(const struct sk_buff *skb, const struct net_device *in,
+ const struct net_device *out, const void *matchdata,
+ unsigned int datalen, const struct ebt_counter *c);
+ // 0 == let it in
+ int (*check)(const char *tablename, unsigned int hooknr,
+ const struct ebt_entry *e, void *matchdata, unsigned int datalen);
+ void (*destroy)(void *matchdata, unsigned int datalen);
+ struct module *me;
+};
+
+struct ebt_watcher
+{
+ struct list_head list;
+ const char name[EBT_FUNCTION_MAXNAMELEN];
+ void (*watcher)(const struct sk_buff *skb, const struct net_device *in,
+ const struct net_device *out, const void *watcherdata,
+ unsigned int datalen, const struct ebt_counter *c);
+ // 0 == let it in
+ int (*check)(const char *tablename, unsigned int hooknr,
+ const struct ebt_entry *e, void *watcherdata, unsigned int datalen);
+ void (*destroy)(void *watcherdata, unsigned int datalen);
+ struct module *me;
+};
+
+struct ebt_target
+{
+ struct list_head list;
+ const char name[EBT_FUNCTION_MAXNAMELEN];
+ // returns one of the standard verdicts
+ __u8 (*target)(struct sk_buff **pskb,
+ unsigned int hooknr,
+ const struct net_device *in,
+ const struct net_device *out,
+ const void *targetdata,
+ unsigned int datalen);
+ // 0 == let it in
+ int (*check)(const char *tablename, unsigned int hooknr,
+ const struct ebt_entry *e, void *targetdata, unsigned int datalen);
+ void (*destroy)(void *targetdata, unsigned int datalen);
+ struct module *me;
+};
+
+struct ebt_table_info
+{
+ // total size of the entries
+ unsigned int entries_size;
+ unsigned int nentries;
+ // pointers to the start of the chains
+ struct ebt_entries *hook_entry[NF_BR_NUMHOOKS];
+ // how many counters in front of the counters belonging to a chain
+ unsigned int counter_entry[NF_BR_NUMHOOKS];
+ struct ebt_counter *counters;
+ char *entries;
+};
+
+struct ebt_table
+{
+ struct list_head list;
+ char name[EBT_TABLE_MAXNAMELEN];
+ struct ebt_replace *table;
+ unsigned int valid_hooks;
+ rwlock_t lock;
+ // e.g. could be the table explicitly only allows certain
+ // matches, targets, ... 0 == let it in
+ int (*check)(const struct ebt_table_info *info,
+ unsigned int valid_hooks);
+ // the data used by the kernel
+ struct ebt_table_info *private;
+};
+
+extern int ebt_register_table(struct ebt_table *table);
+extern void ebt_unregister_table(struct ebt_table *table);
+extern int ebt_register_match(struct ebt_match *match);
+extern void ebt_unregister_match(struct ebt_match *match);
+extern int ebt_register_watcher(struct ebt_watcher *watcher);
+extern void ebt_unregister_watcher(struct ebt_watcher *watcher);
+extern int ebt_register_target(struct ebt_target *target);
+extern void ebt_unregister_target(struct ebt_target *target);
+extern unsigned int ebt_do_table(unsigned int hook, struct sk_buff **pskb,
+ const struct net_device *in, const struct net_device *out,
+ struct ebt_table *table);
+
+#endif /* __KERNEL__ */
+
+// blatantly stolen from ip_tables.h
+// fn returns 0 to continue iteration
+#define EBT_MATCH_ITERATE(e, fn, args...) \
+({ \
+ unsigned int __i; \
+ int __ret = 0; \
+ struct ebt_entry_match *__match; \
+ \
+ for (__i = sizeof(struct ebt_entry); \
+ __i < (e)->watchers_offset; \
+ __i += __match->match_size + \
+ sizeof(struct ebt_entry_match)) { \
+ __match = (void *)(e) + __i; \
+ \
+ __ret = fn(__match , ## args); \
+ if (__ret != 0) \
+ break; \
+ } \
+ if (__ret == 0) { \
+ if (__i != (e)->watchers_offset) \
+ __ret = -EINVAL; \
+ } \
+ __ret; \
+})
+
+#define EBT_WATCHER_ITERATE(e, fn, args...) \
+({ \
+ unsigned int __i; \
+ int __ret = 0; \
+ struct ebt_entry_watcher *__watcher; \
+ \
+ for (__i = e->watchers_offset; \
+ __i < (e)->target_offset; \
+ __i += __watcher->watcher_size + \
+ sizeof(struct ebt_entry_watcher)) { \
+ __watcher = (void *)(e) + __i; \
+ \
+ __ret = fn(__watcher , ## args); \
+ if (__ret != 0) \
+ break; \
+ } \
+ if (__ret == 0) { \
+ if (__i != (e)->target_offset) \
+ __ret = -EINVAL; \
+ } \
+ __ret; \
+})
+
+#define EBT_ENTRY_ITERATE(entries, size, fn, args...) \
+({ \
+ unsigned int __i; \
+ int __ret = 0; \
+ struct ebt_entry *__entry; \
+ \
+ for (__i = 0; __i < (size);) { \
+ __entry = (void *)(entries) + __i; \
+ __ret = fn(__entry , ## args); \
+ if (__ret != 0) \
+ break; \
+ if (__entry->bitmask != 0) \
+ __i += __entry->next_offset; \
+ else \
+ __i += sizeof(struct ebt_entries); \
+ } \
+ if (__ret == 0) { \
+ if (__i != (size)) \
+ __ret = -EINVAL; \
+ } \
+ __ret; \
+})
+
+#endif
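To illustrate how these iterator macros are meant to be used (a sketch, not code from this patch): the callback receives each entry in turn and returns 0 to continue. Chain headers (struct ebt_entries) appear in the same walk and are told apart by the EBT_ENTRY_OR_ENTRIES bit described above.

    /* count the real rules in a blob of `size' bytes starting at `entries' */
    static inline int count_rule(struct ebt_entry *e, unsigned int *cnt)
    {
        if (e->bitmask & EBT_ENTRY_OR_ENTRIES) /* skip ebt_entries headers */
            (*cnt)++;
        return 0; /* 0 == keep iterating */
    }

    static unsigned int count_rules(char *entries, unsigned int size)
    {
        unsigned int cnt = 0;

        EBT_ENTRY_ITERATE(entries, size, count_rule, &cnt);
        return cnt;
    }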
diff --git a/kernel/linux/net/Config.in b/kernel/linux/net/Config.in
new file mode 100644
index 0000000..76b32b4
--- /dev/null
+++ b/kernel/linux/net/Config.in
@@ -0,0 +1,98 @@
+#
+# Network configuration
+#
+mainmenu_option next_comment
+comment 'Networking options'
+tristate 'Packet socket' CONFIG_PACKET
+if [ "$CONFIG_PACKET" != "n" ]; then
+ bool ' Packet socket: mmapped IO' CONFIG_PACKET_MMAP
+fi
+
+tristate 'Netlink device emulation' CONFIG_NETLINK_DEV
+
+bool 'Network packet filtering (replaces ipchains)' CONFIG_NETFILTER
+if [ "$CONFIG_NETFILTER" = "y" ]; then
+ bool ' Network packet filtering debugging' CONFIG_NETFILTER_DEBUG
+fi
+bool 'Socket Filtering' CONFIG_FILTER
+tristate 'Unix domain sockets' CONFIG_UNIX
+bool 'TCP/IP networking' CONFIG_INET
+if [ "$CONFIG_INET" = "y" ]; then
+ source net/ipv4/Config.in
+ if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
+# IPv6 as module will cause a CRASH if you try to unload it
+ tristate ' The IPv6 protocol (EXPERIMENTAL)' CONFIG_IPV6
+ if [ "$CONFIG_IPV6" != "n" ]; then
+ source net/ipv6/Config.in
+ fi
+ fi
+ if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
+ source net/khttpd/Config.in
+ fi
+fi
+if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
+ bool 'Asynchronous Transfer Mode (ATM) (EXPERIMENTAL)' CONFIG_ATM
+ if [ "$CONFIG_ATM" = "y" ]; then
+ if [ "$CONFIG_INET" = "y" ]; then
+ bool ' Classical IP over ATM' CONFIG_ATM_CLIP
+ if [ "$CONFIG_ATM_CLIP" = "y" ]; then
+ bool ' Do NOT send ICMP if no neighbour' CONFIG_ATM_CLIP_NO_ICMP
+ fi
+ fi
+ tristate ' LAN Emulation (LANE) support' CONFIG_ATM_LANE
+ if [ "$CONFIG_INET" = "y" -a "$CONFIG_ATM_LANE" != "n" ]; then
+ tristate ' Multi-Protocol Over ATM (MPOA) support' CONFIG_ATM_MPOA
+ fi
+ fi
+
+ dep_tristate '802.1Q VLAN Support (EXPERIMENTAL)' CONFIG_VLAN_8021Q $CONFIG_EXPERIMENTAL
+
+fi
+
+comment ' '
+tristate 'The IPX protocol' CONFIG_IPX
+if [ "$CONFIG_IPX" != "n" ]; then
+ source net/ipx/Config.in
+fi
+tristate 'Appletalk protocol support' CONFIG_ATALK
+tristate 'DECnet Support' CONFIG_DECNET
+if [ "$CONFIG_DECNET" != "n" ]; then
+ source net/decnet/Config.in
+fi
+dep_tristate '802.1d Ethernet Bridging' CONFIG_BRIDGE $CONFIG_INET
+if [ "$CONFIG_BRIDGE" != "n" -a "$CONFIG_NETFILTER" != "n" ]; then
+ source net/bridge/netfilter/Config.in
+fi
+if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
+ if [ "$CONFIG_BRIDGE" != "n" -a "$CONFIG_NETFILTER" != "n" ]; then
+ bool ' netfilter (firewalling) support' CONFIG_BRIDGE_NF
+ fi
+ tristate 'CCITT X.25 Packet Layer (EXPERIMENTAL)' CONFIG_X25
+ tristate 'LAPB Data Link Driver (EXPERIMENTAL)' CONFIG_LAPB
+ bool '802.2 LLC (EXPERIMENTAL)' CONFIG_LLC
+ bool 'Frame Diverter (EXPERIMENTAL)' CONFIG_NET_DIVERT
+# if [ "$CONFIG_LLC" = "y" ]; then
+# bool ' Netbeui (EXPERIMENTAL)' CONFIG_NETBEUI
+# fi
+ if [ "$CONFIG_INET" = "y" ]; then
+ tristate 'Acorn Econet/AUN protocols (EXPERIMENTAL)' CONFIG_ECONET
+ fi
+ if [ "$CONFIG_ECONET" != "n" ]; then
+ bool ' AUN over UDP' CONFIG_ECONET_AUNUDP
+ bool ' Native Econet' CONFIG_ECONET_NATIVE
+ fi
+ tristate 'WAN router' CONFIG_WAN_ROUTER
+ bool 'Fast switching (read help!)' CONFIG_NET_FASTROUTE
+ bool 'Forwarding between high speed interfaces' CONFIG_NET_HW_FLOWCONTROL
+fi
+
+mainmenu_option next_comment
+comment 'QoS and/or fair queueing'
+bool 'QoS and/or fair queueing' CONFIG_NET_SCHED
+if [ "$CONFIG_NET_SCHED" = "y" ]; then
+ source net/sched/Config.in
+fi
+#bool 'Network code profiler' CONFIG_NET_PROFILE
+endmenu
+
+endmenu
diff --git a/kernel/linux/net/Makefile b/kernel/linux/net/Makefile
new file mode 100644
index 0000000..25d02b2
--- /dev/null
+++ b/kernel/linux/net/Makefile
@@ -0,0 +1,63 @@
+#
+# Makefile for the linux networking.
+#
+# 2 Sep 2000, Christoph Hellwig <hch@infradead.org>
+# Rewritten to use lists instead of if-statements.
+#
+
+O_TARGET := network.o
+
+mod-subdirs := bridge/netfilter ipv4/netfilter ipv6/netfilter ipx irda \
+ bluetooth atm netlink sched
+export-objs := netsyms.o
+
+subdir-y := core ethernet
+subdir-m := ipv4 # hum?
+
+
+subdir-$(CONFIG_NET) += 802 sched netlink
+subdir-$(CONFIG_INET) += ipv4
+subdir-$(CONFIG_NETFILTER) += ipv4/netfilter
+subdir-$(CONFIG_UNIX) += unix
+subdir-$(CONFIG_IPV6) += ipv6
+
+ifneq ($(CONFIG_IPV6),n)
+ifneq ($(CONFIG_IPV6),)
+subdir-$(CONFIG_NETFILTER) += ipv6/netfilter
+endif
+endif
+
+ifneq ($(CONFIG_BRIDGE),n)
+ifneq ($(CONFIG_BRIDGE),)
+subdir-$(CONFIG_BRIDGE) += bridge/netfilter
+endif
+endif
+
+subdir-$(CONFIG_KHTTPD) += khttpd
+subdir-$(CONFIG_PACKET) += packet
+subdir-$(CONFIG_NET_SCHED) += sched
+subdir-$(CONFIG_BRIDGE) += bridge
+subdir-$(CONFIG_IPX) += ipx
+subdir-$(CONFIG_ATALK) += appletalk
+subdir-$(CONFIG_WAN_ROUTER) += wanrouter
+subdir-$(CONFIG_X25) += x25
+subdir-$(CONFIG_LAPB) += lapb
+subdir-$(CONFIG_NETROM) += netrom
+subdir-$(CONFIG_ROSE) += rose
+subdir-$(CONFIG_AX25) += ax25
+subdir-$(CONFIG_IRDA) += irda
+subdir-$(CONFIG_BLUEZ) += bluetooth
+subdir-$(CONFIG_SUNRPC) += sunrpc
+subdir-$(CONFIG_ATM) += atm
+subdir-$(CONFIG_DECNET) += decnet
+subdir-$(CONFIG_ECONET) += econet
+subdir-$(CONFIG_VLAN_8021Q) += 8021q
+
+
+obj-y := socket.o $(join $(subdir-y), $(patsubst %,/%.o,$(notdir $(subdir-y))))
+ifeq ($(CONFIG_NET),y)
+obj-$(CONFIG_MODULES) += netsyms.o
+obj-$(CONFIG_SYSCTL) += sysctl_net.o
+endif
+
+include $(TOPDIR)/Rules.make
diff --git a/kernel/linux/net/bridge/br_input.c b/kernel/linux/net/bridge/br_input.c
new file mode 100644
index 0000000..0a5d3eb
--- /dev/null
+++ b/kernel/linux/net/bridge/br_input.c
@@ -0,0 +1,178 @@
+/*
+ * Handle incoming frames
+ * Linux ethernet bridge
+ *
+ * Authors:
+ * Lennert Buytenhek <buytenh@gnu.org>
+ *
+ * $Id: br_input.c,v 1.1 2002/06/01 19:24:03 bdschuym Exp $
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/netfilter_bridge.h>
+#include "br_private.h"
+#if defined(CONFIG_BRIDGE_EBT_BROUTE) || \
+ defined(CONFIG_BRIDGE_EBT_BROUTE_MODULE)
+#include <linux/netfilter.h>
+#endif
+unsigned char bridge_ula[6] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
+
+static int br_pass_frame_up_finish(struct sk_buff *skb)
+{
+ netif_rx(skb);
+
+ return 0;
+}
+
+static void br_pass_frame_up(struct net_bridge *br, struct sk_buff *skb)
+{
+ struct net_device *indev;
+
+ br->statistics.rx_packets++;
+ br->statistics.rx_bytes += skb->len;
+
+ indev = skb->dev;
+ skb->dev = &br->dev;
+ skb->pkt_type = PACKET_HOST;
+ skb_push(skb, ETH_HLEN);
+ skb->protocol = eth_type_trans(skb, &br->dev);
+
+ NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL,
+ br_pass_frame_up_finish);
+}
+
+int br_handle_frame_finish(struct sk_buff *skb)
+{
+ struct net_bridge *br;
+ unsigned char *dest;
+ struct net_bridge_fdb_entry *dst;
+ struct net_bridge_port *p;
+ int passedup;
+
+ dest = skb->mac.ethernet->h_dest;
+
+ p = skb->dev->br_port;
+ if (p == NULL)
+ goto err_nolock;
+
+ br = p->br;
+ read_lock(&br->lock);
+ if (skb->dev->br_port == NULL)
+ goto err;
+
+ passedup = 0;
+ if (br->dev.flags & IFF_PROMISC) {
+ struct sk_buff *skb2;
+
+ skb2 = skb_clone(skb, GFP_ATOMIC);
+ if (skb2 != NULL) {
+ passedup = 1;
+ br_pass_frame_up(br, skb2);
+ }
+ }
+
+ if (dest[0] & 1) {
+ br_flood_forward(br, skb, !passedup);
+ if (!passedup)
+ br_pass_frame_up(br, skb);
+ goto out;
+ }
+
+ dst = br_fdb_get(br, dest);
+ if (dst != NULL && dst->is_local) {
+ if (!passedup)
+ br_pass_frame_up(br, skb);
+ else
+ kfree_skb(skb);
+ br_fdb_put(dst);
+ goto out;
+ }
+
+ if (dst != NULL) {
+ br_forward(dst->dst, skb);
+ br_fdb_put(dst);
+ goto out;
+ }
+
+ br_flood_forward(br, skb, 0);
+
+out:
+ read_unlock(&br->lock);
+ return 0;
+
+err:
+ read_unlock(&br->lock);
+err_nolock:
+ kfree_skb(skb);
+ return 0;
+}
+
+int br_handle_frame(struct sk_buff *skb)
+{
+ struct net_bridge *br;
+ unsigned char *dest;
+ struct net_bridge_port *p;
+
+ dest = skb->mac.ethernet->h_dest;
+
+ p = skb->dev->br_port;
+ if (p == NULL)
+ goto err_nolock;
+
+ br = p->br;
+ read_lock(&br->lock);
+ if (skb->dev->br_port == NULL)
+ goto err;
+
+ if (!(br->dev.flags & IFF_UP) ||
+ p->state == BR_STATE_DISABLED)
+ goto err;
+
+ if (skb->mac.ethernet->h_source[0] & 1)
+ goto err;
+
+ if (p->state == BR_STATE_LEARNING ||
+ p->state == BR_STATE_FORWARDING)
+ br_fdb_insert(br, p, skb->mac.ethernet->h_source, 0);
+
+ if (br->stp_enabled &&
+ !memcmp(dest, bridge_ula, 5) &&
+ !(dest[5] & 0xF0))
+ goto handle_special_frame;
+
+ if (p->state == BR_STATE_FORWARDING) {
+#if defined(CONFIG_BRIDGE_EBT_BROUTE) || \
+ defined(CONFIG_BRIDGE_EBT_BROUTE_MODULE)
+ if (broute_decision && broute_decision(NF_BR_BROUTING, &skb,
+ skb->dev, NULL, NULL) == NF_DROP) {
+ read_unlock(&br->lock);
+ return -1;
+ }
+#endif
+ NF_HOOK(PF_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
+ br_handle_frame_finish);
+ read_unlock(&br->lock);
+ return 0;
+ }
+
+err:
+ read_unlock(&br->lock);
+err_nolock:
+ kfree_skb(skb);
+ return 0;
+
+handle_special_frame:
+ if (!dest[5]) {
+ br_stp_handle_bpdu(skb);
+ return 0;
+ }
+
+ kfree_skb(skb);
+ return 0;
+}
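The broute_decision pointer tested in br_handle_frame() above is meant to be filled in by the ebtables broute table (ebtable_broute.c elsewhere in this commit). When that hook returns NF_DROP, br_handle_frame() returns -1 and the base patch arranges for a non-zero return to hand the frame back to the normal protocol stack instead of bridging it ("brouting"). A rough sketch of the function that gets plugged in, with illustrative names:

    static struct ebt_table broute_table; /* initializer omitted in this sketch */

    static unsigned int ebt_broute(unsigned int hook, struct sk_buff **pskb,
       const struct net_device *in, const struct net_device *out,
       int (*okfn)(struct sk_buff *))
    {
        /* NF_DROP from the broute table means "route, don't bridge" */
        return ebt_do_table(NF_BR_BROUTING, pskb, in, out, &broute_table);
    }

    /* on module load:   broute_decision = ebt_broute;
     * on module unload: broute_decision = NULL; */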
diff --git a/kernel/linux/net/bridge/br_private.h b/kernel/linux/net/bridge/br_private.h
new file mode 100644
index 0000000..5e33c9b
--- /dev/null
+++ b/kernel/linux/net/bridge/br_private.h
@@ -0,0 +1,212 @@
+/*
+ * Linux ethernet bridge
+ *
+ * Authors:
+ * Lennert Buytenhek <buytenh@gnu.org>
+ *
+ * $Id: br_private.h,v 1.1 2002/06/01 19:24:03 bdschuym Exp $
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _BR_PRIVATE_H
+#define _BR_PRIVATE_H
+
+#include <linux/netdevice.h>
+#include <linux/miscdevice.h>
+#include <linux/if_bridge.h>
+#include "br_private_timer.h"
+
+#define BR_HASH_BITS 8
+#define BR_HASH_SIZE (1 << BR_HASH_BITS)
+
+#define BR_HOLD_TIME (1*HZ)
+
+typedef struct bridge_id bridge_id;
+typedef struct mac_addr mac_addr;
+typedef __u16 port_id;
+
+struct bridge_id
+{
+ unsigned char prio[2];
+ unsigned char addr[6];
+};
+
+struct mac_addr
+{
+ unsigned char addr[6];
+ unsigned char pad[2];
+};
+
+struct net_bridge_fdb_entry
+{
+ struct net_bridge_fdb_entry *next_hash;
+ struct net_bridge_fdb_entry **pprev_hash;
+ atomic_t use_count;
+ mac_addr addr;
+ struct net_bridge_port *dst;
+ unsigned long ageing_timer;
+ unsigned is_local:1;
+ unsigned is_static:1;
+};
+
+struct net_bridge_port
+{
+ struct net_bridge_port *next;
+ struct net_bridge *br;
+ struct net_device *dev;
+ int port_no;
+
+ /* STP */
+ port_id port_id;
+ int state;
+ int path_cost;
+ bridge_id designated_root;
+ int designated_cost;
+ bridge_id designated_bridge;
+ port_id designated_port;
+ unsigned topology_change_ack:1;
+ unsigned config_pending:1;
+ int priority;
+
+ struct br_timer forward_delay_timer;
+ struct br_timer hold_timer;
+ struct br_timer message_age_timer;
+};
+
+struct net_bridge
+{
+ struct net_bridge *next;
+ rwlock_t lock;
+ struct net_bridge_port *port_list;
+ struct net_device dev;
+ struct net_device_stats statistics;
+ rwlock_t hash_lock;
+ struct net_bridge_fdb_entry *hash[BR_HASH_SIZE];
+ struct timer_list tick;
+
+ /* STP */
+ bridge_id designated_root;
+ int root_path_cost;
+ int root_port;
+ int max_age;
+ int hello_time;
+ int forward_delay;
+ bridge_id bridge_id;
+ int bridge_max_age;
+ int bridge_hello_time;
+ int bridge_forward_delay;
+ unsigned stp_enabled:1;
+ unsigned topology_change:1;
+ unsigned topology_change_detected:1;
+
+ struct br_timer hello_timer;
+ struct br_timer tcn_timer;
+ struct br_timer topology_change_timer;
+ struct br_timer gc_timer;
+
+ int ageing_time;
+ int gc_interval;
+};
+
+extern struct notifier_block br_device_notifier;
+extern unsigned char bridge_ula[6];
+
+/* br.c */
+extern void br_dec_use_count(void);
+extern void br_inc_use_count(void);
+
+/* br_device.c */
+extern int br_dev_xmit(struct sk_buff *skb, struct net_device *dev);
+extern void br_dev_setup(struct net_device *dev);
+extern int br_dev_xmit(struct sk_buff *skb, struct net_device *dev);
+
+/* br_fdb.c */
+extern void br_fdb_changeaddr(struct net_bridge_port *p,
+ unsigned char *newaddr);
+extern void br_fdb_cleanup(struct net_bridge *br);
+extern void br_fdb_delete_by_port(struct net_bridge *br,
+ struct net_bridge_port *p);
+extern struct net_bridge_fdb_entry *br_fdb_get(struct net_bridge *br,
+ unsigned char *addr);
+extern void br_fdb_put(struct net_bridge_fdb_entry *ent);
+extern int br_fdb_get_entries(struct net_bridge *br,
+ unsigned char *_buf,
+ int maxnum,
+ int offset);
+extern void br_fdb_insert(struct net_bridge *br,
+ struct net_bridge_port *source,
+ unsigned char *addr,
+ int is_local);
+
+/* br_forward.c */
+extern void br_deliver(struct net_bridge_port *to,
+ struct sk_buff *skb);
+extern int br_dev_queue_push_xmit(struct sk_buff *skb);
+extern void br_forward(struct net_bridge_port *to,
+ struct sk_buff *skb);
+extern int br_forward_finish(struct sk_buff *skb);
+extern void br_flood_deliver(struct net_bridge *br,
+ struct sk_buff *skb,
+ int clone);
+extern void br_flood_forward(struct net_bridge *br,
+ struct sk_buff *skb,
+ int clone);
+
+/* br_if.c */
+extern int br_add_bridge(char *name);
+extern int br_del_bridge(char *name);
+extern int br_add_if(struct net_bridge *br,
+ struct net_device *dev);
+extern int br_del_if(struct net_bridge *br,
+ struct net_device *dev);
+extern int br_get_bridge_ifindices(int *indices,
+ int num);
+extern void br_get_port_ifindices(struct net_bridge *br,
+ int *ifindices);
+
+/* br_input.c */
+extern int br_handle_frame_finish(struct sk_buff *skb);
+extern int br_handle_frame(struct sk_buff *skb);
+
+/* br_ioctl.c */
+extern void br_call_ioctl_atomic(void (*fn)(void));
+extern int br_ioctl(struct net_bridge *br,
+ unsigned int cmd,
+ unsigned long arg0,
+ unsigned long arg1,
+ unsigned long arg2);
+extern int br_ioctl_deviceless_stub(unsigned long arg);
+
+/* br_netfilter.c */
+extern int br_netfilter_init(void);
+extern void br_netfilter_fini(void);
+
+/* br_stp.c */
+extern int br_is_root_bridge(struct net_bridge *br);
+extern struct net_bridge_port *br_get_port(struct net_bridge *br,
+ int port_no);
+extern void br_init_port(struct net_bridge_port *p);
+extern port_id br_make_port_id(struct net_bridge_port *p);
+extern void br_become_designated_port(struct net_bridge_port *p);
+
+/* br_stp_if.c */
+extern void br_stp_enable_bridge(struct net_bridge *br);
+extern void br_stp_disable_bridge(struct net_bridge *br);
+extern void br_stp_enable_port(struct net_bridge_port *p);
+extern void br_stp_disable_port(struct net_bridge_port *p);
+extern void br_stp_recalculate_bridge_id(struct net_bridge *br);
+extern void br_stp_set_bridge_priority(struct net_bridge *br,
+ int newprio);
+extern void br_stp_set_port_priority(struct net_bridge_port *p,
+ int newprio);
+extern void br_stp_set_path_cost(struct net_bridge_port *p,
+ int path_cost);
+
+/* br_stp_bpdu.c */
+extern void br_stp_handle_bpdu(struct sk_buff *skb);
+
+#endif
diff --git a/kernel/linux/net/bridge/netfilter/Config.in b/kernel/linux/net/bridge/netfilter/Config.in
new file mode 100644
index 0000000..69c176d
--- /dev/null
+++ b/kernel/linux/net/bridge/netfilter/Config.in
@@ -0,0 +1,15 @@
+#
+# Bridge netfilter configuration
+#
+dep_tristate ' Bridge: ebtables' CONFIG_BRIDGE_EBT $CONFIG_BRIDGE
+dep_tristate ' ebt: filter table support' CONFIG_BRIDGE_EBT_T_FILTER $CONFIG_BRIDGE_EBT
+dep_tristate ' ebt: nat table support' CONFIG_BRIDGE_EBT_T_NAT $CONFIG_BRIDGE_EBT
+dep_tristate ' ebt: broute table support' CONFIG_BRIDGE_EBT_BROUTE $CONFIG_BRIDGE_EBT
+dep_tristate ' ebt: LOG support' CONFIG_BRIDGE_EBT_LOG $CONFIG_BRIDGE_EBT
+dep_tristate ' ebt: IP filter support' CONFIG_BRIDGE_EBT_IPF $CONFIG_BRIDGE_EBT
+dep_tristate ' ebt: ARP filter support' CONFIG_BRIDGE_EBT_ARPF $CONFIG_BRIDGE_EBT
+dep_tristate ' ebt: 802.1Q VLAN filter support (EXPERIMENTAL)' CONFIG_BRIDGE_EBT_VLANF $CONFIG_BRIDGE_EBT
+dep_tristate ' ebt: nat target support' CONFIG_BRIDGE_EBT_NAT $CONFIG_BRIDGE_EBT
+dep_tristate ' ebt: redirect target support' CONFIG_BRIDGE_EBT_REDIRECT $CONFIG_BRIDGE_EBT
+dep_tristate ' Bridge: ethernet database' CONFIG_BRIDGE_DB $CONFIG_BRIDGE
+
diff --git a/kernel/linux/net/bridge/netfilter/Makefile b/kernel/linux/net/bridge/netfilter/Makefile
new file mode 100644
index 0000000..12eefaa
--- /dev/null
+++ b/kernel/linux/net/bridge/netfilter/Makefile
@@ -0,0 +1,25 @@
+#
+# Makefile for the netfilter modules on top of bridging.
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+#
+# Note 2! The CFLAGS definition is now in the main makefile...
+
+O_TARGET := netfilter.o
+
+export-objs = ebtables.o
+
+obj-$(CONFIG_BRIDGE_EBT) += ebtables.o
+obj-$(CONFIG_BRIDGE_EBT_T_FILTER) += ebtable_filter.o
+obj-$(CONFIG_BRIDGE_EBT_T_NAT) += ebtable_nat.o
+obj-$(CONFIG_BRIDGE_EBT_BROUTE) += ebtable_broute.o
+obj-$(CONFIG_BRIDGE_DB) += br_db.o
+obj-$(CONFIG_BRIDGE_EBT_IPF) += ebt_ip.o
+obj-$(CONFIG_BRIDGE_EBT_ARPF) += ebt_arp.o
+obj-$(CONFIG_BRIDGE_EBT_VLANF) += ebt_vlan.o
+obj-$(CONFIG_BRIDGE_EBT_LOG) += ebt_log.o
+obj-$(CONFIG_BRIDGE_EBT_NAT) += ebt_nat.o
+obj-$(CONFIG_BRIDGE_EBT_REDIRECT) += ebt_redirect.o
+include $(TOPDIR)/Rules.make
diff --git a/kernel/linux/net/bridge/netfilter/br_db.c b/kernel/linux/net/bridge/netfilter/br_db.c
new file mode 100644
index 0000000..ad63647
--- /dev/null
+++ b/kernel/linux/net/bridge/netfilter/br_db.c
@@ -0,0 +1,357 @@
+/*
+ * bridge ethernet protocol database
+ *
+ * Authors:
+ * Bart De Schuymer <bart.de.schuymer@pandora.be>
+ *
+ * br_db.c, April, 2002
+ *
+ * This code is strongly inspired by the iptables code, which is
+ * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/skbuff.h>
+#include <linux/if_ether.h>
+#include <linux/netfilter_bridge.h>
+#include <linux/br_db.h>
+#include <linux/socket.h> /* PF_BRIDGE */
+#include <linux/spinlock.h> /* rwlock_t */
+#include <asm/errno.h>
+#include <asm/uaccess.h> /* copy_[to,from]_user */
+#include <linux/smp.h> /* multiprocessors */
+
+#define BUGPRINT(format, args...) printk("kernel msg: brdb bug: please report to author: "format, ## args)
+/*#define BUGPRINT(format, args...)*/
+#define MEMPRINT(format, args...) printk("kernel msg: brdb : out of memory: "format, ## args)
+/*#define MEMPRINT(format, args...)*/
+
+/* database variables */
+static __u16 allowdb = BRDB_NODB;
+static struct brdb_dbentry **flowdb = NULL;
+static unsigned int *dbsize;
+static unsigned int *dbnum;
+/* database lock */
+static rwlock_t brdb_dblock;
+
+static inline int brdb_dev_check(char *entry, const struct net_device *device){
+ if (*entry == '\0') return 0;
+ if (!device) return 1;
+ return strncmp(entry, device->name, IFNAMSIZ);
+}
+
+static inline int brdb_proto_check(unsigned int a, unsigned int b){
+ if (a == b || ( a == IDENTIFY802_3 && ntohs(b) < 1536 )) return 0;
+ return 1;
+}
+
+static unsigned int maintaindb (unsigned int hook, struct sk_buff **pskb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ struct brdb_dbentry *hlp;
+ int i, cpunr;
+ unsigned short ethproto = ((**pskb).mac.ethernet)->h_proto;
+
+ cpunr = cpu_number_map(smp_processor_id());
+
+ read_lock_bh(&brdb_dblock);
+
+ if (allowdb == BRDB_NODB) {// must be after readlock
+ read_unlock_bh(&brdb_dblock);
+ return NF_ACCEPT;
+ }
+ hlp = flowdb[cpunr];
+ /* search for existing entry */
+ for (i = 0; i < dbnum[cpunr]; i++) {
+ if (hlp->hook == hook && !brdb_proto_check(hlp->ethproto, ethproto) &&
+ !brdb_dev_check(hlp->in, in) && !brdb_dev_check(hlp->out, out)) {
+ read_unlock_bh(&brdb_dblock);
+ return NF_ACCEPT;
+ }
+ hlp++;
+ }
+ /* add new entry to database */
+ if (dbnum[cpunr] == dbsize[cpunr]) {
+ dbsize[cpunr] *= 2;
+ if ( !( hlp = (struct brdb_dbentry *) vmalloc(dbsize[cpunr] * sizeof(struct brdb_dbentry)) ) ) {
+ dbsize[cpunr] /= 2;
+ MEMPRINT("maintaindb && nomemory\n");
+ read_unlock_bh(&brdb_dblock);
+ return NF_ACCEPT;
+ }
+ memcpy(hlp, flowdb[cpunr], dbnum[cpunr] * sizeof(struct brdb_dbentry));
+ vfree(flowdb[cpunr]);
+ flowdb[cpunr] = hlp;
+ }
+
+ hlp = flowdb[cpunr] + dbnum[cpunr];
+ hlp->hook = hook;
+ if (in)
+ strncpy(hlp->in, in->name, IFNAMSIZ);
+ else
+ hlp->in[0] = '\0';
+ if (out)
+ strncpy(hlp->out, out->name, IFNAMSIZ);
+ else
+ hlp->out[0] = '\0';
+ if (ntohs(ethproto) < 1536)
+ hlp->ethproto = IDENTIFY802_3;
+ else
+ hlp->ethproto = ethproto;
+ dbnum[cpunr]++;
+
+ read_unlock_bh(&brdb_dblock);
+
+ return NF_ACCEPT;
+}
+
+static int copy_db(void *user, int *len)
+{
+ int i, j, nentries = 0, ret;
+ struct brdb_dbentry *begin, *end1, *end2, *point, *point2;
+
+ write_lock_bh(&brdb_dblock);
+ for (i = 0; i < smp_num_cpus; i++)
+ nentries += dbnum[i];
+ if (*len > nentries) {
+ write_unlock_bh(&brdb_dblock);
+ return -EINVAL;
+ }
+
+ if ( !(begin = (struct brdb_dbentry *) vmalloc((*len) * sizeof(struct brdb_dbentry))) ) {
+ write_unlock_bh(&brdb_dblock);
+ return -ENOMEM;
+ }
+ memcpy(begin, flowdb[0], dbnum[0] * sizeof(struct brdb_dbentry));
+ end1 = begin + dbnum[0];
+ for (i = 1; i < smp_num_cpus; i++) {/* cycle databases per cpu */
+ point2 = flowdb[i];
+ end2 = end1;
+ for (j = 0; j < dbnum[i]; j++) {/* cycle entries of a cpu's database (point2) */
+ for (point = begin; point != end2; point++)/* cycle different entries we found so far */
+ if (point->hook == point2->hook && !strncmp(point->in, point2->in, IFNAMSIZ) &&
+ !strncmp(point->out, point2->out, IFNAMSIZ) && point->ethproto == point2->ethproto)
+ goto out;/* already exists in a database of another cpu */
+
+ memcpy(end1, point2, sizeof(struct brdb_dbentry));
+ end1++;
+out:
+ point2++;
+ }
+ }
+ write_unlock_bh(&brdb_dblock);
+ i = (int)( (char *)end1 - (char *)begin);
+ *len = i < *len ? i : *len;
+ if (copy_to_user(user, begin, *len * sizeof(struct brdb_dbentry)) != 0)
+ ret = -EFAULT;
+ else
+ ret = 0;
+ vfree(begin);
+ return ret;
+}
+
+static int switch_nodb(void){
+ int i;
+
+ if (!flowdb)
+ BUGPRINT("switch_nodb && !flowdb\n");
+ for (i = 0; i < smp_num_cpus; i++)
+ vfree(flowdb[i]);
+ vfree(flowdb);
+ if (!dbsize)
+ BUGPRINT("switch_nodb && !dbsize\n");
+ vfree(dbsize);
+ if (!dbnum)
+ BUGPRINT("switch_nodb && !dbnum\n");
+ vfree(dbnum);
+ flowdb = NULL;
+ allowdb = BRDB_NODB;
+ return 0;
+}
+
+static int switch_db(void)
+{
+ int i, j;
+
+ if (flowdb) BUGPRINT("switch_db && flowdb\n");
+ if ( !(flowdb = (struct brdb_dbentry **) vmalloc(smp_num_cpus * sizeof(struct brdb_dbentry *))) ) {
+ MEMPRINT("switch_db && nomemory\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < smp_num_cpus; i++)
+ if ( !(flowdb[i] = (struct brdb_dbentry *) vmalloc(INITIAL_DBSIZE * sizeof(struct brdb_dbentry))) )
+ goto sw_free1;
+ else
+ memset(flowdb[i], 0, INITIAL_DBSIZE * sizeof(struct brdb_dbentry));
+
+ if ( !(dbnum = (int*) vmalloc(smp_num_cpus * sizeof(int))) )
+ goto sw_free2;
+
+ if ( !(dbsize = (int*) vmalloc(smp_num_cpus * sizeof(int))) )
+ goto sw_free3;
+
+ for (i = 0; i < smp_num_cpus; i++) {
+ dbnum[i] = 0;
+ dbsize[i] = INITIAL_DBSIZE;
+ }
+ allowdb = BRDB_DB;
+ return 0;
+
+sw_free3:
+ MEMPRINT("switch_db && nomemory2\n");
+ vfree(dbnum);
+ dbnum = NULL;
+sw_free2:
+ MEMPRINT("switch_db && nomemory3\n");
+sw_free1:
+ MEMPRINT("switch_db && nomemory4\n");
+ for (j = 0; j<i; j++)
+ vfree(flowdb[j]);
+ vfree(flowdb);
+ allowdb = BRDB_NODB;
+ return -ENOMEM;
+}
+
+static int
+do_brdb_set_ctl(struct sock *sk, int cmd, void *user, unsigned int len)
+{
+ int ret;
+ __u16 adb;
+ switch(cmd) {
+ case BRDB_SO_SET_ALLOWDB:
+ if (len != sizeof(__u16)) {
+ ret = -EINVAL;
+ break;
+ }
+ if (copy_from_user(&adb, user, len) != 0) {
+ ret = -EFAULT;
+ break;
+ }
+ if (adb != BRDB_DB && adb != BRDB_NODB) {
+ ret = -EINVAL;
+ break;
+ }
+ write_lock_bh(&brdb_dblock);
+ if (adb == allowdb) {
+ ret = 0;
+ write_unlock_bh(&brdb_dblock);
+ break;
+ }
+ if (allowdb == BRDB_DB)
+ ret = switch_nodb();
+ else
+ ret = switch_db();
+ write_unlock_bh(&brdb_dblock);
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static int
+do_brdb_get_ctl(struct sock *sk, int cmd, void *user, int *len)
+{
+ struct brdb_dbinfo help2;
+ int i, ret;
+ switch(cmd) {
+ case BRDB_SO_GET_DBINFO:
+ if (sizeof(struct brdb_dbinfo) != *len)
+ return -EINVAL;
+ write_lock_bh(&brdb_dblock);
+ /* 0 == no database
+ * i-1 == number of entries (if database)
+ */
+ if (allowdb == BRDB_NODB)
+ help2.nentries = 0;
+ else {
+ help2.nentries = 1;
+ for (i = 0; i < smp_num_cpus; i++)
+ help2.nentries += dbnum[i];
+ }
+ write_unlock_bh(&brdb_dblock);
+ if (copy_to_user(user, &help2, sizeof(help2)) != 0)
+ ret = -EFAULT;
+ else
+ ret = 0;
+ break;
+
+ case BRDB_SO_GET_DB:
+ if (*len == 0 || allowdb == BRDB_NODB)
+ return -EINVAL;
+ ret = copy_db(user, len);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static struct nf_sockopt_ops brdb_sockopts
+= { { NULL, NULL }, PF_INET, BRDB_BASE_CTL, BRDB_SO_SET_MAX+1, do_brdb_set_ctl,
+ BRDB_BASE_CTL, BRDB_SO_GET_MAX+1, do_brdb_get_ctl, 0, NULL };
+
+
+static struct nf_hook_ops brdb_br_ops[] = {
+ { { NULL, NULL }, maintaindb, PF_BRIDGE, NF_BR_PRE_ROUTING, -250},
+ { { NULL, NULL }, maintaindb, PF_BRIDGE, NF_BR_LOCAL_IN, -250},
+ { { NULL, NULL }, maintaindb, PF_BRIDGE, NF_BR_FORWARD, -250},
+ { { NULL, NULL }, maintaindb, PF_BRIDGE, NF_BR_LOCAL_OUT, -250},
+ { { NULL, NULL }, maintaindb, PF_BRIDGE, NF_BR_POST_ROUTING, -250}
+};
+
+static int __init init(void)
+{
+ int ret;
+
+ /* initialize the lock before any hook can run */
+ rwlock_init(&brdb_dblock);
+
+ if ((ret = nf_register_hook(&brdb_br_ops[0])) < 0)
+ return ret;
+
+ if ((ret = nf_register_hook(&brdb_br_ops[1])) < 0)
+ goto clean0;
+
+ if ((ret = nf_register_hook(&brdb_br_ops[2])) < 0)
+ goto clean1;
+
+ if ((ret = nf_register_hook(&brdb_br_ops[3])) < 0)
+ goto clean2;
+
+ if ((ret = nf_register_hook(&brdb_br_ops[4])) < 0)
+ goto clean3;
+
+ /* Register setsockopt */
+ if ((ret = nf_register_sockopt(&brdb_sockopts)) < 0)
+ goto clean4;
+
+ printk("Bridge ethernet database registered\n");
+ return ret;
+
+clean4: nf_unregister_hook(&brdb_br_ops[4]);
+clean3: nf_unregister_hook(&brdb_br_ops[3]);
+clean2: nf_unregister_hook(&brdb_br_ops[2]);
+clean1: nf_unregister_hook(&brdb_br_ops[1]);
+clean0: nf_unregister_hook(&brdb_br_ops[0]);
+
+ return ret;
+}
+
+static void __exit fini(void)
+{
+ nf_unregister_hook(&brdb_br_ops[4]);
+ nf_unregister_hook(&brdb_br_ops[3]);
+ nf_unregister_hook(&brdb_br_ops[2]);
+ nf_unregister_hook(&brdb_br_ops[1]);
+ nf_unregister_hook(&brdb_br_ops[0]);
+ nf_unregister_sockopt(&brdb_sockopts);
+}
+
+module_init(init);
+module_exit(fini);
diff --git a/kernel/linux/net/bridge/netfilter/ebt_arp.c b/kernel/linux/net/bridge/netfilter/ebt_arp.c
new file mode 100644
index 0000000..44c65c4
--- /dev/null
+++ b/kernel/linux/net/bridge/netfilter/ebt_arp.c
@@ -0,0 +1,107 @@
+/*
+ * ebt_arp
+ *
+ * Authors:
+ * Bart De Schuymer <bart.de.schuymer@pandora.be>
+ * Tim Gardner <timg@tpi.com>
+ *
+ * April, 2002
+ *
+ */
+
+#include <linux/netfilter_bridge/ebtables.h>
+#include <linux/netfilter_bridge/ebt_arp.h>
+#include <linux/if_arp.h>
+#include <linux/module.h>
+
+#define FWINV2(bool,invflg) ((bool) ^ !!(infostuff->invflags & invflg))
+static int ebt_filter_arp(const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const void *data,
+ unsigned int datalen, const struct ebt_counter *c)
+{
+ struct ebt_arp_info *infostuff = (struct ebt_arp_info *)data;
+
+ if (infostuff->bitmask & EBT_ARP_OPCODE && FWINV2(infostuff->opcode !=
+ ((*skb).nh.arph)->ar_op, EBT_ARP_OPCODE))
+ return 1;
+ if (infostuff->bitmask & EBT_ARP_HTYPE && FWINV2(infostuff->htype !=
+ ((*skb).nh.arph)->ar_hrd, EBT_ARP_HTYPE))
+ return 1;
+ if (infostuff->bitmask & EBT_ARP_PTYPE && FWINV2(infostuff->ptype !=
+ ((*skb).nh.arph)->ar_pro, EBT_ARP_PTYPE))
+ return 1;
+
+ if (infostuff->bitmask & (EBT_ARP_SRC_IP | EBT_ARP_DST_IP))
+ {
+ __u32 arp_len = sizeof(struct arphdr) +
+ (2*(((*skb).nh.arph)->ar_hln)) +
+ (2*(((*skb).nh.arph)->ar_pln));
+ __u32 dst;
+ __u32 src;
+
+ // Make sure the packet is long enough.
+ if ((((*skb).nh.raw) + arp_len) > (*skb).tail)
+ return 1;
+ // IPV4 addresses are always 4 bytes.
+ if (((*skb).nh.arph)->ar_pln != sizeof(__u32))
+ return 1;
+
+ if (infostuff->bitmask & EBT_ARP_SRC_IP) {
+ memcpy(&src, ((*skb).nh.raw) + sizeof(struct arphdr) +
+ ((*skb).nh.arph)->ar_hln, sizeof(__u32));
+ if (FWINV2(infostuff->saddr != (src & infostuff->smsk),
+ EBT_ARP_SRC_IP))
+ return 1;
+ }
+
+ if (infostuff->bitmask & EBT_ARP_DST_IP) {
+ memcpy(&dst, ((*skb).nh.raw)+sizeof(struct arphdr) +
+ (2*(((*skb).nh.arph)->ar_hln)) +
+ (((*skb).nh.arph)->ar_pln), sizeof(__u32));
+ if (FWINV2(infostuff->daddr != (dst & infostuff->dmsk),
+ EBT_ARP_DST_IP))
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int ebt_arp_check(const char *tablename, unsigned int hooknr,
+ const struct ebt_entry *e, void *data, unsigned int datalen)
+{
+ struct ebt_arp_info *infostuff = (struct ebt_arp_info *) data;
+
+ if (datalen != sizeof(struct ebt_arp_info))
+ return -EINVAL;
+ if (e->bitmask & (EBT_NOPROTO | EBT_802_3) ||
+ (e->ethproto != __constant_htons(ETH_P_ARP) &&
+ e->ethproto != __constant_htons(ETH_P_RARP)) ||
+ e->invflags & EBT_IPROTO)
+ return -EINVAL;
+ if (infostuff->bitmask & ~EBT_ARP_MASK)
+ return -EINVAL;
+ return 0;
+}
+
+static struct ebt_match filter_arp =
+{
+ {NULL, NULL}, EBT_ARP_MATCH, ebt_filter_arp, ebt_arp_check, NULL,
+ THIS_MODULE
+};
+
+static int __init init(void)
+{
+ return ebt_register_match(&filter_arp);
+}
+
+static void __exit fini(void)
+{
+ ebt_unregister_match(&filter_arp);
+}
+
+module_init(init);
+module_exit(fini);
+EXPORT_NO_SYMBOLS;
+MODULE_LICENSE("GPL");
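The FWINV2 macro above is the usual netfilter inverse-flag idiom; spelled out as a helper (illustration only, not code from the patch):

    /* a field test rejects the frame when (comparison failed) XOR (inverse bit set) */
    static inline int field_rejects(int mismatch, __u8 invflags, __u8 flag)
    {
        return mismatch ^ !!(invflags & flag);
    }

    /* e.g. a rule with EBT_ARP_OPCODE set in both bitmask and invflags rejects
     * frames whose opcode equals infostuff->opcode and accepts all others:
     * the inverted ("!") form of the option in userspace */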
diff --git a/kernel/linux/net/bridge/netfilter/ebt_ip.c b/kernel/linux/net/bridge/netfilter/ebt_ip.c
new file mode 100644
index 0000000..c34b1b5
--- /dev/null
+++ b/kernel/linux/net/bridge/netfilter/ebt_ip.c
@@ -0,0 +1,81 @@
+/*
+ * ebt_ip
+ *
+ * Authors:
+ * Bart De Schuymer <bart.de.schuymer@pandora.be>
+ *
+ * April, 2002
+ *
+ */
+
+#include <linux/netfilter_bridge/ebtables.h>
+#include <linux/netfilter_bridge/ebt_ip.h>
+#include <linux/ip.h>
+#include <linux/module.h>
+
+#define FWINV2(bool,invflg) ((bool) ^ !!(infostuff->invflags & invflg))
+static int ebt_filter_ip(const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const void *data,
+ unsigned int datalen, const struct ebt_counter *c)
+{
+ struct ebt_ip_info *infostuff = (struct ebt_ip_info *) data;
+
+ if (infostuff->bitmask & EBT_IP_TOS &&
+ FWINV2(infostuff->tos != ((*skb).nh.iph)->tos, EBT_IP_TOS))
+ return 1;
+ if (infostuff->bitmask & EBT_IP_PROTO && FWINV2(infostuff->protocol !=
+ ((*skb).nh.iph)->protocol, EBT_IP_PROTO))
+ return 1;
+ if (infostuff->bitmask & EBT_IP_SOURCE &&
+ FWINV2((((*skb).nh.iph)->saddr & infostuff->smsk) !=
+ infostuff->saddr, EBT_IP_SOURCE))
+ return 1;
+ if ((infostuff->bitmask & EBT_IP_DEST) &&
+ FWINV2((((*skb).nh.iph)->daddr & infostuff->dmsk) !=
+ infostuff->daddr, EBT_IP_DEST))
+ return 1;
+ return 0;
+}
+
+static int ebt_ip_check(const char *tablename, unsigned int hooknr,
+ const struct ebt_entry *e, void *data, unsigned int datalen)
+{
+ struct ebt_ip_info *infostuff = (struct ebt_ip_info *) data;
+
+ if (datalen != sizeof(struct ebt_ip_info)) {
+ return -EINVAL;
+ }
+ if (e->bitmask & (EBT_NOPROTO | EBT_802_3) ||
+ e->ethproto != __constant_htons(ETH_P_IP) ||
+ e->invflags & EBT_IPROTO)
+ {
+ return -EINVAL;
+ }
+ if (infostuff->bitmask & ~EBT_IP_MASK) {
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static struct ebt_match filter_ip =
+{
+ {NULL, NULL}, EBT_IP_MATCH, ebt_filter_ip, ebt_ip_check, NULL,
+ THIS_MODULE
+};
+
+static int __init init(void)
+{
+ return ebt_register_match(&filter_ip);
+}
+
+static void __exit fini(void)
+{
+ ebt_unregister_match(&filter_ip);
+}
+
+module_init(init);
+module_exit(fini);
+EXPORT_NO_SYMBOLS;
+MODULE_LICENSE("GPL");
diff --git a/kernel/linux/net/bridge/netfilter/ebt_log.c b/kernel/linux/net/bridge/netfilter/ebt_log.c
new file mode 100644
index 0000000..e7f0506
--- /dev/null
+++ b/kernel/linux/net/bridge/netfilter/ebt_log.c
@@ -0,0 +1,111 @@
+/*
+ * ebt_log
+ *
+ * Authors:
+ * Bart De Schuymer <bart.de.schuymer@pandora.be>
+ *
+ * April, 2002
+ *
+ */
+
+#include <linux/netfilter_bridge/ebtables.h>
+#include <linux/netfilter_bridge/ebt_log.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/if_arp.h>
+#include <linux/spinlock.h>
+
+static spinlock_t ebt_log_lock = SPIN_LOCK_UNLOCKED;
+
+static int ebt_log_check(const char *tablename, unsigned int hooknr,
+ const struct ebt_entry *e, void *data, unsigned int datalen)
+{
+ struct ebt_log_info *loginfo = (struct ebt_log_info *)data;
+
+ if (datalen != sizeof(struct ebt_log_info))
+ return -EINVAL;
+ if (loginfo->bitmask & ~EBT_LOG_MASK)
+ return -EINVAL;
+ if (loginfo->loglevel >= 8)
+ return -EINVAL;
+ loginfo->prefix[EBT_LOG_PREFIX_SIZE - 1] = '\0';
+ return 0;
+}
+
+static void ebt_log(const struct sk_buff *skb, const struct net_device *in,
+ const struct net_device *out, const void *data, unsigned int datalen,
+ const struct ebt_counter *c)
+{
+ struct ebt_log_info *loginfo = (struct ebt_log_info *)data;
+ char level_string[4] = "< >";
+ level_string[1] = '0' + loginfo->loglevel;
+
+ spin_lock_bh(&ebt_log_lock);
+ printk(level_string);
+ // max length: 29 + 10 + 2 * 16
+ printk("%s IN=%s OUT=%s ",
+ loginfo->prefix,
+ in ? in->name : "",
+ out ? out->name : "");
+
+ if (skb->dev->hard_header_len) {
+ int i;
+ unsigned char *p = (skb->mac.ethernet)->h_source;
+ printk("MAC source = ");
+ for (i = 0; i < ETH_ALEN; i++,p++)
+ printk("%02x%c", *p,
+ i == ETH_ALEN - 1
+ ? ' ':':');// length: 31
+ printk("MAC dest = ");
+ p = (skb->mac.ethernet)->h_dest;
+ for (i = 0; i < ETH_ALEN; i++,p++)
+ printk("%02x%c", *p,
+ i == ETH_ALEN - 1
+ ? ' ':':');// length: 29
+ }
+ // length: 14
+ printk("proto = 0x%04x", ntohs(((*skb).mac.ethernet)->h_proto));
+
+ if ((loginfo->bitmask & EBT_LOG_IP) && skb->mac.ethernet->h_proto ==
+ htons(ETH_P_IP)){
+ struct iphdr *iph = skb->nh.iph;
+ // max length: 46
+ printk(" IP SRC=%u.%u.%u.%u IP DST=%u.%u.%u.%u,",
+ NIPQUAD(iph->saddr), NIPQUAD(iph->daddr));
+ // max length: 26
+ printk(" IP tos=0x%02X, IP proto=%d", iph->tos, iph->protocol);
+ }
+
+ if ((loginfo->bitmask & EBT_LOG_ARP) &&
+ ((skb->mac.ethernet->h_proto == __constant_htons(ETH_P_ARP)) ||
+ (skb->mac.ethernet->h_proto == __constant_htons(ETH_P_RARP)))) {
+ struct arphdr * arph = skb->nh.arph;
+ // max length: 40
+ printk(" ARP HTYPE=%d, PTYPE=0x%04x, OPCODE=%d",
+ ntohs(arph->ar_hrd), ntohs(arph->ar_pro),
+ ntohs(arph->ar_op));
+ }
+ printk("\n");
+ spin_unlock_bh(&ebt_log_lock);
+}
+
+struct ebt_watcher log =
+{
+ {NULL, NULL}, EBT_LOG_WATCHER, ebt_log, ebt_log_check, NULL,
+ THIS_MODULE
+};
+
+static int __init init(void)
+{
+ return ebt_register_watcher(&log);
+}
+
+static void __exit fini(void)
+{
+ ebt_unregister_watcher(&log);
+}
+
+module_init(init);
+module_exit(fini);
+EXPORT_NO_SYMBOLS;
+MODULE_LICENSE("GPL");
diff --git a/kernel/linux/net/bridge/netfilter/ebt_nat.c b/kernel/linux/net/bridge/netfilter/ebt_nat.c
new file mode 100644
index 0000000..16694c5
--- /dev/null
+++ b/kernel/linux/net/bridge/netfilter/ebt_nat.c
@@ -0,0 +1,106 @@
+/*
+ * ebt_nat
+ *
+ * Authors:
+ * Bart De Schuymer <bart.de.schuymer@pandora.be>
+ *
+ * April, 2002
+ *
+ */
+
+#include <linux/netfilter_bridge/ebtables.h>
+#include <linux/netfilter_bridge/ebt_nat.h>
+#include <linux/netfilter_bridge.h>
+#include <linux/skbuff.h>
+#include <linux/module.h>
+#include <net/sock.h>
+
+static __u8 ebt_target_snat(struct sk_buff **pskb, unsigned int hooknr,
+ const struct net_device *in, const struct net_device *out,
+ const void *data, unsigned int datalen)
+{
+ struct ebt_nat_info *infostuff = (struct ebt_nat_info *) data;
+
+ memcpy(((**pskb).mac.ethernet)->h_source, infostuff->mac,
+ ETH_ALEN * sizeof(unsigned char));
+ return infostuff->target;
+}
+
+static __u8 ebt_target_dnat(struct sk_buff **pskb, unsigned int hooknr,
+ const struct net_device *in, const struct net_device *out,
+ const void *data, unsigned int datalen)
+{
+ struct ebt_nat_info *infostuff = (struct ebt_nat_info *) data;
+
+ memcpy(((**pskb).mac.ethernet)->h_dest, infostuff->mac,
+ ETH_ALEN * sizeof(unsigned char));
+ return infostuff->target;
+}
+
+static int ebt_target_snat_check(const char *tablename, unsigned int hooknr,
+ const struct ebt_entry *e, void *data, unsigned int datalen)
+{
+ struct ebt_nat_info *infostuff = (struct ebt_nat_info *) data;
+
+ if (strcmp(tablename, "nat"))
+ return -EINVAL;
+ if (datalen != sizeof(struct ebt_nat_info))
+ return -EINVAL;
+ if (hooknr != NF_BR_POST_ROUTING)
+ return -EINVAL;
+ if (infostuff->target >= NUM_STANDARD_TARGETS)
+ return -EINVAL;
+ return 0;
+}
+
+static int ebt_target_dnat_check(const char *tablename, unsigned int hooknr,
+ const struct ebt_entry *e, void *data, unsigned int datalen)
+{
+ struct ebt_nat_info *infostuff = (struct ebt_nat_info *) data;
+
+ if ( (strcmp(tablename, "nat") ||
+ (hooknr != NF_BR_PRE_ROUTING && hooknr != NF_BR_LOCAL_OUT)) &&
+ (strcmp(tablename, "broute") || hooknr != NF_BR_BROUTING) )
+ return -EINVAL;
+ if (datalen != sizeof(struct ebt_nat_info))
+ return -EINVAL;
+ if (infostuff->target >= NUM_STANDARD_TARGETS)
+ return -EINVAL;
+ return 0;
+}
+
+static struct ebt_target snat =
+{
+ {NULL, NULL}, EBT_SNAT_TARGET, ebt_target_snat, ebt_target_snat_check,
+ NULL, THIS_MODULE
+};
+
+static struct ebt_target dnat =
+{
+ {NULL, NULL}, EBT_DNAT_TARGET, ebt_target_dnat, ebt_target_dnat_check,
+ NULL, THIS_MODULE
+};
+
+static int __init init(void)
+{
+ int ret;
+ ret = ebt_register_target(&snat);
+ if (ret != 0)
+ return ret;
+ ret = ebt_register_target(&dnat);
+ if (ret == 0)
+ return 0;
+ ebt_unregister_target(&snat);
+ return ret;
+}
+
+static void __exit fini(void)
+{
+ ebt_unregister_target(&snat);
+ ebt_unregister_target(&dnat);
+}
+
+module_init(init);
+module_exit(fini);
+EXPORT_NO_SYMBOLS;
+MODULE_LICENSE("GPL");
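
The compound condition in ebt_target_dnat_check() above is easiest to read as a whitelist: dnat is accepted only in the nat table on the PRE_ROUTING and LOCAL_OUT hooks, or in the broute table on the BROUTING hook. A hedged restatement as a small helper (the helper name is ours, not part of the patch):

/* Sketch: positive restatement of the placement rule enforced above. */
static int dnat_placement_ok(const char *tablename, unsigned int hooknr)
{
	if (strcmp(tablename, "nat") == 0 &&
	    (hooknr == NF_BR_PRE_ROUTING || hooknr == NF_BR_LOCAL_OUT))
		return 1;
	if (strcmp(tablename, "broute") == 0 && hooknr == NF_BR_BROUTING)
		return 1;
	return 0;	/* any other placement is rejected with -EINVAL */
}
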
diff --git a/kernel/linux/net/bridge/netfilter/ebt_redirect.c b/kernel/linux/net/bridge/netfilter/ebt_redirect.c
new file mode 100644
index 0000000..c26d57b
--- /dev/null
+++ b/kernel/linux/net/bridge/netfilter/ebt_redirect.c
@@ -0,0 +1,65 @@
+/*
+ * ebt_redirect
+ *
+ * Authors:
+ * Bart De Schuymer <bart.de.schuymer@pandora.be>
+ *
+ * April, 2002
+ *
+ */
+
+#include <linux/netfilter_bridge/ebtables.h>
+#include <linux/netfilter_bridge/ebt_redirect.h>
+#include <linux/netfilter_bridge.h>
+#include <linux/skbuff.h>
+#include <linux/module.h>
+#include <net/sock.h>
+#include "../br_private.h"
+
+static __u8 ebt_target_redirect(struct sk_buff **pskb, unsigned int hooknr,
+ const struct net_device *in, const struct net_device *out,
+ const void *data, unsigned int datalen)
+{
+ struct ebt_redirect_info *infostuff = (struct ebt_redirect_info *) data;
+
+ memcpy((**pskb).mac.ethernet->h_dest,
+ in->br_port->br->dev.dev_addr, ETH_ALEN);
+ (*pskb)->pkt_type = PACKET_HOST;
+ return infostuff->target;
+}
+
+static int ebt_target_redirect_check(const char *tablename, unsigned int hooknr,
+ const struct ebt_entry *e, void *data, unsigned int datalen)
+{
+ struct ebt_redirect_info *infostuff = (struct ebt_redirect_info *) data;
+
+ if ( (strcmp(tablename, "nat") || hooknr != NF_BR_PRE_ROUTING) &&
+ (strcmp(tablename, "broute") || hooknr != NF_BR_BROUTING) )
+ return -EINVAL;
+ if (datalen != sizeof(struct ebt_redirect_info))
+ return -EINVAL;
+ if (infostuff->target >= NUM_STANDARD_TARGETS)
+ return -EINVAL;
+ return 0;
+}
+
+static struct ebt_target redirect_target =
+{
+ {NULL, NULL}, EBT_REDIRECT_TARGET, ebt_target_redirect,
+ ebt_target_redirect_check, NULL, THIS_MODULE
+};
+
+static int __init init(void)
+{
+ return ebt_register_target(&redirect_target);
+}
+
+static void __exit fini(void)
+{
+ ebt_unregister_target(&redirect_target);
+}
+
+module_init(init);
+module_exit(fini);
+EXPORT_NO_SYMBOLS;
+MODULE_LICENSE("GPL");
diff --git a/kernel/linux/net/bridge/netfilter/ebt_vlan.c b/kernel/linux/net/bridge/netfilter/ebt_vlan.c
new file mode 100644
index 0000000..8ad921d
--- /dev/null
+++ b/kernel/linux/net/bridge/netfilter/ebt_vlan.c
@@ -0,0 +1,124 @@
+/*
+ * ebt_vlan kernelspace
+ *
+ * Authors:
+ * Bart De Schuymer <bart.de.schuymer@pandora.be>
+ * Nick Fedchik <nick@fedchik.org.ua>
+ *
+ * May, 2002
+ */
+
+#include <linux/netfilter_bridge/ebtables.h>
+#include <linux/netfilter_bridge/ebt_vlan.h>
+#include <linux/if_vlan.h>
+#include <linux/if_ether.h>
+#include <linux/module.h>
+
+static unsigned char debug;
+MODULE_PARM (debug, "0-1b");
+MODULE_PARM_DESC (debug, "debug=1 turns on debug messages");
+
+static int ebt_filter_vlan (const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const void *data,
+ unsigned int datalen,
+ const struct ebt_counter *c)
+{
+ struct ebt_vlan_info *infostuff = (struct ebt_vlan_info *) data;
+ struct vlan_ethhdr *vlanethhdr =
+ (struct vlan_ethhdr *) skb->mac.raw;
+ unsigned short v_id;
+ unsigned short v_prio;
+
+ /*
+ * Calculate 802.1Q VLAN ID and Priority
+ * Reserved one bit (13) for CFI
+ */
+ v_id = ntohs ((unsigned short) vlanethhdr->h_vlan_TCI) & 0xFFF;
+ v_prio = ntohs ((unsigned short) vlanethhdr->h_vlan_TCI) >> 13;
+
+ /*
+ * Checking VLANs
+ */
+ if (infostuff->bitmask & EBT_VLAN_ID) { /* Is VLAN ID parsed? */
+ if (!((infostuff->id == v_id)
+ ^ !!(infostuff->invflags & EBT_VLAN_ID)))
+ return 1;
+ if (debug)
+ printk (KERN_DEBUG
+ "ebt_vlan: matched ID=%s%d (mask=%X)\n",
+ (infostuff->invflags & EBT_VLAN_ID) ? "!" : "",
+ infostuff->id,
+ (unsigned char) infostuff->bitmask);
+ }
+ /*
+ * Checking Priority
+ */
+ if (infostuff->bitmask & EBT_VLAN_PRIO) { /* Is VLAN Prio parsed? */
+ if (!( (infostuff->prio == v_prio)
+ ^ !!(infostuff->invflags & EBT_VLAN_PRIO)))
+ return 1; /* missed */
+ if (debug)
+ printk (KERN_DEBUG
+ "ebt_vlan: matched Prio=%s%d (mask=%X)\n",
+ (infostuff->invflags & EBT_VLAN_PRIO) ? "!" : "",
+ infostuff->prio,
+ (unsigned char) infostuff->bitmask);
+ }
+ /*
+ * rule matched
+ */
+ return 0;
+}
+
+/*
+ * ebt_vlan_check() is called when userspace delivers the table to the kernel;
+ * it is called to check that userspace doesn't give a bad table.
+ */
+static int ebt_vlan_check (const char *tablename, unsigned int hooknr,
+ const struct ebt_entry *e, void *data,
+ unsigned int datalen)
+{
+ struct ebt_vlan_info *infostuff = (struct ebt_vlan_info *) data;
+
+ if (datalen != sizeof (struct ebt_vlan_info))
+ return -EINVAL;
+
+ if (e->ethproto != __constant_htons (ETH_P_8021Q))
+ return -EINVAL;
+
+ if (infostuff->bitmask & ~EBT_VLAN_MASK) {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct ebt_match filter_vlan = {
+ {NULL, NULL}, EBT_VLAN_MATCH, ebt_filter_vlan, ebt_vlan_check,
+ NULL,
+ THIS_MODULE
+};
+
+static int __init init (void)
+{
+ printk (KERN_INFO
+ "ebt_vlan: 802.1Q VLAN matching module for EBTables\n");
+ if (debug)
+ printk (KERN_DEBUG
+ "ebt_vlan: 802.1Q matching debug is on\n");
+ return ebt_register_match (&filter_vlan);
+}
+
+static void __exit fini (void)
+{
+ ebt_unregister_match (&filter_vlan);
+}
+
+module_init (init);
+module_exit (fini);
+EXPORT_NO_SYMBOLS;
+MODULE_AUTHOR ("Nick Fedchik <nick@fedchik.org.ua>");
+MODULE_DESCRIPTION ("802.1Q VLAN matching module for ebtables, v0.1");
+MODULE_LICENSE ("GPL");
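
As a worked example of the TCI decoding in ebt_filter_vlan() above: for a host-order TCI of 0xA00A the VLAN ID is 0xA00A & 0xFFF = 10 and the priority is 0xA00A >> 13 = 5, with the CFI bit clear. A tiny illustration (the sample value is hypothetical):

/* Sketch: decoding a sample TCI the same way ebt_filter_vlan() does. */
unsigned short tci = 0xA00A;		/* priority 5, CFI 0, VID 10 */
unsigned short v_id = tci & 0xFFF;	/* -> 10 */
unsigned short v_prio = tci >> 13;	/* -> 5 */
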
diff --git a/kernel/linux/net/bridge/netfilter/ebtable_broute.c b/kernel/linux/net/bridge/netfilter/ebtable_broute.c
new file mode 100644
index 0000000..ce880a2
--- /dev/null
+++ b/kernel/linux/net/bridge/netfilter/ebtable_broute.c
@@ -0,0 +1,80 @@
+/*
+ * ebtable_broute
+ *
+ * Authors:
+ * Bart De Schuymer <bart.de.schuymer@pandora.be>
+ *
+ * April, 2002
+ *
+ * This table lets you choose between routing and bridging for frames
+ * entering on a bridge-enslaved NIC. It is traversed before any
+ * other ebtables table. See net/bridge/br_input.c.
+ */
+
+#include <linux/netfilter_bridge/ebtables.h>
+#include <linux/netfilter_bridge.h>
+#include <linux/netdevice.h>
+#include <linux/module.h>
+#include <linux/if_bridge.h>
+#include <linux/brlock.h>
+
+// EBT_ACCEPT means the frame will be bridged
+// EBT_DROP means the frame will be routed
+static struct ebt_entries initial_chain =
+ {0, EBT_ACCEPT, 0};
+
+static struct ebt_replace initial_table =
+{
+ "broute", 1 << NF_BR_BROUTING, 0, sizeof(struct ebt_entries),
+ { [NF_BR_BROUTING]&initial_chain}, {},
+ 0, NULL, (char *)&initial_chain
+};
+
+static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
+{
+ if (valid_hooks & ~(1 << NF_BR_BROUTING))
+ return -EINVAL;
+ return 0;
+}
+
+static struct ebt_table broute_table =
+{
+ {NULL, NULL}, "broute", &initial_table, 1 << NF_BR_BROUTING,
+ RW_LOCK_UNLOCKED, check, NULL
+};
+
+static unsigned int
+ebt_broute (unsigned int hook, struct sk_buff **pskb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ return ebt_do_table(hook, pskb, in, out, &broute_table);
+}
+
+static int __init init(void)
+{
+ int ret;
+
+ ret = ebt_register_table(&broute_table);
+ if (ret < 0)
+ return ret;
+ br_write_lock_bh(BR_NETPROTO_LOCK);
+ // in br_input.c, br_handle_frame() wants to call broute_decision()
+ broute_decision = ebt_broute;
+ br_write_unlock_bh(BR_NETPROTO_LOCK);
+ return ret;
+}
+
+static void __exit fini(void)
+{
+ br_write_lock_bh(BR_NETPROTO_LOCK);
+ broute_decision = NULL;
+ br_write_unlock_bh(BR_NETPROTO_LOCK);
+ ebt_unregister_table(&broute_table);
+}
+
+module_init(init);
+module_exit(fini);
+EXPORT_NO_SYMBOLS;
+MODULE_LICENSE("GPL");
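
The consuming side lives in net/bridge/br_input.c (patched elsewhere in this commit): br_handle_frame() calls broute_decision(), and per the comments above an EBT_ACCEPT verdict (NF_ACCEPT) lets the frame be bridged while EBT_DROP (NF_DROP) hands it back for routing. A sketch of that decision under those assumptions, not the literal br_input.c hunk (the helper name is ours):

/* Sketch: 1 if the frame should be bridged, 0 if it should instead be
 * given back so the normal routing code can handle it. */
static int frame_should_be_bridged(struct sk_buff **pskb)
{
	if (broute_decision &&
	    broute_decision(NF_BR_BROUTING, pskb, (*pskb)->dev, NULL, NULL)
	    == NF_DROP)
		return 0;
	return 1;
}
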
diff --git a/kernel/linux/net/bridge/netfilter/ebtable_filter.c b/kernel/linux/net/bridge/netfilter/ebtable_filter.c
new file mode 100644
index 0000000..e16f696
--- /dev/null
+++ b/kernel/linux/net/bridge/netfilter/ebtable_filter.c
@@ -0,0 +1,93 @@
+/*
+ * ebtable_filter
+ *
+ * Authors:
+ * Bart De Schuymer <bart.de.schuymer@pandora.be>
+ *
+ * April, 2002
+ *
+ */
+
+#include <linux/netfilter_bridge/ebtables.h>
+#include <linux/netfilter_bridge.h>
+#include <linux/module.h>
+
+#define FILTER_VALID_HOOKS ((1 << NF_BR_LOCAL_IN) | (1 << NF_BR_FORWARD) | \
+ (1 << NF_BR_LOCAL_OUT))
+
+static struct ebt_entries initial_chains[] =
+{
+ {0, EBT_ACCEPT, 0},
+ {0, EBT_ACCEPT, 0},
+ {0, EBT_ACCEPT, 0}
+};
+
+static struct ebt_replace initial_table =
+{
+ "filter", FILTER_VALID_HOOKS, 0, 3 * sizeof(struct ebt_entries),
+ { [NF_BR_LOCAL_IN]&initial_chains[0], [NF_BR_FORWARD]&initial_chains[1],
+ [NF_BR_LOCAL_OUT]&initial_chains[2] },{},
+ 0, NULL, (char *)initial_chains
+};
+
+static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
+{
+ if (valid_hooks & ~FILTER_VALID_HOOKS)
+ return -EINVAL;
+ return 0;
+}
+
+static struct ebt_table frame_filter =
+{
+ {NULL, NULL}, "filter", &initial_table, FILTER_VALID_HOOKS,
+ RW_LOCK_UNLOCKED, check, NULL
+};
+
+static unsigned int ebt_hook (unsigned int hook, struct sk_buff **pskb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ return ebt_do_table(hook, pskb, in, out, &frame_filter);
+}
+
+static struct nf_hook_ops ebt_ops_filter[] = {
+ { { NULL, NULL }, ebt_hook, PF_BRIDGE, NF_BR_LOCAL_IN,
+ NF_BR_PRI_FILTER_BRIDGED},
+ { { NULL, NULL }, ebt_hook, PF_BRIDGE, NF_BR_FORWARD,
+ NF_BR_PRI_FILTER_BRIDGED},
+ { { NULL, NULL }, ebt_hook, PF_BRIDGE, NF_BR_LOCAL_OUT,
+ NF_BR_PRI_FILTER_OTHER}
+};
+
+static int __init init(void)
+{
+ int i, j, ret;
+
+ ret = ebt_register_table(&frame_filter);
+ if (ret < 0)
+ return ret;
+ for (i = 0; i < sizeof(ebt_ops_filter) / sizeof(ebt_ops_filter[0]); i++)
+ if ((ret = nf_register_hook(&ebt_ops_filter[i])) < 0)
+ goto cleanup;
+ return ret;
+cleanup:
+ for (j = 0; j < i; j++)
+ nf_unregister_hook(&ebt_ops_filter[j]);
+ ebt_unregister_table(&frame_filter);
+ return ret;
+}
+
+static void __exit fini(void)
+{
+ int i;
+
+ for (i = 0; i < sizeof(ebt_ops_filter) / sizeof(ebt_ops_filter[0]); i++)
+ nf_unregister_hook(&ebt_ops_filter[i]);
+ ebt_unregister_table(&frame_filter);
+}
+
+module_init(init);
+module_exit(fini);
+EXPORT_NO_SYMBOLS;
+MODULE_LICENSE("GPL");
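
The init/fini pair above registers an array of nf_hook_ops and unwinds the already-registered hooks on failure; ebtable_nat.c below repeats the same pattern. A hedged sketch of that pattern factored into a helper (the helper is ours, not part of the patch):

/* Sketch: register n hooks, unregistering the earlier ones on failure. */
static int register_hook_array(struct nf_hook_ops *ops, int n)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = nf_register_hook(&ops[i]);
		if (ret < 0) {
			while (--i >= 0)
				nf_unregister_hook(&ops[i]);
			return ret;
		}
	}
	return 0;
}
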
diff --git a/kernel/linux/net/bridge/netfilter/ebtable_nat.c b/kernel/linux/net/bridge/netfilter/ebtable_nat.c
new file mode 100644
index 0000000..b99db09
--- /dev/null
+++ b/kernel/linux/net/bridge/netfilter/ebtable_nat.c
@@ -0,0 +1,156 @@
+/*
+ * ebtable_nat
+ *
+ * Authors:
+ * Bart De Schuymer <bart.de.schuymer@pandora.be>
+ *
+ * April, 2002
+ *
+ */
+
+#include <linux/netfilter_bridge/ebtables.h>
+#include <linux/netfilter_bridge.h>
+#include <linux/netdevice.h>
+#include <linux/module.h>
+#define NAT_VALID_HOOKS ((1 << NF_BR_PRE_ROUTING) | (1 << NF_BR_LOCAL_OUT) | \
+ (1 << NF_BR_POST_ROUTING))
+
+static struct ebt_entries initial_chains[] =
+{
+ {0, EBT_ACCEPT, 0},
+ {0, EBT_ACCEPT, 0},
+ {0, EBT_ACCEPT, 0}
+};
+
+static struct ebt_replace initial_table =
+{
+ "nat", NAT_VALID_HOOKS, 0, 3 * sizeof(struct ebt_entries),
+ { [NF_BR_PRE_ROUTING]&initial_chains[0], [NF_BR_LOCAL_OUT]&initial_chains[1],
+ [NF_BR_POST_ROUTING]&initial_chains[2] }, {},
+ 0, NULL, (char *)initial_chains
+};
+
+static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
+{
+ if (valid_hooks & ~NAT_VALID_HOOKS)
+ return -EINVAL;
+ return 0;
+}
+
+static struct ebt_table frame_nat =
+{
+ {NULL, NULL}, "nat", &initial_table, NAT_VALID_HOOKS,
+ RW_LOCK_UNLOCKED, check, NULL
+};
+
+// used for snat to know if the frame comes from FORWARD or LOCAL_OUT.
+// needed because of the bridge-nf patch (that allows use of iptables
+// on bridged traffic)
+// if the packet is routed, we want the ebtables stuff on POSTROUTING
+// to be executed _after_ the iptables stuff; when it's bridged, it's
+// the other way around
+static struct net_device __fake_net_device = {
+ hard_header_len: ETH_HLEN
+};
+
+static unsigned int
+ebt_nat_dst (unsigned int hook, struct sk_buff **pskb,
+ const struct net_device *in, const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ return ebt_do_table(hook, pskb, in, out, &frame_nat);
+}
+
+// let snat know this frame is routed
+static unsigned int ebt_clear_physin (unsigned int hook, struct sk_buff **pskb,
+ const struct net_device *in, const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ (*pskb)->physindev = NULL;
+ return NF_ACCEPT;
+}
+
+// let snat know this frame is bridged
+static unsigned int ebt_set_physin (unsigned int hook, struct sk_buff **pskb,
+ const struct net_device *in, const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ (*pskb)->physindev = &__fake_net_device;
+ return NF_ACCEPT;
+}
+
+static unsigned int ebt_nat_src (unsigned int hook, struct sk_buff **pskb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ // this is a routed packet
+ if ((*pskb)->physindev == NULL)
+ return NF_ACCEPT;
+ if ((*pskb)->physindev != &__fake_net_device)
+ printk("ebtables (br_nat_src): physindev hack "
+ "doesn't work - BUG\n");
+
+ return ebt_do_table(hook, pskb, in, out, &frame_nat);
+}
+
+static unsigned int ebt_nat_src_route (unsigned int hook, struct sk_buff **pskb,
+ const struct net_device *in, const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ // this is a bridged packet
+ if ((*pskb)->physindev == &__fake_net_device)
+ return NF_ACCEPT;
+ if ((*pskb)->physindev)
+ printk("ebtables (br_nat_src_route): physindev hack "
+ "doesn't work - BUG\n");
+
+ return ebt_do_table(hook, pskb, in, out, &frame_nat);
+}
+
+static struct nf_hook_ops ebt_ops_nat[] = {
+ { { NULL, NULL }, ebt_nat_dst, PF_BRIDGE, NF_BR_LOCAL_OUT,
+ NF_BR_PRI_NAT_DST_OTHER},
+ { { NULL, NULL }, ebt_nat_src, PF_BRIDGE, NF_BR_POST_ROUTING,
+ NF_BR_PRI_NAT_SRC_BRIDGED},
+ { { NULL, NULL }, ebt_nat_src_route, PF_BRIDGE, NF_BR_POST_ROUTING,
+ NF_BR_PRI_NAT_SRC_OTHER},
+ { { NULL, NULL }, ebt_nat_dst, PF_BRIDGE, NF_BR_PRE_ROUTING,
+ NF_BR_PRI_NAT_DST_BRIDGED},
+ { { NULL, NULL }, ebt_clear_physin, PF_BRIDGE, NF_BR_LOCAL_OUT,
+ NF_BR_PRI_FILTER_OTHER + 1},
+ { { NULL, NULL }, ebt_set_physin, PF_BRIDGE, NF_BR_FORWARD,
+ NF_BR_PRI_FILTER_OTHER + 1}
+};
+
+static int __init init(void)
+{
+ int i, ret, j;
+
+ ret = ebt_register_table(&frame_nat);
+ if (ret < 0)
+ return ret;
+ for (i = 0; i < sizeof(ebt_ops_nat) / sizeof(ebt_ops_nat[0]); i++)
+ if ((ret = nf_register_hook(&ebt_ops_nat[i])) < 0)
+ goto cleanup;
+ return ret;
+cleanup:
+ for (j = 0; j < i; j++)
+ nf_unregister_hook(&ebt_ops_nat[j]);
+ ebt_unregister_table(&frame_nat);
+ return ret;
+}
+
+static void __exit fini(void)
+{
+ int i;
+
+ for (i = 0; i < sizeof(ebt_ops_nat) / sizeof(ebt_ops_nat[0]); i++)
+ nf_unregister_hook(&ebt_ops_nat[i]);
+ ebt_unregister_table(&frame_nat);
+}
+
+module_init(init);
+module_exit(fini);
+EXPORT_NO_SYMBOLS;
+MODULE_LICENSE("GPL");
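
The physindev trick above is just a one-bit marker: ebt_clear_physin() clears it for routed frames on LOCAL_OUT, ebt_set_physin() points it at __fake_net_device for bridged frames on FORWARD, and the two POST_ROUTING hooks then pick whichever snat pass matches the marker. A minimal sketch of the test they share (the helper name is ours):

/* Sketch: the "was this frame bridged?" test used by the snat hooks above. */
static inline int ebt_frame_was_bridged(const struct sk_buff *skb)
{
	return skb->physindev == &__fake_net_device;
}
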
diff --git a/kernel/linux/net/bridge/netfilter/ebtables.c b/kernel/linux/net/bridge/netfilter/ebtables.c
new file mode 100644
index 0000000..3f8d550
--- /dev/null
+++ b/kernel/linux/net/bridge/netfilter/ebtables.c
@@ -0,0 +1,1189 @@
+/*
+ * ebtables
+ *
+ * Author:
+ * Bart De Schuymer <bart.de.schuymer@pandora.be>
+ *
+ * ebtables.c,v 2.0, April, 2002
+ *
+ * This code is strongly inspired by the iptables code, which is
+ * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+// used for print_string
+#include <linux/sched.h>
+#include <linux/tty.h>
+
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/skbuff.h>
+#include <linux/if_ether.h>
+#include <linux/netfilter_bridge.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_bridge/ebtables.h>
+#include <linux/spinlock.h>
+#include <asm/uaccess.h>
+#include <linux/smp.h>
+#include <net/sock.h>
+// needed for logical [in,out]-dev filtering
+#include "../br_private.h"
+
+// list_named_find
+#define ASSERT_READ_LOCK(x)
+#define ASSERT_WRITE_LOCK(x)
+#include <linux/netfilter_ipv4/listhelp.h>
+
+#if 0 // use this for remote debugging
+#define BUGPRINT(args) print_string(args);
+#else
+#define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\
+ "report to author: "format, ## args)
+// #define BUGPRINT(format, args...)
+#endif
+#define MEMPRINT(format, args...) printk("kernel msg: ebtables "\
+ ": out of memory: "format, ## args)
+// #define MEMPRINT(format, args...)
+
+static void print_string(char *str);
+
+static DECLARE_MUTEX(ebt_mutex);
+static LIST_HEAD(ebt_tables);
+static LIST_HEAD(ebt_targets);
+static LIST_HEAD(ebt_matches);
+static LIST_HEAD(ebt_watchers);
+
+static struct ebt_target ebt_standard_target =
+{ {NULL, NULL}, EBT_STANDARD_TARGET, NULL, NULL, NULL, NULL};
+
+static inline int ebt_do_watcher (struct ebt_entry_watcher *w,
+ const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const struct ebt_counter *c)
+{
+ w->u.watcher->watcher(skb, in, out, w->data,
+ w->watcher_size, c);
+ // watchers don't give a verdict
+ return 0;
+}
+
+static inline int ebt_do_match (struct ebt_entry_match *m,
+ const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const struct ebt_counter *c)
+{
+ return m->u.match->match(skb, in, out, m->data,
+ m->match_size, c);
+}
+
+static inline int ebt_dev_check(char *entry, const struct net_device *device)
+{
+ if (*entry == '\0')
+ return 0;
+ if (!device)
+ return 1;
+ return strncmp(entry, device->name, IFNAMSIZ);
+}
+
+// Do some firewalling
+unsigned int ebt_do_table (unsigned int hook, struct sk_buff **pskb,
+ const struct net_device *in, const struct net_device *out,
+ struct ebt_table *table)
+{
+ int i, nentries;
+ struct ebt_entry *point;
+ struct ebt_counter *counter_base;
+ struct ebt_entry_target *t;
+ __u8 verdict;
+
+ read_lock_bh(&table->lock);
+ nentries = table->private->hook_entry[hook]->nentries;
+ point = (struct ebt_entry *)(table->private->hook_entry[hook]->data);
+ counter_base = table->private->counters +
+ cpu_number_map(smp_processor_id()) * table->private->nentries +
+ table->private->counter_entry[hook];
+ #define FWINV(bool,invflg) ((bool) ^ !!(point->invflags & invflg))
+ for (i = 0; i < nentries; i++) {
+ if ( ( point->bitmask & EBT_NOPROTO ||
+ FWINV(point->ethproto == ((**pskb).mac.ethernet)->h_proto,
+ EBT_IPROTO)
+ || FWINV(ntohs(((**pskb).mac.ethernet)->h_proto) < 1536 &&
+ (point->bitmask & EBT_802_3), EBT_IPROTO) )
+ && FWINV(!ebt_dev_check((char *)(point->in), in), EBT_IIN)
+ && FWINV(!ebt_dev_check((char *)(point->out), out), EBT_IOUT)
+ && ((!in || !in->br_port) ? 1 : FWINV(!ebt_dev_check((char *)
+ (point->logical_in), &in->br_port->br->dev), EBT_ILOGICALIN))
+ && ((!out || !out->br_port) ? 1 :
+ FWINV(!ebt_dev_check((char *)
+ (point->logical_out), &out->br_port->br->dev), EBT_ILOGICALOUT))
+
+ ) {
+ char hlpmac[6];
+ int j;
+
+ if (point->bitmask & EBT_SOURCEMAC) {
+ for (j = 0; j < 6; j++)
+ hlpmac[j] = ((**pskb).mac.ethernet)->
+ h_source[j] & point->sourcemsk[j];
+ if (FWINV(!!memcmp(point->sourcemac, hlpmac,
+ ETH_ALEN), EBT_ISOURCE) )
+ goto letscontinue;
+ }
+
+ if (point->bitmask & EBT_DESTMAC) {
+ for (j = 0; j < 6; j++)
+ hlpmac[j] = ((**pskb).mac.ethernet)->
+ h_dest[j] & point->destmsk[j];
+ if (FWINV(!!memcmp(point->destmac, hlpmac,
+ ETH_ALEN), EBT_IDEST) )
+ goto letscontinue;
+ }
+
+ if (EBT_MATCH_ITERATE(point, ebt_do_match, *pskb, in,
+ out, counter_base + i) != 0)
+ goto letscontinue;
+
+ // increase counter
+ (*(counter_base + i)).pcnt++;
+
+ // these should only watch: not modify, nor tell us
+ // what to do with the packet
+ EBT_WATCHER_ITERATE(point, ebt_do_watcher, *pskb, in,
+ out, counter_base + i);
+
+ t = (struct ebt_entry_target *)
+ (((char *)point) + point->target_offset);
+ // standard target
+ if (!t->u.target->target)
+ verdict =
+ ((struct ebt_standard_target *)t)->verdict;
+ else
+ verdict = t->u.target->target(pskb, hook,
+ in, out, t->data, t->target_size);
+ if (verdict == EBT_ACCEPT) {
+ read_unlock_bh(&table->lock);
+ return NF_ACCEPT;
+ }
+ if (verdict == EBT_DROP) {
+ read_unlock_bh(&table->lock);
+ return NF_DROP;
+ }
+ if (verdict != EBT_CONTINUE) {
+ read_unlock_bh(&table->lock);
+ BUGPRINT("Illegal target while "
+ "firewalling!!\n");
+ // Try not to get oopsen
+ return NF_DROP;
+ }
+ }
+letscontinue:
+ point = (struct ebt_entry *)
+ (((char *)point) + point->next_offset);
+ }
+
+ if ( table->private->hook_entry[hook]->policy == EBT_ACCEPT ) {
+ read_unlock_bh(&table->lock);
+ return NF_ACCEPT;
+ }
+ read_unlock_bh(&table->lock);
+ return NF_DROP;
+}
+
+static inline int
+ebt_check_match(struct ebt_entry_match *m, struct ebt_entry *e,
+ const char *name, unsigned int hook, unsigned int *cnt)
+{
+ struct ebt_match *match;
+ int ret;
+
+ m->u.name[EBT_FUNCTION_MAXNAMELEN - 1] = '\0';
+ ret = down_interruptible(&ebt_mutex);
+ if (ret != 0)
+ return -EFAULT;
+ if (!(match = (struct ebt_match *)
+ list_named_find(&ebt_matches, m->u.name))) {
+ up(&ebt_mutex);
+ return -ENOENT;
+ }
+ m->u.match = match;
+ if (match->check &&
+ match->check(name, hook, e, m->data,
+ m->match_size) != 0) {
+ BUGPRINT("match->check failed\n");
+ up(&ebt_mutex);
+ return -EINVAL;
+ }
+ if (match->me)
+ __MOD_INC_USE_COUNT(match->me);
+ up(&ebt_mutex);
+ (*cnt)++;
+ return 0;
+}
+
+static inline int
+ebt_check_watcher(struct ebt_entry_watcher *w, struct ebt_entry *e,
+ const char *name, unsigned int hook, unsigned int *cnt)
+{
+ struct ebt_watcher *watcher;
+ int ret;
+
+ ret = down_interruptible(&ebt_mutex);
+ if (ret != 0)
+ return -EFAULT;
+ w->u.name[EBT_FUNCTION_MAXNAMELEN - 1] = '\0';
+ if (!(watcher = (struct ebt_watcher *)
+ list_named_find(&ebt_watchers, w->u.name))) {
+ up(&ebt_mutex);
+ return -ENOENT;
+ }
+ w->u.watcher = watcher;
+ if (watcher->check &&
+ watcher->check(name, hook, e, w->data,
+ w->watcher_size) != 0) {
+ BUGPRINT("watcher->check failed\n");
+ up(&ebt_mutex);
+ return -EINVAL;
+ }
+ if (watcher->me)
+ __MOD_INC_USE_COUNT(watcher->me);
+ up(&ebt_mutex);
+ (*cnt)++;
+ return 0;
+}
+
+// this one is very careful, as it is the first function
+// to parse the userspace data
+static inline int
+ebt_check_entry_size_and_hooks(struct ebt_entry *e,
+ struct ebt_table_info *newinfo, char *base, char *limit,
+ struct ebt_entries **hook_entries, unsigned int *n, unsigned int *cnt,
+ unsigned int *totalcnt, unsigned int valid_hooks)
+{
+ int i;
+
+ for (i = 0; i < NF_BR_NUMHOOKS; i++) {
+ if ((valid_hooks & (1 << i)) == 0)
+ continue;
+ if ( (char *)hook_entries[i] - base ==
+ (char *)e - newinfo->entries)
+ break;
+ }
+ // beginning of a new chain
+ if (i != NF_BR_NUMHOOKS) {
+ if ((e->bitmask & EBT_ENTRY_OR_ENTRIES) != 0) {
+ // we make userspace set this right,
+ // so there is no misunderstanding
+ BUGPRINT("EBT_ENTRY_OR_ENTRIES shouldn't be set "
+ "in distinguisher\n");
+ return -EINVAL;
+ }
+ // this checks if the previous chain has as many entries
+ // as it said it has
+ if (*n != *cnt) {
+ BUGPRINT("nentries does not equal the nr of entries "
+ "in the chain\n");
+ return -EINVAL;
+ }
+ // before we look at the struct, be sure it is not too big
+ if ((char *)hook_entries[i] + sizeof(struct ebt_entries)
+ > limit) {
+ BUGPRINT("entries_size too small\n");
+ return -EINVAL;
+ }
+ if (((struct ebt_entries *)e)->policy != EBT_DROP &&
+ ((struct ebt_entries *)e)->policy != EBT_ACCEPT) {
+ BUGPRINT("bad policy\n");
+ return -EINVAL;
+ }
+ *n = ((struct ebt_entries *)e)->nentries;
+ *cnt = 0;
+ newinfo->hook_entry[i] = (struct ebt_entries *)e;
+ newinfo->counter_entry[i] = *totalcnt;
+ return 0;
+ }
+ // a plain old entry, heh
+ if (sizeof(struct ebt_entry) > e->watchers_offset ||
+ e->watchers_offset > e->target_offset ||
+ e->target_offset > e->next_offset) {
+ BUGPRINT("entry offsets not in right order\n");
+ return -EINVAL;
+ }
+ if (((char *)e) + e->next_offset - newinfo->entries > limit - base) {
+ BUGPRINT("entry offsets point too far\n");
+ return -EINVAL;
+ }
+
+ if ((e->bitmask & EBT_ENTRY_OR_ENTRIES) == 0) {
+ BUGPRINT("EBT_ENTRY_OR_ENTRIES should be set in "
+ "bitmask for an entry\n");
+ return -EINVAL;
+ }
+ (*cnt)++;
+ (*totalcnt)++;
+ return 0;
+}
+
+static inline int
+ebt_cleanup_match(struct ebt_entry_match *m, unsigned int *i)
+{
+ if (i && (*i)-- == 0)
+ return 1;
+ if (m->u.match->destroy)
+ m->u.match->destroy(m->data, m->match_size);
+ if (m->u.match->me)
+ __MOD_DEC_USE_COUNT(m->u.match->me);
+
+ return 0;
+}
+
+static inline int
+ebt_cleanup_watcher(struct ebt_entry_watcher *w, unsigned int *i)
+{
+ if (i && (*i)-- == 0)
+ return 1;
+ if (w->u.watcher->destroy)
+ w->u.watcher->destroy(w->data, w->watcher_size);
+ if (w->u.watcher->me)
+ __MOD_DEC_USE_COUNT(w->u.watcher->me);
+
+ return 0;
+}
+
+static inline int
+ebt_check_entry(struct ebt_entry *e, struct ebt_table_info *newinfo,
+ const char *name, unsigned int *cnt, unsigned int valid_hooks)
+{
+ struct ebt_entry_target *t;
+ struct ebt_target *target;
+ unsigned int i, j, hook = 0;
+ int ret;
+
+ // Don't mess with the struct ebt_entries
+ if ((e->bitmask & EBT_ENTRY_OR_ENTRIES) == 0)
+ return 0;
+
+ if (e->bitmask & ~EBT_F_MASK) {
+ BUGPRINT("Unknown flag for bitmask\n");
+ return -EINVAL;
+ }
+ if (e->invflags & ~EBT_INV_MASK) {
+ BUGPRINT("Unknown flag for inv bitmask\n");
+ return -EINVAL;
+ }
+ if ( (e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3) ) {
+ BUGPRINT("NOPROTO & 802_3 not allowed\n");
+ return -EINVAL;
+ }
+ e->in[IFNAMSIZ - 1] = '\0';
+ e->out[IFNAMSIZ - 1] = '\0';
+ e->logical_in[IFNAMSIZ - 1] = '\0';
+ e->logical_out[IFNAMSIZ - 1] = '\0';
+ // what hook do we belong to?
+ for (i = 0; i < NF_BR_NUMHOOKS; i++) {
+ if ((valid_hooks & (1 << i)) == 0)
+ continue;
+ if ((char *)newinfo->hook_entry[i] < (char *)e)
+ hook = i;
+ else
+ break;
+ }
+ i = 0;
+ ret = EBT_MATCH_ITERATE(e, ebt_check_match, e, name, hook, &i);
+ if (ret != 0)
+ goto cleanup_matches;
+ j = 0;
+ ret = EBT_WATCHER_ITERATE(e, ebt_check_watcher, e, name, hook, &j);
+ if (ret != 0)
+ goto cleanup_watchers;
+ t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
+ ret = down_interruptible(&ebt_mutex);
+ if (ret != 0)
+ goto cleanup_watchers;
+ t->u.name[EBT_FUNCTION_MAXNAMELEN - 1] = '\0';
+ if (!(target = (struct ebt_target *)
+ list_named_find(&ebt_targets, t->u.name))) {
+ ret = -ENOENT;
+ up(&ebt_mutex);
+ goto cleanup_watchers;
+ }
+ if (target->me)
+ __MOD_INC_USE_COUNT(target->me);
+ up(&ebt_mutex);
+
+ t->u.target = target;
+ if (t->u.target == &ebt_standard_target) {
+ if (e->target_offset + sizeof(struct ebt_standard_target) >
+ e->next_offset) {
+ BUGPRINT("Standard target size too big\n");
+ ret = -EFAULT;
+ goto cleanup_watchers;
+ }
+ if (((struct ebt_standard_target *)t)->verdict >=
+ NUM_STANDARD_TARGETS) {
+ BUGPRINT("Invalid standard target\n");
+ ret = -EFAULT;
+ goto cleanup_watchers;
+ }
+ } else if (t->u.target->check &&
+ t->u.target->check(name, hook, e, t->data,
+ t->target_size) != 0) {
+ if (t->u.target->me)
+ __MOD_DEC_USE_COUNT(t->u.target->me);
+ ret = -EFAULT;
+ goto cleanup_watchers;
+ }
+ (*cnt)++;
+ return 0;
+cleanup_watchers:
+ EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, &j);
+cleanup_matches:
+ EBT_MATCH_ITERATE(e, ebt_cleanup_match, &i);
+ return ret;
+}
+
+static inline int
+ebt_cleanup_entry(struct ebt_entry *e, unsigned int *cnt)
+{
+ struct ebt_entry_target *t;
+
+ if (e->bitmask == 0)
+ return 0;
+ // we're done
+ if (cnt && (*cnt)-- == 0)
+ return 1;
+ EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, NULL);
+ EBT_MATCH_ITERATE(e, ebt_cleanup_match, NULL);
+ t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
+ if (t->u.target->destroy)
+ t->u.target->destroy(t->data, t->target_size);
+ if (t->u.target->me)
+ __MOD_DEC_USE_COUNT(t->u.target->me);
+
+ return 0;
+}
+
+// do the parsing of the table/chains/entries/matches/watchers/targets, heh
+static int translate_table(struct ebt_replace *repl,
+ struct ebt_table_info *newinfo)
+{
+ unsigned int i, j, k;
+ int ret;
+
+ i = 0;
+ while (i < NF_BR_NUMHOOKS && !(repl->valid_hooks & (1 << i)))
+ i++;
+ if (i == NF_BR_NUMHOOKS) {
+ BUGPRINT("No valid hooks specified\n");
+ return -EINVAL;
+ }
+ if (repl->hook_entry[i] != (struct ebt_entries *)repl->entries) {
+ BUGPRINT("Chains don't start at beginning\n");
+ return -EINVAL;
+ }
+ // make sure chains are ordered after each other in same order
+ // as their corresponding hooks
+ for (j = i + 1; j < NF_BR_NUMHOOKS; j++) {
+ if (!(repl->valid_hooks & (1 << j)))
+ continue;
+ if ( repl->hook_entry[j] <= repl->hook_entry[i] ) {
+ BUGPRINT("Hook order must be followed\n");
+ return -EINVAL;
+ }
+ i = j;
+ }
+
+ for (i = 0; i < NF_BR_NUMHOOKS; i++) {
+ newinfo->hook_entry[i] = NULL;
+ newinfo->counter_entry[i] = 0;
+ }
+
+ newinfo->entries_size = repl->entries_size;
+ newinfo->nentries = repl->nentries;
+
+ // do some early checkings and initialize some things
+ i = 0; // holds the expected nr. of entries for the chain
+	j = 0; // holds the number of entries counted so far for the chain
+ k = 0; // holds the total nr. of entries, should equal
+ // newinfo->nentries afterwards
+ ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
+ ebt_check_entry_size_and_hooks, newinfo, repl->entries,
+ repl->entries + repl->entries_size, repl->hook_entry, &i, &j, &k,
+ repl->valid_hooks);
+
+ if (ret != 0)
+ return ret;
+
+ if (i != j) {
+ BUGPRINT("nentries does not equal the nr of entries in the "
+ "(last) chain\n");
+ return -EINVAL;
+ }
+ if (k != newinfo->nentries) {
+ BUGPRINT("Total nentries is wrong\n");
+ return -EINVAL;
+ }
+
+ // check if all valid hooks have a chain
+ for (i = 0; i < NF_BR_NUMHOOKS; i++) {
+ if (newinfo->hook_entry[i] == NULL &&
+ (repl->valid_hooks & (1 << i))){
+ BUGPRINT("Valid hook without chain\n");
+ return -EINVAL;
+ }
+ }
+
+ // we just don't trust anything
+ repl->name[EBT_TABLE_MAXNAMELEN - 1] = '\0';
+ // used to know what we need to clean up if something goes wrong
+ i = 0;
+ ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
+ ebt_check_entry, newinfo, repl->name, &i, repl->valid_hooks);
+ if (ret != 0) {
+ BUGPRINT("ebt_check_entry gave fault back\n");
+ EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, ebt_cleanup_entry, &i);
+ }
+ return ret;
+}
+
+// called under write_lock
+static inline void get_counters(struct ebt_table_info *info,
+ struct ebt_counter *counters)
+{
+ int i, cpu, counter_base;
+
+ // counters of cpu 0
+ memcpy(counters, info->counters,
+ sizeof(struct ebt_counter) * info->nentries);
+ // add other counters to those of cpu 0
+ for (cpu = 1; cpu < smp_num_cpus; cpu++) {
+ counter_base = cpu * info->nentries;
+ for (i = 0; i < info->nentries; i++)
+ counters[i].pcnt +=
+ info->counters[counter_base + i].pcnt;
+ }
+}
+
+// replace the table
+static int do_replace(void *user, unsigned int len)
+{
+ int ret;
+ struct ebt_table_info *newinfo;
+ struct ebt_replace tmp;
+ struct ebt_table *t;
+ struct ebt_counter *counterstmp = NULL;
+ // used to be able to unlock earlier
+ struct ebt_table_info *table;
+
+ if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
+ return -EFAULT;
+
+ if (len != sizeof(tmp) + tmp.entries_size) {
+ BUGPRINT("Wrong len argument\n");
+ return -EINVAL;
+ }
+
+ if (tmp.entries_size == 0) {
+ BUGPRINT("Entries_size never zero\n");
+ return -EINVAL;
+ }
+ newinfo = (struct ebt_table_info *)
+ vmalloc(sizeof(struct ebt_table_info));
+ if (!newinfo)
+ return -ENOMEM;
+
+ if (tmp.nentries) {
+ newinfo->counters = (struct ebt_counter *)vmalloc(
+ sizeof(struct ebt_counter) * tmp.nentries * smp_num_cpus);
+ if (!newinfo->counters) {
+ ret = -ENOMEM;
+ goto free_newinfo;
+ }
+ memset(newinfo->counters, 0,
+ sizeof(struct ebt_counter) * tmp.nentries * smp_num_cpus);
+ }
+ else
+ newinfo->counters = NULL;
+
+ newinfo->entries = (char *)vmalloc(tmp.entries_size);
+ if (!newinfo->entries) {
+ ret = -ENOMEM;
+ goto free_counters;
+ }
+ if (copy_from_user(
+ newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
+ BUGPRINT("Couldn't copy entries from userspace\n");
+ ret = -EFAULT;
+ goto free_entries;
+ }
+
+ // the user wants counters back
+ // the check on the size is done later, when we have the lock
+ if (tmp.num_counters) {
+ counterstmp = (struct ebt_counter *)
+ vmalloc(tmp.num_counters * sizeof(struct ebt_counter));
+ if (!counterstmp) {
+ ret = -ENOMEM;
+ goto free_entries;
+ }
+ }
+ else
+ counterstmp = NULL;
+
+ ret = translate_table(&tmp, newinfo);
+
+ if (ret != 0)
+ goto free_counterstmp;
+
+ ret = down_interruptible(&ebt_mutex);
+
+ if (ret != 0)
+ goto free_cleanup;
+
+ if (!(t = (struct ebt_table *)list_named_find(&ebt_tables, tmp.name))) {
+ ret = -ENOENT;
+ // give some help to the poor user
+ print_string("The table is not present, try insmod\n");
+ goto free_unlock;
+ }
+
+ // the table doesn't like it
+ if (t->check && (ret = t->check(newinfo, tmp.valid_hooks)))
+ goto free_unlock;
+
+ if (tmp.num_counters && tmp.num_counters != t->private->nentries) {
+ BUGPRINT("Wrong nr. of counters requested\n");
+ ret = -EINVAL;
+ goto free_unlock;
+ }
+
+ // we have the mutex lock, so no danger in reading this pointer
+ table = t->private;
+ // we need an atomic snapshot of the counters
+ write_lock_bh(&t->lock);
+ if (tmp.num_counters)
+ get_counters(t->private, counterstmp);
+
+ t->private = newinfo;
+ write_unlock_bh(&t->lock);
+ up(&ebt_mutex);
+	// So, a user can change the chains while having messed up his counter
+	// allocation. The only reason we do it this way is that the lock then
+	// has to be taken only once, and it doesn't bring the kernel into a
+	// dangerous state.
+ if (tmp.num_counters &&
+ copy_to_user(tmp.counters, counterstmp,
+ tmp.num_counters * sizeof(struct ebt_counter))) {
+ BUGPRINT("Couldn't copy counters to userspace\n");
+ ret = -EFAULT;
+ }
+ else
+ ret = 0;
+
+ // decrease module count and free resources
+ EBT_ENTRY_ITERATE(table->entries, table->entries_size,
+ ebt_cleanup_entry, NULL);
+
+ vfree(table->entries);
+ if (table->counters)
+ vfree(table->counters);
+ vfree(table);
+
+ if (counterstmp)
+ vfree(counterstmp);
+ return ret;
+
+free_unlock:
+ up(&ebt_mutex);
+free_cleanup:
+ EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
+ ebt_cleanup_entry, NULL);
+free_counterstmp:
+ if (counterstmp)
+ vfree(counterstmp);
+free_entries:
+ if (newinfo->entries)
+ vfree(newinfo->entries);
+free_counters:
+ if (newinfo->counters)
+ vfree(newinfo->counters);
+free_newinfo:
+ if (newinfo)
+ vfree(newinfo);
+ return ret;
+}
+
+int ebt_register_target(struct ebt_target *target)
+{
+ int ret;
+
+ ret = down_interruptible(&ebt_mutex);
+ if (ret != 0)
+ return ret;
+ if (!list_named_insert(&ebt_targets, target)) {
+ up(&ebt_mutex);
+ return -EEXIST;
+ }
+ up(&ebt_mutex);
+ MOD_INC_USE_COUNT;
+
+ return 0;
+}
+
+void ebt_unregister_target(struct ebt_target *target)
+{
+ down(&ebt_mutex);
+ LIST_DELETE(&ebt_targets, target);
+ up(&ebt_mutex);
+ MOD_DEC_USE_COUNT;
+}
+
+int ebt_register_match(struct ebt_match *match)
+{
+ int ret;
+
+ ret = down_interruptible(&ebt_mutex);
+ if (ret != 0)
+ return ret;
+ if (!list_named_insert(&ebt_matches, match)) {
+ up(&ebt_mutex);
+ return -EEXIST;
+ }
+ up(&ebt_mutex);
+ MOD_INC_USE_COUNT;
+
+ return 0;
+}
+
+void ebt_unregister_match(struct ebt_match *match)
+{
+ down(&ebt_mutex);
+ LIST_DELETE(&ebt_matches, match);
+ up(&ebt_mutex);
+ MOD_DEC_USE_COUNT;
+}
+
+int ebt_register_watcher(struct ebt_watcher *watcher)
+{
+ int ret;
+
+ ret = down_interruptible(&ebt_mutex);
+ if (ret != 0)
+ return ret;
+ if (!list_named_insert(&ebt_watchers, watcher)) {
+ up(&ebt_mutex);
+ return -EEXIST;
+ }
+ up(&ebt_mutex);
+ MOD_INC_USE_COUNT;
+
+ return 0;
+}
+
+void ebt_unregister_watcher(struct ebt_watcher *watcher)
+{
+ down(&ebt_mutex);
+ LIST_DELETE(&ebt_watchers, watcher);
+ up(&ebt_mutex);
+ MOD_DEC_USE_COUNT;
+}
+
+int ebt_register_table(struct ebt_table *table)
+{
+ struct ebt_table_info *newinfo;
+ int ret;
+
+ if (!table || !table->table ||!table->table->entries ||
+ table->table->entries_size == 0 ||
+ table->table->counters || table->private) {
+ BUGPRINT("Bad table data for ebt_register_table!!!\n");
+ return -EINVAL;
+ }
+
+ newinfo = (struct ebt_table_info *)
+ vmalloc(sizeof(struct ebt_table_info));
+ ret = -ENOMEM;
+ if (!newinfo)
+ return -ENOMEM;
+
+ newinfo->entries = (char *)vmalloc(table->table->entries_size);
+ if (!(newinfo->entries))
+ goto free_newinfo;
+
+ memcpy(newinfo->entries, table->table->entries,
+ table->table->entries_size);
+
+ if (table->table->nentries) {
+ newinfo->counters = (struct ebt_counter *)
+ vmalloc(table->table->nentries *
+ sizeof(struct ebt_counter) * smp_num_cpus);
+ if (!newinfo->counters)
+ goto free_entries;
+ memset(newinfo->counters, 0, table->table->nentries *
+ sizeof(struct ebt_counter) * smp_num_cpus);
+ }
+ else
+ newinfo->counters = NULL;
+
+ // fill in newinfo and parse the entries
+ ret = translate_table(table->table, newinfo);
+ if (ret != 0) {
+ BUGPRINT("Translate_table failed\n");
+ goto free_counters;
+ }
+
+	if (table->check && table->check(newinfo, table->valid_hooks)) {
+		BUGPRINT("The table doesn't like its own initial data, lol\n");
+		// don't leak newinfo if the table rejects its initial data
+		ret = -EINVAL;
+		goto free_counters;
+	}
+
+ table->private = newinfo;
+ table->lock = RW_LOCK_UNLOCKED;
+ ret = down_interruptible(&ebt_mutex);
+ if (ret != 0)
+ goto free_counters;
+
+ if (list_named_find(&ebt_tables, table->name)) {
+ ret = -EEXIST;
+ BUGPRINT("Table name already exists\n");
+ goto free_unlock;
+ }
+
+ list_prepend(&ebt_tables, table);
+ up(&ebt_mutex);
+ MOD_INC_USE_COUNT;
+ return 0;
+free_unlock:
+ up(&ebt_mutex);
+free_counters:
+ if (newinfo->counters)
+ vfree(newinfo->counters);
+free_entries:
+ vfree(newinfo->entries);
+free_newinfo:
+ vfree(newinfo);
+ return ret;
+}
+
+void ebt_unregister_table(struct ebt_table *table)
+{
+ if (!table) {
+ BUGPRINT("Request to unregister NULL table!!!\n");
+ return;
+ }
+ down(&ebt_mutex);
+ LIST_DELETE(&ebt_tables, table);
+ up(&ebt_mutex);
+ EBT_ENTRY_ITERATE(table->private->entries,
+ table->private->entries_size, ebt_cleanup_entry, NULL);
+ if (table->private->counters)
+ vfree(table->private->counters);
+ if (table->private->entries)
+ vfree(table->private->entries);
+ vfree(table->private);
+ MOD_DEC_USE_COUNT;
+}
+
+// userspace just supplied us with counters
+static int update_counters(void *user, unsigned int len)
+{
+ int i, ret;
+ struct ebt_counter *tmp;
+ struct ebt_replace hlp;
+ struct ebt_table *t;
+
+ if (copy_from_user(&hlp, user, sizeof(hlp)))
+ return -EFAULT;
+
+ if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
+ return -EINVAL;
+ if (hlp.num_counters == 0)
+ return -EINVAL;
+
+ if ( !(tmp = (struct ebt_counter *)
+ vmalloc(hlp.num_counters * sizeof(struct ebt_counter))) ){
+		MEMPRINT("Update_counters && no memory\n");
+ return -ENOMEM;
+ }
+
+ hlp.name[EBT_TABLE_MAXNAMELEN - 1] = '\0';
+ ret = down_interruptible(&ebt_mutex);
+ if (ret != 0)
+ goto free_tmp;
+
+ if (!(t = (struct ebt_table *)list_named_find(&ebt_tables, hlp.name))) {
+ ret = -EINVAL;
+ goto unlock_mutex;
+ }
+
+ if (hlp.num_counters != t->private->nentries) {
+ BUGPRINT("Wrong nr of counters\n");
+ ret = -EINVAL;
+ goto unlock_mutex;
+ }
+
+ if ( copy_from_user(tmp, hlp.counters,
+ hlp.num_counters * sizeof(struct ebt_counter)) ) {
+		BUGPRINT("Update_counters && !cfu\n");
+ ret = -EFAULT;
+ goto unlock_mutex;
+ }
+
+ // we want an atomic add of the counters
+ write_lock_bh(&t->lock);
+
+ // we add to the counters of the first cpu
+ for (i = 0; i < hlp.num_counters; i++)
+ t->private->counters[i].pcnt += tmp[i].pcnt;
+
+ write_unlock_bh(&t->lock);
+ ret = 0;
+unlock_mutex:
+ up(&ebt_mutex);
+free_tmp:
+ vfree(tmp);
+ return ret;
+}
+
+static inline int ebt_make_matchname(struct ebt_entry_match *m,
+ char *base, char *ubase)
+{
+ char *hlp = ubase - base + (char *)m;
+ if (copy_to_user(hlp, m->u.match->name, EBT_FUNCTION_MAXNAMELEN))
+ return -EFAULT;
+ return 0;
+}
+
+static inline int ebt_make_watchername(struct ebt_entry_watcher *w,
+ char *base, char *ubase)
+{
+ char *hlp = ubase - base + (char *)w;
+ if (copy_to_user(hlp , w->u.watcher->name, EBT_FUNCTION_MAXNAMELEN))
+ return -EFAULT;
+ return 0;
+}
+
+static inline int ebt_make_names(struct ebt_entry *e, char *base, char *ubase)
+{
+ int ret;
+ char *hlp = ubase - base + (char *)e + e->target_offset;
+ struct ebt_entry_target *t;
+
+ if ((e->bitmask & EBT_ENTRY_OR_ENTRIES) == 0)
+ return 0;
+
+ t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
+
+ ret = EBT_MATCH_ITERATE(e, ebt_make_matchname, base, ubase);
+ if (ret != 0)
+ return ret;
+ ret = EBT_WATCHER_ITERATE(e, ebt_make_watchername, base, ubase);
+ if (ret != 0)
+ return ret;
+ if (copy_to_user(hlp, t->u.target->name, EBT_FUNCTION_MAXNAMELEN))
+ return -EFAULT;
+ return 0;
+}
+
+// called with ebt_mutex down
+static int copy_everything_to_user(struct ebt_table *t, void *user, int *len)
+{
+ struct ebt_replace tmp;
+ struct ebt_table_info *info = t->private;
+ struct ebt_counter *counterstmp;
+ int i;
+
+ if (copy_from_user(&tmp, user, sizeof(tmp))) {
+ BUGPRINT("Cfu didn't work\n");
+ return -EFAULT;
+ }
+
+ if (*len != sizeof(struct ebt_replace) + info->entries_size +
+ (tmp.num_counters? info->nentries * sizeof(struct ebt_counter): 0)) {
+ BUGPRINT("Wrong size\n");
+ return -EINVAL;
+ }
+
+ if (tmp.nentries != info->nentries) {
+ BUGPRINT("Nentries wrong\n");
+ return -EINVAL;
+ }
+
+ if (tmp.entries_size != info->entries_size) {
+ BUGPRINT("Wrong size\n");
+ return -EINVAL;
+ }
+
+ // userspace might not need the counters
+ if (tmp.num_counters) {
+ if (tmp.num_counters != info->nentries) {
+ BUGPRINT("Num_counters wrong\n");
+ return -EINVAL;
+ }
+ counterstmp = (struct ebt_counter *)
+ vmalloc(info->nentries * sizeof(struct ebt_counter));
+ if (!counterstmp) {
+ BUGPRINT("Couldn't copy counters, out of memory\n");
+ return -ENOMEM;
+ }
+ write_lock_bh(&t->lock);
+ get_counters(info, counterstmp);
+ write_unlock_bh(&t->lock);
+
+ if (copy_to_user(tmp.counters, counterstmp,
+ info->nentries * sizeof(struct ebt_counter))) {
+ BUGPRINT("Couldn't copy counters to userspace\n");
+ vfree(counterstmp);
+ return -EFAULT;
+ }
+ vfree(counterstmp);
+ }
+
+ if (copy_to_user(tmp.entries, info->entries, info->entries_size)) {
+ BUGPRINT("Couldn't copy entries to userspace\n");
+ return -EFAULT;
+ }
+ // make userspace's life easier
+ memcpy(tmp.counter_entry, info->counter_entry,
+ NF_BR_NUMHOOKS * sizeof(int));
+ memcpy(tmp.hook_entry, info->hook_entry,
+ NF_BR_NUMHOOKS * sizeof(struct ebt_entries *));
+ for (i = 0; i < NF_BR_NUMHOOKS; i++)
+ tmp.hook_entry[i] = (struct ebt_entries *)(((char *)
+ (info->hook_entry[i])) - info->entries + tmp.entries);
+ if (copy_to_user(user, &tmp, sizeof(struct ebt_replace))) {
+ BUGPRINT("Couldn't copy ebt_replace to userspace\n");
+ return -EFAULT;
+ }
+ // set the match/watcher/target names right
+ return EBT_ENTRY_ITERATE(info->entries, info->entries_size,
+ ebt_make_names, info->entries, tmp.entries);
+}
+
+static int do_ebt_set_ctl(struct sock *sk,
+ int cmd, void *user, unsigned int len)
+{
+ int ret;
+
+ switch(cmd) {
+ case EBT_SO_SET_ENTRIES:
+ ret = do_replace(user, len);
+ break;
+ case EBT_SO_SET_COUNTERS:
+ ret = update_counters(user, len);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static int do_ebt_get_ctl(struct sock *sk, int cmd, void *user, int *len)
+{
+ int ret;
+ struct ebt_replace tmp;
+ struct ebt_table *t;
+
+ if (copy_from_user(&tmp, user, sizeof(tmp)))
+ return -EFAULT;
+
+ ret = down_interruptible(&ebt_mutex);
+ if (ret != 0)
+ return ret;
+
+ if (!(t = (struct ebt_table *)list_named_find(&ebt_tables, tmp.name))) {
+ print_string("Table not found, try insmod\n");
+ up(&ebt_mutex);
+ return -EINVAL;
+ }
+
+ switch(cmd) {
+ case EBT_SO_GET_INFO:
+ if (*len != sizeof(struct ebt_replace)){
+ ret = -EINVAL;
+ up(&ebt_mutex);
+ break;
+ }
+ tmp.nentries = t->private->nentries;
+ tmp.entries_size = t->private->entries_size;
+ // userspace needs this to check the chain names
+ tmp.valid_hooks = t->valid_hooks;
+ up(&ebt_mutex);
+ if (copy_to_user(user, &tmp, *len) != 0){
+ BUGPRINT("c2u Didn't work\n");
+ ret = -EFAULT;
+ break;
+ }
+ ret = 0;
+ break;
+
+ case EBT_SO_GET_ENTRIES:
+ ret = copy_everything_to_user(t, user, len);
+ up(&ebt_mutex);
+ break;
+
+ default:
+ up(&ebt_mutex);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static struct nf_sockopt_ops ebt_sockopts =
+{ { NULL, NULL }, PF_INET, EBT_BASE_CTL, EBT_SO_SET_MAX + 1, do_ebt_set_ctl,
+ EBT_BASE_CTL, EBT_SO_GET_MAX + 1, do_ebt_get_ctl, 0, NULL
+};
+
+// Copyright (C) 1998 by Ori Pomerantz
+// Print the string to the appropriate tty, the one
+// the current task uses
+static void print_string(char *str)
+{
+ struct tty_struct *my_tty;
+
+ /* The tty for the current task */
+ my_tty = current->tty;
+ if (my_tty != NULL) {
+ (*(my_tty->driver).write)(my_tty, 0, str, strlen(str));
+ (*(my_tty->driver).write)(my_tty, 0, "\015\012", 2);
+ }
+}
+
+static int __init init(void)
+{
+ int ret;
+
+ down(&ebt_mutex);
+ list_named_insert(&ebt_targets, &ebt_standard_target);
+ up(&ebt_mutex);
+ if ((ret = nf_register_sockopt(&ebt_sockopts)) < 0)
+ return ret;
+
+ print_string("Ebtables v2.0 registered");
+ return 0;
+}
+
+static void __exit fini(void)
+{
+ nf_unregister_sockopt(&ebt_sockopts);
+ print_string("Ebtables v2.0 unregistered");
+}
+
+EXPORT_SYMBOL(ebt_register_table);
+EXPORT_SYMBOL(ebt_unregister_table);
+EXPORT_SYMBOL(ebt_register_match);
+EXPORT_SYMBOL(ebt_unregister_match);
+EXPORT_SYMBOL(ebt_register_watcher);
+EXPORT_SYMBOL(ebt_unregister_watcher);
+EXPORT_SYMBOL(ebt_register_target);
+EXPORT_SYMBOL(ebt_unregister_target);
+EXPORT_SYMBOL(ebt_do_table);
+module_init(init);
+module_exit(fini);
+MODULE_LICENSE("GPL");
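
For reference, the counter array manipulated by ebt_do_table() and summed by get_counters() above uses a flat per-CPU layout: nentries counters for CPU 0, then nentries for CPU 1, and so on, with each hook's slice starting at counter_entry[hook]. A small sketch of the index arithmetic (the helper and its parameters are ours):

/* Sketch: where entry i of a hook's chain keeps its counter on a given CPU. */
static struct ebt_counter *counter_of(struct ebt_table_info *info,
   unsigned int cpu, unsigned int hook, unsigned int i)
{
	return info->counters + cpu * info->nentries +
	       info->counter_entry[hook] + i;
}
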
diff --git a/kernel/linux/net/netsyms.c b/kernel/linux/net/netsyms.c
new file mode 100644
index 0000000..14fa1bd
--- /dev/null
+++ b/kernel/linux/net/netsyms.c
@@ -0,0 +1,595 @@
+/*
+ * linux/net/netsyms.c
+ *
+ * Symbol table for the linux networking subsystem. Moved here to
+ * make life simpler in ksyms.c.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+
+#include <linux/types.h>
+#include <linux/net.h>
+#include <linux/in.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/fddidevice.h>
+#include <linux/trdevice.h>
+#include <linux/fcdevice.h>
+#include <linux/ioport.h>
+#include <linux/tty.h>
+#include <net/neighbour.h>
+#include <net/snmp.h>
+#include <net/dst.h>
+#include <net/checksum.h>
+#include <linux/etherdevice.h>
+#include <net/route.h>
+#ifdef CONFIG_HIPPI
+#include <linux/hippidevice.h>
+#endif
+#include <net/pkt_sched.h>
+#include <net/scm.h>
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
+#include <linux/random.h>
+#ifdef CONFIG_NET_DIVERT
+#include <linux/divert.h>
+#endif /* CONFIG_NET_DIVERT */
+
+#ifdef CONFIG_NET
+extern __u32 sysctl_wmem_max;
+extern __u32 sysctl_rmem_max;
+#endif
+
+#ifdef CONFIG_INET
+#include <linux/ip.h>
+#include <net/protocol.h>
+#include <net/arp.h>
+#include <net/ip.h>
+#include <net/udp.h>
+#include <net/tcp.h>
+#include <net/icmp.h>
+#include <net/inet_common.h>
+#include <linux/inet.h>
+#include <linux/mroute.h>
+#include <linux/igmp.h>
+
+extern struct net_proto_family inet_family_ops;
+
+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) || defined (CONFIG_KHTTPD) || defined (CONFIG_KHTTPD_MODULE)
+#include <linux/in6.h>
+#include <linux/icmpv6.h>
+#include <net/ipv6.h>
+#include <net/ndisc.h>
+#include <net/transp_v6.h>
+#include <net/addrconf.h>
+
+extern int sysctl_local_port_range[2];
+extern int tcp_port_rover;
+extern int udp_port_rover;
+#endif
+
+#endif
+
+extern int netdev_finish_unregister(struct net_device *dev);
+
+#include <linux/rtnetlink.h>
+
+#ifdef CONFIG_IPX_MODULE
+extern struct datalink_proto *make_EII_client(void);
+extern struct datalink_proto *make_8023_client(void);
+extern void destroy_EII_client(struct datalink_proto *);
+extern void destroy_8023_client(struct datalink_proto *);
+#endif
+
+#ifdef CONFIG_ATALK_MODULE
+#include <net/sock.h>
+#endif
+
+#ifdef CONFIG_SYSCTL
+extern int sysctl_max_syn_backlog;
+#endif
+
+/* Skbuff symbols. */
+EXPORT_SYMBOL(skb_over_panic);
+EXPORT_SYMBOL(skb_under_panic);
+
+/* Socket layer registration */
+EXPORT_SYMBOL(sock_register);
+EXPORT_SYMBOL(sock_unregister);
+
+/* Socket locking */
+EXPORT_SYMBOL(__lock_sock);
+EXPORT_SYMBOL(__release_sock);
+
+/* Socket layer support routines */
+EXPORT_SYMBOL(memcpy_fromiovec);
+EXPORT_SYMBOL(memcpy_tokerneliovec);
+EXPORT_SYMBOL(sock_create);
+EXPORT_SYMBOL(sock_alloc);
+EXPORT_SYMBOL(sock_release);
+EXPORT_SYMBOL(sock_setsockopt);
+EXPORT_SYMBOL(sock_getsockopt);
+EXPORT_SYMBOL(sock_sendmsg);
+EXPORT_SYMBOL(sock_recvmsg);
+EXPORT_SYMBOL(sk_alloc);
+EXPORT_SYMBOL(sk_free);
+EXPORT_SYMBOL(sock_wake_async);
+EXPORT_SYMBOL(sock_alloc_send_skb);
+EXPORT_SYMBOL(sock_alloc_send_pskb);
+EXPORT_SYMBOL(sock_init_data);
+EXPORT_SYMBOL(sock_no_release);
+EXPORT_SYMBOL(sock_no_bind);
+EXPORT_SYMBOL(sock_no_connect);
+EXPORT_SYMBOL(sock_no_socketpair);
+EXPORT_SYMBOL(sock_no_accept);
+EXPORT_SYMBOL(sock_no_getname);
+EXPORT_SYMBOL(sock_no_poll);
+EXPORT_SYMBOL(sock_no_ioctl);
+EXPORT_SYMBOL(sock_no_listen);
+EXPORT_SYMBOL(sock_no_shutdown);
+EXPORT_SYMBOL(sock_no_getsockopt);
+EXPORT_SYMBOL(sock_no_setsockopt);
+EXPORT_SYMBOL(sock_no_sendmsg);
+EXPORT_SYMBOL(sock_no_recvmsg);
+EXPORT_SYMBOL(sock_no_mmap);
+EXPORT_SYMBOL(sock_no_sendpage);
+EXPORT_SYMBOL(sock_rfree);
+EXPORT_SYMBOL(sock_wfree);
+EXPORT_SYMBOL(sock_wmalloc);
+EXPORT_SYMBOL(sock_rmalloc);
+EXPORT_SYMBOL(skb_linearize);
+EXPORT_SYMBOL(skb_checksum);
+EXPORT_SYMBOL(skb_checksum_help);
+EXPORT_SYMBOL(skb_recv_datagram);
+EXPORT_SYMBOL(skb_free_datagram);
+EXPORT_SYMBOL(skb_copy_datagram);
+EXPORT_SYMBOL(skb_copy_datagram_iovec);
+EXPORT_SYMBOL(skb_copy_and_csum_datagram_iovec);
+EXPORT_SYMBOL(skb_copy_bits);
+EXPORT_SYMBOL(skb_copy_and_csum_bits);
+EXPORT_SYMBOL(skb_copy_and_csum_dev);
+EXPORT_SYMBOL(skb_copy_expand);
+EXPORT_SYMBOL(___pskb_trim);
+EXPORT_SYMBOL(__pskb_pull_tail);
+EXPORT_SYMBOL(pskb_expand_head);
+EXPORT_SYMBOL(pskb_copy);
+EXPORT_SYMBOL(skb_realloc_headroom);
+EXPORT_SYMBOL(datagram_poll);
+EXPORT_SYMBOL(put_cmsg);
+EXPORT_SYMBOL(sock_kmalloc);
+EXPORT_SYMBOL(sock_kfree_s);
+
+#ifdef CONFIG_FILTER
+EXPORT_SYMBOL(sk_run_filter);
+EXPORT_SYMBOL(sk_chk_filter);
+#endif
+
+EXPORT_SYMBOL(neigh_table_init);
+EXPORT_SYMBOL(neigh_table_clear);
+EXPORT_SYMBOL(neigh_resolve_output);
+EXPORT_SYMBOL(neigh_connected_output);
+EXPORT_SYMBOL(neigh_update);
+EXPORT_SYMBOL(neigh_create);
+EXPORT_SYMBOL(neigh_lookup);
+EXPORT_SYMBOL(__neigh_event_send);
+EXPORT_SYMBOL(neigh_event_ns);
+EXPORT_SYMBOL(neigh_ifdown);
+#ifdef CONFIG_ARPD
+EXPORT_SYMBOL(neigh_app_ns);
+#endif
+#ifdef CONFIG_SYSCTL
+EXPORT_SYMBOL(neigh_sysctl_register);
+#endif
+EXPORT_SYMBOL(pneigh_lookup);
+EXPORT_SYMBOL(pneigh_enqueue);
+EXPORT_SYMBOL(neigh_destroy);
+EXPORT_SYMBOL(neigh_parms_alloc);
+EXPORT_SYMBOL(neigh_parms_release);
+EXPORT_SYMBOL(neigh_rand_reach_time);
+EXPORT_SYMBOL(neigh_compat_output);
+
+/* dst_entry */
+EXPORT_SYMBOL(dst_alloc);
+EXPORT_SYMBOL(__dst_free);
+EXPORT_SYMBOL(dst_destroy);
+
+/* misc. support routines */
+EXPORT_SYMBOL(net_ratelimit);
+EXPORT_SYMBOL(net_random);
+EXPORT_SYMBOL(net_srandom);
+
+/* Needed by smbfs.o */
+EXPORT_SYMBOL(__scm_destroy);
+EXPORT_SYMBOL(__scm_send);
+
+/* Needed by unix.o */
+EXPORT_SYMBOL(scm_fp_dup);
+EXPORT_SYMBOL(files_stat);
+EXPORT_SYMBOL(memcpy_toiovec);
+
+#ifdef CONFIG_IPX_MODULE
+EXPORT_SYMBOL(make_8023_client);
+EXPORT_SYMBOL(destroy_8023_client);
+EXPORT_SYMBOL(make_EII_client);
+EXPORT_SYMBOL(destroy_EII_client);
+#endif
+
+/* for 801q VLAN support */
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+EXPORT_SYMBOL(dev_change_flags);
+EXPORT_SYMBOL(vlan_ioctl_hook);
+#endif
+
+EXPORT_SYMBOL(sklist_destroy_socket);
+EXPORT_SYMBOL(sklist_insert_socket);
+
+EXPORT_SYMBOL(scm_detach_fds);
+
+#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
+EXPORT_SYMBOL(br_handle_frame_hook);
+#if defined(CONFIG_BRIDGE_EBT_BROUTE) || \
+ defined(CONFIG_BRIDGE_EBT_BROUTE_MODULE)
+EXPORT_SYMBOL(broute_decision);
+#endif
+#ifdef CONFIG_INET
+EXPORT_SYMBOL(br_ioctl_hook);
+#endif
+#endif
+
+#ifdef CONFIG_NET_DIVERT
+EXPORT_SYMBOL(alloc_divert_blk);
+EXPORT_SYMBOL(free_divert_blk);
+EXPORT_SYMBOL(divert_ioctl);
+#endif /* CONFIG_NET_DIVERT */
+
+#ifdef CONFIG_INET
+/* Internet layer registration */
+EXPORT_SYMBOL(inetdev_lock);
+EXPORT_SYMBOL(inet_add_protocol);
+EXPORT_SYMBOL(inet_del_protocol);
+EXPORT_SYMBOL(inet_register_protosw);
+EXPORT_SYMBOL(inet_unregister_protosw);
+EXPORT_SYMBOL(ip_route_output_key);
+EXPORT_SYMBOL(ip_route_input);
+EXPORT_SYMBOL(icmp_send);
+EXPORT_SYMBOL(ip_options_compile);
+EXPORT_SYMBOL(ip_options_undo);
+EXPORT_SYMBOL(arp_send);
+EXPORT_SYMBOL(arp_broken_ops);
+EXPORT_SYMBOL(__ip_select_ident);
+EXPORT_SYMBOL(ip_send_check);
+EXPORT_SYMBOL(ip_fragment);
+EXPORT_SYMBOL(inet_family_ops);
+EXPORT_SYMBOL(in_aton);
+EXPORT_SYMBOL(ip_mc_inc_group);
+EXPORT_SYMBOL(ip_mc_dec_group);
+EXPORT_SYMBOL(ip_finish_output);
+EXPORT_SYMBOL(inet_stream_ops);
+EXPORT_SYMBOL(inet_dgram_ops);
+EXPORT_SYMBOL(ip_cmsg_recv);
+EXPORT_SYMBOL(inet_addr_type);
+EXPORT_SYMBOL(inet_select_addr);
+EXPORT_SYMBOL(ip_dev_find);
+EXPORT_SYMBOL(inetdev_by_index);
+EXPORT_SYMBOL(in_dev_finish_destroy);
+EXPORT_SYMBOL(ip_defrag);
+
+/* Route manipulation */
+EXPORT_SYMBOL(ip_rt_ioctl);
+EXPORT_SYMBOL(devinet_ioctl);
+EXPORT_SYMBOL(register_inetaddr_notifier);
+EXPORT_SYMBOL(unregister_inetaddr_notifier);
+
+/* needed for ip_gre -cw */
+EXPORT_SYMBOL(ip_statistics);
+
+#ifdef CONFIG_DLCI_MODULE
+extern int (*dlci_ioctl_hook)(unsigned int, void *);
+EXPORT_SYMBOL(dlci_ioctl_hook);
+#endif
+
+
+#ifdef CONFIG_IPV6
+EXPORT_SYMBOL(ipv6_addr_type);
+EXPORT_SYMBOL(icmpv6_send);
+EXPORT_SYMBOL(ndisc_mc_map);
+EXPORT_SYMBOL(register_inet6addr_notifier);
+EXPORT_SYMBOL(unregister_inet6addr_notifier);
+#include <net/ip6_route.h>
+EXPORT_SYMBOL(ip6_route_output);
+#endif
+#if defined (CONFIG_IPV6_MODULE) || defined (CONFIG_KHTTPD) || defined (CONFIG_KHTTPD_MODULE)
+/* inet functions common to v4 and v6 */
+EXPORT_SYMBOL(inet_release);
+EXPORT_SYMBOL(inet_stream_connect);
+EXPORT_SYMBOL(inet_dgram_connect);
+EXPORT_SYMBOL(inet_accept);
+EXPORT_SYMBOL(inet_listen);
+EXPORT_SYMBOL(inet_shutdown);
+EXPORT_SYMBOL(inet_setsockopt);
+EXPORT_SYMBOL(inet_getsockopt);
+EXPORT_SYMBOL(inet_sendmsg);
+EXPORT_SYMBOL(inet_recvmsg);
+#ifdef INET_REFCNT_DEBUG
+EXPORT_SYMBOL(inet_sock_nr);
+#endif
+EXPORT_SYMBOL(inet_sock_destruct);
+EXPORT_SYMBOL(inet_sock_release);
+
+/* Socket demultiplexing. */
+EXPORT_SYMBOL(tcp_hashinfo);
+EXPORT_SYMBOL(tcp_listen_wlock);
+EXPORT_SYMBOL(udp_hash);
+EXPORT_SYMBOL(udp_hash_lock);
+
+EXPORT_SYMBOL(tcp_destroy_sock);
+EXPORT_SYMBOL(ip_queue_xmit);
+EXPORT_SYMBOL(memcpy_fromiovecend);
+EXPORT_SYMBOL(csum_partial_copy_fromiovecend);
+EXPORT_SYMBOL(tcp_v4_lookup_listener);
+/* UDP/TCP exported functions for TCPv6 */
+EXPORT_SYMBOL(udp_ioctl);
+EXPORT_SYMBOL(udp_connect);
+EXPORT_SYMBOL(udp_disconnect);
+EXPORT_SYMBOL(udp_sendmsg);
+EXPORT_SYMBOL(tcp_close);
+EXPORT_SYMBOL(tcp_disconnect);
+EXPORT_SYMBOL(tcp_accept);
+EXPORT_SYMBOL(tcp_write_wakeup);
+EXPORT_SYMBOL(tcp_write_space);
+EXPORT_SYMBOL(tcp_poll);
+EXPORT_SYMBOL(tcp_ioctl);
+EXPORT_SYMBOL(tcp_shutdown);
+EXPORT_SYMBOL(tcp_setsockopt);
+EXPORT_SYMBOL(tcp_getsockopt);
+EXPORT_SYMBOL(tcp_recvmsg);
+EXPORT_SYMBOL(tcp_send_synack);
+EXPORT_SYMBOL(tcp_check_req);
+EXPORT_SYMBOL(tcp_child_process);
+EXPORT_SYMBOL(tcp_parse_options);
+EXPORT_SYMBOL(tcp_rcv_established);
+EXPORT_SYMBOL(tcp_init_xmit_timers);
+EXPORT_SYMBOL(tcp_clear_xmit_timers);
+EXPORT_SYMBOL(tcp_statistics);
+EXPORT_SYMBOL(tcp_rcv_state_process);
+EXPORT_SYMBOL(tcp_timewait_state_process);
+EXPORT_SYMBOL(tcp_timewait_cachep);
+EXPORT_SYMBOL(tcp_timewait_kill);
+EXPORT_SYMBOL(tcp_sendmsg);
+EXPORT_SYMBOL(tcp_v4_rebuild_header);
+EXPORT_SYMBOL(tcp_v4_send_check);
+EXPORT_SYMBOL(tcp_v4_conn_request);
+EXPORT_SYMBOL(tcp_create_openreq_child);
+EXPORT_SYMBOL(tcp_bucket_create);
+EXPORT_SYMBOL(__tcp_put_port);
+EXPORT_SYMBOL(tcp_put_port);
+EXPORT_SYMBOL(tcp_inherit_port);
+EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
+EXPORT_SYMBOL(tcp_v4_do_rcv);
+EXPORT_SYMBOL(tcp_v4_connect);
+EXPORT_SYMBOL(tcp_v4_hash_connecting);
+EXPORT_SYMBOL(tcp_unhash);
+EXPORT_SYMBOL(udp_prot);
+EXPORT_SYMBOL(tcp_prot);
+EXPORT_SYMBOL(tcp_openreq_cachep);
+EXPORT_SYMBOL(ipv4_specific);
+EXPORT_SYMBOL(tcp_simple_retransmit);
+EXPORT_SYMBOL(tcp_transmit_skb);
+EXPORT_SYMBOL(tcp_connect);
+EXPORT_SYMBOL(tcp_make_synack);
+EXPORT_SYMBOL(tcp_tw_deschedule);
+EXPORT_SYMBOL(tcp_delete_keepalive_timer);
+EXPORT_SYMBOL(tcp_reset_keepalive_timer);
+EXPORT_SYMBOL(sysctl_local_port_range);
+EXPORT_SYMBOL(tcp_port_rover);
+EXPORT_SYMBOL(udp_port_rover);
+EXPORT_SYMBOL(tcp_sync_mss);
+EXPORT_SYMBOL(net_statistics);
+EXPORT_SYMBOL(__tcp_mem_reclaim);
+EXPORT_SYMBOL(tcp_sockets_allocated);
+EXPORT_SYMBOL(sysctl_tcp_reordering);
+EXPORT_SYMBOL(sysctl_tcp_rmem);
+EXPORT_SYMBOL(sysctl_tcp_wmem);
+EXPORT_SYMBOL(sysctl_tcp_ecn);
+EXPORT_SYMBOL(tcp_cwnd_application_limited);
+EXPORT_SYMBOL(tcp_sendpage);
+
+EXPORT_SYMBOL(tcp_write_xmit);
+
+EXPORT_SYMBOL(tcp_v4_remember_stamp);
+
+extern int sysctl_tcp_tw_recycle;
+
+#ifdef CONFIG_SYSCTL
+EXPORT_SYMBOL(sysctl_tcp_tw_recycle);
+EXPORT_SYMBOL(sysctl_max_syn_backlog);
+#endif
+
+#if defined (CONFIG_IPV6_MODULE)
+EXPORT_SYMBOL(secure_tcpv6_sequence_number);
+EXPORT_SYMBOL(secure_ipv6_id);
+#endif
+
+#endif
+
+EXPORT_SYMBOL(netlink_set_err);
+EXPORT_SYMBOL(netlink_broadcast);
+EXPORT_SYMBOL(netlink_unicast);
+EXPORT_SYMBOL(netlink_kernel_create);
+EXPORT_SYMBOL(netlink_dump_start);
+EXPORT_SYMBOL(netlink_ack);
+#if defined(CONFIG_NETLINK_DEV) || defined(CONFIG_NETLINK_DEV_MODULE)
+EXPORT_SYMBOL(netlink_attach);
+EXPORT_SYMBOL(netlink_detach);
+EXPORT_SYMBOL(netlink_post);
+#endif
+
+EXPORT_SYMBOL(rtattr_parse);
+EXPORT_SYMBOL(rtnetlink_links);
+EXPORT_SYMBOL(__rta_fill);
+EXPORT_SYMBOL(rtnetlink_dump_ifinfo);
+EXPORT_SYMBOL(rtnetlink_put_metrics);
+EXPORT_SYMBOL(rtnl);
+EXPORT_SYMBOL(neigh_delete);
+EXPORT_SYMBOL(neigh_add);
+EXPORT_SYMBOL(neigh_dump_info);
+
+EXPORT_SYMBOL(dev_set_allmulti);
+EXPORT_SYMBOL(dev_set_promiscuity);
+EXPORT_SYMBOL(sklist_remove_socket);
+EXPORT_SYMBOL(rtnl_sem);
+EXPORT_SYMBOL(rtnl_lock);
+EXPORT_SYMBOL(rtnl_unlock);
+
+/* ABI emulation layers need this */
+EXPORT_SYMBOL(move_addr_to_kernel);
+EXPORT_SYMBOL(move_addr_to_user);
+
+/* Used by at least ipip.c. */
+EXPORT_SYMBOL(ipv4_config);
+EXPORT_SYMBOL(dev_open);
+
+/* Used by other modules */
+EXPORT_SYMBOL(in_ntoa);
+EXPORT_SYMBOL(xrlim_allow);
+
+EXPORT_SYMBOL(ip_rcv);
+EXPORT_SYMBOL(arp_rcv);
+EXPORT_SYMBOL(arp_tbl);
+EXPORT_SYMBOL(arp_find);
+
+#endif /* CONFIG_INET */
+
+#ifdef CONFIG_TR
+EXPORT_SYMBOL(tr_type_trans);
+#endif
+
+/* Device callback registration */
+EXPORT_SYMBOL(register_netdevice_notifier);
+EXPORT_SYMBOL(unregister_netdevice_notifier);
+
+/* support for loadable net drivers */
+#ifdef CONFIG_NET
+EXPORT_SYMBOL(loopback_dev);
+EXPORT_SYMBOL(register_netdevice);
+EXPORT_SYMBOL(unregister_netdevice);
+EXPORT_SYMBOL(netdev_state_change);
+EXPORT_SYMBOL(dev_new_index);
+EXPORT_SYMBOL(dev_get_by_index);
+EXPORT_SYMBOL(__dev_get_by_index);
+EXPORT_SYMBOL(dev_get_by_name);
+EXPORT_SYMBOL(__dev_get_by_name);
+EXPORT_SYMBOL(netdev_finish_unregister);
+EXPORT_SYMBOL(netdev_set_master);
+EXPORT_SYMBOL(eth_type_trans);
+#ifdef CONFIG_FDDI
+EXPORT_SYMBOL(fddi_type_trans);
+#endif /* CONFIG_FDDI */
+#if 0
+EXPORT_SYMBOL(eth_copy_and_sum);
+#endif
+EXPORT_SYMBOL(alloc_skb);
+EXPORT_SYMBOL(__kfree_skb);
+EXPORT_SYMBOL(skb_clone);
+EXPORT_SYMBOL(skb_copy);
+EXPORT_SYMBOL(netif_rx);
+EXPORT_SYMBOL(dev_add_pack);
+EXPORT_SYMBOL(dev_remove_pack);
+EXPORT_SYMBOL(dev_get);
+EXPORT_SYMBOL(dev_alloc);
+EXPORT_SYMBOL(dev_alloc_name);
+EXPORT_SYMBOL(__netdev_watchdog_up);
+#ifdef CONFIG_KMOD
+EXPORT_SYMBOL(dev_load);
+#endif
+EXPORT_SYMBOL(dev_ioctl);
+EXPORT_SYMBOL(dev_queue_xmit);
+#ifdef CONFIG_NET_HW_FLOWCONTROL
+EXPORT_SYMBOL(netdev_dropping);
+EXPORT_SYMBOL(netdev_register_fc);
+EXPORT_SYMBOL(netdev_unregister_fc);
+EXPORT_SYMBOL(netdev_fc_xoff);
+#endif
+EXPORT_SYMBOL(dev_base);
+EXPORT_SYMBOL(dev_base_lock);
+EXPORT_SYMBOL(dev_close);
+EXPORT_SYMBOL(dev_mc_add);
+EXPORT_SYMBOL(dev_mc_delete);
+EXPORT_SYMBOL(dev_mc_upload);
+EXPORT_SYMBOL(__kill_fasync);
+
+EXPORT_SYMBOL(if_port_text);
+
+#ifdef CONFIG_HIPPI
+EXPORT_SYMBOL(hippi_type_trans);
+#endif
+
+#ifdef CONFIG_NET_FASTROUTE
+EXPORT_SYMBOL(netdev_fastroute);
+#endif
+
+#ifdef CONFIG_SYSCTL
+EXPORT_SYMBOL(sysctl_wmem_max);
+EXPORT_SYMBOL(sysctl_rmem_max);
+#ifdef CONFIG_INET
+EXPORT_SYMBOL(sysctl_ip_default_ttl);
+#endif
+#endif
+
+/* Packet scheduler modules want these. */
+EXPORT_SYMBOL(qdisc_destroy);
+EXPORT_SYMBOL(qdisc_reset);
+EXPORT_SYMBOL(qdisc_restart);
+EXPORT_SYMBOL(qdisc_create_dflt);
+EXPORT_SYMBOL(noop_qdisc);
+EXPORT_SYMBOL(qdisc_tree_lock);
+#ifdef CONFIG_NET_SCHED
+PSCHED_EXPORTLIST;
+EXPORT_SYMBOL(pfifo_qdisc_ops);
+EXPORT_SYMBOL(register_qdisc);
+EXPORT_SYMBOL(unregister_qdisc);
+EXPORT_SYMBOL(qdisc_get_rtab);
+EXPORT_SYMBOL(qdisc_put_rtab);
+EXPORT_SYMBOL(qdisc_copy_stats);
+#ifdef CONFIG_NET_ESTIMATOR
+EXPORT_SYMBOL(qdisc_new_estimator);
+EXPORT_SYMBOL(qdisc_kill_estimator);
+#endif
+#ifdef CONFIG_NET_CLS_POLICE
+EXPORT_SYMBOL(tcf_police);
+EXPORT_SYMBOL(tcf_police_locate);
+EXPORT_SYMBOL(tcf_police_destroy);
+EXPORT_SYMBOL(tcf_police_dump);
+#endif
+#endif
+#ifdef CONFIG_NET_CLS
+EXPORT_SYMBOL(register_tcf_proto_ops);
+EXPORT_SYMBOL(unregister_tcf_proto_ops);
+#endif
+#ifdef CONFIG_NETFILTER
+#include <linux/netfilter.h>
+EXPORT_SYMBOL(nf_register_hook);
+EXPORT_SYMBOL(nf_unregister_hook);
+EXPORT_SYMBOL(nf_register_sockopt);
+EXPORT_SYMBOL(nf_unregister_sockopt);
+EXPORT_SYMBOL(nf_reinject);
+EXPORT_SYMBOL(nf_register_queue_handler);
+EXPORT_SYMBOL(nf_unregister_queue_handler);
+EXPORT_SYMBOL(nf_hook_slow);
+EXPORT_SYMBOL(nf_hooks);
+EXPORT_SYMBOL(nf_setsockopt);
+EXPORT_SYMBOL(nf_getsockopt);
+EXPORT_SYMBOL(ip_ct_attach);
+#ifdef CONFIG_INET
+#include <linux/netfilter_ipv4.h>
+EXPORT_SYMBOL(ip_route_me_harder);
+#endif
+#endif
+
+EXPORT_SYMBOL(register_gifconf);
+
+EXPORT_SYMBOL(net_call_rx_atomic);
+EXPORT_SYMBOL(softnet_data);
+
+#endif /* CONFIG_NET */
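The netsyms.c exports above are what the ebtables and br_db modules added by the patches below link against, most importantly nf_register_hook/nf_unregister_hook for the new PF_BRIDGE hooks. As a minimal sketch (not part of this commit; the function name, priority and hook choice are illustrative), a 2.4-style module that consumes those exports to watch bridged frames would look like this:

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/netfilter_bridge.h>

static unsigned int watch_frame(unsigned int hook, struct sk_buff **pskb,
                                const struct net_device *in,
                                const struct net_device *out,
                                int (*okfn)(struct sk_buff *))
{
        /* observe only: never modify the frame, never change the verdict */
        return NF_ACCEPT;
}

static struct nf_hook_ops watch_ops =
{ { NULL, NULL }, watch_frame, PF_BRIDGE, NF_BR_PRE_ROUTING, 0 };

static int __init init(void)
{
        return nf_register_hook(&watch_ops);
}

static void __exit fini(void)
{
        nf_unregister_hook(&watch_ops);
}

module_init(init);
module_exit(fini);
MODULE_LICENSE("GPL");

Returning NF_ACCEPT keeps the frame flowing through the bridge code unchanged, which is exactly what br_db.c below does at priority -250 on all five bridge hooks.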
diff --git a/kernel/patches/base-patches/ebtables-v2.0pre1_vs_2.4.18.diff b/kernel/patches/base-patches/ebtables-v2.0pre1_vs_2.4.18.diff
new file mode 100644
index 0000000..bab3e11
--- /dev/null
+++ b/kernel/patches/base-patches/ebtables-v2.0pre1_vs_2.4.18.diff
@@ -0,0 +1,2621 @@
+--- linux/net/Makefile Mon Feb 25 20:38:14 2002
++++ ebt2.0pre1/net/Makefile Wed Apr 3 19:57:30 2002
+@@ -7,7 +7,8 @@
+
+ O_TARGET := network.o
+
+-mod-subdirs := ipv4/netfilter ipv6/netfilter ipx irda bluetooth atm netlink sched
++mod-subdirs := bridge/netfilter ipv4/netfilter ipv6/netfilter ipx irda \
++ bluetooth atm netlink sched
+ export-objs := netsyms.o
+
+ subdir-y := core ethernet
+@@ -23,6 +24,12 @@
+ ifneq ($(CONFIG_IPV6),n)
+ ifneq ($(CONFIG_IPV6),)
+ subdir-$(CONFIG_NETFILTER) += ipv6/netfilter
++endif
++endif
++
++ifneq ($(CONFIG_BRIDGE),n)
++ifneq ($(CONFIG_BRIDGE),)
++subdir-$(CONFIG_BRIDGE) += bridge/netfilter
+ endif
+ endif
+
+--- linux/net/Config.in Wed Apr 3 21:50:19 2002
++++ ebt2.0pre1/net/Config.in Wed Apr 3 18:47:51 2002
+@@ -60,6 +60,7 @@
+ source net/decnet/Config.in
+ fi
+ dep_tristate '802.1d Ethernet Bridging' CONFIG_BRIDGE $CONFIG_INET
++ source net/bridge/netfilter/Config.in
+ if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
+ if [ "$CONFIG_BRIDGE" != "n" -a "$CONFIG_NETFILTER" != "n" ]; then
+ bool ' netfilter (firewalling) support' CONFIG_BRIDGE_NF
+--- /dev/null Thu Aug 24 11:00:32 2000
++++ ebt2.0pre1/net/bridge/netfilter/Makefile Wed Apr 3 18:48:52 2002
+@@ -0,0 +1,23 @@
++#
++# Makefile for the netfilter modules on top of bridging.
++#
++# Note! Dependencies are done automagically by 'make dep', which also
++# removes any old dependencies. DON'T put your own dependencies here
++# unless it's something special (ie not a .c file).
++#
++# Note 2! The CFLAGS definition is now in the main makefile...
++
++O_TARGET := netfilter.o
++
++export-objs = ebtables.o
++
++obj-$(CONFIG_BRIDGE_EBT) += ebtables.o
++obj-$(CONFIG_BRIDGE_EBT_T_FILTER) += ebtable_filter.o
++obj-$(CONFIG_BRIDGE_EBT_T_NAT) += ebtable_nat.o
++obj-$(CONFIG_BRIDGE_DB) += br_db.o
++obj-$(CONFIG_BRIDGE_EBT_IPF) += ebt_ip.o
++obj-$(CONFIG_BRIDGE_EBT_ARPF) += ebt_arp.o
++obj-$(CONFIG_BRIDGE_EBT_LOG) += ebt_log.o
++obj-$(CONFIG_BRIDGE_EBT_NAT) += ebt_nat.o
++
++include $(TOPDIR)/Rules.make
+--- /dev/null Thu Aug 24 11:00:32 2000
++++ ebt2.0pre1/net/bridge/netfilter/Config.in Wed Apr 3 18:48:59 2002
+@@ -0,0 +1,12 @@
++#
++# Bridge netfilter configuration
++#
++dep_tristate ' Bridge: ebtables' CONFIG_BRIDGE_EBT $CONFIG_BRIDGE
++dep_tristate ' ebt: filter table support' CONFIG_BRIDGE_EBT_T_FILTER $CONFIG_BRIDGE_EBT
++dep_tristate ' ebt: nat table support' CONFIG_BRIDGE_EBT_T_NAT $CONFIG_BRIDGE_EBT
++dep_tristate ' ebt: LOG support' CONFIG_BRIDGE_EBT_LOG $CONFIG_BRIDGE_EBT
++dep_tristate ' ebt: IP filter support' CONFIG_BRIDGE_EBT_IPF $CONFIG_BRIDGE_EBT
++dep_tristate ' ebt: ARP filter support' CONFIG_BRIDGE_EBT_ARPF $CONFIG_BRIDGE_EBT
++dep_tristate ' ebt: nat target support' CONFIG_BRIDGE_EBT_NAT $CONFIG_BRIDGE_EBT
++dep_tristate ' Bridge: ethernet database' CONFIG_BRIDGE_DB $CONFIG_BRIDGE
++
+--- /dev/null Thu Aug 24 11:00:32 2000
++++ ebt2.0pre1/net/bridge/netfilter/br_db.c Wed Apr 3 18:48:47 2002
+@@ -0,0 +1,357 @@
++/*
++ * bridge ethernet protocol database
++ *
++ * Authors:
++ * Bart De Schuymer <bart.de.schuymer@pandora.be>
++ *
++ * br_db.c, April, 2002
++ *
++ * This code is strongly inspired by the iptables code, which is
++ * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#include <linux/module.h>
++#include <linux/vmalloc.h>
++#include <linux/skbuff.h>
++#include <linux/if_ether.h>
++#include <linux/netfilter_bridge.h>
++#include <linux/br_db.h>
++#include <linux/socket.h> /* PF_BRIDGE */
++#include <linux/spinlock.h> /* rwlock_t */
++#include <asm/errno.h>
++#include <asm/uaccess.h> /* copy_[to,from]_user */
++#include <linux/smp.h> /* multiprocessors */
++
++#define BUGPRINT(format, args...) printk("kernel msg: brdb bug: please report to author: "format, ## args)
++/*#define BUGPRINT(format, args...)*/
++#define MEMPRINT(format, args...) printk("kernel msg: brdb : out of memory: "format, ## args)
++/*#define MEMPRINT(format, args...)*/
++
++/* database variables */
++static __u16 allowdb = BRDB_NODB;
++static struct brdb_dbentry **flowdb = NULL;
++static unsigned int *dbsize;
++static unsigned int *dbnum;
++/* database lock */
++static rwlock_t brdb_dblock;
++
++static inline int brdb_dev_check(char *entry, const struct net_device *device){
++ if (*entry == '\0') return 0;
++ if (!device) return 1;
++ return strncmp(entry, device->name, IFNAMSIZ);
++}
++
++static inline int brdb_proto_check(unsigned int a, unsigned int b){
++ if (a == b || ( a == IDENTIFY802_3 && ntohs(b) < 1536 )) return 0;
++ return 1;
++}
++
++static unsigned int maintaindb (unsigned int hook, struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ int (*okfn)(struct sk_buff *))
++{
++ struct brdb_dbentry *hlp;
++ int i, cpunr;
++ unsigned short ethproto = ((**pskb).mac.ethernet)->h_proto;
++
++ cpunr = cpu_number_map(smp_processor_id());
++
++ read_lock_bh(&brdb_dblock);
++
++ if (allowdb == BRDB_NODB) {// must be after readlock
++ read_unlock_bh(&brdb_dblock);
++ return NF_ACCEPT;
++ }
++ hlp = flowdb[cpunr];
++ /* search for existing entry */
++ for (i = 0; i < dbnum[cpunr]; i++) {
++ if (hlp->hook == hook && !brdb_proto_check(hlp->ethproto, ethproto) &&
++ !brdb_dev_check(hlp->in, in) && !brdb_dev_check(hlp->out, out)) {
++ read_unlock_bh(&brdb_dblock);
++ return NF_ACCEPT;
++ }
++ hlp++;
++ }
++ /* add new entry to database */
++ if (dbnum[cpunr] == dbsize[cpunr]) {
++ dbsize[cpunr] *= 2;
++ if ( !( hlp = (struct brdb_dbentry *) vmalloc(dbsize[cpunr] * sizeof(struct brdb_dbentry)) ) ) {
++ dbsize[cpunr] /= 2;
++ MEMPRINT("maintaindb && nomemory\n");
++ read_unlock_bh(&brdb_dblock);
++ return NF_ACCEPT;
++ }
++ memcpy(hlp, flowdb[cpunr], dbnum[cpunr] * sizeof(struct brdb_dbentry));
++ vfree(flowdb[cpunr]);
++ flowdb[cpunr] = hlp;
++ }
++
++ hlp = flowdb[cpunr] + dbnum[cpunr];
++ hlp->hook = hook;
++ if (in)
++ strncpy(hlp->in, in->name, IFNAMSIZ);
++ else
++ hlp->in[0] = '\0';
++ if (out)
++ strncpy(hlp->out, out->name, IFNAMSIZ);
++ else
++ hlp->out[0] = '\0';
++ if (ntohs(ethproto) < 1536)
++ hlp->ethproto = IDENTIFY802_3;
++ else
++ hlp->ethproto = ethproto;
++ dbnum[cpunr]++;
++
++ read_unlock_bh(&brdb_dblock);
++
++ return NF_ACCEPT;
++}
++
++static int copy_db(void *user, int *len)
++{
++ int i, j, nentries = 0, ret;
++ struct brdb_dbentry *begin, *end1, *end2, *point, *point2;
++
++ write_lock_bh(&brdb_dblock);
++ for (i = 0; i < smp_num_cpus; i++)
++ nentries += dbnum[i];
++ if (*len > nentries) {
++ write_unlock_bh(&brdb_dblock);
++ return -EINVAL;
++ }
++
++ if ( !(begin = (struct brdb_dbentry *) vmalloc((*len) * sizeof(struct brdb_dbentry))) ) {
++ write_unlock_bh(&brdb_dblock);
++ return -ENOMEM;
++ }
++ memcpy(begin, flowdb[0], dbnum[0] * sizeof(struct brdb_dbentry));
++ end1 = begin + dbnum[0];
++ for (i = 1; i < smp_num_cpus; i++) {/* cycle databases per cpu */
++ point2 = flowdb[i];
++ end2 = end1;
++ for (j = 0; j < dbnum[i]; j++) {/* cycle entries of a cpu's database (point2) */
++ for (point = begin; point != end2; point++)/* cycle different entries we found so far */
++ if (point->hook == point2->hook && !strncmp(point->in, point2->in, IFNAMSIZ) &&
++ !strncmp(point->out, point2->out, IFNAMSIZ) && point->ethproto == point2->ethproto)
++ goto out;/* already exists in a database of another cpu */
++
++ memcpy(end1, point2, sizeof(struct brdb_dbentry));
++ end1++;
++out:
++ point2++;
++ }
++ }
++ write_unlock_bh(&brdb_dblock);
++ i = end1 - begin; /* number of unique entries actually written, not bytes */
++ *len = i < *len ? i : *len;
++ if (copy_to_user(user, begin, *len * sizeof(struct brdb_dbentry)) != 0)
++ ret = -EFAULT;
++ else
++ ret = 0;
++ vfree(begin);
++ return ret;
++}
++
++static int switch_nodb(void){
++ int i;
++
++ if (!flowdb)
++ BUGPRINT("switch_nodb && !flowdb\n");
++ for (i = 0; i < smp_num_cpus; i++)
++ vfree(flowdb[i]);
++ vfree(flowdb);
++ if (!dbsize)
++ BUGPRINT("switch_nodb && !dbsize\n");
++ vfree(dbsize);
++ if (!dbnum)
++ BUGPRINT("switch_nodb && !dbnum\n");
++ vfree(dbnum);
++ flowdb = NULL;
++ allowdb = BRDB_NODB;
++ return 0;
++}
++
++static int switch_db(void)
++{
++ int i, j;
++
++ if (flowdb) BUGPRINT("switch_db && flowdb\n");
++ if ( !(flowdb = (struct brdb_dbentry **) vmalloc(smp_num_cpus * sizeof(struct brdb_dbentry *))) ) {
++ MEMPRINT("switch_db && nomemory\n");
++ return -ENOMEM;
++ }
++
++ for (i = 0; i < smp_num_cpus; i++)
++ if ( !(flowdb[i] = (struct brdb_dbentry *) vmalloc(INITIAL_DBSIZE * sizeof(struct brdb_dbentry))) )
++ goto sw_free1;
++ else
++ memset(flowdb[i], 0, INITIAL_DBSIZE * sizeof(struct brdb_dbentry));
++
++ if ( !(dbnum = (int*) vmalloc(smp_num_cpus * sizeof(int))) )
++ goto sw_free2;
++
++ if ( !(dbsize = (int*) vmalloc(smp_num_cpus * sizeof(int))) )
++ goto sw_free3;
++
++ for (i = 0; i < smp_num_cpus; i++) {
++ dbnum[i] = 0;
++ dbsize[i] = INITIAL_DBSIZE;
++ }
++ allowdb = BRDB_DB;
++ return 0;
++
++sw_free3:
++ MEMPRINT("switch_db && nomemory2\n");
++ vfree(dbnum);
++ dbnum = NULL;
++sw_free2:
++ MEMPRINT("switch_db && nomemory3\n");
++sw_free1:
++ MEMPRINT("switch_db && nomemory4\n");
++ for (j = 0; j<i; j++)
++ vfree(flowdb[j]);
++ vfree(flowdb);
++ allowdb = BRDB_NODB;
++ return -ENOMEM;
++}
++
++static int
++do_brdb_set_ctl(struct sock *sk, int cmd, void *user, unsigned int len)
++{
++ int ret;
++ __u16 adb;
++ switch(cmd) {
++ case BRDB_SO_SET_ALLOWDB:
++ if (len != sizeof(__u16)) {
++ ret = -EINVAL;
++ break;
++ }
++ if (copy_from_user(&adb, user, len) != 0) {
++ ret = -EFAULT;
++ break;
++ }
++ if (adb != BRDB_DB && adb != BRDB_NODB) {
++ ret = -EINVAL;
++ break;
++ }
++ write_lock_bh(&brdb_dblock);
++ if (adb == allowdb) {
++ ret = 0;
++ write_unlock_bh(&brdb_dblock);
++ break;
++ }
++ if (allowdb == BRDB_DB)
++ ret = switch_nodb();
++ else
++ ret = switch_db();
++ write_unlock_bh(&brdb_dblock);
++ break;
++
++ default:
++ ret = -EINVAL;
++ }
++ return ret;
++}
++
++static int
++do_brdb_get_ctl(struct sock *sk, int cmd, void *user, int *len)
++{
++ struct brdb_dbinfo help2;
++ int i, ret;
++ switch(cmd) {
++ case BRDB_SO_GET_DBINFO:
++ if (sizeof(struct brdb_dbinfo) != *len)
++ return -EINVAL;
++ write_lock_bh(&brdb_dblock);
++ /* help2.nentries == 0 : no database is being kept
++ * help2.nentries - 1 : the number of entries (when a database is kept)
++ */
++ if (allowdb == BRDB_NODB)
++ help2.nentries = 0;
++ else {
++ help2.nentries = 1;
++ for (i = 0; i < smp_num_cpus; i++)
++ help2.nentries += dbnum[i];
++ }
++ write_unlock_bh(&brdb_dblock);
++ if (copy_to_user(user, &help2, sizeof(help2)) != 0)
++ ret = -EFAULT;
++ else
++ ret = 0;
++ break;
++
++ case BRDB_SO_GET_DB:
++ if (*len == 0 || allowdb == BRDB_NODB)
++ return -EINVAL;
++ ret = copy_db(user, len);
++ break;
++ default:
++ ret = -EINVAL;
++ }
++
++ return ret;
++}
++
++static struct nf_sockopt_ops brdb_sockopts
++= { { NULL, NULL }, PF_INET, BRDB_BASE_CTL, BRDB_SO_SET_MAX+1, do_brdb_set_ctl,
++ BRDB_BASE_CTL, BRDB_SO_GET_MAX+1, do_brdb_get_ctl, 0, NULL };
++
++
++static struct nf_hook_ops brdb_br_ops[] = {
++ { { NULL, NULL }, maintaindb, PF_BRIDGE, NF_BR_PRE_ROUTING, -250},
++ { { NULL, NULL }, maintaindb, PF_BRIDGE, NF_BR_LOCAL_IN, -250},
++ { { NULL, NULL }, maintaindb, PF_BRIDGE, NF_BR_FORWARD, -250},
++ { { NULL, NULL }, maintaindb, PF_BRIDGE, NF_BR_LOCAL_OUT, -250},
++ { { NULL, NULL }, maintaindb, PF_BRIDGE, NF_BR_POST_ROUTING, -250}
++};
++
++static int __init init(void)
++{
++ int ret;
++
++ if ((ret = nf_register_hook(&brdb_br_ops[0])) < 0)
++ return ret;
++
++ if ((ret = nf_register_hook(&brdb_br_ops[1])) < 0)
++ goto clean0;
++
++ if ((ret = nf_register_hook(&brdb_br_ops[2])) < 0)
++ goto clean1;
++
++ if ((ret = nf_register_hook(&brdb_br_ops[3])) < 0)
++ goto clean2;
++
++ if ((ret = nf_register_hook(&brdb_br_ops[4])) < 0)
++ goto clean3;
++
++ /* Register setsockopt */
++ if ((ret = nf_register_sockopt(&brdb_sockopts)) < 0)
++ goto clean4;
++
++ rwlock_init(&brdb_dblock);
++ printk("Bridge ethernet database registered\n");
++ return ret;
++
++clean4: nf_unregister_hook(&brdb_br_ops[4]);
++clean3: nf_unregister_hook(&brdb_br_ops[3]);
++clean2: nf_unregister_hook(&brdb_br_ops[2]);
++clean1: nf_unregister_hook(&brdb_br_ops[1]);
++clean0: nf_unregister_hook(&brdb_br_ops[0]);
++
++ return ret;
++}
++
++static void __exit fini(void)
++{
++ nf_unregister_hook(&brdb_br_ops[4]);
++ nf_unregister_hook(&brdb_br_ops[3]);
++ nf_unregister_hook(&brdb_br_ops[2]);
++ nf_unregister_hook(&brdb_br_ops[1]);
++ nf_unregister_hook(&brdb_br_ops[0]);
++ nf_unregister_sockopt(&brdb_sockopts);
++}
++
++module_init(init);
++module_exit(fini);
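Userspace talks to this database through the nf_sockopt_ops registered above (note that it registers on PF_INET, so the calls travel over an ordinary IP socket). Below is a minimal userspace sketch of reading the entry count back, assuming the BRDB_* constants and struct brdb_dbinfo come from <linux/br_db.h> and that the 2.4 netfilter sockopt path is reached via level IPPROTO_IP; it is an illustration, not part of the patch:

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/br_db.h>        /* BRDB_SO_GET_DBINFO, struct brdb_dbinfo (assumed) */

int main(void)
{
        struct brdb_dbinfo info;
        socklen_t len = sizeof(info);
        unsigned int n;
        int sk = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);        /* needs CAP_NET_ADMIN */

        if (sk < 0)
                return 1;
        /* do_brdb_get_ctl() above returns nentries == 0 when no database is
         * being kept, and number-of-entries + 1 otherwise. */
        if (getsockopt(sk, IPPROTO_IP, BRDB_SO_GET_DBINFO, &info, &len) < 0) {
                close(sk);
                return 1;
        }
        n = info.nentries;
        printf("brdb: %u entries\n", n ? n - 1 : 0);
        close(sk);
        return 0;
}

Switching collection on in the first place is done the same way, with setsockopt(BRDB_SO_SET_ALLOWDB) and a value of BRDB_DB, as handled by do_brdb_set_ctl() above.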
+--- /dev/null Thu Aug 24 11:00:32 2000
++++ ebt2.0pre1/net/bridge/netfilter/ebtable_filter.c Wed Apr 3 18:48:47 2002
+@@ -0,0 +1,89 @@
++/*
++ * ebtable_filter
++ *
++ * Authors:
++ * Bart De Schuymer <bart.de.schuymer@pandora.be>
++ *
++ * April, 2002
++ *
++ */
++
++#include <linux/netfilter_bridge/ebtables.h>
++#include <linux/netfilter_bridge.h>
++#include <linux/module.h>
++
++#define FILTER_VALID_HOOKS ((1 << NF_BR_LOCAL_IN) | (1 << NF_BR_FORWARD) | (1 << NF_BR_LOCAL_OUT))
++
++static struct ebt_entries initial_chains[] =
++{
++ {0, EBT_ACCEPT, 0},
++ {0, EBT_ACCEPT, 0},
++ {0, EBT_ACCEPT, 0}
++};
++
++static struct ebt_replace initial_table =
++{
++ "filter", FILTER_VALID_HOOKS, 0, 3 * sizeof(struct ebt_entries),
++ { [NF_BR_LOCAL_IN]&initial_chains[0], [NF_BR_FORWARD]&initial_chains[1],
++ [NF_BR_LOCAL_OUT]&initial_chains[2] },{},
++ 0, NULL, (char *)initial_chains
++};
++
++static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
++{
++ if (valid_hooks & ~FILTER_VALID_HOOKS)
++ return -EINVAL;
++ return 0;
++}
++
++static struct ebt_table frame_filter =
++{
++ {NULL, NULL}, "filter", &initial_table, FILTER_VALID_HOOKS,
++ RW_LOCK_UNLOCKED, check, NULL
++};
++
++static unsigned int ebt_hook (unsigned int hook, struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ int (*okfn)(struct sk_buff *))
++{
++ return ebt_do_table(hook, pskb, in, out, &frame_filter);
++}
++
++static struct nf_hook_ops ebt_ops_filter[] = {
++ { { NULL, NULL }, ebt_hook, PF_BRIDGE, NF_BR_LOCAL_IN, -200},
++ { { NULL, NULL }, ebt_hook, PF_BRIDGE, NF_BR_FORWARD, -200},
++ { { NULL, NULL }, ebt_hook, PF_BRIDGE, NF_BR_LOCAL_OUT, 200}
++};
++
++static int __init init(void)
++{
++ int i, j, ret;
++
++ ret = ebt_register_table(&frame_filter);
++ if (ret < 0)
++ return ret;
++ for (i = 0; i < sizeof(ebt_ops_filter) / sizeof(ebt_ops_filter[0]); i++)
++ if ((ret = nf_register_hook(&ebt_ops_filter[i])) < 0)
++ goto cleanup;
++ return ret;
++cleanup:
++ for (j = 0; j < i; j++)
++ nf_unregister_hook(&ebt_ops_filter[j]);
++ ebt_unregister_table(&frame_filter);
++ return ret;
++}
++
++static void __exit fini(void)
++{
++ int i;
++
++ for (i = 0; i < sizeof(ebt_ops_filter) / sizeof(ebt_ops_filter[0]); i++)
++ nf_unregister_hook(&ebt_ops_filter[i]);
++ ebt_unregister_table(&frame_filter);
++}
++
++module_init(init);
++module_exit(fini);
++EXPORT_NO_SYMBOLS;
++MODULE_LICENSE("GPL");
+--- /dev/null Thu Aug 24 11:00:32 2000
++++ ebt2.0pre1/net/bridge/netfilter/ebtable_nat.c Wed Apr 3 18:48:47 2002
+@@ -0,0 +1,149 @@
++/*
++ * ebtable_nat
++ *
++ * Authors:
++ * Bart De Schuymer <bart.de.schuymer@pandora.be>
++ *
++ * April, 2002
++ *
++ */
++
++#include <linux/netfilter_bridge/ebtables.h>
++#include <linux/netfilter_bridge.h>
++#include <linux/netdevice.h>
++#include <linux/module.h>
++#define NAT_VALID_HOOKS ((1 << NF_BR_PRE_ROUTING) | (1 << NF_BR_LOCAL_OUT) | (1 << NF_BR_POST_ROUTING))
++
++static struct ebt_entries initial_chains[] =
++{
++ {0, EBT_ACCEPT, 0},
++ {0, EBT_ACCEPT, 0},
++ {0, EBT_ACCEPT, 0}
++};
++
++static struct ebt_replace initial_table =
++{
++ "nat", NAT_VALID_HOOKS, 0, 3 * sizeof(struct ebt_entries),
++ { [NF_BR_PRE_ROUTING]&initial_chains[0], [NF_BR_LOCAL_OUT]&initial_chains[1],
++ [NF_BR_POST_ROUTING]&initial_chains[2] }, {},
++ 0, NULL, (char *)initial_chains
++};
++
++static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
++{
++ if (valid_hooks & ~NAT_VALID_HOOKS)
++ return -EINVAL;
++ return 0;
++}
++
++static struct ebt_table frame_nat =
++{
++ {NULL, NULL}, "nat", &initial_table, NAT_VALID_HOOKS,
++ RW_LOCK_UNLOCKED, check, NULL
++};
++
++// used for snat to know if the frame comes from FORWARD or LOCAL_OUT
++// needed because of the bridge-nf patch (that allows use of iptables on bridged traffic)
++// if the packet is routed, we want the ebtables stuff on POSTROUTING to be executed _after_ the iptables stuff
++// when it's bridged, it's the other way around
++static struct net_device __fake_net_device = {
++ hard_header_len: ETH_HLEN
++};
++
++static unsigned int
++ebt_nat_dst (unsigned int hook, struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ int (*okfn)(struct sk_buff *))
++{
++ return ebt_do_table(hook, pskb, in, out, &frame_nat);
++}
++
++// let snat know this frame is routed
++static unsigned int ebt_clear_physin (unsigned int hook, struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ int (*okfn)(struct sk_buff *))
++{
++ (*pskb)->physindev = NULL;
++ return NF_ACCEPT;
++}
++
++// let snat know this frame is bridged
++static unsigned int ebt_set_physin (unsigned int hook, struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ int (*okfn)(struct sk_buff *))
++{
++ (*pskb)->physindev = &__fake_net_device;
++ return NF_ACCEPT;
++}
++
++static unsigned int ebt_nat_src (unsigned int hook, struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ int (*okfn)(struct sk_buff *))
++{
++ // this is a routed packet
++ if ((*pskb)->physindev == NULL)
++ return NF_ACCEPT;
++ if ((*pskb)->physindev != &__fake_net_device)
++ printk("ebtables (br_nat_src): physindev hack doesn't work - BUG\n");
++
++ return ebt_do_table(hook, pskb, in, out, &frame_nat);
++}
++
++static unsigned int ebt_nat_src_route (unsigned int hook, struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ int (*okfn)(struct sk_buff *))
++{
++ // this is a bridged packet
++ if ((*pskb)->physindev == &__fake_net_device)
++ return NF_ACCEPT;
++ if ((*pskb)->physindev)
++ printk("ebtables (br_nat_src_route): physindev hack doesn't work - BUG\n");
++
++ return ebt_do_table(hook, pskb, in, out, &frame_nat);
++}
++
++static struct nf_hook_ops ebt_ops_nat[] = {
++ { { NULL, NULL }, ebt_nat_dst, PF_BRIDGE, NF_BR_LOCAL_OUT, 100},
++ { { NULL, NULL }, ebt_nat_src, PF_BRIDGE, NF_BR_POST_ROUTING, -100},
++ { { NULL, NULL }, ebt_nat_src_route, PF_BRIDGE, NF_BR_POST_ROUTING, 300},
++ { { NULL, NULL }, ebt_nat_dst, PF_BRIDGE, NF_BR_PRE_ROUTING, -300},
++ { { NULL, NULL }, ebt_clear_physin, PF_BRIDGE, NF_BR_LOCAL_OUT, 200 + 1},
++ { { NULL, NULL }, ebt_set_physin, PF_BRIDGE, NF_BR_FORWARD, 200 + 1}
++};
++
++static int __init init(void)
++{
++ int i, ret, j;
++
++ ret = ebt_register_table(&frame_nat);
++ if (ret < 0)
++ return ret;
++ for (i = 0; i < sizeof(ebt_ops_nat) / sizeof(ebt_ops_nat[0]); i++)
++ if ((ret = nf_register_hook(&ebt_ops_nat[i])) < 0)
++ goto cleanup;
++ return ret;
++cleanup:
++ for (j = 0; j < i; j++)
++ nf_unregister_hook(&ebt_ops_nat[j]);
++ ebt_unregister_table(&frame_nat);
++ return ret;
++}
++
++static void __exit fini(void)
++{
++ int i;
++
++ for (i = 0; i < sizeof(ebt_ops_nat) / sizeof(ebt_ops_nat[0]); i++)
++ nf_unregister_hook(&ebt_ops_nat[i]);
++ ebt_unregister_table(&frame_nat);
++}
++
++module_init(init);
++module_exit(fini);
++EXPORT_NO_SYMBOLS;
++MODULE_LICENSE("GPL");
+--- /dev/null Thu Aug 24 11:00:32 2000
++++ ebt2.0pre1/net/bridge/netfilter/ebt_arp.c Wed Apr 3 18:48:47 2002
+@@ -0,0 +1,95 @@
++/*
++ * ebt_arp
++ *
++ * Authors:
++ * Bart De Schuymer <bart.de.schuymer@pandora.be>
++ * Tim Gardner <timg@tpi.com>
++ *
++ * April, 2002
++ *
++ */
++
++#include <linux/netfilter_bridge/ebtables.h>
++#include <linux/netfilter_bridge/ebt_arp.h>
++#include <linux/if_arp.h>
++#include <linux/module.h>
++
++#define FWINV2(bool,invflg) ((bool) ^ !!(infostuff->invflags & invflg))
++static int ebt_filter_arp(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *data,
++ unsigned int datalen, const struct ebt_counter *c)
++{
++ struct ebt_arp_info *infostuff = (struct ebt_arp_info *)data;
++
++ if (infostuff->bitmask & EBT_ARP_OPCODE && FWINV2(infostuff->opcode != ((*skb).nh.arph)->ar_op, EBT_ARP_OPCODE))
++ return 1;
++ if (infostuff->bitmask & EBT_ARP_HTYPE && FWINV2(infostuff->htype != ((*skb).nh.arph)->ar_hrd, EBT_ARP_HTYPE))
++ return 1;
++ if (infostuff->bitmask & EBT_ARP_PTYPE && FWINV2(infostuff->ptype != ((*skb).nh.arph)->ar_pro, EBT_ARP_PTYPE))
++ return 1;
++
++ if (infostuff->bitmask & (EBT_ARP_SRC_IP | EBT_ARP_DST_IP))
++ {
++ __u32 arp_len = sizeof(struct arphdr) + (2*(((*skb).nh.arph)->ar_hln)) + (2*(((*skb).nh.arph)->ar_pln));
++ __u32 dst;
++ __u32 src;
++
++ // Make sure the packet is long enough.
++ if ((((*skb).nh.raw) + arp_len) > (*skb).tail)
++ return 1;
++ // IPV4 addresses are always 4 bytes.
++ if (((*skb).nh.arph)->ar_pln != sizeof(__u32))
++ return 1;
++
++ if (infostuff->bitmask & EBT_ARP_SRC_IP) {
++ memcpy(&src, ((*skb).nh.raw)+sizeof(struct arphdr)+((*skb).nh.arph)->ar_hln, sizeof(__u32));
++ if (FWINV2(infostuff->saddr != (src & infostuff->smsk), EBT_ARP_SRC_IP))
++ return 1;
++ }
++
++ if (infostuff->bitmask & EBT_ARP_DST_IP) {
++ memcpy(&dst, ((*skb).nh.raw)+sizeof(struct arphdr) + (2*(((*skb).nh.arph)->ar_hln)) + (((*skb).nh.arph)->ar_pln), sizeof(__u32));
++ if (FWINV2(infostuff->daddr != (dst & infostuff->dmsk), EBT_ARP_DST_IP))
++ return 1;
++ }
++ }
++ return 0;
++}
++
++static int ebt_arp_check(const char *tablename, unsigned int hooknr, const struct ebt_entry *e, void *data, unsigned int datalen)
++{
++ struct ebt_arp_info *infostuff = (struct ebt_arp_info *) data;
++
++ if (datalen != sizeof(struct ebt_arp_info))
++ return -EINVAL;
++ if (e->bitmask & (EBT_NOPROTO | EBT_802_3) ||
++ (e->ethproto != __constant_htons(ETH_P_ARP) &&
++ e->ethproto != __constant_htons(ETH_P_RARP)) ||
++ e->invflags & EBT_IPROTO)
++ return -EINVAL;
++ if (infostuff->bitmask & ~EBT_ARP_MASK)
++ return -EINVAL;
++ return 0;
++}
++
++static struct ebt_match filter_arp =
++{
++ {NULL, NULL}, EBT_ARP_MATCH, ebt_filter_arp, ebt_arp_check, NULL, THIS_MODULE
++};
++
++static int __init init(void)
++{
++ return ebt_register_match(&filter_arp);
++}
++
++static void __exit fini(void)
++{
++ ebt_unregister_match(&filter_arp);
++}
++
++module_init(init);
++module_exit(fini);
++EXPORT_NO_SYMBOLS;
++MODULE_LICENSE("GPL");
+--- /dev/null Thu Aug 24 11:00:32 2000
++++ ebt2.0pre1/net/bridge/netfilter/ebt_ip.c Wed Apr 3 18:48:47 2002
+@@ -0,0 +1,74 @@
++/*
++ * ebt_ip
++ *
++ * Authors:
++ * Bart De Schuymer <bart.de.schuymer@pandora.be>
++ *
++ * April, 2002
++ *
++ */
++
++#include <linux/netfilter_bridge/ebtables.h>
++#include <linux/netfilter_bridge/ebt_ip.h>
++#include <linux/ip.h>
++#include <linux/module.h>
++
++#define FWINV2(bool,invflg) ((bool) ^ !!(infostuff->invflags & invflg))
++static int ebt_filter_ip(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *data,
++ unsigned int datalen, const struct ebt_counter *c)
++{
++ struct ebt_ip_info *infostuff = (struct ebt_ip_info *) data;
++
++ if (infostuff->bitmask & EBT_IP_TOS && FWINV2(infostuff->tos != ((*skb).nh.iph)->tos, EBT_IP_TOS))
++ return 1;
++ if (infostuff->bitmask & EBT_IP_PROTO && FWINV2(infostuff->protocol != ((*skb).nh.iph)->protocol, EBT_IP_PROTO))
++ return 1;
++ if (infostuff->bitmask & EBT_IP_SOURCE && FWINV2((((*skb).nh.iph)->saddr & infostuff->smsk) != infostuff->saddr, EBT_IP_SOURCE))
++ return 1;
++ if ((infostuff->bitmask & EBT_IP_DEST) &&
++ FWINV2((((*skb).nh.iph)->daddr & infostuff->dmsk) != infostuff->daddr, EBT_IP_DEST))
++ return 1;
++ return 0;
++}
++
++static int ebt_ip_check(const char *tablename, unsigned int hooknr, const struct ebt_entry *e, void *data, unsigned int datalen)
++{
++ struct ebt_ip_info *infostuff = (struct ebt_ip_info *) data;
++
++ if (datalen != sizeof(struct ebt_ip_info)) {
++ return -EINVAL;
++ }
++ if (e->bitmask & (EBT_NOPROTO | EBT_802_3) ||
++ e->ethproto != __constant_htons(ETH_P_IP) ||
++ e->invflags & EBT_IPROTO)
++ {
++ return -EINVAL;
++ }
++ if (infostuff->bitmask & ~EBT_IP_MASK) {
++ return -EINVAL;
++ }
++ return 0;
++}
++
++static struct ebt_match filter_ip =
++{
++ {NULL, NULL}, EBT_IP_MATCH, ebt_filter_ip, ebt_ip_check, NULL, THIS_MODULE
++};
++
++static int __init init(void)
++{
++ return ebt_register_match(&filter_ip);
++}
++
++static void __exit fini(void)
++{
++ ebt_unregister_match(&filter_ip);
++}
++
++module_init(init);
++module_exit(fini);
++EXPORT_NO_SYMBOLS;
++MODULE_LICENSE("GPL");
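Both ebt_arp and ebt_ip rely on the same FWINV2 idiom: the raw comparison result is XORed with the rule's invert flag, so a single expression covers both the plain test and its negated form. The standalone userspace illustration below adapts the macro to take the invert bitmask from a local variable instead of infostuff->invflags; the flag name and values are made up for the example and do not come from the ebtables headers:

#include <stdio.h>

/* Same shape as the FWINV2 macro in ebt_arp.c/ebt_ip.c above, adapted to read
 * the invert bitmask from a plain local variable. */
#define FWINV2(bool, invflg) ((bool) ^ !!(invflags & (invflg)))

#define EXAMPLE_INV_TOS 0x01    /* hypothetical invert flag, for the example only */

int main(void)
{
        unsigned int invflags = EXAMPLE_INV_TOS;        /* the rule negates its TOS test */
        unsigned char rule_tos = 0x10, packet_tos = 0x10;

        /* The match helpers return 1 for "no match".  Without the invert flag,
         * equal TOS values give 0 (match); with the flag set, the XOR flips
         * that, so an equal TOS now makes the rule fail. */
        if (FWINV2(rule_tos != packet_tos, EXAMPLE_INV_TOS))
                printf("packet does NOT match the rule\n");
        else
                printf("packet matches the rule\n");
        return 0;
}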
+--- /dev/null Thu Aug 24 11:00:32 2000
++++ ebt2.0pre1/net/bridge/netfilter/ebt_log.c Wed Apr 3 18:48:47 2002
+@@ -0,0 +1,103 @@
++/*
++ * ebt_log
++ *
++ * Authors:
++ * Bart De Schuymer <bart.de.schuymer@pandora.be>
++ *
++ * April, 2002
++ *
++ */
++
++#include <linux/netfilter_bridge/ebtables.h>
++#include <linux/netfilter_bridge/ebt_log.h>
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/if_arp.h>
++#include <linux/spinlock.h>
++
++static spinlock_t ebt_log_lock = SPIN_LOCK_UNLOCKED;
++
++static int ebt_log_check(const char *tablename, unsigned int hooknr, const struct ebt_entry *e,
++ void *data, unsigned int datalen)
++{
++ struct ebt_log_info *loginfo = (struct ebt_log_info *)data;
++
++ if (datalen != sizeof(struct ebt_log_info))
++ return -EINVAL;
++ if (loginfo->bitmask & ~EBT_LOG_MASK)
++ return -EINVAL;
++ if (loginfo->loglevel >= 8)
++ return -EINVAL;
++ loginfo->prefix[EBT_LOG_PREFIX_SIZE - 1] = '\0';
++ return 0;
++}
++
++static void ebt_log(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out, const void *data, unsigned int datalen,
++ const struct ebt_counter *c)
++{
++ struct ebt_log_info *loginfo = (struct ebt_log_info *)data;
++ char level_string[4] = "< >";
++ level_string[1] = '0' + loginfo->loglevel;
++
++ spin_lock_bh(&ebt_log_lock);
++ printk(level_string);
++ printk("%s IN=%s OUT=%s ",
++ loginfo->prefix,
++ in ? in->name : "",
++ out ? out->name : "");// max length: 29 + 10 + 2 * 16
++
++ if (skb->dev->hard_header_len) {
++ int i;
++ unsigned char *p = (skb->mac.ethernet)->h_source;
++ printk("MAC source = ");
++ for (i = 0; i < ETH_ALEN; i++,p++)
++ printk("%02x%c", *p,
++ i == ETH_ALEN - 1
++ ? ' ':':');// length: 31
++ printk("MAC dest = ");
++ p = (skb->mac.ethernet)->h_dest;
++ for (i = 0; i < ETH_ALEN; i++,p++)
++ printk("%02x%c", *p,
++ i == ETH_ALEN - 1
++ ? ' ':':');// length: 29
++ }
++ printk("proto = 0x%04x", ntohs(((*skb).mac.ethernet)->h_proto));// length: 14
++
++ if ((loginfo->bitmask & EBT_LOG_IP) && skb->mac.ethernet->h_proto == htons(ETH_P_IP)){
++ struct iphdr *iph = skb->nh.iph;
++ printk(" IP SRC=%u.%u.%u.%u IP DST=%u.%u.%u.%u,", NIPQUAD(iph->saddr), NIPQUAD(iph->daddr));// max length: 46
++ printk(" IP tos=0x%02X, IP proto=%d", iph->tos, iph->protocol);// max length: 26
++ }
++
++ if ((loginfo->bitmask & EBT_LOG_ARP) &&
++ ((skb->mac.ethernet->h_proto == __constant_htons(ETH_P_ARP)) ||
++ (skb->mac.ethernet->h_proto == __constant_htons(ETH_P_RARP)))) {
++ struct arphdr * arph = skb->nh.arph;
++ printk(" ARP HTYPE=%d, PTYPE=0x%04x, OPCODE=%d",
++ ntohs(arph->ar_hrd), ntohs(arph->ar_pro), ntohs(arph->ar_op));// max length: 40
++ }
++ printk("\n");
++ spin_unlock_bh(&ebt_log_lock);
++}
++
++struct ebt_watcher log =
++{
++ {NULL, NULL}, EBT_LOG_WATCHER, ebt_log, ebt_log_check, NULL, THIS_MODULE
++};
++
++static int __init init(void)
++{
++ return ebt_register_watcher(&log);
++}
++
++static void __exit fini(void)
++{
++ ebt_unregister_watcher(&log);
++}
++
++module_init(init);
++module_exit(fini);
++EXPORT_NO_SYMBOLS;
++MODULE_LICENSE("GPL");
+--- /dev/null Thu Aug 24 11:00:32 2000
++++ ebt2.0pre1/net/bridge/netfilter/ebt_nat.c Wed Apr 3 18:48:47 2002
+@@ -0,0 +1,114 @@
++/*
++ * ebt_nat
++ *
++ * Authors:
++ * Bart De Schuymer <bart.de.schuymer@pandora.be>
++ *
++ * April, 2002
++ *
++ */
++
++#include <linux/netfilter_bridge/ebtables.h>
++#include <linux/netfilter_bridge/ebt_nat.h>
++#include <linux/netfilter_bridge.h>
++#include <linux/skbuff.h>
++#include <linux/module.h>
++#include <net/sock.h>
++
++__u8 ebt_target_snat(struct sk_buff **pskb, unsigned int hooknr,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *data, unsigned int datalen)
++{
++ struct ebt_nat_info *infostuff = (struct ebt_nat_info *) data;
++
++ if (skb_cloned(*pskb)) {
++ struct sk_buff *nskb = skb_copy(*pskb, GFP_ATOMIC);
++
++ if (!nskb)
++ return EBT_DROP;
++ if ((*pskb)->sk)
++ skb_set_owner_w(nskb, (*pskb)->sk);
++ kfree_skb(*pskb);
++ *pskb = nskb;
++ }
++ memcpy(((**pskb).mac.ethernet)->h_source, infostuff->mac, ETH_ALEN * sizeof(unsigned char));
++ return EBT_ACCEPT;
++}
++
++__u8 ebt_target_dnat(struct sk_buff **pskb, unsigned int hooknr,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *data, unsigned int datalen)
++{
++ struct ebt_nat_info *infostuff = (struct ebt_nat_info *) data;
++
++ if (skb_cloned(*pskb)) {
++ struct sk_buff *nskb = skb_copy(*pskb, GFP_ATOMIC);
++
++ if (!nskb)
++ return EBT_DROP;
++ if ((*pskb)->sk)
++ skb_set_owner_w(nskb, (*pskb)->sk);
++ kfree_skb(*pskb);
++ *pskb = nskb;
++ }
++ memcpy(((**pskb).mac.ethernet)->h_dest, infostuff->mac, ETH_ALEN * sizeof(unsigned char));
++ return EBT_ACCEPT;
++}
++
++int ebt_target_snat_check(const char *tablename, unsigned int hooknr, const struct ebt_entry *e, void *data, unsigned int datalen)
++{
++ if (strcmp(tablename, "nat"))
++ return -EINVAL;
++ if (datalen != sizeof(struct ebt_nat_info))
++ return -EINVAL;
++ if (hooknr != NF_BR_POST_ROUTING)
++ return -EINVAL;
++ return 0;
++}
++
++int ebt_target_dnat_check(const char *tablename, unsigned int hooknr, const struct ebt_entry *e, void *data, unsigned int datalen)
++{
++ if (strcmp(tablename, "nat"))
++ return -EINVAL;
++ if (datalen != sizeof(struct ebt_nat_info))
++ return -EINVAL;
++ if (hooknr != NF_BR_PRE_ROUTING && hooknr != NF_BR_LOCAL_OUT)
++ return -EINVAL;
++ return 0;
++}
++
++struct ebt_target snat =
++{
++ {NULL, NULL}, EBT_SNAT_TARGET, ebt_target_snat, ebt_target_snat_check, NULL, THIS_MODULE
++};
++
++struct ebt_target dnat =
++{
++ {NULL, NULL}, EBT_DNAT_TARGET, ebt_target_dnat, ebt_target_dnat_check, NULL, THIS_MODULE
++};
++
++static int __init init(void)
++{
++ int ret;
++ ret = ebt_register_target(&snat);
++ if (ret != 0)
++ return ret;
++ ret = ebt_register_target(&dnat);
++ if (ret == 0)
++ return 0;
++ ebt_unregister_target(&snat);
++ return ret;
++}
++
++static void __exit fini(void)
++{
++ ebt_unregister_target(&snat);
++ ebt_unregister_target(&dnat);
++}
++
++module_init(init);
++module_exit(fini);
++EXPORT_NO_SYMBOLS;
++MODULE_LICENSE("GPL");
+--- /dev/null Thu Aug 24 11:00:32 2000
++++ ebt2.0pre1/net/bridge/netfilter/ebtables.c Wed Apr 3 18:48:47 2002
+@@ -0,0 +1,1088 @@
++/*
++ * ebtables
++ *
++ * Author:
++ * Bart De Schuymer <bart.de.schuymer@pandora.be>
++ *
++ * ebtables.c,v 2.0, April, 2002
++ *
++ * This code is strongly inspired by the iptables code, which is
++ * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++/* used for print_string */
++#include <linux/sched.h>
++#include <linux/tty.h>
++
++#include <linux/module.h>
++#include <linux/vmalloc.h>
++#include <linux/skbuff.h>
++#include <linux/if_ether.h>
++#include <linux/netfilter_bridge.h>
++#include <linux/netfilter_ipv4.h>
++#include <linux/netfilter_bridge/ebtables.h>
++#include <linux/spinlock.h>
++#include <asm/uaccess.h>
++#include <linux/smp.h>
++#include <net/sock.h>
++#define ASSERT_READ_LOCK(x)
++#define ASSERT_WRITE_LOCK(x)
++#include <linux/netfilter_ipv4/listhelp.h> /* list_named_find */
++
++#if 0 // use this for remote debugging
++#define BUGPRINT(args) print_string(args);
++#else
++#define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please report to author: "format, ## args)
++/*#define BUGPRINT(format, args...)*/
++#endif
++#define MEMPRINT(format, args...) printk("kernel msg: ebtables : out of memory: "format, ## args)
++/*#define MEMPRINT(format, args...)*/
++static void print_string(char *str);
++
++static DECLARE_MUTEX(ebt_mutex);
++static LIST_HEAD(ebt_tables);
++static LIST_HEAD(ebt_targets);
++static LIST_HEAD(ebt_matches);
++static LIST_HEAD(ebt_watchers);
++
++static struct ebt_target ebt_standard_target =
++{ {NULL, NULL}, EBT_STANDARD_TARGET, NULL, NULL, NULL, NULL};
++
++static inline int ebt_do_watcher (struct ebt_entry_watcher *w,
++ const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const struct ebt_counter *c)
++{
++ w->u.watcher->watcher(skb, in, out, w->data, w->watcher_size - sizeof(struct ebt_entry_watcher), c);
++ // watchers don't give a verdict
++ return 0;
++}
++
++static inline int ebt_do_match (struct ebt_entry_match *m,
++ const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const struct ebt_counter *c)
++{
++ return m->u.match->match(skb, in, out, m->data, m->match_size - sizeof(struct ebt_entry_match), c);
++}
++
++static inline int ebt_dev_check(char *entry, const struct net_device *device)
++{
++ if (*entry == '\0') return 0;
++ if (!device) return 1;
++ return strncmp(entry, device->name, IFNAMSIZ);
++}
++
++// Do some firewalling
++unsigned int ebt_do_table (unsigned int hook, struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ struct ebt_table *table)
++{
++ int i, nentries;
++ struct ebt_entry *point;
++ struct ebt_counter *counter_base;
++ struct ebt_entry_target *t;
++ __u8 verdict;
++
++ read_lock_bh(&table->lock);
++ nentries = table->private->hook_entry[hook]->nentries;
++ point = (struct ebt_entry *)(table->private->hook_entry[hook]->data);
++ counter_base = table->private->counters + cpu_number_map(smp_processor_id()) * table->private->nentries + table->private->counter_entry[hook];
++ #define FWINV(bool,invflg) ((bool) ^ !!(point->invflags & invflg))
++ for (i = 0; i < nentries; i++) {
++ if ( ( point->bitmask & EBT_NOPROTO ||
++ FWINV(point->ethproto == ((**pskb).mac.ethernet)->h_proto, EBT_IPROTO)
++ || FWINV(ntohs(((**pskb).mac.ethernet)->h_proto) < 1536 && (point->bitmask & EBT_802_3), EBT_IPROTO) )
++ && FWINV(!ebt_dev_check((char *)(point->in), in), EBT_IIN)
++ && FWINV(!ebt_dev_check((char *)(point->out), out), EBT_IOUT) ){
++
++ if ( (point->bitmask & EBT_SOURCEMAC) &&
++ FWINV(!!memcmp(point->sourcemac, ((**pskb).mac.ethernet)->h_source, ETH_ALEN), EBT_ISOURCE) )
++ goto letscontinue;
++
++ if ( (point->bitmask & EBT_DESTMAC) &&
++ FWINV(!!memcmp(point->destmac, ((**pskb).mac.ethernet)->h_dest, ETH_ALEN), EBT_IDEST) )
++ goto letscontinue;
++
++ if (EBT_MATCH_ITERATE(point, ebt_do_match, *pskb, in, out, counter_base + i) != 0)
++ goto letscontinue;
++
++ // increase counter
++ (*(counter_base + i)).pcnt++;
++
++ // these should only watch: not modify, nor tell us what to do with the packet
++ EBT_WATCHER_ITERATE(point, ebt_do_watcher, *pskb, in, out, counter_base + i);
++
++ t = (struct ebt_entry_target *) (((char *)point) + point->target_offset);
++ // standard target
++ if (!t->u.target->target)
++ verdict = ((struct ebt_standard_target *)t)->verdict;
++ else
++ verdict = t->u.target->target(pskb, hook, in, out, t->data, t->target_size);
++ if (verdict == EBT_ACCEPT) {
++ read_unlock_bh(&table->lock);
++ return NF_ACCEPT;
++ }
++ if (verdict == EBT_DROP) {
++ read_unlock_bh(&table->lock);
++ return NF_DROP;
++ }
++ if (verdict != EBT_CONTINUE) {
++ read_unlock_bh(&table->lock);
++ BUGPRINT("Illegal target while firewalling!!\n");
++ // Try not to get oopsen
++ return NF_DROP;
++ }
++ }
++letscontinue:
++ point = (struct ebt_entry *)(((char *)point) + point->next_offset);
++ }
++
++ if ( table->private->hook_entry[hook]->policy == EBT_ACCEPT ) {
++ read_unlock_bh(&table->lock);
++ return NF_ACCEPT;
++ }
++ read_unlock_bh(&table->lock);
++ return NF_DROP;
++}
++
++static inline int
++ebt_check_match(struct ebt_entry_match *m, struct ebt_entry *e, const char *name, unsigned int hook, unsigned int *cnt)
++{
++ struct ebt_match *match;
++ int ret;
++
++ m->u.name[EBT_FUNCTION_MAXNAMELEN - 1] = '\0';
++ ret = down_interruptible(&ebt_mutex);
++ if (ret != 0)
++ return -EFAULT;
++ if (!(match = (struct ebt_match *)list_named_find(&ebt_matches, m->u.name))) {
++ BUGPRINT("match does not exist: %s\n", m->u.name);
++ up(&ebt_mutex);
++ return -ENOENT;
++ }
++ m->u.match = match;
++ if (match->check &&
++ match->check(name, hook, e, m->data, m->match_size - sizeof(*m)) != 0) {
++ BUGPRINT("match->check failed\n");
++ up(&ebt_mutex);
++ return -EINVAL;
++ }
++ if (match->me)
++ __MOD_INC_USE_COUNT(match->me);
++ up(&ebt_mutex);
++ (*cnt)++;
++ return 0;
++}
++
++static inline int
++ebt_check_watcher(struct ebt_entry_watcher *w, struct ebt_entry *e, const char *name, unsigned int hook, unsigned int *cnt)
++{
++ struct ebt_watcher *watcher;
++ int ret;
++
++ ret = down_interruptible(&ebt_mutex);
++ if (ret != 0)
++ return -EFAULT;
++ w->u.name[EBT_FUNCTION_MAXNAMELEN - 1] = '\0';
++ if (!(watcher = (struct ebt_watcher *)list_named_find(&ebt_watchers, w->u.name))) {
++ BUGPRINT("watcher does not exist: %s\n", w->u.name);
++ up(&ebt_mutex);
++ return -ENOENT;
++ }
++ w->u.watcher = watcher;
++ if (watcher->check &&
++ watcher->check(name, hook, e, w->data, w->watcher_size - sizeof(*w)) != 0) {
++ BUGPRINT("watcher->check failed\n");
++ up(&ebt_mutex);
++ return -EINVAL;
++ }
++ if (watcher->me)
++ __MOD_INC_USE_COUNT(watcher->me);
++ up(&ebt_mutex);
++ (*cnt)++;
++ return 0;
++}
++
++// this one is very careful, as it is the first function to parse the userspace data
++static inline int
++ebt_check_entry_size_and_hooks(struct ebt_entry *e, struct ebt_table_info *newinfo,
++ char *base, char *limit, struct ebt_entries **hook_entries, unsigned int *n,
++ unsigned int *cnt, unsigned int *totalcnt, unsigned int valid_hooks)
++{
++ int i;
++
++ for (i = 0; i < NF_BR_NUMHOOKS; i++) {
++ if ((valid_hooks & (1 << i)) == 0)
++ continue;
++ if ( (char *)hook_entries[i] - base == (char *)e - newinfo->entries)
++ break;
++ }
++ // beginning of a new chain
++ if (i != NF_BR_NUMHOOKS) {
++ if ((e->bitmask & EBT_ENTRY_OR_ENTRIES) != 0) {
++ // we make userspace set this right, so there is no misunderstanding
++ BUGPRINT("EBT_ENTRY_OR_ENTRIES shouldn't be set in distinguisher\n");
++ return -EINVAL;
++ }
++ // this checks if the previous chain has as many entries as it said it has
++ if (*n != *cnt) {
++ BUGPRINT("nentries does not equal the nr of entries in the chain\n");
++ return -EINVAL;
++ }
++ // before we look at the struct, be sure it is not too big
++ if ((char *)hook_entries[i] + sizeof(struct ebt_entries) > limit) {
++ BUGPRINT("entries_size too small\n");
++ return -EINVAL;
++ }
++ if (((struct ebt_entries *)e)->policy != EBT_DROP && ((struct ebt_entries *)e)->policy != EBT_ACCEPT) {
++ BUGPRINT("bad policy\n");
++ return -EINVAL;
++ }
++ *n = ((struct ebt_entries *)e)->nentries;
++ *cnt = 0;
++ newinfo->hook_entry[i] = (struct ebt_entries *)e;
++ newinfo->counter_entry[i] = *totalcnt;
++ return 0;
++ }
++ // a plain old entry, heh
++ if (sizeof(struct ebt_entry) > e->watchers_offset || e->watchers_offset > e->target_offset || e->target_offset > e->next_offset) {
++ BUGPRINT("entry offsets not in right order\n");
++ return -EINVAL;
++ }
++ if (((char *)e) + e->next_offset - newinfo->entries > limit - base) {
++ BUGPRINT("entry offsets point too far\n");
++ return -EINVAL;
++ }
++
++ if ((e->bitmask & EBT_ENTRY_OR_ENTRIES) == 0) {
++ BUGPRINT("EBT_ENTRY_OR_ENTRIES should be set in bitmask for an entry\n");
++ return -EINVAL;
++ }
++ (*cnt)++;
++ (*totalcnt)++;
++ return 0;
++}
++
++static inline int
++ebt_cleanup_match(struct ebt_entry_match *m, unsigned int *i)
++{
++ if (i && (*i)-- == 0)
++ return 1;
++ if (m->u.match->destroy)
++ m->u.match->destroy(m->data, m->match_size - sizeof(*m));
++ if (m->u.match->me)
++ __MOD_DEC_USE_COUNT(m->u.match->me);
++
++ return 0;
++}
++
++static inline int
++ebt_cleanup_watcher(struct ebt_entry_watcher *w, unsigned int *i)
++{
++ if (i && (*i)-- == 0)
++ return 1;
++ if (w->u.watcher->destroy)
++ w->u.watcher->destroy(w->data, w->watcher_size - sizeof(*w));
++ if (w->u.watcher->me)
++ __MOD_DEC_USE_COUNT(w->u.watcher->me);
++
++ return 0;
++}
++
++static inline int
++ebt_check_entry(struct ebt_entry *e, struct ebt_table_info *newinfo, const char *name, unsigned int *cnt, unsigned int valid_hooks)
++{
++ struct ebt_entry_target *t;
++ struct ebt_target *target;
++ unsigned int i, j, hook = 0;
++ int ret;
++
++ // Don't mess with the struct ebt_entries
++ if ((e->bitmask & EBT_ENTRY_OR_ENTRIES) == 0)
++ return 0;
++
++ if (e->bitmask & ~EBT_F_MASK) {
++ BUGPRINT("Unknown flag for bitmask\n");
++ return -EINVAL;
++ }
++ if (e->invflags & ~EBT_INV_MASK) {
++ BUGPRINT("Unknown flag for inv bitmask\n");
++ return -EINVAL;
++ }
++ if ( (e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3) ) {
++ BUGPRINT("NOPROTO & 802_3 not allowed\n");
++ return -EINVAL;
++ }
++ // what hook do we belong to?
++ for (i = 0; i < NF_BR_NUMHOOKS; i++) {
++ if ((valid_hooks & (1 << i)) == 0)
++ continue;
++ if ((char *)newinfo->hook_entry[i] < (char *)e)
++ hook = i;
++ else
++ break;
++ }
++ i = 0;
++ ret = EBT_MATCH_ITERATE(e, ebt_check_match, e, name, hook, &i);
++ if (ret != 0)
++ goto cleanup_matches;
++ j = 0;
++ ret = EBT_WATCHER_ITERATE(e, ebt_check_watcher, e, name, hook, &j);
++ if (ret != 0)
++ goto cleanup_watchers;
++ t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
++ ret = down_interruptible(&ebt_mutex);
++ if (ret != 0)
++ goto cleanup_watchers;
++ t->u.name[EBT_FUNCTION_MAXNAMELEN - 1] = '\0';
++ if (!(target = (struct ebt_target *)list_named_find(&ebt_targets, t->u.name))) {
++ BUGPRINT("Target does not exist: %s\n", t->u.name);
++ ret = -ENOENT;
++ up(&ebt_mutex);
++ goto cleanup_watchers;
++ }
++ if (target->me)
++ __MOD_INC_USE_COUNT(target->me);
++ up(&ebt_mutex);
++
++ t->u.target = target;
++ if (t->u.target == &ebt_standard_target) {
++ if (e->target_offset + sizeof(struct ebt_standard_target) > e->next_offset) {
++ BUGPRINT("Standard target size too big\n");
++ ret = -EFAULT;
++ goto cleanup_watchers;
++ }
++ if (((struct ebt_standard_target *)t)->verdict >= NUM_STANDARD_TARGETS) {
++ BUGPRINT("Invalid standard target\n");
++ ret = -EFAULT;
++ goto cleanup_watchers;
++ }
++ } else if (t->u.target->check &&
++ t->u.target->check(name, hook, e, t->data, t->target_size - sizeof(*t)) != 0) {
++ if (t->u.target->me)
++ __MOD_DEC_USE_COUNT(t->u.target->me);
++ ret = -EFAULT;
++ goto cleanup_watchers;
++ }
++ (*cnt)++;
++ return 0;
++cleanup_watchers:
++ EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, &j);
++cleanup_matches:
++ EBT_MATCH_ITERATE(e, ebt_cleanup_match, &i);
++ return ret;
++}
++
++static inline int
++ebt_cleanup_entry(struct ebt_entry *e, unsigned int *cnt)
++{
++ struct ebt_entry_target *t;
++
++ if (e->bitmask == 0)
++ return 0;
++ // we're done
++ if (cnt && (*cnt)-- == 0)
++ return 1;
++ EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, NULL);
++ EBT_MATCH_ITERATE(e, ebt_cleanup_match, NULL);
++ t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
++ if (t->u.target->destroy)
++ t->u.target->destroy(t->data, t->target_size - sizeof(*t));
++ if (t->u.target->me)
++ __MOD_DEC_USE_COUNT(t->u.target->me);
++
++ return 0;
++}
++
++// do the parsing of the table/chains/entries/matches/watchers/targets, heh
++static int translate_table(struct ebt_replace *repl,
++ struct ebt_table_info *newinfo)
++{
++ unsigned int i, j, k;
++ int ret;
++
++ i = 0;
++ while (i < NF_BR_NUMHOOKS && !(repl->valid_hooks & (1 << i)))
++ i++;
++ if (i == NF_BR_NUMHOOKS) {
++ BUGPRINT("No valid hooks specified\n");
++ return -EINVAL;
++ }
++ if (repl->hook_entry[i] != (struct ebt_entries *)repl->entries) {
++ BUGPRINT("Chains don't start at beginning\n");
++ return -EINVAL;
++ }
++ // make sure chains are ordered after each other in same order as their corresponding hooks
++ for (j = i + 1; j < NF_BR_NUMHOOKS; j++) {
++ if (!(repl->valid_hooks & (1 << j)))
++ continue;
++ if ( repl->hook_entry[j] <= repl->hook_entry[i] ) {
++ BUGPRINT("Hook order must be followed\n");
++ return -EINVAL;
++ }
++ i = j;
++ }
++
++ for (i = 0; i < NF_BR_NUMHOOKS; i++) {
++ newinfo->hook_entry[i] = NULL;
++ newinfo->counter_entry[i] = 0;
++ }
++
++ newinfo->entries_size = repl->entries_size;
++ newinfo->nentries = repl->nentries;
++
++ // do some early checkings and initialize some things
++ i = 0; // holds the expected nr. of entries for the chain
++ j = 0; // holds the up to now counted entries for the chain
++ k = 0; // holds the total nr. of entries, should equal newinfo->nentries afterwards
++ ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, ebt_check_entry_size_and_hooks, newinfo, repl->entries,
++ repl->entries + repl->entries_size, repl->hook_entry, &i, &j, &k, repl->valid_hooks);
++
++ if (ret != 0)
++ return ret;
++
++ if (i != j) {
++ BUGPRINT("nentries does not equal the nr of entries in the (last) chain\n");
++ return -EINVAL;
++ }
++ if (k != newinfo->nentries) {
++ BUGPRINT("Total nentries is wrong\n");
++ return -EINVAL;
++ }
++
++ // check if all valid hooks have a chain
++ for (i = 0; i < NF_BR_NUMHOOKS; i++) {
++ if (newinfo->hook_entry[i] == NULL && (repl->valid_hooks & (1 << i))){
++ BUGPRINT("Valid hook without chain\n");
++ return -EINVAL;
++ }
++ }
++
++ // we just don't trust anything
++ repl->name[EBT_TABLE_MAXNAMELEN - 1] = '\0';
++ i = 0; // used to know what we need to clean up if something goes wrong
++ ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, ebt_check_entry, newinfo, repl->name, &i, repl->valid_hooks);
++ if (ret != 0) {
++ BUGPRINT("ebt_check_entry gave fault back\n");
++ EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, ebt_cleanup_entry, &i);
++ }
++ return ret;
++}
++
++// called under the write_lock
++void get_counters(struct ebt_table_info *info, struct ebt_counter *counters)
++{
++ int i, cpu, counter_base;
++
++ // counters of cpu 0
++ memcpy(counters, info->counters, sizeof(struct ebt_counter) * info->nentries);
++ // add other counters to those of cpu 0
++ for (cpu = 1; cpu < smp_num_cpus; cpu++) {
++ counter_base = cpu * info->nentries;
++ for (i = 0; i < info->nentries; i++)
++ counters[i].pcnt += info->counters[counter_base + i].pcnt;
++ }
++}
++
++// replace the table
++static int do_replace(void *user, unsigned int len)
++{
++ int ret;
++ struct ebt_table_info *newinfo;
++ struct ebt_replace tmp;
++ struct ebt_table *t;
++ struct ebt_counter *counterstmp = NULL;
++ // used to be able to unlock earlier
++ struct ebt_table_info *table;
++
++ if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
++ return -EFAULT;
++
++ if (len != sizeof(tmp) + tmp.entries_size) {
++ BUGPRINT("Wrong len argument\n");
++ return -EINVAL;
++ }
++
++ if (tmp.entries_size == 0) {
++ BUGPRINT("Entries_size never zero\n");
++ return -EINVAL;
++ }
++ newinfo = (struct ebt_table_info *)vmalloc(sizeof(struct ebt_table_info));
++ if (!newinfo)
++ return -ENOMEM;
++
++ if (tmp.nentries) {
++ newinfo->counters = (struct ebt_counter *)vmalloc(sizeof(struct ebt_counter) * tmp.nentries * smp_num_cpus);
++ if (!newinfo->counters) {
++ ret = -ENOMEM;
++ goto free_newinfo;
++ }
++ memset(newinfo->counters, 0, sizeof(struct ebt_counter) * tmp.nentries * smp_num_cpus);
++ }
++ else
++ newinfo->counters = NULL;
++
++ newinfo->entries = (char *)vmalloc(tmp.entries_size);
++ if (!newinfo->entries) {
++ ret = -ENOMEM;
++ goto free_counters;
++ }
++ if (copy_from_user(newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
++ BUGPRINT("Couldn't copy entries from userspace\n");
++ ret = -EFAULT;
++ goto free_entries;
++ }
++
++ // the user wants counters back
++ // the check on the size is done later, when we have the lock
++ if (tmp.num_counters) {
++ counterstmp = (struct ebt_counter *)vmalloc(tmp.num_counters * sizeof(struct ebt_counter));
++ if (!counterstmp) {
++ ret = -ENOMEM;
++ goto free_entries;
++ }
++ }
++ else
++ counterstmp = NULL;
++
++ ret = translate_table(&tmp, newinfo);
++
++ if (ret != 0)
++ goto free_counterstmp;
++
++ ret = down_interruptible(&ebt_mutex);
++
++ if (ret != 0)
++ goto free_cleanup;
++
++ if (!(t = (struct ebt_table *)list_named_find(&ebt_tables, tmp.name))) {
++ ret = -ENOENT;
++ // give some help to the poor user
++ print_string("The table is not present, try insmod\n");
++ goto free_unlock;
++ }
++
++ // the table doesn't like it
++ if (t->check && (ret = t->check(newinfo, tmp.valid_hooks)))
++ goto free_unlock;
++
++ if (tmp.num_counters && tmp.num_counters != t->private->nentries) {
++ BUGPRINT("Wrong nr. of counters requested\n");
++ ret = -EINVAL;
++ goto free_unlock;
++ }
++
++ // we have the mutex lock, so no danger in reading this pointer
++ table = t->private;
++ // we need an atomic snapshot of the counters
++ write_lock_bh(&t->lock);
++ if (tmp.num_counters)
++ get_counters(t->private, counterstmp);
++
++ t->private = newinfo;
++ write_unlock_bh(&t->lock);
++ up(&ebt_mutex);
++	// So a user can replace the chains even with a messed-up counter allocation.
++	// The only reason for doing it this way is that the lock then has to be held
++	// only once, and it does not put the kernel in a dangerous state.
++ if (tmp.num_counters &&
++ copy_to_user(tmp.counters, counterstmp, tmp.num_counters * sizeof(struct ebt_counter))) {
++ BUGPRINT("Couldn't copy counters to userspace\n");
++ ret = -EFAULT;
++ }
++ else
++ ret = 0;
++
++ // decrease module count and free resources
++ EBT_ENTRY_ITERATE(table->entries, table->entries_size, ebt_cleanup_entry, NULL);
++ vfree(table->entries);
++
++ if (table->counters)
++ vfree(table->counters);
++ vfree(table);
++
++ if (counterstmp)
++ vfree(counterstmp);
++ return ret;
++
++free_unlock:
++ up(&ebt_mutex);
++free_cleanup:
++ EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, ebt_cleanup_entry, NULL);
++free_counterstmp:
++ if (counterstmp)
++ vfree(counterstmp);
++free_entries:
++ if (newinfo->entries)
++ vfree(newinfo->entries);
++free_counters:
++ if (newinfo->counters)
++ vfree(newinfo->counters);
++free_newinfo:
++ if (newinfo)
++ vfree(newinfo);
++ return ret;
++}
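For context, the userspace half of EBT_SO_SET_ENTRIES is a single setsockopt() on an ordinary PF_INET socket; a hedged sketch, assuming the caller has already built a valid struct ebt_replace plus entries blob (socket choice and helper name are illustrative, not part of the patch):

#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/netfilter_bridge/ebtables.h>

/* do_replace() insists that optlen == sizeof(*repl) + repl->entries_size;
 * fd could come from socket(AF_INET, SOCK_RAW, IPPROTO_RAW), which needs root. */
static int push_table(int fd, struct ebt_replace *repl)
{
	return setsockopt(fd, IPPROTO_IP, EBT_SO_SET_ENTRIES, repl,
			  sizeof(*repl) + repl->entries_size);
}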
++
++int ebt_register_target(struct ebt_target *target)
++{
++ int ret;
++
++ ret = down_interruptible(&ebt_mutex);
++ if (ret != 0)
++ return ret;
++ if (!list_named_insert(&ebt_targets, target)) {
++ up(&ebt_mutex);
++ return -EEXIST;
++ }
++ up(&ebt_mutex);
++ MOD_INC_USE_COUNT;
++
++ return 0;
++}
++
++void ebt_unregister_target(struct ebt_target *target)
++{
++ down(&ebt_mutex);
++ LIST_DELETE(&ebt_targets, target);
++ up(&ebt_mutex);
++ MOD_DEC_USE_COUNT;
++}
++
++int ebt_register_match(struct ebt_match *match)
++{
++ int ret;
++
++ ret = down_interruptible(&ebt_mutex);
++ if (ret != 0)
++ return ret;
++ if (!list_named_insert(&ebt_matches, match)) {
++ up(&ebt_mutex);
++ return -EEXIST;
++ }
++ up(&ebt_mutex);
++ MOD_INC_USE_COUNT;
++
++ return 0;
++}
++
++void ebt_unregister_match(struct ebt_match *match)
++{
++ down(&ebt_mutex);
++ LIST_DELETE(&ebt_matches, match);
++ up(&ebt_mutex);
++ MOD_DEC_USE_COUNT;
++}
++
++int ebt_register_watcher(struct ebt_watcher *watcher)
++{
++ int ret;
++
++ ret = down_interruptible(&ebt_mutex);
++ if (ret != 0)
++ return ret;
++ if (!list_named_insert(&ebt_watchers, watcher)) {
++ up(&ebt_mutex);
++ return -EEXIST;
++ }
++ up(&ebt_mutex);
++ MOD_INC_USE_COUNT;
++
++ return 0;
++}
++
++void ebt_unregister_watcher(struct ebt_watcher *watcher)
++{
++ down(&ebt_mutex);
++ LIST_DELETE(&ebt_watchers, watcher);
++ up(&ebt_mutex);
++ MOD_DEC_USE_COUNT;
++}
++
++int ebt_register_table(struct ebt_table *table)
++{
++ struct ebt_table_info *newinfo;
++ int ret;
++
++	if (!table || !table->table || !table->table->entries ||
++ table->table->entries_size == 0 ||
++ table->table->counters || table->private) {
++ BUGPRINT("Bad table data for ebt_register_table!!!\n");
++ return -EINVAL;
++ }
++
++ newinfo = (struct ebt_table_info *)vmalloc(sizeof(struct ebt_table_info));
++ ret = -ENOMEM;
++ if (!newinfo)
++ return -ENOMEM;
++
++ newinfo->entries = (char *)vmalloc(table->table->entries_size);
++ if (!(newinfo->entries))
++ goto free_newinfo;
++
++ memcpy(newinfo->entries, table->table->entries, table->table->entries_size);
++
++ if (table->table->nentries) {
++ newinfo->counters = (struct ebt_counter *)vmalloc(table->table->nentries * sizeof(struct ebt_counter) * smp_num_cpus);
++ if (!newinfo->counters)
++ goto free_entries;
++ memset(newinfo->counters, 0, table->table->nentries * sizeof(struct ebt_counter) * smp_num_cpus);
++ }
++ else
++ newinfo->counters = NULL;
++
++ // fill in newinfo and parse the entries
++ ret = translate_table(table->table, newinfo);
++ if (ret != 0) {
++ BUGPRINT("Translate_table failed\n");
++ goto free_counters;
++ }
++
++	if (table->check && table->check(newinfo, table->valid_hooks)) {
++		BUGPRINT("The table doesn't like its own initial data\n");
++		ret = -EINVAL;
++		goto free_counters;
++	}
++
++ ret = down_interruptible(&ebt_mutex);
++ if (ret != 0)
++ goto free_counters;
++
++ if (list_named_find(&ebt_tables, table->name)) {
++ ret = -EEXIST;
++ BUGPRINT("Table name already exists\n");
++ goto free_unlock;
++ }
++
++ table->private = newinfo;
++ table->lock = RW_LOCK_UNLOCKED;
++ list_prepend(&ebt_tables, table);
++ up(&ebt_mutex);
++ MOD_INC_USE_COUNT;
++ return 0;
++free_unlock:
++ up(&ebt_mutex);
++free_counters:
++ if (newinfo->counters)
++ vfree(newinfo->counters);
++free_entries:
++ vfree(newinfo->entries);
++free_newinfo:
++ vfree(newinfo);
++ return ret;
++}
++
++void ebt_unregister_table(struct ebt_table *table)
++{
++ if (!table) {
++ BUGPRINT("Request to unregister NULL table!!!\n");
++ return;
++ }
++ down(&ebt_mutex);
++ LIST_DELETE(&ebt_tables, table);
++ up(&ebt_mutex);
++ EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size, ebt_cleanup_entry, NULL);
++ if (table->private->counters)
++ vfree(table->private->counters);
++ if (table->private->entries)
++ vfree(table->private->entries);
++ vfree(table->private);
++ MOD_DEC_USE_COUNT;
++}
++
++/* userspace just supplied us with counters */
++static int update_counters(void *user, unsigned int len)
++{
++ int i, ret;
++ struct ebt_counter *tmp;
++ struct ebt_replace hlp;
++ struct ebt_table *t;
++
++ if (copy_from_user(&hlp, user, sizeof(hlp)))
++ return -EFAULT;
++
++ if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
++ return -EINVAL;
++ if (hlp.num_counters == 0)
++ return -EINVAL;
++
++ if ( !(tmp = (struct ebt_counter *)vmalloc(hlp.num_counters * sizeof(struct ebt_counter))) ){
++		MEMPRINT("Update_counters && nomemory\n");
++ return -ENOMEM;
++ }
++
++ hlp.name[EBT_TABLE_MAXNAMELEN - 1] = '\0';
++ ret = down_interruptible(&ebt_mutex);
++ if (ret != 0)
++ goto free_tmp;
++
++ if (!(t = (struct ebt_table *)list_named_find(&ebt_tables, hlp.name))) {
++ BUGPRINT("Table not found for update_counters\n");
++ ret = -EINVAL;
++ goto unlock_mutex;
++ }
++
++ if (hlp.num_counters != t->private->nentries) {
++ BUGPRINT("Wrong nr of counters\n");
++ ret = -EINVAL;
++ goto unlock_mutex;
++ }
++
++ if ( copy_from_user(tmp, hlp.counters, hlp.num_counters * sizeof(struct ebt_counter)) ) {
++		BUGPRINT("Update_counters && !cfu\n");
++ ret = -EFAULT;
++ goto unlock_mutex;
++ }
++
++ // we want an atomic add of the counters
++ write_lock_bh(&t->lock);
++
++ // we add to the counters of the first cpu
++ for (i = 0; i < hlp.num_counters; i++)
++ t->private->counters[i].pcnt += tmp[i].pcnt;
++
++ write_unlock_bh(&t->lock);
++ ret = 0;
++unlock_mutex:
++ up(&ebt_mutex);
++free_tmp:
++ vfree(tmp);
++ return ret;
++}
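The matching userspace side of EBT_SO_SET_COUNTERS only needs name, num_counters and counters filled in, since those are the fields update_counters() reads; a hedged sketch (helper name illustrative, not part of the patch):

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/netfilter_bridge/ebtables.h>

/* num_counters must equal the table's nentries, and optlen must be
 * sizeof(hlp) + n * sizeof(struct ebt_counter), as checked above. */
static int add_counters(int fd, const char *table,
                        struct ebt_counter *deltas, unsigned int n)
{
	struct ebt_replace hlp;

	memset(&hlp, 0, sizeof(hlp));
	strncpy(hlp.name, table, EBT_TABLE_MAXNAMELEN - 1);
	hlp.num_counters = n;
	hlp.counters = deltas;
	return setsockopt(fd, IPPROTO_IP, EBT_SO_SET_COUNTERS, &hlp,
			  sizeof(hlp) + n * sizeof(struct ebt_counter));
}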
++
++static inline int ebt_make_matchname(struct ebt_entry_match *m, char *base, char *ubase)
++{
++ char *hlp = ubase - base + (char *)m;
++ if (copy_to_user(hlp, m->u.match->name, EBT_FUNCTION_MAXNAMELEN))
++ return -EFAULT;
++ return 0;
++}
++
++static inline int ebt_make_watchername(struct ebt_entry_watcher *w, char *base, char *ubase)
++{
++ char *hlp = ubase - base + (char *)w;
++ if (copy_to_user(hlp , w->u.watcher->name, EBT_FUNCTION_MAXNAMELEN))
++ return -EFAULT;
++ return 0;
++}
++
++static inline int ebt_make_names(struct ebt_entry *e, char *base, char *ubase)
++{
++ int ret;
++ char *hlp = ubase - base + (char *)e + e->target_offset;
++ struct ebt_entry_target *t;
++
++ if ((e->bitmask & EBT_ENTRY_OR_ENTRIES) == 0)
++ return 0;
++
++ t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
++
++ ret = EBT_MATCH_ITERATE(e, ebt_make_matchname, base, ubase);
++ if (ret != 0)
++ return ret;
++ ret = EBT_WATCHER_ITERATE(e, ebt_make_watchername, base, ubase);
++ if (ret != 0)
++ return ret;
++ if (copy_to_user(hlp, t->u.target->name, EBT_FUNCTION_MAXNAMELEN))
++ return -EFAULT;
++ return 0;
++}
++
++// called with ebt_mutex down
++static int copy_everything_to_user(struct ebt_table *t, void *user, int *len)
++{
++ struct ebt_replace tmp;
++ struct ebt_table_info *info = t->private;
++ struct ebt_counter *counterstmp;
++ int i;
++
++ if (copy_from_user(&tmp, user, sizeof(tmp))) {
++ BUGPRINT("Cfu didn't work\n");
++ return -EFAULT;
++ }
++
++	if (*len != sizeof(struct ebt_replace) + info->entries_size + (tmp.num_counters ? info->nentries * sizeof(struct ebt_counter) : 0)) {
++ BUGPRINT("Wrong size\n");
++ return -EINVAL;
++ }
++
++ if (tmp.nentries != info->nentries) {
++ BUGPRINT("Nentries wrong\n");
++ return -EINVAL;
++ }
++
++ if (tmp.entries_size != info->entries_size) {
++ BUGPRINT("Wrong size\n");
++ return -EINVAL;
++ }
++
++ // userspace might not need the counters
++ if (tmp.num_counters) {
++ if (tmp.num_counters != info->nentries) {
++ BUGPRINT("Num_counters wrong\n");
++ return -EINVAL;
++ }
++ counterstmp = (struct ebt_counter *)vmalloc(info->nentries * sizeof(struct ebt_counter));
++ if (!counterstmp) {
++ BUGPRINT("Couldn't copy counters, out of memory\n");
++ return -ENOMEM;
++ }
++ write_lock_bh(&t->lock);
++ get_counters(info, counterstmp);
++ write_unlock_bh(&t->lock);
++
++ if (copy_to_user(tmp.counters, counterstmp, info->nentries * sizeof(struct ebt_counter))) {
++ BUGPRINT("Couldn't copy counters to userspace\n");
++ vfree(counterstmp);
++ return -EFAULT;
++ }
++ vfree(counterstmp);
++ }
++
++ if (copy_to_user(tmp.entries, info->entries, info->entries_size)) {
++ BUGPRINT("Couldn't copy entries to userspace\n");
++ return -EFAULT;
++ }
++ // make userspace's life easier
++ memcpy(tmp.counter_entry, info->counter_entry, NF_BR_NUMHOOKS * sizeof(int));
++ memcpy(tmp.hook_entry, info->hook_entry, NF_BR_NUMHOOKS * sizeof(struct ebt_entries *));
++ for (i = 0; i < NF_BR_NUMHOOKS; i++)
++ tmp.hook_entry[i] = (struct ebt_entries *) (((char *)(info->hook_entry[i])) - info->entries + tmp.entries);
++ if (copy_to_user(user, &tmp, sizeof(struct ebt_replace))) {
++ BUGPRINT("Couldn't copy ebt_replace to userspace\n");
++ return -EFAULT;
++ }
++ // set the match/watcher/target names right
++ return EBT_ENTRY_ITERATE(info->entries, info->entries_size, ebt_make_names, info->entries, tmp.entries);
++}
++
++static int do_ebt_set_ctl(struct sock *sk,
++ int cmd, void *user, unsigned int len)
++{
++ int ret;
++
++ switch(cmd) {
++ case EBT_SO_SET_ENTRIES:
++ ret = do_replace(user, len);
++ break;
++ case EBT_SO_SET_COUNTERS:
++ ret = update_counters(user, len);
++ break;
++ default:
++ ret = -EINVAL;
++ }
++ return ret;
++}
++
++static int do_ebt_get_ctl(struct sock *sk, int cmd, void *user, int *len)
++{
++ int ret;
++ struct ebt_replace tmp;
++ struct ebt_table *t;
++
++ if (copy_from_user(&tmp, user, sizeof(tmp)))
++ return -EFAULT;
++
++ ret = down_interruptible(&ebt_mutex);
++ if (ret != 0)
++ return ret;
++
++ if (!(t = (struct ebt_table *)list_named_find(&ebt_tables, tmp.name))) {
++ print_string("Table not found, try insmod\n");
++ up(&ebt_mutex);
++ return -EINVAL;
++ }
++
++ switch(cmd) {
++ case EBT_SO_GET_INFO:
++ if (*len != sizeof(struct ebt_replace)){
++ ret = -EINVAL;
++ up(&ebt_mutex);
++ break;
++ }
++ tmp.nentries = t->private->nentries;
++ tmp.entries_size = t->private->entries_size;
++ // userspace needs this to check the chain names
++ tmp.valid_hooks = t->valid_hooks;
++ up(&ebt_mutex);
++ if (copy_to_user(user, &tmp, *len) != 0){
++ BUGPRINT("c2u Didn't work\n");
++ ret = -EFAULT;
++ break;
++ }
++ ret = 0;
++ break;
++
++ case EBT_SO_GET_ENTRIES:
++ ret = copy_everything_to_user(t, user, len);
++ up(&ebt_mutex);
++ break;
++
++ default:
++ up(&ebt_mutex);
++ ret = -EINVAL;
++ }
++
++ return ret;
++}
++
++static struct nf_sockopt_ops ebt_sockopts
++= { { NULL, NULL }, PF_INET, EBT_BASE_CTL, EBT_SO_SET_MAX + 1, do_ebt_set_ctl,
++ EBT_BASE_CTL, EBT_SO_GET_MAX + 1, do_ebt_get_ctl, 0, NULL
++};
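A hedged sketch of the simplest read path, EBT_SO_GET_INFO: userspace passes an ebt_replace carrying only the table name and gets nentries, entries_size and valid_hooks back; *len must be exactly sizeof(struct ebt_replace), as do_ebt_get_ctl() checks above (helper name illustrative, not part of the patch):

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/netfilter_bridge/ebtables.h>

static int table_info(int fd, const char *table, struct ebt_replace *out)
{
	socklen_t len = sizeof(*out);

	memset(out, 0, sizeof(*out));
	strncpy(out->name, table, EBT_TABLE_MAXNAMELEN - 1);
	return getsockopt(fd, IPPROTO_IP, EBT_SO_GET_INFO, out, &len);
}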
++
++// Copyright (C) 1998 by Ori Pomerantz
++// Print the string to the appropriate tty, the one
++// the current task uses
++static void print_string(char *str)
++{
++ struct tty_struct *my_tty;
++
++ /* The tty for the current task */
++ my_tty = current->tty;
++ if (my_tty != NULL) {
++ (*(my_tty->driver).write)(my_tty, 0, str, strlen(str));
++ (*(my_tty->driver).write)(my_tty, 0, "\015\012", 2);
++ }
++}
++
++static int __init init(void)
++{
++ int ret;
++
++ down(&ebt_mutex);
++ list_named_insert(&ebt_targets, &ebt_standard_target);
++ up(&ebt_mutex);
++ if ((ret = nf_register_sockopt(&ebt_sockopts)) < 0)
++ return ret;
++
++ print_string("Ebtables v2.0 registered");
++ return 0;
++}
++
++static void __exit fini(void)
++{
++ nf_unregister_sockopt(&ebt_sockopts);
++ print_string("Ebtables v2.0 unregistered");
++}
++
++EXPORT_SYMBOL(ebt_register_table);
++EXPORT_SYMBOL(ebt_unregister_table);
++EXPORT_SYMBOL(ebt_register_match);
++EXPORT_SYMBOL(ebt_unregister_match);
++EXPORT_SYMBOL(ebt_register_watcher);
++EXPORT_SYMBOL(ebt_unregister_watcher);
++EXPORT_SYMBOL(ebt_register_target);
++EXPORT_SYMBOL(ebt_unregister_target);
++EXPORT_SYMBOL(ebt_do_table);
++module_init(init);
++module_exit(fini);
++MODULE_LICENSE("GPL");
+--- /dev/null Thu Aug 24 11:00:32 2000
++++ ebt2.0pre1/include/linux/netfilter_bridge/ebtables.h Wed Apr 3 20:44:38 2002
+@@ -0,0 +1,304 @@
++/*
++ * ebtables
++ *
++ * Authors:
++ * Bart De Schuymer <bart.de.schuymer@pandora.be>
++ *
++ * ebtables.c,v 2.0, April, 2002
++ *
++ * This code is strongly inspired by the iptables code which is
++ * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
++ */
++
++#ifndef __LINUX_BRIDGE_EFF_H
++#define __LINUX_BRIDGE_EFF_H
++#include <linux/if.h> /* IFNAMSIZ */
++#include <linux/netfilter_bridge.h>
++#include <linux/if_ether.h> /* ETH_ALEN */
++
++#define EBT_TABLE_MAXNAMELEN 32
++#define EBT_FUNCTION_MAXNAMELEN EBT_TABLE_MAXNAMELEN
++
++/* [gs]etsockopt numbers */
++#define EBT_BASE_CTL 128
++
++#define EBT_SO_SET_ENTRIES (EBT_BASE_CTL)
++#define EBT_SO_SET_COUNTERS (EBT_SO_SET_ENTRIES+1)
++#define EBT_SO_SET_MAX (EBT_SO_SET_COUNTERS+1)
++
++#define EBT_SO_GET_INFO (EBT_BASE_CTL)
++#define EBT_SO_GET_ENTRIES (EBT_SO_GET_INFO+1)
++#define EBT_SO_GET_MAX (EBT_SO_GET_ENTRIES+1)
++
++#define EBT_ACCEPT 0
++#define EBT_DROP 1
++#define EBT_CONTINUE 2
++#define NUM_STANDARD_TARGETS 3
++
++struct ebt_entries {
++ // this field is always set to zero (including userspace).
++ // See EBT_ENTRY_OR_ENTRIES.
++ // Must be same size as ebt_entry.bitmask
++ __u32 distinguisher;
++ // one standard (accept or drop) per hook
++ __u8 policy;
++ // nr. of entries
++ __u32 nentries;
++ // entry list
++ __u8 data[0];
++};
++
++// used for the bitmask of struct ebt_entry
++
++// This is a hack to distinguish an ebt_entry struct from an ebt_entries
++// struct when traversing the entries from start to end.
++// Using this simplifies the code a lot, while still being able to use ebt_entries.
++// By contrast, iptables doesn't use something like ebt_entries and therefore uses
++// different techniques for naming the policy and such, so it doesn't need a hack like this.
++#define EBT_ENTRY_OR_ENTRIES 0x01
++// these are the normal masks
++#define EBT_NOPROTO 0x02
++#define EBT_802_3 0x04
++#define EBT_SOURCEMAC 0x08
++#define EBT_DESTMAC 0x10
++#define EBT_F_MASK (EBT_NOPROTO | EBT_802_3 | EBT_SOURCEMAC | EBT_DESTMAC | EBT_ENTRY_OR_ENTRIES)
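An illustrative helper (not in the patch) showing how this works: the first __u32 of either struct is ebt_entries.distinguisher (always zero) or ebt_entry.bitmask (which has EBT_ENTRY_OR_ENTRIES set), so a single test separates chain heads from rules; the traversal macros at the end of this header rely on the same trick by simply testing whether the first field is non-zero.

static inline int ebt_is_chain_head(const void *ptr)
{
	return (*(const __u32 *)ptr & EBT_ENTRY_OR_ENTRIES) == 0;
}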
++
++#define EBT_IPROTO 0x01
++#define EBT_IIN 0x02
++#define EBT_IOUT 0x04
++#define EBT_ISOURCE 0x08
++#define EBT_IDEST 0x10
++#define EBT_INV_MASK (EBT_IPROTO | EBT_IIN | EBT_IOUT | EBT_ISOURCE | EBT_IDEST)
++
++struct ebt_counter
++{
++ __u64 pcnt;
++};
++
++struct ebt_entry_match
++{
++ union {
++ char name[EBT_FUNCTION_MAXNAMELEN];
++ struct ebt_match *match;
++ } u;
++	unsigned int match_size; // size of this struct + size of data
++ unsigned char data[0];
++};
++
++struct ebt_entry_watcher
++{
++ union {
++ char name[EBT_FUNCTION_MAXNAMELEN];
++ struct ebt_watcher *watcher;
++ } u;
++	unsigned int watcher_size; // size of this struct + size of data
++ unsigned char data[0];
++};
++
++struct ebt_entry_target
++{
++ union {
++ char name[EBT_FUNCTION_MAXNAMELEN];
++ struct ebt_target *target;
++ } u;
++	unsigned int target_size; // size of this struct + size of data
++ unsigned char data[0];
++};
++
++#define EBT_STANDARD_TARGET "standard"
++struct ebt_standard_target
++{
++ struct ebt_entry_target;
++ __u8 verdict;
++};
++
++/* one entry */
++struct ebt_entry {
++ __u32 bitmask; // this needs to be the first field
++ __u32 invflags;
++ __u16 ethproto;
++ __u8 in[IFNAMSIZ];
++ __u8 out[IFNAMSIZ];
++ __u8 sourcemac[ETH_ALEN];
++ __u8 destmac[ETH_ALEN];
++ // sizeof ebt_entry + matches
++ __u16 watchers_offset;
++ // sizeof ebt_entry + matches + watchers
++ __u16 target_offset;
++ // sizeof ebt_entry + matches + watchers + target
++ __u16 next_offset;
++ unsigned char elems[0];
++};
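All three offsets are relative to the start of the entry, giving the layout [ebt_entry][matches][watchers][target], with the following entry at next_offset; two illustrative helpers (not in the patch) make the arithmetic explicit:

static inline struct ebt_entry_target *ebt_entry_target_of(struct ebt_entry *e)
{
	return (struct ebt_entry_target *)((char *)e + e->target_offset);
}

static inline struct ebt_entry *ebt_next_entry(struct ebt_entry *e)
{
	return (struct ebt_entry *)((char *)e + e->next_offset);
}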
++
++struct ebt_replace
++{
++ char name[EBT_TABLE_MAXNAMELEN];
++ unsigned int valid_hooks;
++ unsigned int nentries; // nr of rules in the table
++ unsigned int entries_size; // total size of the entries
++ struct ebt_entries *hook_entry[NF_BR_NUMHOOKS]; // start of the chains
++	unsigned int counter_entry[NF_BR_NUMHOOKS]; // nr. of counters that come before this chain's counters
++ unsigned int num_counters; // nr of counters userspace expects back
++ struct ebt_counter *counters; // where the kernel will put the old counters
++ char *entries;
++};
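A minimal sketch of a statically built table in this format: one empty chain on LOCAL_IN with accept policy and no rules (the ebtable_filter and ebtable_nat modules later in this patch use the same pattern; the names here are made up):

static struct ebt_entries demo_chain = { 0, EBT_ACCEPT, 0 };

static struct ebt_replace demo_table =
{
	"demo", 1 << NF_BR_LOCAL_IN, 0, sizeof(struct ebt_entries),
	{ [NF_BR_LOCAL_IN] = &demo_chain }, {},
	0, NULL, (char *)&demo_chain
};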
++
++#ifdef __KERNEL__
++
++struct ebt_match
++{
++ struct list_head list;
++ const char name[EBT_FUNCTION_MAXNAMELEN];
++ // 0 == it matches
++ int (*match)(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *matchdata,
++ unsigned int datalen, const struct ebt_counter *c);
++ // true == let it in
++ int (*check)(const char *tablename, unsigned int hooknr, const struct ebt_entry *e,
++ void *matchdata, unsigned int datalen);
++ void (*destroy)(void *matchdata, unsigned int datalen);
++ struct module *me;
++};
++
++struct ebt_watcher
++{
++ struct list_head list;
++ const char name[EBT_FUNCTION_MAXNAMELEN];
++ void (*watcher)(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *watcherdata,
++ unsigned int datalen, const struct ebt_counter *c);
++ // true == let it in
++ int (*check)(const char *tablename, unsigned int hooknr, const struct ebt_entry *e,
++ void *watcherdata, unsigned int datalen);
++ void (*destroy)(void *watcherdata, unsigned int datalen);
++ struct module *me;
++};
++
++struct ebt_target
++{
++ struct list_head list;
++ const char name[EBT_FUNCTION_MAXNAMELEN];
++ // returns one of the standard verdicts
++ __u8 (*target)(struct sk_buff **pskb,
++ unsigned int hooknr,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *targetdata,
++ unsigned int datalen);
++ // true == let it in
++ int (*check)(const char *tablename, unsigned int hooknr, const struct ebt_entry *e,
++ void *targetdata, unsigned int datalen);
++ void (*destroy)(void *targetdata, unsigned int datalen);
++ struct module *me;
++};
++
++struct ebt_table_info
++{
++ unsigned int entries_size; // total size of the entries
++ unsigned int nentries;
++ struct ebt_entries *hook_entry[NF_BR_NUMHOOKS];
++	unsigned int counter_entry[NF_BR_NUMHOOKS]; // nr. of counters that come before this chain's counters
++ struct ebt_counter *counters;
++ char *entries;
++};
++
++struct ebt_table
++{
++ struct list_head list;
++ char name[EBT_TABLE_MAXNAMELEN];
++ struct ebt_replace *table;
++ unsigned int valid_hooks;
++ rwlock_t lock;
++	// e.g. the table might explicitly allow only certain matches, targets, ...
++ int (*check)(const struct ebt_table_info *info, unsigned int valid_hooks);
++ struct ebt_table_info *private;// the data used by the kernel
++};
++
++extern int ebt_register_table(struct ebt_table *table);
++extern void ebt_unregister_table(struct ebt_table *table);
++extern int ebt_register_match(struct ebt_match *match);
++extern void ebt_unregister_match(struct ebt_match *match);
++extern int ebt_register_watcher(struct ebt_watcher *watcher);
++extern void ebt_unregister_watcher(struct ebt_watcher *watcher);
++extern int ebt_register_target(struct ebt_target *target);
++extern void ebt_unregister_target(struct ebt_target *target);
++extern unsigned int ebt_do_table(unsigned int hook, struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out, struct ebt_table *table);
++
++#endif /* __KERNEL__ */
++
++// blatantly stolen from ip_tables.h
++/* fn returns 0 to continue iteration */
++#define EBT_MATCH_ITERATE(e, fn, args...) \
++({ \
++ unsigned int __i; \
++ int __ret = 0; \
++ struct ebt_entry_match *__match; \
++ \
++ for (__i = sizeof(struct ebt_entry); \
++ __i < (e)->watchers_offset; \
++ __i += __match->match_size) { \
++ __match = (void *)(e) + __i; \
++ \
++ __ret = fn(__match , ## args); \
++ if (__ret != 0) \
++ break; \
++ } \
++ if (__ret == 0) { \
++ if (__i != (e)->watchers_offset) \
++ __ret = -EINVAL; \
++ } \
++ __ret; \
++})
++
++#define EBT_WATCHER_ITERATE(e, fn, args...) \
++({ \
++ unsigned int __i; \
++ int __ret = 0; \
++ struct ebt_entry_watcher *__watcher; \
++ \
++	for (__i = (e)->watchers_offset; \
++ __i < (e)->target_offset; \
++ __i += __watcher->watcher_size) { \
++ __watcher = (void *)(e) + __i; \
++ \
++ __ret = fn(__watcher , ## args); \
++ if (__ret != 0) \
++ break; \
++ } \
++ if (__ret == 0) { \
++ if (__i != (e)->target_offset) \
++ __ret = -EINVAL; \
++ } \
++ __ret; \
++})
++
++#define EBT_ENTRY_ITERATE(entries, size, fn, args...) \
++({ \
++ unsigned int __i; \
++ int __ret = 0; \
++ struct ebt_entry *__entry; \
++ \
++ for (__i = 0; __i < (size);) { \
++ __entry = (void *)(entries) + __i; \
++ __ret = fn(__entry , ## args); \
++ if (__ret != 0) \
++ break; \
++ if (__entry->bitmask != 0) \
++ __i += __entry->next_offset; \
++ else \
++ __i += sizeof(struct ebt_entries); \
++ } \
++ if (__ret == 0) { \
++ if (__i != (size)) \
++ __ret = -EINVAL; \
++ } \
++ __ret; \
++})
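A hedged usage sketch for the iterate macros: fn is called once per element (chain heads included for EBT_ENTRY_ITERATE) and iteration stops at the first non-zero return, so counting the real rules of a table could look like this (helper name illustrative, not part of the patch):

static inline int count_rule(struct ebt_entry *e, unsigned int *cnt)
{
	/* chain heads have bitmask (i.e. distinguisher) == 0 and are skipped */
	if (e->bitmask & EBT_ENTRY_OR_ENTRIES)
		(*cnt)++;
	return 0;
}

/* unsigned int cnt = 0;
 * EBT_ENTRY_ITERATE(info->entries, info->entries_size, count_rule, &cnt);
 */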
++
++#endif
+--- /dev/null Thu Aug 24 11:00:32 2000
++++ ebt2.0pre1/include/linux/netfilter_bridge/ebt_arp.h Wed Apr 3 18:50:31 2002
+@@ -0,0 +1,25 @@
++#ifndef __LINUX_BRIDGE_EBT_ARP_H
++#define __LINUX_BRIDGE_EBT_ARP_H
++
++#define EBT_ARP_OPCODE 0x01
++#define EBT_ARP_HTYPE 0x02
++#define EBT_ARP_PTYPE 0x04
++#define EBT_ARP_SRC_IP 0x08
++#define EBT_ARP_DST_IP 0x10
++#define EBT_ARP_MASK (EBT_ARP_OPCODE | EBT_ARP_HTYPE | EBT_ARP_PTYPE | EBT_ARP_SRC_IP | EBT_ARP_DST_IP)
++#define EBT_ARP_MATCH "arp"
++
++struct ebt_arp_info
++{
++ __u16 htype;
++ __u16 ptype;
++ __u16 opcode;
++ __u32 saddr;
++ __u32 smsk;
++ __u32 daddr;
++ __u32 dmsk;
++ __u8 bitmask;
++ __u8 invflags;
++};
++
++#endif
+--- /dev/null Thu Aug 24 11:00:32 2000
++++ ebt2.0pre1/include/linux/netfilter_bridge/ebt_ip.h Wed Apr 3 18:50:31 2002
+@@ -0,0 +1,24 @@
++#ifndef __LINUX_BRIDGE_EBT_IP_H
++#define __LINUX_BRIDGE_EBT_IP_H
++
++#define EBT_IP_SOURCE 0x01
++#define EBT_IP_DEST 0x02
++#define EBT_IP_TOS 0x04
++#define EBT_IP_PROTO 0x08
++#define EBT_IP_MASK (EBT_IP_SOURCE | EBT_IP_DEST | EBT_IP_TOS | EBT_IP_PROTO)
++#define EBT_IP_MATCH "ip"
++
++// the same values are used for the invflags
++struct ebt_ip_info
++{
++ __u32 saddr;
++ __u32 daddr;
++ __u32 smsk;
++ __u32 dmsk;
++ __u8 tos;
++ __u8 protocol;
++ __u8 bitmask;
++ __u8 invflags;
++};
++
++#endif
+--- /dev/null Thu Aug 24 11:00:32 2000
++++ ebt2.0pre1/include/linux/netfilter_bridge/ebt_log.h Wed Apr 3 18:50:31 2002
+@@ -0,0 +1,17 @@
++#ifndef __LINUX_BRIDGE_EBT_LOG_H
++#define __LINUX_BRIDGE_EBT_LOG_H
++
++#define EBT_LOG_IP 0x01 // if the frame is an IP packet, log the IP information
++#define EBT_LOG_ARP 0x02
++#define EBT_LOG_MASK (EBT_LOG_IP | EBT_LOG_ARP)
++#define EBT_LOG_PREFIX_SIZE 30
++#define EBT_LOG_WATCHER "log"
++
++struct ebt_log_info
++{
++ __u8 loglevel;
++ __u8 prefix[EBT_LOG_PREFIX_SIZE];
++ __u32 bitmask;
++};
++
++#endif
+--- /dev/null Thu Aug 24 11:00:32 2000
++++ ebt2.0pre1/include/linux/netfilter_bridge/ebt_nat.h Wed Apr 3 18:50:31 2002
+@@ -0,0 +1,11 @@
++#ifndef __LINUX_BRIDGE_EBT_NAT_H
++#define __LINUX_BRIDGE_EBT_NAT_H
++
++struct ebt_nat_info
++{
++ unsigned char mac[ETH_ALEN];
++};
++#define EBT_SNAT_TARGET "snat"
++#define EBT_DNAT_TARGET "dnat"
++
++#endif
+--- /dev/null Thu Aug 24 11:00:32 2000
++++ ebt2.0pre1/include/linux/br_db.h Wed Apr 3 20:45:01 2002
+@@ -0,0 +1,53 @@
++/*
++ * bridge ethernet protocol filter
++ *
++ * Authors:
++ * Bart De Schuymer <bart.de.schuymer@pandora.be>
++ *
++ * br_db.h,v 1.1 2001/04/16
++ *
++ * This code is strongly inspired by the iptables code which is
++ * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#ifndef __LINUX_BRIDGE_DB_H
++#define __LINUX_BRIDGE_DB_H
++#include <linux/if.h> /* IFNAMSIZ */
++#ifdef __KERNEL__
++#include <linux/if_bridge.h>
++#include <linux/netfilter_bridge.h>
++#else
++#include <linux/netfilter_bridge.h>
++#endif
++#define BRDB_BASE_CTL 135
++
++#define BRDB_SO_SET_ALLOWDB (BRDB_BASE_CTL)
++#define BRDB_SO_SET_MAX (BRDB_SO_SET_ALLOWDB+1)
++
++#define BRDB_SO_GET_DBINFO (BRDB_BASE_CTL)
++#define BRDB_SO_GET_DB (BRDB_SO_GET_DBINFO+1)
++#define BRDB_SO_GET_MAX (BRDB_SO_GET_DB+1)
++
++#define BRDB_NODB 0
++#define BRDB_DB 1
++
++#define INITIAL_DBSIZE 10
++#define IDENTIFY802_3 46
++
++struct brdb_dbinfo {
++ __u32 nentries;
++};
++
++struct brdb_dbentry {
++ __u8 in[IFNAMSIZ];
++ __u8 out[IFNAMSIZ];
++ __u16 ethproto;
++ __u32 hook;
++};
++
++#endif
diff --git a/kernel/patches/base-patches/ebtables-v2.0pre2_vs_2.4.18.diff b/kernel/patches/base-patches/ebtables-v2.0pre2_vs_2.4.18.diff
new file mode 100644
index 0000000..23ecce4
--- /dev/null
+++ b/kernel/patches/base-patches/ebtables-v2.0pre2_vs_2.4.18.diff
@@ -0,0 +1,2752 @@
+--- linux/net/Makefile Mon Feb 25 20:38:14 2002
++++ ebt2.0pre2/net/Makefile Wed Apr 3 21:50:43 2002
+@@ -7,7 +7,8 @@
+
+ O_TARGET := network.o
+
+-mod-subdirs := ipv4/netfilter ipv6/netfilter ipx irda bluetooth atm netlink sched
++mod-subdirs := bridge/netfilter ipv4/netfilter ipv6/netfilter ipx irda \
++ bluetooth atm netlink sched
+ export-objs := netsyms.o
+
+ subdir-y := core ethernet
+@@ -23,6 +24,12 @@
+ ifneq ($(CONFIG_IPV6),n)
+ ifneq ($(CONFIG_IPV6),)
+ subdir-$(CONFIG_NETFILTER) += ipv6/netfilter
++endif
++endif
++
++ifneq ($(CONFIG_BRIDGE),n)
++ifneq ($(CONFIG_BRIDGE),)
++subdir-$(CONFIG_BRIDGE) += bridge/netfilter
+ endif
+ endif
+
+--- linux/net/Config.in Sun Apr 14 15:19:26 2002
++++ ebt2.0pre2/net/Config.in Wed Apr 3 21:50:43 2002
+@@ -60,6 +60,7 @@
+ source net/decnet/Config.in
+ fi
+ dep_tristate '802.1d Ethernet Bridging' CONFIG_BRIDGE $CONFIG_INET
++ source net/bridge/netfilter/Config.in
+ if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
+ if [ "$CONFIG_BRIDGE" != "n" -a "$CONFIG_NETFILTER" != "n" ]; then
+ bool ' netfilter (firewalling) support' CONFIG_BRIDGE_NF
+--- /dev/null Thu Aug 24 11:00:32 2000
++++ ebt2.0pre2/net/bridge/netfilter/Makefile Wed Apr 3 21:50:43 2002
+@@ -0,0 +1,23 @@
++#
++# Makefile for the netfilter modules on top of bridging.
++#
++# Note! Dependencies are done automagically by 'make dep', which also
++# removes any old dependencies. DON'T put your own dependencies here
++# unless it's something special (ie not a .c file).
++#
++# Note 2! The CFLAGS definition is now in the main makefile...
++
++O_TARGET := netfilter.o
++
++export-objs = ebtables.o
++
++obj-$(CONFIG_BRIDGE_EBT) += ebtables.o
++obj-$(CONFIG_BRIDGE_EBT_T_FILTER) += ebtable_filter.o
++obj-$(CONFIG_BRIDGE_EBT_T_NAT) += ebtable_nat.o
++obj-$(CONFIG_BRIDGE_DB) += br_db.o
++obj-$(CONFIG_BRIDGE_EBT_IPF) += ebt_ip.o
++obj-$(CONFIG_BRIDGE_EBT_ARPF) += ebt_arp.o
++obj-$(CONFIG_BRIDGE_EBT_LOG) += ebt_log.o
++obj-$(CONFIG_BRIDGE_EBT_NAT) += ebt_nat.o
++
++include $(TOPDIR)/Rules.make
+--- /dev/null Thu Aug 24 11:00:32 2000
++++ ebt2.0pre2/net/bridge/netfilter/Config.in Wed Apr 3 21:50:43 2002
+@@ -0,0 +1,12 @@
++#
++# Bridge netfilter configuration
++#
++dep_tristate ' Bridge: ebtables' CONFIG_BRIDGE_EBT $CONFIG_BRIDGE
++dep_tristate ' ebt: filter table support' CONFIG_BRIDGE_EBT_T_FILTER $CONFIG_BRIDGE_EBT
++dep_tristate ' ebt: nat table support' CONFIG_BRIDGE_EBT_T_NAT $CONFIG_BRIDGE_EBT
++dep_tristate ' ebt: LOG support' CONFIG_BRIDGE_EBT_LOG $CONFIG_BRIDGE_EBT
++dep_tristate ' ebt: IP filter support' CONFIG_BRIDGE_EBT_IPF $CONFIG_BRIDGE_EBT
++dep_tristate ' ebt: ARP filter support' CONFIG_BRIDGE_EBT_ARPF $CONFIG_BRIDGE_EBT
++dep_tristate ' ebt: nat target support' CONFIG_BRIDGE_EBT_NAT $CONFIG_BRIDGE_EBT
++dep_tristate ' Bridge: ethernet database' CONFIG_BRIDGE_DB $CONFIG_BRIDGE
++
+--- /dev/null Thu Aug 24 11:00:32 2000
++++ ebt2.0pre2/net/bridge/netfilter/br_db.c Wed Apr 3 21:50:43 2002
+@@ -0,0 +1,357 @@
++/*
++ * bridge ethernet protocol database
++ *
++ * Authors:
++ * Bart De Schuymer <bart.de.schuymer@pandora.be>
++ *
++ * br_db.c, April, 2002
++ *
++ * This code is strongly inspired by the iptables code which is
++ * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#include <linux/module.h>
++#include <linux/vmalloc.h>
++#include <linux/skbuff.h>
++#include <linux/if_ether.h>
++#include <linux/netfilter_bridge.h>
++#include <linux/br_db.h>
++#include <linux/socket.h> /* PF_BRIDGE */
++#include <linux/spinlock.h> /* rwlock_t */
++#include <asm/errno.h>
++#include <asm/uaccess.h> /* copy_[to,from]_user */
++#include <linux/smp.h> /* multiprocessors */
++
++#define BUGPRINT(format, args...) printk("kernel msg: brdb bug: please report to author: "format, ## args)
++/*#define BUGPRINT(format, args...)*/
++#define MEMPRINT(format, args...) printk("kernel msg: brdb : out of memory: "format, ## args)
++/*#define MEMPRINT(format, args...)*/
++
++/* database variables */
++static __u16 allowdb = BRDB_NODB;
++static struct brdb_dbentry **flowdb = NULL;
++static unsigned int *dbsize;
++static unsigned int *dbnum;
++/* database lock */
++static rwlock_t brdb_dblock;
++
++static inline int brdb_dev_check(char *entry, const struct net_device *device){
++ if (*entry == '\0') return 0;
++ if (!device) return 1;
++ return strncmp(entry, device->name, IFNAMSIZ);
++}
++
++static inline int brdb_proto_check(unsigned int a, unsigned int b){
++ if (a == b || ( a == IDENTIFY802_3 && ntohs(b) < 1536 )) return 0;
++ return 1;
++}
++
++static unsigned int maintaindb (unsigned int hook, struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ int (*okfn)(struct sk_buff *))
++{
++ struct brdb_dbentry *hlp;
++ int i, cpunr;
++ unsigned short ethproto = ((**pskb).mac.ethernet)->h_proto;
++
++ cpunr = cpu_number_map(smp_processor_id());
++
++ read_lock_bh(&brdb_dblock);
++
++ if (allowdb == BRDB_NODB) {// must be after readlock
++ read_unlock_bh(&brdb_dblock);
++ return NF_ACCEPT;
++ }
++ hlp = flowdb[cpunr];
++ /* search for existing entry */
++ for (i = 0; i < dbnum[cpunr]; i++) {
++ if (hlp->hook == hook && !brdb_proto_check(hlp->ethproto, ethproto) &&
++ !brdb_dev_check(hlp->in, in) && !brdb_dev_check(hlp->out, out)) {
++ read_unlock_bh(&brdb_dblock);
++ return NF_ACCEPT;
++ }
++ hlp++;
++ }
++ /* add new entry to database */
++ if (dbnum[cpunr] == dbsize[cpunr]) {
++ dbsize[cpunr] *= 2;
++ if ( !( hlp = (struct brdb_dbentry *) vmalloc(dbsize[cpunr] * sizeof(struct brdb_dbentry)) ) ) {
++ dbsize[cpunr] /= 2;
++ MEMPRINT("maintaindb && nomemory\n");
++ read_unlock_bh(&brdb_dblock);
++ return NF_ACCEPT;
++ }
++ memcpy(hlp, flowdb[cpunr], dbnum[cpunr] * sizeof(struct brdb_dbentry));
++ vfree(flowdb[cpunr]);
++ flowdb[cpunr] = hlp;
++ }
++
++ hlp = flowdb[cpunr] + dbnum[cpunr];
++ hlp->hook = hook;
++ if (in)
++ strncpy(hlp->in, in->name, IFNAMSIZ);
++ else
++ hlp->in[0] = '\0';
++ if (out)
++ strncpy(hlp->out, out->name, IFNAMSIZ);
++ else
++ hlp->out[0] = '\0';
++ if (ntohs(ethproto) < 1536)
++ hlp->ethproto = IDENTIFY802_3;
++ else
++ hlp->ethproto = ethproto;
++ dbnum[cpunr]++;
++
++ read_unlock_bh(&brdb_dblock);
++
++ return NF_ACCEPT;
++}
++
++static int copy_db(void *user, int *len)
++{
++ int i, j, nentries = 0, ret;
++ struct brdb_dbentry *begin, *end1, *end2, *point, *point2;
++
++ write_lock_bh(&brdb_dblock);
++ for (i = 0; i < smp_num_cpus; i++)
++ nentries += dbnum[i];
++	if (*len > nentries) {
++		write_unlock_bh(&brdb_dblock);
++		return -EINVAL;
++	}
++
++	if ( !(begin = (struct brdb_dbentry *) vmalloc((*len) * sizeof(struct brdb_dbentry))) ) {
++		write_unlock_bh(&brdb_dblock);
++		return -ENOMEM;
++	}
++ memcpy(begin, flowdb[0], dbnum[0] * sizeof(struct brdb_dbentry));
++ end1 = begin + dbnum[0];
++ for (i = 1; i < smp_num_cpus; i++) {/* cycle databases per cpu */
++ point2 = flowdb[i];
++ end2 = end1;
++ for (j = 0; j < dbnum[i]; j++) {/* cycle entries of a cpu's database (point2) */
++ for (point = begin; point != end2; point++)/* cycle different entries we found so far */
++ if (point->hook == point2->hook && !strncmp(point->in, point2->in, IFNAMSIZ) &&
++ !strncmp(point->out, point2->out, IFNAMSIZ) && point->ethproto == point2->ethproto)
++ goto out;/* already exists in a database of another cpu */
++
++ memcpy(end1, point2, sizeof(struct brdb_dbentry));
++ end1++;
++out:
++ point2++;
++ }
++ }
++ write_unlock_bh(&brdb_dblock);
++	i = (int)(((char *)end1 - (char *)begin) / sizeof(struct brdb_dbentry));
++ *len = i < *len ? i : *len;
++ if (copy_to_user(user, begin, *len * sizeof(struct brdb_dbentry)) != 0)
++ ret = -EFAULT;
++ else
++ ret = 0;
++ vfree(begin);
++ return ret;
++}
++
++static int switch_nodb(void){
++ int i;
++
++ if (!flowdb)
++ BUGPRINT("switch_nodb && !flowdb\n");
++ for (i = 0; i < smp_num_cpus; i++)
++ vfree(flowdb[i]);
++ vfree(flowdb);
++ if (!dbsize)
++ BUGPRINT("switch_nodb && !dbsize\n");
++ vfree(dbsize);
++ if (!dbnum)
++ BUGPRINT("switch_nodb && !dbnum\n");
++ vfree(dbnum);
++ flowdb = NULL;
++ allowdb = BRDB_NODB;
++ return 0;
++}
++
++static int switch_db(void)
++{
++ int i, j;
++
++ if (flowdb) BUGPRINT("switch_db && flowdb\n");
++ if ( !(flowdb = (struct brdb_dbentry **) vmalloc(smp_num_cpus * sizeof(struct brdb_dbentry *))) ) {
++ MEMPRINT("switch_db && nomemory\n");
++ return -ENOMEM;
++ }
++
++ for (i = 0; i < smp_num_cpus; i++)
++ if ( !(flowdb[i] = (struct brdb_dbentry *) vmalloc(INITIAL_DBSIZE * sizeof(struct brdb_dbentry))) )
++ goto sw_free1;
++ else
++ memset(flowdb[i], 0, INITIAL_DBSIZE * sizeof(struct brdb_dbentry));
++
++ if ( !(dbnum = (int*) vmalloc(smp_num_cpus * sizeof(int))) )
++ goto sw_free2;
++
++ if ( !(dbsize = (int*) vmalloc(smp_num_cpus * sizeof(int))) )
++ goto sw_free3;
++
++ for (i = 0; i < smp_num_cpus; i++) {
++ dbnum[i] = 0;
++ dbsize[i] = INITIAL_DBSIZE;
++ }
++ allowdb = BRDB_DB;
++ return 0;
++
++sw_free3:
++ MEMPRINT("switch_db && nomemory2\n");
++ vfree(dbnum);
++ dbnum = NULL;
++sw_free2:
++ MEMPRINT("switch_db && nomemory3\n");
++sw_free1:
++ MEMPRINT("switch_db && nomemory4\n");
++ for (j = 0; j<i; j++)
++ vfree(flowdb[j]);
++ vfree(flowdb);
++ allowdb = BRDB_NODB;
++ return -ENOMEM;
++}
++
++static int
++do_brdb_set_ctl(struct sock *sk, int cmd, void *user, unsigned int len)
++{
++ int ret;
++ __u16 adb;
++ switch(cmd) {
++ case BRDB_SO_SET_ALLOWDB:
++ if (len != sizeof(__u16)) {
++ ret = -EINVAL;
++ break;
++ }
++ if (copy_from_user(&adb, user, len) != 0) {
++ ret = -EFAULT;
++ break;
++ }
++ if (adb != BRDB_DB && adb != BRDB_NODB) {
++ ret = -EINVAL;
++ break;
++ }
++ write_lock_bh(&brdb_dblock);
++ if (adb == allowdb) {
++ ret = 0;
++ write_unlock_bh(&brdb_dblock);
++ break;
++ }
++ if (allowdb == BRDB_DB)
++ ret = switch_nodb();
++ else
++ ret = switch_db();
++ write_unlock_bh(&brdb_dblock);
++ break;
++
++ default:
++ ret = -EINVAL;
++ }
++ return ret;
++}
++
++static int
++do_brdb_get_ctl(struct sock *sk, int cmd, void *user, int *len)
++{
++ struct brdb_dbinfo help2;
++ int i, ret;
++ switch(cmd) {
++ case BRDB_SO_GET_DBINFO:
++ if (sizeof(struct brdb_dbinfo) != *len)
++ return -EINVAL;
++ write_lock_bh(&brdb_dblock);
++		/* 0 == no database
++		 * n == database with n-1 entries
++		 */
++ if (allowdb == BRDB_NODB)
++ help2.nentries = 0;
++ else {
++ help2.nentries = 1;
++ for (i = 0; i < smp_num_cpus; i++)
++ help2.nentries += dbnum[i];
++ }
++ write_unlock_bh(&brdb_dblock);
++ if (copy_to_user(user, &help2, sizeof(help2)) != 0)
++ ret = -EFAULT;
++ else
++ ret = 0;
++ break;
++
++ case BRDB_SO_GET_DB:
++ if (*len == 0 || allowdb == BRDB_NODB)
++ return -EINVAL;
++ ret = copy_db(user, len);
++ break;
++ default:
++ ret = -EINVAL;
++ }
++
++ return ret;
++}
++
++static struct nf_sockopt_ops brdb_sockopts
++= { { NULL, NULL }, PF_INET, BRDB_BASE_CTL, BRDB_SO_SET_MAX+1, do_brdb_set_ctl,
++ BRDB_BASE_CTL, BRDB_SO_GET_MAX+1, do_brdb_get_ctl, 0, NULL };
++
++
++static struct nf_hook_ops brdb_br_ops[] = {
++ { { NULL, NULL }, maintaindb, PF_BRIDGE, NF_BR_PRE_ROUTING, -250},
++ { { NULL, NULL }, maintaindb, PF_BRIDGE, NF_BR_LOCAL_IN, -250},
++ { { NULL, NULL }, maintaindb, PF_BRIDGE, NF_BR_FORWARD, -250},
++ { { NULL, NULL }, maintaindb, PF_BRIDGE, NF_BR_LOCAL_OUT, -250},
++ { { NULL, NULL }, maintaindb, PF_BRIDGE, NF_BR_POST_ROUTING, -250}
++};
++
++static int __init init(void)
++{
++ int ret;
++
++ if ((ret = nf_register_hook(&brdb_br_ops[0])) < 0)
++ return ret;
++
++ if ((ret = nf_register_hook(&brdb_br_ops[1])) < 0)
++ goto clean0;
++
++ if ((ret = nf_register_hook(&brdb_br_ops[2])) < 0)
++ goto clean1;
++
++ if ((ret = nf_register_hook(&brdb_br_ops[3])) < 0)
++ goto clean2;
++
++ if ((ret = nf_register_hook(&brdb_br_ops[4])) < 0)
++ goto clean3;
++
++ /* Register setsockopt */
++ if ((ret = nf_register_sockopt(&brdb_sockopts)) < 0)
++ goto clean4;
++
++ rwlock_init(&brdb_dblock);
++ printk("Bridge ethernet database registered\n");
++ return ret;
++
++clean4: nf_unregister_hook(&brdb_br_ops[4]);
++clean3: nf_unregister_hook(&brdb_br_ops[3]);
++clean2: nf_unregister_hook(&brdb_br_ops[2]);
++clean1: nf_unregister_hook(&brdb_br_ops[1]);
++clean0: nf_unregister_hook(&brdb_br_ops[0]);
++
++ return ret;
++}
++
++static void __exit fini(void)
++{
++ nf_unregister_hook(&brdb_br_ops[4]);
++ nf_unregister_hook(&brdb_br_ops[3]);
++ nf_unregister_hook(&brdb_br_ops[2]);
++ nf_unregister_hook(&brdb_br_ops[1]);
++ nf_unregister_hook(&brdb_br_ops[0]);
++ nf_unregister_sockopt(&brdb_sockopts);
++}
++
++module_init(init);
++module_exit(fini);
+--- /dev/null Thu Aug 24 11:00:32 2000
++++ ebt2.0pre2/net/bridge/netfilter/ebtable_filter.c Sat Apr 13 21:51:47 2002
+@@ -0,0 +1,90 @@
++/*
++ * ebtable_filter
++ *
++ * Authors:
++ * Bart De Schuymer <bart.de.schuymer@pandora.be>
++ *
++ * April, 2002
++ *
++ */
++
++#include <linux/netfilter_bridge/ebtables.h>
++#include <linux/netfilter_bridge.h>
++#include <linux/module.h>
++
++#define FILTER_VALID_HOOKS ((1 << NF_BR_LOCAL_IN) | (1 << NF_BR_FORWARD) | \
++ (1 << NF_BR_LOCAL_OUT))
++
++static struct ebt_entries initial_chains[] =
++{
++ {0, EBT_ACCEPT, 0},
++ {0, EBT_ACCEPT, 0},
++ {0, EBT_ACCEPT, 0}
++};
++
++static struct ebt_replace initial_table =
++{
++ "filter", FILTER_VALID_HOOKS, 0, 3 * sizeof(struct ebt_entries),
++ { [NF_BR_LOCAL_IN]&initial_chains[0], [NF_BR_FORWARD]&initial_chains[1],
++ [NF_BR_LOCAL_OUT]&initial_chains[2] },{},
++ 0, NULL, (char *)initial_chains
++};
++
++static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
++{
++ if (valid_hooks & ~FILTER_VALID_HOOKS)
++ return -EINVAL;
++ return 0;
++}
++
++static struct ebt_table frame_filter =
++{
++ {NULL, NULL}, "filter", &initial_table, FILTER_VALID_HOOKS,
++ RW_LOCK_UNLOCKED, check, NULL
++};
++
++static unsigned int ebt_hook (unsigned int hook, struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ int (*okfn)(struct sk_buff *))
++{
++ return ebt_do_table(hook, pskb, in, out, &frame_filter);
++}
++
++static struct nf_hook_ops ebt_ops_filter[] = {
++ { { NULL, NULL }, ebt_hook, PF_BRIDGE, NF_BR_LOCAL_IN, -200},
++ { { NULL, NULL }, ebt_hook, PF_BRIDGE, NF_BR_FORWARD, -200},
++ { { NULL, NULL }, ebt_hook, PF_BRIDGE, NF_BR_LOCAL_OUT, 200}
++};
++
++static int __init init(void)
++{
++ int i, j, ret;
++
++ ret = ebt_register_table(&frame_filter);
++ if (ret < 0)
++ return ret;
++ for (i = 0; i < sizeof(ebt_ops_filter) / sizeof(ebt_ops_filter[0]); i++)
++ if ((ret = nf_register_hook(&ebt_ops_filter[i])) < 0)
++ goto cleanup;
++ return ret;
++cleanup:
++ for (j = 0; j < i; j++)
++ nf_unregister_hook(&ebt_ops_filter[j]);
++ ebt_unregister_table(&frame_filter);
++ return ret;
++}
++
++static void __exit fini(void)
++{
++ int i;
++
++ for (i = 0; i < sizeof(ebt_ops_filter) / sizeof(ebt_ops_filter[0]); i++)
++ nf_unregister_hook(&ebt_ops_filter[i]);
++ ebt_unregister_table(&frame_filter);
++}
++
++module_init(init);
++module_exit(fini);
++EXPORT_NO_SYMBOLS;
++MODULE_LICENSE("GPL");
+--- /dev/null Thu Aug 24 11:00:32 2000
++++ ebt2.0pre2/net/bridge/netfilter/ebtable_nat.c Sat Apr 13 21:54:58 2002
+@@ -0,0 +1,153 @@
++/*
++ * ebtable_nat
++ *
++ * Authors:
++ * Bart De Schuymer <bart.de.schuymer@pandora.be>
++ *
++ * April, 2002
++ *
++ */
++
++#include <linux/netfilter_bridge/ebtables.h>
++#include <linux/netfilter_bridge.h>
++#include <linux/netdevice.h>
++#include <linux/module.h>
++#define NAT_VALID_HOOKS ((1 << NF_BR_PRE_ROUTING) | (1 << NF_BR_LOCAL_OUT) | \
++ (1 << NF_BR_POST_ROUTING))
++
++static struct ebt_entries initial_chains[] =
++{
++ {0, EBT_ACCEPT, 0},
++ {0, EBT_ACCEPT, 0},
++ {0, EBT_ACCEPT, 0}
++};
++
++static struct ebt_replace initial_table =
++{
++ "nat", NAT_VALID_HOOKS, 0, 3 * sizeof(struct ebt_entries),
++ { [NF_BR_PRE_ROUTING]&initial_chains[0], [NF_BR_LOCAL_OUT]&initial_chains[1],
++ [NF_BR_POST_ROUTING]&initial_chains[2] }, {},
++ 0, NULL, (char *)initial_chains
++};
++
++static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
++{
++ if (valid_hooks & ~NAT_VALID_HOOKS)
++ return -EINVAL;
++ return 0;
++}
++
++static struct ebt_table frame_nat =
++{
++ {NULL, NULL}, "nat", &initial_table, NAT_VALID_HOOKS,
++ RW_LOCK_UNLOCKED, check, NULL
++};
++
++// used by snat to know whether the frame comes from FORWARD or LOCAL_OUT.
++// needed because of the bridge-nf patch (which allows the use of iptables
++// on bridged traffic).
++// if the packet is routed, we want the ebtables stuff on POSTROUTING
++// to be executed _after_ the iptables stuff; when it's bridged, it's
++// the other way around
++static struct net_device __fake_net_device = {
++ hard_header_len: ETH_HLEN
++};
++
++static unsigned int
++ebt_nat_dst (unsigned int hook, struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ int (*okfn)(struct sk_buff *))
++{
++ return ebt_do_table(hook, pskb, in, out, &frame_nat);
++}
++
++// let snat know this frame is routed
++static unsigned int ebt_clear_physin (unsigned int hook, struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ int (*okfn)(struct sk_buff *))
++{
++ (*pskb)->physindev = NULL;
++ return NF_ACCEPT;
++}
++
++// let snat know this frame is bridged
++static unsigned int ebt_set_physin (unsigned int hook, struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ int (*okfn)(struct sk_buff *))
++{
++ (*pskb)->physindev = &__fake_net_device;
++ return NF_ACCEPT;
++}
++
++static unsigned int ebt_nat_src (unsigned int hook, struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ int (*okfn)(struct sk_buff *))
++{
++ // this is a routed packet
++ if ((*pskb)->physindev == NULL)
++ return NF_ACCEPT;
++ if ((*pskb)->physindev != &__fake_net_device)
++ printk("ebtables (br_nat_src): physindev hack "
++ "doesn't work - BUG\n");
++
++ return ebt_do_table(hook, pskb, in, out, &frame_nat);
++}
++
++static unsigned int ebt_nat_src_route (unsigned int hook, struct sk_buff **pskb,
++ const struct net_device *in, const struct net_device *out,
++ int (*okfn)(struct sk_buff *))
++{
++ // this is a bridged packet
++ if ((*pskb)->physindev == &__fake_net_device)
++ return NF_ACCEPT;
++ if ((*pskb)->physindev)
++ printk("ebtables (br_nat_src_route): physindev hack "
++ "doesn't work - BUG\n");
++
++ return ebt_do_table(hook, pskb, in, out, &frame_nat);
++}
++
++static struct nf_hook_ops ebt_ops_nat[] = {
++ { { NULL, NULL }, ebt_nat_dst, PF_BRIDGE, NF_BR_LOCAL_OUT, 100},
++ { { NULL, NULL }, ebt_nat_src, PF_BRIDGE, NF_BR_POST_ROUTING, -100},
++ { { NULL, NULL }, ebt_nat_src_route, PF_BRIDGE, NF_BR_POST_ROUTING,300},
++ { { NULL, NULL }, ebt_nat_dst, PF_BRIDGE, NF_BR_PRE_ROUTING, -300},
++ { { NULL, NULL }, ebt_clear_physin, PF_BRIDGE, NF_BR_LOCAL_OUT,200 + 1},
++ { { NULL, NULL }, ebt_set_physin, PF_BRIDGE, NF_BR_FORWARD, 200 + 1}
++};
++
++static int __init init(void)
++{
++ int i, ret, j;
++
++ ret = ebt_register_table(&frame_nat);
++ if (ret < 0)
++ return ret;
++ for (i = 0; i < sizeof(ebt_ops_nat) / sizeof(ebt_ops_nat[0]); i++)
++ if ((ret = nf_register_hook(&ebt_ops_nat[i])) < 0)
++ goto cleanup;
++ return ret;
++cleanup:
++ for (j = 0; j < i; j++)
++ nf_unregister_hook(&ebt_ops_nat[j]);
++ ebt_unregister_table(&frame_nat);
++ return ret;
++}
++
++static void __exit fini(void)
++{
++ int i;
++
++ for (i = 0; i < sizeof(ebt_ops_nat) / sizeof(ebt_ops_nat[0]); i++)
++ nf_unregister_hook(&ebt_ops_nat[i]);
++ ebt_unregister_table(&frame_nat);
++}
++
++module_init(init);
++module_exit(fini);
++EXPORT_NO_SYMBOLS;
++MODULE_LICENSE("GPL");
+--- /dev/null Thu Aug 24 11:00:32 2000
++++ ebt2.0pre2/net/bridge/netfilter/ebt_arp.c Sat Apr 13 21:45:34 2002
+@@ -0,0 +1,107 @@
++/*
++ * ebt_arp
++ *
++ * Authors:
++ * Bart De Schuymer <bart.de.schuymer@pandora.be>
++ * Tim Gardner <timg@tpi.com>
++ *
++ * April, 2002
++ *
++ */
++
++#include <linux/netfilter_bridge/ebtables.h>
++#include <linux/netfilter_bridge/ebt_arp.h>
++#include <linux/if_arp.h>
++#include <linux/module.h>
++
++#define FWINV2(bool,invflg) ((bool) ^ !!(infostuff->invflags & invflg))
++static int ebt_filter_arp(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *data,
++ unsigned int datalen, const struct ebt_counter *c)
++{
++ struct ebt_arp_info *infostuff = (struct ebt_arp_info *)data;
++
++ if (infostuff->bitmask & EBT_ARP_OPCODE && FWINV2(infostuff->opcode !=
++ ((*skb).nh.arph)->ar_op, EBT_ARP_OPCODE))
++ return 1;
++ if (infostuff->bitmask & EBT_ARP_HTYPE && FWINV2(infostuff->htype !=
++ ((*skb).nh.arph)->ar_hrd, EBT_ARP_HTYPE))
++ return 1;
++ if (infostuff->bitmask & EBT_ARP_PTYPE && FWINV2(infostuff->ptype !=
++ ((*skb).nh.arph)->ar_pro, EBT_ARP_PTYPE))
++ return 1;
++
++ if (infostuff->bitmask & (EBT_ARP_SRC_IP | EBT_ARP_DST_IP))
++ {
++ __u32 arp_len = sizeof(struct arphdr) +
++ (2*(((*skb).nh.arph)->ar_hln)) +
++ (2*(((*skb).nh.arph)->ar_pln));
++ __u32 dst;
++ __u32 src;
++
++ // Make sure the packet is long enough.
++ if ((((*skb).nh.raw) + arp_len) > (*skb).tail)
++ return 1;
++ // IPV4 addresses are always 4 bytes.
++ if (((*skb).nh.arph)->ar_pln != sizeof(__u32))
++ return 1;
++
++ if (infostuff->bitmask & EBT_ARP_SRC_IP) {
++ memcpy(&src, ((*skb).nh.raw) + sizeof(struct arphdr) +
++ ((*skb).nh.arph)->ar_hln, sizeof(__u32));
++ if (FWINV2(infostuff->saddr != (src & infostuff->smsk),
++ EBT_ARP_SRC_IP))
++ return 1;
++ }
++
++ if (infostuff->bitmask & EBT_ARP_DST_IP) {
++ memcpy(&dst, ((*skb).nh.raw)+sizeof(struct arphdr) +
++ (2*(((*skb).nh.arph)->ar_hln)) +
++ (((*skb).nh.arph)->ar_pln), sizeof(__u32));
++ if (FWINV2(infostuff->daddr != (dst & infostuff->dmsk),
++ EBT_ARP_DST_IP))
++ return 1;
++ }
++ }
++ return 0;
++}
++
++static int ebt_arp_check(const char *tablename, unsigned int hooknr,
++ const struct ebt_entry *e, void *data, unsigned int datalen)
++{
++ struct ebt_arp_info *infostuff = (struct ebt_arp_info *) data;
++
++ if (datalen != sizeof(struct ebt_arp_info))
++ return -EINVAL;
++ if (e->bitmask & (EBT_NOPROTO | EBT_802_3) ||
++ (e->ethproto != __constant_htons(ETH_P_ARP) &&
++ e->ethproto != __constant_htons(ETH_P_RARP)) ||
++ e->invflags & EBT_IPROTO)
++ return -EINVAL;
++ if (infostuff->bitmask & ~EBT_ARP_MASK)
++ return -EINVAL;
++ return 0;
++}
++
++static struct ebt_match filter_arp =
++{
++ {NULL, NULL}, EBT_ARP_MATCH, ebt_filter_arp, ebt_arp_check, NULL,
++ THIS_MODULE
++};
++
++static int __init init(void)
++{
++ return ebt_register_match(&filter_arp);
++}
++
++static void __exit fini(void)
++{
++ ebt_unregister_match(&filter_arp);
++}
++
++module_init(init);
++module_exit(fini);
++EXPORT_NO_SYMBOLS;
++MODULE_LICENSE("GPL");
+--- /dev/null Thu Aug 24 11:00:32 2000
++++ ebt2.0pre2/net/bridge/netfilter/ebt_ip.c Sat Apr 13 21:47:18 2002
+@@ -0,0 +1,81 @@
++/*
++ * ebt_ip
++ *
++ * Authors:
++ * Bart De Schuymer <bart.de.schuymer@pandora.be>
++ *
++ * April, 2002
++ *
++ */
++
++#include <linux/netfilter_bridge/ebtables.h>
++#include <linux/netfilter_bridge/ebt_ip.h>
++#include <linux/ip.h>
++#include <linux/module.h>
++
++#define FWINV2(bool,invflg) ((bool) ^ !!(infostuff->invflags & invflg))
++static int ebt_filter_ip(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *data,
++ unsigned int datalen, const struct ebt_counter *c)
++{
++ struct ebt_ip_info *infostuff = (struct ebt_ip_info *) data;
++
++ if (infostuff->bitmask & EBT_IP_TOS &&
++ FWINV2(infostuff->tos != ((*skb).nh.iph)->tos, EBT_IP_TOS))
++ return 1;
++ if (infostuff->bitmask & EBT_IP_PROTO && FWINV2(infostuff->protocol !=
++ ((*skb).nh.iph)->protocol, EBT_IP_PROTO))
++ return 1;
++ if (infostuff->bitmask & EBT_IP_SOURCE &&
++ FWINV2((((*skb).nh.iph)->saddr & infostuff->smsk) !=
++ infostuff->saddr, EBT_IP_SOURCE))
++ return 1;
++ if ((infostuff->bitmask & EBT_IP_DEST) &&
++ FWINV2((((*skb).nh.iph)->daddr & infostuff->dmsk) !=
++ infostuff->daddr, EBT_IP_DEST))
++ return 1;
++ return 0;
++}
++
++static int ebt_ip_check(const char *tablename, unsigned int hooknr,
++ const struct ebt_entry *e, void *data, unsigned int datalen)
++{
++ struct ebt_ip_info *infostuff = (struct ebt_ip_info *) data;
++
++ if (datalen != sizeof(struct ebt_ip_info)) {
++ return -EINVAL;
++ }
++ if (e->bitmask & (EBT_NOPROTO | EBT_802_3) ||
++ e->ethproto != __constant_htons(ETH_P_IP) ||
++ e->invflags & EBT_IPROTO)
++ {
++ return -EINVAL;
++ }
++ if (infostuff->bitmask & ~EBT_IP_MASK) {
++ return -EINVAL;
++ }
++ return 0;
++}
++
++static struct ebt_match filter_ip =
++{
++ {NULL, NULL}, EBT_IP_MATCH, ebt_filter_ip, ebt_ip_check, NULL,
++ THIS_MODULE
++};
++
++static int __init init(void)
++{
++ return ebt_register_match(&filter_ip);
++}
++
++static void __exit fini(void)
++{
++ ebt_unregister_match(&filter_ip);
++}
++
++module_init(init);
++module_exit(fini);
++EXPORT_NO_SYMBOLS;
++MODULE_LICENSE("GPL");
+--- /dev/null Thu Aug 24 11:00:32 2000
++++ ebt2.0pre2/net/bridge/netfilter/ebt_log.c Sat Apr 13 21:49:45 2002
+@@ -0,0 +1,111 @@
++/*
++ * ebt_log
++ *
++ * Authors:
++ * Bart De Schuymer <bart.de.schuymer@pandora.be>
++ *
++ * April, 2002
++ *
++ */
++
++#include <linux/netfilter_bridge/ebtables.h>
++#include <linux/netfilter_bridge/ebt_log.h>
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/if_arp.h>
++#include <linux/spinlock.h>
++
++static spinlock_t ebt_log_lock = SPIN_LOCK_UNLOCKED;
++
++static int ebt_log_check(const char *tablename, unsigned int hooknr,
++ const struct ebt_entry *e, void *data, unsigned int datalen)
++{
++ struct ebt_log_info *loginfo = (struct ebt_log_info *)data;
++
++ if (datalen != sizeof(struct ebt_log_info))
++ return -EINVAL;
++ if (loginfo->bitmask & ~EBT_LOG_MASK)
++ return -EINVAL;
++ if (loginfo->loglevel >= 8)
++ return -EINVAL;
++ loginfo->prefix[EBT_LOG_PREFIX_SIZE - 1] = '\0';
++ return 0;
++}
++
++static void ebt_log(const struct sk_buff *skb, const struct net_device *in,
++ const struct net_device *out, const void *data, unsigned int datalen,
++ const struct ebt_counter *c)
++{
++ struct ebt_log_info *loginfo = (struct ebt_log_info *)data;
++ char level_string[4] = "< >";
++ level_string[1] = '0' + loginfo->loglevel;
++
++ spin_lock_bh(&ebt_log_lock);
++ printk(level_string);
++ // max length: 29 + 10 + 2 * 16
++ printk("%s IN=%s OUT=%s ",
++ loginfo->prefix,
++ in ? in->name : "",
++ out ? out->name : "");
++
++ if (skb->dev->hard_header_len) {
++ int i;
++ unsigned char *p = (skb->mac.ethernet)->h_source;
++ printk("MAC source = ");
++ for (i = 0; i < ETH_ALEN; i++,p++)
++ printk("%02x%c", *p,
++ i == ETH_ALEN - 1
++ ? ' ':':');// length: 31
++ printk("MAC dest = ");
++ p = (skb->mac.ethernet)->h_dest;
++ for (i = 0; i < ETH_ALEN; i++,p++)
++ printk("%02x%c", *p,
++ i == ETH_ALEN - 1
++ ? ' ':':');// length: 29
++ }
++ // length: 14
++ printk("proto = 0x%04x", ntohs(((*skb).mac.ethernet)->h_proto));
++
++ if ((loginfo->bitmask & EBT_LOG_IP) && skb->mac.ethernet->h_proto ==
++ htons(ETH_P_IP)){
++ struct iphdr *iph = skb->nh.iph;
++ // max length: 46
++ printk(" IP SRC=%u.%u.%u.%u IP DST=%u.%u.%u.%u,",
++ NIPQUAD(iph->saddr), NIPQUAD(iph->daddr));
++ // max length: 26
++ printk(" IP tos=0x%02X, IP proto=%d", iph->tos, iph->protocol);
++ }
++
++ if ((loginfo->bitmask & EBT_LOG_ARP) &&
++ ((skb->mac.ethernet->h_proto == __constant_htons(ETH_P_ARP)) ||
++ (skb->mac.ethernet->h_proto == __constant_htons(ETH_P_RARP)))) {
++ struct arphdr * arph = skb->nh.arph;
++ // max length: 40
++ printk(" ARP HTYPE=%d, PTYPE=0x%04x, OPCODE=%d",
++ ntohs(arph->ar_hrd), ntohs(arph->ar_pro),
++ ntohs(arph->ar_op));
++ }
++ printk("\n");
++ spin_unlock_bh(&ebt_log_lock);
++}
++
++struct ebt_watcher log =
++{
++ {NULL, NULL}, EBT_LOG_WATCHER, ebt_log, ebt_log_check, NULL,
++ THIS_MODULE
++};
++
++static int __init init(void)
++{
++ return ebt_register_watcher(&log);
++}
++
++static void __exit fini(void)
++{
++ ebt_unregister_watcher(&log);
++}
++
++module_init(init);
++module_exit(fini);
++EXPORT_NO_SYMBOLS;
++MODULE_LICENSE("GPL");
+--- /dev/null Thu Aug 24 11:00:32 2000
++++ ebt2.0pre2/net/bridge/netfilter/ebt_nat.c Sat Apr 13 21:51:32 2002
+@@ -0,0 +1,118 @@
++/*
++ * ebt_nat
++ *
++ * Authors:
++ * Bart De Schuymer <bart.de.schuymer@pandora.be>
++ *
++ * April, 2002
++ *
++ */
++
++#include <linux/netfilter_bridge/ebtables.h>
++#include <linux/netfilter_bridge/ebt_nat.h>
++#include <linux/netfilter_bridge.h>
++#include <linux/skbuff.h>
++#include <linux/module.h>
++#include <net/sock.h>
++
++__u8 ebt_target_snat(struct sk_buff **pskb, unsigned int hooknr,
++ const struct net_device *in, const struct net_device *out,
++ const void *data, unsigned int datalen)
++{
++ struct ebt_nat_info *infostuff = (struct ebt_nat_info *) data;
++
++ if (skb_cloned(*pskb)) {
++ struct sk_buff *nskb = skb_copy(*pskb, GFP_ATOMIC);
++
++ if (!nskb)
++ return EBT_DROP;
++ if ((*pskb)->sk)
++ skb_set_owner_w(nskb, (*pskb)->sk);
++ kfree_skb(*pskb);
++ *pskb = nskb;
++ }
++ memcpy(((**pskb).mac.ethernet)->h_source, infostuff->mac,
++ ETH_ALEN * sizeof(unsigned char));
++ return EBT_ACCEPT;
++}
++
++__u8 ebt_target_dnat(struct sk_buff **pskb, unsigned int hooknr,
++ const struct net_device *in, const struct net_device *out,
++ const void *data, unsigned int datalen)
++{
++ struct ebt_nat_info *infostuff = (struct ebt_nat_info *) data;
++
++ if (skb_cloned(*pskb)) {
++ struct sk_buff *nskb = skb_copy(*pskb, GFP_ATOMIC);
++
++ if (!nskb)
++ return EBT_DROP;
++ if ((*pskb)->sk)
++ skb_set_owner_w(nskb, (*pskb)->sk);
++ kfree_skb(*pskb);
++ *pskb = nskb;
++ }
++ memcpy(((**pskb).mac.ethernet)->h_dest, infostuff->mac,
++ ETH_ALEN * sizeof(unsigned char));
++ return EBT_ACCEPT;
++}
++
++int ebt_target_snat_check(const char *tablename, unsigned int hooknr,
++ const struct ebt_entry *e, void *data, unsigned int datalen)
++{
++ if (strcmp(tablename, "nat"))
++ return -EINVAL;
++ if (datalen != sizeof(struct ebt_nat_info))
++ return -EINVAL;
++ if (hooknr != NF_BR_POST_ROUTING)
++ return -EINVAL;
++ return 0;
++}
++
++int ebt_target_dnat_check(const char *tablename, unsigned int hooknr,
++ const struct ebt_entry *e, void *data, unsigned int datalen)
++{
++ if (strcmp(tablename, "nat"))
++ return -EINVAL;
++ if (datalen != sizeof(struct ebt_nat_info))
++ return -EINVAL;
++ if (hooknr != NF_BR_PRE_ROUTING && hooknr != NF_BR_LOCAL_OUT)
++ return -EINVAL;
++ return 0;
++}
++
++struct ebt_target snat =
++{
++ {NULL, NULL}, EBT_SNAT_TARGET, ebt_target_snat, ebt_target_snat_check,
++ NULL, THIS_MODULE
++};
++
++struct ebt_target dnat =
++{
++ {NULL, NULL}, EBT_DNAT_TARGET, ebt_target_dnat, ebt_target_dnat_check,
++ NULL, THIS_MODULE
++};
++
++static int __init init(void)
++{
++ int ret;
++ ret = ebt_register_target(&snat);
++ if (ret != 0)
++ return ret;
++ ret = ebt_register_target(&dnat);
++ if (ret == 0)
++ return 0;
++ ebt_unregister_target(&snat);
++ return ret;
++}
++
++static void __exit fini(void)
++{
++ ebt_unregister_target(&snat);
++ ebt_unregister_target(&dnat);
++}
++
++module_init(init);
++module_exit(fini);
++EXPORT_NO_SYMBOLS;
++MODULE_LICENSE("GPL");
+--- /dev/null Thu Aug 24 11:00:32 2000
++++ ebt2.0pre2/net/bridge/netfilter/ebtables.c Sat Apr 13 21:36:18 2002
+@@ -0,0 +1,1168 @@
++/*
++ * ebtables
++ *
++ * Author:
++ * Bart De Schuymer <bart.de.schuymer@pandora.be>
++ *
++ * ebtables.c,v 2.0, April, 2002
++ *
++ * This code is strongly inspired by the iptables code, which is
++ * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++// used for print_string
++#include <linux/sched.h>
++#include <linux/tty.h>
++
++#include <linux/module.h>
++#include <linux/vmalloc.h>
++#include <linux/skbuff.h>
++#include <linux/if_ether.h>
++#include <linux/netfilter_bridge.h>
++#include <linux/netfilter_ipv4.h>
++#include <linux/netfilter_bridge/ebtables.h>
++#include <linux/spinlock.h>
++#include <asm/uaccess.h>
++#include <linux/smp.h>
++#include <net/sock.h>
++
++// list_named_find
++#define ASSERT_READ_LOCK(x)
++#define ASSERT_WRITE_LOCK(x)
++#include <linux/netfilter_ipv4/listhelp.h>
++
++#if 0 // use this for remote debugging
++#define BUGPRINT(args) print_string(args);
++#else
++#define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\
++ "report to author: "format, ## args)
++// #define BUGPRINT(format, args...)
++#endif
++#define MEMPRINT(format, args...) printk("kernel msg: ebtables "\
++ ": out of memory: "format, ## args)
++// #define MEMPRINT(format, args...)
++
++static void print_string(char *str);
++
++static DECLARE_MUTEX(ebt_mutex);
++static LIST_HEAD(ebt_tables);
++static LIST_HEAD(ebt_targets);
++static LIST_HEAD(ebt_matches);
++static LIST_HEAD(ebt_watchers);
++
++static struct ebt_target ebt_standard_target =
++{ {NULL, NULL}, EBT_STANDARD_TARGET, NULL, NULL, NULL, NULL};
++
++static inline int ebt_do_watcher (struct ebt_entry_watcher *w,
++ const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const struct ebt_counter *c)
++{
++ w->u.watcher->watcher(skb, in, out, w->data,
++ w->watcher_size - sizeof(struct ebt_entry_watcher), c);
++ // watchers don't give a verdict
++ return 0;
++}
++
++static inline int ebt_do_match (struct ebt_entry_match *m,
++ const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const struct ebt_counter *c)
++{
++ return m->u.match->match(skb, in, out, m->data,
++ m->match_size - sizeof(struct ebt_entry_match), c);
++}
++
++static inline int ebt_dev_check(char *entry, const struct net_device *device)
++{
++ if (*entry == '\0')
++ return 0;
++ if (!device)
++ return 1;
++ return strncmp(entry, device->name, IFNAMSIZ);
++}
++
++// Do some firewalling
++unsigned int ebt_do_table (unsigned int hook, struct sk_buff **pskb,
++ const struct net_device *in, const struct net_device *out,
++ struct ebt_table *table)
++{
++ int i, nentries;
++ struct ebt_entry *point;
++ struct ebt_counter *counter_base;
++ struct ebt_entry_target *t;
++ __u8 verdict;
++
++ read_lock_bh(&table->lock);
++ nentries = table->private->hook_entry[hook]->nentries;
++ point = (struct ebt_entry *)(table->private->hook_entry[hook]->data);
++ counter_base = table->private->counters +
++ cpu_number_map(smp_processor_id()) * table->private->nentries +
++ table->private->counter_entry[hook];
++ #define FWINV(bool,invflg) ((bool) ^ !!(point->invflags & invflg))
++ for (i = 0; i < nentries; i++) {
++ if ( ( point->bitmask & EBT_NOPROTO ||
++ FWINV(point->ethproto == ((**pskb).mac.ethernet)->h_proto,
++ EBT_IPROTO)
++ || FWINV(ntohs(((**pskb).mac.ethernet)->h_proto) < 1536 &&
++ (point->bitmask & EBT_802_3), EBT_IPROTO) )
++ && FWINV(!ebt_dev_check((char *)(point->in), in), EBT_IIN)
++ && FWINV(!ebt_dev_check((char *)(point->out), out), EBT_IOUT)
++ ) {
++ if ( (point->bitmask & EBT_SOURCEMAC) &&
++ FWINV(!!memcmp(point->sourcemac,
++ ((**pskb).mac.ethernet)->h_source, ETH_ALEN),
++ EBT_ISOURCE) )
++ goto letscontinue;
++
++ if ( (point->bitmask & EBT_DESTMAC) &&
++ FWINV(!!memcmp(point->destmac,
++ ((**pskb).mac.ethernet)->h_dest, ETH_ALEN),
++ EBT_IDEST) )
++ goto letscontinue;
++
++ if (EBT_MATCH_ITERATE(point, ebt_do_match, *pskb, in,
++ out, counter_base + i) != 0)
++ goto letscontinue;
++
++ // increase counter
++ (*(counter_base + i)).pcnt++;
++
++ // these should only watch: not modify, nor tell us
++ // what to do with the packet
++ EBT_WATCHER_ITERATE(point, ebt_do_watcher, *pskb, in,
++ out, counter_base + i);
++
++ t = (struct ebt_entry_target *)
++ (((char *)point) + point->target_offset);
++ // standard target
++ if (!t->u.target->target)
++ verdict =
++ ((struct ebt_standard_target *)t)->verdict;
++ else
++ verdict = t->u.target->target(pskb, hook,
++ in, out, t->data, t->target_size);
++ if (verdict == EBT_ACCEPT) {
++ read_unlock_bh(&table->lock);
++ return NF_ACCEPT;
++ }
++ if (verdict == EBT_DROP) {
++ read_unlock_bh(&table->lock);
++ return NF_DROP;
++ }
++ if (verdict != EBT_CONTINUE) {
++ read_unlock_bh(&table->lock);
++ BUGPRINT("Illegal target while "
++ "firewalling!!\n");
++ // Try not to get oopsen
++ return NF_DROP;
++ }
++ }
++letscontinue:
++ point = (struct ebt_entry *)
++ (((char *)point) + point->next_offset);
++ }
++
++ if ( table->private->hook_entry[hook]->policy == EBT_ACCEPT ) {
++ read_unlock_bh(&table->lock);
++ return NF_ACCEPT;
++ }
++ read_unlock_bh(&table->lock);
++ return NF_DROP;
++}
++
++static inline int
++ebt_check_match(struct ebt_entry_match *m, struct ebt_entry *e,
++ const char *name, unsigned int hook, unsigned int *cnt)
++{
++ struct ebt_match *match;
++ int ret;
++
++ m->u.name[EBT_FUNCTION_MAXNAMELEN - 1] = '\0';
++ ret = down_interruptible(&ebt_mutex);
++ if (ret != 0)
++ return -EFAULT;
++ if (!(match = (struct ebt_match *)
++ list_named_find(&ebt_matches, m->u.name))) {
++ up(&ebt_mutex);
++ return -ENOENT;
++ }
++ m->u.match = match;
++ if (match->check &&
++ match->check(name, hook, e, m->data,
++ m->match_size - sizeof(*m)) != 0) {
++ BUGPRINT("match->check failed\n");
++ up(&ebt_mutex);
++ return -EINVAL;
++ }
++ if (match->me)
++ __MOD_INC_USE_COUNT(match->me);
++ up(&ebt_mutex);
++ (*cnt)++;
++ return 0;
++}
++
++static inline int
++ebt_check_watcher(struct ebt_entry_watcher *w, struct ebt_entry *e,
++ const char *name, unsigned int hook, unsigned int *cnt)
++{
++ struct ebt_watcher *watcher;
++ int ret;
++
++ ret = down_interruptible(&ebt_mutex);
++ if (ret != 0)
++ return -EFAULT;
++ w->u.name[EBT_FUNCTION_MAXNAMELEN - 1] = '\0';
++ if (!(watcher = (struct ebt_watcher *)
++ list_named_find(&ebt_watchers, w->u.name))) {
++ up(&ebt_mutex);
++ return -ENOENT;
++ }
++ w->u.watcher = watcher;
++ if (watcher->check &&
++ watcher->check(name, hook, e, w->data,
++ w->watcher_size - sizeof(*w)) != 0) {
++ BUGPRINT("watcher->check failed\n");
++ up(&ebt_mutex);
++ return -EINVAL;
++ }
++ if (watcher->me)
++ __MOD_INC_USE_COUNT(watcher->me);
++ up(&ebt_mutex);
++ (*cnt)++;
++ return 0;
++}
++
++// this one is very careful, as it is the first function
++// to parse the userspace data
++static inline int
++ebt_check_entry_size_and_hooks(struct ebt_entry *e,
++ struct ebt_table_info *newinfo, char *base, char *limit,
++ struct ebt_entries **hook_entries, unsigned int *n, unsigned int *cnt,
++ unsigned int *totalcnt, unsigned int valid_hooks)
++{
++ int i;
++
++ for (i = 0; i < NF_BR_NUMHOOKS; i++) {
++ if ((valid_hooks & (1 << i)) == 0)
++ continue;
++ if ( (char *)hook_entries[i] - base ==
++ (char *)e - newinfo->entries)
++ break;
++ }
++ // beginning of a new chain
++ if (i != NF_BR_NUMHOOKS) {
++ if ((e->bitmask & EBT_ENTRY_OR_ENTRIES) != 0) {
++ // we make userspace set this right,
++ // so there is no misunderstanding
++ BUGPRINT("EBT_ENTRY_OR_ENTRIES shouldn't be set "
++ "in distinguisher\n");
++ return -EINVAL;
++ }
++ // this checks if the previous chain has as many entries
++ // as it said it has
++ if (*n != *cnt) {
++ BUGPRINT("nentries does not equal the nr of entries "
++ "in the chain\n");
++ return -EINVAL;
++ }
++ // before we look at the struct, be sure it is not too big
++ if ((char *)hook_entries[i] + sizeof(struct ebt_entries)
++ > limit) {
++ BUGPRINT("entries_size too small\n");
++ return -EINVAL;
++ }
++ if (((struct ebt_entries *)e)->policy != EBT_DROP &&
++ ((struct ebt_entries *)e)->policy != EBT_ACCEPT) {
++ BUGPRINT("bad policy\n");
++ return -EINVAL;
++ }
++ *n = ((struct ebt_entries *)e)->nentries;
++ *cnt = 0;
++ newinfo->hook_entry[i] = (struct ebt_entries *)e;
++ newinfo->counter_entry[i] = *totalcnt;
++ return 0;
++ }
++ // a plain old entry, heh
++ if (sizeof(struct ebt_entry) > e->watchers_offset ||
++ e->watchers_offset > e->target_offset ||
++ e->target_offset > e->next_offset) {
++ BUGPRINT("entry offsets not in right order\n");
++ return -EINVAL;
++ }
++ if (((char *)e) + e->next_offset - newinfo->entries > limit - base) {
++ BUGPRINT("entry offsets point too far\n");
++ return -EINVAL;
++ }
++
++ if ((e->bitmask & EBT_ENTRY_OR_ENTRIES) == 0) {
++ BUGPRINT("EBT_ENTRY_OR_ENTRIES should be set in "
++ "bitmask for an entry\n");
++ return -EINVAL;
++ }
++ (*cnt)++;
++ (*totalcnt)++;
++ return 0;
++}
++
++static inline int
++ebt_cleanup_match(struct ebt_entry_match *m, unsigned int *i)
++{
++ if (i && (*i)-- == 0)
++ return 1;
++ if (m->u.match->destroy)
++ m->u.match->destroy(m->data, m->match_size - sizeof(*m));
++ if (m->u.match->me)
++ __MOD_DEC_USE_COUNT(m->u.match->me);
++
++ return 0;
++}
++
++static inline int
++ebt_cleanup_watcher(struct ebt_entry_watcher *w, unsigned int *i)
++{
++ if (i && (*i)-- == 0)
++ return 1;
++ if (w->u.watcher->destroy)
++ w->u.watcher->destroy(w->data, w->watcher_size - sizeof(*w));
++ if (w->u.watcher->me)
++ __MOD_DEC_USE_COUNT(w->u.watcher->me);
++
++ return 0;
++}
++
++static inline int
++ebt_check_entry(struct ebt_entry *e, struct ebt_table_info *newinfo,
++ const char *name, unsigned int *cnt, unsigned int valid_hooks)
++{
++ struct ebt_entry_target *t;
++ struct ebt_target *target;
++ unsigned int i, j, hook = 0;
++ int ret;
++
++ // Don't mess with the struct ebt_entries
++ if ((e->bitmask & EBT_ENTRY_OR_ENTRIES) == 0)
++ return 0;
++
++ if (e->bitmask & ~EBT_F_MASK) {
++ BUGPRINT("Unknown flag for bitmask\n");
++ return -EINVAL;
++ }
++ if (e->invflags & ~EBT_INV_MASK) {
++ BUGPRINT("Unknown flag for inv bitmask\n");
++ return -EINVAL;
++ }
++ if ( (e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3) ) {
++ BUGPRINT("NOPROTO & 802_3 not allowed\n");
++ return -EINVAL;
++ }
++ // what hook do we belong to?
++ for (i = 0; i < NF_BR_NUMHOOKS; i++) {
++ if ((valid_hooks & (1 << i)) == 0)
++ continue;
++ if ((char *)newinfo->hook_entry[i] < (char *)e)
++ hook = i;
++ else
++ break;
++ }
++ i = 0;
++ ret = EBT_MATCH_ITERATE(e, ebt_check_match, e, name, hook, &i);
++ if (ret != 0)
++ goto cleanup_matches;
++ j = 0;
++ ret = EBT_WATCHER_ITERATE(e, ebt_check_watcher, e, name, hook, &j);
++ if (ret != 0)
++ goto cleanup_watchers;
++ t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
++ ret = down_interruptible(&ebt_mutex);
++ if (ret != 0)
++ goto cleanup_watchers;
++ t->u.name[EBT_FUNCTION_MAXNAMELEN - 1] = '\0';
++ if (!(target = (struct ebt_target *)
++ list_named_find(&ebt_targets, t->u.name))) {
++ ret = -ENOENT;
++ up(&ebt_mutex);
++ goto cleanup_watchers;
++ }
++ if (target->me)
++ __MOD_INC_USE_COUNT(target->me);
++ up(&ebt_mutex);
++
++ t->u.target = target;
++ if (t->u.target == &ebt_standard_target) {
++ if (e->target_offset + sizeof(struct ebt_standard_target) >
++ e->next_offset) {
++ BUGPRINT("Standard target size too big\n");
++ ret = -EFAULT;
++ goto cleanup_watchers;
++ }
++ if (((struct ebt_standard_target *)t)->verdict >=
++ NUM_STANDARD_TARGETS) {
++ BUGPRINT("Invalid standard target\n");
++ ret = -EFAULT;
++ goto cleanup_watchers;
++ }
++ } else if (t->u.target->check &&
++ t->u.target->check(name, hook, e, t->data,
++ t->target_size - sizeof(*t)) != 0) {
++ if (t->u.target->me)
++ __MOD_DEC_USE_COUNT(t->u.target->me);
++ ret = -EFAULT;
++ goto cleanup_watchers;
++ }
++ (*cnt)++;
++ return 0;
++cleanup_watchers:
++ EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, &j);
++cleanup_matches:
++ EBT_MATCH_ITERATE(e, ebt_cleanup_match, &i);
++ return ret;
++}
++
++static inline int
++ebt_cleanup_entry(struct ebt_entry *e, unsigned int *cnt)
++{
++ struct ebt_entry_target *t;
++
++ if (e->bitmask == 0)
++ return 0;
++ // we're done
++ if (cnt && (*cnt)-- == 0)
++ return 1;
++ EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, NULL);
++ EBT_MATCH_ITERATE(e, ebt_cleanup_match, NULL);
++ t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
++ if (t->u.target->destroy)
++ t->u.target->destroy(t->data, t->target_size - sizeof(*t));
++ if (t->u.target->me)
++ __MOD_DEC_USE_COUNT(t->u.target->me);
++
++ return 0;
++}
++
++// do the parsing of the table/chains/entries/matches/watchers/targets, heh
++static int translate_table(struct ebt_replace *repl,
++ struct ebt_table_info *newinfo)
++{
++ unsigned int i, j, k;
++ int ret;
++
++ i = 0;
++ while (i < NF_BR_NUMHOOKS && !(repl->valid_hooks & (1 << i)))
++ i++;
++ if (i == NF_BR_NUMHOOKS) {
++ BUGPRINT("No valid hooks specified\n");
++ return -EINVAL;
++ }
++ if (repl->hook_entry[i] != (struct ebt_entries *)repl->entries) {
++ BUGPRINT("Chains don't start at beginning\n");
++ return -EINVAL;
++ }
++ // make sure chains are ordered after each other in same order
++ // as their corresponding hooks
++ for (j = i + 1; j < NF_BR_NUMHOOKS; j++) {
++ if (!(repl->valid_hooks & (1 << j)))
++ continue;
++ if ( repl->hook_entry[j] <= repl->hook_entry[i] ) {
++ BUGPRINT("Hook order must be followed\n");
++ return -EINVAL;
++ }
++ i = j;
++ }
++
++ for (i = 0; i < NF_BR_NUMHOOKS; i++) {
++ newinfo->hook_entry[i] = NULL;
++ newinfo->counter_entry[i] = 0;
++ }
++
++ newinfo->entries_size = repl->entries_size;
++ newinfo->nentries = repl->nentries;
++
++ // do some early checks and initialize some things
++ i = 0; // holds the expected nr. of entries for the chain
++ j = 0; // holds the up to now counted entries for the chain
++ k = 0; // holds the total nr. of entries, should equal
++ // newinfo->nentries afterwards
++ ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
++ ebt_check_entry_size_and_hooks, newinfo, repl->entries,
++ repl->entries + repl->entries_size, repl->hook_entry, &i, &j, &k,
++ repl->valid_hooks);
++
++ if (ret != 0)
++ return ret;
++
++ if (i != j) {
++ BUGPRINT("nentries does not equal the nr of entries in the "
++ "(last) chain\n");
++ return -EINVAL;
++ }
++ if (k != newinfo->nentries) {
++ BUGPRINT("Total nentries is wrong\n");
++ return -EINVAL;
++ }
++
++ // check if all valid hooks have a chain
++ for (i = 0; i < NF_BR_NUMHOOKS; i++) {
++ if (newinfo->hook_entry[i] == NULL &&
++ (repl->valid_hooks & (1 << i))){
++ BUGPRINT("Valid hook without chain\n");
++ return -EINVAL;
++ }
++ }
++
++ // we just don't trust anything
++ repl->name[EBT_TABLE_MAXNAMELEN - 1] = '\0';
++ // used to know what we need to clean up if something goes wrong
++ i = 0;
++ ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
++ ebt_check_entry, newinfo, repl->name, &i, repl->valid_hooks);
++ if (ret != 0) {
++ BUGPRINT("ebt_check_entry gave fault back\n");
++ EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, ebt_cleanup_entry, &i);
++ }
++ return ret;
++}
++
++// called under write_lock
++static inline void get_counters(struct ebt_table_info *info,
++ struct ebt_counter *counters)
++{
++ int i, cpu, counter_base;
++
++ // counters of cpu 0
++ memcpy(counters, info->counters,
++ sizeof(struct ebt_counter) * info->nentries);
++ // add other counters to those of cpu 0
++ for (cpu = 1; cpu < smp_num_cpus; cpu++) {
++ counter_base = cpu * info->nentries;
++ for (i = 0; i < info->nentries; i++)
++ counters[i].pcnt +=
++ info->counters[counter_base + i].pcnt;
++ }
++}
++
++// replace the table
++static int do_replace(void *user, unsigned int len)
++{
++ int ret;
++ struct ebt_table_info *newinfo;
++ struct ebt_replace tmp;
++ struct ebt_table *t;
++ struct ebt_counter *counterstmp = NULL;
++ // used to be able to unlock earlier
++ struct ebt_table_info *table;
++
++ if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
++ return -EFAULT;
++
++ if (len != sizeof(tmp) + tmp.entries_size) {
++ BUGPRINT("Wrong len argument\n");
++ return -EINVAL;
++ }
++
++ if (tmp.entries_size == 0) {
++ BUGPRINT("Entries_size never zero\n");
++ return -EINVAL;
++ }
++ newinfo = (struct ebt_table_info *)
++ vmalloc(sizeof(struct ebt_table_info));
++ if (!newinfo)
++ return -ENOMEM;
++
++ if (tmp.nentries) {
++ newinfo->counters = (struct ebt_counter *)vmalloc(
++ sizeof(struct ebt_counter) * tmp.nentries * smp_num_cpus);
++ if (!newinfo->counters) {
++ ret = -ENOMEM;
++ goto free_newinfo;
++ }
++ memset(newinfo->counters, 0,
++ sizeof(struct ebt_counter) * tmp.nentries * smp_num_cpus);
++ }
++ else
++ newinfo->counters = NULL;
++
++ newinfo->entries = (char *)vmalloc(tmp.entries_size);
++ if (!newinfo->entries) {
++ ret = -ENOMEM;
++ goto free_counters;
++ }
++ if (copy_from_user(
++ newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
++ BUGPRINT("Couldn't copy entries from userspace\n");
++ ret = -EFAULT;
++ goto free_entries;
++ }
++
++ // the user wants counters back
++ // the check on the size is done later, when we have the lock
++ if (tmp.num_counters) {
++ counterstmp = (struct ebt_counter *)
++ vmalloc(tmp.num_counters * sizeof(struct ebt_counter));
++ if (!counterstmp) {
++ ret = -ENOMEM;
++ goto free_entries;
++ }
++ }
++ else
++ counterstmp = NULL;
++
++ ret = translate_table(&tmp, newinfo);
++
++ if (ret != 0)
++ goto free_counterstmp;
++
++ ret = down_interruptible(&ebt_mutex);
++
++ if (ret != 0)
++ goto free_cleanup;
++
++ if (!(t = (struct ebt_table *)list_named_find(&ebt_tables, tmp.name))) {
++ ret = -ENOENT;
++ // give some help to the poor user
++ print_string("The table is not present, try insmod\n");
++ goto free_unlock;
++ }
++
++ // the table doesn't like it
++ if (t->check && (ret = t->check(newinfo, tmp.valid_hooks)))
++ goto free_unlock;
++
++ if (tmp.num_counters && tmp.num_counters != t->private->nentries) {
++ BUGPRINT("Wrong nr. of counters requested\n");
++ ret = -EINVAL;
++ goto free_unlock;
++ }
++
++ // we have the mutex lock, so no danger in reading this pointer
++ table = t->private;
++ // we need an atomic snapshot of the counters
++ write_lock_bh(&t->lock);
++ if (tmp.num_counters)
++ get_counters(t->private, counterstmp);
++
++ t->private = newinfo;
++ write_unlock_bh(&t->lock);
++ up(&ebt_mutex);
++ // So, a user can change the chains even while having messed up his counter
++ // allocation. The only reason for doing it this way is that the lock
++ // then has to be taken only once, and it doesn't put the kernel into a
++ // dangerous state.
++ if (tmp.num_counters &&
++ copy_to_user(tmp.counters, counterstmp,
++ tmp.num_counters * sizeof(struct ebt_counter))) {
++ BUGPRINT("Couldn't copy counters to userspace\n");
++ ret = -EFAULT;
++ }
++ else
++ ret = 0;
++
++ // decrease module count and free resources
++ EBT_ENTRY_ITERATE(table->entries, table->entries_size,
++ ebt_cleanup_entry, NULL);
++
++ vfree(table->entries);
++ if (table->counters)
++ vfree(table->counters);
++ vfree(table);
++
++ if (counterstmp)
++ vfree(counterstmp);
++ return ret;
++
++free_unlock:
++ up(&ebt_mutex);
++free_cleanup:
++ EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
++ ebt_cleanup_entry, NULL);
++free_counterstmp:
++ if (counterstmp)
++ vfree(counterstmp);
++free_entries:
++ if (newinfo->entries)
++ vfree(newinfo->entries);
++free_counters:
++ if (newinfo->counters)
++ vfree(newinfo->counters);
++free_newinfo:
++ if (newinfo)
++ vfree(newinfo);
++ return ret;
++}
++
++int ebt_register_target(struct ebt_target *target)
++{
++ int ret;
++
++ ret = down_interruptible(&ebt_mutex);
++ if (ret != 0)
++ return ret;
++ if (!list_named_insert(&ebt_targets, target)) {
++ up(&ebt_mutex);
++ return -EEXIST;
++ }
++ up(&ebt_mutex);
++ MOD_INC_USE_COUNT;
++
++ return 0;
++}
++
++void ebt_unregister_target(struct ebt_target *target)
++{
++ down(&ebt_mutex);
++ LIST_DELETE(&ebt_targets, target);
++ up(&ebt_mutex);
++ MOD_DEC_USE_COUNT;
++}
++
++int ebt_register_match(struct ebt_match *match)
++{
++ int ret;
++
++ ret = down_interruptible(&ebt_mutex);
++ if (ret != 0)
++ return ret;
++ if (!list_named_insert(&ebt_matches, match)) {
++ up(&ebt_mutex);
++ return -EEXIST;
++ }
++ up(&ebt_mutex);
++ MOD_INC_USE_COUNT;
++
++ return 0;
++}
++
++void ebt_unregister_match(struct ebt_match *match)
++{
++ down(&ebt_mutex);
++ LIST_DELETE(&ebt_matches, match);
++ up(&ebt_mutex);
++ MOD_DEC_USE_COUNT;
++}
++
++int ebt_register_watcher(struct ebt_watcher *watcher)
++{
++ int ret;
++
++ ret = down_interruptible(&ebt_mutex);
++ if (ret != 0)
++ return ret;
++ if (!list_named_insert(&ebt_watchers, watcher)) {
++ up(&ebt_mutex);
++ return -EEXIST;
++ }
++ up(&ebt_mutex);
++ MOD_INC_USE_COUNT;
++
++ return 0;
++}
++
++void ebt_unregister_watcher(struct ebt_watcher *watcher)
++{
++ down(&ebt_mutex);
++ LIST_DELETE(&ebt_watchers, watcher);
++ up(&ebt_mutex);
++ MOD_DEC_USE_COUNT;
++}
++
++int ebt_register_table(struct ebt_table *table)
++{
++ struct ebt_table_info *newinfo;
++ int ret;
++
++ if (!table || !table->table ||!table->table->entries ||
++ table->table->entries_size == 0 ||
++ table->table->counters || table->private) {
++ BUGPRINT("Bad table data for ebt_register_table!!!\n");
++ return -EINVAL;
++ }
++
++ newinfo = (struct ebt_table_info *)
++ vmalloc(sizeof(struct ebt_table_info));
++ ret = -ENOMEM;
++ if (!newinfo)
++ return -ENOMEM;
++
++ newinfo->entries = (char *)vmalloc(table->table->entries_size);
++ if (!(newinfo->entries))
++ goto free_newinfo;
++
++ memcpy(newinfo->entries, table->table->entries,
++ table->table->entries_size);
++
++ if (table->table->nentries) {
++ newinfo->counters = (struct ebt_counter *)
++ vmalloc(table->table->nentries *
++ sizeof(struct ebt_counter) * smp_num_cpus);
++ if (!newinfo->counters)
++ goto free_entries;
++ memset(newinfo->counters, 0, table->table->nentries *
++ sizeof(struct ebt_counter) * smp_num_cpus);
++ }
++ else
++ newinfo->counters = NULL;
++
++ // fill in newinfo and parse the entries
++ ret = translate_table(table->table, newinfo);
++ if (ret != 0) {
++ BUGPRINT("Translate_table failed\n");
++ goto free_counters;
++ }
++
++ if (table->check && table->check(newinfo, table->valid_hooks)) {
++ BUGPRINT("The table doesn't like its own initial data, lol\n");
++ // undo translate_table() instead of leaking newinfo
++ EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
++    ebt_cleanup_entry, NULL);
++ ret = -EINVAL;
++ goto free_counters;
++ }
++
++ table->private = newinfo;
++ table->lock = RW_LOCK_UNLOCKED;
++ ret = down_interruptible(&ebt_mutex);
++ if (ret != 0)
++ goto free_counters;
++
++ if (list_named_find(&ebt_tables, table->name)) {
++ ret = -EEXIST;
++ BUGPRINT("Table name already exists\n");
++ goto free_unlock;
++ }
++
++ list_prepend(&ebt_tables, table);
++ up(&ebt_mutex);
++ MOD_INC_USE_COUNT;
++ return 0;
++free_unlock:
++ up(&ebt_mutex);
++free_counters:
++ if (newinfo->counters)
++ vfree(newinfo->counters);
++free_entries:
++ vfree(newinfo->entries);
++free_newinfo:
++ vfree(newinfo);
++ return ret;
++}
++
++void ebt_unregister_table(struct ebt_table *table)
++{
++ if (!table) {
++ BUGPRINT("Request to unregister NULL table!!!\n");
++ return;
++ }
++ down(&ebt_mutex);
++ LIST_DELETE(&ebt_tables, table);
++ up(&ebt_mutex);
++ EBT_ENTRY_ITERATE(table->private->entries,
++ table->private->entries_size, ebt_cleanup_entry, NULL);
++ if (table->private->counters)
++ vfree(table->private->counters);
++ if (table->private->entries)
++ vfree(table->private->entries);
++ vfree(table->private);
++ MOD_DEC_USE_COUNT;
++}
++
++// userspace just supplied us with counters
++static int update_counters(void *user, unsigned int len)
++{
++ int i, ret;
++ struct ebt_counter *tmp;
++ struct ebt_replace hlp;
++ struct ebt_table *t;
++
++ if (copy_from_user(&hlp, user, sizeof(hlp)))
++ return -EFAULT;
++
++ if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
++ return -EINVAL;
++ if (hlp.num_counters == 0)
++ return -EINVAL;
++
++ if ( !(tmp = (struct ebt_counter *)
++ vmalloc(hlp.num_counters * sizeof(struct ebt_counter))) ){
++ MEMPRINT("Update_counters && nomemory\n");
++ return -ENOMEM;
++ }
++
++ hlp.name[EBT_TABLE_MAXNAMELEN - 1] = '\0';
++ ret = down_interruptible(&ebt_mutex);
++ if (ret != 0)
++ goto free_tmp;
++
++ if (!(t = (struct ebt_table *)list_named_find(&ebt_tables, hlp.name))) {
++ ret = -EINVAL;
++ goto unlock_mutex;
++ }
++
++ if (hlp.num_counters != t->private->nentries) {
++ BUGPRINT("Wrong nr of counters\n");
++ ret = -EINVAL;
++ goto unlock_mutex;
++ }
++
++ if ( copy_from_user(tmp, hlp.counters,
++ hlp.num_counters * sizeof(struct ebt_counter)) ) {
++ BUGPRINT("Update_counters && !cfu\n");
++ ret = -EFAULT;
++ goto unlock_mutex;
++ }
++
++ // we want an atomic add of the counters
++ write_lock_bh(&t->lock);
++
++ // we add to the counters of the first cpu
++ for (i = 0; i < hlp.num_counters; i++)
++ t->private->counters[i].pcnt += tmp[i].pcnt;
++
++ write_unlock_bh(&t->lock);
++ ret = 0;
++unlock_mutex:
++ up(&ebt_mutex);
++free_tmp:
++ vfree(tmp);
++ return ret;
++}
++
++static inline int ebt_make_matchname(struct ebt_entry_match *m,
++ char *base, char *ubase)
++{
++ char *hlp = ubase - base + (char *)m;
++ if (copy_to_user(hlp, m->u.match->name, EBT_FUNCTION_MAXNAMELEN))
++ return -EFAULT;
++ return 0;
++}
++
++static inline int ebt_make_watchername(struct ebt_entry_watcher *w,
++ char *base, char *ubase)
++{
++ char *hlp = ubase - base + (char *)w;
++ if (copy_to_user(hlp , w->u.watcher->name, EBT_FUNCTION_MAXNAMELEN))
++ return -EFAULT;
++ return 0;
++}
++
++static inline int ebt_make_names(struct ebt_entry *e, char *base, char *ubase)
++{
++ int ret;
++ char *hlp = ubase - base + (char *)e + e->target_offset;
++ struct ebt_entry_target *t;
++
++ if ((e->bitmask & EBT_ENTRY_OR_ENTRIES) == 0)
++ return 0;
++
++ t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
++
++ ret = EBT_MATCH_ITERATE(e, ebt_make_matchname, base, ubase);
++ if (ret != 0)
++ return ret;
++ ret = EBT_WATCHER_ITERATE(e, ebt_make_watchername, base, ubase);
++ if (ret != 0)
++ return ret;
++ if (copy_to_user(hlp, t->u.target->name, EBT_FUNCTION_MAXNAMELEN))
++ return -EFAULT;
++ return 0;
++}
++
++// called with ebt_mutex down
++static int copy_everything_to_user(struct ebt_table *t, void *user, int *len)
++{
++ struct ebt_replace tmp;
++ struct ebt_table_info *info = t->private;
++ struct ebt_counter *counterstmp;
++ int i;
++
++ if (copy_from_user(&tmp, user, sizeof(tmp))) {
++ BUGPRINT("Cfu didn't work\n");
++ return -EFAULT;
++ }
++
++ if (*len != sizeof(struct ebt_replace) + info->entries_size +
++ (tmp.num_counters? info->nentries * sizeof(struct ebt_counter): 0)) {
++ BUGPRINT("Wrong size\n");
++ return -EINVAL;
++ }
++
++ if (tmp.nentries != info->nentries) {
++ BUGPRINT("Nentries wrong\n");
++ return -EINVAL;
++ }
++
++ if (tmp.entries_size != info->entries_size) {
++ BUGPRINT("Wrong size\n");
++ return -EINVAL;
++ }
++
++ // userspace might not need the counters
++ if (tmp.num_counters) {
++ if (tmp.num_counters != info->nentries) {
++ BUGPRINT("Num_counters wrong\n");
++ return -EINVAL;
++ }
++ counterstmp = (struct ebt_counter *)
++ vmalloc(info->nentries * sizeof(struct ebt_counter));
++ if (!counterstmp) {
++ BUGPRINT("Couldn't copy counters, out of memory\n");
++ return -ENOMEM;
++ }
++ write_lock_bh(&t->lock);
++ get_counters(info, counterstmp);
++ write_unlock_bh(&t->lock);
++
++ if (copy_to_user(tmp.counters, counterstmp,
++ info->nentries * sizeof(struct ebt_counter))) {
++ BUGPRINT("Couldn't copy counters to userspace\n");
++ vfree(counterstmp);
++ return -EFAULT;
++ }
++ vfree(counterstmp);
++ }
++
++ if (copy_to_user(tmp.entries, info->entries, info->entries_size)) {
++ BUGPRINT("Couldn't copy entries to userspace\n");
++ return -EFAULT;
++ }
++ // make userspace's life easier
++ memcpy(tmp.counter_entry, info->counter_entry,
++ NF_BR_NUMHOOKS * sizeof(int));
++ memcpy(tmp.hook_entry, info->hook_entry,
++ NF_BR_NUMHOOKS * sizeof(struct ebt_entries *));
++ for (i = 0; i < NF_BR_NUMHOOKS; i++)
++ tmp.hook_entry[i] = (struct ebt_entries *)(((char *)
++ (info->hook_entry[i])) - info->entries + tmp.entries);
++ if (copy_to_user(user, &tmp, sizeof(struct ebt_replace))) {
++ BUGPRINT("Couldn't copy ebt_replace to userspace\n");
++ return -EFAULT;
++ }
++ // set the match/watcher/target names right
++ return EBT_ENTRY_ITERATE(info->entries, info->entries_size,
++ ebt_make_names, info->entries, tmp.entries);
++}
++
++static int do_ebt_set_ctl(struct sock *sk,
++ int cmd, void *user, unsigned int len)
++{
++ int ret;
++
++ switch(cmd) {
++ case EBT_SO_SET_ENTRIES:
++ ret = do_replace(user, len);
++ break;
++ case EBT_SO_SET_COUNTERS:
++ ret = update_counters(user, len);
++ break;
++ default:
++ ret = -EINVAL;
++ }
++ return ret;
++}
++
++static int do_ebt_get_ctl(struct sock *sk, int cmd, void *user, int *len)
++{
++ int ret;
++ struct ebt_replace tmp;
++ struct ebt_table *t;
++
++ if (copy_from_user(&tmp, user, sizeof(tmp)))
++ return -EFAULT;
++
++ ret = down_interruptible(&ebt_mutex);
++ if (ret != 0)
++ return ret;
++
++ if (!(t = (struct ebt_table *)list_named_find(&ebt_tables, tmp.name))) {
++ print_string("Table not found, try insmod\n");
++ up(&ebt_mutex);
++ return -EINVAL;
++ }
++
++ switch(cmd) {
++ case EBT_SO_GET_INFO:
++ if (*len != sizeof(struct ebt_replace)){
++ ret = -EINVAL;
++ up(&ebt_mutex);
++ break;
++ }
++ tmp.nentries = t->private->nentries;
++ tmp.entries_size = t->private->entries_size;
++ // userspace needs this to check the chain names
++ tmp.valid_hooks = t->valid_hooks;
++ up(&ebt_mutex);
++ if (copy_to_user(user, &tmp, *len) != 0){
++ BUGPRINT("c2u Didn't work\n");
++ ret = -EFAULT;
++ break;
++ }
++ ret = 0;
++ break;
++
++ case EBT_SO_GET_ENTRIES:
++ ret = copy_everything_to_user(t, user, len);
++ up(&ebt_mutex);
++ break;
++
++ default:
++ up(&ebt_mutex);
++ ret = -EINVAL;
++ }
++
++ return ret;
++}
++
++static struct nf_sockopt_ops ebt_sockopts =
++{ { NULL, NULL }, PF_INET, EBT_BASE_CTL, EBT_SO_SET_MAX + 1, do_ebt_set_ctl,
++ EBT_BASE_CTL, EBT_SO_GET_MAX + 1, do_ebt_get_ctl, 0, NULL
++};
++
++// Copyright (C) 1998 by Ori Pomerantz
++// Print the string to the appropriate tty, the one
++// the current task uses
++static void print_string(char *str)
++{
++ struct tty_struct *my_tty;
++
++ /* The tty for the current task */
++ my_tty = current->tty;
++ if (my_tty != NULL) {
++ (*(my_tty->driver).write)(my_tty, 0, str, strlen(str));
++ (*(my_tty->driver).write)(my_tty, 0, "\015\012", 2);
++ }
++}
++
++static int __init init(void)
++{
++ int ret;
++
++ down(&ebt_mutex);
++ list_named_insert(&ebt_targets, &ebt_standard_target);
++ up(&ebt_mutex);
++ if ((ret = nf_register_sockopt(&ebt_sockopts)) < 0)
++ return ret;
++
++ print_string("Ebtables v2.0 registered");
++ return 0;
++}
++
++static void __exit fini(void)
++{
++ nf_unregister_sockopt(&ebt_sockopts);
++ print_string("Ebtables v2.0 unregistered");
++}
++
++EXPORT_SYMBOL(ebt_register_table);
++EXPORT_SYMBOL(ebt_unregister_table);
++EXPORT_SYMBOL(ebt_register_match);
++EXPORT_SYMBOL(ebt_unregister_match);
++EXPORT_SYMBOL(ebt_register_watcher);
++EXPORT_SYMBOL(ebt_unregister_watcher);
++EXPORT_SYMBOL(ebt_register_target);
++EXPORT_SYMBOL(ebt_unregister_target);
++EXPORT_SYMBOL(ebt_do_table);
++module_init(init);
++module_exit(fini);
++MODULE_LICENSE("GPL");
+--- /dev/null Thu Aug 24 11:00:32 2000
++++ ebt2.0pre2/include/linux/netfilter_bridge/ebtables.h Sat Apr 13 16:06:20 2002
+@@ -0,0 +1,318 @@
++/*
++ * ebtables
++ *
++ * Authors:
++ * Bart De Schuymer <bart.de.schuymer@pandora.be>
++ *
++ * ebtables.c,v 2.0, April, 2002
++ *
++ * This code is strongly inspired by the iptables code, which is
++ * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
++ */
++
++#ifndef __LINUX_BRIDGE_EFF_H
++#define __LINUX_BRIDGE_EFF_H
++#include <linux/if.h> // IFNAMSIZ
++#include <linux/netfilter_bridge.h>
++#include <linux/if_ether.h> // ETH_ALEN
++
++#define EBT_TABLE_MAXNAMELEN 32
++#define EBT_FUNCTION_MAXNAMELEN EBT_TABLE_MAXNAMELEN
++
++/* [gs]etsockopt numbers */
++#define EBT_BASE_CTL 128
++
++#define EBT_SO_SET_ENTRIES (EBT_BASE_CTL)
++#define EBT_SO_SET_COUNTERS (EBT_SO_SET_ENTRIES+1)
++#define EBT_SO_SET_MAX (EBT_SO_SET_COUNTERS+1)
++
++#define EBT_SO_GET_INFO (EBT_BASE_CTL)
++#define EBT_SO_GET_ENTRIES (EBT_SO_GET_INFO+1)
++#define EBT_SO_GET_MAX (EBT_SO_GET_ENTRIES+1)
++
++#define EBT_ACCEPT 0
++#define EBT_DROP 1
++#define EBT_CONTINUE 2
++#define NUM_STANDARD_TARGETS 3
++
++struct ebt_entries {
++ // this field is always set to zero (also by userspace).
++ // See EBT_ENTRY_OR_ENTRIES.
++ // Must be the same size as ebt_entry.bitmask
++ __u32 distinguisher;
++ // one standard (accept or drop) per hook
++ __u8 policy;
++ // nr. of entries
++ __u32 nentries;
++ // entry list
++ __u8 data[0];
++};
++
++// used for the bitmask of struct ebt_entry
++
++// This is a hack to distinguish between an ebt_entry struct and an
++// ebt_entries struct when traversing the entries from start to end.
++// Using this simplifies the code a lot, while still being able to use
++// ebt_entries.
++// In contrast, iptables doesn't use anything like ebt_entries and therefore
++// uses different techniques for naming the policy and such, so iptables
++// doesn't need a hack like this.
++#define EBT_ENTRY_OR_ENTRIES 0x01
++// these are the normal masks
++#define EBT_NOPROTO 0x02
++#define EBT_802_3 0x04
++#define EBT_SOURCEMAC 0x08
++#define EBT_DESTMAC 0x10
++#define EBT_F_MASK (EBT_NOPROTO | EBT_802_3 | EBT_SOURCEMAC | EBT_DESTMAC \
++ | EBT_ENTRY_OR_ENTRIES)
++
++#define EBT_IPROTO 0x01
++#define EBT_IIN 0x02
++#define EBT_IOUT 0x04
++#define EBT_ISOURCE 0x8
++#define EBT_IDEST 0x10
++#define EBT_INV_MASK (EBT_IPROTO | EBT_IIN | EBT_IOUT | EBT_ISOURCE | EBT_IDEST)
++
++struct ebt_counter
++{
++ __u64 pcnt;
++};
++
++struct ebt_entry_match
++{
++ union {
++ char name[EBT_FUNCTION_MAXNAMELEN];
++ struct ebt_match *match;
++ } u;
++ // size of this struct + size of data
++ unsigned int match_size;
++ unsigned char data[0];
++};
++
++struct ebt_entry_watcher
++{
++ union {
++ char name[EBT_FUNCTION_MAXNAMELEN];
++ struct ebt_watcher *watcher;
++ } u;
++ // size of this struct + size of data
++ unsigned int watcher_size;
++ unsigned char data[0];
++};
++
++struct ebt_entry_target
++{
++ union {
++ char name[EBT_FUNCTION_MAXNAMELEN];
++ struct ebt_target *target;
++ } u;
++ // size of this struct + size of data
++ unsigned int target_size;
++ unsigned char data[0];
++};
++
++#define EBT_STANDARD_TARGET "standard"
++struct ebt_standard_target
++{
++ struct ebt_entry_target target;
++ __u8 verdict;
++};
++
++/* one entry */
++struct ebt_entry {
++ // this needs to be the first field
++ __u32 bitmask;
++ __u32 invflags;
++ __u16 ethproto;
++ __u8 in[IFNAMSIZ];
++ __u8 out[IFNAMSIZ];
++ __u8 sourcemac[ETH_ALEN];
++ __u8 destmac[ETH_ALEN];
++ // sizeof ebt_entry + matches
++ __u16 watchers_offset;
++ // sizeof ebt_entry + matches + watchers
++ __u16 target_offset;
++ // sizeof ebt_entry + matches + watchers + target
++ __u16 next_offset;
++ unsigned char elems[0];
++};
++
++struct ebt_replace
++{
++ char name[EBT_TABLE_MAXNAMELEN];
++ unsigned int valid_hooks;
++ // nr of rules in the table
++ unsigned int nentries;
++ // total size of the entries
++ unsigned int entries_size;
++ // start of the chains
++ struct ebt_entries *hook_entry[NF_BR_NUMHOOKS];
++ // how many counters in front of it?
++ unsigned int counter_entry[NF_BR_NUMHOOKS];
++ // nr of counters userspace expects back
++ unsigned int num_counters;
++ // where the kernel will put the old counters
++ struct ebt_counter *counters;
++ char *entries;
++};
++
++#ifdef __KERNEL__
++
++struct ebt_match
++{
++ struct list_head list;
++ const char name[EBT_FUNCTION_MAXNAMELEN];
++ // 0 == it matches
++ int (*match)(const struct sk_buff *skb, const struct net_device *in,
++ const struct net_device *out, const void *matchdata,
++ unsigned int datalen, const struct ebt_counter *c);
++ // 0 == let it in
++ int (*check)(const char *tablename, unsigned int hooknr,
++ const struct ebt_entry *e, void *matchdata, unsigned int datalen);
++ void (*destroy)(void *matchdata, unsigned int datalen);
++ struct module *me;
++};
++
++struct ebt_watcher
++{
++ struct list_head list;
++ const char name[EBT_FUNCTION_MAXNAMELEN];
++ void (*watcher)(const struct sk_buff *skb, const struct net_device *in,
++ const struct net_device *out, const void *watcherdata,
++ unsigned int datalen, const struct ebt_counter *c);
++ // 0 == let it in
++ int (*check)(const char *tablename, unsigned int hooknr,
++ const struct ebt_entry *e, void *watcherdata, unsigned int datalen);
++ void (*destroy)(void *watcherdata, unsigned int datalen);
++ struct module *me;
++};
++
++struct ebt_target
++{
++ struct list_head list;
++ const char name[EBT_FUNCTION_MAXNAMELEN];
++ // returns one of the standard verdicts
++ __u8 (*target)(struct sk_buff **pskb,
++ unsigned int hooknr,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *targetdata,
++ unsigned int datalen);
++ // 0 == let it in
++ int (*check)(const char *tablename, unsigned int hooknr, const struct ebt_entry *e,
++ void *targetdata, unsigned int datalen);
++ void (*destroy)(void *targetdata, unsigned int datalen);
++ struct module *me;
++};
++
++struct ebt_table_info
++{
++ // total size of the entries
++ unsigned int entries_size;
++ unsigned int nentries;
++ // pointers to the start of the chains
++ struct ebt_entries *hook_entry[NF_BR_NUMHOOKS];
++ // how many counters in front of the counters belonging to a chain
++ unsigned int counter_entry[NF_BR_NUMHOOKS];
++ struct ebt_counter *counters;
++ char *entries;
++};
++
++struct ebt_table
++{
++ struct list_head list;
++ char name[EBT_TABLE_MAXNAMELEN];
++ struct ebt_replace *table;
++ unsigned int valid_hooks;
++ rwlock_t lock;
++ // e.g. a table could explicitly allow only certain
++ // matches, targets, ... 0 == let it in
++ int (*check)(const struct ebt_table_info *info, unsigned int valid_hooks);
++ // the data used by the kernel
++ struct ebt_table_info *private;
++};
++
++extern int ebt_register_table(struct ebt_table *table);
++extern void ebt_unregister_table(struct ebt_table *table);
++extern int ebt_register_match(struct ebt_match *match);
++extern void ebt_unregister_match(struct ebt_match *match);
++extern int ebt_register_watcher(struct ebt_watcher *watcher);
++extern void ebt_unregister_watcher(struct ebt_watcher *watcher);
++extern int ebt_register_target(struct ebt_target *target);
++extern void ebt_unregister_target(struct ebt_target *target);
++extern unsigned int ebt_do_table(unsigned int hook, struct sk_buff **pskb,
++ const struct net_device *in, const struct net_device *out,
++ struct ebt_table *table);
++
++#endif /* __KERNEL__ */
++
++// blatantly stolen from ip_tables.h
++// fn returns 0 to continue iteration
++#define EBT_MATCH_ITERATE(e, fn, args...) \
++({ \
++ unsigned int __i; \
++ int __ret = 0; \
++ struct ebt_entry_match *__match; \
++ \
++ for (__i = sizeof(struct ebt_entry); \
++ __i < (e)->watchers_offset; \
++ __i += __match->match_size) { \
++ __match = (void *)(e) + __i; \
++ \
++ __ret = fn(__match , ## args); \
++ if (__ret != 0) \
++ break; \
++ } \
++ if (__ret == 0) { \
++ if (__i != (e)->watchers_offset) \
++ __ret = -EINVAL; \
++ } \
++ __ret; \
++})
++
++#define EBT_WATCHER_ITERATE(e, fn, args...) \
++({ \
++ unsigned int __i; \
++ int __ret = 0; \
++ struct ebt_entry_watcher *__watcher; \
++ \
++ for (__i = e->watchers_offset; \
++ __i < (e)->target_offset; \
++ __i += __watcher->watcher_size) { \
++ __watcher = (void *)(e) + __i; \
++ \
++ __ret = fn(__watcher , ## args); \
++ if (__ret != 0) \
++ break; \
++ } \
++ if (__ret == 0) { \
++ if (__i != (e)->target_offset) \
++ __ret = -EINVAL; \
++ } \
++ __ret; \
++})
++
++#define EBT_ENTRY_ITERATE(entries, size, fn, args...) \
++({ \
++ unsigned int __i; \
++ int __ret = 0; \
++ struct ebt_entry *__entry; \
++ \
++ for (__i = 0; __i < (size);) { \
++ __entry = (void *)(entries) + __i; \
++ __ret = fn(__entry , ## args); \
++ if (__ret != 0) \
++ break; \
++ if (__entry->bitmask != 0) \
++ __i += __entry->next_offset; \
++ else \
++ __i += sizeof(struct ebt_entries); \
++ } \
++ if (__ret == 0) { \
++ if (__i != (size)) \
++ __ret = -EINVAL; \
++ } \
++ __ret; \
++})
++
++#endif
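
The three ITERATE macros above all walk the same contiguous layout that userspace ships to the kernel: the matches start right after struct ebt_entry and run up to watchers_offset, the watchers run up to target_offset, and the target sits between target_offset and next_offset. As an illustration only (this helper is not part of the patch), counting the matches attached to a single entry with EBT_MATCH_ITERATE looks like this:

/* Illustrative only: a hypothetical helper using EBT_MATCH_ITERATE. */
#include <linux/netfilter_bridge/ebtables.h>

static inline int count_one(struct ebt_entry_match *m, unsigned int *n)
{
        (*n)++;
        return 0;       /* 0 == keep iterating */
}

static unsigned int count_matches(struct ebt_entry *e)
{
        unsigned int n = 0;

        /* the macro returns -EINVAL if the match sizes don't add up
         * exactly to e->watchers_offset */
        if (EBT_MATCH_ITERATE(e, count_one, &n) != 0)
                return 0;
        return n;
}
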
+--- /dev/null Thu Aug 24 11:00:32 2000
++++ ebt2.0pre2/include/linux/netfilter_bridge/ebt_arp.h Sat Apr 13 16:11:46 2002
+@@ -0,0 +1,26 @@
++#ifndef __LINUX_BRIDGE_EBT_ARP_H
++#define __LINUX_BRIDGE_EBT_ARP_H
++
++#define EBT_ARP_OPCODE 0x01
++#define EBT_ARP_HTYPE 0x02
++#define EBT_ARP_PTYPE 0x04
++#define EBT_ARP_SRC_IP 0x08
++#define EBT_ARP_DST_IP 0x10
++#define EBT_ARP_MASK (EBT_ARP_OPCODE | EBT_ARP_HTYPE | EBT_ARP_PTYPE | \
++ EBT_ARP_SRC_IP | EBT_ARP_DST_IP)
++#define EBT_ARP_MATCH "arp"
++
++struct ebt_arp_info
++{
++ __u16 htype;
++ __u16 ptype;
++ __u16 opcode;
++ __u32 saddr;
++ __u32 smsk;
++ __u32 daddr;
++ __u32 dmsk;
++ __u8 bitmask;
++ __u8 invflags;
++};
++
++#endif
+--- /dev/null Thu Aug 24 11:00:32 2000
++++ ebt2.0pre2/include/linux/netfilter_bridge/ebt_ip.h Wed Apr 3 21:50:43 2002
+@@ -0,0 +1,24 @@
++#ifndef __LINUX_BRIDGE_EBT_IP_H
++#define __LINUX_BRIDGE_EBT_IP_H
++
++#define EBT_IP_SOURCE 0x01
++#define EBT_IP_DEST 0x02
++#define EBT_IP_TOS 0x04
++#define EBT_IP_PROTO 0x08
++#define EBT_IP_MASK (EBT_IP_SOURCE | EBT_IP_DEST | EBT_IP_TOS | EBT_IP_PROTO)
++#define EBT_IP_MATCH "ip"
++
++// the same values are used for the invflags
++struct ebt_ip_info
++{
++ __u32 saddr;
++ __u32 daddr;
++ __u32 smsk;
++ __u32 dmsk;
++ __u8 tos;
++ __u8 protocol;
++ __u8 bitmask;
++ __u8 invflags;
++};
++
++#endif
+--- /dev/null Thu Aug 24 11:00:32 2000
++++ ebt2.0pre2/include/linux/netfilter_bridge/ebt_log.h Wed Apr 3 21:50:43 2002
+@@ -0,0 +1,17 @@
++#ifndef __LINUX_BRIDGE_EBT_LOG_H
++#define __LINUX_BRIDGE_EBT_LOG_H
++
++#define EBT_LOG_IP 0x01 // if the frame is made by ip, log the ip information
++#define EBT_LOG_ARP 0x02
++#define EBT_LOG_MASK (EBT_LOG_IP | EBT_LOG_ARP)
++#define EBT_LOG_PREFIX_SIZE 30
++#define EBT_LOG_WATCHER "log"
++
++struct ebt_log_info
++{
++ __u8 loglevel;
++ __u8 prefix[EBT_LOG_PREFIX_SIZE];
++ __u32 bitmask;
++};
++
++#endif
+--- /dev/null Thu Aug 24 11:00:32 2000
++++ ebt2.0pre2/include/linux/netfilter_bridge/ebt_nat.h Wed Apr 3 21:50:43 2002
+@@ -0,0 +1,11 @@
++#ifndef __LINUX_BRIDGE_EBT_NAT_H
++#define __LINUX_BRIDGE_EBT_NAT_H
++
++struct ebt_nat_info
++{
++ unsigned char mac[ETH_ALEN];
++};
++#define EBT_SNAT_TARGET "snat"
++#define EBT_DNAT_TARGET "dnat"
++
++#endif
+--- /dev/null Thu Aug 24 11:00:32 2000
++++ ebt2.0pre2/include/linux/br_db.h Sat Apr 13 22:43:11 2002
+@@ -0,0 +1,53 @@
++/*
++ * bridge ethernet protocol filter
++ *
++ * Authors:
++ * Bart De Schuymer <bart.de.schuymer@pandora.be>
++ *
++ * br_db.h,v 1.1 2001/04/16
++ *
++ * This code is strongly inspired by the iptables code, which is
++ * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#ifndef __LINUX_BRIDGE_DB_H
++#define __LINUX_BRIDGE_DB_H
++#include <linux/if.h> /* IFNAMSIZ */
++#ifdef __KERNEL__
++#include <linux/if_bridge.h>
++#include <linux/netfilter_bridge.h>
++#else
++#include <linux/netfilter_bridge.h>
++#endif
++#define BRDB_BASE_CTL 135
++
++#define BRDB_SO_SET_ALLOWDB (BRDB_BASE_CTL)
++#define BRDB_SO_SET_MAX (BRDB_SO_SET_ALLOWDB+1)
++
++#define BRDB_SO_GET_DBINFO (BRDB_BASE_CTL)
++#define BRDB_SO_GET_DB (BRDB_SO_GET_DBINFO+1)
++#define BRDB_SO_GET_MAX (BRDB_SO_GET_DB+1)
++
++#define BRDB_NODB 0
++#define BRDB_DB 1
++
++#define INITIAL_DBSIZE 10
++#define IDENTIFY802_3 46
++
++struct brdb_dbinfo {
++ __u32 nentries;
++};
++
++struct brdb_dbentry {
++ __u8 in[IFNAMSIZ];
++ __u8 out[IFNAMSIZ];
++ __u16 ethproto;
++ __u32 hook;
++};
++
++#endif
diff --git a/kernel/patches/base-patches/ebtables-v2.0pre3_vs_2.4.18.diff b/kernel/patches/base-patches/ebtables-v2.0pre3_vs_2.4.18.diff
new file mode 100644
index 0000000..72e80fe
--- /dev/null
+++ b/kernel/patches/base-patches/ebtables-v2.0pre3_vs_2.4.18.diff
@@ -0,0 +1,3108 @@
+ebtables-v2.0pre3 - 27 April
+
+*** modifications for brouter support ***
+
+--- linux/net/bridge/br_private.h Sat Apr 27 22:55:42 2002
++++ ebt2.0pre3/net/bridge/br_private.h Sat Apr 27 21:52:48 2002
+@@ -170,7 +170,7 @@
+
+ /* br_input.c */
+ extern int br_handle_frame_finish(struct sk_buff *skb);
+-extern void br_handle_frame(struct sk_buff *skb);
++extern int br_handle_frame(struct sk_buff *skb);
+
+ /* br_ioctl.c */
+ extern void br_call_ioctl_atomic(void (*fn)(void));
+--- linux/include/linux/if_bridge.h Thu Nov 22 20:47:12 2001
++++ ebt2.0pre3/include/linux/if_bridge.h Sat Apr 27 21:39:14 2002
+@@ -102,8 +102,13 @@
+ struct net_bridge_port;
+
+ extern int (*br_ioctl_hook)(unsigned long arg);
+-extern void (*br_handle_frame_hook)(struct sk_buff *skb);
+-
++extern int (*br_handle_frame_hook)(struct sk_buff *skb);
++#if defined(CONFIG_BRIDGE_EBT_BROUTE) || \
++ defined(CONFIG_BRIDGE_EBT_BROUTE_MODULE)
++extern unsigned int (*broute_decision) (unsigned int hook, struct sk_buff **pskb,
++ const struct net_device *in, const struct net_device *out,
++ int (*okfn)(struct sk_buff *));
++#endif
+ #endif
+
+ #endif
+--- linux/net/core/dev.c Mon Feb 25 20:38:14 2002
++++ ebt2.0pre3/net/core/dev.c Sat Apr 27 21:05:16 2002
+@@ -1384,7 +1384,14 @@
+ }
+
+ #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
+-void (*br_handle_frame_hook)(struct sk_buff *skb) = NULL;
++int (*br_handle_frame_hook)(struct sk_buff *skb) = NULL;
++#if defined(CONFIG_BRIDGE_EBT_BROUTE) || \
++ defined(CONFIG_BRIDGE_EBT_BROUTE_MODULE)
++unsigned int (*broute_decision) (unsigned int hook, struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ int (*okfn)(struct sk_buff *)) = NULL;
++#endif
+ #endif
+
+ static __inline__ int handle_bridge(struct sk_buff *skb,
+@@ -1394,14 +1401,14 @@
+
+ if (pt_prev) {
+ if (!pt_prev->data)
+- ret = deliver_to_old_ones(pt_prev, skb, 0);
++ deliver_to_old_ones(pt_prev, skb, 0);
+ else {
+ atomic_inc(&skb->users);
+- ret = pt_prev->func(skb, skb->dev, pt_prev);
++ pt_prev->func(skb, skb->dev, pt_prev);
+ }
+ }
+
+- br_handle_frame_hook(skb);
++ ret = br_handle_frame_hook(skb);
+ return ret;
+ }
+
+@@ -1479,9 +1486,10 @@
+ #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
+ if (skb->dev->br_port != NULL &&
+ br_handle_frame_hook != NULL) {
+- handle_bridge(skb, pt_prev);
+- dev_put(rx_dev);
+- continue;
++ if (handle_bridge(skb, pt_prev) == 0) {
++ dev_put(rx_dev);
++ continue;
++ }
+ }
+ #endif
+
+--- linux/net/bridge/br_input.c Sat Apr 27 22:55:42 2002
++++ ebt2.0pre3/net/bridge/br_input.c Sat Apr 27 21:05:16 2002
+@@ -19,7 +19,10 @@
+ #include <linux/if_bridge.h>
+ #include <linux/netfilter_bridge.h>
+ #include "br_private.h"
+-
++#if defined(CONFIG_BRIDGE_EBT_BROUTE) || \
++ defined(CONFIG_BRIDGE_EBT_BROUTE_MODULE)
++#include <linux/netfilter.h>
++#endif
+ unsigned char bridge_ula[6] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
+
+ static int br_pass_frame_up_finish(struct sk_buff *skb)
+@@ -112,7 +115,7 @@
+ return 0;
+ }
+
+-void br_handle_frame(struct sk_buff *skb)
++int br_handle_frame(struct sk_buff *skb)
+ {
+ struct net_bridge *br;
+ unsigned char *dest;
+@@ -146,23 +149,30 @@
+ goto handle_special_frame;
+
+ if (p->state == BR_STATE_FORWARDING) {
++#if defined(CONFIG_BRIDGE_EBT_BROUTE) || \
++ defined(CONFIG_BRIDGE_EBT_BROUTE_MODULE)
++ if (broute_decision && broute_decision(NF_BR_BROUTING, &skb,
++ skb->dev, NULL, NULL) == NF_DROP)
++ return -1;
++#endif
+ NF_HOOK(PF_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
+ br_handle_frame_finish);
+ read_unlock(&br->lock);
+- return;
++ return 0;
+ }
+
+ err:
+ read_unlock(&br->lock);
+ err_nolock:
+ kfree_skb(skb);
+- return;
++ return 0;
+
+ handle_special_frame:
+ if (!dest[5]) {
+ br_stp_handle_bpdu(skb);
+- return;
++ return 0;
+ }
+
+ kfree_skb(skb);
++ return 0;
+ }
+--- linux/net/netsyms.c Mon Feb 25 20:38:14 2002
++++ ebt2.0pre3/net/netsyms.c Sat Apr 27 21:05:16 2002
+@@ -228,6 +228,10 @@
+
+ #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
+ EXPORT_SYMBOL(br_handle_frame_hook);
++#if defined(CONFIG_BRIDGE_EBT_BROUTE) || \
++ defined(CONFIG_BRIDGE_EBT_BROUTE_MODULE)
++EXPORT_SYMBOL(broute_decision);
++#endif
+ #ifdef CONFIG_INET
+ EXPORT_SYMBOL(br_ioctl_hook);
+ #endif
+--- linux/include/linux/netfilter_bridge.h Tue Jun 12 04:15:27 2001
++++ ebt2.0pre3/include/linux/netfilter_bridge.h Sat Apr 27 21:53:07 2002
+@@ -18,7 +18,19 @@
+ #define NF_BR_LOCAL_OUT 3
+ /* Packets about to hit the wire. */
+ #define NF_BR_POST_ROUTING 4
+-#define NF_BR_NUMHOOKS 5
++/* Not really a hook, but used for the ebtables broute table */
++#define NF_BR_BROUTING 5
++#define NF_BR_NUMHOOKS 6
+
++enum nf_br_hook_priorities {
++ NF_BR_PRI_FIRST = INT_MIN,
++ NF_BR_PRI_FILTER_BRIDGED = -200,
++ NF_BR_PRI_FILTER_OTHER = 200,
++ NF_BR_PRI_NAT_DST_BRIDGED = -300,
++ NF_BR_PRI_NAT_DST_OTHER = 100,
++ NF_BR_PRI_NAT_SRC_BRIDGED = -100,
++ NF_BR_PRI_NAT_SRC_OTHER = 300,
++ NF_BR_PRI_LAST = INT_MAX,
++};
+
+ #endif
+
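
NF_BR_BROUTING is not wired into nf_hook() — it is only reached through the broute_decision pointer installed above — but the new priority enum is meant for modules that attach to the ordinary bridge hooks with nf_register_hook(). Purely as an illustration (none of this is in the patch), an accept-everything hook at the bridged-filter priority would be registered roughly like this:

/* Illustrative only: registering on a PF_BRIDGE hook with the new
 * priorities.  The module and its hook function are made up. */
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/netfilter_bridge.h>

static unsigned int sample_hook(unsigned int hook, struct sk_buff **pskb,
   const struct net_device *in, const struct net_device *out,
   int (*okfn)(struct sk_buff *))
{
        // let every bridged frame pass untouched
        return NF_ACCEPT;
}

static struct nf_hook_ops sample_ops =
{ {NULL, NULL}, sample_hook, PF_BRIDGE, NF_BR_PRE_ROUTING,
  NF_BR_PRI_FILTER_BRIDGED };

static int __init init(void)
{
        return nf_register_hook(&sample_ops);
}

static void __exit fini(void)
{
        nf_unregister_hook(&sample_ops);
}

module_init(init);
module_exit(fini);
MODULE_LICENSE("GPL");
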
+*** modifications for ebtables compilation ***
+
+--- linux/net/Makefile Mon Feb 25 20:38:14 2002
++++ ebt2.0pre3/net/Makefile Sat Apr 27 21:05:16 2002
+@@ -7,7 +7,8 @@
+
+ O_TARGET := network.o
+
+-mod-subdirs := ipv4/netfilter ipv6/netfilter ipx irda bluetooth atm netlink sched
++mod-subdirs := bridge/netfilter ipv4/netfilter ipv6/netfilter ipx irda \
++ bluetooth atm netlink sched
+ export-objs := netsyms.o
+
+ subdir-y := core ethernet
+@@ -23,6 +24,12 @@
+ ifneq ($(CONFIG_IPV6),n)
+ ifneq ($(CONFIG_IPV6),)
+ subdir-$(CONFIG_NETFILTER) += ipv6/netfilter
++endif
++endif
++
++ifneq ($(CONFIG_BRIDGE),n)
++ifneq ($(CONFIG_BRIDGE),)
++subdir-$(CONFIG_BRIDGE) += bridge/netfilter
+ endif
+ endif
+
+--- linux/net/Config.in Sat Apr 27 22:55:42 2002
++++ ebt2.0pre3/net/Config.in Sat Apr 27 21:05:16 2002
+@@ -60,6 +60,7 @@
+ source net/decnet/Config.in
+ fi
+ dep_tristate '802.1d Ethernet Bridging' CONFIG_BRIDGE $CONFIG_INET
++ source net/bridge/netfilter/Config.in
+ if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
+ if [ "$CONFIG_BRIDGE" != "n" -a "$CONFIG_NETFILTER" != "n" ]; then
+ bool ' netfilter (firewalling) support' CONFIG_BRIDGE_NF
+
+*** new ebtables files ***
+
+--- /dev/null Thu Aug 24 11:00:32 2000
++++ ebt2.0pre3/net/bridge/netfilter/Makefile Sat Apr 27 21:05:16 2002
+@@ -0,0 +1,24 @@
++#
++# Makefile for the netfilter modules on top of bridging.
++#
++# Note! Dependencies are done automagically by 'make dep', which also
++# removes any old dependencies. DON'T put your own dependencies here
++# unless it's something special (ie not a .c file).
++#
++# Note 2! The CFLAGS definition is now in the main makefile...
++
++O_TARGET := netfilter.o
++
++export-objs = ebtables.o
++
++obj-$(CONFIG_BRIDGE_EBT) += ebtables.o
++obj-$(CONFIG_BRIDGE_EBT_T_FILTER) += ebtable_filter.o
++obj-$(CONFIG_BRIDGE_EBT_T_NAT) += ebtable_nat.o
++obj-$(CONFIG_BRIDGE_EBT_BROUTE) += ebtable_broute.o
++obj-$(CONFIG_BRIDGE_DB) += br_db.o
++obj-$(CONFIG_BRIDGE_EBT_IPF) += ebt_ip.o
++obj-$(CONFIG_BRIDGE_EBT_ARPF) += ebt_arp.o
++obj-$(CONFIG_BRIDGE_EBT_LOG) += ebt_log.o
++obj-$(CONFIG_BRIDGE_EBT_NAT) += ebt_nat.o
++obj-$(CONFIG_BRIDGE_EBT_REDIRECT) += ebt_redirect.o
++include $(TOPDIR)/Rules.make
+--- /dev/null Thu Aug 24 11:00:32 2000
++++ ebt2.0pre3/net/bridge/netfilter/Config.in Sat Apr 27 21:05:16 2002
+@@ -0,0 +1,14 @@
++#
++# Bridge netfilter configuration
++#
++dep_tristate ' Bridge: ebtables' CONFIG_BRIDGE_EBT $CONFIG_BRIDGE
++dep_tristate ' ebt: filter table support' CONFIG_BRIDGE_EBT_T_FILTER $CONFIG_BRIDGE_EBT
++dep_tristate ' ebt: nat table support' CONFIG_BRIDGE_EBT_T_NAT $CONFIG_BRIDGE_EBT
++dep_tristate ' ebt: broute table support' CONFIG_BRIDGE_EBT_BROUTE $CONFIG_BRIDGE_EBT
++dep_tristate ' ebt: LOG support' CONFIG_BRIDGE_EBT_LOG $CONFIG_BRIDGE_EBT
++dep_tristate ' ebt: IP filter support' CONFIG_BRIDGE_EBT_IPF $CONFIG_BRIDGE_EBT
++dep_tristate ' ebt: ARP filter support' CONFIG_BRIDGE_EBT_ARPF $CONFIG_BRIDGE_EBT
++dep_tristate ' ebt: nat target support' CONFIG_BRIDGE_EBT_NAT $CONFIG_BRIDGE_EBT
++dep_tristate ' ebt: redirect target support' CONFIG_BRIDGE_EBT_REDIRECT $CONFIG_BRIDGE_EBT
++dep_tristate ' Bridge: ethernet database' CONFIG_BRIDGE_DB $CONFIG_BRIDGE
++
+--- /dev/null Thu Aug 24 11:00:32 2000
++++ ebt2.0pre3/net/bridge/netfilter/br_db.c Sat Apr 27 21:05:16 2002
+@@ -0,0 +1,357 @@
++/*
++ * bridge ethernet protocol database
++ *
++ * Authors:
++ * Bart De Schuymer <bart.de.schuymer@pandora.be>
++ *
++ * br_db.c, April, 2002
++ *
++ * This code is strongly inspired by the iptables code, which is
++ * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#include <linux/module.h>
++#include <linux/vmalloc.h>
++#include <linux/skbuff.h>
++#include <linux/if_ether.h>
++#include <linux/netfilter_bridge.h>
++#include <linux/br_db.h>
++#include <linux/socket.h> /* PF_BRIDGE */
++#include <linux/spinlock.h> /* rwlock_t */
++#include <asm/errno.h>
++#include <asm/uaccess.h> /* copy_[to,from]_user */
++#include <linux/smp.h> /* multiprocessors */
++
++#define BUGPRINT(format, args...) printk("kernel msg: brdb bug: please report to author: "format, ## args)
++/*#define BUGPRINT(format, args...)*/
++#define MEMPRINT(format, args...) printk("kernel msg: brdb : out of memory: "format, ## args)
++/*#define MEMPRINT(format, args...)*/
++
++/* database variables */
++static __u16 allowdb = BRDB_NODB;
++static struct brdb_dbentry **flowdb = NULL;
++static unsigned int *dbsize;
++static unsigned int *dbnum;
++/* database lock */
++static rwlock_t brdb_dblock;
++
++static inline int brdb_dev_check(char *entry, const struct net_device *device)
++{
++ if (*entry == '\0')
++ return 0;
++ if (!device)
++ return 1;
++ return strncmp(entry, device->name, IFNAMSIZ);
++}
++
++static inline int brdb_proto_check(unsigned int a, unsigned int b)
++{
++ if (a == b || (a == IDENTIFY802_3 && ntohs(b) < 1536))
++ return 0;
++ return 1;
++}
++
++static unsigned int maintaindb (unsigned int hook, struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ int (*okfn)(struct sk_buff *))
++{
++ struct brdb_dbentry *hlp;
++ int i, cpunr;
++ unsigned short ethproto = ((**pskb).mac.ethernet)->h_proto;
++
++ cpunr = cpu_number_map(smp_processor_id());
++
++ read_lock_bh(&brdb_dblock);
++
++ if (allowdb == BRDB_NODB) {// must be after readlock
++ read_unlock_bh(&brdb_dblock);
++ return NF_ACCEPT;
++ }
++ hlp = flowdb[cpunr];
++ /* search for existing entry */
++ for (i = 0; i < dbnum[cpunr]; i++) {
++ if (hlp->hook == hook && !brdb_proto_check(hlp->ethproto, ethproto) &&
++ !brdb_dev_check(hlp->in, in) && !brdb_dev_check(hlp->out, out)) {
++ read_unlock_bh(&brdb_dblock);
++ return NF_ACCEPT;
++ }
++ hlp++;
++ }
++ /* add new entry to database */
++ if (dbnum[cpunr] == dbsize[cpunr]) {
++ dbsize[cpunr] *= 2;
++ if ( !( hlp = (struct brdb_dbentry *) vmalloc(dbsize[cpunr] * sizeof(struct brdb_dbentry)) ) ) {
++ dbsize[cpunr] /= 2;
++ MEMPRINT("maintaindb && nomemory\n");
++ read_unlock_bh(&brdb_dblock);
++ return NF_ACCEPT;
++ }
++ memcpy(hlp, flowdb[cpunr], dbnum[cpunr] * sizeof(struct brdb_dbentry));
++ vfree(flowdb[cpunr]);
++ flowdb[cpunr] = hlp;
++ }
++
++ hlp = flowdb[cpunr] + dbnum[cpunr];
++ hlp->hook = hook;
++ if (in)
++ strncpy(hlp->in, in->name, IFNAMSIZ);
++ else
++ hlp->in[0] = '\0';
++ if (out)
++ strncpy(hlp->out, out->name, IFNAMSIZ);
++ else
++ hlp->out[0] = '\0';
++ if (ntohs(ethproto) < 1536)
++ hlp->ethproto = IDENTIFY802_3;
++ else
++ hlp->ethproto = ethproto;
++ dbnum[cpunr]++;
++
++ read_unlock_bh(&brdb_dblock);
++
++ return NF_ACCEPT;
++}
++
++static int copy_db(void *user, int *len)
++{
++ int i, j, nentries = 0, ret;
++ struct brdb_dbentry *begin, *end1, *end2, *point, *point2;
++
++ write_lock_bh(&brdb_dblock);
++ for (i = 0; i < smp_num_cpus; i++)
++ nentries += dbnum[i];
++	if (*len > nentries) {
++		write_unlock_bh(&brdb_dblock);
++		return -EINVAL;
++	}
++
++	if ( !(begin = (struct brdb_dbentry *) vmalloc((*len) * sizeof(struct brdb_dbentry))) ) {
++		write_unlock_bh(&brdb_dblock);
++		return -ENOMEM;
++	}
++ memcpy(begin, flowdb[0], dbnum[0] * sizeof(struct brdb_dbentry));
++ end1 = begin + dbnum[0];
++ for (i = 1; i < smp_num_cpus; i++) {/* cycle databases per cpu */
++ point2 = flowdb[i];
++ end2 = end1;
++ for (j = 0; j < dbnum[i]; j++) {/* cycle entries of a cpu's database (point2) */
++ for (point = begin; point != end2; point++)/* cycle different entries we found so far */
++ if (point->hook == point2->hook && !strncmp(point->in, point2->in, IFNAMSIZ) &&
++ !strncmp(point->out, point2->out, IFNAMSIZ) && point->ethproto == point2->ethproto)
++ goto out;/* already exists in a database of another cpu */
++
++ memcpy(end1, point2, sizeof(struct brdb_dbentry));
++ end1++;
++out:
++ point2++;
++ }
++ }
++ write_unlock_bh(&brdb_dblock);
++	i = end1 - begin; /* number of unique entries collected */
++ *len = i < *len ? i : *len;
++ if (copy_to_user(user, begin, *len * sizeof(struct brdb_dbentry)) != 0)
++ ret = -EFAULT;
++ else
++ ret = 0;
++ vfree(begin);
++ return ret;
++}
++
++static int switch_nodb(void){
++ int i;
++
++ if (!flowdb)
++ BUGPRINT("switch_nodb && !flowdb\n");
++ for (i = 0; i < smp_num_cpus; i++)
++ vfree(flowdb[i]);
++ vfree(flowdb);
++ if (!dbsize)
++ BUGPRINT("switch_nodb && !dbsize\n");
++ vfree(dbsize);
++ if (!dbnum)
++ BUGPRINT("switch_nodb && !dbnum\n");
++ vfree(dbnum);
++ flowdb = NULL;
++ allowdb = BRDB_NODB;
++ return 0;
++}
++
++static int switch_db(void)
++{
++ int i, j;
++
++ if (flowdb) BUGPRINT("switch_db && flowdb\n");
++ if ( !(flowdb = (struct brdb_dbentry **) vmalloc(smp_num_cpus * sizeof(struct brdb_dbentry *))) ) {
++ MEMPRINT("switch_db && nomemory\n");
++ return -ENOMEM;
++ }
++
++ for (i = 0; i < smp_num_cpus; i++)
++ if ( !(flowdb[i] = (struct brdb_dbentry *) vmalloc(INITIAL_DBSIZE * sizeof(struct brdb_dbentry))) )
++ goto sw_free1;
++ else
++ memset(flowdb[i], 0, INITIAL_DBSIZE * sizeof(struct brdb_dbentry));
++
++	if ( !(dbnum = (unsigned int *) vmalloc(smp_num_cpus * sizeof(unsigned int))) )
++ goto sw_free2;
++
++	if ( !(dbsize = (unsigned int *) vmalloc(smp_num_cpus * sizeof(unsigned int))) )
++ goto sw_free3;
++
++ for (i = 0; i < smp_num_cpus; i++) {
++ dbnum[i] = 0;
++ dbsize[i] = INITIAL_DBSIZE;
++ }
++ allowdb = BRDB_DB;
++ return 0;
++
++sw_free3:
++ MEMPRINT("switch_db && nomemory2\n");
++ vfree(dbnum);
++ dbnum = NULL;
++sw_free2:
++ MEMPRINT("switch_db && nomemory3\n");
++sw_free1:
++ MEMPRINT("switch_db && nomemory4\n");
++ for (j = 0; j<i; j++)
++ vfree(flowdb[j]);
++ vfree(flowdb);
++ allowdb = BRDB_NODB;
++ return -ENOMEM;
++}
++
++static int
++do_brdb_set_ctl(struct sock *sk, int cmd, void *user, unsigned int len)
++{
++ int ret;
++ __u16 adb;
++ switch(cmd) {
++ case BRDB_SO_SET_ALLOWDB:
++ if (len != sizeof(__u16)) {
++ ret = -EINVAL;
++ break;
++ }
++ if (copy_from_user(&adb, user, len) != 0) {
++ ret = -EFAULT;
++ break;
++ }
++ if (adb != BRDB_DB && adb != BRDB_NODB) {
++ ret = -EINVAL;
++ break;
++ }
++ write_lock_bh(&brdb_dblock);
++ if (adb == allowdb) {
++ ret = 0;
++ write_unlock_bh(&brdb_dblock);
++ break;
++ }
++ if (allowdb == BRDB_DB)
++ ret = switch_nodb();
++ else
++ ret = switch_db();
++ write_unlock_bh(&brdb_dblock);
++ break;
++
++ default:
++ ret = -EINVAL;
++ }
++ return ret;
++}
++
++static int
++do_brdb_get_ctl(struct sock *sk, int cmd, void *user, int *len)
++{
++ struct brdb_dbinfo help2;
++ int i, ret;
++ switch(cmd) {
++ case BRDB_SO_GET_DBINFO:
++ if (sizeof(struct brdb_dbinfo) != *len)
++ return -EINVAL;
++ write_lock_bh(&brdb_dblock);
++		/* help2.nentries encodes the database state:
++		 * 0 means there is no database;
++		 * otherwise the number of entries is help2.nentries - 1
++		 * (e.g. a value of 1 means an empty database)
++		 */
++ if (allowdb == BRDB_NODB)
++ help2.nentries = 0;
++ else {
++ help2.nentries = 1;
++ for (i = 0; i < smp_num_cpus; i++)
++ help2.nentries += dbnum[i];
++ }
++ write_unlock_bh(&brdb_dblock);
++ if (copy_to_user(user, &help2, sizeof(help2)) != 0)
++ ret = -EFAULT;
++ else
++ ret = 0;
++ break;
++
++ case BRDB_SO_GET_DB:
++ if (*len == 0 || allowdb == BRDB_NODB)
++ return -EINVAL;
++ ret = copy_db(user, len);
++ break;
++ default:
++ ret = -EINVAL;
++ }
++
++ return ret;
++}
++
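++/* Positional initializer; with the 2.4 layout of struct nf_sockopt_ops this
++ * reads as { list, pf, set_optmin, set_optmax, set(), get_optmin,
++ * get_optmax, get(), use, cleanup }.  Note that the sockopts are registered
++ * on PF_INET, so userspace talks to the database over an ordinary IP socket
++ */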
++static struct nf_sockopt_ops brdb_sockopts
++= { { NULL, NULL }, PF_INET, BRDB_BASE_CTL, BRDB_SO_SET_MAX+1, do_brdb_set_ctl,
++ BRDB_BASE_CTL, BRDB_SO_GET_MAX+1, do_brdb_get_ctl, 0, NULL };
++
++
++static struct nf_hook_ops brdb_br_ops[] = {
++ { { NULL, NULL }, maintaindb, PF_BRIDGE, NF_BR_PRE_ROUTING, -250},
++ { { NULL, NULL }, maintaindb, PF_BRIDGE, NF_BR_LOCAL_IN, -250},
++ { { NULL, NULL }, maintaindb, PF_BRIDGE, NF_BR_FORWARD, -250},
++ { { NULL, NULL }, maintaindb, PF_BRIDGE, NF_BR_LOCAL_OUT, -250},
++ { { NULL, NULL }, maintaindb, PF_BRIDGE, NF_BR_POST_ROUTING, -250}
++};
++
++static int __init init(void)
++{
++	int ret;
++
++	rwlock_init(&brdb_dblock);
++
++	if ((ret = nf_register_hook(&brdb_br_ops[0])) < 0)
++		return ret;
++
++ if ((ret = nf_register_hook(&brdb_br_ops[1])) < 0)
++ goto clean0;
++
++ if ((ret = nf_register_hook(&brdb_br_ops[2])) < 0)
++ goto clean1;
++
++ if ((ret = nf_register_hook(&brdb_br_ops[3])) < 0)
++ goto clean2;
++
++ if ((ret = nf_register_hook(&brdb_br_ops[4])) < 0)
++ goto clean3;
++
++ /* Register setsockopt */
++ if ((ret = nf_register_sockopt(&brdb_sockopts)) < 0)
++ goto clean4;
++
++ printk("Bridge ethernet database registered\n");
++ return ret;
++
++clean4: nf_unregister_hook(&brdb_br_ops[4]);
++clean3: nf_unregister_hook(&brdb_br_ops[3]);
++clean2: nf_unregister_hook(&brdb_br_ops[2]);
++clean1: nf_unregister_hook(&brdb_br_ops[1]);
++clean0: nf_unregister_hook(&brdb_br_ops[0]);
++
++ return ret;
++}
++
++static void __exit fini(void)
++{
++ nf_unregister_hook(&brdb_br_ops[4]);
++ nf_unregister_hook(&brdb_br_ops[3]);
++ nf_unregister_hook(&brdb_br_ops[2]);
++ nf_unregister_hook(&brdb_br_ops[1]);
++ nf_unregister_hook(&brdb_br_ops[0]);
++ nf_unregister_sockopt(&brdb_sockopts);
++}
++
++module_init(init);
++module_exit(fini);
+--- /dev/null Thu Aug 24 11:00:32 2000
++++ ebt2.0pre3/net/bridge/netfilter/ebtable_filter.c Sat Apr 27 21:05:16 2002
+@@ -0,0 +1,93 @@
++/*
++ * ebtable_filter
++ *
++ * Authors:
++ * Bart De Schuymer <bart.de.schuymer@pandora.be>
++ *
++ * April, 2002
++ *
++ */
++
++#include <linux/netfilter_bridge/ebtables.h>
++#include <linux/netfilter_bridge.h>
++#include <linux/module.h>
++
++#define FILTER_VALID_HOOKS ((1 << NF_BR_LOCAL_IN) | (1 << NF_BR_FORWARD) | \
++ (1 << NF_BR_LOCAL_OUT))
++
++static struct ebt_entries initial_chains[] =
++{
++ {0, EBT_ACCEPT, 0},
++ {0, EBT_ACCEPT, 0},
++ {0, EBT_ACCEPT, 0}
++};
++
++static struct ebt_replace initial_table =
++{
++ "filter", FILTER_VALID_HOOKS, 0, 3 * sizeof(struct ebt_entries),
++ { [NF_BR_LOCAL_IN]&initial_chains[0], [NF_BR_FORWARD]&initial_chains[1],
++ [NF_BR_LOCAL_OUT]&initial_chains[2] },{},
++ 0, NULL, (char *)initial_chains
++};
++
++static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
++{
++ if (valid_hooks & ~FILTER_VALID_HOOKS)
++ return -EINVAL;
++ return 0;
++}
++
++static struct ebt_table frame_filter =
++{
++ {NULL, NULL}, "filter", &initial_table, FILTER_VALID_HOOKS,
++ RW_LOCK_UNLOCKED, check, NULL
++};
++
++static unsigned int ebt_hook (unsigned int hook, struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ int (*okfn)(struct sk_buff *))
++{
++ return ebt_do_table(hook, pskb, in, out, &frame_filter);
++}
++
++static struct nf_hook_ops ebt_ops_filter[] = {
++ { { NULL, NULL }, ebt_hook, PF_BRIDGE, NF_BR_LOCAL_IN,
++ NF_BR_PRI_FILTER_BRIDGED},
++ { { NULL, NULL }, ebt_hook, PF_BRIDGE, NF_BR_FORWARD,
++ NF_BR_PRI_FILTER_BRIDGED},
++ { { NULL, NULL }, ebt_hook, PF_BRIDGE, NF_BR_LOCAL_OUT,
++ NF_BR_PRI_FILTER_OTHER}
++};
++
++static int __init init(void)
++{
++ int i, j, ret;
++
++ ret = ebt_register_table(&frame_filter);
++ if (ret < 0)
++ return ret;
++ for (i = 0; i < sizeof(ebt_ops_filter) / sizeof(ebt_ops_filter[0]); i++)
++ if ((ret = nf_register_hook(&ebt_ops_filter[i])) < 0)
++ goto cleanup;
++ return ret;
++cleanup:
++ for (j = 0; j < i; j++)
++ nf_unregister_hook(&ebt_ops_filter[j]);
++ ebt_unregister_table(&frame_filter);
++ return ret;
++}
++
++static void __exit fini(void)
++{
++ int i;
++
++ for (i = 0; i < sizeof(ebt_ops_filter) / sizeof(ebt_ops_filter[0]); i++)
++ nf_unregister_hook(&ebt_ops_filter[i]);
++ ebt_unregister_table(&frame_filter);
++}
++
++module_init(init);
++module_exit(fini);
++EXPORT_NO_SYMBOLS;
++MODULE_LICENSE("GPL");
+--- /dev/null Thu Aug 24 11:00:32 2000
++++ ebt2.0pre3/net/bridge/netfilter/ebtable_nat.c Sat Apr 27 21:05:16 2002
+@@ -0,0 +1,156 @@
++/*
++ * ebtable_nat
++ *
++ * Authors:
++ * Bart De Schuymer <bart.de.schuymer@pandora.be>
++ *
++ * April, 2002
++ *
++ */
++
++#include <linux/netfilter_bridge/ebtables.h>
++#include <linux/netfilter_bridge.h>
++#include <linux/netdevice.h>
++#include <linux/module.h>
++#define NAT_VALID_HOOKS ((1 << NF_BR_PRE_ROUTING) | (1 << NF_BR_LOCAL_OUT) | \
++ (1 << NF_BR_POST_ROUTING))
++
++static struct ebt_entries initial_chains[] =
++{
++ {0, EBT_ACCEPT, 0},
++ {0, EBT_ACCEPT, 0},
++ {0, EBT_ACCEPT, 0}
++};
++
++static struct ebt_replace initial_table =
++{
++ "nat", NAT_VALID_HOOKS, 0, 3 * sizeof(struct ebt_entries),
++ { [NF_BR_PRE_ROUTING]&initial_chains[0], [NF_BR_LOCAL_OUT]&initial_chains[1],
++ [NF_BR_POST_ROUTING]&initial_chains[2] }, {},
++ 0, NULL, (char *)initial_chains
++};
++
++static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
++{
++ if (valid_hooks & ~NAT_VALID_HOOKS)
++ return -EINVAL;
++ return 0;
++}
++
++static struct ebt_table frame_nat =
++{
++ {NULL, NULL}, "nat", &initial_table, NAT_VALID_HOOKS,
++ RW_LOCK_UNLOCKED, check, NULL
++};
++
++// Used by snat to know whether the frame came from FORWARD or LOCAL_OUT.
++// This is needed because of the bridge-nf patch (which allows the use of
++// iptables on bridged traffic).
++// If the packet is routed, we want the ebtables stuff on POSTROUTING to be
++// executed _after_ the iptables stuff; when it is bridged, it is the other
++// way around.
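++//
++// Rough sketch of the resulting ordering in NF_BR_POST_ROUTING (assuming
++// the bridge-nf patch is applied):
++//   bridged frame: FORWARD sets physindev = &__fake_net_device, so
++//                  ebt_nat_src() (NF_BR_PRI_NAT_SRC_BRIDGED) does the snat
++//                  and ebt_nat_src_route() lets the frame through;
++//   routed frame:  LOCAL_OUT clears physindev, so ebt_nat_src() lets it
++//                  through and ebt_nat_src_route() (NF_BR_PRI_NAT_SRC_OTHER)
++//                  does the snat after the iptables POSTROUTING hooks.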
++static struct net_device __fake_net_device = {
++ hard_header_len: ETH_HLEN
++};
++
++static unsigned int
++ebt_nat_dst (unsigned int hook, struct sk_buff **pskb,
++ const struct net_device *in, const struct net_device *out,
++ int (*okfn)(struct sk_buff *))
++{
++ return ebt_do_table(hook, pskb, in, out, &frame_nat);
++}
++
++// let snat know this frame is routed
++static unsigned int ebt_clear_physin (unsigned int hook, struct sk_buff **pskb,
++ const struct net_device *in, const struct net_device *out,
++ int (*okfn)(struct sk_buff *))
++{
++ (*pskb)->physindev = NULL;
++ return NF_ACCEPT;
++}
++
++// let snat know this frame is bridged
++static unsigned int ebt_set_physin (unsigned int hook, struct sk_buff **pskb,
++ const struct net_device *in, const struct net_device *out,
++ int (*okfn)(struct sk_buff *))
++{
++ (*pskb)->physindev = &__fake_net_device;
++ return NF_ACCEPT;
++}
++
++static unsigned int ebt_nat_src (unsigned int hook, struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ int (*okfn)(struct sk_buff *))
++{
++ // this is a routed packet
++ if ((*pskb)->physindev == NULL)
++ return NF_ACCEPT;
++ if ((*pskb)->physindev != &__fake_net_device)
++ printk("ebtables (br_nat_src): physindev hack "
++ "doesn't work - BUG\n");
++
++ return ebt_do_table(hook, pskb, in, out, &frame_nat);
++}
++
++static unsigned int ebt_nat_src_route (unsigned int hook, struct sk_buff **pskb,
++ const struct net_device *in, const struct net_device *out,
++ int (*okfn)(struct sk_buff *))
++{
++ // this is a bridged packet
++ if ((*pskb)->physindev == &__fake_net_device)
++ return NF_ACCEPT;
++ if ((*pskb)->physindev)
++ printk("ebtables (br_nat_src_route): physindev hack "
++ "doesn't work - BUG\n");
++
++ return ebt_do_table(hook, pskb, in, out, &frame_nat);
++}
++
++static struct nf_hook_ops ebt_ops_nat[] = {
++ { { NULL, NULL }, ebt_nat_dst, PF_BRIDGE, NF_BR_LOCAL_OUT,
++ NF_BR_PRI_NAT_DST_OTHER},
++ { { NULL, NULL }, ebt_nat_src, PF_BRIDGE, NF_BR_POST_ROUTING,
++ NF_BR_PRI_NAT_SRC_BRIDGED},
++ { { NULL, NULL }, ebt_nat_src_route, PF_BRIDGE, NF_BR_POST_ROUTING,
++ NF_BR_PRI_NAT_SRC_OTHER},
++ { { NULL, NULL }, ebt_nat_dst, PF_BRIDGE, NF_BR_PRE_ROUTING,
++ NF_BR_PRI_NAT_DST_BRIDGED},
++ { { NULL, NULL }, ebt_clear_physin, PF_BRIDGE, NF_BR_LOCAL_OUT,
++ NF_BR_PRI_FILTER_OTHER + 1},
++ { { NULL, NULL }, ebt_set_physin, PF_BRIDGE, NF_BR_FORWARD,
++ NF_BR_PRI_FILTER_OTHER + 1}
++};
++
++static int __init init(void)
++{
++ int i, ret, j;
++
++ ret = ebt_register_table(&frame_nat);
++ if (ret < 0)
++ return ret;
++ for (i = 0; i < sizeof(ebt_ops_nat) / sizeof(ebt_ops_nat[0]); i++)
++ if ((ret = nf_register_hook(&ebt_ops_nat[i])) < 0)
++ goto cleanup;
++ return ret;
++cleanup:
++ for (j = 0; j < i; j++)
++ nf_unregister_hook(&ebt_ops_nat[j]);
++ ebt_unregister_table(&frame_nat);
++ return ret;
++}
++
++static void __exit fini(void)
++{
++ int i;
++
++ for (i = 0; i < sizeof(ebt_ops_nat) / sizeof(ebt_ops_nat[0]); i++)
++ nf_unregister_hook(&ebt_ops_nat[i]);
++ ebt_unregister_table(&frame_nat);
++}
++
++module_init(init);
++module_exit(fini);
++EXPORT_NO_SYMBOLS;
++MODULE_LICENSE("GPL");
+--- /dev/null Thu Aug 24 11:00:32 2000
++++ ebt2.0pre3/net/bridge/netfilter/ebtable_broute.c Sat Apr 27 21:05:16 2002
+@@ -0,0 +1,80 @@
++/*
++ * ebtable_broute
++ *
++ * Authors:
++ * Bart De Schuymer <bart.de.schuymer@pandora.be>
++ *
++ * April, 2002
++ *
++ * This table lets you choose between routing and bridging for frames
++ * entering on a bridge-enslaved NIC. It is traversed before any other
++ * ebtables table. See net/bridge/br_input.c.
++ */
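++
++/* Illustrative (hypothetical) userspace rule, using the ebtables tool:
++ *     ebtables -t broute -A BROUTING -p IPv4 -i eth0 -j DROP
++ * would make IPv4 frames arriving on eth0 be routed rather than bridged,
++ * since DROP in the broute table means "route" (see the comment on
++ * initial_chain below)
++ */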
++
++#include <linux/netfilter_bridge/ebtables.h>
++#include <linux/netfilter_bridge.h>
++#include <linux/netdevice.h>
++#include <linux/module.h>
++#include <linux/if_bridge.h>
++#include <linux/brlock.h>
++
++// EBT_ACCEPT means the frame will be bridged
++// EBT_DROP means the frame will be routed
++static struct ebt_entries initial_chain =
++ {0, EBT_ACCEPT, 0};
++
++static struct ebt_replace initial_table =
++{
++ "broute", 1 << NF_BR_BROUTING, 0, sizeof(struct ebt_entries),
++ { [NF_BR_BROUTING]&initial_chain}, {},
++ 0, NULL, (char *)&initial_chain
++};
++
++static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
++{
++ if (valid_hooks & ~(1 << NF_BR_BROUTING))
++ return -EINVAL;
++ return 0;
++}
++
++static struct ebt_table broute_table =
++{
++ {NULL, NULL}, "broute", &initial_table, 1 << NF_BR_BROUTING,
++ RW_LOCK_UNLOCKED, check, NULL
++};
++
++static unsigned int
++ebt_broute (unsigned int hook, struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ int (*okfn)(struct sk_buff *))
++{
++ return ebt_do_table(hook, pskb, in, out, &broute_table);
++}
++
++static int __init init(void)
++{
++ int ret;
++
++ ret = ebt_register_table(&broute_table);
++ if (ret < 0)
++ return ret;
++ br_write_lock_bh(BR_NETPROTO_LOCK);
++ // in br_input.c, br_handle_frame() wants to call broute_decision()
++ broute_decision = ebt_broute;
++ br_write_unlock_bh(BR_NETPROTO_LOCK);
++ return ret;
++}
++
++static void __exit fini(void)
++{
++ br_write_lock_bh(BR_NETPROTO_LOCK);
++ broute_decision = NULL;
++ br_write_unlock_bh(BR_NETPROTO_LOCK);
++ ebt_unregister_table(&broute_table);
++}
++
++module_init(init);
++module_exit(fini);
++EXPORT_NO_SYMBOLS;
++MODULE_LICENSE("GPL");
+--- /dev/null Thu Aug 24 11:00:32 2000
++++ ebt2.0pre3/net/bridge/netfilter/ebt_redirect.c Sat Apr 27 22:48:52 2002
+@@ -0,0 +1,65 @@
++/*
++ * ebt_redirect
++ *
++ * Authors:
++ * Bart De Schuymer <bart.de.schuymer@pandora.be>
++ *
++ * April, 2002
++ *
++ */
++
++#include <linux/netfilter_bridge/ebtables.h>
++#include <linux/netfilter_bridge/ebt_redirect.h>
++#include <linux/netfilter_bridge.h>
++#include <linux/skbuff.h>
++#include <linux/module.h>
++#include <net/sock.h>
++#include "../br_private.h"
++
++static __u8 ebt_target_redirect(struct sk_buff **pskb, unsigned int hooknr,
++ const struct net_device *in, const struct net_device *out,
++ const void *data, unsigned int datalen)
++{
++ struct ebt_redirect_info *infostuff = (struct ebt_redirect_info *) data;
++
++ memcpy((**pskb).mac.ethernet->h_dest,
++ in->br_port->br->dev.dev_addr, ETH_ALEN);
++ (*pskb)->pkt_type = PACKET_HOST;
++ return infostuff->target;
++}
++
++static int ebt_target_redirect_check(const char *tablename, unsigned int hooknr,
++ const struct ebt_entry *e, void *data, unsigned int datalen)
++{
++ struct ebt_redirect_info *infostuff = (struct ebt_redirect_info *) data;
++
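++	/* The redirect target rewrites the destination MAC to that of the
++	 * bridge device itself, so it is only allowed in the PREROUTING
++	 * chain of the nat table and the BROUTING chain of the broute table
++	 */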
++ if ( (strcmp(tablename, "nat") || hooknr != NF_BR_PRE_ROUTING) &&
++ (strcmp(tablename, "broute") || hooknr != NF_BR_BROUTING) )
++ return -EINVAL;
++ if (datalen != sizeof(struct ebt_redirect_info))
++ return -EINVAL;
++ if (infostuff->target >= NUM_STANDARD_TARGETS)
++ return -EINVAL;
++ return 0;
++}
++
++static struct ebt_target redirect_target =
++{
++ {NULL, NULL}, EBT_REDIRECT_TARGET, ebt_target_redirect,
++ ebt_target_redirect_check, NULL, THIS_MODULE
++};
++
++static int __init init(void)
++{
++ return ebt_register_target(&redirect_target);
++}
++
++static void __exit fini(void)
++{
++ ebt_unregister_target(&redirect_target);
++}
++
++module_init(init);
++module_exit(fini);
++EXPORT_NO_SYMBOLS;
++MODULE_LICENSE("GPL");
+--- /dev/null Thu Aug 24 11:00:32 2000
++++ ebt2.0pre3/net/bridge/netfilter/ebt_arp.c Sat Apr 27 21:05:16 2002
+@@ -0,0 +1,107 @@
++/*
++ * ebt_arp
++ *
++ * Authors:
++ * Bart De Schuymer <bart.de.schuymer@pandora.be>
++ * Tim Gardner <timg@tpi.com>
++ *
++ * April, 2002
++ *
++ */
++
++#include <linux/netfilter_bridge/ebtables.h>
++#include <linux/netfilter_bridge/ebt_arp.h>
++#include <linux/if_arp.h>
++#include <linux/module.h>
++
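++/* FWINV2(bool, invflg): xor the result of 'bool' with the corresponding
++ * EBT_ARP_* invert bit in infostuff->invflags, so a set invert flag
++ * negates the test
++ */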
++#define FWINV2(bool,invflg) ((bool) ^ !!(infostuff->invflags & invflg))
++static int ebt_filter_arp(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *data,
++ unsigned int datalen, const struct ebt_counter *c)
++{
++ struct ebt_arp_info *infostuff = (struct ebt_arp_info *)data;
++
++ if (infostuff->bitmask & EBT_ARP_OPCODE && FWINV2(infostuff->opcode !=
++ ((*skb).nh.arph)->ar_op, EBT_ARP_OPCODE))
++ return 1;
++ if (infostuff->bitmask & EBT_ARP_HTYPE && FWINV2(infostuff->htype !=
++ ((*skb).nh.arph)->ar_hrd, EBT_ARP_HTYPE))
++ return 1;
++ if (infostuff->bitmask & EBT_ARP_PTYPE && FWINV2(infostuff->ptype !=
++ ((*skb).nh.arph)->ar_pro, EBT_ARP_PTYPE))
++ return 1;
++
++ if (infostuff->bitmask & (EBT_ARP_SRC_IP | EBT_ARP_DST_IP))
++ {
++ __u32 arp_len = sizeof(struct arphdr) +
++ (2*(((*skb).nh.arph)->ar_hln)) +
++ (2*(((*skb).nh.arph)->ar_pln));
++ __u32 dst;
++ __u32 src;
++
++ // Make sure the packet is long enough.
++ if ((((*skb).nh.raw) + arp_len) > (*skb).tail)
++ return 1;
++		// IPv4 addresses are always 4 bytes.
++ if (((*skb).nh.arph)->ar_pln != sizeof(__u32))
++ return 1;
++
++ if (infostuff->bitmask & EBT_ARP_SRC_IP) {
++ memcpy(&src, ((*skb).nh.raw) + sizeof(struct arphdr) +
++ ((*skb).nh.arph)->ar_hln, sizeof(__u32));
++ if (FWINV2(infostuff->saddr != (src & infostuff->smsk),
++ EBT_ARP_SRC_IP))
++ return 1;
++ }
++
++ if (infostuff->bitmask & EBT_ARP_DST_IP) {
++ memcpy(&dst, ((*skb).nh.raw)+sizeof(struct arphdr) +
++ (2*(((*skb).nh.arph)->ar_hln)) +
++ (((*skb).nh.arph)->ar_pln), sizeof(__u32));
++ if (FWINV2(infostuff->daddr != (dst & infostuff->dmsk),
++ EBT_ARP_DST_IP))
++ return 1;
++ }
++ }
++ return 0;
++}
++
++static int ebt_arp_check(const char *tablename, unsigned int hooknr,
++ const struct ebt_entry *e, void *data, unsigned int datalen)
++{
++ struct ebt_arp_info *infostuff = (struct ebt_arp_info *) data;
++
++ if (datalen != sizeof(struct ebt_arp_info))
++ return -EINVAL;
++ if (e->bitmask & (EBT_NOPROTO | EBT_802_3) ||
++ (e->ethproto != __constant_htons(ETH_P_ARP) &&
++ e->ethproto != __constant_htons(ETH_P_RARP)) ||
++ e->invflags & EBT_IPROTO)
++ return -EINVAL;
++ if (infostuff->bitmask & ~EBT_ARP_MASK)
++ return -EINVAL;
++ return 0;
++}
++
++static struct ebt_match filter_arp =
++{
++ {NULL, NULL}, EBT_ARP_MATCH, ebt_filter_arp, ebt_arp_check, NULL,
++ THIS_MODULE
++};
++
++static int __init init(void)
++{
++ return ebt_register_match(&filter_arp);
++}
++
++static void __exit fini(void)
++{
++ ebt_unregister_match(&filter_arp);
++}
++
++module_init(init);
++module_exit(fini);
++EXPORT_NO_SYMBOLS;
++MODULE_LICENSE("GPL");
+--- /dev/null Thu Aug 24 11:00:32 2000
++++ ebt2.0pre3/net/bridge/netfilter/ebt_ip.c Sat Apr 27 21:05:16 2002
+@@ -0,0 +1,81 @@
++/*
++ * ebt_ip
++ *
++ * Authors:
++ * Bart De Schuymer <bart.de.schuymer@pandora.be>
++ *
++ * April, 2002
++ *
++ */
++
++#include <linux/netfilter_bridge/ebtables.h>
++#include <linux/netfilter_bridge/ebt_ip.h>
++#include <linux/ip.h>
++#include <linux/module.h>
++
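++/* FWINV2(bool, invflg): xor the result of 'bool' with the corresponding
++ * EBT_IP_* invert bit in infostuff->invflags, so a set invert flag
++ * negates the test
++ */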
++#define FWINV2(bool,invflg) ((bool) ^ !!(infostuff->invflags & invflg))
++static int ebt_filter_ip(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *data,
++ unsigned int datalen, const struct ebt_counter *c)
++{
++ struct ebt_ip_info *infostuff = (struct ebt_ip_info *) data;
++
++ if (infostuff->bitmask & EBT_IP_TOS &&
++ FWINV2(infostuff->tos != ((*skb).nh.iph)->tos, EBT_IP_TOS))
++ return 1;
++ if (infostuff->bitmask & EBT_IP_PROTO && FWINV2(infostuff->protocol !=
++ ((*skb).nh.iph)->protocol, EBT_IP_PROTO))
++ return 1;
++ if (infostuff->bitmask & EBT_IP_SOURCE &&
++ FWINV2((((*skb).nh.iph)->saddr & infostuff->smsk) !=
++ infostuff->saddr, EBT_IP_SOURCE))
++ return 1;
++ if ((infostuff->bitmask & EBT_IP_DEST) &&
++ FWINV2((((*skb).nh.iph)->daddr & infostuff->dmsk) !=
++ infostuff->daddr, EBT_IP_DEST))
++ return 1;
++ return 0;
++}
++
++static int ebt_ip_check(const char *tablename, unsigned int hooknr,
++ const struct ebt_entry *e, void *data, unsigned int datalen)
++{
++ struct ebt_ip_info *infostuff = (struct ebt_ip_info *) data;
++
++	if (datalen != sizeof(struct ebt_ip_info))
++		return -EINVAL;
++	if (e->bitmask & (EBT_NOPROTO | EBT_802_3) ||
++	   e->ethproto != __constant_htons(ETH_P_IP) ||
++	   e->invflags & EBT_IPROTO)
++		return -EINVAL;
++	if (infostuff->bitmask & ~EBT_IP_MASK)
++		return -EINVAL;
++	return 0;
++}
++
++static struct ebt_match filter_ip =
++{
++ {NULL, NULL}, EBT_IP_MATCH, ebt_filter_ip, ebt_ip_check, NULL,
++ THIS_MODULE
++};
++
++static int __init init(void)
++{
++ return ebt_register_match(&filter_ip);
++}
++
++static void __exit fini(void)
++{
++ ebt_unregister_match(&filter_ip);
++}
++
++module_init(init);
++module_exit(fini);
++EXPORT_NO_SYMBOLS;
++MODULE_LICENSE("GPL");
+--- /dev/null Thu Aug 24 11:00:32 2000
++++ ebt2.0pre3/net/bridge/netfilter/ebt_log.c Sat Apr 27 21:05:16 2002
+@@ -0,0 +1,111 @@
++/*
++ * ebt_log
++ *
++ * Authors:
++ * Bart De Schuymer <bart.de.schuymer@pandora.be>
++ *
++ * April, 2002
++ *
++ */
++
++#include <linux/netfilter_bridge/ebtables.h>
++#include <linux/netfilter_bridge/ebt_log.h>
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/if_arp.h>
++#include <linux/spinlock.h>
++
++static spinlock_t ebt_log_lock = SPIN_LOCK_UNLOCKED;
++
++static int ebt_log_check(const char *tablename, unsigned int hooknr,
++ const struct ebt_entry *e, void *data, unsigned int datalen)
++{
++ struct ebt_log_info *loginfo = (struct ebt_log_info *)data;
++
++ if (datalen != sizeof(struct ebt_log_info))
++ return -EINVAL;
++ if (loginfo->bitmask & ~EBT_LOG_MASK)
++ return -EINVAL;
++ if (loginfo->loglevel >= 8)
++ return -EINVAL;
++ loginfo->prefix[EBT_LOG_PREFIX_SIZE - 1] = '\0';
++ return 0;
++}
++
++static void ebt_log(const struct sk_buff *skb, const struct net_device *in,
++ const struct net_device *out, const void *data, unsigned int datalen,
++ const struct ebt_counter *c)
++{
++ struct ebt_log_info *loginfo = (struct ebt_log_info *)data;
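++	// build the "<n>" console loglevel prefix for printk
++	// (loginfo->loglevel was verified to be < 8 in ebt_log_check)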
++ char level_string[4] = "< >";
++ level_string[1] = '0' + loginfo->loglevel;
++
++ spin_lock_bh(&ebt_log_lock);
++ printk(level_string);
++ // max length: 29 + 10 + 2 * 16
++ printk("%s IN=%s OUT=%s ",
++ loginfo->prefix,
++ in ? in->name : "",
++ out ? out->name : "");
++
++ if (skb->dev->hard_header_len) {
++ int i;
++ unsigned char *p = (skb->mac.ethernet)->h_source;
++ printk("MAC source = ");
++ for (i = 0; i < ETH_ALEN; i++,p++)
++ printk("%02x%c", *p,
++ i == ETH_ALEN - 1
++ ? ' ':':');// length: 31
++ printk("MAC dest = ");
++ p = (skb->mac.ethernet)->h_dest;
++ for (i = 0; i < ETH_ALEN; i++,p++)
++ printk("%02x%c", *p,
++ i == ETH_ALEN - 1
++ ? ' ':':');// length: 29
++ }
++ // length: 14
++ printk("proto = 0x%04x", ntohs(((*skb).mac.ethernet)->h_proto));
++
++ if ((loginfo->bitmask & EBT_LOG_IP) && skb->mac.ethernet->h_proto ==
++ htons(ETH_P_IP)){
++ struct iphdr *iph = skb->nh.iph;
++ // max length: 46
++ printk(" IP SRC=%u.%u.%u.%u IP DST=%u.%u.%u.%u,",
++ NIPQUAD(iph->saddr), NIPQUAD(iph->daddr));
++ // max length: 26
++ printk(" IP tos=0x%02X, IP proto=%d", iph->tos, iph->protocol);
++ }
++
++ if ((loginfo->bitmask & EBT_LOG_ARP) &&
++ ((skb->mac.ethernet->h_proto == __constant_htons(ETH_P_ARP)) ||
++ (skb->mac.ethernet->h_proto == __constant_htons(ETH_P_RARP)))) {
++ struct arphdr * arph = skb->nh.arph;
++ // max length: 40
++ printk(" ARP HTYPE=%d, PTYPE=0x%04x, OPCODE=%d",
++ ntohs(arph->ar_hrd), ntohs(arph->ar_pro),
++ ntohs(arph->ar_op));
++ }
++ printk("\n");
++ spin_unlock_bh(&ebt_log_lock);
++}
++
++static struct ebt_watcher log =
++{
++ {NULL, NULL}, EBT_LOG_WATCHER, ebt_log, ebt_log_check, NULL,
++ THIS_MODULE
++};
++
++static int __init init(void)
++{
++ return ebt_register_watcher(&log);
++}
++
++static void __exit fini(void)
++{
++ ebt_unregister_watcher(&log);
++}
++
++module_init(init);
++module_exit(fini);
++EXPORT_NO_SYMBOLS;
++MODULE_LICENSE("GPL");
+--- /dev/null Thu Aug 24 11:00:32 2000
++++ ebt2.0pre3/net/bridge/netfilter/ebt_nat.c Sat Apr 27 21:05:16 2002
+@@ -0,0 +1,106 @@
++/*
++ * ebt_nat
++ *
++ * Authors:
++ * Bart De Schuymer <bart.de.schuymer@pandora.be>
++ *
++ * April, 2002
++ *
++ */
++
++#include <linux/netfilter_bridge/ebtables.h>
++#include <linux/netfilter_bridge/ebt_nat.h>
++#include <linux/netfilter_bridge.h>