Update kernel headers to 2.6.31

Final 2.6.31 released, so update sanitized headers.
Stephen Hemminger 2009-09-10 09:03:22 -07:00
parent f0309aa493
commit f40554f687
14 changed files with 8 additions and 1273 deletions

include/linux/gen_stats.h

@@ -19,6 +19,11 @@ enum {
* @packets: number of seen packets
*/
struct gnet_stats_basic
{
__u64 bytes;
__u32 packets;
};
struct gnet_stats_basic_packed
{
__u64 bytes;
__u32 packets;
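
A note on why the duplicate struct appears: 2.6.31 adds gnet_stats_basic_packed because the original struct's size differs between 32-bit and 64-bit ABIs once tail padding is counted; the packed variant pins it at 12 bytes. A minimal sketch that would show the difference once these headers are installed, assuming a 64-bit build:

#include <stdio.h>
#include <linux/gen_stats.h>

int main(void)
{
	/* x86-64: 16 vs 12; the packed variant matches the
	 * 12-byte layout regardless of ABI. */
	printf("basic=%zu packed=%zu\n",
	       sizeof(struct gnet_stats_basic),
	       sizeof(struct gnet_stats_basic_packed));
	return 0;
}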

include/linux/if_addr.h

@@ -55,9 +55,7 @@ struct ifa_cacheinfo
};
/* backwards compatibility for userspace */
#ifndef __KERNEL__
#define IFA_RTA(r) ((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct ifaddrmsg))))
#define IFA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct ifaddrmsg))
#endif
#endif
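
IFA_RTA and IFA_PAYLOAD are exactly what stays visible to userspace, and they exist so applications can walk the attributes of an address message. A hedged sketch of the usual loop; nlh is assumed to be a complete, validated RTM_NEWADDR message:

#include <linux/if_addr.h>
#include <linux/rtnetlink.h>

static void walk_addr_attrs(struct nlmsghdr *nlh)
{
	struct ifaddrmsg *ifa = NLMSG_DATA(nlh);
	struct rtattr *rta = IFA_RTA(ifa);
	int len = IFA_PAYLOAD(nlh);

	for (; RTA_OK(rta, len); rta = RTA_NEXT(rta, len)) {
		if (rta->rta_type == IFA_LOCAL) {
			/* RTA_DATA(rta) points at the raw address bytes */
		}
	}
}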

include/linux/if_ether.h

@@ -120,30 +120,5 @@ struct ethhdr {
__be16 h_proto; /* packet type ID field */
} __attribute__((packed));
#ifdef __KERNEL__
#include <linux/skbuff.h>
static inline struct ethhdr *eth_hdr(const struct sk_buff *skb)
{
return (struct ethhdr *)skb_mac_header(skb);
}
int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
#ifdef CONFIG_SYSCTL
extern struct ctl_table ether_table[];
#endif
extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len);
/*
* Display a 6 byte device address (MAC) in a readable format.
*/
extern char *print_mac(char *buf, const unsigned char *addr);
#define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x"
#define MAC_BUF_SIZE 18
#define DECLARE_MAC_BUF(var) char var[MAC_BUF_SIZE] __maybe_unused
#endif
#endif /* _LINUX_IF_ETHER_H */

include/linux/if_link.h

@@ -88,10 +88,8 @@ enum
#define IFLA_MAX (__IFLA_MAX - 1)
/* backwards compatibility for userspace */
#ifndef __KERNEL__
#define IFLA_RTA(r) ((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct ifinfomsg))))
#define IFLA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct ifinfomsg))
#endif
/* ifi_flags.

include/linux/if_tunnel.h

@@ -3,9 +3,6 @@
#include <linux/types.h>
#ifdef __KERNEL__
#include <linux/ip.h>
#endif
#define SIOCGETTUNNEL (SIOCDEVPRIVATE + 0)
#define SIOCADDTUNNEL (SIOCDEVPRIVATE + 1)

include/linux/if_vlan.h

@@ -13,304 +13,6 @@
#ifndef _LINUX_IF_VLAN_H_
#define _LINUX_IF_VLAN_H_
#ifdef __KERNEL__
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#define VLAN_HLEN 4 /* The additional bytes (on top of the Ethernet header)
* that VLAN requires.
*/
#define VLAN_ETH_ALEN 6 /* Octets in one ethernet addr */
#define VLAN_ETH_HLEN 18 /* Total octets in header. */
#define VLAN_ETH_ZLEN 64 /* Min. octets in frame sans FCS */
/*
* According to 802.3ac, the packet can be 4 bytes longer. --Klika Jan
*/
#define VLAN_ETH_DATA_LEN 1500 /* Max. octets in payload */
#define VLAN_ETH_FRAME_LEN 1518 /* Max. octets in frame sans FCS */
/*
* struct vlan_hdr - vlan header
* @h_vlan_TCI: priority and VLAN ID
* @h_vlan_encapsulated_proto: packet type ID or len
*/
struct vlan_hdr {
__be16 h_vlan_TCI;
__be16 h_vlan_encapsulated_proto;
};
/**
* struct vlan_ethhdr - vlan ethernet header (ethhdr + vlan_hdr)
* @h_dest: destination ethernet address
* @h_source: source ethernet address
* @h_vlan_proto: ethernet protocol (always 0x8100)
* @h_vlan_TCI: priority and VLAN ID
* @h_vlan_encapsulated_proto: packet type ID or len
*/
struct vlan_ethhdr {
unsigned char h_dest[ETH_ALEN];
unsigned char h_source[ETH_ALEN];
__be16 h_vlan_proto;
__be16 h_vlan_TCI;
__be16 h_vlan_encapsulated_proto;
};
#include <linux/skbuff.h>
static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
{
return (struct vlan_ethhdr *)skb_mac_header(skb);
}
#define VLAN_VID_MASK 0xfff
/* found in socket.c */
extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
/* if this changes, algorithm will have to be reworked because this
* depends on completely exhausting the VLAN identifier space. Thus
* it gives constant time look-up, but in many cases it wastes memory.
*/
#define VLAN_GROUP_ARRAY_LEN 4096
#define VLAN_GROUP_ARRAY_SPLIT_PARTS 8
#define VLAN_GROUP_ARRAY_PART_LEN (VLAN_GROUP_ARRAY_LEN/VLAN_GROUP_ARRAY_SPLIT_PARTS)
struct vlan_group {
struct net_device *real_dev; /* The ethernet(like) device
* the vlan is attached to.
*/
unsigned int nr_vlans;
struct hlist_node hlist; /* linked list */
struct net_device **vlan_devices_arrays[VLAN_GROUP_ARRAY_SPLIT_PARTS];
struct rcu_head rcu;
};
static inline struct net_device *vlan_group_get_device(struct vlan_group *vg,
u16 vlan_id)
{
struct net_device **array;
array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
return array ? array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] : NULL;
}
static inline void vlan_group_set_device(struct vlan_group *vg,
u16 vlan_id,
struct net_device *dev)
{
struct net_device **array;
if (!vg)
return;
array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] = dev;
}
#define vlan_tx_tag_present(__skb) ((__skb)->vlan_tci)
#define vlan_tx_tag_get(__skb) ((__skb)->vlan_tci)
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
extern u16 vlan_dev_vlan_id(const struct net_device *dev);
extern int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
u16 vlan_tci, int polling);
extern int vlan_hwaccel_do_receive(struct sk_buff *skb);
extern int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
unsigned int vlan_tci, struct sk_buff *skb);
extern int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
unsigned int vlan_tci);
#else
static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
BUG();
return NULL;
}
static inline u16 vlan_dev_vlan_id(const struct net_device *dev)
{
BUG();
return 0;
}
static inline int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
u16 vlan_tci, int polling)
{
BUG();
return NET_XMIT_SUCCESS;
}
static inline int vlan_hwaccel_do_receive(struct sk_buff *skb)
{
return 0;
}
static inline int vlan_gro_receive(struct napi_struct *napi,
struct vlan_group *grp,
unsigned int vlan_tci, struct sk_buff *skb)
{
return NET_RX_DROP;
}
static inline int vlan_gro_frags(struct napi_struct *napi,
struct vlan_group *grp, unsigned int vlan_tci)
{
return NET_RX_DROP;
}
#endif
/**
* vlan_hwaccel_rx - netif_rx wrapper for VLAN RX acceleration
* @skb: buffer
* @grp: vlan group
* @vlan_tci: VLAN TCI as received from the card
*/
static inline int vlan_hwaccel_rx(struct sk_buff *skb,
struct vlan_group *grp,
u16 vlan_tci)
{
return __vlan_hwaccel_rx(skb, grp, vlan_tci, 0);
}
/**
* vlan_hwaccel_receive_skb - netif_receive_skb wrapper for VLAN RX acceleration
* @skb: buffer
* @grp: vlan group
* @vlan_tci: VLAN TCI as received from the card
*/
static inline int vlan_hwaccel_receive_skb(struct sk_buff *skb,
struct vlan_group *grp,
u16 vlan_tci)
{
return __vlan_hwaccel_rx(skb, grp, vlan_tci, 1);
}
/**
* __vlan_put_tag - regular VLAN tag inserting
* @skb: skbuff to tag
* @vlan_tci: VLAN TCI to insert
*
* Inserts the VLAN tag into @skb as part of the payload
* Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
*
* Following the skb_unshare() example, in case of error, the calling function
* doesn't have to worry about freeing the original skb.
*/
static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, u16 vlan_tci)
{
struct vlan_ethhdr *veth;
if (skb_cow_head(skb, VLAN_HLEN) < 0) {
kfree_skb(skb);
return NULL;
}
veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN);
/* Move the mac addresses to the beginning of the new header. */
memmove(skb->data, skb->data + VLAN_HLEN, 2 * VLAN_ETH_ALEN);
skb->mac_header -= VLAN_HLEN;
/* first, the ethernet type */
veth->h_vlan_proto = htons(ETH_P_8021Q);
/* now, the TCI */
veth->h_vlan_TCI = htons(vlan_tci);
skb->protocol = htons(ETH_P_8021Q);
return skb;
}
/**
* __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting
* @skb: skbuff to tag
* @vlan_tci: VLAN TCI to insert
*
* Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest
*/
static inline struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb,
u16 vlan_tci)
{
skb->vlan_tci = vlan_tci;
return skb;
}
#define HAVE_VLAN_PUT_TAG
/**
* vlan_put_tag - inserts VLAN tag according to device features
* @skb: skbuff to tag
* @vlan_tci: VLAN TCI to insert
*
* Assumes skb->dev is the target that will xmit this frame.
* Returns a VLAN tagged skb.
*/
static inline struct sk_buff *vlan_put_tag(struct sk_buff *skb, u16 vlan_tci)
{
if (skb->dev->features & NETIF_F_HW_VLAN_TX) {
return __vlan_hwaccel_put_tag(skb, vlan_tci);
} else {
return __vlan_put_tag(skb, vlan_tci);
}
}
/**
* __vlan_get_tag - get the VLAN ID that is part of the payload
* @skb: skbuff to query
* @vlan_tci: buffer to store value
*
* Returns error if the skb is not of VLAN type
*/
static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
{
struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data;
if (veth->h_vlan_proto != htons(ETH_P_8021Q)) {
return -EINVAL;
}
*vlan_tci = ntohs(veth->h_vlan_TCI);
return 0;
}
/**
* __vlan_hwaccel_get_tag - get the VLAN ID that is in @skb->cb[]
* @skb: skbuff to query
* @vlan_tci: buffer to store value
*
* Returns error if @skb->vlan_tci is not set correctly
*/
static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
u16 *vlan_tci)
{
if (vlan_tx_tag_present(skb)) {
*vlan_tci = skb->vlan_tci;
return 0;
} else {
*vlan_tci = 0;
return -EINVAL;
}
}
#define HAVE_VLAN_GET_TAG
/**
* vlan_get_tag - get the VLAN ID from the skb
* @skb: skbuff to query
* @vlan_tci: buffer to store value
*
* Returns error if the skb is not VLAN tagged
*/
static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
{
if (skb->dev->features & NETIF_F_HW_VLAN_TX) {
return __vlan_hwaccel_get_tag(skb, vlan_tci);
} else {
return __vlan_get_tag(skb, vlan_tci);
}
}
#endif /* __KERNEL__ */
/* VLAN IOCTLs are found in sockios.h */
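With the kernel block gone, the ioctl interface named in the closing comment is all userspace keeps here. A minimal sketch of that path, assuming fd is an AF_INET datagram socket; this is the legacy vconfig-style interface:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/if_vlan.h>
#include <linux/sockios.h>

/* Create a tagged interface such as eth0.100 via the VLAN ioctl. */
static int add_vlan(int fd, const char *dev, int vid)
{
	struct vlan_ioctl_args req;

	memset(&req, 0, sizeof(req));
	req.cmd = ADD_VLAN_CMD;
	strncpy(req.device1, dev, sizeof(req.device1) - 1);
	req.u.VID = vid;
	return ioctl(fd, SIOCSIFVLAN, &req);
}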

include/linux/inet_diag.h

@@ -120,21 +120,5 @@ struct tcpvegas_info {
__u32 tcpv_minrtt;
};
#ifdef __KERNEL__
struct sock;
struct inet_hashinfo;
struct inet_diag_handler {
struct inet_hashinfo *idiag_hashinfo;
void (*idiag_get_info)(struct sock *sk,
struct inet_diag_msg *r,
void *info);
__u16 idiag_info_size;
__u16 idiag_type;
};
extern int inet_diag_register(const struct inet_diag_handler *handler);
extern void inet_diag_unregister(const struct inet_diag_handler *handler);
#endif /* __KERNEL__ */
#endif /* _INET_DIAG_H_ */
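
The handler registration above is kernel-internal; what userspace keeps is the request/response layout, which is how ss talks to NETLINK_INET_DIAG. A hedged sketch of the request side, with error handling elided:

#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/inet_diag.h>

static int dump_tcp_sockets(void)
{
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_INET_DIAG);
	struct {
		struct nlmsghdr nlh;
		struct inet_diag_req req;
	} msg = {
		.nlh = {
			.nlmsg_len   = sizeof(msg),
			.nlmsg_type  = TCPDIAG_GETSOCK,
			.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
		},
		.req = {
			.idiag_family = AF_INET,
			.idiag_states = ~0U,	/* all TCP states */
		},
	};

	return send(fd, &msg, sizeof(msg), 0);
}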

include/linux/netfilter/x_tables.h

@@ -171,440 +171,5 @@ struct xt_counters_info
#define XT_ENTRY_ITERATE(type, entries, size, fn, args...) \
XT_ENTRY_ITERATE_CONTINUE(type, entries, size, 0, fn, args)
#ifdef __KERNEL__
#include <linux/netdevice.h>
/**
* struct xt_match_param - parameters for match extensions' match functions
*
* @in: input netdevice
* @out: output netdevice
* @match: struct xt_match through which this function was invoked
* @matchinfo: per-match data
* @fragoff: packet is a fragment, this is the data offset
* @thoff: position of transport header relative to skb->data
* @hook: hook number given packet came from
* @family: Actual NFPROTO_* through which the function is invoked
* (helpful when match->family == NFPROTO_UNSPEC)
* @hotdrop: drop packet if we had inspection problems
*/
struct xt_match_param {
const struct net_device *in, *out;
const struct xt_match *match;
const void *matchinfo;
int fragoff;
unsigned int thoff;
unsigned int hooknum;
u_int8_t family;
bool *hotdrop;
};
/**
* struct xt_mtchk_param - parameters for match extensions'
* checkentry functions
*
* @table: table the rule is tried to be inserted into
* @entryinfo: the family-specific rule data
* (struct ipt_ip, ip6t_ip, ebt_entry)
* @match: struct xt_match through which this function was invoked
* @matchinfo: per-match data
* @hook_mask: via which hooks the new rule is reachable
*/
struct xt_mtchk_param {
const char *table;
const void *entryinfo;
const struct xt_match *match;
void *matchinfo;
unsigned int hook_mask;
u_int8_t family;
};
/* Match destructor parameters */
struct xt_mtdtor_param {
const struct xt_match *match;
void *matchinfo;
u_int8_t family;
};
/**
* struct xt_target_param - parameters for target extensions' target functions
*
* @hooknum: hook through which this target was invoked
* @target: struct xt_target through which this function was invoked
* @targinfo: per-target data
*
* Other fields see above.
*/
struct xt_target_param {
const struct net_device *in, *out;
unsigned int hooknum;
const struct xt_target *target;
const void *targinfo;
u_int8_t family;
};
/**
* struct xt_tgchk_param - parameters for target extensions'
* checkentry functions
*
* @entryinfo: the family-specific rule data
* (struct ipt_entry, ip6t_entry, arpt_entry, ebt_entry)
*
* Other fields see above.
*/
struct xt_tgchk_param {
const char *table;
const void *entryinfo;
const struct xt_target *target;
void *targinfo;
unsigned int hook_mask;
u_int8_t family;
};
/* Target destructor parameters */
struct xt_tgdtor_param {
const struct xt_target *target;
void *targinfo;
u_int8_t family;
};
struct xt_match
{
struct list_head list;
const char name[XT_FUNCTION_MAXNAMELEN-1];
u_int8_t revision;
/* Return true or false: return FALSE and set *hotdrop = 1 to
force immediate packet drop. */
/* Arguments changed since 2.6.9, as this must now handle
non-linear skb, using skb_header_pointer and
skb_ip_make_writable. */
bool (*match)(const struct sk_buff *skb,
const struct xt_match_param *);
/* Called when user tries to insert an entry of this type. */
bool (*checkentry)(const struct xt_mtchk_param *);
/* Called when entry of this type deleted. */
void (*destroy)(const struct xt_mtdtor_param *);
/* Called when userspace align differs from kernel space one */
void (*compat_from_user)(void *dst, void *src);
int (*compat_to_user)(void __user *dst, void *src);
/* Set this to THIS_MODULE if you are a module, otherwise NULL */
struct module *me;
/* Free to use by each match */
unsigned long data;
const char *table;
unsigned int matchsize;
unsigned int compatsize;
unsigned int hooks;
unsigned short proto;
unsigned short family;
};
/* Registration hooks for targets. */
struct xt_target
{
struct list_head list;
const char name[XT_FUNCTION_MAXNAMELEN-1];
/* Returns verdict. Argument order changed since 2.6.9, as this
must now handle non-linear skbs, using skb_copy_bits and
skb_ip_make_writable. */
unsigned int (*target)(struct sk_buff *skb,
const struct xt_target_param *);
/* Called when user tries to insert an entry of this type:
hook_mask is a bitmask of hooks from which it can be
called. */
/* Should return true or false. */
bool (*checkentry)(const struct xt_tgchk_param *);
/* Called when entry of this type deleted. */
void (*destroy)(const struct xt_tgdtor_param *);
/* Called when userspace align differs from kernel space one */
void (*compat_from_user)(void *dst, void *src);
int (*compat_to_user)(void __user *dst, void *src);
/* Set this to THIS_MODULE if you are a module, otherwise NULL */
struct module *me;
const char *table;
unsigned int targetsize;
unsigned int compatsize;
unsigned int hooks;
unsigned short proto;
unsigned short family;
u_int8_t revision;
};
/* Furniture shopping... */
struct xt_table
{
struct list_head list;
/* What hooks you will enter on */
unsigned int valid_hooks;
/* Man behind the curtain... */
struct xt_table_info *private;
/* Set this to THIS_MODULE if you are a module, otherwise NULL */
struct module *me;
u_int8_t af; /* address/protocol family */
/* A unique name... */
const char name[XT_TABLE_MAXNAMELEN];
};
#include <linux/netfilter_ipv4.h>
/* The table itself */
struct xt_table_info
{
/* Size per table */
unsigned int size;
/* Number of entries: FIXME. --RR */
unsigned int number;
/* Initial number of entries. Needed for module usage count */
unsigned int initial_entries;
/* Entry points and underflows */
unsigned int hook_entry[NF_INET_NUMHOOKS];
unsigned int underflow[NF_INET_NUMHOOKS];
/* ipt_entry tables: one per CPU */
/* Note : this field MUST be the last one, see XT_TABLE_INFO_SZ */
void *entries[1];
};
#define XT_TABLE_INFO_SZ (offsetof(struct xt_table_info, entries) \
+ nr_cpu_ids * sizeof(char *))
extern int xt_register_target(struct xt_target *target);
extern void xt_unregister_target(struct xt_target *target);
extern int xt_register_targets(struct xt_target *target, unsigned int n);
extern void xt_unregister_targets(struct xt_target *target, unsigned int n);
extern int xt_register_match(struct xt_match *target);
extern void xt_unregister_match(struct xt_match *target);
extern int xt_register_matches(struct xt_match *match, unsigned int n);
extern void xt_unregister_matches(struct xt_match *match, unsigned int n);
extern int xt_check_match(struct xt_mtchk_param *,
unsigned int size, u_int8_t proto, bool inv_proto);
extern int xt_check_target(struct xt_tgchk_param *,
unsigned int size, u_int8_t proto, bool inv_proto);
extern struct xt_table *xt_register_table(struct net *net,
struct xt_table *table,
struct xt_table_info *bootstrap,
struct xt_table_info *newinfo);
extern void *xt_unregister_table(struct xt_table *table);
extern struct xt_table_info *xt_replace_table(struct xt_table *table,
unsigned int num_counters,
struct xt_table_info *newinfo,
int *error);
extern struct xt_match *xt_find_match(u8 af, const char *name, u8 revision);
extern struct xt_target *xt_find_target(u8 af, const char *name, u8 revision);
extern struct xt_target *xt_request_find_target(u8 af, const char *name,
u8 revision);
extern int xt_find_revision(u8 af, const char *name, u8 revision,
int target, int *err);
extern struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
const char *name);
extern void xt_table_unlock(struct xt_table *t);
extern int xt_proto_init(struct net *net, u_int8_t af);
extern void xt_proto_fini(struct net *net, u_int8_t af);
extern struct xt_table_info *xt_alloc_table_info(unsigned int size);
extern void xt_free_table_info(struct xt_table_info *info);
/*
* Per-CPU spinlock associated with per-cpu table entries, and
* with a counter for the "reading" side that allows a recursive
* reader to avoid taking the lock and deadlocking.
*
* "reading" is used by ip/arp/ip6 tables rule processing which runs per-cpu.
* It needs to ensure that the rules are not being changed while the packet
* is being processed. In some cases, the read lock will be acquired
* twice on the same CPU; this is okay because of the count.
*
* "writing" is used when reading counters.
* During replace any readers that are using the old tables have to complete
* before freeing the old table. This is handled by the write locking
* necessary for reading the counters.
*/
struct xt_info_lock {
spinlock_t lock;
unsigned char readers;
};
DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks);
/*
* Note: we need to ensure that preemption is disabled before acquiring
* the per-cpu-variable, so we do it as a two step process rather than
* using "spin_lock_bh()".
*
* We _also_ need to disable bottom half processing before updating our
* nesting count, to make sure that the only kind of re-entrancy is this
* code being called by itself: since the count+lock is not an atomic
* operation, we can allow no races.
*
* _Only_ that special combination of being per-cpu and never getting
* re-entered asynchronously means that the count is safe.
*/
static inline void xt_info_rdlock_bh(void)
{
struct xt_info_lock *lock;
local_bh_disable();
lock = &__get_cpu_var(xt_info_locks);
if (likely(!lock->readers++))
spin_lock(&lock->lock);
}
static inline void xt_info_rdunlock_bh(void)
{
struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks);
if (likely(!--lock->readers))
spin_unlock(&lock->lock);
local_bh_enable();
}
/*
* The "writer" side needs to get exclusive access to the lock,
* regardless of readers. This must be called with bottom half
* processing (and thus also preemption) disabled.
*/
static inline void xt_info_wrlock(unsigned int cpu)
{
spin_lock(&per_cpu(xt_info_locks, cpu).lock);
}
static inline void xt_info_wrunlock(unsigned int cpu)
{
spin_unlock(&per_cpu(xt_info_locks, cpu).lock);
}
/*
* This helper is performance critical and must be inlined
*/
static inline unsigned long ifname_compare_aligned(const char *_a,
const char *_b,
const char *_mask)
{
const unsigned long *a = (const unsigned long *)_a;
const unsigned long *b = (const unsigned long *)_b;
const unsigned long *mask = (const unsigned long *)_mask;
unsigned long ret;
ret = (a[0] ^ b[0]) & mask[0];
if (IFNAMSIZ > sizeof(unsigned long))
ret |= (a[1] ^ b[1]) & mask[1];
if (IFNAMSIZ > 2 * sizeof(unsigned long))
ret |= (a[2] ^ b[2]) & mask[2];
if (IFNAMSIZ > 3 * sizeof(unsigned long))
ret |= (a[3] ^ b[3]) & mask[3];
BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
return ret;
}
#ifdef CONFIG_COMPAT
#include <net/compat.h>
struct compat_xt_entry_match
{
union {
struct {
u_int16_t match_size;
char name[XT_FUNCTION_MAXNAMELEN - 1];
u_int8_t revision;
} user;
struct {
u_int16_t match_size;
compat_uptr_t match;
} kernel;
u_int16_t match_size;
} u;
unsigned char data[0];
};
struct compat_xt_entry_target
{
union {
struct {
u_int16_t target_size;
char name[XT_FUNCTION_MAXNAMELEN - 1];
u_int8_t revision;
} user;
struct {
u_int16_t target_size;
compat_uptr_t target;
} kernel;
u_int16_t target_size;
} u;
unsigned char data[0];
};
/* FIXME: this works only on 32 bit tasks
* need to change whole approach in order to calculate align as function of
* current task alignment */
struct compat_xt_counters
{
#if defined(CONFIG_X86_64) || defined(CONFIG_IA64)
u_int32_t cnt[4];
#else
u_int64_t cnt[2];
#endif
};
struct compat_xt_counters_info
{
char name[XT_TABLE_MAXNAMELEN];
compat_uint_t num_counters;
struct compat_xt_counters counters[0];
};
#define COMPAT_XT_ALIGN(s) (((s) + (__alignof__(struct compat_xt_counters)-1)) \
& ~(__alignof__(struct compat_xt_counters)-1))
extern void xt_compat_lock(u_int8_t af);
extern void xt_compat_unlock(u_int8_t af);
extern int xt_compat_add_offset(u_int8_t af, unsigned int offset, short delta);
extern void xt_compat_flush_offsets(u_int8_t af);
extern short xt_compat_calc_jump(u_int8_t af, unsigned int offset);
extern int xt_compat_match_offset(const struct xt_match *match);
extern int xt_compat_match_from_user(struct xt_entry_match *m,
void **dstptr, unsigned int *size);
extern int xt_compat_match_to_user(struct xt_entry_match *m,
void __user **dstptr, unsigned int *size);
extern int xt_compat_target_offset(const struct xt_target *target);
extern void xt_compat_target_from_user(struct xt_entry_target *t,
void **dstptr, unsigned int *size);
extern int xt_compat_target_to_user(struct xt_entry_target *t,
void __user **dstptr, unsigned int *size);
#endif /* CONFIG_COMPAT */
#endif /* __KERNEL__ */
#endif /* _X_TABLES_H */

include/linux/netfilter_ipv4.h

@@ -8,7 +8,6 @@
#include <linux/netfilter.h>
/* only for userspace compatibility */
#ifndef __KERNEL__
/* IP Cache bits. */
/* Src IP address. */
#define NFC_IP_SRC 0x0001
@@ -49,7 +48,6 @@
/* Packets about to hit the wire. */
#define NF_IP_POST_ROUTING 4
#define NF_IP_NUMHOOKS 5
#endif /* ! __KERNEL__ */
enum nf_ip_hook_priorities {
NF_IP_PRI_FIRST = INT_MIN,
@@ -73,11 +71,5 @@ enum nf_ip_hook_priorities {
/* 2.4 firewalling went 64 through 67. */
#define SO_ORIGINAL_DST 80
#ifdef __KERNEL__
extern int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type);
extern int ip_xfrm_me_harder(struct sk_buff *skb);
extern __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
unsigned int dataoff, u_int8_t protocol);
#endif /*__KERNEL__*/
#endif /*__LINUX_IP_NETFILTER_H*/
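
Of what survives sanitization here, SO_ORIGINAL_DST is the piece applications actually use: a transparent proxy calls it on an accepted connection to recover the pre-DNAT destination. A minimal sketch (level 0 is IPPROTO_IP, a.k.a. SOL_IP):

#include <netinet/in.h>
#include <sys/socket.h>
#include <linux/netfilter_ipv4.h>

/* Fetch the original (pre-DNAT) destination of an accepted socket. */
static int original_dst(int fd, struct sockaddr_in *dst)
{
	socklen_t len = sizeof(*dst);

	return getsockopt(fd, IPPROTO_IP, SO_ORIGINAL_DST, dst, &len);
}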

include/linux/netlink.h

@@ -151,122 +151,5 @@ struct nlattr
#define NLA_ALIGN(len) (((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
#define NLA_HDRLEN ((int) NLA_ALIGN(sizeof(struct nlattr)))
#ifdef __KERNEL__
#include <linux/capability.h>
#include <linux/skbuff.h>
static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb)
{
return (struct nlmsghdr *)skb->data;
}
struct netlink_skb_parms
{
struct ucred creds; /* Skb credentials */
__u32 pid;
__u32 dst_group;
kernel_cap_t eff_cap;
__u32 loginuid; /* Login (audit) uid */
__u32 sessionid; /* Session id (audit) */
__u32 sid; /* SELinux security id */
};
#define NETLINK_CB(skb) (*(struct netlink_skb_parms*)&((skb)->cb))
#define NETLINK_CREDS(skb) (&NETLINK_CB((skb)).creds)
extern struct sock *netlink_kernel_create(struct net *net,
int unit,unsigned int groups,
void (*input)(struct sk_buff *skb),
struct mutex *cb_mutex,
struct module *module);
extern void netlink_kernel_release(struct sock *sk);
extern int netlink_change_ngroups(struct sock *sk, unsigned int groups);
extern void netlink_clear_multicast_users(struct sock *sk, unsigned int group);
extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err);
extern int netlink_has_listeners(struct sock *sk, unsigned int group);
extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 pid, int nonblock);
extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 pid,
__u32 group, gfp_t allocation);
extern void netlink_set_err(struct sock *ssk, __u32 pid, __u32 group, int code);
extern int netlink_register_notifier(struct notifier_block *nb);
extern int netlink_unregister_notifier(struct notifier_block *nb);
/* finegrained unicast helpers: */
struct sock *netlink_getsockbyfilp(struct file *filp);
int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
long *timeo, struct sock *ssk);
void netlink_detachskb(struct sock *sk, struct sk_buff *skb);
int netlink_sendskb(struct sock *sk, struct sk_buff *skb);
/*
* skb should fit one page. This choice is good for headerless malloc.
* But we should limit to 8K so that userspace does not have to
* use enormous buffer sizes on recvmsg() calls just to avoid
* MSG_TRUNC when PAGE_SIZE is very large.
*/
#if PAGE_SIZE < 8192UL
#define NLMSG_GOODSIZE SKB_WITH_OVERHEAD(PAGE_SIZE)
#else
#define NLMSG_GOODSIZE SKB_WITH_OVERHEAD(8192UL)
#endif
#define NLMSG_DEFAULT_SIZE (NLMSG_GOODSIZE - NLMSG_HDRLEN)
struct netlink_callback
{
struct sk_buff *skb;
struct nlmsghdr *nlh;
int (*dump)(struct sk_buff * skb, struct netlink_callback *cb);
int (*done)(struct netlink_callback *cb);
int family;
long args[6];
};
struct netlink_notify
{
struct net *net;
int pid;
int protocol;
};
static __inline__ struct nlmsghdr *
__nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags)
{
struct nlmsghdr *nlh;
int size = NLMSG_LENGTH(len);
nlh = (struct nlmsghdr*)skb_put(skb, NLMSG_ALIGN(size));
nlh->nlmsg_type = type;
nlh->nlmsg_len = size;
nlh->nlmsg_flags = flags;
nlh->nlmsg_pid = pid;
nlh->nlmsg_seq = seq;
if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
memset(NLMSG_DATA(nlh) + len, 0, NLMSG_ALIGN(size) - size);
return nlh;
}
#define NLMSG_NEW(skb, pid, seq, type, len, flags) \
({ if (unlikely(skb_tailroom(skb) < (int)NLMSG_SPACE(len))) \
goto nlmsg_failure; \
__nlmsg_put(skb, pid, seq, type, len, flags); })
#define NLMSG_PUT(skb, pid, seq, type, len) \
NLMSG_NEW(skb, pid, seq, type, len, 0)
extern int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
struct nlmsghdr *nlh,
int (*dump)(struct sk_buff *skb, struct netlink_callback*),
int (*done)(struct netlink_callback*));
#define NL_NONROOT_RECV 0x1
#define NL_NONROOT_SEND 0x2
extern void netlink_set_nonroot(int protocol, unsigned flag);
#endif /* __KERNEL__ */
#endif /* __LINUX_NETLINK_H */
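
Everything under __KERNEL__ above disappears, but the nlmsghdr layout and the NLMSG_* macros remain, and they are all a client needs to parse a reply stream. A hedged sketch of the canonical receive loop; buf and n are assumed to come from recv() on a netlink socket:

#include <linux/netlink.h>

static void for_each_msg(char *buf, int n)
{
	struct nlmsghdr *nlh;

	for (nlh = (struct nlmsghdr *)buf; NLMSG_OK(nlh, n);
	     nlh = NLMSG_NEXT(nlh, n)) {
		if (nlh->nlmsg_type == NLMSG_DONE)
			break;	/* end of a dump */
		if (nlh->nlmsg_type == NLMSG_ERROR)
			break;	/* struct nlmsgerr lives at NLMSG_DATA(nlh) */
		/* payload for other types starts at NLMSG_DATA(nlh) */
	}
}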

include/linux/rtnetlink.h

@@ -524,7 +524,6 @@ enum
#define NDUSEROPT_MAX (__NDUSEROPT_MAX - 1)
#ifndef __KERNEL__
/* RTnetlink multicast groups - backwards compatibility for userspace */
#define RTMGRP_LINK 1
#define RTMGRP_NOTIFY 2
@@ -545,7 +544,6 @@ enum
#define RTMGRP_DECnet_ROUTE 0x4000
#define RTMGRP_IPV6_PREFIX 0x20000
#endif
/* RTnetlink multicast groups */
enum rtnetlink_groups {
@@ -611,169 +609,6 @@ struct tcamsg
/* End of information exported to user level */
#ifdef __KERNEL__
#include <linux/mutex.h>
static __inline__ int rtattr_strcmp(const struct rtattr *rta, const char *str)
{
int len = strlen(str) + 1;
return len > rta->rta_len || memcmp(RTA_DATA(rta), str, len);
}
extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo);
extern int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid);
extern void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid,
u32 group, struct nlmsghdr *nlh, gfp_t flags);
extern void rtnl_set_sk_err(struct net *net, u32 group, int error);
extern int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics);
extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst,
u32 id, u32 ts, u32 tsage, long expires,
u32 error);
extern void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const void *data);
#define RTA_PUT(skb, attrtype, attrlen, data) \
({ if (unlikely(skb_tailroom(skb) < (int)RTA_SPACE(attrlen))) \
goto rtattr_failure; \
__rta_fill(skb, attrtype, attrlen, data); })
#define RTA_APPEND(skb, attrlen, data) \
({ if (unlikely(skb_tailroom(skb) < (int)(attrlen))) \
goto rtattr_failure; \
memcpy(skb_put(skb, attrlen), data, attrlen); })
#define RTA_PUT_NOHDR(skb, attrlen, data) \
({ RTA_APPEND(skb, RTA_ALIGN(attrlen), data); \
memset(skb_tail_pointer(skb) - (RTA_ALIGN(attrlen) - attrlen), 0, \
RTA_ALIGN(attrlen) - attrlen); })
#define RTA_PUT_U8(skb, attrtype, value) \
({ u8 _tmp = (value); \
RTA_PUT(skb, attrtype, sizeof(u8), &_tmp); })
#define RTA_PUT_U16(skb, attrtype, value) \
({ u16 _tmp = (value); \
RTA_PUT(skb, attrtype, sizeof(u16), &_tmp); })
#define RTA_PUT_U32(skb, attrtype, value) \
({ u32 _tmp = (value); \
RTA_PUT(skb, attrtype, sizeof(u32), &_tmp); })
#define RTA_PUT_U64(skb, attrtype, value) \
({ u64 _tmp = (value); \
RTA_PUT(skb, attrtype, sizeof(u64), &_tmp); })
#define RTA_PUT_SECS(skb, attrtype, value) \
RTA_PUT_U64(skb, attrtype, (value) / HZ)
#define RTA_PUT_MSECS(skb, attrtype, value) \
RTA_PUT_U64(skb, attrtype, jiffies_to_msecs(value))
#define RTA_PUT_STRING(skb, attrtype, value) \
RTA_PUT(skb, attrtype, strlen(value) + 1, value)
#define RTA_PUT_FLAG(skb, attrtype) \
RTA_PUT(skb, attrtype, 0, NULL);
#define RTA_NEST(skb, type) \
({ struct rtattr *__start = (struct rtattr *)skb_tail_pointer(skb); \
RTA_PUT(skb, type, 0, NULL); \
__start; })
#define RTA_NEST_END(skb, start) \
({ (start)->rta_len = skb_tail_pointer(skb) - (unsigned char *)(start); \
(skb)->len; })
#define RTA_NEST_COMPAT(skb, type, attrlen, data) \
({ struct rtattr *__start = (struct rtattr *)skb_tail_pointer(skb); \
RTA_PUT(skb, type, attrlen, data); \
RTA_NEST(skb, type); \
__start; })
#define RTA_NEST_COMPAT_END(skb, start) \
({ struct rtattr *__nest = (void *)(start) + NLMSG_ALIGN((start)->rta_len); \
(start)->rta_len = skb_tail_pointer(skb) - (unsigned char *)(start); \
RTA_NEST_END(skb, __nest); \
(skb)->len; })
#define RTA_NEST_CANCEL(skb, start) \
({ if (start) \
skb_trim(skb, (unsigned char *) (start) - (skb)->data); \
-1; })
#define RTA_GET_U8(rta) \
({ if (!rta || RTA_PAYLOAD(rta) < sizeof(u8)) \
goto rtattr_failure; \
*(u8 *) RTA_DATA(rta); })
#define RTA_GET_U16(rta) \
({ if (!rta || RTA_PAYLOAD(rta) < sizeof(u16)) \
goto rtattr_failure; \
*(u16 *) RTA_DATA(rta); })
#define RTA_GET_U32(rta) \
({ if (!rta || RTA_PAYLOAD(rta) < sizeof(u32)) \
goto rtattr_failure; \
*(u32 *) RTA_DATA(rta); })
#define RTA_GET_U64(rta) \
({ u64 _tmp; \
if (!rta || RTA_PAYLOAD(rta) < sizeof(u64)) \
goto rtattr_failure; \
memcpy(&_tmp, RTA_DATA(rta), sizeof(_tmp)); \
_tmp; })
#define RTA_GET_FLAG(rta) (!!(rta))
#define RTA_GET_SECS(rta) ((unsigned long) RTA_GET_U64(rta) * HZ)
#define RTA_GET_MSECS(rta) (msecs_to_jiffies((unsigned long) RTA_GET_U64(rta)))
static inline struct rtattr *
__rta_reserve(struct sk_buff *skb, int attrtype, int attrlen)
{
struct rtattr *rta;
int size = RTA_LENGTH(attrlen);
rta = (struct rtattr*)skb_put(skb, RTA_ALIGN(size));
rta->rta_type = attrtype;
rta->rta_len = size;
memset(RTA_DATA(rta) + attrlen, 0, RTA_ALIGN(size) - size);
return rta;
}
#define __RTA_PUT(skb, attrtype, attrlen) \
({ if (unlikely(skb_tailroom(skb) < (int)RTA_SPACE(attrlen))) \
goto rtattr_failure; \
__rta_reserve(skb, attrtype, attrlen); })
extern void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change);
/* RTNL is used as a global lock for all changes to network configuration */
extern void rtnl_lock(void);
extern void rtnl_unlock(void);
extern int rtnl_trylock(void);
extern int rtnl_is_locked(void);
extern void rtnetlink_init(void);
extern void __rtnl_unlock(void);
#define ASSERT_RTNL() do { \
if (unlikely(!rtnl_is_locked())) { \
printk(KERN_ERR "RTNL: assertion failed at %s (%d)\n", \
__FILE__, __LINE__); \
dump_stack(); \
} \
} while(0)
static inline u32 rtm_get_table(struct rtattr **rta, u8 table)
{
return RTA_GET_U32(rta[RTA_TABLE-1]);
rtattr_failure:
return table;
}
#endif /* __KERNEL__ */
#endif /* __LINUX_RTNETLINK_H */
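
The RTA_PUT/RTA_GET families above were kernel-only conveniences (iproute2 builds attributes with its own libnetlink helpers); userspace keeps the message and attribute layout itself. A hedged sketch of a link dump request built from just those definitions; fd is assumed to be a bound NETLINK_ROUTE socket:

#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

static int request_link_dump(int fd)
{
	struct {
		struct nlmsghdr nlh;
		struct ifinfomsg ifm;
	} req;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len   = sizeof(req);
	req.nlh.nlmsg_type  = RTM_GETLINK;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	req.ifm.ifi_family  = AF_UNSPEC;

	return send(fd, &req, sizeof(req), 0);
}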

include/linux/socket.h

@@ -22,14 +22,8 @@ struct __kernel_sockaddr_storage {
#include <linux/sockios.h> /* the SIOCxxx I/O controls */
#include <linux/uio.h> /* iovec support */
#include <linux/types.h> /* pid_t */
#include <linux/compiler.h> /* __user */
/* */
#ifdef __KERNEL__
# ifdef CONFIG_PROC_FS
struct seq_file;
extern void socket_seq_show(struct seq_file *seq);
# endif
#endif /* __KERNEL__ */
typedef unsigned short sa_family_t;
@@ -104,12 +98,10 @@ struct cmsghdr {
* This mess will go away with glibc
*/
#ifdef __KERNEL__
#define __KINLINE static inline
#elif defined(__GNUC__)
#if defined(__GNUC__)
#define __KINLINE static __inline__
#elif defined(__cplusplus)
#define __KINLINE static inline
#define __KINLINE static __inline__
#else
#define __KINLINE static
#endif
@@ -310,23 +302,5 @@ struct ucred {
/* IPX options */
#define IPX_TYPE 1
#ifdef __KERNEL__
extern int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
extern int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
int offset, int len);
extern int csum_partial_copy_fromiovecend(unsigned char *kdata,
struct iovec *iov,
int offset,
unsigned int len, __wsum *csump);
extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode);
extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len);
extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
int offset, int len);
extern int move_addr_to_user(struct sockaddr *kaddr, int klen, void __user *uaddr, int __user *ulen);
extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr *kaddr);
extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
#endif
#endif /* not kernel and not glibc */
#endif /* _LINUX_SOCKET_H */

include/linux/types.h

@@ -4,150 +4,9 @@
#include <asm/types.h>
#ifndef __ASSEMBLY__
#ifdef __KERNEL__
#define DECLARE_BITMAP(name,bits) \
unsigned long name[BITS_TO_LONGS(bits)]
#endif
#include <linux/posix_types.h>
#ifdef __KERNEL__
typedef __u32 __kernel_dev_t;
typedef __kernel_fd_set fd_set;
typedef __kernel_dev_t dev_t;
typedef __kernel_ino_t ino_t;
typedef __kernel_mode_t mode_t;
typedef __kernel_nlink_t nlink_t;
typedef __kernel_off_t off_t;
typedef __kernel_pid_t pid_t;
typedef __kernel_daddr_t daddr_t;
typedef __kernel_key_t key_t;
typedef __kernel_suseconds_t suseconds_t;
typedef __kernel_timer_t timer_t;
typedef __kernel_clockid_t clockid_t;
typedef __kernel_mqd_t mqd_t;
typedef _Bool bool;
typedef __kernel_uid32_t uid_t;
typedef __kernel_gid32_t gid_t;
typedef __kernel_uid16_t uid16_t;
typedef __kernel_gid16_t gid16_t;
typedef unsigned long uintptr_t;
#ifdef CONFIG_UID16
/* This is defined by include/asm-{arch}/posix_types.h */
typedef __kernel_old_uid_t old_uid_t;
typedef __kernel_old_gid_t old_gid_t;
#endif /* CONFIG_UID16 */
#if defined(__GNUC__)
typedef __kernel_loff_t loff_t;
#endif
/*
* The following typedefs are also protected by individual ifdefs for
* historical reasons:
*/
#ifndef _SIZE_T
#define _SIZE_T
typedef __kernel_size_t size_t;
#endif
#ifndef _SSIZE_T
#define _SSIZE_T
typedef __kernel_ssize_t ssize_t;
#endif
#ifndef _PTRDIFF_T
#define _PTRDIFF_T
typedef __kernel_ptrdiff_t ptrdiff_t;
#endif
#ifndef _TIME_T
#define _TIME_T
typedef __kernel_time_t time_t;
#endif
#ifndef _CLOCK_T
#define _CLOCK_T
typedef __kernel_clock_t clock_t;
#endif
#ifndef _CADDR_T
#define _CADDR_T
typedef __kernel_caddr_t caddr_t;
#endif
/* bsd */
typedef unsigned char u_char;
typedef unsigned short u_short;
typedef unsigned int u_int;
typedef unsigned long u_long;
/* sysv */
typedef unsigned char unchar;
typedef unsigned short ushort;
typedef unsigned int uint;
typedef unsigned long ulong;
#ifndef __BIT_TYPES_DEFINED__
#define __BIT_TYPES_DEFINED__
typedef __u8 u_int8_t;
typedef __s8 int8_t;
typedef __u16 u_int16_t;
typedef __s16 int16_t;
typedef __u32 u_int32_t;
typedef __s32 int32_t;
#endif /* !(__BIT_TYPES_DEFINED__) */
typedef __u8 uint8_t;
typedef __u16 uint16_t;
typedef __u32 uint32_t;
#if defined(__GNUC__)
typedef __u64 uint64_t;
typedef __u64 u_int64_t;
typedef __s64 int64_t;
#endif
/* this is a special 64bit data type that is 8-byte aligned */
#define aligned_u64 __u64 __attribute__((aligned(8)))
#define aligned_be64 __be64 __attribute__((aligned(8)))
#define aligned_le64 __le64 __attribute__((aligned(8)))
/**
* The type used for indexing onto a disc or disc partition.
*
* Linux always considers sectors to be 512 bytes long independently
* of the devices real block size.
*
* blkcnt_t is the type of the inode's block count.
*/
#ifdef CONFIG_LBDAF
typedef u64 sector_t;
typedef u64 blkcnt_t;
#else
typedef unsigned long sector_t;
typedef unsigned long blkcnt_t;
#endif
/*
* The type of an index into the pagecache. Use a #define so asm/types.h
* can override it.
*/
#ifndef pgoff_t
#define pgoff_t unsigned long
#endif
#endif /* __KERNEL__ */
/*
* Below are truly Linux-specific types that should never collide with
@@ -175,35 +34,5 @@ typedef __u64 __bitwise __be64;
typedef __u16 __bitwise __sum16;
typedef __u32 __bitwise __wsum;
#ifdef __KERNEL__
typedef unsigned __bitwise__ gfp_t;
typedef unsigned __bitwise__ fmode_t;
#ifdef CONFIG_PHYS_ADDR_T_64BIT
typedef u64 phys_addr_t;
#else
typedef u32 phys_addr_t;
#endif
typedef phys_addr_t resource_size_t;
typedef struct {
volatile int counter;
} atomic_t;
#ifdef CONFIG_64BIT
typedef struct {
volatile long counter;
} atomic64_t;
#endif
struct ustat {
__kernel_daddr_t f_tfree;
__kernel_ino_t f_tinode;
char f_fname[6];
char f_fpack[6];
};
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
#endif /* _LINUX_TYPES_H */

include/linux/xfrm.h

@@ -450,14 +450,12 @@ struct xfrm_user_mapping {
__be16 new_sport;
};
#ifndef __KERNEL__
/* backwards compatibility for userspace */
#define XFRMGRP_ACQUIRE 1
#define XFRMGRP_EXPIRE 2
#define XFRMGRP_SA 4
#define XFRMGRP_POLICY 8
#define XFRMGRP_REPORT 0x20
#endif
enum xfrm_nlgroups {
XFRMNLGRP_NONE,