/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <iiitac@pyr.swan.ac.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Version:	$Id: skbuff.c,v 1.90 2001/11/07 05:56:19 davem Exp $
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/highmem.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <asm/system.h>

static kmem_cache_t *skbuff_head_cache __read_mostly;
static kmem_cache_t *skbuff_fclone_cache __read_mostly;

/*
 *	Keep out-of-line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always
 *	reliable.
 */

/**
 *	skb_over_panic	- 	private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_put(). Not user callable.
 */
void skb_over_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
	                  "data:%p tail:%p end:%p dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data, skb->tail, skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

/**
 *	skb_under_panic	- 	private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_push(). Not user callable.
 */

void skb_under_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
	                  "data:%p tail:%p end:%p dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data, skb->tail, skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

void skb_truesize_bug(struct sk_buff *skb)
{
	printk(KERN_ERR "SKB BUG: Invalid truesize (%u) "
	       "len=%u, sizeof(sk_buff)=%Zd\n",
	       skb->truesize, skb->len, sizeof(struct sk_buff));
}
EXPORT_SYMBOL(skb_truesize_bug);

/* 	Allocate a new skbuff. We do this ourselves so we can fill in a few
 *	'private' fields and also do memory statistics to find all the
 *	[BEEP] leaks.
 *
 */

/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@fclone: allocate from fclone cache instead of head cache
 *		and allocate a cloned (child) skb
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of size bytes. The object has a reference count of one.
 *	The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int fclone)
{
	kmem_cache_t *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;

	cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;

	/* Get the HEAD */
	skb = kmem_cache_alloc(cache, gfp_mask & ~__GFP_DMA);
	if (!skb)
		goto out;

	/* Get the DATA. Size must match skb_add_mtu(). */
	size = SKB_DATA_ALIGN(size);
	data = kmalloc_track_caller(size + sizeof(struct skb_shared_info),
			gfp_mask);
	if (!data)
		goto nodata;

	memset(skb, 0, offsetof(struct sk_buff, truesize));
	skb->truesize = size + sizeof(struct sk_buff);
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb->tail = data;
	skb->end  = data + size;
	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	atomic_set(&shinfo->dataref, 1);
	shinfo->nr_frags  = 0;
	shinfo->gso_size = 0;
	shinfo->gso_segs = 0;
	shinfo->gso_type = 0;
	shinfo->ip6_frag_id = 0;
	shinfo->frag_list = NULL;

	if (fclone) {
		struct sk_buff *child = skb + 1;
		atomic_t *fclone_ref = (atomic_t *) (child + 1);

		skb->fclone = SKB_FCLONE_ORIG;
		atomic_set(fclone_ref, 1);

		child->fclone = SKB_FCLONE_UNAVAILABLE;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
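
/*
 * Usage sketch (illustrative, not called from this file): protocol code
 * normally goes through the alloc_skb() wrapper, reserves headroom up
 * front, and only then fills in data; MAX_HEADER, len and payload are
 * assumptions for the example:
 *
 *	struct sk_buff *skb = alloc_skb(MAX_HEADER + len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOBUFS;
 *	skb_reserve(skb, MAX_HEADER);
 *	memcpy(skb_put(skb, len), payload, len);
 */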

/**
 *	alloc_skb_from_cache	-	allocate a network buffer
 *	@cp: kmem_cache from which to allocate the data area
 *           (object size must be big enough for @size bytes + skb overheads)
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and
 *	tail room of size bytes. The object has a reference count of one.
 *	The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
				     unsigned int size,
				     gfp_t gfp_mask)
{
	struct sk_buff *skb;
	u8 *data;

	/* Get the HEAD */
	skb = kmem_cache_alloc(skbuff_head_cache,
			       gfp_mask & ~__GFP_DMA);
	if (!skb)
		goto out;

	/* Get the DATA. */
	size = SKB_DATA_ALIGN(size);
	data = kmem_cache_alloc(cp, gfp_mask);
	if (!data)
		goto nodata;

	memset(skb, 0, offsetof(struct sk_buff, truesize));
	skb->truesize = size + sizeof(struct sk_buff);
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb->tail = data;
	skb->end  = data + size;

	atomic_set(&(skb_shinfo(skb)->dataref), 1);
	skb_shinfo(skb)->nr_frags  = 0;
	skb_shinfo(skb)->gso_size = 0;
	skb_shinfo(skb)->gso_segs = 0;
	skb_shinfo(skb)->gso_type = 0;
	skb_shinfo(skb)->frag_list = NULL;
out:
	return skb;
nodata:
	kmem_cache_free(skbuff_head_cache, skb);
	skb = NULL;
	goto out;
}

/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
		unsigned int length, gfp_t gfp_mask)
{
	struct sk_buff *skb;

	skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
	if (likely(skb)) {
		skb_reserve(skb, NET_SKB_PAD);
		skb->dev = dev;
	}
	return skb;
}
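
/*
 * Usage sketch (illustrative): a NIC rx handler would typically use the
 * netdev_alloc_skb() wrapper, which passes %GFP_ATOMIC here; 'desc' is
 * a hypothetical hardware descriptor:
 *
 *	skb = netdev_alloc_skb(dev, desc->len);
 *	if (unlikely(!skb))
 *		goto drop;
 *	memcpy(skb_put(skb, desc->len), desc->buf, desc->len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 */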

static void skb_drop_list(struct sk_buff **listp)
{
	struct sk_buff *list = *listp;

	*listp = NULL;

	do {
		struct sk_buff *this = list;
		list = list->next;
		kfree_skb(this);
	} while (list);
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	for (list = skb_shinfo(skb)->frag_list; list; list = list->next)
		skb_get(list);
}

static void skb_release_data(struct sk_buff *skb)
{
	if (!skb->cloned ||
	    !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			       &skb_shinfo(skb)->dataref)) {
		if (skb_shinfo(skb)->nr_frags) {
			int i;
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
				put_page(skb_shinfo(skb)->frags[i].page);
		}

		if (skb_shinfo(skb)->frag_list)
			skb_drop_fraglist(skb);

		kfree(skb->head);
	}
}

/*
 *	Free an skbuff's memory without cleaning the state.
 */
void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff *other;
	atomic_t *fclone_ref;

	skb_release_data(skb);
	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		break;

	case SKB_FCLONE_ORIG:
		fclone_ref = (atomic_t *) (skb + 2);
		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, skb);
		break;

	case SKB_FCLONE_CLONE:
		fclone_ref = (atomic_t *) (skb + 1);
		other = skb - 1;

		/* The clone portion is available for
		 * fast-cloning again.
		 */
		skb->fclone = SKB_FCLONE_UNAVAILABLE;

		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, other);
		break;
	}
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb().
 */

void __kfree_skb(struct sk_buff *skb)
{
	dst_release(skb->dst);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
#endif
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#ifdef CONFIG_NETFILTER
	nf_conntrack_put(skb->nfct);
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put_reasm(skb->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
#endif
#endif
/* XXX: IS this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
#endif
#endif

	kfree_skbmem(skb);
}

/**
 *	kfree_skb - free an sk_buff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	__kfree_skb(skb);
}

/**
 *	skb_clone	-	duplicate an sk_buff
 *	@skb: buffer to clone
 *	@gfp_mask: allocation priority
 *
 *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *	copies share the same packet data but not structure. The new
 *	buffer has a reference count of 1. If the allocation fails the
 *	function returns %NULL otherwise the new buffer is returned.
 *
 *	If this function is called from an interrupt @gfp_mask must be
 *	%GFP_ATOMIC.
 */

struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff *n;

	n = skb + 1;
	if (skb->fclone == SKB_FCLONE_ORIG &&
	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
		atomic_t *fclone_ref = (atomic_t *) (n + 1);
		n->fclone = SKB_FCLONE_CLONE;
		atomic_inc(fclone_ref);
	} else {
		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
		if (!n)
			return NULL;
		n->fclone = SKB_FCLONE_UNAVAILABLE;
	}

#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	C(tstamp);
	C(dev);
	C(h);
	C(nh);
	C(mac);
	C(dst);
	dst_clone(skb->dst);
	C(sp);
#ifdef CONFIG_INET
	secpath_get(skb->sp);
#endif
	memcpy(n->cb, skb->cb, sizeof(skb->cb));
	C(len);
	C(data_len);
	C(csum);
	C(local_df);
	n->cloned = 1;
	n->nohdr = 0;
	C(pkt_type);
	C(ip_summed);
	C(priority);
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
	C(ipvs_property);
#endif
	C(protocol);
	n->destructor = NULL;
#ifdef CONFIG_NETFILTER
	C(nfmark);
	C(nfct);
	nf_conntrack_get(skb->nfct);
	C(nfctinfo);
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	C(nfct_reasm);
	nf_conntrack_get_reasm(skb->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	C(nf_bridge);
	nf_bridge_get(skb->nf_bridge);
#endif
#endif /*CONFIG_NETFILTER*/
#ifdef CONFIG_NET_SCHED
	C(tc_index);
#ifdef CONFIG_NET_CLS_ACT
	n->tc_verd = SET_TC_VERD(skb->tc_verd,0);
	n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
	n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
	C(input_dev);
#endif
	skb_copy_secmark(n, skb);
#endif
	C(truesize);
	atomic_set(&n->users, 1);
	C(head);
	C(data);
	C(tail);
	C(end);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
}
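
/*
 * Usage sketch (illustrative): a reliable protocol keeps the original
 * on its retransmit queue and hands a clone to the device layer, so
 * both paths share a single copy of the payload:
 *
 *	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
 *	if (!clone)
 *		return -ENOBUFS;
 *	dev_queue_xmit(clone);
 */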

static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	/*
	 *	Shift between the two data areas in bytes
	 */
	unsigned long offset = new->data - old->data;

	new->sk		= NULL;
	new->dev	= old->dev;
	new->priority	= old->priority;
	new->protocol	= old->protocol;
	new->dst	= dst_clone(old->dst);
#ifdef CONFIG_INET
	new->sp		= secpath_get(old->sp);
#endif
	new->h.raw	= old->h.raw + offset;
	new->nh.raw	= old->nh.raw + offset;
	new->mac.raw	= old->mac.raw + offset;
	memcpy(new->cb, old->cb, sizeof(old->cb));
	new->local_df	= old->local_df;
	new->fclone	= SKB_FCLONE_UNAVAILABLE;
	new->pkt_type	= old->pkt_type;
	new->tstamp	= old->tstamp;
	new->destructor = NULL;
#ifdef CONFIG_NETFILTER
	new->nfmark	= old->nfmark;
	new->nfct	= old->nfct;
	nf_conntrack_get(old->nfct);
	new->nfctinfo	= old->nfctinfo;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	new->nfct_reasm = old->nfct_reasm;
	nf_conntrack_get_reasm(old->nfct_reasm);
#endif
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
	new->ipvs_property = old->ipvs_property;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	new->nf_bridge	= old->nf_bridge;
	nf_bridge_get(old->nf_bridge);
#endif
#endif
#ifdef CONFIG_NET_SCHED
#ifdef CONFIG_NET_CLS_ACT
	new->tc_verd = old->tc_verd;
#endif
	new->tc_index	= old->tc_index;
#endif
	skb_copy_secmark(new, old);
	atomic_set(&new->users, 1);
	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}

/**
 *	skb_copy	-	create private copy of an sk_buff
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data. This is used when the
 *	caller wishes to modify the data and needs a private copy of the
 *	data to alter. Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	As a by-product this function converts a non-linear &sk_buff to a
 *	linear one, so that the &sk_buff becomes completely private and the
 *	caller is allowed to modify all the data of the returned buffer. This
 *	means that this function is not recommended for use in circumstances
 *	when only the header is going to be modified. Use pskb_copy() instead.
 */

struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
	int headerlen = skb->data - skb->head;
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n = alloc_skb(skb->end - skb->head + skb->data_len,
				      gfp_mask);
	if (!n)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);
	n->csum	     = skb->csum;
	n->ip_summed = skb->ip_summed;

	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
		BUG();

	copy_skb_header(n, skb);
	return n;
}
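
/*
 * Usage sketch (illustrative): code that must rewrite packet payload
 * (not just headers) takes a fully private, linear copy first and
 * leaves the original untouched:
 *
 *	struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
 *	if (!nskb)
 *		return -ENOMEM;
 *	(all of nskb->data may now be modified freely)
 */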

/**
 *	pskb_copy	-	create copy of an sk_buff with private head.
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and part of its data, located
 *	in the header. Fragmented data remain shared. This is used when
 *	the caller wishes to modify only the header of the &sk_buff and
 *	needs a private copy of the header to alter. Returns %NULL on
 *	failure or the pointer to the buffer on success.
 *	The returned buffer has a reference count of 1.
 */

struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n = alloc_skb(skb->end - skb->head, gfp_mask);

	if (!n)
		goto out;

	/* Set the data pointer */
	skb_reserve(n, skb->data - skb->head);
	/* Set the tail pointer and length */
	skb_put(n, skb_headlen(skb));
	/* Copy the bytes */
	memcpy(n->data, skb->data, n->len);
	n->csum	     = skb->csum;
	n->ip_summed = skb->ip_summed;

	n->data_len  = skb->data_len;
	n->len	     = skb->len;

	if (skb_shinfo(skb)->nr_frags) {
		int i;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
			get_page(skb_shinfo(n)->frags[i].page);
		}
		skb_shinfo(n)->nr_frags = i;
	}

	if (skb_shinfo(skb)->frag_list) {
		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
		skb_clone_fraglist(n);
	}

	copy_skb_header(n, skb);
out:
	return n;
}

/**
 *	pskb_expand_head - reallocate header of &sk_buff
 *	@skb: buffer to reallocate
 *	@nhead: room to add at head
 *	@ntail: room to add at tail
 *	@gfp_mask: allocation priority
 *
 *	Expands (or creates identical copy, if &nhead and &ntail are zero)
 *	header of skb. &sk_buff itself is not changed. &sk_buff MUST have
 *	reference count of 1. Returns zero on success or a negative error
 *	code if expansion failed. In the latter case, &sk_buff is not
 *	changed.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
		     gfp_t gfp_mask)
{
	int i;
	u8 *data;
	int size = nhead + (skb->end - skb->head) + ntail;
	long off;

	if (skb_shared(skb))
		BUG();

	size = SKB_DATA_ALIGN(size);

	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
	if (!data)
		goto nodata;

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void. */
	memcpy(data + nhead, skb->head, skb->tail - skb->head);
	memcpy(data + size, skb->end, sizeof(struct skb_shared_info));

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		get_page(skb_shinfo(skb)->frags[i].page);

	if (skb_shinfo(skb)->frag_list)
		skb_clone_fraglist(skb);

	skb_release_data(skb);

	off = (data + nhead) - skb->head;

	skb->head     = data;
	skb->end      = data + size;
	skb->data    += off;
	skb->tail    += off;
	skb->mac.raw += off;
	skb->h.raw   += off;
	skb->nh.raw  += off;
	skb->cloned   = 0;
	skb->nohdr    = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);
	return 0;

nodata:
	return -ENOMEM;
}
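
/*
 * Usage sketch (illustrative): callers usually reach this through the
 * skb_cow() helper before pushing a new header; hdr_len is hypothetical,
 * and any cached pointers into the old header are stale afterwards:
 *
 *	if (skb_cow(skb, hdr_len))
 *		goto drop;
 *	hdr = skb_push(skb, hdr_len);
 */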

/* Make private copy of skb with writable head and some headroom */

struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
	struct sk_buff *skb2;
	int delta = headroom - skb_headroom(skb);

	if (delta <= 0)
		skb2 = pskb_copy(skb, GFP_ATOMIC);
	else {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
					     GFP_ATOMIC)) {
			kfree_skb(skb2);
			skb2 = NULL;
		}
	}
	return skb2;
}


/**
 *	skb_copy_expand	-	copy and expand sk_buff
 *	@skb: buffer to copy
 *	@newheadroom: new free bytes at head
 *	@newtailroom: new free bytes at tail
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data and while doing so
 *	allocate additional space.
 *
 *	This is used when the caller wishes to modify the data and needs a
 *	private copy of the data to alter as well as more space for new fields.
 *	Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	You must pass %GFP_ATOMIC as the allocation priority if this function
 *	is called from an interrupt.
 *
 *	BUG ALERT: ip_summed is not copied. Why does this work? Is it used
 *	only by netfilter in the cases when checksum is recalculated? --ANK
 */
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				int newheadroom, int newtailroom,
				gfp_t gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
				      gfp_mask);
	int head_copy_len, head_copy_off;

	if (!n)
		return NULL;

	skb_reserve(n, newheadroom);

	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	head_copy_len = skb_headroom(skb);
	head_copy_off = 0;
	if (newheadroom <= head_copy_len)
		head_copy_len = newheadroom;
	else
		head_copy_off = newheadroom - head_copy_len;

	/* Copy the linear header and data. */
	if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
			  skb->len + head_copy_len))
		BUG();

	copy_skb_header(n, skb);

	return n;
}

/**
 *	skb_pad			-	zero pad the tail of an skb
 *	@skb: buffer to pad
 *	@pad: space to pad
 *
 *	Ensure that a buffer is followed by a padding area that is zero
 *	filled. Used by network drivers which may DMA or transfer data
 *	beyond the buffer end onto the wire.
 *
 *	May return error in out of memory cases. The skb is freed on error.
 */

int skb_pad(struct sk_buff *skb, int pad)
{
	int err;
	int ntail;

	/* If the skbuff is non-linear tailroom is always zero.. */
	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
		memset(skb->data+skb->len, 0, pad);
		return 0;
	}

	ntail = skb->data_len + pad - (skb->end - skb->tail);
	if (likely(skb_cloned(skb) || ntail > 0)) {
		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
		if (unlikely(err))
			goto free_skb;
	}

	/* FIXME: The use of this function with non-linear skb's really needs
	 * to be audited.
	 */
	err = skb_linearize(skb);
	if (unlikely(err))
		goto free_skb;

	memset(skb->data + skb->len, 0, pad);
	return 0;

free_skb:
	kfree_skb(skb);
	return err;
}
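
/*
 * Usage sketch (illustrative): drivers pad runt Ethernet frames to the
 * 60-byte minimum before programming the hardware, usually via the
 * skb_padto() wrapper around this function:
 *
 *	if (skb_padto(skb, ETH_ZLEN))
 *		return NETDEV_TX_OK;	(buffer was already freed)
 *	(transmit with length max(skb->len, ETH_ZLEN))
 */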

/* Trims skb to length len. It can change skb pointers.
 */

int ___pskb_trim(struct sk_buff *skb, unsigned int len)
{
	struct sk_buff **fragp;
	struct sk_buff *frag;
	int offset = skb_headlen(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int i;
	int err;

	if (skb_cloned(skb) &&
	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
		return err;

	i = 0;
	if (offset >= len)
		goto drop_pages;

	for (; i < nfrags; i++) {
		int end = offset + skb_shinfo(skb)->frags[i].size;

		if (end < len) {
			offset = end;
			continue;
		}

		skb_shinfo(skb)->frags[i++].size = len - offset;

drop_pages:
		skb_shinfo(skb)->nr_frags = i;

		for (; i < nfrags; i++)
			put_page(skb_shinfo(skb)->frags[i].page);

		if (skb_shinfo(skb)->frag_list)
			skb_drop_fraglist(skb);
		goto done;
	}

	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
	     fragp = &frag->next) {
		int end = offset + frag->len;

		if (skb_shared(frag)) {
			struct sk_buff *nfrag;

			nfrag = skb_clone(frag, GFP_ATOMIC);
			if (unlikely(!nfrag))
				return -ENOMEM;

			nfrag->next = frag->next;
			kfree_skb(frag);
			frag = nfrag;
			*fragp = frag;
		}

		if (end < len) {
			offset = end;
			continue;
		}

		if (end > len &&
		    unlikely((err = pskb_trim(frag, len - offset))))
			return err;

		if (frag->next)
			skb_drop_list(&frag->next);
		break;
	}

done:
	if (len > skb_headlen(skb)) {
		skb->data_len -= skb->len - len;
		skb->len       = len;
	} else {
		skb->len       = len;
		skb->data_len  = 0;
		skb->tail      = skb->data + len;
	}

	return 0;
}

/**
 *	__pskb_pull_tail - advance tail of skb header
 *	@skb: buffer to reallocate
 *	@delta: number of bytes to advance tail
 *
 *	The function makes sense only on a fragmented &sk_buff;
 *	it expands the header, moving its tail forward and copying the
 *	necessary data from the fragmented part.
 *
 *	&sk_buff MUST have reference count of 1.
 *
 *	Returns %NULL (and &sk_buff does not change) if pull failed
 *	or value of new tail of skb in the case of success.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

/* Moves tail of skb head forward, copying data from fragmented part,
 * when it is necessary.
 * 1. It may fail due to malloc failure.
 * 2. It may change skb pointers.
 *
 * It is pretty complicated. Luckily, it is called only in exceptional cases.
 */
unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
	/* If skb has not enough free space at tail, get new one
	 * plus 128 bytes for future expansions. If we have enough
	 * room at tail, reallocate without expansion only if skb is cloned.
	 */
	int i, k, eat = (skb->tail + delta) - skb->end;

	if (eat > 0 || skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
				     GFP_ATOMIC))
			return NULL;
	}

	if (skb_copy_bits(skb, skb_headlen(skb), skb->tail, delta))
		BUG();

	/* Optimization: no fragments, no reasons to preestimate
	 * size of pulled pages. Superb.
	 */
	if (!skb_shinfo(skb)->frag_list)
		goto pull_pages;

	/* Estimate size of pulled pages. */
	eat = delta;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size >= eat)
			goto pull_pages;
		eat -= skb_shinfo(skb)->frags[i].size;
	}

	/* If we need to update the frag list, we are in trouble.
	 * Certainly, it is possible to add an offset to skb data,
	 * but, taking into account that pulling is expected to
	 * be a very rare operation, it is worth fighting against
	 * further bloating of the skb head and crucifying ourselves
	 * here instead. Pure masochism, indeed. 8)8)
	 */
	if (eat) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;
		struct sk_buff *clone = NULL;
		struct sk_buff *insp = NULL;

		do {
			BUG_ON(!list);

			if (list->len <= eat) {
				/* Eaten as whole. */
				eat -= list->len;
				list = list->next;
				insp = list;
			} else {
				/* Eaten partially. */

				if (skb_shared(list)) {
					/* Sucks! We need to fork list. :-( */
					clone = skb_clone(list, GFP_ATOMIC);
					if (!clone)
						return NULL;
					insp = list->next;
					list = clone;
				} else {
					/* This may be pulled without
					 * problems. */
					insp = list;
				}
				if (!pskb_pull(list, eat)) {
					if (clone)
						kfree_skb(clone);
					return NULL;
				}
				break;
			}
		} while (eat);

		/* Free pulled out fragments. */
		while ((list = skb_shinfo(skb)->frag_list) != insp) {
			skb_shinfo(skb)->frag_list = list->next;
			kfree_skb(list);
		}
		/* And insert new clone at head. */
		if (clone) {
			clone->next = list;
			skb_shinfo(skb)->frag_list = clone;
		}
	}
	/* Success! Now we may commit changes to skb data. */

pull_pages:
	eat = delta;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size <= eat) {
			put_page(skb_shinfo(skb)->frags[i].page);
			eat -= skb_shinfo(skb)->frags[i].size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_shinfo(skb)->frags[k].size -= eat;
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb->tail     += delta;
	skb->data_len -= delta;

	return skb->tail;
}

/* Copy some data bits from skb to kernel buffer. */

int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
{
	int i, copy;
	int start = skb_headlen(skb);

	if (offset > (int)skb->len - len)
		goto fault;

	/* Copy header. */
	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		memcpy(to, skb->data + offset, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to     += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
			memcpy(to,
			       vaddr + skb_shinfo(skb)->frags[i].page_offset+
			       offset - start, copy);
			kunmap_skb_frag(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to     += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				if (skb_copy_bits(list, offset - start,
						  to, copy))
					goto fault;
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
				to     += copy;
			}
			start = end;
		}
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
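
/*
 * Usage sketch (illustrative): pulling a protocol header out of a
 * possibly non-linear buffer into a stack variable, regardless of
 * whether the bytes live in the linear area, page frags, or frag list:
 *
 *	struct udphdr uh;
 *	if (skb_copy_bits(skb, offset, &uh, sizeof(uh)))
 *		goto short_packet;
 */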

/**
 *	skb_store_bits - store bits from kernel buffer to skb
 *	@skb: destination buffer
 *	@offset: offset in destination
 *	@from: source buffer
 *	@len: number of bytes to copy
 *
 *	Copy the specified number of bytes from the source buffer to the
 *	destination skb.  This function handles all the messy bits of
 *	traversing fragment lists and such.
 */

int skb_store_bits(const struct sk_buff *skb, int offset, void *from, int len)
{
	int i, copy;
	int start = skb_headlen(skb);

	if (offset > (int)skb->len - len)
		goto fault;

	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		memcpy(skb->data + offset, from, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		from += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + frag->size;
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_skb_frag(frag);
			memcpy(vaddr + frag->page_offset + offset - start,
			       from, copy);
			kunmap_skb_frag(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				if (skb_store_bits(list, offset - start,
						   from, copy))
					goto fault;
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
				from += copy;
			}
			start = end;
		}
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}

EXPORT_SYMBOL(skb_store_bits);

/* Checksum skb data. */

unsigned int skb_checksum(const struct sk_buff *skb, int offset,
			  int len, unsigned int csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int pos = 0;

	/* Checksum header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial(skb->data + offset, copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		pos	= copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			unsigned int csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial(vaddr + frag->page_offset +
					     offset - start, copy, 0);
			kunmap_skb_frag(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			pos    += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				unsigned int csum2;
				if (copy > len)
					copy = len;
				csum2 = skb_checksum(list, offset - start,
						     copy, 0);
				csum = csum_block_add(csum, csum2, pos);
				if ((len -= copy) == 0)
					return csum;
				offset += copy;
				pos    += copy;
			}
			start = end;
		}
	}
	BUG_ON(len);

	return csum;
}
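
/*
 * Usage sketch (illustrative): checksumming a transport payload that
 * may be scattered across page frags, then folding the 32-bit partial
 * sum into the final 16-bit checksum; 'off' is hypothetical:
 *
 *	unsigned int csum = skb_checksum(skb, off, skb->len - off, 0);
 *	check = csum_fold(csum);
 */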

/* Both of the above in one bottle. */

unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
				    u8 *to, int len, unsigned int csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int pos = 0;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial_copy_nocheck(skb->data + offset, to,
						 copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		to     += copy;
		pos	= copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			unsigned int csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial_copy_nocheck(vaddr +
							  frag->page_offset +
							  offset - start, to,
							  copy, 0);
			kunmap_skb_frag(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			to     += copy;
			pos    += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			unsigned int csum2;
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				csum2 = skb_copy_and_csum_bits(list,
							       offset - start,
							       to, copy, 0);
				csum = csum_block_add(csum, csum2, pos);
				if ((len -= copy) == 0)
					return csum;
				offset += copy;
				to     += copy;
				pos    += copy;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return csum;
}

void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
{
	unsigned int csum;
	long csstart;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		csstart = skb->h.raw - skb->data;
	else
		csstart = skb_headlen(skb);

	BUG_ON(csstart > skb_headlen(skb));

	memcpy(to, skb->data, csstart);

	csum = 0;
	if (csstart != skb->len)
		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
					      skb->len - csstart, 0);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		long csstuff = csstart + skb->csum;

		*((unsigned short *)(to + csstuff)) = csum_fold(csum);
	}
}

/**
 *	skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The head item is
 *	returned or %NULL if the list is empty.
 */

struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}

/**
 *	skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The tail item is
 *	returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue_tail(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}

/**
 *	skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function takes the list
 *	lock and is atomic with respect to other list locking functions.
 */
void skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

/**
 *	skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff
 *	functions.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_head(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}

/**
 *	skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the tail of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff
 *	functions.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_tail(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
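
/*
 * Usage sketch (illustrative): these locked primitives form a classic
 * producer/consumer pair, e.g. between an interrupt handler and process
 * context; process_one() is a hypothetical consumer:
 *
 *	skb_queue_tail(&queue, skb);		(producer, any context)
 *	...
 *	while ((skb = skb_dequeue(&queue)) != NULL)
 *		process_one(skb);		(consumer)
 */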

/**
 *	skb_unlink	-	remove a buffer from a list
 *	@skb: buffer to remove
 *	@list: list to use
 *
 *	Remove a packet from a list. The list locks are taken and this
 *	function is atomic with respect to other list locked calls.
 *
 *	You must know what list the SKB is on.
 */
void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_unlink(skb, list);
	spin_unlock_irqrestore(&list->lock, flags);
}

/**
 *	skb_append	-	append a buffer
 *	@old: buffer to insert after
 *	@newsk: buffer to insert
 *	@list: list to use
 *
 *	Place a packet after a given packet in a list. The list locks are taken
 *	and this function is atomic with respect to other list locked calls.
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_append(old, newsk, list);
	spin_unlock_irqrestore(&list->lock, flags);
}


/**
 *	skb_insert	-	insert a buffer
 *	@old: buffer to insert before
 *	@newsk: buffer to insert
 *	@list: list to use
 *
 *	Place a packet before a given packet in a list. The list locks are
 * 	taken and this function is atomic with respect to other list locked
 *	calls.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_insert(newsk, old->prev, old, list);
	spin_unlock_irqrestore(&list->lock, flags);
}

#if 0
/*
 * 	Tune the memory allocator for a new MTU size.
 */
void skb_add_mtu(int mtu)
{
	/* Must match allocation in alloc_skb */
	mtu = SKB_DATA_ALIGN(mtu) + sizeof(struct skb_shared_info);

	kmem_add_cache_size(mtu);
}
#endif

static inline void skb_split_inside_header(struct sk_buff *skb,
					   struct sk_buff* skb1,
					   const u32 len, const int pos)
{
	int i;

	memcpy(skb_put(skb1, pos - len), skb->data + len, pos - len);

	/* And move data appendix as is. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];

	skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
	skb_shinfo(skb)->nr_frags  = 0;
	skb1->data_len		   = skb->data_len;
	skb1->len		   += skb1->data_len;
	skb->data_len		   = 0;
	skb->len		   = len;
	skb->tail		   = skb->data + len;
}

static inline void skb_split_no_header(struct sk_buff *skb,
				       struct sk_buff* skb1,
				       const u32 len, int pos)
{
	int i, k = 0;
	const int nfrags = skb_shinfo(skb)->nr_frags;

	skb_shinfo(skb)->nr_frags = 0;
	skb1->len		  = skb1->data_len = skb->len - len;
	skb->len		  = len;
	skb->data_len		  = len - pos;

	for (i = 0; i < nfrags; i++) {
		int size = skb_shinfo(skb)->frags[i].size;

		if (pos + size > len) {
			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];

			if (pos < len) {
				/* Split frag.
				 * We have two variants in this case:
				 * 1. Move all the frag to the second
				 *    part, if it is possible. F.e.
				 *    this approach is mandatory for TUX,
				 *    where splitting is expensive.
				 * 2. Split accurately. This is what we do.
				 */
				get_page(skb_shinfo(skb)->frags[i].page);
				skb_shinfo(skb1)->frags[0].page_offset += len - pos;
				skb_shinfo(skb1)->frags[0].size -= len - pos;
				skb_shinfo(skb)->frags[i].size	= len - pos;
				skb_shinfo(skb)->nr_frags++;
			}
			k++;
		} else
			skb_shinfo(skb)->nr_frags++;
		pos += size;
	}
	skb_shinfo(skb1)->nr_frags = k;
}

/**
 * skb_split - Split fragmented skb to two parts at length len.
 * @skb: the buffer to split
 * @skb1: the buffer to receive the second part
 * @len: new length for skb
 */
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
{
	int pos = skb_headlen(skb);

	if (len < pos)	/* Split line is inside header. */
		skb_split_inside_header(skb, skb1, len, pos);
	else		/* Second chunk has no header, nothing to copy. */
		skb_split_no_header(skb, skb1, len, pos);
}

/**
 * skb_prepare_seq_read - Prepare a sequential read of skb data
 * @skb: the buffer to read
 * @from: lower offset of data to be read
 * @to: upper offset of data to be read
 * @st: state variable
 *
 * Initializes the specified state variable. Must be called before
 * invoking skb_seq_read() for the first time.
 */
void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
			  unsigned int to, struct skb_seq_state *st)
{
	st->lower_offset = from;
	st->upper_offset = to;
	st->root_skb = st->cur_skb = skb;
	st->frag_idx = st->stepped_offset = 0;
	st->frag_data = NULL;
}

/**
 * skb_seq_read - Sequentially read skb data
 * @consumed: number of bytes consumed by the caller so far
 * @data: destination pointer for data to be returned
 * @st: state variable
 *
 * Reads a block of skb data at &consumed relative to the
 * lower offset specified to skb_prepare_seq_read(). Assigns
 * the head of the data block to &data and returns the length
 * of the block or 0 if the end of the skb data or the upper
 * offset has been reached.
 *
 * The caller is not required to consume all of the data
 * returned, i.e. &consumed is typically set to the number
 * of bytes already consumed and the next call to
 * skb_seq_read() will return the remaining part of the block.
 *
 * Note: The size of each block of data returned can be arbitrary;
 *       this limitation is the cost of zerocopy sequential
 *       reads of potentially non linear data.
 *
 * Note: Fragment lists within fragments are not implemented
 *       at the moment, state->root_skb could be replaced with
 *       a stack for this purpose.
 */
unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
			  struct skb_seq_state *st)
{
	unsigned int block_limit, abs_offset = consumed + st->lower_offset;
	skb_frag_t *frag;

	if (unlikely(abs_offset >= st->upper_offset))
		return 0;

next_skb:
	block_limit = skb_headlen(st->cur_skb);

	if (abs_offset < block_limit) {
		*data = st->cur_skb->data + abs_offset;
		return block_limit - abs_offset;
	}

	if (st->frag_idx == 0 && !st->frag_data)
		st->stepped_offset += skb_headlen(st->cur_skb);

	while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
		frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
		block_limit = frag->size + st->stepped_offset;

		if (abs_offset < block_limit) {
			if (!st->frag_data)
				st->frag_data = kmap_skb_frag(frag);

			*data = (u8 *) st->frag_data + frag->page_offset +
				(abs_offset - st->stepped_offset);

			return block_limit - abs_offset;
		}

		if (st->frag_data) {
			kunmap_skb_frag(st->frag_data);
			st->frag_data = NULL;
		}

		st->frag_idx++;
		st->stepped_offset += frag->size;
	}

	if (st->cur_skb->next) {
		st->cur_skb = st->cur_skb->next;
		st->frag_idx = 0;
		goto next_skb;
	} else if (st->root_skb == st->cur_skb &&
		   skb_shinfo(st->root_skb)->frag_list) {
		st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
		goto next_skb;
	}

	return 0;
}
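
/*
 * Usage sketch (illustrative): iterating over all data between two
 * offsets without linearizing the buffer; consume() is a hypothetical
 * callback. skb_abort_seq_read() is only required when the walk stops
 * before skb_seq_read() has returned 0:
 *
 *	struct skb_seq_state st;
 *	unsigned int consumed = 0, len;
 *	const u8 *data;
 *
 *	skb_prepare_seq_read(skb, from, to, &st);
 *	while ((len = skb_seq_read(consumed, &data, &st)) != 0)
 *		consumed += consume(data, len);
 */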

/**
 * skb_abort_seq_read - Abort a sequential read of skb data
 * @st: state variable
 *
 * Must be called if the sequential read is aborted, i.e. if
 * skb_seq_read() has not been called until it returned 0.
 */
void skb_abort_seq_read(struct skb_seq_state *st)
{
	if (st->frag_data)
		kunmap_skb_frag(st->frag_data);
}
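
/*
 * Usage sketch for the sequential read API above (illustrative, not
 * part of this file): walk all data in a possibly non-linear skb
 * without copying it. "consume" is a hypothetical callback standing
 * in for real processing.
 *
 *	static void example_seq_walk(struct sk_buff *skb,
 *				     void (*consume)(const u8 *, unsigned int))
 *	{
 *		struct skb_seq_state st;
 *		const u8 *data;
 *		unsigned int consumed = 0, len;
 *
 *		skb_prepare_seq_read(skb, 0, skb->len, &st);
 *		while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
 *			consume(data, len);
 *			consumed += len;
 *		}
 *	}
 *
 * No skb_abort_seq_read() is needed here because the loop runs until
 * skb_seq_read() returns 0; a caller that stops early must call it to
 * release any still-mapped fragment.
 */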

#define TS_SKB_CB(state)	((struct skb_seq_state *) &((state)->cb))

static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
					  struct ts_config *conf,
					  struct ts_state *state)
{
	return skb_seq_read(offset, text, TS_SKB_CB(state));
}

static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
{
	skb_abort_seq_read(TS_SKB_CB(state));
}

/**
 * skb_find_text - Find a text pattern in skb data
 * @skb: the buffer to look in
 * @from: search offset
 * @to: search limit
 * @config: textsearch configuration
 * @state: uninitialized textsearch state variable
 *
 * Finds a pattern in the skb data according to the specified
 * textsearch configuration. Use textsearch_next() to retrieve
 * subsequent occurrences of the pattern. Returns the offset
 * to the first occurrence or UINT_MAX if no match was found.
 */
unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
			   unsigned int to, struct ts_config *config,
			   struct ts_state *state)
{
	unsigned int ret;

	config->get_next_block = skb_ts_get_next_block;
	config->finish = skb_ts_finish;

	skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));

	ret = textsearch_find(config, state);
	return (ret <= to - from ? ret : UINT_MAX);
}
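
/*
 * Usage sketch (illustrative): searching an skb for a byte pattern via
 * the textsearch API. The "kmp" algorithm name and the "GET " pattern
 * are example values; see lib/textsearch.c for the real interface.
 *
 *	struct ts_config *conf;
 *	struct ts_state state;
 *	unsigned int pos;
 *
 *	conf = textsearch_prepare("kmp", "GET ", 4, GFP_KERNEL, TS_AUTOLOAD);
 *	if (IS_ERR(conf))
 *		return PTR_ERR(conf);
 *	pos = skb_find_text(skb, 0, skb->len, conf, &state);
 *	textsearch_destroy(conf);
 *
 * A result of UINT_MAX means the pattern was not found.
 */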

/**
 * skb_append_datato_frags - append user data to an skb
 * @sk: sock structure
 * @skb: skb structure to be appended with user data
 * @getfrag: call back function to be used for getting the user data
 * @from: pointer to user message iov
 * @length: length of the iov message
 *
 * Description: This procedure appends the user data to the fragment part
 * of the skb. If any page allocation fails, this procedure returns -ENOMEM.
 */
int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			int (*getfrag)(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			void *from, int length)
{
	int frg_cnt = 0;
	skb_frag_t *frag = NULL;
	struct page *page = NULL;
	int copy, left;
	int offset = 0;
	int ret;

	do {
		/* Return error if we don't have space for new frag */
		frg_cnt = skb_shinfo(skb)->nr_frags;
		if (frg_cnt >= MAX_SKB_FRAGS)
			return -EFAULT;

		/* allocate a new page for next frag */
		page = alloc_pages(sk->sk_allocation, 0);

		/* If alloc_page fails just return failure and caller will
		 * free previous allocated pages by doing kfree_skb()
		 */
		if (page == NULL)
			return -ENOMEM;

		/* initialize the next frag */
		sk->sk_sndmsg_page = page;
		sk->sk_sndmsg_off = 0;
		skb_fill_page_desc(skb, frg_cnt, page, 0, 0);
		skb->truesize += PAGE_SIZE;
		atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);

		/* get the new initialized frag */
		frg_cnt = skb_shinfo(skb)->nr_frags;
		frag = &skb_shinfo(skb)->frags[frg_cnt - 1];

		/* copy the user data to page */
		left = PAGE_SIZE - frag->page_offset;
		copy = (length > left) ? left : length;

		ret = getfrag(from, (page_address(frag->page) +
			    frag->page_offset + frag->size),
			    offset, copy, 0, skb);
		if (ret < 0)
			return -EFAULT;

		/* copy was successful so update the size parameters */
		sk->sk_sndmsg_off += copy;
		frag->size += copy;
		skb->len += copy;
		skb->data_len += copy;
		offset += copy;
		length -= copy;

	} while (length > 0);

	return 0;
}
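
/*
 * Illustrative getfrag() callback (not defined in this file): copies
 * from a plain kernel buffer passed as "from". Real callers typically
 * use something like ip_generic_getfrag(), which copies from a user
 * iovec and accumulates a checksum as the protocol requires.
 *
 *	static int example_getfrag(void *from, char *to, int offset,
 *				   int len, int odd, struct sk_buff *skb)
 *	{
 *		memcpy(to, (char *)from + offset, len);
 *		return 0;
 *	}
 *
 *	err = skb_append_datato_frags(sk, skb, example_getfrag,
 *				      kbuf, kbuf_len);
 *
 * "example_getfrag", "kbuf" and "kbuf_len" are hypothetical names used
 * only for this sketch.
 */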

/**
 *	skb_pull_rcsum - pull skb and update receive checksum
 *	@skb: buffer to update
 *	@len: length of data pulled
 *
 *	This function performs an skb_pull on the packet and updates
 *	the CHECKSUM_COMPLETE checksum.  It should be used on
 *	receive path processing instead of skb_pull unless you know
 *	that the checksum difference is zero (e.g., a valid IP header)
 *	or you are setting ip_summed to CHECKSUM_NONE.
 */
unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
{
	BUG_ON(len > skb->len);
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	skb_postpull_rcsum(skb, skb->data, len);
	return skb->data += len;
}

EXPORT_SYMBOL_GPL(skb_pull_rcsum);
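
/*
 * Usage sketch (illustrative, not part of this file): stripping a
 * fixed-size encapsulation header on the receive path while keeping a
 * CHECKSUM_COMPLETE value valid. The 4-byte header length is an
 * arbitrary example value.
 *
 *	if (!pskb_may_pull(skb, 4))
 *		goto drop;
 *	skb_pull_rcsum(skb, 4);
 */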

/**
 *	skb_segment - Perform protocol segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *
 *	This function performs segmentation on the given skb.  It returns
 *	a list of new skbs, one per segment, linked through their next
 *	pointers, or an ERR_PTR() if an error is encountered.
 */
struct sk_buff *skb_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = NULL;
	struct sk_buff *tail = NULL;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int doffset = skb->data - skb->mac.raw;
	unsigned int offset = doffset;
	unsigned int headroom;
	unsigned int len;
	int sg = features & NETIF_F_SG;
	int nfrags = skb_shinfo(skb)->nr_frags;
	int err = -ENOMEM;
	int i = 0;
	int pos;

	__skb_push(skb, doffset);
	headroom = skb_headroom(skb);
	pos = skb_headlen(skb);

	do {
		struct sk_buff *nskb;
		skb_frag_t *frag;
		int hsize, nsize;
		int k;
		int size;

		len = skb->len - offset;
		if (len > mss)
			len = mss;

		hsize = skb_headlen(skb) - offset;
		if (hsize < 0)
			hsize = 0;
		nsize = hsize + doffset;
		if (nsize > len + doffset || !sg)
			nsize = len + doffset;

		nskb = alloc_skb(nsize + headroom, GFP_ATOMIC);
		if (unlikely(!nskb))
			goto err;

		if (segs)
			tail->next = nskb;
		else
			segs = nskb;
		tail = nskb;

		nskb->dev = skb->dev;
		nskb->priority = skb->priority;
		nskb->protocol = skb->protocol;
		nskb->dst = dst_clone(skb->dst);
		memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
		nskb->pkt_type = skb->pkt_type;
		nskb->mac_len = skb->mac_len;

		skb_reserve(nskb, headroom);
		nskb->mac.raw = nskb->data;
		nskb->nh.raw = nskb->data + skb->mac_len;
		nskb->h.raw = nskb->nh.raw + (skb->h.raw - skb->nh.raw);
		memcpy(skb_put(nskb, doffset), skb->data, doffset);

		if (!sg) {
			nskb->csum = skb_copy_and_csum_bits(skb, offset,
							    skb_put(nskb, len),
							    len, 0);
			continue;
		}

		frag = skb_shinfo(nskb)->frags;
		k = 0;

		nskb->ip_summed = CHECKSUM_PARTIAL;
		nskb->csum = skb->csum;
		memcpy(skb_put(nskb, hsize), skb->data + offset, hsize);

		while (pos < offset + len) {
			BUG_ON(i >= nfrags);

			*frag = skb_shinfo(skb)->frags[i];
			get_page(frag->page);
			size = frag->size;

			if (pos < offset) {
				frag->page_offset += offset - pos;
				frag->size -= offset - pos;
			}

			k++;

			if (pos + size <= offset + len) {
				i++;
				pos += size;
			} else {
				frag->size -= pos + size - (offset + len);
				break;
			}

			frag++;
		}

		skb_shinfo(nskb)->nr_frags = k;
		nskb->data_len = len - hsize;
		nskb->len += nskb->data_len;
		nskb->truesize += nskb->data_len;
	} while ((offset += len) < skb->len);

	return segs;

err:
	while ((skb = segs)) {
		segs = skb->next;
		/* kfree_skb(), not kfree(): the segments are real skbs
		 * holding dst and page references that must be dropped.
		 */
		kfree_skb(skb);
	}
	return ERR_PTR(err);
}

EXPORT_SYMBOL_GPL(skb_segment);
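
/*
 * Usage sketch (illustrative): the calling pattern typically used by
 * protocol gso_segment handlers. "features" would come from the output
 * device, and "update_protocol_headers" is a hypothetical stand-in for
 * the per-segment header fixups each protocol performs.
 *
 *	struct sk_buff *segs;
 *
 *	segs = skb_segment(skb, features);
 *	if (IS_ERR(segs))
 *		return segs;
 *	for (skb = segs; skb; skb = skb->next)
 *		update_protocol_headers(skb);
 *	return segs;
 */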

void __init skb_init(void)
{
	skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
					      sizeof(struct sk_buff),
					      0,
					      SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					      NULL, NULL);
	skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
						(2*sizeof(struct sk_buff)) +
						sizeof(atomic_t),
						0,
						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
						NULL, NULL);
}

EXPORT_SYMBOL(___pskb_trim);
EXPORT_SYMBOL(__kfree_skb);
EXPORT_SYMBOL(kfree_skb);
EXPORT_SYMBOL(__pskb_pull_tail);
EXPORT_SYMBOL(__alloc_skb);
EXPORT_SYMBOL(__netdev_alloc_skb);
EXPORT_SYMBOL(pskb_copy);
EXPORT_SYMBOL(pskb_expand_head);
EXPORT_SYMBOL(skb_checksum);
EXPORT_SYMBOL(skb_clone);
EXPORT_SYMBOL(skb_clone_fraglist);
EXPORT_SYMBOL(skb_copy);
EXPORT_SYMBOL(skb_copy_and_csum_bits);
EXPORT_SYMBOL(skb_copy_and_csum_dev);
EXPORT_SYMBOL(skb_copy_bits);
EXPORT_SYMBOL(skb_copy_expand);
EXPORT_SYMBOL(skb_over_panic);
EXPORT_SYMBOL(skb_pad);
EXPORT_SYMBOL(skb_realloc_headroom);
EXPORT_SYMBOL(skb_under_panic);
EXPORT_SYMBOL(skb_dequeue);
EXPORT_SYMBOL(skb_dequeue_tail);
EXPORT_SYMBOL(skb_insert);
EXPORT_SYMBOL(skb_queue_purge);
EXPORT_SYMBOL(skb_queue_head);
EXPORT_SYMBOL(skb_queue_tail);
EXPORT_SYMBOL(skb_unlink);
EXPORT_SYMBOL(skb_append);
EXPORT_SYMBOL(skb_split);
EXPORT_SYMBOL(skb_prepare_seq_read);
EXPORT_SYMBOL(skb_seq_read);
EXPORT_SYMBOL(skb_abort_seq_read);
EXPORT_SYMBOL(skb_find_text);
EXPORT_SYMBOL(skb_append_datato_frags);