mirror of https://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson

commit 2ca4532a49
After commit 355cfa73 ("mm: modify swap_map and add SWAP_HAS_CACHE flag"),
only a context that has set the SWAP_HAS_CACHE flag via swapcache_prepare()
or get_swap_page() calls add_to_swap_cache(), so add_to_swap_cache() no
longer returns -EEXIST: at most one context can hold SWAP_HAS_CACHE for a
given entry at a time, so no two contexts can race to insert the same entry.

Even though it no longer returns -EEXIST, it is conceptually bad behavior
to loop back into swapcache_prepare() on an -EEXIST from
add_to_swap_cache(), because doing so means the SWAP_HAS_CACHE flag was
cleared (by swapcache_free()) while the entry was in the swap cache.

This patch removes the now-redundant code and comments from the callers,
and adds a VM_BUG_ON() and explanatory comments to the error path of
add_to_swap_cache().
Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
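
The invariant behind the patch can be sketched briefly. The function below is an illustration distilled from the two call sites in mm/swap_state.c (add_to_swap() and read_swap_cache_async()), not code from the patch itself, and swap_out_page() is a hypothetical name: a caller first takes the SWAP_HAS_CACHE pin on the entry, implicitly via get_swap_page() for a fresh entry or explicitly via swapcache_prepare() for an existing one, then calls add_to_swap_cache(), and on failure drops the pin with swapcache_free(). Because at most one context can hold the pin for a given entry, no two contexts race to insert the same entry, so only -ENOMEM is possible:

	/* Hypothetical caller, mirroring add_to_swap() in the file below;
	 * the page is expected to be locked by the caller. */
	static int swap_out_page(struct page *page)
	{
		swp_entry_t entry = get_swap_page();	/* pins entry: sets SWAP_HAS_CACHE */
		int err;

		if (!entry.val)
			return 0;			/* no free swap slot */

		err = add_to_swap_cache(page, entry,
				__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);
		if (err) {
			/* only -ENOMEM is possible: this context holds
			 * SWAP_HAS_CACHE, so -EEXIST cannot happen */
			swapcache_free(entry, NULL);	/* drop the pin */
			return 0;
		}
		SetPageDirty(page);
		return 1;
	}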

mm/swap_state.c (397 lines, 10 KiB, C)

/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/page_cgroup.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list, to make sync_page look nicer, and to allow
 * future use of radix_tree tags in the swap cache.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.sync_page	= block_sync_page,
	.set_page_dirty	= __set_page_dirty_nobuffers,
	.migratepage	= migrate_page,
};

static struct backing_dev_info swap_backing_dev_info = {
	.name		= "swap",
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
	.unplug_io_fn	= swap_unplug_io_fn,
};

struct address_space swapper_space = {
	.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
	.tree_lock	= __SPIN_LOCK_UNLOCKED(swapper_space.tree_lock),
	.a_ops		= &swap_aops,
	.i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
	.backing_dev_info = &swap_backing_dev_info,
};
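
/*
 * Illustration (not part of the original file): the swap cache keys its
 * radix tree by the raw swp_entry_t value, so the cached page for a
 * (type, offset) pair is found with an ordinary find_get_page() call on
 * swapper_space, exactly as lookup_swap_cache() does further below:
 *
 *	swp_entry_t entry = swp_entry(type, offset);
 *	struct page *page = find_get_page(&swapper_space, entry.val);
 */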

#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages);
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap  = %ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
static int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(PageSwapCache(page));
	VM_BUG_ON(!PageSwapBacked(page));

	page_cache_get(page);
	SetPageSwapCache(page);
	set_page_private(page, entry.val);

	spin_lock_irq(&swapper_space.tree_lock);
	error = radix_tree_insert(&swapper_space.page_tree, entry.val, page);
	if (likely(!error)) {
		total_swapcache_pages++;
		__inc_zone_page_state(page, NR_FILE_PAGES);
		INC_CACHE_INFO(add_total);
	}
	spin_unlock_irq(&swapper_space.tree_lock);

	if (unlikely(error)) {
		/*
		 * Only a context that has set the SWAP_HAS_CACHE flag
		 * would call add_to_swap_cache(),
		 * so add_to_swap_cache() doesn't return -EEXIST.
		 */
		VM_BUG_ON(error == -EEXIST);
		set_page_private(page, 0UL);
		ClearPageSwapCache(page);
		page_cache_release(page);
	}

	return error;
}


int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
	int error;

	error = radix_tree_preload(gfp_mask);
	if (!error) {
		error = __add_to_swap_cache(page, entry);
		radix_tree_preload_end();
	}
	return error;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageSwapCache(page));
	VM_BUG_ON(PageWriteback(page));

	radix_tree_delete(&swapper_space.page_tree, page_private(page));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	total_swapcache_pages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageUptodate(page));

	entry = get_swap_page();
	if (!entry.val)
		return 0;

	/*
	 * Radix-tree node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache and mark it dirty
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

	if (!err) {	/* Success */
		SetPageDirty(page);
		return 1;
	} else {	/* -ENOMEM radix-tree allocation failure */
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry, NULL);
		return 0;
	}
}
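
/*
 * Illustration, paraphrased from the reclaim-side caller in mm/vmscan.c
 * (shrink_page_list); not part of this file. Reclaim relies on the 0/1
 * return value of add_to_swap() above to decide whether an anonymous
 * page can be paged out at all:
 *
 *	if (PageAnon(page) && !PageSwapCache(page)) {
 *		if (!add_to_swap(page))
 *			goto activate_locked;	(no swap slot, or -ENOMEM)
 *	}
 */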

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;

	entry.val = page_private(page);

	spin_lock_irq(&swapper_space.tree_lock);
	__delete_from_swap_cache(page);
	spin_unlock_irq(&swapper_space.tree_lock);

	swapcache_free(entry, page);
	page_cache_release(page);
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 * 					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	page_cache_release(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;

	lru_add_drain();
	while (nr) {
		int todo = min(nr, PAGEVEC_SIZE);
		int i;

		for (i = 0; i < todo; i++)
			free_swap_cache(pagep[i]);
		release_pages(pagep, todo, 0);
		pagep += todo;
		nr -= todo;
	}
}

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
	struct page *page;

	page = find_get_page(&swapper_space, entry.val);

	if (page)
		INC_CACHE_INFO(find_success);

	INC_CACHE_INFO(find_total);
	return page;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *found_page, *new_page = NULL;
	int err;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(&swapper_space, entry.val);
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * call radix_tree_preload() while we can wait.
		 */
		err = radix_tree_preload(gfp_mask & GFP_KERNEL);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) {	/* seems racy */
			radix_tree_preload_end();
			continue;
		}
		if (err) {		/* swp entry is obsolete ? */
			radix_tree_preload_end();
			break;
		}

		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
		__set_page_locked(new_page);
		SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_anon(new_page);
			swap_readpage(new_page);
			return new_page;
		}
		radix_tree_preload_end();
		ClearPageSwapBacked(new_page);
		__clear_page_locked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry, NULL);
	} while (err != -ENOMEM);

	if (new_page)
		page_cache_release(new_page);
	return found_page;
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	int nr_pages;
	struct page *page;
	unsigned long offset;
	unsigned long end_offset;

	/*
	 * Get starting offset for readaround, and number of pages to read.
	 * Adjust starting address by readbehind (for NUMA interleave case)?
	 * No, it's very unlikely that swap layout would follow vma layout,
	 * more likely that neighbouring swap pages came from the same node:
	 * so use the same "addr" to choose the same node for each swap read.
	 */
	nr_pages = valid_swaphandles(entry, &offset);
	for (end_offset = offset + nr_pages; offset < end_offset; offset++) {
		/* Ok, do the async read-ahead now */
		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
						gfp_mask, vma, addr);
		if (!page)
			break;
		page_cache_release(page);
	}
	lru_add_drain();	/* Push any new pages onto the LRU now */
	return read_swap_cache_async(entry, gfp_mask, vma, addr);
}
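
For concreteness, the "aligned block of (1 << page_cluster) entries" that the swapin_readahead() comment describes can be sketched as below. The authoritative window computation lives in valid_swaphandles() in mm/swapfile.c; this fragment only illustrates the alignment arithmetic, assuming the default page_cluster value of 3:

	/* Sketch of the aligned readahead window; illustrative only. */
	unsigned long cluster = 1UL << page_cluster;	/* 8 entries for page_cluster == 3 */
	unsigned long start = swp_offset(entry) & ~(cluster - 1);
	unsigned long end = start + cluster;
	/* A fault on swap offset 13 with an 8-entry cluster thus covers
	 * offsets 8..15: the original request is queued together with
	 * the readahead ones, as the comment above says. */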