mirror of https://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson

commit 46031f9a38
If a bypass-the-cache read fails, we simply try again through the cache.  If
it fails again it will trigger normal recovery procedures.
update 1:
From: NeilBrown <neilb@suse.de>
1/
  chunk_aligned_read and retry_aligned_read assume that
      data_disks == raid_disks - 1
  which is not true for raid6.
  So when an aligned read request bypasses the cache, we can get the wrong data.
2/ The cloned bio is being used-after-free in raid5_align_endio
   (to test BIO_UPTODATE).
3/ We forgot to add rdev->data_offset when submitting
   a bio for aligned-read
4/ clone_bio calls blk_recount_segments and then we change bi_bdev,
   so we need to invalidate the segment counts.
5/ We don't de-reference the rdev when the read completes.
   This means we need to record the rdev so that it is still
   available in the end_io routine.  Fortunately
   bi_next in the original bio is unused at this point so
   we can stuff it in there.
6/ We leak a cloned bio if the target rdev is not usable.
update 2:
From: NeilBrown <neilb@suse.de>
1/ When aligned requests fail (read error) they need to be retried
   via the normal method (stripe cache).  As we cannot be sure that
   we can process a single read in one go (we may not be able to
   allocate all the stripes needed) we store a bio-being-retried
   and a list of bios-that-still-need-to-be-retried.
   When we find a bio that needs to be retried, we add it to
   the list, not to the single-bio slot (see the sketch below).
2/ We were never incrementing 'scnt' when resubmitting failed
   aligned requests.
[akpm@osdl.org: build fix]
Signed-off-by: Neil Brown <neilb@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
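A minimal sketch of the list handling from update 2, assuming the
retry_read_aligned_list and device_lock fields declared in raid5.h below;
the helper name is illustrative, not necessarily what the patch uses:

static void add_bio_to_retry(struct bio *bi, raid5_conf_t *conf)
{
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);
	/* chain onto the retry list via bi_next rather than
	 * overwriting the single retry_read_aligned slot */
	bi->bi_next = conf->retry_read_aligned_list;
	conf->retry_read_aligned_list = bi;
	spin_unlock_irqrestore(&conf->device_lock, flags);

	md_wakeup_thread(conf->mddev->thread);
}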
		
	
			
		
			
				
	
	
		
281 lines · 11 KiB · C
#ifndef _RAID5_H
#define _RAID5_H

#include <linux/raid/md.h>
#include <linux/raid/xor.h>

/*
 *
 * Each stripe contains one buffer per disc.  Each buffer can be in
 * one of a number of states stored in "flags".  Changes between
 * these states happen *almost* exclusively under a per-stripe
 * spinlock.  Some very specific changes can happen in bi_end_io, and
 * these are not protected by the spin lock.
 *
 * The flag bits that are used to represent these states are:
 *   R5_UPTODATE and R5_LOCKED
 *
 * State Empty == !UPTODATE, !LOCK
 *        We have no data, and there is no active request
 * State Want == !UPTODATE, LOCK
 *        A read request is being submitted for this block
 * State Dirty == UPTODATE, LOCK
 *        Some new data is in this buffer, and it is being written out
 * State Clean == UPTODATE, !LOCK
 *        We have valid data which is the same as on disc
 *
 * The possible state transitions are:
 *
 *  Empty -> Want   - on read or write to get old data for parity calc
 *  Empty -> Dirty  - on compute_parity to satisfy write/sync request (RECONSTRUCT_WRITE)
 *  Empty -> Clean  - on compute_block when computing a block for a failed drive
 *  Want  -> Empty  - on failed read
 *  Want  -> Clean  - on successful completion of read request
 *  Dirty -> Clean  - on successful completion of write request
 *  Dirty -> Clean  - on failed write
 *  Clean -> Dirty  - on compute_parity to satisfy write/sync (RECONSTRUCT or RMW)
 *
 * The Want->Empty, Want->Clean and Dirty->Clean transitions
 * all happen in b_end_io at interrupt time.
 * Each sets the Uptodate bit before releasing the Lock bit.
 * This leaves one multi-stage transition:
 *    Want->Dirty->Clean
 * This is safe because thinking that a Clean buffer is actually dirty
 * will at worst delay some action, and the stripe will be scheduled
 * for attention after the transition is complete.
 *
 * There is one possibility that is not covered by these states.  That
 * is if one drive has failed and there is a spare being rebuilt.  We
 * can't distinguish between a clean block that has been generated
 * from parity calculations, and a clean block that has been
 * successfully written to the spare (or to parity when resyncing).
 * To distinguish these states we have a stripe bit STRIPE_INSYNC that
 * is set whenever a write is scheduled to the spare, or to the parity
 * disc if there is no spare.  A sync request clears this bit, and
 * when we find it set with no buffers locked, we know the sync is
 * complete.
 *
 * Buffers for the md device that arrive via make_request are attached
 * to the appropriate stripe in one of two lists linked on b_reqnext.
 * One list (bh_read) is for read requests, one (bh_write) for writes.
 * There should never be more than one buffer on the two lists
 * together, but we are not guaranteed of that so we allow for more.
 *
 * If a buffer is on the read list when the associated cache buffer is
 * Uptodate, the data is copied into the read buffer and its b_end_io
 * routine is called.  This may happen in the end_request routine only
 * if the buffer has just successfully been read.  end_request should
 * remove the buffers from the list and then set the Uptodate bit on
 * the buffer.  Other threads may do this only if they first check
 * that the Uptodate bit is set.  Once they have checked that they may
 * take buffers off the read queue.
 *
 * When a buffer on the write list is committed for write it is copied
 * into the cache buffer, which is then marked dirty, and moved onto a
 * third list, the written list (bh_written).  Once both the parity
 * block and the cached buffer are successfully written, any buffer on
 * a written list can be returned with b_end_io.
 *
 * The write list and read list both act as fifos.  The read list is
 * protected by the device_lock.  The write and written lists are
 * protected by the stripe lock.  The device_lock, which can be
 * claimed while the stripe lock is held, is only for list
 * manipulations and will only be held for a very short time.  It can
 * be claimed from interrupts.
 *
 *
 * Stripes in the stripe cache can be on one of two lists (or on
 * neither).  The "inactive_list" contains stripes which are not
 * currently being used for any request.  They can freely be reused
 * for another stripe.  The "handle_list" contains stripes that need
 * to be handled in some way.  Both of these are fifo queues.  Each
 * stripe is also (potentially) linked to a hash bucket in the hash
 * table so that it can be found by sector number.  Stripes that are
 * not hashed must be on the inactive_list, and will normally be at
 * the front.  All stripes start life this way.
 *
 * The inactive_list, handle_list and hash bucket lists are all protected by the
 * device_lock.
 *  - stripes on the inactive_list never have their stripe_lock held.
 *  - stripes have a reference counter. If count==0, they are on a list.
 *  - If a stripe might need handling, STRIPE_HANDLE is set.
 *  - When refcount reaches zero, then if STRIPE_HANDLE it is put on
 *    handle_list else inactive_list
 *
 * This, combined with the fact that STRIPE_HANDLE is only ever
 * cleared while a stripe has a non-zero count, means that if the
 * refcount is 0 and STRIPE_HANDLE is set, then it is on the
 * handle_list, and if the refcount is 0 and STRIPE_HANDLE is not set,
 * then the stripe is on the inactive_list.
 *
 * The possible transitions are:
 *  activate an unhashed/inactive stripe (get_active_stripe())
 *     lockdev check-hash unlink-stripe cnt++ clean-stripe hash-stripe unlockdev
 *  activate a hashed, possibly active stripe (get_active_stripe())
 *     lockdev check-hash if(!cnt++)unlink-stripe unlockdev
 *  attach a request to an active stripe (add_stripe_bh())
 *     lockdev attach-buffer unlockdev
 *  handle a stripe (handle_stripe())
 *     lockstripe clrSTRIPE_HANDLE ... (lockdev check-buffers unlockdev) ..
 *     change-state .. record io needed unlockstripe schedule io
 *  release an active stripe (release_stripe())
 *     lockdev if (!--cnt) { if STRIPE_HANDLE, add to handle_list else add to inactive-list } unlockdev
 *
 * The refcount counts each thread that has activated the stripe,
 * plus raid5d if it is handling it, plus one for each active request
 * on a cached buffer.
 */

struct stripe_head {
	struct hlist_node	hash;
	struct list_head	lru;			/* inactive_list or handle_list */
	struct raid5_private_data	*raid_conf;
	sector_t		sector;			/* sector of this row */
	int			pd_idx;			/* parity disk index */
	unsigned long		state;			/* state flags */
	atomic_t		count;			/* nr of active threads/requests */
	spinlock_t		lock;
	int			bm_seq;	/* sequence number for bitmap flushes */
	int			disks;			/* disks in stripe */
	struct r5dev {
		struct bio	req;
		struct bio_vec	vec;
		struct page	*page;
		struct bio	*toread, *towrite, *written;
		sector_t	sector;			/* sector of this page */
		unsigned long	flags;
	} dev[1]; /* allocated with extra space depending on RAID geometry */
};
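
/*
 * Illustrative helper (not part of the original header): since dev[]
 * is declared with one element but sized by the RAID geometry, a
 * stripe_head for 'disks' devices is allocated with trailing space
 * for the extra r5dev entries, roughly:
 */
static inline size_t stripe_head_size(int disks)
{
	return sizeof(struct stripe_head) +
		(disks - 1) * sizeof(struct r5dev);
}
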
/* Flags */
#define	R5_UPTODATE	0	/* page contains current data */
#define	R5_LOCKED	1	/* IO has been submitted on "req" */
#define	R5_OVERWRITE	2	/* towrite covers whole page */
/* and some that are internal to handle_stripe */
#define	R5_Insync	3	/* rdev && rdev->in_sync at start */
#define	R5_Wantread	4	/* want to schedule a read */
#define	R5_Wantwrite	5	/* want to schedule a write */
#define	R5_Overlap	7	/* There is a pending overlapping request on this block */
#define	R5_ReadError	8	/* seen a read error here recently */
#define	R5_ReWrite	9	/* have tried to over-write the read error */

#define	R5_Expanded	10	/* This block now has post-expand data */
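
/*
 * Illustrative helper (not part of the original header): map the
 * R5_UPTODATE/R5_LOCKED bits of an r5dev to the four buffer states
 * described at the top of this file.
 */
static inline const char *r5dev_state_name(struct r5dev *dev)
{
	int uptodate = test_bit(R5_UPTODATE, &dev->flags);
	int locked   = test_bit(R5_LOCKED, &dev->flags);

	if (!uptodate)
		return locked ? "Want" : "Empty";
	return locked ? "Dirty" : "Clean";
}
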
/*
 * Write method
 */
#define RECONSTRUCT_WRITE	1
#define READ_MODIFY_WRITE	2
/* not a write method, but a compute_parity mode */
#define	CHECK_PARITY		3

/*
 * Stripe state
 */
#define STRIPE_HANDLE		2
#define	STRIPE_SYNCING		3
#define	STRIPE_INSYNC		4
#define	STRIPE_PREREAD_ACTIVE	5
#define	STRIPE_DELAYED		6
#define	STRIPE_DEGRADED		7
#define	STRIPE_BIT_DELAY	8
#define	STRIPE_EXPANDING	9
#define	STRIPE_EXPAND_SOURCE	10
#define	STRIPE_EXPAND_READY	11
/*
 * Plugging:
 *
 * To improve write throughput, we need to delay the handling of some
 * stripes until there has been a chance that several write requests
 * for the one stripe have all been collected.
 * In particular, any write request that would require pre-reading
 * is put on a "delayed" queue until there are no stripes currently
 * in a pre-read phase.  Further, if the "delayed" queue is empty when
 * a stripe is put on it then we "plug" the queue and do not process it
 * until an unplug call is made (i.e. until the unplug_io_fn() is called).
 *
 * When preread is initiated on a stripe, we set PREREAD_ACTIVE and add
 * it to the count of prereading stripes.
 * When a write is initiated, or the stripe refcnt == 0 (just in case), we
 * clear the PREREAD_ACTIVE flag and decrement the count.
 * Whenever the 'handle' queue is empty and the device is not plugged, we
 * move any stripes from delayed to handle, clear the DELAYED flag and set
 * PREREAD_ACTIVE.  (An illustrative sketch of this move follows
 * struct raid5_private_data below.)
 * In stripe_handle, if we find pre-reading is necessary, we do it if
 * PREREAD_ACTIVE is set, else we set DELAYED which will send the stripe
 * to the delayed queue.
 * HANDLE gets cleared if stripe_handle leaves nothing locked.
 */

struct disk_info {
	mdk_rdev_t	*rdev;
};

struct raid5_private_data {
	struct hlist_head	*stripe_hashtbl;
	mddev_t			*mddev;
	struct disk_info	*spare;
	int			chunk_size, level, algorithm;
	int			max_degraded;
	int			raid_disks;
	int			max_nr_stripes;

	/* used during an expand */
	sector_t		expand_progress;	/* MaxSector when no expand happening */
	sector_t		expand_lo; /* from here up to expand_progress is out-of-bounds
					    * as we haven't flushed the metadata yet
					    */
	int			previous_raid_disks;

	struct list_head	handle_list; /* stripes needing handling */
	struct list_head	delayed_list; /* stripes that have plugged requests */
	struct list_head	bitmap_list; /* stripes delayed awaiting bitmap update */
	struct bio		*retry_read_aligned; /* currently retrying aligned bios   */
	struct bio		*retry_read_aligned_list; /* aligned bios retry list  */
	atomic_t		preread_active_stripes; /* stripes with scheduled io */
	atomic_t		active_aligned_reads;

	atomic_t		reshape_stripes; /* stripes with pending writes for reshape */
	/* unfortunately we need two cache names as we temporarily have
	 * two caches.
	 */
	int			active_name;
	char			cache_name[2][20];
	struct kmem_cache	*slab_cache; /* for allocating stripes */

	int			seq_flush, seq_write;
	int			quiesce;

	int			fullsync;  /* set to 1 if a full sync is needed,
					    * (fresh device added).
					    * Cleared when a sync completes.
					    */

	struct page		*spare_page; /* Used when checking P/Q in raid6 */

	/*
	 * Free stripes pool
	 */
	atomic_t		active_stripes;
	struct list_head	inactive_list;
	wait_queue_head_t	wait_for_stripe;
	wait_queue_head_t	wait_for_overlap;
	int			inactive_blocked;	/* release of inactive stripes blocked,
							 * waiting for 25% to be free
							 */
	int			pool_size; /* number of disks in stripeheads in pool */
	spinlock_t		device_lock;
	struct disk_info	*disks;
};
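
/*
 * Illustrative sketch (not part of the original header) of the
 * delayed-to-handle move described in the "Plugging" comment above.
 * Caller holds conf->device_lock; PREREAD_THRESHOLD is a hypothetical
 * bound standing in for the driver's real IO threshold.
 */
#define PREREAD_THRESHOLD	4
static inline void activate_delayed_stripes(struct raid5_private_data *conf)
{
	while (atomic_read(&conf->preread_active_stripes) < PREREAD_THRESHOLD &&
	       !list_empty(&conf->delayed_list)) {
		struct stripe_head *sh = list_entry(conf->delayed_list.next,
						    struct stripe_head, lru);

		list_del_init(&sh->lru);
		clear_bit(STRIPE_DELAYED, &sh->state);
		if (!test_and_set_bit(STRIPE_HANDLE, &sh->state))
			atomic_inc(&conf->preread_active_stripes);
		list_add_tail(&sh->lru, &conf->handle_list);
	}
}
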
typedef struct raid5_private_data raid5_conf_t;

#define mddev_to_conf(mddev) ((raid5_conf_t *) mddev->private)
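
/*
 * Illustrative sketch (not part of the original header) of the
 * release_stripe() transition listed earlier: drop a reference and,
 * when it hits zero, file the stripe on handle_list or inactive_list.
 * Caller holds conf->device_lock.
 */
static inline void release_stripe_sketch(raid5_conf_t *conf,
					 struct stripe_head *sh)
{
	if (!atomic_dec_and_test(&sh->count))
		return;
	if (test_bit(STRIPE_HANDLE, &sh->state)) {
		list_add_tail(&sh->lru, &conf->handle_list);
		md_wakeup_thread(conf->mddev->thread);	/* let raid5d run */
	} else {
		list_add_tail(&sh->lru, &conf->inactive_list);
		wake_up(&conf->wait_for_stripe);	/* a stripe is free */
	}
}
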
/*
 * Our supported algorithms
 */
#define ALGORITHM_LEFT_ASYMMETRIC	0
#define ALGORITHM_RIGHT_ASYMMETRIC	1
#define ALGORITHM_LEFT_SYMMETRIC	2
#define ALGORITHM_RIGHT_SYMMETRIC	3

#endif
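
For reference, a sketch of how the four layout algorithms place the
parity disk and remap a logical data-disk index, loosely following
raid5_compute_sector() in raid5.c (stripe, dd_idx and pd_idx are as
used there; this is an illustration, not the driver's exact code):

static void compute_layout(raid5_conf_t *conf, unsigned long stripe,
			   int raid_disks, int data_disks,
			   int *dd_idx, int *pd_idx)
{
	switch (conf->algorithm) {
	case ALGORITHM_LEFT_ASYMMETRIC:
		*pd_idx = data_disks - stripe % raid_disks;
		if (*dd_idx >= *pd_idx)
			(*dd_idx)++;		/* skip over the parity disk */
		break;
	case ALGORITHM_RIGHT_ASYMMETRIC:
		*pd_idx = stripe % raid_disks;
		if (*dd_idx >= *pd_idx)
			(*dd_idx)++;
		break;
	case ALGORITHM_LEFT_SYMMETRIC:
		*pd_idx = data_disks - stripe % raid_disks;
		*dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
		break;
	case ALGORITHM_RIGHT_SYMMETRIC:
		*pd_idx = stripe % raid_disks;
		*dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
		break;
	}
}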