md: stop using for_each_mddev in md_exit
Just do a simple list_for_each_entry_safe on all_mddevs, only grab a reference when we drop the lock, and delete the now unused for_each_mddev macro.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Logan Gunthorpe <logang@deltatee.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Song Liu <song@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent f265143422
commit 16648bac86
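
For context before the diff: the pattern the patch adopts — pin the current entry with a reference, drop the list lock for the slow work, then drop the reference and re-take the lock — can be sketched outside the kernel. Below is a minimal, self-contained userspace C model of that loop. Everything in it (node, node_get, node_put, teardown_all, the open-coded singly linked list) is a hypothetical stand-in for struct mddev, mddev_get/mddev_put, and all_mddevs; it is not the kernel API.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for struct mddev; refcount models mddev->active. */
struct node {
	int refcount;
	int id;
	struct node *next;
};

/* Stand-ins for all_mddevs and all_mddevs_lock. */
static struct node *head;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Like mddev_get(): caller must hold list_lock. */
static void node_get(struct node *n)
{
	n->refcount++;
}

/* Like mddev_put(): on the last reference, unlink and free. */
static void node_put(struct node *n)
{
	pthread_mutex_lock(&list_lock);
	if (--n->refcount == 0) {
		for (struct node **pp = &head; *pp; pp = &(*pp)->next) {
			if (*pp == n) {
				*pp = n->next;	/* list_del() equivalent */
				break;
			}
		}
		printf("freeing node %d\n", n->id);
		free(n);
	}
	pthread_mutex_unlock(&list_lock);
}

/* The shape of the new md_exit() loop. */
static void teardown_all(void)
{
	struct node *cur, *next;

	pthread_mutex_lock(&list_lock);
	cur = head;
	while (cur) {
		next = cur->next;	/* "safe" iteration: save next first */
		node_get(cur);		/* pin cur before dropping the lock */
		pthread_mutex_unlock(&list_lock);

		printf("tearing down node %d\n", cur->id);	/* slow work, unlocked */

		node_put(cur);		/* may be the last ref: cur is dead after this */
		pthread_mutex_lock(&list_lock);
		cur = next;		/* still valid: nothing else mutates the list at exit */
	}
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	for (int i = 3; i >= 1; i--) {
		struct node *n = calloc(1, sizeof(*n));
		if (!n)
			return 1;
		n->id = i;
		n->next = head;
		head = n;
	}
	teardown_all();
	return 0;
}

The one subtlety the sketch makes visible: next is captured while the lock is held, and it remains valid across the unlocked window only because nothing else adds or removes entries at module-exit time — the same assumption md_exit() makes of list_for_each_entry_safe. The deleted for_each_mddev macro hid the equivalent get/put juggling inside its for-header, which is also why its comment warned that code breaking out of the loop early still owns a reference to the current mddev and must mddev_put it.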
@@ -368,28 +368,6 @@ EXPORT_SYMBOL_GPL(md_new_event);
 static LIST_HEAD(all_mddevs);
 static DEFINE_SPINLOCK(all_mddevs_lock);
 
-/*
- * iterates through all used mddevs in the system.
- * We take care to grab the all_mddevs_lock whenever navigating
- * the list, and to always hold a refcount when unlocked.
- * Any code which breaks out of this loop while own
- * a reference to the current mddev and must mddev_put it.
- */
-#define for_each_mddev(_mddev,_tmp)					\
-									\
-	for (({ spin_lock(&all_mddevs_lock);				\
-		_tmp = all_mddevs.next;					\
-		_mddev = NULL;});					\
-	     ({ if (_tmp != &all_mddevs)				\
-			mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
-		spin_unlock(&all_mddevs_lock);				\
-		if (_mddev) mddev_put(_mddev);				\
-		_mddev = list_entry(_tmp, struct mddev, all_mddevs);	\
-		_tmp != &all_mddevs;});					\
-	     ({ spin_lock(&all_mddevs_lock);				\
-		_tmp = _tmp->next;})					\
-		)
-
 /* Rather than calling directly into the personality make_request function,
  * IO requests come here first so that we can check if the device is
  * being suspended pending a reconfiguration.
@@ -9923,8 +9901,7 @@ void md_autostart_arrays(int part)
 
 static __exit void md_exit(void)
 {
-	struct mddev *mddev;
-	struct list_head *tmp;
+	struct mddev *mddev, *n;
 	int delay = 1;
 
 	unregister_blkdev(MD_MAJOR,"md");
@@ -9944,17 +9921,23 @@ static __exit void md_exit(void)
 	}
 	remove_proc_entry("mdstat", NULL);
 
-	for_each_mddev(mddev, tmp) {
+	spin_lock(&all_mddevs_lock);
+	list_for_each_entry_safe(mddev, n, &all_mddevs, all_mddevs) {
+		mddev_get(mddev);
+		spin_unlock(&all_mddevs_lock);
 		export_array(mddev);
 		mddev->ctime = 0;
 		mddev->hold_active = 0;
 		/*
-		 * for_each_mddev() will call mddev_put() at the end of each
-		 * iteration.  As the mddev is now fully clear, this will
-		 * schedule the mddev for destruction by a workqueue, and the
+		 * As the mddev is now fully clear, mddev_put will schedule
+		 * the mddev for destruction by a workqueue, and the
 		 * destroy_workqueue() below will wait for that to complete.
 		 */
+		mddev_put(mddev);
+		spin_lock(&all_mddevs_lock);
 	}
+	spin_unlock(&all_mddevs_lock);
 
 	destroy_workqueue(md_rdev_misc_wq);
 	destroy_workqueue(md_misc_wq);
 	destroy_workqueue(md_wq);