dm kcopyd: remove superfluous page allocation spinlock
Remove the spinlock protecting the page allocation. The spinlock is only
taken during client initialization or from the single-threaded kcopyd
workqueue; in both cases there is nothing to serialize against, so the
lock is superfluous.
The spinlock is taken in kcopyd_get_pages and kcopyd_put_pages.
kcopyd_get_pages is only called from run_pages_job, which is only
called from process_jobs, which in turn is called from do_work.
kcopyd_put_pages is called either from client_alloc_pages (an
initialization function) or from run_complete_job, and run_complete_job
is likewise only called from process_jobs from do_work.
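
For reference, a rough sketch of the worker entry point (abbreviated
from drivers/md/dm-kcopyd.c, not the verbatim function) shows why these
paths cannot run concurrently: the workqueue is created with
create_singlethread_workqueue(), so at most one instance of do_work()
is in flight at any time.

static void do_work(struct work_struct *work)
{
	struct dm_kcopyd_client *kc = container_of(work,
				struct dm_kcopyd_client, kcopyd_work);

	/*
	 * kc->kcopyd_wq is a single-threaded workqueue, so only one
	 * instance of do_work() runs at a time.  Hence run_complete_job()
	 * (-> kcopyd_put_pages) and run_pages_job() (-> kcopyd_get_pages)
	 * are serialized against each other.
	 */
	process_jobs(&kc->complete_jobs, kc, run_complete_job);
	process_jobs(&kc->pages_jobs, kc, run_pages_job);
	process_jobs(&kc->io_jobs, kc, run_io_job);
}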
Another spinlock, kc->job_lock, is taken each time work is pushed to or
popped from the worker thread's job lists. Taking kc->job_lock already
guarantees that any memory written before the lock was last released is
visible to the other CPUs, so no extra lock is needed for the page pool.
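
The push/pop helpers look roughly like this (simplified sketch, not the
exact driver code); the spin_lock/spin_unlock pair on kc->job_lock is
what provides the ordering between the thread that queues a job and the
worker that later processes it:

static void push(struct list_head *jobs, struct kcopyd_job *job)
{
	unsigned long flags;
	struct dm_kcopyd_client *kc = job->kc;

	/* The unlock publishes everything written before it ... */
	spin_lock_irqsave(&kc->job_lock, flags);
	list_add_tail(&job->list, jobs);
	spin_unlock_irqrestore(&kc->job_lock, flags);
}

static struct kcopyd_job *pop(struct list_head *jobs,
			      struct dm_kcopyd_client *kc)
{
	struct kcopyd_job *job = NULL;
	unsigned long flags;

	/* ... and the lock here makes it visible to the worker. */
	spin_lock_irqsave(&kc->job_lock, flags);
	if (!list_empty(jobs)) {
		job = list_entry(jobs->next, struct kcopyd_job, list);
		list_del(&job->list);
	}
	spin_unlock_irqrestore(&kc->job_lock, flags);

	return job;
}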
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 24fb42e..ed957791 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -36,7 +36,6 @@
* pages for kcopyd io.
*---------------------------------------------------------------*/
struct dm_kcopyd_client {
- spinlock_t lock;
struct page_list *pages;
unsigned int nr_pages;
unsigned int nr_free_pages;
@@ -99,11 +98,8 @@
{
struct page_list *pl;
- spin_lock(&kc->lock);
- if (kc->nr_free_pages < nr) {
- spin_unlock(&kc->lock);
+ if (kc->nr_free_pages < nr)
return -ENOMEM;
- }
kc->nr_free_pages -= nr;
for (*pages = pl = kc->pages; --nr; pl = pl->next)
@@ -112,8 +108,6 @@
kc->pages = pl->next;
pl->next = NULL;
- spin_unlock(&kc->lock);
-
return 0;
}
@@ -121,14 +115,12 @@
{
struct page_list *cursor;
- spin_lock(&kc->lock);
for (cursor = pl; cursor->next; cursor = cursor->next)
kc->nr_free_pages++;
kc->nr_free_pages++;
cursor->next = kc->pages;
kc->pages = pl;
- spin_unlock(&kc->lock);
}
/*
@@ -625,7 +617,6 @@
if (!kc)
return -ENOMEM;
- spin_lock_init(&kc->lock);
spin_lock_init(&kc->job_lock);
INIT_LIST_HEAD(&kc->complete_jobs);
INIT_LIST_HEAD(&kc->io_jobs);