blk-mq: Replace tags->lock with SRCU for tag iterators
Replace the spinlock in blk_mq_find_and_get_req() with an SRCU read lock
around the tag iterators. This is done by:

- Holding the SRCU read lock in blk_mq_queue_tag_busy_iter(),
  blk_mq_tagset_busy_iter(), and blk_mq_hctx_has_requests().
- Removing the now-redundant tags->lock from blk_mq_find_and_get_req().

This change fixes a lockup issue in scsi_host_busy() in case of
shost->host_blocked. It also avoids taking the big tags->lock when reading
the disk sysfs attribute `inflight`.

Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
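For orientation, here is a minimal, self-contained sketch of the pattern the patch switches to; it is not the blk-mq code itself. Iterators enter an SRCU read-side critical section instead of taking tags->lock, and teardown waits for an SRCU grace period instead of using an empty lock/unlock pair as a barrier. The srcu_struct plays the role of tags_srcu in the diff below; struct foo_req, foo_iter() and foo_clear() are made-up names, and init_srcu_struct(&tags->srcu) is assumed to have been called at setup time.

#include <linux/srcu.h>
#include <linux/compiler.h>

struct foo_req {
	int tag;
};

struct foo_tags {
	struct srcu_struct srcu;	/* plays the role of tagset->tags_srcu */
	struct foo_req *rqs[64];	/* tag -> request mapping, as in tags->rqs[] */
};

/* Reader side: what the tag iterators now do instead of taking tags->lock. */
static void foo_iter(struct foo_tags *tags, void (*fn)(struct foo_req *))
{
	int srcu_idx, i;

	srcu_idx = srcu_read_lock(&tags->srcu);
	for (i = 0; i < 64; i++) {
		struct foo_req *rq = READ_ONCE(tags->rqs[i]);

		if (rq)
			fn(rq);		/* rq stays valid until srcu_read_unlock() */
	}
	srcu_read_unlock(&tags->srcu, srcu_idx);
}

/* Teardown side: replaces the empty spin_lock_irqsave()/unlock "barrier". */
static void foo_clear(struct foo_tags *tags)
{
	int i;

	for (i = 0; i < 64; i++)
		WRITE_ONCE(tags->rqs[i], NULL);

	/*
	 * Wait until every iterator that might still see the old pointers
	 * has left its SRCU read-side critical section; only then may the
	 * requests be freed.
	 */
	synchronize_srcu(&tags->srcu);
}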
@@ -256,13 +256,10 @@ static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
 		unsigned int bitnr)
 {
 	struct request *rq;
-	unsigned long flags;
 
-	spin_lock_irqsave(&tags->lock, flags);
 	rq = tags->rqs[bitnr];
 	if (!rq || rq->tag != bitnr || !req_ref_inc_not_zero(rq))
 		rq = NULL;
-	spin_unlock_irqrestore(&tags->lock, flags);
 	return rq;
 }
 
@@ -440,7 +437,9 @@ void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
 		busy_tag_iter_fn *fn, void *priv)
 {
 	unsigned int flags = tagset->flags;
-	int i, nr_tags;
+	int i, nr_tags, srcu_idx;
+
+	srcu_idx = srcu_read_lock(&tagset->tags_srcu);
 
 	nr_tags = blk_mq_is_shared_tags(flags) ? 1 : tagset->nr_hw_queues;
 
@@ -449,6 +448,7 @@ void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
 			__blk_mq_all_tag_iter(tagset->tags[i], fn, priv,
 					      BT_TAG_ITER_STARTED);
 	}
+	srcu_read_unlock(&tagset->tags_srcu, srcu_idx);
 }
 EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
 
@@ -499,6 +499,8 @@ EXPORT_SYMBOL(blk_mq_tagset_wait_completed_request);
 void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
 		void *priv)
 {
+	int srcu_idx;
+
 	/*
 	 * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and hctx_table
 	 * while the queue is frozen. So we can use q_usage_counter to avoid
@@ -507,6 +509,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
 	if (!percpu_ref_tryget(&q->q_usage_counter))
 		return;
 
+	srcu_idx = srcu_read_lock(&q->tag_set->tags_srcu);
 	if (blk_mq_is_shared_tags(q->tag_set->flags)) {
 		struct blk_mq_tags *tags = q->tag_set->shared_tags;
 		struct sbitmap_queue *bresv = &tags->breserved_tags;
@@ -536,6 +539,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
 			bt_for_each(hctx, q, btags, fn, priv, false);
 		}
 	}
+	srcu_read_unlock(&q->tag_set->tags_srcu, srcu_idx);
 	blk_queue_exit(q);
 }
 
@@ -3415,7 +3415,6 @@ static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags,
 		struct blk_mq_tags *tags)
 {
 	struct page *page;
-	unsigned long flags;
 
 	/*
 	 * There is no need to clear mapping if driver tags is not initialized
@@ -3439,15 +3438,6 @@ static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags,
 			}
 		}
 	}
-
-	/*
-	 * Wait until all pending iteration is done.
-	 *
-	 * Request reference is cleared and it is guaranteed to be observed
-	 * after the ->lock is released.
-	 */
-	spin_lock_irqsave(&drv_tags->lock, flags);
-	spin_unlock_irqrestore(&drv_tags->lock, flags);
 }
 
 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
@@ -3670,8 +3660,12 @@ static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx)
 	struct rq_iter_data data = {
 		.hctx = hctx,
 	};
+	int srcu_idx;
 
+	srcu_idx = srcu_read_lock(&hctx->queue->tag_set->tags_srcu);
 	blk_mq_all_tag_iter(tags, blk_mq_has_request, &data);
+	srcu_read_unlock(&hctx->queue->tag_set->tags_srcu, srcu_idx);
+
 	return data.has_rq;
 }
 
@@ -3891,7 +3885,6 @@ static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags,
 		unsigned int queue_depth, struct request *flush_rq)
 {
 	int i;
-	unsigned long flags;
 
 	/* The hw queue may not be mapped yet */
 	if (!tags)
@@ -3901,15 +3894,6 @@ static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags,
 
 	for (i = 0; i < queue_depth; i++)
 		cmpxchg(&tags->rqs[i], flush_rq, NULL);
-
-	/*
-	 * Wait until all pending iteration is done.
-	 *
-	 * Request reference is cleared and it is guaranteed to be observed
-	 * after the ->lock is released.
-	 */
-	spin_lock_irqsave(&tags->lock, flags);
-	spin_unlock_irqrestore(&tags->lock, flags);
 }
 
 static void blk_free_flush_queue_callback(struct rcu_head *head)
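Note that these hunks only show the read side: where tags_srcu is declared and where the request/tag memory is actually released once readers are done fall outside this excerpt. One plausible shape for that deferred-free side, sketched with call_srcu() and entirely hypothetical names (foo_page, foo_page_free_rcu, foo_release_page), is shown below; the actual commit may free the memory differently.

#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/kernel.h>

struct foo_page {
	struct rcu_head rcu_head;	/* links the deferred free */
	/* ... request storage lives here ... */
};

static void foo_page_free_rcu(struct rcu_head *head)
{
	struct foo_page *page = container_of(head, struct foo_page, rcu_head);

	/* No tag iterator can still dereference this page now. */
	kfree(page);
}

/*
 * Instead of blocking in synchronize_srcu(), queue the free to run once
 * all SRCU readers that might still see the old rqs[] pointers have left
 * their read-side critical sections.
 */
static void foo_release_page(struct srcu_struct *srcu, struct foo_page *page)
{
	call_srcu(srcu, &page->rcu_head, foo_page_free_rcu);
}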