From e22ccbe9c20a190c505bd7a0eac569e96b8b841e Mon Sep 17 00:00:00 2001
From: Johnny Mnemonic
Date: Wed, 31 Jul 2024 10:46:49 +0200
Subject: [PATCH] Drop 6259151c04d4e0085e00d2dcb471ebdd1778e72e, as fixed in
 linux-6.1.y

With 39823b47bbd40502632ffba90ebb34fff7c8b5e8 dropped in 6.1.103-rc2,
6259151c04d4e0085e00d2dcb471ebdd1778e72e is no longer needed.

See https://lore.kernel.org/stable/2024073137-scouting-wooing-ec33@gregkh/#t
---
 ...151c04d4e0085e00d2dcb471ebdd1778e72e.patch | 52 -------------------
 1 file changed, 52 deletions(-)
 delete mode 100644 patches/linux-6.1.y/6259151c04d4e0085e00d2dcb471ebdd1778e72e.patch

diff --git a/patches/linux-6.1.y/6259151c04d4e0085e00d2dcb471ebdd1778e72e.patch b/patches/linux-6.1.y/6259151c04d4e0085e00d2dcb471ebdd1778e72e.patch
deleted file mode 100644
index 2501115952344..0000000000000
--- a/patches/linux-6.1.y/6259151c04d4e0085e00d2dcb471ebdd1778e72e.patch
+++ /dev/null
@@ -1,52 +0,0 @@
-From 6259151c04d4e0085e00d2dcb471ebdd1778e72e Mon Sep 17 00:00:00 2001
-From: Bart Van Assche
-Date: Thu, 9 May 2024 10:01:48 -0700
-Subject: [PATCH] block: Call .limit_depth() after .hctx has been set
-
-Call .limit_depth() after data->hctx has been set such that data->hctx can
-be used in .limit_depth() implementations.
-
-Cc: Christoph Hellwig
-Cc: Damien Le Moal
-Cc: Zhiguo Niu
-Fixes: 07757588e507 ("block/mq-deadline: Reserve 25% of scheduler tags for synchronous requests")
-Signed-off-by: Bart Van Assche
-Tested-by: Zhiguo Niu
-Reviewed-by: Christoph Hellwig
-Link: https://lore.kernel.org/r/20240509170149.7639-2-bvanassche@acm.org
-Signed-off-by: Jens Axboe
----
- block/blk-mq.c | 12 ++++++------
- 1 file changed, 6 insertions(+), 6 deletions(-)
-
-diff --git a/block/blk-mq.c b/block/blk-mq.c
-index fec2dea5c6e885..e3c3c0c21b5536 100644
---- a/block/blk-mq.c
-+++ b/block/blk-mq.c
-@@ -448,6 +448,10 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
- 	if (data->cmd_flags & REQ_NOWAIT)
- 		data->flags |= BLK_MQ_REQ_NOWAIT;
- 
-+retry:
-+	data->ctx = blk_mq_get_ctx(q);
-+	data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
-+
- 	if (q->elevator) {
- 		/*
- 		 * All requests use scheduler tags when an I/O scheduler is
-@@ -466,13 +470,9 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
- 		    e->type->ops.limit_depth &&
- 		    !(data->flags & BLK_MQ_REQ_RESERVED))
- 			e->type->ops.limit_depth(data->cmd_flags, data);
--	}
--
--retry:
--	data->ctx = blk_mq_get_ctx(q);
--	data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
--	if (!(data->rq_flags & RQF_ELV))
-+	} else {
- 		blk_mq_tag_busy(data->hctx);
-+	}
- 
- 	if (data->flags & BLK_MQ_REQ_RESERVED)
- 		data->rq_flags |= RQF_RESV;
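
Note on the dropped backport, for reviewers of this tree: the reordering in upstream commit 6259151c exists so that an elevator's .limit_depth() hook can rely on data->hctx already being mapped when it runs; without it, only request_queue-level state is safe to touch there. Below is a minimal, illustrative sketch of such a hook, loosely modeled on mq-deadline's dd_limit_depth(). The example_* names and the async_depth field are assumptions for illustration, not code from the patch above or from linux-6.1.y.

/*
 * Illustrative sketch only (not from the patch above): a scheduler's
 * .limit_depth() hook caps the shallow allocation depth used for
 * asynchronous requests so they cannot starve synchronous ones.
 * With the reordering from commit 6259151c, data->hctx is mapped
 * before this hook is called, so an implementation could also consult
 * per-hctx state such as data->hctx->sched_tags; the fields of
 * struct example_sched_data are hypothetical.
 */
static void example_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
{
        struct example_sched_data *esd = data->q->elevator->elevator_data;

        /* Never throttle synchronous reads. */
        if (op_is_sync(opf) && !op_is_write(opf))
                return;

        /* Leave part of the tag space free for synchronous requests. */
        data->shallow_depth = esd->async_depth;
}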