md/raid10: use the atomic queue limit update APIs
author: Christoph Hellwig <hch@lst.de>
Sun, 3 Mar 2024 14:01:47 +0000 (07:01 -0700)
committer: Song Liu <song@kernel.org>
Wed, 6 Mar 2024 16:59:53 +0000 (08:59 -0800)
Build the queue limits outside the queue and apply them using
queue_limits_set. To make the code more obvious, also split the queue
limits handling into separate helpers.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Song Liu <song@kernel.org>
Tested-by: Song Liu <song@kernel.org>
Signed-off-by: Song Liu <song@kernel.org>
Link: https://lore.kernel.org/r/20240303140150.5435-9-hch@lst.de
drivers/md/raid10.c

index 4021cf06b3a616b994297aecf6592b685cba6d1c..e96fdf47319fd088de77c4e4b5ed10a62b711ccd 100644 (file)
@@ -2106,10 +2106,9 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
                        continue;
                }
 
-               if (!mddev_is_dm(mddev))
-                       disk_stack_limits(mddev->gendisk, rdev->bdev,
-                                         rdev->data_offset << 9);
-
+               err = mddev_stack_new_rdev(mddev, rdev);
+               if (err)
+                       return err;
                p->head_position = 0;
                p->recovery_disabled = mddev->recovery_disabled - 1;
                rdev->raid_disk = mirror;
@@ -2125,10 +2124,9 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
                clear_bit(In_sync, &rdev->flags);
                set_bit(Replacement, &rdev->flags);
                rdev->raid_disk = repl_slot;
-               err = 0;
-               if (!mddev_is_dm(mddev))
-                       disk_stack_limits(mddev->gendisk, rdev->bdev,
-                                         rdev->data_offset << 9);
+               err = mddev_stack_new_rdev(mddev, rdev);
+               if (err)
+                       return err;
                conf->fullsync = 1;
                WRITE_ONCE(p->replacement, rdev);
        }
@@ -3969,14 +3967,26 @@ static struct r10conf *setup_conf(struct mddev *mddev)
        return ERR_PTR(err);
 }
 
-static void raid10_set_io_opt(struct r10conf *conf)
+static unsigned int raid10_nr_stripes(struct r10conf *conf)
 {
-       int raid_disks = conf->geo.raid_disks;
+       unsigned int raid_disks = conf->geo.raid_disks;
+
+       if (conf->geo.raid_disks % conf->geo.near_copies)
+               return raid_disks;
+       return raid_disks / conf->geo.near_copies;
+}
 
-       if (!(conf->geo.raid_disks % conf->geo.near_copies))
-               raid_disks /= conf->geo.near_copies;
-       blk_queue_io_opt(conf->mddev->queue, (conf->mddev->chunk_sectors << 9) *
-                        raid_disks);
+static int raid10_set_queue_limits(struct mddev *mddev)
+{
+       struct r10conf *conf = mddev->private;
+       struct queue_limits lim;
+
+       blk_set_stacking_limits(&lim);
+       lim.max_write_zeroes_sectors = 0;
+       lim.io_min = mddev->chunk_sectors << 9;
+       lim.io_opt = lim.io_min * raid10_nr_stripes(conf);
+       mddev_stack_rdev_limits(mddev, &lim);
+       return queue_limits_set(mddev->queue, &lim);
 }
 
 static int raid10_run(struct mddev *mddev)
@@ -3988,6 +3998,7 @@ static int raid10_run(struct mddev *mddev)
        sector_t size;
        sector_t min_offset_diff = 0;
        int first = 1;
+       int ret = -EIO;
 
        if (mddev->private == NULL) {
                conf = setup_conf(mddev);
@@ -4014,12 +4025,6 @@ static int raid10_run(struct mddev *mddev)
                }
        }
 
-       if (!mddev_is_dm(conf->mddev)) {
-               blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
-               blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
-               raid10_set_io_opt(conf);
-       }
-
        rdev_for_each(rdev, mddev) {
                long long diff;
 
@@ -4048,14 +4053,16 @@ static int raid10_run(struct mddev *mddev)
                if (first || diff < min_offset_diff)
                        min_offset_diff = diff;
 
-               if (!mddev_is_dm(mddev))
-                       disk_stack_limits(mddev->gendisk, rdev->bdev,
-                                         rdev->data_offset << 9);
-
                disk->head_position = 0;
                first = 0;
        }
 
+       if (!mddev_is_dm(conf->mddev)) {
+               ret = raid10_set_queue_limits(mddev);
+               if (ret)
+                       goto out_free_conf;
+       }
+
        /* need to check that every block has at least one working mirror */
        if (!enough(conf, -1)) {
                pr_err("md/raid10:%s: not enough operational mirrors.\n",
@@ -4156,7 +4163,7 @@ out_free_conf:
        raid10_free_conf(conf);
        mddev->private = NULL;
 out:
-       return -EIO;
+       return ret;
 }
 
 static void raid10_free(struct mddev *mddev, void *priv)
@@ -4933,8 +4940,7 @@ static void end_reshape(struct r10conf *conf)
        conf->reshape_safe = MaxSector;
        spin_unlock_irq(&conf->device_lock);
 
-       if (!mddev_is_dm(conf->mddev))
-               raid10_set_io_opt(conf);
+       mddev_update_io_opt(conf->mddev, raid10_nr_stripes(conf));
        conf->fullsync = 0;
 }