@@ -281,8 +281,8 @@ static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
return ret;
}
-static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
- struct sg_io_hdr *hdr, fmode_t mode)
+int sg_io(struct request_queue *q, struct gendisk *bd_disk,
+ struct sg_io_hdr *hdr, fmode_t mode)
{
unsigned long start_time;
ssize_t ret = 0;
@@ -367,6 +367,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
blk_put_request(rq);
return ret;
}
+EXPORT_SYMBOL_GPL(sg_io);
/**
* sg_scsi_ioctl -- handle deprecated SCSI_IOCTL_SEND_COMMAND ioctl
@@ -473,6 +473,17 @@ config DM_MULTIPATH_IOA
If unsure, say N.
+config DM_MULTIPATH_SG_IO
+ bool "Retry SCSI generic I/O on multipath devices"
+ depends on DM_MULTIPATH && BLK_SCSI_REQUEST
+ help
+ With this option, SCSI generic (SG) requests issued on multipath
+ devices will behave similarly to regular block I/O: upon failure,
+ they are retried on a different path, and the path that returned
+ the error is marked as failed.
+
+ If unsure, say N.
+
config DM_DELAY
tristate "I/O delaying target"
depends on BLK_DEV_DM
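
(Not part of the patch: a minimal userspace sketch of the kind of request this
option covers, sending TEST UNIT READY through SG_IO. The /dev/dm-0 node is an
assumption; with the option enabled, a path error on such a request is retried
on another path instead of being returned to the caller.)

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

int main(void)
{
	unsigned char cdb[6] = { 0 };	/* TEST UNIT READY: all-zero 6-byte CDB */
	unsigned char sense[32];
	struct sg_io_hdr hdr;
	int fd = open("/dev/dm-0", O_RDWR);	/* assumed multipath node */

	if (fd < 0)
		return 1;
	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id = 'S';			/* SG v3 header, as checked by the patch */
	hdr.cmd_len = sizeof(cdb);
	hdr.cmdp = cdb;
	hdr.dxfer_direction = SG_DXFER_NONE;	/* no data transfer */
	hdr.mx_sb_len = sizeof(sense);
	hdr.sbp = sense;
	hdr.timeout = 10000;			/* in milliseconds */
	if (ioctl(fd, SG_IO, &hdr) < 0)
		perror("SG_IO");
	close(fd);
	return 0;
}
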
@@ -189,4 +189,9 @@ extern atomic_t dm_global_event_nr;
extern wait_queue_head_t dm_global_eventq;
void dm_issue_global_event(void);
+int __dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
+ struct block_device **bdev,
+ struct dm_target **target);
+void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx);
+
#endif
@@ -11,6 +11,7 @@
#include "dm-bio-record.h"
#include "dm-path-selector.h"
#include "dm-uevent.h"
+#include "dm-core.h"
#include <linux/blkdev.h>
#include <linux/ctype.h>
@@ -26,6 +27,7 @@
#include <scsi/scsi_dh.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>
+#include <scsi/sg.h>
#define DM_MSG_PREFIX "multipath"
#define DM_PG_INIT_DELAY_MSECS 2000
@@ -2129,6 +2131,106 @@ static int multipath_busy(struct dm_target *ti)
return busy;
}
+#ifdef CONFIG_DM_MULTIPATH_SG_IO
+static int pgpath_sg_io_ioctl(struct block_device *bdev,
+ struct sg_io_hdr *hdr, struct multipath *m,
+ fmode_t mode)
+{
+ int rc;
+ blk_status_t sts;
+ struct priority_group *pg;
+ struct pgpath *pgpath;
+ char path_name[BDEVNAME_SIZE];
+
+ rc = sg_io(bdev->bd_disk->queue, bdev->bd_disk, hdr, mode);
+ DMDEBUG("SG_IO via %s: rc = %d D%02xH%02xM%02xS%02x",
+ bdevname(bdev, path_name), rc,
+ hdr->driver_status, hdr->host_status,
+ hdr->msg_status, hdr->status);
+
+ /*
+ * Errors resulting from invalid parameters shouldn't be retried
+ * on another path.
+ */
+ switch (rc) {
+ case -ENOIOCTLCMD:
+ case -EFAULT:
+ case -EINVAL:
+ case -EPERM:
+ return rc;
+ default:
+ break;
+ }
+
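+ /* translate the returned SG_IO status fields into a blk_status_t */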
+ sts = sg_io_to_blk_status(hdr);
+ if (sts == BLK_STS_OK)
+ return 0;
+ else if (!blk_path_error(sts))
+ return blk_status_to_errno(sts);
+
+ /* path error - fail the path */
+ list_for_each_entry(pg, &m->priority_groups, list) {
+ list_for_each_entry(pgpath, &pg->pgpaths, list) {
+ if (pgpath->path.dev->bdev == bdev)
+ fail_path(pgpath);
+ }
+ }
+
+ return -EAGAIN;
+}
+
+static int multipath_sg_io_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long uarg)
+{
+ struct mapped_device *md = bdev->bd_disk->private_data;
+ void __user *arg = (void __user *)uarg;
+ struct sg_io_hdr hdr;
+ int rc;
+ bool suspended;
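+ /* the retry budget only applies while the device is suspended */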
+ int retries = 5;
+
+ if (copy_from_user(&hdr, arg, sizeof(hdr)))
+ return -EFAULT;
+
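+ /* only the SG_IO version 3 interface ('S') is supported */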
+ if (hdr.interface_id != 'S')
+ return -EINVAL;
+
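+ /* reject transfers larger than the queue's hardware limit */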
+ if (hdr.dxfer_len > (queue_max_hw_sectors(bdev->bd_disk->queue) << 9))
+ return -EIO;
+
+ do {
+ struct block_device *path_dev;
+ struct dm_target *tgt;
+ struct sg_io_hdr rhdr;
+ int srcu_idx;
+
+ suspended = false;
+ /* This will fail and break the loop if no valid paths are found */
+ rc = __dm_prepare_ioctl(md, &srcu_idx, &path_dev, &tgt);
+ if (rc == -EAGAIN) {
+ suspended = true;
+ } else if (rc < 0) {
+ DMERR("%s: failed to get path: %d", __func__, rc);
+ } else {
+ /* Need to copy the sg_io_hdr, as it may be modified */
+ rhdr = hdr;
+ rc = pgpath_sg_io_ioctl(path_dev, &rhdr,
+ tgt->private, mode);
+ if (rc == 0 && copy_to_user(arg, &rhdr, sizeof(rhdr)))
+ rc = -EFAULT;
+ }
+ dm_unprepare_ioctl(md, srcu_idx);
+ if (suspended) {
+ DMDEBUG("%s: suspended, retries = %d\n",
+ __func__, retries);
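+ /* wait briefly for the suspended device to be resumed */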
+ msleep(20);
+ }
+ } while (rc == -EAGAIN && (!suspended || retries-- > 0));
+
+ return rc;
+}
+#endif
+
/*-----------------------------------------------------------------
* Module setup
*---------------------------------------------------------------*/
@@ -2153,6 +2255,9 @@ static struct target_type multipath_target = {
.prepare_ioctl = multipath_prepare_ioctl,
.iterate_devices = multipath_iterate_devices,
.busy = multipath_busy,
+#ifdef CONFIG_DM_MULTIPATH_SG_IO
+ .sg_io_ioctl = multipath_sg_io_ioctl,
+#endif
};
static int __init dm_multipath_init(void)
@@ -29,6 +29,7 @@
#include <linux/part_stat.h>
#include <linux/blk-crypto.h>
#include <linux/keyslot-manager.h>
+#include <scsi/sg.h>
#define DM_MSG_PREFIX "core"
@@ -522,8 +523,9 @@ static int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
#define dm_blk_report_zones NULL
#endif /* CONFIG_BLK_DEV_ZONED */
-static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
- struct block_device **bdev)
+int __dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
+ struct block_device **bdev,
+ struct dm_target **target)
{
struct dm_target *tgt;
struct dm_table *map;
@@ -553,13 +555,24 @@ static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
goto retry;
}
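+ /* hand the live target back to callers that ask for it */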
+ if (r >= 0 && target)
+ *target = tgt;
+
return r;
}
+EXPORT_SYMBOL_GPL(__dm_prepare_ioctl);
-static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
+static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
+ struct block_device **bdev)
+{
+ return __dm_prepare_ioctl(md, srcu_idx, bdev, NULL);
+}
+
+void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
{
dm_put_live_table(md, srcu_idx);
}
+EXPORT_SYMBOL_GPL(dm_unprepare_ioctl);
static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
@@ -567,6 +580,13 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
struct mapped_device *md = bdev->bd_disk->private_data;
int r, srcu_idx;
+#ifdef CONFIG_DM_MULTIPATH_SG_IO
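+ /* route SG_IO to the single immutable target so it can retry failed paths */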
+ if (cmd == SG_IO && md->immutable_target &&
+ md->immutable_target->type->sg_io_ioctl)
+ return md->immutable_target->type->sg_io_ioctl(bdev, mode,
+ cmd, arg);
+#endif
+
r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
if (r < 0)
goto out;
@@ -923,6 +923,8 @@ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
struct scsi_ioctl_command __user *);
+extern int sg_io(struct request_queue *q, struct gendisk *gd,
+ struct sg_io_hdr *hdr, fmode_t mode);
extern int get_sg_io_hdr(struct sg_io_hdr *hdr, const void __user *argp);
extern int put_sg_io_hdr(const struct sg_io_hdr *hdr, void __user *argp);
@@ -151,6 +151,10 @@ typedef size_t (*dm_dax_copy_iter_fn)(struct dm_target *ti, pgoff_t pgoff,
void *addr, size_t bytes, struct iov_iter *i);
typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff,
size_t nr_pages);
+
+typedef int (*dm_sg_io_ioctl_fn)(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long uarg);
+
#define PAGE_SECTORS (PAGE_SIZE / 512)
void dm_error(const char *message);
@@ -204,7 +208,9 @@ struct target_type {
dm_dax_copy_iter_fn dax_copy_from_iter;
dm_dax_copy_iter_fn dax_copy_to_iter;
dm_dax_zero_page_range_fn dax_zero_page_range;
-
+#ifdef CONFIG_DM_MULTIPATH_SG_IO
+ dm_sg_io_ioctl_fn sg_io_ioctl;
+#endif
/* For internal device-mapper use. */
struct list_head list;
};