@@ -1101,8 +1101,6 @@ static ssize_t block_size_store(struct config_item *item,
}
da->block_size = val;
- if (da->max_bytes_per_io)
- da->hw_max_sectors = da->max_bytes_per_io / val;
pr_debug("dev[%p]: SE Device block_size changed to %u\n",
da->da_dev, val);
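With max_bytes_per_io removed, hw_max_sectors stays in hw_block_size units and no longer has to be rewritten every time the exported block_size is changed through configfs; the unit conversion is deferred to the point where the limit is reported (see the sketch after the spc_emulate_evpd_b0 hunk below). A before/after illustration with assumed fileio numbers (8 MiB per-I/O cap, 512-byte hardware blocks, block_size switched to 4096), not values taken from this hunk:

	/* old: rescaled on every block_size_store() */
	da->hw_max_sectors = 8388608 / 4096;	/* 2048 sectors of 4 KiB  */
	/* new: fixed at configure time, in hw_block_size units */
	da->hw_max_sectors = 8388608 / 512;	/* 16384 sectors of 512 B */

Both values describe the same 8 MiB cap; only the unit differs.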
@@ -193,7 +193,6 @@ static int fd_configure_device(struct se_device *dev)
}
dev->dev_attrib.hw_block_size = fd_dev->fd_block_size;
- dev->dev_attrib.max_bytes_per_io = FD_MAX_BYTES;
dev->dev_attrib.hw_max_sectors = FD_MAX_BYTES / fd_dev->fd_block_size;
dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
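fd_configure_device() now advertises the fileio backend's per-command cap only through hw_max_sectors, in units of the backing store's hardware block size. Assuming mainline's FD_MAX_BYTES of 8 MiB and a 512-byte fd_block_size for a regular backing file (assumptions here, not shown in this hunk), the worked numbers are:

	hw_max_sectors = FD_MAX_BYTES / fd_block_size
	               = 8388608 / 512  = 16384	/* regular file, 512 B blocks */
	               = 8388608 / 4096 = 2048 	/* block device with 4 KiB logical blocks */

Because that value no longer depends on the exported block_size attribute, the max_bytes_per_io field the old rescale relied on can be dropped from struct se_dev_attrib (last hunk).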
@@ -519,6 +519,7 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
struct se_device *dev = cmd->se_dev;
u32 mtl = 0;
int have_tp = 0, opt, min;
+ u32 io_max_blocks;
/*
* Following spc3r22 section 6.5.3 Block Limits VPD page, when
@@ -557,7 +558,10 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE) /
dev->dev_attrib.block_size;
}
- put_unaligned_be32(min_not_zero(mtl, dev->dev_attrib.hw_max_sectors), &buf[8]);
+ io_max_blocks = mult_frac(dev->dev_attrib.hw_max_sectors,
+ dev->dev_attrib.hw_block_size,
+ dev->dev_attrib.block_size);
+ put_unaligned_be32(min_not_zero(mtl, io_max_blocks), &buf[8]);
/*
* Set OPTIMAL TRANSFER LENGTH
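The new report-time conversion scales hw_max_sectors (counted in hw_block_size units) into the device's exported block_size before it is written into the MAXIMUM TRANSFER LENGTH field of the Block Limits VPD page. mult_frac() splits the multiply-then-divide into quotient and remainder so the intermediate product cannot overflow. A minimal userspace sketch of that arithmetic, using illustrative numbers rather than values from a real device:

#include <stdio.h>
#include <stdint.h>

/*
 * Userspace sketch of the scaling mult_frac() performs in the kernel:
 * (x * numer) / denom, computed piecewise so the intermediate product
 * stays small even when x is already close to the type's maximum.
 */
static uint32_t scale(uint32_t x, uint32_t numer, uint32_t denom)
{
	uint32_t quot = x / denom;
	uint32_t rem  = x % denom;

	return quot * numer + rem * numer / denom;
}

int main(void)
{
	/* Illustrative numbers: 8 MiB backend cap, 512-byte hardware
	 * blocks, 4096-byte exported block_size. */
	uint32_t hw_max_sectors = 8388608 / 512;		/* 16384 */
	uint32_t io_max_blocks  = scale(hw_max_sectors, 512, 4096);

	printf("MAXIMUM TRANSFER LENGTH = %u blocks (%u bytes)\n",
	       io_max_blocks, io_max_blocks * 4096);	/* 2048 blocks, 8 MiB */
	return 0;
}

put_unaligned_be32(min_not_zero(mtl, io_max_blocks), &buf[8]) then reports whichever of the fabric's SG-derived limit (mtl) and the backend limit is smaller, skipping whichever one is zero.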
@@ -712,7 +712,6 @@ struct se_dev_attrib {
u32 unmap_granularity;
u32 unmap_granularity_alignment;
u32 max_write_same_len;
- u32 max_bytes_per_io;
struct se_device *da_dev;
struct config_group da_group;
};