Message ID | 20220630122241.1658-2-a.kovaleva@yadro.com |
---|---|
State | New |
Series | Make target send correct io limits |
On 6/30/22 7:22 AM, Anastasia Kovaleva wrote:
> --- a/drivers/target/target_core_spc.c
> +++ b/drivers/target/target_core_spc.c
> @@ -513,6 +513,7 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
>  	struct se_device *dev = cmd->se_dev;
>  	u32 mtl = 0;
>  	int have_tp = 0, opt, min;
> +	u32 io_max_blocks;
>
>  	/*
>  	 * Following spc3r22 section 6.5.3 Block Limits VPD page, when
> @@ -547,11 +548,15 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
>  	 * XXX: Currently assumes single PAGE_SIZE per scatterlist for fabrics
>  	 * enforcing maximum HW scatter-gather-list entry limit
>  	 */
> +	io_max_blocks = mult_frac(
> +			dev->dev_attrib.hw_max_sectors,
> +			dev->dev_attrib.hw_block_size,
> +			dev->dev_attrib.block_size);

Just some trivial comments. This chunk might be better after the
max_data_sg_nents chunk below because the comment above is for the
max_data_sg_nents chunk.

Also, fix up the tabbing/coding style like:

	io_max_blocks = mult_frac(dev->dev_attrib.hw_max_sectors,
				  dev->dev_attrib.hw_block_size,
				  dev->dev_attrib.block_size);

>  	if (cmd->se_tfo->max_data_sg_nents) {
>  		mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE) /
>  		       dev->dev_attrib.block_size;
>  	}
> -	put_unaligned_be32(min_not_zero(mtl, dev->dev_attrib.hw_max_sectors), &buf[8]);
> +	put_unaligned_be32(min_not_zero(mtl, io_max_blocks), &buf[8]);
>
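For context, the new io_max_blocks value rescales hw_max_sectors, which is kept
in hw_block_size units, into the block_size currently exposed to initiators;
the kernel's mult_frac() does the multiply-and-divide without overflowing the
intermediate product. Below is a minimal stand-alone sketch of that arithmetic:
the mult_frac_u32() helper is a user-space stand-in mirroring the macro's
quotient/remainder trick, and the attribute values are made-up examples, not
values taken from the patch.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Mirrors the quotient/remainder trick of the kernel's mult_frac():
 * computes x * n / d without overflowing the intermediate x * n.
 */
static uint32_t mult_frac_u32(uint32_t x, uint32_t n, uint32_t d)
{
	uint32_t quot = x / d;
	uint32_t rem  = x % d;

	return quot * n + rem * n / d;
}

int main(void)
{
	/* Made-up attribute values: 16384 sectors of 512 bytes (8 MiB)
	 * behind a device configured with a 4096-byte block_size. */
	uint32_t hw_max_sectors = 16384;
	uint32_t hw_block_size  = 512;
	uint32_t block_size     = 4096;

	uint32_t io_max_blocks = mult_frac_u32(hw_max_sectors,
					       hw_block_size, block_size);

	/* Prints 2048: the same 8 MiB limit, now counted in 4096-byte blocks. */
	printf("MAXIMUM TRANSFER LENGTH = %" PRIu32 " blocks\n", io_max_blocks);
	return 0;
}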
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index bbcbbfa72b07..51943369887a 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -1089,8 +1089,6 @@ static ssize_t block_size_store(struct config_item *item,
 	}
 
 	da->block_size = val;
-	if (da->max_bytes_per_io)
-		da->hw_max_sectors = da->max_bytes_per_io / val;
 
 	pr_debug("dev[%p]: SE Device block_size changed to %u\n",
 			da->da_dev, val);
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index e68f1cc8ef98..dcf4c8e47e8b 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -189,7 +189,6 @@ static int fd_configure_device(struct se_device *dev)
 	}
 
 	dev->dev_attrib.hw_block_size = fd_dev->fd_block_size;
-	dev->dev_attrib.max_bytes_per_io = FD_MAX_BYTES;
 	dev->dev_attrib.hw_max_sectors = FD_MAX_BYTES / fd_dev->fd_block_size;
 	dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
 
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index c14441c89bed..c3d90e811617 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -513,6 +513,7 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
 	struct se_device *dev = cmd->se_dev;
 	u32 mtl = 0;
 	int have_tp = 0, opt, min;
+	u32 io_max_blocks;
 
 	/*
 	 * Following spc3r22 section 6.5.3 Block Limits VPD page, when
@@ -547,11 +548,15 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
 	 * XXX: Currently assumes single PAGE_SIZE per scatterlist for fabrics
 	 * enforcing maximum HW scatter-gather-list entry limit
 	 */
+	io_max_blocks = mult_frac(
+			dev->dev_attrib.hw_max_sectors,
+			dev->dev_attrib.hw_block_size,
+			dev->dev_attrib.block_size);
 	if (cmd->se_tfo->max_data_sg_nents) {
 		mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE) /
 		       dev->dev_attrib.block_size;
 	}
-	put_unaligned_be32(min_not_zero(mtl, dev->dev_attrib.hw_max_sectors), &buf[8]);
+	put_unaligned_be32(min_not_zero(mtl, io_max_blocks), &buf[8]);
 
 	/*
 	 * Set OPTIMAL TRANSFER LENGTH
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index c2b36f7d917d..748d1afbadaa 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -709,7 +709,6 @@ struct se_dev_attrib {
 	u32 unmap_granularity;
 	u32 unmap_granularity_alignment;
 	u32 max_write_same_len;
-	u32 max_bytes_per_io;
 	struct se_device *da_dev;
 	struct config_group da_group;
 };
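The configfs and fileio hunks can drop max_bytes_per_io because, once the VPD
B0 handler converts hw_max_sectors itself, block_size_store() no longer needs
to rescale hw_max_sectors every time block_size changes, and the limit reported
to the initiator works out the same. A quick user-space sketch of the
before/after arithmetic, assuming FD_MAX_BYTES is 8 MiB as in
target_core_file.h and using example block sizes chosen only for illustration:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define FD_MAX_BYTES	(8 * 1024 * 1024)	/* 8 MiB, as in target_core_file.h */

int main(void)
{
	uint32_t fd_block_size = 512;	/* hw_block_size of the backing file */
	uint32_t block_size    = 4096;	/* logical block size set via configfs */

	/* Before the patch: block_size_store() rescaled hw_max_sectors from
	 * max_bytes_per_io whenever block_size changed. */
	uint32_t old_limit = FD_MAX_BYTES / block_size;

	/* After the patch: hw_max_sectors stays in hw_block_size units and
	 * spc_emulate_evpd_b0() converts it when filling the B0 page. */
	uint32_t hw_max_sectors = FD_MAX_BYTES / fd_block_size;
	uint32_t new_limit = hw_max_sectors * fd_block_size / block_size;

	/* Both print 2048 blocks (8 MiB expressed in 4 KiB blocks). */
	printf("old = %" PRIu32 ", new = %" PRIu32 "\n", old_limit, new_limit);
	return 0;
}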