@@ -799,17 +799,22 @@ int qla_nvme_register_hba(struct scsi_qla_host *vha)
ha = vha->hw;
tmpl = &qla_nvme_fc_transport;
- if (ql2xnvme_queues < MIN_NVME_HW_QUEUES || ql2xnvme_queues > MAX_NVME_HW_QUEUES) {
+ if (ql2xnvme_queues < MIN_NVME_HW_QUEUES) {
ql_log(ql_log_warn, vha, 0xfffd,
- "ql2xnvme_queues=%d is out of range(MIN:%d - MAX:%d). Resetting ql2xnvme_queues to:%d\n",
- ql2xnvme_queues, MIN_NVME_HW_QUEUES, MAX_NVME_HW_QUEUES,
- DEF_NVME_HW_QUEUES);
+ "ql2xnvme_queues=%d is lower than minimum queues: %d. Resetting ql2xnvme_queues to:%d\n",
+ ql2xnvme_queues, MIN_NVME_HW_QUEUES, DEF_NVME_HW_QUEUES);
ql2xnvme_queues = DEF_NVME_HW_QUEUES;
+ } else if (ql2xnvme_queues > (ha->max_qpairs - 1)) {
+ ql_log(ql_log_warn, vha, 0xfffd,
+ "ql2xnvme_queues=%d is greater than available IRQs: %d. Resetting ql2xnvme_queues to: %d\n",
+ ql2xnvme_queues, (ha->max_qpairs - 1),
+ (ha->max_qpairs - 1));
+ ql2xnvme_queues = ha->max_qpairs - 1;
}
qla_nvme_fc_transport.max_hw_queues =
min((uint8_t)(ql2xnvme_queues),
- (uint8_t)(ha->max_qpairs ? ha->max_qpairs : 1));
+ (uint8_t)((ha->max_qpairs - 1) ? (ha->max_qpairs - 1) : 1));
ql_log(ql_log_info, vha, 0xfffb,
"Number of NVME queues used for this port: %d\n",
@@ -14,7 +14,6 @@
#include "qla_dsd.h"
#define MIN_NVME_HW_QUEUES 1
-#define MAX_NVME_HW_QUEUES 128
#define DEF_NVME_HW_QUEUES 8
#define NVME_ATIO_CMD_OFF 32
@@ -344,7 +344,6 @@ MODULE_PARM_DESC(ql2xnvme_queues,
"Number of NVMe Queues that can be configured.\n"
"Final value will be min(ql2xnvme_queues, num_cpus,num_chip_queues)\n"
"1 - Minimum number of queues supported\n"
- "128 - Maximum number of queues supported\n"
"8 - Default value");
static struct scsi_transport_template *qla2xxx_transport_template = NULL;
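
For reference, a minimal standalone sketch (not part of the patch) of how the effective NVMe hardware queue count is derived after this change: a request below the minimum falls back to the default, a request above the usable qpair count is capped to it, and the result is still bounded by min() as in the driver. The helpers effective_nvme_queues() and min_u8() are hypothetical illustration-only names; the driver itself does this inline in qla_nvme_register_hba() with min() on uint8_t values, and reading "ha->max_qpairs - 1" as the qpairs usable by NVMe follows the log text above.

#include <stdint.h>
#include <stdio.h>

#define MIN_NVME_HW_QUEUES 1
#define DEF_NVME_HW_QUEUES 8

static uint8_t min_u8(uint8_t a, uint8_t b)
{
	return a < b ? a : b;
}

static uint8_t effective_nvme_queues(int requested, int max_qpairs)
{
	int avail = max_qpairs - 1;	/* qpairs left for NVMe per the patch */

	if (requested < MIN_NVME_HW_QUEUES)
		requested = DEF_NVME_HW_QUEUES;	/* out of range low: use default */
	else if (requested > avail)
		requested = avail;		/* cap to what the HBA exposes */

	return min_u8((uint8_t)requested, (uint8_t)(avail ? avail : 1));
}

int main(void)
{
	/* 128 requested but only 31 usable qpairs -> capped to 31 */
	printf("%u\n", effective_nvme_queues(128, 32));
	/* 0 requested -> reset to the default of 8 */
	printf("%u\n", effective_nvme_queues(0, 32));
	return 0;
}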