@@ -359,6 +359,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* initiator and responder
* @IWL_UCODE_TLV_CAPA_MLME_OFFLOAD: supports MLME offload
* @IWL_UCODE_TLV_CAPA_PROTECTED_TWT: Supports protection of TWT action frames
+ * @IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE: Supports the firmware handshake in
+ * reset flow
*
* @NUM_IWL_UCODE_TLV_CAPA: number of bits used
*/
@@ -404,6 +406,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_TAS_CFG = (__force iwl_ucode_tlv_capa_t)53,
IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD = (__force iwl_ucode_tlv_capa_t)54,
IWL_UCODE_TLV_CAPA_PROTECTED_TWT = (__force iwl_ucode_tlv_capa_t)56,
+ IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE = (__force iwl_ucode_tlv_capa_t)57,
/* set 2 */
IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64,
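Capability bit 57 is how the firmware image advertises that it will ACK a reset request; note it lands in the second 32-bit capability word, still below the "set 2" boundary at 64. Everything below is gated on this bit, so older images keep the existing stop flow. A minimal sketch of the runtime check, assuming the fw_has_capa() helper from fw/img.h and the usual mvm pointer (hypothetical wrapper name; the mvm hunk further down does exactly this inline):

	/* sketch: gate the handshake on the advertised capability */
	static bool reset_handshake_supported(struct iwl_mvm *mvm)
	{
		return fw_has_capa(&mvm->fw->ucode_capa,
				   IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE);
	}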
@@ -573,6 +573,7 @@ enum msix_hw_int_causes {
MSIX_HW_INT_CAUSES_REG_ALIVE = BIT(0),
MSIX_HW_INT_CAUSES_REG_WAKEUP = BIT(1),
MSIX_HW_INT_CAUSES_REG_IML = BIT(1),
+ MSIX_HW_INT_CAUSES_REG_RESET_DONE = BIT(2),
MSIX_HW_INT_CAUSES_REG_CT_KILL = BIT(6),
MSIX_HW_INT_CAUSES_REG_RF_KILL = BIT(7),
MSIX_HW_INT_CAUSES_REG_PERIODIC = BIT(8),
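MSIX_HW_INT_CAUSES_REG_RESET_DONE claims HW cause bit 2, which is unused here (WAKEUP and IML deliberately share bit 1). Declaring the cause is only half of the wiring; it still has to be routed to a vector via causes_list, which a later hunk does.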
@@ -55,6 +55,7 @@
/* Device NMI register and value for 9000 family and above hw's */
#define UREG_NIC_SET_NMI_DRIVER 0x00a05c10
#define UREG_NIC_SET_NMI_DRIVER_NMI_FROM_DRIVER BIT(24)
+#define UREG_NIC_SET_NMI_DRIVER_RESET_HANDSHAKE (BIT(24) | BIT(25))
/* Shared registers (0x0..0x3ff, via target indirect or periphery) */
#define SHR_BASE 0x00a10000
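On pre-AX210 devices the request reuses the driver-NMI register: BIT(24) alone is the plain driver NMI, and adding BIT(25) turns the write into a reset-handshake request, which is why the new define includes both bits.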
@@ -399,6 +400,7 @@ enum {
#define UREG_DOORBELL_TO_ISR6 0xA05C04
#define UREG_DOORBELL_TO_ISR6_NMI_BIT BIT(0)
+#define UREG_DOORBELL_TO_ISR6_RESET_HANDSHAKE (BIT(0) | BIT(1))
#define UREG_DOORBELL_TO_ISR6_SUSPEND BIT(18)
#define UREG_DOORBELL_TO_ISR6_RESUME BIT(19)
#define UREG_DOORBELL_TO_ISR6_PNVM BIT(20)
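AX210 and later signal through the ISR6 doorbell instead, with the same two-bit encoding: BIT(0) is the existing NMI doorbell and BIT(1) marks the write as a reset-handshake request.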
@@ -380,6 +380,7 @@ struct iwl_hcmd_arr {
* @command_groups_size: number of command groups, to avoid illegal access
* @cb_data_offs: offset inside skb->cb to store transport data at, must have
* space for at least two pointers
+ * @fw_reset_handshake: firmware supports reset flow handshake
*/
struct iwl_trans_config {
struct iwl_op_mode *op_mode;
@@ -397,6 +398,7 @@ struct iwl_trans_config {
int command_groups_size;
u8 cb_data_offs;
+ bool fw_reset_handshake;
};
struct iwl_trans_dump_data {
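The flag travels through struct iwl_trans_config because the transport layer is configured once by the op mode and should not inspect firmware TLVs itself; routing the boolean through the config struct preserves that layering.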
@@ -728,6 +728,9 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
sizeof(mvm->hw->wiphy->fw_version),
"%s", fw->fw_version);
+ trans_cfg.fw_reset_handshake = fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE);
+
/* Configure transport layer */
iwl_trans_configure(mvm->trans, &trans_cfg);
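The op mode is the layer that understands firmware capabilities, so it translates the TLV bit into the transport flag just before iwl_trans_configure(); from here on the PCIe code only consults the boolean.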
@@ -406,6 +406,10 @@ struct iwl_trans_pcie {
void *base_rb_stts;
dma_addr_t base_rb_stts_dma;
+
+ bool fw_reset_handshake;
+ bool fw_reset_done;
+ wait_queue_head_t fw_reset_waitq;
};
static inline struct iwl_trans_pcie *
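fw_reset_done and fw_reset_waitq form the usual kernel flag-and-waitqueue pair: the stop path clears the flag, pokes the firmware, and sleeps on the queue; the ISR sets the flag and wakes it. A self-contained sketch of that generic pattern (hypothetical names, not part of the patch; it mirrors how fw_reset_done/fw_reset_waitq are used below):

	#include <linux/wait.h>
	#include <linux/jiffies.h>

	static bool done;
	static DECLARE_WAIT_QUEUE_HEAD(ack_waitq);

	/* waiter side: returns 0 on timeout, remaining jiffies otherwise */
	static long wait_for_ack(void)
	{
		done = false;
		/* ... write the request register here ... */
		return wait_event_timeout(ack_waitq, done, HZ / 5);
	}

	/* interrupt side */
	static void on_ack_irq(void)
	{
		done = true;
		wake_up(&ack_waitq);
	}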
@@ -2242,6 +2242,12 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
iwl_pcie_irq_handle_error(trans);
}
+ if (inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE) {
+ IWL_DEBUG_ISR(trans, "Reset flow completed\n");
+ trans_pcie->fw_reset_done = true;
+ wake_up(&trans_pcie->fw_reset_waitq);
+ }
+
iwl_pcie_clear_irq(trans, entry);
lock_map_release(&trans->sync_cmd_lockdep_map);
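When the firmware completes its reset flow it raises the new cause; the handler records completion and wakes whoever is sleeping in the handshake. There is no race with the waiter clearing fw_reset_done, because the flag is cleared before the request register is written and this interrupt can only fire after that write.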
@@ -88,6 +88,28 @@ static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
+static void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int ret;
+
+ trans_pcie->fw_reset_done = false;
+
+ if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ iwl_write_umac_prph(trans, UREG_NIC_SET_NMI_DRIVER,
+ UREG_NIC_SET_NMI_DRIVER_RESET_HANDSHAKE);
+ else
+ iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
+ UREG_DOORBELL_TO_ISR6_RESET_HANDSHAKE);
+
+ /* wait 200ms */
+ ret = wait_event_timeout(trans_pcie->fw_reset_waitq,
+ trans_pcie->fw_reset_done, HZ / 5);
+ if (!ret)
+ IWL_ERR(trans,
+ "firmware didn't ACK the reset - continue anyway\n");
+}
+
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
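The handshake itself is straightforward: clear the completion flag, issue the family-appropriate request (NMI register before AX210, ISR6 doorbell from AX210 on), then sleep up to HZ / 5 jiffies, i.e. the 200 ms the comment promises. A timeout is logged but deliberately non-fatal: the device is being stopped anyway, so the driver simply falls back to the old, un-ACKed stop sequence.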
@@ -97,6 +119,10 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
if (trans_pcie->is_down)
return;
+ if (trans_pcie->fw_reset_handshake &&
+ trans->state >= IWL_TRANS_FW_STARTED)
+ iwl_trans_pcie_fw_reset_handshake(trans);
+
trans_pcie->is_down = true;
/* tell the device to stop sending interrupts */
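The guard conditions matter: the handshake runs only if this image advertised the capability and the firmware was actually started (trans->state >= IWL_TRANS_FW_STARTED); there is nothing to ACK if it never ran. It also sits after the is_down check, so repeated stop calls do not retrigger it.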
@@ -1048,6 +1048,7 @@ static struct iwl_causes_list causes_list[] = {
{MSIX_FH_INT_CAUSES_FH_ERR, CSR_MSIX_FH_INT_MASK_AD, 0x5},
{MSIX_HW_INT_CAUSES_REG_ALIVE, CSR_MSIX_HW_INT_MASK_AD, 0x10},
{MSIX_HW_INT_CAUSES_REG_WAKEUP, CSR_MSIX_HW_INT_MASK_AD, 0x11},
+ {MSIX_HW_INT_CAUSES_REG_RESET_DONE, CSR_MSIX_HW_INT_MASK_AD, 0x12},
{MSIX_HW_INT_CAUSES_REG_CT_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x16},
{MSIX_HW_INT_CAUSES_REG_RF_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x17},
{MSIX_HW_INT_CAUSES_REG_PERIODIC, CSR_MSIX_HW_INT_MASK_AD, 0x18},
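This is the routing half of the interrupt wiring: IVAR entry 0x12 binds HW cause bit 2 to an MSIX vector, filling the gap between WAKEUP (0x11) and CT_KILL (0x16). Without this entry the cause would stay masked and the waiter would always hit its 200 ms timeout.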
@@ -1889,6 +1890,8 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
*/
if (trans_pcie->napi_dev.reg_state != NETREG_DUMMY)
init_dummy_netdev(&trans_pcie->napi_dev);
+
+ trans_pcie->fw_reset_handshake = trans_cfg->fw_reset_handshake;
}
void iwl_trans_pcie_free(struct iwl_trans *trans)
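iwl_trans_pcie_configure() copies the flag into the PCIe-private struct, so the stop path reads trans_pcie->fw_reset_handshake directly rather than holding on to the config.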
@@ -3385,6 +3388,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
spin_lock_init(&trans_pcie->alloc_page_lock);
mutex_init(&trans_pcie->mutex);
init_waitqueue_head(&trans_pcie->ucode_write_waitq);
+ init_waitqueue_head(&trans_pcie->fw_reset_waitq);
trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
WQ_HIGHPRI | WQ_UNBOUND, 1);
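Finally, the waitqueue is initialized at allocation time, next to ucode_write_waitq, so it is valid long before interrupts are enabled or any stop flow can sleep on it.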