Message ID: 1613508745-32324-1-git-send-email-jhugo@codeaurora.org
State: Superseded
Series: mhi_bus: core: Wait for ready state after reset
Hi Jeffrey,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on linus/master]
[also build test WARNING on v5.11 next-20210216]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:    https://github.com/0day-ci/linux/commits/Jeffrey-Hugo/mhi_bus-core-Wait-for-ready-state-after-reset/20210217-045558
base:   https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git f40ddce88593482919761f74910f42f4b84c004b
config: x86_64-randconfig-a013-20210216 (attached as .config)
compiler: clang version 12.0.0 (https://github.com/llvm/llvm-project c9439ca36342fb6013187d0a69aef92736951476)
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # install x86_64 cross compiling tool for clang build
        # apt-get install binutils-x86-64-linux-gnu
        # https://github.com/0day-ci/linux/commit/a9148d0d4715fb099ae777ecd89a1d3fab7eb7aa
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Jeffrey-Hugo/mhi_bus-core-Wait-for-ready-state-after-reset/20210217-045558
        git checkout a9148d0d4715fb099ae777ecd89a1d3fab7eb7aa
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross ARCH=x86_64

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>

All warnings (new ones prefixed by >>):

>> drivers/bus/mhi/core/pm.c:501:8: warning: variable 'cur_state' is uninitialized when used here [-Wuninitialized]
                       cur_state == MHI_PM_SYS_ERR_PROCESS) {
                       ^~~~~~~~~
   drivers/bus/mhi/core/pm.c:451:2: note: variable 'cur_state' is declared here
           enum mhi_pm_state cur_state;
           ^
   1 warning generated.
vim +/cur_state +501 drivers/bus/mhi/core/pm.c

   447
   448  /* Handle shutdown transitions */
   449  static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
   450  {
   451          enum mhi_pm_state cur_state;
   452          struct mhi_event *mhi_event;
   453          struct mhi_cmd_ctxt *cmd_ctxt;
   454          struct mhi_cmd *mhi_cmd;
   455          struct mhi_event_ctxt *er_ctxt;
   456          struct device *dev = &mhi_cntrl->mhi_dev->dev;
   457          int ret, i;
   458
   459          dev_dbg(dev, "Processing disable transition with PM state: %s\n",
   460                  to_mhi_pm_state_str(mhi_cntrl->pm_state));
   461
   462          mutex_lock(&mhi_cntrl->pm_mutex);
   463
   464          /* Trigger MHI RESET so that the device will not access host memory */
   465          if (!MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
   466                  u32 in_reset = -1, ready = 0;
   467                  unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);
   468
   469                  dev_dbg(dev, "Triggering MHI Reset in device\n");
   470                  mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
   471
   472                  /* Wait for the reset bit to be cleared by the device */
   473                  ret = wait_event_timeout(mhi_cntrl->state_event,
   474                                           mhi_read_reg_field(mhi_cntrl,
   475                                                              mhi_cntrl->regs,
   476                                                              MHICTRL,
   477                                                              MHICTRL_RESET_MASK,
   478                                                              MHICTRL_RESET_SHIFT,
   479                                                              &in_reset) ||
   480                                           !in_reset, timeout);
   481                  if (!ret || in_reset)
   482                          dev_err(dev, "Device failed to exit MHI Reset state\n");
   483
   484                  /*
   485                   * Device will clear BHI_INTVEC as a part of RESET processing,
   486                   * hence re-program it
   487                   */
   488                  mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
   489
   490                  if (!MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) {
   491                          /* wait for ready to be set */
   492                          ret = wait_event_timeout(mhi_cntrl->state_event,
   493                                                   mhi_read_reg_field(mhi_cntrl,
   494                                                                      mhi_cntrl->regs,
   495                                                                      MHISTATUS,
   496                                                                      MHISTATUS_READY_MASK,
   497                                                                      MHISTATUS_READY_SHIFT,
   498                                                                      &ready)
   499                                                   || ready, timeout);
   500                          if ((!ret || !ready) &&
 > 501                              cur_state == MHI_PM_SYS_ERR_PROCESS) {
   502                                  dev_err(dev,
   503                                          "Device failed to enter READY state\n");
   504                                  mutex_unlock(&mhi_cntrl->pm_mutex);
   505                                  return;
   506                          }
   507                  }
   508          }
   509
   510          dev_dbg(dev,
   511                  "Waiting for all pending event ring processing to complete\n");
   512          mhi_event = mhi_cntrl->mhi_event;
   513          for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
   514                  if (mhi_event->offload_ev)
   515                          continue;
   516                  free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
   517                  tasklet_kill(&mhi_event->task);
   518          }
   519
   520          /* Release lock and wait for all pending threads to complete */
   521          mutex_unlock(&mhi_cntrl->pm_mutex);
   522          dev_dbg(dev, "Waiting for all pending threads to complete\n");
   523          wake_up_all(&mhi_cntrl->state_event);
   524
   525          dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
   526          device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);
   527
   528          mutex_lock(&mhi_cntrl->pm_mutex);
   529
   530          WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
   531          WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));
   532
   533          /* Reset the ev rings and cmd rings */
   534          dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
   535          mhi_cmd = mhi_cntrl->mhi_cmd;
   536          cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
   537          for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
   538                  struct mhi_ring *ring = &mhi_cmd->ring;
   539
   540                  ring->rp = ring->base;
   541                  ring->wp = ring->base;
   542                  cmd_ctxt->rp = cmd_ctxt->rbase;
   543                  cmd_ctxt->wp = cmd_ctxt->rbase;
   544          }
   545
   546          mhi_event = mhi_cntrl->mhi_event;
   547          er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
   548          for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
   549               mhi_event++) {
   550                  struct mhi_ring *ring = &mhi_event->ring;
   551
   552                  /* Skip offload events */
   553                  if (mhi_event->offload_ev)
   554                          continue;
   555
   556                  ring->rp = ring->base;
   557                  ring->wp = ring->base;
   558                  er_ctxt->rp = er_ctxt->rbase;
   559                  er_ctxt->wp = er_ctxt->rbase;
   560          }
   561
   562          /* Move to disable state */
   563          write_lock_irq(&mhi_cntrl->pm_lock);
   564          cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE);
   565          write_unlock_irq(&mhi_cntrl->pm_lock);
   566          if (unlikely(cur_state != MHI_PM_DISABLE))
   567                  dev_err(dev, "Error moving from PM state: %s to: %s\n",
   568                          to_mhi_pm_state_str(cur_state),
   569                          to_mhi_pm_state_str(MHI_PM_DISABLE));
   570
   571          dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
   572                  to_mhi_pm_state_str(mhi_cntrl->pm_state),
   573                  TO_MHI_STATE_STR(mhi_cntrl->dev_state));
   574
   575          mutex_unlock(&mhi_cntrl->pm_mutex);
   576  }
   577

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
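For reference, here is a minimal, self-contained sketch of the pattern the warning describes. The names (struct controller, handle_disable, PM_STATE_*) are hypothetical stand-ins, not the MHI driver's API, and this is not necessarily the fix the author intends; it only illustrates capturing a state value before the condition that tests it, which is what both clang and smatch are flagging at pm.c:501.

    /*
     * Hypothetical standalone example (not MHI driver code): the state a
     * condition tests must be assigned before the test runs, otherwise the
     * read is of an indeterminate value.
     */
    #include <stdio.h>

    enum pm_state { PM_STATE_DISABLE, PM_STATE_SYS_ERR_PROCESS };

    struct controller {
        enum pm_state pm_state;     /* current PM state of the device */
    };

    /* Stand-in for a wait_event_timeout() that timed out waiting for READY. */
    static int ready_wait_failed(void)
    {
        return 1;
    }

    static int handle_disable(struct controller *ctrl)
    {
        /* Capture the state before it is tested; leaving cur_state
         * unassigned here is exactly what -Wuninitialized complains about. */
        enum pm_state cur_state = ctrl->pm_state;

        if (ready_wait_failed() && cur_state == PM_STATE_SYS_ERR_PROCESS) {
            fprintf(stderr, "Device failed to enter READY state\n");
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        struct controller ctrl = { .pm_state = PM_STATE_SYS_ERR_PROCESS };

        return handle_disable(&ctrl) ? 1 : 0;
    }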
Hi Jeffrey,

url:    https://github.com/0day-ci/linux/commits/Jeffrey-Hugo/mhi_bus-core-Wait-for-ready-state-after-reset/20210217-045558
base:   https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git f40ddce88593482919761f74910f42f4b84c004b
config: i386-randconfig-m021-20210215 (attached as .config)
compiler: gcc-9 (Debian 9.3.0-15) 9.3.0

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>

smatch warnings:
drivers/bus/mhi/core/pm.c:501 mhi_pm_disable_transition() error: uninitialized symbol 'cur_state'.

vim +/cur_state +501 drivers/bus/mhi/core/pm.c

a03c7a86e12721 Bhaumik Bhatt 2020-11-09  449  static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  450  {
a03c7a86e12721 Bhaumik Bhatt 2020-11-09  451          enum mhi_pm_state cur_state;
                                                      ^^^^^^^^^^^^^^^^^^^^^^^^^^^
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  452          struct mhi_event *mhi_event;
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  453          struct mhi_cmd_ctxt *cmd_ctxt;
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  454          struct mhi_cmd *mhi_cmd;
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  455          struct mhi_event_ctxt *er_ctxt;
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  456          struct device *dev = &mhi_cntrl->mhi_dev->dev;
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  457          int ret, i;
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  458
a03c7a86e12721 Bhaumik Bhatt 2020-11-09  459          dev_dbg(dev, "Processing disable transition with PM state: %s\n",
a03c7a86e12721 Bhaumik Bhatt 2020-11-09  460                  to_mhi_pm_state_str(mhi_cntrl->pm_state));
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  461
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  462          mutex_lock(&mhi_cntrl->pm_mutex);
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  463
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  464          /* Trigger MHI RESET so that the device will not access host memory */
a03c7a86e12721 Bhaumik Bhatt 2020-11-09  465          if (!MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
a9148d0d4715fb Jeffrey Hugo 2021-02-16  466                  u32 in_reset = -1, ready = 0;
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  467                  unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  468
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  469                  dev_dbg(dev, "Triggering MHI Reset in device\n");
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  470                  mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  471
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  472                  /* Wait for the reset bit to be cleared by the device */
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  473                  ret = wait_event_timeout(mhi_cntrl->state_event,
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  474                                           mhi_read_reg_field(mhi_cntrl,
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  475                                                              mhi_cntrl->regs,
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  476                                                              MHICTRL,
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  477                                                              MHICTRL_RESET_MASK,
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  478                                                              MHICTRL_RESET_SHIFT,
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  479                                                              &in_reset) ||
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  480                                           !in_reset, timeout);
556bbb442bbb44 Bhaumik Bhatt 2020-11-09  481                  if (!ret || in_reset)
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  482                          dev_err(dev, "Device failed to exit MHI Reset state\n");
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  483
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  484                  /*
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  485                   * Device will clear BHI_INTVEC as a part of RESET processing,
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  486                   * hence re-program it
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  487                   */
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  488                  mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
a9148d0d4715fb Jeffrey Hugo 2021-02-16  489
a9148d0d4715fb Jeffrey Hugo 2021-02-16  490                  if (!MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) {
a9148d0d4715fb Jeffrey Hugo 2021-02-16  491                          /* wait for ready to be set */
a9148d0d4715fb Jeffrey Hugo 2021-02-16  492                          ret = wait_event_timeout(mhi_cntrl->state_event,
a9148d0d4715fb Jeffrey Hugo 2021-02-16  493                                                   mhi_read_reg_field(mhi_cntrl,
a9148d0d4715fb Jeffrey Hugo 2021-02-16  494                                                                      mhi_cntrl->regs,
a9148d0d4715fb Jeffrey Hugo 2021-02-16  495                                                                      MHISTATUS,
a9148d0d4715fb Jeffrey Hugo 2021-02-16  496                                                                      MHISTATUS_READY_MASK,
a9148d0d4715fb Jeffrey Hugo 2021-02-16  497                                                                      MHISTATUS_READY_SHIFT,
a9148d0d4715fb Jeffrey Hugo 2021-02-16  498                                                                      &ready)
a9148d0d4715fb Jeffrey Hugo 2021-02-16  499                                                   || ready, timeout);
a9148d0d4715fb Jeffrey Hugo 2021-02-16  500                          if ((!ret || !ready) &&
a9148d0d4715fb Jeffrey Hugo 2021-02-16 @501                              cur_state == MHI_PM_SYS_ERR_PROCESS) {
                                                                         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
"cur_state" is not initialized until later.

a9148d0d4715fb Jeffrey Hugo 2021-02-16  502                                  dev_err(dev,
a9148d0d4715fb Jeffrey Hugo 2021-02-16  503                                          "Device failed to enter READY state\n");
a9148d0d4715fb Jeffrey Hugo 2021-02-16  504                                  mutex_unlock(&mhi_cntrl->pm_mutex);
a9148d0d4715fb Jeffrey Hugo 2021-02-16  505                                  return;
a9148d0d4715fb Jeffrey Hugo 2021-02-16  506                          }
a9148d0d4715fb Jeffrey Hugo 2021-02-16  507                  }
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  508          }
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  509
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  510          dev_dbg(dev,
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  511                  "Waiting for all pending event ring processing to complete\n");
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  512          mhi_event = mhi_cntrl->mhi_event;
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  513          for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  514                  if (mhi_event->offload_ev)
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  515                          continue;
6cc1716102b554 Bhaumik Bhatt 2020-11-09  516                  free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  517                  tasklet_kill(&mhi_event->task);
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  518          }
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  519
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  520          /* Release lock and wait for all pending threads to complete */
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  521          mutex_unlock(&mhi_cntrl->pm_mutex);
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  522          dev_dbg(dev, "Waiting for all pending threads to complete\n");
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  523          wake_up_all(&mhi_cntrl->state_event);
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  524
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  525          dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
10ea8bcda5ae54 Loic Poulain 2020-11-25  526          device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  527
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  528          mutex_lock(&mhi_cntrl->pm_mutex);
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  529
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  530          WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  531          WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  532
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  533          /* Reset the ev rings and cmd rings */
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  534          dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  535          mhi_cmd = mhi_cntrl->mhi_cmd;
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  536          cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  537          for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  538                  struct mhi_ring *ring = &mhi_cmd->ring;
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  539
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  540                  ring->rp = ring->base;
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  541                  ring->wp = ring->base;
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  542                  cmd_ctxt->rp = cmd_ctxt->rbase;
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  543                  cmd_ctxt->wp = cmd_ctxt->rbase;
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  544          }
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  545
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  546          mhi_event = mhi_cntrl->mhi_event;
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  547          er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  548          for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  549               mhi_event++) {
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  550                  struct mhi_ring *ring = &mhi_event->ring;
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  551
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  552                  /* Skip offload events */
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  553                  if (mhi_event->offload_ev)
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  554                          continue;
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  555
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  556                  ring->rp = ring->base;
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  557                  ring->wp = ring->base;
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  558                  er_ctxt->rp = er_ctxt->rbase;
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  559                  er_ctxt->wp = er_ctxt->rbase;
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  560          }
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  561
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  562          /* Move to disable state */
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  563          write_lock_irq(&mhi_cntrl->pm_lock);
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  564          cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE);
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  565          write_unlock_irq(&mhi_cntrl->pm_lock);
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  566          if (unlikely(cur_state != MHI_PM_DISABLE))
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  567                  dev_err(dev, "Error moving from PM state: %s to: %s\n",
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  568                          to_mhi_pm_state_str(cur_state),
a6e2e3522f2914 Manivannan Sadhasivam 2020-02-20  569                          to_mhi_pm_state_str(MHI_PM_DISABLE));
556bbb442bbb44 Bhaumik Bhatt 2020-11-09  570
556bbb442bbb44 Bhaumik Bhatt 2020-11-09  571          dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
556bbb442bbb44 Bhaumik Bhatt 2020-11-09  572                  to_mhi_pm_state_str(mhi_cntrl->pm_state),
556bbb442bbb44 Bhaumik Bhatt 2020-11-09  573                  TO_MHI_STATE_STR(mhi_cntrl->dev_state));
556bbb442bbb44 Bhaumik Bhatt 2020-11-09  574
556bbb442bbb44 Bhaumik Bhatt 2020-11-09  575          mutex_unlock(&mhi_cntrl->pm_mutex);
556bbb442bbb44 Bhaumik Bhatt 2020-11-09  576  }

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c
index ef8fb4a..dc69074 100644
--- a/drivers/bus/mhi/core/pm.c
+++ b/drivers/bus/mhi/core/pm.c
@@ -463,7 +463,7 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
 
 	/* Trigger MHI RESET so that the device will not access host memory */
 	if (!MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
-		u32 in_reset = -1;
+		u32 in_reset = -1, ready = 0;
 		unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);
 
 		dev_dbg(dev, "Triggering MHI Reset in device\n");
@@ -486,6 +486,25 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
 		 * hence re-program it
 		 */
 		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
+
+		if (!MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) {
+			/* wait for ready to be set */
+			ret = wait_event_timeout(mhi_cntrl->state_event,
+						 mhi_read_reg_field(mhi_cntrl,
+								    mhi_cntrl->regs,
+								    MHISTATUS,
+								    MHISTATUS_READY_MASK,
+								    MHISTATUS_READY_SHIFT,
+								    &ready)
+						 || ready, timeout);
+			if ((!ret || !ready) &&
+			    cur_state == MHI_PM_SYS_ERR_PROCESS) {
+				dev_err(dev,
+					"Device failed to enter READY state\n");
+				mutex_unlock(&mhi_cntrl->pm_mutex);
+				return;
+			}
+		}
 	}
 
 	dev_dbg(dev,
After the device has signaled the end of reset by clearing the reset bit, it
will automatically reinit MHI and the internal device structures. Once that
is done, the device will signal it has entered the ready state.

Signaling the ready state involves sending an interrupt (MSI) to the host,
which might cause IOMMU faults if it occurs at the wrong time.

If the controller is being powered down, and possibly removed, then the
reset flow would only wait for the end of reset, at which point the host and
device would start a race. The host may complete its reset work and remove
the interrupt handler, which would cause the interrupt to be disabled in the
IOMMU. If that occurs before the device signals the ready state, then the
IOMMU will fault since it blocked an interrupt. While harmless, the fault
would appear to be a serious issue, so let's silence it by making sure the
device hits the ready state before the host completes its reset processing.

Signed-off-by: Jeffrey Hugo <jhugo@codeaurora.org>
---
 drivers/bus/mhi/core/pm.c | 21 ++++++++++++++++++++-
 1 file changed, 20 insertions(+), 1 deletion(-)
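As a rough illustration of the ordering the commit message argues for, the standalone sketch below uses stand-in helpers (device_reports_ready, release_irq, max_polls are hypothetical, not driver code) and a simple bounded poll loop in place of the driver's wait_event_timeout() on MHISTATUS: the interrupt handler is only released after READY has been observed or the wait has expired.

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-in for reading the READY bit in MHISTATUS. */
    static bool device_reports_ready(int poll)
    {
        return poll >= 3;    /* pretend READY shows up on the third poll */
    }

    /* Hypothetical stand-in for free_irq() in the teardown path. */
    static void release_irq(void)
    {
        printf("interrupt handler released after the READY wait\n");
    }

    int main(void)
    {
        const int max_polls = 10;    /* stand-in for timeout_ms */
        bool ready = false;

        /* Wait (with a bound) for the device to report READY before tearing
         * down the handler, so the READY interrupt cannot arrive after the
         * handler is gone and trip an IOMMU fault. */
        for (int poll = 0; poll < max_polls && !ready; poll++)
            ready = device_reports_ready(poll);

        if (!ready)
            fprintf(stderr, "Device failed to enter READY state\n");

        release_irq();
        return ready ? 0 : 1;
    }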