| Message ID | 20190802195150.23207-7-robh@kernel.org |
|---|---|
| State | Superseded |
| Series | drm/panfrost: Add heap and no execute buffer allocation |
A-b, I think.

On Fri, Aug 02, 2019 at 01:51:48PM -0600, Rob Herring wrote:
> In preparation to handle mapping of page faults, we need the MMU handler
> to be threaded as code paths take a mutex.
>
> As the IRQ may be shared, we can't use the default handler and must
> disable the MMU interrupts locally.
>
> Cc: Tomeu Vizoso <tomeu.vizoso@collabora.com>
> Cc: Boris Brezillon <boris.brezillon@collabora.com>
> Cc: Robin Murphy <robin.murphy@arm.com>
> Cc: Steven Price <steven.price@arm.com>
> Cc: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
> Signed-off-by: Rob Herring <robh@kernel.org>
> ---
>  drivers/gpu/drm/panfrost/panfrost_mmu.c | 20 +++++++++++++++-----
>  1 file changed, 15 insertions(+), 5 deletions(-)
>
> diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
> index eba6ce785ef0..7d44328b280f 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
> @@ -300,12 +300,20 @@ static const char *access_type_name(struct panfrost_device *pfdev,
>  static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
>  {
>  	struct panfrost_device *pfdev = data;
> -	u32 status = mmu_read(pfdev, MMU_INT_STAT);
> -	int i;
>
> -	if (!status)
> +	if (!mmu_read(pfdev, MMU_INT_STAT))
>  		return IRQ_NONE;
>
> +	mmu_write(pfdev, MMU_INT_MASK, 0);
> +	return IRQ_WAKE_THREAD;
> +}
> +
> +static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
> +{
> +	struct panfrost_device *pfdev = data;
> +	u32 status = mmu_read(pfdev, MMU_INT_RAWSTAT);
> +	int i;
> +
>  	dev_err(pfdev->dev, "mmu irq status=%x\n", status);
>
>  	for (i = 0; status; i++) {
> @@ -350,6 +358,7 @@ static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
>  		status &= ~mask;
>  	}
>
> +	mmu_write(pfdev, MMU_INT_MASK, ~0);
>  	return IRQ_HANDLED;
>  };
>
> @@ -368,8 +377,9 @@ int panfrost_mmu_init(struct panfrost_device *pfdev)
>  	if (irq <= 0)
>  		return -ENODEV;
>
> -	err = devm_request_irq(pfdev->dev, irq, panfrost_mmu_irq_handler,
> -			       IRQF_SHARED, "mmu", pfdev);
> +	err = devm_request_threaded_irq(pfdev->dev, irq, panfrost_mmu_irq_handler,
> +					panfrost_mmu_irq_handler_thread,
> +					IRQF_SHARED, "mmu", pfdev);
>
>  	if (err) {
>  		dev_err(pfdev->dev, "failed to request mmu irq");
> --
> 2.20.1
>
On 02/08/2019 20:51, Rob Herring wrote:
> In preparation to handle mapping of page faults, we need the MMU handler
> to be threaded as code paths take a mutex.
>
> As the IRQ may be shared, we can't use the default handler and must
> disable the MMU interrupts locally.
>
> Cc: Tomeu Vizoso <tomeu.vizoso@collabora.com>
> Cc: Boris Brezillon <boris.brezillon@collabora.com>
> Cc: Robin Murphy <robin.murphy@arm.com>
> Cc: Steven Price <steven.price@arm.com>
> Cc: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
> Signed-off-by: Rob Herring <robh@kernel.org>

Reviewed-by: Steven Price <steven.price@arm.com>

> ---
>  drivers/gpu/drm/panfrost/panfrost_mmu.c | 20 +++++++++++++++-----
>  1 file changed, 15 insertions(+), 5 deletions(-)
>
> diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
> index eba6ce785ef0..7d44328b280f 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
> @@ -300,12 +300,20 @@ static const char *access_type_name(struct panfrost_device *pfdev,
>  static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
>  {
>  	struct panfrost_device *pfdev = data;
> -	u32 status = mmu_read(pfdev, MMU_INT_STAT);
> -	int i;
>
> -	if (!status)
> +	if (!mmu_read(pfdev, MMU_INT_STAT))
>  		return IRQ_NONE;
>
> +	mmu_write(pfdev, MMU_INT_MASK, 0);
> +	return IRQ_WAKE_THREAD;
> +}
> +
> +static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
> +{
> +	struct panfrost_device *pfdev = data;
> +	u32 status = mmu_read(pfdev, MMU_INT_RAWSTAT);
> +	int i;
> +
>  	dev_err(pfdev->dev, "mmu irq status=%x\n", status);
>
>  	for (i = 0; status; i++) {
> @@ -350,6 +358,7 @@ static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
>  		status &= ~mask;
>  	}
>
> +	mmu_write(pfdev, MMU_INT_MASK, ~0);
>  	return IRQ_HANDLED;
>  };
>
> @@ -368,8 +377,9 @@ int panfrost_mmu_init(struct panfrost_device *pfdev)
>  	if (irq <= 0)
>  		return -ENODEV;
>
> -	err = devm_request_irq(pfdev->dev, irq, panfrost_mmu_irq_handler,
> -			       IRQF_SHARED, "mmu", pfdev);
> +	err = devm_request_threaded_irq(pfdev->dev, irq, panfrost_mmu_irq_handler,
> +					panfrost_mmu_irq_handler_thread,
> +					IRQF_SHARED, "mmu", pfdev);
>
>  	if (err) {
>  		dev_err(pfdev->dev, "failed to request mmu irq");
>
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index eba6ce785ef0..7d44328b280f 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -300,12 +300,20 @@ static const char *access_type_name(struct panfrost_device *pfdev,
 static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
 {
 	struct panfrost_device *pfdev = data;
-	u32 status = mmu_read(pfdev, MMU_INT_STAT);
-	int i;

-	if (!status)
+	if (!mmu_read(pfdev, MMU_INT_STAT))
 		return IRQ_NONE;

+	mmu_write(pfdev, MMU_INT_MASK, 0);
+	return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
+{
+	struct panfrost_device *pfdev = data;
+	u32 status = mmu_read(pfdev, MMU_INT_RAWSTAT);
+	int i;
+
 	dev_err(pfdev->dev, "mmu irq status=%x\n", status);

 	for (i = 0; status; i++) {
@@ -350,6 +358,7 @@ static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
 		status &= ~mask;
 	}

+	mmu_write(pfdev, MMU_INT_MASK, ~0);
 	return IRQ_HANDLED;
 };

@@ -368,8 +377,9 @@ int panfrost_mmu_init(struct panfrost_device *pfdev)
 	if (irq <= 0)
 		return -ENODEV;

-	err = devm_request_irq(pfdev->dev, irq, panfrost_mmu_irq_handler,
-			       IRQF_SHARED, "mmu", pfdev);
+	err = devm_request_threaded_irq(pfdev->dev, irq, panfrost_mmu_irq_handler,
+					panfrost_mmu_irq_handler_thread,
+					IRQF_SHARED, "mmu", pfdev);

 	if (err) {
 		dev_err(pfdev->dev, "failed to request mmu irq");
In preparation to handle mapping of page faults, we need the MMU handler
to be threaded as code paths take a mutex.

As the IRQ may be shared, we can't use the default handler and must
disable the MMU interrupts locally.

Cc: Tomeu Vizoso <tomeu.vizoso@collabora.com>
Cc: Boris Brezillon <boris.brezillon@collabora.com>
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Steven Price <steven.price@arm.com>
Cc: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
Signed-off-by: Rob Herring <robh@kernel.org>
---
 drivers/gpu/drm/panfrost/panfrost_mmu.c | 20 +++++++++++++++-----
 1 file changed, 15 insertions(+), 5 deletions(-)
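For reference, the change follows the standard Linux threaded-IRQ pattern: the hard handler only checks whether this device raised the interrupt, masks the device's own interrupt sources, and returns IRQ_WAKE_THREAD; the thread handler then does the work that may sleep (taking a mutex, mapping pages) and unmasks the sources again. The "default handler" the commit message refers to is the core's default primary handler (passing NULL as the hard handler), which keeps the interrupt line disabled until the thread finishes; that is not an option on a shared line, which is why the driver masks at the device instead. Below is a minimal sketch of the same pattern, not the panfrost code: the device struct, register offsets (DEV_INT_STAT, DEV_INT_RAWSTAT, DEV_INT_MASK, DEV_INT_CLEAR), and function names are hypothetical, while devm_request_threaded_irq(), IRQF_SHARED, and the irqreturn_t values are the real kernel API.

```c
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>

/* Hypothetical register layout, for illustration only. */
#define DEV_INT_STAT	0x00	/* masked interrupt status */
#define DEV_INT_RAWSTAT	0x04	/* raw (unmasked) status */
#define DEV_INT_MASK	0x08	/* bit set = source enabled */
#define DEV_INT_CLEAR	0x0c

struct my_dev {
	struct device *dev;
	void __iomem *regs;
	struct mutex lock;	/* may only be taken in process context */
};

/* Hard handler: runs in IRQ context, must not sleep. */
static irqreturn_t my_irq_handler(int irq, void *data)
{
	struct my_dev *mdev = data;

	/* The line is shared: if we didn't raise it, let others handle it. */
	if (!readl(mdev->regs + DEV_INT_STAT))
		return IRQ_NONE;

	/* Mask our sources so the line stays quiet until the thread runs. */
	writel(0, mdev->regs + DEV_INT_MASK);
	return IRQ_WAKE_THREAD;
}

/* Thread handler: runs in process context, may sleep and take mutexes. */
static irqreturn_t my_irq_handler_thread(int irq, void *data)
{
	struct my_dev *mdev = data;
	u32 status = readl(mdev->regs + DEV_INT_RAWSTAT);

	mutex_lock(&mdev->lock);
	/* ... service each pending source, e.g. map pages for a fault ... */
	writel(status, mdev->regs + DEV_INT_CLEAR);
	mutex_unlock(&mdev->lock);

	/* Re-enable our interrupt sources now that they are serviced. */
	writel(~0, mdev->regs + DEV_INT_MASK);
	return IRQ_HANDLED;
}

static int my_dev_init_irq(struct my_dev *mdev, int irq)
{
	return devm_request_threaded_irq(mdev->dev, irq,
					 my_irq_handler,
					 my_irq_handler_thread,
					 IRQF_SHARED, "my-dev", mdev);
}
```

The mask write in the hard handler is what makes IRQ_WAKE_THREAD safe here: without it, a level-triggered shared line would keep firing between the hard handler returning and the thread running.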