@@ -93,7 +93,18 @@ gf100_fb_init(struct nvkm_fb *base)
struct gf100_fb *fb = gf100_fb(base);
struct nvkm_device *device = fb->base.subdev.device;
- if (fb->r100c10_page)
+ if (!fb->r100c10) {
+ dma_addr_t addr = dma_map_page(device->dev, fb->r100c10_page, 0,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (!dma_mapping_error(device->dev, addr)) {
+ fb->r100c10 = addr;
+ } else {
+ nvkm_warn(&fb->base.subdev,
+ "dma_map_page() failed on 100c10 page\n");
+ }
+ }
+
+ if (fb->r100c10)
nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8);
}
@@ -103,12 +114,14 @@ gf100_fb_dtor(struct nvkm_fb *base)
struct gf100_fb *fb = gf100_fb(base);
struct nvkm_device *device = fb->base.subdev.device;
- if (fb->r100c10_page) {
+ if (fb->r100c10) {
dma_unmap_page(device->dev, fb->r100c10, PAGE_SIZE,
DMA_BIDIRECTIONAL);
- __free_page(fb->r100c10_page);
}
+	if (fb->r100c10_page)
+		__free_page(fb->r100c10_page);
+
return fb;
}
@@ -124,11 +137,9 @@ gf100_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
*pfb = &fb->base;
fb->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (fb->r100c10_page) {
- fb->r100c10 = dma_map_page(device->dev, fb->r100c10_page, 0,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(device->dev, fb->r100c10))
- return -EFAULT;
+ if (!fb->r100c10_page) {
+ nvkm_error(&fb->base.subdev, "failed 100c10 page alloc\n");
+ return -ENOMEM;
}
return 0;
The 100c10 scratch page is mapped using dma_map_page() before the TTM
layer has had a chance to set the DMA mask. This means we are still
running with the default mask of 32 bits when this code executes, and
this causes problems for platforms with no memory below 4 GB (such as
AMD Seattle).

So move the dma_map_page() to the .init hook, which executes after the
DMA mask has been set. The destructor frees the scratch page whenever it
was allocated, but only unmaps it if a DMA mapping was actually recorded
in fb->r100c10.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
---
 drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c | 27 ++++++++++++++------
 1 file changed, 19 insertions(+), 8 deletions(-)

--
2.7.4
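
For reference, here is a minimal sketch of the ordering constraint the
commit message describes. This is not nouveau code: the helper name and
the 40-bit mask value are illustrative assumptions. Under the default
32-bit DMA mask, dma_map_page() must return a bus address below 4 GB,
which cannot be satisfied on a machine whose memory all lives above
4 GB; once the mask has been raised, the same call can succeed.

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/*
 * Illustrative only -- not part of the patch. Maps a scratch page
 * after raising the DMA mask, which is what deferring dma_map_page()
 * to the .init hook achieves (TTM has set the mask by that point).
 */
static int scratch_map_sketch(struct device *dev, struct page *page,
			      dma_addr_t *addr)
{
	int ret;

	/*
	 * Raise the mask first; 40 bits is an arbitrary example value.
	 * Under the default 32-bit mask, the mapping below would have
	 * to produce a bus address under 4 GB, and on a platform with
	 * no memory below 4 GB (and no IOMMU to remap the page) it
	 * would return a mapping error instead.
	 */
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
	if (ret)
		return ret;

	*addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *addr))
		return -EFAULT;

	return 0;
}

Note how the patch itself uses fb->r100c10 == 0 as the "not mapped yet"
sentinel: the .init hook only attempts the mapping when !fb->r100c10,
and the destructor only calls dma_unmap_page() when a mapping was
recorded, while the page itself is freed whenever it was allocated.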