[net-next,v2,4/9] gve: Add support for dma_mask register

Message ID 20200901215149.2685117-5-awogbemila@google.com
State New
Series Add GVE Features

Commit Message

David Awogbemila Sept. 1, 2020, 9:51 p.m. UTC
From: Catherine Sullivan <csully@google.com>

Add the dma_mask register and read it to set the DMA masks.
gve_alloc_page will call alloc_page() with:
 GFP_DMA if priv->dma_mask is 24,
 GFP_DMA32 if priv->dma_mask is 32.

Reviewed-by: Yangchun Fu <yangchun@google.com>
Signed-off-by: Catherine Sullivan <csully@google.com>
Signed-off-by: David Awogbemila <awogbemila@google.com>
---
 drivers/net/ethernet/google/gve/gve.h         |  6 ++-
 drivers/net/ethernet/google/gve/gve_main.c    | 42 ++++++++++++-------
 .../net/ethernet/google/gve/gve_register.h    |  3 +-
 3 files changed, 34 insertions(+), 17 deletions(-)
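
For readers skimming the thread, a minimal sketch of the flag selection the
commit message describes. gve_page_gfp() is a hypothetical helper used only
for illustration; in the patch below the logic sits inline in gve_alloc_page().

/* Illustration only: pick allocation flags from the device-reported
 * DMA mask width, as described in the commit message.
 */
static gfp_t gve_page_gfp(const struct gve_priv *priv)
{
	gfp_t gfp_flags = GFP_KERNEL;

	if (priv->dma_mask == 24)
		gfp_flags |= GFP_DMA;	/* x86 ZONE_DMA: lowest 16 MB */
	else if (priv->dma_mask == 32)
		gfp_flags |= GFP_DMA32;	/* keep the page below 4 GiB */

	return gfp_flags;
}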

Comments

Jakub Kicinski Sept. 2, 2020, 12:34 a.m. UTC | #1
On Tue,  1 Sep 2020 14:51:44 -0700 David Awogbemila wrote:
> From: Catherine Sullivan <csully@google.com>
> 
> Add the dma_mask register and read it to set the DMA masks.
> gve_alloc_page will call alloc_page() with:
>  GFP_DMA if priv->dma_mask is 24,
>  GFP_DMA32 if priv->dma_mask is 32.

What about Andrew's request to CC someone who can review this for
correctness?

What's your use case here? Do you really have devices with 24bit
addressing?
David Miller Sept. 2, 2020, 11:08 p.m. UTC | #2
From: David Awogbemila <awogbemila@google.com>
Date: Wed, 2 Sep 2020 11:42:37 -0700

> I don't think there is a specific 24-bit device in mind here, only
> that we have seen 32-bit addressing use cases where the guest ran out
> of SWIOTLB space and restricting to GFP_DMA32 helped, so we thought
> it would be natural for the driver to handle the 24-bit case in case
> it ever came along.

You should add such support when the situation presents itself, rather
than prematurely like this.

Thank you.
David Awogbemila Sept. 3, 2020, 4:31 p.m. UTC | #3
Thanks, I'll adjust this.

On Wed, Sep 2, 2020 at 4:08 PM David Miller <davem@davemloft.net> wrote:
>
> From: David Awogbemila <awogbemila@google.com>
> Date: Wed, 2 Sep 2020 11:42:37 -0700
>
> > I don't think there is a specific 24-bit device in mind here, only
> > that we have seen 32-bit addressing use cases where the guest ran out
> > of SWIOTLB space and restricting to GFP_DMA32 helped, so we thought
> > it would be natural for the driver to handle the 24-bit case in case
> > it ever came along.
>
> You should add such support when the situation presents itself, rather
> than prematurely like this.
>
> Thank you.
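
To make the SWIOTLB point above concrete, a short sketch (illustration only,
not part of the patch; "dev" stands for the PCI device's struct device and the
helper name is made up): allocating with GFP_DMA32 keeps the page below 4 GiB,
so a device limited to 32-bit addressing can map it directly instead of
bouncing it through the SWIOTLB pool the guest was running out of.

/* Illustration only: allocate a page the device can reach without a
 * SWIOTLB bounce buffer, then set up a streaming DMA mapping for it.
 */
static dma_addr_t map_page_below_4g(struct device *dev, struct page **page,
				    enum dma_data_direction dir)
{
	*page = alloc_page(GFP_KERNEL | GFP_DMA32);
	if (!*page)
		return DMA_MAPPING_ERROR;
	return dma_map_page(dev, *page, 0, PAGE_SIZE, dir);
}

A caller would still check the result with dma_mapping_error() before use.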

Patch

diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 55b34b437764..37a3bbced36a 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -232,6 +232,9 @@ struct gve_priv {
 	struct work_struct service_task;
 	unsigned long service_task_flags;
 	unsigned long state_flags;
+
+	/* Gvnic device's dma mask, set during probe. */
+	u8 dma_mask;
 };
 
 enum gve_service_task_flags {
@@ -451,8 +454,7 @@ static inline bool gve_can_recycle_pages(struct net_device *dev)
 }
 
 /* buffers */
-int gve_alloc_page(struct gve_priv *priv, struct device *dev,
-		   struct page **page, dma_addr_t *dma,
+int gve_alloc_page(struct gve_priv *priv, struct device *dev, struct page **page, dma_addr_t *dma,
 		   enum dma_data_direction);
 void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
 		   enum dma_data_direction);
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index a0b8c1e8ed98..b176fcef19de 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -518,7 +518,14 @@ int gve_alloc_page(struct gve_priv *priv, struct device *dev,
 		   struct page **page, dma_addr_t *dma,
 		   enum dma_data_direction dir)
 {
-	*page = alloc_page(GFP_KERNEL);
+	gfp_t gfp_flags = GFP_KERNEL;
+
+	if (priv->dma_mask == 24)
+		gfp_flags |= GFP_DMA;
+	else if (priv->dma_mask == 32)
+		gfp_flags |= GFP_DMA32;
+
+	*page = alloc_page(gfp_flags);
 	if (!*page) {
 		priv->page_alloc_fail++;
 		return -ENOMEM;
@@ -1083,6 +1090,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	__be32 __iomem *db_bar;
 	struct gve_registers __iomem *reg_bar;
 	struct gve_priv *priv;
+	u8 dma_mask;
 	int err;
 
 	err = pci_enable_device(pdev);
@@ -1095,19 +1103,6 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	pci_set_master(pdev);
 
-	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
-	if (err) {
-		dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err);
-		goto abort_with_pci_region;
-	}
-
-	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-	if (err) {
-		dev_err(&pdev->dev,
-			"Failed to set consistent dma mask: err=%d\n", err);
-		goto abort_with_pci_region;
-	}
-
 	reg_bar = pci_iomap(pdev, GVE_REGISTER_BAR, 0);
 	if (!reg_bar) {
 		dev_err(&pdev->dev, "Failed to map pci bar!\n");
@@ -1122,10 +1117,28 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto abort_with_reg_bar;
 	}
 
+	dma_mask = readb(&reg_bar->dma_mask);
+	/* Default to 64 if the register isn't set */
+	if (!dma_mask)
+		dma_mask = 64;
 	gve_write_version(&reg_bar->driver_version);
 	/* Get max queues to alloc etherdev */
 	max_rx_queues = ioread32be(&reg_bar->max_tx_queues);
 	max_tx_queues = ioread32be(&reg_bar->max_rx_queues);
+
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (err) {
+		dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err);
+		goto abort_with_reg_bar;
+	}
+
+	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (err) {
+		dev_err(&pdev->dev,
+			"Failed to set consistent dma mask: err=%d\n", err);
+		goto abort_with_reg_bar;
+	}
+
 	/* Alloc and setup the netdev and priv */
 	dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues);
 	if (!dev) {
@@ -1160,6 +1173,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	priv->db_bar2 = db_bar;
 	priv->service_task_flags = 0x0;
 	priv->state_flags = 0x0;
+	priv->dma_mask = dma_mask;
 
 	gve_set_probe_in_progress(priv);
 
diff --git a/drivers/net/ethernet/google/gve/gve_register.h b/drivers/net/ethernet/google/gve/gve_register.h
index 84ab8893aadd..fad8813d1bb1 100644
--- a/drivers/net/ethernet/google/gve/gve_register.h
+++ b/drivers/net/ethernet/google/gve/gve_register.h
@@ -16,7 +16,8 @@ struct gve_registers {
 	__be32	adminq_pfn;
 	__be32	adminq_doorbell;
 	__be32	adminq_event_counter;
-	u8	reserved[3];
+	u8	reserved[2];
+	u8	dma_mask;
 	u8	driver_version;
 };