@@ -1945,6 +1945,98 @@ bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd)
 }
 EXPORT_SYMBOL_NS_GPL(schedule_cxl_memdev_detach, CXL);
+/**
+ * cxl_port_get_switch_qos - retrieve QoS data for CXL switches
+ * @port: endpoint cxl_port
+ * @rd_bw: writeback value for min read bandwidth
+ * @rd_lat: writeback value for total read latency
+ * @wr_bw: writeback value for min write bandwidth
+ * @wr_lat: writeback value for total write latency
+ *
+ * Walk the CXL topology from the endpoint towards the root and accumulate
+ * the switch CDAT (SSLBIS) QoS data: latencies are summed and bandwidth
+ * is limited by the narrowest link. The writeback parameters are only
+ * updated on success.
+ *
+ * Return: Errno on failure, 0 on success. -ENOENT if no switch device
+ */
+int cxl_port_get_switch_qos(struct cxl_port *port, u64 *rd_bw, u64 *rd_lat,
+			    u64 *wr_bw, u64 *wr_lat)
+{
+	/* u64 sentinels: ULONG_MAX is only 32 bits wide on 32-bit builds */
+	u64 min_rd_bw = ULLONG_MAX;
+	u64 min_wr_bw = ULLONG_MAX;
+	struct cxl_dport *dport;
+	struct cxl_port *nport;
+	u64 total_rd_lat = 0;
+	u64 total_wr_lat = 0;
+	struct device *next;
+	int switches = 0;
+
+	if (!is_cxl_endpoint(port))
+		return -EINVAL;
+
+	/* Skip the endpoint */
+	next = port->dev.parent;
+	nport = to_cxl_port(next);
+	dport = port->parent_dport;
+
+	do {
+		struct node_hmem_attrs *hmem_attrs;
+		u64 lat, bw;
+
+		/* Past the top of the CXL hierarchy there is no parent dport */
+		if (!dport)
+			break;
+
+		if (!nport->cdat.table)
+			break;
+
+		if (!dev_is_pci(dport->dport))
+			break;
+
+		hmem_attrs = xa_load(&nport->cdat.sslbis_xa, dport->port_id);
+		if (xa_is_err(hmem_attrs))
+			return xa_err(hmem_attrs);
+
+		if (!hmem_attrs) {
+			/* No per-port entry, fall back to the wildcard entry */
+			hmem_attrs = xa_load(&nport->cdat.sslbis_xa, SSLBIS_ANY_PORT);
+			if (xa_is_err(hmem_attrs))
+				return xa_err(hmem_attrs);
+			if (!hmem_attrs)
+				return -ENXIO;
+		}
+
+		bw = hmem_attrs->write_bandwidth;
+		lat = hmem_attrs->write_latency;
+		min_wr_bw = min_t(u64, min_wr_bw, bw);
+		total_wr_lat += lat;
+
+		bw = hmem_attrs->read_bandwidth;
+		lat = hmem_attrs->read_latency;
+		min_rd_bw = min_t(u64, min_rd_bw, bw);
+		total_rd_lat += lat;
+
+		dport = nport->parent_dport;
+		next = next->parent;
+		nport = to_cxl_port(next);
+		switches++;
+	} while (next);
+
+	/* No switch traversed: error out without touching the writeback args */
+	if (!switches)
+		return -ENOENT;
+
+	*wr_bw = min_wr_bw;
+	*wr_lat = total_wr_lat;
+	*rd_bw = min_rd_bw;
+	*rd_lat = total_rd_lat;
+
+	return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_port_get_switch_qos, CXL);
+
/* for user tooling to ensure port disable work has completed */
static ssize_t flush_store(struct bus_type *bus, const char *buf, size_t count)
{
@@ -807,6 +807,8 @@ struct qtg_dsm_output {
struct qtg_dsm_output *cxl_acpi_evaluate_qtg_dsm(acpi_handle handle,
struct qtg_dsm_input *input);
acpi_handle cxl_acpi_get_rootdev_handle(struct device *dev);
+int cxl_port_get_switch_qos(struct cxl_port *port, u64 *rd_bw, u64 *rd_lat,
+ u64 *wr_bw, u64 *wr_lat);
/*
* Unit test builds overrides this to __weak, find the 'strong' version
The CDAT information from the switch, Switch Scoped Latency and Bandwidth Information Structure (SSLBIS), is parsed and stored in an xarray under the cxl_port. The QoS data are indexed by the downstream port id. Walk the CXL ports from endpoint to root and retrieve the relevant QoS information (bandwidth and latency) that comes from the switch CDAT. If read or write QoS values are not available, then use the access QoS value. Signed-off-by: Dave Jiang <dave.jiang@intel.com> --- v3: - Move to use 'struct node_hmem_attrs' --- drivers/cxl/core/port.c | 81 +++++++++++++++++++++++++++++++++++++++++++++++ drivers/cxl/cxl.h | 2 + 2 files changed, 83 insertions(+)