@@ -1120,6 +1120,9 @@ struct cxl_dev_state *cxl_dev_state_create(struct device *dev)
 	mutex_init(&cxlds->mbox_mutex);
 	mutex_init(&cxlds->event.log_lock);
 	cxlds->dev = dev;
+	INIT_LIST_HEAD(&cxlds->qos_list);
+	cxlds->ram_qtg_id = -1;
+	cxlds->pmem_qtg_id = -1;
 
 	return cxlds;
 }
@@ -5,6 +5,7 @@
 #include <uapi/linux/cxl_mem.h>
 #include <linux/cdev.h>
 #include <linux/uuid.h>
+#include <linux/node.h>
 #include "cxl.h"
 
 /* CXL 2.0 8.2.8.5.1.1 Memory Device Status Register */
@@ -40,6 +41,7 @@
  * @cxl_nvd: optional bridge to an nvdimm if the device supports pmem
  * @id: id number of this memdev instance.
  * @depth: endpoint port depth
+ * @qos_list: list of QTG ID and DPA range entries
  */
 struct cxl_memdev {
 	struct device dev;
@@ -50,6 +52,7 @@ struct cxl_memdev {
 	struct cxl_nvdimm *cxl_nvd;
 	int id;
 	int depth;
+	struct list_head qos_list;
 };
 
 static inline struct cxl_memdev *to_cxl_memdev(struct device *dev)
@@ -215,6 +218,20 @@ struct cxl_event_state {
 	struct mutex log_lock;
 };
 
+/**
+ * struct qos_prop_entry - QoS property entry
+ * @list: list entry
+ * @dpa_range: DPA address range covered by the entry
+ * @qtg_id: QoS Throttling Group ID
+ * @hmem_attrs: performance attributes of the DPA range
+ */
+struct qos_prop_entry {
+	struct list_head list;
+	struct range dpa_range;
+	u16 qtg_id;
+	struct node_hmem_attrs hmem_attrs;
+};
+
 /**
  * struct cxl_dev_state - The driver device state
  *
@@ -283,6 +300,10 @@ struct cxl_dev_state {
 	u64 next_volatile_bytes;
 	u64 next_persistent_bytes;
 
+	int ram_qtg_id;
+	int pmem_qtg_id;
+	struct list_head qos_list;
+
 	resource_size_t component_reg_phys;
 	u64 serial;
@@ -124,6 +124,40 @@ static int cxl_port_qos_calculate(struct cxl_port *port,
 	return 0;
 }
 
+static void cxl_memdev_set_qtg(struct cxl_dev_state *cxlds, struct list_head *dsmas_list)
+{
+	struct range pmem_range = {
+		.start = cxlds->pmem_res.start,
+		.end = cxlds->pmem_res.end,
+	};
+	struct range ram_range = {
+		.start = cxlds->ram_res.start,
+		.end = cxlds->ram_res.end,
+	};
+	struct qos_prop_entry *qos;
+	struct dsmas_entry *dent;
+
+	list_for_each_entry(dent, dsmas_list, list) {
+		qos = devm_kzalloc(cxlds->dev, sizeof(*qos), GFP_KERNEL);
+		if (!qos)
+			return;
+
+		qos->dpa_range = dent->dpa_range;
+		qos->qtg_id = dent->qtg_id;
+		qos->hmem_attrs = dent->hmem_attrs;
+		list_add_tail(&qos->list, &cxlds->qos_list);
+
+		if (resource_size(&cxlds->ram_res) &&
+		    range_contains(&ram_range, &dent->dpa_range) &&
+		    cxlds->ram_qtg_id == -1)
+			cxlds->ram_qtg_id = dent->qtg_id;
+		else if (resource_size(&cxlds->pmem_res) &&
+			 range_contains(&pmem_range, &dent->dpa_range) &&
+			 cxlds->pmem_qtg_id == -1)
+			cxlds->pmem_qtg_id = dent->qtg_id;
+	}
+}
+
 static int cxl_switch_port_probe(struct cxl_port *port)
 {
 	struct cxl_hdm *cxlhdm;
@@ -234,6 +268,7 @@ static int cxl_endpoint_port_probe(struct cxl_port *port)
 		if (rc)
 			dev_dbg(&port->dev, "Failed to do QoS calculations\n");
 
+		cxl_memdev_set_qtg(cxlds, &dsmas_list);
 		dsmas_list_destroy(&dsmas_list);
 	}
Once the QTG ID _DSM is executed successfully, the QTG ID is retrieved from
the return package. Create a list of entries in the cxl_memdev context and
store the QTG ID and the associated DPA range. This information can be
exposed to user space via sysfs in order to help region setup for
hot-plugged CXL memory devices.

Signed-off-by: Dave Jiang <dave.jiang@intel.com>

---
v3:
- Move back to QTG ID per partition
---
 drivers/cxl/core/mbox.c |  3 +++
 drivers/cxl/cxlmem.h    | 21 +++++++++++++++++++++
 drivers/cxl/port.c      | 35 +++++++++++++++++++++++++++++++++++
 3 files changed, 59 insertions(+)
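To illustrate the sysfs exposure mentioned above (not part of this patch),
a follow-on per-partition attribute could look roughly like the sketch
below. The attribute name and its placement on the memdev device are
assumptions, not settled ABI:

static ssize_t ram_qtg_id_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;

	/* -1 is the sentinel set in cxl_dev_state_create() */
	if (cxlds->ram_qtg_id == -1)
		return -ENOENT;

	return sysfs_emit(buf, "%d\n", cxlds->ram_qtg_id);
}
static DEVICE_ATTR_RO(ram_qtg_id);

Userspace region tooling could then read the attribute (e.g. a
hypothetical /sys/bus/cxl/devices/memX/ram_qtg_id path) and match the
value against the QTG IDs that platform firmware assigns to CFMWS windows
when picking a root decoder for a new region.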