@@ -117,10 +117,6 @@ config CXL_PORT
default CXL_BUS
tristate
-config CXL_SUSPEND
- def_bool y
- depends on SUSPEND && CXL_MEM
-
config CXL_REGION
bool "CXL: Region Support"
default CXL_BUS
@@ -7,6 +7,8 @@
#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/node.h>
+#include <linux/pm.h>
+#include <linux/workqueue.h>
#include <asm/div64.h>
#include "cxlpci.h"
#include "cxl.h"
@@ -813,6 +815,27 @@ static int pair_cxl_resource(struct device *dev, void *data)
return 0;
}
+static void cxl_srmem_work_fn(struct work_struct *work)
+{
+ /* Wait for CXL PCI and mem drivers to load */
+ cxl_wait_for_pci_mem();
+
+ /*
+ * Once the CXL PCI and mem drivers have loaded wait
+ * for the driver probe routines to complete.
+ */
+ wait_for_device_probe();
+
+ cxl_region_srmem_update();
+}
+
+static DECLARE_WORK(cxl_sr_work, cxl_srmem_work_fn);
+
+static void cxl_srmem_update(void)
+{
+ schedule_work(&cxl_sr_work);
+}
+
static int cxl_acpi_probe(struct platform_device *pdev)
{
int rc;
@@ -887,6 +910,10 @@ static int cxl_acpi_probe(struct platform_device *pdev)
/* In case PCI is scanned before ACPI re-trigger memdev attach */
cxl_bus_rescan();
+
+ /* Update SOFT RESERVED resources that intersect with CXL regions */
+ cxl_srmem_update();
+
return 0;
}
@@ -918,6 +945,7 @@ static int __init cxl_acpi_init(void)
static void __exit cxl_acpi_exit(void)
{
+ cancel_work_sync(&cxl_sr_work);
platform_driver_unregister(&cxl_acpi_driver);
cxl_bus_drain();
}
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_CXL_BUS) += cxl_core.o
-obj-$(CONFIG_CXL_SUSPEND) += suspend.o
+obj-y += suspend.o
ccflags-y += -I$(srctree)/drivers/cxl
CFLAGS_trace.o = -DTRACE_INCLUDE_PATH=. -I$(src)
@@ -10,6 +10,7 @@
#include <linux/sort.h>
#include <linux/idr.h>
#include <linux/memory-tiers.h>
+#include <linux/ioport.h>
#include <cxlmem.h>
#include <cxl.h>
#include "core.h"
@@ -2333,7 +2334,7 @@ const struct device_type cxl_region_type = {
bool is_cxl_region(struct device *dev)
{
- return dev->type == &cxl_region_type;
+ return dev && dev->type == &cxl_region_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_region, "CXL");
@@ -3443,6 +3444,30 @@ int cxl_add_to_region(struct cxl_port *root, struct cxl_endpoint_decoder *cxled)
}
EXPORT_SYMBOL_NS_GPL(cxl_add_to_region, "CXL");
+int cxl_region_srmem_update(void)
+{
+	struct device *dev, *next;
+	struct cxl_region *cxlr;
+	struct resource *res;
+
+	dev = bus_find_next_device(&cxl_bus_type, NULL);
+	while (dev) {
+		if (is_cxl_region(dev)) {
+			cxlr = to_cxl_region(dev);
+			res = cxlr->params.res;
+			release_srmem_region_adjustable(res->start,
+					resource_size(res));
+		}
+		/* fetch the next device before dropping our reference */
+		next = bus_find_next_device(&cxl_bus_type, dev);
+		put_device(dev);
+		dev = next;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_region_srmem_update, "CXL");
+
u64 cxl_port_get_spa_cache_alias(struct cxl_port *endpoint, u64 spa)
{
struct cxl_region_ref *iter;
@@ -2,9 +2,14 @@
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/atomic.h>
#include <linux/export.h>
+#include <linux/wait.h>
#include "cxlmem.h"
+#include "cxlpci.h"
static atomic_t mem_active;
+static atomic_t pci_loaded;
+
+static DECLARE_WAIT_QUEUE_HEAD(cxl_wait_queue);
bool cxl_mem_active(void)
{
@@ -14,6 +19,7 @@ bool cxl_mem_active(void)
void cxl_mem_active_inc(void)
{
atomic_inc(&mem_active);
+ wake_up(&cxl_wait_queue);
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_active_inc, "CXL");
@@ -22,3 +28,38 @@ void cxl_mem_active_dec(void)
atomic_dec(&mem_active);
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_active_dec, "CXL");
+
+void mark_cxl_pci_loaded(void)
+{
+ atomic_inc(&pci_loaded);
+ wake_up(&cxl_wait_queue);
+}
+EXPORT_SYMBOL_NS_GPL(mark_cxl_pci_loaded, "CXL");
+
+static bool cxl_pci_loaded(void)
+{
+ if (IS_ENABLED(CONFIG_CXL_PCI))
+ return atomic_read(&pci_loaded) != 0;
+
+ return true;
+}
+
+static bool cxl_mem_probed(void)
+{
+ if (IS_ENABLED(CONFIG_CXL_MEM))
+ return atomic_read(&mem_active) != 0;
+
+ return true;
+}
+
+void cxl_wait_for_pci_mem(void)
+{
+ if (IS_ENABLED(CONFIG_CXL_PCI) || IS_ENABLED(CONFIG_CXL_MEM))
+		if (!wait_event_timeout(cxl_wait_queue,
+				cxl_pci_loaded() && cxl_mem_probed(),
+				30 * HZ)) {
+			pr_debug("Timeout waiting for CXL PCI or CXL Memory probing\n");
+		}
+
+}
+EXPORT_SYMBOL_NS_GPL(cxl_wait_for_pci_mem, "CXL");
@@ -858,6 +858,7 @@ bool is_cxl_pmem_region(struct device *dev);
struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev);
int cxl_add_to_region(struct cxl_port *root,
struct cxl_endpoint_decoder *cxled);
+int cxl_region_srmem_update(void);
struct cxl_dax_region *to_cxl_dax_region(struct device *dev);
u64 cxl_port_get_spa_cache_alias(struct cxl_port *endpoint, u64 spa);
#else
@@ -902,6 +903,8 @@ void cxl_coordinates_combine(struct access_coordinate *out,
bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port);
+void cxl_wait_for_pci_mem(void);
+
/*
* Unit test builds overrides this to __weak, find the 'strong' version
* of these symbols in tools/testing/cxl/.
@@ -853,17 +853,8 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd);
int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa);
int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa);
-#ifdef CONFIG_CXL_SUSPEND
void cxl_mem_active_inc(void);
void cxl_mem_active_dec(void);
-#else
-static inline void cxl_mem_active_inc(void)
-{
-}
-static inline void cxl_mem_active_dec(void)
-{
-}
-#endif
int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd);
@@ -135,4 +135,5 @@ void read_cdat_data(struct cxl_port *port);
void cxl_cor_error_detected(struct pci_dev *pdev);
pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
pci_channel_state_t state);
+void mark_cxl_pci_loaded(void);
#endif /* __CXL_PCI_H__ */
@@ -1185,6 +1185,8 @@ static int __init cxl_pci_driver_init(void)
if (rc)
pci_unregister_driver(&cxl_pci_driver);
+	else
+		mark_cxl_pci_loaded();
return rc;
}
@@ -35,14 +35,7 @@ static inline void pm_vt_switch_unregister(struct device *dev)
}
#endif /* CONFIG_VT_CONSOLE_SLEEP */
-#ifdef CONFIG_CXL_SUSPEND
bool cxl_mem_active(void);
-#else
-static inline bool cxl_mem_active(void)
-{
- return false;
-}
-#endif
/*
* Device power management