@@ -717,6 +717,7 @@ struct arm_smmu_option_prop {
};
static DEFINE_XARRAY_ALLOC1(asid_xa);
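+/*
+ * Serializes writes to the context descriptor tables, since
+ * arm_smmu_share_asid() may rewrite the CD of a live domain, possibly one
+ * owned by another SMMU.
+ */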
+static DEFINE_SPINLOCK(contexts_lock);
static struct arm_smmu_option_prop arm_smmu_options[] = {
{ ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
@@ -1513,6 +1514,17 @@ static int arm_smmu_cmdq_batch_submit(struct arm_smmu_device *smmu,
}
/* Context descriptor manipulation functions */
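+/*
+ * Invalidate all TLB entries tagged with @asid, and wait for the invalidation
+ * to complete.
+ */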
+static void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid)
+{
+ struct arm_smmu_cmdq_ent cmd = {
+ .opcode = CMDQ_OP_TLBI_NH_ASID,
+ .tlbi.asid = asid,
+ };
+
+ arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+ arm_smmu_cmdq_issue_sync(smmu);
+}
+
static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
int ssid, bool leaf)
{
@@ -1547,7 +1559,7 @@ static int arm_smmu_alloc_cd_leaf_table(struct arm_smmu_device *smmu,
size_t size = CTXDESC_L2_ENTRIES * (CTXDESC_CD_DWORDS << 3);
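+	/* May be called while holding contexts_lock, hence the atomic allocation */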
l1_desc->l2ptr = dmam_alloc_coherent(smmu->dev, size,
- &l1_desc->l2ptr_dma, GFP_KERNEL);
+ &l1_desc->l2ptr_dma, GFP_ATOMIC);
if (!l1_desc->l2ptr) {
dev_warn(smmu->dev,
"failed to allocate context descriptor table\n");
@@ -1593,8 +1605,8 @@ static __le64 *arm_smmu_get_cd_ptr(struct arm_smmu_domain *smmu_domain,
return l1_desc->l2ptr + idx * CTXDESC_CD_DWORDS;
}
-static int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain,
- int ssid, struct arm_smmu_ctx_desc *cd)
+static int __arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain,
+ int ssid, struct arm_smmu_ctx_desc *cd)
{
/*
* This function handles the following cases:
@@ -1670,6 +1682,17 @@ static int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain,
return 0;
}
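+/*
+ * CD table writes must be serialized against arm_smmu_share_asid(), which can
+ * update the context tables of another SMMU. Hence the lock.
+ */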
+static int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain,
+ int ssid, struct arm_smmu_ctx_desc *cd)
+{
+ int ret;
+
+ spin_lock(&contexts_lock);
+ ret = __arm_smmu_write_ctx_desc(smmu_domain, ssid, cd);
+ spin_unlock(&contexts_lock);
+ return ret;
+}
+
static int arm_smmu_alloc_cd_tables(struct arm_smmu_domain *smmu_domain)
{
int ret;
@@ -1773,9 +1796,18 @@ static bool arm_smmu_free_asid(struct arm_smmu_ctx_desc *cd)
return free;
}
+/*
+ * Try to reserve this ASID in the SMMU. If it is in use, try to steal it from
+ * the private entry. Careful here, we may be modifying the context tables of
+ * another SMMU!
+ */
static struct arm_smmu_ctx_desc *arm_smmu_share_asid(u16 asid)
{
+ int ret;
+ u32 new_asid;
struct arm_smmu_ctx_desc *cd;
+ struct arm_smmu_device *smmu;
+ struct arm_smmu_domain *smmu_domain;
cd = xa_load(&asid_xa, asid);
if (!cd)
@@ -1791,11 +1823,31 @@ static struct arm_smmu_ctx_desc *arm_smmu_share_asid(u16 asid)
return cd;
}
+ smmu_domain = container_of(cd, struct arm_smmu_domain, s1_cfg.cd);
+ smmu = smmu_domain->smmu;
+
+ ret = __xa_alloc(&asid_xa, &new_asid, cd,
+ XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_ATOMIC);
+ if (ret)
+ return ERR_PTR(-ENOSPC);
+
+ /*
+ * Race with unmap: TLB invalidations will start targeting the new ASID,
+ * which isn't assigned yet. We'll do an invalidate-all on the old ASID
+ * later, so it doesn't matter.
+ */
+ cd->asid = new_asid;
+
/*
- * Ouch, ASID is already in use for a private cd.
- * TODO: seize it.
+ * Update ASID and invalidate CD in all associated masters. There will
+ * be some overlap between use of both ASIDs, until we invalidate the
+ * TLB.
*/
- return ERR_PTR(-EEXIST);
+ arm_smmu_write_ctx_desc(smmu_domain, 0, cd);
+
+ /* Invalidate TLB entries previously associated with that context */
+ arm_smmu_tlb_inv_asid(smmu, asid);
+
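+	/* The old ASID is now free for the caller to reserve */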
+ return NULL;
}
__maybe_unused
@@ -2389,15 +2441,6 @@ static void arm_smmu_tlb_inv_context(void *cookie)
struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_cmdq_ent cmd;
- if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
- cmd.opcode = CMDQ_OP_TLBI_NH_ASID;
- cmd.tlbi.asid = smmu_domain->s1_cfg.cd.asid;
- cmd.tlbi.vmid = 0;
- } else {
- cmd.opcode = CMDQ_OP_TLBI_S12_VMALL;
- cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
- }
-
/*
* NOTE: when io-pgtable is in non-strict mode, we may get here with
* PTEs previously cleared by unmaps on the current CPU not yet visible
@@ -2405,8 +2448,14 @@ static void arm_smmu_tlb_inv_context(void *cookie)
* insertion to guarantee those are observed before the TLBI. Do be
* careful, 007.
*/
- arm_smmu_cmdq_issue_cmd(smmu, &cmd);
- arm_smmu_cmdq_issue_sync(smmu);
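+	/* Stage-1 domains invalidate by ASID, stage-2 domains by VMID */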
+ if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
+ arm_smmu_tlb_inv_asid(smmu, smmu_domain->s1_cfg.cd.asid);
+ } else {
+ cmd.opcode = CMDQ_OP_TLBI_S12_VMALL;
+ cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
+ arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+ arm_smmu_cmdq_issue_sync(smmu);
+ }
arm_smmu_atc_inv_domain(smmu_domain, 0, 0, 0);
}