iommu: arm-smmu: Merge for msm-kona kernel upgrade
Merge all iommu/smmu changes from msm-4.14 to msm-kona
as of:
'commit 681d7197c96a ("iommu/arm-smmu: ignore target
specific initialization")'
Change-Id: I4452531c17fe653a282e8923851eba3deae0e9b0
Signed-off-by: Swathi Sridhar <swatsrid@codeaurora.org>
@@ -29,6 +29,7 @@
 #include <linux/arm-smccc.h>
+#include <linux/kprobes.h>

 #include <asm/cacheflush.h>
 #include <asm/checksum.h>

 EXPORT_SYMBOL(copy_page);
@@ -83,3 +84,8 @@ extern long long __ashrti3(long long a, int b);
 EXPORT_SYMBOL(__ashrti3);
 extern long long __lshrti3(long long a, int b);
 EXPORT_SYMBOL(__lshrti3);
+
+/* caching functions */
+EXPORT_SYMBOL(__dma_inv_area);
+EXPORT_SYMBOL(__dma_clean_area);
+EXPORT_SYMBOL(__dma_flush_area);
@@ -931,12 +931,17 @@ static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
                 unsigned long attrs)
 {
         bool coherent = is_dma_coherent(dev, attrs);
+        int ret;
+
+        ret = iommu_dma_map_sg(dev, sgl, nelems,
+                        dma_info_to_prot(dir, coherent, attrs));
+        if (!ret)
+                return ret;

         if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                 __iommu_sync_sg_for_device(dev, sgl, nelems, dir);

-        return iommu_dma_map_sg(dev, sgl, nelems,
-                        dma_info_to_prot(dir, coherent, attrs));
+        return ret;
 }

 static void __iommu_unmap_sg_attrs(struct device *dev,
@@ -60,7 +60,7 @@ config IOMMU_IO_PGTABLE_ARMV7S_SELFTEST

 config IOMMU_IO_PGTABLE_FAST
         bool "Fast ARMv7/v8 Long Descriptor Format"
         select IOMMU_IO_PGTABLE
         depends on ARM64_DMA_USE_IOMMU || ARM_DMA_USE_IOMMU
         help
           Enable support for a subset of the ARM long descriptor pagetable
           format. This allocator achieves fast performance by
@@ -9,6 +9,7 @@ obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o
 obj-$(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) += io-pgtable-arm-v7s.o
 obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
 obj-$(CONFIG_IOMMU_IOVA) += iova.o
+obj-$(CONFIG_MSM_TZ_SMMU) += io-pgtable-msm-secure.o
 obj-$(CONFIG_IOMMU_IO_PGTABLE_FAST) += io-pgtable-fast.o dma-mapping-fast.o
 obj-$(CONFIG_OF_IOMMU) += of_iommu.o
 obj-$(CONFIG_IOMMU_DEBUG) += iommu-debug.o
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
 */

 #include <linux/kernel.h>
@@ -32,7 +32,7 @@ struct page *arm_smmu_errata_get_guard_page(int vmid)
         ret = hyp_assign_phys(page_to_phys(page), PAGE_ALIGN(size),
                         &source_vm, 1,
                         &dest_vm, &dest_perm, 1);
-        if (ret) {
+        if (ret && (ret != -EIO)) {
                 __free_pages(page, get_order(size));
                 page = NULL;
         }
@@ -53,6 +53,7 @@
 #include <soc/qcom/secure_buffer.h>
 #include <linux/of_platform.h>
 #include <linux/msm-bus.h>
+#include <trace/events/iommu.h>
 #include <dt-bindings/msm/msm-bus-ids.h>

 #include <linux/amba/bus.h>
@@ -247,6 +248,7 @@ struct arm_smmu_device {
 #define ARM_SMMU_OPT_NO_ASID_RETENTION (1 << 5)
 #define ARM_SMMU_OPT_STATIC_CB (1 << 6)
 #define ARM_SMMU_OPT_DISABLE_ATOS (1 << 7)
+#define ARM_SMMU_OPT_MIN_IOVA_ALIGN (1 << 8)
         u32 options;
         enum arm_smmu_arch_version version;
         enum arm_smmu_implementation model;
@@ -362,6 +364,8 @@ struct arm_smmu_domain {
         struct list_head unassign_list;
         struct mutex assign_lock;
         struct list_head secure_pool_list;
+        /* nonsecure pool protected by pgtbl_lock */
+        struct list_head nonsecure_pool;
         struct iommu_domain domain;
         bool qsmmuv500_errata1_min_iova_align;
 };
@@ -384,6 +388,7 @@ static struct arm_smmu_option_prop arm_smmu_options[] = {
         { ARM_SMMU_OPT_NO_ASID_RETENTION, "qcom,no-asid-retention" },
         { ARM_SMMU_OPT_STATIC_CB, "qcom,enable-static-cb"},
         { ARM_SMMU_OPT_DISABLE_ATOS, "qcom,disable-atos" },
+        { ARM_SMMU_OPT_MIN_IOVA_ALIGN, "qcom,min-iova-align" },
         { 0, NULL},
 };
@@ -961,7 +966,7 @@ static void arm_smmu_domain_power_off(struct iommu_domain *domain,
 }

 /* Wait for any pending TLB invalidations to complete */
-static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
+static int __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
                                 void __iomem *sync, void __iomem *status)
 {
         unsigned int spin_cnt, delay;
@@ -970,13 +975,15 @@ static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
         for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
                 for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
                         if (!(readl_relaxed(status) & sTLBGSTATUS_GSACTIVE))
-                                return;
+                                return 0;
                         cpu_relax();
                 }
                 udelay(delay);
         }
+        trace_tlbsync_timeout(smmu->dev, 0);
         dev_err_ratelimited(smmu->dev,
                         "TLB sync timed out -- SMMU may be deadlocked\n");
+        return -EINVAL;
 }

 static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
@@ -985,8 +992,10 @@ static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
         unsigned long flags;

         spin_lock_irqsave(&smmu->global_sync_lock, flags);
-        __arm_smmu_tlb_sync(smmu, base + ARM_SMMU_GR0_sTLBGSYNC,
-                        base + ARM_SMMU_GR0_sTLBGSTATUS);
+        if (__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_GR0_sTLBGSYNC,
+                        base + ARM_SMMU_GR0_sTLBGSTATUS))
+                dev_err_ratelimited(smmu->dev,
+                                "TLB global sync failed!\n");
         spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
 }

@@ -998,8 +1007,12 @@ static void arm_smmu_tlb_sync_context(void *cookie)
         unsigned long flags;

         spin_lock_irqsave(&smmu_domain->sync_lock, flags);
-        __arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC,
-                        base + ARM_SMMU_CB_TLBSTATUS);
+        if (__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC,
+                        base + ARM_SMMU_CB_TLBSTATUS))
+                dev_err_ratelimited(smmu->dev,
+                                "TLB sync on cb%d failed for device %s\n",
+                                smmu_domain->cfg.cbndx,
+                                dev_name(smmu_domain->dev));
         spin_unlock_irqrestore(&smmu_domain->sync_lock, flags);
 }
@@ -1013,10 +1026,14 @@ static void arm_smmu_tlb_sync_vmid(void *cookie)
 static void arm_smmu_tlb_inv_context_s1(void *cookie)
 {
         struct arm_smmu_domain *smmu_domain = cookie;
+        struct device *dev = smmu_domain->dev;
         struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
         struct arm_smmu_device *smmu = smmu_domain->smmu;
         void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);
         bool use_tlbiall = smmu->options & ARM_SMMU_OPT_NO_ASID_RETENTION;
+        ktime_t cur = ktime_get();
+
+        trace_tlbi_start(dev, 0);

         if (!use_tlbiall)
                 writel_relaxed(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
@@ -1024,6 +1041,7 @@ static void arm_smmu_tlb_inv_context_s1(void *cookie)
                 writel_relaxed(0, base + ARM_SMMU_CB_S1_TLBIALL);

         arm_smmu_tlb_sync_context(cookie);
+        trace_tlbi_end(dev, ktime_us_delta(ktime_get(), cur));
 }

 static void arm_smmu_tlb_inv_context_s2(void *cookie)
@@ -1141,6 +1159,7 @@ static void arm_smmu_secure_pool_destroy(struct arm_smmu_domain *smmu_domain)
         list_for_each_entry_safe(it, i, &smmu_domain->secure_pool_list, list) {
                 arm_smmu_unprepare_pgtable(smmu_domain, it->addr, it->size);
+                /* pages will be freed later (after being unassigned) */
                 list_del(&it->list);
                 kfree(it);
         }
 }
@@ -1152,8 +1171,19 @@ static void *arm_smmu_alloc_pages_exact(void *cookie,
         void *page;
         struct arm_smmu_domain *smmu_domain = cookie;

-        if (!arm_smmu_is_master_side_secure(smmu_domain))
+        if (!arm_smmu_is_master_side_secure(smmu_domain)) {
+                struct page *pg;
+                /* size is expected to be 4K with current configuration */
+                if (size == PAGE_SIZE) {
+                        pg = list_first_entry_or_null(
+                                &smmu_domain->nonsecure_pool, struct page, lru);
+                        if (pg) {
+                                list_del_init(&pg->lru);
+                                return page_address(pg);
+                        }
+                }
                 return alloc_pages_exact(size, gfp_mask);
+        }

         page = arm_smmu_secure_pool_remove(smmu_domain, size);
         if (page)
@@ -1254,7 +1284,7 @@ static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain,
 static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
 {
         int flags, ret, tmp;
-        u32 fsr, fsynr, resume;
+        u32 fsr, fsynr0, fsynr1, frsynra, resume;
         unsigned long iova;
         struct iommu_domain *domain = dev;
         struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
@@ -1264,7 +1294,7 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
         void __iomem *gr1_base;
         bool fatal_asf = smmu->options & ARM_SMMU_OPT_FATAL_ASF;
         phys_addr_t phys_soft;
-        u32 frsynra;
+        uint64_t pte;
         bool non_fatal_fault = !!(smmu_domain->attributes &
                         (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));

@@ -1291,8 +1321,9 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
                 BUG();
         }

-        fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
-        flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
+        fsynr0 = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
+        fsynr1 = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR1);
+        flags = fsynr0 & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
         if (fsr & FSR_TF)
                 flags |= IOMMU_FAULT_TRANSLATION;
         if (fsr & FSR_PF)
@@ -1309,8 +1340,8 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
         tmp = report_iommu_fault(domain, smmu->dev, iova, flags);
         if (!tmp || (tmp == -EBUSY)) {
                 dev_dbg(smmu->dev,
-                        "Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
-                        iova, fsr, fsynr, cfg->cbndx);
+                        "Context fault handled by client: iova=0x%08lx, cb=%d, fsr=0x%x, fsynr0=0x%x, fsynr1=0x%x\n",
+                        iova, cfg->cbndx, fsr, fsynr0, fsynr1);
                 dev_dbg(smmu->dev,
                         "soft iova-to-phys=%pa\n", &phys_soft);
                 ret = IRQ_HANDLED;
@@ -1320,20 +1351,23 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
                         fsr);
                 if (__ratelimit(&_rs)) {
                         dev_err(smmu->dev,
-                                "Unhandled context fault: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
-                                iova, fsr, fsynr, cfg->cbndx);
+                                "Unhandled context fault: iova=0x%08lx, cb=%d, fsr=0x%x, fsynr0=0x%x, fsynr1=0x%x\n",
+                                iova, cfg->cbndx, fsr, fsynr0, fsynr1);
                         dev_err(smmu->dev, "FAR = %016lx\n",
                                 (unsigned long)iova);
                         dev_err(smmu->dev,
-                                "FSR = %08x [%s%s%s%s%s%s%s%s%s]\n",
+                                "FSR = %08x [%s%s%s%s%s%s%s%s%s%s]\n",
                                 fsr,
-                                (fsr & 0x02) ? "TF " : "",
+                                (fsr & 0x02) ? (fsynr0 & 0x10 ?
+                                                "TF W " : "TF R ") : "",
                                 (fsr & 0x04) ? "AFF " : "",
-                                (fsr & 0x08) ? "PF " : "",
+                                (fsr & 0x08) ? (fsynr0 & 0x10 ?
+                                                "PF W " : "PF R ") : "",
                                 (fsr & 0x10) ? "EF " : "",
                                 (fsr & 0x20) ? "TLBMCF " : "",
                                 (fsr & 0x40) ? "TLBLKF " : "",
                                 (fsr & 0x80) ? "MHF " : "",
+                                (fsr & 0x100) ? "UUT " : "",
                                 (fsr & 0x40000000) ? "SS " : "",
                                 (fsr & 0x80000000) ? "MULTI " : "");
                         dev_err(smmu->dev,
@@ -1342,6 +1376,10 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
                         dev_err(smmu->dev,
                                 "SOFTWARE TABLE WALK FAILED! Looks like %s accessed an unmapped address!\n",
                                 dev_name(smmu->dev));
+                else {
+                        pte = arm_smmu_iova_to_pte(domain, iova);
+                        dev_err(smmu->dev, "PTE = %016llx\n", pte);
+                }
                 if (phys_atos)
                         dev_err(smmu->dev, "hard iova-to-phys (ATOS)=%pa\n",
                                 &phys_atos);
@@ -1591,6 +1629,9 @@ static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx,
                         reg |= SCTLR_HUPCF;
         }

+        if (attributes & (1 << DOMAIN_ATTR_NO_CFRE))
+                reg &= ~SCTLR_CFRE;
+
         if ((!(attributes & (1 << DOMAIN_ATTR_S1_BYPASS)) &&
              !(attributes & (1 << DOMAIN_ATTR_EARLY_MAP))) || !stage1)
                 reg |= SCTLR_M;
@@ -1796,12 +1837,6 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
                 goto out_unlock;

         cfg->cbndx = ret;
-        if (smmu->version < ARM_SMMU_V2) {
-                cfg->irptndx = atomic_inc_return(&smmu->irptndx);
-                cfg->irptndx %= smmu->num_context_irqs;
-        } else {
-                cfg->irptndx = cfg->cbndx;
-        }

         if (arm_smmu_is_slave_side_secure(smmu_domain)) {
                 smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
@@ -1870,6 +1905,12 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
         if (ret)
                 goto out_clear_smmu;

+        if (smmu->version < ARM_SMMU_V2) {
+                cfg->irptndx = atomic_inc_return(&smmu->irptndx);
+                cfg->irptndx %= smmu->num_context_irqs;
+        } else {
+                cfg->irptndx = cfg->cbndx;
+        }

         /*
          * Request context fault interrupt. Do this last to avoid the
@@ -2000,6 +2041,7 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
         INIT_LIST_HEAD(&smmu_domain->unassign_list);
         mutex_init(&smmu_domain->assign_lock);
         INIT_LIST_HEAD(&smmu_domain->secure_pool_list);
+        INIT_LIST_HEAD(&smmu_domain->nonsecure_pool);
         arm_smmu_domain_reinit(smmu_domain);

         return &smmu_domain->domain;
@@ -2424,6 +2466,60 @@ static int arm_smmu_prepare_pgtable(void *addr, void *cookie)
         return 0;
 }

+static void arm_smmu_prealloc_memory(struct arm_smmu_domain *smmu_domain,
+                size_t size, struct list_head *pool)
+{
+        int i;
+        u32 nr = 0;
+        struct page *page;
+
+        if ((smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC)) ||
+                        arm_smmu_has_secure_vmid(smmu_domain))
+                return;
+
+        /* number of 2nd level pagetable entries */
+        nr += round_up(size, SZ_1G) >> 30;
+        /* number of 3rd level pagetabel entries */
+        nr += round_up(size, SZ_2M) >> 21;
+
+        /* Retry later with atomic allocation on error */
+        for (i = 0; i < nr; i++) {
+                page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
+                if (!page)
+                        break;
+                list_add(&page->lru, pool);
+        }
+}
+
+static void arm_smmu_prealloc_memory_sg(struct arm_smmu_domain *smmu_domain,
+                struct scatterlist *sgl, int nents,
+                struct list_head *pool)
+{
+        int i;
+        size_t size = 0;
+        struct scatterlist *sg;
+
+        if ((smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC)) ||
+                        arm_smmu_has_secure_vmid(smmu_domain))
+                return;
+
+        for_each_sg(sgl, sg, nents, i)
+                size += sg->length;
+
+        arm_smmu_prealloc_memory(smmu_domain, size, pool);
+}
+
+static void arm_smmu_release_prealloc_memory(
+                struct arm_smmu_domain *smmu_domain, struct list_head *list)
+{
+        struct page *page, *tmp;
+
+        list_for_each_entry_safe(page, tmp, list, lru) {
+                list_del(&page->lru);
+                __free_pages(page, 0);
+        }
+}
+
 static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 {
         int ret;
@@ -2502,6 +2598,7 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
         unsigned long flags;
         struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
         struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
+        LIST_HEAD(nonsecure_pool);

         if (!ops)
                 return -ENODEV;
@@ -2509,15 +2606,19 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
         if (arm_smmu_is_slave_side_secure(smmu_domain))
                 return msm_secure_smmu_map(domain, iova, paddr, size, prot);

+        arm_smmu_prealloc_memory(smmu_domain, size, &nonsecure_pool);
         arm_smmu_secure_domain_lock(smmu_domain);

         spin_lock_irqsave(&smmu_domain->cb_lock, flags);
+        list_splice_init(&nonsecure_pool, &smmu_domain->nonsecure_pool);
         ret = ops->map(ops, iova, paddr, size, prot);
+        list_splice_init(&smmu_domain->nonsecure_pool, &nonsecure_pool);
         spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);

         arm_smmu_assign_table(smmu_domain);
         arm_smmu_secure_domain_unlock(smmu_domain);

+        arm_smmu_release_prealloc_memory(smmu_domain, &nonsecure_pool);
         return ret;
 }
@@ -2595,6 +2696,7 @@ static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
         unsigned int idx_start, idx_end;
         struct scatterlist *sg_start, *sg_end;
         unsigned long __saved_iova_start;
+        LIST_HEAD(nonsecure_pool);

         if (!ops)
                 return -ENODEV;
@@ -2602,9 +2704,8 @@ static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
         if (arm_smmu_is_slave_side_secure(smmu_domain))
                 return msm_secure_smmu_map_sg(domain, iova, sg, nents, prot);

-        ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
-        if (ret)
-                return ret;
+        arm_smmu_prealloc_memory_sg(smmu_domain, sg, nents, &nonsecure_pool);
+        arm_smmu_secure_domain_lock(smmu_domain);

         __saved_iova_start = iova;
         idx_start = idx_end = 0;
@@ -2622,9 +2723,12 @@ static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
                 }

                 spin_lock_irqsave(&smmu_domain->cb_lock, flags);
+                list_splice_init(&nonsecure_pool, &smmu_domain->nonsecure_pool);
                 ret = ops->map_sg(ops, iova, sg_start, idx_end - idx_start,
                                 prot, &size);
+                list_splice_init(&smmu_domain->nonsecure_pool, &nonsecure_pool);
                 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);

                 /* Returns 0 on error */
                 if (!ret) {
                         size_to_unmap = iova + size - __saved_iova_start;
@@ -2643,7 +2747,8 @@ static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
                 arm_smmu_unmap(domain, __saved_iova_start, size_to_unmap);
                 iova = __saved_iova_start;
         }
-        arm_smmu_domain_power_off(domain, smmu_domain->smmu);
+        arm_smmu_secure_domain_unlock(smmu_domain);
+        arm_smmu_release_prealloc_memory(smmu_domain, &nonsecure_pool);
         return iova - __saved_iova_start;
 }
@@ -3094,6 +3199,11 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
                                 & (1 << DOMAIN_ATTR_CB_STALL_DISABLE));
                 ret = 0;
                 break;
+        case DOMAIN_ATTR_NO_CFRE:
+                *((int *)data) = !!(smmu_domain->attributes
+                                & (1 << DOMAIN_ATTR_NO_CFRE));
+                ret = 0;
+                break;
         case DOMAIN_ATTR_QCOM_MMU500_ERRATA_MIN_IOVA_ALIGN:
                 *((int *)data) = smmu_domain->qsmmuv500_errata1_min_iova_align;
                 ret = 0;
@@ -3238,16 +3348,6 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
                 ret = 0;
                 break;
         case DOMAIN_ATTR_USE_UPSTREAM_HINT:
-                /* can't be changed while attached */
-                if (smmu_domain->smmu != NULL) {
-                        ret = -EBUSY;
-                        break;
-                }
-                if (*((int *)data))
-                        smmu_domain->attributes |=
-                                1 << DOMAIN_ATTR_USE_UPSTREAM_HINT;
-                ret = 0;
-                break;
         case DOMAIN_ATTR_USE_LLC_NWA:
                 /* can't be changed while attached */
                 if (smmu_domain->smmu != NULL) {
@@ -3256,7 +3356,7 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
                 }
                 if (*((int *)data))
                         smmu_domain->attributes |=
-                                1 << DOMAIN_ATTR_USE_LLC_NWA;
+                                1 << attr;
                 ret = 0;
                 break;
         case DOMAIN_ATTR_EARLY_MAP: {
@@ -3278,9 +3378,11 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
                 break;
         }
         case DOMAIN_ATTR_BITMAP_IOVA_ALLOCATOR:
+        case DOMAIN_ATTR_CB_STALL_DISABLE:
+        case DOMAIN_ATTR_NO_CFRE:
                 if (*((int *)data))
                         smmu_domain->attributes |=
-                                1 << DOMAIN_ATTR_BITMAP_IOVA_ALLOCATOR;
+                                1 << attr;
                 ret = 0;
                 break;
         case DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT: {
@@ -3303,13 +3405,6 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
                 ret = 0;
                 break;
         }
-
-        case DOMAIN_ATTR_CB_STALL_DISABLE:
-                if (*((int *)data))
-                        smmu_domain->attributes |=
-                                1 << DOMAIN_ATTR_CB_STALL_DISABLE;
-                ret = 0;
-                break;
         default:
                 ret = -ENODEV;
         }
@@ -3723,7 +3818,7 @@ static void arm_smmu_context_bank_reset(struct arm_smmu_device *smmu)
         for (i = 0; i < smmu->num_context_banks; ++i) {
                 cb_base = ARM_SMMU_CB(smmu, i);

-                writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
+                arm_smmu_write_context_bank(smmu, i, 0);
                 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
                 /*
                  * Disable MMU-500's not-particularly-beneficial next-page
@@ -4680,8 +4775,15 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
 static int __maybe_unused arm_smmu_pm_resume(struct device *dev)
 {
         struct arm_smmu_device *smmu = dev_get_drvdata(dev);
+        int ret;
+
+        ret = arm_smmu_power_on(smmu->pwr);
+        if (ret)
+                return ret;

         arm_smmu_device_reset(smmu);
+        arm_smmu_power_off(smmu->pwr);

         return 0;
 }

@@ -4692,6 +4794,7 @@ static struct platform_driver arm_smmu_driver = {
                 .name = "arm-smmu",
                 .of_match_table = of_match_ptr(arm_smmu_of_match),
+                .pm = &arm_smmu_pm_ops,
                 .suppress_bind_attrs = true,
         },
         .probe = arm_smmu_device_dt_probe,
         .remove = arm_smmu_device_remove,
@@ -4702,10 +4805,12 @@ static int __init arm_smmu_init(void)
 {
         static bool registered;
         int ret = 0;
+        ktime_t cur;

         if (registered)
                 return 0;

+        cur = ktime_get();
         ret = platform_driver_register(&qsmmuv500_tbu_driver);
         if (ret)
                 return ret;
@@ -4715,6 +4820,8 @@ static int __init arm_smmu_init(void)
         ret = register_iommu_sec_ptbl();
 #endif
         registered = !ret;
+        trace_smmu_init(ktime_us_delta(ktime_get(), cur));
+
         return ret;
 }
@@ -4820,6 +4927,12 @@ static int qsmmuv500_tbu_halt(struct qsmmuv500_tbu_device *tbu,
         u32 halt, fsr, sctlr_orig, sctlr, status;
         void __iomem *base, *cb_base;

+        if (of_property_read_bool(tbu->dev->of_node,
+                        "qcom,opt-out-tbu-halting")) {
+                dev_notice(tbu->dev, "TBU opted-out for halting!\n");
+                return -EBUSY;
+        }
+
         spin_lock_irqsave(&tbu->halt_lock, flags);
         if (tbu->halt_count) {
                 tbu->halt_count++;
@@ -5040,8 +5153,8 @@ static phys_addr_t qsmmuv500_iova_to_phys(
         val = readq_relaxed(tbu->base + DEBUG_PAR_REG);
         fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
         if (fsr & FSR_FAULT) {
-                dev_err(tbu->dev, "ECATS generated a fault interrupt! FSR = %llx\n",
-                        fsr);
+                dev_err(tbu->dev, "ECATS generated a fault interrupt! FSR = %llx, SID=0x%x\n",
+                        fsr, sid);

                 /* Clear pending interrupts */
                 writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
@@ -5184,8 +5297,9 @@ static void qsmmuv500_init_cb(struct arm_smmu_domain *smmu_domain,
          * Prefetch only works properly if the start and end of all
          * buffers in the page table are aligned to ARM_SMMU_MIN_IOVA_ALIGN.
          */
-        if ((iommudata->actlr >> QSMMUV500_ACTLR_DEEP_PREFETCH_SHIFT) &
-                        QSMMUV500_ACTLR_DEEP_PREFETCH_MASK)
+        if (((iommudata->actlr >> QSMMUV500_ACTLR_DEEP_PREFETCH_SHIFT) &
+                        QSMMUV500_ACTLR_DEEP_PREFETCH_MASK) &&
+                        (smmu->options & ARM_SMMU_OPT_MIN_IOVA_ALIGN))
                 smmu_domain->qsmmuv500_errata1_min_iova_align = true;

         /*
@@ -5275,6 +5389,9 @@ static int qsmmuv500_arch_init(struct arm_smmu_device *smmu)
         data->version = readl_relaxed(data->tcu_base + TCU_HW_VERSION_HLOS1);
         smmu->archdata = data;

+        if (arm_smmu_is_static_cb(smmu))
+                return 0;
+
         ret = qsmmuv500_read_actlr_tbl(smmu);
         if (ret)
                 return ret;
@@ -336,6 +336,7 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
                 return 0;
         }

+        iovad->end_pfn = end_pfn;
         init_iova_domain(iovad, 1UL << order, base_pfn);
         if (!dev)
                 return 0;
@@ -425,7 +426,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
          * rb_tree.
          */
         limit = min_t(dma_addr_t, DMA_BIT_MASK(32) >> shift,
-                        iovad->dma_32bit_pfn);
+                        iovad->end_pfn);

         /* Try to get PCI devices a SAC address */
         if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
@@ -433,7 +434,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,

         if (!iova) {
                 limit = min_t(dma_addr_t, dma_limit >> shift,
-                                iovad->dma_32bit_pfn);
+                                iovad->end_pfn);

                 iova = alloc_iova_fast(iovad, iova_len, limit, true);
         }
@@ -13,6 +13,7 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/pci.h>
+#include <trace/events/iommu.h>

 #include <soc/qcom/secure_buffer.h>
 #include <linux/arm-smmu-errata.h>
@@ -398,6 +399,8 @@ static dma_addr_t fast_smmu_map_page(struct device *dev, struct page *page,
         fast_dmac_clean_range(mapping, pmd, pmd + nptes);

         spin_unlock_irqrestore(&mapping->lock, flags);
+
+        trace_map(mapping->domain, iova, phys_to_map, len, prot);
         return iova + offset_from_phys_to_map;

 fail_free_iova:
@@ -429,6 +432,8 @@ static void fast_smmu_unmap_page(struct device *dev, dma_addr_t iova,
         fast_dmac_clean_range(mapping, pmd, pmd + nptes);
         __fast_smmu_free_iova(mapping, iova - offset, len);
         spin_unlock_irqrestore(&mapping->lock, flags);
+
+        trace_unmap(mapping->domain, iova - offset, len, len);
 }

 static void fast_smmu_sync_single_for_cpu(struct device *dev,
@@ -459,7 +464,8 @@ static int fast_smmu_map_sg(struct device *dev, struct scatterlist *sg,
                 int nents, enum dma_data_direction dir,
                 unsigned long attrs)
 {
-        return -EINVAL;
+        /* 0 indicates error */
+        return 0;
 }

 static void fast_smmu_unmap_sg(struct device *dev,
@@ -914,7 +920,7 @@ static int fast_smmu_errata_init(struct dma_iommu_mapping *mapping)
 int fast_smmu_init_mapping(struct device *dev,
                 struct dma_iommu_mapping *mapping)
 {
-        int err;
+        int err = 0;
         struct iommu_domain *domain = mapping->domain;
         struct iommu_pgtbl_info info;
         u64 size = (u64)mapping->bits << PAGE_SHIFT;
@@ -213,6 +213,7 @@ struct arm_lpae_io_pgtable {
         unsigned long bits_per_level;

         void *pgd;
+        void *pgd_ttbr1;
 };

 typedef u64 arm_lpae_iopte;
@@ -406,21 +407,6 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
                 BUG_ON(!suppress_map_failures);
                 return -EEXIST;
         }
-        if (iopte_leaf(pte, lvl)) {
-                WARN_ON(!selftest_running);
-                return -EEXIST;
-        } else if (iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
-                /*
-                 * We need to unmap and free the old table before
-                 * overwriting it with a block entry.
-                 */
-                arm_lpae_iopte *tblp;
-                size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
-
-                tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
-                if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz))
-                        return -EINVAL;
-        }

         __arm_lpae_init_pte(data, paddr, prot, lvl, ptep, flush);

@@ -754,6 +740,8 @@ static void arm_lpae_free_pgtable(struct io_pgtable *iop)
         struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

         __arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
+        __arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data),
+                        data->pgd_ttbr1);
         kfree(data);
 }

@@ -1219,14 +1207,22 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
         if (!data->pgd)
                 goto out_free_data;

+        data->pgd_ttbr1 = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL,
+                        cfg, cookie);
+        if (!data->pgd_ttbr1)
+                goto out_free_pgd;
+
         /* Ensure the empty pgd is visible before any actual TTBR write */
         wmb();

         /* TTBRs */
         cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
-        cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
+        cfg->arm_lpae_s1_cfg.ttbr[1] = virt_to_phys(data->pgd_ttbr1);
         return &data->iop;

+out_free_pgd:
+        __arm_lpae_free_pages(data->pgd, data->pgd_size, cfg, cookie);
+
 out_free_data:
         kfree(data);
         return NULL;
drivers/iommu/io-pgtable-msm-secure.c (new file, 353 lines)
@@ -0,0 +1,353 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "io-pgtable-msm-secure: " fmt
+
+#include <linux/iommu.h>
+#include <linux/kernel.h>
+#include <linux/scatterlist.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <soc/qcom/scm.h>
+#include <linux/dma-mapping.h>
+#include <asm/cacheflush.h>
+
+#include "io-pgtable.h"
+
+#define IOMMU_SECURE_PTBL_SIZE 3
+#define IOMMU_SECURE_PTBL_INIT 4
+#define IOMMU_SECURE_MAP2_FLAT 0x12
+#define IOMMU_SECURE_UNMAP2_FLAT 0x13
+#define IOMMU_TLBINVAL_FLAG 0x00000001
+
+#define io_pgtable_to_data(x) \
+        container_of((x), struct msm_secure_io_pgtable, iop)
+
+#define io_pgtable_ops_to_pgtable(x) \
+        container_of((x), struct io_pgtable, ops)
+
+#define io_pgtable_ops_to_data(x) \
+        io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
+
+struct msm_secure_io_pgtable {
+        struct io_pgtable iop;
+        /* lock required while operating on page tables */
+        struct mutex pgtbl_lock;
+};
+
+int msm_iommu_sec_pgtbl_init(void)
+{
+        int psize[2] = {0, 0};
+        unsigned int spare = 0;
+        int ret, ptbl_ret = 0;
+        struct device dev = {0};
+        void *cpu_addr;
+        dma_addr_t paddr;
+        unsigned long attrs = 0;
+
+        if (is_scm_armv8()) {
+                struct scm_desc desc = {0};
+
+                desc.args[0] = spare;
+                desc.arginfo = SCM_ARGS(1);
+                ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
+                                IOMMU_SECURE_PTBL_SIZE), &desc);
+                psize[0] = desc.ret[0];
+                psize[1] = desc.ret[1];
+                if (ret || psize[1]) {
+                        pr_err("scm call IOMMU_SECURE_PTBL_SIZE failed\n");
+                        return ret;
+                }
+        }
+
+        /* Now allocate memory for the secure page tables */
+        attrs = DMA_ATTR_NO_KERNEL_MAPPING;
+        dev.coherent_dma_mask = DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
+        arch_setup_dma_ops(&dev, 0, 0, NULL, 1);
+        cpu_addr = dma_alloc_attrs(&dev, psize[0], &paddr, GFP_KERNEL, attrs);
+        if (!cpu_addr) {
+                pr_err("%s: Failed to allocate %d bytes for PTBL\n",
+                                __func__, psize[0]);
+                return -ENOMEM;
+        }
+
+        if (is_scm_armv8()) {
+                struct scm_desc desc = {0};
+
+                desc.args[0] = paddr;
+                desc.args[1] = psize[0];
+                desc.args[2] = 0;
+                desc.arginfo = SCM_ARGS(3, SCM_RW, SCM_VAL, SCM_VAL);
+
+                ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
+                                IOMMU_SECURE_PTBL_INIT), &desc);
+                ptbl_ret = desc.ret[0];
+
+                if (ret) {
+                        pr_err("scm call IOMMU_SECURE_PTBL_INIT failed\n");
+                        return ret;
+                }
+
+                if (ptbl_ret) {
+                        pr_err("scm call IOMMU_SECURE_PTBL_INIT extended ret fail\n");
+                        return ret;
+                }
+        }
+
+        return 0;
+}
+EXPORT_SYMBOL(msm_iommu_sec_pgtbl_init);
+
+static int msm_secure_map(struct io_pgtable_ops *ops, unsigned long iova,
+                phys_addr_t paddr, size_t size, int iommu_prot)
+{
+        struct msm_secure_io_pgtable *data = io_pgtable_ops_to_data(ops);
+        struct io_pgtable_cfg *cfg = &data->iop.cfg;
+        void *flush_va, *flush_va_end;
+        struct scm_desc desc = {0};
+        int ret = -EINVAL;
+        u32 resp;
+
+        if (!IS_ALIGNED(iova, SZ_1M) || !IS_ALIGNED(paddr, SZ_1M) ||
+                        !IS_ALIGNED(size, SZ_1M))
+                return -EINVAL;
+
+        desc.args[0] = virt_to_phys(&paddr);
+        desc.args[1] = 1;
+        desc.args[2] = size;
+        desc.args[3] = cfg->arm_msm_secure_cfg.sec_id;
+        desc.args[4] = cfg->arm_msm_secure_cfg.cbndx;
+        desc.args[5] = iova;
+        desc.args[6] = size;
+        desc.args[7] = 0;
+
+        flush_va = &paddr;
+        flush_va_end = (void *)
+                (((unsigned long) flush_va) + sizeof(phys_addr_t));
+
+        mutex_lock(&data->pgtbl_lock);
+        /*
+         * Ensure that the buffer is in RAM by the time it gets to TZ
+         */
+        dmac_clean_range(flush_va, flush_va_end);
+
+        desc.arginfo = SCM_ARGS(8, SCM_RW, SCM_VAL, SCM_VAL, SCM_VAL, SCM_VAL,
+                        SCM_VAL, SCM_VAL, SCM_VAL);
+
+        if (is_scm_armv8()) {
+                ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
+                                IOMMU_SECURE_MAP2_FLAT), &desc);
+                resp = desc.ret[0];
+        }
+        mutex_unlock(&data->pgtbl_lock);
+
+        if (ret || resp)
+                return -EINVAL;
+
+        return 0;
+}
+
+static dma_addr_t msm_secure_get_phys_addr(struct scatterlist *sg)
+{
+        /*
+         * Try sg_dma_address first so that we can
+         * map carveout regions that do not have a
+         * struct page associated with them.
+         */
+        dma_addr_t pa = sg_dma_address(sg);
+
+        if (pa == 0)
+                pa = sg_phys(sg);
+        return pa;
+}
+
+static int msm_secure_map_sg(struct io_pgtable_ops *ops, unsigned long iova,
+                struct scatterlist *sg, unsigned int nents,
+                int iommu_prot, size_t *size)
+{
+        struct msm_secure_io_pgtable *data = io_pgtable_ops_to_data(ops);
+        struct io_pgtable_cfg *cfg = &data->iop.cfg;
+        int ret = -EINVAL;
+        struct scatterlist *tmp, *sgiter;
+        dma_addr_t *pa_list = 0;
+        unsigned int cnt, offset = 0, chunk_offset = 0;
+        dma_addr_t pa;
+        void *flush_va, *flush_va_end;
+        unsigned long len = 0;
+        struct scm_desc desc = {0};
+        int i;
+        u32 resp;
+
+        for_each_sg(sg, tmp, nents, i)
+                len += tmp->length;
+
+        if (!IS_ALIGNED(iova, SZ_1M) || !IS_ALIGNED(len, SZ_1M))
+                return -EINVAL;
+
+        if (sg->length == len) {
+                cnt = 1;
+                pa = msm_secure_get_phys_addr(sg);
+                if (!IS_ALIGNED(pa, SZ_1M))
+                        return -EINVAL;
+
+                desc.args[0] = virt_to_phys(&pa);
+                desc.args[1] = cnt;
+                desc.args[2] = len;
+                flush_va = &pa;
+        } else {
+                sgiter = sg;
+                if (!IS_ALIGNED(sgiter->length, SZ_1M))
+                        return -EINVAL;
+                cnt = sg->length / SZ_1M;
+                while ((sgiter = sg_next(sgiter))) {
+                        if (!IS_ALIGNED(sgiter->length, SZ_1M))
+                                return -EINVAL;
+                        cnt += sgiter->length / SZ_1M;
+                }
+
+                pa_list = kmalloc_array(cnt, sizeof(*pa_list), GFP_KERNEL);
+                if (!pa_list)
+                        return -ENOMEM;
+
+                sgiter = sg;
+                cnt = 0;
+                pa = msm_secure_get_phys_addr(sgiter);
+                while (offset < len) {
+
+                        if (!IS_ALIGNED(pa, SZ_1M)) {
+                                kfree(pa_list);
+                                return -EINVAL;
+                        }
+
+                        pa_list[cnt] = pa + chunk_offset;
+                        chunk_offset += SZ_1M;
+                        offset += SZ_1M;
+                        cnt++;
+
+                        if (chunk_offset >= sgiter->length && offset < len) {
+                                chunk_offset = 0;
+                                sgiter = sg_next(sgiter);
+                                pa = msm_secure_get_phys_addr(sgiter);
+                        }
+                }
+
+                desc.args[0] = virt_to_phys(pa_list);
+                desc.args[1] = cnt;
+                desc.args[2] = SZ_1M;
+                flush_va = pa_list;
+        }
+
+        desc.args[3] = cfg->arm_msm_secure_cfg.sec_id;
+        desc.args[4] = cfg->arm_msm_secure_cfg.cbndx;
+        desc.args[5] = iova;
+        desc.args[6] = len;
+        desc.args[7] = 0;
+
+        desc.arginfo = SCM_ARGS(8, SCM_RW, SCM_VAL, SCM_VAL, SCM_VAL, SCM_VAL,
+                        SCM_VAL, SCM_VAL, SCM_VAL);
+
+        /*
+         * Ensure that the buffer is in RAM by the time it gets to TZ
+         */
+        flush_va_end = (void *) (((unsigned long) flush_va) +
+                        (cnt * sizeof(*pa_list)));
+
+        mutex_lock(&data->pgtbl_lock);
+        dmac_clean_range(flush_va, flush_va_end);
+
+        if (is_scm_armv8()) {
+                ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
+                                IOMMU_SECURE_MAP2_FLAT), &desc);
+                resp = desc.ret[0];
+
+                if (ret || resp)
+                        ret = -EINVAL;
+                else
+                        ret = len;
+        }
+        mutex_unlock(&data->pgtbl_lock);
+
+        kfree(pa_list);
+        return ret;
+}
+
+static size_t msm_secure_unmap(struct io_pgtable_ops *ops, unsigned long iova,
+                size_t len)
+{
+        struct msm_secure_io_pgtable *data = io_pgtable_ops_to_data(ops);
+        struct io_pgtable_cfg *cfg = &data->iop.cfg;
+        int ret = -EINVAL;
+        struct scm_desc desc = {0};
+
+        if (!IS_ALIGNED(iova, SZ_1M) || !IS_ALIGNED(len, SZ_1M))
+                return ret;
+
+        desc.args[0] = cfg->arm_msm_secure_cfg.sec_id;
+        desc.args[1] = cfg->arm_msm_secure_cfg.cbndx;
+        desc.args[2] = iova;
+        desc.args[3] = len;
+        desc.args[4] = IOMMU_TLBINVAL_FLAG;
+        desc.arginfo = SCM_ARGS(5);
+
+        mutex_lock(&data->pgtbl_lock);
+        if (is_scm_armv8()) {
+                ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
+                                IOMMU_SECURE_UNMAP2_FLAT), &desc);
+
+                if (!ret)
+                        ret = len;
+        }
+        mutex_unlock(&data->pgtbl_lock);
+        return ret;
+}
+
+static phys_addr_t msm_secure_iova_to_phys(struct io_pgtable_ops *ops,
+                unsigned long iova)
+{
+        return -EINVAL;
+}
+
+static struct msm_secure_io_pgtable *
+msm_secure_alloc_pgtable_data(struct io_pgtable_cfg *cfg)
+{
+        struct msm_secure_io_pgtable *data;
+
+        data = kmalloc(sizeof(*data), GFP_KERNEL);
+        if (!data)
+                return NULL;
+
+        data->iop.ops = (struct io_pgtable_ops) {
+                .map = msm_secure_map,
+                .map_sg = msm_secure_map_sg,
+                .unmap = msm_secure_unmap,
+                .iova_to_phys = msm_secure_iova_to_phys,
+        };
+        mutex_init(&data->pgtbl_lock);
+
+        return data;
+}
+
+static struct io_pgtable *
+msm_secure_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
+{
+        struct msm_secure_io_pgtable *data =
+                msm_secure_alloc_pgtable_data(cfg);
+
+        return &data->iop;
+}
+
+static void msm_secure_free_pgtable(struct io_pgtable *iop)
+{
+        struct msm_secure_io_pgtable *data = io_pgtable_to_data(iop);
+
+        kfree(data);
+}
+
+struct io_pgtable_init_fns io_pgtable_arm_msm_secure_init_fns = {
+        .alloc = msm_secure_alloc_pgtable,
+        .free = msm_secure_free_pgtable,
+};
@@ -29,7 +29,7 @@
 #include <asm/cacheflush.h>
 #include <asm/dma-iommu.h>

-#if defined(CONFIG_IOMMU_DEBUG_TRACKING) || defined(CONFIG_IOMMU_TESTS)
+#if defined(CONFIG_IOMMU_TESTS)

 static const char *iommu_debug_attr_to_string(enum iommu_attr attr)
 {
@@ -103,20 +103,25 @@ void iommu_debug_attach_device(struct iommu_domain *domain,
         struct iommu_debug_attachment *attach;
         struct iommu_group *group;

-        group = iommu_group_get(dev);
+        group = dev->iommu_group;
         if (!group)
                 return;

+        mutex_lock(&iommu_debug_attachments_lock);
+        list_for_each_entry(attach, &iommu_debug_attachments, list)
+                if ((attach->domain == domain) && (attach->group == group))
+                        goto out;
+
         attach = kzalloc(sizeof(*attach), GFP_KERNEL);
         if (!attach)
-                return;
+                goto out;

         attach->domain = domain;
         attach->group = group;
         INIT_LIST_HEAD(&attach->list);

-        mutex_lock(&iommu_debug_attachments_lock);
         list_add(&attach->list, &iommu_debug_attachments);
+out:
         mutex_unlock(&iommu_debug_attachments_lock);
 }

@@ -129,7 +134,6 @@ void iommu_debug_domain_remove(struct iommu_domain *domain)
                 if (it->domain != domain)
                         continue;
                 list_del(&it->list);
-                iommu_group_put(it->group);
                 kfree(it);
         }
@@ -167,6 +171,8 @@ struct iommu_debug_device {
         u64 phys;
         size_t len;
         struct list_head list;
+        struct mutex clk_lock;
+        unsigned int clk_count;
 };

 static int iommu_debug_build_phoney_sg_table(struct device *dev,
@@ -1275,7 +1281,7 @@ static int iommu_debug_attach_do_attach(struct iommu_debug_device *ddev,
         }

         ddev->domain->is_debug_domain = true;

         val = VMID_CP_CAMERA;
         if (is_secure && iommu_domain_set_attr(ddev->domain,
                         DOMAIN_ATTR_SECURE_VMID,
                         &val)) {
@@ -1568,6 +1574,10 @@ static ssize_t iommu_debug_pte_read(struct file *file, char __user *ubuf,
         ssize_t retval;
         size_t buflen;

+        if (kptr_restrict != 0) {
+                pr_err("kptr_restrict needs to be disabled.\n");
+                return -EPERM;
+        }
         if (!dev->archdata.mapping) {
                 pr_err("No mapping. Did you already attach?\n");
                 return -EINVAL;
@@ -1635,6 +1645,10 @@ static ssize_t iommu_debug_atos_read(struct file *file, char __user *ubuf,
         ssize_t retval;
         size_t buflen;

+        if (kptr_restrict != 0) {
+                pr_err("kptr_restrict needs to be disabled.\n");
+                return -EPERM;
+        }
         if (!ddev->domain) {
                 pr_err("No domain. Did you already attach?\n");
                 return -EINVAL;
@@ -1683,6 +1697,10 @@ static ssize_t iommu_debug_dma_atos_read(struct file *file, char __user *ubuf,
         ssize_t retval;
         size_t buflen;

+        if (kptr_restrict != 0) {
+                pr_err("kptr_restrict needs to be disabled.\n");
+                return -EPERM;
+        }
         if (!dev->archdata.mapping) {
                 pr_err("No mapping. Did you already attach?\n");
                 return -EINVAL;
@@ -2129,20 +2147,34 @@ static ssize_t iommu_debug_config_clocks_write(struct file *file,
                 return -EFAULT;
         }

+        mutex_lock(&ddev->clk_lock);
         switch (buf) {
         case '0':
+                if (ddev->clk_count == 0) {
+                        dev_err(dev, "Config clocks already disabled\n");
+                        break;
+                }
+
+                if (--ddev->clk_count > 0)
+                        break;
+
                 dev_err(dev, "Disabling config clocks\n");
                 iommu_disable_config_clocks(ddev->domain);
                 break;
         case '1':
+                if (ddev->clk_count++ > 0)
+                        break;
+
                 dev_err(dev, "Enabling config clocks\n");
                 if (iommu_enable_config_clocks(ddev->domain))
                         dev_err(dev, "Failed!\n");
                 break;
         default:
                 dev_err(dev, "Invalid value. Should be 0 or 1.\n");
+                mutex_unlock(&ddev->clk_lock);
                 return -EINVAL;
         }
+        mutex_unlock(&ddev->clk_lock);

         return count;
 }
@@ -2192,6 +2224,9 @@ static int snarf_iommu_devices(struct device *dev, void *ignored)
         if (!of_find_property(dev->of_node, "iommus", NULL))
                 return 0;

+        if (!of_device_is_compatible(dev->of_node, "iommu-debug-test"))
+                return 0;
+
         /* Hold a reference count */
         if (!iommu_group_get(dev))
                 return 0;
@@ -2200,6 +2235,7 @@ static int snarf_iommu_devices(struct device *dev, void *ignored)
         if (!ddev)
                 return -ENODEV;

+        mutex_init(&ddev->clk_lock);
         ddev->dev = dev;
         dir = debugfs_create_dir(dev_name(dev), debugfs_tests_dir);
         if (!dir) {
@@ -31,6 +31,7 @@ void iommu_debugfs_setup(void)
 {
         if (!iommu_debugfs_dir) {
                 iommu_debugfs_dir = debugfs_create_dir("iommu", NULL);
+                iommu_debugfs_top = iommu_debugfs_dir;
                 pr_warn("\n");
                 pr_warn("*************************************************************\n");
                 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
@@ -635,6 +635,7 @@ int iommu_group_add_device(struct iommu_group *group, struct device *dev)
         if (ret)
                 goto err_put_group;

+
         /* Notify any listeners about change to group. */
         blocking_notifier_call_chain(&group->notifier,
                         IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);
@@ -1304,6 +1305,7 @@ static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
         /* Assume all sizes by default; the driver may override this later */
         domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;
+        domain->is_debug_domain = false;
         memset(domain->name, 0, IOMMU_DOMAIN_NAME_LEN);

         return domain;
 }
@@ -1336,6 +1338,11 @@ static int __iommu_attach_device(struct iommu_domain *domain,
         if (!ret) {
                 trace_attach_device_to_domain(dev);
                 iommu_debug_attach_device(domain, dev);
+
+                if (!strnlen(domain->name, IOMMU_DOMAIN_NAME_LEN)) {
+                        strlcpy(domain->name, dev_name(dev),
+                                IOMMU_DOMAIN_NAME_LEN);
+                }
         }
         return ret;
 }
@@ -1630,7 +1637,7 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
         if (ret)
                 iommu_unmap(domain, orig_iova, orig_size - size);
         else
-                trace_map(orig_iova, orig_paddr, orig_size);
+                trace_map(domain, orig_iova, orig_paddr, orig_size, prot);

         return ret;
 }
@@ -1692,7 +1699,7 @@ static size_t __iommu_unmap(struct iommu_domain *domain,
         if (sync && ops->iotlb_sync)
                 ops->iotlb_sync(domain);

-        trace_unmap(orig_iova, size, unmapped);
+        trace_unmap(domain, orig_iova, size, unmapped);
         return unmapped;
 }

@@ -1710,6 +1717,18 @@ size_t iommu_unmap_fast(struct iommu_domain *domain,
 }
 EXPORT_SYMBOL_GPL(iommu_unmap_fast);

+size_t iommu_map_sg(struct iommu_domain *domain,
+                unsigned long iova, struct scatterlist *sg,
+                unsigned int nents, int prot)
+{
+        size_t mapped;
+
+        mapped = domain->ops->map_sg(domain, iova, sg, nents, prot);
+        trace_map_sg(domain, iova, mapped, prot);
+        return mapped;
+}
+EXPORT_SYMBOL(iommu_map_sg);
+
 size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
                 struct scatterlist *sg, unsigned int nents, int prot)
 {

@@ -191,7 +191,7 @@ static inline int __msm_dma_map_sg(struct device *dev, struct scatterlist *sg,
         mutex_lock(&iommu_meta->lock);
         iommu_map = msm_iommu_lookup(iommu_meta, dev);
         if (!iommu_map) {
-                iommu_map = kmalloc(sizeof(*iommu_map), GFP_ATOMIC);
+                iommu_map = kmalloc(sizeof(*iommu_map), GFP_KERNEL);

                 if (!iommu_map) {
                         ret = -ENOMEM;
drivers/soc/qcom/msm_tz_smmu.c (new file, 123 lines)
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/of.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/msm_tz_smmu.h>
+
+static const char * const device_id_mappings[] = {
+        [TZ_DEVICE_VIDEO] = "VIDEO",
+        [TZ_DEVICE_MDSS] = "MDSS",
+        [TZ_DEVICE_LPASS] = "LPASS",
+        [TZ_DEVICE_MDSS_BOOT] = "MDSS_BOOT",
+        [TZ_DEVICE_USB1_HS] = "USB1_HS",
+        [TZ_DEVICE_OCMEM] = "OCMEM",
+        [TZ_DEVICE_LPASS_CORE] = "LPASS_CORE",
+        [TZ_DEVICE_VPU] = "VPU",
+        [TZ_DEVICE_COPSS_SMMU] = "COPSS_SMMU",
+        [TZ_DEVICE_USB3_0] = "USB3_0",
+        [TZ_DEVICE_USB3_1] = "USB3_1",
+        [TZ_DEVICE_PCIE_0] = "PCIE_0",
+        [TZ_DEVICE_PCIE_1] = "PCIE_1",
+        [TZ_DEVICE_BCSS] = "BCSS",
+        [TZ_DEVICE_VCAP] = "VCAP",
+        [TZ_DEVICE_PCIE20] = "PCIE20",
+        [TZ_DEVICE_IPA] = "IPA",
+        [TZ_DEVICE_APPS] = "APPS",
+        [TZ_DEVICE_GPU] = "GPU",
+        [TZ_DEVICE_UFS] = "UFS",
+        [TZ_DEVICE_ICE] = "ICE",
+        [TZ_DEVICE_ROT] = "ROT",
+        [TZ_DEVICE_VFE] = "VFE",
+        [TZ_DEVICE_ANOC0] = "ANOC0",
+        [TZ_DEVICE_ANOC1] = "ANOC1",
+        [TZ_DEVICE_ANOC2] = "ANOC2",
+        [TZ_DEVICE_CPP] = "CPP",
+        [TZ_DEVICE_JPEG] = "JPEG",
+};
+
+#define MAX_DEVICE_ID_NAME_LEN 20
+
+#define TZ_SMMU_PREPARE_ATOS_ID 0x21
+#define TZ_SMMU_ATOS_START 1
+#define TZ_SMMU_ATOS_END 0
+
+#define SMMU_CHANGE_PAGETABLE_FORMAT 0X01
+
+enum tz_smmu_device_id msm_dev_to_device_id(struct device *dev)
+{
+        const char *device_id;
+        enum tz_smmu_device_id iter;
+
+        if (of_property_read_string(dev->of_node, "qcom,tz-device-id",
+                        &device_id)) {
+                dev_err(dev, "no qcom,device-id property\n");
+                return TZ_DEVICE_MAX;
+        }
+
+        for (iter = TZ_DEVICE_START; iter < TZ_DEVICE_MAX; iter++)
+                if (!strcmp(device_id_mappings[iter], device_id))
+                        return iter;
+
+        return TZ_DEVICE_MAX;
+}
+
+static int __msm_tz_smmu_atos(struct device *dev, int cb_num, int operation)
+{
+        int ret;
+        struct scm_desc desc = {0};
+        enum tz_smmu_device_id devid = msm_dev_to_device_id(dev);
+
+        if (devid == TZ_DEVICE_MAX)
+                return -ENODEV;
+
+        desc.args[0] = devid;
+        desc.args[1] = cb_num;
+        desc.args[2] = operation;
+        desc.arginfo = SCM_ARGS(3, SCM_VAL, SCM_VAL, SCM_VAL);
+
+        ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP, TZ_SMMU_PREPARE_ATOS_ID),
+                        &desc);
+        if (ret)
+                pr_info("%s: TZ SMMU ATOS %s failed, ret = %d\n",
+                        __func__,
+                        operation == TZ_SMMU_ATOS_START ? "start" : "end",
+                        ret);
+        return ret;
+}
+
+int msm_tz_smmu_atos_start(struct device *dev, int cb_num)
+{
+        return __msm_tz_smmu_atos(dev, cb_num, TZ_SMMU_ATOS_START);
+}
+
+int msm_tz_smmu_atos_end(struct device *dev, int cb_num)
+{
+        return __msm_tz_smmu_atos(dev, cb_num, TZ_SMMU_ATOS_END);
+}
+
+int msm_tz_set_cb_format(enum tz_smmu_device_id sec_id, int cbndx)
+{
+        struct scm_desc desc = {0};
+        int ret = 0;
+
+        desc.args[0] = sec_id;
+        desc.args[1] = cbndx;
+        desc.args[2] = 1; /* Enable */
+        desc.arginfo = SCM_ARGS(3, SCM_VAL, SCM_VAL, SCM_VAL);
+
+        ret = scm_call2(SCM_SIP_FNID(SCM_SVC_SMMU_PROGRAM,
+                        SMMU_CHANGE_PAGETABLE_FORMAT), &desc);
+
+        if (ret) {
+                WARN(1, "Format change failed for CB %d with ret %d\n",
+                     cbndx, ret);
+                return ret;
+        }
+
+        return 0;
+}
@@ -47,7 +47,7 @@ struct dest_vm_and_perm_info {
 };

 static void *qcom_secure_mem;
-#define QCOM_SECURE_MEM_SIZE (512*1024)
+#define QCOM_SECURE_MEM_SIZE (2048*1024)

 static int secure_buffer_change_chunk(u32 chunks,
                 u32 nchunks,
@@ -100,6 +100,8 @@ struct iommu_pgtbl_info {
 #define IOMMU_DOMAIN_DMA (__IOMMU_DOMAIN_PAGING | \
                           __IOMMU_DOMAIN_DMA_API)

+
+#define IOMMU_DOMAIN_NAME_LEN 32
 struct iommu_domain {
         unsigned type;
         const struct iommu_ops *ops;
@@ -109,6 +111,7 @@ struct iommu_domain {
         struct iommu_domain_geometry geometry;
         void *iova_cookie;
+        bool is_debug_domain;
         char name[IOMMU_DOMAIN_NAME_LEN];
 };

 enum iommu_cap {
@@ -129,6 +132,11 @@ enum iommu_cap {
  * DOMAIN_ATTR_FSL_PAMUV1 corresponds to the above mentioned contraints.
  * The caller can invoke iommu_domain_get_attr to check if the underlying
  * iommu implementation supports these constraints.
+ *
+ * DOMAIN_ATTR_NO_CFRE
+ * Some bus implementations may enter a bad state if iommu reports an error
+ * on context fault. As context faults are not always fatal, this must be
+ * avoided.
  */

 enum iommu_attr {
@@ -159,6 +167,7 @@ enum iommu_attr {
         DOMAIN_ATTR_BITMAP_IOVA_ALLOCATOR,
         DOMAIN_ATTR_QCOM_MMU500_ERRATA_MIN_IOVA_ALIGN,
         DOMAIN_ATTR_USE_LLC_NWA,
+        DOMAIN_ATTR_NO_CFRE,
         DOMAIN_ATTR_MAX,
 };

@@ -362,6 +371,9 @@ extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
                 size_t size);
 extern size_t iommu_unmap_fast(struct iommu_domain *domain,
                 unsigned long iova, size_t size);
+extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+                struct scatterlist *sg, unsigned int nents,
+                int prot);
 extern size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
                 struct scatterlist *sg,unsigned int nents, int prot);
 extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
@@ -443,13 +455,6 @@ static inline void iommu_tlb_sync(struct iommu_domain *domain)
         domain->ops->iotlb_sync(domain);
 }

-static inline size_t iommu_map_sg(struct iommu_domain *domain,
-                unsigned long iova, struct scatterlist *sg,
-                unsigned int nents, int prot)
-{
-        return domain->ops->map_sg(domain, iova, sg, nents, prot);
-}
-
 extern void iommu_trigger_fault(struct iommu_domain *domain,
                 unsigned long flags);

@@ -74,6 +74,7 @@ struct iova_domain {
         struct rb_node *cached32_node; /* Save last 32-bit alloced node */
         unsigned long granule; /* pfn granularity for this domain */
         unsigned long start_pfn; /* Lower limit for this domain */
+        unsigned long end_pfn; /* Upper limit for this domain */
         unsigned long dma_32bit_pfn;
         struct iova anchor; /* rbtree lookup anchor */
         struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE]; /* IOVA range caches */
@@ -12,8 +12,10 @@
 #define _TRACE_IOMMU_H

 #include <linux/tracepoint.h>
+#include <linux/iommu.h>

 struct device;
+struct iommu_domain;

 DECLARE_EVENT_CLASS(iommu_group_event,

@@ -85,47 +87,84 @@ DEFINE_EVENT(iommu_device_event, detach_device_from_domain,

 TRACE_EVENT(map,

-        TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
+        TP_PROTO(struct iommu_domain *domain, unsigned long iova,
+                 phys_addr_t paddr, size_t size, int prot),

-        TP_ARGS(iova, paddr, size),
+        TP_ARGS(domain, iova, paddr, size, prot),

         TP_STRUCT__entry(
+                __string(name, domain->name)
                 __field(u64, iova)
                 __field(u64, paddr)
                 __field(size_t, size)
+                __field(int, prot)
         ),

         TP_fast_assign(
+                __assign_str(name, domain->name);
                 __entry->iova = iova;
                 __entry->paddr = paddr;
                 __entry->size = size;
+                __entry->prot = prot;
         ),

-        TP_printk("IOMMU: iova=0x%016llx paddr=0x%016llx size=%zu",
-                        __entry->iova, __entry->paddr, __entry->size
+        TP_printk("IOMMU:%s iova=0x%016llx paddr=0x%016llx size=0x%zx prot=0x%x",
+                        __get_str(name), __entry->iova, __entry->paddr,
+                        __entry->size, __entry->prot
         )
 );

 TRACE_EVENT(unmap,

-        TP_PROTO(unsigned long iova, size_t size, size_t unmapped_size),
+        TP_PROTO(struct iommu_domain *domain, unsigned long iova, size_t size,
+                 size_t unmapped_size),

-        TP_ARGS(iova, size, unmapped_size),
+        TP_ARGS(domain, iova, size, unmapped_size),

         TP_STRUCT__entry(
+                __string(name, domain->name)
                 __field(u64, iova)
                 __field(size_t, size)
                 __field(size_t, unmapped_size)
         ),

         TP_fast_assign(
+                __assign_str(name, domain->name);
                 __entry->iova = iova;
                 __entry->size = size;
                 __entry->unmapped_size = unmapped_size;
         ),

-        TP_printk("IOMMU: iova=0x%016llx size=%zu unmapped_size=%zu",
-                        __entry->iova, __entry->size, __entry->unmapped_size
+        TP_printk("IOMMU:%s iova=0x%016llx size=0x%zx unmapped_size=0x%zx",
+                        __get_str(name), __entry->iova, __entry->size,
+                        __entry->unmapped_size
         )
 );

+TRACE_EVENT(map_sg,
+
+        TP_PROTO(struct iommu_domain *domain, unsigned long iova, size_t size,
+                 int prot),
+
+        TP_ARGS(domain, iova, size, prot),
+
+        TP_STRUCT__entry(
+                __string(name, domain->name)
+                __field(u64, iova)
+                __field(size_t, size)
+                __field(int, prot)
+        ),
+
+        TP_fast_assign(
+                __assign_str(name, domain->name);
+                __entry->iova = iova;
+                __entry->size = size;
+                __entry->prot = prot;
+        ),
+
+        TP_printk("IOMMU:%s iova=0x%016llx size=0x%zx prot=0x%x",
+                        __get_str(name), __entry->iova, __entry->size,
+                        __entry->prot
+        )
+);
+
@@ -161,6 +200,66 @@ DEFINE_EVENT(iommu_error, io_page_fault,

         TP_ARGS(dev, iova, flags)
 );

+DECLARE_EVENT_CLASS(iommu_tlbi,
+
+        TP_PROTO(struct device *dev, u64 time),
+
+        TP_ARGS(dev, time),
+
+        TP_STRUCT__entry(
+                __string(device, dev_name(dev))
+                __field(u64, time)
+        ),
+
+        TP_fast_assign(
+                __assign_str(device, dev_name(dev));
+                __entry->time = time;
+        ),
+
+        TP_printk("IOMMU:%s %lld us",
+                        __get_str(device), __entry->time
+        )
+);
+
+DEFINE_EVENT(iommu_tlbi, tlbi_start,
+
+        TP_PROTO(struct device *dev, u64 time),
+
+        TP_ARGS(dev, time)
+);
+
+DEFINE_EVENT(iommu_tlbi, tlbi_end,
+
+        TP_PROTO(struct device *dev, u64 time),
+
+        TP_ARGS(dev, time)
+);
+
+DEFINE_EVENT(iommu_tlbi, tlbsync_timeout,
+
+        TP_PROTO(struct device *dev, u64 time),
+
+        TP_ARGS(dev, time)
+);
+
+TRACE_EVENT(smmu_init,
+
+        TP_PROTO(u64 time),
+
+        TP_ARGS(time),
+
+        TP_STRUCT__entry(
+                __field(u64, time)
+        ),
+
+        TP_fast_assign(
+                __entry->time = time;
+        ),
+
+        TP_printk("ARM SMMU init latency: %lld us", __entry->time)
+);
+
 #endif /* _TRACE_IOMMU_H */

 /* This part must be outside protection */