iommu: arm-smmu: Merge for msm-kona kernel upgrade

Merge all iommu/smmu changes from msm-4.14 to msm-kona
as of:
'commit 72ac9228fe40 ("iommu: arm-smmu: dump additional
smmu registers in fault handler")'.

Change-Id: I14b697f63d786032b31b490f2102393094762ff3
Signed-off-by: Swathi Sridhar <swatsrid@codeaurora.org>
Author: Swathi Sridhar
Date:   2018-11-09 10:43:44 -08:00
Parent: 5f23998b69
Commit: 9d4b7d6415

9 files changed, 279 insertions(+), 136 deletions(-)


@@ -405,6 +405,16 @@ config ARM_SMMU_SELFTEST
If unsure, say N here.
config IOMMU_TLBSYNC_DEBUG
bool "TLB sync timeout debug"
depends on ARM_SMMU
help
Enables collection of the SMMU system state information right
after the first TLB sync timeout failure by calling BUG().
Use this only on debug builds.
If unsure, say N here.
config QCOM_LAZY_MAPPING
bool "Reference counted iommu-mapping support"
depends on ION
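
The consumer of the new IOMMU_TLBSYNC_DEBUG option appears later in this
merge, in the TLB sync timeout path. As a minimal sketch, not the
driver's exact code (the helper name and the dev argument are
hypothetical), this is how IS_ENABLED() folds the option away:

    /* Minimal sketch, assuming CONFIG_IOMMU_TLBSYNC_DEBUG from the hunk
     * above: IS_ENABLED() evaluates to 0 or 1 at compile time, so on
     * non-debug builds the BUG_ON() vanishes and the timeout is only
     * logged. */
    static int example_tlb_sync_timeout(struct device *dev)
    {
            dev_err_ratelimited(dev, "TLB sync timed out\n");
            BUG_ON(IS_ENABLED(CONFIG_IOMMU_TLBSYNC_DEBUG));
            return -EINVAL;
    }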


@@ -191,6 +191,8 @@ enum arm_smmu_s2cr_privcfg {
#define TLBSTATUS_SACTIVE (1 << 0)
#define ARM_SMMU_CB_ATS1PR 0x800
#define ARM_SMMU_CB_ATSR 0x8f0
#define ARM_SMMU_STATS_SYNC_INV_TBU_ACK 0x25dc
#define ARM_SMMU_TBU_PWR_STATUS 0x2204
#define SCTLR_MEM_ATTR_SHIFT 16
#define SCTLR_SHCFG_SHIFT 22


@@ -693,6 +693,20 @@ static int arm_smmu_arch_device_group(struct device *dev,
return smmu->arch_ops->device_group(dev, group);
}
static void arm_smmu_arch_write_sync(struct arm_smmu_device *smmu)
{
u32 id;
if (!smmu)
return;
/* Read to complete prior write transactions */
id = readl_relaxed(ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_ID0);
/* Wait for read to complete before off */
rmb();
}
static struct device_node *dev_get_dev_node(struct device *dev)
{
if (dev_is_pci(dev)) {
@@ -940,6 +954,9 @@ static int arm_smmu_power_on_atomic(struct arm_smmu_power_resources *pwr)
static void arm_smmu_power_off_atomic(struct arm_smmu_power_resources *pwr)
{
unsigned long flags;
struct arm_smmu_device *smmu = pwr->dev->driver_data;
arm_smmu_arch_write_sync(smmu);
spin_lock_irqsave(&pwr->clock_refs_lock, flags);
if (pwr->clock_refs_count == 0) {
@@ -1080,6 +1097,7 @@ static int __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
void __iomem *sync, void __iomem *status)
{
unsigned int spin_cnt, delay;
u32 sync_inv_ack, tbu_pwr_status;
writel_relaxed(0, sync);
for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
@@ -1090,9 +1108,15 @@ static int __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
}
udelay(delay);
}
sync_inv_ack = scm_io_read((unsigned long)(smmu->phys_addr +
ARM_SMMU_STATS_SYNC_INV_TBU_ACK));
tbu_pwr_status = scm_io_read((unsigned long)(smmu->phys_addr +
ARM_SMMU_TBU_PWR_STATUS));
trace_tlbsync_timeout(smmu->dev, 0);
dev_err_ratelimited(smmu->dev,
"TLB sync timed out -- SMMU may be deadlocked\n");
"TLB sync timed out -- SMMU may be deadlocked ack 0x%x pwr 0x%x\n",
sync_inv_ack, tbu_pwr_status);
BUG_ON(IS_ENABLED(CONFIG_IOMMU_TLBSYNC_DEBUG));
return -EINVAL;
}
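
Two notes on the hunks above. arm_smmu_arch_write_sync() uses a common
MMIO idiom: a dummy relaxed read from the device drains posted writes,
and the rmb() keeps that read from being reordered past the clock and
power teardown that follows. The timeout path pulls the two diagnostic
registers through scm_io_read(), the secure-monitor I/O helper,
presumably because those registers are only reachable via the secure
world on these targets. A hedged sketch of the flush idiom, with
hypothetical names:

    /* Sketch of the flush-before-power-off idiom, not the driver's
     * exact code: the relaxed read forces earlier posted writes to the
     * device to complete, and rmb() orders the read itself before the
     * power-off sequence that follows. */
    static void flush_posted_writes(void __iomem *base)
    {
            (void)readl_relaxed(base);      /* drain posted writes */
            rmb();                          /* complete before power-off */
    }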
@@ -1376,6 +1400,62 @@ static struct iommu_gather_ops msm_smmu_gather_ops = {
.free_pages_exact = arm_smmu_free_pages_exact,
};
static void print_ctx_regs(struct arm_smmu_device *smmu, struct arm_smmu_cfg
*cfg, unsigned int fsr)
{
u32 fsynr0;
void __iomem *cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
void __iomem *gr1_base = ARM_SMMU_GR1(smmu);
bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
fsynr0 = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
dev_err(smmu->dev, "FAR = 0x%016llx\n",
readq_relaxed(cb_base + ARM_SMMU_CB_FAR));
dev_err(smmu->dev, "PAR = 0x%pK\n",
readq_relaxed(cb_base + ARM_SMMU_CB_PAR));
dev_err(smmu->dev,
"FSR = 0x%08x [%s%s%s%s%s%s%s%s%s%s]\n",
fsr,
(fsr & 0x02) ? (fsynr0 & 0x10 ?
"TF W " : "TF R ") : "",
(fsr & 0x04) ? "AFF " : "",
(fsr & 0x08) ? (fsynr0 & 0x10 ?
"PF W " : "PF R ") : "",
(fsr & 0x10) ? "EF " : "",
(fsr & 0x20) ? "TLBMCF " : "",
(fsr & 0x40) ? "TLBLKF " : "",
(fsr & 0x80) ? "MHF " : "",
(fsr & 0x100) ? "UUT " : "",
(fsr & 0x40000000) ? "SS " : "",
(fsr & 0x80000000) ? "MULTI " : "");
if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
dev_err(smmu->dev, "TTBR0 = 0x%pK\n",
readl_relaxed(cb_base + ARM_SMMU_CB_TTBR0));
dev_err(smmu->dev, "TTBR1 = 0x%pK\n",
readl_relaxed(cb_base + ARM_SMMU_CB_TTBR1));
} else {
dev_err(smmu->dev, "TTBR0 = 0x%pK\n",
readq_relaxed(cb_base + ARM_SMMU_CB_TTBR0));
if (stage1)
dev_err(smmu->dev, "TTBR1 = 0x%pK\n",
readq_relaxed(cb_base + ARM_SMMU_CB_TTBR1));
}
dev_err(smmu->dev, "SCTLR = 0x%08x ACTLR = 0x%08x\n",
readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR),
readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR));
dev_err(smmu->dev, "CBAR = 0x%08x\n",
readl_relaxed(gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx)));
dev_err(smmu->dev, "MAIR0 = 0x%08x MAIR1 = 0x%08x\n",
readl_relaxed(cb_base + ARM_SMMU_CB_S1_MAIR0),
readl_relaxed(cb_base + ARM_SMMU_CB_S1_MAIR1));
}
static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain,
dma_addr_t iova, u32 fsr)
{
@@ -1463,29 +1543,17 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
ret = IRQ_HANDLED;
resume = RESUME_TERMINATE;
} else {
phys_addr_t phys_atos = arm_smmu_verify_fault(domain, iova,
fsr);
if (__ratelimit(&_rs)) {
phys_addr_t phys_atos = arm_smmu_verify_fault(domain,
iova,
fsr);
dev_err(smmu->dev,
"Unhandled context fault: iova=0x%08lx, cb=%d, fsr=0x%x, fsynr0=0x%x, fsynr1=0x%x\n",
iova, cfg->cbndx, fsr, fsynr0, fsynr1);
dev_err(smmu->dev, "FAR = %016lx\n",
(unsigned long)iova);
dev_err(smmu->dev,
"FSR = %08x [%s%s%s%s%s%s%s%s%s%s]\n",
fsr,
(fsr & 0x02) ? (fsynr0 & 0x10 ?
"TF W " : "TF R ") : "",
(fsr & 0x04) ? "AFF " : "",
(fsr & 0x08) ? (fsynr0 & 0x10 ?
"PF W " : "PF R ") : "",
(fsr & 0x10) ? "EF " : "",
(fsr & 0x20) ? "TLBMCF " : "",
(fsr & 0x40) ? "TLBLKF " : "",
(fsr & 0x80) ? "MHF " : "",
(fsr & 0x100) ? "UUT " : "",
(fsr & 0x40000000) ? "SS " : "",
(fsr & 0x80000000) ? "MULTI " : "");
print_ctx_regs(smmu, cfg, fsr);
dev_err(smmu->dev,
"soft iova-to-phys=%pa\n", &phys_soft);
if (!phys_soft)
@@ -2426,7 +2494,10 @@ static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
mutex_lock(&smmu->stream_map_mutex);
for_each_cfg_sme(fwspec, i, idx) {
WARN_ON(s2cr[idx].attach_count == 0);
if (WARN_ON(s2cr[idx].attach_count == 0)) {
mutex_unlock(&smmu->stream_map_mutex);
return;
}
s2cr[idx].attach_count -= 1;
if (s2cr[idx].attach_count > 0)
@@ -4324,7 +4395,7 @@ static int arm_smmu_init_bus_scaling(struct arm_smmu_power_resources *pwr)
pwr->bus_client = msm_bus_scale_register_client(pwr->bus_dt_data);
if (!pwr->bus_client) {
dev_err(dev, "Bus client registration failed\n");
return -EINVAL;
return -EPROBE_DEFER;
}
return 0;
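
The -EINVAL to -EPROBE_DEFER change matters because
msm_bus_scale_register_client() can fail simply because the bus driver
has not probed yet; deferring lets the driver core retry instead of
failing the SMMU permanently. A hedged sketch of the propagation, with
a hypothetical probe function and helper:

    /* Hypothetical sketch: any error from the init helper, including
     * -EPROBE_DEFER, is propagated out of probe. On -EPROBE_DEFER the
     * driver core re-queues the device and retries probing later. */
    static int example_probe(struct platform_device *pdev)
    {
            int ret = example_init_bus_scaling(pdev); /* hypothetical */

            if (ret)
                    return ret;     /* may be -EPROBE_DEFER */
            return 0;
    }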
@@ -5018,6 +5089,11 @@ module_exit(arm_smmu_exit);
#define DEBUG_PAR_PA_SHIFT 12
#define DEBUG_PAR_FAULT_VAL 0x1
#define DEBUG_AXUSER_REG 0x30
#define DEBUG_AXUSER_CDMID_MASK 0xff
#define DEBUG_AXUSER_CDMID_SHIFT 36
#define DEBUG_AXUSER_CDMID_VAL 255
#define TBU_DBG_TIMEOUT_US 100
struct actlr_setting {
@@ -5277,9 +5353,13 @@ static phys_addr_t qsmmuv500_iova_to_phys(
redo:
/* Set address and stream-id */
val = readq_relaxed(tbu->base + DEBUG_SID_HALT_REG);
val &= ~DEBUG_SID_HALT_SID_MASK;
val |= sid & DEBUG_SID_HALT_SID_MASK;
writeq_relaxed(val, tbu->base + DEBUG_SID_HALT_REG);
writeq_relaxed(iova, tbu->base + DEBUG_VA_ADDR_REG);
val = (u64)(DEBUG_AXUSER_CDMID_VAL & DEBUG_AXUSER_CDMID_MASK) <<
DEBUG_AXUSER_CDMID_SHIFT;
writeq_relaxed(val, tbu->base + DEBUG_AXUSER_REG);
/*
* Write-back Read and Write-Allocate
@@ -5336,6 +5416,9 @@ static phys_addr_t qsmmuv500_iova_to_phys(
/* Reset hardware */
writeq_relaxed(0, tbu->base + DEBUG_TXN_TRIGG_REG);
writeq_relaxed(0, tbu->base + DEBUG_VA_ADDR_REG);
val = readl_relaxed(tbu->base + DEBUG_SID_HALT_REG);
val &= ~DEBUG_SID_HALT_SID_MASK;
writel_relaxed(val, tbu->base + DEBUG_SID_HALT_REG);
/*
* After a failed translation, the next successful translation will
@@ -5351,6 +5434,12 @@ static phys_addr_t qsmmuv500_iova_to_phys(
qsmmuv500_tbu_resume(tbu);
out_power_off:
/* Read to complete prior write transactions */
val = readl_relaxed(tbu->base + DEBUG_SR_HALT_ACK_REG);
/* Wait for read to complete before off */
rmb();
arm_smmu_power_off(tbu->pwr);
return phys;


@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/dma-contiguous.h>
@@ -540,12 +540,22 @@ static void *fast_smmu_alloc(struct device *dev, size_t size,
void *addr;
unsigned long flags;
struct sg_mapping_iter miter;
unsigned int count = ALIGN(size, SZ_4K) >> PAGE_SHIFT;
size_t count = ALIGN(size, SZ_4K) >> PAGE_SHIFT;
bool is_coherent = is_dma_coherent(dev, attrs);
int prot = dma_info_to_prot(DMA_BIDIRECTIONAL, is_coherent, attrs);
pgprot_t remap_prot = __get_dma_pgprot(attrs, PAGE_KERNEL, is_coherent);
struct page **pages;
/*
* sg_alloc_table_from_pages() takes an unsigned int count, so
* check that count does not exceed UINT_MAX.
*/
if (count > UINT_MAX) {
dev_err(dev, "count: %zx exceeds UNIT_MAX\n", count);
return NULL;
}
*handle = DMA_ERROR_CODE;
pages = __fast_smmu_alloc_pages(count, gfp);
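
A short restatement of why the guard above exists, as a sketch in
isolation (types as in the hunk): count is now computed as a size_t,
but sg_alloc_table_from_pages() declares its page-count parameter as
an unsigned int, so a larger value would silently truncate.

    /* Sketch of the guard in isolation: refuse the allocation rather
     * than let the page count truncate at the API boundary. */
    size_t count = ALIGN(size, SZ_4K) >> PAGE_SHIFT;

    if (count > UINT_MAX)
            return NULL;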


@@ -289,20 +289,20 @@ static int iommu_debug_dma_reconfigure(struct iommu_debug_device *ddev,
bool coherent;
if (ddev->domain) {
dev_err(dev, "Already attached.\n");
dev_err_ratelimited(dev, "Already attached.\n");
return -EBUSY;
}
iommu = of_iommu_configure(dev, dev->of_node);
if (!iommu) {
dev_err(dev, "Is not associated with an iommu\n");
dev_err_ratelimited(dev, "Is not associated with an iommu\n");
return -EINVAL;
}
coherent = of_dma_is_coherent(dev->of_node);
if (!dev->iommu_group) {
dev_err(dev, "Does not have an iommu group\n");
dev_err_ratelimited(dev, "Does not have an iommu group\n");
return -EINVAL;
}
@@ -310,7 +310,7 @@ static int iommu_debug_dma_reconfigure(struct iommu_debug_device *ddev,
domain = iommu_get_domain_for_dev(dev);
if (domain) {
if (domain->type != IOMMU_DOMAIN_DMA) {
dev_err(dev, "Attached, but its not a default domain?\n");
dev_err_ratelimited(dev, "Attached, but its not a default domain?\n");
return -EINVAL;
}
iommu_detach_group(domain, dev->iommu_group);
@@ -318,19 +318,19 @@ static int iommu_debug_dma_reconfigure(struct iommu_debug_device *ddev,
domain = iommu_domain_alloc(dev->bus);
if (!domain) {
dev_err(dev, "Allocating iommu domain failed\n");
dev_err_ratelimited(dev, "Allocating iommu domain failed\n");
return -EINVAL;
}
domain->is_debug_domain = true;
if (iommu_debug_set_attrs(ddev, domain, attrs)) {
dev_err(dev, "Setting attrs failed\n");
dev_err_ratelimited(dev, "Setting attrs failed\n");
goto out_free_domain;
}
if (iommu_attach_group(domain, dev->iommu_group)) {
dev_err(dev, "attach group failed\n");
dev_err_ratelimited(dev, "attach group failed\n");
goto out_free_domain;
}
@@ -341,7 +341,7 @@ static int iommu_debug_dma_reconfigure(struct iommu_debug_device *ddev,
set_dma_ops(dev, NULL);
arch_setup_dma_ops(dev, dma_base, size, iommu, coherent);
if (!get_dma_ops(dev)) {
dev_err(dev, "arch_setup_dma_ops failed, dma ops are null.\n");
dev_err_ratelimited(dev, "arch_setup_dma_ops failed, dma ops are null.\n");
goto out_detach_group;
}
@@ -362,13 +362,13 @@ static void iommu_debug_dma_deconfigure(struct iommu_debug_device *ddev)
struct device *dev = ddev->dev;
if (!dev->iommu_group) {
dev_err(dev, "Does not have an iommu group\n");
dev_err_ratelimited(dev, "Does not have an iommu group\n");
return;
}
domain = ddev->domain;
if (!domain) {
dev_err(dev, "Is not attached\n");
dev_err_ratelimited(dev, "Is not attached\n");
return;
}
@@ -770,14 +770,14 @@ static int __tlb_stress_sweep(struct device *dev, struct seq_file *s)
for (iova = 0, i = 0; iova < max; iova += SZ_8K, ++i) {
dma_addr = dma_map_single(dev, virt, SZ_8K, DMA_TO_DEVICE);
if (dma_addr == DMA_ERROR_CODE) {
dev_err(dev, "Failed map on iter %d\n", i);
dev_err_ratelimited(dev, "Failed map on iter %d\n", i);
ret = -EINVAL;
goto out;
}
}
if (dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE) != DMA_ERROR_CODE) {
dev_err(dev,
dev_err_ratelimited(dev,
"dma_map_single unexpectedly (VA should have been exhausted)\n");
ret = -EINVAL;
goto out;
@@ -797,7 +797,7 @@ static int __tlb_stress_sweep(struct device *dev, struct seq_file *s)
if (dma_addr != SZ_8K) {
dma_addr_t expected = SZ_8K;
dev_err(dev, "Unexpected dma_addr. got: %pa expected: %pa\n",
dev_err_ratelimited(dev, "Unexpected dma_addr. got: %pa expected: %pa\n",
&dma_addr, &expected);
ret = -EINVAL;
goto out;
@@ -812,14 +812,14 @@ static int __tlb_stress_sweep(struct device *dev, struct seq_file *s)
if (dma_addr != 0) {
dma_addr_t expected = 0;
dev_err(dev, "Unexpected dma_addr. got: %pa expected: %pa\n",
dev_err_ratelimited(dev, "Unexpected dma_addr. got: %pa expected: %pa\n",
&dma_addr, &expected);
ret = -EINVAL;
goto out;
}
if (dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE) != DMA_ERROR_CODE) {
dev_err(dev,
dev_err_ratelimited(dev,
"dma_map_single unexpectedly after remaps (VA should have been exhausted)\n");
ret = -EINVAL;
goto out;
@@ -869,7 +869,7 @@ static int __rand_va_sweep(struct device *dev, struct seq_file *s,
virt = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
if (!virt) {
if (size > SZ_8K) {
dev_err(dev,
dev_err_ratelimited(dev,
"Failed to allocate %s of memory, which is a lot. Skipping test for this size\n",
_size_to_string(size));
return 0;
@@ -881,7 +881,7 @@ static int __rand_va_sweep(struct device *dev, struct seq_file *s,
for (iova = 0, i = 0; iova < max; iova += size, ++i) {
dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
if (dma_addr == DMA_ERROR_CODE) {
dev_err(dev, "Failed map on iter %d\n", i);
dev_err_ratelimited(dev, "Failed map on iter %d\n", i);
ret = -EINVAL;
goto out;
}
@@ -914,7 +914,7 @@ static int __rand_va_sweep(struct device *dev, struct seq_file *s,
}
if (unmapped != remapped) {
dev_err(dev,
dev_err_ratelimited(dev,
"Unexpected random remap count! Unmapped %d but remapped %d\n",
unmapped, remapped);
ret = -EINVAL;
@@ -959,7 +959,7 @@ static int __full_va_sweep(struct device *dev, struct seq_file *s,
virt = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
if (!virt) {
if (size > SZ_8K) {
dev_err(dev,
dev_err_ratelimited(dev,
"Failed to allocate %s of memory, which is a lot. Skipping test for this size\n",
_size_to_string(size));
return 0;
@@ -988,7 +988,7 @@ static int __full_va_sweep(struct device *dev, struct seq_file *s,
phys_addr_t expected = phys;
if (__check_mapping(dev, domain, iova, expected)) {
dev_err(dev, "iter: %d\n", i);
dev_err_ratelimited(dev, "iter: %d\n", i);
ret = -EINVAL;
goto out;
}
@@ -999,7 +999,7 @@ static int __full_va_sweep(struct device *dev, struct seq_file *s,
unsigned long theiova = ((SZ_1G * 4ULL) - size) - iova;
if (__check_mapping(dev, domain, theiova, expected)) {
dev_err(dev, "iter: %d\n", i);
dev_err_ratelimited(dev, "iter: %d\n", i);
ret = -EINVAL;
goto out;
}
@@ -1158,7 +1158,7 @@ static int __functional_dma_api_basic_test(struct device *dev,
pa = iommu_iova_to_phys(domain, iova);
pa2 = iommu_iova_to_phys_hard(domain, iova);
if (pa != pa2) {
dev_err(dev,
dev_err_ratelimited(dev,
"iova_to_phys doesn't match iova_to_phys_hard: %pa != %pa\n",
&pa, &pa2);
ret = -EINVAL;
@@ -1166,7 +1166,7 @@ static int __functional_dma_api_basic_test(struct device *dev,
}
pa2 = virt_to_phys(data);
if (pa != pa2) {
dev_err(dev,
dev_err_ratelimited(dev,
"iova_to_phys doesn't match virt_to_phys: %pa != %pa\n",
&pa, &pa2);
ret = -EINVAL;
@@ -1175,7 +1175,8 @@ static int __functional_dma_api_basic_test(struct device *dev,
dma_unmap_single(dev, iova, size, DMA_TO_DEVICE);
for (j = 0; j < size; ++j) {
if (data[j] != 0xa5) {
dev_err(dev, "data[%d] != 0xa5\n", data[j]);
dev_err_ratelimited(dev,
"data[%d] != 0xa5\n", data[j]);
ret = -EINVAL;
goto out;
}
@@ -1230,7 +1231,7 @@ static int __functional_dma_api_map_sg_test(struct device *dev,
pa = iommu_iova_to_phys(domain, iova);
pa2 = iommu_iova_to_phys_hard(domain, iova);
if (pa != pa2) {
dev_err(dev,
dev_err_ratelimited(dev,
"iova_to_phys doesn't match iova_to_phys_hard: %pa != %pa\n",
&pa, &pa2);
ret = -EINVAL;
@@ -1241,7 +1242,7 @@ static int __functional_dma_api_map_sg_test(struct device *dev,
pa = iommu_iova_to_phys(domain, iova);
pa2 = iommu_iova_to_phys_hard(domain, iova);
if (pa != pa2) {
dev_err(dev,
dev_err_ratelimited(dev,
"iova_to_phys doesn't match iova_to_phys_hard: %pa != %pa\n",
&pa, &pa2);
ret = -EINVAL;
@@ -1289,7 +1290,7 @@ static int __apply_to_new_mapping(struct seq_file *s,
goto out_release_mapping;
}
dev_err(dev, "testing with pgtables at %pa\n", &pt_phys);
dev_err_ratelimited(dev, "testing with pgtables at %pa\n", &pt_phys);
if (iommu_enable_config_clocks(domain)) {
ds_printf(dev, s, "Couldn't enable clocks\n");
goto out_release_mapping;
@@ -1378,7 +1379,7 @@ static ssize_t __iommu_debug_attach_write(struct file *file,
int val, ret;
if (kstrtoint_from_user(ubuf, count, 0, &val)) {
pr_err("Invalid format. Expected a hex or decimal integer");
pr_err_ratelimited("Invalid format. Expected a hex or decimal integer");
return -EFAULT;
}
@@ -1386,10 +1387,10 @@ static ssize_t __iommu_debug_attach_write(struct file *file,
if (val) {
ret = iommu_debug_dma_reconfigure(ddev, attrs, 0, SZ_1G * 4ULL);
if (!ret)
pr_err("Attached\n");
pr_err_ratelimited("Attached\n");
} else {
iommu_debug_dma_deconfigure(ddev);
pr_err("Detached\n");
pr_err_ratelimited("Detached\n");
}
mutex_unlock(&ddev->state_lock);
retval = count;
@@ -1424,7 +1425,7 @@ static ssize_t iommu_debug_attach_read(struct file *file, char __user *ubuf,
c[0] = ddev->domain ? '1' : '0';
c[1] = '\n';
if (copy_to_user(ubuf, &c, 2)) {
pr_err("copy_to_user failed\n");
pr_err_ratelimited("copy_to_user failed\n");
return -EFAULT;
}
*offset = 1; /* non-zero means we're done */
@@ -1459,7 +1460,7 @@ static ssize_t iommu_debug_test_virt_addr_read(struct file *file,
buflen = strlen(buf);
if (copy_to_user(ubuf, buf, buflen)) {
pr_err("Couldn't copy_to_user\n");
pr_err_ratelimited("Couldn't copy_to_user\n");
retval = -EFAULT;
} else {
*offset = 1; /* non-zero means we're done */
@@ -1494,13 +1495,13 @@ static ssize_t iommu_debug_pte_write(struct file *file,
dma_addr_t iova;
if (kstrtox_from_user(ubuf, count, 0, &iova)) {
pr_err("Invalid format for iova\n");
pr_err_ratelimited("Invalid format for iova\n");
ddev->iova = 0;
return -EINVAL;
}
ddev->iova = iova;
pr_err("Saved iova=%pa for future PTE commands\n", &iova);
pr_err_ratelimited("Saved iova=%pa for future PTE commands\n", &iova);
return count;
}
@@ -1515,7 +1516,7 @@ static ssize_t iommu_debug_pte_read(struct file *file, char __user *ubuf,
size_t buflen;
if (kptr_restrict != 0) {
pr_err("kptr_restrict needs to be disabled.\n");
pr_err_ratelimited("kptr_restrict needs to be disabled.\n");
return -EPERM;
}
@@ -1524,7 +1525,7 @@ static ssize_t iommu_debug_pte_read(struct file *file, char __user *ubuf,
mutex_lock(&ddev->state_lock);
if (!ddev->domain) {
pr_err("No domain. Did you already attach?\n");
pr_err_ratelimited("No domain. Did you already attach?\n");
mutex_unlock(&ddev->state_lock);
return -EINVAL;
}
@@ -1540,7 +1541,7 @@ static ssize_t iommu_debug_pte_read(struct file *file, char __user *ubuf,
buflen = strlen(buf);
if (copy_to_user(ubuf, buf, buflen)) {
pr_err("Couldn't copy_to_user\n");
pr_err_ratelimited("Couldn't copy_to_user\n");
retval = -EFAULT;
} else {
*offset = 1; /* non-zero means we're done */
@@ -1565,13 +1566,13 @@ static ssize_t iommu_debug_atos_write(struct file *file,
dma_addr_t iova;
if (kstrtox_from_user(ubuf, count, 0, &iova)) {
pr_err("Invalid format for iova\n");
pr_err_ratelimited("Invalid format for iova\n");
ddev->iova = 0;
return -EINVAL;
}
ddev->iova = iova;
pr_err("Saved iova=%pa for future ATOS commands\n", &iova);
pr_err_ratelimited("Saved iova=%pa for future ATOS commands\n", &iova);
return count;
}
@@ -1585,7 +1586,7 @@ static ssize_t iommu_debug_atos_read(struct file *file, char __user *ubuf,
size_t buflen;
if (kptr_restrict != 0) {
pr_err("kptr_restrict needs to be disabled.\n");
pr_err_ratelimited("kptr_restrict needs to be disabled.\n");
return -EPERM;
}
@@ -1594,7 +1595,7 @@ static ssize_t iommu_debug_atos_read(struct file *file, char __user *ubuf,
mutex_lock(&ddev->state_lock);
if (!ddev->domain) {
pr_err("No domain. Did you already attach?\n");
pr_err_ratelimited("No domain. Did you already attach?\n");
mutex_unlock(&ddev->state_lock);
return -EINVAL;
}
@@ -1605,7 +1606,7 @@ static ssize_t iommu_debug_atos_read(struct file *file, char __user *ubuf,
if (!phys) {
strlcpy(buf, "FAIL\n", 100);
phys = iommu_iova_to_phys(ddev->domain, ddev->iova);
dev_err(ddev->dev, "ATOS for %pa failed. Software walk returned: %pa\n",
dev_err_ratelimited(ddev->dev, "ATOS for %pa failed. Software walk returned: %pa\n",
&ddev->iova, &phys);
} else {
snprintf(buf, 100, "%pa\n", &phys);
@@ -1613,7 +1614,7 @@ static ssize_t iommu_debug_atos_read(struct file *file, char __user *ubuf,
buflen = strlen(buf);
if (copy_to_user(ubuf, buf, buflen)) {
pr_err("Couldn't copy_to_user\n");
pr_err_ratelimited("Couldn't copy_to_user\n");
retval = -EFAULT;
} else {
*offset = 1; /* non-zero means we're done */
@@ -1640,7 +1641,7 @@ static ssize_t iommu_debug_dma_atos_read(struct file *file, char __user *ubuf,
size_t buflen;
if (kptr_restrict != 0) {
pr_err("kptr_restrict needs to be disabled.\n");
pr_err_ratelimited("kptr_restrict needs to be disabled.\n");
return -EPERM;
}
if (*offset)
@@ -1648,7 +1649,7 @@ static ssize_t iommu_debug_dma_atos_read(struct file *file, char __user *ubuf,
mutex_lock(&ddev->state_lock);
if (!ddev->domain) {
pr_err("No domain. Did you already attach?\n");
pr_err_ratelimited("No domain. Did you already attach?\n");
mutex_unlock(&ddev->state_lock);
return -EINVAL;
}
@@ -1664,7 +1665,7 @@ static ssize_t iommu_debug_dma_atos_read(struct file *file, char __user *ubuf,
buflen = strlen(buf);
if (copy_to_user(ubuf, buf, buflen)) {
pr_err("Couldn't copy_to_user\n");
pr_err_ratelimited("Couldn't copy_to_user\n");
retval = -EFAULT;
} else {
*offset = 1; /* non-zero means we're done */
@@ -1695,14 +1696,14 @@ static ssize_t iommu_debug_map_write(struct file *file, const char __user *ubuf,
struct iommu_debug_device *ddev = file->private_data;
if (count >= 100) {
pr_err("Value too large\n");
pr_err_ratelimited("Value too large\n");
return -EINVAL;
}
memset(buf, 0, 100);
if (copy_from_user(buf, ubuf, count)) {
pr_err("Couldn't copy from user\n");
pr_err_ratelimited("Couldn't copy from user\n");
retval = -EFAULT;
}
@@ -1735,27 +1736,27 @@ static ssize_t iommu_debug_map_write(struct file *file, const char __user *ubuf,
mutex_lock(&ddev->state_lock);
if (!ddev->domain) {
pr_err("No domain. Did you already attach?\n");
pr_err_ratelimited("No domain. Did you already attach?\n");
mutex_unlock(&ddev->state_lock);
return -EINVAL;
}
ret = iommu_map(ddev->domain, iova, phys, size, prot);
if (ret) {
pr_err("iommu_map failed with %d\n", ret);
pr_err_ratelimited("iommu_map failed with %d\n", ret);
retval = -EIO;
goto out;
}
retval = count;
pr_err("Mapped %pa to %pa (len=0x%zx, prot=0x%x)\n",
pr_err_ratelimited("Mapped %pa to %pa (len=0x%zx, prot=0x%x)\n",
&iova, &phys, size, prot);
out:
mutex_unlock(&ddev->state_lock);
return retval;
invalid_format:
pr_err("Invalid format. Expected: iova,phys,len,prot where `prot' is the bitwise OR of IOMMU_READ, IOMMU_WRITE, etc.\n");
pr_err_ratelimited("Invalid format. Expected: iova,phys,len,prot where `prot' is the bitwise OR of IOMMU_READ, IOMMU_WRITE, etc.\n");
return -EINVAL;
}
@@ -1789,14 +1790,14 @@ static ssize_t iommu_debug_dma_map_write(struct file *file,
struct device *dev = ddev->dev;
if (count >= sizeof(buf)) {
pr_err("Value too large\n");
pr_err_ratelimited("Value too large\n");
return -EINVAL;
}
memset(buf, 0, sizeof(buf));
if (copy_from_user(buf, ubuf, count)) {
pr_err("Couldn't copy from user\n");
pr_err_ratelimited("Couldn't copy from user\n");
return -EFAULT;
}
@@ -1836,7 +1837,7 @@ static ssize_t iommu_debug_dma_map_write(struct file *file,
mutex_lock(&ddev->state_lock);
if (!ddev->domain) {
pr_err("No domain. Did you already attach?\n");
pr_err_ratelimited("No domain. Did you already attach?\n");
mutex_unlock(&ddev->state_lock);
return -EINVAL;
}
@@ -1845,26 +1846,27 @@ static ssize_t iommu_debug_dma_map_write(struct file *file,
DMA_TO_DEVICE, dma_attrs);
if (dma_mapping_error(dev, iova)) {
pr_err("Failed to perform dma_map_single\n");
pr_err_ratelimited("Failed to perform dma_map_single\n");
ret = -EINVAL;
goto out;
}
retval = count;
pr_err("Mapped 0x%p to %pa (len=0x%zx)\n",
pr_err_ratelimited("Mapped 0x%p to %pa (len=0x%zx)\n",
v_addr, &iova, size);
ddev->iova = iova;
pr_err("Saved iova=%pa for future PTE commands\n", &iova);
pr_err_ratelimited("Saved iova=%pa for future PTE commands\n",
&iova);
out:
mutex_unlock(&ddev->state_lock);
return retval;
invalid_format:
pr_err("Invalid format. Expected: addr,len,dma attr where 'dma attr' is\n0: normal mapping\n1: force coherent\n2: force non-cohernet\n3: use system cache\n");
pr_err_ratelimited("Invalid format. Expected: addr,len,dma attr where 'dma attr' is\n0: normal mapping\n1: force coherent\n2: force non-cohernet\n3: use system cache\n");
return retval;
invalid_addr:
pr_err("Invalid addr given! Address should be within 1MB size from start addr returned by doing 'cat test_virt_addr'.\n");
pr_err_ratelimited("Invalid addr given! Address should be within 1MB size from start addr returned by doing 'cat test_virt_addr'.\n");
return retval;
}
@@ -1887,7 +1889,7 @@ static ssize_t iommu_debug_dma_map_read(struct file *file, char __user *ubuf,
buflen = strlen(buf);
if (copy_to_user(ubuf, buf, buflen)) {
pr_err("Couldn't copy_to_user\n");
pr_err_ratelimited("Couldn't copy_to_user\n");
retval = -EFAULT;
} else {
*offset = 1; /* non-zero means we're done */
@@ -1916,19 +1918,19 @@ static ssize_t iommu_debug_unmap_write(struct file *file,
struct iommu_debug_device *ddev = file->private_data;
if (count >= 100) {
pr_err("Value too large\n");
pr_err_ratelimited("Value too large\n");
return -EINVAL;
}
if (!ddev->domain) {
pr_err("No domain. Did you already attach?\n");
pr_err_ratelimited("No domain. Did you already attach?\n");
return -EINVAL;
}
memset(buf, 0, 100);
if (copy_from_user(buf, ubuf, count)) {
pr_err("Couldn't copy from user\n");
pr_err_ratelimited("Couldn't copy from user\n");
retval = -EFAULT;
goto out;
}
@@ -1948,27 +1950,27 @@ static ssize_t iommu_debug_unmap_write(struct file *file,
mutex_lock(&ddev->state_lock);
if (!ddev->domain) {
pr_err("No domain. Did you already attach?\n");
pr_err_ratelimited("No domain. Did you already attach?\n");
mutex_unlock(&ddev->state_lock);
return -EINVAL;
}
unmapped = iommu_unmap(ddev->domain, iova, size);
if (unmapped != size) {
pr_err("iommu_unmap failed. Expected to unmap: 0x%zx, unmapped: 0x%zx",
pr_err_ratelimited("iommu_unmap failed. Expected to unmap: 0x%zx, unmapped: 0x%zx",
size, unmapped);
retval = -EIO;
goto out;
}
retval = count;
pr_err("Unmapped %pa (len=0x%zx)\n", &iova, size);
pr_err_ratelimited("Unmapped %pa (len=0x%zx)\n", &iova, size);
out:
mutex_unlock(&ddev->state_lock);
return retval;
invalid_format:
pr_err("Invalid format. Expected: iova,len\n");
pr_err_ratelimited("Invalid format. Expected: iova,len\n");
return -EINVAL;
}
@@ -1992,14 +1994,14 @@ static ssize_t iommu_debug_dma_unmap_write(struct file *file,
struct device *dev = ddev->dev;
if (count >= sizeof(buf)) {
pr_err("Value too large\n");
pr_err_ratelimited("Value too large\n");
return -EINVAL;
}
memset(buf, 0, sizeof(buf));
if (copy_from_user(buf, ubuf, count)) {
pr_err("Couldn't copy from user\n");
pr_err_ratelimited("Couldn't copy from user\n");
retval = -EFAULT;
goto out;
}
@@ -2036,20 +2038,20 @@ static ssize_t iommu_debug_dma_unmap_write(struct file *file,
mutex_lock(&ddev->state_lock);
if (!ddev->domain) {
pr_err("No domain. Did you already attach?\n");
pr_err_ratelimited("No domain. Did you already attach?\n");
mutex_unlock(&ddev->state_lock);
return -EINVAL;
}
dma_unmap_single_attrs(dev, iova, size, DMA_TO_DEVICE, dma_attrs);
retval = count;
pr_err("Unmapped %pa (len=0x%zx)\n", &iova, size);
pr_err_ratelimited("Unmapped %pa (len=0x%zx)\n", &iova, size);
out:
mutex_unlock(&ddev->state_lock);
return retval;
invalid_format:
pr_err("Invalid format. Expected: iova,len, dma attr\n");
pr_err_ratelimited("Invalid format. Expected: iova,len, dma attr\n");
return retval;
}
@@ -2068,17 +2070,17 @@ static ssize_t iommu_debug_config_clocks_write(struct file *file,
/* we're expecting a single character plus (optionally) a newline */
if (count > 2) {
dev_err(dev, "Invalid value\n");
dev_err_ratelimited(dev, "Invalid value\n");
return -EINVAL;
}
if (!ddev->domain) {
dev_err(dev, "No domain. Did you already attach?\n");
dev_err_ratelimited(dev, "No domain. Did you already attach?\n");
return -EINVAL;
}
if (copy_from_user(&buf, ubuf, 1)) {
dev_err(dev, "Couldn't copy from user\n");
dev_err_ratelimited(dev, "Couldn't copy from user\n");
return -EFAULT;
}
@@ -2086,26 +2088,26 @@ static ssize_t iommu_debug_config_clocks_write(struct file *file,
switch (buf) {
case '0':
if (ddev->clk_count == 0) {
dev_err(dev, "Config clocks already disabled\n");
dev_err_ratelimited(dev, "Config clocks already disabled\n");
break;
}
if (--ddev->clk_count > 0)
break;
dev_err(dev, "Disabling config clocks\n");
dev_err_ratelimited(dev, "Disabling config clocks\n");
iommu_disable_config_clocks(ddev->domain);
break;
case '1':
if (ddev->clk_count++ > 0)
break;
dev_err(dev, "Enabling config clocks\n");
dev_err_ratelimited(dev, "Enabling config clocks\n");
if (iommu_enable_config_clocks(ddev->domain))
dev_err(dev, "Failed!\n");
dev_err_ratelimited(dev, "Failed!\n");
break;
default:
dev_err(dev, "Invalid value. Should be 0 or 1.\n");
dev_err_ratelimited(dev, "Invalid value. Should be 0 or 1.\n");
mutex_unlock(&ddev->clk_lock);
return -EINVAL;
}
@@ -2127,13 +2129,13 @@ static ssize_t iommu_debug_trigger_fault_write(
unsigned long flags;
if (kstrtoul_from_user(ubuf, count, 0, &flags)) {
pr_err("Invalid flags format\n");
pr_err_ratelimited("Invalid flags format\n");
return -EFAULT;
}
mutex_lock(&ddev->state_lock);
if (!ddev->domain) {
pr_err("No domain. Did you already attach?\n");
pr_err_ratelimited("No domain. Did you already attach?\n");
mutex_unlock(&ddev->state_lock);
return -EINVAL;
}
@@ -2177,147 +2179,147 @@ static int snarf_iommu_devices(struct device *dev, void *ignored)
ddev->dev = dev;
dir = debugfs_create_dir(dev_name(dev), debugfs_tests_dir);
if (!dir) {
pr_err("Couldn't create iommu/devices/%s debugfs dir\n",
pr_err_ratelimited("Couldn't create iommu/devices/%s debugfs dir\n",
dev_name(dev));
goto err;
}
if (!debugfs_create_file("nr_iters", 0400, dir, &iters_per_op,
&iommu_debug_nr_iters_ops)) {
pr_err("Couldn't create iommu/devices/%s/nr_iters debugfs file\n",
pr_err_ratelimited("Couldn't create iommu/devices/%s/nr_iters debugfs file\n",
dev_name(dev));
goto err_rmdir;
}
if (!debugfs_create_file("test_virt_addr", 0400, dir, ddev,
&iommu_debug_test_virt_addr_fops)) {
pr_err("Couldn't create iommu/devices/%s/test_virt_addr debugfs file\n",
pr_err_ratelimited("Couldn't create iommu/devices/%s/test_virt_addr debugfs file\n",
dev_name(dev));
goto err_rmdir;
}
if (!debugfs_create_file("profiling", 0400, dir, ddev,
&iommu_debug_profiling_fops)) {
pr_err("Couldn't create iommu/devices/%s/profiling debugfs file\n",
pr_err_ratelimited("Couldn't create iommu/devices/%s/profiling debugfs file\n",
dev_name(dev));
goto err_rmdir;
}
if (!debugfs_create_file("secure_profiling", 0400, dir, ddev,
&iommu_debug_secure_profiling_fops)) {
pr_err("Couldn't create iommu/devices/%s/secure_profiling debugfs file\n",
pr_err_ratelimited("Couldn't create iommu/devices/%s/secure_profiling debugfs file\n",
dev_name(dev));
goto err_rmdir;
}
if (!debugfs_create_file("profiling_fast", 0400, dir, ddev,
&iommu_debug_profiling_fast_fops)) {
pr_err("Couldn't create iommu/devices/%s/profiling_fast debugfs file\n",
pr_err_ratelimited("Couldn't create iommu/devices/%s/profiling_fast debugfs file\n",
dev_name(dev));
goto err_rmdir;
}
if (!debugfs_create_file("profiling_fast_dma_api", 0400, dir, ddev,
&iommu_debug_profiling_fast_dma_api_fops)) {
pr_err("Couldn't create iommu/devices/%s/profiling_fast_dma_api debugfs file\n",
pr_err_ratelimited("Couldn't create iommu/devices/%s/profiling_fast_dma_api debugfs file\n",
dev_name(dev));
goto err_rmdir;
}
if (!debugfs_create_file("functional_fast_dma_api", 0400, dir, ddev,
&iommu_debug_functional_fast_dma_api_fops)) {
pr_err("Couldn't create iommu/devices/%s/functional_fast_dma_api debugfs file\n",
pr_err_ratelimited("Couldn't create iommu/devices/%s/functional_fast_dma_api debugfs file\n",
dev_name(dev));
goto err_rmdir;
}
if (!debugfs_create_file("functional_arm_dma_api", 0400, dir, ddev,
&iommu_debug_functional_arm_dma_api_fops)) {
pr_err("Couldn't create iommu/devices/%s/functional_arm_dma_api debugfs file\n",
pr_err_ratelimited("Couldn't create iommu/devices/%s/functional_arm_dma_api debugfs file\n",
dev_name(dev));
goto err_rmdir;
}
if (!debugfs_create_file("dma_attach", 0600, dir, ddev,
&iommu_debug_dma_attach_fops)) {
pr_err("Couldn't create iommu/devices/%s/dma_attach debugfs file\n",
pr_err_ratelimited("Couldn't create iommu/devices/%s/dma_attach debugfs file\n",
dev_name(dev));
goto err_rmdir;
}
if (!debugfs_create_file("attach", 0400, dir, ddev,
&iommu_debug_attach_fops)) {
pr_err("Couldn't create iommu/devices/%s/attach debugfs file\n",
pr_err_ratelimited("Couldn't create iommu/devices/%s/attach debugfs file\n",
dev_name(dev));
goto err_rmdir;
}
if (!debugfs_create_file("secure_attach", 0400, dir, ddev,
&iommu_debug_secure_attach_fops)) {
pr_err("Couldn't create iommu/devices/%s/secure_attach debugfs file\n",
pr_err_ratelimited("Couldn't create iommu/devices/%s/secure_attach debugfs file\n",
dev_name(dev));
goto err_rmdir;
}
if (!debugfs_create_file("atos", 0200, dir, ddev,
&iommu_debug_atos_fops)) {
pr_err("Couldn't create iommu/devices/%s/atos debugfs file\n",
pr_err_ratelimited("Couldn't create iommu/devices/%s/atos debugfs file\n",
dev_name(dev));
goto err_rmdir;
}
if (!debugfs_create_file("dma_atos", 0600, dir, ddev,
&iommu_debug_dma_atos_fops)) {
pr_err("Couldn't create iommu/devices/%s/dma_atos debugfs file\n",
pr_err_ratelimited("Couldn't create iommu/devices/%s/dma_atos debugfs file\n",
dev_name(dev));
goto err_rmdir;
}
if (!debugfs_create_file("map", 0200, dir, ddev,
&iommu_debug_map_fops)) {
pr_err("Couldn't create iommu/devices/%s/map debugfs file\n",
pr_err_ratelimited("Couldn't create iommu/devices/%s/map debugfs file\n",
dev_name(dev));
goto err_rmdir;
}
if (!debugfs_create_file("dma_map", 0600, dir, ddev,
&iommu_debug_dma_map_fops)) {
pr_err("Couldn't create iommu/devices/%s/dma_map debugfs file\n",
pr_err_ratelimited("Couldn't create iommu/devices/%s/dma_map debugfs file\n",
dev_name(dev));
goto err_rmdir;
}
if (!debugfs_create_file("unmap", 0200, dir, ddev,
&iommu_debug_unmap_fops)) {
pr_err("Couldn't create iommu/devices/%s/unmap debugfs file\n",
pr_err_ratelimited("Couldn't create iommu/devices/%s/unmap debugfs file\n",
dev_name(dev));
goto err_rmdir;
}
if (!debugfs_create_file("dma_unmap", 0200, dir, ddev,
&iommu_debug_dma_unmap_fops)) {
pr_err("Couldn't create iommu/devices/%s/dma_unmap debugfs file\n",
pr_err_ratelimited("Couldn't create iommu/devices/%s/dma_unmap debugfs file\n",
dev_name(dev));
goto err_rmdir;
}
if (!debugfs_create_file("pte", 0600, dir, ddev,
&iommu_debug_pte_fops)) {
pr_err("Couldn't create iommu/devices/%s/pte debugfs file\n",
pr_err_ratelimited("Couldn't create iommu/devices/%s/pte debugfs file\n",
dev_name(dev));
goto err_rmdir;
}
if (!debugfs_create_file("config_clocks", 0200, dir, ddev,
&iommu_debug_config_clocks_fops)) {
pr_err("Couldn't create iommu/devices/%s/config_clocks debugfs file\n",
pr_err_ratelimited("Couldn't create iommu/devices/%s/config_clocks debugfs file\n",
dev_name(dev));
goto err_rmdir;
}
if (!debugfs_create_file("trigger-fault", 0200, dir, ddev,
&iommu_debug_trigger_fault_fops)) {
pr_err("Couldn't create iommu/devices/%s/trigger-fault debugfs file\n",
pr_err_ratelimited("Couldn't create iommu/devices/%s/trigger-fault debugfs file\n",
dev_name(dev));
goto err_rmdir;
}
@@ -2337,7 +2339,7 @@ static int iommu_debug_init_tests(void)
debugfs_tests_dir = debugfs_create_dir("tests",
iommu_debugfs_top);
if (!debugfs_tests_dir) {
pr_err("Couldn't create iommu/tests debugfs directory\n");
pr_err_ratelimited("Couldn't create iommu/tests debugfs directory\n");
return -ENODEV;
}


@@ -103,6 +103,14 @@ static int of_iommu_xlate(struct device *dev,
int err;
ops = iommu_ops_from_fwnode(fwnode);
/*
* Return -EPROBE_DEFER for platform devices that depend on the SMMU
* driver's registration. Deferring here helps place the clients in
* the proper iommu groups.
*/
if (!dev_is_pci(dev) && of_device_is_available(iommu_spec->np) && !ops)
return -EPROBE_DEFER;
if ((ops && !ops->of_xlate) ||
!of_device_is_available(iommu_spec->np))
return NO_IOMMU;


@@ -823,6 +823,10 @@ int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
void dma_release_declared_memory(struct device *dev);
void *dma_mark_declared_memory_occupied(struct device *dev,
dma_addr_t device_addr, size_t size);
dma_addr_t dma_get_device_base(struct device *dev,
struct dma_coherent_mem *mem);
unsigned long dma_get_size(struct dma_coherent_mem *mem);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
@@ -842,6 +846,17 @@ dma_mark_declared_memory_occupied(struct device *dev,
{
return ERR_PTR(-EBUSY);
}
static inline dma_addr_t
dma_get_device_base(struct device *dev, struct dma_coherent_mem *mem)
{
return 0;
}
static inline unsigned long dma_get_size(struct dma_coherent_mem *mem)
{
return 0;
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
#ifdef CONFIG_HAS_DMA


@@ -29,14 +29,21 @@ static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *de
return NULL;
}
static inline dma_addr_t dma_get_device_base(struct device *dev,
struct dma_coherent_mem * mem)
dma_addr_t dma_get_device_base(struct device *dev,
struct dma_coherent_mem *mem)
{
if (mem->use_dev_dma_pfn_offset)
return (mem->pfn_base - dev->dma_pfn_offset) << PAGE_SHIFT;
else
return mem->device_base;
}
EXPORT_SYMBOL(dma_get_device_base);
unsigned long dma_get_size(struct dma_coherent_mem *mem)
{
return mem->size << PAGE_SHIFT;
}
EXPORT_SYMBOL(dma_get_size);
static int dma_init_coherent_memory(
phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
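
Both helpers are exported so code outside the core DMA layer (in this
tree, most likely the fast-SMMU path) can query a device's coherent
pool, and the header in the previous file pairs them with static-inline
stubs that return 0 when CONFIG_HAVE_GENERIC_DMA_COHERENT is off. A
hedged usage sketch, assuming a struct dma_coherent_mem pointer
obtained elsewhere, for example from dev->dma_mem:

    /* Hypothetical usage: dma_get_size() converts the pool's page
     * count to bytes, and dma_get_device_base() applies the per-device
     * PFN offset when one is in use. */
    dma_addr_t base = dma_get_device_base(dev, mem);

    dev_info(dev, "coherent pool at %pad, %lu bytes\n",
             &base, dma_get_size(mem));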


@@ -75,7 +75,7 @@ void *removed_alloc(struct device *dev, size_t size, dma_addr_t *handle,
bool skip_zeroing = attrs & DMA_ATTR_SKIP_ZEROING;
int pageno;
unsigned long order;
void *addr = NULL;
void __iomem *addr = NULL;
struct removed_region *dma_mem = dev->removed_mem;
int nbits;
unsigned int align;
@@ -108,7 +108,7 @@ void *removed_alloc(struct device *dev, size_t size, dma_addr_t *handle,
goto out;
}
addr = ioremap(base, size);
addr = ioremap_wc(base, size);
if (WARN_ON(!addr)) {
bitmap_clear(dma_mem->bitmap, pageno, nbits);
} else {
@@ -202,10 +202,10 @@ void removed_sync_sg_for_device(struct device *dev,
{
}
void *removed_remap(struct device *dev, void *cpu_addr, dma_addr_t handle,
size_t size, unsigned long attrs)
static void __iomem *removed_remap(struct device *dev, void *cpu_addr,
dma_addr_t handle, size_t size, unsigned long attrs)
{
return ioremap(handle, size);
return ioremap_wc(handle, size);
}
void removed_unremap(struct device *dev, void *remapped_address, size_t size)
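
The substantive change in this last file is the switch from ioremap()
to ioremap_wc(): on arm64, ioremap() yields strongly-ordered Device
memory, while ioremap_wc() yields normal non-cacheable memory that
permits write combining, a better fit for a CPU-visible DMA buffer. A
minimal sketch of the call-site difference, with hypothetical base and
size values:

    /* Hypothetical sketch: a write-combined mapping lets the CPU merge
     * adjacent stores, which is much faster for bulk fills than the
     * strictly ordered accesses a plain ioremap() mapping requires. */
    void __iomem *buf = ioremap_wc(base, size);

    if (buf) {
            memset_io(buf, 0, size);    /* stores may be combined */
            iounmap(buf);
    }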