ion: Merge ion changes for msm-kona kernel upgrade

Merge all ion changes from msm-4.14 to msm-kona as of:
'commit 0fa34f5d9070 ("ion: fix system secure force alloc")'.

Change-Id: I040b44a89790f7096bf11c5b273ad0e5033eea9c
Signed-off-by: Swathi Sridhar <swatsrid@codeaurora.org>
Author:     Swathi Sridhar
AuthorDate: 2018-06-26 11:13:41 -07:00
Committer:  Patrick Daly
Commit:     ac04f0739d
Parent:     72c6f5f101

17 changed files with 526 additions and 95 deletions


@@ -30,4 +30,15 @@ config SW_SYNC
 	  WARNING: improper use of this can result in deadlocking kernel
 	  drivers from userspace. Intended for test and debug only.
 
+config DEBUG_DMA_BUF_REF
+	bool "DEBUG Reference Count"
+	depends on STACKDEPOT
+	depends on DMA_SHARED_BUFFER
+	default n
+	help
+	  Save stack traces for every call to dma_buf_get and dma_buf_put, to
+	  help debug memory leaks. Potential leaks may be found by manually
+	  matching the get/put call stacks. This feature consumes extra memory
+	  in order to save the stack traces using STACKDEPOT.
+
 endmenu


@@ -1,3 +1,4 @@
 obj-y := dma-buf.o dma-fence.o dma-fence-array.o reservation.o seqno-fence.o
 obj-$(CONFIG_SYNC_FILE) += sync_file.o
 obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o
+obj-$(CONFIG_DEBUG_DMA_BUF_REF) += dma-buf-ref.o


@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/dma-buf.h>
+#include <linux/slab.h>
+#include <linux/stackdepot.h>
+#include <linux/stacktrace.h>
+#include <linux/seq_file.h>
+
+#define DMA_BUF_STACK_DEPTH (16)
+
+struct dma_buf_ref {
+	struct list_head list;
+	depot_stack_handle_t handle;
+	int count;
+};
+
+void dma_buf_ref_init(struct dma_buf *dmabuf)
+{
+	INIT_LIST_HEAD(&dmabuf->refs);
+}
+
+void dma_buf_ref_destroy(struct dma_buf *dmabuf)
+{
+	struct dma_buf_ref *r, *n;
+
+	mutex_lock(&dmabuf->lock);
+	list_for_each_entry_safe(r, n, &dmabuf->refs, list) {
+		list_del(&r->list);
+		kfree(r);
+	}
+	mutex_unlock(&dmabuf->lock);
+}
+
+static void dma_buf_ref_insert_handle(struct dma_buf *dmabuf,
+				       depot_stack_handle_t handle,
+				       int count)
+{
+	struct dma_buf_ref *r;
+
+	mutex_lock(&dmabuf->lock);
+	list_for_each_entry(r, &dmabuf->refs, list) {
+		if (r->handle == handle) {
+			r->count += count;
+			goto out;
+		}
+	}
+
+	r = kzalloc(sizeof(*r), GFP_KERNEL);
+	if (!r)
+		goto out;
+
+	INIT_LIST_HEAD(&r->list);
+	r->handle = handle;
+	r->count = count;
+	list_add(&r->list, &dmabuf->refs);
+
+out:
+	mutex_unlock(&dmabuf->lock);
+}
+
+void dma_buf_ref_mod(struct dma_buf *dmabuf, int nr)
+{
+	unsigned long entries[DMA_BUF_STACK_DEPTH];
+	struct stack_trace trace = {
+		.nr_entries = 0,
+		.entries = entries,
+		.max_entries = DMA_BUF_STACK_DEPTH,
+		.skip = 1
+	};
+	depot_stack_handle_t handle;
+
+	save_stack_trace(&trace);
+	if (trace.nr_entries != 0 &&
+	    trace.entries[trace.nr_entries - 1] == ULONG_MAX)
+		trace.nr_entries--;
+
+	handle = depot_save_stack(&trace, GFP_KERNEL);
+	if (!handle)
+		return;
+
+	dma_buf_ref_insert_handle(dmabuf, handle, nr);
+}
+
+/**
+ * Called with dmabuf->lock held
+ */
+int dma_buf_ref_show(struct seq_file *s, struct dma_buf *dmabuf)
+{
+	char *buf;
+	struct dma_buf_ref *ref;
+	int count = 0;
+	struct stack_trace trace;
+
+	buf = (void *)__get_free_page(GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	list_for_each_entry(ref, &dmabuf->refs, list) {
+		count += ref->count;
+		seq_printf(s, "References: %d\n", ref->count);
+		depot_fetch_stack(ref->handle, &trace);
+		snprint_stack_trace(buf, PAGE_SIZE, &trace, 0);
+		seq_puts(s, buf);
+		seq_putc(s, '\n');
+	}
+
+	seq_printf(s, "Total references: %d\n\n\n", count);
+	free_page((unsigned long)buf);
+
+	return 0;
+}
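For context, a minimal sketch (not part of this patch) of the kind of imbalance the tracking is meant to surface: a dma_buf_get() that is never paired with dma_buf_put() leaves a saved call stack with a positive count in the buffer's refs list, which dma_buf_ref_show() then prints through debugfs.

/* Minimal sketch, not part of this patch: the leaked reference below shows up
 * as a saved call stack with "References: 1", because only dma_buf_get()
 * records +1 for this call site.
 */
static struct dma_buf *example_leaky_lookup(int fd)
{
	struct dma_buf *dmabuf = dma_buf_get(fd);	/* dma_buf_ref_mod(dmabuf, 1) */

	if (IS_ERR(dmabuf))
		return dmabuf;

	/* Missing dma_buf_put(dmabuf): the +1 entry is never balanced out. */
	return dmabuf;
}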


@@ -36,6 +36,9 @@
 #include <linux/mm.h>
 #include <linux/kernel.h>
 #include <linux/atomic.h>
+#include <linux/sched/signal.h>
+#include <linux/fdtable.h>
+#include <linux/list_sort.h>
 #include <uapi/linux/dma-buf.h>
@@ -48,6 +51,19 @@ struct dma_buf_list {
 	struct mutex lock;
 };
 
+struct dma_info {
+	struct dma_buf *dmabuf;
+	struct list_head head;
+};
+
+struct dma_proc {
+	char name[TASK_COMM_LEN];
+	pid_t pid;
+	size_t size;
+	struct list_head dma_bufs;
+	struct list_head head;
+};
+
 static struct dma_buf_list db_list;
 
 static int dma_buf_release(struct inode *inode, struct file *file)
@@ -71,12 +87,14 @@ static int dma_buf_release(struct inode *inode, struct file *file)
 	 */
 	BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);
 
-	dmabuf->ops->release(dmabuf);
-
 	mutex_lock(&db_list.lock);
 	list_del(&dmabuf->list_node);
 	mutex_unlock(&db_list.lock);
 
+	dmabuf->ops->release(dmabuf);
+
+	dma_buf_ref_destroy(dmabuf);
+
 	if (dmabuf->resv == (struct reservation_object *)&dmabuf[1])
 		reservation_object_fini(dmabuf->resv);
@@ -457,6 +475,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 	dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
 	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
 	dmabuf->name = bufname;
+	dmabuf->ktime = ktime_get();
 
 	if (!resv) {
 		resv = (struct reservation_object *)&dmabuf[1];
@@ -477,6 +496,9 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 	mutex_init(&dmabuf->lock);
 	INIT_LIST_HEAD(&dmabuf->attachments);
 
+	dma_buf_ref_init(dmabuf);
+	dma_buf_ref_mod(dmabuf, 1);
+
 	mutex_lock(&db_list.lock);
 	list_add(&dmabuf->list_node, &db_list.head);
 	mutex_unlock(&db_list.lock);
@@ -538,6 +560,7 @@ struct dma_buf *dma_buf_get(int fd)
 		fput(file);
 		return ERR_PTR(-EINVAL);
 	}
 
+	dma_buf_ref_mod(file->private_data, 1);
 	return file->private_data;
 }
@@ -558,6 +581,7 @@ void dma_buf_put(struct dma_buf *dmabuf)
 	if (WARN_ON(!dmabuf || !dmabuf->file))
 		return;
 
+	dma_buf_ref_mod(dmabuf, -1);
 	fput(dmabuf->file);
 }
 EXPORT_SYMBOL_GPL(dma_buf_put);
@@ -1203,6 +1227,8 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
seq_printf(s, "Total %d devices attached\n\n", seq_printf(s, "Total %d devices attached\n\n",
attach_count); attach_count);
dma_buf_ref_show(s, buf_obj);
count++; count++;
size += buf_obj->size; size += buf_obj->size;
mutex_unlock(&buf_obj->lock); mutex_unlock(&buf_obj->lock);
@@ -1226,6 +1252,157 @@ static const struct file_operations dma_buf_debug_fops = {
 	.release = single_release,
 };
 
+static bool list_contains(struct list_head *list, struct dma_buf *info)
+{
+	struct dma_info *curr;
+
+	list_for_each_entry(curr, list, head)
+		if (curr->dmabuf == info)
+			return true;
+
+	return false;
+}
+
+static int get_dma_info(const void *data, struct file *file, unsigned int n)
+{
+	struct dma_proc *dma_proc;
+	struct dma_info *dma_info;
+
+	dma_proc = (struct dma_proc *)data;
+	if (!is_dma_buf_file(file))
+		return 0;
+
+	if (list_contains(&dma_proc->dma_bufs, file->private_data))
+		return 0;
+
+	dma_info = kzalloc(sizeof(*dma_info), GFP_ATOMIC);
+	if (!dma_info)
+		return -ENOMEM;
+
+	get_file(file);
+	dma_info->dmabuf = file->private_data;
+	dma_proc->size += dma_info->dmabuf->size / SZ_1K;
+	list_add(&dma_info->head, &dma_proc->dma_bufs);
+
+	return 0;
+}
+
+static void write_proc(struct seq_file *s, struct dma_proc *proc)
+{
+	struct dma_info *tmp;
+
+	seq_printf(s, "\n%s (PID %ld) size: %ld\nDMA Buffers:\n",
+		   proc->name, proc->pid, proc->size);
+	seq_printf(s, "%-8s\t%-8s\t%-8s\n",
+		   "Name", "Size (KB)", "Time Alive (sec)");
+
+	list_for_each_entry(tmp, &proc->dma_bufs, head) {
+		struct dma_buf *dmabuf = tmp->dmabuf;
+		ktime_t elapmstime = ktime_ms_delta(ktime_get(), dmabuf->ktime);
+
+		elapmstime = ktime_divns(elapmstime, MSEC_PER_SEC);
+		seq_printf(s, "%-8s\t%-8ld\t%-8ld\n",
+			   dmabuf->name,
+			   dmabuf->size / SZ_1K,
+			   elapmstime);
+	}
+}
+
+static void free_proc(struct dma_proc *proc)
+{
+	struct dma_info *tmp, *n;
+
+	list_for_each_entry_safe(tmp, n, &proc->dma_bufs, head) {
+		dma_buf_put(tmp->dmabuf);
+		list_del(&tmp->head);
+		kfree(tmp);
+	}
+	kfree(proc);
+}
+
+static int dmacmp(void *unused, struct list_head *a, struct list_head *b)
+{
+	struct dma_info *a_buf, *b_buf;
+
+	a_buf = list_entry(a, struct dma_info, head);
+	b_buf = list_entry(b, struct dma_info, head);
+	return b_buf->dmabuf->size - a_buf->dmabuf->size;
+}
+
+static int proccmp(void *unused, struct list_head *a, struct list_head *b)
+{
+	struct dma_proc *a_proc, *b_proc;
+
+	a_proc = list_entry(a, struct dma_proc, head);
+	b_proc = list_entry(b, struct dma_proc, head);
+	return b_proc->size - a_proc->size;
+}
+
+static int dma_procs_debug_show(struct seq_file *s, void *unused)
+{
+	struct task_struct *task, *thread;
+	struct files_struct *files;
+	int ret = 0;
+	struct dma_proc *tmp, *n;
+	LIST_HEAD(plist);
+
+	read_lock(&tasklist_lock);
+	for_each_process(task) {
+		struct files_struct *group_leader_files = NULL;
+
+		tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
+		if (!tmp) {
+			ret = -ENOMEM;
+			read_unlock(&tasklist_lock);
+			goto mem_err;
+		}
+		INIT_LIST_HEAD(&tmp->dma_bufs);
+		for_each_thread(task, thread) {
+			task_lock(thread);
+			if (unlikely(!group_leader_files))
+				group_leader_files = task->group_leader->files;
+			files = thread->files;
+			if (files && (group_leader_files != files ||
+				      thread == task->group_leader))
+				ret = iterate_fd(files, 0, get_dma_info, tmp);
+			task_unlock(thread);
+		}
+		if (ret || list_empty(&tmp->dma_bufs))
+			goto skip;
+
+		list_sort(NULL, &tmp->dma_bufs, dmacmp);
+		get_task_comm(tmp->name, task);
+		tmp->pid = task->tgid;
+		list_add(&tmp->head, &plist);
+		continue;
+skip:
+		free_proc(tmp);
+	}
+	read_unlock(&tasklist_lock);
+
+	list_sort(NULL, &plist, proccmp);
+	list_for_each_entry(tmp, &plist, head)
+		write_proc(s, tmp);
+	ret = 0;
+
+mem_err:
+	list_for_each_entry_safe(tmp, n, &plist, head) {
+		list_del(&tmp->head);
+		free_proc(tmp);
+	}
+	return ret;
+}
+
+static int dma_procs_debug_open(struct inode *f_inode, struct file *file)
+{
+	return single_open(file, dma_procs_debug_show, NULL);
+}
+
+static const struct file_operations dma_procs_debug_fops = {
+	.open = dma_procs_debug_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release
+};
+
 static struct dentry *dma_buf_debugfs_dir;
 
 static int dma_buf_init_debugfs(void)
@@ -1246,6 +1423,17 @@ static int dma_buf_init_debugfs(void)
 		debugfs_remove_recursive(dma_buf_debugfs_dir);
 		dma_buf_debugfs_dir = NULL;
 		err = PTR_ERR(d);
+		return err;
+	}
+
+	d = debugfs_create_file("dmaprocs", 0444, dma_buf_debugfs_dir,
+				NULL, &dma_procs_debug_fops);
+	if (IS_ERR(d)) {
+		pr_debug("dma_buf: debugfs: failed to create node dmaprocs\n");
+		debugfs_remove_recursive(dma_buf_debugfs_dir);
+		dma_buf_debugfs_dir = NULL;
+		err = PTR_ERR(d);
 	}
 
 	return err;
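The per-process accounting added above can then be read back from debugfs; a minimal userspace sketch (not part of this patch, assuming debugfs is mounted at /sys/kernel/debug and the "dma_buf" directory created by dma_buf_init_debugfs()):

/* Minimal userspace sketch: dump the per-process dma-buf summary. */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/kernel/debug/dma_buf/dmaprocs", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}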


@@ -238,10 +238,17 @@ static struct mem_prot_info *get_info_list_from_table(struct sg_table *table,
 #define BATCH_MAX_SIZE SZ_2M
 #define BATCH_MAX_SECTIONS 32
 
-int hyp_assign_table(struct sg_table *table,
+/*
+ * When -EAGAIN is returned it is safe for the caller to try to call
+ * __hyp_assign_table again.
+ *
+ * When -EADDRNOTAVAIL is returned the memory may no longer be in
+ * a usable state and should no longer be accessed by the HLOS.
+ */
+static int __hyp_assign_table(struct sg_table *table,
 			u32 *source_vm_list, int source_nelems,
 			int *dest_vmids, int *dest_perms,
-			int dest_nelems)
+			int dest_nelems, bool try_lock)
 {
 	int ret = 0;
 	struct scm_desc desc = {0};
@@ -271,10 +278,17 @@ int hyp_assign_table(struct sg_table *table,
 					 &dest_vm_copy_size);
 	if (!dest_vm_copy) {
 		ret = -ENOMEM;
-		goto out_free;
+		goto out_free_src;
 	}
 
-	mutex_lock(&secure_buffer_mutex);
+	if (try_lock) {
+		if (!mutex_trylock(&secure_buffer_mutex)) {
+			ret = -EAGAIN;
+			goto out_free_dest;
+		}
+	} else {
+		mutex_lock(&secure_buffer_mutex);
+	}
 
 	sg_table_copy = get_info_list_from_table(table, &sg_table_copy_size);
 	if (!sg_table_copy) {
@@ -330,6 +344,12 @@ int hyp_assign_table(struct sg_table *table,
 		if (ret) {
 			pr_info("%s: Failed to assign memory protection, ret = %d\n",
 				__func__, ret);
+			/*
+			 * Make it clear to clients that the memory may no
+			 * longer be in a usable state.
+			 */
+			ret = -EADDRNOTAVAIL;
 			break;
 		}
 
 		batch_start = batch_end;
@@ -337,12 +357,31 @@ int hyp_assign_table(struct sg_table *table,
 out_unlock:
 	mutex_unlock(&secure_buffer_mutex);
+out_free_dest:
 	kfree(dest_vm_copy);
-out_free:
+out_free_src:
 	kfree(source_vm_copy);
 	return ret;
 }
 
+int hyp_assign_table(struct sg_table *table,
+			u32 *source_vm_list, int source_nelems,
+			int *dest_vmids, int *dest_perms,
+			int dest_nelems)
+{
+	return __hyp_assign_table(table, source_vm_list, source_nelems,
+				  dest_vmids, dest_perms, dest_nelems, false);
+}
+
+int try_hyp_assign_table(struct sg_table *table,
+			u32 *source_vm_list, int source_nelems,
+			int *dest_vmids, int *dest_perms,
+			int dest_nelems)
+{
+	return __hyp_assign_table(table, source_vm_list, source_nelems,
+				  dest_vmids, dest_perms, dest_nelems, true);
+}
+
 int hyp_assign_phys(phys_addr_t addr, u64 size, u32 *source_vm_list,
 			int source_nelems, int *dest_vmids,
 			int *dest_perms, int dest_nelems)
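For reference, a minimal caller sketch (hypothetical helper, not part of this patch) showing how the two new return codes are meant to be handled: -EAGAIN means the lock was contended and the call may simply be retried later, while -EADDRNOTAVAIL means the pages may no longer be usable by the HLOS.

/* Minimal sketch, assuming VMID_CP_PIXEL/VMID_HLOS and the PERM_* flags from
 * soc/qcom/secure_buffer.h; example_unassign() itself is hypothetical.
 */
static int example_unassign(struct sg_table *sgt)
{
	u32 src_vmid = VMID_CP_PIXEL;
	int dest_vmid = VMID_HLOS;
	int dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
	int ret;

	ret = try_hyp_assign_table(sgt, &src_vmid, 1, &dest_vmid, &dest_perms, 1);
	if (ret == -EAGAIN)
		return 0;	/* contended; safe to retry on a later pass */
	if (ret == -EADDRNOTAVAIL)
		return ret;	/* memory may be unusable; do not hand it back to HLOS */
	return ret;
}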


@@ -172,8 +172,7 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 void ion_buffer_destroy(struct ion_buffer *buffer)
 {
 	if (buffer->kmap_cnt > 0) {
-		pr_warn_once("%s: buffer still mapped in the kernel\n",
-			     __func__);
+		pr_warn_ratelimited("ION client likely missing a call to dma_buf_kunmap or dma_buf_vunmap\n");
 		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
 	}
 	buffer->heap->ops->free(buffer);
@@ -220,7 +219,7 @@ static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
 static void ion_buffer_kmap_put(struct ion_buffer *buffer)
 {
 	if (buffer->kmap_cnt == 0) {
-		pr_warn_ratelimited("Call dma_buf_begin_cpu_access before dma_buf_end_cpu_access, pid:%d\n",
+		pr_warn_ratelimited("ION client likely missing a call to dma_buf_kmap or dma_buf_vmap, pid:%d\n",
 				    current->pid);
 		return;
 	}
@@ -495,31 +494,59 @@ static void ion_dma_buf_release(struct dma_buf *dmabuf)
 	struct ion_buffer *buffer = dmabuf->priv;
 
 	_ion_buffer_destroy(buffer);
+	kfree(dmabuf->exp_name);
 }
 
-static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
-{
-	struct ion_buffer *buffer = dmabuf->priv;
-
-	WARN(!buffer->vaddr, "Call dma_buf_begin_cpu_access before dma_buf_kmap\n");
-	return buffer->vaddr + offset * PAGE_SIZE;
-}
-
-static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
-			       void *ptr)
-{
-}
-
 static void *ion_dma_buf_vmap(struct dma_buf *dmabuf)
 {
 	struct ion_buffer *buffer = dmabuf->priv;
+	void *vaddr = ERR_PTR(-EINVAL);
 
-	WARN(!buffer->vaddr, "Call dma_buf_begin_cpu_access before dma_buf_vmap\n");
-	return buffer->vaddr;
+	if (buffer->heap->ops->map_kernel) {
+		mutex_lock(&buffer->lock);
+		vaddr = ion_buffer_kmap_get(buffer);
+		mutex_unlock(&buffer->lock);
+	} else {
+		pr_warn_ratelimited("heap %s doesn't support map_kernel\n",
+				    buffer->heap->name);
+	}
+
+	return vaddr;
 }
 
 static void ion_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
 {
+	struct ion_buffer *buffer = dmabuf->priv;
+
+	if (buffer->heap->ops->map_kernel) {
+		mutex_lock(&buffer->lock);
+		ion_buffer_kmap_put(buffer);
+		mutex_unlock(&buffer->lock);
+	}
+}
+
+static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
+{
+	/*
+	 * TODO: Once clients remove their hacks where they assume kmap(ed)
+	 * addresses are virtually contiguous implement this properly
+	 */
+	void *vaddr = ion_dma_buf_vmap(dmabuf);
+
+	if (IS_ERR(vaddr))
+		return vaddr;
+
+	return vaddr + offset * PAGE_SIZE;
+}
+
+static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
+			       void *ptr)
+{
+	/*
+	 * TODO: Once clients remove their hacks where they assume kmap(ed)
+	 * addresses are virtually contiguous implement this properly
+	 */
+	ion_dma_buf_vunmap(dmabuf, ptr);
 }
 
 static int ion_sgl_sync_range(struct device *dev, struct scatterlist *sgl,
@@ -604,7 +631,6 @@ static int __ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 					      bool sync_only_mapped)
 {
 	struct ion_buffer *buffer = dmabuf->priv;
-	void *vaddr;
 	struct ion_dma_buf_attachment *a;
 	int ret = 0;
@@ -617,19 +643,6 @@ static int __ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 		goto out;
 	}
 
-	/*
-	 * TODO: Move this elsewhere because we don't always need a vaddr
-	 */
-	if (buffer->heap->ops->map_kernel) {
-		mutex_lock(&buffer->lock);
-		vaddr = ion_buffer_kmap_get(buffer);
-		if (IS_ERR(vaddr)) {
-			ret = PTR_ERR(vaddr);
-			goto unlock;
-		}
-		mutex_unlock(&buffer->lock);
-	}
-
 	if (!(buffer->flags & ION_FLAG_CACHED)) {
 		trace_ion_begin_cpu_access_cmo_skip(NULL, dmabuf->name, false,
 						    true, direction,
@@ -701,8 +714,6 @@ static int __ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 		}
 	}
 
-unlock:
 	mutex_unlock(&buffer->lock);
 out:
 	return ret;
@@ -725,12 +736,6 @@ static int __ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
 		goto out;
 	}
 
-	if (buffer->heap->ops->map_kernel) {
-		mutex_lock(&buffer->lock);
-		ion_buffer_kmap_put(buffer);
-		mutex_unlock(&buffer->lock);
-	}
-
 	if (!(buffer->flags & ION_FLAG_CACHED)) {
 		trace_ion_end_cpu_access_cmo_skip(NULL, dmabuf->name, false,
 						  true, direction,
@@ -833,7 +838,6 @@ static int ion_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
 						unsigned int len)
 {
 	struct ion_buffer *buffer = dmabuf->priv;
-	void *vaddr;
 	struct ion_dma_buf_attachment *a;
 	int ret = 0;
@@ -846,15 +850,6 @@ static int ion_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
 		goto out;
 	}
 
-	/*
-	 * TODO: Move this elsewhere because we don't always need a vaddr
-	 */
-	if (buffer->heap->ops->map_kernel) {
-		mutex_lock(&buffer->lock);
-		vaddr = ion_buffer_kmap_get(buffer);
-		mutex_unlock(&buffer->lock);
-	}
-
 	if (!(buffer->flags & ION_FLAG_CACHED)) {
 		trace_ion_begin_cpu_access_cmo_skip(NULL, dmabuf->name, false,
 						    true, dir,
@@ -934,12 +929,6 @@ static int ion_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
 		goto out;
 	}
 
-	if (buffer->heap->ops->map_kernel) {
-		mutex_lock(&buffer->lock);
-		ion_buffer_kmap_put(buffer);
-		mutex_unlock(&buffer->lock);
-	}
-
 	if (!(buffer->flags & ION_FLAG_CACHED)) {
 		trace_ion_end_cpu_access_cmo_skip(NULL, dmabuf->name, false,
 						  true, direction,
@@ -1038,6 +1027,7 @@ struct dma_buf *ion_alloc_dmabuf(size_t len, unsigned int heap_id_mask,
 	struct ion_heap *heap;
 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 	struct dma_buf *dmabuf;
+	char task_comm[TASK_COMM_LEN];
 
 	pr_debug("%s: len %zu heap_id_mask %u flags %x\n", __func__,
 		 len, heap_id_mask, flags);
@@ -1069,14 +1059,20 @@ struct dma_buf *ion_alloc_dmabuf(size_t len, unsigned int heap_id_mask,
 	if (IS_ERR(buffer))
 		return ERR_CAST(buffer);
 
+	get_task_comm(task_comm, current->group_leader);
+
 	exp_info.ops = &dma_buf_ops;
 	exp_info.size = buffer->size;
 	exp_info.flags = O_RDWR;
 	exp_info.priv = buffer;
+	exp_info.exp_name = kasprintf(GFP_KERNEL, "%s-%s-%d-%s", KBUILD_MODNAME,
+				      heap->name, current->tgid, task_comm);
 
 	dmabuf = dma_buf_export(&exp_info);
-	if (IS_ERR(dmabuf))
+	if (IS_ERR(dmabuf)) {
 		_ion_buffer_destroy(buffer);
+		kfree(exp_info.exp_name);
+	}
 
 	return dmabuf;
 }
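With ion_dma_buf_vmap()/ion_dma_buf_vunmap() now taking and dropping the kernel mapping themselves, a kernel client pairs the two calls directly instead of relying on dma_buf_begin_cpu_access() to create the mapping; a minimal sketch (hypothetical helper, not part of this patch):

/* Minimal sketch: read one byte through a temporary kernel mapping. */
static int example_peek_first_byte(struct dma_buf *dmabuf, u8 *out)
{
	void *vaddr = dma_buf_vmap(dmabuf);	/* routed to ion_dma_buf_vmap() */

	if (IS_ERR_OR_NULL(vaddr))
		return vaddr ? PTR_ERR(vaddr) : -ENOMEM;

	*out = *(u8 *)vaddr;
	dma_buf_vunmap(dmabuf, vaddr);		/* drops the kmap reference */
	return 0;
}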


@@ -3,7 +3,7 @@
  * drivers/staging/android/ion/ion.h
  *
  * Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2019, The Linux Foundation. All rights reserved.
  *
  */
@@ -30,6 +30,7 @@
 #define ION_MM_HEAP_NAME "mm"
 #define ION_SPSS_HEAP_NAME "spss"
 #define ION_SECURE_CARVEOUT_HEAP_NAME "secure_carveout"
+#define ION_USER_CONTIG_HEAP_NAME "user_contig"
 #define ION_QSECOM_HEAP_NAME "qsecom"
 #define ION_QSECOM_TA_HEAP_NAME "qsecom_ta"
 #define ION_SECURE_HEAP_NAME "secure_heap"


@@ -343,8 +343,8 @@ static void ion_secure_cma_free_chunk(struct ion_cma_secure_heap *sheap,
 	kfree(chunk);
 }
 
-static void __ion_secure_cma_shrink_pool(struct ion_cma_secure_heap *sheap,
-					 int max_nr)
+static unsigned long
+__ion_secure_cma_shrink_pool(struct ion_cma_secure_heap *sheap, int max_nr)
 {
 	struct list_head *entry, *_n;
 	unsigned long drained_size = 0, skipped_size = 0;
@@ -368,6 +368,7 @@ static void __ion_secure_cma_shrink_pool(struct ion_cma_secure_heap *sheap,
 	}
 
 	trace_ion_secure_cma_shrink_pool_end(drained_size, skipped_size);
+	return drained_size;
 }
 
 int ion_secure_cma_drain_pool(struct ion_heap *heap, void *unused)
@@ -385,6 +386,7 @@ int ion_secure_cma_drain_pool(struct ion_heap *heap, void *unused)
 static unsigned long ion_secure_cma_shrinker(struct shrinker *shrinker,
 					     struct shrink_control *sc)
 {
+	unsigned long freed;
 	struct ion_cma_secure_heap *sheap = container_of(shrinker,
 					struct ion_cma_secure_heap, shrinker);
 	int nr_to_scan = sc->nr_to_scan;
@@ -397,11 +399,11 @@ static unsigned long ion_secure_cma_shrinker(struct shrinker *shrinker,
 	if (!mutex_trylock(&sheap->chunk_lock))
 		return -EAGAIN;
 
-	__ion_secure_cma_shrink_pool(sheap, nr_to_scan);
+	freed = __ion_secure_cma_shrink_pool(sheap, nr_to_scan);
 	mutex_unlock(&sheap->chunk_lock);
 
-	return atomic_read(&sheap->total_pool_size);
+	return freed;
 }
 
 static unsigned long ion_secure_cma_shrinker_count(struct shrinker *shrinker,


@@ -89,7 +89,8 @@ static int populate_vm_list(unsigned long flags, unsigned int *vm_list,
 }
 
 int ion_hyp_unassign_sg(struct sg_table *sgt, int *source_vm_list,
-			int source_nelems, bool clear_page_private)
+			int source_nelems, bool clear_page_private,
+			bool try_lock)
 {
 	u32 dest_vmid = VMID_HLOS;
 	u32 dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
@@ -103,11 +104,16 @@ int ion_hyp_unassign_sg(struct sg_table *sgt, int *source_vm_list,
 		goto out;
 	}
 
-	ret = hyp_assign_table(sgt, source_vm_list, source_nelems,
-			       &dest_vmid, &dest_perms, 1);
+	if (try_lock)
+		ret = try_hyp_assign_table(sgt, source_vm_list, source_nelems,
+					   &dest_vmid, &dest_perms, 1);
+	else
+		ret = hyp_assign_table(sgt, source_vm_list, source_nelems,
+				       &dest_vmid, &dest_perms, 1);
 	if (ret) {
-		pr_err("%s: Unassign call failed.\n",
-		       __func__);
+		if (!try_lock)
+			pr_err("%s: Unassign call failed.\n",
+			       __func__);
 		goto out;
 	}
 
 	if (clear_page_private)
@@ -183,7 +189,7 @@ int ion_hyp_unassign_sg_from_flags(struct sg_table *sgt, unsigned long flags,
 	}
 
 	ret = ion_hyp_unassign_sg(sgt, source_vm_list, source_nelems,
-				  set_page_private);
+				  set_page_private, false);
 
 out_free_source:
 	kfree(source_vm_list);


@@ -13,7 +13,8 @@ bool is_secure_vmid_valid(int vmid);
 int ion_hyp_assign_sg(struct sg_table *sgt, int *dest_vm_list,
 		      int dest_nelems, bool set_page_private);
 int ion_hyp_unassign_sg(struct sg_table *sgt, int *source_vm_list,
-			int source_nelems, bool clear_page_private);
+			int source_nelems, bool clear_page_private,
+			bool try_lock);
 int ion_hyp_unassign_sg_from_flags(struct sg_table *sgt, unsigned long flags,
 				   bool set_page_private);
 int ion_hyp_assign_sg_from_flags(struct sg_table *sgt, unsigned long flags,


@@ -3,7 +3,7 @@
  * drivers/staging/android/ion/ion_system_heap.c
  *
  * Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2019, The Linux Foundation. All rights reserved.
  *
  */
@@ -158,6 +158,9 @@ alloc_from_pool_preferred(struct ion_system_heap *heap,
 	struct page_info *info;
 	int i;
 
+	if (buffer->flags & ION_FLAG_POOL_FORCE_ALLOC)
+		goto force_alloc;
+
 	info = kmalloc(sizeof(*info), GFP_KERNEL);
 	if (!info)
 		return ERR_PTR(-ENOMEM);
@@ -189,6 +192,7 @@ alloc_from_pool_preferred(struct ion_system_heap *heap,
 	}
 
 	kfree(info);
+force_alloc:
 	return alloc_largest_available(heap, buffer, size, max_order);
 }
@@ -325,8 +329,10 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
 		goto err;
 
 	table = kzalloc(sizeof(*table), GFP_KERNEL);
-	if (!table)
+	if (!table) {
+		ret = -ENOMEM;
 		goto err_free_data_pages;
+	}
 
 	ret = sg_alloc_table(table, i, GFP_KERNEL);
 	if (ret)
@@ -388,7 +394,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
 	buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
 	if (vmid > 0)
-		ion_hyp_unassign_sg(table, &vmid, 1, true);
+		ion_hyp_unassign_sg(table, &vmid, 1, true, false);
 
 	for_each_sg(table->sgl, sg, table->nents, i)
 		free_buffer_page(sys_heap, buffer, sg_page(sg),
@@ -429,7 +435,7 @@ void ion_system_heap_free(struct ion_buffer *buffer)
 		if (vmid < 0)
 			ion_heap_buffer_zero(buffer);
 	} else if (vmid > 0) {
-		if (ion_hyp_unassign_sg(table, &vmid, 1, true))
+		if (ion_hyp_unassign_sg(table, &vmid, 1, true, false))
 			return;
 	}


@@ -149,7 +149,8 @@ size_t ion_system_secure_heap_page_pool_total(struct ion_heap *heap,
 	return total << PAGE_SHIFT;
 }
 
-static void process_one_shrink(struct ion_heap *sys_heap,
+static void process_one_shrink(struct ion_system_secure_heap *secure_heap,
+			       struct ion_heap *sys_heap,
 			       struct prefetch_info *info)
 {
 	struct ion_buffer buffer;
@@ -157,7 +158,7 @@ static void process_one_shrink(struct ion_heap *sys_heap,
 	int ret;
 
 	memset(&buffer, 0, sizeof(struct ion_buffer));
-	buffer.heap = sys_heap;
+	buffer.heap = &secure_heap->heap;
 	buffer.flags = info->vmid;
 
 	pool_size = ion_system_secure_heap_page_pool_total(sys_heap,
@@ -171,6 +172,7 @@ static void process_one_shrink(struct ion_heap *sys_heap,
 	}
 
 	buffer.private_flags = ION_PRIV_FLAG_SHRINKER_FREE;
+	buffer.heap = sys_heap;
 	sys_heap->ops->free(&buffer);
 }
@@ -190,7 +192,7 @@ static void ion_system_secure_heap_prefetch_work(struct work_struct *work)
 		spin_unlock_irqrestore(&secure_heap->work_lock, flags);
 
 		if (info->shrink)
-			process_one_shrink(sys_heap, info);
+			process_one_shrink(secure_heap, sys_heap, info);
 		else
 			process_one_prefetch(sys_heap, info);
@@ -205,7 +207,7 @@ static int alloc_prefetch_info(struct ion_prefetch_regions __user *
 			       struct list_head *items)
 {
 	struct prefetch_info *info;
-	u64 __user *user_sizes;
+	u64 user_sizes;
 	int err;
 	unsigned int nr_sizes, vmid, i;
@@ -226,7 +228,7 @@ static int alloc_prefetch_info(struct ion_prefetch_regions __user *
 		if (!info)
 			return -ENOMEM;
 
-		err = get_user(info->size, &user_sizes[i]);
+		err = get_user(info->size, ((u64 __user *)user_sizes + i));
 		if (err)
 			goto out_free;
@@ -260,7 +262,10 @@ static int __ion_system_secure_heap_resize(struct ion_heap *heap, void *ptr,
 		return -EINVAL;
 
 	for (i = 0; i < data->nr_regions; i++) {
-		ret = alloc_prefetch_info(&data->regions[i], shrink, &items);
+		struct ion_prefetch_regions *r;
+
+		r = (struct ion_prefetch_regions *)data->regions + i;
+		ret = alloc_prefetch_info(r, shrink, &items);
 		if (ret)
 			goto out_free;
 	}
@@ -270,9 +275,9 @@ static int __ion_system_secure_heap_resize(struct ion_heap *heap, void *ptr,
 		spin_unlock_irqrestore(&secure_heap->work_lock, flags);
 		goto out_free;
 	}
-	list_splice_init(&items, &secure_heap->prefetch_list);
-	schedule_delayed_work(&secure_heap->prefetch_work,
+	list_splice_tail_init(&items, &secure_heap->prefetch_list);
+	queue_delayed_work(system_unbound_wq, &secure_heap->prefetch_work,
 			      shrink ? msecs_to_jiffies(SHRINK_DELAY) : 0);
 	spin_unlock_irqrestore(&secure_heap->work_lock, flags);
 	return 0;
@@ -449,7 +454,10 @@ int ion_secure_page_pool_shrink(struct ion_system_heap *sys_heap,
 		sg = sg_next(sg);
 	}
 
-	if (ion_hyp_unassign_sg(&sgt, &vmid, 1, true))
+	ret = ion_hyp_unassign_sg(&sgt, &vmid, 1, true, true);
+	if (ret == -EADDRNOTAVAIL)
+		goto out3;
+	else if (ret < 0)
 		goto out2;
 
 	list_for_each_entry_safe(page, tmp, &pages, lru) {
@@ -460,6 +468,8 @@ int ion_secure_page_pool_shrink(struct ion_system_heap *sys_heap,
 	sg_free_table(&sgt);
 	return freed;
 
+out2:
+	sg_free_table(&sgt);
 out1:
 	/* Restore pages to secure pool */
 	list_for_each_entry_safe(page, tmp, &pages, lru) {
@@ -467,7 +477,7 @@ int ion_secure_page_pool_shrink(struct ion_system_heap *sys_heap,
 		ion_page_pool_free(pool, page);
 	}
 	return 0;
-out2:
+out3:
 	/*
 	 * The security state of the pages is unknown after a failure;
 	 * They can neither be added back to the secure pool nor buddy system.


@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/err.h>
@@ -40,6 +40,10 @@ static struct ion_heap_desc ion_heap_meta[] = {
 		.id = ION_CP_MM_HEAP_ID,
 		.name = ION_MM_HEAP_NAME,
 	},
+	{
+		.id = ION_USER_CONTIG_HEAP_ID,
+		.name = ION_USER_CONTIG_HEAP_NAME,
+	},
 	{
 		.id = ION_QSECOM_HEAP_ID,
 		.name = ION_QSECOM_HEAP_NAME,
@@ -161,6 +165,10 @@ static int msm_ion_get_heap_dt_data(struct device_node *node,
 			base = cma_get_base(dev->cma_area);
 			size = cma_get_size(dev->cma_area);
 			ret = 0;
+		} else if (dev->dma_mem) {
+			base = dma_get_device_base(dev, dev->dma_mem);
+			size = dma_get_size(dev->dma_mem);
+			ret = 0;
 		}
 	} else {
 		base = of_translate_address(pnode, basep);


@@ -53,6 +53,7 @@ enum ion_heap_ids {
 #define ION_QSECOM_TA_HEAP_ID 19
 #define ION_AUDIO_HEAP_ID 28
 #define ION_CAMERA_HEAP_ID 20
+#define ION_USER_CONTIG_HEAP_ID 26
 
 /**
  * Flags to be used when allocating from the secure heap for
  * content protection
@@ -100,15 +101,15 @@ enum ion_heap_ids {
 #define ION_IOC_MSM_MAGIC 'M'
 
 struct ion_prefetch_regions {
+	__u64 sizes;
 	__u32 vmid;
-	__u64 __user *sizes;
 	__u32 nr_sizes;
 };
 
 struct ion_prefetch_data {
-	__u32 heap_id;
 	__u64 len;
-	struct ion_prefetch_regions __user *regions;
+	__u64 regions;
+	__u32 heap_id;
 	__u32 nr_regions;
 };
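Since the pointer members are now fixed-width __u64 fields, 32-bit and 64-bit callers share one layout and userspace passes pointers as integer values; a minimal userspace sketch (hypothetical values, not part of this patch):

/* Minimal userspace sketch: pack pointers into the __u64 members. */
static void example_fill_prefetch(struct ion_prefetch_data *data,
				  struct ion_prefetch_regions *region,
				  __u64 *size_array, __u32 nr_sizes,
				  __u32 vmid_flag, __u32 heap_id)
{
	region->sizes = (__u64)(unsigned long)size_array;	/* array of region sizes */
	region->vmid = vmid_flag;				/* hypothetical VMID flag value */
	region->nr_sizes = nr_sizes;

	data->len = 0;
	data->regions = (__u64)(unsigned long)region;
	data->heap_id = heap_id;				/* hypothetical heap id */
	data->nr_regions = 1;
}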


@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DMA_BUF_REF_H
+#define _DMA_BUF_REF_H
+
+struct dma_buf;
+struct seq_file;
+
+#ifdef CONFIG_DEBUG_DMA_BUF_REF
+void dma_buf_ref_init(struct dma_buf *b);
+void dma_buf_ref_destroy(struct dma_buf *b);
+void dma_buf_ref_mod(struct dma_buf *b, int nr);
+int dma_buf_ref_show(struct seq_file *s, struct dma_buf *dmabuf);
+
+#else
+static inline void dma_buf_ref_init(struct dma_buf *b) {}
+static inline void dma_buf_ref_destroy(struct dma_buf *b) {}
+static inline void dma_buf_ref_mod(struct dma_buf *b, int nr) {}
+static inline int dma_buf_ref_show(struct seq_file *s, struct dma_buf *dmabuf)
+{
+	return -ENOMEM;
+}
+#endif
+
+#endif /* _DMA_BUF_REF_H */


@@ -31,6 +31,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/fs.h>
 #include <linux/dma-fence.h>
+#include <linux/dma-buf-ref.h>
 #include <linux/wait.h>
 
 struct device;
@@ -381,6 +382,7 @@ struct dma_buf_ops {
  * @vmap_ptr: the current vmap ptr if vmapping_counter > 0
  * @exp_name: name of the exporter; useful for debugging.
  * @name: unique name for the buffer
+ * @ktime: time (in jiffies) at which the buffer was born
  * @owner: pointer to exporter module; used for refcounting when exporter is a
  *         kernel module.
  * @list_node: node for dma_buf accounting and debugging.
@@ -409,6 +411,7 @@ struct dma_buf {
 	void *vmap_ptr;
 	const char *exp_name;
 	char *name;
+	ktime_t ktime;
 	struct module *owner;
 	struct list_head list_node;
 	void *priv;
@@ -423,6 +426,8 @@ struct dma_buf {
 		__poll_t active;
 	} cb_excl, cb_shared;
+
+	struct list_head refs;
 };
 
 /**
@@ -495,6 +500,7 @@ struct dma_buf_export_info {
 static inline void get_dma_buf(struct dma_buf *dmabuf)
 {
 	get_file(dmabuf->file);
+	dma_buf_ref_mod(dmabuf, 1);
 }
 
 struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,


@@ -48,6 +48,12 @@ int hyp_assign_table(struct sg_table *table,
 			u32 *source_vm_list, int source_nelems,
 			int *dest_vmids, int *dest_perms,
 			int dest_nelems);
+int try_hyp_assign_table(struct sg_table *table,
+			u32 *source_vm_list, int source_nelems,
+			int *dest_vmids, int *dest_perms,
+			int dest_nelems);
+
 extern int hyp_assign_phys(phys_addr_t addr, u64 size,
 			u32 *source_vmlist, int source_nelems,
 			int *dest_vmids, int *dest_perms, int dest_nelems);
@@ -72,6 +78,14 @@ static inline int hyp_assign_table(struct sg_table *table,
 	return -EINVAL;
 }
 
+static inline int try_hyp_assign_table(struct sg_table *table,
+			u32 *source_vm_list, int source_nelems,
+			int *dest_vmids, int *dest_perms,
+			int dest_nelems)
+{
+	return -EINVAL;
+}
+
 static inline int hyp_assign_phys(phys_addr_t addr, u64 size,
 			u32 *source_vmlist, int source_nelems,
 			int *dest_vmids, int *dest_perms, int dest_nelems)