Merge tag 'LA.UM.9.12.r1-16200-SMxx50.QSSI12.0' of https://git.codelinaro.org/clo/la/kernel/msm-4.19 into android13-4.19-kona
"LA.UM.9.12.r1-16200-SMxx50.QSSI12.0" * tag 'LA.UM.9.12.r1-16200-SMxx50.QSSI12.0' of https://git.codelinaro.org/clo/la/kernel/msm-4.19: mdm: dataipa: increase the size of prefetch buffer exec: Force single empty string when argv is empty BACKPORT: FROMLIST: mm: protect free_pgtables with mmap_lock write lock in exit_mmap qseecom: Release ion buffer in case of keymaster TA Change-Id: I599d46bee0c79200fd55ff0705dd8b5d51ecc2d8
@@ -3153,6 +3153,30 @@ static int qseecom_prepare_unload_app(struct qseecom_dev_handle *data)
 	pr_debug("prepare to unload app(%d)(%s), pending %d\n",
 		data->client.app_id, data->client.app_name,
 		data->client.unload_pending);
+
+	/* For keymaster we are not going to unload, so no need to add it to the
+	 * unload app pending list; as soon as we identify it, release the ion
+	 * buffer and return.
+	 */
+	if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
+		if (data->client.dmabuf) {
+			/* Each client will get the same KM TA loaded handle but
+			 * will allocate a separate shared buffer during
+			 * loading of the TA; as a client can't unload the KM TA we
+			 * will only free the shared buffer and return early
+			 * to avoid any ion buffer leak.
+			 */
+			qseecom_vaddr_unmap(data->client.sb_virt,
+				data->client.sgt, data->client.attach,
+				data->client.dmabuf);
+			MAKE_NULL(data->client.sgt,
+				data->client.attach, data->client.dmabuf);
+		}
+		__qseecom_free_tzbuf(&data->sglistinfo_shm);
+		data->released = true;
+		return 0;
+	}
+
 	if (data->client.unload_pending)
 		return 0;
 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
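The inner comment in the hunk above captures the ownership model the early return relies on: every client shares the single loaded keymaster TA handle, but each client maps its own shared buffer, so releasing a client tears down only that buffer and never unloads the TA. The following self-contained C sketch illustrates the same pattern; the names (ta_handle, km_client, km_client_open, km_client_release) are hypothetical and are not part of the qseecom driver.

#include <stdio.h>
#include <stdlib.h>

/* One TA handle shared by all clients; never unloaded in this sketch. */
struct ta_handle {
	const char *name;
	int users;
};

/* Per-client state: a pointer to the shared handle plus a private buffer. */
struct km_client {
	struct ta_handle *ta;
	void *shared_buf;
	size_t buf_len;
};

static struct ta_handle keymaster_ta = { .name = "keymaster", .users = 0 };

static struct km_client *km_client_open(size_t buf_len)
{
	struct km_client *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;
	c->shared_buf = malloc(buf_len);	/* private buffer per client */
	if (!c->shared_buf) {
		free(c);
		return NULL;
	}
	c->ta = &keymaster_ta;			/* same handle for every client */
	c->buf_len = buf_len;
	keymaster_ta.users++;
	return c;
}

/* Release frees only the per-client buffer; the TA handle stays loaded. */
static void km_client_release(struct km_client *c)
{
	if (!c)
		return;
	free(c->shared_buf);
	c->ta->users--;
	free(c);
}

int main(void)
{
	struct km_client *a = km_client_open(4096);
	struct km_client *b = km_client_open(4096);

	printf("clients using %s: %d\n", keymaster_ta.name, keymaster_ta.users);
	km_client_release(a);
	km_client_release(b);
	printf("clients using %s: %d\n", keymaster_ta.name, keymaster_ta.users);
	return 0;
}

The only point of the sketch is the asymmetry: open and release are per-buffer operations while the TA handle outlives every client, which is why the driver frees the dmabuf mapping and returns early instead of queueing an unload.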
@@ -44,7 +44,7 @@ enum ipa_fltrt_equations {
 #define IPA3_0_HW_TBL_ADDR_MASK (127)
 #define IPA3_0_HW_RULE_BUF_SIZE (256)
 #define IPA3_0_HW_RULE_START_ALIGNMENT (7)
-#define IPA3_0_HW_RULE_PREFETCH_BUF_SIZE (128)
+#define IPA3_0_HW_RULE_PREFETCH_BUF_SIZE (256)
 
 
 /*
mm/mmap.c
@@ -3198,10 +3198,9 @@ void exit_mmap(struct mm_struct *mm)
 		(void)__oom_reap_task_mm(mm);
 
 		set_bit(MMF_OOM_SKIP, &mm->flags);
-		down_write(&mm->mmap_sem);
-		up_write(&mm->mmap_sem);
 	}
 
+	down_write(&mm->mmap_sem);
 	if (mm->locked_vm) {
 		vma = mm->mmap;
 		while (vma) {
@@ -3214,8 +3213,11 @@ void exit_mmap(struct mm_struct *mm)
 	arch_exit_mmap(mm);
 
 	vma = mm->mmap;
-	if (!vma)	/* Can happen if dup_mmap() received an OOM */
+	if (!vma) {
+		/* Can happen if dup_mmap() received an OOM */
+		up_write(&mm->mmap_sem);
 		return;
+	}
 
 	lru_add_drain();
 	flush_cache_mm(mm);
@@ -3226,16 +3228,14 @@ void exit_mmap(struct mm_struct *mm)
 	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
 	tlb_finish_mmu(&tlb, 0, -1);
 
-	/*
-	 * Walk the list again, actually closing and freeing it,
-	 * with preemption enabled, without holding any MM locks.
-	 */
+	/* Walk the list again, actually closing and freeing it. */
 	while (vma) {
 		if (vma->vm_flags & VM_ACCOUNT)
 			nr_accounted += vma_pages(vma);
 		vma = remove_vma(vma);
 		cond_resched();
 	}
+	up_write(&mm->mmap_sem);
 	vm_unacct_memory(nr_accounted);
 }
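Taken together, the three mm/mmap.c hunks widen the scope of the mmap_sem write lock: instead of dropping it before teardown, exit_mmap() now holds it across unmap, free_pgtables() and the remove_vma() loop, releasing it only at the end (or on the early !vma return). The sketch below is a minimal runnable analogue of that lock-scope change, using a pthread rwlock in place of mmap_sem and placeholder teardown steps; fake_mm and teardown_locked are hypothetical names, not kernel code.

#include <pthread.h>
#include <stdio.h>

/* Stand-in for mm_struct: a rwlock playing the role of mmap_sem plus
 * some state that teardown is supposed to free. */
struct fake_mm {
	pthread_rwlock_t lock;
	int vmas;
	int page_tables;
};

/* After the patch every teardown step runs under the write lock, so a
 * concurrent reader of the address space (for example the OOM reaper)
 * can never observe half-freed page tables. */
static void teardown_locked(struct fake_mm *mm)
{
	pthread_rwlock_wrlock(&mm->lock);	/* down_write(&mm->mmap_sem) */

	mm->page_tables = 0;			/* free_pgtables(...) */
	mm->vmas = 0;				/* remove_vma() loop */

	pthread_rwlock_unlock(&mm->lock);	/* up_write(&mm->mmap_sem) */
}

int main(void)
{
	struct fake_mm mm = { .vmas = 3, .page_tables = 5 };

	pthread_rwlock_init(&mm.lock, NULL);
	teardown_locked(&mm);
	printf("vmas=%d page_tables=%d\n", mm.vmas, mm.page_tables);
	pthread_rwlock_destroy(&mm.lock);
	return 0;
}

Compile with -pthread; the design point mirrored here is simply that the unlock moves to after the last piece of teardown rather than before it.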