Merge tag 'LA.UM.9.12.1.r1-09700-SMxx50.QSSI12.0' of https://git.codelinaro.org/clo/la/kernel/msm-4.19 into android13-4.19-kona

"LA.UM.9.12.1.r1-09700-SMxx50.QSSI12.0"

* tag 'LA.UM.9.12.1.r1-09700-SMxx50.QSSI12.0' of https://git.codelinaro.org/clo/la/kernel/msm-4.19:
  adsprpc: Handle UAF scenario in put_args
  securemsm-kernel: Decrement the server object ref count in mutex context
  qcedev: fix UAF in qcedev_smmu
  msm: kgsl: Fix error handling during drawctxt switch
  msm: ipa: Add additional cleanup in finish rt rule addition
  vidc_3x: Fix cts failures
  msm: kgsl: Fix error handling during drawctxt switch
  msm: eva: Adding kref count for cvp_get_inst_from_id
  msm: adsprpc: use-after-free (UAF) in global maps
  scsi: ufs: Add load voting for UFS's VCCQ2 parent regulator
  msm: adsprpc: use-after-free (UAF) in global maps
  pci: pci-msm-msi: Re-name irq chip name for the driver
  rpmsg: glink: Get reference of channel objects in rx path
  serial: msm_geni_serial: Make HW Flow off if CRSTCTS flag not set
  serial: msm_geni_serial: check ioctl_count with auto_suspend enabled
  serial: msm_geni_serial: Move UART Rx path errors to ipc logs
  serial: msm_geni_serial: Prevent excessive logging due to WARN_ON
  serial: msm_geni_serial: Check for wakeup byte after suspend
  serial: msm_geni_serial: Update wakeup byte handling mechanism
  serial: msm_geni_serial: Check if workqueue allocation fails
  serial: msm_geni_serial: Handle vote_clock_off when rx data is inflight
  serial: msm_geni_serial: Check if port is open in msm_geni_wakeup_work
  serial: msm_geni_serial: Change wakeup interrupt handling mechanism
  serial: msm_geni_serial: Handle delayed Rx cancel cmd interrupt
  serial: msm_geni_serial: Handle Rx EOT & DMA_DONE after DMA Reset
  ARM: defconfig: Enable config for msm8937_32 and msm8937_32go
  ARM: defconfig: Enable config for msm8937
  memshare: Prevent possible integer overflow
  diag: Correct argument list for debug logs
  memshare: Prevent possible integer overflow
  soc: qcom: smem: Add boundary checks for partitions
  msm: kgsl: Fix upper bound check for iommu address
  cnss2: Add support for configuring calibration duration
  msm: kgsl: Limit the syncpoint count for AUX commands
  msm: kgsl: Prevent wrap around during user address mapping
  misc: update nordic DFU function check
  msm: kgsl: Use dma_buf_get() to get dma_buf structure
  msm: kgsl: Make sure that pool pages don't have any extra references
  cnss2: Validate maximum number of memory segments
  net: qrtr: fifo: Add bounds check on tx path
  icnss2: Add data length validation in cnss_wlfw_qdss_data_send_sync()
  cnss2: Unregister host driver during PCI remove
  msm: cvp: Resuming DSP if power collapse fails
  msm: ADSPRPC: Restrict untrusted applications from attaching to GuestOS
  misc: updates to controller's driver
  defconfig: Enable KinecticsXR Nordic chip for SKU4
  misc: add makefile changes for Nordic chip
  msm: synx: fix copyright
  defconfig: Enable Novatek NT36xxx Touch for tron target
  cnss2: Update bound checks for sbl reg dumps to SRAM mem range

 Conflicts:
	drivers/soc/qcom/memshare/msm_memshare.c
	drivers/soc/qcom/smcinvoke.c

Change-Id: If105d0d4f52aec12ae176eef8cd93b20751b8d62
Michael Bestas committed on 2024-10-10 13:34:38 +03:00
21 changed files with 377 additions and 181 deletions


@@ -43,6 +43,8 @@ CONFIG_BPF_SYSCALL=y
CONFIG_EMBEDDED=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB_FREELIST_RANDOM=y
CONFIG_SLAB_FREELIST_HARDENED=y
CONFIG_PROFILING=y
CONFIG_ARCH_QCOM=y
CONFIG_ARCH_QM215=y


@@ -42,6 +42,8 @@ CONFIG_BPF_SYSCALL=y
CONFIG_EMBEDDED=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB_FREELIST_RANDOM=y
CONFIG_SLAB_FREELIST_HARDENED=y
CONFIG_PROFILING=y
CONFIG_ARCH_QCOM=y
CONFIG_ARCH_QM215=y


@@ -42,6 +42,8 @@ CONFIG_KALLSYMS_ALL=y
CONFIG_BPF_SYSCALL=y
CONFIG_EMBEDDED=y
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB_FREELIST_RANDOM=y
CONFIG_SLAB_FREELIST_HARDENED=y
CONFIG_PROFILING=y
CONFIG_ARCH_QCOM=y
CONFIG_ARCH_QM215=y
@@ -715,7 +717,7 @@ CONFIG_PREEMPTIRQ_EVENTS=y
CONFIG_IRQSOFF_TRACER=y
CONFIG_PREEMPT_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_LKDTM=y
CONFIG_LKDTM=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_MEMTEST=y
CONFIG_BUG_ON_DATA_CORRUPTION=y


@@ -44,6 +44,8 @@ CONFIG_KALLSYMS_ALL=y
CONFIG_BPF_SYSCALL=y
CONFIG_EMBEDDED=y
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB_FREELIST_RANDOM=y
CONFIG_SLAB_FREELIST_HARDENED=y
CONFIG_PROFILING=y
CONFIG_ARCH_QCOM=y
CONFIG_ARCH_QM215=y
@@ -725,7 +727,7 @@ CONFIG_PREEMPTIRQ_EVENTS=y
CONFIG_IRQSOFF_TRACER=y
CONFIG_PREEMPT_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_LKDTM=y
CONFIG_LKDTM=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_MEMTEST=y
CONFIG_BUG_ON_DATA_CORRUPTION=y


@@ -717,7 +717,7 @@ CONFIG_PREEMPTIRQ_EVENTS=y
CONFIG_IRQSOFF_TRACER=y
CONFIG_PREEMPT_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_LKDTM=y
CONFIG_LKDTM=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_MEMTEST=y
CONFIG_BUG_ON_DATA_CORRUPTION=y


@@ -424,6 +424,8 @@ struct fastrpc_mmap {
uintptr_t attr;
bool is_filemap; /* flag to indicate map used in process init */
unsigned int ctx_refs; /* Indicates reference count for context map */
/* Map in use for dma handle */
unsigned int dma_handle_refs;
};
enum fastrpc_perfkeys {
@@ -850,8 +852,12 @@ static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
}
hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
/* Remove if only one reference map and no context map */
if (map->refs == 1 && !map->ctx_refs &&
map->raddr == va && map->raddr + map->len == va + len &&
if (map->refs == 1 &&
!map->ctx_refs &&
map->raddr == va &&
map->raddr + map->len == va + len &&
/* Remove map only if it isn't being used by DSP */
!map->dma_handle_refs &&
/* Remove map if not used in process initialization */
!map->is_filemap) {
match = map;
@@ -890,15 +896,21 @@ static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags)
if (map->flags == ADSP_MMAP_HEAP_ADDR ||
map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
spin_lock(&me->hlock);
if (map->refs)
map->refs--;
if (!map->refs && !map->ctx_refs)
if (!map->refs)
hlist_del_init(&map->hn);
spin_unlock(&me->hlock);
if (map->refs > 0)
return;
} else {
if (map->refs)
map->refs--;
if (!map->refs && !map->ctx_refs)
/* flags is passed as 1 during fastrpc_file_free
* (ie process exit), so that maps will be cleared
* even though references are present.
*/
if (!map->refs && !map->ctx_refs && !map->dma_handle_refs)
hlist_del_init(&map->hn);
if (map->refs > 0 && !flags)
return;
@@ -1770,13 +1782,15 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
FASTRPC_ATTR_NOVA, 0, 0, dmaflags,
&ctx->maps[i]);
if (!err && ctx->maps[i])
ctx->maps[i]->ctx_refs++;
ctx->maps[i]->dma_handle_refs++;
if (err) {
for (j = bufs; j < i; j++) {
if (ctx->maps[j] && ctx->maps[j]->ctx_refs)
ctx->maps[j]->ctx_refs--;
if (ctx->maps[j] &&
ctx->maps[j]->dma_handle_refs) {
ctx->maps[j]->dma_handle_refs--;
fastrpc_mmap_free(ctx->maps[j], 0);
}
}
mutex_unlock(&ctx->fl->map_mutex);
goto bail;
}
@@ -1884,13 +1898,33 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
rpra[i].buf.pv = buf;
}
PERF_END);
/* Since we are not holding map_mutex during get args whole time
 * it is possible that dma handle map may be removed by some invalid
 * fd passed by DSP. Inside the lock check if the map is present or not
 */
mutex_lock(&ctx->fl->map_mutex);
for (i = bufs; i < bufs + handles; ++i) {
struct fastrpc_mmap *map = ctx->maps[i];
if (map) {
pages[i].addr = map->phys;
pages[i].size = map->size;
struct fastrpc_mmap *mmap = NULL;
/* check if map was created */
if (ctx->maps[i]) {
/* check if map still exists */
if (!fastrpc_mmap_find(ctx->fl, ctx->fds[i], 0, 0,
0, 0, &mmap)) {
if (mmap) {
pages[i].addr = mmap->phys;
pages[i].size = mmap->size;
}
} else {
/* map already freed by some other call */
mutex_unlock(&ctx->fl->map_mutex);
pr_err("could not find map associated with dma handle fd %d\n",
ctx->fds[i]);
goto bail;
}
}
}
mutex_unlock(&ctx->fl->map_mutex);
fdlist = (uint64_t *)&pages[bufs + handles];
crclist = (uint32_t *)&fdlist[M_FDLIST];
/* reset fds, crc and early wakeup hint memory */
@@ -2073,12 +2107,13 @@ static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
break;
if (!fastrpc_mmap_find(ctx->fl, (int)fdlist[i], 0, 0,
0, 0, &mmap)) {
if (mmap && mmap->ctx_refs)
mmap->ctx_refs--;
if (mmap && mmap->dma_handle_refs) {
mmap->dma_handle_refs = 0;
fastrpc_mmap_free(mmap, 0);
}
}
}
}
mutex_unlock(&ctx->fl->map_mutex);
if (ctx->crc && crclist && rpra)
K_COPY_TO_USER(err, kernel, ctx->crc,
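
The dma_handle_refs counter introduced above gives DSP-held dma handles their own reference class, so a map is only torn down once both ordinary references and in-flight handle references are gone. A minimal userspace sketch of that two-class refcount idea (illustrative only; map_put and the field names here are hypothetical, not the fastrpc API):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical analogue of a fastrpc map with two reference classes. */
struct map {
	int refs;             /* ordinary lookups */
	int dma_handle_refs;  /* handles still in use by the remote side */
};

/* Free only when no reference of either class remains. */
static void map_put(struct map *m)
{
	if (m->refs > 0)
		m->refs--;
	if (m->refs == 0 && m->dma_handle_refs == 0) {
		printf("map freed\n");
		free(m);
	} else {
		printf("map kept: refs=%d dma_handle_refs=%d\n",
		       m->refs, m->dma_handle_refs);
	}
}

int main(void)
{
	struct map *m = calloc(1, sizeof(*m));

	m->refs = 1;
	m->dma_handle_refs = 1;  /* DSP still owns a dma handle */
	map_put(m);              /* kept: freeing here would be the UAF */
	m->refs = 1;
	m->dma_handle_refs = 0;  /* remote side released the handle */
	map_put(m);              /* now it is safe to free */
	return 0;
}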


@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2008-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/slab.h>
@@ -3868,7 +3869,7 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
DIAG_LOG(DIAG_DEBUG_MASKS,
"diag: %s: event masks update complete for client pid: %d\n",
current->tgid);
__func__, current->tgid);
goto exit;
}


@@ -350,8 +350,12 @@ int qcedev_check_and_map_buffer(void *handle,
return 0;
unmap:
if (!found)
if (!found) {
qcedev_unmap_buffer(handle, mem_client, binfo);
mutex_lock(&qce_hndl->registeredbufs.lock);
list_del(&binfo->list);
mutex_unlock(&qce_hndl->registeredbufs.lock);
}
error:
kfree(binfo);
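
The fix above unlinks binfo from the registered-buffers list, under the list lock, before the error path reaches kfree(binfo), so no freed entry stays reachable through the list. A toy sketch of the unlink-before-free rule (plain C; the list here is hypothetical, not the qcedev structures):

#include <stdio.h>
#include <stdlib.h>

struct node { struct node *prev, *next; };

/* Minimal doubly-linked list unlink, in the spirit of list_del(). */
static void list_del(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	n->prev = n->next = NULL;
}

int main(void)
{
	/* head <-> b : b was registered, then its setup failed */
	struct node head, *b = malloc(sizeof(*b));

	head.next = head.prev = b;
	b->next = b->prev = &head;

	list_del(b);	/* unlink first (under the list lock in the driver) */
	free(b);	/* ...then free: no dangling entry is left behind */

	printf("list empty: %d\n", head.next == &head);
	return 0;
}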


@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2002,2007-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/debugfs.h>
@@ -615,8 +616,6 @@ int adreno_drawctxt_switch(struct adreno_device *adreno_dev,
if (drawctxt != NULL && kgsl_context_detached(&drawctxt->base))
return -ENOENT;
trace_adreno_drawctxt_switch(rb, drawctxt);
/* Get a refcount to the new instance */
if (drawctxt) {
if (!_kgsl_context_get(&drawctxt->base))
@@ -630,7 +629,7 @@ int adreno_drawctxt_switch(struct adreno_device *adreno_dev,
ret = adreno_iommu_set_pt_ctx(rb, new_pt, drawctxt);
if (ret)
return ret;
goto err;
if (rb->drawctxt_active) {
/* Wait for the timestamp to expire */
@@ -640,7 +639,12 @@ int adreno_drawctxt_switch(struct adreno_device *adreno_dev,
kgsl_context_put(&rb->drawctxt_active->base);
}
}
trace_adreno_drawctxt_switch(rb, drawctxt);
rb->drawctxt_active = drawctxt;
return 0;
err:
if (drawctxt)
kgsl_context_put(&drawctxt->base);
return ret;
}


@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2002,2007-2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022,2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/slab.h>
@@ -400,63 +400,6 @@ static unsigned int __add_curr_ctxt_cmds(struct adreno_ringbuffer *rb,
return cmds - cmds_orig;
}
/**
* _set_ctxt_gpu() - Add commands to set the current context in memstore
* @rb: The ringbuffer in which commands to set memstore are added
* @drawctxt: The context whose id is being set in memstore
*/
static int _set_ctxt_gpu(struct adreno_ringbuffer *rb,
struct adreno_context *drawctxt)
{
unsigned int link[15], *cmds;
int result;
cmds = &link[0];
cmds += __add_curr_ctxt_cmds(rb, cmds, drawctxt);
result = adreno_ringbuffer_issue_internal_cmds(rb, 0, link,
(unsigned int)(cmds - link));
return result;
}
/**
* _set_pagetable_gpu() - Use GPU to switch the pagetable
* @rb: The rb in which commands to switch pagetable are to be
* submitted
* @new_pt: The pagetable to switch to
*/
static int _set_pagetable_gpu(struct adreno_ringbuffer *rb,
struct kgsl_pagetable *new_pt)
{
struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
unsigned int *link = NULL, count;
int result;
link = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (link == NULL)
return -ENOMEM;
/* If we are in a fault the MMU will be reset soon */
if (test_bit(ADRENO_DEVICE_FAULT, &adreno_dev->priv)) {
kfree(link);
return 0;
}
count = adreno_iommu_set_pt_generate_cmds(rb, link, new_pt);
WARN(count > (PAGE_SIZE / sizeof(unsigned int)),
"Temp command buffer overflow\n");
/*
* This returns the per context timestamp but we need to
* use the global timestamp for iommu clock disablement
*/
result = adreno_ringbuffer_issue_internal_cmds(rb,
KGSL_CMD_FLAGS_PMODE, link, count);
kfree(link);
return result;
}
/**
* adreno_iommu_init() - Adreno iommu init
* @adreno_dev: Adreno device
@@ -485,7 +428,6 @@ void adreno_iommu_init(struct adreno_device *adreno_dev)
/**
* adreno_iommu_set_pt_ctx() - Change the pagetable of the current RB
* @device: Pointer to device to which the rb belongs
* @rb: The RB pointer on which pagetable is to be changed
* @new_pt: The new pt the device will change to
* @drawctxt: The context whose pagetable the ringbuffer is switching to,
@@ -500,21 +442,35 @@ int adreno_iommu_set_pt_ctx(struct adreno_ringbuffer *rb,
struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct kgsl_pagetable *cur_pt = device->mmu.defaultpagetable;
unsigned int *cmds = NULL, count = 0;
int result = 0;
cmds = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (cmds == NULL)
return -ENOMEM;
/* Switch the page table if a MMU is attached */
if (kgsl_mmu_get_mmutype(device) != KGSL_MMU_TYPE_NONE) {
if (rb->drawctxt_active)
cur_pt = rb->drawctxt_active->base.proc_priv->pagetable;
/* Pagetable switch */
/* Add commands for pagetable switch */
if (new_pt != cur_pt)
result = _set_pagetable_gpu(rb, new_pt);
count += adreno_iommu_set_pt_generate_cmds(rb,
cmds, new_pt);
if (result)
}
/* Add commands to set the current context in memstore */
count += __add_curr_ctxt_cmds(rb, cmds + count, drawctxt);
WARN(count > (PAGE_SIZE / sizeof(unsigned int)),
"Temp command buffer overflow\n");
result = adreno_ringbuffer_issue_internal_cmds(rb, KGSL_CMD_FLAGS_PMODE,
cmds, count);
kfree(cmds);
return result;
}
/* Context switch */
return _set_ctxt_gpu(rb, drawctxt);
}


@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2011-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/compat.h>
@@ -2586,18 +2586,19 @@ static bool kgsl_iommu_addr_in_range(struct kgsl_pagetable *pagetable,
uint64_t gpuaddr, uint64_t size)
{
struct kgsl_iommu_pt *pt = pagetable->priv;
u64 end = gpuaddr + size;
if (gpuaddr == 0)
/* Make sure we don't wrap around */
if (gpuaddr == 0 || end < gpuaddr)
return false;
if (gpuaddr >= pt->va_start && (gpuaddr + size) < pt->va_end)
if (gpuaddr >= pt->va_start && end <= pt->va_end)
return true;
if (gpuaddr >= pt->compat_va_start &&
(gpuaddr + size) < pt->compat_va_end)
if (gpuaddr >= pt->compat_va_start && end <= pt->compat_va_end)
return true;
if (gpuaddr >= pt->svm_start && (gpuaddr + size) < pt->svm_end)
if (gpuaddr >= pt->svm_start && end <= pt->svm_end)
return true;
return false;
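
The check above computes end = gpuaddr + size once, rejects a wrapped end up front, and then compares end <= va_end consistently for each region. A standalone sketch of the wrap-safe test (plain C; the ranges in main() are invented for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Wrap-safe "is [start, start + size) inside [lo, hi]" test. */
static bool addr_in_range(uint64_t start, uint64_t size,
			  uint64_t lo, uint64_t hi)
{
	uint64_t end = start + size;

	/* Make sure we don't wrap around */
	if (start == 0 || end < start)
		return false;
	return start >= lo && end <= hi;
}

int main(void)
{
	/* A size that wraps past UINT64_MAX must be rejected... */
	printf("%d\n", addr_in_range(0x10000, UINT64_MAX - 0x100,
				     0x1000, 0x200000));
	/* ...while a normal in-range mapping is accepted. */
	printf("%d\n", addr_in_range(0x10000, 0x1000, 0x1000, 0x200000));
	return 0;
}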


@@ -426,7 +426,7 @@ static struct msm_cvp_inst *cvp_get_inst_from_id(struct msm_cvp_core *core,
}
}
inst = match ? inst : NULL;
inst = match && kref_get_unless_zero(&inst->kref) ? inst : NULL;
mutex_unlock(&core->lock);
} else {
if (core->state == CVP_CORE_UNINIT)
@@ -525,7 +525,7 @@ static int hfi_process_session_cvp_msg(u32 device_id,
sess_msg = kmem_cache_alloc(cvp_driver->msg_cache, GFP_KERNEL);
if (sess_msg == NULL) {
dprintk(CVP_ERR, "%s runs out msg cache memory\n", __func__);
return -ENOMEM;
goto error_no_mem;
}
memcpy(&sess_msg->pkt, pkt, get_msg_size());
@@ -548,11 +548,14 @@ static int hfi_process_session_cvp_msg(u32 device_id,
info->response_type = HAL_NO_RESP;
cvp_put_inst(inst);
return 0;
error_handle_msg:
spin_unlock(&inst->session_queue.lock);
kmem_cache_free(cvp_driver->msg_cache, sess_msg);
error_no_mem:
cvp_put_inst(inst);
return -ENOMEM;
}
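
cvp_get_inst_from_id() now returns an instance only if kref_get_unless_zero() succeeds, so a lookup can never resurrect an object whose last reference has already been dropped. A userspace CAS-loop analogue of that primitive (C11 atomics; a sketch of the idea, not the kernel implementation):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct inst { atomic_int refs; };

/* Take a reference only if at least one is still held elsewhere;
 * refuse if the count already hit zero (object being destroyed). */
static bool ref_get_unless_zero(struct inst *i)
{
	int cur = atomic_load(&i->refs);

	while (cur != 0) {
		if (atomic_compare_exchange_weak(&i->refs, &cur, cur + 1))
			return true;
	}
	return false;
}

int main(void)
{
	struct inst live = { 1 }, dying = { 0 };

	printf("live:  %d\n", ref_get_unless_zero(&live));   /* 1: usable */
	printf("dying: %d\n", ref_get_unless_zero(&dying));  /* 0: hands off */
	return 0;
}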


@@ -843,9 +843,7 @@ static void handle_sys_init_done(enum hal_command_response cmd, void *data)
core->dec_codec_supported = sys_init_msg->dec_codec_supported;
/* This should come from sys_init_done */
core->resources.max_inst_count =
sys_init_msg->max_sessions_supported ? :
MAX_SUPPORTED_INSTANCES;
core->resources.max_inst_count = 16;
core->resources.max_secure_inst_count =
core->resources.max_secure_inst_count ? :
@@ -5051,7 +5049,8 @@ static int msm_vidc_check_mbpf_supported(struct msm_vidc_inst *inst)
/* ignore thumbnail session */
if (is_thumbnail_session(temp))
continue;
mbpf += msm_comm_get_mbs_per_frame(inst);
mbpf += NUM_MBS_PER_FRAME(inst->prop.width[OUTPUT_PORT],
inst->prop.height[OUTPUT_PORT]);
}
mutex_unlock(&core->lock);
if (mbpf > 2*capability->mbs_per_frame.max) {


@@ -86,7 +86,7 @@ static void msm_msi_unmask_irq(struct irq_data *data)
}
static struct irq_chip msm_msi_irq_chip = {
.name = "msm_pci_msi",
.name = "gic_msm_pci_msi",
.irq_enable = msm_msi_unmask_irq,
.irq_disable = msm_msi_mask_irq,
.irq_mask = msm_msi_mask_irq,


@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/bitops.h>
@@ -1078,12 +1079,14 @@ static int __ipa_create_rt_entry(struct ipa3_rt_entry **entry,
static int __ipa_finish_rt_rule_add(struct ipa3_rt_entry *entry, u32 *rule_hdl,
struct ipa3_rt_tbl *tbl)
{
int id;
int id, res = 0;
if (tbl->rule_cnt < IPA_RULE_CNT_MAX)
tbl->rule_cnt++;
else
return -EINVAL;
else {
res = -EINVAL;
goto failed;
}
if (entry->hdr)
entry->hdr->ref_cnt++;
else if (entry->proc_ctx)
@@ -1092,6 +1095,7 @@ static int __ipa_finish_rt_rule_add(struct ipa3_rt_entry *entry, u32 *rule_hdl,
if (id < 0) {
IPAERR_RL("failed to add to tree\n");
WARN_ON_RATELIMIT_IPA(1);
res = -EPERM;
goto ipa_insert_failed;
}
IPADBG("add rt rule tbl_idx=%d rule_cnt=%d rule_id=%d\n",
@@ -1106,10 +1110,11 @@ static int __ipa_finish_rt_rule_add(struct ipa3_rt_entry *entry, u32 *rule_hdl,
entry->hdr->ref_cnt--;
else if (entry->proc_ctx)
entry->proc_ctx->ref_cnt--;
failed:
idr_remove(tbl->rule_ids, entry->rule_id);
list_del(&entry->link);
kmem_cache_free(ipa3_ctx->rt_rule_cache, entry);
return -EPERM;
return res;
}
static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name,
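
__ipa_finish_rt_rule_add() now funnels every failure through labels so the rule counter, header refcounts, and the cached entry are unwound in reverse order of acquisition, and the real errno is propagated instead of a blanket -EPERM. A compact sketch of that goto-unwind idiom (plain C; the two steps and their names are made up):

#include <stdio.h>

/* Hypothetical two-step setup with reverse-order unwinding on failure. */
static int setup(int fail_at)
{
	int res = 0;

	/* step 1: take a countable resource */
	if (fail_at == 1) {
		res = -1;
		goto failed;            /* nothing acquired yet */
	}
	printf("count++\n");

	/* step 2: insert into a lookup structure */
	if (fail_at == 2) {
		res = -2;
		goto insert_failed;     /* undo step 1 only */
	}
	printf("inserted\n");
	return 0;

insert_failed:
	printf("count--\n");
failed:
	printf("free entry, return %d\n", res);
	return res;
}

int main(void)
{
	setup(0);
	setup(1);
	setup(2);
	return 0;
}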


@@ -2,6 +2,7 @@
/*
* Copyright (c) 2016-2017, Linaro Ltd
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/idr.h>
@@ -340,6 +341,38 @@ static void qcom_glink_channel_release(struct kref *ref)
kfree(channel);
}
static struct glink_channel *qcom_glink_channel_ref_get(
struct qcom_glink *glink,
bool remote_channel, int cid)
{
struct glink_channel *channel = NULL;
unsigned long flags;
if (!glink)
return NULL;
spin_lock_irqsave(&glink->idr_lock, flags);
if (remote_channel)
channel = idr_find(&glink->rcids, cid);
else
channel = idr_find(&glink->lcids, cid);
if (channel)
kref_get(&channel->refcount);
spin_unlock_irqrestore(&glink->idr_lock, flags);
return channel;
}
static void qcom_glink_channel_ref_put(struct glink_channel *channel)
{
if (!channel)
return;
kref_put(&channel->refcount, qcom_glink_channel_release);
}
static size_t qcom_glink_rx_avail(struct qcom_glink *glink)
{
return glink->rx_pipe->avail(glink->rx_pipe);
@@ -491,11 +524,8 @@ static void qcom_glink_handle_intent_req_ack(struct qcom_glink *glink,
unsigned int cid, bool granted)
{
struct glink_channel *channel;
unsigned long flags;
spin_lock_irqsave(&glink->idr_lock, flags);
channel = idr_find(&glink->rcids, cid);
spin_unlock_irqrestore(&glink->idr_lock, flags);
channel = qcom_glink_channel_ref_get(glink, true, cid);
if (!channel) {
dev_err(glink->dev, "unable to find channel\n");
return;
@@ -505,6 +535,7 @@ static void qcom_glink_handle_intent_req_ack(struct qcom_glink *glink,
atomic_inc(&channel->intent_req_comp);
wake_up(&channel->intent_req_event);
CH_INFO(channel, "\n");
qcom_glink_channel_ref_put(channel);
}
/**
@@ -853,9 +884,7 @@ static void qcom_glink_handle_rx_done(struct qcom_glink *glink,
struct glink_channel *channel;
unsigned long flags;
spin_lock_irqsave(&glink->idr_lock, flags);
channel = idr_find(&glink->rcids, cid);
spin_unlock_irqrestore(&glink->idr_lock, flags);
channel = qcom_glink_channel_ref_get(glink, true, cid);
if (!channel) {
dev_err(glink->dev, "invalid channel id received\n");
return;
@@ -867,6 +896,7 @@ static void qcom_glink_handle_rx_done(struct qcom_glink *glink,
if (!intent) {
spin_unlock_irqrestore(&channel->intent_lock, flags);
dev_err(glink->dev, "invalid intent id received\n");
qcom_glink_channel_ref_put(channel);
return;
}
@@ -878,6 +908,7 @@ static void qcom_glink_handle_rx_done(struct qcom_glink *glink,
kfree(intent);
}
spin_unlock_irqrestore(&channel->intent_lock, flags);
qcom_glink_channel_ref_put(channel);
}
/**
@@ -900,9 +931,7 @@ static void qcom_glink_handle_intent_req(struct qcom_glink *glink,
unsigned long flags;
int iid;
spin_lock_irqsave(&glink->idr_lock, flags);
channel = idr_find(&glink->rcids, cid);
spin_unlock_irqrestore(&glink->idr_lock, flags);
channel = qcom_glink_channel_ref_get(glink, true, cid);
if (!channel) {
pr_err("%s channel not found for cid %d\n", __func__, cid);
@@ -919,6 +948,7 @@ static void qcom_glink_handle_intent_req(struct qcom_glink *glink,
spin_unlock_irqrestore(&channel->intent_lock, flags);
if (intent) {
qcom_glink_send_intent_req_ack(glink, channel, !!intent);
qcom_glink_channel_ref_put(channel);
return;
}
@@ -928,6 +958,7 @@ static void qcom_glink_handle_intent_req(struct qcom_glink *glink,
qcom_glink_advertise_intent(glink, channel, intent);
qcom_glink_send_intent_req_ack(glink, channel, !!intent);
qcom_glink_channel_ref_put(channel);
}
static int qcom_glink_rx_defer(struct qcom_glink *glink, size_t extra)
@@ -962,7 +993,7 @@ static int qcom_glink_rx_defer(struct qcom_glink *glink, size_t extra)
static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail)
{
struct glink_core_rx_intent *intent;
struct glink_channel *channel;
struct glink_channel *channel = NULL;
struct {
struct glink_msg msg;
__le32 chunk_size;
@@ -990,9 +1021,7 @@ static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail)
}
rcid = le16_to_cpu(hdr.msg.param1);
spin_lock_irqsave(&glink->idr_lock, flags);
channel = idr_find(&glink->rcids, rcid);
spin_unlock_irqrestore(&glink->idr_lock, flags);
channel = qcom_glink_channel_ref_get(glink, true, rcid);
if (!channel) {
dev_dbg(glink->dev, "Data on non-existing channel\n");
@@ -1013,13 +1042,16 @@ static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail)
/* Might have an ongoing, fragmented, message to append */
if (!channel->buf) {
intent = kzalloc(sizeof(*intent), GFP_ATOMIC);
if (!intent)
if (!intent) {
qcom_glink_channel_ref_put(channel);
return -ENOMEM;
}
intent->data = kmalloc(chunk_size + left_size,
GFP_ATOMIC);
if (!intent->data) {
kfree(intent);
qcom_glink_channel_ref_put(channel);
return -ENOMEM;
}
@@ -1086,7 +1118,7 @@ static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail)
advance_rx:
qcom_glink_rx_advance(glink, ALIGN(sizeof(hdr) + chunk_size, 8));
qcom_glink_channel_ref_put(channel);
return ret;
}
@@ -1117,9 +1149,7 @@ static void qcom_glink_handle_intent(struct qcom_glink *glink,
return;
}
spin_lock_irqsave(&glink->idr_lock, flags);
channel = idr_find(&glink->rcids, cid);
spin_unlock_irqrestore(&glink->idr_lock, flags);
channel = qcom_glink_channel_ref_get(glink, true, cid);
if (!channel) {
dev_err(glink->dev, "intents for non-existing channel\n");
qcom_glink_rx_advance(glink, ALIGN(msglen, 8));
@@ -1127,8 +1157,10 @@ static void qcom_glink_handle_intent(struct qcom_glink *glink,
}
msg = kmalloc(msglen, GFP_ATOMIC);
if (!msg)
if (!msg) {
qcom_glink_channel_ref_put(channel);
return;
}
qcom_glink_rx_peak(glink, msg, 0, msglen);
@@ -1155,15 +1187,14 @@ static void qcom_glink_handle_intent(struct qcom_glink *glink,
kfree(msg);
qcom_glink_rx_advance(glink, ALIGN(msglen, 8));
qcom_glink_channel_ref_put(channel);
}
static int qcom_glink_rx_open_ack(struct qcom_glink *glink, unsigned int lcid)
{
struct glink_channel *channel;
spin_lock(&glink->idr_lock);
channel = idr_find(&glink->lcids, lcid);
spin_unlock(&glink->idr_lock);
channel = qcom_glink_channel_ref_get(glink, false, lcid);
if (!channel) {
dev_err(glink->dev, "Invalid open ack packet\n");
return -EINVAL;
@@ -1171,7 +1202,7 @@ static int qcom_glink_rx_open_ack(struct qcom_glink *glink, unsigned int lcid)
CH_INFO(channel, "\n");
complete_all(&channel->open_ack);
qcom_glink_channel_ref_put(channel);
return 0;
}
@@ -1201,12 +1232,9 @@ static int qcom_glink_handle_signals(struct qcom_glink *glink,
unsigned int rcid, unsigned int signals)
{
struct glink_channel *channel;
unsigned long flags;
u32 old;
spin_lock_irqsave(&glink->idr_lock, flags);
channel = idr_find(&glink->rcids, rcid);
spin_unlock_irqrestore(&glink->idr_lock, flags);
channel = qcom_glink_channel_ref_get(glink, true, rcid);
if (!channel) {
dev_err(glink->dev, "signal for non-existing channel\n");
return -EINVAL;
@@ -1220,6 +1248,7 @@ static int qcom_glink_handle_signals(struct qcom_glink *glink,
CH_INFO(channel, "old:%d new:%d\n", old, channel->rsigs);
qcom_glink_channel_ref_put(channel);
return 0;
}
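
qcom_glink_channel_ref_get() does the idr lookup and the kref_get() under the same idr_lock, so a channel cannot be released between being found and being pinned; every rx handler then pairs that get with qcom_glink_channel_ref_put() on each exit path, including the early error returns. A userspace sketch of the same lookup-pin-release discipline (pthreads; a toy one-slot registry, not the glink code):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct chan { int refs; };
static struct chan *slot;	/* toy registry: a single channel slot */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Lookup and ref-take under one lock: the object cannot vanish
 * between being found and being pinned. */
static struct chan *chan_ref_get(void)
{
	struct chan *c;

	pthread_mutex_lock(&lock);
	c = slot;
	if (c)
		c->refs++;
	pthread_mutex_unlock(&lock);
	return c;
}

static void chan_ref_put(struct chan *c)
{
	if (!c)
		return;
	pthread_mutex_lock(&lock);
	if (--c->refs == 0) {
		printf("released\n");
		free(c);
		slot = NULL;
	}
	pthread_mutex_unlock(&lock);
}

/* Handler shape used across the rx path: every exit, including
 * error returns, is paired with a put. */
static int handle_rx(int bad_packet)
{
	struct chan *c = chan_ref_get();

	if (!c)
		return -1;
	if (bad_packet) {
		chan_ref_put(c);	/* early-exit put */
		return -1;
	}
	printf("handled\n");
	chan_ref_put(c);
	return 0;
}

int main(void)
{
	slot = calloc(1, sizeof(*slot));
	slot->refs = 1;			/* the registry's own reference */
	handle_rx(0);
	handle_rx(1);
	chan_ref_put(slot);		/* registry drops its reference */
	return 0;
}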

View File

@@ -858,6 +858,10 @@ static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
ufs_qcom_config_vreg(hba->dev,
host->vccq_parent, false);
if (host->vccq2_parent && !hba->auto_bkops_enabled)
ufs_qcom_config_vreg(hba->dev,
host->vccq2_parent, false);
if (ufs_qcom_is_link_off(hba)) {
/* Assert PHY soft reset */
ufs_qcom_assert_reset(hba);
@@ -897,6 +901,9 @@ static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if (host->vccq_parent)
ufs_qcom_config_vreg(hba->dev, host->vccq_parent, true);
if (host->vccq2_parent)
ufs_qcom_config_vreg(hba->dev, host->vccq2_parent, true);
err = ufs_qcom_enable_lane_clks(host);
if (err)
goto out;
@@ -2089,6 +2096,8 @@ static int ufs_qcom_parse_reg_info(struct ufs_qcom_host *host, char *name,
vreg->min_uV = VDDP_REF_CLK_MIN_UV;
else if (!strcmp(name, "qcom,vccq-parent"))
vreg->min_uV = 0;
else if (!strcmp(name, "qcom,vccq2-parent"))
vreg->min_uV = 0;
ret = 0;
}
@@ -2101,6 +2110,8 @@ static int ufs_qcom_parse_reg_info(struct ufs_qcom_host *host, char *name,
vreg->max_uV = VDDP_REF_CLK_MAX_UV;
else if (!strcmp(name, "qcom,vccq-parent"))
vreg->max_uV = 0;
else if (!strcmp(name, "qcom,vccq2-parent"))
vreg->max_uV = 0;
ret = 0;
}
@@ -2248,6 +2259,17 @@ static int ufs_qcom_init(struct ufs_hba *hba)
}
}
err = ufs_qcom_parse_reg_info(host, "qcom,vccq2-parent",
&host->vccq2_parent);
if (host->vccq2_parent) {
err = ufs_qcom_config_vreg(hba->dev, host->vccq2_parent, true);
if (err) {
dev_err(dev, "%s: failed vccq2-parent set load: %d\n",
__func__, err);
goto out_disable_vddp;
}
}
err = ufs_qcom_init_lane_clks(host);
if (err)
goto out_set_load_vccq_parent;
@@ -2280,6 +2302,8 @@ static int ufs_qcom_init(struct ufs_hba *hba)
out_set_load_vccq_parent:
if (host->vccq_parent)
ufs_qcom_config_vreg(hba->dev, host->vccq_parent, false);
if (host->vccq2_parent)
ufs_qcom_config_vreg(hba->dev, host->vccq2_parent, false);
out_disable_vddp:
if (host->vddp_ref_clk)
ufs_qcom_disable_vreg(dev, host->vddp_ref_clk);


@@ -363,6 +363,7 @@ struct ufs_qcom_host {
struct request *req_pending;
struct ufs_vreg *vddp_ref_clk;
struct ufs_vreg *vccq_parent;
struct ufs_vreg *vccq2_parent;
bool work_pending;
bool is_phy_pwr_on;
bool err_occurred;


@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2013-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/err.h>
@@ -482,7 +482,6 @@ static void handle_alloc_generic_req(struct qmi_handle *handle,
}
if (!memblock[client_id].allotted && alloc_req->num_bytes > 0) {
if (alloc_req->num_bytes > memblock[client_id].init_size)
alloc_req->num_bytes = memblock[client_id].init_size;
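
handle_alloc_generic_req() caps the client-supplied num_bytes at the block's init_size before it can feed any later size arithmetic, the usual way to keep an untrusted length from overflowing downstream calculations. A trivial sketch of the clamp (plain C):

#include <stdint.h>
#include <stdio.h>

/* Clamp an untrusted request size to what the block was created with. */
static uint32_t clamp_request(uint32_t num_bytes, uint32_t init_size)
{
	return num_bytes > init_size ? init_size : num_bytes;
}

int main(void)
{
	printf("%u\n", clamp_request(4096, 65536));        /* unchanged */
	printf("%u\n", clamp_request(UINT32_MAX, 65536));  /* clamped   */
	return 0;
}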


@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#define pr_fmt(fmt) "smcinvoke: %s: " fmt, __func__
@@ -1672,8 +1672,11 @@ static long process_accept_req(struct file *filp, unsigned int cmd,
}
} while (!cb_txn);
out:
if (server_info)
if (server_info) {
mutex_lock(&g_smcinvoke_lock);
kref_put(&server_info->ref_cnt, destroy_cb_server);
mutex_unlock(&g_smcinvoke_lock);
}
if (ret && ret != -ERESTARTSYS)
pr_err("accept thread returning with ret: %d\n", ret);


@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/bitmap.h>
@@ -115,7 +115,7 @@
#define UART_CORE2X_VOTE (5000)
#define UART_CONSOLE_CORE2X_VOTE (960)
#define WAKEBYTE_TIMEOUT_MSEC (2000)
#define WAKEBYTE_TIMEOUT_MSEC (2000) /* 2 Seconds */
#define WAIT_XFER_MAX_ITER (2)
#define WAIT_XFER_MAX_TIMEOUT_US (150)
#define WAIT_XFER_MIN_TIMEOUT_US (100)
@@ -223,7 +223,6 @@ struct msm_geni_serial_port {
void *ipc_log_irqstatus;
unsigned int cur_baud;
int ioctl_count;
int edge_count;
bool manual_flow;
struct msm_geni_serial_ver_info ver_info;
u32 cur_tx_remaining;
@@ -241,6 +240,10 @@ struct msm_geni_serial_port {
enum uart_error_code uart_error;
struct work_struct work;
struct workqueue_struct *qwork;
atomic_t check_wakeup_byte;
struct workqueue_struct *wakeup_irq_wq;
struct delayed_work wakeup_irq_dwork;
struct completion wakeup_comp;
};
static void msm_geni_serial_worker(struct work_struct *work);
@@ -551,18 +554,24 @@ static int vote_clock_on(struct uart_port *uport)
int ret = 0;
u32 geni_ios;
if (port->ioctl_count) {
IPC_LOG_MSG(port->ipc_log_pwr,
"%s clock already on\n", __func__);
return ret;
}
ret = msm_geni_serial_power_on(uport);
if (ret) {
dev_err(uport->dev, "Failed to vote clock on\n");
return ret;
}
atomic_set(&port->check_wakeup_byte, 0);
complete(&port->wakeup_comp);
port->ioctl_count++;
usage_count = atomic_read(&uport->dev->power.usage_count);
geni_ios = geni_read_reg_nolog(uport->membase, SE_GENI_IOS);
IPC_LOG_MSG(port->ipc_log_pwr,
"%s :%s ioctl:%d usage_count:%d edge-Count:%d geni_ios:0x%x\n",
__func__, current->comm, port->ioctl_count,
usage_count, port->edge_count, geni_ios);
"%s :%s ioctl:%d usage_count:%d geni_ios:0x%x\n", __func__,
current->comm, port->ioctl_count, usage_count, geni_ios);
return 0;
}
@@ -570,6 +579,7 @@ static int vote_clock_off(struct uart_port *uport)
{
struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
int usage_count;
int ret = 0;
if (!pm_runtime_enabled(uport->dev)) {
dev_err(uport->dev, "RPM not available.Can't enable clocks\n");
@@ -584,6 +594,12 @@ static int vote_clock_off(struct uart_port *uport)
return -EPERM;
}
wait_for_transfers_inflight(uport);
if (ret) {
IPC_LOG_MSG(port->ipc_log_pwr,
"%s wait_for_transfer_inflight return ret: %d",
__func__, ret);
return -EAGAIN;
}
port->ioctl_count--;
msm_geni_serial_power_off(uport);
usage_count = atomic_read(&uport->dev->power.usage_count);
@@ -1568,7 +1584,8 @@ static int stop_rx_sequencer(struct uart_port *uport)
IPC_LOG_MSG(port->ipc_log_misc, "%s: Interrupt delay\n",
__func__);
handle_rx_dma_xfer(s_irq_status, uport);
if (!port->ioctl_count) {
if (pm_runtime_enabled(uport->dev) &&
!port->ioctl_count) {
usage_count = atomic_read(
&uport->dev->power.usage_count);
IPC_LOG_MSG(port->ipc_log_misc,
@@ -1628,6 +1645,23 @@ static int stop_rx_sequencer(struct uart_port *uport)
goto exit_rx_seq;
}
port->s_cmd_done = false;
/* Check if Cancel Interrupt arrived but irq is delayed */
s_irq_status = geni_read_reg(uport->membase,
SE_GENI_S_IRQ_STATUS);
if (s_irq_status & S_CMD_CANCEL_EN) {
/* Clear delayed Cancel IRQ */
geni_write_reg(S_CMD_CANCEL_EN, uport->membase,
SE_GENI_S_IRQ_CLEAR);
IPC_LOG_MSG(port->ipc_log_misc,
"%s Cancel Command succeeded 0x%x\n",
__func__, s_irq_status);
/* Reset the error code and skip abort operation */
msm_geni_update_uart_error_code(port,
UART_ERROR_DEFAULT);
goto exit_enable_irq;
}
reinit_completion(&port->s_cmd_timeout);
geni_abort_s_cmd(uport->membase);
/* Ensure this goes through before polling. */
@@ -1668,6 +1702,7 @@ static int stop_rx_sequencer(struct uart_port *uport)
}
}
}
exit_enable_irq:
/* Enable the interrupts once the cancel operation is done. */
msm_geni_serial_enable_interrupts(uport);
port->s_cmd = false;
@@ -1866,6 +1901,30 @@ static int msm_geni_serial_handle_tx(struct uart_port *uport, bool done,
return 0;
}
/*
* msm_geni_find_wakeup_byte() - Checks if wakeup byte is present
* in rx buffer
*
* @uport: pointer to uart port
* @size: size of rx data
*
* Return: true if wakeup byte found else false
*/
static bool msm_geni_find_wakeup_byte(struct uart_port *uport, int size)
{
struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
unsigned char *buf = (unsigned char *)port->rx_buf;
if (buf[0] == port->wakeup_byte) {
IPC_LOG_MSG(port->ipc_log_rx,
"%s Found wakeup byte\n", __func__);
atomic_set(&port->check_wakeup_byte, 0);
return true;
}
dump_ipc(port->ipc_log_rx, "Dropped Rx", buf, 0, size);
return false;
}
static void check_rx_buf(char *buf, struct uart_port *uport, int size)
{
struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
@@ -1926,27 +1985,40 @@ static int msm_geni_serial_handle_dma_rx(struct uart_port *uport, bool drop_rx)
if (unlikely(!rx_bytes)) {
IPC_LOG_MSG(msm_port->ipc_log_rx, "%s: Size %d\n",
__func__, rx_bytes);
goto exit_handle_dma_rx;
return 0;
}
/* Check RX buffer data for faulty pattern*/
check_rx_buf((char *)msm_port->rx_buf, uport, rx_bytes);
if (drop_rx)
goto exit_handle_dma_rx;
return 0;
if (atomic_read(&msm_port->check_wakeup_byte)) {
ret = msm_geni_find_wakeup_byte(uport, rx_bytes);
if (!ret) {
/* wakeup byte not found, drop the rx data */
IPC_LOG_MSG(msm_port->ipc_log_rx,
"%s dropping Rx data as wakeup byte not found in %d bytes\n",
__func__, rx_bytes);
memset(msm_port->rx_buf, 0, rx_bytes);
return 0;
}
}
tport = &uport->state->port;
ret = tty_insert_flip_string(tport, (unsigned char *)(msm_port->rx_buf),
ret = tty_insert_flip_string(tport,
(unsigned char *)(msm_port->rx_buf),
rx_bytes);
if (ret != rx_bytes) {
dev_err(uport->dev, "%s: ret %d rx_bytes %d\n", __func__,
ret, rx_bytes);
WARN_ON(1);
IPC_LOG_MSG(msm_port->ipc_log_rx, "%s: ret %d rx_bytes %d\n",
__func__, ret, rx_bytes);
WARN_ON_ONCE(1);
}
uport->icount.rx += ret;
tty_flip_buffer_push(tport);
dump_ipc(msm_port->ipc_log_rx, "DMA Rx", (char *)msm_port->rx_buf, 0,
rx_bytes);
dump_ipc(msm_port->ipc_log_rx, "DMA Rx",
(char *)msm_port->rx_buf, 0, rx_bytes);
/*
* DMA_DONE interrupt doesn't confirm that the DATA is copied to
@@ -1955,7 +2027,6 @@ static int msm_geni_serial_handle_dma_rx(struct uart_port *uport, bool drop_rx)
* change to identify such scenario.
*/
memset(msm_port->rx_buf, 0, rx_bytes);
exit_handle_dma_rx:
return ret;
}
@@ -2008,7 +2079,6 @@ static bool handle_rx_dma_xfer(u32 s_irq_status, struct uart_port *uport)
IPC_LOG_MSG(msm_port->ipc_log_misc,
"%s.Reset done. 0x%x.\n", __func__, dma_rx_status);
ret = true;
goto exit;
}
if (dma_rx_status & UART_DMA_RX_ERRS) {
@@ -2063,7 +2133,6 @@ static bool handle_rx_dma_xfer(u32 s_irq_status, struct uart_port *uport)
if (s_irq_status & (S_CMD_CANCEL_EN | S_CMD_ABORT_EN))
ret = true;
exit:
spin_unlock(&msm_port->rx_lock);
return ret;
}
@@ -2226,6 +2295,46 @@ static void msm_geni_serial_handle_isr(struct uart_port *uport,
}
}
/*
* msm_geni_wakeup_work() - Worker function invoked by wakeup isr,
* powers on uart for data transfer and power off after
* WAKEBYTE_TIMEOUT_MSEC(2secs)
*
* @work: pointer to work structure
*
* Return: None
*/
static void msm_geni_wakeup_work(struct work_struct *work)
{
struct msm_geni_serial_port *port;
struct uart_port *uport;
port = container_of(work, struct msm_geni_serial_port,
wakeup_irq_dwork.work);
if (!atomic_read(&port->check_wakeup_byte))
return;
uport = &port->uport;
reinit_completion(&port->wakeup_comp);
if (msm_geni_serial_power_on(uport)) {
atomic_set(&port->check_wakeup_byte, 0);
IPC_LOG_MSG(port->ipc_log_rx,
"%s:Failed to power on\n", __func__);
return;
}
/* wait to receive wakeup byte in rx path */
if (!wait_for_completion_timeout(&port->wakeup_comp,
msecs_to_jiffies(WAKEBYTE_TIMEOUT_MSEC
))) {
IPC_LOG_MSG(port->ipc_log_rx,
"%s completion of wakeup_comp task timedout %dmsec\n",
__func__, WAKEBYTE_TIMEOUT_MSEC);
/* Check if port is closed during the task timeout time */
if (!uport->state->port.tty)
return;
}
msm_geni_serial_power_off(uport);
}
static irqreturn_t msm_geni_serial_isr(int isr, void *dev)
{
struct uart_port *uport = dev;
@@ -2245,19 +2354,28 @@ static irqreturn_t msm_geni_wakeup_isr(int isr, void *dev)
unsigned long flags;
spin_lock_irqsave(&uport->lock, flags);
IPC_LOG_MSG(port->ipc_log_rx, "%s: Edge-Count %d\n", __func__,
port->edge_count);
if (port->wakeup_byte && (port->edge_count == 2)) {
tty = uport->state->port.tty;
tty_insert_flip_char(tty->port, port->wakeup_byte, TTY_NORMAL);
IPC_LOG_MSG(port->ipc_log_rx, "%s: Inject 0x%x\n",
__func__, port->wakeup_byte);
port->edge_count = 0;
tty_flip_buffer_push(tty->port);
__pm_wakeup_event(port->geni_wake, WAKEBYTE_TIMEOUT_MSEC);
} else if (port->edge_count < 2) {
port->edge_count++;
IPC_LOG_MSG(port->ipc_log_rx, "%s\n", __func__);
if (atomic_read(&port->check_wakeup_byte)) {
spin_unlock_irqrestore(&uport->lock, flags);
return IRQ_HANDLED;
}
tty = uport->state->port.tty;
/* uport->state->port.tty pointer initialized as part of
 * UART port_open. Adding null check to ensure tty has
 * a valid value before dereferencing it in wakeup_isr.
 */
if (!tty) {
IPC_LOG_MSG(port->ipc_log_rx,
"%s: Unexpected wakeup ISR\n", __func__);
WARN_ON_ONCE(1);
spin_unlock_irqrestore(&uport->lock, flags);
return IRQ_HANDLED;
}
atomic_set(&port->check_wakeup_byte, 1);
queue_delayed_work(port->wakeup_irq_wq, &port->wakeup_irq_dwork, 0);
spin_unlock_irqrestore(&uport->lock, flags);
return IRQ_HANDLED;
}
@@ -2483,12 +2601,19 @@ static int msm_geni_serial_startup(struct uart_port *uport)
enable_irq(uport->irq);
if (msm_port->wakeup_irq > 0) {
msm_port->wakeup_irq_wq = alloc_workqueue("%s", WQ_HIGHPRI, 1,
dev_name(uport->dev));
if (!msm_port->wakeup_irq_wq)
return -ENOMEM;
INIT_DELAYED_WORK(&msm_port->wakeup_irq_dwork,
msm_geni_wakeup_work);
ret = request_irq(msm_port->wakeup_irq, msm_geni_wakeup_isr,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
"hs_uart_wakeup", uport);
if (unlikely(ret)) {
dev_err(uport->dev, "%s:Failed to get WakeIRQ ret%d\n",
__func__, ret);
destroy_workqueue(msm_port->wakeup_irq_wq);
goto exit_startup;
}
disable_irq(msm_port->wakeup_irq);
@@ -2719,10 +2844,12 @@ static void msm_geni_serial_set_termios(struct uart_port *uport,
if (termios->c_cflag & CRTSCTS) {
tx_trans_cfg &= ~UART_CTS_MASK;
uport->status |= UPSTAT_AUTOCTS;
}
else
msm_geni_serial_set_manual_flow(true, port);
} else {
tx_trans_cfg |= UART_CTS_MASK;
msm_geni_serial_set_manual_flow(false, port);
/* status bits to ignore */
}
if (likely(baud))
uart_update_timeout(uport, termios->c_cflag, baud);
@@ -2731,12 +2858,6 @@ static void msm_geni_serial_set_termios(struct uart_port *uport,
tx_parity_cfg, rx_trans_cfg, rx_parity_cfg, bits_per_char,
stop_bit_len, ser_clk_cfg);
if (termios->c_cflag & CRTSCTS) {
geni_write_reg_nolog(0x0, uport->membase, SE_UART_MANUAL_RFR);
IPC_LOG_MSG(port->ipc_log_misc,
"%s: Manual flow Disabled, HW Flow ON\n", __func__);
}
IPC_LOG_MSG(port->ipc_log_misc, "%s: baud %d\n", __func__, baud);
IPC_LOG_MSG(port->ipc_log_misc, "Tx: trans_cfg%d parity %d\n",
tx_trans_cfg, tx_parity_cfg);
@@ -3497,6 +3618,7 @@ static int msm_geni_serial_probe(struct platform_device *pdev)
init_completion(&dev_port->m_cmd_timeout);
init_completion(&dev_port->s_cmd_timeout);
init_completion(&dev_port->wakeup_comp);
uport->irq = platform_get_irq(pdev, 0);
if (uport->irq < 0) {
ret = uport->irq;
@@ -3605,6 +3727,8 @@ static int msm_geni_serial_remove(struct platform_device *pdev)
flush_workqueue(port->qwork);
destroy_workqueue(port->qwork);
}
if (port->wakeup_irq > 0)
destroy_workqueue(port->wakeup_irq_wq);
uart_remove_one_port(drv, &port->uport);
if (port->rx_dma) {
geni_se_iommu_free_buf(port->wrapper_dev, &port->rx_dma,
@@ -3679,7 +3803,7 @@ static int msm_geni_serial_runtime_suspend(struct device *dev)
}
if (port->wakeup_irq > 0) {
port->edge_count = 0;
atomic_set(&port->check_wakeup_byte, 0);
enable_irq(port->wakeup_irq);
}
IPC_LOG_MSG(port->ipc_log_pwr, "%s: End\n", __func__);
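
msm_geni_wakeup_work() above is a power-on / wait / power-off handshake: the worker keeps the port powered until the rx path completes wakeup_comp or WAKEBYTE_TIMEOUT_MSEC expires. A userspace analogue of that handshake (pthreads; the condition variable stands in for the kernel completion, and the prints stand in for the power votes):

#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wakeup_comp = PTHREAD_COND_INITIALIZER;
static int wakeup_seen;

/* rx path: signals that the expected wakeup byte arrived */
static void *rx_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	wakeup_seen = 1;
	pthread_cond_signal(&wakeup_comp);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t rx;
	struct timespec deadline;

	printf("power on\n");		/* msm_geni_serial_power_on() */
	pthread_create(&rx, NULL, rx_thread, NULL);

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 2;		/* WAKEBYTE_TIMEOUT_MSEC */

	pthread_mutex_lock(&lock);
	while (!wakeup_seen) {
		if (pthread_cond_timedwait(&wakeup_comp, &lock,
					   &deadline) != 0) {
			printf("timed out waiting for wakeup byte\n");
			break;
		}
	}
	pthread_mutex_unlock(&lock);

	pthread_join(rx, NULL);
	printf("power off\n");		/* msm_geni_serial_power_off() */
	return 0;
}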