Merge remote-tracking branch 'qcom_sm8250/lineage-20' into lineage-22.2

Change-Id: I41d3b2e791439f1ef1d34ced81a40912461bb3b0
This commit is contained in:
Sebastiano Barezzi
2025-04-02 13:37:55 +02:00
201 changed files with 6939 additions and 4071 deletions

View File

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0 # SPDX-License-Identifier: GPL-2.0
VERSION = 4 VERSION = 4
PATCHLEVEL = 19 PATCHLEVEL = 19
SUBLEVEL = 324 SUBLEVEL = 325
EXTRAVERSION = EXTRAVERSION =
NAME = "People's Front" NAME = "People's Front"

View File

@@ -253,8 +253,8 @@
reg_dcdc5: dcdc5 { reg_dcdc5: dcdc5 {
regulator-always-on; regulator-always-on;
regulator-min-microvolt = <1425000>; regulator-min-microvolt = <1450000>;
regulator-max-microvolt = <1575000>; regulator-max-microvolt = <1550000>;
regulator-name = "vcc-dram"; regulator-name = "vcc-dram";
}; };

View File

@@ -0,0 +1,67 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/clock/adi,axi-clkgen.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Binding for Analog Devices AXI clkgen pcore clock generator
maintainers:
- Lars-Peter Clausen <lars@metafoo.de>
- Michael Hennerich <michael.hennerich@analog.com>
description: |
The axi_clkgen IP core is a software programmable clock generator,
that can be synthesized on various FPGA platforms.
Link: https://wiki.analog.com/resources/fpga/docs/axi_clkgen
properties:
compatible:
enum:
- adi,axi-clkgen-2.00.a
clocks:
description:
Specifies the reference clock(s) from which the output frequency is
derived. This must either reference one clock if only the first clock
input is connected or two if both clock inputs are connected. The last
clock is the AXI bus clock that needs to be enabled so we can access the
core registers.
minItems: 2
maxItems: 3
clock-names:
oneOf:
- items:
- const: clkin1
- const: s_axi_aclk
- items:
- const: clkin1
- const: clkin2
- const: s_axi_aclk
'#clock-cells':
const: 0
reg:
maxItems: 1
required:
- compatible
- reg
- clocks
- clock-names
- '#clock-cells'
additionalProperties: false
examples:
- |
clock-controller@ff000000 {
compatible = "adi,axi-clkgen-2.00.a";
#clock-cells = <0>;
reg = <0xff000000 0x1000>;
clocks = <&osc 1>, <&clkc 15>;
clock-names = "clkin1", "s_axi_aclk";
};

View File

@@ -1,25 +0,0 @@
Binding for the axi-clkgen clock generator
This binding uses the common clock binding[1].
[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
Required properties:
- compatible : shall be "adi,axi-clkgen-1.00.a" or "adi,axi-clkgen-2.00.a".
- #clock-cells : from common clock binding; Should always be set to 0.
- reg : Address and length of the axi-clkgen register set.
- clocks : Phandle and clock specifier for the parent clock(s). This must
either reference one clock if only the first clock input is connected or two
if both clock inputs are connected. For the later case the clock connected
to the first input must be specified first.
Optional properties:
- clock-output-names : From common clock binding.
Example:
clock@ff000000 {
compatible = "adi,axi-clkgen";
#clock-cells = <0>;
reg = <0xff000000 0x1000>;
clocks = <&osc 1>;
};

View File

@@ -106,6 +106,7 @@ CONFIG_PARTITION_ADVANCED=y
CONFIG_CFQ_GROUP_IOSCHED=y CONFIG_CFQ_GROUP_IOSCHED=y
CONFIG_IOSCHED_BFQ=y CONFIG_IOSCHED_BFQ=y
CONFIG_BFQ_GROUP_IOSCHED=y CONFIG_BFQ_GROUP_IOSCHED=y
CONFIG_GKI_HIDDEN_GPU_CONFIGS=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_MEMORY_HOTPLUG=y CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y
@@ -120,6 +121,7 @@ CONFIG_PACKET=y
CONFIG_UNIX=y CONFIG_UNIX=y
CONFIG_XFRM_USER=y CONFIG_XFRM_USER=y
CONFIG_XFRM_INTERFACE=y CONFIG_XFRM_INTERFACE=y
CONFIG_XFRM_MIGRATE=y
CONFIG_XFRM_STATISTICS=y CONFIG_XFRM_STATISTICS=y
CONFIG_NET_KEY=y CONFIG_NET_KEY=y
CONFIG_INET=y CONFIG_INET=y
@@ -440,6 +442,7 @@ CONFIG_HID_MULTITOUCH=y
CONFIG_HID_NINTENDO=y CONFIG_HID_NINTENDO=y
CONFIG_HID_PLANTRONICS=y CONFIG_HID_PLANTRONICS=y
CONFIG_HID_SONY=y CONFIG_HID_SONY=y
CONFIG_SONY_FF=y
CONFIG_USB_HIDDEV=y CONFIG_USB_HIDDEV=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_XHCI_HCD=y CONFIG_USB_XHCI_HCD=y
@@ -648,6 +651,7 @@ CONFIG_STATIC_USERMODEHELPER_PATH=""
CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y CONFIG_SECURITY_SMACK=y
CONFIG_CRYPTO_GCM=y CONFIG_CRYPTO_GCM=y
CONFIG_CRYPTO_CHACHA20POLY1305=y
CONFIG_CRYPTO_XCBC=y CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_TWOFISH=y CONFIG_CRYPTO_TWOFISH=y
CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_ANSI_CPRNG=y

View File

@@ -104,6 +104,7 @@ CONFIG_PARTITION_ADVANCED=y
CONFIG_CFQ_GROUP_IOSCHED=y CONFIG_CFQ_GROUP_IOSCHED=y
CONFIG_IOSCHED_BFQ=y CONFIG_IOSCHED_BFQ=y
CONFIG_BFQ_GROUP_IOSCHED=y CONFIG_BFQ_GROUP_IOSCHED=y
CONFIG_GKI_HIDDEN_GPU_CONFIGS=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_MEMORY_HOTPLUG=y CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y
@@ -120,6 +121,7 @@ CONFIG_PACKET=y
CONFIG_UNIX=y CONFIG_UNIX=y
CONFIG_XFRM_USER=y CONFIG_XFRM_USER=y
CONFIG_XFRM_INTERFACE=y CONFIG_XFRM_INTERFACE=y
CONFIG_XFRM_MIGRATE=y
CONFIG_XFRM_STATISTICS=y CONFIG_XFRM_STATISTICS=y
CONFIG_NET_KEY=y CONFIG_NET_KEY=y
CONFIG_INET=y CONFIG_INET=y
@@ -707,6 +709,7 @@ CONFIG_SECURITY_APPARMOR=y
CONFIG_SECURITY_APPARMOR_BOOTPARAM_VALUE=0 CONFIG_SECURITY_APPARMOR_BOOTPARAM_VALUE=0
CONFIG_CRYPTO_CCM=y CONFIG_CRYPTO_CCM=y
CONFIG_CRYPTO_GCM=y CONFIG_CRYPTO_GCM=y
CONFIG_CRYPTO_CHACHA20POLY1305=y
CONFIG_CRYPTO_XCBC=y CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_TWOFISH=y CONFIG_CRYPTO_TWOFISH=y
CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_ANSI_CPRNG=y

View File

@@ -106,6 +106,7 @@ CONFIG_PARTITION_ADVANCED=y
CONFIG_CFQ_GROUP_IOSCHED=y CONFIG_CFQ_GROUP_IOSCHED=y
CONFIG_IOSCHED_BFQ=y CONFIG_IOSCHED_BFQ=y
CONFIG_BFQ_GROUP_IOSCHED=y CONFIG_BFQ_GROUP_IOSCHED=y
CONFIG_GKI_HIDDEN_GPU_CONFIGS=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_MEMORY_HOTPLUG=y CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y
@@ -121,6 +122,7 @@ CONFIG_PACKET=y
CONFIG_UNIX=y CONFIG_UNIX=y
CONFIG_XFRM_USER=y CONFIG_XFRM_USER=y
CONFIG_XFRM_INTERFACE=y CONFIG_XFRM_INTERFACE=y
CONFIG_XFRM_MIGRATE=y
CONFIG_XFRM_STATISTICS=y CONFIG_XFRM_STATISTICS=y
CONFIG_NET_KEY=y CONFIG_NET_KEY=y
CONFIG_INET=y CONFIG_INET=y
@@ -450,6 +452,7 @@ CONFIG_HID_MULTITOUCH=y
CONFIG_HID_NINTENDO=y CONFIG_HID_NINTENDO=y
CONFIG_HID_PLANTRONICS=y CONFIG_HID_PLANTRONICS=y
CONFIG_HID_SONY=y CONFIG_HID_SONY=y
CONFIG_SONY_FF=y
CONFIG_USB_HIDDEV=y CONFIG_USB_HIDDEV=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_XHCI_HCD=y CONFIG_USB_XHCI_HCD=y
@@ -678,6 +681,7 @@ CONFIG_STATIC_USERMODEHELPER_PATH=""
CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y CONFIG_SECURITY_SMACK=y
CONFIG_CRYPTO_GCM=y CONFIG_CRYPTO_GCM=y
CONFIG_CRYPTO_CHACHA20POLY1305=y
CONFIG_CRYPTO_XCBC=y CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_TWOFISH=y CONFIG_CRYPTO_TWOFISH=y
CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_ANSI_CPRNG=y

View File

@@ -40,9 +40,9 @@
/* Translate a kernel address of @sym into its equivalent linear mapping */ /* Translate a kernel address of @sym into its equivalent linear mapping */
#define kvm_ksym_ref(sym) \ #define kvm_ksym_ref(sym) \
({ \ ({ \
void *val = &sym; \ void *val = __va_function(sym); \
if (!is_kernel_in_hyp_mode()) \ if (!is_kernel_in_hyp_mode()) \
val = lm_alias(&sym); \ val = lm_alias(val); \
val; \ val; \
}) })

View File

@@ -62,8 +62,11 @@
#define PAGE_OFFSET (UL(0xffffffffffffffff) - \ #define PAGE_OFFSET (UL(0xffffffffffffffff) - \
(UL(1) << (VA_BITS - 1)) + 1) (UL(1) << (VA_BITS - 1)) + 1)
#define KIMAGE_VADDR (MODULES_END) #define KIMAGE_VADDR (MODULES_END)
#define BPF_JIT_REGION_START (VA_START + KASAN_SHADOW_SIZE)
#define BPF_JIT_REGION_SIZE (SZ_128M)
#define BPF_JIT_REGION_END (BPF_JIT_REGION_START + BPF_JIT_REGION_SIZE)
#define MODULES_END (MODULES_VADDR + MODULES_VSIZE) #define MODULES_END (MODULES_VADDR + MODULES_VSIZE)
#define MODULES_VADDR (VA_START + KASAN_SHADOW_SIZE) #define MODULES_VADDR (BPF_JIT_REGION_END)
#define MODULES_VSIZE (SZ_128M) #define MODULES_VSIZE (SZ_128M)
#define VMEMMAP_START (PAGE_OFFSET - VMEMMAP_SIZE) #define VMEMMAP_START (PAGE_OFFSET - VMEMMAP_SIZE)
#define PCI_IO_END (VMEMMAP_START - SZ_2M) #define PCI_IO_END (VMEMMAP_START - SZ_2M)
@@ -323,13 +326,15 @@ static inline void *phys_to_virt(phys_addr_t x)
* virtual address. Therefore, use inline assembly to ensure we are * virtual address. Therefore, use inline assembly to ensure we are
* always taking the address of the actual function. * always taking the address of the actual function.
*/ */
#define __pa_function(x) ({ \ #define __va_function(x) ({ \
unsigned long addr; \ void *addr; \
asm("adrp %0, " __stringify(x) "\n\t" \ asm("adrp %0, " __stringify(x) "\n\t" \
"add %0, %0, :lo12:" __stringify(x) : "=r" (addr)); \ "add %0, %0, :lo12:" __stringify(x) : "=r" (addr)); \
__pa_symbol(addr); \ addr; \
}) })
#define __pa_function(x) __pa_symbol(__va_function(x))
/* /*
* virt_to_page(k) convert a _valid_ virtual address to struct page * * virt_to_page(k) convert a _valid_ virtual address to struct page *
* virt_addr_valid(k) indicates whether a virtual address is valid * virt_addr_valid(k) indicates whether a virtual address is valid

View File

@@ -16,7 +16,7 @@
void __cpu_soft_restart(unsigned long el2_switch, unsigned long entry, void __cpu_soft_restart(unsigned long el2_switch, unsigned long entry,
unsigned long arg0, unsigned long arg1, unsigned long arg2); unsigned long arg0, unsigned long arg1, unsigned long arg2);
static inline void __noreturn cpu_soft_restart(unsigned long entry, static inline void __noreturn __nocfi cpu_soft_restart(unsigned long entry,
unsigned long arg0, unsigned long arg0,
unsigned long arg1, unsigned long arg1,
unsigned long arg2) unsigned long arg2)

View File

@@ -58,7 +58,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
unsigned long pc; unsigned long pc;
u32 new; u32 new;
pc = (unsigned long)&ftrace_call; pc = (unsigned long)__va_function(ftrace_call);
new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func, new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func,
AARCH64_INSN_BRANCH_LINK); AARCH64_INSN_BRANCH_LINK);

View File

@@ -465,7 +465,7 @@ static void tls_thread_switch(struct task_struct *next)
if (is_compat_thread(task_thread_info(next))) if (is_compat_thread(task_thread_info(next)))
write_sysreg(next->thread.uw.tp_value, tpidrro_el0); write_sysreg(next->thread.uw.tp_value, tpidrro_el0);
else if (!arm64_kernel_unmapped_at_el0()) else
write_sysreg(0, tpidrro_el0); write_sysreg(0, tpidrro_el0);
write_sysreg(*task_user_tls(next), tpidr_el0); write_sysreg(*task_user_tls(next), tpidr_el0);

View File

@@ -964,24 +964,26 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
return prog; return prog;
} }
void *bpf_jit_alloc_exec(unsigned long size)
{
return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START,
BPF_JIT_REGION_END, GFP_KERNEL,
PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
__builtin_return_address(0));
}
void bpf_jit_free_exec(void *addr)
{
return vfree(addr);
}
#ifdef CONFIG_CFI_CLANG #ifdef CONFIG_CFI_CLANG
bool arch_bpf_jit_check_func(const struct bpf_prog *prog) bool arch_bpf_jit_check_func(const struct bpf_prog *prog)
{ {
const uintptr_t func = (const uintptr_t)prog->bpf_func; const uintptr_t func = (const uintptr_t)prog->bpf_func;
/* /* bpf_func must be correctly aligned and within the BPF JIT region */
* bpf_func must be correctly aligned and within the correct region. return (func >= BPF_JIT_REGION_START && func < BPF_JIT_REGION_END &&
* module_alloc places JIT code in the module region, unless IS_ALIGNED(func, sizeof(u32)));
* ARM64_MODULE_PLTS is enabled, in which case we might end up using
* the vmalloc region too.
*/
if (unlikely(!IS_ALIGNED(func, sizeof(u32))))
return false;
if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
is_vmalloc_addr(prog->bpf_func))
return true;
return (func >= MODULES_VADDR && func < MODULES_END);
} }
#endif #endif

View File

@@ -89,7 +89,7 @@ static struct platform_device mcf_uart = {
.dev.platform_data = mcf_uart_platform_data, .dev.platform_data = mcf_uart_platform_data,
}; };
#if IS_ENABLED(CONFIG_FEC) #ifdef MCFFEC_BASE0
#ifdef CONFIG_M5441x #ifdef CONFIG_M5441x
#define FEC_NAME "enet-fec" #define FEC_NAME "enet-fec"
@@ -141,6 +141,7 @@ static struct platform_device mcf_fec0 = {
.platform_data = FEC_PDATA, .platform_data = FEC_PDATA,
} }
}; };
#endif /* MCFFEC_BASE0 */
#ifdef MCFFEC_BASE1 #ifdef MCFFEC_BASE1
static struct resource mcf_fec1_resources[] = { static struct resource mcf_fec1_resources[] = {
@@ -178,7 +179,6 @@ static struct platform_device mcf_fec1 = {
} }
}; };
#endif /* MCFFEC_BASE1 */ #endif /* MCFFEC_BASE1 */
#endif /* CONFIG_FEC */
#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
/* /*
@@ -478,12 +478,12 @@ static struct platform_device mcf_i2c5 = {
static struct platform_device *mcf_devices[] __initdata = { static struct platform_device *mcf_devices[] __initdata = {
&mcf_uart, &mcf_uart,
#if IS_ENABLED(CONFIG_FEC) #ifdef MCFFEC_BASE0
&mcf_fec0, &mcf_fec0,
#endif
#ifdef MCFFEC_BASE1 #ifdef MCFFEC_BASE1
&mcf_fec1, &mcf_fec1,
#endif #endif
#endif
#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
&mcf_qspi, &mcf_qspi,
#endif #endif

View File

@@ -152,7 +152,7 @@ static inline void gpio_free(unsigned gpio)
* read-modify-write as well as those controlled by the EPORT and GPIO modules. * read-modify-write as well as those controlled by the EPORT and GPIO modules.
*/ */
#define MCFGPIO_SCR_START 40 #define MCFGPIO_SCR_START 40
#elif defined(CONFIGM5441x) #elif defined(CONFIG_M5441x)
/* The m5441x EPORT doesn't have its own GPIO port, uses PORT C */ /* The m5441x EPORT doesn't have its own GPIO port, uses PORT C */
#define MCFGPIO_SCR_START 0 #define MCFGPIO_SCR_START 0
#else #else

View File

@@ -90,8 +90,8 @@ struct pcc_regs {
#define M147_SCC_B_ADDR 0xfffe3000 #define M147_SCC_B_ADDR 0xfffe3000
#define M147_SCC_PCLK 5000000 #define M147_SCC_PCLK 5000000
#define MVME147_IRQ_SCSI_PORT (IRQ_USER+0x45) #define MVME147_IRQ_SCSI_PORT (IRQ_USER + 5)
#define MVME147_IRQ_SCSI_DMA (IRQ_USER+0x46) #define MVME147_IRQ_SCSI_DMA (IRQ_USER + 6)
/* SCC interrupts, for MVME147 */ /* SCC interrupts, for MVME147 */

View File

@@ -12,8 +12,9 @@
#include <linux/string.h> #include <linux/string.h>
#include <asm/setup.h> #include <asm/setup.h>
extern void mvme16x_cons_write(struct console *co,
const char *str, unsigned count); #include "../mvme147/mvme147.h"
#include "../mvme16x/mvme16x.h"
asmlinkage void __init debug_cons_nputs(const char *s, unsigned n); asmlinkage void __init debug_cons_nputs(const char *s, unsigned n);
@@ -22,7 +23,9 @@ static void __ref debug_cons_write(struct console *c,
{ {
#if !(defined(CONFIG_SUN3) || defined(CONFIG_M68000) || \ #if !(defined(CONFIG_SUN3) || defined(CONFIG_M68000) || \
defined(CONFIG_COLDFIRE)) defined(CONFIG_COLDFIRE))
if (MACH_IS_MVME16x) if (MACH_IS_MVME147)
mvme147_scc_write(c, s, n);
else if (MACH_IS_MVME16x)
mvme16x_cons_write(c, s, n); mvme16x_cons_write(c, s, n);
else else
debug_cons_nputs(s, n); debug_cons_nputs(s, n);

View File

@@ -35,6 +35,7 @@
#include <asm/machdep.h> #include <asm/machdep.h>
#include <asm/mvme147hw.h> #include <asm/mvme147hw.h>
#include "mvme147.h"
static void mvme147_get_model(char *model); static void mvme147_get_model(char *model);
extern void mvme147_sched_init(irq_handler_t handler); extern void mvme147_sched_init(irq_handler_t handler);
@@ -164,3 +165,32 @@ int mvme147_hwclk(int op, struct rtc_time *t)
} }
return 0; return 0;
} }
static void scc_delay(void)
{
__asm__ __volatile__ ("nop; nop;");
}
static void scc_write(char ch)
{
do {
scc_delay();
} while (!(in_8(M147_SCC_A_ADDR) & BIT(2)));
scc_delay();
out_8(M147_SCC_A_ADDR, 8);
scc_delay();
out_8(M147_SCC_A_ADDR, ch);
}
void mvme147_scc_write(struct console *co, const char *str, unsigned int count)
{
unsigned long flags;
local_irq_save(flags);
while (count--) {
if (*str == '\n')
scc_write('\r');
scc_write(*str++);
}
local_irq_restore(flags);
}

View File

@@ -0,0 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
struct console;
/* config.c */
void mvme147_scc_write(struct console *co, const char *str, unsigned int count);

View File

@@ -38,6 +38,8 @@
#include <asm/machdep.h> #include <asm/machdep.h>
#include <asm/mvme16xhw.h> #include <asm/mvme16xhw.h>
#include "mvme16x.h"
extern t_bdid mvme_bdid; extern t_bdid mvme_bdid;
static MK48T08ptr_t volatile rtc = (MK48T08ptr_t)MVME_RTC_BASE; static MK48T08ptr_t volatile rtc = (MK48T08ptr_t)MVME_RTC_BASE;

View File

@@ -0,0 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
struct console;
/* config.c */
void mvme16x_cons_write(struct console *co, const char *str, unsigned count);

View File

@@ -164,9 +164,4 @@ extern int emulate_step(struct pt_regs *regs, unsigned int instr);
*/ */
extern int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op); extern int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op);
extern void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
const void *mem, bool cross_endian);
extern void emulate_vsx_store(struct instruction_op *op,
const union vsx_reg *reg, void *mem,
bool cross_endian);
extern int emulate_dcbz(unsigned long ea, struct pt_regs *regs); extern int emulate_dcbz(unsigned long ea, struct pt_regs *regs);

View File

@@ -49,6 +49,7 @@ int vdso_getcpu_init(void);
#define V_FUNCTION_BEGIN(name) \ #define V_FUNCTION_BEGIN(name) \
.globl name; \ .globl name; \
.type name,@function; \
name: \ name: \
#define V_FUNCTION_END(name) \ #define V_FUNCTION_END(name) \

View File

@@ -667,8 +667,8 @@ static nokprobe_inline int emulate_stq(struct pt_regs *regs, unsigned long ea,
#endif /* __powerpc64 */ #endif /* __powerpc64 */
#ifdef CONFIG_VSX #ifdef CONFIG_VSX
void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg, static nokprobe_inline void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
const void *mem, bool rev) const void *mem, bool rev)
{ {
int size, read_size; int size, read_size;
int i, j; int i, j;
@@ -748,11 +748,9 @@ void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
break; break;
} }
} }
EXPORT_SYMBOL_GPL(emulate_vsx_load);
NOKPROBE_SYMBOL(emulate_vsx_load);
void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg, static nokprobe_inline void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
void *mem, bool rev) void *mem, bool rev)
{ {
int size, write_size; int size, write_size;
int i, j; int i, j;
@@ -824,8 +822,6 @@ void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
break; break;
} }
} }
EXPORT_SYMBOL_GPL(emulate_vsx_store);
NOKPROBE_SYMBOL(emulate_vsx_store);
static nokprobe_inline int do_vsx_load(struct instruction_op *op, static nokprobe_inline int do_vsx_load(struct instruction_op *op,
unsigned long ea, struct pt_regs *regs, unsigned long ea, struct pt_regs *regs,

View File

@@ -12,7 +12,7 @@ kapi-hdrs-y := $(kapi)/unistd_nr.h
uapi-hdrs-y := $(uapi)/unistd_32.h uapi-hdrs-y := $(uapi)/unistd_32.h
uapi-hdrs-y += $(uapi)/unistd_64.h uapi-hdrs-y += $(uapi)/unistd_64.h
targets += $(addprefix ../../../,$(gen-y) $(kapi-hdrs-y) $(uapi-hdrs-y)) targets += $(addprefix ../../../../,$(gen-y) $(kapi-hdrs-y) $(uapi-hdrs-y))
PHONY += kapi uapi PHONY += kapi uapi

View File

@@ -133,7 +133,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
static void *c_start(struct seq_file *m, loff_t *pos) static void *c_start(struct seq_file *m, loff_t *pos)
{ {
return *pos < NR_CPUS ? cpu_data + *pos : NULL; return *pos < nr_cpu_ids ? cpu_data + *pos : NULL;
} }
static void *c_next(struct seq_file *m, void *v, loff_t *pos) static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{ {

View File

@@ -349,7 +349,7 @@ static struct platform_driver uml_net_driver = {
static void net_device_release(struct device *dev) static void net_device_release(struct device *dev)
{ {
struct uml_net *device = dev_get_drvdata(dev); struct uml_net *device = container_of(dev, struct uml_net, pdev.dev);
struct net_device *netdev = device->dev; struct net_device *netdev = device->dev;
struct uml_net_private *lp = netdev_priv(netdev); struct uml_net_private *lp = netdev_priv(netdev);

View File

@@ -854,7 +854,7 @@ static int ubd_open_dev(struct ubd *ubd_dev)
static void ubd_device_release(struct device *dev) static void ubd_device_release(struct device *dev)
{ {
struct ubd *ubd_dev = dev_get_drvdata(dev); struct ubd *ubd_dev = container_of(dev, struct ubd, pdev.dev);
blk_cleanup_queue(ubd_dev->queue); blk_cleanup_queue(ubd_dev->queue);
*ubd_dev = ((struct ubd) DEFAULT_UBD); *ubd_dev = ((struct ubd) DEFAULT_UBD);

View File

@@ -797,7 +797,8 @@ static struct platform_driver uml_net_driver = {
static void vector_device_release(struct device *dev) static void vector_device_release(struct device *dev)
{ {
struct vector_device *device = dev_get_drvdata(dev); struct vector_device *device =
container_of(dev, struct vector_device, pdev.dev);
struct net_device *netdev = device->dev; struct net_device *netdev = device->dev;
list_del(&device->list); list_del(&device->list);

View File

@@ -396,6 +396,6 @@ int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
{ {
int cpu = current_thread_info()->cpu; int cpu = current_thread_info()->cpu;
return save_i387_registers(userspace_pid[cpu], (unsigned long *) fpu); return save_i387_registers(userspace_pid[cpu], (unsigned long *) fpu) == 0;
} }

View File

@@ -115,7 +115,10 @@ static inline bool amd_gart_present(void)
#define amd_nb_num(x) 0 #define amd_nb_num(x) 0
#define amd_nb_has_feature(x) false #define amd_nb_has_feature(x) false
#define node_to_amd_nb(x) NULL static inline struct amd_northbridge *node_to_amd_nb(int node)
{
return NULL;
}
#define amd_gart_present(x) false #define amd_gart_present(x) false
#endif #endif

View File

@@ -1544,6 +1544,12 @@ void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
return; return;
clear_bit(BLK_MQ_S_STOPPED, &hctx->state); clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
/*
* Pairs with the smp_mb() in blk_mq_hctx_stopped() to order the
* clearing of BLK_MQ_S_STOPPED above and the checking of dispatch
* list in the subsequent routine.
*/
smp_mb__after_atomic();
blk_mq_run_hw_queue(hctx, async); blk_mq_run_hw_queue(hctx, async);
} }
EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue); EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);

View File

@@ -142,6 +142,19 @@ static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data
static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx) static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{ {
/* Fast path: hardware queue is not stopped most of the time. */
if (likely(!test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
return false;
/*
* This barrier is used to order adding of dispatch list before and
* the test of BLK_MQ_S_STOPPED below. Pairs with the memory barrier
* in blk_mq_start_stopped_hw_queue() so that dispatch code could
* either see BLK_MQ_S_STOPPED is cleared or dispatch list is not
* empty to avoid missing dispatching requests.
*/
smp_mb();
return test_bit(BLK_MQ_S_STOPPED, &hctx->state); return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
} }

View File

@@ -174,8 +174,10 @@ static int pcrypt_aead_encrypt(struct aead_request *req)
err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt); err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt);
if (!err) if (!err)
return -EINPROGRESS; return -EINPROGRESS;
if (err == -EBUSY) if (err == -EBUSY) {
return -EAGAIN; /* try non-parallel mode */
return crypto_aead_encrypt(creq);
}
return err; return err;
} }
@@ -220,8 +222,10 @@ static int pcrypt_aead_decrypt(struct aead_request *req)
err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pdecrypt); err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pdecrypt);
if (!err) if (!err)
return -EINPROGRESS; return -EINPROGRESS;
if (err == -EBUSY) if (err == -EBUSY) {
return -EAGAIN; /* try non-parallel mode */
return crypto_aead_decrypt(creq);
}
return err; return err;
} }

View File

@@ -286,7 +286,7 @@ static int __init gtdt_parse_timer_block(struct acpi_gtdt_timer_block *block,
if (frame->virt_irq > 0) if (frame->virt_irq > 0)
acpi_unregister_gsi(gtdt_frame->virtual_timer_interrupt); acpi_unregister_gsi(gtdt_frame->virtual_timer_interrupt);
frame->virt_irq = 0; frame->virt_irq = 0;
} while (i-- >= 0 && gtdt_frame--); } while (i-- > 0 && gtdt_frame--);
return -EINVAL; return -EINVAL;
} }

View File

@@ -397,12 +397,16 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
return IRQ_NONE; return IRQ_NONE;
} }
static struct lock_class_key regmap_irq_lock_class;
static struct lock_class_key regmap_irq_request_class;
static int regmap_irq_map(struct irq_domain *h, unsigned int virq, static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw) irq_hw_number_t hw)
{ {
struct regmap_irq_chip_data *data = h->host_data; struct regmap_irq_chip_data *data = h->host_data;
irq_set_chip_data(virq, data); irq_set_chip_data(virq, data);
irq_set_lockdep_class(virq, &regmap_irq_lock_class, &regmap_irq_request_class);
irq_set_chip(virq, &data->irq_chip); irq_set_chip(virq, &data->irq_chip);
irq_set_nested_thread(virq, 1); irq_set_nested_thread(virq, 1);
irq_set_parent(virq, data->irq); irq_set_parent(virq, data->irq);

View File

@@ -900,9 +900,11 @@ static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags)
map->refs--; map->refs--;
if (!map->refs) if (!map->refs)
hlist_del_init(&map->hn); hlist_del_init(&map->hn);
spin_unlock(&me->hlock); if (map->refs > 0) {
if (map->refs > 0) spin_unlock(&me->hlock);
return; return;
}
spin_unlock(&me->hlock);
} else { } else {
if (map->refs) if (map->refs)
map->refs--; map->refs--;

View File

@@ -9,6 +9,7 @@
*/ */
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/clk-provider.h> #include <linux/clk-provider.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/io.h> #include <linux/io.h>
@@ -414,7 +415,7 @@ static int axi_clkgen_probe(struct platform_device *pdev)
struct clk_init_data init = {}; struct clk_init_data init = {};
const char *parent_names[2]; const char *parent_names[2];
const char *clk_name; const char *clk_name;
struct resource *mem; struct clk *axi_clk;
unsigned int i; unsigned int i;
int ret; int ret;
@@ -429,14 +430,29 @@ static int axi_clkgen_probe(struct platform_device *pdev)
if (!axi_clkgen) if (!axi_clkgen)
return -ENOMEM; return -ENOMEM;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); axi_clkgen->base = devm_platform_ioremap_resource(pdev, 0);
axi_clkgen->base = devm_ioremap_resource(&pdev->dev, mem);
if (IS_ERR(axi_clkgen->base)) if (IS_ERR(axi_clkgen->base))
return PTR_ERR(axi_clkgen->base); return PTR_ERR(axi_clkgen->base);
init.num_parents = of_clk_get_parent_count(pdev->dev.of_node); init.num_parents = of_clk_get_parent_count(pdev->dev.of_node);
if (init.num_parents < 1 || init.num_parents > 2)
return -EINVAL; axi_clk = devm_clk_get_enabled(&pdev->dev, "s_axi_aclk");
if (!IS_ERR(axi_clk)) {
if (init.num_parents < 2 || init.num_parents > 3)
return -EINVAL;
init.num_parents -= 1;
} else {
/*
* Legacy... So that old DTs which do not have clock-names still
* work. In this case we don't explicitly enable the AXI bus
* clock.
*/
if (PTR_ERR(axi_clk) != -ENOENT)
return PTR_ERR(axi_clk);
if (init.num_parents < 1 || init.num_parents > 2)
return -EINVAL;
}
for (i = 0; i < init.num_parents; i++) { for (i = 0; i < init.num_parents; i++) {
parent_names[i] = of_clk_get_parent_name(pdev->dev.of_node, i); parent_names[i] = of_clk_get_parent_name(pdev->dev.of_node, i);

View File

@@ -544,6 +544,15 @@ config QM_DEBUGCC_SCUBA
SCUBA devices. SCUBA devices.
Say Y if you want to support the clock measurement functionality. Say Y if you want to support the clock measurement functionality.
config QM_LPASS_AUDIOCC_SCUBA
tristate "SCUBA Audio Clock Controllers"
select QM_GCC_SCUBA
help
Support for the LPASS(Low-Power Audio Subsytem) aon & audio clock
controllers on Qualcomm Technologies, Inc SCUBA devices.
Say Y if you want to use LPASS AUDIOCC and AONCC clocks required
to support audio devices and its functionalities.
config SDM_GCC_660 config SDM_GCC_660
tristate "SDM660 Global Clock Controller" tristate "SDM660 Global Clock Controller"
depends on COMMON_CLK_QCOM depends on COMMON_CLK_QCOM

View File

@@ -54,6 +54,7 @@ obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o
obj-$(CONFIG_QM_DISPCC_SCUBA) += dispcc-scuba.o obj-$(CONFIG_QM_DISPCC_SCUBA) += dispcc-scuba.o
obj-$(CONFIG_QM_GCC_SCUBA) += gcc-scuba.o obj-$(CONFIG_QM_GCC_SCUBA) += gcc-scuba.o
obj-$(CONFIG_QM_GPUCC_SCUBA) += gpucc-scuba.o obj-$(CONFIG_QM_GPUCC_SCUBA) += gpucc-scuba.o
obj-$(CONFIG_QM_LPASS_AUDIOCC_SCUBA) += lpassaoncc-scuba.o lpassaudiocc-scuba.o
obj-$(CONFIG_QM_DEBUGCC_SCUBA) += debugcc-scuba.o obj-$(CONFIG_QM_DEBUGCC_SCUBA) += debugcc-scuba.o
obj-$(CONFIG_SDM_CAMCC_LAGOON) += camcc-lagoon.o obj-$(CONFIG_SDM_CAMCC_LAGOON) += camcc-lagoon.o
obj-$(CONFIG_SDM_DEBUGCC_429W) += debugcc-sdm429w.o obj-$(CONFIG_SDM_DEBUGCC_429W) += debugcc-sdm429w.o

View File

@@ -44,7 +44,7 @@
#define PLL_USER_CTL(p) ((p)->offset + (p)->regs[PLL_OFF_USER_CTL]) #define PLL_USER_CTL(p) ((p)->offset + (p)->regs[PLL_OFF_USER_CTL])
#define PLL_POST_DIV_SHIFT 8 #define PLL_POST_DIV_SHIFT 8
#define PLL_POST_DIV_MASK(p) GENMASK((p)->width - 1, 0) #define PLL_POST_DIV_MASK(p) GENMASK((p)->width ? (p)->width - 1 : 3, 0)
#define PLL_ALPHA_EN BIT(24) #define PLL_ALPHA_EN BIT(24)
#define PLL_ALPHA_MODE BIT(25) #define PLL_ALPHA_MODE BIT(25)
#define PLL_VCO_SHIFT 20 #define PLL_VCO_SHIFT 20

View File

@@ -0,0 +1,854 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/clk-provider.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/pm_runtime.h>
#include <linux/pm_clock.h>
#include <dt-bindings/clock/qcom,scuba-lpassaoncc.h>
#include "clk-alpha-pll.h"
#include "clk-branch.h"
#include "clk-pll.h"
#include "clk-rcg.h"
#include "clk-regmap.h"
#include "clk-regmap-divider.h"
#include "clk-regmap-mux.h"
#include "common.h"
#include "reset.h"
#include "vdd-level.h"
static DEFINE_VDD_REGULATORS(vdd_lpi_cx, VDD_NUM + 1, 1, vdd_corner);
enum {
P_BI_TCXO,
P_LPASS_AON_CC_PLL_OUT_AUX,
P_LPASS_AON_CC_PLL_OUT_AUX2,
P_LPASS_AON_CC_PLL_OUT_EARLY,
P_LPASS_AUDIO_CC_PLL_MAIN_DIV_CLK,
P_LPASS_CORE_CC_PLL_ODD_CLK,
};
/*
 * Parent maps: each entry pairs a parent identifier with the hardware mux
 * select value for that RCG's source field; the matching *_names arrays
 * list the clock names in the same order.
 */

/* map 0: XO and the PLL early (undivided) output. */
static const struct parent_map lpass_aon_cc_parent_map_0[] = {
	{ P_BI_TCXO, 0 },
	{ P_LPASS_AON_CC_PLL_OUT_EARLY, 2 },
};

static const char * const lpass_aon_cc_parent_names_0[] = {
	"bi_tcxo",
	"lpass_aon_cc_pll",
};

/*
 * map 1: both mux positions resolve to XO; position 0 is paired with the
 * always-on XO handle ("bi_tcxo_ao") in the names array.
 */
static const struct parent_map lpass_aon_cc_parent_map_1[] = {
	{ P_BI_TCXO, 0 },
	{ P_BI_TCXO, 4 },
};

static const char * const lpass_aon_cc_parent_names_1_ao[] = {
	"bi_tcxo_ao",
	"bi_tcxo",
};

/* map 2: XO and the PLL aux2 (/2) output. */
static const struct parent_map lpass_aon_cc_parent_map_2[] = {
	{ P_BI_TCXO, 0 },
	{ P_LPASS_AON_CC_PLL_OUT_AUX2, 4 },
};

static const char * const lpass_aon_cc_parent_names_2[] = {
	"bi_tcxo",
	"lpass_aon_cc_pll_out_aux2",
};

/* map 3: XO, PLL aux output and two externally supplied PLL taps. */
static const struct parent_map lpass_aon_cc_parent_map_3[] = {
	{ P_BI_TCXO, 0 },
	{ P_LPASS_AON_CC_PLL_OUT_AUX, 1 },
	{ P_LPASS_CORE_CC_PLL_ODD_CLK, 2 },
	{ P_LPASS_AUDIO_CC_PLL_MAIN_DIV_CLK, 6 },
};

static const char * const lpass_aon_cc_parent_names_3[] = {
	"bi_tcxo",
	"lpass_aon_cc_pll_out_aux",
	"lpass_core_cc_pll_odd_clk",
	"lpass_audio_cc_pll_main_div_clk",
};

/* map 4: XO only. */
static const struct parent_map lpass_aon_cc_parent_map_4[] = {
	{ P_BI_TCXO, 0 },
};

static const char * const lpass_aon_cc_parent_names_4[] = {
	"bi_tcxo",
};
/* VCO operating bands for the AON PLL: { min_freq, max_freq, band value }. */
static const struct pll_vco aoncc_pll_vco[] = {
	{ 1000000000, 2000000000, 0 },
	{ 750000000, 1500000000, 1 },
	{ 500000000, 1000000000, 2 },
	{ 250000000, 500000000, 3 },
};

/*
 * 614.4 MHz Configuration: L = 0x20 (32) * 19.2 MHz XO = 614.4 MHz.
 * All four outputs (main/aux/aux2/early) are enabled via the output masks.
 */
static const struct alpha_pll_config lpass_aon_cc_pll_config = {
	.l = 0x20,
	.config_ctl_val = 0x4001055b,
	.user_ctl_val = 0x200101,
	.user_ctl_hi_val = 0x4,
	.test_ctl_val = 0,
	.test_ctl_hi_val = 0x1,
	.main_output_mask = BIT(0),
	.aux_output_mask = BIT(1),
	.aux2_output_mask = BIT(2),
	.early_output_mask = BIT(3),
	.post_div_val = 0x281 << 8,
	.post_div_mask = GENMASK(17, 8),
	.vco_val = 0x2 << 20,
	.vco_mask = GENMASK(21, 20),
	.test_ctl_mask = GENMASK(31, 0),
	.test_ctl_hi_mask = 0x1,
};
/* The AON alpha PLL itself, fed from XO; configured at probe time. */
static struct clk_alpha_pll lpass_aon_cc_pll = {
	.offset = 0x0,
	.vco_table = aoncc_pll_vco,
	.num_vco = ARRAY_SIZE(aoncc_pll_vco),
	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
	.clkr = {
		.hw.init = &(const struct clk_init_data) {
			.name = "lpass_aon_cc_pll",
			.parent_names = (const char *[]){ "bi_tcxo" },
			.num_parents = 1,
			.ops = &clk_alpha_pll_ops,
			.vdd_class = &vdd_lpi_cx,
			.num_rate_max = VDD_NUM,
			.rate_max = (unsigned long[VDD_NUM]) {
				[VDD_MIN] = 1000000000,
				[VDD_NOMINAL] = 2000000000},
		},
	},
};

/* Fixed /5 post-divider on the PLL aux output (614.4 / 5 = 122.88 MHz). */
static const struct clk_div_table post_div_table_lpass_aon_cc_pll_out_aux[] = {
	{ 0x5, 5 },
	{ }
};

static struct clk_alpha_pll_postdiv lpass_aon_cc_pll_out_aux = {
	.offset = 0x0,
	.post_div_shift = 15,
	.post_div_table = post_div_table_lpass_aon_cc_pll_out_aux,
	.num_post_div = ARRAY_SIZE(post_div_table_lpass_aon_cc_pll_out_aux),
	.width = 3,
	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
	.clkr.hw.init = &(const struct clk_init_data) {
		.name = "lpass_aon_cc_pll_out_aux",
		.parent_names = (const char *[]){ "lpass_aon_cc_pll" },
		.num_parents = 1,
		.ops = &clk_alpha_pll_postdiv_fabia_ops,
	},
};

/* Fixed /2 post-divider on the PLL aux2 output (614.4 / 2 = 307.2 MHz). */
static const struct clk_div_table post_div_table_lpass_aon_cc_pll_out_aux2[] = {
	{ 0x1, 2 },
	{ }
};

static struct clk_alpha_pll_postdiv lpass_aon_cc_pll_out_aux2 = {
	.offset = 0x0,
	.post_div_shift = 8,
	.post_div_table = post_div_table_lpass_aon_cc_pll_out_aux2,
	.num_post_div = ARRAY_SIZE(post_div_table_lpass_aon_cc_pll_out_aux2),
	.width = 4,
	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
	.clkr.hw.init = &(const struct clk_init_data) {
		.name = "lpass_aon_cc_pll_out_aux2",
		.parent_names = (const char *[]){ "lpass_aon_cc_pll" },
		.num_parents = 1,
		.ops = &clk_alpha_pll_postdiv_fabia_ops,
	},
};
/*
 * Read-only register dividers (CDIVs) placed after the TX MCLK and VA RCGs;
 * ro ops because firmware owns the divider setting.
 */
static struct clk_regmap_div lpass_aon_cc_cdiv_tx_mclk_div_clk_src = {
	.reg = 0x13010,
	.shift = 0,
	.width = 2,
	.clkr.hw.init = &(const struct clk_init_data) {
		.name = "lpass_aon_cc_cdiv_tx_mclk_div_clk_src",
		.parent_names = (const char *[])
				{ "lpass_aon_cc_tx_mclk_rcg_clk_src" },
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_regmap_div_ro_ops,
	},
};

static struct clk_regmap_div lpass_aon_cc_cdiv_va_div_clk_src = {
	.reg = 0x12010,
	.shift = 0,
	.width = 2,
	.clkr.hw.init = &(const struct clk_init_data) {
		.name = "lpass_aon_cc_cdiv_va_div_clk_src",
		.parent_names = (const char *[])
				{ "lpass_aon_cc_va_rcg_clk_src" },
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_regmap_div_ro_ops,
	},
};
/*
 * Root clock generators (RCGs). Frequency tables use
 * F(freq, parent, pre_div, m, n); m = n = 0 means no MND divider.
 * RCGs with a vdd_class constrain the vdd_lpi_cx corner per rate.
 */

/* CPR and Q6 XO sources: XO passthrough only. */
static const struct freq_tbl ftbl_lpass_aon_cc_cpr_clk_src[] = {
	F(19200000, P_BI_TCXO, 1, 0, 0),
	{ }
};

static struct clk_rcg2 lpass_aon_cc_cpr_clk_src = {
	.cmd_rcgr = 0x2004,
	.mnd_width = 0,
	.hid_width = 5,
	.parent_map = lpass_aon_cc_parent_map_1,
	.freq_tbl = ftbl_lpass_aon_cc_cpr_clk_src,
	.enable_safe_config = true,
	.clkr.hw.init = &(const struct clk_init_data) {
		.name = "lpass_aon_cc_cpr_clk_src",
		.parent_names = lpass_aon_cc_parent_names_1_ao,
		.num_parents = ARRAY_SIZE(lpass_aon_cc_parent_names_1_ao),
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_rcg2_ops,
	},
};

/* Main AON bus source: 307.2 MHz aux2 output divided by 8 or 4. */
static const struct freq_tbl ftbl_lpass_aon_cc_main_rcg_clk_src[] = {
	F(38400000, P_LPASS_AON_CC_PLL_OUT_AUX2, 8, 0, 0),
	F(76800000, P_LPASS_AON_CC_PLL_OUT_AUX2, 4, 0, 0),
	{ }
};

static struct clk_rcg2 lpass_aon_cc_main_rcg_clk_src = {
	.cmd_rcgr = 0x1000,
	.mnd_width = 0,
	.hid_width = 5,
	.parent_map = lpass_aon_cc_parent_map_2,
	.freq_tbl = ftbl_lpass_aon_cc_main_rcg_clk_src,
	.enable_safe_config = true,
	.clkr.hw.init = &(const struct clk_init_data) {
		.name = "lpass_aon_cc_main_rcg_clk_src",
		.parent_names = lpass_aon_cc_parent_names_2,
		.num_parents = ARRAY_SIZE(lpass_aon_cc_parent_names_2),
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_rcg2_ops,
		.vdd_class = &vdd_lpi_cx,
		.num_rate_max = VDD_NUM,
		.rate_max = (unsigned long[VDD_NUM]) {
			[VDD_LOWER] = 38400000,
			[VDD_NOMINAL] = 76800000},
	},
};

/* Q6 XO source: reuses the CPR frequency table (XO only). */
static struct clk_rcg2 lpass_aon_cc_q6_xo_clk_src = {
	.cmd_rcgr = 0x8004,
	.mnd_width = 0,
	.hid_width = 5,
	.parent_map = lpass_aon_cc_parent_map_1,
	.freq_tbl = ftbl_lpass_aon_cc_cpr_clk_src,
	.enable_safe_config = true,
	.clkr.hw.init = &(const struct clk_init_data) {
		.name = "lpass_aon_cc_q6_xo_clk_src",
		.parent_names = lpass_aon_cc_parent_names_1_ao,
		.num_parents = ARRAY_SIZE(lpass_aon_cc_parent_names_1_ao),
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_rcg2_ops,
	},
};

/* TX MCLK source: XO, or 122.88 MHz aux output / 5 = 24.576 MHz. */
static const struct freq_tbl ftbl_lpass_aon_cc_tx_mclk_rcg_clk_src[] = {
	F(19200000, P_BI_TCXO, 1, 0, 0),
	F(24576000, P_LPASS_AON_CC_PLL_OUT_AUX, 5, 0, 0),
	{ }
};

static struct clk_rcg2 lpass_aon_cc_tx_mclk_rcg_clk_src = {
	.cmd_rcgr = 0x13004,
	.mnd_width = 0,
	.hid_width = 5,
	.parent_map = lpass_aon_cc_parent_map_3,
	.freq_tbl = ftbl_lpass_aon_cc_tx_mclk_rcg_clk_src,
	.enable_safe_config = true,
	.clkr.hw.init = &(const struct clk_init_data) {
		.name = "lpass_aon_cc_tx_mclk_rcg_clk_src",
		.parent_names = lpass_aon_cc_parent_names_3,
		.num_parents = ARRAY_SIZE(lpass_aon_cc_parent_names_3),
		/* Note: no CLK_SET_RATE_PARENT here, unlike the other RCGs. */
		.ops = &clk_rcg2_ops,
		.vdd_class = &vdd_lpi_cx,
		.num_rate_max = VDD_NUM,
		.rate_max = (unsigned long[VDD_NUM]) {
			[VDD_LOWER] = 24576000},
	},
};

/* VA source: XO passthrough only. */
static const struct freq_tbl ftbl_lpass_aon_cc_va_rcg_clk_src[] = {
	F(19200000, P_BI_TCXO, 1, 0, 0),
	{ }
};

static struct clk_rcg2 lpass_aon_cc_va_rcg_clk_src = {
	.cmd_rcgr = 0x12004,
	.mnd_width = 0,
	.hid_width = 5,
	.parent_map = lpass_aon_cc_parent_map_4,
	.freq_tbl = ftbl_lpass_aon_cc_va_rcg_clk_src,
	.enable_safe_config = true,
	.clkr.hw.init = &(const struct clk_init_data) {
		.name = "lpass_aon_cc_va_rcg_clk_src",
		.parent_names = lpass_aon_cc_parent_names_4,
		.num_parents = ARRAY_SIZE(lpass_aon_cc_parent_names_4),
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_rcg2_ops,
		.vdd_class = &vdd_lpi_cx,
		.num_rate_max = VDD_NUM,
		.rate_max = (unsigned long[VDD_NUM]) {
			[VDD_LOWER] = 19200000},
	},
};

/* Voltage-sensor sources (CX and MX rails): XO or the PLL early output. */
static const struct freq_tbl ftbl_lpass_aon_cc_vs_vddcx_clk_src[] = {
	F(19200000, P_BI_TCXO, 1, 0, 0),
	F(614400000, P_LPASS_AON_CC_PLL_OUT_EARLY, 1, 0, 0),
	{ }
};

static struct clk_rcg2 lpass_aon_cc_vs_vddcx_clk_src = {
	.cmd_rcgr = 0x15010,
	.mnd_width = 0,
	.hid_width = 5,
	.parent_map = lpass_aon_cc_parent_map_0,
	.freq_tbl = ftbl_lpass_aon_cc_vs_vddcx_clk_src,
	.enable_safe_config = true,
	.clkr.hw.init = &(const struct clk_init_data) {
		.name = "lpass_aon_cc_vs_vddcx_clk_src",
		.parent_names = lpass_aon_cc_parent_names_0,
		.num_parents = ARRAY_SIZE(lpass_aon_cc_parent_names_0),
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_rcg2_ops,
		.vdd_class = &vdd_lpi_cx,
		.num_rate_max = VDD_NUM,
		.rate_max = (unsigned long[VDD_NUM]) {
			[VDD_LOWER] = 614400000},
	},
};

static struct clk_rcg2 lpass_aon_cc_vs_vddmx_clk_src = {
	.cmd_rcgr = 0x15000,
	.mnd_width = 0,
	.hid_width = 5,
	.parent_map = lpass_aon_cc_parent_map_0,
	.freq_tbl = ftbl_lpass_aon_cc_vs_vddcx_clk_src,
	.enable_safe_config = true,
	.clkr.hw.init = &(const struct clk_init_data) {
		.name = "lpass_aon_cc_vs_vddmx_clk_src",
		.parent_names = lpass_aon_cc_parent_names_0,
		.num_parents = ARRAY_SIZE(lpass_aon_cc_parent_names_0),
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_rcg2_ops,
		.vdd_class = &vdd_lpi_cx,
		.num_rate_max = VDD_NUM,
		.rate_max = (unsigned long[VDD_NUM]) {
			[VDD_LOWER] = 614400000},
	},
};
/*
 * Branch (gate) clocks. halt_check selects how enable is verified:
 * BRANCH_HALT polls the halt bit, BRANCH_HALT_VOTED accounts for shared
 * voting, BRANCH_HALT_DELAY just waits. Branches with no parent_names
 * gate an externally sourced clock.
 */
static struct clk_branch lpass_aon_cc_ahb_timeout_clk = {
	.halt_reg = 0x9030,
	.halt_check = BRANCH_HALT_VOTED,
	.clkr = {
		.enable_reg = 0x9030,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "lpass_aon_cc_ahb_timeout_clk",
			.parent_names = (const char *[]){
				"lpass_aon_cc_main_rcg_clk_src",
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch lpass_aon_cc_aon_h_clk = {
	.halt_reg = 0x903c,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x903c,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "lpass_aon_cc_aon_h_clk",
			.parent_names = (const char *[]){
				"lpass_aon_cc_main_rcg_clk_src",
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch lpass_aon_cc_bus_alt_clk = {
	.halt_reg = 0x9048,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x9048,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "lpass_aon_cc_bus_alt_clk",
			.parent_names = (const char *[]){
				"lpass_aon_cc_main_rcg_clk_src",
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch lpass_aon_cc_csr_h_clk = {
	.halt_reg = 0x9010,
	.halt_check = BRANCH_HALT_VOTED,
	.clkr = {
		.enable_reg = 0x9010,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "lpass_aon_cc_csr_h_clk",
			.parent_names = (const char *[]){
				"lpass_aon_cc_main_rcg_clk_src",
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch lpass_aon_cc_mcc_access_clk = {
	.halt_reg = 0x904c,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x904c,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "lpass_aon_cc_mcc_access_clk",
			.parent_names = (const char *[]){
				"lpass_aon_cc_main_rcg_clk_src",
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch lpass_aon_cc_pdc_h_clk = {
	.halt_reg = 0x900c,
	.halt_check = BRANCH_HALT_VOTED,
	.clkr = {
		.enable_reg = 0x900c,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "lpass_aon_cc_pdc_h_clk",
			.parent_names = (const char *[]){
				"lpass_aon_cc_main_rcg_clk_src",
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch lpass_aon_cc_q6_atbm_clk = {
	.halt_reg = 0xa010,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0xa010,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "lpass_aon_cc_q6_atbm_clk",
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch lpass_aon_cc_qsm_xo_clk = {
	.halt_reg = 0x6000,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x6000,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "lpass_aon_cc_qsm_xo_clk",
			.parent_names = (const char *[]){
				"lpass_aon_cc_q6_xo_clk_src",
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

/* Only branch with hardware clock gating (hwcg) enabled. */
static struct clk_branch lpass_aon_cc_rsc_hclk_clk = {
	.halt_reg = 0x9078,
	.halt_check = BRANCH_HALT,
	.hwcg_reg = 0x9078,
	.hwcg_bit = 1,
	.clkr = {
		.enable_reg = 0x9078,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "lpass_aon_cc_rsc_hclk_clk",
			.parent_names = (const char *[]){
				"lpass_aon_cc_main_rcg_clk_src",
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch lpass_aon_cc_sleep_clk = {
	.halt_reg = 0x10004,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x10004,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "lpass_aon_cc_sleep_clk",
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch lpass_aon_cc_ssc_h_clk = {
	.halt_reg = 0x9040,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x9040,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "lpass_aon_cc_ssc_h_clk",
			.parent_names = (const char *[]){
				"lpass_aon_cc_main_rcg_clk_src",
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

/* 2x taps come straight off the RCG; 1x taps go through the CDIV. */
static struct clk_branch lpass_aon_cc_tx_mclk_2x_clk = {
	.halt_reg = 0x1300c,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x1300c,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "lpass_aon_cc_tx_mclk_2x_clk",
			.parent_names = (const char *[]){
				"lpass_aon_cc_tx_mclk_rcg_clk_src",
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch lpass_aon_cc_tx_mclk_clk = {
	.halt_reg = 0x13014,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x13014,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "lpass_aon_cc_tx_mclk_clk",
			.parent_names = (const char *[]){
				"lpass_aon_cc_cdiv_tx_mclk_div_clk_src",
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch lpass_aon_cc_va_2x_clk = {
	.halt_reg = 0x1200c,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x1200c,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "lpass_aon_cc_va_2x_clk",
			.parent_names = (const char *[]){
				"lpass_aon_cc_va_rcg_clk_src",
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch lpass_aon_cc_va_clk = {
	.halt_reg = 0x12014,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x12014,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "lpass_aon_cc_va_clk",
			.parent_names = (const char *[]){
				"lpass_aon_cc_cdiv_va_div_clk_src",
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch lpass_aon_cc_va_mem0_clk = {
	.halt_reg = 0x9028,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x9028,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "lpass_aon_cc_va_mem0_clk",
			.parent_names = (const char *[]){
				"lpass_aon_cc_main_rcg_clk_src",
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch lpass_aon_cc_vs_vddcx_clk = {
	.halt_reg = 0x15018,
	.halt_check = BRANCH_HALT_VOTED,
	.clkr = {
		.enable_reg = 0x15018,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "lpass_aon_cc_vs_vddcx_clk",
			.parent_names = (const char *[]){
				"lpass_aon_cc_vs_vddcx_clk_src",
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch lpass_aon_cc_vs_vddmx_clk = {
	.halt_reg = 0x15008,
	.halt_check = BRANCH_HALT_VOTED,
	.clkr = {
		.enable_reg = 0x15008,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "lpass_aon_cc_vs_vddmx_clk",
			.parent_names = (const char *[]){
				"lpass_aon_cc_vs_vddmx_clk_src",
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

/* QDSP6SS gates live in a separate register block (offsets 0x38/0x3c). */
static struct clk_branch lpass_qdsp6ss_sleep_clk = {
	.halt_reg = 0x3c,
	.halt_check = BRANCH_HALT_DELAY,
	.clkr = {
		.enable_reg = 0x3c,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "lpass_qdsp6ss_sleep_clk",
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch lpass_qdsp6ss_xo_clk = {
	.halt_reg = 0x38,
	.halt_check = BRANCH_HALT_DELAY,
	.clkr = {
		.enable_reg = 0x38,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "lpass_qdsp6ss_xo_clk",
			.ops = &clk_branch2_ops,
		},
	},
};
/*
 * Clock lookup table, indexed by the DT binding identifiers from
 * dt-bindings/clock/qcom,scuba-lpassaoncc.h.
 */
static struct clk_regmap *lpass_aon_cc_scuba_clocks[] = {
	[LPASS_AON_CC_AHB_TIMEOUT_CLK] = &lpass_aon_cc_ahb_timeout_clk.clkr,
	[LPASS_AON_CC_AON_H_CLK] = &lpass_aon_cc_aon_h_clk.clkr,
	[LPASS_AON_CC_BUS_ALT_CLK] = &lpass_aon_cc_bus_alt_clk.clkr,
	[LPASS_AON_CC_CDIV_TX_MCLK_DIV_CLK_SRC] =
		&lpass_aon_cc_cdiv_tx_mclk_div_clk_src.clkr,
	[LPASS_AON_CC_CDIV_VA_DIV_CLK_SRC] =
		&lpass_aon_cc_cdiv_va_div_clk_src.clkr,
	[LPASS_AON_CC_CPR_CLK_SRC] = &lpass_aon_cc_cpr_clk_src.clkr,
	[LPASS_AON_CC_CSR_H_CLK] = &lpass_aon_cc_csr_h_clk.clkr,
	[LPASS_AON_CC_MAIN_RCG_CLK_SRC] = &lpass_aon_cc_main_rcg_clk_src.clkr,
	[LPASS_AON_CC_MCC_ACCESS_CLK] = &lpass_aon_cc_mcc_access_clk.clkr,
	[LPASS_AON_CC_PDC_H_CLK] = &lpass_aon_cc_pdc_h_clk.clkr,
	[LPASS_AON_CC_PLL] = &lpass_aon_cc_pll.clkr,
	[LPASS_AON_CC_PLL_OUT_AUX] = &lpass_aon_cc_pll_out_aux.clkr,
	[LPASS_AON_CC_PLL_OUT_AUX2] = &lpass_aon_cc_pll_out_aux2.clkr,
	[LPASS_AON_CC_Q6_ATBM_CLK] = &lpass_aon_cc_q6_atbm_clk.clkr,
	[LPASS_AON_CC_Q6_XO_CLK_SRC] = &lpass_aon_cc_q6_xo_clk_src.clkr,
	[LPASS_AON_CC_QSM_XO_CLK] = &lpass_aon_cc_qsm_xo_clk.clkr,
	[LPASS_AON_CC_RSC_HCLK_CLK] = &lpass_aon_cc_rsc_hclk_clk.clkr,
	[LPASS_AON_CC_SLEEP_CLK] = &lpass_aon_cc_sleep_clk.clkr,
	[LPASS_AON_CC_SSC_H_CLK] = &lpass_aon_cc_ssc_h_clk.clkr,
	[LPASS_AON_CC_TX_MCLK_2X_CLK] = &lpass_aon_cc_tx_mclk_2x_clk.clkr,
	[LPASS_AON_CC_TX_MCLK_CLK] = &lpass_aon_cc_tx_mclk_clk.clkr,
	[LPASS_AON_CC_TX_MCLK_RCG_CLK_SRC] =
		&lpass_aon_cc_tx_mclk_rcg_clk_src.clkr,
	[LPASS_AON_CC_VA_2X_CLK] = &lpass_aon_cc_va_2x_clk.clkr,
	[LPASS_AON_CC_VA_CLK] = &lpass_aon_cc_va_clk.clkr,
	[LPASS_AON_CC_VA_MEM0_CLK] = &lpass_aon_cc_va_mem0_clk.clkr,
	[LPASS_AON_CC_VA_RCG_CLK_SRC] = &lpass_aon_cc_va_rcg_clk_src.clkr,
	[LPASS_AON_CC_VS_VDDCX_CLK] = &lpass_aon_cc_vs_vddcx_clk.clkr,
	[LPASS_AON_CC_VS_VDDCX_CLK_SRC] = &lpass_aon_cc_vs_vddcx_clk_src.clkr,
	[LPASS_AON_CC_VS_VDDMX_CLK] = &lpass_aon_cc_vs_vddmx_clk.clkr,
	[LPASS_AON_CC_VS_VDDMX_CLK_SRC] = &lpass_aon_cc_vs_vddmx_clk_src.clkr,
	[LPASS_QDSP6SS_SLEEP_CLK] = &lpass_qdsp6ss_sleep_clk.clkr,
	[LPASS_QDSP6SS_XO_CLK] = &lpass_qdsp6ss_xo_clk.clkr,
};

/* 32-bit MMIO register map covering the whole AON clock controller. */
static const struct regmap_config lpass_aon_cc_scuba_regmap_config = {
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	.max_register = 0x20008,
	.fast_io = true,
};

static const struct qcom_cc_desc lpass_aon_cc_scuba_desc = {
	.config = &lpass_aon_cc_scuba_regmap_config,
	.clks = lpass_aon_cc_scuba_clocks,
	.num_clks = ARRAY_SIZE(lpass_aon_cc_scuba_clocks),
};

static const struct of_device_id lpass_aon_cc_scuba_match_table[] = {
	{ .compatible = "qcom,lpassaoncc-scuba" },
	{ }
};
MODULE_DEVICE_TABLE(of, lpass_aon_cc_scuba_match_table);
/*
 * lpass_aon_cc_scuba_probe() - register the LPASS AON clock controller.
 *
 * Acquires the vdd_lpi_cx corner regulator, enables runtime PM, attaches
 * the GCC interface clock via pm_clk, maps the controller registers,
 * programs the AON PLL (614.4 MHz) and registers all clocks.
 *
 * Fixes over the previous version:
 *  - pm_runtime_get_sync() returns 1 (not an error) when the device is
 *    already active; only negative values indicate failure, so check
 *    ret < 0 and drop the usage count get_sync took on failure.
 *  - pm_runtime_enable() is now balanced by pm_runtime_disable() on
 *    every error path (probe failure means no remove callback will run).
 *
 * Return: 0 on success, negative errno on failure.
 */
static int lpass_aon_cc_scuba_probe(struct platform_device *pdev)
{
	struct regmap *regmap;
	int ret;

	vdd_lpi_cx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_lpi_cx");
	if (IS_ERR(vdd_lpi_cx.regulator[0])) {
		/* Stay quiet on probe deferral; it is retried later. */
		if (PTR_ERR(vdd_lpi_cx.regulator[0]) != -EPROBE_DEFER)
			dev_err(&pdev->dev, "Unable to get vdd_lpi_cx regulator\n");
		return PTR_ERR(vdd_lpi_cx.regulator[0]);
	}

	pm_runtime_enable(&pdev->dev);

	ret = pm_clk_create(&pdev->dev);
	if (ret)
		goto err_disable_rpm;

	ret = pm_clk_add(&pdev->dev, "iface_clk");
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to acquire gcc sway clock\n");
		goto err_destroy_pm_clk;
	}

	/* Positive return (already active) is success, not an error. */
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		/* get_sync increments the usage count even on failure. */
		pm_runtime_put_noidle(&pdev->dev);
		goto err_destroy_pm_clk;
	}

	regmap = qcom_cc_map(pdev, &lpass_aon_cc_scuba_desc);
	if (IS_ERR(regmap)) {
		ret = PTR_ERR(regmap);
		goto err_put_rpm;
	}

	clk_alpha_pll_configure(&lpass_aon_cc_pll, regmap,
				&lpass_aon_cc_pll_config);

	/*
	 * Keep clocks always enabled:
	 *	lpass_aon_cc_q6_ahbs_clk
	 *	lpass_aon_cc_q6_ahbm_clk
	 */
	regmap_update_bits(regmap, 0x9020, BIT(0), BIT(0));
	regmap_update_bits(regmap, 0x901C, BIT(0), BIT(0));

	ret = qcom_cc_really_probe(pdev, &lpass_aon_cc_scuba_desc, regmap);
	if (ret) {
		dev_err(&pdev->dev, "Register Fail LPASS Aon clocks ret=%d\n",
			ret);
		goto err_put_rpm;
	}

	pm_runtime_put_sync(&pdev->dev);
	dev_info(&pdev->dev, "Registered LPASS Aon clocks\n");

	return 0;

err_put_rpm:
	pm_runtime_put_sync(&pdev->dev);
err_destroy_pm_clk:
	pm_clk_destroy(&pdev->dev);
err_disable_rpm:
	pm_runtime_disable(&pdev->dev);

	return ret;
}
/* Runtime PM just gates/ungates the pm_clk-managed interface clock. */
static const struct dev_pm_ops lpass_aon_cc_scuba_pm_ops = {
	SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
};

static struct platform_driver lpass_aon_cc_scuba_driver = {
	.probe = lpass_aon_cc_scuba_probe,
	.driver = {
		.name = "lpassaoncc-scuba",
		.of_match_table = lpass_aon_cc_scuba_match_table,
		.pm = &lpass_aon_cc_scuba_pm_ops,
	},
};

/*
 * Registered at subsys_initcall level (not module_init) so the clocks
 * are available before dependent device drivers probe.
 */
static int __init lpass_aon_cc_scuba_init(void)
{
	return platform_driver_register(&lpass_aon_cc_scuba_driver);
}
subsys_initcall(lpass_aon_cc_scuba_init);

static void __exit lpass_aon_cc_scuba_exit(void)
{
	platform_driver_unregister(&lpass_aon_cc_scuba_driver);
}
module_exit(lpass_aon_cc_scuba_exit);

MODULE_DESCRIPTION("QTI LPASSAONCC SCUBA Driver");
MODULE_LICENSE("GPL v2");

File diff suppressed because it is too large Load Diff

View File

@@ -166,7 +166,9 @@ static int __init cpufreq_init(void)
ret = cpufreq_register_driver(&loongson2_cpufreq_driver); ret = cpufreq_register_driver(&loongson2_cpufreq_driver);
if (!ret && !nowait) { if (ret) {
platform_driver_unregister(&platform_driver);
} else if (!nowait) {
saved_cpu_wait = cpu_wait; saved_cpu_wait = cpu_wait;
cpu_wait = loongson2_cpu_wait; cpu_wait = loongson2_cpu_wait;
} }

View File

@@ -199,7 +199,7 @@ int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* @drv: cpuidle driver for this cpu * @drv: cpuidle driver for this cpu
* @index: index into the states table in @drv of the state to enter * @index: index into the states table in @drv of the state to enter
*/ */
int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, int __nocfi cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
int index) int index)
{ {
int entered_state; int entered_state;

View File

@@ -2510,6 +2510,7 @@ static int ahash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
static int ahash_hmac_init(struct ahash_request *req) static int ahash_hmac_init(struct ahash_request *req)
{ {
int ret;
struct iproc_reqctx_s *rctx = ahash_request_ctx(req); struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm); struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
@@ -2519,7 +2520,9 @@ static int ahash_hmac_init(struct ahash_request *req)
flow_log("ahash_hmac_init()\n"); flow_log("ahash_hmac_init()\n");
/* init the context as a hash */ /* init the context as a hash */
ahash_init(req); ret = ahash_init(req);
if (ret)
return ret;
if (!spu_no_incr_hash(ctx)) { if (!spu_no_incr_hash(ctx)) {
/* SPU-M can do incr hashing but needs sw for outer HMAC */ /* SPU-M can do incr hashing but needs sw for outer HMAC */

View File

@@ -48,7 +48,7 @@ static void cpt_disable_cores(struct cpt_device *cpt, u64 coremask,
dev_err(dev, "Cores still busy %llx", coremask); dev_err(dev, "Cores still busy %llx", coremask);
grp = cpt_read_csr64(cpt->reg_base, grp = cpt_read_csr64(cpt->reg_base,
CPTX_PF_EXEC_BUSY(0)); CPTX_PF_EXEC_BUSY(0));
if (timeout--) if (!timeout--)
break; break;
udelay(CSR_DELAY); udelay(CSR_DELAY);
@@ -306,6 +306,8 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
ret = do_cpt_init(cpt, mcode); ret = do_cpt_init(cpt, mcode);
if (ret) { if (ret) {
dma_free_coherent(&cpt->pdev->dev, mcode->code_size,
mcode->code, mcode->phys_base);
dev_err(dev, "do_cpt_init failed with ret: %d\n", ret); dev_err(dev, "do_cpt_init failed with ret: %d\n", ret);
goto fw_release; goto fw_release;
} }
@@ -398,7 +400,7 @@ static void cpt_disable_all_cores(struct cpt_device *cpt)
dev_err(dev, "Cores still busy"); dev_err(dev, "Cores still busy");
grp = cpt_read_csr64(cpt->reg_base, grp = cpt_read_csr64(cpt->reg_base,
CPTX_PF_EXEC_BUSY(0)); CPTX_PF_EXEC_BUSY(0));
if (timeout--) if (!timeout--)
break; break;
udelay(CSR_DELAY); udelay(CSR_DELAY);

View File

@@ -327,21 +327,25 @@ static void fsl_mc_check(struct mem_ctl_info *mci)
* TODO: Add support for 32-bit wide buses * TODO: Add support for 32-bit wide buses
*/ */
if ((err_detect & DDR_EDE_SBE) && (bus_width == 64)) { if ((err_detect & DDR_EDE_SBE) && (bus_width == 64)) {
u64 cap = (u64)cap_high << 32 | cap_low;
u32 s = syndrome;
sbe_ecc_decode(cap_high, cap_low, syndrome, sbe_ecc_decode(cap_high, cap_low, syndrome,
&bad_data_bit, &bad_ecc_bit); &bad_data_bit, &bad_ecc_bit);
if (bad_data_bit != -1) if (bad_data_bit >= 0) {
fsl_mc_printk(mci, KERN_ERR, fsl_mc_printk(mci, KERN_ERR, "Faulty Data bit: %d\n", bad_data_bit);
"Faulty Data bit: %d\n", bad_data_bit); cap ^= 1ULL << bad_data_bit;
if (bad_ecc_bit != -1) }
fsl_mc_printk(mci, KERN_ERR,
"Faulty ECC bit: %d\n", bad_ecc_bit); if (bad_ecc_bit >= 0) {
fsl_mc_printk(mci, KERN_ERR, "Faulty ECC bit: %d\n", bad_ecc_bit);
s ^= 1 << bad_ecc_bit;
}
fsl_mc_printk(mci, KERN_ERR, fsl_mc_printk(mci, KERN_ERR,
"Expected Data / ECC:\t%#8.8x_%08x / %#2.2x\n", "Expected Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
cap_high ^ (1 << (bad_data_bit - 32)), upper_32_bits(cap), lower_32_bits(cap), s);
cap_low ^ (1 << bad_data_bit),
syndrome ^ (1 << bad_ecc_bit));
} }
fsl_mc_printk(mci, KERN_ERR, fsl_mc_printk(mci, KERN_ERR,

View File

@@ -638,6 +638,9 @@ static struct scpi_dvfs_info *scpi_dvfs_get_info(u8 domain)
if (ret) if (ret)
return ERR_PTR(ret); return ERR_PTR(ret);
if (!buf.opp_count)
return ERR_PTR(-ENOENT);
info = kmalloc(sizeof(*info), GFP_KERNEL); info = kmalloc(sizeof(*info), GFP_KERNEL);
if (!info) if (!info)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);

View File

@@ -164,7 +164,7 @@ static void show_leaks(struct drm_mm *mm) { }
INTERVAL_TREE_DEFINE(struct drm_mm_node, rb, INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
u64, __subtree_last, u64, __subtree_last,
START, LAST, static inline, drm_mm_interval_tree) START, LAST, static inline __maybe_unused, drm_mm_interval_tree)
struct drm_mm_node * struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last) __drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)

View File

@@ -108,17 +108,6 @@ static inline size_t size_vstruct(size_t nelem, size_t elem_size, size_t base)
return base + nelem * elem_size; return base + nelem * elem_size;
} }
/* returns true if fence a comes after fence b */
static inline bool fence_after(u32 a, u32 b)
{
return (s32)(a - b) > 0;
}
static inline bool fence_after_eq(u32 a, u32 b)
{
return (s32)(a - b) >= 0;
}
/* /*
* Etnaviv timeouts are specified wrt CLOCK_MONOTONIC, not jiffies. * Etnaviv timeouts are specified wrt CLOCK_MONOTONIC, not jiffies.
* We need to calculate the timeout in terms of number of jiffies * We need to calculate the timeout in terms of number of jiffies

View File

@@ -73,7 +73,7 @@ static void etnaviv_core_dump_header(struct core_dump_iterator *iter,
hdr->file_size = cpu_to_le32(data_end - iter->data); hdr->file_size = cpu_to_le32(data_end - iter->data);
iter->hdr++; iter->hdr++;
iter->data += hdr->file_size; iter->data += le32_to_cpu(hdr->file_size);
} }
static void etnaviv_core_dump_registers(struct core_dump_iterator *iter, static void etnaviv_core_dump_registers(struct core_dump_iterator *iter,
@@ -81,10 +81,15 @@ static void etnaviv_core_dump_registers(struct core_dump_iterator *iter,
{ {
struct etnaviv_dump_registers *reg = iter->data; struct etnaviv_dump_registers *reg = iter->data;
unsigned int i; unsigned int i;
u32 read_addr;
for (i = 0; i < ARRAY_SIZE(etnaviv_dump_registers); i++, reg++) { for (i = 0; i < ARRAY_SIZE(etnaviv_dump_registers); i++, reg++) {
reg->reg = etnaviv_dump_registers[i]; read_addr = etnaviv_dump_registers[i];
reg->value = gpu_read(gpu, etnaviv_dump_registers[i]); if (read_addr >= VIVS_PM_POWER_CONTROLS &&
read_addr <= VIVS_PM_PULSE_EATER)
read_addr = gpu_fix_power_address(gpu, read_addr);
reg->reg = cpu_to_le32(etnaviv_dump_registers[i]);
reg->value = cpu_to_le32(gpu_read(gpu, read_addr));
} }
etnaviv_core_dump_header(iter, ETDUMP_BUF_REG, reg); etnaviv_core_dump_header(iter, ETDUMP_BUF_REG, reg);
@@ -220,7 +225,7 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
if (!IS_ERR(pages)) { if (!IS_ERR(pages)) {
int j; int j;
iter.hdr->data[0] = bomap - bomap_start; iter.hdr->data[0] = cpu_to_le32((bomap - bomap_start));
for (j = 0; j < obj->base.size >> PAGE_SHIFT; j++) for (j = 0; j < obj->base.size >> PAGE_SHIFT; j++)
*bomap++ = cpu_to_le64(page_to_phys(*pages++)); *bomap++ = cpu_to_le64(page_to_phys(*pages++));

View File

@@ -540,7 +540,7 @@ static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
u32 pmc, ppc; u32 pmc, ppc;
/* enable clock gating */ /* enable clock gating */
ppc = gpu_read(gpu, VIVS_PM_POWER_CONTROLS); ppc = gpu_read_power(gpu, VIVS_PM_POWER_CONTROLS);
ppc |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING; ppc |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
/* Disable stall module clock gating for 4.3.0.1 and 4.3.0.2 revs */ /* Disable stall module clock gating for 4.3.0.1 and 4.3.0.2 revs */
@@ -548,9 +548,9 @@ static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
gpu->identity.revision == 0x4302) gpu->identity.revision == 0x4302)
ppc |= VIVS_PM_POWER_CONTROLS_DISABLE_STALL_MODULE_CLOCK_GATING; ppc |= VIVS_PM_POWER_CONTROLS_DISABLE_STALL_MODULE_CLOCK_GATING;
gpu_write(gpu, VIVS_PM_POWER_CONTROLS, ppc); gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, ppc);
pmc = gpu_read(gpu, VIVS_PM_MODULE_CONTROLS); pmc = gpu_read_power(gpu, VIVS_PM_MODULE_CONTROLS);
/* Disable PA clock gating for GC400+ without bugfix except for GC420 */ /* Disable PA clock gating for GC400+ without bugfix except for GC420 */
if (gpu->identity.model >= chipModel_GC400 && if (gpu->identity.model >= chipModel_GC400 &&
@@ -579,7 +579,7 @@ static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ; pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ;
pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ; pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ;
gpu_write(gpu, VIVS_PM_MODULE_CONTROLS, pmc); gpu_write_power(gpu, VIVS_PM_MODULE_CONTROLS, pmc);
} }
void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch) void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
@@ -620,11 +620,11 @@ static void etnaviv_gpu_setup_pulse_eater(struct etnaviv_gpu *gpu)
(gpu->identity.features & chipFeatures_PIPE_3D)) (gpu->identity.features & chipFeatures_PIPE_3D))
{ {
/* Performance fix: disable internal DFS */ /* Performance fix: disable internal DFS */
pulse_eater = gpu_read(gpu, VIVS_PM_PULSE_EATER); pulse_eater = gpu_read_power(gpu, VIVS_PM_PULSE_EATER);
pulse_eater |= BIT(18); pulse_eater |= BIT(18);
} }
gpu_write(gpu, VIVS_PM_PULSE_EATER, pulse_eater); gpu_write_power(gpu, VIVS_PM_PULSE_EATER, pulse_eater);
} }
static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu) static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
@@ -1038,7 +1038,7 @@ static bool etnaviv_fence_signaled(struct dma_fence *fence)
{ {
struct etnaviv_fence *f = to_etnaviv_fence(fence); struct etnaviv_fence *f = to_etnaviv_fence(fence);
return fence_completed(f->gpu, f->base.seqno); return (s32)(f->gpu->completed_fence - f->base.seqno) >= 0;
} }
static void etnaviv_fence_release(struct dma_fence *fence) static void etnaviv_fence_release(struct dma_fence *fence)
@@ -1077,6 +1077,12 @@ static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
return &f->base; return &f->base;
} }
/* returns true if fence a comes after fence b */
static inline bool fence_after(u32 a, u32 b)
{
return (s32)(a - b) > 0;
}
/* /*
* event management: * event management:
*/ */
@@ -1231,10 +1237,12 @@ static void sync_point_perfmon_sample_pre(struct etnaviv_gpu *gpu,
{ {
u32 val; u32 val;
mutex_lock(&gpu->lock);
/* disable clock gating */ /* disable clock gating */
val = gpu_read(gpu, VIVS_PM_POWER_CONTROLS); val = gpu_read_power(gpu, VIVS_PM_POWER_CONTROLS);
val &= ~VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING; val &= ~VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
gpu_write(gpu, VIVS_PM_POWER_CONTROLS, val); gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, val);
/* enable debug register */ /* enable debug register */
val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL); val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
@@ -1242,6 +1250,8 @@ static void sync_point_perfmon_sample_pre(struct etnaviv_gpu *gpu,
gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val); gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);
sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_PRE); sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_PRE);
mutex_unlock(&gpu->lock);
} }
static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu, static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu,
@@ -1251,23 +1261,27 @@ static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu,
unsigned int i; unsigned int i;
u32 val; u32 val;
mutex_lock(&gpu->lock);
sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_POST); sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_POST);
for (i = 0; i < submit->nr_pmrs; i++) {
const struct etnaviv_perfmon_request *pmr = submit->pmrs + i;
*pmr->bo_vma = pmr->sequence;
}
/* disable debug register */ /* disable debug register */
val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL); val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
val |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS; val |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val); gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);
/* enable clock gating */ /* enable clock gating */
val = gpu_read(gpu, VIVS_PM_POWER_CONTROLS); val = gpu_read_power(gpu, VIVS_PM_POWER_CONTROLS);
val |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING; val |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
gpu_write(gpu, VIVS_PM_POWER_CONTROLS, val); gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, val);
mutex_unlock(&gpu->lock);
for (i = 0; i < submit->nr_pmrs; i++) {
const struct etnaviv_perfmon_request *pmr = submit->pmrs + i;
*pmr->bo_vma = pmr->sequence;
}
} }

View File

@@ -11,6 +11,7 @@
#include "etnaviv_cmdbuf.h" #include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h" #include "etnaviv_drv.h"
#include "common.xml.h"
struct etnaviv_gem_submit; struct etnaviv_gem_submit;
struct etnaviv_vram_mapping; struct etnaviv_vram_mapping;
@@ -162,9 +163,24 @@ static inline u32 gpu_read(struct etnaviv_gpu *gpu, u32 reg)
return readl(gpu->mmio + reg); return readl(gpu->mmio + reg);
} }
static inline bool fence_completed(struct etnaviv_gpu *gpu, u32 fence) static inline u32 gpu_fix_power_address(struct etnaviv_gpu *gpu, u32 reg)
{ {
return fence_after_eq(gpu->completed_fence, fence); /* Power registers in GC300 < 2.0 are offset by 0x100 */
if (gpu->identity.model == chipModel_GC300 &&
gpu->identity.revision < 0x2000)
reg += 0x100;
return reg;
}
static inline void gpu_write_power(struct etnaviv_gpu *gpu, u32 reg, u32 data)
{
writel(data, gpu->mmio + gpu_fix_power_address(gpu, reg));
}
static inline u32 gpu_read_power(struct etnaviv_gpu *gpu, u32 reg)
{
return readl(gpu->mmio + gpu_fix_power_address(gpu, reg));
} }
int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value); int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value);

View File

@@ -1253,8 +1253,6 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
omap_obj = to_omap_bo(obj); omap_obj = to_omap_bo(obj);
mutex_lock(&omap_obj->lock);
omap_obj->sgt = sgt; omap_obj->sgt = sgt;
if (sgt->orig_nents == 1) { if (sgt->orig_nents == 1) {
@@ -1270,8 +1268,7 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL); pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
if (!pages) { if (!pages) {
omap_gem_free_object(obj); omap_gem_free_object(obj);
obj = ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
goto done;
} }
omap_obj->pages = pages; omap_obj->pages = pages;
@@ -1284,13 +1281,10 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
if (WARN_ON(i != npages)) { if (WARN_ON(i != npages)) {
omap_gem_free_object(obj); omap_gem_free_object(obj);
obj = ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
goto done;
} }
} }
done:
mutex_unlock(&omap_obj->lock);
return obj; return obj;
} }

View File

@@ -26,3 +26,7 @@ config QCOM_ADRENO_DEFAULT_GOVERNOR
config QCOM_KGSL_IOMMU config QCOM_KGSL_IOMMU
bool bool
default y if QCOM_KGSL && (MSM_IOMMU || ARM_SMMU) default y if QCOM_KGSL && (MSM_IOMMU || ARM_SMMU)
config CORESIGHT_ADRENO
bool
default y if QCOM_KGSL=y && CORESIGHT=y

View File

@@ -31,7 +31,6 @@ msm_adreno-y += \
adreno_drawctxt.o \ adreno_drawctxt.o \
adreno_dispatch.o \ adreno_dispatch.o \
adreno_snapshot.o \ adreno_snapshot.o \
adreno_coresight.o \
adreno_trace.o \ adreno_trace.o \
adreno_a3xx.o \ adreno_a3xx.o \
adreno_a5xx.o \ adreno_a5xx.o \
@@ -51,6 +50,7 @@ msm_adreno-y += \
msm_adreno-$(CONFIG_QCOM_KGSL_IOMMU) += adreno_iommu.o msm_adreno-$(CONFIG_QCOM_KGSL_IOMMU) += adreno_iommu.o
msm_adreno-$(CONFIG_DEBUG_FS) += adreno_debugfs.o adreno_profile.o msm_adreno-$(CONFIG_DEBUG_FS) += adreno_debugfs.o adreno_profile.o
msm_adreno-$(CONFIG_COMPAT) += adreno_compat.o msm_adreno-$(CONFIG_COMPAT) += adreno_compat.o
msm_adreno-$(CONFIG_CORESIGHT_ADRENO) += adreno_coresight.o
msm_kgsl_core-objs = $(msm_kgsl_core-y) msm_kgsl_core-objs = $(msm_kgsl_core-y)
msm_adreno-objs = $(msm_adreno-y) msm_adreno-objs = $(msm_adreno-y)

View File

@@ -806,6 +806,7 @@ struct adreno_coresight_attr {
struct adreno_coresight_register *reg; struct adreno_coresight_register *reg;
}; };
#if IS_ENABLED(CONFIG_CORESIGHT_ADRENO)
ssize_t adreno_coresight_show_register(struct device *device, ssize_t adreno_coresight_show_register(struct device *device,
struct device_attribute *attr, char *buf); struct device_attribute *attr, char *buf);
@@ -818,6 +819,12 @@ ssize_t adreno_coresight_store_register(struct device *dev,
adreno_coresight_show_register, \ adreno_coresight_show_register, \
adreno_coresight_store_register), \ adreno_coresight_store_register), \
(_reg), } (_reg), }
#else
#define ADRENO_CORESIGHT_ATTR(_attrname, _reg) \
struct adreno_coresight_attr coresight_attr_##_attrname = { \
__ATTR_NULL, \
(_reg), }
#endif /* CONFIG_CORESIGHT_ADRENO */
/** /**
* struct adreno_coresight - GPU specific coresight definition * struct adreno_coresight - GPU specific coresight definition
@@ -1089,12 +1096,22 @@ void adreno_fault_skipcmd_detached(struct adreno_device *adreno_dev,
struct adreno_context *drawctxt, struct adreno_context *drawctxt,
struct kgsl_drawobj *drawobj); struct kgsl_drawobj *drawobj);
#if IS_ENABLED(CONFIG_CORESIGHT_ADRENO)
int adreno_coresight_init(struct adreno_device *adreno_dev); int adreno_coresight_init(struct adreno_device *adreno_dev);
void adreno_coresight_start(struct adreno_device *adreno_dev); void adreno_coresight_start(struct adreno_device *adreno_dev);
void adreno_coresight_stop(struct adreno_device *adreno_dev); void adreno_coresight_stop(struct adreno_device *adreno_dev);
void adreno_coresight_remove(struct adreno_device *adreno_dev); void adreno_coresight_remove(struct adreno_device *adreno_dev);
#else
static inline int adreno_coresight_init(struct adreno_device *adreno_dev)
{
return -ENODEV;
}
static inline void adreno_coresight_start(struct adreno_device *adreno_dev) { }
static inline void adreno_coresight_stop(struct adreno_device *adreno_dev) { }
static inline void adreno_coresight_remove(struct adreno_device *adreno_dev) { }
#endif /* CONFIG_CORESIGHT_ADRENO */
bool adreno_hw_isidle(struct adreno_device *adreno_dev); bool adreno_hw_isidle(struct adreno_device *adreno_dev);

View File

@@ -1321,9 +1321,9 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
rotation -= 1800; rotation -= 1800;
input_report_abs(pen_input, ABS_TILT_X, input_report_abs(pen_input, ABS_TILT_X,
(char)frame[7]); (signed char)frame[7]);
input_report_abs(pen_input, ABS_TILT_Y, input_report_abs(pen_input, ABS_TILT_Y,
(char)frame[8]); (signed char)frame[8]);
input_report_abs(pen_input, ABS_Z, rotation); input_report_abs(pen_input, ABS_Z, rotation);
input_report_abs(pen_input, ABS_WHEEL, input_report_abs(pen_input, ABS_WHEEL,
get_unaligned_le16(&frame[11])); get_unaligned_le16(&frame[11]));

View File

@@ -3110,7 +3110,7 @@ static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
wc->byte_len = orig_cqe->length; wc->byte_len = orig_cqe->length;
wc->qp = &qp1_qp->ib_qp; wc->qp = &qp1_qp->ib_qp;
wc->ex.imm_data = cpu_to_be32(le32_to_cpu(orig_cqe->immdata)); wc->ex.imm_data = cpu_to_be32(orig_cqe->immdata);
wc->src_qp = orig_cqe->src_qp; wc->src_qp = orig_cqe->src_qp;
memcpy(wc->smac, orig_cqe->smac, ETH_ALEN); memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) { if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
@@ -3231,7 +3231,10 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
continue; continue;
} }
wc->qp = &qp->ib_qp; wc->qp = &qp->ib_qp;
wc->ex.imm_data = cpu_to_be32(le32_to_cpu(cqe->immdata)); if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
wc->ex.imm_data = cpu_to_be32(cqe->immdata);
else
wc->ex.invalidate_rkey = cqe->invrkey;
wc->src_qp = cqe->src_qp; wc->src_qp = cqe->src_qp;
memcpy(wc->smac, cqe->smac, ETH_ALEN); memcpy(wc->smac, cqe->smac, ETH_ALEN);
wc->port_num = 1; wc->port_num = 1;

View File

@@ -349,7 +349,7 @@ struct bnxt_qplib_cqe {
u32 length; u32 length;
u64 wr_id; u64 wr_id;
union { union {
__le32 immdata; u32 immdata;
u32 invrkey; u32 invrkey;
}; };
u64 qp_handle; u64 qp_handle;

View File

@@ -671,7 +671,7 @@ static int create_linear_device(struct dm_target *ti, dev_t dev,
static int android_verity_ctr(struct dm_target *ti, unsigned argc, char **argv) static int android_verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
{ {
dev_t uninitialized_var(dev); dev_t uninitialized_var(dev);
struct android_metadata *metadata = NULL; struct android_metadata *metadata;
int err = 0, i, mode; int err = 0, i, mode;
char *key_id = NULL, *table_ptr, dummy, *target_device; char *key_id = NULL, *table_ptr, dummy, *target_device;
char *verity_table_args[VERITY_TABLE_ARGS + 2 + VERITY_TABLE_OPT_FEC_ARGS]; char *verity_table_args[VERITY_TABLE_ARGS + 2 + VERITY_TABLE_OPT_FEC_ARGS];
@@ -733,7 +733,7 @@ static int android_verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
} }
DMERR("Error while extracting metadata"); DMERR("Error while extracting metadata");
handle_error(); handle_error();
goto free_metadata; return err;
} }
if (verity_enabled) { if (verity_enabled) {
@@ -864,11 +864,10 @@ static int android_verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
} }
free_metadata: free_metadata:
if (metadata) { kfree(metadata->header);
kfree(metadata->header); kfree(metadata->verity_table);
kfree(metadata->verity_table);
}
kfree(metadata); kfree(metadata);
return err; return err;
} }

View File

@@ -544,6 +544,9 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
for (minor = 0; minor < MAX_DVB_MINORS; minor++) for (minor = 0; minor < MAX_DVB_MINORS; minor++)
if (dvb_minors[minor] == NULL) if (dvb_minors[minor] == NULL)
break; break;
#else
minor = nums2minor(adap->num, type, id);
#endif
if (minor >= MAX_DVB_MINORS) { if (minor >= MAX_DVB_MINORS) {
if (new_node) { if (new_node) {
list_del (&new_node->list_head); list_del (&new_node->list_head);
@@ -557,17 +560,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
mutex_unlock(&dvbdev_register_lock); mutex_unlock(&dvbdev_register_lock);
return -EINVAL; return -EINVAL;
} }
#else
minor = nums2minor(adap->num, type, id);
if (minor >= MAX_DVB_MINORS) {
dvb_media_device_free(dvbdev);
list_del(&dvbdev->list_head);
kfree(dvbdev);
*pdvbdev = NULL;
mutex_unlock(&dvbdev_register_lock);
return ret;
}
#endif
dvbdev->minor = minor; dvbdev->minor = minor;
dvb_minors[minor] = dvb_device_get(dvbdev); dvb_minors[minor] = dvb_device_get(dvbdev);
up_write(&minor_rwsem); up_write(&minor_rwsem);

View File

@@ -781,7 +781,7 @@ static int __read_queue(struct cvp_iface_q_info *qinfo, u8 *packet,
u32 *read_ptr; u32 *read_ptr;
u32 receive_request = 0; u32 receive_request = 0;
u32 read_idx, write_idx; u32 read_idx, write_idx;
int rc = 0; int rc = 0;
if (!qinfo || !packet || !pb_tx_req_is_set) { if (!qinfo || !packet || !pb_tx_req_is_set) {
dprintk(CVP_ERR, "Invalid Params\n"); dprintk(CVP_ERR, "Invalid Params\n");
@@ -871,6 +871,12 @@ static int __read_queue(struct cvp_iface_q_info *qinfo, u8 *packet,
(u8 *)qinfo->q_array.align_virtual_addr, (u8 *)qinfo->q_array.align_virtual_addr,
new_read_idx << 2); new_read_idx << 2);
} }
/*
* Copy back the validated size to avoid security issue. As we are reading
* the packet from a shared queue, there is a possibility to get the
* packet->size data corrupted of shared queue by mallicious FW.
*/
*((u32 *) packet) = packet_size_in_words << 2;
} else { } else {
dprintk(CVP_WARN, dprintk(CVP_WARN,
"BAD packet received, read_idx: %#x, pkt_size: %d\n", "BAD packet received, read_idx: %#x, pkt_size: %d\n",

View File

@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only // SPDX-License-Identifier: GPL-2.0-only
/* /*
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/ */
/* ------------------------------------------------------------------------- /* -------------------------------------------------------------------------
@@ -2544,6 +2545,13 @@ int32_t npu_host_unload_network(struct npu_client *client,
return -EINVAL; return -EINVAL;
} }
if (network->is_executing) {
pr_err("network is in execution\n");
network_put(network);
mutex_unlock(&host_ctx->lock);
return -EINVAL;
}
if (network->fw_error) { if (network->fw_error) {
NPU_ERR("fw in error state, skip unload network in fw\n"); NPU_ERR("fw in error state, skip unload network in fw\n");
goto free_network; goto free_network;
@@ -2707,6 +2715,12 @@ int32_t npu_host_exec_network_v2(struct npu_client *client,
goto exec_v2_done; goto exec_v2_done;
} }
if (network->is_executing) {
pr_err("network is already in execution\n");
ret = -EINVAL;
goto exec_v2_done;
}
if (host_ctx->dev_shuttingdown) { if (host_ctx->dev_shuttingdown) {
NPU_ERR("device is shutting down\n"); NPU_ERR("device is shutting down\n");
ret = -EIO; ret = -EIO;
@@ -2724,6 +2738,7 @@ int32_t npu_host_exec_network_v2(struct npu_client *client,
goto exec_v2_done; goto exec_v2_done;
} }
network->is_executing = true;
for (i = 0; i < num_patch_params; i++) { for (i = 0; i < num_patch_params; i++) {
exec_packet->patch_params[i].id = patch_buf_info[i].buf_id; exec_packet->patch_params[i].id = patch_buf_info[i].buf_id;
NPU_DBG("%d: patch_id: %x\n", i, NPU_DBG("%d: patch_id: %x\n", i,
@@ -2833,6 +2848,7 @@ int32_t npu_host_exec_network_v2(struct npu_client *client,
npu_free_network_cmd(host_ctx, exec_cmd); npu_free_network_cmd(host_ctx, exec_cmd);
free_exec_packet: free_exec_packet:
kfree(exec_packet); kfree(exec_packet);
network->is_executing = false;
exec_v2_done: exec_v2_done:
network_put(network); network_put(network);
mutex_unlock(&host_ctx->lock); mutex_unlock(&host_ctx->lock);

View File

@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */ /* SPDX-License-Identifier: GPL-2.0-only */
/* /*
* Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/ */
#ifndef _NPU_MGR_H #ifndef _NPU_MGR_H
@@ -85,6 +86,7 @@ struct npu_network {
bool is_valid; bool is_valid;
bool is_active; bool is_active;
bool is_unloading; bool is_unloading;
bool is_executing;
bool fw_error; bool fw_error;
struct npu_client *client; struct npu_client *client;
struct list_head cmd_list; struct list_head cmd_list;

View File

@@ -472,11 +472,12 @@ int fmc_send_cmd(struct fmdev *fmdev, u8 fm_op, u16 type, void *payload,
jiffies_to_msecs(FM_DRV_TX_TIMEOUT) / 1000); jiffies_to_msecs(FM_DRV_TX_TIMEOUT) / 1000);
return -ETIMEDOUT; return -ETIMEDOUT;
} }
spin_lock_irqsave(&fmdev->resp_skb_lock, flags);
if (!fmdev->resp_skb) { if (!fmdev->resp_skb) {
spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);
fmerr("Response SKB is missing\n"); fmerr("Response SKB is missing\n");
return -EFAULT; return -EFAULT;
} }
spin_lock_irqsave(&fmdev->resp_skb_lock, flags);
skb = fmdev->resp_skb; skb = fmdev->resp_skb;
fmdev->resp_skb = NULL; fmdev->resp_skb = NULL;
spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags); spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);

View File

@@ -4206,10 +4206,8 @@ mptsas_find_phyinfo_by_phys_disk_num(MPT_ADAPTER *ioc, u8 phys_disk_num,
static void static void
mptsas_reprobe_lun(struct scsi_device *sdev, void *data) mptsas_reprobe_lun(struct scsi_device *sdev, void *data)
{ {
int rc;
sdev->no_uld_attach = data ? 1 : 0; sdev->no_uld_attach = data ? 1 : 0;
rc = scsi_device_reprobe(sdev); WARN_ON(scsi_device_reprobe(sdev));
} }
static void static void

View File

@@ -42,7 +42,7 @@ static int da9052_spi_probe(struct spi_device *spi)
spi_set_drvdata(spi, da9052); spi_set_drvdata(spi, da9052);
config = da9052_regmap_config; config = da9052_regmap_config;
config.read_flag_mask = 1; config.write_flag_mask = 1;
config.reg_bits = 7; config.reg_bits = 7;
config.pad_bits = 1; config.pad_bits = 1;
config.val_bits = 8; config.val_bits = 8;

View File

@@ -85,8 +85,8 @@ static int rt5033_i2c_probe(struct i2c_client *i2c,
} }
dev_info(&i2c->dev, "Device found Device ID: %04x\n", dev_id); dev_info(&i2c->dev, "Device found Device ID: %04x\n", dev_id);
ret = regmap_add_irq_chip(rt5033->regmap, rt5033->irq, ret = devm_regmap_add_irq_chip(rt5033->dev, rt5033->regmap,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT, rt5033->irq, IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
0, &rt5033_irq_chip, &rt5033->irq_data); 0, &rt5033_irq_chip, &rt5033->irq_data);
if (ret) { if (ret) {
dev_err(&i2c->dev, "Failed to request IRQ %d: %d\n", dev_err(&i2c->dev, "Failed to request IRQ %d: %d\n",

View File

@@ -1163,7 +1163,7 @@ static int apds990x_probe(struct i2c_client *client,
err = chip->pdata->setup_resources(); err = chip->pdata->setup_resources();
if (err) { if (err) {
err = -EINVAL; err = -EINVAL;
goto fail3; goto fail4;
} }
} }
@@ -1171,7 +1171,7 @@ static int apds990x_probe(struct i2c_client *client,
apds990x_attribute_group); apds990x_attribute_group);
if (err < 0) { if (err < 0) {
dev_err(&chip->client->dev, "Sysfs registration failed\n"); dev_err(&chip->client->dev, "Sysfs registration failed\n");
goto fail4; goto fail5;
} }
err = request_threaded_irq(client->irq, NULL, err = request_threaded_irq(client->irq, NULL,
@@ -1182,15 +1182,17 @@ static int apds990x_probe(struct i2c_client *client,
if (err) { if (err) {
dev_err(&client->dev, "could not get IRQ %d\n", dev_err(&client->dev, "could not get IRQ %d\n",
client->irq); client->irq);
goto fail5; goto fail6;
} }
return err; return err;
fail5: fail6:
sysfs_remove_group(&chip->client->dev.kobj, sysfs_remove_group(&chip->client->dev.kobj,
&apds990x_attribute_group[0]); &apds990x_attribute_group[0]);
fail4: fail5:
if (chip->pdata && chip->pdata->release_resources) if (chip->pdata && chip->pdata->release_resources)
chip->pdata->release_resources(); chip->pdata->release_resources();
fail4:
pm_runtime_disable(&client->dev);
fail3: fail3:
regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs); regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
fail2: fail2:

View File

@@ -2857,8 +2857,8 @@ static int dw_mci_init_slot(struct dw_mci *host)
if (host->use_dma == TRANS_MODE_IDMAC) { if (host->use_dma == TRANS_MODE_IDMAC) {
mmc->max_segs = host->ring_size; mmc->max_segs = host->ring_size;
mmc->max_blk_size = 65535; mmc->max_blk_size = 65535;
mmc->max_req_size = DW_MCI_DESC_DATA_LENGTH * host->ring_size; mmc->max_seg_size = 0x1000;
mmc->max_seg_size = mmc->max_req_size; mmc->max_req_size = mmc->max_seg_size * host->ring_size;
mmc->max_blk_count = mmc->max_req_size / 512; mmc->max_blk_count = mmc->max_req_size / 512;
} else if (host->use_dma == TRANS_MODE_EDMAC) { } else if (host->use_dma == TRANS_MODE_EDMAC) {
mmc->max_segs = 64; mmc->max_segs = 64;

View File

@@ -269,10 +269,6 @@ static int mmc_spi_response_get(struct mmc_spi_host *host,
u8 leftover = 0; u8 leftover = 0;
unsigned short rotator; unsigned short rotator;
int i; int i;
char tag[32];
snprintf(tag, sizeof(tag), " ... CMD%d response SPI_%s",
cmd->opcode, maptype(cmd));
/* Except for data block reads, the whole response will already /* Except for data block reads, the whole response will already
* be stored in the scratch buffer. It's somewhere after the * be stored in the scratch buffer. It's somewhere after the
@@ -422,8 +418,9 @@ static int mmc_spi_response_get(struct mmc_spi_host *host,
} }
if (value < 0) if (value < 0)
dev_dbg(&host->spi->dev, "%s: resp %04x %08x\n", dev_dbg(&host->spi->dev,
tag, cmd->resp[0], cmd->resp[1]); " ... CMD%d response SPI_%s: resp %04x %08x\n",
cmd->opcode, maptype(cmd), cmd->resp[0], cmd->resp[1]);
/* disable chipselect on errors and some success cases */ /* disable chipselect on errors and some success cases */
if (value >= 0 && cs_on) if (value >= 0 && cs_on)

View File

@@ -365,7 +365,7 @@ atmel_pmecc_create_user(struct atmel_pmecc *pmecc,
size = ALIGN(size, sizeof(s32)); size = ALIGN(size, sizeof(s32));
size += (req->ecc.strength + 1) * sizeof(s32) * 3; size += (req->ecc.strength + 1) * sizeof(s32) * 3;
user = kzalloc(size, GFP_KERNEL); user = devm_kzalloc(pmecc->dev, size, GFP_KERNEL);
if (!user) if (!user)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
@@ -411,12 +411,6 @@ atmel_pmecc_create_user(struct atmel_pmecc *pmecc,
} }
EXPORT_SYMBOL_GPL(atmel_pmecc_create_user); EXPORT_SYMBOL_GPL(atmel_pmecc_create_user);
void atmel_pmecc_destroy_user(struct atmel_pmecc_user *user)
{
kfree(user);
}
EXPORT_SYMBOL_GPL(atmel_pmecc_destroy_user);
static int get_strength(struct atmel_pmecc_user *user) static int get_strength(struct atmel_pmecc_user *user)
{ {
const int *strengths = user->pmecc->caps->strengths; const int *strengths = user->pmecc->caps->strengths;

View File

@@ -59,8 +59,6 @@ struct atmel_pmecc *devm_atmel_pmecc_get(struct device *dev);
struct atmel_pmecc_user * struct atmel_pmecc_user *
atmel_pmecc_create_user(struct atmel_pmecc *pmecc, atmel_pmecc_create_user(struct atmel_pmecc *pmecc,
struct atmel_pmecc_user_req *req); struct atmel_pmecc_user_req *req);
void atmel_pmecc_destroy_user(struct atmel_pmecc_user *user);
void atmel_pmecc_reset(struct atmel_pmecc *pmecc); void atmel_pmecc_reset(struct atmel_pmecc *pmecc);
int atmel_pmecc_enable(struct atmel_pmecc_user *user, int op); int atmel_pmecc_enable(struct atmel_pmecc_user *user, int op);
void atmel_pmecc_disable(struct atmel_pmecc_user *user); void atmel_pmecc_disable(struct atmel_pmecc_user *user);

View File

@@ -1459,7 +1459,7 @@ static int scan_all(struct ubi_device *ubi, struct ubi_attach_info *ai,
return err; return err;
} }
static struct ubi_attach_info *alloc_ai(void) static struct ubi_attach_info *alloc_ai(const char *slab_name)
{ {
struct ubi_attach_info *ai; struct ubi_attach_info *ai;
@@ -1473,7 +1473,7 @@ static struct ubi_attach_info *alloc_ai(void)
INIT_LIST_HEAD(&ai->alien); INIT_LIST_HEAD(&ai->alien);
INIT_LIST_HEAD(&ai->fastmap); INIT_LIST_HEAD(&ai->fastmap);
ai->volumes = RB_ROOT; ai->volumes = RB_ROOT;
ai->aeb_slab_cache = kmem_cache_create("ubi_aeb_slab_cache", ai->aeb_slab_cache = kmem_cache_create(slab_name,
sizeof(struct ubi_ainf_peb), sizeof(struct ubi_ainf_peb),
0, 0, NULL); 0, 0, NULL);
if (!ai->aeb_slab_cache) { if (!ai->aeb_slab_cache) {
@@ -1503,7 +1503,7 @@ static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info **ai)
err = -ENOMEM; err = -ENOMEM;
scan_ai = alloc_ai(); scan_ai = alloc_ai("ubi_aeb_slab_cache_fastmap");
if (!scan_ai) if (!scan_ai)
goto out; goto out;
@@ -1569,7 +1569,7 @@ int ubi_attach(struct ubi_device *ubi, int force_scan)
int err; int err;
struct ubi_attach_info *ai; struct ubi_attach_info *ai;
ai = alloc_ai(); ai = alloc_ai("ubi_aeb_slab_cache");
if (!ai) if (!ai)
return -ENOMEM; return -ENOMEM;
@@ -1587,7 +1587,7 @@ int ubi_attach(struct ubi_device *ubi, int force_scan)
if (err > 0 || mtd_is_eccerr(err)) { if (err > 0 || mtd_is_eccerr(err)) {
if (err != UBI_NO_FASTMAP) { if (err != UBI_NO_FASTMAP) {
destroy_ai(ai); destroy_ai(ai);
ai = alloc_ai(); ai = alloc_ai("ubi_aeb_slab_cache");
if (!ai) if (!ai)
return -ENOMEM; return -ENOMEM;
@@ -1626,7 +1626,7 @@ int ubi_attach(struct ubi_device *ubi, int force_scan)
if (ubi->fm && ubi_dbg_chk_fastmap(ubi)) { if (ubi->fm && ubi_dbg_chk_fastmap(ubi)) {
struct ubi_attach_info *scan_ai; struct ubi_attach_info *scan_ai;
scan_ai = alloc_ai(); scan_ai = alloc_ai("ubi_aeb_slab_cache_dbg_chk_fastmap");
if (!scan_ai) { if (!scan_ai) {
err = -ENOMEM; err = -ENOMEM;
goto out_wl; goto out_wl;

View File

@@ -810,7 +810,14 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
goto out_not_moved; goto out_not_moved;
} }
if (err == MOVE_RETRY) { if (err == MOVE_RETRY) {
scrubbing = 1; /*
* For source PEB:
* 1. The scrubbing is set for scrub type PEB, it will
* be put back into ubi->scrub list.
* 2. Non-scrub type PEB will be put back into ubi->used
* list.
*/
keep = 1;
dst_leb_clean = 1; dst_leb_clean = 1;
goto out_not_moved; goto out_not_moved;
} }

View File

@@ -17866,6 +17866,9 @@ static int tg3_init_one(struct pci_dev *pdev,
} else } else
persist_dma_mask = dma_mask = DMA_BIT_MASK(64); persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
if (tg3_asic_rev(tp) == ASIC_REV_57766)
persist_dma_mask = DMA_BIT_MASK(31);
/* Configure DMA attributes. */ /* Configure DMA attributes. */
if (dma_mask > DMA_BIT_MASK(32)) { if (dma_mask > DMA_BIT_MASK(32)) {
err = pci_set_dma_mask(pdev, dma_mask); err = pci_set_dma_mask(pdev, dma_mask);

View File

@@ -1417,18 +1417,15 @@ static int pxa168_eth_probe(struct platform_device *pdev)
printk(KERN_NOTICE "PXA168 10/100 Ethernet Driver\n"); printk(KERN_NOTICE "PXA168 10/100 Ethernet Driver\n");
clk = devm_clk_get(&pdev->dev, NULL); clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(clk)) { if (IS_ERR(clk)) {
dev_err(&pdev->dev, "Fast Ethernet failed to get clock\n"); dev_err(&pdev->dev, "Fast Ethernet failed to get and enable clock\n");
return -ENODEV; return -ENODEV;
} }
clk_prepare_enable(clk);
dev = alloc_etherdev(sizeof(struct pxa168_eth_private)); dev = alloc_etherdev(sizeof(struct pxa168_eth_private));
if (!dev) { if (!dev)
err = -ENOMEM; return -ENOMEM;
goto err_clk;
}
platform_set_drvdata(pdev, dev); platform_set_drvdata(pdev, dev);
pep = netdev_priv(dev); pep = netdev_priv(dev);
@@ -1541,8 +1538,6 @@ static int pxa168_eth_probe(struct platform_device *pdev)
mdiobus_free(pep->smi_bus); mdiobus_free(pep->smi_bus);
err_netdev: err_netdev:
free_netdev(dev); free_netdev(dev);
err_clk:
clk_disable_unprepare(clk);
return err; return err;
} }

View File

@@ -346,6 +346,8 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
plat_dat->bsp_priv = dwmac; plat_dat->bsp_priv = dwmac;
plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed; plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed;
plat_dat->riwt_off = 1;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret) if (ret)
goto err_remove_config_dt; goto err_remove_config_dt;

View File

@@ -1428,13 +1428,13 @@ static int lan78xx_set_wol(struct net_device *netdev,
struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
int ret; int ret;
if (wol->wolopts & ~WAKE_ALL)
return -EINVAL;
ret = usb_autopm_get_interface(dev->intf); ret = usb_autopm_get_interface(dev->intf);
if (ret < 0) if (ret < 0)
return ret; return ret;
if (wol->wolopts & ~WAKE_ALL)
return -EINVAL;
pdata->wol = wol->wolopts; pdata->wol = wol->wolopts;
device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts); device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
@@ -2191,6 +2191,7 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
if (dev->chipid == ID_REV_CHIP_ID_7801_) { if (dev->chipid == ID_REV_CHIP_ID_7801_) {
if (phy_is_pseudo_fixed_link(phydev)) { if (phy_is_pseudo_fixed_link(phydev)) {
fixed_phy_unregister(phydev); fixed_phy_unregister(phydev);
phy_device_free(phydev);
} else { } else {
phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
0xfffffff0); 0xfffffff0);
@@ -3871,8 +3872,10 @@ static void lan78xx_disconnect(struct usb_interface *intf)
phy_disconnect(net->phydev); phy_disconnect(net->phydev);
if (phy_is_pseudo_fixed_link(phydev)) if (phy_is_pseudo_fixed_link(phydev)) {
fixed_phy_unregister(phydev); fixed_phy_unregister(phydev);
phy_device_free(phydev);
}
unregister_netdev(net); unregister_netdev(net);

View File

@@ -1045,6 +1045,7 @@ static const struct usb_device_id products[] = {
USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7), USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7),
.driver_info = (unsigned long)&qmi_wwan_info, .driver_info = (unsigned long)&qmi_wwan_info,
}, },
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0122)}, /* Quectel RG650V */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */

View File

@@ -297,6 +297,9 @@ int htc_connect_service(struct htc_target *target,
return -ETIMEDOUT; return -ETIMEDOUT;
} }
if (target->conn_rsp_epid < 0 || target->conn_rsp_epid >= ENDPOINT_MAX)
return -EINVAL;
*conn_rsp_epid = target->conn_rsp_epid; *conn_rsp_epid = target->conn_rsp_epid;
return 0; return 0;
err: err:

View File

@@ -853,7 +853,7 @@ struct mwifiex_ietypes_chanstats {
struct mwifiex_ie_types_wildcard_ssid_params { struct mwifiex_ie_types_wildcard_ssid_params {
struct mwifiex_ie_types_header header; struct mwifiex_ie_types_header header;
u8 max_ssid_length; u8 max_ssid_length;
u8 ssid[1]; u8 ssid[];
} __packed; } __packed;
#define TSF_DATA_SIZE 8 #define TSF_DATA_SIZE 8

View File

@@ -802,11 +802,16 @@ static int nvme_submit_user_cmd(struct request_queue *q,
bool write = nvme_is_write(cmd); bool write = nvme_is_write(cmd);
struct nvme_ns *ns = q->queuedata; struct nvme_ns *ns = q->queuedata;
struct gendisk *disk = ns ? ns->disk : NULL; struct gendisk *disk = ns ? ns->disk : NULL;
bool supports_metadata = disk && blk_get_integrity(disk);
bool has_metadata = meta_buffer && meta_len;
struct request *req; struct request *req;
struct bio *bio = NULL; struct bio *bio = NULL;
void *meta = NULL; void *meta = NULL;
int ret; int ret;
if (has_metadata && !supports_metadata)
return -EINVAL;
req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY); req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY);
if (IS_ERR(req)) if (IS_ERR(req))
return PTR_ERR(req); return PTR_ERR(req);
@@ -821,7 +826,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
goto out; goto out;
bio = req->bio; bio = req->bio;
bio->bi_disk = disk; bio->bi_disk = disk;
if (disk && meta_buffer && meta_len) { if (has_metadata) {
meta = nvme_add_user_metadata(bio, meta_buffer, meta_len, meta = nvme_add_user_metadata(bio, meta_buffer, meta_len,
meta_seed, write); meta_seed, write);
if (IS_ERR(meta)) { if (IS_ERR(meta)) {

View File

@@ -135,11 +135,13 @@ int cpqhp_unconfigure_device(struct pci_func *func)
static int PCI_RefinedAccessConfig(struct pci_bus *bus, unsigned int devfn, u8 offset, u32 *value) static int PCI_RefinedAccessConfig(struct pci_bus *bus, unsigned int devfn, u8 offset, u32 *value)
{ {
u32 vendID = 0; u32 vendID = 0;
int ret;
if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &vendID) == -1) ret = pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &vendID);
return -1; if (ret != PCIBIOS_SUCCESSFUL)
if (vendID == 0xffffffff) return PCIBIOS_DEVICE_NOT_FOUND;
return -1; if (PCI_POSSIBLE_ERROR(vendID))
return PCIBIOS_DEVICE_NOT_FOUND;
return pci_bus_read_config_dword(bus, devfn, offset, value); return pci_bus_read_config_dword(bus, devfn, offset, value);
} }
@@ -200,13 +202,15 @@ static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 *dev_
{ {
u16 tdevice; u16 tdevice;
u32 work; u32 work;
int ret;
u8 tbus; u8 tbus;
ctrl->pci_bus->number = bus_num; ctrl->pci_bus->number = bus_num;
for (tdevice = 0; tdevice < 0xFF; tdevice++) { for (tdevice = 0; tdevice < 0xFF; tdevice++) {
/* Scan for access first */ /* Scan for access first */
if (PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work) == -1) ret = PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work);
if (ret)
continue; continue;
dbg("Looking for nonbridge bus_num %d dev_num %d\n", bus_num, tdevice); dbg("Looking for nonbridge bus_num %d dev_num %d\n", bus_num, tdevice);
/* Yep we got one. Not a bridge ? */ /* Yep we got one. Not a bridge ? */
@@ -218,7 +222,8 @@ static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 *dev_
} }
for (tdevice = 0; tdevice < 0xFF; tdevice++) { for (tdevice = 0; tdevice < 0xFF; tdevice++) {
/* Scan for access first */ /* Scan for access first */
if (PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work) == -1) ret = PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work);
if (ret)
continue; continue;
dbg("Looking for bridge bus_num %d dev_num %d\n", bus_num, tdevice); dbg("Looking for bridge bus_num %d dev_num %d\n", bus_num, tdevice);
/* Yep we got one. bridge ? */ /* Yep we got one. bridge ? */
@@ -251,7 +256,7 @@ static int PCI_GetBusDevHelper(struct controller *ctrl, u8 *bus_num, u8 *dev_num
*dev_num = tdevice; *dev_num = tdevice;
ctrl->pci_bus->number = tbus; ctrl->pci_bus->number = tbus;
pci_bus_read_config_dword(ctrl->pci_bus, *dev_num, PCI_VENDOR_ID, &work); pci_bus_read_config_dword(ctrl->pci_bus, *dev_num, PCI_VENDOR_ID, &work);
if (!nobridge || (work == 0xffffffff)) if (!nobridge || PCI_POSSIBLE_ERROR(work))
return 0; return 0;
dbg("bus_num %d devfn %d\n", *bus_num, *dev_num); dbg("bus_num %d devfn %d\n", *bus_num, *dev_num);

View File

@@ -115,6 +115,7 @@ static void pci_slot_release(struct kobject *kobj)
up_read(&pci_bus_sem); up_read(&pci_bus_sem);
list_del(&slot->list); list_del(&slot->list);
pci_bus_put(slot->bus);
kfree(slot); kfree(slot);
} }
@@ -296,7 +297,7 @@ struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
goto err; goto err;
} }
slot->bus = parent; slot->bus = pci_bus_get(parent);
slot->number = slot_nr; slot->number = slot_nr;
slot->kobj.kset = pci_slots_kset; slot->kobj.kset = pci_slots_kset;
@@ -304,6 +305,7 @@ struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
slot_name = make_slot_name(name); slot_name = make_slot_name(name);
if (!slot_name) { if (!slot_name) {
err = -ENOMEM; err = -ENOMEM;
pci_bus_put(slot->bus);
kfree(slot); kfree(slot);
goto err; goto err;
} }

View File

@@ -1055,7 +1055,10 @@ static int __ipa_add_flt_rule(struct ipa_flt_tbl *tbl, enum ipa_ip_type ip,
} else { } else {
list_add(&entry->link, &tbl->head_flt_rule_list); list_add(&entry->link, &tbl->head_flt_rule_list);
} }
tbl->rule_cnt++; if (tbl->rule_cnt < IPA_RULE_CNT_MAX)
tbl->rule_cnt++;
else
return -EINVAL;
if (entry->rt_tbl) if (entry->rt_tbl)
entry->rt_tbl->ref_cnt++; entry->rt_tbl->ref_cnt++;
id = ipa_id_alloc(entry); id = ipa_id_alloc(entry);

View File

@@ -204,6 +204,7 @@
#define IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN 96 #define IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN 96
#define IPA2_ACTIVE_CLIENTS_LOG_HASHTABLE_SIZE 50 #define IPA2_ACTIVE_CLIENTS_LOG_HASHTABLE_SIZE 50
#define IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN 40 #define IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN 40
#define IPA_RULE_CNT_MAX 512
struct ipa2_active_client_htable_entry { struct ipa2_active_client_htable_entry {
struct hlist_node list; struct hlist_node list;

View File

@@ -1077,7 +1077,10 @@ static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name,
list_add_tail(&entry->link, &tbl->head_rt_rule_list); list_add_tail(&entry->link, &tbl->head_rt_rule_list);
else else
list_add(&entry->link, &tbl->head_rt_rule_list); list_add(&entry->link, &tbl->head_rt_rule_list);
tbl->rule_cnt++; if (tbl->rule_cnt < IPA_RULE_CNT_MAX)
tbl->rule_cnt++;
else
return -EINVAL;
if (entry->hdr) if (entry->hdr)
entry->hdr->ref_cnt++; entry->hdr->ref_cnt++;
else if (entry->proc_ctx) else if (entry->proc_ctx)

View File

@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only // SPDX-License-Identifier: GPL-2.0-only
/* /*
* Copyright (c) 2014-2021, The Linux Foundation. All rights reserved. * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/ */
/* /*
@@ -2569,15 +2570,8 @@ static int ipa3_wwan_remove(struct platform_device *pdev)
if (ipa3_rmnet_res.ipa_napi_enable) if (ipa3_rmnet_res.ipa_napi_enable)
netif_napi_del(&(rmnet_ipa3_ctx->wwan_priv->napi)); netif_napi_del(&(rmnet_ipa3_ctx->wwan_priv->napi));
mutex_unlock(&rmnet_ipa3_ctx->pipe_handle_guard); mutex_unlock(&rmnet_ipa3_ctx->pipe_handle_guard);
IPAWANDBG("rmnet_ipa unregister_netdev started\n");
unregister_netdev(IPA_NETDEV());
IPAWANDBG("rmnet_ipa unregister_netdev completed\n");
ipa3_wwan_deregister_netdev_pm_client();
cancel_work_sync(&ipa3_tx_wakequeue_work); cancel_work_sync(&ipa3_tx_wakequeue_work);
cancel_delayed_work(&ipa_tether_stats_poll_wakequeue_work); cancel_delayed_work(&ipa_tether_stats_poll_wakequeue_work);
if (IPA_NETDEV())
free_netdev(IPA_NETDEV());
rmnet_ipa3_ctx->wwan_priv = NULL;
/* No need to remove wwan_ioctl during SSR */ /* No need to remove wwan_ioctl during SSR */
if (!atomic_read(&rmnet_ipa3_ctx->is_ssr)) if (!atomic_read(&rmnet_ipa3_ctx->is_ssr))
ipa3_wan_ioctl_deinit(); ipa3_wan_ioctl_deinit();
@@ -2778,6 +2772,15 @@ static int ipa3_lcl_mdm_ssr_notifier_cb(struct notifier_block *this,
break; break;
case SUBSYS_AFTER_SHUTDOWN: case SUBSYS_AFTER_SHUTDOWN:
IPAWANINFO("IPA Received MPSS AFTER_SHUTDOWN\n"); IPAWANINFO("IPA Received MPSS AFTER_SHUTDOWN\n");
IPAWANINFO("rmnet_ipa unregister_netdev\n");
if (IPA_NETDEV())
unregister_netdev(IPA_NETDEV());
ipa3_wwan_deregister_netdev_pm_client();
if (IPA_NETDEV())
free_netdev(IPA_NETDEV());
rmnet_ipa3_ctx->wwan_priv = NULL;
if (atomic_read(&rmnet_ipa3_ctx->is_ssr) && if (atomic_read(&rmnet_ipa3_ctx->is_ssr) &&
ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) ipa3_ctx->ipa_hw_type < IPA_HW_v4_0)
ipa3_q6_post_shutdown_cleanup(); ipa3_q6_post_shutdown_cleanup();

View File

@@ -486,8 +486,6 @@ EXPORT_SYMBOL_GPL(power_supply_get_by_name);
*/ */
void power_supply_put(struct power_supply *psy) void power_supply_put(struct power_supply *psy)
{ {
might_sleep();
atomic_dec(&psy->use_cnt); atomic_dec(&psy->use_cnt);
put_device(&psy->dev); put_device(&psy->dev);
} }

View File

@@ -246,21 +246,21 @@ struct glink_channel {
static const struct rpmsg_endpoint_ops glink_endpoint_ops; static const struct rpmsg_endpoint_ops glink_endpoint_ops;
#define RPM_CMD_VERSION 0 #define GLINK_CMD_VERSION 0
#define RPM_CMD_VERSION_ACK 1 #define GLINK_CMD_VERSION_ACK 1
#define RPM_CMD_OPEN 2 #define GLINK_CMD_OPEN 2
#define RPM_CMD_CLOSE 3 #define GLINK_CMD_CLOSE 3
#define RPM_CMD_OPEN_ACK 4 #define GLINK_CMD_OPEN_ACK 4
#define RPM_CMD_INTENT 5 #define GLINK_CMD_INTENT 5
#define RPM_CMD_RX_DONE 6 #define GLINK_CMD_RX_DONE 6
#define RPM_CMD_RX_INTENT_REQ 7 #define GLINK_CMD_RX_INTENT_REQ 7
#define RPM_CMD_RX_INTENT_REQ_ACK 8 #define GLINK_CMD_RX_INTENT_REQ_ACK 8
#define RPM_CMD_TX_DATA 9 #define GLINK_CMD_TX_DATA 9
#define RPM_CMD_CLOSE_ACK 11 #define GLINK_CMD_CLOSE_ACK 11
#define RPM_CMD_TX_DATA_CONT 12 #define GLINK_CMD_TX_DATA_CONT 12
#define RPM_CMD_READ_NOTIF 13 #define GLINK_CMD_READ_NOTIF 13
#define RPM_CMD_RX_DONE_W_REUSE 14 #define GLINK_CMD_RX_DONE_W_REUSE 14
#define RPM_CMD_SIGNALS 15 #define GLINK_CMD_SIGNALS 15
#define GLINK_FEATURE_INTENTLESS BIT(1) #define GLINK_FEATURE_INTENTLESS BIT(1)
@@ -414,7 +414,7 @@ static void qcom_glink_send_read_notify(struct qcom_glink *glink)
{ {
struct glink_msg msg; struct glink_msg msg;
msg.cmd = cpu_to_le16(RPM_CMD_READ_NOTIF); msg.cmd = cpu_to_le16(GLINK_CMD_READ_NOTIF);
msg.param1 = 0; msg.param1 = 0;
msg.param2 = 0; msg.param2 = 0;
@@ -487,7 +487,7 @@ static int qcom_glink_send_version(struct qcom_glink *glink)
{ {
struct glink_msg msg; struct glink_msg msg;
msg.cmd = cpu_to_le16(RPM_CMD_VERSION); msg.cmd = cpu_to_le16(GLINK_CMD_VERSION);
msg.param1 = cpu_to_le16(GLINK_VERSION_1); msg.param1 = cpu_to_le16(GLINK_VERSION_1);
msg.param2 = cpu_to_le32(glink->features); msg.param2 = cpu_to_le32(glink->features);
@@ -499,7 +499,7 @@ static void qcom_glink_send_version_ack(struct qcom_glink *glink)
{ {
struct glink_msg msg; struct glink_msg msg;
msg.cmd = cpu_to_le16(RPM_CMD_VERSION_ACK); msg.cmd = cpu_to_le16(GLINK_CMD_VERSION_ACK);
msg.param1 = cpu_to_le16(GLINK_VERSION_1); msg.param1 = cpu_to_le16(GLINK_VERSION_1);
msg.param2 = cpu_to_le32(glink->features); msg.param2 = cpu_to_le32(glink->features);
@@ -512,7 +512,7 @@ static void qcom_glink_send_open_ack(struct qcom_glink *glink,
{ {
struct glink_msg msg; struct glink_msg msg;
msg.cmd = cpu_to_le16(RPM_CMD_OPEN_ACK); msg.cmd = cpu_to_le16(GLINK_CMD_OPEN_ACK);
msg.param1 = cpu_to_le16(channel->rcid); msg.param1 = cpu_to_le16(channel->rcid);
msg.param2 = cpu_to_le32(0); msg.param2 = cpu_to_le32(0);
@@ -539,11 +539,11 @@ static void qcom_glink_handle_intent_req_ack(struct qcom_glink *glink,
} }
/** /**
* qcom_glink_send_open_req() - send a RPM_CMD_OPEN request to the remote * qcom_glink_send_open_req() - send a GLINK_CMD_OPEN request to the remote
* @glink: Ptr to the glink edge * @glink: Ptr to the glink edge
* @channel: Ptr to the channel that the open req is sent * @channel: Ptr to the channel that the open req is sent
* *
* Allocates a local channel id and sends a RPM_CMD_OPEN message to the remote. * Allocates a local channel id and sends a GLINK_CMD_OPEN message to the remote.
* Will return with refcount held, regardless of outcome. * Will return with refcount held, regardless of outcome.
* *
* Returns 0 on success, negative errno otherwise. * Returns 0 on success, negative errno otherwise.
@@ -573,7 +573,7 @@ static int qcom_glink_send_open_req(struct qcom_glink *glink,
channel->lcid = ret; channel->lcid = ret;
CH_INFO(channel, "\n"); CH_INFO(channel, "\n");
req.msg.cmd = cpu_to_le16(RPM_CMD_OPEN); req.msg.cmd = cpu_to_le16(GLINK_CMD_OPEN);
req.msg.param1 = cpu_to_le16(channel->lcid); req.msg.param1 = cpu_to_le16(channel->lcid);
req.msg.param2 = cpu_to_le32(name_len); req.msg.param2 = cpu_to_le32(name_len);
strlcpy(req.name, channel->name, GLINK_NAME_SIZE); strlcpy(req.name, channel->name, GLINK_NAME_SIZE);
@@ -600,7 +600,7 @@ static void qcom_glink_send_close_req(struct qcom_glink *glink,
{ {
struct glink_msg req; struct glink_msg req;
req.cmd = cpu_to_le16(RPM_CMD_CLOSE); req.cmd = cpu_to_le16(GLINK_CMD_CLOSE);
req.param1 = cpu_to_le16(channel->lcid); req.param1 = cpu_to_le16(channel->lcid);
req.param2 = 0; req.param2 = 0;
@@ -613,7 +613,7 @@ static void qcom_glink_send_close_ack(struct qcom_glink *glink,
{ {
struct glink_msg req; struct glink_msg req;
req.cmd = cpu_to_le16(RPM_CMD_CLOSE_ACK); req.cmd = cpu_to_le16(GLINK_CMD_CLOSE_ACK);
req.param1 = cpu_to_le16(rcid); req.param1 = cpu_to_le16(rcid);
req.param2 = 0; req.param2 = 0;
@@ -637,7 +637,7 @@ static int __qcom_glink_rx_done(struct qcom_glink *glink,
bool reuse = intent->reuse; bool reuse = intent->reuse;
int ret; int ret;
cmd.id = reuse ? RPM_CMD_RX_DONE_W_REUSE : RPM_CMD_RX_DONE; cmd.id = reuse ? GLINK_CMD_RX_DONE_W_REUSE : GLINK_CMD_RX_DONE;
cmd.lcid = cid; cmd.lcid = cid;
cmd.liid = iid; cmd.liid = iid;
@@ -782,7 +782,7 @@ static int qcom_glink_send_intent_req_ack(struct qcom_glink *glink,
{ {
struct glink_msg msg; struct glink_msg msg;
msg.cmd = cpu_to_le16(RPM_CMD_RX_INTENT_REQ_ACK); msg.cmd = cpu_to_le16(GLINK_CMD_RX_INTENT_REQ_ACK);
msg.param1 = cpu_to_le16(channel->lcid); msg.param1 = cpu_to_le16(channel->lcid);
msg.param2 = cpu_to_le32(granted); msg.param2 = cpu_to_le32(granted);
@@ -823,7 +823,7 @@ static int qcom_glink_advertise_intent(struct qcom_glink *glink,
intent->advertised = true; intent->advertised = true;
spin_unlock_irqrestore(&channel->intent_lock, flags); spin_unlock_irqrestore(&channel->intent_lock, flags);
cmd.id = cpu_to_le16(RPM_CMD_INTENT); cmd.id = cpu_to_le16(GLINK_CMD_INTENT);
cmd.lcid = cpu_to_le16(channel->lcid); cmd.lcid = cpu_to_le16(channel->lcid);
cmd.count = cpu_to_le32(1); cmd.count = cpu_to_le32(1);
cmd.size = cpu_to_le32(intent->size); cmd.size = cpu_to_le32(intent->size);
@@ -1220,7 +1220,7 @@ static int qcom_glink_send_signals(struct qcom_glink *glink,
{ {
struct glink_msg msg; struct glink_msg msg;
msg.cmd = cpu_to_le16(RPM_CMD_SIGNALS); msg.cmd = cpu_to_le16(GLINK_CMD_SIGNALS);
msg.param1 = cpu_to_le16(channel->lcid); msg.param1 = cpu_to_le16(channel->lcid);
msg.param2 = cpu_to_le32(sigs); msg.param2 = cpu_to_le32(sigs);
@@ -1277,46 +1277,47 @@ static irqreturn_t qcom_glink_native_intr(int irq, void *data)
param2 = le32_to_cpu(msg.param2); param2 = le32_to_cpu(msg.param2);
switch (cmd) { switch (cmd) {
case RPM_CMD_VERSION: case GLINK_CMD_VERSION:
case RPM_CMD_VERSION_ACK: case GLINK_CMD_VERSION_ACK:
case RPM_CMD_CLOSE: case GLINK_CMD_CLOSE:
case RPM_CMD_CLOSE_ACK: case GLINK_CMD_CLOSE_ACK:
case RPM_CMD_RX_INTENT_REQ: case GLINK_CMD_RX_INTENT_REQ:
ret = qcom_glink_rx_defer(glink, 0); ret = qcom_glink_rx_defer(glink, 0);
break; break;
case RPM_CMD_OPEN_ACK: case GLINK_CMD_OPEN_ACK:
ret = qcom_glink_rx_open_ack(glink, param1); ret = qcom_glink_rx_open_ack(glink, param1);
qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8)); qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
break; break;
case RPM_CMD_OPEN: case GLINK_CMD_OPEN:
ret = qcom_glink_rx_defer(glink, param2); /* upper 16 bits of param2 are the "prio" field */
ret = qcom_glink_rx_defer(glink, param2 & 0xffff);
break; break;
case RPM_CMD_TX_DATA: case GLINK_CMD_TX_DATA:
case RPM_CMD_TX_DATA_CONT: case GLINK_CMD_TX_DATA_CONT:
ret = qcom_glink_rx_data(glink, avail); ret = qcom_glink_rx_data(glink, avail);
break; break;
case RPM_CMD_READ_NOTIF: case GLINK_CMD_READ_NOTIF:
qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8)); qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
mbox_send_message(glink->mbox_chan, NULL); mbox_send_message(glink->mbox_chan, NULL);
mbox_client_txdone(glink->mbox_chan, 0); mbox_client_txdone(glink->mbox_chan, 0);
break; break;
case RPM_CMD_INTENT: case GLINK_CMD_INTENT:
qcom_glink_handle_intent(glink, param1, param2, avail); qcom_glink_handle_intent(glink, param1, param2, avail);
break; break;
case RPM_CMD_RX_DONE: case GLINK_CMD_RX_DONE:
qcom_glink_handle_rx_done(glink, param1, param2, false); qcom_glink_handle_rx_done(glink, param1, param2, false);
qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8)); qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
break; break;
case RPM_CMD_RX_DONE_W_REUSE: case GLINK_CMD_RX_DONE_W_REUSE:
qcom_glink_handle_rx_done(glink, param1, param2, true); qcom_glink_handle_rx_done(glink, param1, param2, true);
qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8)); qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
break; break;
case RPM_CMD_RX_INTENT_REQ_ACK: case GLINK_CMD_RX_INTENT_REQ_ACK:
qcom_glink_handle_intent_req_ack(glink, param1, param2); qcom_glink_handle_intent_req_ack(glink, param1, param2);
qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8)); qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
break; break;
case RPM_CMD_SIGNALS: case GLINK_CMD_SIGNALS:
qcom_glink_handle_signals(glink, param1, param2); qcom_glink_handle_signals(glink, param1, param2);
qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8)); qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
break; break;
@@ -1548,7 +1549,7 @@ static int qcom_glink_request_intent(struct qcom_glink *glink,
atomic_set(&channel->intent_req_comp, 0); atomic_set(&channel->intent_req_comp, 0);
cmd.id = RPM_CMD_RX_INTENT_REQ; cmd.id = GLINK_CMD_RX_INTENT_REQ;
cmd.cid = channel->lcid; cmd.cid = channel->lcid;
cmd.size = size; cmd.size = size;
@@ -1632,7 +1633,7 @@ static int __qcom_glink_send(struct glink_channel *channel,
chunk_size = SZ_8K; chunk_size = SZ_8K;
left_size = len - chunk_size; left_size = len - chunk_size;
} }
req.msg.cmd = cpu_to_le16(RPM_CMD_TX_DATA); req.msg.cmd = cpu_to_le16(GLINK_CMD_TX_DATA);
req.msg.param1 = cpu_to_le16(channel->lcid); req.msg.param1 = cpu_to_le16(channel->lcid);
req.msg.param2 = cpu_to_le32(iid); req.msg.param2 = cpu_to_le32(iid);
req.chunk_size = cpu_to_le32(chunk_size); req.chunk_size = cpu_to_le32(chunk_size);
@@ -1641,8 +1642,9 @@ static int __qcom_glink_send(struct glink_channel *channel,
ret = qcom_glink_tx(glink, &req, sizeof(req), data, chunk_size, wait); ret = qcom_glink_tx(glink, &req, sizeof(req), data, chunk_size, wait);
/* Mark intent available if we failed */ /* Mark intent available if we failed */
if (ret && intent) { if (ret) {
intent->in_use = false; if (intent)
intent->in_use = false;
return ret; return ret;
} }
@@ -1653,7 +1655,7 @@ static int __qcom_glink_send(struct glink_channel *channel,
chunk_size = SZ_8K; chunk_size = SZ_8K;
left_size -= chunk_size; left_size -= chunk_size;
req.msg.cmd = cpu_to_le16(RPM_CMD_TX_DATA_CONT); req.msg.cmd = cpu_to_le16(GLINK_CMD_TX_DATA_CONT);
req.msg.param1 = cpu_to_le16(channel->lcid); req.msg.param1 = cpu_to_le16(channel->lcid);
req.msg.param2 = cpu_to_le32(iid); req.msg.param2 = cpu_to_le32(iid);
req.chunk_size = cpu_to_le32(chunk_size); req.chunk_size = cpu_to_le32(chunk_size);
@@ -1663,8 +1665,9 @@ static int __qcom_glink_send(struct glink_channel *channel,
chunk_size, wait); chunk_size, wait);
/* Mark intent available if we failed */ /* Mark intent available if we failed */
if (ret && intent) { if (ret) {
intent->in_use = false; if (intent)
intent->in_use = false;
break; break;
} }
} }
@@ -1874,6 +1877,9 @@ static void qcom_glink_rx_close_ack(struct qcom_glink *glink, unsigned int lcid)
struct glink_channel *channel; struct glink_channel *channel;
unsigned long flags; unsigned long flags;
/* To wakeup any blocking writers */
wake_up_all(&glink->tx_avail_notify);
spin_lock_irqsave(&glink->idr_lock, flags); spin_lock_irqsave(&glink->idr_lock, flags);
channel = idr_find(&glink->lcids, lcid); channel = idr_find(&glink->lcids, lcid);
if (WARN(!channel, "close ack on unknown channel\n")) { if (WARN(!channel, "close ack on unknown channel\n")) {
@@ -1927,22 +1933,22 @@ static void qcom_glink_work(struct work_struct *work)
param2 = le32_to_cpu(msg->param2); param2 = le32_to_cpu(msg->param2);
switch (cmd) { switch (cmd) {
case RPM_CMD_VERSION: case GLINK_CMD_VERSION:
qcom_glink_receive_version(glink, param1, param2); qcom_glink_receive_version(glink, param1, param2);
break; break;
case RPM_CMD_VERSION_ACK: case GLINK_CMD_VERSION_ACK:
qcom_glink_receive_version_ack(glink, param1, param2); qcom_glink_receive_version_ack(glink, param1, param2);
break; break;
case RPM_CMD_OPEN: case GLINK_CMD_OPEN:
qcom_glink_rx_open(glink, param1, msg->data); qcom_glink_rx_open(glink, param1, msg->data);
break; break;
case RPM_CMD_CLOSE: case GLINK_CMD_CLOSE:
qcom_glink_rx_close(glink, param1); qcom_glink_rx_close(glink, param1);
break; break;
case RPM_CMD_CLOSE_ACK: case GLINK_CMD_CLOSE_ACK:
qcom_glink_rx_close_ack(glink, param1); qcom_glink_rx_close_ack(glink, param1);
break; break;
case RPM_CMD_RX_INTENT_REQ: case GLINK_CMD_RX_INTENT_REQ:
qcom_glink_handle_intent_req(glink, param1, param2); qcom_glink_handle_intent_req(glink, param1, param2);
break; break;
default: default:

View File

@@ -917,13 +917,18 @@ void rtc_timer_do_work(struct work_struct *work)
struct timerqueue_node *next; struct timerqueue_node *next;
ktime_t now; ktime_t now;
struct rtc_time tm; struct rtc_time tm;
int err;
struct rtc_device *rtc = struct rtc_device *rtc =
container_of(work, struct rtc_device, irqwork); container_of(work, struct rtc_device, irqwork);
mutex_lock(&rtc->ops_lock); mutex_lock(&rtc->ops_lock);
again: again:
__rtc_read_time(rtc, &tm); err = __rtc_read_time(rtc, &tm);
if (err) {
mutex_unlock(&rtc->ops_lock);
return;
}
now = rtc_tm_to_ktime(tm); now = rtc_tm_to_ktime(tm);
while ((next = timerqueue_getnext(&rtc->timerqueue))) { while ((next = timerqueue_getnext(&rtc->timerqueue))) {
if (next->expires > now) if (next->expires > now)

View File

@@ -1711,9 +1711,8 @@ bfad_init(void)
error = bfad_im_module_init(); error = bfad_im_module_init();
if (error) { if (error) {
error = -ENOMEM;
printk(KERN_WARNING "bfad_im_module_init failure\n"); printk(KERN_WARNING "bfad_im_module_init failure\n");
goto ext; return -ENOMEM;
} }
if (strcmp(FCPI_NAME, " fcpim") == 0) if (strcmp(FCPI_NAME, " fcpim") == 0)

View File

@@ -357,6 +357,7 @@ static int qedi_alloc_and_init_sb(struct qedi_ctx *qedi,
ret = qedi_ops->common->sb_init(qedi->cdev, sb_info, sb_virt, sb_phys, ret = qedi_ops->common->sb_init(qedi->cdev, sb_info, sb_virt, sb_phys,
sb_id, QED_SB_TYPE_STORAGE); sb_id, QED_SB_TYPE_STORAGE);
if (ret) { if (ret) {
dma_free_coherent(&qedi->pdev->dev, sizeof(*sb_virt), sb_virt, sb_phys);
QEDI_ERR(&qedi->dbg_ctx, QEDI_ERR(&qedi->dbg_ctx,
"Status block initialization failed for id = %d.\n", "Status block initialization failed for id = %d.\n",
sb_id); sb_id);

View File

@@ -194,7 +194,6 @@ int __init register_intc_controller(struct intc_desc *desc)
goto err0; goto err0;
INIT_LIST_HEAD(&d->list); INIT_LIST_HEAD(&d->list);
list_add_tail(&d->list, &intc_list);
raw_spin_lock_init(&d->lock); raw_spin_lock_init(&d->lock);
INIT_RADIX_TREE(&d->tree, GFP_ATOMIC); INIT_RADIX_TREE(&d->tree, GFP_ATOMIC);
@@ -380,6 +379,7 @@ int __init register_intc_controller(struct intc_desc *desc)
d->skip_suspend = desc->skip_syscore_suspend; d->skip_suspend = desc->skip_syscore_suspend;
list_add_tail(&d->list, &intc_list);
nr_intc_controllers++; nr_intc_controllers++;
return 0; return 0;

View File

@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only // SPDX-License-Identifier: GPL-2.0-only
/* /*
* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/ */
#include <asm/dma-iommu.h> #include <asm/dma-iommu.h>
@@ -477,7 +478,6 @@ static int ngd_check_hw_status(struct msm_slim_ctrl *dev)
static int ngd_xfer_msg(struct slim_controller *ctrl, struct slim_msg_txn *txn) static int ngd_xfer_msg(struct slim_controller *ctrl, struct slim_msg_txn *txn)
{ {
DECLARE_COMPLETION_ONSTACK(done);
DECLARE_COMPLETION_ONSTACK(tx_sent); DECLARE_COMPLETION_ONSTACK(tx_sent);
struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl); struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
@@ -491,6 +491,8 @@ static int ngd_xfer_msg(struct slim_controller *ctrl, struct slim_msg_txn *txn)
bool report_sat = false; bool report_sat = false;
bool sync_wr = true; bool sync_wr = true;
reinit_completion(&dev->xfer_done);
if (txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG) if (txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG)
return -EPROTONOSUPPORT; return -EPROTONOSUPPORT;
@@ -649,7 +651,9 @@ static int ngd_xfer_msg(struct slim_controller *ctrl, struct slim_msg_txn *txn)
wbuf[i++] = txn->wbuf[0]; wbuf[i++] = txn->wbuf[0];
if (txn->mc != SLIM_USR_MC_DISCONNECT_PORT) if (txn->mc != SLIM_USR_MC_DISCONNECT_PORT)
wbuf[i++] = txn->wbuf[1]; wbuf[i++] = txn->wbuf[1];
ret = ngd_get_tid(ctrl, txn, &wbuf[i++], &done);
txn->comp = &dev->xfer_done;
ret = ngd_get_tid(ctrl, txn, &wbuf[i++], &dev->xfer_done);
if (ret) { if (ret) {
SLIM_ERR(dev, "TID for connect/disconnect fail:%d\n", SLIM_ERR(dev, "TID for connect/disconnect fail:%d\n",
ret); ret);
@@ -2015,6 +2019,7 @@ static int ngd_slim_probe(struct platform_device *pdev)
init_completion(&dev->reconf); init_completion(&dev->reconf);
init_completion(&dev->ctrl_up); init_completion(&dev->ctrl_up);
init_completion(&dev->qmi_up); init_completion(&dev->qmi_up);
init_completion(&dev->xfer_done);
mutex_init(&dev->tx_lock); mutex_init(&dev->tx_lock);
mutex_init(&dev->ssr_lock); mutex_init(&dev->ssr_lock);
spin_lock_init(&dev->tx_buf_lock); spin_lock_init(&dev->tx_buf_lock);

View File

@@ -308,6 +308,8 @@ struct msm_slim_ctrl {
bool chan_active; bool chan_active;
enum msm_ctrl_state state; enum msm_ctrl_state state;
struct completion ctrl_up; struct completion ctrl_up;
struct completion xfer_done;
struct completion sync_done;
int nsats; int nsats;
u32 ver; u32 ver;
struct msm_slim_qmi qmi; struct msm_slim_qmi qmi;

View File

@@ -542,7 +542,8 @@ int geni_se_clk_tbl_get(struct geni_se *se, unsigned long **tbl)
for (i = 0; i < MAX_CLK_PERF_LEVEL; i++) { for (i = 0; i < MAX_CLK_PERF_LEVEL; i++) {
freq = clk_round_rate(se->clk, freq + 1); freq = clk_round_rate(se->clk, freq + 1);
if (freq <= 0 || freq == se->clk_perf_tbl[i - 1]) if (freq <= 0 ||
(i > 0 && freq == se->clk_perf_tbl[i - 1]))
break; break;
se->clk_perf_tbl[i] = freq; se->clk_perf_tbl[i] = freq;
} }

Some files were not shown because too many files have changed in this diff Show More