Merge branch 'lineage-20' of https://github.com/LineageOS/android_kernel_qcom_sm8250 into lineage-22.1

Change-Id: Ib5c234d95e3f41cfa458ac48d666fbab920b0b6e
Author: althafvly
Date: 2025-01-04 20:53:35 +05:30
1210 changed files with 210599 additions and 7728 deletions

.gitignore

@@ -114,7 +114,6 @@ GTAGS
# id-utils files
ID
*.orig
*~
\#*#


@@ -518,7 +518,7 @@ at module load time (for a module) with::
[dbg_probe=1]
The addresses are normal I2C addresses. The adapter is the string
name of the adapter, as shown in /sys/class/i2c-adapter/i2c-<n>/name.
name of the adapter, as shown in /sys/bus/i2c/devices/i2c-<n>/name.
It is *NOT* i2c-<n> itself. Also, the comparison is done ignoring
spaces, so if the name is "This is an I2C chip" you can say
adapter_name=ThisisanI2cchip. This is because it's hard to pass in

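A minimal sketch of the space-ignoring match described above (adapter_name_matches() is a hypothetical helper for illustration, not the actual driver code, and folding case as well is an assumption of this sketch):

#include <ctype.h>
#include <stdbool.h>

/* Hypothetical sketch: compare a module parameter against an adapter
 * name, skipping spaces in the name; case folding is assumed here. */
static bool adapter_name_matches(const char *param, const char *name)
{
	while (*param && *name) {
		if (*name == ' ') {	/* ignore spaces in the adapter name */
			name++;
			continue;
		}
		if (tolower((unsigned char)*param) != tolower((unsigned char)*name))
			return false;
		param++;
		name++;
	}
	while (*name == ' ')		/* tolerate trailing spaces */
		name++;
	return *param == '\0' && *name == '\0';
}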

@@ -61,8 +61,28 @@ stable kernels.
| ARM | Cortex-A73 | #858921 | ARM64_ERRATUM_858921 |
| ARM | Cortex-A55 | #1024718 | ARM64_ERRATUM_1024718 |
| ARM | Cortex-A76 | #1463225 | ARM64_ERRATUM_1463225 |
| ARM | Cortex-A76 | #3324349 | ARM64_ERRATUM_3194386 |
| ARM | Cortex-A77 | #3324348 | ARM64_ERRATUM_3194386 |
| ARM | Cortex-A78 | #3324344 | ARM64_ERRATUM_3194386 |
| ARM | Cortex-A78C | #3324346,3324347| ARM64_ERRATUM_3194386 |
| ARM | Cortex-A710 | #3324338 | ARM64_ERRATUM_3194386 |
| ARM | Cortex-A715 | #3456084 | ARM64_ERRATUM_3194386 |
| ARM | Cortex-A720 | #3456091 | ARM64_ERRATUM_3194386 |
| ARM | Cortex-A725 | #3456106 | ARM64_ERRATUM_3194386 |
| ARM | Cortex-X1 | #3324344 | ARM64_ERRATUM_3194386 |
| ARM | Cortex-X1C | #3324346 | ARM64_ERRATUM_3194386 |
| ARM | Cortex-X2 | #3324338 | ARM64_ERRATUM_3194386 |
| ARM | Cortex-X3 | #3324335 | ARM64_ERRATUM_3194386 |
| ARM | Cortex-X4 | #3194386 | ARM64_ERRATUM_3194386 |
| ARM | Cortex-X925 | #3324334 | ARM64_ERRATUM_3194386 |
| ARM | Cortex-A77 | #1542418 | ARM64_ERRATUM_1542418 |
| ARM | Neoverse-N1 | #1542419 | ARM64_ERRATUM_1542419 |
| ARM | Neoverse-N1 | #3324349 | ARM64_ERRATUM_3194386 |
| ARM | Neoverse-N2 | #3324339 | ARM64_ERRATUM_3194386 |
| ARM | Neoverse-N3 | #3456111 | ARM64_ERRATUM_3194386 |
| ARM | Neoverse-V1 | #3324341 | ARM64_ERRATUM_3194386 |
| ARM | Neoverse-V2 | #3324336 | ARM64_ERRATUM_3194386 |
| ARM | Neoverse-V3 | #3312417 | ARM64_ERRATUM_3194386 |
| ARM | MMU-500 | #841119,#826419 | N/A |
| | | | |
| Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |


@@ -235,6 +235,7 @@ certainly invest a bit more effort into libata core layer).
CLOCK
devm_clk_get()
devm_clk_get_optional()
devm_clk_put()
devm_clk_hw_register()
devm_of_clk_add_hw_provider()


@@ -299,17 +299,25 @@ functions is used.
The header file linux/hwmon-sysfs.h provides a number of useful macros to
declare and use hardware monitoring sysfs attributes.
In many cases, you can use the existing define DEVICE_ATTR to declare such
attributes. This is feasible if an attribute has no additional context. However,
in many cases there will be additional information such as a sensor index which
will need to be passed to the sysfs attribute handling function.
In many cases, you can use the existing define DEVICE_ATTR or its variants
DEVICE_ATTR_{RW,RO,WO} to declare such attributes. This is feasible if an
attribute has no additional context. However, in many cases there will be
additional information such as a sensor index which will need to be passed
to the sysfs attribute handling function.
SENSOR_DEVICE_ATTR and SENSOR_DEVICE_ATTR_2 can be used to define attributes
which need such additional context information. SENSOR_DEVICE_ATTR requires
one additional argument, SENSOR_DEVICE_ATTR_2 requires two.
SENSOR_DEVICE_ATTR defines a struct sensor_device_attribute variable.
This structure has the following fields.
Simplified variants of SENSOR_DEVICE_ATTR and SENSOR_DEVICE_ATTR_2 are available
and should be used if standard attribute permissions and function names are
feasible. Standard permissions are 0644 for SENSOR_DEVICE_ATTR[_2]_RW,
0444 for SENSOR_DEVICE_ATTR[_2]_RO, and 0200 for SENSOR_DEVICE_ATTR[_2]_WO.
Standard functions, similar to DEVICE_ATTR_{RW,RO,WO}, have _show and _store
appended to the provided function name.
SENSOR_DEVICE_ATTR and its variants define a struct sensor_device_attribute
variable. This structure has the following fields.
struct sensor_device_attribute {
struct device_attribute dev_attr;
@@ -320,8 +328,8 @@ You can use to_sensor_dev_attr to get the pointer to this structure from the
attribute read or write function. Its parameter is the device to which the
attribute is attached.
SENSOR_DEVICE_ATTR_2 defines a struct sensor_device_attribute_2 variable,
which is defined as follows.
SENSOR_DEVICE_ATTR_2 and its variants define a struct sensor_device_attribute_2
variable, which is defined as follows.
struct sensor_device_attribute_2 {
struct device_attribute dev_attr;

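As a sketch of the simplified variants described above, a read-only temperature attribute could be declared as follows; SENSOR_DEVICE_ATTR_RO appends _show to the given function name and applies the standard 0444 permissions (read_temp_mC() is a hypothetical driver helper):

#include <linux/device.h>
#include <linux/hwmon-sysfs.h>

/* Sketch: creates sensor_dev_attr_temp1_input with .show = temp_show,
 * mode 0444, and index 0; read_temp_mC() is hypothetical. */
static ssize_t temp_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	int index = to_sensor_dev_attr(attr)->index;	/* the extra context */

	return sprintf(buf, "%d\n", read_temp_mC(index));
}
static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);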

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 318
SUBLEVEL = 324
EXTRAVERSION =
NAME = "People's Front"


@@ -300,8 +300,8 @@
&i2c2 {
status = "okay";
rt5616: rt5616@1b {
compatible = "rt5616";
rt5616: audio-codec@1b {
compatible = "realtek,rt5616";
reg = <0x1b>;
clocks = <&cru SCLK_I2S_OUT>;
clock-names = "mclk";


@@ -316,12 +316,13 @@
};
};
acodec: acodec-ana@20030000 {
compatible = "rk3036-codec";
acodec: audio-codec@20030000 {
compatible = "rockchip,rk3036-codec";
reg = <0x20030000 0x4000>;
rockchip,grf = <&grf>;
clock-names = "acodec_pclk";
clocks = <&cru PCLK_ACODEC>;
rockchip,grf = <&grf>;
#sound-dai-cells = <0>;
status = "disabled";
};
@@ -331,7 +332,6 @@
interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru PCLK_HDMI>;
clock-names = "pclk";
rockchip,grf = <&grf>;
pinctrl-names = "default";
pinctrl-0 = <&hdmi_ctl>;
status = "disabled";


@@ -43,6 +43,8 @@ CONFIG_BPF_SYSCALL=y
CONFIG_EMBEDDED=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB_FREELIST_RANDOM=y
CONFIG_SLAB_FREELIST_HARDENED=y
CONFIG_PROFILING=y
CONFIG_ARCH_QCOM=y
CONFIG_ARCH_QM215=y


@@ -42,6 +42,8 @@ CONFIG_BPF_SYSCALL=y
CONFIG_EMBEDDED=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB_FREELIST_RANDOM=y
CONFIG_SLAB_FREELIST_HARDENED=y
CONFIG_PROFILING=y
CONFIG_ARCH_QCOM=y
CONFIG_ARCH_QM215=y


@@ -42,6 +42,8 @@ CONFIG_KALLSYMS_ALL=y
CONFIG_BPF_SYSCALL=y
CONFIG_EMBEDDED=y
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB_FREELIST_RANDOM=y
CONFIG_SLAB_FREELIST_HARDENED=y
CONFIG_PROFILING=y
CONFIG_ARCH_QCOM=y
CONFIG_ARCH_QM215=y
@@ -715,7 +717,7 @@ CONFIG_PREEMPTIRQ_EVENTS=y
CONFIG_IRQSOFF_TRACER=y
CONFIG_PREEMPT_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_LKDTM=y
CONFIG_LKDTM=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_MEMTEST=y
CONFIG_BUG_ON_DATA_CORRUPTION=y


@@ -44,6 +44,8 @@ CONFIG_KALLSYMS_ALL=y
CONFIG_BPF_SYSCALL=y
CONFIG_EMBEDDED=y
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB_FREELIST_RANDOM=y
CONFIG_SLAB_FREELIST_HARDENED=y
CONFIG_PROFILING=y
CONFIG_ARCH_QCOM=y
CONFIG_ARCH_QM215=y
@@ -725,7 +727,7 @@ CONFIG_PREEMPTIRQ_EVENTS=y
CONFIG_IRQSOFF_TRACER=y
CONFIG_PREEMPT_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_LKDTM=y
CONFIG_LKDTM=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_MEMTEST=y
CONFIG_BUG_ON_DATA_CORRUPTION=y


@@ -145,16 +145,6 @@ extern int __get_user_64t_1(void *);
extern int __get_user_64t_2(void *);
extern int __get_user_64t_4(void *);
#define __GUP_CLOBBER_1 "lr", "cc"
#ifdef CONFIG_CPU_USE_DOMAINS
#define __GUP_CLOBBER_2 "ip", "lr", "cc"
#else
#define __GUP_CLOBBER_2 "lr", "cc"
#endif
#define __GUP_CLOBBER_4 "lr", "cc"
#define __GUP_CLOBBER_32t_8 "lr", "cc"
#define __GUP_CLOBBER_8 "lr", "cc"
#define __get_user_x(__r2, __p, __e, __l, __s) \
__asm__ __volatile__ ( \
__asmeq("%0", "r0") __asmeq("%1", "r2") \
@@ -162,7 +152,7 @@ extern int __get_user_64t_4(void *);
"bl __get_user_" #__s \
: "=&r" (__e), "=r" (__r2) \
: "0" (__p), "r" (__l) \
: __GUP_CLOBBER_##__s)
: "ip", "lr", "cc")
/* narrowing a double-word get into a single 32bit word register: */
#ifdef __ARMEB__
@@ -184,7 +174,7 @@ extern int __get_user_64t_4(void *);
"bl __get_user_64t_" #__s \
: "=&r" (__e), "=r" (__r2) \
: "0" (__p), "r" (__l) \
: __GUP_CLOBBER_##__s)
: "ip", "lr", "cc")
#else
#define __get_user_x_64t __get_user_x
#endif


@@ -70,6 +70,7 @@ static void __init realview_smp_prepare_cpus(unsigned int max_cpus)
return;
}
map = syscon_node_to_regmap(np);
of_node_put(np);
if (IS_ERR(map)) {
pr_err("PLATSMP: No syscon regmap\n");
return;


@@ -579,6 +579,46 @@ config ARM64_ERRATUM_1742098
If unsure, say Y.
config ARM64_ERRATUM_3194386
bool "Cortex-*/Neoverse-*: workaround for MSR SSBS not self-synchronizing"
default y
help
This option adds the workaround for the following errata:
* ARM Cortex-A76 erratum 3324349
* ARM Cortex-A77 erratum 3324348
* ARM Cortex-A78 erratum 3324344
* ARM Cortex-A78C erratum 3324346
* ARM Cortex-A78C erratum 3324347
* ARM Cortex-A710 erratum 3324338
* ARM Cortex-A715 erratum 3456084
* ARM Cortex-A720 erratum 3456091
* ARM Cortex-A725 erratum 3456106
* ARM Cortex-X1 erratum 3324344
* ARM Cortex-X1C erratum 3324346
* ARM Cortex-X2 erratum 3324338
* ARM Cortex-X3 erratum 3324335
* ARM Cortex-X4 erratum 3194386
* ARM Cortex-X925 erratum 3324334
* ARM Neoverse-N1 erratum 3324349
* ARM Neoverse-N2 erratum 3324339
* ARM Neoverse-N3 erratum 3456111
* ARM Neoverse-V1 erratum 3324341
* ARM Neoverse-V2 erratum 3324336
* ARM Neoverse-V3 erratum 3312417
On affected cores "MSR SSBS, #0" instructions may not affect
subsequent speculative instructions, which may permit unexpected
speculative store bypassing.
Work around this problem by placing a Speculation Barrier (SB) or
Instruction Synchronization Barrier (ISB) after kernel changes to
SSBS. The presence of the SSBS special-purpose register is hidden
from hwcaps and EL0 reads of ID_AA64PFR1_EL1, such that userspace
will use the PR_SPEC_STORE_BYPASS prctl to change SSBS.
If unsure, say Y.
config CAVIUM_ERRATUM_22375
bool "Cavium erratum 22375, 24313"
default y
@@ -1157,7 +1197,8 @@ config ARM64_TAGGED_ADDR_ABI
config COMPAT_VDSO
bool "Enable vDSO for 32-bit applications"
depends on !CPU_BIG_ENDIAN && "$(CROSS_COMPILE_COMPAT)" != ""
depends on !CPU_BIG_ENDIAN
depends on (CC_IS_CLANG && LD_IS_LLD) || "$(CROSS_COMPILE_COMPAT)" != ""
select GENERIC_COMPAT_VDSO
default y
help


@@ -649,8 +649,8 @@
<0>, <24000000>,
<24000000>, <24000000>,
<15000000>, <15000000>,
<100000000>, <100000000>,
<100000000>, <100000000>,
<300000000>, <100000000>,
<400000000>, <100000000>,
<50000000>, <100000000>,
<100000000>, <100000000>,
<50000000>, <50000000>,


@@ -147,6 +147,22 @@
status = "okay";
};
&gpio3 {
/*
* The Qseven BIOS_DISABLE signal on the RK3399-Q7 keeps the on-module
* eMMC and SPI flash powered-down initially (in fact it keeps the
reset signal asserted). The BIOS_DISABLE_OVERRIDE pin allows overriding
* that signal so that eMMC and SPI can be used regardless of the state
* of the signal.
*/
bios-disable-override-hog {
gpios = <RK_PD5 GPIO_ACTIVE_LOW>;
gpio-hog;
line-name = "bios_disable_override";
output-high;
};
};
&gmac {
assigned-clocks = <&cru SCLK_RMII_SRC>;
assigned-clock-parents = <&clkin_gmac>;
@@ -433,9 +449,14 @@
&pinctrl {
pinctrl-names = "default";
pinctrl-0 = <&q7_thermal_pin>;
pinctrl-0 = <&q7_thermal_pin &bios_disable_override_hog_pin>;
gpios {
bios_disable_override_hog_pin: bios-disable-override-hog-pin {
rockchip,pins =
<3 RK_PD5 RK_FUNC_GPIO &pcfg_pull_down>;
};
q7_thermal_pin: q7-thermal-pin {
rockchip,pins =
<0 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>;


@@ -123,7 +123,7 @@
status = "okay";
rt5651: rt5651@1a {
compatible = "rockchip,rt5651";
compatible = "realtek,rt5651";
reg = <0x1a>;
clocks = <&cru SCLK_I2S_8CH_OUT>;
clock-names = "mclk";


@@ -717,7 +717,7 @@ CONFIG_PREEMPTIRQ_EVENTS=y
CONFIG_IRQSOFF_TRACER=y
CONFIG_PREEMPT_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_LKDTM=y
CONFIG_LKDTM=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_MEMTEST=y
CONFIG_BUG_ON_DATA_CORRUPTION=y


@@ -147,6 +147,19 @@
hint #22
.endm
/*
* Speculation barrier
*/
.macro sb
alternative_if_not ARM64_HAS_SB
dsb nsh
isb
alternative_else
SB_BARRIER_INSN
nop
alternative_endif
.endm
/*
* Sanitise a 64-bit bounded index wrt speculation, returning zero if out
* of bounds.


@@ -34,6 +34,10 @@
#define psb_csync() asm volatile("hint #17" : : : "memory")
#define csdb() asm volatile("hint #20" : : : "memory")
#define spec_bar() asm volatile(ALTERNATIVE("dsb nsh\nisb\n", \
SB_BARRIER_INSN"nop\n", \
ARM64_HAS_SB))
#define mb() dsb(sy)
#define rmb() dsb(ld)
#define wmb() dsb(st)


@@ -58,6 +58,8 @@
#define ARM64_WORKAROUND_1542419 37
#define ARM64_SPECTRE_BHB 38
#define ARM64_WORKAROUND_1742098 39
#define ARM64_HAS_SB 40
#define ARM64_WORKAROUND_SPECULATIVE_SSBS 41
/* kabi: reserve 42 - 62 for future cpu capabilities */
#define ARM64_NCAPS 62


@@ -89,11 +89,11 @@
#define ARM_CPU_PART_CORTEX_A78 0xD41
#define ARM_CPU_PART_CORTEX_X1 0xD44
#define ARM_CPU_PART_CORTEX_A710 0xD47
#define ARM_CPU_PART_CORTEX_A715 0xD4D
#define ARM_CPU_PART_CORTEX_X2 0xD48
#define ARM_CPU_PART_NEOVERSE_N2 0xD49
#define ARM_CPU_PART_CORTEX_A78C 0xD4B
#define ARM_CPU_PART_NEOVERSE_N1 0xD0C
#define ARM_CPU_PART_CORTEX_A77 0xD0D
#define ARM_CPU_PART_NEOVERSE_V1 0xD40
#define ARM_CPU_PART_CORTEX_A78 0xD41
#define ARM_CPU_PART_CORTEX_X1 0xD44
@@ -101,6 +101,15 @@
#define ARM_CPU_PART_CORTEX_X2 0xD48
#define ARM_CPU_PART_NEOVERSE_N2 0xD49
#define ARM_CPU_PART_CORTEX_A78C 0xD4B
#define ARM_CPU_PART_CORTEX_X1C 0xD4C
#define ARM_CPU_PART_CORTEX_X3 0xD4E
#define ARM_CPU_PART_NEOVERSE_V2 0xD4F
#define ARM_CPU_PART_CORTEX_A720 0xD81
#define ARM_CPU_PART_CORTEX_X4 0xD82
#define ARM_CPU_PART_NEOVERSE_V3 0xD84
#define ARM_CPU_PART_CORTEX_X925 0xD85
#define ARM_CPU_PART_CORTEX_A725 0xD87
#define ARM_CPU_PART_NEOVERSE_N3 0xD8E
#define APM_CPU_PART_POTENZA 0x000
@@ -135,11 +144,11 @@
#define MIDR_CORTEX_A78 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78)
#define MIDR_CORTEX_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1)
#define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
#define MIDR_CORTEX_A715 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A715)
#define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
#define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
#define MIDR_CORTEX_A78C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C)
#define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1)
#define MIDR_CORTEX_A77 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77)
#define MIDR_NEOVERSE_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V1)
#define MIDR_CORTEX_A78 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78)
#define MIDR_CORTEX_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1)
@@ -147,6 +156,15 @@
#define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
#define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
#define MIDR_CORTEX_A78C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C)
#define MIDR_CORTEX_X1C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1C)
#define MIDR_CORTEX_X3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X3)
#define MIDR_NEOVERSE_V2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V2)
#define MIDR_CORTEX_A720 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A720)
#define MIDR_CORTEX_X4 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X4)
#define MIDR_NEOVERSE_V3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3)
#define MIDR_CORTEX_X925 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X925)
#define MIDR_CORTEX_A725 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A725)
#define MIDR_NEOVERSE_N3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N3)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)


@@ -107,6 +107,11 @@
#define SET_PSTATE_UAO(x) __emit_inst(0xd500401f | PSTATE_UAO | ((!!x) << PSTATE_Imm_shift))
#define SET_PSTATE_SSBS(x) __emit_inst(0xd500401f | PSTATE_SSBS | ((!!x) << PSTATE_Imm_shift))
#define __SYS_BARRIER_INSN(CRm, op2, Rt) \
__emit_inst(0xd5000000 | sys_insn(0, 3, 3, (CRm), (op2)) | ((Rt) & 0x1f))
#define SB_BARRIER_INSN __SYS_BARRIER_INSN(0, 7, 31)
#define SYS_DC_ISW sys_insn(1, 0, 7, 6, 2)
#define SYS_DC_IGSW sys_insn(1, 0, 7, 6, 4)
#define SYS_DC_IGDSW sys_insn(1, 0, 7, 6, 6)
@@ -538,6 +543,7 @@
#define ID_AA64ISAR0_AES_SHIFT 4
/* id_aa64isar1 */
#define ID_AA64ISAR1_SB_SHIFT 36
#define ID_AA64ISAR1_LRCPC_SHIFT 20
#define ID_AA64ISAR1_FCMA_SHIFT 16
#define ID_AA64ISAR1_JSCVT_SHIFT 12


@@ -46,8 +46,7 @@ static inline void set_fs(mm_segment_t fs)
* Prevent a mispredicted conditional call to set_fs from forwarding
* the wrong address limit to access_ok under speculation.
*/
dsb(nsh);
isb();
spec_bar();
/* On user-mode return, check fs is correct */
set_thread_flag(TIF_FSCHECK);


@@ -13,21 +13,19 @@
#include <asm/insn.h>
#include <asm/probes.h>
#define MAX_UINSN_BYTES AARCH64_INSN_SIZE
#define UPROBE_SWBP_INSN BRK64_OPCODE_UPROBES
#define UPROBE_SWBP_INSN cpu_to_le32(BRK64_OPCODE_UPROBES)
#define UPROBE_SWBP_INSN_SIZE AARCH64_INSN_SIZE
#define UPROBE_XOL_SLOT_BYTES MAX_UINSN_BYTES
#define UPROBE_XOL_SLOT_BYTES AARCH64_INSN_SIZE
typedef u32 uprobe_opcode_t;
typedef __le32 uprobe_opcode_t;
struct arch_uprobe_task {
};
struct arch_uprobe {
union {
u8 insn[MAX_UINSN_BYTES];
u8 ixol[MAX_UINSN_BYTES];
__le32 insn;
__le32 ixol;
};
struct arch_probe_insn api;
bool simulate;


@@ -49,5 +49,6 @@
#define HWCAP_ILRCPC (1 << 26)
#define HWCAP_FLAGM (1 << 27)
#define HWCAP_SSBS (1 << 28)
#define HWCAP_SB (1 << 29)
#endif /* _UAPI__ASM_HWCAP_H */
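Userspace can probe the new bit through the auxiliary vector; a minimal sketch:

#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP_SB
#define HWCAP_SB (1 << 29)	/* value added in the hunk above */
#endif

int main(void)
{
	unsigned long hwcaps = getauxval(AT_HWCAP);

	printf("sb: %s\n", (hwcaps & HWCAP_SB) ? "present" : "absent");
	return 0;
}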


@@ -28,7 +28,7 @@
#include <asm/numa.h>
static int acpi_early_node_map[NR_CPUS] __initdata = { NUMA_NO_NODE };
static int acpi_early_node_map[NR_CPUS] __initdata = { [0 ... NR_CPUS - 1] = NUMA_NO_NODE };
int __init acpi_numa_get_nid(unsigned int cpu)
{

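The fix relies on a C initializer detail: { NUMA_NO_NODE } sets only element 0 and leaves the rest zero-initialized, and 0 is a valid node id, whereas the GNU range designator fills every element. A standalone sketch:

#include <stdio.h>

#define NUMA_NO_NODE (-1)

static int partial[4] = { NUMA_NO_NODE };		/* -1, 0, 0, 0 */
static int full[4] = { [0 ... 3] = NUMA_NO_NODE };	/* -1, -1, -1, -1 (GNU extension) */

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("partial[%d]=%d  full[%d]=%d\n", i, partial[i], i, full[i]);
	return 0;
}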

@@ -345,6 +345,19 @@ void arm64_set_ssbd_mitigation(bool state)
asm volatile(SET_PSTATE_SSBS(0));
else
asm volatile(SET_PSTATE_SSBS(1));
/*
* SSBS is self-synchronizing and is intended to affect
* subsequent speculative instructions, but some CPUs can
* speculate with a stale value of SSBS.
*
* Mitigate this with an unconditional speculation barrier, as
* CPUs could mis-speculate branches and bypass a conditional
* barrier.
*/
if (IS_ENABLED(CONFIG_ARM64_ERRATUM_3194386))
spec_bar();
return;
}
@@ -743,6 +756,32 @@ static struct midr_range broken_aarch32_aes[] = {
};
#endif
#ifdef CONFIG_ARM64_ERRATUM_3194386
static const struct midr_range erratum_spec_ssbs_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A715),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A720),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A725),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X1C),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X3),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X4),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X925),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N3),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3),
{}
};
#endif
const struct arm64_cpu_capabilities arm64_errata[] = {
#if defined(CONFIG_ARM64_ERRATUM_826319) || \
defined(CONFIG_ARM64_ERRATUM_827319) || \
@@ -964,6 +1003,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
CAP_MIDR_RANGE_LIST(broken_aarch32_aes),
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
},
#endif
#ifdef CONFIG_ARM64_ERRATUM_3194386
{
.desc = "SSBS not fully self-synchronizing",
.capability = ARM64_WORKAROUND_SPECULATIVE_SSBS,
ERRATA_MIDR_RANGE_LIST(erratum_spec_ssbs_list),
},
#endif
{
}


@@ -144,6 +144,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
};
static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SB_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
@@ -273,6 +274,30 @@ static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_mvfr0[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_FPROUND_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_FPSHVEC_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_FPSQRT_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_FPDIVIDE_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_FPTRAP_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_FPDP_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_FPSP_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_SIMD_SHIFT, 4, 0),
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_mvfr1[] = {
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_SIMDFMAC_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_FPHP_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_SIMDHP_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_SIMDSP_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_SIMDINT_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_SIMDLS_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_FPDNAN_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_FPFTZ_SHIFT, 4, 0),
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_mvfr2[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* FPMisc */
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* SIMDMisc */
@@ -288,10 +313,10 @@ static const struct arm64_ftr_bits ftr_dczid[] = {
static const struct arm64_ftr_bits ftr_id_isar5[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_RDM_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_CRC32_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA2_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA1_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_AES_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_CRC32_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA2_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA1_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_AES_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SEVL_SHIFT, 4, 0),
ARM64_FTR_END,
};
@@ -331,7 +356,7 @@ static const struct arm64_ftr_bits ftr_zcr[] = {
* Common ftr bits for a 32bit register with all hidden, strict
* attributes, with 4bit feature fields and a default safe value of
* 0. Covers the following 32bit registers:
* id_isar[0-4], id_mmfr[1-3], id_pfr1, mvfr[0-1]
* id_isar[1-3], id_mmfr[1-3]
*/
static const struct arm64_ftr_bits ftr_generic_32bits[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
@@ -386,8 +411,8 @@ static const struct __ftr_reg_entry {
ARM64_FTR_REG(SYS_ID_MMFR4_EL1, ftr_id_mmfr4),
/* Op1 = 0, CRn = 0, CRm = 3 */
ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_generic_32bits),
ARM64_FTR_REG(SYS_MVFR1_EL1, ftr_generic_32bits),
ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_mvfr0),
ARM64_FTR_REG(SYS_MVFR1_EL1, ftr_mvfr1),
ARM64_FTR_REG(SYS_MVFR2_EL1, ftr_mvfr2),
/* Op1 = 0, CRn = 0, CRm = 4 */
@@ -826,17 +851,39 @@ feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
return val >= entry->min_field_value;
}
static u64
read_scoped_sysreg(const struct arm64_cpu_capabilities *entry, int scope)
{
WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
if (scope == SCOPE_SYSTEM)
return read_sanitised_ftr_reg(entry->sys_reg);
else
return __read_sysreg_by_encoding(entry->sys_reg);
}
static bool
has_user_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
{
int mask;
struct arm64_ftr_reg *regp;
u64 val = read_scoped_sysreg(entry, scope);
regp = get_arm64_ftr_reg(entry->sys_reg);
if (!regp)
return false;
mask = cpuid_feature_extract_unsigned_field(regp->user_mask,
entry->field_pos);
if (!mask)
return false;
return feature_matches(val, entry);
}
static bool
has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
{
u64 val;
WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
if (scope == SCOPE_SYSTEM)
val = read_sanitised_ftr_reg(entry->sys_reg);
else
val = __read_sysreg_by_encoding(entry->sys_reg);
u64 val = read_scoped_sysreg(entry, scope);
return feature_matches(val, entry);
}
@@ -1157,6 +1204,17 @@ static void cpu_enable_ssbs(const struct arm64_cpu_capabilities *__unused)
}
#endif /* CONFIG_ARM64_SSBD */
static void user_feature_fixup(void)
{
if (cpus_have_cap(ARM64_WORKAROUND_SPECULATIVE_SSBS)) {
struct arm64_ftr_reg *regp;
regp = get_arm64_ftr_reg(SYS_ID_AA64PFR1_EL1);
if (regp)
regp->user_mask &= ~GENMASK(7, 4); /* SSBS */
}
}
static void elf_hwcap_fixup(void)
{
#ifdef CONFIG_ARM64_ERRATUM_1742098
@@ -1363,12 +1421,21 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.cpu_enable = cpu_enable_ssbs,
},
#endif
{
.desc = "Speculation barrier (SB)",
.capability = ARM64_HAS_SB,
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64ISAR1_EL1,
.field_pos = ID_AA64ISAR1_SB_SHIFT,
.sign = FTR_UNSIGNED,
.min_field_value = 1,
},
{},
};
#define HWCAP_CPUID_MATCH(reg, field, s, min_value) \
.matches = has_cpuid_feature, \
.matches = has_user_cpuid_feature, \
.sys_reg = reg, \
.field_pos = field, \
.sign = s, \
@@ -1417,6 +1484,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FCMA),
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_LRCPC),
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ILRCPC),
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_SB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SB),
HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_USCAT),
#ifdef CONFIG_ARM64_SVE
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, HWCAP_SVE),
@@ -1811,6 +1879,7 @@ void __init setup_cpu_features(void)
setup_system_capabilities();
mark_const_caps_ready();
user_feature_fixup();
setup_elf_hwcaps(arm64_elf_hwcaps);
if (system_supports_32bit_el0()) {
@@ -1844,7 +1913,7 @@ cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
/*
* We emulate only the following system register space.
* Op0 = 0x3, CRn = 0x0, Op1 = 0x0, CRm = [0, 4 - 7]
* Op0 = 0x3, CRn = 0x0, Op1 = 0x0, CRm = [0, 2 - 7]
* See Table C5-6 System instruction encodings for System register accesses,
* ARMv8 ARM(ARM DDI 0487A.f) for more details.
*/
@@ -1854,7 +1923,7 @@ static inline bool __attribute_const__ is_emulated(u32 id)
sys_reg_CRn(id) == 0x0 &&
sys_reg_Op1(id) == 0x0 &&
(sys_reg_CRm(id) == 0 ||
((sys_reg_CRm(id) >= 4) && (sys_reg_CRm(id) <= 7))));
((sys_reg_CRm(id) >= 2) && (sys_reg_CRm(id) <= 7))));
}
/*


@@ -89,6 +89,7 @@ static const char *const hwcap_str[] = {
"ilrcpc",
"flagm",
"ssbs",
"sb",
NULL
};


@@ -104,10 +104,6 @@ arm_probe_decode_insn(probe_opcode_t insn, struct arch_probe_insn *api)
aarch64_insn_is_blr(insn) ||
aarch64_insn_is_ret(insn)) {
api->handler = simulate_br_blr_ret;
} else if (aarch64_insn_is_ldr_lit(insn)) {
api->handler = simulate_ldr_literal;
} else if (aarch64_insn_is_ldrsw_lit(insn)) {
api->handler = simulate_ldrsw_literal;
} else {
/*
* Instruction cannot be stepped out-of-line and we don't
@@ -145,6 +141,17 @@ arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi)
probe_opcode_t insn = le32_to_cpu(*addr);
probe_opcode_t *scan_end = NULL;
unsigned long size = 0, offset = 0;
struct arch_probe_insn *api = &asi->api;
if (aarch64_insn_is_ldr_lit(insn)) {
api->handler = simulate_ldr_literal;
decoded = INSN_GOOD_NO_SLOT;
} else if (aarch64_insn_is_ldrsw_lit(insn)) {
api->handler = simulate_ldrsw_literal;
decoded = INSN_GOOD_NO_SLOT;
} else {
decoded = arm_probe_decode_insn(insn, &asi->api);
}
/*
* If there's a symbol defined in front of and near enough to
@@ -162,7 +169,6 @@ arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi)
else
scan_end = addr - MAX_ATOMIC_CONTEXT_SIZE;
}
decoded = arm_probe_decode_insn(insn, &asi->api);
if (decoded != INSN_REJECTED && scan_end)
if (is_probed_address_atomic(addr - 1, scan_end))


@@ -178,17 +178,15 @@ simulate_tbz_tbnz(u32 opcode, long addr, struct pt_regs *regs)
void __kprobes
simulate_ldr_literal(u32 opcode, long addr, struct pt_regs *regs)
{
u64 *load_addr;
unsigned long load_addr;
int xn = opcode & 0x1f;
int disp;
disp = ldr_displacement(opcode);
load_addr = (u64 *) (addr + disp);
load_addr = addr + ldr_displacement(opcode);
if (opcode & (1 << 30)) /* x0-x30 */
set_x_reg(regs, xn, *load_addr);
set_x_reg(regs, xn, READ_ONCE(*(u64 *)load_addr));
else /* w0-w30 */
set_w_reg(regs, xn, *load_addr);
set_w_reg(regs, xn, READ_ONCE(*(u32 *)load_addr));
instruction_pointer_set(regs, instruction_pointer(regs) + 4);
}
@@ -196,14 +194,12 @@ simulate_ldr_literal(u32 opcode, long addr, struct pt_regs *regs)
void __kprobes
simulate_ldrsw_literal(u32 opcode, long addr, struct pt_regs *regs)
{
s32 *load_addr;
unsigned long load_addr;
int xn = opcode & 0x1f;
int disp;
disp = ldr_displacement(opcode);
load_addr = (s32 *) (addr + disp);
load_addr = addr + ldr_displacement(opcode);
set_x_reg(regs, xn, *load_addr);
set_x_reg(regs, xn, READ_ONCE(*(s32 *)load_addr));
instruction_pointer_set(regs, instruction_pointer(regs) + 4);
}


@@ -45,7 +45,7 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
else if (!IS_ALIGNED(addr, AARCH64_INSN_SIZE))
return -EINVAL;
insn = *(probe_opcode_t *)(&auprobe->insn[0]);
insn = le32_to_cpu(auprobe->insn);
switch (arm_probe_decode_insn(insn, &auprobe->api)) {
case INSN_REJECTED:
@@ -111,7 +111,7 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
if (!auprobe->simulate)
return false;
insn = *(probe_opcode_t *)(&auprobe->insn[0]);
insn = le32_to_cpu(auprobe->insn);
addr = instruction_pointer(regs);
if (auprobe->api.handler)


@@ -10,18 +10,15 @@ include $(srctree)/lib/vdso/Makefile
# Same as cc-*option, but using CC_COMPAT instead of CC
ifeq ($(CONFIG_CC_IS_CLANG), y)
CC_COMPAT_CLANG_FLAGS := --target=$(notdir $(CROSS_COMPILE_COMPAT:%-=%))
CC_COMPAT ?= $(CC)
CC_COMPAT += $(CC_COMPAT_CLANG_FLAGS)
ifeq ($(LLVM),1)
LD_COMPAT ?= $(LD)
else
LD_COMPAT ?= $(CROSS_COMPILE_COMPAT)ld
endif
CC_COMPAT += --target=arm-linux-gnueabi
else
CC_COMPAT ?= $(CROSS_COMPILE_COMPAT)gcc
endif
ifeq ($(CONFIG_LD_IS_LLD), y)
LD_COMPAT ?= $(LD)
else
LD_COMPAT ?= $(CROSS_COMPILE_COMPAT)ld
endif
@@ -47,10 +44,6 @@ VDSO_CPPFLAGS += $(LINUXINCLUDE)
# Common C and assembly flags
# From top-level Makefile
VDSO_CAFLAGS := $(VDSO_CPPFLAGS)
ifneq ($(shell $(CC_COMPAT) --version 2>&1 | head -n 1 | grep clang),)
VDSO_CAFLAGS += --target=$(notdir $(CROSS_COMPILE_COMPAT:%-=%))
endif
VDSO_CAFLAGS += $(call cc32-option,-fno-PIE)
ifdef CONFIG_DEBUG_INFO
VDSO_CAFLAGS += -g


@@ -181,6 +181,15 @@ int __init amiga_parse_bootinfo(const struct bi_record *record)
dev->slotsize = be16_to_cpu(cd->cd_SlotSize);
dev->boardaddr = be32_to_cpu(cd->cd_BoardAddr);
dev->boardsize = be32_to_cpu(cd->cd_BoardSize);
/* CS-LAB Warp 1260 workaround */
if (be16_to_cpu(dev->rom.er_Manufacturer) == ZORRO_MANUF(ZORRO_PROD_CSLAB_WARP_1260) &&
dev->rom.er_Product == ZORRO_PROD(ZORRO_PROD_CSLAB_WARP_1260)) {
/* turn off all interrupts */
pr_info("Warp 1260 card detected: applying interrupt storm workaround\n");
*(uint32_t *)(dev->boardaddr + 0x1000) = 0xfff;
}
} else
pr_warn("amiga_parse_bootinfo: too many AutoConfig devices\n");
#endif /* CONFIG_ZORRO */


@@ -302,11 +302,7 @@ void __init atari_init_IRQ(void)
if (ATARIHW_PRESENT(SCU)) {
/* init the SCU if present */
tt_scu.sys_mask = 0x10; /* enable VBL (for the cursor) and
* disable HSYNC interrupts (who
* needs them?) MFP and SCC are
* enabled in VME mask
*/
tt_scu.sys_mask = 0x0; /* disable all interrupts */
tt_scu.vme_mask = 0x60; /* enable MFP and SCC ints */
} else {
/* If no SCU and no Hades, the HSYNC interrupt needs to be


@@ -33,7 +33,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
x = tmp;
break;
default:
tmp = __invalid_xchg_size(x, ptr, size);
x = __invalid_xchg_size(x, ptr, size);
break;
}


@@ -289,11 +289,6 @@ asmlinkage void __init mmu_init(void)
{
unsigned int kstart, ksize;
if (!memblock.reserved.cnt) {
pr_emerg("Error memory count\n");
machine_restart(NULL);
}
if ((u32) memblock.memory.regions[0].size < 0x400000) {
pr_emerg("Memory must be greater than 4MB\n");
machine_restart(NULL);


@@ -232,6 +232,10 @@ GCR_ACCESSOR_RO(32, 0x0d0, gic_status)
GCR_ACCESSOR_RO(32, 0x0f0, cpc_status)
#define CM_GCR_CPC_STATUS_EX BIT(0)
/* GCR_ACCESS - Controls core/IOCU access to GCRs */
GCR_ACCESSOR_RW(32, 0x120, access_cm3)
#define CM_GCR_ACCESS_ACCESSEN GENMASK(7, 0)
/* GCR_L2_CONFIG - Indicates L2 cache configuration when Config5.L2C=1 */
GCR_ACCESSOR_RW(32, 0x130, l2_config)
#define CM_GCR_L2_CONFIG_BYPASS BIT(20)


@@ -233,7 +233,10 @@ static void boot_core(unsigned int core, unsigned int vpe_id)
write_gcr_co_reset_ext_base(CM_GCR_Cx_RESET_EXT_BASE_UEB);
/* Ensure the core can access the GCRs */
if (mips_cm_revision() < CM_REV_CM3)
set_gcr_access(1 << core);
else
set_gcr_access_cm3(1 << core);
if (mips_cpc_present()) {
/* Reset the core */

arch/mips/pci/pcie-octeon.c (changed from executable to normal file; no content changes)


@@ -287,6 +287,9 @@ void calibrate_delay(void)
void __init setup_arch(char **cmdline_p)
{
/* setup memblock allocator */
setup_memory();
unflatten_and_copy_device_tree();
setup_cpuinfo();
@@ -311,9 +314,6 @@ void __init setup_arch(char **cmdline_p)
initrd_below_start_ok = 1;
#endif
/* setup memblock allocator */
setup_memory();
/* paging_init() sets up the MMU and marks all pages as reserved */
paging_init();


@@ -1089,8 +1089,7 @@ ENTRY_CFI(intr_save) /* for os_hpmc */
STREG %r16, PT_ISR(%r29)
STREG %r17, PT_IOR(%r29)
#if 0 && defined(CONFIG_64BIT)
/* Revisit when we have 64-bit code above 4Gb */
#if defined(CONFIG_64BIT)
b,n intr_save2
skip_save_ior:
@@ -1098,8 +1097,7 @@ skip_save_ior:
* need to adjust iasq/iaoq here in the same way we adjusted isr/ior
* above.
*/
extrd,u,* %r8,PSW_W_BIT,1,%r1
cmpib,COND(=),n 1,%r1,intr_save2
bb,COND(>=),n %r8,PSW_W_BIT,intr_save2
LDREG PT_IASQ0(%r29), %r16
LDREG PT_IAOQ0(%r29), %r17
/* adjust iasq/iaoq */


@@ -217,10 +217,10 @@ linux_gateway_entry:
#ifdef CONFIG_64BIT
ldil L%sys_call_table, %r1
or,= %r2,%r2,%r2
addil L%(sys_call_table64-sys_call_table), %r1
or,ev %r2,%r2,%r2
ldil L%sys_call_table64, %r1
ldo R%sys_call_table(%r1), %r19
or,= %r2,%r2,%r2
or,ev %r2,%r2,%r2
ldo R%sys_call_table64(%r1), %r19
#else
load32 sys_call_table, %r19
@@ -355,10 +355,10 @@ tracesys_next:
extrd,u %r19,63,1,%r2 /* W hidden in bottom bit */
ldil L%sys_call_table, %r1
or,= %r2,%r2,%r2
addil L%(sys_call_table64-sys_call_table), %r1
or,ev %r2,%r2,%r2
ldil L%sys_call_table64, %r1
ldo R%sys_call_table(%r1), %r19
or,= %r2,%r2,%r2
or,ev %r2,%r2,%r2
ldo R%sys_call_table64(%r1), %r19
#else
load32 sys_call_table, %r19
@@ -930,6 +930,8 @@ ENTRY(sys_call_table)
END(sys_call_table)
#ifdef CONFIG_64BIT
#undef __SYSCALL_WITH_COMPAT
#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native)
.align 8
ENTRY(sys_call_table64)
#define SYSCALL_TABLE_64BIT


@@ -114,8 +114,11 @@ static void *simple_realloc(void *ptr, unsigned long size)
return ptr;
new = simple_malloc(size);
if (new) {
memcpy(new, ptr, p->size);
simple_free(ptr);
}
return new;
}
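The hunk above gives simple_realloc() the usual realloc() contract: on allocation failure the old block is neither read nor freed and remains valid. The matching caller-side pattern, as a generic sketch:

#include <stdlib.h>

/* Generic pattern for realloc-style APIs: never overwrite the only
 * pointer to the old block with the (possibly NULL) result. */
static int grow_buffer(char **buf, size_t new_size)
{
	char *tmp = realloc(*buf, new_size);

	if (!tmp)
		return -1;	/* *buf is still valid and still owned by the caller */
	*buf = tmp;
	return 0;
}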


@@ -289,6 +289,7 @@ int __init opal_event_init(void)
name, NULL);
if (rc) {
pr_warn("Error %d requesting OPAL irq %d\n", rc, (int)r->start);
kfree(name);
continue;
}
}


@@ -240,6 +240,8 @@ static int __init icp_native_map_one_cpu(int hw_id, unsigned long addr,
rname = kasprintf(GFP_KERNEL, "CPU %d [0x%x] Interrupt Presentation",
cpu, hw_id);
if (!rname)
return -ENOMEM;
if (!request_mem_region(addr, size, rname)) {
pr_warn("icp_native: Could not reserve ICP MMIO for CPU %d, interrupt server #0x%x\n",
cpu, hw_id);


@@ -133,32 +133,21 @@ int print_insn_powerpc (unsigned long insn, unsigned long memaddr)
bool insn_is_short;
ppc_cpu_t dialect;
dialect = PPC_OPCODE_PPC | PPC_OPCODE_COMMON
| PPC_OPCODE_64 | PPC_OPCODE_POWER4 | PPC_OPCODE_ALTIVEC;
dialect = PPC_OPCODE_PPC | PPC_OPCODE_COMMON;
if (cpu_has_feature(CPU_FTRS_POWER5))
dialect |= PPC_OPCODE_POWER5;
if (IS_ENABLED(CONFIG_PPC64))
dialect |= PPC_OPCODE_64 | PPC_OPCODE_POWER4 | PPC_OPCODE_CELL |
PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_POWER7 | PPC_OPCODE_POWER8 |
PPC_OPCODE_POWER9;
if (cpu_has_feature(CPU_FTRS_CELL))
dialect |= (PPC_OPCODE_CELL | PPC_OPCODE_ALTIVEC);
if (cpu_has_feature(CPU_FTR_TM))
dialect |= PPC_OPCODE_HTM;
if (cpu_has_feature(CPU_FTRS_POWER6))
dialect |= (PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_ALTIVEC);
if (cpu_has_feature(CPU_FTR_ALTIVEC))
dialect |= PPC_OPCODE_ALTIVEC | PPC_OPCODE_ALTIVEC2;
if (cpu_has_feature(CPU_FTRS_POWER7))
dialect |= (PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_POWER7
| PPC_OPCODE_ALTIVEC | PPC_OPCODE_VSX);
if (cpu_has_feature(CPU_FTRS_POWER8))
dialect |= (PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_POWER7
| PPC_OPCODE_POWER8 | PPC_OPCODE_HTM
| PPC_OPCODE_ALTIVEC | PPC_OPCODE_ALTIVEC2 | PPC_OPCODE_VSX);
if (cpu_has_feature(CPU_FTRS_POWER9))
dialect |= (PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_POWER7
| PPC_OPCODE_POWER8 | PPC_OPCODE_POWER9 | PPC_OPCODE_HTM
| PPC_OPCODE_ALTIVEC | PPC_OPCODE_ALTIVEC2
| PPC_OPCODE_VSX | PPC_OPCODE_VSX3);
if (cpu_has_feature(CPU_FTR_VSX))
dialect |= PPC_OPCODE_VSX | PPC_OPCODE_VSX3;
/* Get the major opcode of the insn. */
opcode = NULL;


@@ -84,6 +84,11 @@ config GENERIC_CSUM
config GENERIC_HWEIGHT
def_bool y
config ILLEGAL_POINTER_VALUE
hex
default 0 if 32BIT
default 0xdead000000000000 if 64BIT
config PGTABLE_LEVELS
int
default 3 if 64BIT


@@ -53,9 +53,11 @@ static inline int test_facility(unsigned long nr)
unsigned long facilities_als[] = { FACILITIES_ALS };
if (__builtin_constant_p(nr) && nr < sizeof(facilities_als) * 8) {
if (__test_facility(nr, &facilities_als))
if (__test_facility(nr, &facilities_als)) {
if (!__is_defined(__DECOMPRESSOR))
return 1;
}
}
return __test_facility(nr, &S390_lowcore.stfle_fac_list);
}


@@ -1360,7 +1360,7 @@ static int aux_output_begin(struct perf_output_handle *handle,
unsigned long head, base, offset;
struct hws_trailer_entry *te;
if (WARN_ON_ONCE(handle->head & ~PAGE_MASK))
if (handle->head & ~PAGE_MASK)
return -EINVAL;
aux->head = handle->head >> PAGE_SHIFT;
@@ -1528,7 +1528,7 @@ static void hw_collect_aux(struct cpu_hw_sf *cpuhw)
unsigned long num_sdb;
aux = perf_get_aux(handle);
if (WARN_ON_ONCE(!aux))
if (!aux)
return;
/* Inform user space new data arrived */
@@ -1547,7 +1547,7 @@ static void hw_collect_aux(struct cpu_hw_sf *cpuhw)
debug_sprintf_event(sfdbg, 1, "AUX buffer used up\n");
break;
}
if (WARN_ON_ONCE(!aux))
if (!aux)
return;
/* Update head and alert_mark to new position */
@@ -1746,12 +1746,8 @@ static void cpumsf_pmu_start(struct perf_event *event, int flags)
{
struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
if (!(event->hw.state & PERF_HES_STOPPED))
return;
if (flags & PERF_EF_RELOAD)
WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
perf_pmu_disable(event->pmu);
event->hw.state = 0;
cpuhw->lsctl.cs = 1;


@@ -78,7 +78,7 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
vcpu->stat.diagnose_258++;
if (vcpu->run->s.regs.gprs[rx] & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm));
rc = read_guest_real(vcpu, vcpu->run->s.regs.gprs[rx], &parm, sizeof(parm));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
if (parm.parm_version != 2 || parm.parm_len < 5 || parm.code != 0x258)


@@ -794,46 +794,102 @@ static int low_address_protection_enabled(struct kvm_vcpu *vcpu,
return 1;
}
static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
unsigned long *pages, unsigned long nr_pages,
/**
* guest_range_to_gpas() - Calculate guest physical addresses of page fragments
* covering a logical range
* @vcpu: virtual cpu
* @ga: guest address, start of range
* @ar: access register
* @gpas: output argument, may be NULL
* @len: length of range in bytes
* @asce: address-space-control element to use for translation
* @mode: access mode
*
* Translate a logical range to a series of guest absolute addresses,
* such that the concatenation of page fragments starting at each gpa make up
* the whole range.
* The translation is performed as if done by the cpu for the given @asce, @ar,
* @mode and state of the @vcpu.
* If the translation causes an exception, its program interruption code is
* returned and the &struct kvm_s390_pgm_info pgm member of @vcpu is modified
* such that a subsequent call to kvm_s390_inject_prog_vcpu() will inject
* a correct exception into the guest.
* The resulting gpas are stored into @gpas, unless it is NULL.
*
* Note: All fragments except the first one start at the beginning of a page.
* When deriving the boundaries of a fragment from a gpa, all but the last
* fragment end at the end of the page.
*
* Return:
* * 0 - success
* * <0 - translation could not be performed, for example if guest
* memory could not be accessed
* * >0 - an access exception occurred. In this case the returned value
* is the program interruption code and the contents of pgm may
* be used to inject an exception into the guest.
*/
static int guest_range_to_gpas(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
unsigned long *gpas, unsigned long len,
const union asce asce, enum gacc_mode mode)
{
psw_t *psw = &vcpu->arch.sie_block->gpsw;
unsigned int offset = offset_in_page(ga);
unsigned int fragment_len;
int lap_enabled, rc = 0;
enum prot_type prot;
unsigned long gpa;
lap_enabled = low_address_protection_enabled(vcpu, asce);
while (nr_pages) {
while (min(PAGE_SIZE - offset, len) > 0) {
fragment_len = min(PAGE_SIZE - offset, len);
ga = kvm_s390_logical_to_effective(vcpu, ga);
if (mode == GACC_STORE && lap_enabled && is_low_address(ga))
return trans_exc(vcpu, PGM_PROTECTION, ga, ar, mode,
PROT_TYPE_LA);
ga &= PAGE_MASK;
if (psw_bits(*psw).dat) {
rc = guest_translate(vcpu, ga, pages, asce, mode, &prot);
rc = guest_translate(vcpu, ga, &gpa, asce, mode, &prot);
if (rc < 0)
return rc;
} else {
*pages = kvm_s390_real_to_abs(vcpu, ga);
if (kvm_is_error_gpa(vcpu->kvm, *pages))
gpa = kvm_s390_real_to_abs(vcpu, ga);
if (kvm_is_error_gpa(vcpu->kvm, gpa))
rc = PGM_ADDRESSING;
}
if (rc)
return trans_exc(vcpu, rc, ga, ar, mode, prot);
ga += PAGE_SIZE;
pages++;
nr_pages--;
if (gpas)
*gpas++ = gpa;
offset = 0;
ga += fragment_len;
len -= fragment_len;
}
return 0;
}
static int access_guest_page(struct kvm *kvm, enum gacc_mode mode, gpa_t gpa,
void *data, unsigned int len)
{
const unsigned int offset = offset_in_page(gpa);
const gfn_t gfn = gpa_to_gfn(gpa);
int rc;
if (!gfn_to_memslot(kvm, gfn))
return PGM_ADDRESSING;
if (mode == GACC_STORE)
rc = kvm_write_guest_page(kvm, gfn, data, offset, len);
else
rc = kvm_read_guest_page(kvm, gfn, data, offset, len);
return rc;
}
int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
unsigned long len, enum gacc_mode mode)
{
psw_t *psw = &vcpu->arch.sie_block->gpsw;
unsigned long _len, nr_pages, gpa, idx;
unsigned long pages_array[2];
unsigned long *pages;
unsigned long nr_pages, idx;
unsigned long gpa_array[2];
unsigned int fragment_len;
unsigned long *gpas;
int need_ipte_lock;
union asce asce;
int rc;
@@ -845,50 +901,45 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
if (rc)
return rc;
nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;
pages = pages_array;
if (nr_pages > ARRAY_SIZE(pages_array))
pages = vmalloc(array_size(nr_pages, sizeof(unsigned long)));
if (!pages)
gpas = gpa_array;
if (nr_pages > ARRAY_SIZE(gpa_array))
gpas = vmalloc(array_size(nr_pages, sizeof(unsigned long)));
if (!gpas)
return -ENOMEM;
need_ipte_lock = psw_bits(*psw).dat && !asce.r;
if (need_ipte_lock)
ipte_lock(vcpu);
rc = guest_page_range(vcpu, ga, ar, pages, nr_pages, asce, mode);
rc = guest_range_to_gpas(vcpu, ga, ar, gpas, len, asce, mode);
for (idx = 0; idx < nr_pages && !rc; idx++) {
gpa = *(pages + idx) + (ga & ~PAGE_MASK);
_len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
if (mode == GACC_STORE)
rc = kvm_write_guest(vcpu->kvm, gpa, data, _len);
else
rc = kvm_read_guest(vcpu->kvm, gpa, data, _len);
len -= _len;
ga += _len;
data += _len;
fragment_len = min(PAGE_SIZE - offset_in_page(gpas[idx]), len);
rc = access_guest_page(vcpu->kvm, mode, gpas[idx], data, fragment_len);
len -= fragment_len;
data += fragment_len;
}
if (need_ipte_lock)
ipte_unlock(vcpu);
if (nr_pages > ARRAY_SIZE(pages_array))
vfree(pages);
if (nr_pages > ARRAY_SIZE(gpa_array))
vfree(gpas);
return rc;
}
int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
void *data, unsigned long len, enum gacc_mode mode)
{
unsigned long _len, gpa;
unsigned int fragment_len;
unsigned long gpa;
int rc = 0;
while (len && !rc) {
gpa = kvm_s390_real_to_abs(vcpu, gra);
_len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
if (mode)
rc = write_guest_abs(vcpu, gpa, data, _len);
else
rc = read_guest_abs(vcpu, gpa, data, _len);
len -= _len;
gra += _len;
data += _len;
fragment_len = min(PAGE_SIZE - offset_in_page(gpa), len);
rc = access_guest_page(vcpu->kvm, mode, gpa, data, fragment_len);
len -= fragment_len;
gra += fragment_len;
data += fragment_len;
}
if (rc > 0)
vcpu->arch.pgm.code = rc;
return rc;
}
@@ -904,8 +955,6 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
unsigned long *gpa, enum gacc_mode mode)
{
psw_t *psw = &vcpu->arch.sie_block->gpsw;
enum prot_type prot;
union asce asce;
int rc;
@@ -913,23 +962,7 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
rc = get_vcpu_asce(vcpu, &asce, gva, ar, mode);
if (rc)
return rc;
if (is_low_address(gva) && low_address_protection_enabled(vcpu, asce)) {
if (mode == GACC_STORE)
return trans_exc(vcpu, PGM_PROTECTION, gva, 0,
mode, PROT_TYPE_LA);
}
if (psw_bits(*psw).dat && !asce.r) { /* Use DAT? */
rc = guest_translate(vcpu, gva, gpa, asce, mode, &prot);
if (rc > 0)
return trans_exc(vcpu, rc, gva, 0, mode, prot);
} else {
*gpa = kvm_s390_real_to_abs(vcpu, gva);
if (kvm_is_error_gpa(vcpu->kvm, *gpa))
return trans_exc(vcpu, rc, gva, PGM_ADDRESSING, mode, 0);
}
return rc;
return guest_range_to_gpas(vcpu, gva, ar, gpa, 1, asce, mode);
}
/**
@@ -938,17 +971,14 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
unsigned long length, enum gacc_mode mode)
{
unsigned long gpa;
unsigned long currlen;
union asce asce;
int rc = 0;
rc = get_vcpu_asce(vcpu, &asce, gva, ar, mode);
if (rc)
return rc;
ipte_lock(vcpu);
while (length > 0 && !rc) {
currlen = min(length, PAGE_SIZE - (gva % PAGE_SIZE));
rc = guest_translate_address(vcpu, gva, ar, &gpa, mode);
gva += currlen;
length -= currlen;
}
rc = guest_range_to_gpas(vcpu, gva, ar, NULL, length, asce, mode);
ipte_unlock(vcpu);
return rc;


@@ -344,11 +344,12 @@ int read_guest_abs(struct kvm_vcpu *vcpu, unsigned long gpa, void *data,
* @len: number of bytes to copy
*
* Copy @len bytes from @data (kernel space) to @gra (guest real address).
* It is up to the caller to ensure that the entire guest memory range is
* valid memory before calling this function.
* Guest low address and key protection are not checked.
*
* Returns zero on success or -EFAULT on error.
* Returns zero on success, -EFAULT when copying from @data failed, or
* PGM_ADDRESSING in case @gra is outside a memslot. In this case, pgm check info
* is also stored to allow injecting into the guest (if applicable) using
* kvm_s390_inject_prog_cond().
*
* If an error occurs data may have been copied partially to guest memory.
*/
@@ -367,11 +368,12 @@ int write_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
* @len: number of bytes to copy
*
* Copy @len bytes from @gra (guest real address) to @data (kernel space).
* It is up to the caller to ensure that the entire guest memory range is
* valid memory before calling this function.
* Guest key protection is not checked.
*
* Returns zero on success or -EFAULT on error.
* Returns zero on success, -EFAULT when copying to @data failed, or
* PGM_ADDRESSING in case @gra is outside a memslot. In this case, pgm check info
* is also stored to allow injecting into the guest (if applicable) using
* kvm_s390_inject_prog_cond().
*
* If an error occurs data may have been copied partially to kernel space.
*/


@@ -98,11 +98,12 @@ static long cmm_alloc_pages(long nr, long *counter,
(*counter)++;
spin_unlock(&cmm_lock);
nr--;
cond_resched();
}
return nr;
}
static long cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
static long __cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
{
struct cmm_page_array *pa;
unsigned long addr;
@@ -126,6 +127,21 @@ static long cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
return nr;
}
static long cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
{
long inc = 0;
while (nr) {
inc = min(256L, nr);
nr -= inc;
inc = __cmm_free_pages(inc, counter, list);
if (inc)
break;
cond_resched();
}
return nr + inc;
}
static int cmm_oom_notify(struct notifier_block *self,
unsigned long dummy, void *parm)
{


@@ -247,6 +247,7 @@ void prom_sun4v_guest_soft_state(void);
int prom_ihandle2path(int handle, char *buffer, int bufsize);
/* Client interface level routines. */
void prom_cif_init(void *cif_handler);
void p1275_cmd_direct(unsigned long *);
#endif /* !(__SPARC64_OPLIB_H) */


@@ -26,9 +26,6 @@ phandle prom_chosen_node;
* routines in the prom library.
* It gets passed the pointer to the PROM vector.
*/
extern void prom_cif_init(void *);
void __init prom_init(void *cif_handler)
{
phandle node;


@@ -49,7 +49,7 @@ void p1275_cmd_direct(unsigned long *args)
local_irq_restore(flags);
}
void prom_cif_init(void *cif_handler, void *cif_stack)
void prom_cif_init(void *cif_handler)
{
p1275buf.prom_cif_handler = (void (*)(long *))cif_handler;
}


@@ -391,6 +391,7 @@ int setup_one_line(struct line *lines, int n, char *init,
parse_chan_pair(NULL, line, n, opts, error_out);
err = 0;
}
*error_out = "configured as 'none'";
} else {
char *new = kstrdup(init, GFP_KERNEL);
if (!new) {
@@ -414,6 +415,7 @@ int setup_one_line(struct line *lines, int n, char *init,
}
}
if (err) {
*error_out = "failed to parse channel pair";
line->init_str = NULL;
line->valid = 0;
kfree(new);


@@ -75,7 +75,7 @@ static struct pt_cap_desc {
PT_CAP(psb_periods, 1, CPUID_EBX, 0xffff0000),
};
static u32 pt_cap_get(enum pt_capabilities cap)
u32 intel_pt_validate_hw_cap(enum pt_capabilities cap)
{
struct pt_cap_desc *cd = &pt_caps[cap];
u32 c = pt_pmu.caps[cd->leaf * PT_CPUID_REGS_NUM + cd->reg];
@@ -83,6 +83,7 @@ static u32 pt_cap_get(enum pt_capabilities cap)
return (c & cd->mask) >> shift;
}
EXPORT_SYMBOL_GPL(intel_pt_validate_hw_cap);
static ssize_t pt_cap_show(struct device *cdev,
struct device_attribute *attr,
@@ -92,7 +93,7 @@ static ssize_t pt_cap_show(struct device *cdev,
container_of(attr, struct dev_ext_attribute, attr);
enum pt_capabilities cap = (long)ea->var;
return snprintf(buf, PAGE_SIZE, "%x\n", pt_cap_get(cap));
return snprintf(buf, PAGE_SIZE, "%x\n", intel_pt_validate_hw_cap(cap));
}
static struct attribute_group pt_cap_group = {
@@ -310,16 +311,16 @@ static bool pt_event_valid(struct perf_event *event)
return false;
if (config & RTIT_CTL_CYC_PSB) {
if (!pt_cap_get(PT_CAP_psb_cyc))
if (!intel_pt_validate_hw_cap(PT_CAP_psb_cyc))
return false;
allowed = pt_cap_get(PT_CAP_psb_periods);
allowed = intel_pt_validate_hw_cap(PT_CAP_psb_periods);
requested = (config & RTIT_CTL_PSB_FREQ) >>
RTIT_CTL_PSB_FREQ_OFFSET;
if (requested && (!(allowed & BIT(requested))))
return false;
allowed = pt_cap_get(PT_CAP_cycle_thresholds);
allowed = intel_pt_validate_hw_cap(PT_CAP_cycle_thresholds);
requested = (config & RTIT_CTL_CYC_THRESH) >>
RTIT_CTL_CYC_THRESH_OFFSET;
if (requested && (!(allowed & BIT(requested))))
@@ -334,10 +335,10 @@ static bool pt_event_valid(struct perf_event *event)
* Spec says that setting mtc period bits while mtc bit in
* CPUID is 0 will #GP, so better safe than sorry.
*/
if (!pt_cap_get(PT_CAP_mtc))
if (!intel_pt_validate_hw_cap(PT_CAP_mtc))
return false;
allowed = pt_cap_get(PT_CAP_mtc_periods);
allowed = intel_pt_validate_hw_cap(PT_CAP_mtc_periods);
if (!allowed)
return false;
@@ -349,11 +350,11 @@ static bool pt_event_valid(struct perf_event *event)
}
if (config & RTIT_CTL_PWR_EVT_EN &&
!pt_cap_get(PT_CAP_power_event_trace))
!intel_pt_validate_hw_cap(PT_CAP_power_event_trace))
return false;
if (config & RTIT_CTL_PTW) {
if (!pt_cap_get(PT_CAP_ptwrite))
if (!intel_pt_validate_hw_cap(PT_CAP_ptwrite))
return false;
/* FUPonPTW without PTW doesn't make sense */
@@ -545,16 +546,8 @@ static void pt_config_buffer(void *buf, unsigned int topa_idx,
wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, reg);
}
/*
* Keep ToPA table-related metadata on the same page as the actual table,
* taking up a few words from the top
*/
#define TENTS_PER_PAGE (((PAGE_SIZE - 40) / sizeof(struct topa_entry)) - 1)
/**
* struct topa - page-sized ToPA table with metadata at the top
* @table: actual ToPA table entries, as understood by PT hardware
* struct topa - ToPA metadata
* @list: linkage to struct pt_buffer's list of tables
* @phys: physical address of this page
* @offset: offset of the first entry in this table in the buffer
@@ -562,7 +555,6 @@ static void pt_config_buffer(void *buf, unsigned int topa_idx,
* @last: index of the last initialized entry in this table
*/
struct topa {
struct topa_entry table[TENTS_PER_PAGE];
struct list_head list;
u64 phys;
u64 offset;
@@ -570,8 +562,40 @@ struct topa {
int last;
};
/*
* Keep ToPA table-related metadata on the same page as the actual table,
* taking up a few words from the top
*/
#define TENTS_PER_PAGE \
((PAGE_SIZE - sizeof(struct topa)) / sizeof(struct topa_entry))
/**
* struct topa_page - page-sized ToPA table with metadata at the top
* @table: actual ToPA table entries, as understood by PT hardware
* @topa: metadata
*/
struct topa_page {
struct topa_entry table[TENTS_PER_PAGE];
struct topa topa;
};
static inline struct topa_page *topa_to_page(struct topa *topa)
{
return container_of(topa, struct topa_page, topa);
}
static inline struct topa_page *topa_entry_to_page(struct topa_entry *te)
{
return (struct topa_page *)((unsigned long)te & PAGE_MASK);
}
/* make -1 stand for the last table entry */
#define TOPA_ENTRY(t, i) ((i) == -1 ? &(t)->table[(t)->last] : &(t)->table[(i)])
#define TOPA_ENTRY(t, i) \
((i) == -1 \
? &topa_to_page(t)->table[(t)->last] \
: &topa_to_page(t)->table[(i)])
#define TOPA_ENTRY_SIZE(t, i) (sizes(TOPA_ENTRY((t), (i))->size))
/**
* topa_alloc() - allocate page-sized ToPA table
@@ -583,27 +607,27 @@ struct topa {
static struct topa *topa_alloc(int cpu, gfp_t gfp)
{
int node = cpu_to_node(cpu);
struct topa *topa;
struct topa_page *tp;
struct page *p;
p = alloc_pages_node(node, gfp | __GFP_ZERO, 0);
if (!p)
return NULL;
topa = page_address(p);
topa->last = 0;
topa->phys = page_to_phys(p);
tp = page_address(p);
tp->topa.last = 0;
tp->topa.phys = page_to_phys(p);
/*
* In case of singe-entry ToPA, always put the self-referencing END
* link as the 2nd entry in the table
*/
if (!pt_cap_get(PT_CAP_topa_multiple_entries)) {
TOPA_ENTRY(topa, 1)->base = topa->phys >> TOPA_SHIFT;
TOPA_ENTRY(topa, 1)->end = 1;
if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) {
TOPA_ENTRY(&tp->topa, 1)->base = tp->topa.phys >> TOPA_SHIFT;
TOPA_ENTRY(&tp->topa, 1)->end = 1;
}
return topa;
return &tp->topa;
}
/**
@@ -638,7 +662,7 @@ static void topa_insert_table(struct pt_buffer *buf, struct topa *topa)
topa->offset = last->offset + last->size;
buf->last = topa;
if (!pt_cap_get(PT_CAP_topa_multiple_entries))
if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
return;
BUG_ON(last->last != TENTS_PER_PAGE - 1);
@@ -654,7 +678,7 @@ static void topa_insert_table(struct pt_buffer *buf, struct topa *topa)
static bool topa_table_full(struct topa *topa)
{
/* single-entry ToPA is a special case */
if (!pt_cap_get(PT_CAP_topa_multiple_entries))
if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
return !!topa->last;
return topa->last == TENTS_PER_PAGE - 1;
@@ -690,7 +714,8 @@ static int topa_insert_pages(struct pt_buffer *buf, gfp_t gfp)
TOPA_ENTRY(topa, -1)->base = page_to_phys(p) >> TOPA_SHIFT;
TOPA_ENTRY(topa, -1)->size = order;
if (!buf->snapshot && !pt_cap_get(PT_CAP_topa_multiple_entries)) {
if (!buf->snapshot &&
!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) {
TOPA_ENTRY(topa, -1)->intr = 1;
TOPA_ENTRY(topa, -1)->stop = 1;
}
@@ -712,22 +737,23 @@ static void pt_topa_dump(struct pt_buffer *buf)
struct topa *topa;
list_for_each_entry(topa, &buf->tables, list) {
struct topa_page *tp = topa_to_page(topa);
int i;
pr_debug("# table @%p (%016Lx), off %llx size %zx\n", topa->table,
pr_debug("# table @%p (%016Lx), off %llx size %zx\n", tp->table,
topa->phys, topa->offset, topa->size);
for (i = 0; i < TENTS_PER_PAGE; i++) {
pr_debug("# entry @%p (%lx sz %u %c%c%c) raw=%16llx\n",
&topa->table[i],
(unsigned long)topa->table[i].base << TOPA_SHIFT,
sizes(topa->table[i].size),
topa->table[i].end ? 'E' : ' ',
topa->table[i].intr ? 'I' : ' ',
topa->table[i].stop ? 'S' : ' ',
*(u64 *)&topa->table[i]);
if ((pt_cap_get(PT_CAP_topa_multiple_entries) &&
topa->table[i].stop) ||
topa->table[i].end)
&tp->table[i],
(unsigned long)tp->table[i].base << TOPA_SHIFT,
sizes(tp->table[i].size),
tp->table[i].end ? 'E' : ' ',
tp->table[i].intr ? 'I' : ' ',
tp->table[i].stop ? 'S' : ' ',
*(u64 *)&tp->table[i]);
if ((intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) &&
tp->table[i].stop) ||
tp->table[i].end)
break;
}
}
@@ -770,7 +796,7 @@ static void pt_update_head(struct pt *pt)
/* offset of the current output region within this table */
for (topa_idx = 0; topa_idx < buf->cur_idx; topa_idx++)
base += sizes(buf->cur->table[topa_idx].size);
base += TOPA_ENTRY_SIZE(buf->cur, topa_idx);
if (buf->snapshot) {
local_set(&buf->data_size, base);
@@ -790,7 +816,7 @@ static void pt_update_head(struct pt *pt)
*/
static void *pt_buffer_region(struct pt_buffer *buf)
{
return phys_to_virt(buf->cur->table[buf->cur_idx].base << TOPA_SHIFT);
return phys_to_virt((phys_addr_t)TOPA_ENTRY(buf->cur, buf->cur_idx)->base << TOPA_SHIFT);
}
/**
@@ -799,7 +825,7 @@ static void *pt_buffer_region(struct pt_buffer *buf)
*/
static size_t pt_buffer_region_size(struct pt_buffer *buf)
{
return sizes(buf->cur->table[buf->cur_idx].size);
return TOPA_ENTRY_SIZE(buf->cur, buf->cur_idx);
}
/**
@@ -828,8 +854,8 @@ static void pt_handle_status(struct pt *pt)
* means we are already losing data; need to let the decoder
* know.
*/
if (!pt_cap_get(PT_CAP_topa_multiple_entries) ||
buf->output_off == sizes(TOPA_ENTRY(buf->cur, buf->cur_idx)->size)) {
if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) ||
buf->output_off == pt_buffer_region_size(buf)) {
perf_aux_output_flag(&pt->handle,
PERF_AUX_FLAG_TRUNCATED);
advance++;
@@ -840,7 +866,8 @@ static void pt_handle_status(struct pt *pt)
* Also on single-entry ToPA implementations, interrupt will come
* before the output reaches its output region's boundary.
*/
if (!pt_cap_get(PT_CAP_topa_multiple_entries) && !buf->snapshot &&
if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) &&
!buf->snapshot &&
pt_buffer_region_size(buf) - buf->output_off <= TOPA_PMI_MARGIN) {
void *head = pt_buffer_region(buf);
@@ -866,9 +893,11 @@ static void pt_handle_status(struct pt *pt)
static void pt_read_offset(struct pt_buffer *buf)
{
u64 offset, base_topa;
struct topa_page *tp;
rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, base_topa);
buf->cur = phys_to_virt(base_topa);
tp = phys_to_virt(base_topa);
buf->cur = &tp->topa;
rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, offset);
/* offset within current output region */
@@ -923,15 +952,14 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf,
unsigned long idx, npages, wakeup;
/* can't stop in the middle of an output region */
if (buf->output_off + handle->size + 1 <
sizes(TOPA_ENTRY(buf->cur, buf->cur_idx)->size)) {
if (buf->output_off + handle->size + 1 < pt_buffer_region_size(buf)) {
perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
return -EINVAL;
}
/* single entry ToPA is handled by marking all regions STOP=1 INT=1 */
if (!pt_cap_get(PT_CAP_topa_multiple_entries))
if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
return 0;
/* clear STOP and INT from current entry */
@@ -1019,6 +1047,7 @@ static void pt_buffer_setup_topa_index(struct pt_buffer *buf)
*/
static void pt_buffer_reset_offsets(struct pt_buffer *buf, unsigned long head)
{
struct topa_page *cur_tp;
int pg;
if (buf->snapshot)
@@ -1027,10 +1056,10 @@ static void pt_buffer_reset_offsets(struct pt_buffer *buf, unsigned long head)
pg = (head >> PAGE_SHIFT) & (buf->nr_pages - 1);
pg = pt_topa_next_entry(buf, pg);
buf->cur = (struct topa *)((unsigned long)buf->topa_index[pg] & PAGE_MASK);
buf->cur_idx = ((unsigned long)buf->topa_index[pg] -
(unsigned long)buf->cur) / sizeof(struct topa_entry);
buf->output_off = head & (sizes(buf->cur->table[buf->cur_idx].size) - 1);
cur_tp = topa_entry_to_page(buf->topa_index[pg]);
buf->cur = &cur_tp->topa;
buf->cur_idx = buf->topa_index[pg] - TOPA_ENTRY(buf->cur, 0);
buf->output_off = head & (pt_buffer_region_size(buf) - 1);
local64_set(&buf->head, head);
local_set(&buf->data_size, 0);
@@ -1082,7 +1111,7 @@ static int pt_buffer_init_topa(struct pt_buffer *buf, unsigned long nr_pages,
pt_buffer_setup_topa_index(buf);
/* link last table to the first one, unless we're double buffering */
if (pt_cap_get(PT_CAP_topa_multiple_entries)) {
if (intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) {
TOPA_ENTRY(buf->last, -1)->base = buf->first->phys >> TOPA_SHIFT;
TOPA_ENTRY(buf->last, -1)->end = 1;
}
@@ -1154,7 +1183,7 @@ static int pt_addr_filters_init(struct perf_event *event)
struct pt_filters *filters;
int node = event->cpu == -1 ? -1 : cpu_to_node(event->cpu);
if (!pt_cap_get(PT_CAP_num_address_ranges))
if (!intel_pt_validate_hw_cap(PT_CAP_num_address_ranges))
return 0;
filters = kzalloc_node(sizeof(struct pt_filters), GFP_KERNEL, node);
@@ -1203,7 +1232,7 @@ static int pt_event_addr_filters_validate(struct list_head *filters)
return -EINVAL;
}
if (++range > pt_cap_get(PT_CAP_num_address_ranges))
if (++range > intel_pt_validate_hw_cap(PT_CAP_num_address_ranges))
return -EOPNOTSUPP;
}
@@ -1294,7 +1323,7 @@ void intel_pt_interrupt(void)
return;
}
pt_config_buffer(buf->cur->table, buf->cur_idx,
pt_config_buffer(topa_to_page(buf->cur)->table, buf->cur_idx,
buf->output_off);
pt_config(event);
}
@@ -1359,7 +1388,7 @@ static void pt_event_start(struct perf_event *event, int mode)
WRITE_ONCE(pt->handle_nmi, 1);
hwc->state = 0;
pt_config_buffer(buf->cur->table, buf->cur_idx,
pt_config_buffer(topa_to_page(buf->cur)->table, buf->cur_idx,
buf->output_off);
pt_config(event);
@@ -1509,12 +1538,12 @@ static __init int pt_init(void)
if (ret)
return ret;
if (!pt_cap_get(PT_CAP_topa_output)) {
if (!intel_pt_validate_hw_cap(PT_CAP_topa_output)) {
pr_warn("ToPA output is not supported on this CPU\n");
return -ENODEV;
}
if (!pt_cap_get(PT_CAP_topa_multiple_entries))
if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
pt_pmu.pmu.capabilities =
PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_AUX_SW_DOUBLEBUF;
@@ -1532,7 +1561,7 @@ static __init int pt_init(void)
pt_pmu.pmu.addr_filters_sync = pt_event_addr_filters_sync;
pt_pmu.pmu.addr_filters_validate = pt_event_addr_filters_validate;
pt_pmu.pmu.nr_addr_filters =
pt_cap_get(PT_CAP_num_address_ranges);
intel_pt_validate_hw_cap(PT_CAP_num_address_ranges);
ret = perf_pmu_register(&pt_pmu.pmu, "intel_pt", -1);

View File

@@ -78,34 +78,13 @@ struct topa_entry {
u64 rsvd2 : 1;
u64 size : 4;
u64 rsvd3 : 2;
u64 base : 36;
u64 rsvd4 : 16;
u64 base : 40;
u64 rsvd4 : 12;
};
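Widening base from 36 to 40 bits matters because the field stores a page frame number that is shifted left by TOPA_SHIFT (12 in this driver) to form a physical address, so the reachable physical address space grows from 2^48 to 2^52 bytes. A quick arithmetic check, assuming only that TOPA_SHIFT is 12:

#include <stdint.h>
#include <stdio.h>

#define TOPA_SHIFT 12   /* base stores a 4 KiB page frame number */

int main(void)
{
        /* highest physical address reachable with an N-bit base field */
        uint64_t max36 = (UINT64_C(1) << 36) << TOPA_SHIFT;  /* 2^48 */
        uint64_t max40 = (UINT64_C(1) << 40) << TOPA_SHIFT;  /* 2^52 */

        printf("36-bit base: %llu TiB\n", (unsigned long long)(max36 >> 40)); /* 256 */
        printf("40-bit base: %llu TiB\n", (unsigned long long)(max40 >> 40)); /* 4096 */
        return 0;
}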
#define PT_CPUID_LEAVES 2
#define PT_CPUID_REGS_NUM 4 /* number of registers (eax, ebx, ecx, edx) */
/* TSC to Core Crystal Clock Ratio */
#define CPUID_TSC_LEAF 0x15
enum pt_capabilities {
PT_CAP_max_subleaf = 0,
PT_CAP_cr3_filtering,
PT_CAP_psb_cyc,
PT_CAP_ip_filtering,
PT_CAP_mtc,
PT_CAP_ptwrite,
PT_CAP_power_event_trace,
PT_CAP_topa_output,
PT_CAP_topa_multiple_entries,
PT_CAP_single_range_output,
PT_CAP_payloads_lip,
PT_CAP_num_address_ranges,
PT_CAP_mtc_periods,
PT_CAP_cycle_thresholds,
PT_CAP_psb_periods,
};
struct pt_pmu {
struct pmu pmu;
u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];

View File

@@ -216,7 +216,7 @@
#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
#define X86_FEATURE_LS_CFG_SSBD ( 7*32+24) /* "" AMD SSBD implementation via LS_CFG MSR */
#define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */
#define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
#define X86_FEATURE_IBPB ( 7*32+26) /* "ibpb" Indirect Branch Prediction Barrier without a guaranteed RSB flush */
#define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
#define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
#define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* "" L1TF workaround PTE inversion */
@@ -306,6 +306,7 @@
#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */
#define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
#define X86_FEATURE_BTC_NO (13*32+29) /* "" Not vulnerable to Branch Type Confusion */
#define X86_FEATURE_AMD_IBPB_RET (13*32+30) /* "" IBPB clears return address predictor */
/* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */

View File

@@ -2,10 +2,33 @@
#ifndef _ASM_X86_INTEL_PT_H
#define _ASM_X86_INTEL_PT_H
#define PT_CPUID_LEAVES 2
#define PT_CPUID_REGS_NUM 4 /* number of registers (eax, ebx, ecx, edx) */
enum pt_capabilities {
PT_CAP_max_subleaf = 0,
PT_CAP_cr3_filtering,
PT_CAP_psb_cyc,
PT_CAP_ip_filtering,
PT_CAP_mtc,
PT_CAP_ptwrite,
PT_CAP_power_event_trace,
PT_CAP_topa_output,
PT_CAP_topa_multiple_entries,
PT_CAP_single_range_output,
PT_CAP_payloads_lip,
PT_CAP_num_address_ranges,
PT_CAP_mtc_periods,
PT_CAP_cycle_thresholds,
PT_CAP_psb_periods,
};
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
void cpu_emergency_stop_pt(void);
extern u32 intel_pt_validate_hw_cap(enum pt_capabilities cap);
#else
static inline void cpu_emergency_stop_pt(void) {}
static inline u32 intel_pt_validate_hw_cap(enum pt_capabilities cap) { return 0; }
#endif
#endif /* _ASM_X86_INTEL_PT_H */

View File

@@ -484,7 +484,19 @@ static int lapic_timer_shutdown(struct clock_event_device *evt)
v = apic_read(APIC_LVTT);
v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
apic_write(APIC_LVTT, v);
/*
* Setting APIC_LVT_MASKED (above) should be enough to tell
* the hardware that this timer will never fire. But AMD
* erratum 411 and some Intel CPU behavior circa 2024 say
* otherwise. Time for belt and suspenders programming: mask
* the timer _and_ zero the counter registers:
*/
if (v & APIC_LVT_TIMER_TSCDEADLINE)
wrmsrl(MSR_IA32_TSC_DEADLINE, 0);
else
apic_write(APIC_TMICT, 0);
return 0;
}

View File

@@ -243,6 +243,7 @@ static void __init ms_hyperv_init_platform(void)
ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE) {
x86_platform.calibrate_tsc = hv_get_tsc_khz;
x86_platform.calibrate_cpu = hv_get_tsc_khz;
setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
}
if (ms_hyperv.hints & HV_X64_ENLIGHTENED_VMCS_RECOMMENDED) {

View File

@@ -819,7 +819,7 @@ void mtrr_save_state(void)
{
int first_cpu;
if (!mtrr_enabled())
if (!mtrr_enabled() || !mtrr_state.have_fixed)
return;
first_cpu = cpumask_first(cpu_online_mask);

View File

@@ -90,7 +90,7 @@ static int x86_of_pci_irq_enable(struct pci_dev *dev)
ret = pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
if (ret)
return ret;
return pcibios_err_to_errno(ret);
if (!pin)
return 0;
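This hunk and several that follow share one pattern: pci_read_config_byte() returns a positive PCIBIOS_* status code, not a negative errno, so it has to be converted with pcibios_err_to_errno() before being propagated. A self-contained sketch of why the conversion is needed; the enum values mirror include/linux/pci.h (simplified), while the accessor is a hypothetical stand-in:

#include <errno.h>
#include <stdio.h>

/* values mirror include/linux/pci.h; only two cases shown */
enum { PCIBIOS_SUCCESSFUL = 0x00, PCIBIOS_DEVICE_NOT_FOUND = 0x86 };

static int pcibios_err_to_errno(int err)
{
        switch (err) {
        case PCIBIOS_SUCCESSFUL:
                return 0;
        case PCIBIOS_DEVICE_NOT_FOUND:
                return -ENODEV;  /* positive bus status -> negative errno */
        default:
                return -EINVAL;
        }
}

/* hypothetical config-space accessor returning a PCIBIOS status */
static int read_config_byte(unsigned char *pin)
{
        *pin = 0;
        return PCIBIOS_DEVICE_NOT_FOUND;
}

int main(void)
{
        unsigned char pin;
        int ret = read_config_byte(&pin);

        /* returning 'ret' unconverted would hand callers a positive value
         * where they expect 0 or a negative errno */
        printf("raw %d -> errno %d\n", ret, pcibios_err_to_errno(ret));
        return 0;
}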

View File

@@ -383,14 +383,14 @@ pti_clone_pgtable(unsigned long start, unsigned long end,
*/
*target_pmd = *pmd;
addr += PMD_SIZE;
addr = round_up(addr + 1, PMD_SIZE);
} else if (level == PTI_CLONE_PTE) {
/* Walk the page-table down to the pte level */
pte = pte_offset_kernel(pmd, addr);
if (pte_none(*pte)) {
addr += PAGE_SIZE;
addr = round_up(addr + 1, PAGE_SIZE);
continue;
}
@@ -410,7 +410,7 @@ pti_clone_pgtable(unsigned long start, unsigned long end,
/* Clone the PTE */
*target_pte = *pte;
addr += PAGE_SIZE;
addr = round_up(addr + 1, PAGE_SIZE);
} else {
BUG();
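The change from `addr += PMD_SIZE` to `addr = round_up(addr + 1, PMD_SIZE)` matters when addr is not aligned: a fixed-size increment keeps landing on unaligned addresses and can step past the boundary the next mapping actually starts at, while rounding up snaps to the boundary exactly. A small sketch, using a power-of-two formulation equivalent to the kernel's round_up():

#include <stdio.h>

/* equivalent to the kernel's round_up() for power-of-two alignments */
#define round_up(x, y)  (((x) + (y) - 1) & ~((y) - 1))

#define PMD_SIZE        (2UL << 20)     /* 2 MiB */

int main(void)
{
        unsigned long addr = PMD_SIZE + 0x1000UL;  /* unaligned: base + 4 KiB */

        /* a fixed increment lands on another unaligned address */
        printf("addr += PMD_SIZE      -> %#lx\n", addr + PMD_SIZE);

        /* round_up(addr + 1, ...) snaps to the next PMD boundary */
        printf("round_up(addr+1, PMD) -> %#lx\n", round_up(addr + 1, PMD_SIZE));
        return 0;
}

The `+ 1` keeps an already-aligned address advancing a full step instead of rounding to itself.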

View File

@@ -223,9 +223,9 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
return 0;
ret = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi);
if (ret < 0) {
if (ret) {
dev_warn(&dev->dev, "Failed to read interrupt line: %d\n", ret);
return ret;
return pcibios_err_to_errno(ret);
}
switch (intel_mid_identify_cpu()) {

View File

@@ -36,10 +36,10 @@ static int xen_pcifront_enable_irq(struct pci_dev *dev)
u8 gsi;
rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi);
if (rc < 0) {
if (rc) {
dev_warn(&dev->dev, "Xen PCI: failed to read interrupt line: %d\n",
rc);
return rc;
return pcibios_err_to_errno(rc);
}
/* In PV DomU the Xen PCI backend puts the PIRQ in the interrupt line.*/
pirq = gsi;

View File

@@ -68,7 +68,7 @@ static int iosf_mbi_pci_read_mdr(u32 mcrx, u32 mcr, u32 *mdr)
fail_read:
dev_err(&mbi_pdev->dev, "PCI config access failed with %d\n", result);
return result;
return pcibios_err_to_errno(result);
}
static int iosf_mbi_pci_write_mdr(u32 mcrx, u32 mcr, u32 mdr)
@@ -97,7 +97,7 @@ static int iosf_mbi_pci_write_mdr(u32 mcrx, u32 mcr, u32 mdr)
fail_write:
dev_err(&mbi_pdev->dev, "PCI config access failed with %d\n", result);
return result;
return pcibios_err_to_errno(result);
}
int iosf_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr)

View File

@@ -733,7 +733,7 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
* immediate unmapping.
*/
map_ops[i].status = GNTST_general_error;
unmap[0].host_addr = map_ops[i].host_addr,
unmap[0].host_addr = map_ops[i].host_addr;
unmap[0].handle = map_ops[i].handle;
map_ops[i].handle = ~0;
if (map_ops[i].flags & GNTMAP_device_map)
@@ -743,7 +743,7 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
if (kmap_ops) {
kmap_ops[i].status = GNTST_general_error;
unmap[1].host_addr = kmap_ops[i].host_addr,
unmap[1].host_addr = kmap_ops[i].host_addr;
unmap[1].handle = kmap_ops[i].handle;
kmap_ops[i].handle = ~0;
if (kmap_ops[i].flags & GNTMAP_device_map)
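Both Xen hunks replace a trailing comma with a semicolon. The original lines still compiled because a comma there is the C comma operator, which fuses the next line into the same statement; in this code it happened to be harmless, but the construct is fragile, as this sketch shows:

#include <stdio.h>

struct op { int host_addr, handle; };

int main(void)
{
        struct op src = { .host_addr = 7, .handle = 42 };
        struct op unmap = { .host_addr = -1, .handle = -1 };

        /* The trailing comma fuses both assignments into ONE statement,
         * so the if guards them both - not what the layout suggests. */
        if (src.host_addr < 0)
                unmap.host_addr = src.host_addr,   /* comma, not ';' */
                unmap.handle = src.handle;

        /* neither field was written: prints -1 -1 */
        printf("%d %d\n", unmap.host_addr, unmap.handle);
        return 0;
}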

View File

@@ -862,7 +862,7 @@ char * __init xen_memory_setup(void)
* to relocating (and even reusing) pages with kernel text or data.
*/
if (xen_is_e820_reserved(__pa_symbol(_text),
__pa_symbol(__bss_stop) - __pa_symbol(_text))) {
__pa_symbol(_end) - __pa_symbol(_text))) {
xen_raw_console_write("Xen hypervisor allocated kernel memory conflicts with E820 map\n");
BUG();
}

View File

@@ -2226,8 +2226,12 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
struct bfq_queue *in_service_bfqq, *new_bfqq;
/* if a merge has already been setup, then proceed with that first */
if (bfqq->new_bfqq)
return bfqq->new_bfqq;
new_bfqq = bfqq->new_bfqq;
if (new_bfqq) {
while (new_bfqq->new_bfqq)
new_bfqq = new_bfqq->new_bfqq;
return new_bfqq;
}
/*
* Prevent bfqq from being merged if it has been created too
@@ -5033,7 +5037,7 @@ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
{
bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
if (bfqq_process_refs(bfqq) == 1) {
if (bfqq_process_refs(bfqq) == 1 && !bfqq->new_bfqq) {
bfqq->pid = current->pid;
bfq_clear_bfqq_coop(bfqq);
bfq_clear_bfqq_split_coop(bfqq);
@@ -5218,7 +5222,8 @@ static struct bfq_queue *bfq_init_rq(struct request *rq)
* addition, if the queue has also just been split, we have to
* resume its state.
*/
if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) {
if (likely(bfqq != &bfqd->oom_bfqq) && !bfqq->new_bfqq &&
bfqq_process_refs(bfqq) == 1) {
bfqq->bic = bic;
if (split) {
/*

View File

@@ -227,6 +227,7 @@ bool bio_integrity_prep(struct bio *bio)
unsigned int bytes, offset, i;
unsigned int intervals;
blk_status_t status;
gfp_t gfp = GFP_NOIO;
if (!bi)
return true;
@@ -249,12 +250,20 @@ bool bio_integrity_prep(struct bio *bio)
if (!bi->profile->generate_fn ||
!(bi->flags & BLK_INTEGRITY_GENERATE))
return true;
/*
* Zero the memory allocated to not leak uninitialized kernel
* memory to disk. For PI this only affects the app tag, but
* for non-integrity metadata it affects the entire metadata
* buffer.
*/
gfp |= __GFP_ZERO;
}
intervals = bio_integrity_intervals(bi, bio_sectors(bio));
/* Allocate kernel buffer for protection data */
len = intervals * bi->tuple_size;
buf = kmalloc(len, GFP_NOIO | q->bounce_gfp);
buf = kmalloc(len, gfp | q->bounce_gfp);
status = BLK_STS_RESOURCE;
if (unlikely(buf == NULL)) {
printk(KERN_ERR "could not allocate integrity buffer\n");

View File

@@ -45,8 +45,7 @@ static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
memcpy(alignbuffer, key, keylen);
ret = crypto_aead_alg(tfm)->setkey(tfm, alignbuffer, keylen);
memset(alignbuffer, 0, keylen);
kfree(buffer);
kzfree(buffer);
return ret;
}

View File

@@ -38,8 +38,7 @@ static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
memcpy(alignbuffer, key, keylen);
ret = cia->cia_setkey(tfm, alignbuffer, keylen);
memset(alignbuffer, 0, keylen);
kfree(buffer);
kzfree(buffer);
return ret;
}

View File

@@ -391,7 +391,7 @@ static int acpi_processor_add(struct acpi_device *device,
result = acpi_processor_get_info(device);
if (result) /* Processor is not physically present or unavailable */
return 0;
goto err_clear_driver_data;
BUG_ON(pr->id >= nr_cpu_ids);
@@ -406,7 +406,7 @@ static int acpi_processor_add(struct acpi_device *device,
"BIOS reported wrong ACPI id %d for the processor\n",
pr->id);
/* Give up, but do not abort the namespace scan. */
goto err;
goto err_clear_driver_data;
}
/*
* processor_device_array is not cleared on errors to allow buggy BIOS
@@ -418,12 +418,12 @@ static int acpi_processor_add(struct acpi_device *device,
dev = get_cpu_device(pr->id);
if (!dev) {
result = -ENODEV;
goto err;
goto err_clear_per_cpu;
}
result = acpi_bind_one(dev, device);
if (result)
goto err;
goto err_clear_per_cpu;
pr->dev = dev;
@@ -434,10 +434,11 @@ static int acpi_processor_add(struct acpi_device *device,
dev_err(dev, "Processor driver could not be attached\n");
acpi_unbind_one(dev);
err:
free_cpumask_var(pr->throttling.shared_cpu_map);
device->driver_data = NULL;
err_clear_per_cpu:
per_cpu(processors, pr->id) = NULL;
err_clear_driver_data:
device->driver_data = NULL;
free_cpumask_var(pr->throttling.shared_cpu_map);
err_free_pr:
kfree(pr);
return result;

View File

@@ -170,6 +170,8 @@ acpi_status acpi_db_convert_to_package(char *string, union acpi_object *object)
elements =
ACPI_ALLOCATE_ZEROED(DB_DEFAULT_PKG_ELEMENTS *
sizeof(union acpi_object));
if (!elements)
return (AE_NO_MEMORY);
this = string;
for (i = 0; i < (DB_DEFAULT_PKG_ELEMENTS - 1); i++) {

View File

@@ -437,6 +437,9 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
if (info->connection_node) {
second_desc = info->connection_node->object;
if (second_desc == NULL) {
break;
}
if (!(second_desc->common.flags & AOPOBJ_DATA_VALID)) {
status =
acpi_ds_get_buffer_arguments(second_desc);

View File

@@ -25,6 +25,8 @@ acpi_ps_get_next_package_length(struct acpi_parse_state *parser_state);
static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
*parser_state);
static void acpi_ps_free_field_list(union acpi_parse_object *start);
/*******************************************************************************
*
* FUNCTION: acpi_ps_get_next_package_length
@@ -683,6 +685,39 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
return_PTR(field);
}
/*******************************************************************************
*
* FUNCTION: acpi_ps_free_field_list
*
* PARAMETERS: start - First Op in field list
*
* RETURN: None.
*
* DESCRIPTION: Free all Op objects inside a field list.
*
******************************************************************************/
static void acpi_ps_free_field_list(union acpi_parse_object *start)
{
union acpi_parse_object *cur = start;
union acpi_parse_object *next;
union acpi_parse_object *arg;
while (cur) {
next = cur->common.next;
/* AML_INT_CONNECTION_OP can have a single argument */
arg = acpi_ps_get_arg(cur, 0);
if (arg) {
acpi_ps_free_op(arg);
}
acpi_ps_free_op(cur);
cur = next;
}
}
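acpi_ps_free_field_list() is the standard save-next-before-free walk over a singly linked list whose nodes may each own one child. A stand-in sketch of the same shape (types hypothetical, not ACPICA's):

#include <stdio.h>
#include <stdlib.h>

/* stand-in for union acpi_parse_object: a node that may own one child,
 * as AML_INT_CONNECTION_OP can own a single argument */
struct op {
        struct op *next;
        struct op *arg;
};

/* same shape as acpi_ps_free_field_list(): save next before freeing
 * the current node, and release the owned argument first */
static void free_field_list(struct op *start)
{
        struct op *cur = start;

        while (cur) {
                struct op *next = cur->next;

                free(cur->arg);
                free(cur);
                cur = next;
        }
}

int main(void)
{
        struct op *a = calloc(1, sizeof(*a));
        struct op *b = calloc(1, sizeof(*b));

        if (!a || !b)
                return 1;
        a->next = b;
        b->arg = calloc(1, sizeof(struct op));

        free_field_list(a);     /* frees a, then b along with b->arg */
        puts("list freed");
        return 0;
}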
/*******************************************************************************
*
* FUNCTION: acpi_ps_get_next_arg
@@ -751,6 +786,10 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
while (parser_state->aml < parser_state->pkg_end) {
field = acpi_ps_get_next_field(parser_state);
if (!field) {
if (arg) {
acpi_ps_free_field_list(arg);
}
return_ACPI_STATUS(AE_NO_MEMORY);
}
@@ -820,6 +859,10 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
acpi_ps_get_next_namepath(walk_state, parser_state,
arg,
ACPI_NOT_METHOD_CALL);
if (ACPI_FAILURE(status)) {
acpi_ps_free_op(arg);
return_ACPI_STATUS(status);
}
} else {
/* Single complex argument, nothing returned */
@@ -854,6 +897,10 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
acpi_ps_get_next_namepath(walk_state, parser_state,
arg,
ACPI_POSSIBLE_METHOD_CALL);
if (ACPI_FAILURE(status)) {
acpi_ps_free_op(arg);
return_ACPI_STATUS(status);
}
if (arg->common.aml_opcode == AML_INT_METHODCALL_OP) {

View File

@@ -713,27 +713,34 @@ static LIST_HEAD(acpi_battery_list);
static LIST_HEAD(battery_hook_list);
static DEFINE_MUTEX(hook_mutex);
static void __battery_hook_unregister(struct acpi_battery_hook *hook, int lock)
static void battery_hook_unregister_unlocked(struct acpi_battery_hook *hook)
{
struct acpi_battery *battery;
/*
* In order to remove a hook, we first need to
* de-register all the batteries that are registered.
*/
if (lock)
mutex_lock(&hook_mutex);
list_for_each_entry(battery, &acpi_battery_list, list) {
hook->remove_battery(battery->bat);
}
list_del(&hook->list);
if (lock)
mutex_unlock(&hook_mutex);
list_del_init(&hook->list);
pr_info("extension unregistered: %s\n", hook->name);
}
void battery_hook_unregister(struct acpi_battery_hook *hook)
{
__battery_hook_unregister(hook, 1);
mutex_lock(&hook_mutex);
/*
* Ignore already unregistered battery hooks. This might happen
* if a battery hook was previously unloaded due to an error when
* adding a new battery.
*/
if (!list_empty(&hook->list))
battery_hook_unregister_unlocked(hook);
mutex_unlock(&hook_mutex);
}
EXPORT_SYMBOL_GPL(battery_hook_unregister);
@@ -742,7 +749,6 @@ void battery_hook_register(struct acpi_battery_hook *hook)
struct acpi_battery *battery;
mutex_lock(&hook_mutex);
INIT_LIST_HEAD(&hook->list);
list_add(&hook->list, &battery_hook_list);
/*
* Now that the driver is registered, we need
@@ -759,7 +765,7 @@ void battery_hook_register(struct acpi_battery_hook *hook)
* hooks.
*/
pr_err("extension failed to load: %s", hook->name);
__battery_hook_unregister(hook, 0);
battery_hook_unregister_unlocked(hook);
goto end;
}
}
@@ -796,7 +802,7 @@ static void battery_hook_add_battery(struct acpi_battery *battery)
*/
pr_err("error in extension, unloading: %s",
hook_node->name);
__battery_hook_unregister(hook_node, 0);
battery_hook_unregister_unlocked(hook_node);
}
}
mutex_unlock(&hook_mutex);
@@ -829,7 +835,7 @@ static void __exit battery_hook_exit(void)
* need to remove the hooks.
*/
list_for_each_entry_safe(hook, ptr, &battery_hook_list, list) {
__battery_hook_unregister(hook, 1);
battery_hook_unregister(hook);
}
mutex_destroy(&hook_mutex);
}
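The battery hunks replace the boolean `lock` parameter with an explicitly named _unlocked helper plus a locking wrapper, so each call site's locking context is visible in the function name. A generic pthread sketch of that pattern (all names hypothetical):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hook_mutex = PTHREAD_MUTEX_INITIALIZER;
static int nhooks;

/* caller must hold hook_mutex */
static void hook_unregister_unlocked(void)
{
        nhooks--;   /* the real work, lock-free by contract */
}

/* public entry point: takes the lock, then calls the helper */
static void hook_unregister(void)
{
        pthread_mutex_lock(&hook_mutex);
        hook_unregister_unlocked();
        pthread_mutex_unlock(&hook_mutex);
}

/* a path that already holds the lock (like battery_hook_register's
 * error path) calls the _unlocked variant directly */
static void hook_register(void)
{
        pthread_mutex_lock(&hook_mutex);
        nhooks++;
        if (nhooks > 1)                  /* pretend registration failed */
                hook_unregister_unlocked();
        pthread_mutex_unlock(&hook_mutex);
}

int main(void)
{
        hook_register();
        hook_unregister();
        printf("nhooks=%d\n", nhooks);
        return 0;
}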

View File

@@ -124,6 +124,17 @@ static const struct dmi_system_id lid_blacklst[] = {
},
.driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
},
{
/*
* Samsung galaxybook2, initial _LID device notification returns
* lid closed.
*/
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
DMI_MATCH(DMI_PRODUCT_NAME, "750XED"),
},
.driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
},
{}
};

View File

@@ -539,8 +539,9 @@ int acpi_device_setup_files(struct acpi_device *dev)
* If device has _STR, 'description' file is created
*/
if (acpi_has_method(dev->handle, "_STR")) {
status = acpi_evaluate_object(dev->handle, "_STR",
NULL, &buffer);
status = acpi_evaluate_object_typed(dev->handle, "_STR",
NULL, &buffer,
ACPI_TYPE_BUFFER);
if (ACPI_FAILURE(status))
buffer.pointer = NULL;
dev->pnp.str_obj = buffer.pointer;

View File

@@ -807,6 +807,9 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
unsigned long tmp;
int ret = 0;
if (t->rdata)
memset(t->rdata, 0, t->rlen);
/* start transaction */
spin_lock_irqsave(&ec->lock, tmp);
/* Enable GPE for command processing (IBF=0/OBF=1) */
@@ -843,8 +846,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
if (!ec || (!t) || (t->wlen && !t->wdata) || (t->rlen && !t->rdata))
return -EINVAL;
if (t->rdata)
memset(t->rdata, 0, t->rlen);
mutex_lock(&ec->mutex);
if (ec->global_lock) {
@@ -871,7 +872,7 @@ static int acpi_ec_burst_enable(struct acpi_ec *ec)
.wdata = NULL, .rdata = &d,
.wlen = 0, .rlen = 1};
return acpi_ec_transaction(ec, &t);
return acpi_ec_transaction_unlocked(ec, &t);
}
static int acpi_ec_burst_disable(struct acpi_ec *ec)
@@ -881,7 +882,7 @@ static int acpi_ec_burst_disable(struct acpi_ec *ec)
.wlen = 0, .rlen = 0};
return (acpi_ec_read_status(ec) & ACPI_EC_FLAG_BURST) ?
acpi_ec_transaction(ec, &t) : 0;
acpi_ec_transaction_unlocked(ec, &t) : 0;
}
static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data)
@@ -897,6 +898,19 @@ static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data)
return result;
}
static int acpi_ec_read_unlocked(struct acpi_ec *ec, u8 address, u8 *data)
{
int result;
u8 d;
struct transaction t = {.command = ACPI_EC_COMMAND_READ,
.wdata = &address, .rdata = &d,
.wlen = 1, .rlen = 1};
result = acpi_ec_transaction_unlocked(ec, &t);
*data = d;
return result;
}
static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
{
u8 wdata[2] = { address, data };
@@ -907,6 +921,16 @@ static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
return acpi_ec_transaction(ec, &t);
}
static int acpi_ec_write_unlocked(struct acpi_ec *ec, u8 address, u8 data)
{
u8 wdata[2] = { address, data };
struct transaction t = {.command = ACPI_EC_COMMAND_WRITE,
.wdata = wdata, .rdata = NULL,
.wlen = 2, .rlen = 0};
return acpi_ec_transaction_unlocked(ec, &t);
}
int ec_read(u8 addr, u8 *val)
{
int err;
@@ -1320,6 +1344,7 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
struct acpi_ec *ec = handler_context;
int result = 0, i, bytes = bits / 8;
u8 *value = (u8 *)value64;
u32 glk;
if ((address > 0xFF) || !value || !handler_context)
return AE_BAD_PARAMETER;
@@ -1327,17 +1352,38 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
if (function != ACPI_READ && function != ACPI_WRITE)
return AE_BAD_PARAMETER;
mutex_lock(&ec->mutex);
if (ec->global_lock) {
acpi_status status;
status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
if (ACPI_FAILURE(status)) {
result = -ENODEV;
goto unlock;
}
}
if (ec->busy_polling || bits > 8)
acpi_ec_burst_enable(ec);
for (i = 0; i < bytes; ++i, ++address, ++value)
for (i = 0; i < bytes; ++i, ++address, ++value) {
result = (function == ACPI_READ) ?
acpi_ec_read(ec, address, value) :
acpi_ec_write(ec, address, *value);
acpi_ec_read_unlocked(ec, address, value) :
acpi_ec_write_unlocked(ec, address, *value);
if (result < 0)
break;
}
if (ec->busy_polling || bits > 8)
acpi_ec_burst_disable(ec);
if (ec->global_lock)
acpi_release_global_lock(glk);
unlock:
mutex_unlock(&ec->mutex);
switch (result) {
case -EINVAL:
return AE_BAD_PARAMETER;
@@ -1345,8 +1391,10 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
return AE_NOT_FOUND;
case -ETIME:
return AE_TIME;
default:
case 0:
return AE_OK;
default:
return AE_ERROR;
}
}

View File

@@ -376,10 +376,8 @@ static int tps68470_pmic_opregion_probe(struct platform_device *pdev)
struct tps68470_pmic_opregion *opregion;
acpi_status status;
if (!dev || !tps68470_regmap) {
dev_warn(dev, "dev or regmap is NULL\n");
return -EINVAL;
}
if (!tps68470_regmap)
return dev_err_probe(dev, -EINVAL, "regmap is missing\n");
if (!handle) {
dev_warn(dev, "acpi handle is NULL\n");

View File

@@ -29,7 +29,6 @@
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/sched.h> /* need_resched() */
#include <linux/sort.h>
#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <linux/cpu.h>
@@ -545,28 +544,24 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
return;
}
static int acpi_cst_latency_cmp(const void *a, const void *b)
static void acpi_cst_latency_sort(struct acpi_processor_cx *states, size_t length)
{
const struct acpi_processor_cx *x = a, *y = b;
int i, j, k;
if (!(x->valid && y->valid))
return 0;
if (x->latency > y->latency)
return 1;
if (x->latency < y->latency)
return -1;
return 0;
}
static void acpi_cst_latency_swap(void *a, void *b, int n)
{
struct acpi_processor_cx *x = a, *y = b;
u32 tmp;
for (i = 1; i < length; i++) {
if (!states[i].valid)
continue;
if (!(x->valid && y->valid))
return;
tmp = x->latency;
x->latency = y->latency;
y->latency = tmp;
for (j = i - 1, k = i; j >= 0; j--) {
if (!states[j].valid)
continue;
if (states[j].latency > states[k].latency)
swap(states[j].latency, states[k].latency);
k = j;
}
}
}
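The replacement sort reorders only the latency values of valid states and leaves invalid entries untouched, which the previous comparator-plus-swap callback pair could not express cleanly. A standalone sketch of the same loop over stand-in structs:

#include <stdio.h>

#define swap(a, b) do { unsigned t = (a); (a) = (b); (b) = t; } while (0)

struct cx { int valid; unsigned latency; };

/* same shape as acpi_cst_latency_sort(): bubble each valid state's
 * latency back past any larger latency among earlier valid states */
static void latency_sort(struct cx *states, int length)
{
        int i, j, k;

        for (i = 1; i < length; i++) {
                if (!states[i].valid)
                        continue;
                for (j = i - 1, k = i; j >= 0; j--) {
                        if (!states[j].valid)
                                continue;
                        if (states[j].latency > states[k].latency)
                                swap(states[j].latency, states[k].latency);
                        k = j;
                }
        }
}

int main(void)
{
        struct cx s[] = { {1, 50}, {0, 0}, {1, 10}, {1, 30} };
        int i;

        latency_sort(s, 4);
        for (i = 0; i < 4; i++)       /* valid latencies come out 10, 30, 50 */
                printf("valid=%d latency=%u\n", s[i].valid, s[i].latency);
        return 0;
}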
static int acpi_processor_power_verify(struct acpi_processor *pr)
@@ -611,10 +606,7 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
if (buggy_latency) {
pr_notice("FW issue: working around C-state latencies out of order\n");
sort(&pr->power.states[1], max_cstate,
sizeof(struct acpi_processor_cx),
acpi_cst_latency_cmp,
acpi_cst_latency_swap);
acpi_cst_latency_sort(&pr->power.states[1], max_cstate);
}
lapic_timer_propagate_broadcast(pr);

View File

@@ -1002,9 +1002,7 @@ static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
return !thread->transaction_stack &&
binder_worklist_empty_ilocked(&thread->todo) &&
(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
BINDER_LOOPER_STATE_REGISTERED));
binder_worklist_empty_ilocked(&thread->todo);
}
static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,

View File

@@ -6159,6 +6159,9 @@ static void ata_host_release(struct kref *kref)
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
if (!ap)
continue;
kfree(ap->pmp_link);
kfree(ap->slave_link);
kfree(ap);
@@ -6218,8 +6221,10 @@ struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
}
dr = devres_alloc(ata_devres_release, 0, GFP_KERNEL);
if (!dr)
if (!dr) {
kfree(host);
goto err_out;
}
devres_add(dev, dr);
dev_set_drvdata(dev, host);

View File

@@ -537,7 +537,8 @@ static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc)
while (sg_len) {
/* table overflow should never happen */
BUG_ON (pi++ >= MAX_DCMDS);
if (WARN_ON_ONCE(pi >= MAX_DCMDS))
return AC_ERR_SYSTEM;
len = (sg_len < MAX_DBDMA_SEG) ? sg_len : MAX_DBDMA_SEG;
table->command = cpu_to_le16(write ? OUTPUT_MORE: INPUT_MORE);
@@ -549,11 +550,13 @@ static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc)
addr += len;
sg_len -= len;
++table;
++pi;
}
}
/* Should never happen according to Tejun */
BUG_ON(!pi);
if (WARN_ON_ONCE(!pi))
return AC_ERR_SYSTEM;
/* Convert the last command to an input/output */
table--;

View File

@@ -144,7 +144,7 @@ static const struct pci_device_id sil_pci_tbl[] = {
static const struct sil_drivelist {
const char *product;
unsigned int quirk;
} sil_blacklist [] = {
} sil_quirks[] = {
{ "ST320012AS", SIL_QUIRK_MOD15WRITE },
{ "ST330013AS", SIL_QUIRK_MOD15WRITE },
{ "ST340017AS", SIL_QUIRK_MOD15WRITE },
@@ -617,8 +617,8 @@ static void sil_thaw(struct ata_port *ap)
* list, and apply the fixups to only the specific
* devices/hosts/firmwares that need it.
*
* 20040111 - Seagate drives affected by the Mod15Write bug are blacklisted
* The Maxtor quirk is in the blacklist, but I'm keeping the original
* 20040111 - Seagate drives affected by the Mod15Write bug are quirked
* The Maxtor quirk is in sil_quirks, but I'm keeping the original
* pessimistic fix for the following reasons...
* - There seems to be less info on it, only one device gleaned off the
* Windows driver, maybe only one is affected. More info would be greatly
@@ -637,9 +637,9 @@ static void sil_dev_config(struct ata_device *dev)
ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
for (n = 0; sil_blacklist[n].product; n++)
if (!strcmp(sil_blacklist[n].product, model_num)) {
quirks = sil_blacklist[n].quirk;
for (n = 0; sil_quirks[n].product; n++)
if (!strcmp(sil_quirks[n].product, model_num)) {
quirks = sil_quirks[n].quirk;
break;
}

View File

@@ -1117,8 +1117,8 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
rpp->len += skb->len;
if (stat & SAR_RSQE_EPDU) {
unsigned int len, truesize;
unsigned char *l1l2;
unsigned int len;
l1l2 = (unsigned char *) ((unsigned long) skb->data + skb->len - 6);
@@ -1188,14 +1188,15 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
ATM_SKB(skb)->vcc = vcc;
__net_timestamp(skb);
truesize = skb->truesize;
vcc->push(vcc, skb);
atomic_inc(&vcc->stats->rx);
if (skb->truesize > SAR_FB_SIZE_3)
if (truesize > SAR_FB_SIZE_3)
add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
else if (skb->truesize > SAR_FB_SIZE_2)
else if (truesize > SAR_FB_SIZE_2)
add_rx_skb(card, 2, SAR_FB_SIZE_2, 1);
else if (skb->truesize > SAR_FB_SIZE_1)
else if (truesize > SAR_FB_SIZE_1)
add_rx_skb(card, 1, SAR_FB_SIZE_1, 1);
else
add_rx_skb(card, 0, SAR_FB_SIZE_0, 1);
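The idt77252 fix caches skb->truesize before vcc->push() because the upper layer may free or resize the skb, making any later field read a use-after-free. The same discipline in miniature, with a hypothetical consumer that frees its input:

#include <stdio.h>
#include <stdlib.h>

struct buf { unsigned int truesize; };

/* stand-in for vcc->push(): consumes and frees the buffer */
static void push(struct buf *b)
{
        free(b);
}

int main(void)
{
        struct buf *b = malloc(sizeof(*b));
        unsigned int truesize;

        if (!b)
                return 1;
        b->truesize = 2048;

        truesize = b->truesize;  /* read BEFORE ownership is handed off */
        push(b);                 /* b must not be dereferenced after this */

        printf("size class for %u bytes\n", truesize);
        return 0;
}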

View File

@@ -103,7 +103,8 @@ static ssize_t bus_attr_show(struct kobject *kobj, struct attribute *attr,
{
struct bus_attribute *bus_attr = to_bus_attr(attr);
struct subsys_private *subsys_priv = to_subsys_private(kobj);
ssize_t ret = 0;
/* return -EIO for reading a bus attribute without show() */
ssize_t ret = -EIO;
if (bus_attr->show)
ret = bus_attr->show(subsys_priv->bus, buf);
@@ -115,7 +116,8 @@ static ssize_t bus_attr_store(struct kobject *kobj, struct attribute *attr,
{
struct bus_attribute *bus_attr = to_bus_attr(attr);
struct subsys_private *subsys_priv = to_subsys_private(kobj);
ssize_t ret = 0;
/* return -EIO for writing a bus attribute without store() */
ssize_t ret = -EIO;
if (bus_attr->store)
ret = bus_attr->store(subsys_priv->bus, buf, count);

View File

@@ -559,6 +559,7 @@ void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
grp->id = grp;
if (id)
grp->id = id;
grp->color = 0;
spin_lock_irqsave(&dev->devres_lock, flags);
add_dr(dev, &grp->node[0]);
@@ -1057,7 +1058,11 @@ EXPORT_SYMBOL_GPL(__devm_alloc_percpu);
*/
void devm_free_percpu(struct device *dev, void __percpu *pdata)
{
WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match,
(void *)pdata));
/*
* Use devres_release() to prevent memory leakage as
* devm_free_pages() does.
*/
WARN_ON(devres_release(dev, devm_percpu_release, devm_percpu_match,
(__force void *)pdata));
}
EXPORT_SYMBOL_GPL(devm_free_percpu);

View File

@@ -570,6 +570,26 @@ static void fw_abort_batch_reqs(struct firmware *fw)
mutex_unlock(&fw_lock);
}
/*
* Reject firmware file names with ".." path components.
* There are drivers that construct firmware file names from device-supplied
* strings, and we don't want some device to be able to tell us "I would like to
* be sent my firmware from ../../../etc/shadow, please".
*
* Search for ".." surrounded by either '/' or start/end of string.
*
* This intentionally only looks at the firmware name, not at the firmware base
* directory or at symlink contents.
*/
static bool name_contains_dotdot(const char *name)
{
size_t name_len = strlen(name);
return strcmp(name, "..") == 0 || strncmp(name, "../", 3) == 0 ||
strstr(name, "/../") != NULL ||
(name_len >= 3 && strcmp(name+name_len-3, "/..") == 0);
}
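Since name_contains_dotdot() uses only libc string functions, it can be exercised standalone. A userspace harness (the function body copied from the hunk above) over the cases the comments call out:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool name_contains_dotdot(const char *name)
{
        size_t name_len = strlen(name);

        return strcmp(name, "..") == 0 || strncmp(name, "../", 3) == 0 ||
               strstr(name, "/../") != NULL ||
               (name_len >= 3 && strcmp(name+name_len-3, "/..") == 0);
}

int main(void)
{
        const char *names[] = {
                "foo/bar..bin",      /* allowed: ".." is inside a name  */
                "foo/../bar.bin",    /* rejected: a real path component */
                "../bar.bin",        /* rejected: leading traversal     */
                "foo/..",            /* rejected: trailing traversal    */
                "..",                /* rejected: the whole name        */
        };
        size_t i;

        for (i = 0; i < sizeof(names) / sizeof(names[0]); i++)
                printf("%-16s -> %s\n", names[i],
                       name_contains_dotdot(names[i]) ? "rejected" : "allowed");
        return 0;
}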
/* called from request_firmware() and request_firmware_work_func() */
static int
_request_firmware(const struct firmware **firmware_p, const char *name,
@@ -587,6 +607,14 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
goto out;
}
if (name_contains_dotdot(name)) {
dev_warn(device,
"Firmware load for '%s' refused, path contains '..' component\n",
name);
ret = -EINVAL;
goto out;
}
ret = _request_firmware_prepare(&fw, name, device, buf, size,
opt_flags);
if (ret <= 0) /* error or already assigned */
@@ -627,6 +655,8 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
* @name will be used as $FIRMWARE in the uevent environment and
* should be distinctive enough not to be confused with any other
* firmware image for this or any other device.
* It must not contain any ".." path components - "foo/bar..bin" is
* allowed, but "foo/../bar.bin" is not.
*
* Caller must hold the reference count of @device.
*

View File

@@ -362,6 +362,7 @@ ata_rw_frameinit(struct frame *f)
}
ah->cmdstat = ATA_CMD_PIO_READ | writebit | extbit;
dev_hold(t->ifp->nd);
skb->dev = t->ifp->nd;
}
@@ -402,6 +403,8 @@ aoecmd_ata_rw(struct aoedev *d)
__skb_queue_head_init(&queue);
__skb_queue_tail(&queue, skb);
aoenet_xmit(&queue);
} else {
dev_put(f->t->ifp->nd);
}
return 1;
}
@@ -484,10 +487,13 @@ resend(struct aoedev *d, struct frame *f)
memcpy(h->dst, t->addr, sizeof h->dst);
memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);
dev_hold(t->ifp->nd);
skb->dev = t->ifp->nd;
skb = skb_clone(skb, GFP_ATOMIC);
if (skb == NULL)
if (skb == NULL) {
dev_put(t->ifp->nd);
return;
}
f->sent = ktime_get();
__skb_queue_head_init(&queue);
__skb_queue_tail(&queue, skb);
@@ -618,6 +624,8 @@ probe(struct aoetgt *t)
__skb_queue_head_init(&queue);
__skb_queue_tail(&queue, skb);
aoenet_xmit(&queue);
} else {
dev_put(f->t->ifp->nd);
}
}
@@ -1407,6 +1415,7 @@ aoecmd_ata_id(struct aoedev *d)
ah->cmdstat = ATA_CMD_ID_ATA;
ah->lba3 = 0xa0;
dev_hold(t->ifp->nd);
skb->dev = t->ifp->nd;
d->rttavg = RTTAVG_INIT;
@@ -1416,6 +1425,8 @@ aoecmd_ata_id(struct aoedev *d)
skb = skb_clone(skb, GFP_ATOMIC);
if (skb)
f->sent = ktime_get();
else
dev_put(t->ifp->nd);
return skb;
}

View File

@@ -3499,10 +3499,12 @@ void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local)
void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local)
{
unsigned long flags;
if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
return;
spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0) {
spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
return;
}
if (val == 0) {
drbd_uuid_move_history(device);
device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];

View File

@@ -888,7 +888,7 @@ is_valid_state(struct drbd_device *device, union drbd_state ns)
ns.disk == D_OUTDATED)
rv = SS_CONNECTED_OUTDATES;
else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
else if (nc && (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
(nc->verify_alg[0] == 0))
rv = SS_NO_VERIFY_ALG;

View File

@@ -743,7 +743,15 @@ static int btusb_submit_intr_urb(struct hci_dev *hdev, gfp_t mem_flags)
if (!urb)
return -ENOMEM;
if (le16_to_cpu(data->udev->descriptor.idVendor) == 0x0a12 &&
le16_to_cpu(data->udev->descriptor.idProduct) == 0x0001)
/* Fake CSR devices don't seem to support short-transfer */
size = le16_to_cpu(data->intr_ep->wMaxPacketSize);
else
/* Use maximum HCI Event size so the USB stack handles
* ZPL/short-transfer automatically.
*/
size = HCI_MAX_EVENT_SIZE;
buf = kmalloc(size, mem_flags);
if (!buf) {

Some files were not shown because too many files have changed in this diff