Merge "Merge android-4.19-q.66 (5118163) into msm-4.19"

commit 5dc7d7712c
committed by Gerrit - the friendly Code Review server
@@ -41,10 +41,11 @@ Related CVEs

 The following CVE entries describe Spectre variants:

-   =============   =======================  =================
+   =============   =======================  ==========================
    CVE-2017-5753   Bounds check bypass      Spectre variant 1
    CVE-2017-5715   Branch target injection  Spectre variant 2
-   =============   =======================  =================
+   CVE-2019-1125   Spectre v1 swapgs        Spectre variant 1 (swapgs)
+   =============   =======================  ==========================

 Problem
 -------
@@ -78,6 +79,13 @@ There are some extensions of Spectre variant 1 attacks for reading data
 over the network, see :ref:`[12] <spec_ref12>`. However such attacks
 are difficult, low bandwidth, fragile, and are considered low risk.

+Note that, despite "Bounds Check Bypass" name, Spectre variant 1 is not
+only about user-controlled array bounds checks.  It can affect any
+conditional checks.  The kernel entry code interrupt, exception, and NMI
+handlers all have conditional swapgs checks.  Those may be problematic
+in the context of Spectre v1, as kernel code can speculatively run with
+a user GS.
+
 Spectre variant 2 (Branch Target Injection)
 -------------------------------------------
@@ -132,6 +140,9 @@ not cover all possible attack vectors.
 1. A user process attacking the kernel
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

+Spectre variant 1
+~~~~~~~~~~~~~~~~~
+
    The attacker passes a parameter to the kernel via a register or
    via a known address in memory during a syscall. Such parameter may
    be used later by the kernel as an index to an array or to derive
@@ -144,7 +155,40 @@ not cover all possible attack vectors.
    potentially be influenced for Spectre attacks, new "nospec" accessor
    macros are used to prevent speculative loading of data.

-   Spectre variant 2 attacker can :ref:`poison <poison_btb>` the branch
+Spectre variant 1 (swapgs)
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+   An attacker can train the branch predictor to speculatively skip the
+   swapgs path for an interrupt or exception.  If they initialize
+   the GS register to a user-space value, if the swapgs is speculatively
+   skipped, subsequent GS-related percpu accesses in the speculation
+   window will be done with the attacker-controlled GS value.  This
+   could cause privileged memory to be accessed and leaked.
+
+   For example:
+
+   ::
+
+     if (coming from user space)
+         swapgs
+     mov %gs:<percpu_offset>, %reg
+     mov (%reg), %reg1
+
+   When coming from user space, the CPU can speculatively skip the
+   swapgs, and then do a speculative percpu load using the user GS
+   value.  So the user can speculatively force a read of any kernel
+   value.  If a gadget exists which uses the percpu value as an address
+   in another load/store, then the contents of the kernel value may
+   become visible via an L1 side channel attack.
+
+   A similar attack exists when coming from kernel space.  The CPU can
+   speculatively do the swapgs, causing the user GS to get used for the
+   rest of the speculative window.
+
+Spectre variant 2
+~~~~~~~~~~~~~~~~~
+
+   A spectre variant 2 attacker can :ref:`poison <poison_btb>` the branch
    target buffer (BTB) before issuing syscall to launch an attack.
    After entering the kernel, the kernel could use the poisoned branch
    target buffer on indirect jump and jump to gadget code in speculative
@@ -280,11 +324,18 @@ The sysfs file showing Spectre variant 1 mitigation status is:

 The possible values in this file are:

-  =======================================  =================================
-  'Mitigation: __user pointer sanitation'  Protection in kernel on a case by
-                                           case base with explicit pointer
-                                           sanitation.
-  =======================================  =================================
+  .. list-table::
+
+     * - 'Not affected'
+       - The processor is not vulnerable.
+     * - 'Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers'
+       - The swapgs protections are disabled; otherwise it has
+         protection in the kernel on a case by case base with explicit
+         pointer sanitation and usercopy LFENCE barriers.
+     * - 'Mitigation: usercopy/swapgs barriers and __user pointer sanitization'
+       - Protection in the kernel on a case by case base with explicit
+         pointer sanitation, usercopy LFENCE barriers, and swapgs LFENCE
+         barriers.

 However, the protections are put in place on a case by case basis,
 and there is no guarantee that all possible attack vectors for Spectre
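For readers who want to check a deployed system, the strings in the list-table above are what the kernel reports through sysfs. A minimal userspace sketch (not part of this diff; the path is the standard vulnerabilities interface, error handling kept minimal):

    /* Print the running kernel's Spectre v1 mitigation status. */
    #include <stdio.h>

    int main(void)
    {
        char line[256];
        FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v1", "r");

        if (!f) {
            perror("fopen");
            return 1;
        }
        if (fgets(line, sizeof(line), f))
            fputs(line, stdout); /* one of the list-table values above */
        fclose(f);
        return 0;
    }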
@@ -366,12 +417,27 @@ Turning on mitigation for Spectre variant 1 and Spectre variant 2
 1. Kernel mitigation
 ^^^^^^^^^^^^^^^^^^^^

+Spectre variant 1
+~~~~~~~~~~~~~~~~~
+
    For the Spectre variant 1, vulnerable kernel code (as determined
    by code audit or scanning tools) is annotated on a case by case
    basis to use nospec accessor macros for bounds clipping :ref:`[2]
    <spec_ref2>` to avoid any usable disclosure gadgets. However, it may
    not cover all attack vectors for Spectre variant 1.

+   Copy-from-user code has an LFENCE barrier to prevent the access_ok()
+   check from being mis-speculated.  The barrier is done by the
+   barrier_nospec() macro.
+
+   For the swapgs variant of Spectre variant 1, LFENCE barriers are
+   added to interrupt, exception and NMI entry where needed.  These
+   barriers are done by the FENCE_SWAPGS_KERNEL_ENTRY and
+   FENCE_SWAPGS_USER_ENTRY macros.
+
+Spectre variant 2
+~~~~~~~~~~~~~~~~~
+
    For Spectre variant 2 mitigation, the compiler turns indirect calls or
    jumps in the kernel into equivalent return trampolines (retpolines)
    :ref:`[3] <spec_ref3>` :ref:`[9] <spec_ref9>` to go to the target
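The "nospec accessor macros" mentioned above are array_index_nospec() and friends from include/linux/nospec.h in mainline. A sketch of the usual bounds-clipping pattern (illustrative function and variable names, not taken from this diff):

    #include <linux/nospec.h>

    /* Clamp a user-influenced index before using it, so a mispredicted
     * bounds check cannot speculatively read out of bounds. */
    static long example_lookup(const long *table, unsigned long index,
                               unsigned long size)
    {
        if (index >= size)
            return -1;
        index = array_index_nospec(index, size);
        return table[index];
    }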
@@ -473,6 +539,12 @@ Mitigation control on the kernel command line
 Spectre variant 2 mitigation can be disabled or force enabled at the
 kernel command line.

+        nospectre_v1
+
+                [X86,PPC] Disable mitigations for Spectre Variant 1
+                (bounds check bypass). With this option data leaks are
+                possible in the system.
+
         nospectre_v2

                 [X86] Disable all mitigations for the Spectre variant 2
@@ -2522,6 +2522,7 @@
                        Equivalent to: nopti [X86,PPC]
                                       nospectre_v1 [PPC]
                                       nobp=0 [S390]
+                                      nospectre_v1 [X86]
                                       nospectre_v2 [X86,PPC,S390]
                                       spectre_v2_user=off [X86]
                                       spec_store_bypass_disable=off [X86,PPC]
@@ -2868,9 +2869,9 @@
                        nosmt=force: Force disable SMT, cannot be undone
                                     via the sysfs control file.

-       nospectre_v1    [PPC] Disable mitigations for Spectre Variant 1 (bounds
-                       check bypass). With this option data leaks are possible
-                       in the system.
+       nospectre_v1    [X86,PPC] Disable mitigations for Spectre Variant 1
+                       (bounds check bypass). With this option data leaks
+                       are possible in the system.

        nospectre_v2    [X86,PPC_FSL_BOOK3E] Disable all mitigations for the Spectre variant 2
                        (indirect branch prediction) vulnerability. System may
Makefile (6 changes)

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 62
+SUBLEVEL = 66
 EXTRAVERSION =
 NAME = "People's Front"
@@ -434,6 +434,7 @@ KBUILD_CFLAGS_MODULE := -DMODULE
 KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
 KBUILD_LDFLAGS :=
 GCC_PLUGINS_CFLAGS :=
+CLANG_FLAGS :=

 export ARCH SRCARCH CONFIG_SHELL HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE AS LD CC
 export CPP AR NM STRIP OBJCOPY OBJDUMP KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS
@@ -487,7 +488,7 @@ endif
 ifeq ($(cc-name),clang)
 ifneq ($(CROSS_COMPILE),)
 CLANG_TRIPLE    ?= $(CROSS_COMPILE)
-CLANG_FLAGS     := --target=$(notdir $(CLANG_TRIPLE:%-=%))
+CLANG_FLAGS     += --target=$(notdir $(CLANG_TRIPLE:%-=%))
 ifeq ($(shell $(srctree)/scripts/clang-android.sh $(CC) $(CLANG_FLAGS)), y)
 $(error "Clang with Android --target detected. Did you specify CLANG_TRIPLE?")
 endif
@@ -499,6 +500,7 @@ ifneq ($(GCC_TOOLCHAIN),)
 CLANG_FLAGS     += --gcc-toolchain=$(GCC_TOOLCHAIN)
 endif
 CLANG_FLAGS     += -no-integrated-as
+CLANG_FLAGS     += -Werror=unknown-warning-option
 KBUILD_CFLAGS   += $(CLANG_FLAGS)
 KBUILD_AFLAGS   += $(CLANG_FLAGS)
 export CLANG_FLAGS
@@ -199,7 +199,6 @@ config NR_CPUS

 config ARC_SMP_HALT_ON_RESET
        bool "Enable Halt-on-reset boot mode"
-       default y if ARC_UBOOT_SUPPORT
        help
          In SMP configuration cores can be configured as Halt-on-reset
          or they could all start at same time. For Halt-on-reset, non
@@ -539,18 +538,6 @@ config ARC_DBG_TLB_PARANOIA

 endif

-config ARC_UBOOT_SUPPORT
-       bool "Support uboot arg Handling"
-       default n
-       help
-         ARC Linux by default checks for uboot provided args as pointers to
-         external cmdline or DTB. This however breaks in absence of uboot,
-         when booting from Metaware debugger directly, as the registers are
-         not zeroed out on reset by mdb and/or ARCv2 based cores. The bogus
-         registers look like uboot args to kernel which then chokes.
-         So only enable the uboot arg checking/processing if users are sure
-         of uboot being in play.
-
 config ARC_BUILTIN_DTB_NAME
        string "Built in DTB"
        help
@@ -31,7 +31,6 @@ CONFIG_ARC_CACHE_LINE_SHIFT=5
 # CONFIG_ARC_HAS_LLSC is not set
 CONFIG_ARC_KVADDR_SIZE=402
 CONFIG_ARC_EMUL_UNALIGNED=y
-CONFIG_ARC_UBOOT_SUPPORT=y
 CONFIG_PREEMPT=y
 CONFIG_NET=y
 CONFIG_UNIX=y
@@ -13,7 +13,6 @@ CONFIG_PARTITION_ADVANCED=y
 CONFIG_ARC_PLAT_AXS10X=y
 CONFIG_AXS103=y
 CONFIG_ISA_ARCV2=y
-CONFIG_ARC_UBOOT_SUPPORT=y
 CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38"
 CONFIG_PREEMPT=y
 CONFIG_NET=y
@@ -15,8 +15,6 @@ CONFIG_AXS103=y
 CONFIG_ISA_ARCV2=y
 CONFIG_SMP=y
 # CONFIG_ARC_TIMERS_64BIT is not set
-# CONFIG_ARC_SMP_HALT_ON_RESET is not set
-CONFIG_ARC_UBOOT_SUPPORT=y
 CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38_smp"
 CONFIG_PREEMPT=y
 CONFIG_NET=y
@@ -100,7 +100,6 @@ ENTRY(stext)
        st.ab   0, [r5, 4]
 1:

-#ifdef CONFIG_ARC_UBOOT_SUPPORT
        ; Uboot - kernel ABI
        ;    r0 = [0] No uboot interaction, [1] cmdline in r2, [2] DTB in r2
        ;    r1 = magic number (always zero as of now)
@@ -109,7 +108,6 @@ ENTRY(stext)
        st      r0, [@uboot_tag]
        st      r1, [@uboot_magic]
        st      r2, [@uboot_arg]
-#endif

        ; setup "current" tsk and optionally cache it in dedicated r25
        mov     r9, @init_task
@@ -493,7 +493,6 @@ void __init handle_uboot_args(void)
        bool use_embedded_dtb = true;
        bool append_cmdline = false;

-#ifdef CONFIG_ARC_UBOOT_SUPPORT
        /* check that we know this tag */
        if (uboot_tag != UBOOT_TAG_NONE &&
            uboot_tag != UBOOT_TAG_CMDLINE &&
@@ -525,7 +524,6 @@ void __init handle_uboot_args(void)
                append_cmdline = true;

 ignore_uboot_args:
-#endif

        if (use_embedded_dtb) {
                machine_desc = setup_machine_fdt(__dtb_start);
@@ -124,10 +124,6 @@
        };
 };

-&emmc {
-       /delete-property/mmc-hs200-1_8v;
-};
-
 &i2c2 {
        status = "disabled";
 };
@@ -90,10 +90,6 @@
        pwm-off-delay-ms = <200>;
 };

-&emmc {
-       /delete-property/mmc-hs200-1_8v;
-};
-
 &gpio_keys {
        pinctrl-0 = <&pwr_key_l &ap_lid_int_l &volum_down_l &volum_up_l>;

@@ -227,6 +227,7 @@
                     <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>,
                     <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
        clock-frequency = <24000000>;
+       arm,no-tick-in-suspend;
 };

 timer: timer@ff810000 {
@@ -131,7 +131,7 @@ static irqreturn_t iomd_dma_handle(int irq, void *dev_id)
        } while (1);

        idma->state = ~DMA_ST_AB;
-       disable_irq(irq);
+       disable_irq_nosync(irq);

        return IRQ_HANDLED;
 }
@@ -174,6 +174,9 @@ static void iomd_enable_dma(unsigned int chan, dma_t *dma)
                                DMA_FROM_DEVICE : DMA_TO_DEVICE);
                }

+               idma->dma_addr = idma->dma.sg->dma_address;
+               idma->dma_len = idma->dma.sg->length;
+
                iomd_writeb(DMA_CR_C, dma_base + CR);
                idma->state = DMA_ST_AB;
        }
@@ -1643,11 +1643,11 @@
                reg = <0x0 0xff914000 0x0 0x100>, <0x0 0xff915000 0x0 0x100>;
                interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH 0>;
                interrupt-names = "isp0_mmu";
-               clocks = <&cru ACLK_ISP0_NOC>, <&cru HCLK_ISP0_NOC>;
+               clocks = <&cru ACLK_ISP0_WRAPPER>, <&cru HCLK_ISP0_WRAPPER>;
                clock-names = "aclk", "iface";
                #iommu-cells = <0>;
+               power-domains = <&power RK3399_PD_ISP0>;
                rockchip,disable-mmu-reset;
-               status = "disabled";
        };

        isp1_mmu: iommu@ff924000 {
@@ -1655,11 +1655,11 @@
                reg = <0x0 0xff924000 0x0 0x100>, <0x0 0xff925000 0x0 0x100>;
                interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH 0>;
                interrupt-names = "isp1_mmu";
-               clocks = <&cru ACLK_ISP1_NOC>, <&cru HCLK_ISP1_NOC>;
+               clocks = <&cru ACLK_ISP1_WRAPPER>, <&cru HCLK_ISP1_WRAPPER>;
                clock-names = "aclk", "iface";
                #iommu-cells = <0>;
+               power-domains = <&power RK3399_PD_ISP1>;
                rockchip,disable-mmu-reset;
-               status = "disabled";
        };

        hdmi_sound: hdmi-sound {
@@ -124,7 +124,11 @@
  * RAS Error Synchronization barrier
  */
        .macro  esb
+#ifdef CONFIG_ARM64_RAS_EXTN
        hint    #16
+#else
+       nop
+#endif
        .endm

 /*
@@ -159,6 +159,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
 }

 #define compat_user_stack_pointer() (user_stack_pointer(task_pt_regs(current)))
+#define COMPAT_MINSIGSTKSZ     2048

 static inline void __user *arch_compat_alloc_user_space(long len)
 {
@@ -48,6 +48,7 @@ enum ftr_type
        FTR_EXACT,                      /* Use a predefined safe value */
        FTR_LOWER_SAFE,                 /* Smaller value is safe */
        FTR_HIGHER_SAFE,                /* Bigger value is safe */
+       FTR_HIGHER_OR_ZERO_SAFE,        /* Bigger value is safe, but 0 is biggest */
 };

 #define FTR_STRICT     true    /* SANITY check strict matching required */
@@ -211,8 +211,8 @@ static const struct arm64_ftr_bits ftr_ctr[] = {
        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */
        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DIC_SHIFT, 1, 1),
        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IDC_SHIFT, 1, 1),
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, CTR_CWG_SHIFT, 4, 0),
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, CTR_ERG_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_CWG_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_ERG_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
        /*
         * Linux can handle differing I-cache policies. Userspace JITs will
@@ -454,6 +454,10 @@ static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
        case FTR_LOWER_SAFE:
                ret = new < cur ? new : cur;
                break;
+       case FTR_HIGHER_OR_ZERO_SAFE:
+               if (!cur || !new)
+                       break;
+               /* Fallthrough */
        case FTR_HIGHER_SAFE:
                ret = new > cur ? new : cur;
                break;
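The semantics of the new FTR_HIGHER_OR_ZERO_SAFE type: take the larger of the two field values, except that 0 wins outright, matching the CTR_EL0.CWG/ERG convention where 0 means "value not reported, assume maximum". A standalone model of the case added above:

    #include <stdint.h>

    static int64_t higher_or_zero_safe(int64_t new_val, int64_t cur_val)
    {
        if (!new_val || !cur_val)
            return 0; /* 0 encodes "maximum" and is the safe result */
        return new_val > cur_val ? new_val : cur_val; /* the FTR_HIGHER_SAFE part */
    }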
@@ -547,13 +547,14 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
                        /* Aligned */
                        break;
                case 1:
-                       /* Allow single byte watchpoint. */
-                       if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
-                               break;
                case 2:
                        /* Allow halfword watchpoints and breakpoints. */
                        if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
                                break;
+               case 3:
+                       /* Allow single byte watchpoint. */
+                       if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
+                               break;
                default:
                        return -EINVAL;
                }
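The point of the reordering above is the fall-through chain: misaligned offsets 1 and 2 first get the halfword check, and the single-byte check now lives under its own case 3 instead of swallowing offsets 2 and 3. A toy model with hypothetical length values:

    #include <stdbool.h>

    static bool offset_len_ok(int offset, int len)
    {
        switch (offset) {
        case 0:
            return true;  /* aligned */
        case 1:
        case 2:
            if (len == 2) /* halfword allowed at offset 1 or 2 */
                return true;
            /* fall through */
        case 3:
            if (len == 1) /* single byte allowed at offsets 1..3 */
                return true;
            /* fall through */
        default:
            return false;
        }
    }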
@@ -156,8 +156,9 @@ static int ltq_eiu_settype(struct irq_data *d, unsigned int type)
                        if (edge)
                                irq_set_handler(d->hwirq, handle_edge_irq);

-                       ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) |
-                                   (val << (i * 4)), LTQ_EIU_EXIN_C);
+                       ltq_eiu_w32((ltq_eiu_r32(LTQ_EIU_EXIN_C) &
+                                   (~(7 << (i * 4)))) | (val << (i * 4)),
+                                   LTQ_EIU_EXIN_C);
                }
        }

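The fix above is a read-modify-write: clear the pin's trigger-configuration field before OR-ing in the new value, instead of OR-ing on top of stale bits (which made a later irq_set_type() on the same pin accumulate garbage). Standalone illustration:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t set_trigger_bits(uint32_t reg, unsigned int i, uint32_t val)
    {
        reg &= ~(7u << (i * 4));       /* clear the old bits, as the patch does */
        reg |= (val & 7u) << (i * 4);  /* install the new configuration */
        return reg;
    }

    int main(void)
    {
        /* Reprogramming pin 1 from type 6 to type 1 now yields 0x0010, not 0x0070: */
        printf("0x%04x\n", set_trigger_bits(0x0060, 1, 1));
        return 0;
    }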
@@ -42,8 +42,8 @@ SECTIONS
 #endif
        _startcode_end = .;

-       /* bootloader code and data starts behind area of extracted kernel */
-       . = (SZ_end - SZparisc_kernel_start + KERNEL_BINARY_TEXT_START);
+       /* bootloader code and data starts at least behind area of extracted kernel */
+       . = MAX(ABSOLUTE(.), (SZ_end - SZparisc_kernel_start + KERNEL_BINARY_TEXT_START));

        /* align on next page boundary */
        . = ALIGN(4096);
@@ -20,10 +20,30 @@ static inline uint32_t swab32p(void *p)

 #ifdef __LITTLE_ENDIAN__
 #define get_le32(p) (*((uint32_t *) (p)))
+#define cpu_to_be32(x) swab32(x)
+static inline u32 be32_to_cpup(const u32 *p)
+{
+       return swab32p((u32 *)p);
+}
 #else
 #define get_le32(p) swab32p(p)
+#define cpu_to_be32(x) (x)
+static inline u32 be32_to_cpup(const u32 *p)
+{
+       return *p;
+}
 #endif

+static inline uint32_t get_unaligned_be32(const void *p)
+{
+       return be32_to_cpup(p);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+       *((u32 *)p) = cpu_to_be32(val);
+}
+
 #define memeq(a, b, size) (memcmp(a, b, size) == 0)
 #define memzero(buf, size) memset(buf, 0, size)

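A quick userspace check of the endian helpers added above. Note the in-tree version reads the u32 directly and relies on swab32p(); the byte-wise form below is the portable equivalent (uint32_t stands in for the kernel's u32 type):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t get_unaligned_be32_model(const void *p)
    {
        const unsigned char *b = p;
        return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
               ((uint32_t)b[2] << 8) | b[3];
    }

    int main(void)
    {
        unsigned char buf[] = { 0x12, 0x34, 0x56, 0x78 };
        printf("0x%08x\n", get_unaligned_be32_model(buf)); /* 0x12345678 */
        return 0;
    }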
@@ -32,9 +32,12 @@
  * not expect this type of fault. flush_cache_vmap is not exactly the right
  * place to put this, but it seems to work well enough.
  */
-#define flush_cache_vmap(start, end)           do { asm volatile("ptesync" ::: "memory"); } while (0)
+static inline void flush_cache_vmap(unsigned long start, unsigned long end)
+{
+       asm volatile("ptesync" ::: "memory");
+}
 #else
-#define flush_cache_vmap(start, end)           do { } while (0)
+static inline void flush_cache_vmap(unsigned long start, unsigned long end) { }
 #endif

 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE      1
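Converting the macro to a static inline is more than style: the inline version type-checks its arguments and marks them used, so a caller whose only use of a variable was the no-op macro stops triggering unused-variable warnings. Minimal sketch of the difference (assumed motivation; the diff itself only shows the conversion):

    #define flush_macro(start, end) do { } while (0) /* arguments never evaluated */

    static inline void flush_inline(unsigned long start, unsigned long end)
    {
        /* start/end are now real, type-checked parameters */
        (void)start;
        (void)end;
    }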
@@ -360,10 +360,19 @@ static inline unsigned long eeh_token_to_phys(unsigned long token)
        ptep = find_init_mm_pte(token, &hugepage_shift);
        if (!ptep)
                return token;
-       WARN_ON(hugepage_shift);
-       pa = pte_pfn(*ptep) << PAGE_SHIFT;

-       return pa | (token & (PAGE_SIZE-1));
+       pa = pte_pfn(*ptep);
+
+       /* On radix we can do hugepage mappings for io, so handle that */
+       if (hugepage_shift) {
+               pa <<= hugepage_shift;
+               pa |= token & ((1ul << hugepage_shift) - 1);
+       } else {
+               pa <<= PAGE_SHIFT;
+               pa |= token & (PAGE_SIZE - 1);
+       }
+
+       return pa;
 }

 /*
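The reworked eeh_token_to_phys() above composes the physical address as the PFN shifted by the mapping's page size, plus the offset bits kept from the token. Toy version with explicit arithmetic (a PAGE_SHIFT of 12 assumed for the demo):

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_PAGE_SHIFT 12

    static uint64_t token_to_phys(uint64_t pfn, uint64_t token, unsigned int hugepage_shift)
    {
        uint64_t pa = pfn;

        if (hugepage_shift) {          /* huge mapping: keep more offset bits */
            pa <<= hugepage_shift;
            pa |= token & ((1ull << hugepage_shift) - 1);
        } else {                       /* normal page */
            pa <<= DEMO_PAGE_SHIFT;
            pa |= token & ((1ull << DEMO_PAGE_SHIFT) - 1);
        }
        return pa;
    }

    int main(void)
    {
        /* 16MB huge page (shift 24): prints 0x1abcdef */
        printf("0x%llx\n", (unsigned long long)token_to_phys(0x1, 0xabcdef, 24));
        return 0;
    }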
@@ -45,6 +45,8 @@ unsigned int pci_parse_of_flags(u32 addr0, int bridge)
        if (addr0 & 0x02000000) {
                flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY;
                flags |= (addr0 >> 22) & PCI_BASE_ADDRESS_MEM_TYPE_64;
+               if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
+                       flags |= IORESOURCE_MEM_64;
                flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M;
                if (addr0 & 0x40000000)
                        flags |= IORESOURCE_PREFETCH
@@ -1202,6 +1202,9 @@ SYSCALL_DEFINE0(rt_sigreturn)
                        goto bad;

                if (MSR_TM_ACTIVE(msr_hi<<32)) {
+                       /* Trying to start TM on non TM system */
+                       if (!cpu_has_feature(CPU_FTR_TM))
+                               goto bad;
                        /* We only recheckpoint on return if we're
                         * transaction.
                         */
@@ -750,6 +750,11 @@ SYSCALL_DEFINE0(rt_sigreturn)
        if (MSR_TM_ACTIVE(msr)) {
                /* We recheckpoint on return. */
                struct ucontext __user *uc_transact;
+
+               /* Trying to start TM on non TM system */
+               if (!cpu_has_feature(CPU_FTR_TM))
+                       goto badframe;
+
                if (__get_user(uc_transact, &uc->uc_link))
                        goto badframe;
                if (restore_tm_sigcontexts(current, &uc->uc_mcontext,
@@ -150,6 +150,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
        } else {
                pdshift = PUD_SHIFT;
                pu = pud_alloc(mm, pg, addr);
+               if (!pu)
+                       return NULL;
                if (pshift == PUD_SHIFT)
                        return (pte_t *)pu;
                else if (pshift > PMD_SHIFT) {
@@ -158,6 +160,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
        } else {
                pdshift = PMD_SHIFT;
                pm = pmd_alloc(mm, pu, addr);
+               if (!pm)
+                       return NULL;
                if (pshift == PMD_SHIFT)
                        /* 16MB hugepage */
                        return (pte_t *)pm;
@@ -174,12 +178,16 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
        } else {
                pdshift = PUD_SHIFT;
                pu = pud_alloc(mm, pg, addr);
+               if (!pu)
+                       return NULL;
                if (pshift >= PUD_SHIFT) {
                        ptl = pud_lockptr(mm, pu);
                        hpdp = (hugepd_t *)pu;
                } else {
                        pdshift = PMD_SHIFT;
                        pm = pmd_alloc(mm, pu, addr);
+                       if (!pm)
+                               return NULL;
                        ptl = pmd_lockptr(mm, pm);
                        hpdp = (hugepd_t *)pm;
                }
@@ -158,6 +158,7 @@ static int uic_set_irq_type(struct irq_data *d, unsigned int flow_type)

        mtdcr(uic->dcrbase + UIC_PR, pr);
        mtdcr(uic->dcrbase + UIC_TR, tr);
+       mtdcr(uic->dcrbase + UIC_SR, ~mask);

        raw_spin_unlock_irqrestore(&uic->lock, flags);

@@ -9,6 +9,7 @@
  * 2 as published by the Free Software Foundation.
  */

+#include <linux/cpu.h>
 #include <linux/kernel.h>
 #include <linux/kobject.h>
 #include <linux/smp.h>
@@ -344,11 +345,19 @@ void post_mobility_fixup(void)
        if (rc)
                printk(KERN_ERR "Post-mobility activate-fw failed: %d\n", rc);

+       /*
+        * We don't want CPUs to go online/offline while the device
+        * tree is being updated.
+        */
+       cpus_read_lock();
+
        rc = pseries_devicetree_update(MIGRATION_SCOPE);
        if (rc)
                printk(KERN_ERR "Post-mobility device tree update "
                        "failed: %d\n", rc);

+       cpus_read_unlock();
+
        /* Possibly switch to a new RFI flush type */
        pseries_setup_rfi_flush();

@@ -483,7 +483,7 @@ static int xive_find_target_in_mask(const struct cpumask *mask,
         * Now go through the entire mask until we find a valid
         * target.
         */
-       for (;;) {
+       do {
                /*
                 * We re-check online as the fallback case passes us
                 * an untested affinity mask
@@ -491,12 +491,11 @@ static int xive_find_target_in_mask(const struct cpumask *mask,
                if (cpu_online(cpu) && xive_try_pick_target(cpu))
                        return cpu;
                cpu = cpumask_next(cpu, mask);
-               if (cpu == first)
-                       break;
                /* Wrap around */
                if (cpu >= nr_cpu_ids)
                        cpu = cpumask_first(mask);
-       }
+       } while (cpu != first);

        return -1;
 }
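The for(;;) to do/while conversion matters for a single-CPU mask: the old loop tested cpu == first before handling the wrap-around, so once cpumask_next() ran past the end of the mask the exit condition could never fire and the loop spun forever. The new shape tests after the wrap. Minimal model:

    #include <stdbool.h>
    #include <stdio.h>

    static int find_target(const bool *ok, int n, int first)
    {
        int cpu = first;

        do {
            if (ok[cpu])
                return cpu;
            cpu = (cpu + 1) % n;   /* cpumask_next plus wrap-around */
        } while (cpu != first);    /* stop after one full pass */

        return -1;
    }

    int main(void)
    {
        bool ok[1] = { false };
        printf("%d\n", find_target(ok, 1, 0)); /* -1: terminates */
        return 0;
    }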
@@ -466,8 +466,10 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
        local_irq_save(flags);
        hard_irq_disable();

+       if (!fromipi) {
                tracing_enabled = tracing_is_on();
                tracing_off();
+       }

        bp = in_breakpoint_table(regs->nip, &offset);
        if (bp != NULL) {
@@ -8,27 +8,19 @@ config SH_ALPHA_BOARD
        bool

 config SH_DEVICE_TREE
-       bool "Board Described by Device Tree"
+       bool
        select OF
        select OF_EARLY_FLATTREE
        select TIMER_OF
        select COMMON_CLK
        select GENERIC_CALIBRATE_DELAY
-       help
-         Select Board Described by Device Tree to build a kernel that
-         does not hard-code any board-specific knowledge but instead uses
-         a device tree blob provided by the boot-loader. You must enable
-         drivers for any hardware you want to use separately. At this
-         time, only boards based on the open-hardware J-Core processors
-         have sufficient driver coverage to use this option; do not
-         select it if you are using original SuperH hardware.

 config SH_JCORE_SOC
        bool "J-Core SoC"
-       depends on SH_DEVICE_TREE && (CPU_SH2 || CPU_J2)
+       select SH_DEVICE_TREE
        select CLKSRC_JCORE_PIT
        select JCORE_AIC
-       default y if CPU_J2
+       depends on CPU_J2
        help
          Select this option to include drivers core components of the
          J-Core SoC, including interrupt controllers and timers.
@@ -371,7 +371,11 @@ static inline int iounmap_fixed(void __iomem *addr) { return -EINVAL; }

 #define ioremap_nocache        ioremap
 #define ioremap_uc     ioremap
-#define iounmap                __iounmap
+
+static inline void iounmap(void __iomem *addr)
+{
+       __iounmap(addr);
+}

 /*
  * Convert a physical pointer to a virtual kernel pointer for /dev/mem
@@ -53,7 +53,7 @@ static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
         * when the new ->mm is used for the first time.
         */
        __switch_mm(&new->context.id);
-       down_write(&new->mmap_sem);
+       down_write_nested(&new->mmap_sem, 1);
        uml_setup_stubs(new);
        up_write(&new->mmap_sem);
 }
@@ -17,6 +17,7 @@
 #include "pgtable.h"
 #include "../string.h"
 #include "../voffset.h"
+#include <asm/bootparam_utils.h>

 /*
  * WARNING!!
@@ -22,7 +22,6 @@
 #include <asm/page.h>
 #include <asm/boot.h>
 #include <asm/bootparam.h>
-#include <asm/bootparam_utils.h>

 #define BOOT_BOOT_H
 #include "../ctype.h"
@@ -329,6 +329,23 @@ For 32-bit we have the following conventions - kernel is built with

 #endif

+/*
+ * Mitigate Spectre v1 for conditional swapgs code paths.
+ *
+ * FENCE_SWAPGS_USER_ENTRY is used in the user entry swapgs code path, to
+ * prevent a speculative swapgs when coming from kernel space.
+ *
+ * FENCE_SWAPGS_KERNEL_ENTRY is used in the kernel entry non-swapgs code path,
+ * to prevent the swapgs from getting speculatively skipped when coming from
+ * user space.
+ */
+.macro FENCE_SWAPGS_USER_ENTRY
+       ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_USER
+.endm
+.macro FENCE_SWAPGS_KERNEL_ENTRY
+       ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_KERNEL
+.endm
+
 #endif /* CONFIG_X86_64 */

 /*
@@ -582,7 +582,7 @@ ENTRY(interrupt_entry)
        testb   $3, CS-ORIG_RAX+8(%rsp)
        jz      1f
        SWAPGS
+       FENCE_SWAPGS_USER_ENTRY
        /*
         * Switch to the thread stack. The IRET frame and orig_ax are
         * on the stack, as well as the return address. RDI..R12 are
@@ -612,8 +612,10 @@ ENTRY(interrupt_entry)
        UNWIND_HINT_FUNC

        movq    (%rdi), %rdi
+       jmp     2f
 1:
+       FENCE_SWAPGS_KERNEL_ENTRY
+2:
        PUSH_AND_CLEAR_REGS save_ret=1
        ENCODE_FRAME_POINTER 8

@@ -1196,7 +1198,6 @@ idtentry stack_segment do_stack_segment has_error_code=1
 #ifdef CONFIG_XEN
 idtentry xennmi                do_nmi                  has_error_code=0
 idtentry xendebug      do_debug                has_error_code=0
-idtentry xenint3       do_int3                 has_error_code=0
 #endif

 idtentry general_protection    do_general_protection   has_error_code=1
@@ -1241,6 +1242,13 @@ ENTRY(paranoid_entry)
         */
        SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14

+       /*
+        * The above SAVE_AND_SWITCH_TO_KERNEL_CR3 macro doesn't do an
+        * unconditional CR3 write, even in the PTI case.  So do an lfence
+        * to prevent GS speculation, regardless of whether PTI is enabled.
+        */
+       FENCE_SWAPGS_KERNEL_ENTRY
+
        ret
 END(paranoid_entry)

@@ -1291,6 +1299,7 @@ ENTRY(error_entry)
         * from user mode due to an IRET fault.
         */
        SWAPGS
+       FENCE_SWAPGS_USER_ENTRY
        /* We have user CR3.  Change to kernel CR3. */
        SWITCH_TO_KERNEL_CR3 scratch_reg=%rax

@@ -1312,6 +1321,8 @@ ENTRY(error_entry)
        CALL_enter_from_user_mode
        ret

+.Lerror_entry_done_lfence:
+       FENCE_SWAPGS_KERNEL_ENTRY
 .Lerror_entry_done:
        TRACE_IRQS_OFF
        ret
@@ -1330,7 +1341,7 @@ ENTRY(error_entry)
        cmpq    %rax, RIP+8(%rsp)
        je      .Lbstep_iret
        cmpq    $.Lgs_change, RIP+8(%rsp)
-       jne     .Lerror_entry_done
+       jne     .Lerror_entry_done_lfence

        /*
         * hack: .Lgs_change can fail with user gsbase.  If this happens, fix up
@@ -1338,6 +1349,7 @@ ENTRY(error_entry)
         * .Lgs_change's error handler with kernel gsbase.
         */
        SWAPGS
+       FENCE_SWAPGS_USER_ENTRY
        SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
        jmp .Lerror_entry_done

@@ -1352,6 +1364,7 @@ ENTRY(error_entry)
         * gsbase and CR3.  Switch to kernel gsbase and CR3:
         */
        SWAPGS
+       FENCE_SWAPGS_USER_ENTRY
        SWITCH_TO_KERNEL_CR3 scratch_reg=%rax

        /*
@@ -1443,6 +1456,7 @@ ENTRY(nmi)

        swapgs
        cld
+       FENCE_SWAPGS_USER_ENTRY
        SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx
        movq    %rsp, %rdx
        movq    PER_CPU_VAR(cpu_current_top_of_stack), %rsp
@@ -29,12 +29,12 @@ extern int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz);
 extern time_t __vdso_time(time_t *t);

 #ifdef CONFIG_PARAVIRT_CLOCK
-extern u8 pvclock_page
+extern u8 pvclock_page[PAGE_SIZE]
        __attribute__((visibility("hidden")));
 #endif

 #ifdef CONFIG_HYPERV_TSCPAGE
-extern u8 hvclock_page
+extern u8 hvclock_page[PAGE_SIZE]
        __attribute__((visibility("hidden")));
 #endif

@@ -191,13 +191,24 @@ notrace static inline u64 vgetsns(int *mode)

        if (gtod->vclock_mode == VCLOCK_TSC)
                cycles = vread_tsc();

+       /*
+        * For any memory-mapped vclock type, we need to make sure that gcc
+        * doesn't cleverly hoist a load before the mode check. Otherwise we
+        * might end up touching the memory-mapped page even if the vclock in
+        * question isn't enabled, which will segfault. Hence the barriers.
+        */
 #ifdef CONFIG_PARAVIRT_CLOCK
-       else if (gtod->vclock_mode == VCLOCK_PVCLOCK)
+       else if (gtod->vclock_mode == VCLOCK_PVCLOCK) {
+               barrier();
                cycles = vread_pvclock(mode);
+       }
 #endif
 #ifdef CONFIG_HYPERV_TSCPAGE
-       else if (gtod->vclock_mode == VCLOCK_HVCLOCK)
+       else if (gtod->vclock_mode == VCLOCK_HVCLOCK) {
+               barrier();
                cycles = vread_hvclock(mode);
+       }
 #endif
        else
                return 0;
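The barrier() calls added above are compiler-only fences; without them the compiler may hoist the load from the memory-mapped clock page above the vclock-mode check and fault when that page is not mapped. A minimal model using the kernel's definition of barrier() (a GCC extension):

    #define barrier() __asm__ __volatile__("" ::: "memory")

    extern const unsigned long pvclock_word; /* stand-in for the pvclock page */

    unsigned long read_cycles(int mode)
    {
        if (mode != 2)   /* 2 plays the role of VCLOCK_PVCLOCK here */
            return 0;
        barrier();       /* keep the load below the mode check */
        return pvclock_word;
    }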
@@ -48,7 +48,7 @@ static inline void generic_apic_probe(void)

 #ifdef CONFIG_X86_LOCAL_APIC

-extern unsigned int apic_verbosity;
+extern int apic_verbosity;
 extern int local_apic_timer_c2_ok;

 extern int disable_apic;
@@ -22,8 +22,8 @@ enum cpuid_leafs
        CPUID_LNX_3,
        CPUID_7_0_EBX,
        CPUID_D_1_EAX,
-       CPUID_F_0_EDX,
-       CPUID_F_1_EDX,
+       CPUID_LNX_4,
+       CPUID_DUMMY,
        CPUID_8000_0008_EBX,
        CPUID_6_EAX,
        CPUID_8000_000A_EDX,
@@ -271,13 +271,18 @@
 #define X86_FEATURE_XGETBV1            (10*32+ 2) /* XGETBV with ECX = 1 instruction */
 #define X86_FEATURE_XSAVES             (10*32+ 3) /* XSAVES/XRSTORS instructions */

-/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (EDX), word 11 */
-#define X86_FEATURE_CQM_LLC            (11*32+ 1) /* LLC QoS if 1 */
-
-/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (EDX), word 12 */
-#define X86_FEATURE_CQM_OCCUP_LLC      (12*32+ 0) /* LLC occupancy monitoring */
-#define X86_FEATURE_CQM_MBM_TOTAL      (12*32+ 1) /* LLC Total MBM monitoring */
-#define X86_FEATURE_CQM_MBM_LOCAL      (12*32+ 2) /* LLC Local MBM monitoring */
+/*
+ * Extended auxiliary flags: Linux defined - for features scattered in various
+ * CPUID levels like 0xf, etc.
+ *
+ * Reuse free bits when adding new feature flags!
+ */
+#define X86_FEATURE_CQM_LLC            (11*32+ 0) /* LLC QoS if 1 */
+#define X86_FEATURE_CQM_OCCUP_LLC      (11*32+ 1) /* LLC occupancy monitoring */
+#define X86_FEATURE_CQM_MBM_TOTAL      (11*32+ 2) /* LLC Total MBM monitoring */
+#define X86_FEATURE_CQM_MBM_LOCAL      (11*32+ 3) /* LLC Local MBM monitoring */
+#define X86_FEATURE_FENCE_SWAPGS_USER  (11*32+ 4) /* "" LFENCE in user entry SWAPGS path */
+#define X86_FEATURE_FENCE_SWAPGS_KERNEL        (11*32+ 5) /* "" LFENCE in kernel entry SWAPGS path */

 /* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */
 #define X86_FEATURE_CLZERO             (13*32+ 0) /* CLZERO instruction */
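The (11*32+ 4) notation in the new flags is the kernel's word*32+bit encoding for its capability bitmap, which is what makes "reuse free bits" possible within word 11. Sketch of the encoding:

    #include <stdio.h>

    #define FEATURE(word, bit) ((word) * 32 + (bit))

    int main(void)
    {
        int f = FEATURE(11, 4); /* X86_FEATURE_FENCE_SWAPGS_USER */
        printf("word %d, bit %d\n", f / 32, f % 32); /* word 11, bit 4 */
        return 0;
    }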
@@ -383,5 +388,6 @@
 #define X86_BUG_L1TF                   X86_BUG(18) /* CPU is affected by L1 Terminal Fault */
 #define X86_BUG_MDS                    X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */
 #define X86_BUG_MSBDS_ONLY             X86_BUG(20) /* CPU is only affected by the MSDBS variant of BUG_MDS */
+#define X86_BUG_SWAPGS                 X86_BUG(21) /* CPU is affected by speculation through SWAPGS */

 #endif /* _ASM_X86_CPUFEATURES_H */
@@ -1427,25 +1427,29 @@ enum {
 #define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
 #define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm)

+asmlinkage void __noreturn kvm_spurious_fault(void);
+
 /*
  * Hardware virtualization extension instructions may fault if a
  * reboot turns off virtualization while processes are running.
- * Trap the fault and ignore the instruction if that happens.
+ * Usually after catching the fault we just panic; during reboot
+ * instead the instruction is ignored.
  */
-asmlinkage void kvm_spurious_fault(void);
-
 #define ____kvm_handle_fault_on_reboot(insn, cleanup_insn)    \
-       "666: " insn "\n\t" \
-       "668: \n\t" \
-       ".pushsection .fixup, \"ax\" \n" \
+       "666: \n\t" \
+       insn "\n\t" \
+       "jmp    668f \n\t" \
        "667: \n\t" \
+       "call   kvm_spurious_fault \n\t" \
+       "668: \n\t" \
+       ".pushsection .fixup, \"ax\" \n\t" \
+       "700: \n\t" \
        cleanup_insn "\n\t" \
        "cmpb   $0, kvm_rebooting\n\t" \
-       "jne 668b \n\t" \
-       __ASM_SIZE(push) " $666b \n\t" \
-       "jmp kvm_spurious_fault \n\t" \
+       "je     667b \n\t" \
+       "jmp    668b \n\t" \
        ".popsection \n\t" \
-       _ASM_EXTABLE(666b, 667b)
+       _ASM_EXTABLE(666b, 700b)

 #define __kvm_handle_fault_on_reboot(insn) \
        ____kvm_handle_fault_on_reboot(insn, "")
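The reworked fixup above changes where a faulting VMX/SVM instruction lands: a fault at label 666 resumes at 700 in .fixup, which runs the cleanup and then either calls kvm_spurious_fault() (now a real call, and __noreturn) or, if kvm_rebooting is set, jumps back past the instruction. A C model of that control flow (a sketch only; the real mechanism is the inline-asm exception table):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    static bool kvm_rebooting;

    static void kvm_spurious_fault(void) /* __noreturn in the real header */
    {
        fprintf(stderr, "spurious VMX/SVM fault\n");
        exit(1);
    }

    static void fixup_path(void) /* what label 700 does */
    {
        /* cleanup_insn would run here */
        if (!kvm_rebooting)
            kvm_spurious_fault(); /* "je 667b" -> call, never returns */
        /* otherwise "jmp 668b": ignore the faulting instruction */
    }

    int main(void)
    {
        kvm_rebooting = true;
        fixup_path(); /* survives: reboot in progress, fault ignored */
        return 0;
    }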
@@ -768,6 +768,7 @@ static __always_inline bool pv_vcpu_is_preempted(long cpu)
        PV_RESTORE_ALL_CALLER_REGS                                      \
        FRAME_END                                                       \
        "ret;"                                                          \
+       ".size " PV_THUNK_NAME(func) ", .-" PV_THUNK_NAME(func) ";"    \
        ".popsection")

 /* Get a reference to a callee-save function */
@@ -40,7 +40,7 @@ asmlinkage void simd_coprocessor_error(void);
 asmlinkage void xen_divide_error(void);
 asmlinkage void xen_xennmi(void);
 asmlinkage void xen_xendebug(void);
-asmlinkage void xen_xenint3(void);
+asmlinkage void xen_int3(void);
 asmlinkage void xen_overflow(void);
 asmlinkage void xen_bounds(void);
 asmlinkage void xen_invalid_op(void);
@@ -181,7 +181,7 @@ EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
 /*
  * Debug level, exported for io_apic.c
  */
-unsigned int apic_verbosity;
+int apic_verbosity;

 int pic_mode;

@@ -32,6 +32,7 @@
 #include <asm/e820/api.h>
 #include <asm/hypervisor.h>

+static void __init spectre_v1_select_mitigation(void);
 static void __init spectre_v2_select_mitigation(void);
 static void __init ssb_select_mitigation(void);
 static void __init l1tf_select_mitigation(void);
@@ -96,17 +97,11 @@ void __init check_bugs(void)
        if (boot_cpu_has(X86_FEATURE_STIBP))
                x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;

-       /* Select the proper spectre mitigation before patching alternatives */
+       /* Select the proper CPU mitigations before patching alternatives: */
+       spectre_v1_select_mitigation();
        spectre_v2_select_mitigation();

-       /*
-        * Select proper mitigation for any exposure to the Speculative Store
-        * Bypass vulnerability.
-        */
        ssb_select_mitigation();

        l1tf_select_mitigation();

        mds_select_mitigation();

        arch_smt_update();
@@ -271,6 +266,98 @@ static int __init mds_cmdline(char *str)
|
|||||||
}
|
}
|
||||||
early_param("mds", mds_cmdline);
|
early_param("mds", mds_cmdline);
|
||||||
|
|
||||||
|
#undef pr_fmt
|
||||||
|
#define pr_fmt(fmt) "Spectre V1 : " fmt
|
||||||
|
|
||||||
|
enum spectre_v1_mitigation {
|
||||||
|
SPECTRE_V1_MITIGATION_NONE,
|
||||||
|
SPECTRE_V1_MITIGATION_AUTO,
|
||||||
|
};
|
||||||
|
|
||||||
|
static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
|
||||||
|
SPECTRE_V1_MITIGATION_AUTO;
|
||||||
|
|
||||||
|
static const char * const spectre_v1_strings[] = {
|
||||||
|
[SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
|
||||||
|
[SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
|
||||||
|
};
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Does SMAP provide full mitigation against speculative kernel access to
|
||||||
|
* userspace?
|
||||||
|
*/
|
||||||
|
static bool smap_works_speculatively(void)
|
||||||
|
{
|
||||||
|
if (!boot_cpu_has(X86_FEATURE_SMAP))
|
||||||
|
return false;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* On CPUs which are vulnerable to Meltdown, SMAP does not
|
||||||
|
* prevent speculative access to user data in the L1 cache.
|
||||||
|
* Consider SMAP to be non-functional as a mitigation on these
|
||||||
|
* CPUs.
|
||||||
|
*/
|
||||||
|
if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
|
||||||
|
return false;
|
||||||
|
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void __init spectre_v1_select_mitigation(void)
|
||||||
|
{
|
||||||
|
if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) {
|
||||||
|
spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
|
||||||
|
/*
|
||||||
|
* With Spectre v1, a user can speculatively control either
|
||||||
|
* path of a conditional swapgs with a user-controlled GS
|
||||||
|
* value. The mitigation is to add lfences to both code paths.
|
||||||
|
*
|
||||||
|
* If FSGSBASE is enabled, the user can put a kernel address in
|
||||||
|
* GS, in which case SMAP provides no protection.
|
||||||
|
*
|
||||||
|
* [ NOTE: Don't check for X86_FEATURE_FSGSBASE until the
|
||||||
|
* FSGSBASE enablement patches have been merged. ]
|
||||||
|
*
|
||||||
|
* If FSGSBASE is disabled, the user can only put a user space
|
||||||
|
* address in GS. That makes an attack harder, but still
|
||||||
|
* possible if there's no SMAP protection.
|
||||||
|
*/
|
||||||
|
if (!smap_works_speculatively()) {
|
||||||
|
/*
|
||||||
|
* Mitigation can be provided from SWAPGS itself or
|
||||||
|
* PTI as the CR3 write in the Meltdown mitigation
|
||||||
|
* is serializing.
|
||||||
|
*
|
||||||
|
* If neither is there, mitigate with an LFENCE to
|
||||||
|
* stop speculation through swapgs.
|
||||||
|
*/
|
||||||
|
if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
|
||||||
|
!boot_cpu_has(X86_FEATURE_PTI))
|
||||||
|
setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Enable lfences in the kernel entry (non-swapgs)
|
||||||
|
* paths, to prevent user entry from speculatively
|
||||||
|
* skipping swapgs.
|
||||||
|
*/
|
||||||
|
setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
|
||||||
|
}
|
||||||
|
|
||||||
|
static int __init nospectre_v1_cmdline(char *str)
|
||||||
|
{
|
||||||
|
spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
early_param("nospectre_v1", nospectre_v1_cmdline);
|
||||||
|
|
||||||
#undef pr_fmt
|
#undef pr_fmt
|
||||||
#define pr_fmt(fmt) "Spectre V2 : " fmt
|
#define pr_fmt(fmt) "Spectre V2 : " fmt
|
||||||
|
|
||||||
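
The two FENCE_SWAPGS_* synthetic capabilities forced on above are consumed by the kernel entry assembly, where the alternatives machinery patches an LFENCE into the paths on either side of the conditional SWAPGS. A minimal C-level sketch of the idea (illustrative only; the real fences live in the entry macros, and the function and helper below are assumptions, not code from this change):

	/* Illustrative: an lfence on both arms stops speculation past the
	 * branch until it resolves, so neither arm can run with a stale GS. */
	static inline void fence(void)
	{
		asm volatile("lfence" ::: "memory");
	}

	void handle_entry(bool from_user)
	{
		if (from_user) {
			/* swapgs would happen here */
			fence();	/* X86_FEATURE_FENCE_SWAPGS_USER path */
		} else {
			fence();	/* X86_FEATURE_FENCE_SWAPGS_KERNEL path */
		}
	}
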
@@ -1196,7 +1283,7 @@ static ssize_t l1tf_show_state(char *buf)
 
 static ssize_t mds_show_state(char *buf)
 {
-	if (!hypervisor_is_type(X86_HYPER_NATIVE)) {
+	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
 		return sprintf(buf, "%s; SMT Host state unknown\n",
 			       mds_strings[mds_mitigation]);
 	}
@@ -1258,7 +1345,7 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
 		break;
 
 	case X86_BUG_SPECTRE_V1:
-		return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+		return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
 
 	case X86_BUG_SPECTRE_V2:
 		return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
@@ -808,6 +808,30 @@ static void init_speculation_control(struct cpuinfo_x86 *c)
 	}
 }
 
+static void init_cqm(struct cpuinfo_x86 *c)
+{
+	if (!cpu_has(c, X86_FEATURE_CQM_LLC)) {
+		c->x86_cache_max_rmid  = -1;
+		c->x86_cache_occ_scale = -1;
+		return;
+	}
+
+	/* will be overridden if occupancy monitoring exists */
+	c->x86_cache_max_rmid = cpuid_ebx(0xf);
+
+	if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC) ||
+	    cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL) ||
+	    cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)) {
+		u32 eax, ebx, ecx, edx;
+
+		/* QoS sub-leaf, EAX=0Fh, ECX=1 */
+		cpuid_count(0xf, 1, &eax, &ebx, &ecx, &edx);
+
+		c->x86_cache_max_rmid  = ecx;
+		c->x86_cache_occ_scale = ebx;
+	}
+}
+
 void get_cpu_cap(struct cpuinfo_x86 *c)
 {
 	u32 eax, ebx, ecx, edx;
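
For reference, CPUID leaf 0xF sub-leaf 1 returns the maximum RMID in ECX and the occupancy scale factor in EBX, which is exactly what init_cqm() reads. A minimal sketch of the same lookup (the debug helper itself is an assumption, not part of this change):

	/* Illustrative only: dump the CQM QoS sub-leaf like init_cqm() does. */
	static void cqm_debug_dump(void)
	{
		u32 eax, ebx, ecx, edx;

		cpuid_count(0xf, 1, &eax, &ebx, &ecx, &edx);	/* EAX=0Fh, ECX=1 */
		pr_debug("CQM: max RMID %u, occupancy scale %u bytes\n", ecx, ebx);
	}
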
@@ -839,33 +863,6 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
 		c->x86_capability[CPUID_D_1_EAX] = eax;
 	}
 
-	/* Additional Intel-defined flags: level 0x0000000F */
-	if (c->cpuid_level >= 0x0000000F) {
-
-		/* QoS sub-leaf, EAX=0Fh, ECX=0 */
-		cpuid_count(0x0000000F, 0, &eax, &ebx, &ecx, &edx);
-		c->x86_capability[CPUID_F_0_EDX] = edx;
-
-		if (cpu_has(c, X86_FEATURE_CQM_LLC)) {
-			/* will be overridden if occupancy monitoring exists */
-			c->x86_cache_max_rmid = ebx;
-
-			/* QoS sub-leaf, EAX=0Fh, ECX=1 */
-			cpuid_count(0x0000000F, 1, &eax, &ebx, &ecx, &edx);
-			c->x86_capability[CPUID_F_1_EDX] = edx;
-
-			if ((cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC)) ||
-			    ((cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL)) ||
-			     (cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)))) {
-				c->x86_cache_max_rmid = ecx;
-				c->x86_cache_occ_scale = ebx;
-			}
-		} else {
-			c->x86_cache_max_rmid = -1;
-			c->x86_cache_occ_scale = -1;
-		}
-	}
-
 	/* AMD-defined flags: level 0x80000001 */
 	eax = cpuid_eax(0x80000000);
 	c->extended_cpuid_level = eax;
@@ -896,6 +893,7 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
 
 	init_scattered_cpuid_features(c);
 	init_speculation_control(c);
+	init_cqm(c);
 
 	/*
 	 * Clear/Set all flags overridden by options, after probe.
@@ -954,6 +952,7 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
 #define NO_L1TF		BIT(3)
 #define NO_MDS		BIT(4)
 #define MSBDS_ONLY	BIT(5)
+#define NO_SWAPGS	BIT(6)
 
 #define VULNWL(_vendor, _family, _model, _whitelist)	\
 	{ X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
@@ -977,29 +976,37 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
 	VULNWL_INTEL(ATOM_BONNELL,		NO_SPECULATION),
 	VULNWL_INTEL(ATOM_BONNELL_MID,		NO_SPECULATION),
 
-	VULNWL_INTEL(ATOM_SILVERMONT,		NO_SSB | NO_L1TF | MSBDS_ONLY),
-	VULNWL_INTEL(ATOM_SILVERMONT_X,		NO_SSB | NO_L1TF | MSBDS_ONLY),
-	VULNWL_INTEL(ATOM_SILVERMONT_MID,	NO_SSB | NO_L1TF | MSBDS_ONLY),
-	VULNWL_INTEL(ATOM_AIRMONT,		NO_SSB | NO_L1TF | MSBDS_ONLY),
-	VULNWL_INTEL(XEON_PHI_KNL,		NO_SSB | NO_L1TF | MSBDS_ONLY),
-	VULNWL_INTEL(XEON_PHI_KNM,		NO_SSB | NO_L1TF | MSBDS_ONLY),
+	VULNWL_INTEL(ATOM_SILVERMONT,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
+	VULNWL_INTEL(ATOM_SILVERMONT_X,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
+	VULNWL_INTEL(ATOM_SILVERMONT_MID,	NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
+	VULNWL_INTEL(ATOM_AIRMONT,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
+	VULNWL_INTEL(XEON_PHI_KNL,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
+	VULNWL_INTEL(XEON_PHI_KNM,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
 
 	VULNWL_INTEL(CORE_YONAH,		NO_SSB),
 
-	VULNWL_INTEL(ATOM_AIRMONT_MID,		NO_L1TF | MSBDS_ONLY),
+	VULNWL_INTEL(ATOM_AIRMONT_MID,		NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
 
-	VULNWL_INTEL(ATOM_GOLDMONT,		NO_MDS | NO_L1TF),
-	VULNWL_INTEL(ATOM_GOLDMONT_X,		NO_MDS | NO_L1TF),
-	VULNWL_INTEL(ATOM_GOLDMONT_PLUS,	NO_MDS | NO_L1TF),
+	VULNWL_INTEL(ATOM_GOLDMONT,		NO_MDS | NO_L1TF | NO_SWAPGS),
+	VULNWL_INTEL(ATOM_GOLDMONT_X,		NO_MDS | NO_L1TF | NO_SWAPGS),
+	VULNWL_INTEL(ATOM_GOLDMONT_PLUS,	NO_MDS | NO_L1TF | NO_SWAPGS),
 
+	/*
+	 * Technically, swapgs isn't serializing on AMD (despite it previously
+	 * being documented as such in the APM). But according to AMD, %gs is
+	 * updated non-speculatively, and the issuing of %gs-relative memory
+	 * operands will be blocked until the %gs update completes, which is
+	 * good enough for our purposes.
+	 */
+
 	/* AMD Family 0xf - 0x12 */
-	VULNWL_AMD(0x0f,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
-	VULNWL_AMD(0x10,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
-	VULNWL_AMD(0x11,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
-	VULNWL_AMD(0x12,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
+	VULNWL_AMD(0x0f,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
+	VULNWL_AMD(0x10,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
+	VULNWL_AMD(0x11,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
+	VULNWL_AMD(0x12,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
 
 	/* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
-	VULNWL_AMD(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS),
+	VULNWL_AMD(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS),
 	{}
 };
 
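
The whitelist above is consulted through x86_match_cpu(); a minimal sketch of the lookup helper used by the next hunk (reproduced from memory of the upstream helper, so treat it as illustrative):

	/* Illustrative: true if the boot CPU's whitelist entry has 'which' set. */
	static bool __init cpu_matches(unsigned long which)
	{
		const struct x86_cpu_id *m = x86_match_cpu(cpu_vuln_whitelist);

		return m && !!(m->driver_data & which);
	}
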
@@ -1036,6 +1043,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 		setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
 	}
 
+	if (!cpu_matches(NO_SWAPGS))
+		setup_force_cpu_bug(X86_BUG_SWAPGS);
+
 	if (cpu_matches(NO_MELTDOWN))
 		return;
 
@@ -59,6 +59,9 @@ static const struct cpuid_dep cpuid_deps[] = {
 	{ X86_FEATURE_AVX512_4VNNIW,	X86_FEATURE_AVX512F   },
 	{ X86_FEATURE_AVX512_4FMAPS,	X86_FEATURE_AVX512F   },
 	{ X86_FEATURE_AVX512_VPOPCNTDQ,	X86_FEATURE_AVX512F   },
+	{ X86_FEATURE_CQM_OCCUP_LLC,	X86_FEATURE_CQM_LLC   },
+	{ X86_FEATURE_CQM_MBM_TOTAL,	X86_FEATURE_CQM_LLC   },
+	{ X86_FEATURE_CQM_MBM_LOCAL,	X86_FEATURE_CQM_LLC   },
 	{}
 };
 
@@ -21,6 +21,10 @@ struct cpuid_bit {
 static const struct cpuid_bit cpuid_bits[] = {
 	{ X86_FEATURE_APERFMPERF,	CPUID_ECX, 0, 0x00000006, 0 },
 	{ X86_FEATURE_EPB,		CPUID_ECX, 3, 0x00000006, 0 },
+	{ X86_FEATURE_CQM_LLC,		CPUID_EDX, 1, 0x0000000f, 0 },
+	{ X86_FEATURE_CQM_OCCUP_LLC,	CPUID_EDX, 0, 0x0000000f, 1 },
+	{ X86_FEATURE_CQM_MBM_TOTAL,	CPUID_EDX, 1, 0x0000000f, 1 },
+	{ X86_FEATURE_CQM_MBM_LOCAL,	CPUID_EDX, 2, 0x0000000f, 1 },
 	{ X86_FEATURE_CAT_L3,		CPUID_EBX, 1, 0x00000010, 0 },
 	{ X86_FEATURE_CAT_L2,		CPUID_EBX, 2, 0x00000010, 0 },
 	{ X86_FEATURE_CDP_L3,		CPUID_ECX, 2, 0x00000010, 1 },
@@ -830,6 +830,7 @@ asm(
 "cmpb $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
 "setne %al;"
 "ret;"
+".size __raw_callee_save___kvm_vcpu_is_preempted, .-__raw_callee_save___kvm_vcpu_is_preempted;"
 ".popsection");
 
 #endif
@@ -231,9 +231,55 @@ static const struct dmi_system_id efifb_dmi_system_table[] __initconst = {
 	{},
 };
 
+/*
+ * Some devices have a portrait LCD but advertise a landscape resolution (and
+ * pitch). We simply swap width and height for these devices so that we can
+ * correctly deal with some of them coming with multiple resolutions.
+ */
+static const struct dmi_system_id efifb_dmi_swap_width_height[] __initconst = {
+	{
+		/*
+		 * Lenovo MIIX310-10ICR, only some batches have the troublesome
+		 * 800x1280 portrait screen. Luckily the portrait version has
+		 * its own BIOS version, so we match on that.
+		 */
+		.matches = {
+			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "MIIX 310-10ICR"),
+			DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1HCN44WW"),
+		},
+	},
+	{
+		/* Lenovo MIIX 320-10ICR with 800x1280 portrait screen */
+		.matches = {
+			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
+					"Lenovo MIIX 320-10ICR"),
+		},
+	},
+	{
+		/* Lenovo D330 with 800x1280 or 1200x1920 portrait screen */
+		.matches = {
+			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
+					"Lenovo ideapad D330-10IGM"),
+		},
+	},
+	{},
+};
+
 __init void sysfb_apply_efi_quirks(void)
 {
 	if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI ||
 	    !(screen_info.capabilities & VIDEO_CAPABILITY_SKIP_QUIRKS))
 		dmi_check_system(efifb_dmi_system_table);
+
+	if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI &&
+	    dmi_check_system(efifb_dmi_swap_width_height)) {
+		u16 temp = screen_info.lfb_width;
+
+		screen_info.lfb_width = screen_info.lfb_height;
+		screen_info.lfb_height = temp;
+		screen_info.lfb_linelength = 4 * screen_info.lfb_width;
+	}
 }
@@ -47,8 +47,6 @@ static const struct cpuid_reg reverse_cpuid[] = {
 	[CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX},
 	[CPUID_7_0_EBX]       = {         7, 0, CPUID_EBX},
 	[CPUID_D_1_EAX]       = {       0xd, 1, CPUID_EAX},
-	[CPUID_F_0_EDX]       = {       0xf, 0, CPUID_EDX},
-	[CPUID_F_1_EDX]       = {       0xf, 1, CPUID_EDX},
 	[CPUID_8000_0008_EBX] = {0x80000008, 0, CPUID_EBX},
 	[CPUID_6_EAX]         = {         6, 0, CPUID_EAX},
 	[CPUID_8000_000A_EDX] = {0x8000000a, 0, CPUID_EDX},
@@ -4532,11 +4532,11 @@ static void update_permission_bitmask(struct kvm_vcpu *vcpu,
 		 */
 
 		/* Faults from writes to non-writable pages */
-		u8 wf = (pfec & PFERR_WRITE_MASK) ? ~w : 0;
+		u8 wf = (pfec & PFERR_WRITE_MASK) ? (u8)~w : 0;
 		/* Faults from user mode accesses to supervisor pages */
-		u8 uf = (pfec & PFERR_USER_MASK) ? ~u : 0;
+		u8 uf = (pfec & PFERR_USER_MASK) ? (u8)~u : 0;
 		/* Faults from fetches of non-executable pages*/
-		u8 ff = (pfec & PFERR_FETCH_MASK) ? ~x : 0;
+		u8 ff = (pfec & PFERR_FETCH_MASK) ? (u8)~x : 0;
 		/* Faults from kernel mode fetches of user pages */
 		u8 smepf = 0;
 		/* Faults from kernel mode accesses of user pages */
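
The (u8) casts matter because ~ promotes its u8 operand to int, so the uncast result is a 32-bit value with all high bits set and compilers warn about the implicit truncation on assignment. A standalone illustration (not from this change):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint8_t w = 0x0f;

		/* ~w promotes to int: 0xfffffff0, not 0xf0 */
		printf("%x\n", (unsigned)~w);	/* fffffff0 */
		printf("%x\n", (uint8_t)~w);	/* f0: the value actually stored */
		return 0;
	}
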
@@ -177,7 +177,7 @@ static inline void reg_copy(FPU_REG const *x, FPU_REG *y)
 #define setexponentpos(x,y) { (*(short *)&((x)->exp)) = \
     ((y) + EXTENDED_Ebias) & 0x7fff; }
 #define exponent16(x)       (*(short *)&((x)->exp))
-#define setexponent16(x,y)  { (*(short *)&((x)->exp)) = (y); }
+#define setexponent16(x,y)  { (*(short *)&((x)->exp)) = (u16)(y); }
 #define addexponent(x,y)    { (*(short *)&((x)->exp)) += (y); }
 #define stdexp(x)           { (*(short *)&((x)->exp)) += EXTENDED_Ebias; }
 
@@ -18,7 +18,7 @@
 #include "control_w.h"
 
 #define MAKE_REG(s, e, l, h) { l, h, \
-		((EXTENDED_Ebias+(e)) | ((SIGN_##s != 0)*0x8000)) }
+		(u16)((EXTENDED_Ebias+(e)) | ((SIGN_##s != 0)*0x8000)) }
 
 FPU_REG const CONST_1 = MAKE_REG(POS, 0, 0x00000000, 0x80000000);
 #if 0
@@ -597,12 +597,12 @@ struct trap_array_entry {
 
 static struct trap_array_entry trap_array[] = {
 	{ debug,               xen_xendebug,           true },
-	{ int3,                xen_xenint3,            true },
 	{ double_fault,        xen_double_fault,       true },
 #ifdef CONFIG_X86_MCE
 	{ machine_check,       xen_machine_check,      true },
 #endif
 	{ nmi,                 xen_xennmi,             true },
+	{ int3,                xen_int3,               false },
 	{ overflow,            xen_overflow,           false },
 #ifdef CONFIG_IA32_EMULATION
 	{ entry_INT80_compat,  xen_entry_INT80_compat, false },
@@ -30,7 +30,6 @@ xen_pv_trap divide_error
 xen_pv_trap debug
 xen_pv_trap xendebug
 xen_pv_trap int3
-xen_pv_trap xenint3
 xen_pv_trap xennmi
 xen_pv_trap overflow
 xen_pv_trap bounds
@@ -291,8 +291,12 @@ bool bio_integrity_prep(struct bio *bio)
 		ret = bio_integrity_add_page(bio, virt_to_page(buf),
 					     bytes, offset);
 
-		if (ret == 0)
-			return false;
+		if (ret == 0) {
+			printk(KERN_ERR "could not attach integrity payload\n");
+			kfree(buf);
+			status = BLK_STS_RESOURCE;
+			goto err_end_io;
+		}
 
 		if (ret < bytes)
 			break;
@@ -198,6 +198,7 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
 	rq->internal_tag = -1;
 	rq->start_time_ns = ktime_get_ns();
 	rq->part = NULL;
+	refcount_set(&rq->ref, 1);
 }
 EXPORT_SYMBOL(blk_rq_init);
 
@@ -420,24 +421,25 @@ void blk_sync_queue(struct request_queue *q)
 EXPORT_SYMBOL(blk_sync_queue);
 
 /**
- * blk_set_preempt_only - set QUEUE_FLAG_PREEMPT_ONLY
+ * blk_set_pm_only - increment pm_only counter
  * @q: request queue pointer
- *
- * Returns the previous value of the PREEMPT_ONLY flag - 0 if the flag was not
- * set and 1 if the flag was already set.
 */
-int blk_set_preempt_only(struct request_queue *q)
+void blk_set_pm_only(struct request_queue *q)
 {
-	return blk_queue_flag_test_and_set(QUEUE_FLAG_PREEMPT_ONLY, q);
+	atomic_inc(&q->pm_only);
 }
-EXPORT_SYMBOL_GPL(blk_set_preempt_only);
+EXPORT_SYMBOL_GPL(blk_set_pm_only);
 
-void blk_clear_preempt_only(struct request_queue *q)
+void blk_clear_pm_only(struct request_queue *q)
 {
-	blk_queue_flag_clear(QUEUE_FLAG_PREEMPT_ONLY, q);
+	int pm_only;
+
+	pm_only = atomic_dec_return(&q->pm_only);
+	WARN_ON_ONCE(pm_only < 0);
+	if (pm_only == 0)
 		wake_up_all(&q->mq_freeze_wq);
 }
-EXPORT_SYMBOL_GPL(blk_clear_preempt_only);
+EXPORT_SYMBOL_GPL(blk_clear_pm_only);
 
 /**
  * __blk_run_queue_uncond - run a queue whether or not it has been stopped
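
pm_only turns the old one-shot PREEMPT_ONLY flag into a counter, so nested gate holders can each take and release it independently, with waiters woken only when the last holder drops out. A hedged standalone sketch of the same pattern (names are illustrative, not from this change):

	/* Illustrative: counted "gate" mirroring blk_set/clear_pm_only(). */
	static atomic_t gate = ATOMIC_INIT(0);
	static DECLARE_WAIT_QUEUE_HEAD(gate_wq);

	static void gate_enter(void)
	{
		atomic_inc(&gate);
	}

	static void gate_exit(void)
	{
		int v = atomic_dec_return(&gate);

		WARN_ON_ONCE(v < 0);
		if (v == 0)			/* last holder wakes all waiters */
			wake_up_all(&gate_wq);
	}
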
@@ -915,7 +917,7 @@ EXPORT_SYMBOL(blk_alloc_queue);
 */
 int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 {
-	const bool preempt = flags & BLK_MQ_REQ_PREEMPT;
+	const bool pm = flags & BLK_MQ_REQ_PREEMPT;
 
 	while (true) {
 		bool success = false;
@@ -923,11 +925,11 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 		rcu_read_lock();
 		if (percpu_ref_tryget_live(&q->q_usage_counter)) {
 			/*
-			 * The code that sets the PREEMPT_ONLY flag is
-			 * responsible for ensuring that that flag is globally
-			 * visible before the queue is unfrozen.
+			 * The code that increments the pm_only counter is
+			 * responsible for ensuring that that counter is
+			 * globally visible before the queue is unfrozen.
			 */
-			if (preempt || !blk_queue_preempt_only(q)) {
+			if (pm || !blk_queue_pm_only(q)) {
 				success = true;
 			} else {
 				percpu_ref_put(&q->q_usage_counter);
@@ -952,7 +954,7 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 
 		wait_event(q->mq_freeze_wq,
 			   (atomic_read(&q->mq_freeze_depth) == 0 &&
-			    (preempt || !blk_queue_preempt_only(q))) ||
+			    (pm || !blk_queue_pm_only(q))) ||
 			   blk_queue_dying(q));
 		if (blk_queue_dying(q))
 			return -ENODEV;
@@ -102,6 +102,14 @@ static int blk_flags_show(struct seq_file *m, const unsigned long flags,
 	return 0;
 }
 
+static int queue_pm_only_show(void *data, struct seq_file *m)
+{
+	struct request_queue *q = data;
+
+	seq_printf(m, "%d\n", atomic_read(&q->pm_only));
+	return 0;
+}
+
 #define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
 static const char *const blk_queue_flag_name[] = {
 	QUEUE_FLAG_NAME(QUEUED),
@@ -132,7 +140,6 @@ static const char *const blk_queue_flag_name[] = {
 	QUEUE_FLAG_NAME(REGISTERED),
 	QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
 	QUEUE_FLAG_NAME(QUIESCED),
-	QUEUE_FLAG_NAME(PREEMPT_ONLY),
 };
 #undef QUEUE_FLAG_NAME
 
@@ -209,6 +216,7 @@ static ssize_t queue_write_hint_store(void *data, const char __user *buf,
 static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
 	{ "poll_stat", 0400, queue_poll_stat_show },
 	{ "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
+	{ "pm_only", 0600, queue_pm_only_show, NULL },
 	{ "state", 0600, queue_state_show, queue_state_write },
 	{ "write_hints", 0600, queue_write_hint_show, queue_write_hint_store },
 	{ "zone_wlock", 0400, queue_zone_wlock_show, NULL },
@@ -30,7 +30,9 @@
 
 #include "internal.h"
 
+#ifdef CONFIG_DMI
 static const struct dmi_system_id acpi_rev_dmi_table[] __initconst;
+#endif
 
 /*
  * POLICY: If *anything* doesn't work, put it on the blacklist.
@@ -74,7 +76,9 @@ int __init acpi_blacklisted(void)
 	}
 
 	(void)early_acpi_osi_init();
+#ifdef CONFIG_DMI
 	dmi_check_system(acpi_rev_dmi_table);
+#endif
 
 	return blacklisted;
 }
@@ -2138,8 +2138,18 @@ static struct binder_thread *binder_get_txn_from_and_acq_inner(
 
 static void binder_free_transaction(struct binder_transaction *t)
 {
+	struct binder_proc *target_proc = t->to_proc;
+
+	if (target_proc) {
+		binder_inner_proc_lock(target_proc);
 		if (t->buffer)
 			t->buffer->transaction = NULL;
+		binder_inner_proc_unlock(target_proc);
+	}
+	/*
+	 * If the transaction has no target_proc, then
+	 * t->buffer->transaction has already been cleared.
+	 */
 	kfree(t);
 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
 }
@@ -3104,7 +3114,7 @@ static void binder_transaction(struct binder_proc *proc,
 		else
 			return_error = BR_DEAD_REPLY;
 		mutex_unlock(&context->context_mgr_node_lock);
-		if (target_node && target_proc == proc) {
+		if (target_node && target_proc->pid == proc->pid) {
 			binder_user_error("%d:%d got transaction to context manager from process owning it\n",
 					  proc->pid, thread->pid);
 			return_error = BR_FAILED_REPLY;
@@ -3848,10 +3858,12 @@ static int binder_thread_write(struct binder_proc *proc,
 				     buffer->debug_id,
 				     buffer->transaction ? "active" : "finished");
 
+			binder_inner_proc_lock(proc);
 			if (buffer->transaction) {
 				buffer->transaction->buffer = NULL;
 				buffer->transaction = NULL;
 			}
+			binder_inner_proc_unlock(proc);
 			if (buffer->async_transaction && buffer->target_node) {
 				struct binder_node *buf_node;
 				struct binder_work *w;
@@ -63,6 +63,7 @@
 #include <asm/byteorder.h>
 #include <linux/vmalloc.h>
 #include <linux/jiffies.h>
+#include <linux/nospec.h>
 #include "iphase.h"
 #include "suni.h"
 #define swap_byte_order(x) (((x & 0xff) << 8) | ((x & 0xff00) >> 8))
@@ -2760,8 +2761,11 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
 	}
 	if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT;
 	board = ia_cmds.status;
+
 	if ((board < 0) || (board > iadev_count))
 		board = 0;
+	board = array_index_nospec(board, iadev_count + 1);
+
 	iadev = ia_dev[board];
 	switch (ia_cmds.cmd) {
 	case MEMDUMP:
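
array_index_nospec() clamps the index even under misspeculation, closing the Spectre-v1 window between the bounds check and the table load. A minimal usage sketch (hypothetical table, not from this driver):

	#include <linux/nospec.h>

	static int table[16];

	int read_entry(int idx)
	{
		if (idx < 0 || idx >= 16)
			return -EINVAL;
		/* clamp idx so a mispredicted branch cannot load out of bounds */
		idx = array_index_nospec(idx, 16);
		return table[idx];
	}
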
@@ -66,6 +66,9 @@ struct driver_private {
  *	probed first.
  * @device - pointer back to the struct device that this structure is
  * associated with.
+ * @dead - This device is currently either in the process of or has been
+ *	removed from the system. Any asynchronous events scheduled for this
+ *	device should exit without taking any action.
  *
  * Nothing outside of the driver core should ever touch these fields.
  */
@@ -76,6 +79,7 @@ struct device_private {
 	struct klist_node knode_bus;
 	struct list_head deferred_probe;
 	struct device *device;
+	u8 dead:1;
 };
 #define to_device_private_parent(obj)	\
 	container_of(obj, struct device_private, knode_parent)
@@ -2036,6 +2036,24 @@ void put_device(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(put_device);
 
+bool kill_device(struct device *dev)
+{
+	/*
+	 * Require the device lock and set the "dead" flag to guarantee that
+	 * the update behavior is consistent with the other bitfields near
+	 * it and that we cannot have an asynchronous probe routine trying
+	 * to run while we are tearing out the bus/class/sysfs from
+	 * underneath the device.
+	 */
+	lockdep_assert_held(&dev->mutex);
+
+	if (dev->p->dead)
+		return false;
+	dev->p->dead = true;
+	return true;
+}
+EXPORT_SYMBOL_GPL(kill_device);
+
 /**
  * device_del - delete device from system.
  * @dev: device.
@@ -2055,6 +2073,10 @@ void device_del(struct device *dev)
 	struct kobject *glue_dir = NULL;
 	struct class_interface *class_intf;
 
+	device_lock(dev);
+	kill_device(dev);
+	device_unlock(dev);
+
 	/* Notify clients of device removal. This call must come
 	 * before dpm_sysfs_remove().
 	 */
@@ -746,15 +746,6 @@ static int __device_attach_driver(struct device_driver *drv, void *_data)
 	bool async_allowed;
 	int ret;
 
-	/*
-	 * Check if device has already been claimed. This may
-	 * happen with driver loading, device discovery/registration,
-	 * and deferred probe processing happens all at once with
-	 * multiple threads.
-	 */
-	if (dev->driver)
-		return -EBUSY;
-
 	ret = driver_match_device(drv, dev);
 	if (ret == 0) {
 		/* no match */
@@ -789,6 +780,15 @@ static void __device_attach_async_helper(void *_dev, async_cookie_t cookie)
 
 	device_lock(dev);
 
+	/*
+	 * Check if device has already been removed or claimed. This may
+	 * happen with driver loading, device discovery/registration,
+	 * and deferred probe processing happens all at once with
+	 * multiple threads.
+	 */
+	if (dev->p->dead || dev->driver)
+		goto out_unlock;
+
 	if (dev->parent)
 		pm_runtime_get_sync(dev->parent);
 
@@ -799,7 +799,7 @@ static void __device_attach_async_helper(void *_dev, async_cookie_t cookie)
 
 	if (dev->parent)
 		pm_runtime_put(dev->parent);
-
+out_unlock:
 	device_unlock(dev);
 
 	put_device(dev);
@@ -912,7 +912,7 @@ static int __driver_attach(struct device *dev, void *data)
 	if (dev->parent && dev->bus->need_parent_lock)
 		device_lock(dev->parent);
 	device_lock(dev);
-	if (!dev->driver)
+	if (!dev->p->dead && !dev->driver)
 		driver_probe_device(drv, dev);
 	device_unlock(dev);
 	if (dev->parent && dev->bus->need_parent_lock)
@@ -1218,7 +1218,7 @@ static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
 				 struct block_device *bdev)
 {
 	sock_shutdown(nbd);
-	kill_bdev(bdev);
+	__invalidate_device(bdev, true);
 	nbd_bdev_reset(bdev);
 	if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
 			       &nbd->config->runtime_flags))
@@ -112,6 +112,9 @@ static int ath_open(struct hci_uart *hu)
 
 	BT_DBG("hu %p", hu);
 
+	if (!hci_uart_has_flow_control(hu))
+		return -EOPNOTSUPP;
+
 	ath = kzalloc(sizeof(*ath), GFP_KERNEL);
 	if (!ath)
 		return -ENOMEM;
@@ -369,6 +369,9 @@ static int bcm_open(struct hci_uart *hu)
 
 	bt_dev_dbg(hu->hdev, "hu %p", hu);
 
+	if (!hci_uart_has_flow_control(hu))
+		return -EOPNOTSUPP;
+
 	bcm = kzalloc(sizeof(*bcm), GFP_KERNEL);
 	if (!bcm)
 		return -ENOMEM;
@@ -406,6 +406,9 @@ static int intel_open(struct hci_uart *hu)
 
 	BT_DBG("hu %p", hu);
 
+	if (!hci_uart_has_flow_control(hu))
+		return -EOPNOTSUPP;
+
 	intel = kzalloc(sizeof(*intel), GFP_KERNEL);
 	if (!intel)
 		return -ENOMEM;
@@ -299,6 +299,19 @@ static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 	return 0;
 }
 
+/* Check the underlying device or tty has flow control support */
+bool hci_uart_has_flow_control(struct hci_uart *hu)
+{
+	/* serdev nodes check if the needed operations are present */
+	if (hu->serdev)
+		return true;
+
+	if (hu->tty->driver->ops->tiocmget && hu->tty->driver->ops->tiocmset)
+		return true;
+
+	return false;
+}
+
 /* Flow control or un-flow control the device */
 void hci_uart_set_flow_control(struct hci_uart *hu, bool enable)
 {
@@ -66,6 +66,9 @@ static int mrvl_open(struct hci_uart *hu)
 
 	BT_DBG("hu %p", hu);
 
+	if (!hci_uart_has_flow_control(hu))
+		return -EOPNOTSUPP;
+
 	mrvl = kzalloc(sizeof(*mrvl), GFP_KERNEL);
 	if (!mrvl)
 		return -ENOMEM;
@@ -450,6 +450,9 @@ static int qca_open(struct hci_uart *hu)
 
 	BT_DBG("hu %p qca_open", hu);
 
+	if (!hci_uart_has_flow_control(hu))
+		return -EOPNOTSUPP;
+
 	qca = kzalloc(sizeof(struct qca_data), GFP_KERNEL);
 	if (!qca)
 		return -ENOMEM;
@@ -118,6 +118,7 @@ int hci_uart_tx_wakeup(struct hci_uart *hu);
 int hci_uart_init_ready(struct hci_uart *hu);
 void hci_uart_init_work(struct work_struct *work);
 void hci_uart_set_baudrate(struct hci_uart *hu, unsigned int speed);
+bool hci_uart_has_flow_control(struct hci_uart *hu);
 void hci_uart_set_flow_control(struct hci_uart *hu, bool enable);
 void hci_uart_set_speeds(struct hci_uart *hu, unsigned int init_speed,
 			 unsigned int oper_speed);
@@ -570,8 +570,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
 	unsigned long long m;
 
 	m = hpets->hp_tick_freq + (dis >> 1);
-	do_div(m, dis);
-	return (unsigned long)m;
+	return div64_ul(m, dis);
 }
 
 static int
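
do_div() divides in place and hands back the remainder, while div64_ul() simply returns the 64-bit quotient, which is all this helper wants. A small before/after sketch:

	u64 m = 1000000007ULL;

	/* old style: do_div() modifies m and returns the remainder */
	u32 rem = do_div(m, 3);			/* m == 333333335, rem == 2 */

	/* new style: div64_ul() returns the quotient directly */
	u64 q = div64_ul(1000000007ULL, 3);	/* q == 333333335 */
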
@@ -2023,6 +2023,7 @@ static int sc9860_clk_probe(struct platform_device *pdev)
 {
 	const struct of_device_id *match;
 	const struct sprd_clk_desc *desc;
+	int ret;
 
 	match = of_match_node(sprd_sc9860_clk_ids, pdev->dev.of_node);
 	if (!match) {
@@ -2031,7 +2032,9 @@ static int sc9860_clk_probe(struct platform_device *pdev)
 	}
 
 	desc = match->data;
-	sprd_clk_regmap_init(pdev, desc);
+	ret = sprd_clk_regmap_init(pdev, desc);
+	if (ret)
+		return ret;
 
 	return sprd_clk_probe(&pdev->dev, desc->hw_clks);
 }
@@ -2214,9 +2214,9 @@ static struct div_nmp pllu_nmp = {
 };
 
 static struct tegra_clk_pll_freq_table pll_u_freq_table[] = {
-	{ 12000000, 480000000, 40, 1, 0, 0 },
-	{ 13000000, 480000000, 36, 1, 0, 0 }, /* actual: 468.0 MHz */
-	{ 38400000, 480000000, 25, 2, 0, 0 },
+	{ 12000000, 480000000, 40, 1, 1, 0 },
+	{ 13000000, 480000000, 36, 1, 1, 0 }, /* actual: 468.0 MHz */
+	{ 38400000, 480000000, 25, 2, 1, 0 },
 	{ 0, 0, 0, 0, 0, 0 },
 };
 
@@ -3343,6 +3343,7 @@ static struct tegra_clk_init_table init_table[] __initdata = {
 	{ TEGRA210_CLK_DFLL_REF, TEGRA210_CLK_PLL_P, 51000000, 1 },
 	{ TEGRA210_CLK_SBC4, TEGRA210_CLK_PLL_P, 12000000, 1 },
 	{ TEGRA210_CLK_PLL_RE_VCO, TEGRA210_CLK_CLK_MAX, 672000000, 1 },
+	{ TEGRA210_CLK_PLL_U_OUT1, TEGRA210_CLK_CLK_MAX, 48000000, 1 },
 	{ TEGRA210_CLK_XUSB_GATE, TEGRA210_CLK_CLK_MAX, 0, 1 },
 	{ TEGRA210_CLK_XUSB_SS_SRC, TEGRA210_CLK_PLL_U_480M, 120000000, 0 },
 	{ TEGRA210_CLK_XUSB_FS_SRC, TEGRA210_CLK_PLL_U_48M, 48000000, 0 },
@@ -3367,7 +3368,6 @@ static struct tegra_clk_init_table init_table[] __initdata = {
 	{ TEGRA210_CLK_PLL_DP, TEGRA210_CLK_CLK_MAX, 270000000, 0 },
 	{ TEGRA210_CLK_SOC_THERM, TEGRA210_CLK_PLL_P, 51000000, 0 },
 	{ TEGRA210_CLK_CCLK_G, TEGRA210_CLK_CLK_MAX, 0, 1 },
-	{ TEGRA210_CLK_PLL_U_OUT1, TEGRA210_CLK_CLK_MAX, 48000000, 1 },
 	{ TEGRA210_CLK_PLL_U_OUT2, TEGRA210_CLK_CLK_MAX, 60000000, 1 },
 	/* This MUST be the last entry. */
 	{ TEGRA210_CLK_CLK_MAX, TEGRA210_CLK_CLK_MAX, 0, 0 },
@@ -1164,7 +1164,7 @@ rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
 
 	/* Someone calling slave DMA on a generic channel? */
-	if (rchan->mid_rid < 0 || !sg_len) {
+	if (rchan->mid_rid < 0 || !sg_len || !sg_dma_len(sgl)) {
 		dev_warn(chan->device->dev,
 			 "%s: bad parameter: len=%d, id=%d\n",
 			 __func__, sg_len, rchan->mid_rid);
@@ -981,8 +981,12 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
 		csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
 	}
 
-	if (flags & DMA_PREP_INTERRUPT)
+	if (flags & DMA_PREP_INTERRUPT) {
 		csr |= TEGRA_APBDMA_CSR_IE_EOC;
+	} else {
+		WARN_ON_ONCE(1);
+		return NULL;
+	}
 
 	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;
 
@@ -1124,8 +1128,12 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
 		csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
 	}
 
-	if (flags & DMA_PREP_INTERRUPT)
+	if (flags & DMA_PREP_INTERRUPT) {
 		csr |= TEGRA_APBDMA_CSR_IE_EOC;
+	} else {
+		WARN_ON_ONCE(1);
+		return NULL;
+	}
 
 	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;
 
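
After this change the driver refuses descriptors prepared without DMA_PREP_INTERRUPT, since its completion handling depends on the end-of-chain (EOC) interrupt. A hedged client-side sketch (channel and scatterlist setup elided; error code choice is an assumption):

	/* Illustrative only: slave-DMA clients of this driver must pass
	 * DMA_PREP_INTERRUPT, or prep now returns NULL. */
	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EIO;			/* prep rejected the request */
	desc->callback = xfer_done;		/* fires off the EOC interrupt */
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
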
@@ -366,16 +366,16 @@ static int suspend_test_thread(void *arg)
 	for (;;) {
 		/* Needs to be set first to avoid missing a wakeup. */
 		set_current_state(TASK_INTERRUPTIBLE);
-		if (kthread_should_stop()) {
-			__set_current_state(TASK_RUNNING);
+		if (kthread_should_park())
 			break;
-		}
 		schedule();
 	}
 
 	pr_info("CPU %d suspend test results: success %d, shallow states %d, errors %d\n",
 		cpu, nb_suspend, nb_shallow_sleep, nb_err);
 
+	kthread_parkme();
+
 	return nb_err;
 }
 
@@ -440,8 +440,10 @@ static int suspend_tests(void)
 
 
 	/* Stop and destroy all threads, get return status. */
-	for (i = 0; i < nb_threads; ++i)
+	for (i = 0; i < nb_threads; ++i) {
+		err += kthread_park(threads[i]);
 		err += kthread_stop(threads[i]);
+	}
 out:
 	cpuidle_resume_and_unlock();
 	kfree(threads);
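
Parking each thread before stopping it lets the worker leave its loop and report results cleanly, instead of racing the stop request; kthread_stop() then unparks the thread and collects its return value. A hedged sketch of the park-aware worker shape (simplified from the test above):

	/* Illustrative park-aware worker loop. */
	static int worker(void *arg)
	{
		for (;;) {
			/* set state before the check to avoid missing a wakeup */
			set_current_state(TASK_INTERRUPTIBLE);
			if (kthread_should_park())
				break;
			schedule();
		}
		__set_current_state(TASK_RUNNING);
		kthread_parkme();	/* parked here until kthread_stop() unparks us */
		return 0;
	}
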
@@ -39,6 +39,7 @@ config ALTERA_PR_IP_CORE_PLAT
 config FPGA_MGR_ALTERA_PS_SPI
 	tristate "Altera FPGA Passive Serial over SPI"
 	depends on SPI
+	select BITREVERSE
 	help
 	  FPGA manager driver support for Altera Arria/Cyclone/Stratix
 	  using the passive serial interface over SPI.
@@ -946,9 +946,11 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
 	}
 
 	if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
-		irqflags |= IRQF_TRIGGER_RISING;
+		irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
+			IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
 	if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE)
-		irqflags |= IRQF_TRIGGER_FALLING;
+		irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
+			IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
 	irqflags |= IRQF_ONESHOT;
 	irqflags |= IRQF_SHARED;
 
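
For an active-low line, a logical rising edge is a physical falling edge, so the requested event must be inverted before it reaches the irqchip. A standalone sketch of the mapping (helper name is illustrative):

	/* Illustrative only: map a logical edge request to the physical
	 * trigger, honouring the line's active-low flag. */
	static unsigned long edge_to_trigger(bool rising, bool active_low)
	{
		if (rising)
			return active_low ? IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
		return active_low ? IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
	}
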
@@ -1037,6 +1037,9 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
 	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
 	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
 
+	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
+	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));
+
 	/* After HDP is initialized, flush HDP.*/
 	adev->nbio_funcs->hdp_flush(adev, NULL);
 
@@ -1268,12 +1268,17 @@ int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
 	return 0;
 }
 
-static int unmap_sdma_queues(struct device_queue_manager *dqm,
-				unsigned int sdma_engine)
+static int unmap_sdma_queues(struct device_queue_manager *dqm)
 {
-	return pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
-			KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false,
-			sdma_engine);
+	int i, retval = 0;
+
+	for (i = 0; i < dqm->dev->device_info->num_sdma_engines; i++) {
+		retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
+			KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false, i);
+		if (retval)
+			return retval;
+	}
+	return retval;
 }
 
 /* dqm->lock mutex has to be locked before calling this function */
@@ -1312,10 +1317,8 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
 	pr_debug("Before destroying queues, sdma queue count is : %u\n",
 		dqm->sdma_queue_count);
 
-	if (dqm->sdma_queue_count > 0) {
-		unmap_sdma_queues(dqm, 0);
-		unmap_sdma_queues(dqm, 1);
-	}
+	if (dqm->sdma_queue_count > 0)
+		unmap_sdma_queues(dqm);
 
 	retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
 			filter, filter_param, false, 0);
@@ -75,6 +75,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
 	struct v9_mqd *m;
 	struct kfd_dev *kfd = mm->dev;
 
+	*mqd_mem_obj = NULL;
 	/* From V9, for CWSR, the control stack is located on the next page
 	 * boundary after the mqd, we will use the gtt allocation function
 	 * instead of sub-allocation function.
@@ -92,8 +93,10 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
 	} else
 		retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct v9_mqd),
 				mqd_mem_obj);
-	if (retval != 0)
+	if (retval) {
+		kfree(*mqd_mem_obj);
 		return -ENOMEM;
+	}
 
 	m = (struct v9_mqd *) (*mqd_mem_obj)->cpu_ptr;
 	addr = (*mqd_mem_obj)->gpu_addr;
@@ -3644,6 +3644,13 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
 {
 	struct amdgpu_device *adev = dm->ddev->dev_private;
 
+	/*
+	 * Some of the properties below require access to state, like bpc.
+	 * Allocate some default initial connector state with our reset helper.
+	 */
+	if (aconnector->base.funcs->reset)
+		aconnector->base.funcs->reset(&aconnector->base);
+
 	aconnector->connector_id = link_index;
 	aconnector->dc_link = link;
 	aconnector->base.interlace_allowed = false;
@@ -3811,9 +3818,6 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
 		&aconnector->base,
 		&amdgpu_dm_connector_helper_funcs);
 
-	if (aconnector->base.funcs->reset)
-		aconnector->base.funcs->reset(&aconnector->base);
-
 	amdgpu_dm_connector_init_helper(
 		dm,
 		aconnector,
@@ -474,6 +474,8 @@ void dce_abm_destroy(struct abm **abm)
 {
 	struct dce_abm *abm_dce = TO_DCE_ABM(*abm);
 
+	abm_dce->base.funcs->set_abm_immediate_disable(*abm);
+
 	kfree(abm_dce);
 	*abm = NULL;
 }
@@ -242,6 +242,9 @@ static void build_prescale_params(struct ipp_prescale_params *prescale_params,
 	prescale_params->mode = IPP_PRESCALE_MODE_FIXED_UNSIGNED;
 
 	switch (plane_state->format) {
+	case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+		prescale_params->scale = 0x2082;
+		break;
 	case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
 	case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
 		prescale_params->scale = 0x2020;
@@ -23,6 +23,7 @@
  *
 */
 
+#include <linux/delay.h>
 #include "dm_services.h"
 #include "core_types.h"
 #include "resource.h"
@@ -261,10 +261,11 @@ static void sii902x_bridge_mode_set(struct drm_bridge *bridge,
 	struct regmap *regmap = sii902x->regmap;
 	u8 buf[HDMI_INFOFRAME_SIZE(AVI)];
 	struct hdmi_avi_infoframe frame;
+	u16 pixel_clock_10kHz = adj->clock / 10;
 	int ret;
 
-	buf[0] = adj->clock;
-	buf[1] = adj->clock >> 8;
+	buf[0] = pixel_clock_10kHz & 0xff;
+	buf[1] = pixel_clock_10kHz >> 8;
 	buf[2] = adj->vrefresh;
 	buf[3] = 0x00;
 	buf[4] = adj->hdisplay;
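
drm mode clocks are given in kHz, while this bridge's registers take the pixel clock in 10 kHz units split across two bytes, which is why the raw adj->clock bytes were wrong. A quick sanity check of the conversion (values are a worked example, not from the driver):

	/* Illustrative: 148.5 MHz (1080p60) is 148500 kHz in drm terms,
	 * i.e. 14850 * 10 kHz on the wire. */
	u16 pclk10k = 148500 / 10;	/* 14850 == 0x3A02 */
	u8 lo = pclk10k & 0xff;		/* 0x02 */
	u8 hi = pclk10k >> 8;		/* 0x3A */
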
@@ -1149,6 +1149,13 @@ static int tc_connector_get_modes(struct drm_connector *connector)
 	struct tc_data *tc = connector_to_tc(connector);
 	struct edid *edid;
 	unsigned int count;
+	int ret;
+
+	ret = tc_get_display_props(tc);
+	if (ret < 0) {
+		dev_err(tc->dev, "failed to read display props: %d\n", ret);
+		return 0;
+	}
 
 	if (tc->panel && tc->panel->funcs && tc->panel->funcs->get_modes) {
 		count = tc->panel->funcs->get_modes(tc->panel);

@@ -379,12 +379,13 @@ int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame,
 	struct drm_crtc_crc *crc = &crtc->crc;
 	struct drm_crtc_crc_entry *entry;
 	int head, tail;
+	unsigned long flags;
 
-	spin_lock(&crc->lock);
+	spin_lock_irqsave(&crc->lock, flags);
 
 	/* Caller may not have noticed yet that userspace has stopped reading */
 	if (!crc->entries) {
-		spin_unlock(&crc->lock);
+		spin_unlock_irqrestore(&crc->lock, flags);
 		return -EINVAL;
 	}
 

@@ -395,7 +396,7 @@ int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame,
 		bool was_overflow = crc->overflow;
 
 		crc->overflow = true;
-		spin_unlock(&crc->lock);
+		spin_unlock_irqrestore(&crc->lock, flags);
 
 		if (!was_overflow)
 			DRM_ERROR("Overflow of CRC buffer, userspace reads too slow.\n");

@@ -411,7 +412,7 @@ int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame,
 	head = (head + 1) & (DRM_CRC_ENTRIES_NR - 1);
 	crc->head = head;
 
-	spin_unlock(&crc->lock);
+	spin_unlock_irqrestore(&crc->lock, flags);
 
 	wake_up_interruptible(&crc->wq);
 

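Note on the three drm_crtc_add_crc_entry() hunks above: every lock and unlock of crc->lock moves to the irqsave variants. That is the standard fix when a spinlock can also be taken from interrupt context; if an IRQ arrives on the same CPU while the lock is held and its handler tries to take the lock again, the CPU deadlocks. A minimal sketch of the pattern, assuming a hypothetical demo_lock shared with an interrupt handler:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);

    static void demo_touch_shared_data(void)
    {
            unsigned long flags;

            /* Saves the IRQ state and disables local interrupts, so an
             * IRQ handler on this CPU cannot spin on demo_lock while we
             * hold it. */
            spin_lock_irqsave(&demo_lock, flags);
            /* ... update data also touched from interrupt context ... */
            spin_unlock_irqrestore(&demo_lock, flags);
    }
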
@@ -290,6 +290,8 @@ struct edid *drm_load_edid_firmware(struct drm_connector *connector)
 	 * the last one found one as a fallback.
 	 */
 	fwstr = kstrdup(edid_firmware, GFP_KERNEL);
+	if (!fwstr)
+		return ERR_PTR(-ENOMEM);
 	edidstr = fwstr;
 
 	while ((edidname = strsep(&edidstr, ","))) {

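Note on the drm_load_edid_firmware() hunk above: kstrdup() can fail under memory pressure, and without the check the strsep() loop would dereference a NULL pointer. Returning ERR_PTR(-ENOMEM) follows the usual kernel convention of encoding an errno inside a pointer return value. A minimal sketch of that convention with a hypothetical helper:

    #include <linux/err.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    static char *demo_dup_or_err(const char *src)
    {
            char *s = kstrdup(src, GFP_KERNEL);

            if (!s)
                    return ERR_PTR(-ENOMEM);
            return s;       /* callers test with IS_ERR()/PTR_ERR() */
    }
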
@@ -1741,6 +1741,18 @@ int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
 
 	entry = __gvt_cache_find_gfn(info->vgpu, gfn);
 	if (!entry) {
+		ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
+		if (ret)
+			goto err_unlock;
+
+		ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
+		if (ret)
+			goto err_unmap;
+	} else if (entry->size != size) {
+		/* the same gfn with different size: unmap and re-map */
+		gvt_dma_unmap_page(vgpu, gfn, entry->dma_addr, entry->size);
+		__gvt_cache_remove_entry(vgpu, entry);
+
 		ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
 		if (ret)
 			goto err_unlock;

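Note on the kvmgt hunk above: it adds handling for a cached gfn whose mapping size has changed, unmapping the stale DMA mapping and dropping its cache entry before mapping at the new size rather than reusing the old entry. A hedged, self-contained sketch of that invalidate-then-remap pattern; every demo_* name is a stand-in, not the driver's real API:

    #include <stddef.h>

    struct demo_entry {
            unsigned long dma_addr;
            unsigned long size;
    };

    /* Stubs standing in for the real map/unmap/cache helpers. */
    static int demo_dma_map(unsigned long gfn, unsigned long *dma,
                            unsigned long size) { *dma = gfn << 12; return 0; }
    static void demo_dma_unmap(unsigned long dma, unsigned long size) { }
    static struct demo_entry *demo_cache_find(unsigned long gfn) { return NULL; }
    static int demo_cache_add(unsigned long gfn, unsigned long dma,
                              unsigned long size) { return 0; }
    static void demo_cache_remove(struct demo_entry *e) { }

    static int demo_map_guest_page(unsigned long gfn, unsigned long *dma,
                                   unsigned long size)
    {
            struct demo_entry *entry = demo_cache_find(gfn);

            if (entry && entry->size != size) {
                    /* Same gfn, different size: invalidate, then re-map. */
                    demo_dma_unmap(entry->dma_addr, entry->size);
                    demo_cache_remove(entry);
                    entry = NULL;
            }

            if (!entry) {
                    int ret = demo_dma_map(gfn, dma, size);

                    if (ret)
                            return ret;
                    return demo_cache_add(gfn, *dma, size);
            }

            *dma = entry->dma_addr; /* cache hit at the matching size */
            return 0;
    }
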
@@ -1336,16 +1336,24 @@ static int msm_pdev_probe(struct platform_device *pdev)
 
 	ret = add_gpu_components(&pdev->dev, &match);
 	if (ret)
-		return ret;
+		goto fail;
 
 	/* on all devices that I am aware of, iommu's which can map
 	 * any address the cpu can see are used:
 	 */
 	ret = dma_set_mask_and_coherent(&pdev->dev, ~0);
 	if (ret)
-		return ret;
+		goto fail;
 
-	return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
+	ret = component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
+	if (ret)
+		goto fail;
+
+	return 0;
+
+fail:
+	of_platform_depopulate(&pdev->dev);
+	return ret;
 }
 
 static int msm_pdev_remove(struct platform_device *pdev)

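Note on the msm_pdev_probe() hunk above: the early returns become jumps to a single fail label, so every error path, including a late component_master_add_with_match() failure, undoes the platform-device population done earlier in probe. This is the kernel's usual goto-based unwind idiom; a minimal sketch with hypothetical demo_* stand-ins:

    /* Stubs standing in for resource acquisition and registration. */
    static int demo_populate(void) { return 0; }
    static int demo_register(void) { return 0; }
    static void demo_depopulate(void) { }

    static int demo_probe(void)
    {
            int ret;

            ret = demo_populate();          /* resource acquired first */
            if (ret)
                    return ret;             /* nothing to unwind yet */

            ret = demo_register();
            if (ret)
                    goto fail;              /* single unwind point */

            return 0;

    fail:
            demo_depopulate();              /* undo demo_populate() */
            return ret;
    }
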
@@ -251,7 +251,7 @@ nouveau_conn_reset(struct drm_connector *connector)
 		return;
 
 	if (connector->state)
-		__drm_atomic_helper_connector_destroy_state(connector->state);
+		nouveau_conn_atomic_destroy_state(connector, connector->state);
 	__drm_atomic_helper_connector_reset(connector, &asyc->state);
 	asyc->dither.mode = DITHERING_MODE_AUTO;
 	asyc->dither.depth = DITHERING_DEPTH_AUTO;

@@ -2803,7 +2803,14 @@ static int panel_simple_dsi_probe(struct mipi_dsi_device *dsi)
 	dsi->format = desc->format;
 	dsi->lanes = desc->lanes;
 
-	return mipi_dsi_attach(dsi);
+	err = mipi_dsi_attach(dsi);
+	if (err) {
+		struct panel_simple *panel = dev_get_drvdata(&dsi->dev);
+
+		drm_panel_remove(&panel->base);
+	}
+
+	return err;
 }
 
 static int panel_simple_dsi_remove(struct mipi_dsi_device *dsi)

@@ -880,7 +880,8 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
 	struct vop *vop = to_vop(crtc);
 
 	adjusted_mode->clock =
-		clk_round_rate(vop->dclk, mode->clock * 1000) / 1000;
+		DIV_ROUND_UP(clk_round_rate(vop->dclk, mode->clock * 1000),
+			     1000);
 
 	return true;
 }

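Note on the vop_crtc_mode_fixup() hunk above: DIV_ROUND_UP(n, d) expands to ((n) + (d) - 1) / (d), so the kHz value written back to adjusted_mode->clock is rounded up instead of truncated, avoiding understating the rate clk_round_rate() will actually deliver. A small standalone demonstration of the arithmetic (plain C, compilable as-is):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            long rate_hz = 74250001;        /* just above 74250 kHz */

            printf("truncated:  %ld kHz\n", rate_hz / 1000);              /* 74250 */
            printf("rounded up: %ld kHz\n", DIV_ROUND_UP(rate_hz, 1000)); /* 74251 */
            return 0;
    }
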
Some files were not shown because too many files have changed in this diff.