Merge 4.19.320 into android-4.19-stable

Changes in 4.19.320
	platform/chrome: cros_ec_debugfs: fix wrong EC message version
	hfsplus: fix to avoid false alarm of circular locking
	x86/of: Return consistent error type from x86_of_pci_irq_enable()
	x86/pci/intel_mid_pci: Fix PCIBIOS_* return code handling
	x86/pci/xen: Fix PCIBIOS_* return code handling
	x86/platform/iosf_mbi: Convert PCIBIOS_* return codes to errnos
	hwmon: (adt7475) Fix default duty on fan is disabled
	pwm: stm32: Always do lazy disabling
	hwmon: (max6697) Fix underflow when writing limit attributes
	hwmon: Introduce SENSOR_DEVICE_ATTR_{RO, RW, WO} and variants
	hwmon: (max6697) Auto-convert to use SENSOR_DEVICE_ATTR_{RO, RW, WO}
	hwmon: (max6697) Fix swapped temp{1,8} critical alarms
	arm64: dts: rockchip: Increase VOP clk rate on RK3328
	m68k: atari: Fix TT bootup freeze / unexpected (SCU) interrupt messages
	x86/xen: Convert comma to semicolon
	m68k: cmpxchg: Fix return value for default case in __arch_xchg()
	wifi: brcmsmac: LCN PHY code is used for BCM4313 2G-only device
	net/smc: Allow SMC-D 1MB DMB allocations
	net/smc: set rmb's SG_MAX_SINGLE_ALLOC limitation only when CONFIG_ARCH_NO_SG_CHAIN is defined
	selftests/bpf: Check length of recv in test_sockmap
	wifi: cfg80211: fix typo in cfg80211_calculate_bitrate_he()
	wifi: cfg80211: handle 2x996 RU allocation in cfg80211_calculate_bitrate_he()
	net: fec: Refactor: #define magic constants
	net: fec: Fix FEC_ECR_EN1588 being cleared on link-down
	ipvs: Avoid unnecessary calls to skb_is_gso_sctp
	perf: Fix perf_aux_size() for greater-than 32-bit size
	perf: Prevent passing zero nr_pages to rb_alloc_aux()
	bna: adjust 'name' buf size of bna_tcb and bna_ccb structures
	selftests: forwarding: devlink_lib: Wait for udev events after reloading
	media: imon: Fix race getting ictx->lock
	saa7134: Unchecked i2c_transfer function result fixed
	media: uvcvideo: Allow entity-defined get_info and get_cur
	media: uvcvideo: Override default flags
	media: renesas: vsp1: Fix _irqsave and _irq mix
	media: renesas: vsp1: Store RPF partition configuration per RPF instance
	leds: trigger: Unregister sysfs attributes before calling deactivate()
	perf report: Fix condition in sort__sym_cmp()
	drm/etnaviv: fix DMA direction handling for cached RW buffers
	mfd: omap-usb-tll: Use struct_size to allocate tll
	ext4: avoid writing unitialized memory to disk in EA inodes
	sparc64: Fix incorrect function signature and add prototype for prom_cif_init
	PCI: Equalize hotplug memory and io for occupied and empty slots
	PCI: Fix resource double counting on remove & rescan
	RDMA/mlx4: Fix truncated output warning in mad.c
	RDMA/mlx4: Fix truncated output warning in alias_GUID.c
	RDMA/rxe: Don't set BTH_ACK_MASK for UC or UD QPs
	mtd: make mtd_test.c a separate module
	Input: elan_i2c - do not leave interrupt disabled on suspend failure
	MIPS: Octeron: remove source file executable bit
	powerpc/xmon: Fix disassembly CPU feature checks
	macintosh/therm_windtunnel: fix module unload.
	bnxt_re: Fix imm_data endianness
	ice: Rework flex descriptor programming
	netfilter: ctnetlink: use helper function to calculate expect ID
	pinctrl: core: fix possible memory leak when pinctrl_enable() fails
	pinctrl: single: fix possible memory leak when pinctrl_enable() fails
	pinctrl: ti: ti-iodelay: Drop if block with always false condition
	pinctrl: ti: ti-iodelay: fix possible memory leak when pinctrl_enable() fails
	pinctrl: freescale: mxs: Fix refcount of child
	fs/nilfs2: remove some unused macros to tame gcc
	nilfs2: avoid undefined behavior in nilfs_cnt32_ge macro
	tick/broadcast: Make takeover of broadcast hrtimer reliable
	net: netconsole: Disable target before netpoll cleanup
	af_packet: Handle outgoing VLAN packets without hardware offloading
	ipv6: take care of scope when choosing the src addr
	char: tpm: Fix possible memory leak in tpm_bios_measurements_open()
	media: venus: fix use after free in vdec_close
	hfs: fix to initialize fields of hfs_inode_info after hfs_alloc_inode()
	drm/gma500: fix null pointer dereference in cdv_intel_lvds_get_modes
	drm/gma500: fix null pointer dereference in psb_intel_lvds_get_modes
	m68k: amiga: Turn off Warp1260 interrupts during boot
	ext4: check dot and dotdot of dx_root before making dir indexed
	ext4: make sure the first directory block is not a hole
	wifi: mwifiex: Fix interface type change
	leds: ss4200: Convert PCIBIOS_* return codes to errnos
	tools/memory-model: Fix bug in lock.cat
	hwrng: amd - Convert PCIBIOS_* return codes to errnos
	PCI: hv: Return zero, not garbage, when reading PCI_INTERRUPT_PIN
	binder: fix hang of unregistered readers
	scsi: qla2xxx: Return ENOBUFS if sg_cnt is more than one for ELS cmds
	f2fs: fix to don't dirty inode for readonly filesystem
	clk: davinci: da8xx-cfgchip: Initialize clk_init_data before use
	ubi: eba: properly rollback inside self_check_eba
	decompress_bunzip2: fix rare decompression failure
	kobject_uevent: Fix OOB access within zap_modalias_env()
	rtc: cmos: Fix return value of nvmem callbacks
	scsi: qla2xxx: During vport delete send async logout explicitly
	scsi: qla2xxx: validate nvme_local_port correctly
	perf/x86/intel/pt: Fix topa_entry base length
	watchdog/perf: properly initialize the turbo mode timestamp and rearm counter
	platform: mips: cpu_hwmon: Disable driver on unsupported hardware
	RDMA/iwcm: Fix a use-after-free related to destroying CM IDs
	selftests/sigaltstack: Fix ppc64 GCC build
	nilfs2: handle inconsistent state in nilfs_btnode_create_block()
	kdb: Fix bound check compiler warning
	kdb: address -Wformat-security warnings
	kdb: Use the passed prompt in kdb_position_cursor()
	jfs: Fix array-index-out-of-bounds in diFree
	dma: fix call order in dmam_free_coherent
	MIPS: SMP-CPS: Fix address for GCR_ACCESS register for CM3 and later
	net: ip_rt_get_source() - use new style struct initializer instead of memset
	ipv4: Fix incorrect source address in Record Route option
	net: bonding: correctly annotate RCU in bond_should_notify_peers()
	tipc: Return non-zero value from tipc_udp_addr2str() on error
	mISDN: Fix a use after free in hfcmulti_tx()
	mm: avoid overflows in dirty throttling logic
	PCI: rockchip: Make 'ep-gpios' DT property optional
	PCI: rockchip: Use GPIOD_OUT_LOW flag while requesting ep_gpio
	parport: parport_pc: Mark expected switch fall-through
	parport: Convert printk(KERN_<LEVEL> to pr_<level>(
	parport: Standardize use of printmode
	dev/parport: fix the array out-of-bounds risk
	driver core: Cast to (void *) with __force for __percpu pointer
	devres: Fix memory leakage caused by driver API devm_free_percpu()
	perf/x86/intel/pt: Export pt_cap_get()
	perf/x86/intel/pt: Use helpers to obtain ToPA entry size
	perf/x86/intel/pt: Use pointer arithmetics instead in ToPA entry calculation
	perf/x86/intel/pt: Split ToPA metadata and page layout
	perf/x86/intel/pt: Fix a topa_entry base address calculation
	remoteproc: imx_rproc: ignore mapping vdev regions
	remoteproc: imx_rproc: Fix ignoring mapping vdev regions
	remoteproc: imx_rproc: Skip over memory region when node value is NULL
	drm/vmwgfx: Fix overlay when using Screen Targets
	net/iucv: fix use after free in iucv_sock_close()
	ipv6: fix ndisc_is_useropt() handling for PIO
	protect the fetch of ->fd[fd] in do_dup2() from mispredictions
	ALSA: usb-audio: Correct surround channels in UAC1 channel map
	net: usb: sr9700: fix uninitialized variable use in sr_mdio_read
	irqchip/mbigen: Fix mbigen node address layout
	x86/mm: Fix pti_clone_pgtable() alignment assumption
	net: usb: qmi_wwan: fix memory leak for not ip packets
	net: linkwatch: use system_unbound_wq
	Bluetooth: l2cap: always unlock channel in l2cap_conless_channel()
	net: fec: Stop PPS on driver remove
	md/raid5: avoid BUG_ON() while continue reshape after reassembling
	clocksource/drivers/sh_cmt: Address race condition for clock events
	PCI: Add Edimax Vendor ID to pci_ids.h
	udf: prevent integer overflow in udf_bitmap_free_blocks()
	wifi: nl80211: don't give key data to userspace
	btrfs: fix bitmap leak when loading free space cache on duplicate entry
	media: uvcvideo: Ignore empty TS packets
	media: uvcvideo: Fix the bandwdith quirk on USB 3.x
	jbd2: avoid memleak in jbd2_journal_write_metadata_buffer
	s390/sclp: Prevent release of buffer in I/O
	SUNRPC: Fix a race to wake a sync task
	ext4: fix wrong unit use in ext4_mb_find_by_goal
	arm64: Add support for SB barrier and patch in over DSB; ISB sequences
	arm64: cpufeature: Force HWCAP to be based on the sysreg visible to user-space
	arm64: Add Neoverse-V2 part
	arm64: cputype: Add Cortex-X4 definitions
	arm64: cputype: Add Neoverse-V3 definitions
	arm64: errata: Add workaround for Arm errata 3194386 and 3312417
	arm64: cputype: Add Cortex-X3 definitions
	arm64: cputype: Add Cortex-A720 definitions
	arm64: cputype: Add Cortex-X925 definitions
	arm64: errata: Unify speculative SSBS errata logic
	arm64: errata: Expand speculative SSBS workaround
	arm64: cputype: Add Cortex-X1C definitions
	arm64: cputype: Add Cortex-A725 definitions
	arm64: errata: Expand speculative SSBS workaround (again)
	i2c: smbus: Don't filter out duplicate alerts
	i2c: smbus: Improve handling of stuck alerts
	i2c: smbus: Send alert notifications to all devices if source not found
	bpf: kprobe: remove unused declaring of bpf_kprobe_override
	spi: lpspi: Replace all "master" with "controller"
	spi: lpspi: Add slave mode support
	spi: lpspi: Let watermark change with send data length
	spi: lpspi: Add i.MX8 boards support for lpspi
	spi: lpspi: add the error info of transfer speed setting
	spi: fsl-lpspi: remove unneeded array
	spi: spi-fsl-lpspi: Fix scldiv calculation
	ALSA: line6: Fix racy access to midibuf
	usb: vhci-hcd: Do not drop references before new references are gained
	USB: serial: debug: do not echo input by default
	usb: gadget: core: Check for unset descriptor
	scsi: ufs: core: Fix hba->last_dme_cmd_tstamp timestamp updating logic
	tick/broadcast: Move per CPU pointer access into the atomic section
	ntp: Clamp maxerror and esterror to operating range
	driver core: Fix uevent_show() vs driver detach race
	ntp: Safeguard against time_constant overflow
	serial: core: check uartclk for zero to avoid divide by zero
	power: supply: axp288_charger: Fix constant_charge_voltage writes
	power: supply: axp288_charger: Round constant_charge_voltage writes down
	tracing: Fix overflow in get_free_elt()
	x86/mtrr: Check if fixed MTRRs exist before saving them
	drm/bridge: analogix_dp: properly handle zero sized AUX transactions
	drm/mgag200: Set DDC timeout in milliseconds
	kbuild: Fix '-S -c' in x86 stack protector scripts
	netfilter: nf_tables: set element extended ACK reporting support
	netfilter: nf_tables: use timestamp to check for set element timeout
	netfilter: nf_tables: prefer nft_chain_validate
	arm64: cpufeature: Fix the visibility of compat hwcaps
	media: uvcvideo: Use entity get_cur in uvc_ctrl_set
	drm/i915/gem: Fix Virtual Memory mapping boundaries calculation
	exec: Fix ToCToU between perm check and set-uid/gid usage
	nvme/pci: Add APST quirk for Lenovo N60z laptop
	Linux 4.19.320

Change-Id: I12efa55c04d97f29d34f1a49511948735871b2bd
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Committed by Greg Kroah-Hartman on 2024-08-28 09:03:40 +00:00.
187 changed files with 1963 additions and 1076 deletions.


@@ -61,7 +61,25 @@ stable kernels.
| ARM | Cortex-A73 | #858921 | ARM64_ERRATUM_858921 |
| ARM | Cortex-A55 | #1024718 | ARM64_ERRATUM_1024718 |
| ARM | Cortex-A76 | #1463225 | ARM64_ERRATUM_1463225 |
| ARM | Cortex-A76 | #3324349 | ARM64_ERRATUM_3194386 |
| ARM | Cortex-A77 | #3324348 | ARM64_ERRATUM_3194386 |
| ARM | Cortex-A78 | #3324344 | ARM64_ERRATUM_3194386 |
| ARM | Cortex-A78C | #3324346,3324347| ARM64_ERRATUM_3194386 |
| ARM | Cortex-A710 | #3324338 | ARM64_ERRATUM_3194386 |
| ARM | Cortex-A720 | #3456091 | ARM64_ERRATUM_3194386 |
| ARM | Cortex-A725 | #3456106 | ARM64_ERRATUM_3194386 |
| ARM | Cortex-X1 | #3324344 | ARM64_ERRATUM_3194386 |
| ARM | Cortex-X1C | #3324346 | ARM64_ERRATUM_3194386 |
| ARM | Cortex-X2 | #3324338 | ARM64_ERRATUM_3194386 |
| ARM | Cortex-X3 | #3324335 | ARM64_ERRATUM_3194386 |
| ARM | Cortex-X4 | #3194386 | ARM64_ERRATUM_3194386 |
| ARM | Cortex-X925 | #3324334 | ARM64_ERRATUM_3194386 |
| ARM | Neoverse-N1 | #1542419 | ARM64_ERRATUM_1542419 |
| ARM | Neoverse-N1 | #3324349 | ARM64_ERRATUM_3194386 |
| ARM | Neoverse-N2 | #3324339 | ARM64_ERRATUM_3194386 |
| ARM | Neoverse-V1 | #3324341 | ARM64_ERRATUM_3194386 |
| ARM | Neoverse-V2 | #3324336 | ARM64_ERRATUM_3194386 |
| ARM | Neoverse-V3 | #3312417 | ARM64_ERRATUM_3194386 |
| ARM | MMU-500 | #841119,#826419 | N/A |
| | | | |
| Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |


@@ -299,17 +299,25 @@ functions is used.
The header file linux/hwmon-sysfs.h provides a number of useful macros to
declare and use hardware monitoring sysfs attributes.
In many cases, you can use the existing define DEVICE_ATTR or its variants
DEVICE_ATTR_{RW,RO,WO} to declare such attributes. This is feasible if an
attribute has no additional context. However, in many cases there will be
additional information such as a sensor index which will need to be passed
to the sysfs attribute handling function.
SENSOR_DEVICE_ATTR and SENSOR_DEVICE_ATTR_2 can be used to define attributes
which need such additional context information. SENSOR_DEVICE_ATTR requires
one additional argument, SENSOR_DEVICE_ATTR_2 requires two.
Simplified variants of SENSOR_DEVICE_ATTR and SENSOR_DEVICE_ATTR_2 are available
and should be used if standard attribute permissions and function names are
feasible. Standard permissions are 0644 for SENSOR_DEVICE_ATTR[_2]_RW,
0444 for SENSOR_DEVICE_ATTR[_2]_RO, and 0200 for SENSOR_DEVICE_ATTR[_2]_WO.
Standard functions, similar to DEVICE_ATTR_{RW,RO,WO}, have _show and _store
appended to the provided function name.
SENSOR_DEVICE_ATTR and its variants define a struct sensor_device_attribute
variable. This structure has the following fields.
struct sensor_device_attribute {
	struct device_attribute dev_attr;
@@ -320,8 +328,8 @@ You can use to_sensor_dev_attr to get the pointer to this structure from the
attribute read or write function. Its parameter is the device to which the
attribute is attached.
SENSOR_DEVICE_ATTR_2 and its variants define a struct sensor_device_attribute_2
variable, which is defined as follows.
struct sensor_device_attribute_2 {
	struct device_attribute dev_attr;
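
As a rough illustration of the macros described in this hunk (not part of the patch itself), a driver-side declaration using the new _RO variant might look like the sketch below; the callback body and the read_temp_mC() helper are made up for the example:

/* Hypothetical example only: the _RO variant wires an index into the
 * show callback via to_sensor_dev_attr(); read_temp_mC() is a stand-in
 * for a driver-specific register read.
 */
static ssize_t temp_input_show(struct device *dev,
			       struct device_attribute *devattr, char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);

	/* attr->index selects which sensor this attribute reports */
	return sprintf(buf, "%d\n", read_temp_mC(dev, attr->index));
}

static SENSOR_DEVICE_ATTR_RO(temp1_input, temp_input, 0);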


@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 320
EXTRAVERSION =
NAME = "People's Front"


@@ -539,6 +539,44 @@ config ARM64_ERRATUM_1742098
If unsure, say Y.
config ARM64_ERRATUM_3194386
bool "Cortex-*/Neoverse-*: workaround for MSR SSBS not self-synchronizing"
default y
help
This option adds the workaround for the following errata:
* ARM Cortex-A76 erratum 3324349
* ARM Cortex-A77 erratum 3324348
* ARM Cortex-A78 erratum 3324344
* ARM Cortex-A78C erratum 3324346
* ARM Cortex-A78C erratum 3324347
* ARM Cortex-A710 erratum 3324338
* ARM Cortex-A720 erratum 3456091
* ARM Cortex-A725 erratum 3456106
* ARM Cortex-X1 erratum 3324344
* ARM Cortex-X1C erratum 3324346
* ARM Cortex-X2 erratum 3324338
* ARM Cortex-X3 erratum 3324335
* ARM Cortex-X4 erratum 3194386
* ARM Cortex-X925 erratum 3324334
* ARM Neoverse-N1 erratum 3324349
* ARM Neoverse N2 erratum 3324339
* ARM Neoverse-V1 erratum 3324341
* ARM Neoverse V2 erratum 3324336
* ARM Neoverse-V3 erratum 3312417
On affected cores "MSR SSBS, #0" instructions may not affect
subsequent speculative instructions, which may permit unexpected
speculative store bypassing.
Work around this problem by placing a Speculation Barrier (SB) or
Instruction Synchronization Barrier (ISB) after kernel changes to
SSBS. The presence of the SSBS special-purpose register is hidden
from hwcaps and EL0 reads of ID_AA64PFR1_EL1, such that userspace
will use the PR_SPEC_STORE_BYPASS prctl to change SSBS.
If unsure, say Y.
config CAVIUM_ERRATUM_22375
bool "Cavium erratum 22375, 24313"
default y
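
The help text above notes that, with SSBS hidden from EL0, userspace is expected to fall back to the PR_SPEC_STORE_BYPASS prctl. A minimal illustrative sketch of that userspace side (not part of this patch) could look like:

/* Illustrative only: query and disable speculative store bypass for the
 * current task via the speculation-control prctl() interface.
 */
#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	long state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);

	printf("PR_SPEC_STORE_BYPASS state: 0x%lx\n", state);

	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		  PR_SPEC_DISABLE, 0, 0))
		perror("prctl(PR_SET_SPECULATION_CTRL)");
	return 0;
}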


@@ -649,8 +649,8 @@
<0>, <24000000>,
<24000000>, <24000000>,
<15000000>, <15000000>,
<300000000>, <100000000>,
<400000000>, <100000000>,
<50000000>, <100000000>,
<100000000>, <100000000>,
<50000000>, <50000000>,


@@ -147,6 +147,19 @@
hint #22
.endm
/*
* Speculation barrier
*/
.macro sb
alternative_if_not ARM64_HAS_SB
dsb nsh
isb
alternative_else
SB_BARRIER_INSN
nop
alternative_endif
.endm
/*
* Sanitise a 64-bit bounded index wrt speculation, returning zero if out
* of bounds.


@@ -34,6 +34,10 @@
#define psb_csync() asm volatile("hint #17" : : : "memory")
#define csdb() asm volatile("hint #20" : : : "memory")
#define spec_bar() asm volatile(ALTERNATIVE("dsb nsh\nisb\n", \
SB_BARRIER_INSN"nop\n", \
ARM64_HAS_SB))
#define mb() dsb(sy)
#define rmb() dsb(ld)
#define wmb() dsb(st)


@@ -56,8 +56,10 @@
#define ARM64_WORKAROUND_1542419 35
#define ARM64_SPECTRE_BHB 36
#define ARM64_WORKAROUND_1742098 37
#define ARM64_HAS_SB 38
#define ARM64_WORKAROUND_SPECULATIVE_SSBS 39
/* kabi: reserve 40 - 62 for future cpu capabilities */
#define ARM64_NCAPS 62
#endif /* __ASM_CPUCAPS_H */


@@ -89,6 +89,14 @@
#define ARM_CPU_PART_CORTEX_X2 0xD48
#define ARM_CPU_PART_NEOVERSE_N2 0xD49
#define ARM_CPU_PART_CORTEX_A78C 0xD4B
#define ARM_CPU_PART_CORTEX_X1C 0xD4C
#define ARM_CPU_PART_CORTEX_X3 0xD4E
#define ARM_CPU_PART_NEOVERSE_V2 0xD4F
#define ARM_CPU_PART_CORTEX_A720 0xD81
#define ARM_CPU_PART_CORTEX_X4 0xD82
#define ARM_CPU_PART_NEOVERSE_V3 0xD84
#define ARM_CPU_PART_CORTEX_X925 0xD85
#define ARM_CPU_PART_CORTEX_A725 0xD87
#define APM_CPU_PART_POTENZA 0x000
@@ -125,6 +133,14 @@
#define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
#define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
#define MIDR_CORTEX_A78C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C)
#define MIDR_CORTEX_X1C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1C)
#define MIDR_CORTEX_X3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X3)
#define MIDR_NEOVERSE_V2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V2)
#define MIDR_CORTEX_A720 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A720)
#define MIDR_CORTEX_X4 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X4)
#define MIDR_NEOVERSE_V3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3)
#define MIDR_CORTEX_X925 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X925)
#define MIDR_CORTEX_A725 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A725)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)


@@ -107,6 +107,11 @@
#define SET_PSTATE_UAO(x) __emit_inst(0xd500401f | PSTATE_UAO | ((!!x) << PSTATE_Imm_shift))
#define SET_PSTATE_SSBS(x) __emit_inst(0xd500401f | PSTATE_SSBS | ((!!x) << PSTATE_Imm_shift))
#define __SYS_BARRIER_INSN(CRm, op2, Rt) \
__emit_inst(0xd5000000 | sys_insn(0, 3, 3, (CRm), (op2)) | ((Rt) & 0x1f))
#define SB_BARRIER_INSN __SYS_BARRIER_INSN(0, 7, 31)
#define SYS_DC_ISW sys_insn(1, 0, 7, 6, 2)
#define SYS_DC_IGSW sys_insn(1, 0, 7, 6, 4)
#define SYS_DC_IGDSW sys_insn(1, 0, 7, 6, 6)
@@ -538,6 +543,7 @@
#define ID_AA64ISAR0_AES_SHIFT 4
/* id_aa64isar1 */
#define ID_AA64ISAR1_SB_SHIFT 36
#define ID_AA64ISAR1_LRCPC_SHIFT 20
#define ID_AA64ISAR1_FCMA_SHIFT 16
#define ID_AA64ISAR1_JSCVT_SHIFT 12


@@ -46,8 +46,7 @@ static inline void set_fs(mm_segment_t fs)
* Prevent a mispredicted conditional call to set_fs from forwarding
* the wrong address limit to access_ok under speculation.
*/
spec_bar();
/* On user-mode return, check fs is correct */
set_thread_flag(TIF_FSCHECK);


@@ -49,5 +49,6 @@
#define HWCAP_ILRCPC (1 << 26)
#define HWCAP_FLAGM (1 << 27)
#define HWCAP_SSBS (1 << 28)
#define HWCAP_SB (1 << 29)
#endif /* _UAPI__ASM_HWCAP_H */
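
For context (not part of the patch), userspace can probe the new hwcap bit through the auxiliary vector; a minimal sketch:

/* Illustrative only: check whether the kernel advertises the SB
 * (speculation barrier) instruction via AT_HWCAP.
 */
#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP_SB
#define HWCAP_SB (1 << 29)	/* matches the uapi definition added above */
#endif

int main(void)
{
	unsigned long hwcaps = getauxval(AT_HWCAP);

	printf("SB instruction %s\n",
	       (hwcaps & HWCAP_SB) ? "supported" : "not supported");
	return 0;
}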


@@ -344,6 +344,19 @@ void arm64_set_ssbd_mitigation(bool state)
asm volatile(SET_PSTATE_SSBS(0));
else
asm volatile(SET_PSTATE_SSBS(1));
/*
* SSBS is self-synchronizing and is intended to affect
* subsequent speculative instructions, but some CPUs can
* speculate with a stale value of SSBS.
*
* Mitigate this with an unconditional speculation barrier, as
* CPUs could mis-speculate branches and bypass a conditional
* barrier.
*/
if (IS_ENABLED(CONFIG_ARM64_ERRATUM_3194386))
spec_bar();
return;
}
@@ -694,6 +707,29 @@ static struct midr_range broken_aarch32_aes[] = {
};
#endif
#ifdef CONFIG_ARM64_ERRATUM_3194386
static const struct midr_range erratum_spec_ssbs_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A720),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A725),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X1C),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X3),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X4),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X925),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3),
{}
};
#endif
const struct arm64_cpu_capabilities arm64_errata[] = {
#if defined(CONFIG_ARM64_ERRATUM_826319) || \
@@ -903,6 +939,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
CAP_MIDR_RANGE_LIST(broken_aarch32_aes),
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
},
#endif
#ifdef CONFIG_ARM64_ERRATUM_3194386
{
.desc = "SSBS not fully self-synchronizing",
.capability = ARM64_WORKAROUND_SPECULATIVE_SSBS,
ERRATA_MIDR_RANGE_LIST(erratum_spec_ssbs_list),
},
#endif
{
}


@@ -144,6 +144,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
};
static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SB_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
@@ -273,6 +274,30 @@ static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_mvfr0[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_FPROUND_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_FPSHVEC_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_FPSQRT_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_FPDIVIDE_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_FPTRAP_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_FPDP_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_FPSP_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_SIMD_SHIFT, 4, 0),
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_mvfr1[] = {
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_SIMDFMAC_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_FPHP_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_SIMDHP_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_SIMDSP_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_SIMDINT_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_SIMDLS_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_FPDNAN_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_FPFTZ_SHIFT, 4, 0),
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_mvfr2[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* FPMisc */
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* SIMDMisc */
@@ -288,10 +313,10 @@ static const struct arm64_ftr_bits ftr_dczid[] = {
static const struct arm64_ftr_bits ftr_id_isar5[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_RDM_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_CRC32_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA2_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA1_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_AES_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SEVL_SHIFT, 4, 0),
ARM64_FTR_END,
};
@@ -331,7 +356,7 @@ static const struct arm64_ftr_bits ftr_zcr[] = {
* Common ftr bits for a 32bit register with all hidden, strict
* attributes, with 4bit feature fields and a default safe value of
* 0. Covers the following 32bit registers:
* id_isar[1-3], id_mmfr[1-3]
*/
static const struct arm64_ftr_bits ftr_generic_32bits[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
@@ -386,8 +411,8 @@ static const struct __ftr_reg_entry {
ARM64_FTR_REG(SYS_ID_MMFR4_EL1, ftr_id_mmfr4),
/* Op1 = 0, CRn = 0, CRm = 3 */
ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_mvfr0),
ARM64_FTR_REG(SYS_MVFR1_EL1, ftr_mvfr1),
ARM64_FTR_REG(SYS_MVFR2_EL1, ftr_mvfr2),
/* Op1 = 0, CRn = 0, CRm = 4 */
@@ -826,17 +851,39 @@ feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
return val >= entry->min_field_value;
}
static u64
read_scoped_sysreg(const struct arm64_cpu_capabilities *entry, int scope)
{
WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
if (scope == SCOPE_SYSTEM)
return read_sanitised_ftr_reg(entry->sys_reg);
else
return __read_sysreg_by_encoding(entry->sys_reg);
}
static bool
has_user_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
{
int mask;
struct arm64_ftr_reg *regp;
u64 val = read_scoped_sysreg(entry, scope);
regp = get_arm64_ftr_reg(entry->sys_reg);
if (!regp)
return false;
mask = cpuid_feature_extract_unsigned_field(regp->user_mask,
entry->field_pos);
if (!mask)
return false;
return feature_matches(val, entry);
}
static bool
has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
{
u64 val = read_scoped_sysreg(entry, scope);
return feature_matches(val, entry);
}
@@ -1155,6 +1202,17 @@ static void cpu_enable_ssbs(const struct arm64_cpu_capabilities *__unused)
}
#endif /* CONFIG_ARM64_SSBD */
static void user_feature_fixup(void)
{
if (cpus_have_cap(ARM64_WORKAROUND_SPECULATIVE_SSBS)) {
struct arm64_ftr_reg *regp;
regp = get_arm64_ftr_reg(SYS_ID_AA64PFR1_EL1);
if (regp)
regp->user_mask &= ~GENMASK(7, 4); /* SSBS */
}
}
static void elf_hwcap_fixup(void)
{
#ifdef CONFIG_ARM64_ERRATUM_1742098
@@ -1361,12 +1419,21 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.cpu_enable = cpu_enable_ssbs,
},
#endif
{
.desc = "Speculation barrier (SB)",
.capability = ARM64_HAS_SB,
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64ISAR1_EL1,
.field_pos = ID_AA64ISAR1_SB_SHIFT,
.sign = FTR_UNSIGNED,
.min_field_value = 1,
},
{},
};
#define HWCAP_CPUID_MATCH(reg, field, s, min_value) \
.matches = has_user_cpuid_feature, \
.sys_reg = reg, \
.field_pos = field, \
.sign = s, \
@@ -1415,6 +1482,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FCMA),
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_LRCPC),
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ILRCPC),
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_SB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SB),
HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_USCAT),
#ifdef CONFIG_ARM64_SVE
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, HWCAP_SVE),
@@ -1809,6 +1877,7 @@ void __init setup_cpu_features(void)
setup_system_capabilities();
mark_const_caps_ready();
user_feature_fixup();
setup_elf_hwcaps(arm64_elf_hwcaps);
if (system_supports_32bit_el0()) {
@@ -1842,7 +1911,7 @@ cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
/*
* We emulate only the following system register space.
* Op0 = 0x3, CRn = 0x0, Op1 = 0x0, CRm = [0, 2 - 7]
* See Table C5-6 System instruction encodings for System register accesses,
* ARMv8 ARM(ARM DDI 0487A.f) for more details.
*/
@@ -1852,7 +1921,7 @@ static inline bool __attribute_const__ is_emulated(u32 id)
sys_reg_CRn(id) == 0x0 &&
sys_reg_Op1(id) == 0x0 &&
(sys_reg_CRm(id) == 0 ||
((sys_reg_CRm(id) >= 2) && (sys_reg_CRm(id) <= 7))));
}
/*


@@ -88,6 +88,7 @@ static const char *const hwcap_str[] = {
"ilrcpc", "ilrcpc",
"flagm", "flagm",
"ssbs", "ssbs",
"sb",
NULL NULL
}; };


@@ -181,6 +181,15 @@ int __init amiga_parse_bootinfo(const struct bi_record *record)
dev->slotsize = be16_to_cpu(cd->cd_SlotSize);
dev->boardaddr = be32_to_cpu(cd->cd_BoardAddr);
dev->boardsize = be32_to_cpu(cd->cd_BoardSize);
/* CS-LAB Warp 1260 workaround */
if (be16_to_cpu(dev->rom.er_Manufacturer) == ZORRO_MANUF(ZORRO_PROD_CSLAB_WARP_1260) &&
dev->rom.er_Product == ZORRO_PROD(ZORRO_PROD_CSLAB_WARP_1260)) {
/* turn off all interrupts */
pr_info("Warp 1260 card detected: applying interrupt storm workaround\n");
*(uint32_t *)(dev->boardaddr + 0x1000) = 0xfff;
}
} else
pr_warn("amiga_parse_bootinfo: too many AutoConfig devices\n");
#endif /* CONFIG_ZORRO */


@@ -302,11 +302,7 @@ void __init atari_init_IRQ(void)
if (ATARIHW_PRESENT(SCU)) {
/* init the SCU if present */
tt_scu.sys_mask = 0x0; /* disable all interrupts */
tt_scu.vme_mask = 0x60; /* enable MFP and SCC ints */
} else {
/* If no SCU and no Hades, the HSYNC interrupt needs to be


@@ -33,7 +33,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
x = tmp;
break;
default:
x = __invalid_xchg_size(x, ptr, size);
break;
}


@@ -232,6 +232,10 @@ GCR_ACCESSOR_RO(32, 0x0d0, gic_status)
GCR_ACCESSOR_RO(32, 0x0f0, cpc_status)
#define CM_GCR_CPC_STATUS_EX BIT(0)
/* GCR_ACCESS - Controls core/IOCU access to GCRs */
GCR_ACCESSOR_RW(32, 0x120, access_cm3)
#define CM_GCR_ACCESS_ACCESSEN GENMASK(7, 0)
/* GCR_L2_CONFIG - Indicates L2 cache configuration when Config5.L2C=1 */
GCR_ACCESSOR_RW(32, 0x130, l2_config)
#define CM_GCR_L2_CONFIG_BYPASS BIT(20)


@@ -233,7 +233,10 @@ static void boot_core(unsigned int core, unsigned int vpe_id)
write_gcr_co_reset_ext_base(CM_GCR_Cx_RESET_EXT_BASE_UEB);
/* Ensure the core can access the GCRs */
if (mips_cm_revision() < CM_REV_CM3)
set_gcr_access(1 << core);
else
set_gcr_access_cm3(1 << core);
if (mips_cpc_present()) {
/* Reset the core */

arch/mips/pci/pcie-octeon.c Executable file → Normal file

@@ -133,32 +133,21 @@ int print_insn_powerpc (unsigned long insn, unsigned long memaddr)
bool insn_is_short;
ppc_cpu_t dialect;
dialect = PPC_OPCODE_PPC | PPC_OPCODE_COMMON;
if (IS_ENABLED(CONFIG_PPC64))
dialect |= PPC_OPCODE_64 | PPC_OPCODE_POWER4 | PPC_OPCODE_CELL |
PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_POWER7 | PPC_OPCODE_POWER8 |
PPC_OPCODE_POWER9;
if (cpu_has_feature(CPU_FTR_TM))
dialect |= PPC_OPCODE_HTM;
if (cpu_has_feature(CPU_FTR_ALTIVEC))
dialect |= PPC_OPCODE_ALTIVEC | PPC_OPCODE_ALTIVEC2;
if (cpu_has_feature(CPU_FTR_VSX))
dialect |= PPC_OPCODE_VSX | PPC_OPCODE_VSX3;
/* Get the major opcode of the insn. */
opcode = NULL;


@@ -247,6 +247,7 @@ void prom_sun4v_guest_soft_state(void);
int prom_ihandle2path(int handle, char *buffer, int bufsize);
/* Client interface level routines. */
void prom_cif_init(void *cif_handler);
void p1275_cmd_direct(unsigned long *);
#endif /* !(__SPARC64_OPLIB_H) */


@@ -26,9 +26,6 @@ phandle prom_chosen_node;
* routines in the prom library.
* It gets passed the pointer to the PROM vector.
*/
void __init prom_init(void *cif_handler)
{
phandle node;


@@ -49,7 +49,7 @@ void p1275_cmd_direct(unsigned long *args)
local_irq_restore(flags);
}
void prom_cif_init(void *cif_handler)
{
p1275buf.prom_cif_handler = (void (*)(long *))cif_handler;
}


@@ -75,7 +75,7 @@ static struct pt_cap_desc {
PT_CAP(psb_periods, 1, CPUID_EBX, 0xffff0000), PT_CAP(psb_periods, 1, CPUID_EBX, 0xffff0000),
}; };
static u32 pt_cap_get(enum pt_capabilities cap) u32 intel_pt_validate_hw_cap(enum pt_capabilities cap)
{ {
struct pt_cap_desc *cd = &pt_caps[cap]; struct pt_cap_desc *cd = &pt_caps[cap];
u32 c = pt_pmu.caps[cd->leaf * PT_CPUID_REGS_NUM + cd->reg]; u32 c = pt_pmu.caps[cd->leaf * PT_CPUID_REGS_NUM + cd->reg];
@@ -83,6 +83,7 @@ static u32 pt_cap_get(enum pt_capabilities cap)
return (c & cd->mask) >> shift; return (c & cd->mask) >> shift;
} }
EXPORT_SYMBOL_GPL(intel_pt_validate_hw_cap);
static ssize_t pt_cap_show(struct device *cdev, static ssize_t pt_cap_show(struct device *cdev,
struct device_attribute *attr, struct device_attribute *attr,
@@ -92,7 +93,7 @@ static ssize_t pt_cap_show(struct device *cdev,
container_of(attr, struct dev_ext_attribute, attr); container_of(attr, struct dev_ext_attribute, attr);
enum pt_capabilities cap = (long)ea->var; enum pt_capabilities cap = (long)ea->var;
return snprintf(buf, PAGE_SIZE, "%x\n", pt_cap_get(cap)); return snprintf(buf, PAGE_SIZE, "%x\n", intel_pt_validate_hw_cap(cap));
} }
static struct attribute_group pt_cap_group = { static struct attribute_group pt_cap_group = {
@@ -310,16 +311,16 @@ static bool pt_event_valid(struct perf_event *event)
return false; return false;
if (config & RTIT_CTL_CYC_PSB) { if (config & RTIT_CTL_CYC_PSB) {
if (!pt_cap_get(PT_CAP_psb_cyc)) if (!intel_pt_validate_hw_cap(PT_CAP_psb_cyc))
return false; return false;
allowed = pt_cap_get(PT_CAP_psb_periods); allowed = intel_pt_validate_hw_cap(PT_CAP_psb_periods);
requested = (config & RTIT_CTL_PSB_FREQ) >> requested = (config & RTIT_CTL_PSB_FREQ) >>
RTIT_CTL_PSB_FREQ_OFFSET; RTIT_CTL_PSB_FREQ_OFFSET;
if (requested && (!(allowed & BIT(requested)))) if (requested && (!(allowed & BIT(requested))))
return false; return false;
allowed = pt_cap_get(PT_CAP_cycle_thresholds); allowed = intel_pt_validate_hw_cap(PT_CAP_cycle_thresholds);
requested = (config & RTIT_CTL_CYC_THRESH) >> requested = (config & RTIT_CTL_CYC_THRESH) >>
RTIT_CTL_CYC_THRESH_OFFSET; RTIT_CTL_CYC_THRESH_OFFSET;
if (requested && (!(allowed & BIT(requested)))) if (requested && (!(allowed & BIT(requested))))
@@ -334,10 +335,10 @@ static bool pt_event_valid(struct perf_event *event)
* Spec says that setting mtc period bits while mtc bit in * Spec says that setting mtc period bits while mtc bit in
* CPUID is 0 will #GP, so better safe than sorry. * CPUID is 0 will #GP, so better safe than sorry.
*/ */
if (!pt_cap_get(PT_CAP_mtc)) if (!intel_pt_validate_hw_cap(PT_CAP_mtc))
return false; return false;
allowed = pt_cap_get(PT_CAP_mtc_periods); allowed = intel_pt_validate_hw_cap(PT_CAP_mtc_periods);
if (!allowed) if (!allowed)
return false; return false;
@@ -349,11 +350,11 @@ static bool pt_event_valid(struct perf_event *event)
} }
if (config & RTIT_CTL_PWR_EVT_EN && if (config & RTIT_CTL_PWR_EVT_EN &&
!pt_cap_get(PT_CAP_power_event_trace)) !intel_pt_validate_hw_cap(PT_CAP_power_event_trace))
return false; return false;
if (config & RTIT_CTL_PTW) { if (config & RTIT_CTL_PTW) {
if (!pt_cap_get(PT_CAP_ptwrite)) if (!intel_pt_validate_hw_cap(PT_CAP_ptwrite))
return false; return false;
/* FUPonPTW without PTW doesn't make sense */ /* FUPonPTW without PTW doesn't make sense */
@@ -545,16 +546,8 @@ static void pt_config_buffer(void *buf, unsigned int topa_idx,
wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, reg); wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, reg);
} }
/*
* Keep ToPA table-related metadata on the same page as the actual table,
* taking up a few words from the top
*/
#define TENTS_PER_PAGE (((PAGE_SIZE - 40) / sizeof(struct topa_entry)) - 1)
/** /**
* struct topa - page-sized ToPA table with metadata at the top * struct topa - ToPA metadata
* @table: actual ToPA table entries, as understood by PT hardware
* @list: linkage to struct pt_buffer's list of tables * @list: linkage to struct pt_buffer's list of tables
* @phys: physical address of this page * @phys: physical address of this page
* @offset: offset of the first entry in this table in the buffer * @offset: offset of the first entry in this table in the buffer
@@ -562,7 +555,6 @@ static void pt_config_buffer(void *buf, unsigned int topa_idx,
* @last: index of the last initialized entry in this table * @last: index of the last initialized entry in this table
*/ */
struct topa { struct topa {
struct topa_entry table[TENTS_PER_PAGE];
struct list_head list; struct list_head list;
u64 phys; u64 phys;
u64 offset; u64 offset;
@@ -570,8 +562,40 @@ struct topa {
int last; int last;
}; };
/*
* Keep ToPA table-related metadata on the same page as the actual table,
* taking up a few words from the top
*/
#define TENTS_PER_PAGE \
((PAGE_SIZE - sizeof(struct topa)) / sizeof(struct topa_entry))
/**
* struct topa_page - page-sized ToPA table with metadata at the top
* @table: actual ToPA table entries, as understood by PT hardware
* @topa: metadata
*/
struct topa_page {
struct topa_entry table[TENTS_PER_PAGE];
struct topa topa;
};
static inline struct topa_page *topa_to_page(struct topa *topa)
{
return container_of(topa, struct topa_page, topa);
}
static inline struct topa_page *topa_entry_to_page(struct topa_entry *te)
{
return (struct topa_page *)((unsigned long)te & PAGE_MASK);
}
/* make -1 stand for the last table entry */ /* make -1 stand for the last table entry */
#define TOPA_ENTRY(t, i) ((i) == -1 ? &(t)->table[(t)->last] : &(t)->table[(i)]) #define TOPA_ENTRY(t, i) \
((i) == -1 \
? &topa_to_page(t)->table[(t)->last] \
: &topa_to_page(t)->table[(i)])
#define TOPA_ENTRY_SIZE(t, i) (sizes(TOPA_ENTRY((t), (i))->size))
/** /**
* topa_alloc() - allocate page-sized ToPA table * topa_alloc() - allocate page-sized ToPA table
@@ -583,27 +607,27 @@ struct topa {
static struct topa *topa_alloc(int cpu, gfp_t gfp) static struct topa *topa_alloc(int cpu, gfp_t gfp)
{ {
int node = cpu_to_node(cpu); int node = cpu_to_node(cpu);
struct topa *topa; struct topa_page *tp;
struct page *p; struct page *p;
p = alloc_pages_node(node, gfp | __GFP_ZERO, 0); p = alloc_pages_node(node, gfp | __GFP_ZERO, 0);
if (!p) if (!p)
return NULL; return NULL;
topa = page_address(p); tp = page_address(p);
topa->last = 0; tp->topa.last = 0;
topa->phys = page_to_phys(p); tp->topa.phys = page_to_phys(p);
/* /*
* In case of singe-entry ToPA, always put the self-referencing END * In case of singe-entry ToPA, always put the self-referencing END
* link as the 2nd entry in the table * link as the 2nd entry in the table
*/ */
if (!pt_cap_get(PT_CAP_topa_multiple_entries)) { if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) {
TOPA_ENTRY(topa, 1)->base = topa->phys >> TOPA_SHIFT; TOPA_ENTRY(&tp->topa, 1)->base = tp->topa.phys;
TOPA_ENTRY(topa, 1)->end = 1; TOPA_ENTRY(&tp->topa, 1)->end = 1;
} }
return topa; return &tp->topa;
} }
/** /**
@@ -638,7 +662,7 @@ static void topa_insert_table(struct pt_buffer *buf, struct topa *topa)
topa->offset = last->offset + last->size; topa->offset = last->offset + last->size;
buf->last = topa; buf->last = topa;
if (!pt_cap_get(PT_CAP_topa_multiple_entries)) if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
return; return;
BUG_ON(last->last != TENTS_PER_PAGE - 1); BUG_ON(last->last != TENTS_PER_PAGE - 1);
@@ -654,7 +678,7 @@ static void topa_insert_table(struct pt_buffer *buf, struct topa *topa)
static bool topa_table_full(struct topa *topa) static bool topa_table_full(struct topa *topa)
{ {
/* single-entry ToPA is a special case */ /* single-entry ToPA is a special case */
if (!pt_cap_get(PT_CAP_topa_multiple_entries)) if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
return !!topa->last; return !!topa->last;
return topa->last == TENTS_PER_PAGE - 1; return topa->last == TENTS_PER_PAGE - 1;
@@ -690,7 +714,8 @@ static int topa_insert_pages(struct pt_buffer *buf, gfp_t gfp)
TOPA_ENTRY(topa, -1)->base = page_to_phys(p) >> TOPA_SHIFT; TOPA_ENTRY(topa, -1)->base = page_to_phys(p) >> TOPA_SHIFT;
TOPA_ENTRY(topa, -1)->size = order; TOPA_ENTRY(topa, -1)->size = order;
if (!buf->snapshot && !pt_cap_get(PT_CAP_topa_multiple_entries)) { if (!buf->snapshot &&
!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) {
TOPA_ENTRY(topa, -1)->intr = 1; TOPA_ENTRY(topa, -1)->intr = 1;
TOPA_ENTRY(topa, -1)->stop = 1; TOPA_ENTRY(topa, -1)->stop = 1;
} }
@@ -712,22 +737,23 @@ static void pt_topa_dump(struct pt_buffer *buf)
struct topa *topa; struct topa *topa;
list_for_each_entry(topa, &buf->tables, list) { list_for_each_entry(topa, &buf->tables, list) {
struct topa_page *tp = topa_to_page(topa);
int i; int i;
pr_debug("# table @%p (%016Lx), off %llx size %zx\n", topa->table, pr_debug("# table @%p (%016Lx), off %llx size %zx\n", tp->table,
topa->phys, topa->offset, topa->size); topa->phys, topa->offset, topa->size);
for (i = 0; i < TENTS_PER_PAGE; i++) { for (i = 0; i < TENTS_PER_PAGE; i++) {
pr_debug("# entry @%p (%lx sz %u %c%c%c) raw=%16llx\n", pr_debug("# entry @%p (%lx sz %u %c%c%c) raw=%16llx\n",
&topa->table[i], &tp->table[i],
(unsigned long)topa->table[i].base << TOPA_SHIFT, (unsigned long)tp->table[i].base << TOPA_SHIFT,
sizes(topa->table[i].size), sizes(tp->table[i].size),
topa->table[i].end ? 'E' : ' ', tp->table[i].end ? 'E' : ' ',
topa->table[i].intr ? 'I' : ' ', tp->table[i].intr ? 'I' : ' ',
topa->table[i].stop ? 'S' : ' ', tp->table[i].stop ? 'S' : ' ',
*(u64 *)&topa->table[i]); *(u64 *)&tp->table[i]);
if ((pt_cap_get(PT_CAP_topa_multiple_entries) && if ((intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) &&
topa->table[i].stop) || tp->table[i].stop) ||
topa->table[i].end) tp->table[i].end)
break; break;
} }
} }
@@ -770,7 +796,7 @@ static void pt_update_head(struct pt *pt)
/* offset of the current output region within this table */ /* offset of the current output region within this table */
for (topa_idx = 0; topa_idx < buf->cur_idx; topa_idx++) for (topa_idx = 0; topa_idx < buf->cur_idx; topa_idx++)
base += sizes(buf->cur->table[topa_idx].size); base += TOPA_ENTRY_SIZE(buf->cur, topa_idx);
if (buf->snapshot) { if (buf->snapshot) {
local_set(&buf->data_size, base); local_set(&buf->data_size, base);
@@ -790,7 +816,7 @@ static void pt_update_head(struct pt *pt)
*/ */
static void *pt_buffer_region(struct pt_buffer *buf) static void *pt_buffer_region(struct pt_buffer *buf)
{ {
return phys_to_virt(buf->cur->table[buf->cur_idx].base << TOPA_SHIFT); return phys_to_virt((phys_addr_t)TOPA_ENTRY(buf->cur, buf->cur_idx)->base << TOPA_SHIFT);
} }
/** /**
@@ -799,7 +825,7 @@ static void *pt_buffer_region(struct pt_buffer *buf)
*/ */
static size_t pt_buffer_region_size(struct pt_buffer *buf) static size_t pt_buffer_region_size(struct pt_buffer *buf)
{ {
return sizes(buf->cur->table[buf->cur_idx].size); return TOPA_ENTRY_SIZE(buf->cur, buf->cur_idx);
} }
/** /**
@@ -828,8 +854,8 @@ static void pt_handle_status(struct pt *pt)
* means we are already losing data; need to let the decoder * means we are already losing data; need to let the decoder
* know. * know.
*/ */
if (!pt_cap_get(PT_CAP_topa_multiple_entries) || if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) ||
buf->output_off == sizes(TOPA_ENTRY(buf->cur, buf->cur_idx)->size)) { buf->output_off == pt_buffer_region_size(buf)) {
perf_aux_output_flag(&pt->handle, perf_aux_output_flag(&pt->handle,
PERF_AUX_FLAG_TRUNCATED); PERF_AUX_FLAG_TRUNCATED);
advance++; advance++;
@@ -840,7 +866,8 @@ static void pt_handle_status(struct pt *pt)
* Also on single-entry ToPA implementations, interrupt will come * Also on single-entry ToPA implementations, interrupt will come
* before the output reaches its output region's boundary. * before the output reaches its output region's boundary.
*/ */
if (!pt_cap_get(PT_CAP_topa_multiple_entries) && !buf->snapshot && if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) &&
!buf->snapshot &&
pt_buffer_region_size(buf) - buf->output_off <= TOPA_PMI_MARGIN) { pt_buffer_region_size(buf) - buf->output_off <= TOPA_PMI_MARGIN) {
void *head = pt_buffer_region(buf); void *head = pt_buffer_region(buf);
@@ -866,9 +893,11 @@ static void pt_handle_status(struct pt *pt)
static void pt_read_offset(struct pt_buffer *buf) static void pt_read_offset(struct pt_buffer *buf)
{ {
u64 offset, base_topa; u64 offset, base_topa;
struct topa_page *tp;
rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, base_topa); rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, base_topa);
buf->cur = phys_to_virt(base_topa); tp = phys_to_virt(base_topa);
buf->cur = &tp->topa;
rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, offset); rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, offset);
/* offset within current output region */ /* offset within current output region */
@@ -923,15 +952,14 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf,
unsigned long idx, npages, wakeup; unsigned long idx, npages, wakeup;
/* can't stop in the middle of an output region */ /* can't stop in the middle of an output region */
if (buf->output_off + handle->size + 1 < if (buf->output_off + handle->size + 1 < pt_buffer_region_size(buf)) {
sizes(TOPA_ENTRY(buf->cur, buf->cur_idx)->size)) {
perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED); perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
return -EINVAL; return -EINVAL;
} }
/* single entry ToPA is handled by marking all regions STOP=1 INT=1 */ /* single entry ToPA is handled by marking all regions STOP=1 INT=1 */
if (!pt_cap_get(PT_CAP_topa_multiple_entries)) if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
return 0; return 0;
/* clear STOP and INT from current entry */ /* clear STOP and INT from current entry */
@@ -1019,6 +1047,7 @@ static void pt_buffer_setup_topa_index(struct pt_buffer *buf)
*/ */
static void pt_buffer_reset_offsets(struct pt_buffer *buf, unsigned long head) static void pt_buffer_reset_offsets(struct pt_buffer *buf, unsigned long head)
{ {
struct topa_page *cur_tp;
int pg; int pg;
if (buf->snapshot) if (buf->snapshot)
@@ -1027,10 +1056,10 @@ static void pt_buffer_reset_offsets(struct pt_buffer *buf, unsigned long head)
pg = (head >> PAGE_SHIFT) & (buf->nr_pages - 1); pg = (head >> PAGE_SHIFT) & (buf->nr_pages - 1);
pg = pt_topa_next_entry(buf, pg); pg = pt_topa_next_entry(buf, pg);
buf->cur = (struct topa *)((unsigned long)buf->topa_index[pg] & PAGE_MASK); cur_tp = topa_entry_to_page(buf->topa_index[pg]);
buf->cur_idx = ((unsigned long)buf->topa_index[pg] - buf->cur = &cur_tp->topa;
(unsigned long)buf->cur) / sizeof(struct topa_entry); buf->cur_idx = buf->topa_index[pg] - TOPA_ENTRY(buf->cur, 0);
buf->output_off = head & (sizes(buf->cur->table[buf->cur_idx].size) - 1); buf->output_off = head & (pt_buffer_region_size(buf) - 1);
local64_set(&buf->head, head); local64_set(&buf->head, head);
local_set(&buf->data_size, 0); local_set(&buf->data_size, 0);
@@ -1082,7 +1111,7 @@ static int pt_buffer_init_topa(struct pt_buffer *buf, unsigned long nr_pages,
pt_buffer_setup_topa_index(buf); pt_buffer_setup_topa_index(buf);
/* link last table to the first one, unless we're double buffering */ /* link last table to the first one, unless we're double buffering */
if (pt_cap_get(PT_CAP_topa_multiple_entries)) { if (intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) {
TOPA_ENTRY(buf->last, -1)->base = buf->first->phys >> TOPA_SHIFT; TOPA_ENTRY(buf->last, -1)->base = buf->first->phys >> TOPA_SHIFT;
TOPA_ENTRY(buf->last, -1)->end = 1; TOPA_ENTRY(buf->last, -1)->end = 1;
} }
@@ -1154,7 +1183,7 @@ static int pt_addr_filters_init(struct perf_event *event)
struct pt_filters *filters; struct pt_filters *filters;
int node = event->cpu == -1 ? -1 : cpu_to_node(event->cpu); int node = event->cpu == -1 ? -1 : cpu_to_node(event->cpu);
if (!pt_cap_get(PT_CAP_num_address_ranges)) if (!intel_pt_validate_hw_cap(PT_CAP_num_address_ranges))
return 0; return 0;
filters = kzalloc_node(sizeof(struct pt_filters), GFP_KERNEL, node); filters = kzalloc_node(sizeof(struct pt_filters), GFP_KERNEL, node);
@@ -1203,7 +1232,7 @@ static int pt_event_addr_filters_validate(struct list_head *filters)
return -EINVAL; return -EINVAL;
} }
if (++range > pt_cap_get(PT_CAP_num_address_ranges)) if (++range > intel_pt_validate_hw_cap(PT_CAP_num_address_ranges))
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
@@ -1294,7 +1323,7 @@ void intel_pt_interrupt(void)
return; return;
} }
pt_config_buffer(buf->cur->table, buf->cur_idx, pt_config_buffer(topa_to_page(buf->cur)->table, buf->cur_idx,
buf->output_off); buf->output_off);
pt_config(event); pt_config(event);
} }
@@ -1359,7 +1388,7 @@ static void pt_event_start(struct perf_event *event, int mode)
WRITE_ONCE(pt->handle_nmi, 1); WRITE_ONCE(pt->handle_nmi, 1);
hwc->state = 0; hwc->state = 0;
pt_config_buffer(buf->cur->table, buf->cur_idx, pt_config_buffer(topa_to_page(buf->cur)->table, buf->cur_idx,
buf->output_off); buf->output_off);
pt_config(event); pt_config(event);
@@ -1509,12 +1538,12 @@ static __init int pt_init(void)
if (ret) if (ret)
return ret; return ret;
if (!pt_cap_get(PT_CAP_topa_output)) { if (!intel_pt_validate_hw_cap(PT_CAP_topa_output)) {
pr_warn("ToPA output is not supported on this CPU\n"); pr_warn("ToPA output is not supported on this CPU\n");
return -ENODEV; return -ENODEV;
} }
if (!pt_cap_get(PT_CAP_topa_multiple_entries)) if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
pt_pmu.pmu.capabilities = pt_pmu.pmu.capabilities =
PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_AUX_SW_DOUBLEBUF; PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_AUX_SW_DOUBLEBUF;
@@ -1532,7 +1561,7 @@ static __init int pt_init(void)
pt_pmu.pmu.addr_filters_sync = pt_event_addr_filters_sync; pt_pmu.pmu.addr_filters_sync = pt_event_addr_filters_sync;
pt_pmu.pmu.addr_filters_validate = pt_event_addr_filters_validate; pt_pmu.pmu.addr_filters_validate = pt_event_addr_filters_validate;
pt_pmu.pmu.nr_addr_filters = pt_pmu.pmu.nr_addr_filters =
pt_cap_get(PT_CAP_num_address_ranges); intel_pt_validate_hw_cap(PT_CAP_num_address_ranges);
ret = perf_pmu_register(&pt_pmu.pmu, "intel_pt", -1); ret = perf_pmu_register(&pt_pmu.pmu, "intel_pt", -1);


@@ -78,34 +78,13 @@ struct topa_entry {
u64 rsvd2 : 1; u64 rsvd2 : 1;
u64 size : 4; u64 size : 4;
u64 rsvd3 : 2; u64 rsvd3 : 2;
u64 base : 36; u64 base : 40;
u64 rsvd4 : 16; u64 rsvd4 : 12;
}; };
#define PT_CPUID_LEAVES 2
#define PT_CPUID_REGS_NUM 4 /* number of regsters (eax, ebx, ecx, edx) */
/* TSC to Core Crystal Clock Ratio */ /* TSC to Core Crystal Clock Ratio */
#define CPUID_TSC_LEAF 0x15 #define CPUID_TSC_LEAF 0x15
enum pt_capabilities {
PT_CAP_max_subleaf = 0,
PT_CAP_cr3_filtering,
PT_CAP_psb_cyc,
PT_CAP_ip_filtering,
PT_CAP_mtc,
PT_CAP_ptwrite,
PT_CAP_power_event_trace,
PT_CAP_topa_output,
PT_CAP_topa_multiple_entries,
PT_CAP_single_range_output,
PT_CAP_payloads_lip,
PT_CAP_num_address_ranges,
PT_CAP_mtc_periods,
PT_CAP_cycle_thresholds,
PT_CAP_psb_periods,
};
struct pt_pmu { struct pt_pmu {
struct pmu pmu; struct pmu pmu;
u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES]; u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];


@@ -2,10 +2,33 @@
#ifndef _ASM_X86_INTEL_PT_H #ifndef _ASM_X86_INTEL_PT_H
#define _ASM_X86_INTEL_PT_H #define _ASM_X86_INTEL_PT_H
#define PT_CPUID_LEAVES 2
#define PT_CPUID_REGS_NUM 4 /* number of regsters (eax, ebx, ecx, edx) */
enum pt_capabilities {
PT_CAP_max_subleaf = 0,
PT_CAP_cr3_filtering,
PT_CAP_psb_cyc,
PT_CAP_ip_filtering,
PT_CAP_mtc,
PT_CAP_ptwrite,
PT_CAP_power_event_trace,
PT_CAP_topa_output,
PT_CAP_topa_multiple_entries,
PT_CAP_single_range_output,
PT_CAP_payloads_lip,
PT_CAP_num_address_ranges,
PT_CAP_mtc_periods,
PT_CAP_cycle_thresholds,
PT_CAP_psb_periods,
};
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL) #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
void cpu_emergency_stop_pt(void); void cpu_emergency_stop_pt(void);
extern u32 intel_pt_validate_hw_cap(enum pt_capabilities cap);
#else #else
static inline void cpu_emergency_stop_pt(void) {} static inline void cpu_emergency_stop_pt(void) {}
static inline u32 intel_pt_validate_hw_cap(enum pt_capabilities cap) { return 0; }
#endif #endif
#endif /* _ASM_X86_INTEL_PT_H */ #endif /* _ASM_X86_INTEL_PT_H */


@@ -819,7 +819,7 @@ void mtrr_save_state(void)
{ {
int first_cpu; int first_cpu;
if (!mtrr_enabled()) if (!mtrr_enabled() || !mtrr_state.have_fixed)
return; return;
first_cpu = cpumask_first(cpu_online_mask); first_cpu = cpumask_first(cpu_online_mask);


@@ -90,7 +90,7 @@ static int x86_of_pci_irq_enable(struct pci_dev *dev)
ret = pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); ret = pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
if (ret) if (ret)
return ret; return pcibios_err_to_errno(ret);
if (!pin) if (!pin)
return 0; return 0;


@@ -383,14 +383,14 @@ pti_clone_pgtable(unsigned long start, unsigned long end,
*/ */
*target_pmd = *pmd; *target_pmd = *pmd;
addr += PMD_SIZE; addr = round_up(addr + 1, PMD_SIZE);
} else if (level == PTI_CLONE_PTE) { } else if (level == PTI_CLONE_PTE) {
/* Walk the page-table down to the pte level */ /* Walk the page-table down to the pte level */
pte = pte_offset_kernel(pmd, addr); pte = pte_offset_kernel(pmd, addr);
if (pte_none(*pte)) { if (pte_none(*pte)) {
addr += PAGE_SIZE; addr = round_up(addr + 1, PAGE_SIZE);
continue; continue;
} }
@@ -410,7 +410,7 @@ pti_clone_pgtable(unsigned long start, unsigned long end,
/* Clone the PTE */ /* Clone the PTE */
*target_pte = *pte; *target_pte = *pte;
addr += PAGE_SIZE; addr = round_up(addr + 1, PAGE_SIZE);
} else { } else {
BUG(); BUG();


@@ -223,9 +223,9 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
return 0; return 0;
ret = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi); ret = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi);
if (ret < 0) { if (ret) {
dev_warn(&dev->dev, "Failed to read interrupt line: %d\n", ret); dev_warn(&dev->dev, "Failed to read interrupt line: %d\n", ret);
return ret; return pcibios_err_to_errno(ret);
} }
switch (intel_mid_identify_cpu()) { switch (intel_mid_identify_cpu()) {


@@ -36,10 +36,10 @@ static int xen_pcifront_enable_irq(struct pci_dev *dev)
u8 gsi; u8 gsi;
rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi); rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi);
if (rc < 0) { if (rc) {
dev_warn(&dev->dev, "Xen PCI: failed to read interrupt line: %d\n", dev_warn(&dev->dev, "Xen PCI: failed to read interrupt line: %d\n",
rc); rc);
return rc; return pcibios_err_to_errno(rc);
} }
/* In PV DomU the Xen PCI backend puts the PIRQ in the interrupt line.*/ /* In PV DomU the Xen PCI backend puts the PIRQ in the interrupt line.*/
pirq = gsi; pirq = gsi;


@@ -68,7 +68,7 @@ static int iosf_mbi_pci_read_mdr(u32 mcrx, u32 mcr, u32 *mdr)
fail_read: fail_read:
dev_err(&mbi_pdev->dev, "PCI config access failed with %d\n", result); dev_err(&mbi_pdev->dev, "PCI config access failed with %d\n", result);
return result; return pcibios_err_to_errno(result);
} }
static int iosf_mbi_pci_write_mdr(u32 mcrx, u32 mcr, u32 mdr) static int iosf_mbi_pci_write_mdr(u32 mcrx, u32 mcr, u32 mdr)
@@ -97,7 +97,7 @@ static int iosf_mbi_pci_write_mdr(u32 mcrx, u32 mcr, u32 mdr)
fail_write: fail_write:
dev_err(&mbi_pdev->dev, "PCI config access failed with %d\n", result); dev_err(&mbi_pdev->dev, "PCI config access failed with %d\n", result);
return result; return pcibios_err_to_errno(result);
} }
int iosf_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr) int iosf_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr)


@@ -733,7 +733,7 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
* immediate unmapping. * immediate unmapping.
*/ */
map_ops[i].status = GNTST_general_error; map_ops[i].status = GNTST_general_error;
unmap[0].host_addr = map_ops[i].host_addr, unmap[0].host_addr = map_ops[i].host_addr;
unmap[0].handle = map_ops[i].handle; unmap[0].handle = map_ops[i].handle;
map_ops[i].handle = ~0; map_ops[i].handle = ~0;
if (map_ops[i].flags & GNTMAP_device_map) if (map_ops[i].flags & GNTMAP_device_map)
@@ -743,7 +743,7 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
if (kmap_ops) { if (kmap_ops) {
kmap_ops[i].status = GNTST_general_error; kmap_ops[i].status = GNTST_general_error;
unmap[1].host_addr = kmap_ops[i].host_addr, unmap[1].host_addr = kmap_ops[i].host_addr;
unmap[1].handle = kmap_ops[i].handle; unmap[1].handle = kmap_ops[i].handle;
kmap_ops[i].handle = ~0; kmap_ops[i].handle = ~0;
if (kmap_ops[i].flags & GNTMAP_device_map) if (kmap_ops[i].flags & GNTMAP_device_map)


@@ -1002,9 +1002,7 @@ static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread) static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{ {
return !thread->transaction_stack && return !thread->transaction_stack &&
binder_worklist_empty_ilocked(&thread->todo) && binder_worklist_empty_ilocked(&thread->todo);
(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
BINDER_LOOPER_STATE_REGISTERED));
} }
static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc, static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,


@@ -24,6 +24,7 @@
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/pm_runtime.h> #include <linux/pm_runtime.h>
#include <linux/netdevice.h> #include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/sched/signal.h> #include <linux/sched/signal.h>
#include <linux/sysfs.h> #include <linux/sysfs.h>
@@ -1559,6 +1560,7 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj,
struct kobj_uevent_env *env) struct kobj_uevent_env *env)
{ {
struct device *dev = kobj_to_dev(kobj); struct device *dev = kobj_to_dev(kobj);
struct device_driver *driver;
int retval = 0; int retval = 0;
/* add device node properties if present */ /* add device node properties if present */
@@ -1587,8 +1589,12 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj,
if (dev->type && dev->type->name) if (dev->type && dev->type->name)
add_uevent_var(env, "DEVTYPE=%s", dev->type->name); add_uevent_var(env, "DEVTYPE=%s", dev->type->name);
if (dev->driver) /* Synchronize with module_remove_driver() */
add_uevent_var(env, "DRIVER=%s", dev->driver->name); rcu_read_lock();
driver = READ_ONCE(dev->driver);
if (driver)
add_uevent_var(env, "DRIVER=%s", driver->name);
rcu_read_unlock();
/* Add common DT information about the device */ /* Add common DT information about the device */
of_device_uevent(dev, env); of_device_uevent(dev, env);
@@ -1658,11 +1664,8 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
if (!env) if (!env)
return -ENOMEM; return -ENOMEM;
/* Synchronize with really_probe() */
device_lock(dev);
/* let the kset specific function add its keys */ /* let the kset specific function add its keys */
retval = kset->uevent_ops->uevent(kset, &dev->kobj, env); retval = kset->uevent_ops->uevent(kset, &dev->kobj, env);
device_unlock(dev);
if (retval) if (retval)
goto out; goto out;


@@ -1057,7 +1057,11 @@ EXPORT_SYMBOL_GPL(__devm_alloc_percpu);
*/ */
void devm_free_percpu(struct device *dev, void __percpu *pdata) void devm_free_percpu(struct device *dev, void __percpu *pdata)
{ {
WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match, /*
(void *)pdata)); * Use devres_release() to prevent memory leakage as
* devm_free_pages() does.
*/
WARN_ON(devres_release(dev, devm_percpu_release, devm_percpu_match,
(__force void *)pdata));
} }
EXPORT_SYMBOL_GPL(devm_free_percpu); EXPORT_SYMBOL_GPL(devm_free_percpu);


@@ -7,6 +7,7 @@
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/string.h> #include <linux/string.h>
#include <linux/rcupdate.h>
#include "base.h" #include "base.h"
static char *make_driver_name(struct device_driver *drv) static char *make_driver_name(struct device_driver *drv)
@@ -77,6 +78,9 @@ void module_remove_driver(struct device_driver *drv)
if (!drv) if (!drv)
return; return;
/* Synchronize with dev_uevent() */
synchronize_rcu();
sysfs_remove_link(&drv->p->kobj, "module"); sysfs_remove_link(&drv->p->kobj, "module");
if (drv->owner) if (drv->owner)


@@ -142,8 +142,10 @@ static int __init mod_init(void)
found: found:
err = pci_read_config_dword(pdev, 0x58, &pmbase); err = pci_read_config_dword(pdev, 0x58, &pmbase);
if (err) if (err) {
err = pcibios_err_to_errno(err);
goto put_dev; goto put_dev;
}
pmbase &= 0x0000FF00; pmbase &= 0x0000FF00;
if (pmbase == 0) { if (pmbase == 0) {


@@ -52,6 +52,8 @@ static int tpm_bios_measurements_open(struct inode *inode,
if (!err) { if (!err) {
seq = file->private_data; seq = file->private_data;
seq->private = chip; seq->private = chip;
} else {
put_device(&chip->dev);
} }
return err; return err;


@@ -518,6 +518,7 @@ static void sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta)
static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id) static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
{ {
struct sh_cmt_channel *ch = dev_id; struct sh_cmt_channel *ch = dev_id;
unsigned long flags;
/* clear flags */ /* clear flags */
sh_cmt_write_cmcsr(ch, sh_cmt_read_cmcsr(ch) & sh_cmt_write_cmcsr(ch, sh_cmt_read_cmcsr(ch) &
@@ -548,6 +549,8 @@ static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
ch->flags &= ~FLAG_SKIPEVENT; ch->flags &= ~FLAG_SKIPEVENT;
raw_spin_lock_irqsave(&ch->lock, flags);
if (ch->flags & FLAG_REPROGRAM) { if (ch->flags & FLAG_REPROGRAM) {
ch->flags &= ~FLAG_REPROGRAM; ch->flags &= ~FLAG_REPROGRAM;
sh_cmt_clock_event_program_verify(ch, 1); sh_cmt_clock_event_program_verify(ch, 1);
@@ -560,6 +563,8 @@ static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
ch->flags &= ~FLAG_IRQCONTEXT; ch->flags &= ~FLAG_IRQCONTEXT;
raw_spin_unlock_irqrestore(&ch->lock, flags);
return IRQ_HANDLED; return IRQ_HANDLED;
} }
@@ -758,12 +763,18 @@ static int sh_cmt_clock_event_next(unsigned long delta,
struct clock_event_device *ced) struct clock_event_device *ced)
{ {
struct sh_cmt_channel *ch = ced_to_sh_cmt(ced); struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
unsigned long flags;
BUG_ON(!clockevent_state_oneshot(ced)); BUG_ON(!clockevent_state_oneshot(ced));
raw_spin_lock_irqsave(&ch->lock, flags);
if (likely(ch->flags & FLAG_IRQCONTEXT)) if (likely(ch->flags & FLAG_IRQCONTEXT))
ch->next_match_value = delta - 1; ch->next_match_value = delta - 1;
else else
sh_cmt_set_next(ch, delta - 1); __sh_cmt_set_next(ch, delta - 1);
raw_spin_unlock_irqrestore(&ch->lock, flags);
return 0; return 0;
} }


@@ -1109,7 +1109,6 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
u32 status_reg; u32 status_reg;
u8 *buffer = msg->buffer; u8 *buffer = msg->buffer;
unsigned int i; unsigned int i;
int num_transferred = 0;
int ret; int ret;
/* Buffer size of AUX CH is 16 bytes */ /* Buffer size of AUX CH is 16 bytes */
@@ -1161,7 +1160,6 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
reg = buffer[i]; reg = buffer[i];
writel(reg, dp->reg_base + ANALOGIX_DP_BUF_DATA_0 + writel(reg, dp->reg_base + ANALOGIX_DP_BUF_DATA_0 +
4 * i); 4 * i);
num_transferred++;
} }
} }
@@ -1209,7 +1207,6 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
reg = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0 + reg = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0 +
4 * i); 4 * i);
buffer[i] = (unsigned char)reg; buffer[i] = (unsigned char)reg;
num_transferred++;
} }
} }
@@ -1226,7 +1223,7 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
(msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_READ) (msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_READ)
msg->reply = DP_AUX_NATIVE_REPLY_ACK; msg->reply = DP_AUX_NATIVE_REPLY_ACK;
return num_transferred > 0 ? num_transferred : -EBUSY; return msg->size;
aux_error: aux_error:
/* if aux err happen, reset aux */ /* if aux err happen, reset aux */


@@ -370,9 +370,11 @@ static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op) static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{ {
if (op & ETNA_PREP_READ) op &= ETNA_PREP_READ | ETNA_PREP_WRITE;
if (op == ETNA_PREP_READ)
return DMA_FROM_DEVICE; return DMA_FROM_DEVICE;
else if (op & ETNA_PREP_WRITE) else if (op == ETNA_PREP_WRITE)
return DMA_TO_DEVICE; return DMA_TO_DEVICE;
else else
return DMA_BIDIRECTIONAL; return DMA_BIDIRECTIONAL;


@@ -404,6 +404,9 @@ static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
if (mode_dev->panel_fixed_mode != NULL) { if (mode_dev->panel_fixed_mode != NULL) {
struct drm_display_mode *mode = struct drm_display_mode *mode =
drm_mode_duplicate(dev, mode_dev->panel_fixed_mode); drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
if (!mode)
return 0;
drm_mode_probed_add(connector, mode); drm_mode_probed_add(connector, mode);
return 1; return 1;
} }


@@ -519,6 +519,9 @@ static int psb_intel_lvds_get_modes(struct drm_connector *connector)
if (mode_dev->panel_fixed_mode != NULL) { if (mode_dev->panel_fixed_mode != NULL) {
struct drm_display_mode *mode = struct drm_display_mode *mode =
drm_mode_duplicate(dev, mode_dev->panel_fixed_mode); drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
if (!mode)
return 0;
drm_mode_probed_add(connector, mode); drm_mode_probed_add(connector, mode);
return 1; return 1;
} }


@@ -2009,6 +2009,39 @@ compute_partial_view(struct drm_i915_gem_object *obj,
return view; return view;
} }
static void set_address_limits(struct vm_area_struct *area,
struct i915_vma *vma,
unsigned long *start_vaddr,
unsigned long *end_vaddr)
{
unsigned long vm_start, vm_end, vma_size; /* user's memory parameters */
long start, end; /* memory boundaries */
/*
* Let's move into the ">> PAGE_SHIFT"
* domain to be sure not to lose bits
*/
vm_start = area->vm_start >> PAGE_SHIFT;
vm_end = area->vm_end >> PAGE_SHIFT;
vma_size = vma->size >> PAGE_SHIFT;
/*
* Calculate the memory boundaries by considering the offset
* provided by the user during memory mapping and the offset
* provided for the partial mapping.
*/
start = vm_start;
start += vma->ggtt_view.partial.offset;
end = start + vma_size;
start = max_t(long, start, vm_start);
end = min_t(long, end, vm_end);
/* Let's move back into the "<< PAGE_SHIFT" domain */
*start_vaddr = (unsigned long)start << PAGE_SHIFT;
*end_vaddr = (unsigned long)end << PAGE_SHIFT;
}
/** /**
* i915_gem_fault - fault a page into the GTT * i915_gem_fault - fault a page into the GTT
* @vmf: fault info * @vmf: fault info
@@ -2036,8 +2069,10 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt; struct i915_ggtt *ggtt = &dev_priv->ggtt;
bool write = !!(vmf->flags & FAULT_FLAG_WRITE); bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
unsigned long start, end; /* memory boundaries */
struct i915_vma *vma; struct i915_vma *vma;
pgoff_t page_offset; pgoff_t page_offset;
unsigned long pfn;
int ret; int ret;
/* Sanity check that we allow writing into this object */ /* Sanity check that we allow writing into this object */
@@ -2119,12 +2154,14 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
if (ret) if (ret)
goto err_unpin; goto err_unpin;
set_address_limits(area, vma, &start, &end);
pfn = (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT;
pfn += (start - area->vm_start) >> PAGE_SHIFT;
pfn -= vma->ggtt_view.partial.offset;
/* Finally, remap it using the new GTT offset */ /* Finally, remap it using the new GTT offset */
ret = remap_io_mapping(area, ret = remap_io_mapping(area, start, pfn, end - start, &ggtt->iomap);
area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
(ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
min_t(u64, vma->size, area->vm_end - area->vm_start),
&ggtt->iomap);
if (ret) if (ret)
goto err_fence; goto err_fence;


@@ -133,7 +133,7 @@ struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev)
i2c->adapter.algo_data = &i2c->bit; i2c->adapter.algo_data = &i2c->bit;
i2c->bit.udelay = 10; i2c->bit.udelay = 10;
i2c->bit.timeout = 2; i2c->bit.timeout = usecs_to_jiffies(2200);
i2c->bit.data = i2c; i2c->bit.data = i2c;
i2c->bit.setsda = mga_gpio_setsda; i2c->bit.setsda = mga_gpio_setsda;
i2c->bit.setscl = mga_gpio_setscl; i2c->bit.setscl = mga_gpio_setscl;


@@ -100,7 +100,7 @@ static int vmw_overlay_send_put(struct vmw_private *dev_priv,
{ {
struct vmw_escape_video_flush *flush; struct vmw_escape_video_flush *flush;
size_t fifo_size; size_t fifo_size;
bool have_so = (dev_priv->active_display_unit == vmw_du_screen_object); bool have_so = (dev_priv->active_display_unit != vmw_du_legacy);
int i, num_items; int i, num_items;
SVGAGuestPtr ptr; SVGAGuestPtr ptr;


@@ -1785,7 +1785,7 @@ static void adt7475_read_pwm(struct i2c_client *client, int index)
data->pwm[CONTROL][index] &= ~0xE0; data->pwm[CONTROL][index] &= ~0xE0;
data->pwm[CONTROL][index] |= (7 << 5); data->pwm[CONTROL][index] |= (7 << 5);
i2c_smbus_write_byte_data(client, PWM_CONFIG_REG(index), i2c_smbus_write_byte_data(client, PWM_REG(index),
data->pwm[INPUT][index]); data->pwm[INPUT][index]);
i2c_smbus_write_byte_data(client, PWM_CONFIG_REG(index), i2c_smbus_write_byte_data(client, PWM_CONFIG_REG(index),


@@ -251,7 +251,7 @@ static struct max6697_data *max6697_update_device(struct device *dev)
return ret; return ret;
} }
static ssize_t show_temp_input(struct device *dev, static ssize_t temp_input_show(struct device *dev,
struct device_attribute *devattr, char *buf) struct device_attribute *devattr, char *buf)
{ {
int index = to_sensor_dev_attr(devattr)->index; int index = to_sensor_dev_attr(devattr)->index;
@@ -267,8 +267,8 @@ static ssize_t show_temp_input(struct device *dev,
return sprintf(buf, "%d\n", temp * 125); return sprintf(buf, "%d\n", temp * 125);
} }
static ssize_t show_temp(struct device *dev, static ssize_t temp_show(struct device *dev, struct device_attribute *devattr,
struct device_attribute *devattr, char *buf) char *buf)
{ {
int nr = to_sensor_dev_attr_2(devattr)->nr; int nr = to_sensor_dev_attr_2(devattr)->nr;
int index = to_sensor_dev_attr_2(devattr)->index; int index = to_sensor_dev_attr_2(devattr)->index;
@@ -284,7 +284,7 @@ static ssize_t show_temp(struct device *dev,
return sprintf(buf, "%d\n", temp * 1000); return sprintf(buf, "%d\n", temp * 1000);
} }
static ssize_t show_alarm(struct device *dev, struct device_attribute *attr, static ssize_t alarm_show(struct device *dev, struct device_attribute *attr,
char *buf) char *buf)
{ {
int index = to_sensor_dev_attr(attr)->index; int index = to_sensor_dev_attr(attr)->index;
@@ -299,9 +299,9 @@ static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%u\n", (data->alarms >> index) & 0x1); return sprintf(buf, "%u\n", (data->alarms >> index) & 0x1);
} }
static ssize_t set_temp(struct device *dev, static ssize_t temp_store(struct device *dev,
struct device_attribute *devattr, struct device_attribute *devattr, const char *buf,
const char *buf, size_t count) size_t count)
{ {
int nr = to_sensor_dev_attr_2(devattr)->nr; int nr = to_sensor_dev_attr_2(devattr)->nr;
int index = to_sensor_dev_attr_2(devattr)->index; int index = to_sensor_dev_attr_2(devattr)->index;
@@ -314,6 +314,7 @@ static ssize_t set_temp(struct device *dev,
return ret; return ret;
mutex_lock(&data->update_lock); mutex_lock(&data->update_lock);
temp = clamp_val(temp, -1000000, 1000000); /* prevent underflow */
temp = DIV_ROUND_CLOSEST(temp, 1000) + data->temp_offset; temp = DIV_ROUND_CLOSEST(temp, 1000) + data->temp_offset;
temp = clamp_val(temp, 0, data->type == max6581 ? 255 : 127); temp = clamp_val(temp, 0, data->type == max6581 ? 255 : 127);
data->temp[nr][index] = temp; data->temp[nr][index] = temp;
@@ -326,79 +327,63 @@ static ssize_t set_temp(struct device *dev,
return ret < 0 ? ret : count; return ret < 0 ? ret : count;
} }
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp_input, NULL, 0); static SENSOR_DEVICE_ATTR_RO(temp1_input, temp_input, 0);
static SENSOR_DEVICE_ATTR_2(temp1_max, S_IRUGO | S_IWUSR, show_temp, set_temp, static SENSOR_DEVICE_ATTR_2_RW(temp1_max, temp, 0, MAX6697_TEMP_MAX);
0, MAX6697_TEMP_MAX); static SENSOR_DEVICE_ATTR_2_RW(temp1_crit, temp, 0, MAX6697_TEMP_CRIT);
static SENSOR_DEVICE_ATTR_2(temp1_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
0, MAX6697_TEMP_CRIT);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp_input, NULL, 1); static SENSOR_DEVICE_ATTR_RO(temp2_input, temp_input, 1);
static SENSOR_DEVICE_ATTR_2(temp2_max, S_IRUGO | S_IWUSR, show_temp, set_temp, static SENSOR_DEVICE_ATTR_2_RW(temp2_max, temp, 1, MAX6697_TEMP_MAX);
1, MAX6697_TEMP_MAX); static SENSOR_DEVICE_ATTR_2_RW(temp2_crit, temp, 1, MAX6697_TEMP_CRIT);
static SENSOR_DEVICE_ATTR_2(temp2_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
1, MAX6697_TEMP_CRIT);
static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp_input, NULL, 2); static SENSOR_DEVICE_ATTR_RO(temp3_input, temp_input, 2);
static SENSOR_DEVICE_ATTR_2(temp3_max, S_IRUGO | S_IWUSR, show_temp, set_temp, static SENSOR_DEVICE_ATTR_2_RW(temp3_max, temp, 2, MAX6697_TEMP_MAX);
2, MAX6697_TEMP_MAX); static SENSOR_DEVICE_ATTR_2_RW(temp3_crit, temp, 2, MAX6697_TEMP_CRIT);
static SENSOR_DEVICE_ATTR_2(temp3_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
2, MAX6697_TEMP_CRIT);
static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_temp_input, NULL, 3); static SENSOR_DEVICE_ATTR_RO(temp4_input, temp_input, 3);
static SENSOR_DEVICE_ATTR_2(temp4_max, S_IRUGO | S_IWUSR, show_temp, set_temp, static SENSOR_DEVICE_ATTR_2_RW(temp4_max, temp, 3, MAX6697_TEMP_MAX);
3, MAX6697_TEMP_MAX); static SENSOR_DEVICE_ATTR_2_RW(temp4_crit, temp, 3, MAX6697_TEMP_CRIT);
static SENSOR_DEVICE_ATTR_2(temp4_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
3, MAX6697_TEMP_CRIT);
static SENSOR_DEVICE_ATTR(temp5_input, S_IRUGO, show_temp_input, NULL, 4); static SENSOR_DEVICE_ATTR_RO(temp5_input, temp_input, 4);
static SENSOR_DEVICE_ATTR_2(temp5_max, S_IRUGO | S_IWUSR, show_temp, set_temp, static SENSOR_DEVICE_ATTR_2_RW(temp5_max, temp, 4, MAX6697_TEMP_MAX);
4, MAX6697_TEMP_MAX); static SENSOR_DEVICE_ATTR_2_RW(temp5_crit, temp, 4, MAX6697_TEMP_CRIT);
static SENSOR_DEVICE_ATTR_2(temp5_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
4, MAX6697_TEMP_CRIT);
static SENSOR_DEVICE_ATTR(temp6_input, S_IRUGO, show_temp_input, NULL, 5); static SENSOR_DEVICE_ATTR_RO(temp6_input, temp_input, 5);
static SENSOR_DEVICE_ATTR_2(temp6_max, S_IRUGO | S_IWUSR, show_temp, set_temp, static SENSOR_DEVICE_ATTR_2_RW(temp6_max, temp, 5, MAX6697_TEMP_MAX);
5, MAX6697_TEMP_MAX); static SENSOR_DEVICE_ATTR_2_RW(temp6_crit, temp, 5, MAX6697_TEMP_CRIT);
static SENSOR_DEVICE_ATTR_2(temp6_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
5, MAX6697_TEMP_CRIT);
static SENSOR_DEVICE_ATTR(temp7_input, S_IRUGO, show_temp_input, NULL, 6); static SENSOR_DEVICE_ATTR_RO(temp7_input, temp_input, 6);
static SENSOR_DEVICE_ATTR_2(temp7_max, S_IRUGO | S_IWUSR, show_temp, set_temp, static SENSOR_DEVICE_ATTR_2_RW(temp7_max, temp, 6, MAX6697_TEMP_MAX);
6, MAX6697_TEMP_MAX); static SENSOR_DEVICE_ATTR_2_RW(temp7_crit, temp, 6, MAX6697_TEMP_CRIT);
static SENSOR_DEVICE_ATTR_2(temp7_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
6, MAX6697_TEMP_CRIT);
static SENSOR_DEVICE_ATTR(temp8_input, S_IRUGO, show_temp_input, NULL, 7); static SENSOR_DEVICE_ATTR_RO(temp8_input, temp_input, 7);
static SENSOR_DEVICE_ATTR_2(temp8_max, S_IRUGO | S_IWUSR, show_temp, set_temp, static SENSOR_DEVICE_ATTR_2_RW(temp8_max, temp, 7, MAX6697_TEMP_MAX);
7, MAX6697_TEMP_MAX); static SENSOR_DEVICE_ATTR_2_RW(temp8_crit, temp, 7, MAX6697_TEMP_CRIT);
static SENSOR_DEVICE_ATTR_2(temp8_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
7, MAX6697_TEMP_CRIT);
static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 22); static SENSOR_DEVICE_ATTR_RO(temp1_max_alarm, alarm, 22);
static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 16); static SENSOR_DEVICE_ATTR_RO(temp2_max_alarm, alarm, 16);
static SENSOR_DEVICE_ATTR(temp3_max_alarm, S_IRUGO, show_alarm, NULL, 17); static SENSOR_DEVICE_ATTR_RO(temp3_max_alarm, alarm, 17);
static SENSOR_DEVICE_ATTR(temp4_max_alarm, S_IRUGO, show_alarm, NULL, 18); static SENSOR_DEVICE_ATTR_RO(temp4_max_alarm, alarm, 18);
static SENSOR_DEVICE_ATTR(temp5_max_alarm, S_IRUGO, show_alarm, NULL, 19); static SENSOR_DEVICE_ATTR_RO(temp5_max_alarm, alarm, 19);
static SENSOR_DEVICE_ATTR(temp6_max_alarm, S_IRUGO, show_alarm, NULL, 20); static SENSOR_DEVICE_ATTR_RO(temp6_max_alarm, alarm, 20);
static SENSOR_DEVICE_ATTR(temp7_max_alarm, S_IRUGO, show_alarm, NULL, 21); static SENSOR_DEVICE_ATTR_RO(temp7_max_alarm, alarm, 21);
static SENSOR_DEVICE_ATTR(temp8_max_alarm, S_IRUGO, show_alarm, NULL, 23); static SENSOR_DEVICE_ATTR_RO(temp8_max_alarm, alarm, 23);
static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 14); static SENSOR_DEVICE_ATTR_RO(temp1_crit_alarm, alarm, 15);
static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, show_alarm, NULL, 8); static SENSOR_DEVICE_ATTR_RO(temp2_crit_alarm, alarm, 8);
static SENSOR_DEVICE_ATTR(temp3_crit_alarm, S_IRUGO, show_alarm, NULL, 9); static SENSOR_DEVICE_ATTR_RO(temp3_crit_alarm, alarm, 9);
static SENSOR_DEVICE_ATTR(temp4_crit_alarm, S_IRUGO, show_alarm, NULL, 10); static SENSOR_DEVICE_ATTR_RO(temp4_crit_alarm, alarm, 10);
static SENSOR_DEVICE_ATTR(temp5_crit_alarm, S_IRUGO, show_alarm, NULL, 11); static SENSOR_DEVICE_ATTR_RO(temp5_crit_alarm, alarm, 11);
static SENSOR_DEVICE_ATTR(temp6_crit_alarm, S_IRUGO, show_alarm, NULL, 12); static SENSOR_DEVICE_ATTR_RO(temp6_crit_alarm, alarm, 12);
static SENSOR_DEVICE_ATTR(temp7_crit_alarm, S_IRUGO, show_alarm, NULL, 13); static SENSOR_DEVICE_ATTR_RO(temp7_crit_alarm, alarm, 13);
static SENSOR_DEVICE_ATTR(temp8_crit_alarm, S_IRUGO, show_alarm, NULL, 15); static SENSOR_DEVICE_ATTR_RO(temp8_crit_alarm, alarm, 14);
static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 1); static SENSOR_DEVICE_ATTR_RO(temp2_fault, alarm, 1);
static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 2); static SENSOR_DEVICE_ATTR_RO(temp3_fault, alarm, 2);
static SENSOR_DEVICE_ATTR(temp4_fault, S_IRUGO, show_alarm, NULL, 3); static SENSOR_DEVICE_ATTR_RO(temp4_fault, alarm, 3);
static SENSOR_DEVICE_ATTR(temp5_fault, S_IRUGO, show_alarm, NULL, 4); static SENSOR_DEVICE_ATTR_RO(temp5_fault, alarm, 4);
static SENSOR_DEVICE_ATTR(temp6_fault, S_IRUGO, show_alarm, NULL, 5); static SENSOR_DEVICE_ATTR_RO(temp6_fault, alarm, 5);
static SENSOR_DEVICE_ATTR(temp7_fault, S_IRUGO, show_alarm, NULL, 6); static SENSOR_DEVICE_ATTR_RO(temp7_fault, alarm, 6);
static SENSOR_DEVICE_ATTR(temp8_fault, S_IRUGO, show_alarm, NULL, 7); static SENSOR_DEVICE_ATTR_RO(temp8_fault, alarm, 7);
static DEVICE_ATTR(dummy, 0, NULL, NULL); static DEVICE_ATTR(dummy, 0, NULL, NULL);


@@ -42,6 +42,7 @@ static int smbus_do_alert(struct device *dev, void *addrp)
struct i2c_client *client = i2c_verify_client(dev); struct i2c_client *client = i2c_verify_client(dev);
struct alert_data *data = addrp; struct alert_data *data = addrp;
struct i2c_driver *driver; struct i2c_driver *driver;
int ret;
if (!client || client->addr != data->addr) if (!client || client->addr != data->addr)
return 0; return 0;
@@ -55,16 +56,47 @@ static int smbus_do_alert(struct device *dev, void *addrp)
device_lock(dev); device_lock(dev);
if (client->dev.driver) { if (client->dev.driver) {
driver = to_i2c_driver(client->dev.driver); driver = to_i2c_driver(client->dev.driver);
if (driver->alert) if (driver->alert) {
/* Stop iterating after we find the device */
driver->alert(client, data->type, data->data); driver->alert(client, data->type, data->data);
else ret = -EBUSY;
} else {
dev_warn(&client->dev, "no driver alert()!\n"); dev_warn(&client->dev, "no driver alert()!\n");
} else ret = -EOPNOTSUPP;
}
} else {
dev_dbg(&client->dev, "alert with no driver\n"); dev_dbg(&client->dev, "alert with no driver\n");
ret = -ENODEV;
}
device_unlock(dev); device_unlock(dev);
/* Stop iterating after we find the device */ return ret;
return -EBUSY; }
/* Same as above, but call back all drivers with alert handler */
static int smbus_do_alert_force(struct device *dev, void *addrp)
{
struct i2c_client *client = i2c_verify_client(dev);
struct alert_data *data = addrp;
struct i2c_driver *driver;
if (!client || (client->flags & I2C_CLIENT_TEN))
return 0;
/*
* Drivers should either disable alerts, or provide at least
* a minimal handler. Lock so the driver won't change.
*/
device_lock(dev);
if (client->dev.driver) {
driver = to_i2c_driver(client->dev.driver);
if (driver->alert)
driver->alert(client, data->type, data->data);
}
device_unlock(dev);
return 0;
} }
/* /*
@@ -75,7 +107,7 @@ static irqreturn_t smbus_alert(int irq, void *d)
{ {
struct i2c_smbus_alert *alert = d; struct i2c_smbus_alert *alert = d;
struct i2c_client *ara; struct i2c_client *ara;
unsigned short prev_addr = 0; /* Not a valid address */ unsigned short prev_addr = I2C_CLIENT_END; /* Not a valid address */
ara = alert->ara; ara = alert->ara;
@@ -99,17 +131,28 @@ static irqreturn_t smbus_alert(int irq, void *d)
data.addr = status >> 1; data.addr = status >> 1;
data.type = I2C_PROTOCOL_SMBUS_ALERT; data.type = I2C_PROTOCOL_SMBUS_ALERT;
if (data.addr == prev_addr) {
dev_warn(&ara->dev, "Duplicate SMBALERT# from dev "
"0x%02x, skipping\n", data.addr);
break;
}
dev_dbg(&ara->dev, "SMBALERT# from dev 0x%02x, flag %d\n", dev_dbg(&ara->dev, "SMBALERT# from dev 0x%02x, flag %d\n",
data.addr, data.data); data.addr, data.data);
/* Notify driver for the device which issued the alert */ /* Notify driver for the device which issued the alert */
device_for_each_child(&ara->adapter->dev, &data, status = device_for_each_child(&ara->adapter->dev, &data,
smbus_do_alert); smbus_do_alert);
/*
* If we read the same address more than once, and the alert
* was not handled by a driver, it won't do any good to repeat
* the loop because it will never terminate. Try again, this
* time calling the alert handlers of all devices connected to
* the bus, and abort the loop afterwards. If this helps, we
* are all set. If it doesn't, there is nothing else we can do,
* so we might as well abort the loop.
* Note: This assumes that a driver with alert handler handles
* the alert properly and clears it if necessary.
*/
if (data.addr == prev_addr && status != -EBUSY) {
device_for_each_child(&ara->adapter->dev, &data,
smbus_do_alert_force);
break;
}
prev_addr = data.addr; prev_addr = data.addr;
} }


@@ -369,8 +369,10 @@ EXPORT_SYMBOL(iw_cm_disconnect);
* *
* Clean up all resources associated with the connection and release * Clean up all resources associated with the connection and release
* the initial reference taken by iw_create_cm_id. * the initial reference taken by iw_create_cm_id.
*
* Returns true if and only if the last cm_id_priv reference has been dropped.
*/ */
static void destroy_cm_id(struct iw_cm_id *cm_id) static bool destroy_cm_id(struct iw_cm_id *cm_id)
{ {
struct iwcm_id_private *cm_id_priv; struct iwcm_id_private *cm_id_priv;
unsigned long flags; unsigned long flags;
@@ -438,7 +440,7 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
iwpm_remove_mapping(&cm_id->local_addr, RDMA_NL_IWCM); iwpm_remove_mapping(&cm_id->local_addr, RDMA_NL_IWCM);
} }
(void)iwcm_deref_id(cm_id_priv); return iwcm_deref_id(cm_id_priv);
} }
/* /*
@@ -449,7 +451,8 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
*/ */
void iw_destroy_cm_id(struct iw_cm_id *cm_id) void iw_destroy_cm_id(struct iw_cm_id *cm_id)
{ {
destroy_cm_id(cm_id); if (!destroy_cm_id(cm_id))
flush_workqueue(iwcm_wq);
} }
EXPORT_SYMBOL(iw_destroy_cm_id); EXPORT_SYMBOL(iw_destroy_cm_id);
@@ -1022,7 +1025,7 @@ static void cm_work_handler(struct work_struct *_work)
if (!test_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags)) { if (!test_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags)) {
ret = process_event(cm_id_priv, &levent); ret = process_event(cm_id_priv, &levent);
if (ret) if (ret)
destroy_cm_id(&cm_id_priv->id); WARN_ON_ONCE(destroy_cm_id(&cm_id_priv->id));
} else } else
pr_debug("dropping event %d\n", levent.event); pr_debug("dropping event %d\n", levent.event);
if (iwcm_deref_id(cm_id_priv)) if (iwcm_deref_id(cm_id_priv))


@@ -2112,7 +2112,7 @@ static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
break; break;
case IB_WR_SEND_WITH_IMM: case IB_WR_SEND_WITH_IMM:
wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM; wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
wqe->send.imm_data = wr->ex.imm_data; wqe->send.imm_data = be32_to_cpu(wr->ex.imm_data);
break; break;
case IB_WR_SEND_WITH_INV: case IB_WR_SEND_WITH_INV:
wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV; wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
@@ -2142,7 +2142,7 @@ static int bnxt_re_build_rdma_wqe(const struct ib_send_wr *wr,
break; break;
case IB_WR_RDMA_WRITE_WITH_IMM: case IB_WR_RDMA_WRITE_WITH_IMM:
wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM; wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
wqe->rdma.imm_data = wr->ex.imm_data; wqe->rdma.imm_data = be32_to_cpu(wr->ex.imm_data);
break; break;
case IB_WR_RDMA_READ: case IB_WR_RDMA_READ:
wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ; wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
@@ -3110,7 +3110,7 @@ static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
wc->byte_len = orig_cqe->length; wc->byte_len = orig_cqe->length;
wc->qp = &qp1_qp->ib_qp; wc->qp = &qp1_qp->ib_qp;
wc->ex.imm_data = orig_cqe->immdata; wc->ex.imm_data = cpu_to_be32(le32_to_cpu(orig_cqe->immdata));
wc->src_qp = orig_cqe->src_qp; wc->src_qp = orig_cqe->src_qp;
memcpy(wc->smac, orig_cqe->smac, ETH_ALEN); memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) { if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
@@ -3231,7 +3231,7 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
continue; continue;
} }
wc->qp = &qp->ib_qp; wc->qp = &qp->ib_qp;
wc->ex.imm_data = cqe->immdata; wc->ex.imm_data = cpu_to_be32(le32_to_cpu(cqe->immdata));
wc->src_qp = cqe->src_qp; wc->src_qp = cqe->src_qp;
memcpy(wc->smac, cqe->smac, ETH_ALEN); memcpy(wc->smac, cqe->smac, ETH_ALEN);
wc->port_num = 1; wc->port_num = 1;


@@ -145,7 +145,7 @@ struct bnxt_qplib_swqe {
/* Send, with imm, inval key */ /* Send, with imm, inval key */
struct { struct {
union { union {
__be32 imm_data; u32 imm_data;
u32 inv_key; u32 inv_key;
}; };
u32 q_key; u32 q_key;
@@ -163,7 +163,7 @@ struct bnxt_qplib_swqe {
/* RDMA write, with imm, read */ /* RDMA write, with imm, read */
struct { struct {
union { union {
__be32 imm_data; u32 imm_data;
u32 inv_key; u32 inv_key;
}; };
u64 remote_va; u64 remote_va;
@@ -349,7 +349,7 @@ struct bnxt_qplib_cqe {
u32 length; u32 length;
u64 wr_id; u64 wr_id;
union { union {
__be32 immdata; __le32 immdata;
u32 invrkey; u32 invrkey;
}; };
u64 qp_handle; u64 qp_handle;


@@ -832,7 +832,7 @@ void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev)
int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev) int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev)
{ {
char alias_wq_name[15]; char alias_wq_name[22];
int ret = 0; int ret = 0;
int i, j; int i, j;
union ib_gid gid; union ib_gid gid;


@@ -2158,7 +2158,7 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
struct mlx4_ib_demux_ctx *ctx, struct mlx4_ib_demux_ctx *ctx,
int port) int port)
{ {
char name[12]; char name[21];
int ret = 0; int ret = 0;
int i; int i;


@@ -390,7 +390,7 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
int solicited; int solicited;
u16 pkey; u16 pkey;
u32 qp_num; u32 qp_num;
int ack_req; int ack_req = 0;
/* length from start of bth to end of icrc */ /* length from start of bth to end of icrc */
paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE; paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;
@@ -426,8 +426,9 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn : qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn :
qp->attr.dest_qp_num; qp->attr.dest_qp_num;
ack_req = ((pkt->mask & RXE_END_MASK) || if (qp_type(qp) != IB_QPT_UD && qp_type(qp) != IB_QPT_UC)
(qp->req.noack_pkts++ > RXE_MAX_PKT_PER_ACK)); ack_req = ((pkt->mask & RXE_END_MASK) ||
(qp->req.noack_pkts++ > RXE_MAX_PKT_PER_ACK));
if (ack_req) if (ack_req)
qp->req.noack_pkts = 0; qp->req.noack_pkts = 0;


@@ -1270,6 +1270,8 @@ static int __maybe_unused elan_suspend(struct device *dev)
} }
err: err:
if (ret)
enable_irq(client->irq);
mutex_unlock(&data->sysfs_mutex); mutex_unlock(&data->sysfs_mutex);
return ret; return ret;
} }


@@ -75,6 +75,20 @@ struct mbigen_device {
void __iomem *base; void __iomem *base;
}; };
static inline unsigned int get_mbigen_node_offset(unsigned int nid)
{
unsigned int offset = nid * MBIGEN_NODE_OFFSET;
/*
* To avoid touched clear register in unexpected way, we need to directly
* skip clear register when access to more than 10 mbigen nodes.
*/
if (nid >= (REG_MBIGEN_CLEAR_OFFSET / MBIGEN_NODE_OFFSET))
offset += MBIGEN_NODE_OFFSET;
return offset;
}
static inline unsigned int get_mbigen_vec_reg(irq_hw_number_t hwirq) static inline unsigned int get_mbigen_vec_reg(irq_hw_number_t hwirq)
{ {
unsigned int nid, pin; unsigned int nid, pin;
@@ -83,8 +97,7 @@ static inline unsigned int get_mbigen_vec_reg(irq_hw_number_t hwirq)
nid = hwirq / IRQS_PER_MBIGEN_NODE + 1; nid = hwirq / IRQS_PER_MBIGEN_NODE + 1;
pin = hwirq % IRQS_PER_MBIGEN_NODE; pin = hwirq % IRQS_PER_MBIGEN_NODE;
return pin * 4 + nid * MBIGEN_NODE_OFFSET return pin * 4 + get_mbigen_node_offset(nid) + REG_MBIGEN_VEC_OFFSET;
+ REG_MBIGEN_VEC_OFFSET;
} }
static inline void get_mbigen_type_reg(irq_hw_number_t hwirq, static inline void get_mbigen_type_reg(irq_hw_number_t hwirq,
@@ -99,8 +112,7 @@ static inline void get_mbigen_type_reg(irq_hw_number_t hwirq,
*mask = 1 << (irq_ofst % 32); *mask = 1 << (irq_ofst % 32);
ofst = irq_ofst / 32 * 4; ofst = irq_ofst / 32 * 4;
*addr = ofst + nid * MBIGEN_NODE_OFFSET *addr = ofst + get_mbigen_node_offset(nid) + REG_MBIGEN_TYPE_OFFSET;
+ REG_MBIGEN_TYPE_OFFSET;
} }
static inline void get_mbigen_clear_reg(irq_hw_number_t hwirq, static inline void get_mbigen_clear_reg(irq_hw_number_t hwirq,


@@ -1945,7 +1945,7 @@ hfcmulti_dtmf(struct hfc_multi *hc)
static void static void
hfcmulti_tx(struct hfc_multi *hc, int ch) hfcmulti_tx(struct hfc_multi *hc, int ch)
{ {
int i, ii, temp, len = 0; int i, ii, temp, tmp_len, len = 0;
int Zspace, z1, z2; /* must be int for calculation */ int Zspace, z1, z2; /* must be int for calculation */
int Fspace, f1, f2; int Fspace, f1, f2;
u_char *d; u_char *d;
@@ -2166,14 +2166,15 @@ hfcmulti_tx(struct hfc_multi *hc, int ch)
HFC_wait_nodebug(hc); HFC_wait_nodebug(hc);
} }
tmp_len = (*sp)->len;
dev_kfree_skb(*sp); dev_kfree_skb(*sp);
/* check for next frame */ /* check for next frame */
if (bch && get_next_bframe(bch)) { if (bch && get_next_bframe(bch)) {
len = (*sp)->len; len = tmp_len;
goto next_frame; goto next_frame;
} }
if (dch && get_next_dframe(dch)) { if (dch && get_next_dframe(dch)) {
len = (*sp)->len; len = tmp_len;
goto next_frame; goto next_frame;
} }


@@ -125,9 +125,9 @@ int led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig)
flags); flags);
cancel_work_sync(&led_cdev->set_brightness_work); cancel_work_sync(&led_cdev->set_brightness_work);
led_stop_software_blink(led_cdev); led_stop_software_blink(led_cdev);
device_remove_groups(led_cdev->dev, led_cdev->trigger->groups);
if (led_cdev->trigger->deactivate) if (led_cdev->trigger->deactivate)
led_cdev->trigger->deactivate(led_cdev); led_cdev->trigger->deactivate(led_cdev);
device_remove_groups(led_cdev->dev, led_cdev->trigger->groups);
led_cdev->trigger = NULL; led_cdev->trigger = NULL;
led_cdev->trigger_data = NULL; led_cdev->trigger_data = NULL;
led_cdev->activated = false; led_cdev->activated = false;


@@ -368,8 +368,10 @@ static int ich7_lpc_probe(struct pci_dev *dev,
nas_gpio_pci_dev = dev; nas_gpio_pci_dev = dev;
status = pci_read_config_dword(dev, PMBASE, &g_pm_io_base); status = pci_read_config_dword(dev, PMBASE, &g_pm_io_base);
if (status) if (status) {
status = pcibios_err_to_errno(status);
goto out; goto out;
}
g_pm_io_base &= 0x00000ff80; g_pm_io_base &= 0x00000ff80;
status = pci_read_config_dword(dev, GPIO_CTRL, &gc); status = pci_read_config_dword(dev, GPIO_CTRL, &gc);
@@ -381,8 +383,9 @@ static int ich7_lpc_probe(struct pci_dev *dev,
} }
status = pci_read_config_dword(dev, GPIO_BASE, &nas_gpio_io_base); status = pci_read_config_dword(dev, GPIO_BASE, &nas_gpio_io_base);
if (0 > status) { if (status) {
dev_info(&dev->dev, "Unable to read GPIOBASE.\n"); dev_info(&dev->dev, "Unable to read GPIOBASE.\n");
status = pcibios_err_to_errno(status);
goto out; goto out;
} }
dev_dbg(&dev->dev, ": GPIOBASE = 0x%08x\n", nas_gpio_io_base); dev_dbg(&dev->dev, ": GPIOBASE = 0x%08x\n", nas_gpio_io_base);


@@ -549,7 +549,7 @@ g4fan_exit( void )
platform_driver_unregister( &therm_of_driver ); platform_driver_unregister( &therm_of_driver );
if( x.of_dev ) if( x.of_dev )
of_device_unregister( x.of_dev ); of_platform_device_destroy(&x.of_dev->dev, NULL);
} }
module_init(g4fan_init); module_init(g4fan_init);


@@ -5818,7 +5818,9 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
safepos = conf->reshape_safe; safepos = conf->reshape_safe;
sector_div(safepos, data_disks); sector_div(safepos, data_disks);
if (mddev->reshape_backwards) { if (mddev->reshape_backwards) {
BUG_ON(writepos < reshape_sectors); if (WARN_ON(writepos < reshape_sectors))
return MaxSector;
writepos -= reshape_sectors; writepos -= reshape_sectors;
readpos += reshape_sectors; readpos += reshape_sectors;
safepos += reshape_sectors; safepos += reshape_sectors;
@@ -5836,14 +5838,18 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
* to set 'stripe_addr' which is where we will write to. * to set 'stripe_addr' which is where we will write to.
*/ */
if (mddev->reshape_backwards) { if (mddev->reshape_backwards) {
BUG_ON(conf->reshape_progress == 0); if (WARN_ON(conf->reshape_progress == 0))
return MaxSector;
stripe_addr = writepos; stripe_addr = writepos;
BUG_ON((mddev->dev_sectors & if (WARN_ON((mddev->dev_sectors &
~((sector_t)reshape_sectors - 1)) ~((sector_t)reshape_sectors - 1)) -
- reshape_sectors - stripe_addr reshape_sectors - stripe_addr != sector_nr))
!= sector_nr); return MaxSector;
} else { } else {
BUG_ON(writepos != sector_nr + reshape_sectors); if (WARN_ON(writepos != sector_nr + reshape_sectors))
return MaxSector;
stripe_addr = sector_nr; stripe_addr = sector_nr;
} }


@@ -475,7 +475,9 @@ static int philips_europa_tuner_sleep(struct dvb_frontend *fe)
/* switch the board to analog mode */ /* switch the board to analog mode */
if (fe->ops.i2c_gate_ctrl) if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1); fe->ops.i2c_gate_ctrl(fe, 1);
i2c_transfer(&dev->i2c_adap, &analog_msg, 1); if (i2c_transfer(&dev->i2c_adap, &analog_msg, 1) != 1)
return -EIO;
return 0; return 0;
} }
@@ -1027,7 +1029,9 @@ static int md8800_set_voltage2(struct dvb_frontend *fe,
else else
wbuf[1] = rbuf & 0xef; wbuf[1] = rbuf & 0xef;
msg[0].len = 2; msg[0].len = 2;
i2c_transfer(&dev->i2c_adap, msg, 1); if (i2c_transfer(&dev->i2c_adap, msg, 1) != 1)
return -EIO;
return 0; return 0;
} }


@@ -1096,6 +1096,7 @@ static int vdec_close(struct file *file)
{ {
struct venus_inst *inst = to_inst(file); struct venus_inst *inst = to_inst(file);
cancel_work_sync(&inst->delayed_process_work);
v4l2_m2m_ctx_release(inst->m2m_ctx); v4l2_m2m_ctx_release(inst->m2m_ctx);
v4l2_m2m_release(inst->m2m_dev); v4l2_m2m_release(inst->m2m_dev);
vdec_ctrl_deinit(inst); vdec_ctrl_deinit(inst);


@@ -36,9 +36,8 @@ struct vsp1_histogram_buffer *
vsp1_histogram_buffer_get(struct vsp1_histogram *histo) vsp1_histogram_buffer_get(struct vsp1_histogram *histo)
{ {
struct vsp1_histogram_buffer *buf = NULL; struct vsp1_histogram_buffer *buf = NULL;
unsigned long flags;
spin_lock_irqsave(&histo->irqlock, flags); spin_lock(&histo->irqlock);
if (list_empty(&histo->irqqueue)) if (list_empty(&histo->irqqueue))
goto done; goto done;
@@ -49,7 +48,7 @@ vsp1_histogram_buffer_get(struct vsp1_histogram *histo)
histo->readout = true; histo->readout = true;
done: done:
spin_unlock_irqrestore(&histo->irqlock, flags); spin_unlock(&histo->irqlock);
return buf; return buf;
} }
@@ -58,7 +57,6 @@ void vsp1_histogram_buffer_complete(struct vsp1_histogram *histo,
size_t size) size_t size)
{ {
struct vsp1_pipeline *pipe = histo->entity.pipe; struct vsp1_pipeline *pipe = histo->entity.pipe;
unsigned long flags;
/* /*
* The pipeline pointer is guaranteed to be valid as this function is * The pipeline pointer is guaranteed to be valid as this function is
@@ -70,10 +68,10 @@ void vsp1_histogram_buffer_complete(struct vsp1_histogram *histo,
vb2_set_plane_payload(&buf->buf.vb2_buf, 0, size); vb2_set_plane_payload(&buf->buf.vb2_buf, 0, size);
vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE); vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
spin_lock_irqsave(&histo->irqlock, flags); spin_lock(&histo->irqlock);
histo->readout = false; histo->readout = false;
wake_up(&histo->wait_queue); wake_up(&histo->wait_queue);
spin_unlock_irqrestore(&histo->irqlock, flags); spin_unlock(&histo->irqlock);
} }
/* ----------------------------------------------------------------------------- /* -----------------------------------------------------------------------------
@@ -124,11 +122,10 @@ static void histo_buffer_queue(struct vb2_buffer *vb)
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vsp1_histogram *histo = vb2_get_drv_priv(vb->vb2_queue); struct vsp1_histogram *histo = vb2_get_drv_priv(vb->vb2_queue);
struct vsp1_histogram_buffer *buf = to_vsp1_histogram_buffer(vbuf); struct vsp1_histogram_buffer *buf = to_vsp1_histogram_buffer(vbuf);
unsigned long flags;
spin_lock_irqsave(&histo->irqlock, flags); spin_lock_irq(&histo->irqlock);
list_add_tail(&buf->queue, &histo->irqqueue); list_add_tail(&buf->queue, &histo->irqqueue);
spin_unlock_irqrestore(&histo->irqlock, flags); spin_unlock_irq(&histo->irqlock);
} }
static int histo_start_streaming(struct vb2_queue *vq, unsigned int count) static int histo_start_streaming(struct vb2_queue *vq, unsigned int count)
@@ -140,9 +137,8 @@ static void histo_stop_streaming(struct vb2_queue *vq)
{ {
struct vsp1_histogram *histo = vb2_get_drv_priv(vq); struct vsp1_histogram *histo = vb2_get_drv_priv(vq);
struct vsp1_histogram_buffer *buffer; struct vsp1_histogram_buffer *buffer;
unsigned long flags;
spin_lock_irqsave(&histo->irqlock, flags); spin_lock_irq(&histo->irqlock);
/* Remove all buffers from the IRQ queue. */ /* Remove all buffers from the IRQ queue. */
list_for_each_entry(buffer, &histo->irqqueue, queue) list_for_each_entry(buffer, &histo->irqqueue, queue)
@@ -152,7 +148,7 @@ static void histo_stop_streaming(struct vb2_queue *vq)
/* Wait for the buffer being read out (if any) to complete. */ /* Wait for the buffer being read out (if any) to complete. */
wait_event_lock_irq(histo->wait_queue, !histo->readout, histo->irqlock); wait_event_lock_irq(histo->wait_queue, !histo->readout, histo->irqlock);
spin_unlock_irqrestore(&histo->irqlock, flags); spin_unlock_irq(&histo->irqlock);
} }
static const struct vb2_ops histo_video_queue_qops = { static const struct vb2_ops histo_video_queue_qops = {


@@ -73,7 +73,7 @@ struct vsp1_partition_window {
* @wpf: The WPF partition window configuration * @wpf: The WPF partition window configuration
*/ */
struct vsp1_partition { struct vsp1_partition {
struct vsp1_partition_window rpf; struct vsp1_partition_window rpf[VSP1_MAX_RPF];
struct vsp1_partition_window uds_sink; struct vsp1_partition_window uds_sink;
struct vsp1_partition_window uds_source; struct vsp1_partition_window uds_source;
struct vsp1_partition_window sru; struct vsp1_partition_window sru;


@@ -270,8 +270,8 @@ static void rpf_configure_partition(struct vsp1_entity *entity,
* 'width' need to be adjusted. * 'width' need to be adjusted.
*/ */
if (pipe->partitions > 1) { if (pipe->partitions > 1) {
crop.width = pipe->partition->rpf.width; crop.width = pipe->partition->rpf[rpf->entity.index].width;
crop.left += pipe->partition->rpf.left; crop.left += pipe->partition->rpf[rpf->entity.index].left;
} }
if (pipe->interlaced) { if (pipe->interlaced) {
@@ -326,7 +326,9 @@ static void rpf_partition(struct vsp1_entity *entity,
unsigned int partition_idx, unsigned int partition_idx,
struct vsp1_partition_window *window) struct vsp1_partition_window *window)
{ {
partition->rpf = *window; struct vsp1_rwpf *rpf = to_rwpf(&entity->subdev);
partition->rpf[rpf->entity.index] = *window;
} }
static const struct vsp1_entity_operations rpf_entity_ops = { static const struct vsp1_entity_operations rpf_entity_ops = {


@@ -1126,10 +1126,7 @@ static int imon_ir_change_protocol(struct rc_dev *rc, u64 *rc_proto)
memcpy(ictx->usb_tx_buf, &ir_proto_packet, sizeof(ir_proto_packet)); memcpy(ictx->usb_tx_buf, &ir_proto_packet, sizeof(ir_proto_packet));
if (!mutex_is_locked(&ictx->lock)) { unlock = mutex_trylock(&ictx->lock);
unlock = true;
mutex_lock(&ictx->lock);
}
retval = send_packet(ictx); retval = send_packet(ictx);
if (retval) if (retval)


@@ -997,25 +997,55 @@ static s32 __uvc_ctrl_get_value(struct uvc_control_mapping *mapping,
	return value;
}

+static int __uvc_ctrl_load_cur(struct uvc_video_chain *chain,
+			       struct uvc_control *ctrl)
+{
+	u8 *data;
+	int ret;
+
+	if (ctrl->loaded)
+		return 0;
+
+	data = uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT);
+
+	if ((ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR) == 0) {
+		memset(data, 0, ctrl->info.size);
+		ctrl->loaded = 1;
+		return 0;
+	}
+
+	if (ctrl->entity->get_cur)
+		ret = ctrl->entity->get_cur(chain->dev, ctrl->entity,
+					    ctrl->info.selector, data,
+					    ctrl->info.size);
+	else
+		ret = uvc_query_ctrl(chain->dev, UVC_GET_CUR,
+				     ctrl->entity->id, chain->dev->intfnum,
+				     ctrl->info.selector, data,
+				     ctrl->info.size);
+	if (ret < 0)
+		return ret;
+
+	ctrl->loaded = 1;
+
+	return ret;
+}
+
 static int __uvc_ctrl_get(struct uvc_video_chain *chain,
-	struct uvc_control *ctrl, struct uvc_control_mapping *mapping,
-	s32 *value)
+			  struct uvc_control *ctrl,
+			  struct uvc_control_mapping *mapping,
+			  s32 *value)
{
	int ret;

	if ((ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR) == 0)
		return -EACCES;

-	if (!ctrl->loaded) {
-		ret = uvc_query_ctrl(chain->dev, UVC_GET_CUR, ctrl->entity->id,
-				chain->dev->intfnum, ctrl->info.selector,
-				uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
-				ctrl->info.size);
-		if (ret < 0)
-			return ret;
-
-		ctrl->loaded = 1;
-	}
+	ret = __uvc_ctrl_load_cur(chain, ctrl);
+	if (ret < 0)
+		return ret;

	*value = __uvc_ctrl_get_value(mapping,
				uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT));
@@ -1670,21 +1700,10 @@ int uvc_ctrl_set(struct uvc_fh *handle,
	 * needs to be loaded from the device to perform the read-modify-write
	 * operation.
	 */
-	if (!ctrl->loaded && (ctrl->info.size * 8) != mapping->size) {
-		if ((ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR) == 0) {
-			memset(uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
-				0, ctrl->info.size);
-		} else {
-			ret = uvc_query_ctrl(chain->dev, UVC_GET_CUR,
-				ctrl->entity->id, chain->dev->intfnum,
-				ctrl->info.selector,
-				uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
-				ctrl->info.size);
-			if (ret < 0)
-				return ret;
-		}
-		ctrl->loaded = 1;
+	if ((ctrl->info.size * 8) != mapping->size) {
+		ret = __uvc_ctrl_load_cur(chain, ctrl);
+		if (ret < 0)
+			return ret;
	}

	/* Backup the current value in case we need to rollback later. */
@@ -1723,9 +1742,19 @@ static int uvc_ctrl_get_flags(struct uvc_device *dev,
	if (data == NULL)
		return -ENOMEM;

-	ret = uvc_query_ctrl(dev, UVC_GET_INFO, ctrl->entity->id, dev->intfnum,
-			info->selector, data, 1);
-	if (!ret)
+	if (ctrl->entity->get_info)
+		ret = ctrl->entity->get_info(dev, ctrl->entity,
+					     ctrl->info.selector, data);
+	else
+		ret = uvc_query_ctrl(dev, UVC_GET_INFO, ctrl->entity->id,
+				     dev->intfnum, info->selector, data, 1);
+
+	if (!ret) {
+		info->flags &= ~(UVC_CTRL_FLAG_GET_CUR |
+				 UVC_CTRL_FLAG_SET_CUR |
+				 UVC_CTRL_FLAG_AUTO_UPDATE |
+				 UVC_CTRL_FLAG_ASYNCHRONOUS);
		info->flags |= (data[0] & UVC_CONTROL_CAP_GET ?
				UVC_CTRL_FLAG_GET_CUR : 0)
			| (data[0] & UVC_CONTROL_CAP_SET ?
@@ -1734,6 +1763,7 @@ static int uvc_ctrl_get_flags(struct uvc_device *dev,
				UVC_CTRL_FLAG_AUTO_UPDATE : 0)
			| (data[0] & UVC_CONTROL_CAP_ASYNCHRONOUS ?
				UVC_CTRL_FLAG_ASYNCHRONOUS : 0);
+	}

	kfree(data);
	return ret;


@@ -212,13 +212,13 @@ static void uvc_fixup_video_ctrl(struct uvc_streaming *stream,
	/* Compute a bandwidth estimation by multiplying the frame
	 * size by the number of video frames per second, divide the
	 * result by the number of USB frames (or micro-frames for
-	 * high-speed devices) per second and add the UVC header size
-	 * (assumed to be 12 bytes long).
+	 * high- and super-speed devices) per second and add the UVC
+	 * header size (assumed to be 12 bytes long).
	 */
	bandwidth = frame->wWidth * frame->wHeight / 8 * format->bpp;
	bandwidth *= 10000000 / interval + 1;
	bandwidth /= 1000;
-	if (stream->dev->udev->speed == USB_SPEED_HIGH)
+	if (stream->dev->udev->speed >= USB_SPEED_HIGH)
		bandwidth /= 8;
	bandwidth += 12;
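
For reference, the estimate described in the comment above works out as follows; this standalone C sketch uses purely illustrative numbers (1280x720 at 16 bpp, roughly 30 fps), not values taken from any particular device:

	/* Illustrative recomputation of the UVC bandwidth estimate above. */
	#include <stdio.h>

	int main(void)
	{
		unsigned int width = 1280, height = 720; /* frame size in pixels */
		unsigned int bpp = 16;                   /* bits per pixel (e.g. YUY2) */
		unsigned int interval = 333333;          /* frame interval in 100 ns units (~30 fps) */
		unsigned int bandwidth;

		bandwidth = width * height / 8 * bpp;    /* bytes per frame */
		bandwidth *= 10000000 / interval + 1;    /* times frames per second */
		bandwidth /= 1000;                       /* per USB frame (1 ms) */
		bandwidth /= 8;                          /* per micro-frame at high/super speed */
		bandwidth += 12;                         /* UVC payload header */
		printf("estimated bytes per (micro-)frame: %u\n", bandwidth);
		return 0;
	}
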
@@ -473,6 +473,7 @@ uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf,
	ktime_t time;
	u16 host_sof;
	u16 dev_sof;
+	u32 dev_stc;

	switch (data[1] & (UVC_STREAM_PTS | UVC_STREAM_SCR)) {
	case UVC_STREAM_PTS | UVC_STREAM_SCR:
@@ -517,6 +518,34 @@ uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf,
	if (dev_sof == stream->clock.last_sof)
		return;

+	dev_stc = get_unaligned_le32(&data[header_size - 6]);
+
+	/*
+	 * STC (Source Time Clock) is the clock used by the camera. The UVC 1.5
+	 * standard states that it "must be captured when the first video data
+	 * of a video frame is put on the USB bus". This is generally understood
+	 * as requiring devices to clear the payload header's SCR bit before
+	 * the first packet containing video data.
+	 *
+	 * Most vendors follow that interpretation, but some (namely SunplusIT
+	 * on some devices) always set the `UVC_STREAM_SCR` bit, fill the SCR
+	 * field with 0's, and expect that the driver only processes the SCR if
+	 * there is data in the packet.
+	 *
+	 * Ignore all the hardware timestamp information if we haven't received
+	 * any data for this frame yet, the packet contains no data, and both
+	 * STC and SOF are zero. This heuristics should be safe on compliant
+	 * devices. This should be safe with compliant devices, as in the very
+	 * unlikely case where a UVC 1.1 device would send timing information
+	 * only before the first packet containing data, and both STC and SOF
+	 * happen to be zero for a particular frame, we would only miss one
+	 * clock sample from many and the clock recovery algorithm wouldn't
+	 * suffer from this condition.
+	 */
+	if (buf && buf->bytesused == 0 && len == header_size &&
+	    dev_stc == 0 && dev_sof == 0)
+		return;
+
	stream->clock.last_sof = dev_sof;

	host_sof = usb_get_current_frame_number(stream->dev->udev);
@@ -554,7 +583,7 @@ uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf,
	spin_lock_irqsave(&stream->clock.lock, flags);

	sample = &stream->clock.samples[stream->clock.head];
-	sample->dev_stc = get_unaligned_le32(&data[header_size - 6]);
+	sample->dev_stc = dev_stc;
	sample->dev_sof = dev_sof;
	sample->host_sof = host_sof;
	sample->host_time = time;


@@ -345,6 +345,11 @@ struct uvc_entity {
	u8 bNrInPins;
	u8 *baSourceID;

+	int (*get_info)(struct uvc_device *dev, struct uvc_entity *entity,
+			u8 cs, u8 *caps);
+	int (*get_cur)(struct uvc_device *dev, struct uvc_entity *entity,
+			u8 cs, void *data, u16 size);
+
	unsigned int ncontrols;
	struct uvc_control *controls;
};


@@ -246,8 +246,7 @@ static int usbtll_omap_probe(struct platform_device *pdev)
		break;
	}

-	tll = devm_kzalloc(dev, sizeof(*tll) + sizeof(tll->ch_clk[nch]),
-			   GFP_KERNEL);
+	tll = devm_kzalloc(dev, struct_size(tll, ch_clk, nch), GFP_KERNEL);
	if (!tll) {
		pm_runtime_put_sync(dev);
		pm_runtime_disable(dev);
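
struct_size(tll, ch_clk, nch) comes from the kernel's <linux/overflow.h> and evaluates to the size of the struct plus nch trailing flexible-array elements, saturating instead of wrapping on overflow. A rough user-space approximation of the size it computes; the struct layout and names below are illustrative only:

	#include <stdio.h>
	#include <stdlib.h>

	struct tll_like {
		int nch;
		void *ch_clk[];	/* flexible array member, one entry per channel */
	};

	int main(void)
	{
		int nch = 3;
		/* Roughly what struct_size() expands to, minus the overflow checks. */
		size_t bytes = sizeof(struct tll_like) + (size_t)nch * sizeof(void *);
		struct tll_like *tll = calloc(1, bytes);

		if (!tll)
			return 1;
		tll->nch = nch;
		printf("allocated %zu bytes for %d channels\n", bytes, nch);
		free(tll);
		return 0;
	}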


@@ -1,19 +1,19 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_MTD_TESTS) += mtd_oobtest.o
-obj-$(CONFIG_MTD_TESTS) += mtd_pagetest.o
-obj-$(CONFIG_MTD_TESTS) += mtd_readtest.o
-obj-$(CONFIG_MTD_TESTS) += mtd_speedtest.o
-obj-$(CONFIG_MTD_TESTS) += mtd_stresstest.o
-obj-$(CONFIG_MTD_TESTS) += mtd_subpagetest.o
-obj-$(CONFIG_MTD_TESTS) += mtd_torturetest.o
-obj-$(CONFIG_MTD_TESTS) += mtd_nandecctest.o
-obj-$(CONFIG_MTD_TESTS) += mtd_nandbiterrs.o
+obj-$(CONFIG_MTD_TESTS) += mtd_oobtest.o mtd_test.o
+obj-$(CONFIG_MTD_TESTS) += mtd_pagetest.o mtd_test.o
+obj-$(CONFIG_MTD_TESTS) += mtd_readtest.o mtd_test.o
+obj-$(CONFIG_MTD_TESTS) += mtd_speedtest.o mtd_test.o
+obj-$(CONFIG_MTD_TESTS) += mtd_stresstest.o mtd_test.o
+obj-$(CONFIG_MTD_TESTS) += mtd_subpagetest.o mtd_test.o
+obj-$(CONFIG_MTD_TESTS) += mtd_torturetest.o mtd_test.o
+obj-$(CONFIG_MTD_TESTS) += mtd_nandecctest.o mtd_test.o
+obj-$(CONFIG_MTD_TESTS) += mtd_nandbiterrs.o mtd_test.o

-mtd_oobtest-objs := oobtest.o mtd_test.o
-mtd_pagetest-objs := pagetest.o mtd_test.o
-mtd_readtest-objs := readtest.o mtd_test.o
-mtd_speedtest-objs := speedtest.o mtd_test.o
-mtd_stresstest-objs := stresstest.o mtd_test.o
-mtd_subpagetest-objs := subpagetest.o mtd_test.o
-mtd_torturetest-objs := torturetest.o mtd_test.o
-mtd_nandbiterrs-objs := nandbiterrs.o mtd_test.o
+mtd_oobtest-objs := oobtest.o
+mtd_pagetest-objs := pagetest.o
+mtd_readtest-objs := readtest.o
+mtd_speedtest-objs := speedtest.o
+mtd_stresstest-objs := stresstest.o
+mtd_subpagetest-objs := subpagetest.o
+mtd_torturetest-objs := torturetest.o
+mtd_nandbiterrs-objs := nandbiterrs.o


@@ -25,6 +25,7 @@ int mtdtest_erase_eraseblock(struct mtd_info *mtd, unsigned int ebnum)
	return 0;
}
+EXPORT_SYMBOL_GPL(mtdtest_erase_eraseblock);

static int is_block_bad(struct mtd_info *mtd, unsigned int ebnum)
{
@@ -57,6 +58,7 @@ int mtdtest_scan_for_bad_eraseblocks(struct mtd_info *mtd, unsigned char *bbt,
	return 0;
}
+EXPORT_SYMBOL_GPL(mtdtest_scan_for_bad_eraseblocks);

int mtdtest_erase_good_eraseblocks(struct mtd_info *mtd, unsigned char *bbt,
				   unsigned int eb, int ebcnt)
@@ -75,6 +77,7 @@ int mtdtest_erase_good_eraseblocks(struct mtd_info *mtd, unsigned char *bbt,
	return 0;
}
+EXPORT_SYMBOL_GPL(mtdtest_erase_good_eraseblocks);

int mtdtest_read(struct mtd_info *mtd, loff_t addr, size_t size, void *buf)
{
@@ -92,6 +95,7 @@ int mtdtest_read(struct mtd_info *mtd, loff_t addr, size_t size, void *buf)
	return err;
}
+EXPORT_SYMBOL_GPL(mtdtest_read);

int mtdtest_write(struct mtd_info *mtd, loff_t addr, size_t size,
		  const void *buf)
@@ -107,3 +111,8 @@ int mtdtest_write(struct mtd_info *mtd, loff_t addr, size_t size,
	return err;
}
+EXPORT_SYMBOL_GPL(mtdtest_write);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MTD function test helpers");
+MODULE_AUTHOR("Akinobu Mita");


@@ -1573,6 +1573,7 @@ int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
				     GFP_KERNEL);
		if (!fm_eba[i]) {
			ret = -ENOMEM;
+			kfree(scan_eba[i]);
			goto out_free;
		}

@@ -1608,7 +1609,7 @@ int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
	}

out_free:
-	for (i = 0; i < num_volumes; i++) {
+	while (--i >= 0) {
		if (!ubi->volumes[i])
			continue;
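
The fix frees the scan table that was just allocated for the failing index and then changes the cleanup loop so it only walks back over entries that were actually populated. A minimal sketch of that "unwind only what was allocated" pattern, using illustrative names rather than the UBI data structures:

	#include <stdlib.h>

	static int alloc_tables(int **tbl, int num, size_t entries)
	{
		int i;

		for (i = 0; i < num; i++) {
			tbl[i] = calloc(entries, sizeof(int));
			if (!tbl[i])
				goto out_free;
		}
		return 0;

	out_free:
		/* i is the index that failed; free only the entries below it. */
		while (--i >= 0)
			free(tbl[i]);
		return -1;
	}

	int main(void)
	{
		int *tables[4] = { 0 };

		return alloc_tables(tables, 4, 128) ? 1 : 0;
	}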


@@ -774,13 +774,10 @@ static struct slave *bond_find_best_slave(struct bonding *bond)
	return bestslave;
}

+/* must be called in RCU critical section or with RTNL held */
static bool bond_should_notify_peers(struct bonding *bond)
{
-	struct slave *slave;
-
-	rcu_read_lock();
-	slave = rcu_dereference(bond->curr_active_slave);
-	rcu_read_unlock();
+	struct slave *slave = rcu_dereference_rtnl(bond->curr_active_slave);

	if (!slave || !bond->send_peer_notif ||
	    !netif_carrier_ok(bond->dev) ||


@@ -418,7 +418,7 @@ struct bna_ib {
/* Tx object */

/* Tx datapath control structure */
-#define BNA_Q_NAME_SIZE		16
+#define BNA_Q_NAME_SIZE		(IFNAMSIZ + 6)
struct bna_tcb {
	/* Fast path */
	void **sw_qpt;


@@ -1543,8 +1543,9 @@ bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
	for (i = 0; i < num_txqs; i++) {
		vector_num = tx_info->tcb[i]->intr_vector;
-		sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
-			tx_id + tx_info->tcb[i]->id);
+		snprintf(tx_info->tcb[i]->name, BNA_Q_NAME_SIZE, "%s TXQ %d",
+			 bnad->netdev->name,
+			 tx_id + tx_info->tcb[i]->id);
		err = request_irq(bnad->msix_table[vector_num].vector,
				  (irq_handler_t)bnad_msix_tx, 0,
				  tx_info->tcb[i]->name,
@@ -1594,9 +1595,9 @@ bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
	for (i = 0; i < num_rxps; i++) {
		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
-		sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
-			bnad->netdev->name,
-			rx_id + rx_info->rx_ctrl[i].ccb->id);
+		snprintf(rx_info->rx_ctrl[i].ccb->name, BNA_Q_NAME_SIZE,
+			 "%s CQ %d", bnad->netdev->name,
+			 rx_id + rx_info->rx_ctrl[i].ccb->id);
		err = request_irq(bnad->msix_table[vector_num].vector,
				  (irq_handler_t)bnad_msix_rx, 0,
				  rx_info->rx_ctrl[i].ccb->name,
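
The queue name buffer is now sized for an interface name (IFNAMSIZ is 16 in the kernel, including the terminating NUL) plus the " TXQ "/" CQ " suffix and queue number, and snprintf() guarantees the result is truncated rather than overflowing. A small user-space sketch of the same formatting; the sample interface name and queue id below are made up:

	#include <stdio.h>

	#define IFNAMSIZ	16		/* kernel value, includes the NUL */
	#define BNA_Q_NAME_SIZE	(IFNAMSIZ + 6)

	int main(void)
	{
		char name[BNA_Q_NAME_SIZE];
		const char *netdev = "enp5s0f0";	/* illustrative interface name */
		int queue = 3;

		/* snprintf() never writes past sizeof(name); worst case the name is truncated. */
		snprintf(name, sizeof(name), "%s TXQ %d", netdev, queue);
		printf("%s\n", name);
		return 0;
	}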


@@ -223,8 +223,8 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#define PKT_MINBUF_SIZE		64

/* FEC receive acceleration */
-#define FEC_RACC_IPDIS		(1 << 1)
-#define FEC_RACC_PRODIS		(1 << 2)
+#define FEC_RACC_IPDIS		BIT(1)
+#define FEC_RACC_PRODIS		BIT(2)
#define FEC_RACC_SHIFT16	BIT(7)
#define FEC_RACC_OPTIONS	(FEC_RACC_IPDIS | FEC_RACC_PRODIS)
@@ -253,8 +253,23 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#define FEC_MMFR_TA		(2 << 16)
#define FEC_MMFR_DATA(v)	(v & 0xffff)
/* FEC ECR bits definition */
-#define FEC_ECR_MAGICEN		(1 << 2)
-#define FEC_ECR_SLEEP		(1 << 3)
+#define FEC_ECR_RESET		BIT(0)
+#define FEC_ECR_ETHEREN		BIT(1)
+#define FEC_ECR_MAGICEN		BIT(2)
+#define FEC_ECR_SLEEP		BIT(3)
+#define FEC_ECR_EN1588		BIT(4)
+#define FEC_ECR_BYTESWP		BIT(8)
+/* FEC RCR bits definition */
+#define FEC_RCR_LOOP		BIT(0)
+#define FEC_RCR_HALFDPX		BIT(1)
+#define FEC_RCR_MII		BIT(2)
+#define FEC_RCR_PROMISC		BIT(3)
+#define FEC_RCR_BC_REJ		BIT(4)
+#define FEC_RCR_FLOWCTL		BIT(5)
+#define FEC_RCR_RMII		BIT(8)
+#define FEC_RCR_10BASET		BIT(9)
+/* TX WMARK bits */
+#define FEC_TXWMRK_STRFWD	BIT(8)

#define FEC_MII_TIMEOUT		30000 /* us */
@@ -950,7 +965,7 @@ fec_restart(struct net_device *ndev)
	u32 val;
	u32 temp_mac[2];
	u32 rcntl = OPT_FRAME_SIZE | 0x04;
-	u32 ecntl = 0x2; /* ETHEREN */
+	u32 ecntl = FEC_ECR_ETHEREN;

	/* Whack a reset. We should wait for this.
	 * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
@@ -1026,18 +1041,18 @@ fec_restart(struct net_device *ndev)
		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
			rcntl |= (1 << 6);
		else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
-			rcntl |= (1 << 8);
+			rcntl |= FEC_RCR_RMII;
		else
-			rcntl &= ~(1 << 8);
+			rcntl &= ~FEC_RCR_RMII;

		/* 1G, 100M or 10M */
		if (ndev->phydev) {
			if (ndev->phydev->speed == SPEED_1000)
				ecntl |= (1 << 5);
			else if (ndev->phydev->speed == SPEED_100)
-				rcntl &= ~(1 << 9);
+				rcntl &= ~FEC_RCR_10BASET;
			else
-				rcntl |= (1 << 9);
+				rcntl |= FEC_RCR_10BASET;
		}
	} else {
#ifdef FEC_MIIGSK_ENR
@@ -1096,13 +1111,13 @@ fec_restart(struct net_device *ndev)
	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
		/* enable ENET endian swap */
-		ecntl |= (1 << 8);
+		ecntl |= FEC_ECR_BYTESWP;
		/* enable ENET store and forward mode */
-		writel(1 << 8, fep->hwp + FEC_X_WMRK);
+		writel(FEC_TXWMRK_STRFWD, fep->hwp + FEC_X_WMRK);
	}

	if (fep->bufdesc_ex)
-		ecntl |= (1 << 4);
+		ecntl |= FEC_ECR_EN1588;

#ifndef CONFIG_M5272
	/* Enable the MIB statistic event counters */
@@ -1149,7 +1164,7 @@ static void
fec_stop(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
-	u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
+	u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & FEC_RCR_RMII;
	u32 val;

	/* We cannot expect a graceful transmit stop without link !!! */
@@ -1168,7 +1183,7 @@ fec_stop(struct net_device *ndev)
	if (fep->quirks & FEC_QUIRK_HAS_AVB) {
		writel(0, fep->hwp + FEC_ECNTRL);
	} else {
-		writel(1, fep->hwp + FEC_ECNTRL);
+		writel(FEC_ECR_RESET, fep->hwp + FEC_ECNTRL);
		udelay(10);
	}
	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
@@ -1184,11 +1199,16 @@ fec_stop(struct net_device *ndev)
	/* We have to keep ENET enabled to have MII interrupt stay working */
	if (fep->quirks & FEC_QUIRK_ENET_MAC &&
	    !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
-		writel(2, fep->hwp + FEC_ECNTRL);
+		writel(FEC_ECR_ETHEREN, fep->hwp + FEC_ECNTRL);
		writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
	}
+
+	if (fep->bufdesc_ex) {
+		val = readl(fep->hwp + FEC_ECNTRL);
+		val |= FEC_ECR_EN1588;
+		writel(val, fep->hwp + FEC_ECNTRL);
+	}
}

static void
fec_timeout(struct net_device *ndev)


@@ -635,6 +635,9 @@ void fec_ptp_stop(struct platform_device *pdev)
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);

+	if (fep->pps_enable)
+		fec_ptp_enable_pps(fep, 0);
+
	cancel_delayed_work_sync(&fep->time_keep);
	if (fep->ptp_clock)
		ptp_clock_unregister(fep->ptp_clock);


@@ -7,16 +7,16 @@
#define ICE_PF_RESET_WAIT_COUNT	200

-#define ICE_NIC_FLX_ENTRY(hw, mdid, idx) \
-	wr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(ICE_RXDID_FLEX_NIC), \
+#define ICE_PROG_FLEX_ENTRY(hw, rxdid, mdid, idx) \
+	wr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(rxdid), \
	     ((ICE_RX_OPC_MDID << \
	       GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_S) & \
	      GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_M) | \
	     (((mdid) << GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_S) & \
	      GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_M))

-#define ICE_NIC_FLX_FLG_ENTRY(hw, flg_0, flg_1, flg_2, flg_3, idx) \
-	wr32((hw), GLFLXP_RXDID_FLAGS(ICE_RXDID_FLEX_NIC, idx), \
+#define ICE_PROG_FLG_ENTRY(hw, rxdid, flg_0, flg_1, flg_2, flg_3, idx) \
+	wr32((hw), GLFLXP_RXDID_FLAGS(rxdid, idx), \
	     (((flg_0) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) | \
	     (((flg_1) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S) & \
@@ -290,30 +290,85 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
}

/**
- * ice_init_flex_parser - initialize rx flex parser
+ * ice_init_flex_flags
 * @hw: pointer to the hardware structure
+ * @prof_id: Rx Descriptor Builder profile ID
 *
- * Function to initialize flex descriptors
+ * Function to initialize Rx flex flags
 */
-static void ice_init_flex_parser(struct ice_hw *hw)
+static void ice_init_flex_flags(struct ice_hw *hw, enum ice_rxdid prof_id)
{
	u8 idx = 0;

-	ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_HASH_LOW, 0);
-	ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_HASH_HIGH, 1);
-	ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_FLOW_ID_LOWER, 2);
-	ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_FLOW_ID_HIGH, 3);
-	ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_PKT_FRG, ICE_RXFLG_UDP_GRE,
-			      ICE_RXFLG_PKT_DSI, ICE_RXFLG_FIN, idx++);
-	ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_SYN, ICE_RXFLG_RST,
-			      ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx++);
-	ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI,
-			      ICE_RXFLG_EVLAN_x8100, ICE_RXFLG_EVLAN_x9100,
-			      idx++);
-	ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_VLAN_x8100, ICE_RXFLG_TNL_VLAN,
-			      ICE_RXFLG_TNL_MAC, ICE_RXFLG_TNL0, idx++);
-	ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_TNL1, ICE_RXFLG_TNL2,
-			      ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx);
+	/* Flex-flag fields (0-2) are programmed with FLG64 bits with layout:
+	 * flexiflags0[5:0] - TCP flags, is_packet_fragmented, is_packet_UDP_GRE
+	 * flexiflags1[3:0] - Not used for flag programming
+	 * flexiflags2[7:0] - Tunnel and VLAN types
+	 * 2 invalid fields in last index
+	 */
+	switch (prof_id) {
+	/* Rx flex flags are currently programmed for the NIC profiles only.
+	 * Different flag bit programming configurations can be added per
+	 * profile as needed.
+	 */
+	case ICE_RXDID_FLEX_NIC:
+	case ICE_RXDID_FLEX_NIC_2:
+		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_FRG,
+				   ICE_RXFLG_UDP_GRE, ICE_RXFLG_PKT_DSI,
+				   ICE_RXFLG_FIN, idx++);
+		/* flex flag 1 is not used for flexi-flag programming, skipping
+		 * these four FLG64 bits.
+		 */
+		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_SYN, ICE_RXFLG_RST,
+				   ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx++);
+		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_DSI,
+				   ICE_RXFLG_PKT_DSI, ICE_RXFLG_EVLAN_x8100,
+				   ICE_RXFLG_EVLAN_x9100, idx++);
+		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_VLAN_x8100,
+				   ICE_RXFLG_TNL_VLAN, ICE_RXFLG_TNL_MAC,
+				   ICE_RXFLG_TNL0, idx++);
+		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_TNL1, ICE_RXFLG_TNL2,
+				   ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx);
+		break;
+	default:
+		ice_debug(hw, ICE_DBG_INIT,
+			  "Flag programming for profile ID %d not supported\n",
+			  prof_id);
+	}
+}
+
+/**
+ * ice_init_flex_flds
+ * @hw: pointer to the hardware structure
+ * @prof_id: Rx Descriptor Builder profile ID
+ *
+ * Function to initialize flex descriptors
+ */
+static void ice_init_flex_flds(struct ice_hw *hw, enum ice_rxdid prof_id)
+{
+	enum ice_flex_rx_mdid mdid;
+
+	switch (prof_id) {
+	case ICE_RXDID_FLEX_NIC:
+	case ICE_RXDID_FLEX_NIC_2:
+		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_LOW, 0);
+		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_HIGH, 1);
+		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_FLOW_ID_LOWER, 2);
+
+		mdid = (prof_id == ICE_RXDID_FLEX_NIC_2) ?
+			ICE_RX_MDID_SRC_VSI : ICE_RX_MDID_FLOW_ID_HIGH;
+
+		ICE_PROG_FLEX_ENTRY(hw, prof_id, mdid, 3);
+
+		ice_init_flex_flags(hw, prof_id);
+		break;
+	default:
+		ice_debug(hw, ICE_DBG_INIT,
+			  "Field init for profile ID %d not supported\n",
+			  prof_id);
+	}
}

/**
@@ -494,7 +549,8 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
	if (status)
		goto err_unroll_fltr_mgmt_struct;

-	ice_init_flex_parser(hw);
+	ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC);
+	ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC_2);

	return 0;


@@ -188,23 +188,25 @@ struct ice_32b_rx_flex_desc_nic {
 * with a specific metadata (profile 7 reserved for HW)
 */
enum ice_rxdid {
-	ICE_RXDID_START = 0,
-	ICE_RXDID_LEGACY_0 = ICE_RXDID_START,
-	ICE_RXDID_LEGACY_1,
-	ICE_RXDID_FLX_START,
-	ICE_RXDID_FLEX_NIC = ICE_RXDID_FLX_START,
-	ICE_RXDID_FLX_LAST = 63,
-	ICE_RXDID_LAST = ICE_RXDID_FLX_LAST
+	ICE_RXDID_LEGACY_0 = 0,
+	ICE_RXDID_LEGACY_1 = 1,
+	ICE_RXDID_FLEX_NIC = 2,
+	ICE_RXDID_FLEX_NIC_2 = 6,
+	ICE_RXDID_HW = 7,
+	ICE_RXDID_LAST = 63,
};

/* Receive Flex Descriptor Rx opcode values */
#define ICE_RX_OPC_MDID		0x01

/* Receive Descriptor MDID values */
-#define ICE_RX_MDID_FLOW_ID_LOWER	5
-#define ICE_RX_MDID_FLOW_ID_HIGH	6
-#define ICE_RX_MDID_HASH_LOW		56
-#define ICE_RX_MDID_HASH_HIGH		57
+enum ice_flex_rx_mdid {
+	ICE_RX_MDID_FLOW_ID_LOWER = 5,
+	ICE_RX_MDID_FLOW_ID_HIGH,
+	ICE_RX_MDID_SRC_VSI = 19,
+	ICE_RX_MDID_HASH_LOW = 56,
+	ICE_RX_MDID_HASH_HIGH,
+};

/* Rx Flag64 packet flag bits */
enum ice_rx_flg64_bits {


@@ -727,6 +727,7 @@ static int netconsole_netdev_event(struct notifier_block *this,
			/* rtnl_lock already held
			 * we might sleep in __netpoll_cleanup()
			 */
+			nt->enabled = false;
			spin_unlock_irqrestore(&target_list_lock, flags);
			__netpoll_cleanup(&nt->np);
@@ -734,7 +735,6 @@ static int netconsole_netdev_event(struct notifier_block *this,
			spin_lock_irqsave(&target_list_lock, flags);
			dev_put(nt->np.dev);
			nt->np.dev = NULL;
-			nt->enabled = false;
			stopped = true;
			netconsole_target_put(nt);
			goto restart;


@@ -241,6 +241,7 @@ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
			break;
		default:
			/* not ip - do not know what to do */
+			kfree_skb(skbn);
			goto skip;
		}


@@ -178,6 +178,7 @@ static int sr_mdio_read(struct net_device *netdev, int phy_id, int loc)
	struct usbnet *dev = netdev_priv(netdev);
	__le16 res;
	int rc = 0;
+	int err;

	if (phy_id) {
		netdev_dbg(netdev, "Only internal phy supported\n");
@@ -188,11 +189,17 @@ static int sr_mdio_read(struct net_device *netdev, int phy_id, int loc)
	if (loc == MII_BMSR) {
		u8 value;

-		sr_read_reg(dev, SR_NSR, &value);
+		err = sr_read_reg(dev, SR_NSR, &value);
+		if (err < 0)
+			return err;
+
		if (value & NSR_LINKST)
			rc = 1;
	}
-	sr_share_read_word(dev, 1, loc, &res);
+	err = sr_share_read_word(dev, 1, loc, &res);
+	if (err < 0)
+		return err;
+
	if (rc == 1)
		res = le16_to_cpu(res) | BMSR_LSTATUS;
	else


@@ -2638,7 +2638,6 @@ wlc_lcnphy_tx_iqlo_cal(struct brcms_phy *pi,
	struct lcnphy_txgains cal_gains, temp_gains;
	u16 hash;
-	u8 band_idx;
	int j;
	u16 ncorr_override[5];
	u16 syst_coeffs[] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
@@ -2670,6 +2669,9 @@ wlc_lcnphy_tx_iqlo_cal(struct brcms_phy *pi,
	u16 *values_to_save;
	struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;

+	if (WARN_ON(CHSPEC_IS5G(pi->radio_chanspec)))
+		return;
+
	values_to_save = kmalloc_array(20, sizeof(u16), GFP_ATOMIC);
	if (NULL == values_to_save)
		return;
@@ -2733,20 +2735,18 @@ wlc_lcnphy_tx_iqlo_cal(struct brcms_phy *pi,
	hash = (target_gains->gm_gain << 8) |
	       (target_gains->pga_gain << 4) | (target_gains->pad_gain);

-	band_idx = (CHSPEC_IS5G(pi->radio_chanspec) ? 1 : 0);
	cal_gains = *target_gains;
	memset(ncorr_override, 0, sizeof(ncorr_override));
-	for (j = 0; j < iqcal_gainparams_numgains_lcnphy[band_idx]; j++) {
-		if (hash == tbl_iqcal_gainparams_lcnphy[band_idx][j][0]) {
+	for (j = 0; j < iqcal_gainparams_numgains_lcnphy[0]; j++) {
+		if (hash == tbl_iqcal_gainparams_lcnphy[0][j][0]) {
			cal_gains.gm_gain =
-				tbl_iqcal_gainparams_lcnphy[band_idx][j][1];
+				tbl_iqcal_gainparams_lcnphy[0][j][1];
			cal_gains.pga_gain =
-				tbl_iqcal_gainparams_lcnphy[band_idx][j][2];
+				tbl_iqcal_gainparams_lcnphy[0][j][2];
			cal_gains.pad_gain =
-				tbl_iqcal_gainparams_lcnphy[band_idx][j][3];
+				tbl_iqcal_gainparams_lcnphy[0][j][3];
			memcpy(ncorr_override,
-			       &tbl_iqcal_gainparams_lcnphy[band_idx][j][3],
+			       &tbl_iqcal_gainparams_lcnphy[0][j][3],
			       sizeof(ncorr_override));
			break;
		}


@@ -934,6 +934,8 @@ mwifiex_init_new_priv_params(struct mwifiex_private *priv,
		return -EOPNOTSUPP;
	}

+	priv->bss_num = mwifiex_get_unused_bss_num(adapter, priv->bss_type);
+
	spin_lock_irqsave(&adapter->main_proc_lock, flags);
	adapter->main_locked = false;
	spin_unlock_irqrestore(&adapter->main_proc_lock, flags);


@@ -2481,6 +2481,13 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
			return NVME_QUIRK_NO_APST;
	}

+	/*
+	 * NVMe SSD drops off the PCIe bus after system idle
+	 * for 10 hours on a Lenovo N60z board.
+	 */
+	if (dmi_match(DMI_BOARD_NAME, "LXKT-ZXEG-N6"))
+		return NVME_QUIRK_NO_APST;
+
	return 0;
}


@@ -109,8 +109,7 @@ int parport_daisy_init(struct parport *port)
	    ((num_ports = num_mux_ports(port)) == 2 || num_ports == 4)) {
		/* Leave original as port zero. */
		port->muxport = 0;
-		printk(KERN_INFO
-			"%s: 1st (default) port of %d-way multiplexor\n",
+		pr_info("%s: 1st (default) port of %d-way multiplexor\n",
			port->name, num_ports);
		for (i = 1; i < num_ports; i++) {
			/* Clone the port. */
@@ -123,8 +122,7 @@ int parport_daisy_init(struct parport *port)
				continue;
			}

-			printk(KERN_INFO
-				"%s: %d%s port of %d-way multiplexor on %s\n",
+			pr_info("%s: %d%s port of %d-way multiplexor on %s\n",
				extra->name, i + 1, th[i + 1], num_ports,
				port->name);


@@ -329,7 +329,7 @@ int parport_negotiate (struct parport *port, int mode)
#ifndef CONFIG_PARPORT_1284
	if (mode == IEEE1284_MODE_COMPAT)
		return 0;
-	printk (KERN_ERR "parport: IEEE1284 not supported in this kernel\n");
+	pr_err("parport: IEEE1284 not supported in this kernel\n");
	return -1;
#else
	int m = mode & ~IEEE1284_ADDR;
@@ -694,7 +694,7 @@ ssize_t parport_write (struct parport *port, const void *buffer, size_t len)
ssize_t parport_read (struct parport *port, void *buffer, size_t len)
{
#ifndef CONFIG_PARPORT_1284
-	printk (KERN_ERR "parport: IEEE1284 not supported in this kernel\n");
+	pr_err("parport: IEEE1284 not supported in this kernel\n");
	return -ENODEV;
#else
	int mode = port->physport->ieee1284.mode;


@@ -599,8 +599,7 @@ size_t parport_ieee1284_ecp_read_data (struct parport *port,
			DPRINTK (KERN_DEBUG "ECP read timed out at 45\n");
			if (command)
-				printk (KERN_WARNING
-					"%s: command ignored (%02x)\n",
+				pr_warn("%s: command ignored (%02x)\n",
					port->name, byte);
			break;


@@ -211,7 +211,7 @@ static int __init amiga_parallel_probe(struct platform_device *pdev)
	if (err)
		goto out_irq;

-	printk(KERN_INFO "%s: Amiga built-in port using irq\n", p->name);
+	pr_info("%s: Amiga built-in port using irq\n", p->name);
	/* XXX: set operating mode */
	parport_announce_port(p);


@@ -199,7 +199,7 @@ static int __init parport_atari_init(void)
	}
	this_port = p;

-	printk(KERN_INFO "%s: Atari built-in port using irq\n", p->name);
+	pr_info("%s: Atari built-in port using irq\n", p->name);
	parport_announce_port (p);

	return 0;


@@ -142,10 +142,8 @@ static int parport_config(struct pcmcia_device *link)
				       link->irq, PARPORT_DMA_NONE,
				       &link->dev, IRQF_SHARED);
	if (p == NULL) {
-		printk(KERN_NOTICE "parport_cs: parport_pc_probe_port() at "
-		       "0x%3x, irq %u failed\n",
-		       (unsigned int) link->resource[0]->start,
-		       link->irq);
+		pr_notice("parport_cs: parport_pc_probe_port() at 0x%3x, irq %u failed\n",
+			  (unsigned int)link->resource[0]->start, link->irq);
		goto failed;
	}


@@ -287,7 +287,7 @@ struct parport *parport_gsc_probe_port(unsigned long base,
	p->size = (p->modes & PARPORT_MODE_EPP)?8:3;
	p->private_data = priv;

-	printk(KERN_INFO "%s: PC-style at 0x%lx", p->name, p->base);
+	pr_info("%s: PC-style at 0x%lx", p->name, p->base);
	p->irq = irq;
	if (p->irq == PARPORT_IRQ_AUTO) {
		p->irq = PARPORT_IRQ_NONE;
@@ -304,12 +304,16 @@ struct parport *parport_gsc_probe_port(unsigned long base,
		p->dma = PARPORT_DMA_NONE;

	pr_cont(" [");
-#define printmode(x) {if(p->modes&PARPORT_MODE_##x){pr_cont("%s%s",f?",":"",#x);f++;}}
+#define printmode(x) \
+	do { \
+		if (p->modes & PARPORT_MODE_##x) \
+			pr_cont("%s%s", f++ ? "," : "", #x); \
+	} while (0)
	{
		int f = 0;
		printmode(PCSPP);
		printmode(TRISTATE);
-		printmode(COMPAT)
+		printmode(COMPAT);
		printmode(EPP);
//		printmode(ECP);
//		printmode(DMA);
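
Wrapping the multi-statement printmode() body in do { ... } while (0) makes the macro expand to a single statement, so an invocation followed by a semicolon still composes safely with if/else. A small illustrative example of the idiom; the macro and values below are made up and are not the parport code:

	#include <stdio.h>

	#define report_mode(cond, name) \
		do { \
			if (cond) \
				printf("%s ", name); \
		} while (0)

	int main(void)
	{
		int have_epp = 1;

		if (have_epp)
			report_mode(have_epp, "EPP");	/* one statement, so the else below binds correctly */
		else
			printf("no EPP ");

		printf("\n");
		return 0;
	}
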
@@ -320,8 +324,7 @@ struct parport *parport_gsc_probe_port(unsigned long base,
	if (p->irq != PARPORT_IRQ_NONE) {
		if (request_irq (p->irq, parport_irq_handler,
				 0, p->name, p)) {
-			printk (KERN_WARNING "%s: irq %d in use, "
-				"resorting to polled operation\n",
+			pr_warn("%s: irq %d in use, resorting to polled operation\n",
				p->name, p->irq);
			p->irq = PARPORT_IRQ_NONE;
			p->dma = PARPORT_DMA_NONE;
@@ -352,7 +355,7 @@ static int __init parport_init_chip(struct parisc_device *dev)
	unsigned long port;

	if (!dev->irq) {
-		printk(KERN_WARNING "IRQ not found for parallel device at 0x%llx\n",
+		pr_warn("IRQ not found for parallel device at 0x%llx\n",
			(unsigned long long)dev->hpa.start);
		return -ENODEV;
	}


@@ -1348,9 +1348,8 @@ static unsigned int parport_ip32_fwp_wait_interrupt(struct parport *p)
		ecr = parport_ip32_read_econtrol(p);
		if ((ecr & ECR_F_EMPTY) && !(ecr & ECR_SERVINTR)
		    && !lost_interrupt) {
-			printk(KERN_WARNING PPIP32
-			       "%s: lost interrupt in %s\n",
-			       p->name, __func__);
+			pr_warn(PPIP32 "%s: lost interrupt in %s\n",
+				p->name, __func__);
			lost_interrupt = 1;
		}
	}
@@ -1654,8 +1653,8 @@ static size_t parport_ip32_compat_write_data(struct parport *p,
				  DSR_nBUSY | DSR_nFAULT)) {
			/* Avoid to flood the logs */
			if (ready_before)
-				printk(KERN_INFO PPIP32 "%s: not ready in %s\n",
-				       p->name, __func__);
+				pr_info(PPIP32 "%s: not ready in %s\n",
+					p->name, __func__);
			ready_before = 0;
			goto stop;
		}
@@ -1735,8 +1734,8 @@ static size_t parport_ip32_ecp_write_data(struct parport *p,
				  DSR_nBUSY | DSR_nFAULT)) {
			/* Avoid to flood the logs */
			if (ready_before)
-				printk(KERN_INFO PPIP32 "%s: not ready in %s\n",
-				       p->name, __func__);
+				pr_info(PPIP32 "%s: not ready in %s\n",
+					p->name, __func__);
			ready_before = 0;
			goto stop;
		}
@@ -2075,8 +2074,7 @@ static __init struct parport *parport_ip32_probe_port(void)
		p->modes |= PARPORT_MODE_TRISTATE;

	if (!parport_ip32_fifo_supported(p)) {
-		printk(KERN_WARNING PPIP32
-		       "%s: error: FIFO disabled\n", p->name);
+		pr_warn(PPIP32 "%s: error: FIFO disabled\n", p->name);
		/* Disable hardware modes depending on a working FIFO. */
		features &= ~PARPORT_IP32_ENABLE_SPP;
		features &= ~PARPORT_IP32_ENABLE_ECP;
@@ -2088,8 +2086,7 @@ static __init struct parport *parport_ip32_probe_port(void)
	if (features & PARPORT_IP32_ENABLE_IRQ) {
		int irq = MACEISA_PARALLEL_IRQ;
		if (request_irq(irq, parport_ip32_interrupt, 0, p->name, p)) {
-			printk(KERN_WARNING PPIP32
-			       "%s: error: IRQ disabled\n", p->name);
+			pr_warn(PPIP32 "%s: error: IRQ disabled\n", p->name);
			/* DMA cannot work without interrupts. */
			features &= ~PARPORT_IP32_ENABLE_DMA;
		} else {
@@ -2102,8 +2099,7 @@ static __init struct parport *parport_ip32_probe_port(void)
	/* Allocate DMA resources */
	if (features & PARPORT_IP32_ENABLE_DMA) {
		if (parport_ip32_dma_register())
-			printk(KERN_WARNING PPIP32
-			       "%s: error: DMA disabled\n", p->name);
+			pr_warn(PPIP32 "%s: error: DMA disabled\n", p->name);
		else {
			pr_probe(p, "DMA support enabled\n");
			p->dma = 0;	/* arbitrary value != PARPORT_DMA_NONE */
@@ -2145,8 +2141,7 @@ static __init struct parport *parport_ip32_probe_port(void)
	parport_ip32_dump_state(p, "end init", 0);

	/* Print out what we found */
-	printk(KERN_INFO "%s: SGI IP32 at 0x%lx (0x%lx)",
-	       p->name, p->base, p->base_hi);
+	pr_info("%s: SGI IP32 at 0x%lx (0x%lx)", p->name, p->base, p->base_hi);
	if (p->irq != PARPORT_IRQ_NONE)
		printk(", irq %d", p->irq);
	printk(" [");


@@ -324,7 +324,7 @@ static int __init parport_mfc3_init(void)
		p->dev = &z->dev;

		this_port[pias++] = p;
-		printk(KERN_INFO "%s: Multiface III port using irq\n", p->name);
+		pr_info("%s: Multiface III port using irq\n", p->name);
		/* XXX: set operating mode */
		p->private_data = (void *)piabase;
