Merge 4.19.313 into android-4.19-stable

Changes in 4.19.313
	batman-adv: Avoid infinite loop trying to resize local TT
	Bluetooth: Fix memory leak in hci_req_sync_complete()
	nouveau: fix function cast warning
	geneve: fix header validation in geneve[6]_xmit_skb
	ipv6: fib: hide unused 'pn' variable
	ipv4/route: avoid unused-but-set-variable warning
	ipv6: fix race condition between ipv6_get_ifaddr and ipv6_del_addr
	net/mlx5: Properly link new fs rules into the tree
	tracing: hide unused ftrace_event_id_fops
	vhost: Add smp_rmb() in vhost_vq_avail_empty()
	selftests: timers: Fix abs() warning in posix_timers test
	x86/apic: Force native_apic_mem_read() to use the MOV instruction
	btrfs: record delayed inode root in transaction
	selftests/ftrace: Limit length in subsystem-enable tests
	kprobes: Fix possible use-after-free issue on kprobe registration
	Revert "tracing/trigger: Fix to return error if failed to alloc snapshot"
	netfilter: nf_tables: __nft_expr_type_get() selects specific family type
	netfilter: nf_tables: Fix potential data-race in __nft_expr_type_get()
	tun: limit printing rate when illegal packet received by tun dev
	RDMA/mlx5: Fix port number for counter query in multi-port configuration
	drm: nv04: Fix out of bounds access
	comedi: vmk80xx: fix incomplete endpoint checking
	serial/pmac_zilog: Remove flawed mitigation for rx irq flood
	USB: serial: option: add Fibocom FM135-GL variants
	USB: serial: option: add support for Fibocom FM650/FG650
	USB: serial: option: add Lonsung U8300/U9300 product
	USB: serial: option: support Quectel EM060K sub-models
	USB: serial: option: add Rolling RW101-GL and RW135-GL support
	USB: serial: option: add Telit FN920C04 rmnet compositions
	Revert "usb: cdc-wdm: close race between read and workqueue"
	usb: dwc2: host: Fix dereference issue in DDMA completion flow.
	speakup: Avoid crash on very long word
	fs: sysfs: Fix reference leak in sysfs_break_active_protection()
	nouveau: fix instmem race condition around ptr stores
	nilfs2: fix OOB in nilfs_set_de_type
	tracing: Remove hist trigger synth_var_refs
	tracing: Use var_refs[] for hist trigger reference checking
	arm64: dts: rockchip: fix alphabetical ordering RK3399 puma
	arm64: dts: rockchip: enable internal pull-up on PCIE_WAKE# for RK3399 Puma
	arm64: dts: mediatek: mt7622: fix IR nodename
	arm64: dts: mediatek: mt7622: fix ethernet controller "compatible"
	arm64: dts: mediatek: mt7622: drop "reset-names" from thermal block
	ARC: [plat-hsdk]: Remove misplaced interrupt-cells property
	vxlan: drop packets from invalid src-address
	mlxsw: core: Unregister EMAD trap using FORWARD action
	NFC: trf7970a: disable all regulators on removal
	net: usb: ax88179_178a: stop lying about skb->truesize
	net: gtp: Fix Use-After-Free in gtp_dellink
	ipvs: Fix checksumming on GSO of SCTP packets
	net: openvswitch: ovs_ct_exit to be done under ovs_lock
	net: openvswitch: Fix Use-After-Free in ovs_ct_exit
	i40e: Do not use WQ_MEM_RECLAIM flag for workqueue
	serial: core: Provide port lock wrappers
	serial: mxs-auart: add spinlock around changing cts state
	drm/amdgpu: restrict bo mapping within gpu address limits
	amdgpu: validate offset_in_bo of drm_amdgpu_gem_va
	drm/amdgpu: validate the parameters of bo mapping operations more clearly
	Revert "crypto: api - Disallow identical driver names"
	tracing: Show size of requested perf buffer
	tracing: Increase PERF_MAX_TRACE_SIZE to handle Sentinel1 and docker together
	Bluetooth: Fix type of len in {l2cap,sco}_sock_getsockopt_old()
	btrfs: fix information leak in btrfs_ioctl_logical_to_ino()
	arm64: dts: rockchip: enable internal pull-up for Q7_THRM# on RK3399 Puma
	irqchip/gic-v3-its: Prevent double free on error
	net: b44: set pause params only when interface is up
	stackdepot: respect __GFP_NOLOCKDEP allocation flag
	mtd: diskonchip: work around ubsan link failure
	tcp: Clean up kernel listener's reqsk in inet_twsk_purge()
	tcp: Fix NEW_SYN_RECV handling in inet_twsk_purge()
	dmaengine: owl: fix register access functions
	idma64: Don't try to serve interrupts when device is powered off
	i2c: smbus: fix NULL function pointer dereference
	HID: i2c-hid: remove I2C_HID_READ_PENDING flag to prevent lock-up
	Revert "loop: Remove sector_t truncation checks"
	Revert "y2038: rusage: use __kernel_old_timeval"
	udp: preserve the connected status if only UDP cmsg
	serial: core: fix kernel-doc for uart_port_unlock_irqrestore()
	Linux 4.19.313

Change-Id: I6558068fa522b4cd853251716389c0d30a47522f
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Greg Kroah-Hartman, 2024-05-03 06:16:46 +00:00
68 changed files with 482 additions and 315 deletions

View File

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 312
+SUBLEVEL = 313
 EXTRAVERSION =
 NAME = "People's Front"

View File

@@ -964,7 +964,7 @@ put_tv32(struct timeval32 __user *o, struct timespec64 *i)
 }
 static inline long
-put_tv_to_tv32(struct timeval32 __user *o, struct __kernel_old_timeval *i)
+put_tv_to_tv32(struct timeval32 __user *o, struct timeval *i)
 {
 return copy_to_user(o, &(struct timeval32){
 .tv_sec = i->tv_sec,

View File

@@ -170,7 +170,6 @@
 };
 gmac: ethernet@8000 {
-#interrupt-cells = <1>;
 compatible = "snps,dwmac";
 reg = <0x8000 0x2000>;
 interrupts = <10>;

View File

@@ -232,7 +232,7 @@
 clock-names = "hif_sel";
 };
-cir: cir@10009000 {
+cir: ir-receiver@10009000 {
 compatible = "mediatek,mt7622-cir";
 reg = <0 0x10009000 0 0x1000>;
 interrupts = <GIC_SPI 175 IRQ_TYPE_LEVEL_LOW>;
@@ -459,7 +459,6 @@
 <&pericfg CLK_PERI_AUXADC_PD>;
 clock-names = "therm", "auxadc";
 resets = <&pericfg MT7622_PERI_THERM_SW_RST>;
-reset-names = "therm";
 mediatek,auxadc = <&auxadc>;
 mediatek,apmixedsys = <&apmixedsys>;
 nvmem-cells = <&thermal_calibration>;
@@ -846,9 +845,7 @@
 };
 eth: ethernet@1b100000 {
-compatible = "mediatek,mt7622-eth",
-"mediatek,mt2701-eth",
-"syscon";
+compatible = "mediatek,mt7622-eth";
 reg = <0 0x1b100000 0 0x20000>;
 interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_LOW>,
 <GIC_SPI 224 IRQ_TYPE_LEVEL_LOW>,

View File

@@ -426,16 +426,22 @@
 gpio1830-supply = <&vcc_1v8>;
 };
-&pmu_io_domains {
-status = "okay";
-pmu1830-supply = <&vcc_1v8>;
-};
-&pwm2 {
-status = "okay";
+&pcie_clkreqn_cpm {
+rockchip,pins =
+<2 RK_PD2 RK_FUNC_GPIO &pcfg_pull_up>;
 };
 &pinctrl {
+pinctrl-names = "default";
+pinctrl-0 = <&q7_thermal_pin>;
+gpios {
+q7_thermal_pin: q7-thermal-pin {
+rockchip,pins =
+<0 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>;
+};
+};
 i2c8 {
 i2c8_xfer_a: i2c8-xfer {
 rockchip,pins =
@@ -466,6 +472,15 @@
 };
 };
+&pmu_io_domains {
+status = "okay";
+pmu1830-supply = <&vcc_1v8>;
+};
+&pwm2 {
+status = "okay";
+};
 &sdhci {
 /*
 * Signal integrity isn't great at 200MHz but 100MHz has proven stable

View File

@@ -11,6 +11,7 @@
 #include <asm/mpspec.h>
 #include <asm/msr.h>
 #include <asm/hardirq.h>
+#include <asm/io.h>
 #define ARCH_APICTIMER_STOPS_ON_C3 1
@@ -110,7 +111,7 @@ static inline void native_apic_mem_write(u32 reg, u32 v)
 static inline u32 native_apic_mem_read(u32 reg)
 {
-return *((volatile u32 *)(APIC_BASE + reg));
+return readl((void __iomem *)(APIC_BASE + reg));
 }
 extern void native_apic_wait_icr_idle(void);

View File

@@ -231,7 +231,6 @@ static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg)
 }
 if (!strcmp(q->cra_driver_name, alg->cra_name) ||
-!strcmp(q->cra_driver_name, alg->cra_driver_name) ||
 !strcmp(q->cra_name, alg->cra_driver_name))
 goto err;
 }

View File

@@ -172,6 +172,10 @@ static irqreturn_t idma64_irq(int irq, void *dev)
 u32 status_err;
 unsigned short i;
+/* Since IRQ may be shared, check if DMA controller is powered on */
+if (status == GENMASK(31, 0))
+return IRQ_NONE;
 dev_vdbg(idma64->dma.dev, "%s: status=%#x\n", __func__, status);
 /* Check if we have any interrupt from the DMA controller */

View File

@@ -230,7 +230,7 @@ static void pchan_update(struct owl_dma_pchan *pchan, u32 reg,
 else
 regval &= ~val;
-writel(val, pchan->base + reg);
+writel(regval, pchan->base + reg);
 }
 static void pchan_writel(struct owl_dma_pchan *pchan, u32 reg, u32 data)
@@ -254,7 +254,7 @@ static void dma_update(struct owl_dma *od, u32 reg, u32 val, bool state)
 else
 regval &= ~val;
-writel(val, od->base + reg);
+writel(regval, od->base + reg);
 }
 static void dma_writel(struct owl_dma *od, u32 reg, u32 data)

View File

@@ -2048,6 +2048,37 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
 trace_amdgpu_vm_bo_map(bo_va, mapping);
 }
+/* Validate operation parameters to prevent potential abuse */
+static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
+struct amdgpu_bo *bo,
+uint64_t saddr,
+uint64_t offset,
+uint64_t size)
+{
+uint64_t tmp, lpfn;
+if (saddr & AMDGPU_GPU_PAGE_MASK
+|| offset & AMDGPU_GPU_PAGE_MASK
+|| size & AMDGPU_GPU_PAGE_MASK)
+return -EINVAL;
+if (check_add_overflow(saddr, size, &tmp)
+|| check_add_overflow(offset, size, &tmp)
+|| size == 0 /* which also leads to end < begin */)
+return -EINVAL;
+/* make sure object fit at this offset */
+if (bo && offset + size > amdgpu_bo_size(bo))
+return -EINVAL;
+/* Ensure last pfn not exceed max_pfn */
+lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT;
+if (lpfn >= adev->vm_manager.max_pfn)
+return -EINVAL;
+return 0;
+}
 /**
 * amdgpu_vm_bo_map - map bo inside a vm
 *
@@ -2074,20 +2105,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 struct amdgpu_bo *bo = bo_va->base.bo;
 struct amdgpu_vm *vm = bo_va->base.vm;
 uint64_t eaddr;
+int r;
-/* validate the parameters */
-if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
-size == 0 || size & ~PAGE_MASK)
-return -EINVAL;
-/* make sure object fit at this offset */
-eaddr = saddr + size - 1;
-if (saddr >= eaddr ||
-(bo && offset + size > amdgpu_bo_size(bo)))
-return -EINVAL;
+r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
+if (r)
+return r;
 saddr /= AMDGPU_GPU_PAGE_SIZE;
-eaddr /= AMDGPU_GPU_PAGE_SIZE;
+eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
 if (tmp) {
@@ -2140,16 +2165,9 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
 uint64_t eaddr;
 int r;
-/* validate the parameters */
-if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
-size == 0 || size & ~PAGE_MASK)
-return -EINVAL;
-/* make sure object fit at this offset */
-eaddr = saddr + size - 1;
-if (saddr >= eaddr ||
-(bo && offset + size > amdgpu_bo_size(bo)))
-return -EINVAL;
+r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
+if (r)
+return r;
 /* Allocate all the needed memory */
 mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
@@ -2163,7 +2181,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
 }
 saddr /= AMDGPU_GPU_PAGE_SIZE;
-eaddr /= AMDGPU_GPU_PAGE_SIZE;
+eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
 mapping->start = saddr;
 mapping->last = eaddr;
@@ -2250,10 +2268,14 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
 struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
 LIST_HEAD(removed);
 uint64_t eaddr;
+int r;
+r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size);
+if (r)
+return r;
-eaddr = saddr + size - 1;
 saddr /= AMDGPU_GPU_PAGE_SIZE;
-eaddr /= AMDGPU_GPU_PAGE_SIZE;
+eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
 /* Allocate all the needed memory */
 before = kzalloc(sizeof(*before), GFP_KERNEL);

View File

@@ -25,6 +25,7 @@
 #include <drm/drmP.h>
 #include "nouveau_drv.h"
+#include "nouveau_bios.h"
 #include "nouveau_reg.h"
 #include "dispnv04/hw.h"
 #include "nouveau_encoder.h"
@@ -1674,7 +1675,7 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
 */
 if (nv_match_device(dev, 0x0201, 0x1462, 0x8851)) {
 if (*conn == 0xf2005014 && *conf == 0xffffffff) {
-fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, 1);
+fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, DCB_OUTPUT_B);
 return false;
 }
 }
@@ -1760,26 +1761,26 @@ fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
 #ifdef __powerpc__
 /* Apple iMac G4 NV17 */
 if (of_machine_is_compatible("PowerMac4,5")) {
-fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, 1);
-fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, 2);
+fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, DCB_OUTPUT_B);
+fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, DCB_OUTPUT_C);
 return;
 }
 #endif
 /* Make up some sane defaults */
 fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG,
-bios->legacy.i2c_indices.crt, 1, 1);
+bios->legacy.i2c_indices.crt, 1, DCB_OUTPUT_B);
 if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
 fabricate_dcb_output(dcb, DCB_OUTPUT_TV,
 bios->legacy.i2c_indices.tv,
-all_heads, 0);
+all_heads, DCB_OUTPUT_A);
 else if (bios->tmds.output0_script_ptr ||
 bios->tmds.output1_script_ptr)
 fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS,
 bios->legacy.i2c_indices.panel,
-all_heads, 1);
+all_heads, DCB_OUTPUT_B);
 }
 static int

View File

@@ -66,11 +66,16 @@ of_init(struct nvkm_bios *bios, const char *name)
 return ERR_PTR(-EINVAL);
 }
+static void of_fini(void *p)
+{
+kfree(p);
+}
 const struct nvbios_source
 nvbios_of = {
 .name = "OpenFirmware",
 .init = of_init,
-.fini = (void(*)(void *))kfree,
+.fini = of_fini,
 .read = of_read,
 .size = of_size,
 .rw = false,

View File

@@ -221,8 +221,11 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
 void __iomem *map = NULL;
 /* Already mapped? */
-if (refcount_inc_not_zero(&iobj->maps))
+if (refcount_inc_not_zero(&iobj->maps)) {
+/* read barrier match the wmb on refcount set */
+smp_rmb();
 return iobj->map;
+}
 /* Take the lock, and re-check that another thread hasn't
 * already mapped the object in the meantime.
@@ -249,6 +252,8 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
 iobj->base.memory.ptrs = &nv50_instobj_fast;
 else
 iobj->base.memory.ptrs = &nv50_instobj_slow;
+/* barrier to ensure the ptrs are written before refcount is set */
+smp_wmb();
 refcount_set(&iobj->maps, 1);
 }

View File

@@ -58,7 +58,6 @@
 /* flags */
 #define I2C_HID_STARTED 0
 #define I2C_HID_RESET_PENDING 1
-#define I2C_HID_READ_PENDING 2
 #define I2C_HID_PWR_ON 0x00
 #define I2C_HID_PWR_SLEEP 0x01
@@ -259,7 +258,6 @@ static int __i2c_hid_command(struct i2c_client *client,
 msg[1].len = data_len;
 msg[1].buf = buf_recv;
 msg_num = 2;
-set_bit(I2C_HID_READ_PENDING, &ihid->flags);
 }
 if (wait)
@@ -267,9 +265,6 @@ static int __i2c_hid_command(struct i2c_client *client,
 ret = i2c_transfer(client->adapter, msg, msg_num);
-if (data_len > 0)
-clear_bit(I2C_HID_READ_PENDING, &ihid->flags);
 if (ret != msg_num)
 return ret < 0 ? ret : -EIO;
@@ -550,9 +545,6 @@ static irqreturn_t i2c_hid_irq(int irq, void *dev_id)
 {
 struct i2c_hid *ihid = dev_id;
-if (test_bit(I2C_HID_READ_PENDING, &ihid->flags))
-return IRQ_HANDLED;
 i2c_hid_get_input(ihid);
 return IRQ_HANDLED;

View File

@@ -1872,13 +1872,18 @@ static int i2c_check_for_quirks(struct i2c_adapter *adap, struct i2c_msg *msgs,
 * Returns negative errno, else the number of messages executed.
 *
 * Adapter lock must be held when calling this function. No debug logging
-* takes place. adap->algo->master_xfer existence isn't checked.
+* takes place.
 */
 int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
 {
 unsigned long orig_jiffies;
 int ret, try;
+if (!adap->algo->master_xfer) {
+dev_dbg(&adap->dev, "I2C level transfers not supported\n");
+return -EOPNOTSUPP;
+}
 if (WARN_ON(!msgs || num < 1))
 return -EINVAL;

View File

@@ -216,7 +216,8 @@ static int process_pma_cmd(struct mlx5_ib_dev *dev, u8 port_num,
 mdev = dev->mdev;
 mdev_port_num = 1;
 }
-if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1) {
+if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1 &&
+!mlx5_core_mp_enabled(mdev)) {
 /* set local port to one for Function-Per-Port HCA. */
 mdev = dev->mdev;
 mdev_port_num = 1;

View File

@@ -2994,13 +2994,8 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
 set_bit(i, bitmap);
 }
-if (err) {
-if (i > 0)
-its_vpe_irq_domain_free(domain, virq, i);
-its_lpi_free(bitmap, base, nr_ids);
-its_free_prop_table(vprop_page);
-}
+if (err)
+its_vpe_irq_domain_free(domain, virq, i);
 return err;
 }

View File

@@ -52,7 +52,7 @@ static unsigned long doc_locations[] __initdata = {
 0xe8000, 0xea000, 0xec000, 0xee000,
 #endif
 #endif
-0xffffffff };
+};
 static struct mtd_info *doclist = NULL;
@@ -1678,7 +1678,7 @@ static int __init init_nanddoc(void)
 if (ret < 0)
 return ret;
 } else {
-for (i = 0; (doc_locations[i] != 0xffffffff); i++) {
+for (i = 0; i < ARRAY_SIZE(doc_locations); i++) {
 doc_probe(doc_locations[i]);
 }
 }

View File

@@ -2033,12 +2033,14 @@ static int b44_set_pauseparam(struct net_device *dev,
 bp->flags |= B44_FLAG_TX_PAUSE;
 else
 bp->flags &= ~B44_FLAG_TX_PAUSE;
-if (bp->flags & B44_FLAG_PAUSE_AUTO) {
-b44_halt(bp);
-b44_init_rings(bp);
-b44_init_hw(bp, B44_FULL_RESET);
-} else {
-__b44_set_flow_ctrl(bp, bp->flags);
+if (netif_running(dev)) {
+if (bp->flags & B44_FLAG_PAUSE_AUTO) {
+b44_halt(bp);
+b44_init_rings(bp);
+b44_init_hw(bp, B44_FULL_RESET);
+} else {
+__b44_set_flow_ctrl(bp, bp->flags);
+}
 }
 spin_unlock_irq(&bp->lock);

View File

@@ -14728,7 +14728,7 @@ static int __init i40e_init_module(void)
 * since we need to be able to guarantee forward progress even under
 * memory pressure.
 */
-i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
+i40e_wq = alloc_workqueue("%s", 0, 0, i40e_driver_name);
 if (!i40e_wq) {
 pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
 return -ENOMEM;

View File

@@ -1452,8 +1452,9 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
 }
 trace_mlx5_fs_set_fte(fte, false);
+/* Link newly added rules into the tree. */
 for (i = 0; i < handle->num_rules; i++) {
-if (refcount_read(&handle->rule[i]->node.refcount) == 1) {
+if (!handle->rule[i]->node.parent) {
 tree_add_node(&handle->rule[i]->node, &fte->node);
 trace_mlx5_fs_add_rule(handle->rule[i]);
 }

View File

@@ -561,7 +561,7 @@ static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
 static const struct mlxsw_listener mlxsw_emad_rx_listener =
 MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false,
-EMAD, DISCARD);
+EMAD, FORWARD);
 static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
 {

View File

@@ -838,7 +838,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
 __be16 df;
 int err;
-if (!pskb_inet_may_pull(skb))
+if (!skb_vlan_inet_prepare(skb))
 return -EINVAL;
 sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
@@ -884,7 +884,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
 __be16 sport;
 int err;
-if (!pskb_inet_may_pull(skb))
+if (!skb_vlan_inet_prepare(skb))
 return -EINVAL;
 sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);

View File

@@ -710,11 +710,12 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
 static void gtp_dellink(struct net_device *dev, struct list_head *head)
 {
 struct gtp_dev *gtp = netdev_priv(dev);
+struct hlist_node *next;
 struct pdp_ctx *pctx;
 int i;
 for (i = 0; i < gtp->hash_size; i++)
-hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid)
+hlist_for_each_entry_safe(pctx, next, &gtp->tid_hash[i], hlist_tid)
 pdp_context_delete(pctx);
 gtp_encap_disable(gtp);

View File

@@ -2168,14 +2168,16 @@ static ssize_t tun_put_user(struct tun_struct *tun,
 tun_is_little_endian(tun), true,
 vlan_hlen)) {
 struct skb_shared_info *sinfo = skb_shinfo(skb);
-pr_err("unexpected GSO type: "
-"0x%x, gso_size %d, hdr_len %d\n",
-sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
-tun16_to_cpu(tun, gso.hdr_len));
-print_hex_dump(KERN_ERR, "tun: ",
-DUMP_PREFIX_NONE,
-16, 1, skb->head,
-min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
+if (net_ratelimit()) {
+netdev_err(tun->dev, "unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n",
+sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
+tun16_to_cpu(tun, gso.hdr_len));
+print_hex_dump(KERN_ERR, "tun: ",
+DUMP_PREFIX_NONE,
+16, 1, skb->head,
+min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
+}
 WARN_ON_ONCE(1);
 return -EINVAL;
 }

View File

@@ -1465,21 +1465,16 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 /* Skip IP alignment pseudo header */
 skb_pull(skb, 2);
+skb->truesize = SKB_TRUESIZE(pkt_len_plus_padd);
 ax88179_rx_checksum(skb, pkt_hdr);
 return 1;
 }
-ax_skb = skb_clone(skb, GFP_ATOMIC);
+ax_skb = netdev_alloc_skb_ip_align(dev->net, pkt_len);
 if (!ax_skb)
 return 0;
-skb_trim(ax_skb, pkt_len);
-/* Skip IP alignment pseudo header */
-skb_pull(ax_skb, 2);
-skb->truesize = pkt_len_plus_padd +
-SKB_DATA_ALIGN(sizeof(struct sk_buff));
+skb_put(ax_skb, pkt_len);
+memcpy(ax_skb->data, skb->data + 2, pkt_len);
 ax88179_rx_checksum(ax_skb, pkt_hdr);
 usbnet_skb_return(dev, ax_skb);

View File

@@ -1320,6 +1320,10 @@ static bool vxlan_set_mac(struct vxlan_dev *vxlan,
 if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
 return false;
+/* Ignore packets from invalid src-address */
+if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
+return false;
 /* Get address from the outer IP header */
 if (vxlan_get_sk_family(vs) == AF_INET) {
 saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;

View File

@@ -427,7 +427,8 @@ struct trf7970a {
 enum trf7970a_state state;
 struct device *dev;
 struct spi_device *spi;
-struct regulator *regulator;
+struct regulator *vin_regulator;
+struct regulator *vddio_regulator;
 struct nfc_digital_dev *ddev;
 u32 quirks;
 bool is_initiator;
@@ -1886,7 +1887,7 @@ static int trf7970a_power_up(struct trf7970a *trf)
 if (trf->state != TRF7970A_ST_PWR_OFF)
 return 0;
-ret = regulator_enable(trf->regulator);
+ret = regulator_enable(trf->vin_regulator);
 if (ret) {
 dev_err(trf->dev, "%s - Can't enable VIN: %d\n", __func__, ret);
 return ret;
@@ -1929,7 +1930,7 @@ static int trf7970a_power_down(struct trf7970a *trf)
 if (trf->en2_gpiod && !(trf->quirks & TRF7970A_QUIRK_EN2_MUST_STAY_LOW))
 gpiod_set_value_cansleep(trf->en2_gpiod, 0);
-ret = regulator_disable(trf->regulator);
+ret = regulator_disable(trf->vin_regulator);
 if (ret)
 dev_err(trf->dev, "%s - Can't disable VIN: %d\n", __func__,
 ret);
@@ -2068,37 +2069,37 @@ static int trf7970a_probe(struct spi_device *spi)
 mutex_init(&trf->lock);
 INIT_DELAYED_WORK(&trf->timeout_work, trf7970a_timeout_work_handler);
-trf->regulator = devm_regulator_get(&spi->dev, "vin");
-if (IS_ERR(trf->regulator)) {
-ret = PTR_ERR(trf->regulator);
+trf->vin_regulator = devm_regulator_get(&spi->dev, "vin");
+if (IS_ERR(trf->vin_regulator)) {
+ret = PTR_ERR(trf->vin_regulator);
 dev_err(trf->dev, "Can't get VIN regulator: %d\n", ret);
 goto err_destroy_lock;
 }
-ret = regulator_enable(trf->regulator);
+ret = regulator_enable(trf->vin_regulator);
 if (ret) {
 dev_err(trf->dev, "Can't enable VIN: %d\n", ret);
 goto err_destroy_lock;
 }
-uvolts = regulator_get_voltage(trf->regulator);
+uvolts = regulator_get_voltage(trf->vin_regulator);
 if (uvolts > 4000000)
 trf->chip_status_ctrl = TRF7970A_CHIP_STATUS_VRS5_3;
-trf->regulator = devm_regulator_get(&spi->dev, "vdd-io");
-if (IS_ERR(trf->regulator)) {
-ret = PTR_ERR(trf->regulator);
+trf->vddio_regulator = devm_regulator_get(&spi->dev, "vdd-io");
+if (IS_ERR(trf->vddio_regulator)) {
+ret = PTR_ERR(trf->vddio_regulator);
 dev_err(trf->dev, "Can't get VDD_IO regulator: %d\n", ret);
-goto err_destroy_lock;
+goto err_disable_vin_regulator;
 }
-ret = regulator_enable(trf->regulator);
+ret = regulator_enable(trf->vddio_regulator);
 if (ret) {
 dev_err(trf->dev, "Can't enable VDD_IO: %d\n", ret);
-goto err_destroy_lock;
+goto err_disable_vin_regulator;
 }
-if (regulator_get_voltage(trf->regulator) == 1800000) {
+if (regulator_get_voltage(trf->vddio_regulator) == 1800000) {
 trf->io_ctrl = TRF7970A_REG_IO_CTRL_IO_LOW;
 dev_dbg(trf->dev, "trf7970a config vdd_io to 1.8V\n");
 }
@@ -2111,7 +2112,7 @@ static int trf7970a_probe(struct spi_device *spi)
 if (!trf->ddev) {
 dev_err(trf->dev, "Can't allocate NFC digital device\n");
 ret = -ENOMEM;
-goto err_disable_regulator;
+goto err_disable_vddio_regulator;
 }
 nfc_digital_set_parent_dev(trf->ddev, trf->dev);
@@ -2140,8 +2141,10 @@ static int trf7970a_probe(struct spi_device *spi)
 trf7970a_shutdown(trf);
 err_free_ddev:
 nfc_digital_free_device(trf->ddev);
-err_disable_regulator:
-regulator_disable(trf->regulator);
+err_disable_vddio_regulator:
+regulator_disable(trf->vddio_regulator);
+err_disable_vin_regulator:
+regulator_disable(trf->vin_regulator);
 err_destroy_lock:
 mutex_destroy(&trf->lock);
 return ret;
@@ -2160,7 +2163,8 @@ static int trf7970a_remove(struct spi_device *spi)
 nfc_digital_unregister_device(trf->ddev);
 nfc_digital_free_device(trf->ddev);
-regulator_disable(trf->regulator);
+regulator_disable(trf->vddio_regulator);
+regulator_disable(trf->vin_regulator);
 mutex_destroy(&trf->lock);

View File

@@ -642,32 +642,21 @@ static int vmk80xx_find_usb_endpoints(struct comedi_device *dev)
 struct vmk80xx_private *devpriv = dev->private;
 struct usb_interface *intf = comedi_to_usb_interface(dev);
 struct usb_host_interface *iface_desc = intf->cur_altsetting;
-struct usb_endpoint_descriptor *ep_desc;
-int i;
+struct usb_endpoint_descriptor *ep_rx_desc, *ep_tx_desc;
+int ret;
-if (iface_desc->desc.bNumEndpoints != 2)
+if (devpriv->model == VMK8061_MODEL)
+ret = usb_find_common_endpoints(iface_desc, &ep_rx_desc,
+&ep_tx_desc, NULL, NULL);
+else
+ret = usb_find_common_endpoints(iface_desc, NULL, NULL,
+&ep_rx_desc, &ep_tx_desc);
+if (ret)
 return -ENODEV;
-for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
-ep_desc = &iface_desc->endpoint[i].desc;
-if (usb_endpoint_is_int_in(ep_desc) ||
-usb_endpoint_is_bulk_in(ep_desc)) {
-if (!devpriv->ep_rx)
-devpriv->ep_rx = ep_desc;
-continue;
-}
-if (usb_endpoint_is_int_out(ep_desc) ||
-usb_endpoint_is_bulk_out(ep_desc)) {
-if (!devpriv->ep_tx)
-devpriv->ep_tx = ep_desc;
-continue;
-}
-}
-if (!devpriv->ep_rx || !devpriv->ep_tx)
-return -ENODEV;
+devpriv->ep_rx = ep_rx_desc;
+devpriv->ep_tx = ep_tx_desc;
 if (!usb_endpoint_maxp(devpriv->ep_rx) || !usb_endpoint_maxp(devpriv->ep_tx))
 return -EINVAL;

View File

@@ -577,7 +577,7 @@ static u_long get_word(struct vc_data *vc)
 }
 attr_ch = get_char(vc, (u_short *)tmp_pos, &spk_attr);
 buf[cnt++] = attr_ch;
-while (tmpx < vc->vc_cols - 1) {
+while (tmpx < vc->vc_cols - 1 && cnt < sizeof(buf) - 1) {
 tmp_pos += 2;
 tmpx++;
 ch = get_char(vc, (u_short *)tmp_pos, &temp);

View File

@@ -1128,11 +1128,13 @@ static void mxs_auart_set_ldisc(struct uart_port *port,
 static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
 {
-u32 istat;
+u32 istat, stat;
 struct mxs_auart_port *s = context;
 u32 mctrl_temp = s->mctrl_prev;
-u32 stat = mxs_read(s, REG_STAT);
+uart_port_lock(&s->port);
+stat = mxs_read(s, REG_STAT);
 istat = mxs_read(s, REG_INTR);
 /* ack irq */
@@ -1168,6 +1170,8 @@ static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
 istat &= ~AUART_INTR_TXIS;
 }
+uart_port_unlock(&s->port);
 return IRQ_HANDLED;
 }

View File

@@ -220,7 +220,6 @@ static bool pmz_receive_chars(struct uart_pmac_port *uap)
 {
 struct tty_port *port;
 unsigned char ch, r1, drop, error, flag;
-int loops = 0;
 /* Sanity check, make sure the old bug is no longer happening */
 if (uap->port.state == NULL) {
@@ -303,24 +302,11 @@ static bool pmz_receive_chars(struct uart_pmac_port *uap)
 if (r1 & Rx_OVR)
 tty_insert_flip_char(port, 0, TTY_OVERRUN);
 next_char:
-/* We can get stuck in an infinite loop getting char 0 when the
-* line is in a wrong HW state, we break that here.
-* When that happens, I disable the receive side of the driver.
-* Note that what I've been experiencing is a real irq loop where
-* I'm getting flooded regardless of the actual port speed.
-* Something strange is going on with the HW
-*/
-if ((++loops) > 1000)
-goto flood;
 ch = read_zsreg(uap, R0);
 if (!(ch & Rx_CH_AV))
 break;
 }
-return true;
-flood:
-pmz_interrupt_control(uap, 0);
-pmz_error("pmz: rx irq flood !\n");
 return true;
 }

View File

@@ -471,7 +471,6 @@ static ssize_t wdm_write
 static int service_outstanding_interrupt(struct wdm_device *desc)
 {
 int rv = 0;
-int used;
 /* submit read urb only if the device is waiting for it */
 if (!desc->resp_count || !--desc->resp_count)
@@ -486,10 +485,7 @@ static int service_outstanding_interrupt(struct wdm_device *desc)
 goto out;
 }
-used = test_and_set_bit(WDM_RESPONDING, &desc->flags);
-if (used)
-goto out;
+set_bit(WDM_RESPONDING, &desc->flags);
 spin_unlock_irq(&desc->iuspin);
 rv = usb_submit_urb(desc->response, GFP_KERNEL);
 spin_lock_irq(&desc->iuspin);

View File

@@ -897,13 +897,15 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
 struct dwc2_dma_desc *dma_desc;
 struct dwc2_hcd_iso_packet_desc *frame_desc;
 u16 frame_desc_idx;
-struct urb *usb_urb = qtd->urb->priv;
+struct urb *usb_urb;
 u16 remain = 0;
 int rc = 0;
 if (!qtd->urb)
 return -EINVAL;
+usb_urb = qtd->urb->priv;
 dma_sync_single_for_cpu(hsotg->dev, qh->desc_list_dma + (idx *
 sizeof(struct dwc2_dma_desc)),
 sizeof(struct dwc2_dma_desc),

View File

@@ -255,6 +255,10 @@ static void option_instat_callback(struct urb *urb);
 #define QUECTEL_PRODUCT_EM061K_LMS 0x0124
 #define QUECTEL_PRODUCT_EC25 0x0125
 #define QUECTEL_PRODUCT_EM060K_128 0x0128
+#define QUECTEL_PRODUCT_EM060K_129 0x0129
+#define QUECTEL_PRODUCT_EM060K_12a 0x012a
+#define QUECTEL_PRODUCT_EM060K_12b 0x012b
+#define QUECTEL_PRODUCT_EM060K_12c 0x012c
 #define QUECTEL_PRODUCT_EG91 0x0191
 #define QUECTEL_PRODUCT_EG95 0x0195
 #define QUECTEL_PRODUCT_BG96 0x0296
@@ -1218,6 +1222,18 @@ static const struct usb_device_id option_ids[] = {
 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0xff, 0x30) },
 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0x00, 0x40) },
 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0xff, 0x40) },
+{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_129, 0xff, 0xff, 0x30) },
+{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_129, 0xff, 0x00, 0x40) },
+{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_129, 0xff, 0xff, 0x40) },
+{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12a, 0xff, 0xff, 0x30) },
+{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12a, 0xff, 0x00, 0x40) },
+{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12a, 0xff, 0xff, 0x40) },
+{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12b, 0xff, 0xff, 0x30) },
+{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12b, 0xff, 0x00, 0x40) },
+{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12b, 0xff, 0xff, 0x40) },
+{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12c, 0xff, 0xff, 0x30) },
+{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12c, 0xff, 0x00, 0x40) },
+{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12c, 0xff, 0xff, 0x40) },
 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x30) },
 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0x00, 0x40) },
 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x40) },
@@ -1360,6 +1376,12 @@ static const struct usb_device_id option_ids[] = {
 .driver_info = NCTRL(2) | RSVD(3) },
 { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1083, 0xff), /* Telit FE990 (ECM) */
 .driver_info = NCTRL(0) | RSVD(1) },
+{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a0, 0xff), /* Telit FN20C04 (rmnet) */
+ .driver_info = RSVD(0) | NCTRL(3) },
+{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a4, 0xff), /* Telit FN20C04 (rmnet) */
+ .driver_info = RSVD(0) | NCTRL(3) },
+{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a9, 0xff), /* Telit FN20C04 (rmnet) */
+ .driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) },
 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
 .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
@@ -2052,6 +2074,10 @@ static const struct usb_device_id option_ids[] = {
 .driver_info = RSVD(3) },
 { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9803, 0xff),
 .driver_info = RSVD(4) },
+{ USB_DEVICE(LONGCHEER_VENDOR_ID, 0x9b05), /* Longsung U8300 */
+ .driver_info = RSVD(4) | RSVD(5) },
+{ USB_DEVICE(LONGCHEER_VENDOR_ID, 0x9b3c), /* Longsung U9300 */
+ .driver_info = RSVD(0) | RSVD(4) },
 { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
 { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
 { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
@@ -2272,15 +2298,29 @@ static const struct usb_device_id option_ids[] = {
 { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */
 { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */
 { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0111, 0xff) }, /* Fibocom FM160 (MBIM mode) */
+{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0115, 0xff), /* Fibocom FM135 (laptop MBIM) */
+ .driver_info = RSVD(5) },
 { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
 { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */
 { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a3, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */
 { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff), /* Fibocom FM101-GL (laptop MBIM) */
 .driver_info = RSVD(4) },
+{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a04, 0xff) }, /* Fibocom FM650-CN (ECM mode) */
+{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a05, 0xff) }, /* Fibocom FM650-CN (NCM mode) */
+{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a06, 0xff) }, /* Fibocom FM650-CN (RNDIS mode) */
+{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a07, 0xff) }, /* Fibocom FM650-CN (MBIM mode) */
 { USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */
 { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
 { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
 { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */
+{ USB_DEVICE(0x33f8, 0x0104), /* Rolling RW101-GL (laptop RMNET) */
+ .driver_info = RSVD(4) | RSVD(5) },
+{ USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x01a2, 0xff) }, /* Rolling RW101-GL (laptop MBIM) */
+{ USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x01a3, 0xff) }, /* Rolling RW101-GL (laptop MBIM) */
+{ USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x01a4, 0xff), /* Rolling RW101-GL (laptop MBIM) */
+ .driver_info = RSVD(4) },
+{ USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x0115, 0xff), /* Rolling RW135-GL (laptop MBIM) */
+ .driver_info = RSVD(5) },
 { USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) },
 { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) },
 { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x40) },

View File

@@ -2432,9 +2432,19 @@ bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 r = vhost_get_avail(vq, avail_idx, &vq->avail->idx);
 if (unlikely(r))
 return false;
-vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
-return vq->avail_idx == vq->last_avail_idx;
+vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
+if (vq->avail_idx != vq->last_avail_idx) {
+/* Since we have updated avail_idx, the following
+ * call to vhost_get_vq_desc() will read available
+ * ring entries. Make sure that read happens after
+ * the avail_idx read.
+ */
+smp_rmb();
+return false;
+}
+return true;
 }
 EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);

View File

@@ -2236,20 +2236,14 @@ struct btrfs_data_container *init_data_container(u32 total_bytes)
 size_t alloc_bytes;
 alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
-data = kvmalloc(alloc_bytes, GFP_KERNEL);
+data = kvzalloc(alloc_bytes, GFP_KERNEL);
 if (!data)
 return ERR_PTR(-ENOMEM);
-if (total_bytes >= sizeof(*data)) {
+if (total_bytes >= sizeof(*data))
 data->bytes_left = total_bytes - sizeof(*data);
-data->bytes_missing = 0;
-} else {
+else
 data->bytes_missing = sizeof(*data) - total_bytes;
-data->bytes_left = 0;
-}
-data->elem_cnt = 0;
-data->elem_missed = 0;
 return data;
 }

View File

@@ -1133,6 +1133,9 @@ __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
 if (ret)
 return ret;
+ret = btrfs_record_root_in_trans(trans, node->root);
+if (ret)
+return ret;
 ret = btrfs_update_delayed_inode(trans, node->root, path, node);
 return ret;
 }

View File

@@ -243,7 +243,7 @@ nilfs_filetype_table[NILFS_FT_MAX] = {
 #define S_SHIFT 12
 static unsigned char
-nilfs_type_by_mode[S_IFMT >> S_SHIFT] = {
+nilfs_type_by_mode[(S_IFMT >> S_SHIFT) + 1] = {
 [S_IFREG >> S_SHIFT] = NILFS_FT_REG_FILE,
 [S_IFDIR >> S_SHIFT] = NILFS_FT_DIR,
 [S_IFCHR >> S_SHIFT] = NILFS_FT_CHRDEV,

View File

@@ -431,6 +431,8 @@ struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj,
 kn = kernfs_find_and_get(kobj->sd, attr->name);
 if (kn)
 kernfs_break_active_protection(kn);
+else
+kobject_put(kobj);
 return kn;
 }
 EXPORT_SYMBOL_GPL(sysfs_break_active_protection);

View File

@@ -264,6 +264,85 @@ struct uart_port {
 void *private_data; /* generic platform data pointer */
 };
+/**
+* uart_port_lock - Lock the UART port
+* @up: Pointer to UART port structure
+*/
+static inline void uart_port_lock(struct uart_port *up)
+{
+spin_lock(&up->lock);
+}
+/**
+* uart_port_lock_irq - Lock the UART port and disable interrupts
+* @up: Pointer to UART port structure
+*/
+static inline void uart_port_lock_irq(struct uart_port *up)
+{
+spin_lock_irq(&up->lock);
+}
+/**
+* uart_port_lock_irqsave - Lock the UART port, save and disable interrupts
+* @up: Pointer to UART port structure
+* @flags: Pointer to interrupt flags storage
+*/
+static inline void uart_port_lock_irqsave(struct uart_port *up, unsigned long *flags)
+{
+spin_lock_irqsave(&up->lock, *flags);
+}
+/**
+* uart_port_trylock - Try to lock the UART port
+* @up: Pointer to UART port structure
+*
+* Returns: True if lock was acquired, false otherwise
+*/
+static inline bool uart_port_trylock(struct uart_port *up)
+{
+return spin_trylock(&up->lock);
+}
+/**
+* uart_port_trylock_irqsave - Try to lock the UART port, save and disable interrupts
+* @up: Pointer to UART port structure
+* @flags: Pointer to interrupt flags storage
+*
+* Returns: True if lock was acquired, false otherwise
+*/
+static inline bool uart_port_trylock_irqsave(struct uart_port *up, unsigned long *flags)
+{
+return spin_trylock_irqsave(&up->lock, *flags);
+}
+/**
+* uart_port_unlock - Unlock the UART port
+* @up: Pointer to UART port structure
+*/
+static inline void uart_port_unlock(struct uart_port *up)
+{
+spin_unlock(&up->lock);
+}
+/**
+* uart_port_unlock_irq - Unlock the UART port and re-enable interrupts
+* @up: Pointer to UART port structure
+*/
+static inline void uart_port_unlock_irq(struct uart_port *up)
+{
+spin_unlock_irq(&up->lock);
+}
+/**
+* uart_port_unlock_irqrestore - Unlock the UART port, restore interrupts
+* @up: Pointer to UART port structure
+* @flags: The saved interrupt flags for restore
+*/
+static inline void uart_port_unlock_irqrestore(struct uart_port *up, unsigned long flags)
+{
+spin_unlock_irqrestore(&up->lock, flags);
+}
 static inline int serial_port_in(struct uart_port *up, int offset)
 {
 return up->serial_in(up, offset);

View File

@@ -414,7 +414,7 @@ struct trace_event_file {
 } \
 early_initcall(trace_init_perf_perm_##name);
-#define PERF_MAX_TRACE_SIZE 2048
+#define PERF_MAX_TRACE_SIZE 8192
 #define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */

View File

@@ -455,6 +455,10 @@ static inline void in6_ifa_hold(struct inet6_ifaddr *ifp)
 refcount_inc(&ifp->refcnt);
 }
+static inline bool in6_ifa_hold_safe(struct inet6_ifaddr *ifp)
+{
+return refcount_inc_not_zero(&ifp->refcnt);
+}
 /*
 * compute link-local solicited-node multicast address

View File

@@ -348,6 +348,39 @@ static inline bool pskb_inet_may_pull(struct sk_buff *skb)
 return pskb_network_may_pull(skb, nhlen);
 }
+/* Variant of pskb_inet_may_pull().
+*/
+static inline bool skb_vlan_inet_prepare(struct sk_buff *skb)
+{
+int nhlen = 0, maclen = ETH_HLEN;
+__be16 type = skb->protocol;
+/* Essentially this is skb_protocol(skb, true)
+ * And we get MAC len.
+ */
+if (eth_type_vlan(type))
+type = __vlan_get_protocol(skb, type, &maclen);
+switch (type) {
+#if IS_ENABLED(CONFIG_IPV6)
+case htons(ETH_P_IPV6):
+nhlen = sizeof(struct ipv6hdr);
+break;
+#endif
+case htons(ETH_P_IP):
+nhlen = sizeof(struct iphdr);
+break;
+}
+/* For ETH_P_IPV6/ETH_P_IP we make sure to pull
+ * a base network header in skb->head.
+ */
+if (!pskb_may_pull(skb, maclen + nhlen))
+return false;
+skb_set_network_header(skb, maclen);
+return true;
+}
 static inline int ip_encap_hlen(struct ip_tunnel_encap *e)
 {
 const struct ip_tunnel_encap_ops *ops;

View File

@@ -22,8 +22,8 @@
 #define RUSAGE_THREAD 1 /* only the calling thread */
 struct rusage {
-struct __kernel_old_timeval ru_utime; /* user time used */
-struct __kernel_old_timeval ru_stime; /* system time used */
+struct timeval ru_utime; /* user time used */
+struct timeval ru_stime; /* system time used */
 __kernel_long_t ru_maxrss; /* maximum resident set size */
 __kernel_long_t ru_ixrss; /* integral shared memory size */
 __kernel_long_t ru_idrss; /* integral unshared data size */

View File

@@ -1565,10 +1565,17 @@ static int check_kprobe_address_safe(struct kprobe *p,
 jump_label_lock();
 preempt_disable();
-/* Ensure it is not in reserved area nor out of text */
-if (!(core_kernel_text((unsigned long) p->addr) ||
-is_module_text_address((unsigned long) p->addr)) ||
-in_gate_area_no_mm((unsigned long) p->addr) ||
+/* Ensure the address is in a text area, and find a module if exists. */
+*probed_mod = NULL;
+if (!core_kernel_text((unsigned long) p->addr)) {
+*probed_mod = __module_text_address((unsigned long) p->addr);
+if (!(*probed_mod)) {
+ret = -EINVAL;
+goto out;
+}
+}
+/* Ensure it is not in reserved area. */
+if (in_gate_area_no_mm((unsigned long) p->addr) ||
 within_kprobe_blacklist((unsigned long) p->addr) ||
 jump_label_text_reserved(p->addr, p->addr) ||
 find_bug((unsigned long)p->addr)) {
@@ -1576,8 +1583,7 @@ static int check_kprobe_address_safe(struct kprobe *p,
 goto out;
 }
-/* Check if are we probing a module */
-*probed_mod = __module_text_address((unsigned long) p->addr);
+/* Get module refcount and reject __init functions for loaded modules. */
 if (*probed_mod) {
 /*
 * We must hold a refcount of the probed module while updating

View File

@@ -1803,8 +1803,8 @@ void getrusage(struct task_struct *p, int who, struct rusage *r)
 out_children:
 r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
-r->ru_utime = ns_to_kernel_old_timeval(utime);
-r->ru_stime = ns_to_kernel_old_timeval(stime);
+r->ru_utime = ns_to_timeval(utime);
+r->ru_stime = ns_to_timeval(stime);
 }
 SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)

View File

@@ -399,7 +399,8 @@ void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp)
 BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));
 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
-"perf buffer not large enough"))
+"perf buffer not large enough, wanted %d, have %d",
+size, PERF_MAX_TRACE_SIZE))
 return NULL;
 *rctxp = rctx = perf_swevent_get_recursion_context();

View File

@@ -1309,6 +1309,7 @@ static int trace_format_open(struct inode *inode, struct file *file)
 return 0;
 }
+#ifdef CONFIG_PERF_EVENTS
 static ssize_t
 event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
 {
@@ -1323,6 +1324,7 @@ event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
 }
+#endif
 static ssize_t
 event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
@@ -1727,10 +1729,12 @@ static const struct file_operations ftrace_event_format_fops = {
 .release = seq_release,
 };
+#ifdef CONFIG_PERF_EVENTS
 static const struct file_operations ftrace_event_id_fops = {
 .read = event_id_read,
 .llseek = default_llseek,
 };
+#endif
 static const struct file_operations ftrace_event_filter_fops = {
 .open = tracing_open_generic,

@@ -280,8 +280,6 @@ struct hist_trigger_data {
 	struct action_data *actions[HIST_ACTIONS_MAX];
 	unsigned int n_actions;
 
-	struct hist_field *synth_var_refs[SYNTH_FIELDS_MAX];
-	unsigned int n_synth_var_refs;
 	struct field_var *field_vars[SYNTH_FIELDS_MAX];
 	unsigned int n_field_vars;
 	unsigned int n_field_var_str;
@@ -1291,49 +1289,13 @@ check_field_for_var_ref(struct hist_field *hist_field,
 			struct hist_trigger_data *var_data,
 			unsigned int var_idx)
 {
-	struct hist_field *found = NULL;
-
-	if (hist_field && hist_field->flags & HIST_FIELD_FL_VAR_REF) {
-		if (hist_field->var.idx == var_idx &&
-		    hist_field->var.hist_data == var_data) {
-			found = hist_field;
-		}
-	}
-
-	return found;
-}
-
-static struct hist_field *
-check_field_for_var_refs(struct hist_trigger_data *hist_data,
-			 struct hist_field *hist_field,
-			 struct hist_trigger_data *var_data,
-			 unsigned int var_idx,
-			 unsigned int level)
-{
-	struct hist_field *found = NULL;
-	unsigned int i;
-
-	if (level > 3)
-		return found;
-
-	if (!hist_field)
-		return found;
-
-	found = check_field_for_var_ref(hist_field, var_data, var_idx);
-	if (found)
-		return found;
-
-	for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++) {
-		struct hist_field *operand;
-
-		operand = hist_field->operands[i];
-		found = check_field_for_var_refs(hist_data, operand, var_data,
-						 var_idx, level + 1);
-		if (found)
-			return found;
-	}
+	WARN_ON(!(hist_field && hist_field->flags & HIST_FIELD_FL_VAR_REF));
 
-	return found;
+	if (hist_field && hist_field->var.idx == var_idx &&
+	    hist_field->var.hist_data == var_data)
+		return hist_field;
+
+	return NULL;
 }
 
 /**
@@ -1352,26 +1314,16 @@ static struct hist_field *find_var_ref(struct hist_trigger_data *hist_data,
 				       struct hist_trigger_data *var_data,
 				       unsigned int var_idx)
 {
-	struct hist_field *hist_field, *found = NULL;
+	struct hist_field *hist_field;
 	unsigned int i;
 
-	for_each_hist_field(i, hist_data) {
-		hist_field = hist_data->fields[i];
-		found = check_field_for_var_refs(hist_data, hist_field,
-						 var_data, var_idx, 0);
-		if (found)
-			return found;
-	}
-
-	for (i = 0; i < hist_data->n_synth_var_refs; i++) {
-		hist_field = hist_data->synth_var_refs[i];
-		found = check_field_for_var_refs(hist_data, hist_field,
-						 var_data, var_idx, 0);
-		if (found)
-			return found;
+	for (i = 0; i < hist_data->n_var_refs; i++) {
+		hist_field = hist_data->var_refs[i];
+		if (check_field_for_var_ref(hist_field, var_data, var_idx))
+			return hist_field;
 	}
 
-	return found;
+	return NULL;
 }
 
 /**
@@ -3708,20 +3660,6 @@ static void save_field_var(struct hist_trigger_data *hist_data,
 }
 
-static void destroy_synth_var_refs(struct hist_trigger_data *hist_data)
-{
-	unsigned int i;
-
-	for (i = 0; i < hist_data->n_synth_var_refs; i++)
-		destroy_hist_field(hist_data->synth_var_refs[i], 0);
-}
-
-static void save_synth_var_ref(struct hist_trigger_data *hist_data,
-			       struct hist_field *var_ref)
-{
-	hist_data->synth_var_refs[hist_data->n_synth_var_refs++] = var_ref;
-}
-
 static int check_synth_field(struct synth_event *event,
 			     struct hist_field *hist_field,
 			     unsigned int field_pos)
@@ -3884,7 +3822,6 @@ static int onmatch_create(struct hist_trigger_data *hist_data,
 			goto err;
 		}
 
-		save_synth_var_ref(hist_data, var_ref);
 		field_pos++;
 		kfree(p);
 		continue;
@@ -4631,7 +4568,6 @@ static void destroy_hist_data(struct hist_trigger_data *hist_data)
 	destroy_actions(hist_data);
 	destroy_field_vars(hist_data);
 	destroy_field_var_hists(hist_data);
-	destroy_synth_var_refs(hist_data);
 
 	kfree(hist_data);
 }

@@ -1133,10 +1133,8 @@ register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
 			  struct event_trigger_data *data,
 			  struct trace_event_file *file)
 {
-	int ret = tracing_alloc_snapshot_instance(file->tr);
-
-	if (ret < 0)
-		return ret;
+	if (tracing_alloc_snapshot_instance(file->tr) != 0)
+		return 0;
 
 	return register_trigger(glob, ops, data, file);
 }

@@ -256,10 +256,10 @@ depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
 		/*
 		 * Zero out zone modifiers, as we don't have specific zone
 		 * requirements. Keep the flags related to allocation in atomic
-		 * contexts and I/O.
+		 * contexts, I/O, nolockdep.
 		 */
 		alloc_flags &= ~GFP_ZONEMASK;
-		alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
+		alloc_flags &= (GFP_ATOMIC | GFP_KERNEL | __GFP_NOLOCKDEP);
 		alloc_flags |= __GFP_NOWARN;
 		page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER);
 		if (page)

@@ -4198,7 +4198,7 @@ void batadv_tt_local_resize_to_mtu(struct net_device *soft_iface)
 	spin_lock_bh(&bat_priv->tt.commit_lock);
 
-	while (true) {
+	while (timeout) {
 		table_size = batadv_tt_local_table_transmit_size(bat_priv);
 		if (packet_size_max >= table_size)
 			break;
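
Note on the hunk above: the unbounded while (true) becomes a loop gated on a retry counter (presumably set up and decremented in the surrounding code, outside the lines shown), so the resize can no longer spin forever when the table never fits. A generic bounded-retry sketch in plain C; shrink_step() and fits() are made-up helpers for illustration only:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical helpers: shrink one step, then check whether the result fits. */
static bool shrink_step(int *size)  { *size -= 100; return *size > 0; }
static bool fits(int size, int max) { return size <= max; }

int main(void)
{
    int size = 1000, max = 250;
    int timeout = 50;   /* hard cap on retries, mirroring the gated loop */

    while (timeout) {
        if (fits(size, max))
            break;
        if (!shrink_step(&size))
            break;
        timeout--;
    }

    if (timeout)
        printf("converged at size %d\n", size);
    else
        printf("gave up after the retry budget was exhausted\n");
    return 0;
}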

@@ -107,8 +107,10 @@ static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
 	if (hdev->req_status == HCI_REQ_PEND) {
 		hdev->req_result = result;
 		hdev->req_status = HCI_REQ_DONE;
-		if (skb)
+		if (skb) {
+			kfree_skb(hdev->req_skb);
 			hdev->req_skb = skb_get(skb);
+		}
 		wake_up_interruptible(&hdev->req_wait_q);
 	}
 }
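
Note on the hunk above: it applies a common ownership pattern: a slot that may already own a buffer is released before being overwritten, so repeated completions cannot leak the previously stored buffer. A minimal userspace sketch of the same idea; the struct and helper below are illustrative, not Bluetooth or kernel APIs:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative stand-in for a request context that caches the last reply. */
struct req_ctx {
    char *reply;    /* owned buffer, may be NULL */
};

/* Cache a copy of the new reply, releasing any previously cached one so
 * back-to-back completions do not leak the old allocation. */
static void store_reply(struct req_ctx *ctx, const char *data)
{
    if (data) {
        free(ctx->reply);       /* free(NULL) is a no-op */
        ctx->reply = strdup(data);
    }
}

int main(void)
{
    struct req_ctx ctx = { .reply = NULL };

    store_reply(&ctx, "first completion");
    store_reply(&ctx, "second completion");  /* old buffer freed here */
    printf("cached reply: %s\n", ctx.reply);
    free(ctx.reply);
    return 0;
}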

@@ -405,7 +405,8 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname,
 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
 	struct l2cap_options opts;
 	struct l2cap_conninfo cinfo;
-	int len, err = 0;
+	int err = 0;
+	size_t len;
 	u32 opt;
 
 	BT_DBG("sk %p", sk);
@@ -436,7 +437,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname,
 		opts.max_tx = chan->max_tx;
 		opts.txwin_size = chan->tx_win;
 
-		len = min_t(unsigned int, len, sizeof(opts));
+		len = min(len, sizeof(opts));
 		if (copy_to_user(optval, (char *) &opts, len))
 			err = -EFAULT;
@@ -486,7 +487,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname,
 		cinfo.hci_handle = chan->conn->hcon->handle;
 		memcpy(cinfo.dev_class, chan->conn->hcon->dev_class, 3);
 
-		len = min_t(unsigned int, len, sizeof(cinfo));
+		len = min(len, sizeof(cinfo));
 		if (copy_to_user(optval, (char *) &cinfo, len))
 			err = -EFAULT;

@@ -880,7 +880,8 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname,
 	struct sock *sk = sock->sk;
 	struct sco_options opts;
 	struct sco_conninfo cinfo;
-	int len, err = 0;
+	int err = 0;
+	size_t len;
 
 	BT_DBG("sk %p", sk);
@@ -902,7 +903,7 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname,
 		BT_DBG("mtu %d", opts.mtu);
 
-		len = min_t(unsigned int, len, sizeof(opts));
+		len = min(len, sizeof(opts));
 		if (copy_to_user(optval, (char *)&opts, len))
 			err = -EFAULT;
@@ -920,7 +921,7 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname,
 		cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle;
 		memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3);
 
-		len = min_t(unsigned int, len, sizeof(cinfo));
+		len = min(len, sizeof(cinfo));
 		if (copy_to_user(optval, (char *)&cinfo, len))
 			err = -EFAULT;
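
Note on the l2cap/sco hunks above: making len a size_t keeps the user-supplied length, the clamp, and sizeof() in one unsigned type, so min() compares like with like instead of relying on a min_t() cast. A standalone sketch of that clamp-before-copy pattern (plain C, not the kernel's min()/min_t() or copy_to_user()):

#include <stdio.h>
#include <string.h>

/* Illustrative option blob, standing in for an options struct. */
struct opts {
    int mtu;
    int flush_to;
};

/* Copy at most 'len' bytes of 'src' into 'dst', clamped to the real object
 * size. Keeping 'len' as size_t end-to-end means the clamp and the copy
 * agree on a single unsigned type. */
static size_t copy_opt(void *dst, const struct opts *src, size_t len)
{
    if (len > sizeof(*src))
        len = sizeof(*src);     /* same-type comparison, no promotion games */
    memcpy(dst, src, len);
    return len;
}

int main(void)
{
    struct opts o = { .mtu = 672, .flush_to = 0xffff };
    char out[64];
    int bogus = -1;             /* e.g. a bad length from user space */

    printf("asked for 4, copied %zu bytes\n", copy_opt(out, &o, 4));
    printf("asked for 1000, copied %zu bytes\n", copy_opt(out, &o, 1000));
    /* A negative signed length becomes a huge size_t at the call boundary;
     * the clamp still catches it because the comparison stays unsigned. */
    printf("asked for %d, copied %zu bytes\n", bogus, copy_opt(out, &o, (size_t)bogus));
    return 0;
}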

@@ -253,12 +253,12 @@ void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, bool rearm)
 }
 EXPORT_SYMBOL_GPL(__inet_twsk_schedule);
 
+/* Remove all non full sockets (TIME_WAIT and NEW_SYN_RECV) for dead netns */
 void inet_twsk_purge(struct inet_hashinfo *hashinfo, int family)
 {
-	struct inet_timewait_sock *tw;
-	struct sock *sk;
 	struct hlist_nulls_node *node;
 	unsigned int slot;
+	struct sock *sk;
 
 	for (slot = 0; slot <= hashinfo->ehash_mask; slot++) {
 		struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
@@ -267,25 +267,35 @@ void inet_twsk_purge(struct inet_hashinfo *hashinfo, int family)
 		rcu_read_lock();
 restart:
 		sk_nulls_for_each_rcu(sk, node, &head->chain) {
-			if (sk->sk_state != TCP_TIME_WAIT)
+			int state = inet_sk_state_load(sk);
+
+			if ((1 << state) & ~(TCPF_TIME_WAIT |
+					     TCPF_NEW_SYN_RECV))
 				continue;
-			tw = inet_twsk(sk);
-			if ((tw->tw_family != family) ||
-			    refcount_read(&twsk_net(tw)->count))
+
+			if (sk->sk_family != family ||
+			    refcount_read(&sock_net(sk)->count))
 				continue;
 
-			if (unlikely(!refcount_inc_not_zero(&tw->tw_refcnt)))
+			if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
 				continue;
 
-			if (unlikely((tw->tw_family != family) ||
-				     refcount_read(&twsk_net(tw)->count))) {
-				inet_twsk_put(tw);
+			if (unlikely(sk->sk_family != family ||
+				     refcount_read(&sock_net(sk)->count))) {
+				sock_gen_put(sk);
 				goto restart;
 			}
 
 			rcu_read_unlock();
 			local_bh_disable();
-			inet_twsk_deschedule_put(tw);
+			if (state == TCP_TIME_WAIT) {
+				inet_twsk_deschedule_put(inet_twsk(sk));
+			} else {
+				struct request_sock *req = inet_reqsk(sk);
+
+				inet_csk_reqsk_queue_drop_and_put(req->rsk_listener,
+								  req);
+			}
 			local_bh_enable();
 			goto restart_rcu;
 		}

@@ -937,13 +937,11 @@ void ip_rt_send_redirect(struct sk_buff *skb)
 		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
 		peer->rate_last = jiffies;
 		++peer->n_redirects;
-#ifdef CONFIG_IP_ROUTE_VERBOSE
-		if (log_martians &&
+		if (IS_ENABLED(CONFIG_IP_ROUTE_VERBOSE) && log_martians &&
 		    peer->n_redirects == ip_rt_redirect_number)
 			net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
 					     &ip_hdr(skb)->saddr, inet_iif(skb),
 					     &ip_hdr(skb)->daddr, &gw);
-#endif
 	}
 out_put_peer:
 	inet_putpeer(peer);
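
Note on the hunk above: replacing the #ifdef block with IS_ENABLED() keeps the branch visible to the compiler in every configuration (it is parsed and type-checked, then dropped as dead code when the option is off), which is what avoids the unused-variable warning. A minimal illustration of the same idiom; CONFIG_VERBOSE_ENABLED below is an invented stand-in for a Kconfig symbol, not a real kernel macro:

#include <stdio.h>

/* Stand-in for a config switch that evaluates to 0 or 1; the branch below
 * is always compiled and type-checked, and eliminated when it is 0. */
#define CONFIG_VERBOSE_ENABLED 0

static void maybe_log(int n_redirects)
{
    if (CONFIG_VERBOSE_ENABLED && n_redirects > 3)
        printf("host ignored %d redirects\n", n_redirects);
}

int main(void)
{
    maybe_log(10);      /* silent with the switch set to 0 */
    puts("done");
    return 0;
}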

@@ -1001,16 +1001,17 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 	if (msg->msg_controllen) {
 		err = udp_cmsg_send(sk, msg, &ipc.gso_size);
-		if (err > 0)
+		if (err > 0) {
 			err = ip_cmsg_send(sk, msg, &ipc,
 					   sk->sk_family == AF_INET6);
+			connected = 0;
+		}
 		if (unlikely(err < 0)) {
 			kfree(ipc.opt);
 			return err;
 		}
 		if (ipc.opt)
 			free = 1;
-		connected = 0;
 	}
 	if (!ipc.opt) {
 		struct ip_options_rcu *inet_opt;

@@ -1967,9 +1967,10 @@ struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *add
 		if (ipv6_addr_equal(&ifp->addr, addr)) {
 			if (!dev || ifp->idev->dev == dev ||
 			    !(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) {
-				result = ifp;
-				in6_ifa_hold(ifp);
-				break;
+				if (in6_ifa_hold_safe(ifp)) {
+					result = ifp;
+					break;
+				}
 			}
 		}
 	}

@@ -1226,7 +1226,10 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
 	     struct nl_info *info, struct netlink_ext_ack *extack)
 {
 	struct fib6_table *table = rt->fib6_table;
-	struct fib6_node *fn, *pn = NULL;
+	struct fib6_node *fn;
+#ifdef CONFIG_IPV6_SUBTREES
+	struct fib6_node *pn = NULL;
+#endif
 	int err = -ENOMEM;
 	int allow_create = 1;
 	int replace_required = 0;
@@ -1251,9 +1254,9 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
 		goto out;
 	}
 
+#ifdef CONFIG_IPV6_SUBTREES
 	pn = fn;
 
-#ifdef CONFIG_IPV6_SUBTREES
 	if (rt->fib6_src.plen) {
 		struct fib6_node *sn;

@@ -1324,9 +1324,11 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 		ipc6.opt = opt;
 
 		err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
-		if (err > 0)
+		if (err > 0) {
 			err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6,
 						    &ipc6);
+			connected = false;
+		}
 		if (err < 0) {
 			fl6_sock_release(flowlabel);
 			return err;
@@ -1338,7 +1340,6 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 		}
 		if (!(opt->opt_nflen|opt->opt_flen))
 			opt = NULL;
-		connected = false;
 	}
 	if (!opt) {
 		opt = txopt_get(np);

@@ -123,7 +123,8 @@ sctp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
 	if (sctph->source != cp->vport || payload_csum ||
 	    skb->ip_summed == CHECKSUM_PARTIAL) {
 		sctph->source = cp->vport;
-		sctp_nat_csum(skb, sctph, sctphoff);
+		if (!skb_is_gso(skb) || !skb_is_gso_sctp(skb))
+			sctp_nat_csum(skb, sctph, sctphoff);
 	} else {
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 	}
@@ -171,7 +172,8 @@ sctp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
 	    (skb->ip_summed == CHECKSUM_PARTIAL &&
 	     !(skb_dst(skb)->dev->features & NETIF_F_SCTP_CRC))) {
 		sctph->dest = cp->dport;
-		sctp_nat_csum(skb, sctph, sctphoff);
+		if (!skb_is_gso(skb) || !skb_is_gso_sctp(skb))
+			sctp_nat_csum(skb, sctph, sctphoff);
 	} else if (skb->ip_summed != CHECKSUM_PARTIAL) {
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 	}

@@ -2081,14 +2081,17 @@ EXPORT_SYMBOL_GPL(nft_unregister_expr);
 static const struct nft_expr_type *__nft_expr_type_get(u8 family,
 						       struct nlattr *nla)
 {
-	const struct nft_expr_type *type;
+	const struct nft_expr_type *type, *candidate = NULL;
 
-	list_for_each_entry(type, &nf_tables_expressions, list) {
-		if (!nla_strcmp(nla, type->name) &&
-		    (!type->family || type->family == family))
-			return type;
+	list_for_each_entry_rcu(type, &nf_tables_expressions, list) {
+		if (!nla_strcmp(nla, type->name)) {
+			if (!type->family && !candidate)
+				candidate = type;
+			else if (type->family == family)
+				candidate = type;
+		}
 	}
-	return NULL;
+	return candidate;
 }
 
 static const struct nft_expr_type *nft_expr_type_get(struct net *net,
@@ -2100,9 +2103,13 @@ static const struct nft_expr_type *nft_expr_type_get(struct net *net,
 	if (nla == NULL)
 		return ERR_PTR(-EINVAL);
 
+	rcu_read_lock();
 	type = __nft_expr_type_get(family, nla);
-	if (type != NULL && try_module_get(type->owner))
+	if (type != NULL && try_module_get(type->owner)) {
+		rcu_read_unlock();
 		return type;
+	}
+	rcu_read_unlock();
 
 	lockdep_nfnl_nft_mutex_not_held();
 #ifdef CONFIG_MODULES
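
Note on the two hunks above: the lookup now remembers a wildcard-family entry only as a fallback candidate and lets an exact-family match override it, and the second hunk wraps the list walk in rcu_read_lock()/rcu_read_unlock(). The preference logic on its own can be sketched like this (standalone C; the table, types, and family ids are invented for illustration):

#include <stdio.h>
#include <string.h>

struct expr_type {
    const char *name;
    int family;     /* 0 == any family, otherwise an exact family id */
};

/* Pick the entry matching 'name': an exact family match wins, a family-0
 * (wildcard) entry is only kept as a fallback candidate. */
static const struct expr_type *lookup(const struct expr_type *tab, int n,
                                      const char *name, int family)
{
    const struct expr_type *candidate = NULL;

    for (int i = 0; i < n; i++) {
        if (strcmp(name, tab[i].name))
            continue;
        if (!tab[i].family && !candidate)
            candidate = &tab[i];
        else if (tab[i].family == family)
            candidate = &tab[i];
    }
    return candidate;
}

int main(void)
{
    const struct expr_type tab[] = { { "meta", 0 }, { "meta", 2 } };
    const struct expr_type *t;

    t = lookup(tab, 2, "meta", 2);
    printf("family 2 lookup -> family %d entry\n", t ? t->family : -1);
    t = lookup(tab, 2, "meta", 7);
    printf("family 7 lookup -> family %d entry (wildcard fallback)\n",
           t ? t->family : -1);
    return 0;
}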

@@ -1836,8 +1836,9 @@ static void ovs_ct_limit_exit(struct net *net, struct ovs_net *ovs_net)
 	for (i = 0; i < CT_LIMIT_HASH_BUCKETS; ++i) {
 		struct hlist_head *head = &info->limits[i];
 		struct ovs_ct_limit *ct_limit;
+		struct hlist_node *next;
 
-		hlist_for_each_entry_rcu(ct_limit, head, hlist_node)
+		hlist_for_each_entry_safe(ct_limit, next, head, hlist_node)
 			kfree_rcu(ct_limit, rcu);
 	}
 	kfree(ovs_net->ct_limit_info->limits);
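
Note on the hunk above: entries are being released while the list is still being walked, so the iterator must not re-read a just-released entry to find its successor; the _safe variant caches the next pointer first. The same rule applies to any plain linked-list teardown, as in this standalone sketch (not the kernel hlist API):

#include <stdio.h>
#include <stdlib.h>

struct node {
    int val;
    struct node *next;
};

/* Free every node: save 'next' before freeing the current node, which is the
 * same reason a *_for_each_entry_safe() style iterator is needed above. */
static void free_all(struct node *head)
{
    struct node *cur = head;

    while (cur) {
        struct node *next = cur->next;  /* read before free */

        free(cur);
        cur = next;
    }
}

int main(void)
{
    struct node *head = NULL;

    for (int i = 0; i < 5; i++) {
        struct node *n = malloc(sizeof(*n));

        n->val = i;
        n->next = head;
        head = n;
    }
    free_all(head);
    puts("list torn down without touching freed memory");
    return 0;
}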

@@ -2390,8 +2390,10 @@ static void __net_exit ovs_exit_net(struct net *dnet)
 	struct net *net;
 	LIST_HEAD(head);
 
-	ovs_ct_exit(dnet);
 	ovs_lock();
+
+	ovs_ct_exit(dnet);
+
 	list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
 		__dp_destroy(dp);

@@ -30,7 +30,7 @@ echo 'sched:*' > set_event
 
 yield
 
-count=`cat trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
+count=`head -n 100 trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
 if [ $count -lt 3 ]; then
     fail "at least fork, exec and exit events should be recorded"
 fi
@@ -41,7 +41,7 @@ echo 1 > events/sched/enable
 
 yield
 
-count=`cat trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
+count=`head -n 100 trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
 if [ $count -lt 3 ]; then
     fail "at least fork, exec and exit events should be recorded"
 fi
@@ -52,7 +52,7 @@ echo 0 > events/sched/enable
 
 yield
 
-count=`cat trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
+count=`head -n 100 trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
 if [ $count -ne 0 ]; then
     fail "any of scheduler events should not be recorded"
 fi

@@ -67,7 +67,7 @@ static int check_diff(struct timeval start, struct timeval end)
 	diff = end.tv_usec - start.tv_usec;
 	diff += (end.tv_sec - start.tv_sec) * USECS_PER_SEC;
 
-	if (abs(diff - DELAY * USECS_PER_SEC) > USECS_PER_SEC / 2) {
+	if (llabs(diff - DELAY * USECS_PER_SEC) > USECS_PER_SEC / 2) {
 		printf("Diff too high: %lld..", diff);
 		return -1;
 	}
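
Note on the hunk above: diff is a long long, and plain abs() takes an int, so the argument is truncated before the sign is stripped (compilers commonly flag this with -Wabsolute-value), while llabs() keeps the full 64-bit width. A short demonstration of the truncation:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    long long diff = -5000000000LL;     /* roughly -5 seconds in microseconds */

    /* abs() operates on int: the explicit cast below mimics the implicit
     * narrowing, so the result is meaningless for 64-bit inputs. */
    printf("abs():   %lld\n", (long long)abs((int)diff));
    printf("llabs(): %lld\n", llabs(diff));
    return 0;
}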