Merge 4.19.228 into android-4.19-stable

Changes in 4.19.228
	Bluetooth: refactor malicious adv data check
	s390/hypfs: include z/VM guests with access control group set
	scsi: zfcp: Fix failed recovery on gone remote port with non-NPIV FCP devices
	udf: Restore i_lenAlloc when inode expansion fails
	udf: Fix NULL ptr deref when converting from inline format
	PM: wakeup: simplify the output logic of pm_show_wakelocks()
	drm/etnaviv: relax submit size limits
	netfilter: nft_payload: do not update layer 4 checksum when mangling fragments
	serial: 8250: of: Fix mapped region size when using reg-offset property
	serial: stm32: fix software flow control transfer
	tty: n_gsm: fix SW flow control encoding/handling
	tty: Add support for Brainboxes UC cards.
	usb-storage: Add unusual-devs entry for VL817 USB-SATA bridge
	usb: common: ulpi: Fix crash in ulpi_match()
	usb: gadget: f_sourcesink: Fix isoc transfer for USB_SPEED_SUPER_PLUS
	USB: core: Fix hang in usb_kill_urb by adding memory barriers
	usb: typec: tcpm: Do not disconnect while receiving VBUS off
	net: sfp: ignore disabled SFP node
	powerpc/32: Fix boot failure with GCC latent entropy plugin
	i40e: Increase delay to 1 s after global EMP reset
	i40e: Fix issue when maximum queues is exceeded
	i40e: Fix queues reservation for XDP
	i40e: fix unsigned stat widths
	rpmsg: char: Fix race between the release of rpmsg_ctrldev and cdev
	rpmsg: char: Fix race between the release of rpmsg_eptdev and cdev
	scsi: bnx2fc: Flush destroy_work queue before calling bnx2fc_interface_put()
	ipv6_tunnel: Rate limit warning messages
	net: fix information leakage in /proc/net/ptype
	ping: fix the sk_bound_dev_if match in ping_lookup
	ipv4: avoid using shared IP generator for connected sockets
	hwmon: (lm90) Reduce maximum conversion rate for G781
	NFSv4: Handle case where the lookup of a directory fails
	NFSv4: nfs_atomic_open() can race when looking up a non-regular file
	net-procfs: show net devices bound packet types
	drm/msm: Fix wrong size calculation
	drm/msm/dsi: invalid parameter check in msm_dsi_phy_enable
	ipv6: annotate accesses to fn->fn_sernum
	NFS: Ensure the server has an up to date ctime before hardlinking
	NFS: Ensure the server has an up to date ctime before renaming
	phylib: fix potential use-after-free
	ibmvnic: init ->running_cap_crqs early
	ibmvnic: don't spin in tasklet
	yam: fix a memory leak in yam_siocdevprivate()
	ipv4: raw: lock the socket in raw_bind()
	ipv4: tcp: send zero IPID in SYNACK messages
	netfilter: nat: remove l4 protocol port rovers
	netfilter: nat: limit port clash resolution attempts
	tcp: fix possible socket leaks in internal pacing mode
	ipheth: fix EOVERFLOW in ipheth_rcvbulk_callback
	net: amd-xgbe: ensure to reset the tx_timer_active flag
	net: amd-xgbe: Fix skb data length underflow
	rtnetlink: make sure to refresh master_dev/m_ops in __rtnl_newlink()
	af_packet: fix data-race in packet_setsockopt / packet_setsockopt
	audit: improve audit queue handling when "audit=1" on cmdline
	ASoC: ops: Reject out of bounds values in snd_soc_put_volsw()
	ASoC: ops: Reject out of bounds values in snd_soc_put_volsw_sx()
	ASoC: ops: Reject out of bounds values in snd_soc_put_xr_sx()
	ALSA: hda/realtek: Add missing fixup-model entry for Gigabyte X570 ALC1220 quirks
	ALSA: hda/realtek: Fix silent output on Gigabyte X570S Aorus Master (newer chipset)
	ALSA: hda/realtek: Fix silent output on Gigabyte X570 Aorus Xtreme after reboot from Windows
	drm/nouveau: fix off by one in BIOS boundary checking
	block: bio-integrity: Advance seed correctly for larger interval sizes
	Revert "ASoC: mediatek: Check for error clk pointer"
	RDMA/mlx4: Don't continue event handler after memory allocation failure
	iommu/vt-d: Fix potential memory leak in intel_setup_irq_remapping()
	iommu/amd: Fix loop timeout issue in iommu_ga_log_enable()
	spi: bcm-qspi: check for valid cs before applying chip select
	spi: mediatek: Avoid NULL pointer crash in interrupt
	spi: meson-spicc: add IRQ check in meson_spicc_probe
	net: ieee802154: hwsim: Ensure proper channel selection at probe time
	net: ieee802154: mcr20a: Fix lifs/sifs periods
	net: ieee802154: ca8210: Stop leaking skb's
	net: ieee802154: Return meaningful error codes from the netlink helpers
	net: macsec: Verify that send_sci is on when setting Tx sci explicitly
	net: stmmac: ensure PTP time register reads are consistent
	drm/i915/overlay: Prevent divide by zero bugs in scaling
	ASoC: fsl: Add missing error handling in pcm030_fabric_probe
	ASoC: cpcap: Check for NULL pointer after calling of_get_child_by_name
	ASoC: max9759: fix underflow in speaker_gain_control_put()
	scsi: bnx2fc: Make bnx2fc_recv_frame() mp safe
	nfsd: nfsd4_setclientid_confirm mistakenly expires confirmed client.
	selftests: futex: Use variable MAKE instead of make
	rtc: cmos: Evaluate century appropriate
	EDAC/altera: Fix deferred probing
	EDAC/xgene: Fix deferred probing
	ext4: fix error handling in ext4_restore_inline_data()
	Linux 4.19.228

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I519bfcc5c5e4bba354c76e47dad34fba237809c0
Author: Greg Kroah-Hartman <gregkh@google.com>
Date:   2022-02-09 08:26:20 +01:00

83 files changed, 729 insertions(+), 301 deletions(-)

--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 227
+SUBLEVEL = 228
 EXTRAVERSION =
 NAME = "People's Front"

--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -15,6 +15,7 @@ CFLAGS_prom_init.o += -fPIC
 CFLAGS_btext.o += -fPIC
 endif

+CFLAGS_setup_32.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 CFLAGS_cputable.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 CFLAGS_prom_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)

--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -10,6 +10,9 @@ ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
 CFLAGS_REMOVE_code-patching.o = $(CC_FLAGS_FTRACE)
 CFLAGS_REMOVE_feature-fixups.o = $(CC_FLAGS_FTRACE)

+CFLAGS_code-patching.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+CFLAGS_feature-fixups.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+
 obj-y += string.o alloc.o code-patching.o feature-fixups.o
 obj-$(CONFIG_PPC32) += div64.o copy_32.o crtsavres.o strlen_32.o

--- a/arch/s390/hypfs/hypfs_vm.c
+++ b/arch/s390/hypfs/hypfs_vm.c
@@ -20,6 +20,7 @@
 static char local_guest[] = "        ";
 static char all_guests[] = "*       ";
+static char *all_groups = all_guests;
 static char *guest_query;

 struct diag2fc_data {
@@ -62,10 +63,11 @@ static int diag2fc(int size, char* query, void *addr)
 memcpy(parm_list.userid, query, NAME_LEN);
 ASCEBC(parm_list.userid, NAME_LEN);
-parm_list.addr = (unsigned long) addr ;
+memcpy(parm_list.aci_grp, all_groups, NAME_LEN);
+ASCEBC(parm_list.aci_grp, NAME_LEN);
+parm_list.addr = (unsigned long)addr;
 parm_list.size = size;
 parm_list.fmt = 0x02;
-memset(parm_list.aci_grp, 0x40, NAME_LEN);
 rc = -1;

 diag_stat_inc(DIAG_STAT_X2FC);

--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -399,7 +399,7 @@ void bio_integrity_advance(struct bio *bio, unsigned int bytes_done)
 struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
 unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);

-bip->bip_iter.bi_sector += bytes_done >> 9;
+bip->bip_iter.bi_sector += bio_integrity_intervals(bi, bytes_done >> 9);
 bvec_iter_advance(bip->bip_vec, &bip->bip_iter, bytes);
 }
 EXPORT_SYMBOL(bio_integrity_advance);
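Note: the fix above is arithmetic. bip_iter.bi_sector carries the integrity seed in protection-interval units, not 512-byte sectors, so advancing it by bytes_done >> 9 over-advances whenever the interval is larger than one sector. A minimal sketch of the difference, assuming a 4096-byte protection interval (the helper below is a stand-in for bio_integrity_intervals(), not a copy of it):

#include <assert.h>
#include <stdio.h>

/* Stand-in for bio_integrity_intervals(): one PI tuple per interval. */
static unsigned intervals(unsigned interval_bytes, unsigned bytes)
{
        return bytes / interval_bytes;
}

int main(void)
{
        unsigned bytes_done = 64 * 1024;         /* 64 KiB of data processed */
        unsigned old_step = bytes_done >> 9;     /* 128 sectors: wrong for 4K PI */
        unsigned new_step = intervals(4096, bytes_done); /* 16 intervals */

        printf("old=%u new=%u\n", old_step, new_step);
        assert(old_step == 8 * new_step);        /* 8x seed over-advance */
        return 0;
}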

--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -366,7 +366,7 @@ static int altr_sdram_probe(struct platform_device *pdev)
 if (irq < 0) {
 edac_printk(KERN_ERR, EDAC_MC,
 "No irq %d in DT\n", irq);
-return -ENODEV;
+return irq;
 }

 /* Arria10 has a 2nd IRQ */

--- a/drivers/edac/xgene_edac.c
+++ b/drivers/edac/xgene_edac.c
@@ -1934,7 +1934,7 @@ static int xgene_edac_probe(struct platform_device *pdev)
 irq = platform_get_irq(pdev, i);
 if (irq < 0) {
 dev_err(&pdev->dev, "No IRQ resource\n");
-rc = -EINVAL;
+rc = irq;
 goto out_err;
 }
 rc = devm_request_irq(&pdev->dev, irq,

--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -444,8 +444,8 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
 return -EINVAL;
 }

-if (args->stream_size > SZ_64K || args->nr_relocs > SZ_64K ||
-    args->nr_bos > SZ_64K || args->nr_pmrs > 128) {
+if (args->stream_size > SZ_128K || args->nr_relocs > SZ_128K ||
+    args->nr_bos > SZ_128K || args->nr_pmrs > 128) {
 DRM_ERROR("submit arguments out of size limits\n");
 return -EINVAL;
 }

--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -929,6 +929,9 @@ static int check_overlay_dst(struct intel_overlay *overlay,
 const struct intel_crtc_state *pipe_config =
 overlay->crtc->config;

+if (rec->dst_height == 0 || rec->dst_width == 0)
+return -EINVAL;
+
 if (rec->dst_x < pipe_config->pipe_src_w &&
     rec->dst_x + rec->dst_width <= pipe_config->pipe_src_w &&
     rec->dst_y < pipe_config->pipe_src_h &&

--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -667,12 +667,14 @@ void __exit msm_dsi_phy_driver_unregister(void)
 int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
 struct msm_dsi_phy_clk_request *clk_req)
 {
-struct device *dev = &phy->pdev->dev;
+struct device *dev;
 int ret;

 if (!phy || !phy->cfg->ops.enable)
 return -EINVAL;

+dev = &phy->pdev->dev;
+
 ret = dsi_phy_enable_resource(phy);
 if (ret) {
 dev_err(dev, "%s: resource enable failed, %d\n",

--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -388,7 +388,7 @@ static int msm_init_vram(struct drm_device *dev)
 of_node_put(node);
 if (ret)
 return ret;
-size = r.end - r.start;
+size = r.end - r.start + 1;
 DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);

 /* if we have no IOMMU, then we need to use carveout allocator.
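Note: struct resource bounds are inclusive, so a region spanning start..end contains end - start + 1 bytes; the old expression was short by one. The same arithmetic spelled out with hypothetical addresses:

#include <assert.h>

int main(void)
{
        unsigned long start = 0x80000000UL, end = 0x80ffffffUL; /* 16 MiB */

        assert(end - start == 0xffffffUL);      /* old math: one byte short */
        assert(end - start + 1 == 0x1000000UL); /* matches resource_size() */
        return 0;
}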

--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
@@ -38,7 +38,7 @@ nvbios_addr(struct nvkm_bios *bios, u32 *addr, u8 size)
 *addr += bios->imaged_addr;
 }

-if (unlikely(*addr + size >= bios->size)) {
+if (unlikely(*addr + size > bios->size)) {
 nvkm_error(&bios->subdev, "OOB %d %08x %08x\n", size, p, *addr);
 return false;
 }
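Note: reading size bytes at *addr touches bytes *addr .. *addr + size - 1, so the access is in bounds whenever *addr + size <= bios->size; the old >= test rejected reads ending exactly at the image boundary. A minimal demonstration of the off-by-one:

#include <assert.h>
#include <stdbool.h>

static bool ok_old(unsigned addr, unsigned size, unsigned total)
{
        return !(addr + size >= total);
}

static bool ok_new(unsigned addr, unsigned size, unsigned total)
{
        return !(addr + size > total);
}

int main(void)
{
        /* Last 4 bytes of a 64 KiB image: a legal read. */
        assert(!ok_old(0xfffc, 4, 0x10000)); /* wrongly rejected before */
        assert(ok_new(0xfffc, 4, 0x10000));  /* accepted after the fix */
        return 0;
}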

--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -359,7 +359,7 @@ static const struct lm90_params lm90_params[] = {
 .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
   | LM90_HAVE_BROKEN_ALERT,
 .alert_alarms = 0x7c,
-.max_convrate = 8,
+.max_convrate = 7,
 },
 [lm86] = {
 .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT,

--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -3351,7 +3351,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
 case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
 ew = kmalloc(sizeof *ew, GFP_ATOMIC);
 if (!ew)
-break;
+return;

 INIT_WORK(&ew->work, handle_port_mgmt_change_event);
 memcpy(&ew->ib_eqe, eqe, sizeof *eqe);

--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -30,6 +30,7 @@
 #include <linux/iommu.h>
 #include <linux/kmemleak.h>
 #include <linux/mem_encrypt.h>
+#include <linux/iopoll.h>
 #include <asm/pci-direct.h>
 #include <asm/iommu.h>
 #include <asm/gart.h>
@@ -772,6 +773,7 @@ static int iommu_ga_log_enable(struct amd_iommu *iommu)
 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
 if (status & (MMIO_STATUS_GALOG_RUN_MASK))
 break;
+udelay(10);
 }

 if (i >= LOOP_TIMEOUT)

--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -543,9 +543,8 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
 fn, &intel_ir_domain_ops,
 iommu);
 if (!iommu->ir_domain) {
-irq_domain_free_fwnode(fn);
 pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id);
-goto out_free_bitmap;
+goto out_free_fwnode;
 }
 iommu->ir_msi_domain =
 arch_create_remap_msi_irq_domain(iommu->ir_domain,
@@ -569,7 +568,7 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
 if (dmar_enable_qi(iommu)) {
 pr_err("Failed to enable queued invalidation\n");
-goto out_free_bitmap;
+goto out_free_ir_domain;
 }
 }
@@ -593,6 +592,14 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
 return 0;

+out_free_ir_domain:
+if (iommu->ir_msi_domain)
+irq_domain_remove(iommu->ir_msi_domain);
+iommu->ir_msi_domain = NULL;
+irq_domain_remove(iommu->ir_domain);
+iommu->ir_domain = NULL;
+out_free_fwnode:
+irq_domain_free_fwnode(fn);
 out_free_bitmap:
 kfree(bitmap);
 out_free_pages:

--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -722,7 +722,9 @@ static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
 if (!channel->tx_ring)
 break;

+/* Deactivate the Tx timer */
 del_timer_sync(&channel->tx_timer);
+channel->tx_timer_active = 0;
 }
 }
@@ -2766,6 +2768,14 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
 buf2_len = xgbe_rx_buf2_len(rdata, packet, len);
 len += buf2_len;

+if (buf2_len > rdata->rx.buf.dma_len) {
+/* Hardware inconsistency within the descriptors
+ * that has resulted in a length underflow.
+ */
+error = 1;
+goto skip_data;
+}
+
 if (!skb) {
 skb = xgbe_create_skb(pdata, napi, rdata,
 buf1_len);
@@ -2795,8 +2805,10 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
 if (!last || context_next)
 goto read_again;

-if (!skb)
+if (!skb || error) {
+dev_kfree_skb(skb);
 goto next_packet;
+}

 /* Be sure we don't exceed the configured MTU */
 max_len = netdev->mtu + ETH_HLEN;

--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -3044,11 +3044,25 @@ static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
 struct device *dev = &adapter->vdev->dev;
 union ibmvnic_crq crq;
 int max_entries;
+int cap_reqs;
+
+/* We send out 6 or 7 REQUEST_CAPABILITY CRQs below (depending on
+ * the PROMISC flag). Initialize this count upfront. When the tasklet
+ * receives a response to all of these, it will send the next protocol
+ * message (QUERY_IP_OFFLOAD).
+ */
+if (!(adapter->netdev->flags & IFF_PROMISC) ||
+    adapter->promisc_supported)
+cap_reqs = 7;
+else
+cap_reqs = 6;

 if (!retry) {
 /* Sub-CRQ entries are 32 byte long */
 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);

+atomic_set(&adapter->running_cap_crqs, cap_reqs);
+
 if (adapter->min_tx_entries_per_subcrq > entries_page ||
     adapter->min_rx_add_entries_per_subcrq > entries_page) {
 dev_err(dev, "Fatal, invalid entries per sub-crq\n");
@@ -3109,44 +3123,45 @@ static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
 adapter->opt_rx_comp_queues;

 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
+} else {
+atomic_add(cap_reqs, &adapter->running_cap_crqs);
 }

 memset(&crq, 0, sizeof(crq));
 crq.request_capability.first = IBMVNIC_CRQ_CMD;
 crq.request_capability.cmd = REQUEST_CAPABILITY;

 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
-atomic_inc(&adapter->running_cap_crqs);
+cap_reqs--;
 ibmvnic_send_crq(adapter, &crq);

 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
-atomic_inc(&adapter->running_cap_crqs);
+cap_reqs--;
 ibmvnic_send_crq(adapter, &crq);

 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
-atomic_inc(&adapter->running_cap_crqs);
+cap_reqs--;
 ibmvnic_send_crq(adapter, &crq);

 crq.request_capability.capability =
 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
 crq.request_capability.number =
 cpu_to_be64(adapter->req_tx_entries_per_subcrq);
-atomic_inc(&adapter->running_cap_crqs);
+cap_reqs--;
 ibmvnic_send_crq(adapter, &crq);

 crq.request_capability.capability =
 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
 crq.request_capability.number =
 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
-atomic_inc(&adapter->running_cap_crqs);
+cap_reqs--;
 ibmvnic_send_crq(adapter, &crq);

 crq.request_capability.capability = cpu_to_be16(REQ_MTU);
 crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
-atomic_inc(&adapter->running_cap_crqs);
+cap_reqs--;
 ibmvnic_send_crq(adapter, &crq);

 if (adapter->netdev->flags & IFF_PROMISC) {
@@ -3154,16 +3169,21 @@ static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
 crq.request_capability.capability =
 cpu_to_be16(PROMISC_REQUESTED);
 crq.request_capability.number = cpu_to_be64(1);
-atomic_inc(&adapter->running_cap_crqs);
+cap_reqs--;
 ibmvnic_send_crq(adapter, &crq);
 }
 } else {
 crq.request_capability.capability =
 cpu_to_be16(PROMISC_REQUESTED);
 crq.request_capability.number = cpu_to_be64(0);
-atomic_inc(&adapter->running_cap_crqs);
+cap_reqs--;
 ibmvnic_send_crq(adapter, &crq);
 }
+
+/* Keep at end to catch any discrepancy between expected and actual
+ * CRQs sent.
+ */
+WARN_ON(cap_reqs != 0);
 }

 static int pending_scrq(struct ibmvnic_adapter *adapter,
@@ -3568,118 +3588,132 @@ static void send_map_query(struct ibmvnic_adapter *adapter)
 static void send_cap_queries(struct ibmvnic_adapter *adapter)
 {
 union ibmvnic_crq crq;
+int cap_reqs;
+
+/* We send out 25 QUERY_CAPABILITY CRQs below.  Initialize this count
+ * upfront. When the tasklet receives a response to all of these, it
+ * can send out the next protocol messaage (REQUEST_CAPABILITY).
+ */
+cap_reqs = 25;
+atomic_set(&adapter->running_cap_crqs, cap_reqs);

-atomic_set(&adapter->running_cap_crqs, 0);
 memset(&crq, 0, sizeof(crq));
 crq.query_capability.first = IBMVNIC_CRQ_CMD;
 crq.query_capability.cmd = QUERY_CAPABILITY;

 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
-atomic_inc(&adapter->running_cap_crqs);
 ibmvnic_send_crq(adapter, &crq);
+cap_reqs--;

 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
-atomic_inc(&adapter->running_cap_crqs);
 ibmvnic_send_crq(adapter, &crq);
+cap_reqs--;

 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
-atomic_inc(&adapter->running_cap_crqs);
 ibmvnic_send_crq(adapter, &crq);
+cap_reqs--;

 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
-atomic_inc(&adapter->running_cap_crqs);
 ibmvnic_send_crq(adapter, &crq);
+cap_reqs--;

 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
-atomic_inc(&adapter->running_cap_crqs);
 ibmvnic_send_crq(adapter, &crq);
+cap_reqs--;

 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
-atomic_inc(&adapter->running_cap_crqs);
 ibmvnic_send_crq(adapter, &crq);
+cap_reqs--;

 crq.query_capability.capability =
 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
-atomic_inc(&adapter->running_cap_crqs);
 ibmvnic_send_crq(adapter, &crq);
+cap_reqs--;

 crq.query_capability.capability =
 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
-atomic_inc(&adapter->running_cap_crqs);
 ibmvnic_send_crq(adapter, &crq);
+cap_reqs--;

 crq.query_capability.capability =
 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
-atomic_inc(&adapter->running_cap_crqs);
 ibmvnic_send_crq(adapter, &crq);
+cap_reqs--;

 crq.query_capability.capability =
 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
-atomic_inc(&adapter->running_cap_crqs);
 ibmvnic_send_crq(adapter, &crq);
+cap_reqs--;

 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
-atomic_inc(&adapter->running_cap_crqs);
 ibmvnic_send_crq(adapter, &crq);
+cap_reqs--;

 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
-atomic_inc(&adapter->running_cap_crqs);
 ibmvnic_send_crq(adapter, &crq);
+cap_reqs--;

 crq.query_capability.capability = cpu_to_be16(MIN_MTU);
-atomic_inc(&adapter->running_cap_crqs);
 ibmvnic_send_crq(adapter, &crq);
+cap_reqs--;

 crq.query_capability.capability = cpu_to_be16(MAX_MTU);
-atomic_inc(&adapter->running_cap_crqs);
 ibmvnic_send_crq(adapter, &crq);
+cap_reqs--;

 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
-atomic_inc(&adapter->running_cap_crqs);
 ibmvnic_send_crq(adapter, &crq);
+cap_reqs--;

 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
-atomic_inc(&adapter->running_cap_crqs);
 ibmvnic_send_crq(adapter, &crq);
+cap_reqs--;

 crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
-atomic_inc(&adapter->running_cap_crqs);
 ibmvnic_send_crq(adapter, &crq);
+cap_reqs--;

 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
-atomic_inc(&adapter->running_cap_crqs);
 ibmvnic_send_crq(adapter, &crq);
+cap_reqs--;

 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
-atomic_inc(&adapter->running_cap_crqs);
 ibmvnic_send_crq(adapter, &crq);
+cap_reqs--;

 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
-atomic_inc(&adapter->running_cap_crqs);
 ibmvnic_send_crq(adapter, &crq);
+cap_reqs--;

 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
-atomic_inc(&adapter->running_cap_crqs);
 ibmvnic_send_crq(adapter, &crq);
+cap_reqs--;

 crq.query_capability.capability =
 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
-atomic_inc(&adapter->running_cap_crqs);
 ibmvnic_send_crq(adapter, &crq);
+cap_reqs--;

 crq.query_capability.capability =
 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
-atomic_inc(&adapter->running_cap_crqs);
 ibmvnic_send_crq(adapter, &crq);
+cap_reqs--;

 crq.query_capability.capability =
 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
-atomic_inc(&adapter->running_cap_crqs);
 ibmvnic_send_crq(adapter, &crq);
+cap_reqs--;

 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
-atomic_inc(&adapter->running_cap_crqs);
 ibmvnic_send_crq(adapter, &crq);
+cap_reqs--;
+
+/* Keep at end to catch any discrepancy between expected and actual
+ * CRQs sent.
+ */
+WARN_ON(cap_reqs != 0);
 }

 static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
@@ -3923,6 +3957,8 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
 char *name;

 atomic_dec(&adapter->running_cap_crqs);
+netdev_dbg(adapter->netdev, "Outstanding request-caps: %d\n",
+           atomic_read(&adapter->running_cap_crqs));
 switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
 case REQ_TX_QUEUES:
 req_value = &adapter->req_tx_queues;
@@ -4457,12 +4493,6 @@ static void ibmvnic_tasklet(void *data)
 ibmvnic_handle_crq(crq, adapter);
 crq->generic.first = 0;
 }
-
-/* remain in tasklet until all
- * capabilities responses are received
- */
-if (!adapter->wait_capability)
-done = true;
 }
 /* if capabilities CRQ's were sent in this tasklet, the following
  * tasklet must wait until all responses are received
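Note: the ibmvnic hunks replace per-send atomic_inc() with a single atomic_set() before the first CRQ goes out. The old scheme raced: once the first request was on the wire, the response handler could start decrementing while later increments were still pending, so running_cap_crqs could hit zero early. A minimal sketch of the two orderings using C11 atomics (hypothetical names; the threading machinery is elided):

#include <stdatomic.h>

atomic_int running_cap_crqs;

/* Old, racy: a response may be counted down between two sends, letting
 * the counter reach zero before every request has even gone out. */
void send_queries_racy(int nr, void (*send_one)(int))
{
        for (int i = 0; i < nr; i++) {
                atomic_fetch_add(&running_cap_crqs, 1);
                send_one(i); /* responder does atomic_fetch_sub(..., 1) */
        }
}

/* New: account for every request up front, so the counter can only
 * fall to zero once all responses have arrived. */
void send_queries_fixed(int nr, void (*send_one)(int))
{
        atomic_store(&running_cap_crqs, nr);
        for (int i = 0; i < nr; i++)
                send_one(i);
}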

--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -179,7 +179,6 @@ enum i40e_interrupt_policy {
 struct i40e_lump_tracking {
 u16 num_entries;
-u16 search_hint;
 u16 list[0];
 #define I40E_PILE_VALID_BIT 0x8000
 #define I40E_IWARP_IRQ_PILE_ID (I40E_PILE_VALID_BIT - 2)
@@ -709,12 +708,12 @@ struct i40e_vsi {
 struct rtnl_link_stats64 net_stats_offsets;
 struct i40e_eth_stats eth_stats;
 struct i40e_eth_stats eth_stats_offsets;
-u32 tx_restart;
-u32 tx_busy;
+u64 tx_restart;
+u64 tx_busy;
 u64 tx_linearize;
 u64 tx_force_wb;
-u32 rx_buf_failed;
-u32 rx_page_failed;
+u64 rx_buf_failed;
+u64 rx_page_failed;

 /* These are containers of ring pointers, allocated at run-time */
 struct i40e_ring **rx_rings;

--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -236,7 +236,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
 (unsigned long int)vsi->net_stats_offsets.rx_compressed,
 (unsigned long int)vsi->net_stats_offsets.tx_compressed);
 dev_info(&pf->pdev->dev,
-" tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n",
+" tx_restart = %llu, tx_busy = %llu, rx_buf_failed = %llu, rx_page_failed = %llu\n",
 vsi->tx_restart, vsi->tx_busy,
 vsi->rx_buf_failed, vsi->rx_page_failed);
 rcu_read_lock();

--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -193,10 +193,6 @@ int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
  * @id: an owner id to stick on the items assigned
  *
  * Returns the base item index of the lump, or negative for error
- *
- * The search_hint trick and lack of advanced fit-finding only work
- * because we're highly likely to have all the same size lump requests.
- * Linear search time and any fragmentation should be minimal.
  **/
 static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
 u16 needed, u16 id)
@@ -211,8 +207,21 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
 return -EINVAL;
 }

-/* start the linear search with an imperfect hint */
-i = pile->search_hint;
+/* Allocate last queue in the pile for FDIR VSI queue
+ * so it doesn't fragment the qp_pile
+ */
+if (pile == pf->qp_pile && pf->vsi[id]->type == I40E_VSI_FDIR) {
+if (pile->list[pile->num_entries - 1] & I40E_PILE_VALID_BIT) {
+dev_err(&pf->pdev->dev,
+"Cannot allocate queue %d for I40E_VSI_FDIR\n",
+pile->num_entries - 1);
+return -ENOMEM;
+}
+pile->list[pile->num_entries - 1] = id | I40E_PILE_VALID_BIT;
+return pile->num_entries - 1;
+}
+
+i = 0;
 while (i < pile->num_entries) {
 /* skip already allocated entries */
 if (pile->list[i] & I40E_PILE_VALID_BIT) {
@@ -231,7 +240,6 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
 for (j = 0; j < needed; j++)
 pile->list[i+j] = id | I40E_PILE_VALID_BIT;
 ret = i;
-pile->search_hint = i + j;
 break;
 }
@@ -254,7 +262,7 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
 {
 int valid_id = (id | I40E_PILE_VALID_BIT);
 int count = 0;
-int i;
+u16 i;

 if (!pile || index >= pile->num_entries)
 return -EINVAL;
@@ -266,8 +274,6 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
 count++;
 }

-if (count && index < pile->search_hint)
-pile->search_hint = index;
-
 return count;
 }
@@ -785,9 +791,9 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 struct rtnl_link_stats64 *ns;   /* netdev stats */
 struct i40e_eth_stats *oes;
 struct i40e_eth_stats *es;     /* device's eth stats */
-u32 tx_restart, tx_busy;
+u64 tx_restart, tx_busy;
 struct i40e_ring *p;
-u32 rx_page, rx_buf;
+u64 rx_page, rx_buf;
 u64 bytes, packets;
 unsigned int start;
 u64 tx_linearize;
@@ -9486,15 +9492,9 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
 }
 i40e_get_oem_version(&pf->hw);

-if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
-    ((hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver <= 33) ||
-     hw->aq.fw_maj_ver < 4) && hw->mac.type == I40E_MAC_XL710) {
-/* The following delay is necessary for 4.33 firmware and older
- * to recover after EMP reset. 200 ms should suffice but we
- * put here 300 ms to be sure that FW is ready to operate
- * after reset.
- */
-mdelay(300);
+if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) {
+/* The following delay is necessary for firmware update. */
+mdelay(1000);
 }

 /* re-verify the eeprom if we just had an EMP reset */
@@ -10733,7 +10733,6 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
 return -ENOMEM;

 pf->irq_pile->num_entries = vectors;
-pf->irq_pile->search_hint = 0;

 /* track first vector for misc interrupts, ignore return */
 (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
@@ -11442,7 +11441,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
 goto sw_init_done;
 }
 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
-pf->qp_pile->search_hint = 0;

 pf->tx_timeout_recovery_level = 1;

--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -2338,6 +2338,59 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 aq_ret);
 }

+/**
+ * i40e_check_enough_queue - find big enough queue number
+ * @vf: pointer to the VF info
+ * @needed: the number of items needed
+ *
+ * Returns the base item index of the queue, or negative for error
+ **/
+static int i40e_check_enough_queue(struct i40e_vf *vf, u16 needed)
+{
+unsigned int i, cur_queues, more, pool_size;
+struct i40e_lump_tracking *pile;
+struct i40e_pf *pf = vf->pf;
+struct i40e_vsi *vsi;
+
+vsi = pf->vsi[vf->lan_vsi_idx];
+cur_queues = vsi->alloc_queue_pairs;
+
+/* if current allocated queues are enough for need */
+if (cur_queues >= needed)
+return vsi->base_queue;
+
+pile = pf->qp_pile;
+if (cur_queues > 0) {
+/* if the allocated queues are not zero
+ * just check if there are enough queues for more
+ * behind the allocated queues.
+ */
+more = needed - cur_queues;
+for (i = vsi->base_queue + cur_queues;
+     i < pile->num_entries; i++) {
+if (pile->list[i] & I40E_PILE_VALID_BIT)
+break;
+
+if (more-- == 1)
+/* there is enough */
+return vsi->base_queue;
+}
+}
+
+pool_size = 0;
+for (i = 0; i < pile->num_entries; i++) {
+if (pile->list[i] & I40E_PILE_VALID_BIT) {
+pool_size = 0;
+continue;
+}
+if (needed <= ++pool_size)
+/* there is enough */
+return i;
+}
+
+return -ENOMEM;
+}
+
 /**
  * i40e_vc_request_queues_msg
  * @vf: pointer to the VF info
@@ -2377,6 +2430,12 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg, int msglen)
 req_pairs - cur_pairs,
 pf->queues_left);
 vfres->num_queue_pairs = pf->queues_left + cur_pairs;
+} else if (i40e_check_enough_queue(vf, req_pairs) < 0) {
+dev_warn(&pf->pdev->dev,
+"VF %d requested %d more queues, but there is not enough for it.\n",
+vf->vf_id,
+req_pairs - cur_pairs);
+vfres->num_queue_pairs = cur_pairs;
 } else {
 /* successful request */
 vf->num_req_queues = req_pairs;

--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -159,15 +159,20 @@ static int adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec,
 static void get_systime(void __iomem *ioaddr, u64 *systime)
 {
-u64 ns;
+u64 ns, sec0, sec1;

-/* Get the TSSS value */
-ns = readl(ioaddr + PTP_STNSR);
-/* Get the TSS and convert sec time value to nanosecond */
-ns += readl(ioaddr + PTP_STSR) * 1000000000ULL;
+/* Get the TSS value */
+sec1 = readl_relaxed(ioaddr + PTP_STSR);
+do {
+sec0 = sec1;
+/* Get the TSSS value */
+ns = readl_relaxed(ioaddr + PTP_STNSR);
+/* Get the TSS value */
+sec1 = readl_relaxed(ioaddr + PTP_STSR);
+} while (sec0 != sec1);

 if (systime)
-*systime = ns;
+*systime = ns + (sec1 * 1000000000ULL);
 }

 const struct stmmac_hwtimestamp stmmac_ptp = {
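Note: the loop above is the standard consistent-read idiom for a clock split across two registers: sample seconds, read nanoseconds, re-read seconds, and retry if the seconds value moved, since a rollover between the two reads would pair a new second with stale nanoseconds. The same pattern outside the driver (read_sec()/read_nsec() are hypothetical stand-ins for the PTP_STSR/PTP_STNSR MMIO reads):

#include <stdint.h>

extern uint32_t read_sec(void);  /* stand-in for readl_relaxed(PTP_STSR) */
extern uint32_t read_nsec(void); /* stand-in for readl_relaxed(PTP_STNSR) */

uint64_t get_time_ns(void)
{
        uint32_t sec0, sec1;
        uint64_t ns;

        sec1 = read_sec();
        do {
                sec0 = sec1;
                ns = read_nsec();
                sec1 = read_sec(); /* retry if seconds rolled over mid-read */
        } while (sec0 != sec1);

        return ns + sec1 * 1000000000ULL;
}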

--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -966,9 +966,7 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 sizeof(struct yamdrv_ioctl_mcs));
 if (IS_ERR(ym))
 return PTR_ERR(ym);
-if (ym->cmd != SIOCYAMSMCS)
-return -EINVAL;
-if (ym->bitrate > YAM_MAXBITRATE) {
+if (ym->cmd != SIOCYAMSMCS || ym->bitrate > YAM_MAXBITRATE) {
 kfree(ym);
 return -EINVAL;
 }

--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -1769,6 +1769,7 @@ static int ca8210_async_xmit_complete(
 status
 );
 if (status != MAC_TRANSACTION_OVERFLOW) {
+dev_kfree_skb_any(priv->tx_skb);
 ieee802154_wake_queue(priv->hw);
 return 0;
 }

--- a/drivers/net/ieee802154/mac802154_hwsim.c
+++ b/drivers/net/ieee802154/mac802154_hwsim.c
@@ -805,6 +805,7 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev,
 goto err_pib;
 }

+pib->channel = 13;
 rcu_assign_pointer(phy->pib, pib);
 phy->idx = idx;
 INIT_LIST_HEAD(&phy->edges);

--- a/drivers/net/ieee802154/mcr20a.c
+++ b/drivers/net/ieee802154/mcr20a.c
@@ -1005,8 +1005,8 @@ static void mcr20a_hw_setup(struct mcr20a_local *lp)
 dev_dbg(printdev(lp), "%s\n", __func__);

 phy->symbol_duration = 16;
-phy->lifs_period = 40;
-phy->sifs_period = 12;
+phy->lifs_period = 40 * phy->symbol_duration;
+phy->sifs_period = 12 * phy->symbol_duration;

 hw->flags = IEEE802154_HW_TX_OMIT_CKSUM |
 IEEE802154_HW_AFILT |
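Note: the surrounding code stores symbol_duration in microseconds (16 us per 802.15.4 O-QPSK symbol), and the fix converts the interframe spacings from symbol counts into the same unit: LIFS is 40 symbols and SIFS is 12 symbols. The arithmetic, spelled out as a sketch:

#include <assert.h>

int main(void)
{
        int symbol_duration = 16; /* microseconds per 802.15.4 symbol */

        assert(40 * symbol_duration == 640); /* LIFS in microseconds */
        assert(12 * symbol_duration == 192); /* SIFS in microseconds */
        return 0;
}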

--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -3259,6 +3259,15 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
 macsec->real_dev = real_dev;

+/* send_sci must be set to true when transmit sci explicitly is set */
+if ((data && data[IFLA_MACSEC_SCI]) &&
+    (data && data[IFLA_MACSEC_INC_SCI])) {
+u8 send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);
+
+if (!send_sci)
+return -EINVAL;
+}
+
 if (data && data[IFLA_MACSEC_ICV_LEN])
 icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
 mtu = real_dev->mtu - icv_len - macsec_extra_len(true);

--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1166,6 +1166,9 @@ void phy_detach(struct phy_device *phydev)
 phydev->mdio.dev.driver == &genphy_driver.mdiodrv.driver)
 device_release_driver(&phydev->mdio.dev);

+/* Assert the reset signal */
+phy_device_reset(phydev, 1);
+
 /*
  * The phydev might go away on the put_device() below, so avoid
  * a use-after-free bug by reading the underlying bus first.
@@ -1175,9 +1178,6 @@ void phy_detach(struct phy_device *phydev)
 put_device(&phydev->mdio.dev);
 if (ndev_owner != bus->owner)
 module_put(bus->owner);
-
-/* Assert the reset signal */
-phy_device_reset(phydev, 1);
 }
 EXPORT_SYMBOL(phy_detach);

--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -554,6 +554,11 @@ static int phylink_register_sfp(struct phylink *pl,
 return ret;
 }

+if (!fwnode_device_is_available(ref.fwnode)) {
+fwnode_handle_put(ref.fwnode);
+return 0;
+}
+
 pl->sfp_bus = sfp_register_upstream(ref.fwnode, pl->netdev, pl,
 &sfp_phylink_ops);
 if (!pl->sfp_bus)

--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -173,7 +173,7 @@ static int ipheth_alloc_urbs(struct ipheth_device *iphone)
 if (tx_buf == NULL)
 goto free_rx_urb;

-rx_buf = usb_alloc_coherent(iphone->udev, IPHETH_BUF_SIZE,
+rx_buf = usb_alloc_coherent(iphone->udev, IPHETH_BUF_SIZE + IPHETH_IP_ALIGN,
 GFP_KERNEL, &rx_urb->transfer_dma);
 if (rx_buf == NULL)
 goto free_tx_buf;
@@ -198,7 +198,7 @@ static int ipheth_alloc_urbs(struct ipheth_device *iphone)
 static void ipheth_free_urbs(struct ipheth_device *iphone)
 {
-usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE, iphone->rx_buf,
+usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE + IPHETH_IP_ALIGN, iphone->rx_buf,
 iphone->rx_urb->transfer_dma);
 usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE, iphone->tx_buf,
 iphone->tx_urb->transfer_dma);
@@ -371,7 +371,7 @@ static int ipheth_rx_submit(struct ipheth_device *dev, gfp_t mem_flags)
 usb_fill_bulk_urb(dev->rx_urb, udev,
 usb_rcvbulkpipe(udev, dev->bulk_in),
-dev->rx_buf, IPHETH_BUF_SIZE,
+dev->rx_buf, IPHETH_BUF_SIZE + IPHETH_IP_ALIGN,
 ipheth_rcvbulk_callback,
 dev);
 dev->rx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

--- a/drivers/rpmsg/rpmsg_char.c
+++ b/drivers/rpmsg/rpmsg_char.c
@@ -92,7 +92,7 @@ static int rpmsg_eptdev_destroy(struct device *dev, void *data)
 /* wake up any blocked readers */
 wake_up_interruptible(&eptdev->readq);

-device_del(&eptdev->dev);
+cdev_device_del(&eptdev->cdev, &eptdev->dev);
 put_device(&eptdev->dev);

 return 0;
@@ -329,7 +329,6 @@ static void rpmsg_eptdev_release_device(struct device *dev)
 ida_simple_remove(&rpmsg_ept_ida, dev->id);
 ida_simple_remove(&rpmsg_minor_ida, MINOR(eptdev->dev.devt));
-cdev_del(&eptdev->cdev);
 kfree(eptdev);
 }
@@ -374,19 +373,13 @@ static int rpmsg_eptdev_create(struct rpmsg_ctrldev *ctrldev,
 dev->id = ret;
 dev_set_name(dev, "rpmsg%d", ret);

-ret = cdev_add(&eptdev->cdev, dev->devt, 1);
+ret = cdev_device_add(&eptdev->cdev, &eptdev->dev);
 if (ret)
 goto free_ept_ida;

 /* We can now rely on the release function for cleanup */
 dev->release = rpmsg_eptdev_release_device;

-ret = device_add(dev);
-if (ret) {
-dev_err(dev, "device_add failed: %d\n", ret);
-put_device(dev);
-}
-
 return ret;

 free_ept_ida:
@@ -455,7 +448,6 @@ static void rpmsg_ctrldev_release_device(struct device *dev)
 ida_simple_remove(&rpmsg_ctrl_ida, dev->id);
 ida_simple_remove(&rpmsg_minor_ida, MINOR(dev->devt));
-cdev_del(&ctrldev->cdev);
 kfree(ctrldev);
 }
@@ -490,19 +482,13 @@ static int rpmsg_chrdev_probe(struct rpmsg_device *rpdev)
 dev->id = ret;
 dev_set_name(&ctrldev->dev, "rpmsg_ctrl%d", ret);

-ret = cdev_add(&ctrldev->cdev, dev->devt, 1);
+ret = cdev_device_add(&ctrldev->cdev, &ctrldev->dev);
 if (ret)
 goto free_ctrl_ida;

 /* We can now rely on the release function for cleanup */
 dev->release = rpmsg_ctrldev_release_device;

-ret = device_add(dev);
-if (ret) {
-dev_err(&rpdev->dev, "device_add failed: %d\n", ret);
-put_device(dev);
-}
-
 dev_set_drvdata(&rpdev->dev, ctrldev);

 return ret;
@@ -528,7 +514,7 @@ static void rpmsg_chrdev_remove(struct rpmsg_device *rpdev)
 if (ret)
 dev_warn(&rpdev->dev, "failed to nuke endpoints: %d\n", ret);

-device_del(&ctrldev->dev);
+cdev_device_del(&ctrldev->cdev, &ctrldev->dev);
 put_device(&ctrldev->dev);
 }

--- a/drivers/rtc/rtc-mc146818-lib.c
+++ b/drivers/rtc/rtc-mc146818-lib.c
@@ -82,7 +82,7 @@ unsigned int mc146818_get_time(struct rtc_time *time)
 time->tm_year += real_year - 72;
 #endif

-if (century > 20)
+if (century > 19)
 time->tm_year += (century - 19) * 100;

 /*
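Note: the CMOS century byte holds the calendar century as a plain number (20 throughout 2000-2099), while tm_year counts years since 1900, hence the (century - 19) * 100 adjustment. The old century > 20 guard skipped exactly the current century. The same arithmetic as a worked check:

#include <assert.h>

int main(void)
{
        int century = 20; /* CMOS century byte during 2000-2099 */
        int tm_year = 22; /* two-digit year as read from the RTC */

        if (century > 19) /* the old 'century > 20' test is false here */
                tm_year += (century - 19) * 100;

        assert(tm_year == 122); /* 1900 + 122 == 2022 */
        return 0;
}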

--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -521,6 +521,8 @@ static void zfcp_fc_adisc_handler(void *data)
 goto out;
 }

+/* re-init to undo drop from zfcp_fc_adisc() */
+port->d_id = ntoh24(adisc_resp->adisc_port_id);
 /* port is good, unblock rport without going through erp */
 zfcp_scsi_schedule_rport_register(port);
 out:
@@ -534,6 +536,7 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
 struct zfcp_fc_req *fc_req;
 struct zfcp_adapter *adapter = port->adapter;
 struct Scsi_Host *shost = adapter->scsi_host;
+u32 d_id;
 int ret;

 fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_ATOMIC);
@@ -558,7 +561,15 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
 fc_req->u.adisc.req.adisc_cmd = ELS_ADISC;
 hton24(fc_req->u.adisc.req.adisc_port_id, fc_host_port_id(shost));

-ret = zfcp_fsf_send_els(adapter, port->d_id, &fc_req->ct_els,
+d_id = port->d_id; /* remember as destination for send els below */
+/*
+ * Force fresh GID_PN lookup on next port recovery.
+ * Must happen after request setup and before sending request,
+ * to prevent race with port->d_id re-init in zfcp_fc_adisc_handler().
+ */
+port->d_id = 0;
+
+ret = zfcp_fsf_send_els(adapter, d_id, &fc_req->ct_els,
 ZFCP_FC_CTELS_TMO);
 if (ret)
 kmem_cache_free(zfcp_fc_req_cache, fc_req);

--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -80,7 +80,7 @@ static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba);
 static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba);
 static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
 struct device *parent, int npiv);
-static void bnx2fc_destroy_work(struct work_struct *work);
+static void bnx2fc_port_destroy(struct fcoe_port *port);

 static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev);
 static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device
@@ -515,7 +515,8 @@ static int bnx2fc_l2_rcv_thread(void *arg)
 static void bnx2fc_recv_frame(struct sk_buff *skb)
 {
-u32 fr_len;
+u64 crc_err;
+u32 fr_len, fr_crc;
 struct fc_lport *lport;
 struct fcoe_rcv_info *fr;
 struct fc_stats *stats;
@@ -549,6 +550,11 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
 skb_pull(skb, sizeof(struct fcoe_hdr));
 fr_len = skb->len - sizeof(struct fcoe_crc_eof);

+stats = per_cpu_ptr(lport->stats, get_cpu());
+stats->RxFrames++;
+stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
+put_cpu();
+
 fp = (struct fc_frame *)skb;
 fc_frame_init(fp);
 fr_dev(fp) = lport;
@@ -631,16 +637,15 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
 return;
 }

-stats = per_cpu_ptr(lport->stats, smp_processor_id());
-stats->RxFrames++;
-stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
+fr_crc = le32_to_cpu(fr_crc(fp));

-if (le32_to_cpu(fr_crc(fp)) !=
-~crc32(~0, skb->data, fr_len)) {
-if (stats->InvalidCRCCount < 5)
+if (unlikely(fr_crc != ~crc32(~0, skb->data, fr_len))) {
+stats = per_cpu_ptr(lport->stats, get_cpu());
+crc_err = (stats->InvalidCRCCount++);
+put_cpu();
+if (crc_err < 5)
 printk(KERN_WARNING PFX "dropping frame with "
 "CRC error\n");
-stats->InvalidCRCCount++;
 kfree_skb(skb);
 return;
 }
@@ -911,9 +916,6 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
 __bnx2fc_destroy(interface);
 }
 mutex_unlock(&bnx2fc_dev_lock);
-
-/* Ensure ALL destroy work has been completed before return */
-flush_workqueue(bnx2fc_wq);
 return;

 default:
@@ -1220,8 +1222,8 @@ static int bnx2fc_vport_destroy(struct fc_vport *vport)
 mutex_unlock(&n_port->lp_mutex);
 bnx2fc_free_vport(interface->hba, port->lport);
 bnx2fc_port_shutdown(port->lport);
+bnx2fc_port_destroy(port);
 bnx2fc_interface_put(interface);
-queue_work(bnx2fc_wq, &port->destroy_work);
 return 0;
 }
@@ -1530,7 +1532,6 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
 port->lport = lport;
 port->priv = interface;
 port->get_netdev = bnx2fc_netdev;
-INIT_WORK(&port->destroy_work, bnx2fc_destroy_work);

 /* Configure fcoe_port */
 rc = bnx2fc_lport_config(lport);
@@ -1658,8 +1659,8 @@ static void __bnx2fc_destroy(struct bnx2fc_interface *interface)
 bnx2fc_interface_cleanup(interface);
 bnx2fc_stop(interface);
 list_del(&interface->list);
+bnx2fc_port_destroy(port);
 bnx2fc_interface_put(interface);
-queue_work(bnx2fc_wq, &port->destroy_work);
 }

 /**
@@ -1700,15 +1701,12 @@ static int bnx2fc_destroy(struct net_device *netdev)
 return rc;
 }

-static void bnx2fc_destroy_work(struct work_struct *work)
+static void bnx2fc_port_destroy(struct fcoe_port *port)
 {
-struct fcoe_port *port;
 struct fc_lport *lport;

-port = container_of(work, struct fcoe_port, destroy_work);
 lport = port->lport;
-
-BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n");
+BNX2FC_HBA_DBG(lport, "Entered %s, destroying lport %p\n", __func__, lport);

 bnx2fc_if_destroy(lport);
 }
@@ -2562,9 +2560,6 @@ static void bnx2fc_ulp_exit(struct cnic_dev *dev)
 __bnx2fc_destroy(interface);
 mutex_unlock(&bnx2fc_dev_lock);

-/* Ensure ALL destroy work has been completed before return */
-flush_workqueue(bnx2fc_wq);
-
 bnx2fc_ulp_stop(hba);
 /* unregister cnic device */
 if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic))

--- a/drivers/soc/mediatek/mtk-scpsys.c
+++ b/drivers/soc/mediatek/mtk-scpsys.c
@@ -341,17 +341,12 @@ static int scpsys_power_off(struct generic_pm_domain *genpd)
 return ret;
 }

-static int init_clks(struct platform_device *pdev, struct clk **clk)
+static void init_clks(struct platform_device *pdev, struct clk **clk)
 {
 int i;

-for (i = CLK_NONE + 1; i < CLK_MAX; i++) {
+for (i = CLK_NONE + 1; i < CLK_MAX; i++)
 clk[i] = devm_clk_get(&pdev->dev, clk_names[i]);
-if (IS_ERR(clk[i]))
-return PTR_ERR(clk[i]);
-}
-
-return 0;
 }

 static struct scp *init_scp(struct platform_device *pdev,
@@ -361,7 +356,7 @@ static struct scp *init_scp(struct platform_device *pdev,
 {
 struct genpd_onecell_data *pd_data;
 struct resource *res;
-int i, j, ret;
+int i, j;
 struct scp *scp;
 struct clk *clk[CLK_MAX];
@@ -416,9 +411,7 @@ static struct scp *init_scp(struct platform_device *pdev,
 pd_data->num_domains = num;

-ret = init_clks(pdev, clk);
-if (ret)
-return ERR_PTR(ret);
+init_clks(pdev, clk);

 for (i = 0; i < num; i++) {
 struct scp_domain *scpd = &scp->domains[i];

--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -520,7 +520,7 @@ static void bcm_qspi_chip_select(struct bcm_qspi *qspi, int cs)
 u32 rd = 0;
 u32 wr = 0;

-if (qspi->base[CHIP_SELECT]) {
+if (cs >= 0 && qspi->base[CHIP_SELECT]) {
 rd = bcm_qspi_read(qspi, CHIP_SELECT, 0);
 wr = (rd & ~0xff) | (1 << cs);
 if (rd == wr)
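Note: cs can legitimately be negative here (no chip select in use), and 1 << cs with a negative shift count is undefined behavior in C, so guarding with cs >= 0 before computing the register value matters beyond tidiness. A sketch of the guarded computation:

#include <stdio.h>

static unsigned int select_bits(unsigned int rd, int cs)
{
        if (cs < 0)     /* no chip select: leave the register as-is */
                return rd;
        return (rd & ~0xffu) | (1u << cs); /* shift defined only for cs >= 0 */
}

int main(void)
{
        printf("%#x\n", select_bits(0xff, 2)); /* -> 0x4 */
        return 0;
}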

--- a/drivers/spi/spi-meson-spicc.c
+++ b/drivers/spi/spi-meson-spicc.c
@@ -529,6 +529,11 @@ static int meson_spicc_probe(struct platform_device *pdev)
 writel_relaxed(0, spicc->base + SPICC_INTREG);

 irq = platform_get_irq(pdev, 0);
+if (irq < 0) {
+ret = irq;
+goto out_master;
+}
+
 ret = devm_request_irq(&pdev->dev, irq, meson_spicc_irq,
 0, NULL, spicc);
 if (ret) {

--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -498,7 +498,7 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
 else
 mdata->state = MTK_SPI_IDLE;

-if (!master->can_dma(master, master->cur_msg->spi, trans)) {
+if (!master->can_dma(master, NULL, trans)) {
 if (trans->rx_buf) {
 cnt = mdata->xfer_len / 4;
 ioread32_rep(mdata->base + SPI_RX_DATA_REG,

View File

@@ -313,6 +313,7 @@ static struct tty_driver *gsm_tty_driver;
 #define GSM1_ESCAPE_BITS	0x20
 #define XON			0x11
 #define XOFF			0x13
+#define ISO_IEC_646_MASK	0x7F
 static const struct tty_port_operations gsm_port_ops;
@@ -531,7 +532,8 @@ static int gsm_stuff_frame(const u8 *input, u8 *output, int len)
 	int olen = 0;
 	while (len--) {
 		if (*input == GSM1_SOF || *input == GSM1_ESCAPE
-		    || *input == XON || *input == XOFF) {
+		    || (*input & ISO_IEC_646_MASK) == XON
+		    || (*input & ISO_IEC_646_MASK) == XOFF) {
 			*output++ = GSM1_ESCAPE;
 			*output++ = *input++ ^ GSM1_ESCAPE_BITS;
 			olen++;
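The new mask is what makes the escaping robust: a flow-control byte whose high bit is set (for example under a parity scheme) still collides with XON/XOFF on the wire. A minimal userspace sketch of the check, reusing only the defines visible in the hunk; everything else is illustrative, not the n_gsm code:

#include <stdio.h>

#define ISO_IEC_646_MASK 0x7F
#define XON  0x11
#define XOFF 0x13

static int needs_escape(unsigned char c)
{
        /* compare only the low 7 bits, as the hunk now does */
        return (c & ISO_IEC_646_MASK) == XON ||
               (c & ISO_IEC_646_MASK) == XOFF;
}

int main(void)
{
        /* 0x91 and 0x93 are XON/XOFF with bit 7 set; both must be escaped */
        printf("%d %d %d\n", needs_escape(0x11), needs_escape(0x91),
               needs_escape(0x41));      /* prints: 1 1 0 */
        return 0;
}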


@@ -104,8 +104,17 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
 	port->mapsize = resource_size(&resource);
 	/* Check for shifted address mapping */
-	if (of_property_read_u32(np, "reg-offset", &prop) == 0)
+	if (of_property_read_u32(np, "reg-offset", &prop) == 0) {
+		if (prop >= port->mapsize) {
+			dev_warn(&ofdev->dev, "reg-offset %u exceeds region size %pa\n",
+				 prop, &port->mapsize);
+			ret = -EINVAL;
+			goto err_unprepare;
+		}
 		port->mapbase += prop;
+		port->mapsize -= prop;
+	}
 	port->iotype = UPIO_MEM;
 	if (of_property_read_u32(np, "reg-io-width", &prop) == 0) {


@@ -4797,8 +4797,30 @@ static const struct pci_device_id serial_pci_tbl[] = {
 	{	PCI_VENDOR_ID_INTASHIELD, PCI_DEVICE_ID_INTASHIELD_IS400,
 		PCI_ANY_ID, PCI_ANY_ID, 0, 0,	/* 135a.0dc0 */
 		pbn_b2_4_115200 },
+	/* Brainboxes Devices */
 	/*
-	 * BrainBoxes UC-260
+	 * Brainboxes UC-101
+	 */
+	{	PCI_VENDOR_ID_INTASHIELD, 0x0BA1,
+		PCI_ANY_ID, PCI_ANY_ID,
+		0, 0,
+		pbn_b2_2_115200 },
+	/*
+	 * Brainboxes UC-235/246
+	 */
+	{	PCI_VENDOR_ID_INTASHIELD, 0x0AA1,
+		PCI_ANY_ID, PCI_ANY_ID,
+		0, 0,
+		pbn_b2_1_115200 },
+	/*
+	 * Brainboxes UC-257
+	 */
+	{	PCI_VENDOR_ID_INTASHIELD, 0x0861,
+		PCI_ANY_ID, PCI_ANY_ID,
+		0, 0,
+		pbn_b2_2_115200 },
+	/*
+	 * Brainboxes UC-260/271/701/756
 	 */
 	{	PCI_VENDOR_ID_INTASHIELD, 0x0D21,
 		PCI_ANY_ID, PCI_ANY_ID,
@@ -4806,7 +4828,81 @@ static const struct pci_device_id serial_pci_tbl[] = {
 		pbn_b2_4_115200 },
 	{	PCI_VENDOR_ID_INTASHIELD, 0x0E34,
 		PCI_ANY_ID, PCI_ANY_ID,
 		PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00,
+		pbn_b2_4_115200 },
+	/*
+	 * Brainboxes UC-268
+	 */
+	{	PCI_VENDOR_ID_INTASHIELD, 0x0841,
+		PCI_ANY_ID, PCI_ANY_ID,
+		0, 0,
+		pbn_b2_4_115200 },
+	/*
+	 * Brainboxes UC-275/279
+	 */
+	{	PCI_VENDOR_ID_INTASHIELD, 0x0881,
+		PCI_ANY_ID, PCI_ANY_ID,
+		0, 0,
+		pbn_b2_8_115200 },
+	/*
+	 * Brainboxes UC-302
+	 */
+	{	PCI_VENDOR_ID_INTASHIELD, 0x08E1,
+		PCI_ANY_ID, PCI_ANY_ID,
+		0, 0,
+		pbn_b2_2_115200 },
+	/*
+	 * Brainboxes UC-310
+	 */
+	{	PCI_VENDOR_ID_INTASHIELD, 0x08C1,
+		PCI_ANY_ID, PCI_ANY_ID,
+		0, 0,
+		pbn_b2_2_115200 },
+	/*
+	 * Brainboxes UC-313
+	 */
+	{	PCI_VENDOR_ID_INTASHIELD, 0x08A3,
+		PCI_ANY_ID, PCI_ANY_ID,
+		0, 0,
+		pbn_b2_2_115200 },
+	/*
+	 * Brainboxes UC-320/324
+	 */
+	{	PCI_VENDOR_ID_INTASHIELD, 0x0A61,
+		PCI_ANY_ID, PCI_ANY_ID,
+		0, 0,
+		pbn_b2_1_115200 },
+	/*
+	 * Brainboxes UC-346
+	 */
+	{	PCI_VENDOR_ID_INTASHIELD, 0x0B02,
+		PCI_ANY_ID, PCI_ANY_ID,
+		0, 0,
+		pbn_b2_4_115200 },
+	/*
+	 * Brainboxes UC-357
+	 */
+	{	PCI_VENDOR_ID_INTASHIELD, 0x0A81,
+		PCI_ANY_ID, PCI_ANY_ID,
+		0, 0,
+		pbn_b2_2_115200 },
+	{	PCI_VENDOR_ID_INTASHIELD, 0x0A83,
+		PCI_ANY_ID, PCI_ANY_ID,
+		0, 0,
+		pbn_b2_2_115200 },
+	/*
+	 * Brainboxes UC-368
+	 */
+	{	PCI_VENDOR_ID_INTASHIELD, 0x0C41,
+		PCI_ANY_ID, PCI_ANY_ID,
+		0, 0,
+		pbn_b2_4_115200 },
+	/*
+	 * Brainboxes UC-420/431
+	 */
+	{	PCI_VENDOR_ID_INTASHIELD, 0x0921,
+		PCI_ANY_ID, PCI_ANY_ID,
+		0, 0,
 		pbn_b2_4_115200 },
 	/*
 	 * Perle PCI-RAS cards


@@ -509,7 +509,7 @@ static void stm32_start_tx(struct uart_port *port)
 {
 	struct circ_buf *xmit = &port->state->xmit;
-	if (uart_circ_empty(xmit))
+	if (uart_circ_empty(xmit) && !port->x_char)
 		return;
 	stm32_transmit_chars(port);


@@ -39,8 +39,11 @@ static int ulpi_match(struct device *dev, struct device_driver *driver)
 	struct ulpi *ulpi = to_ulpi_dev(dev);
 	const struct ulpi_device_id *id;
-	/* Some ULPI devices don't have a vendor id so rely on OF match */
-	if (ulpi->id.vendor == 0)
+	/*
+	 * Some ULPI devices don't have a vendor id
+	 * or provide an id_table so rely on OF match.
+	 */
+	if (ulpi->id.vendor == 0 || !drv->id_table)
 		return of_driver_match_device(dev, driver);
 	for (id = drv->id_table; id->vendor; id++)


@@ -1670,6 +1670,13 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
 		urb->hcpriv = NULL;
 		INIT_LIST_HEAD(&urb->urb_list);
 		atomic_dec(&urb->use_count);
+		/*
+		 * Order the write of urb->use_count above before the read
+		 * of urb->reject below. Pairs with the memory barriers in
+		 * usb_kill_urb() and usb_poison_urb().
+		 */
+		smp_mb__after_atomic();
 		atomic_dec(&urb->dev->urbnum);
 		if (atomic_read(&urb->reject))
 			wake_up(&usb_kill_urb_queue);
@@ -1779,6 +1786,13 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
 	usb_anchor_resume_wakeups(anchor);
 	atomic_dec(&urb->use_count);
+	/*
+	 * Order the write of urb->use_count above before the read
+	 * of urb->reject below. Pairs with the memory barriers in
+	 * usb_kill_urb() and usb_poison_urb().
+	 */
+	smp_mb__after_atomic();
 	if (unlikely(atomic_read(&urb->reject)))
 		wake_up(&usb_kill_urb_queue);
 	usb_put_urb(urb);
usb_put_urb(urb); usb_put_urb(urb);


@@ -692,6 +692,12 @@ void usb_kill_urb(struct urb *urb)
 	if (!(urb && urb->dev && urb->ep))
 		return;
 	atomic_inc(&urb->reject);
+	/*
+	 * Order the write of urb->reject above before the read
+	 * of urb->use_count below. Pairs with the barriers in
+	 * __usb_hcd_giveback_urb() and usb_hcd_submit_urb().
+	 */
+	smp_mb__after_atomic();
 	usb_hcd_unlink_urb(urb, -ENOENT);
 	wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
@@ -733,6 +739,12 @@ void usb_poison_urb(struct urb *urb)
 	if (!urb)
 		return;
 	atomic_inc(&urb->reject);
+	/*
+	 * Order the write of urb->reject above before the read
+	 * of urb->use_count below. Pairs with the barriers in
+	 * __usb_hcd_giveback_urb() and usb_hcd_submit_urb().
+	 */
+	smp_mb__after_atomic();
 	if (!urb->dev || !urb->ep)
 		return;
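The four barriers form a classic store-buffering handshake: each side writes its own flag, issues a full barrier, then reads the other side's flag, so at least one side is guaranteed to observe the other's write and usb_kill_urb() cannot block forever on a wakeup that was never sent. A userspace analogue using C11 atomics in place of smp_mb__after_atomic(); the names are stand-ins, not the USB core API (build with cc -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int use_count = 1;   /* stands in for urb->use_count */
static atomic_int reject;          /* stands in for urb->reject */

static void *giveback(void *arg)   /* mirrors __usb_hcd_giveback_urb() */
{
        atomic_fetch_sub(&use_count, 1);                /* write use_count... */
        atomic_thread_fence(memory_order_seq_cst);      /* full barrier */
        if (atomic_load(&reject))                       /* ...then read reject */
                puts("giveback: waking the killer");    /* wake_up() stand-in */
        return NULL;
}

int main(void)                     /* main plays usb_kill_urb() */
{
        pthread_t t;

        pthread_create(&t, NULL, giveback, NULL);
        atomic_fetch_add(&reject, 1);                   /* write reject... */
        atomic_thread_fence(memory_order_seq_cst);      /* full barrier */
        while (atomic_load(&use_count))                 /* ...then read use_count */
                ;                                       /* wait_event() stand-in */
        pthread_join(t, NULL);
        puts("urb is idle; safe to tear down");
        return 0;
}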


@@ -583,6 +583,7 @@ static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in,
 	if (is_iso) {
 		switch (speed) {
+		case USB_SPEED_SUPER_PLUS:
 		case USB_SPEED_SUPER:
 			size = ss->isoc_maxpacket *
 					(ss->isoc_mult + 1) *


@@ -2301,6 +2301,16 @@ UNUSUAL_DEV(  0x2027, 0xa001, 0x0000, 0x9999,
 		USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
 		US_FL_SCM_MULT_TARG ),
+/*
+ * Reported by DocMAX <mail@vacharakis.de>
+ * and Thomas Weißschuh <linux@weissschuh.net>
+ */
+UNUSUAL_DEV( 0x2109, 0x0715, 0x9999, 0x9999,
+		"VIA Labs, Inc.",
+		"VL817 SATA Bridge",
+		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+		US_FL_IGNORE_UAS),
 UNUSUAL_DEV(  0x2116, 0x0320, 0x0001, 0x0001,
 		"ST",
 		"2A",


@@ -3865,7 +3865,8 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port)
 	case SNK_TRYWAIT_DEBOUNCE:
 		break;
 	case SNK_ATTACH_WAIT:
-		tcpm_set_state(port, SNK_UNATTACHED, 0);
+	case SNK_DEBOUNCED:
+		/* Do nothing, as TCPM is still waiting for vbus to reach VSAFE5V to connect */
 		break;
 	case SNK_NEGOTIATE_CAPABILITIES:


@@ -1139,7 +1139,15 @@ static void ext4_restore_inline_data(handle_t *handle, struct inode *inode,
 				     struct ext4_iloc *iloc,
 				     void *buf, int inline_size)
 {
-	ext4_create_inline_data(handle, inode, inline_size);
+	int ret;
+
+	ret = ext4_create_inline_data(handle, inode, inline_size);
+	if (ret) {
+		ext4_msg(inode->i_sb, KERN_EMERG,
+			"error restoring inline_data for inode -- potential data loss! (inode %lu, error %d)",
+			inode->i_ino, ret);
+		return;
+	}
 	ext4_write_inline_data(inode, iloc, buf, 0, inline_size);
 	ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
 }


@@ -1627,6 +1627,24 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
 no_open:
 	res = nfs_lookup(dir, dentry, lookup_flags);
+	if (!res) {
+		inode = d_inode(dentry);
+		if ((lookup_flags & LOOKUP_DIRECTORY) && inode &&
+		    !S_ISDIR(inode->i_mode))
+			res = ERR_PTR(-ENOTDIR);
+		else if (inode && S_ISREG(inode->i_mode))
+			res = ERR_PTR(-EOPENSTALE);
+	} else if (!IS_ERR(res)) {
+		inode = d_inode(res);
+		if ((lookup_flags & LOOKUP_DIRECTORY) && inode &&
+		    !S_ISDIR(inode->i_mode)) {
+			dput(res);
+			res = ERR_PTR(-ENOTDIR);
+		} else if (inode && S_ISREG(inode->i_mode)) {
+			dput(res);
+			res = ERR_PTR(-EOPENSTALE);
+		}
+	}
 	if (switched) {
 		d_lookup_done(dentry);
 		if (!res)
@@ -2016,6 +2034,8 @@ nfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
 	trace_nfs_link_enter(inode, dir, dentry);
 	d_drop(dentry);
+	if (S_ISREG(inode->i_mode))
+		nfs_sync_inode(inode);
 	error = NFS_PROTO(dir)->link(inode, dir, &dentry->d_name);
 	if (error == 0) {
 		ihold(inode);
@@ -2104,6 +2124,8 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 		}
 	}
+	if (S_ISREG(old_inode->i_mode))
+		nfs_sync_inode(old_inode);
 	task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry, NULL);
 	if (IS_ERR(task)) {
 		error = PTR_ERR(task);


@@ -3446,8 +3446,10 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
 			status = nfserr_clid_inuse;
 			if (client_has_state(old)
 					&& !same_creds(&unconf->cl_cred,
-							&old->cl_cred))
+							&old->cl_cred)) {
+				old = NULL;
 				goto out;
+			}
 			status = mark_client_expired_locked(old);
 			if (status) {
 				old = NULL;


@@ -251,10 +251,6 @@ int udf_expand_file_adinicb(struct inode *inode)
 	char *kaddr;
 	struct udf_inode_info *iinfo = UDF_I(inode);
 	int err;
-	struct writeback_control udf_wbc = {
-		.sync_mode = WB_SYNC_NONE,
-		.nr_to_write = 1,
-	};
 	WARN_ON_ONCE(!inode_is_locked(inode));
 	if (!iinfo->i_lenAlloc) {
@@ -298,8 +294,10 @@ int udf_expand_file_adinicb(struct inode *inode)
 		iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
 	/* from now on we have normal address_space methods */
 	inode->i_data.a_ops = &udf_aops;
+	set_page_dirty(page);
+	unlock_page(page);
 	up_write(&iinfo->i_data_sem);
-	err = inode->i_data.a_ops->writepage(page, &udf_wbc);
+	err = filemap_fdatawrite(inode->i_mapping);
 	if (err) {
 		/* Restore everything back so that we don't lose data... */
 		lock_page(page);
@@ -311,6 +309,7 @@ int udf_expand_file_adinicb(struct inode *inode)
 		unlock_page(page);
 		iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
 		inode->i_data.a_ops = &udf_adinicb_aops;
+		iinfo->i_lenAlloc = inode->i_size;
 		up_write(&iinfo->i_data_sem);
 	}
 	put_page(page);


@@ -2392,6 +2392,7 @@ struct packet_type {
 					 struct net_device *);
 	bool			(*id_match)(struct packet_type *ptype,
 					    struct sock *sk);
+	struct net		*af_packet_net;
 	void			*af_packet_priv;
 	struct list_head	list;


@@ -443,19 +443,18 @@ static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
 {
 	struct iphdr *iph = ip_hdr(skb);
+	/* We had many attacks based on IPID, use the private
+	 * generator as much as we can.
+	 */
+	if (sk && inet_sk(sk)->inet_daddr) {
+		iph->id = htons(inet_sk(sk)->inet_id);
+		inet_sk(sk)->inet_id += segs;
+		return;
+	}
 	if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
-		/* This is only to work around buggy Windows95/2000
-		 * VJ compression implementations. If the ID field
-		 * does not change, they drop every other packet in
-		 * a TCP stream using header compression.
-		 */
-		if (sk && inet_sk(sk)->inet_daddr) {
-			iph->id = htons(inet_sk(sk)->inet_id);
-			inet_sk(sk)->inet_id += segs;
-		} else {
-			iph->id = 0;
-		}
+		iph->id = 0;
 	} else {
+		/* Unfortunately we need the big hammer to get a suitable IPID */
 		__ip_select_ident(net, iph, segs);
 	}
 }
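After the change the decision order is: private per-socket counter for connected sockets, constant zero for atomic (DF) datagrams, and the shared hashed generator only as a last resort. The same priority, sketched as a standalone function with stand-in types (not the kernel header):

#include <stdio.h>

enum ipid_src { IPID_SOCKET_COUNTER, IPID_CONSTANT_ZERO, IPID_SHARED_HASH };

static enum ipid_src pick_ipid(int connected, int df_set)
{
        if (connected)
                return IPID_SOCKET_COUNTER;   /* private per-socket counter */
        if (df_set)
                return IPID_CONSTANT_ZERO;    /* atomic datagram: ID unused */
        return IPID_SHARED_HASH;              /* __ip_select_ident() fallback */
}

int main(void)
{
        printf("%d %d %d\n",
               pick_ipid(1, 1),    /* 0: counter wins even with DF set */
               pick_ipid(0, 1),    /* 1: zero for unfragmentable packets */
               pick_ipid(0, 0));   /* 2: shared hashed generator */
        return 0;
}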


@@ -243,7 +243,7 @@ static inline bool fib6_get_cookie_safe(const struct fib6_info *f6i,
 	fn = rcu_dereference(f6i->fib6_node);
 	if (fn) {
-		*cookie = fn->fn_sernum;
+		*cookie = READ_ONCE(fn->fn_sernum);
 		/* pairs with smp_wmb() in fib6_update_sernum_upto_root() */
 		smp_rmb();
 		status = true;


@@ -74,7 +74,7 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
 				 struct nf_conntrack_tuple *tuple,
 				 const struct nf_nat_range2 *range,
 				 enum nf_nat_manip_type maniptype,
-				 const struct nf_conn *ct, u16 *rover);
+				 const struct nf_conn *ct);
 int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[],
 				   struct nf_nat_range2 *range);


@@ -549,20 +549,22 @@ static void kauditd_printk_skb(struct sk_buff *skb)
 /**
  * kauditd_rehold_skb - Handle a audit record send failure in the hold queue
  * @skb: audit record
+ * @error: error code (unused)
  *
  * Description:
  * This should only be used by the kauditd_thread when it fails to flush the
  * hold queue.
  */
-static void kauditd_rehold_skb(struct sk_buff *skb)
+static void kauditd_rehold_skb(struct sk_buff *skb, __always_unused int error)
 {
-	/* put the record back in the queue at the same place */
-	skb_queue_head(&audit_hold_queue, skb);
+	/* put the record back in the queue */
+	skb_queue_tail(&audit_hold_queue, skb);
 }
 /**
  * kauditd_hold_skb - Queue an audit record, waiting for auditd
  * @skb: audit record
+ * @error: error code
  *
  * Description:
  * Queue the audit record, waiting for an instance of auditd. When this
@@ -572,19 +574,31 @@ static void kauditd_rehold_skb(struct sk_buff *skb)
 * and queue it, if we have room. If we want to hold on to the record, but we
 * don't have room, record a record lost message.
 */
-static void kauditd_hold_skb(struct sk_buff *skb)
+static void kauditd_hold_skb(struct sk_buff *skb, int error)
 {
 	/* at this point it is uncertain if we will ever send this to auditd so
 	 * try to send the message via printk before we go any further */
 	kauditd_printk_skb(skb);
 	/* can we just silently drop the message? */
-	if (!audit_default) {
-		kfree_skb(skb);
-		return;
+	if (!audit_default)
+		goto drop;
+
+	/* the hold queue is only for when the daemon goes away completely,
+	 * not -EAGAIN failures; if we are in a -EAGAIN state requeue the
+	 * record on the retry queue unless it's full, in which case drop it
+	 */
+	if (error == -EAGAIN) {
+		if (!audit_backlog_limit ||
+		    skb_queue_len(&audit_retry_queue) < audit_backlog_limit) {
+			skb_queue_tail(&audit_retry_queue, skb);
+			return;
+		}
+		audit_log_lost("kauditd retry queue overflow");
+		goto drop;
 	}
-	/* if we have room, queue the message */
+	/* if we have room in the hold queue, queue the message */
 	if (!audit_backlog_limit ||
 	    skb_queue_len(&audit_hold_queue) < audit_backlog_limit) {
 		skb_queue_tail(&audit_hold_queue, skb);
@@ -593,24 +607,32 @@ static void kauditd_hold_skb(struct sk_buff *skb)
 	/* we have no other options - drop the message */
 	audit_log_lost("kauditd hold queue overflow");
+drop:
 	kfree_skb(skb);
 }
 /**
  * kauditd_retry_skb - Queue an audit record, attempt to send again to auditd
  * @skb: audit record
+ * @error: error code (unused)
  *
  * Description:
  * Not as serious as kauditd_hold_skb() as we still have a connected auditd,
  * but for some reason we are having problems sending it audit records so
  * queue the given record and attempt to resend.
  */
-static void kauditd_retry_skb(struct sk_buff *skb)
+static void kauditd_retry_skb(struct sk_buff *skb, __always_unused int error)
 {
-	/* NOTE: because records should only live in the retry queue for a
-	 * short period of time, before either being sent or moved to the hold
-	 * queue, we don't currently enforce a limit on this queue */
-	skb_queue_tail(&audit_retry_queue, skb);
+	if (!audit_backlog_limit ||
+	    skb_queue_len(&audit_retry_queue) < audit_backlog_limit) {
+		skb_queue_tail(&audit_retry_queue, skb);
+		return;
+	}
+
+	/* we have to drop the record, send it via printk as a last effort */
+	kauditd_printk_skb(skb);
+	audit_log_lost("kauditd retry queue overflow");
+	kfree_skb(skb);
 }
 /**
@@ -648,7 +670,7 @@ static void auditd_reset(const struct auditd_connection *ac)
 	/* flush the retry queue to the hold queue, but don't touch the main
 	 * queue since we need to process that normally for multicast */
 	while ((skb = skb_dequeue(&audit_retry_queue)))
-		kauditd_hold_skb(skb);
+		kauditd_hold_skb(skb, -ECONNREFUSED);
 }
 /**
@@ -722,16 +744,18 @@ static int kauditd_send_queue(struct sock *sk, u32 portid,
 			      struct sk_buff_head *queue,
 			      unsigned int retry_limit,
 			      void (*skb_hook)(struct sk_buff *skb),
-			      void (*err_hook)(struct sk_buff *skb))
+			      void (*err_hook)(struct sk_buff *skb, int error))
 {
 	int rc = 0;
-	struct sk_buff *skb;
+	struct sk_buff *skb = NULL;
+	struct sk_buff *skb_tail;
 	unsigned int failed = 0;
 	/* NOTE: kauditd_thread takes care of all our locking, we just use
 	 *       the netlink info passed to us (e.g. sk and portid) */
-	while ((skb = skb_dequeue(queue))) {
+	skb_tail = skb_peek_tail(queue);
+	while ((skb != skb_tail) && (skb = skb_dequeue(queue))) {
 		/* call the skb_hook for each skb we touch */
 		if (skb_hook)
 			(*skb_hook)(skb);
@@ -739,7 +763,7 @@ static int kauditd_send_queue(struct sock *sk, u32 portid,
 		/* can we send to anyone via unicast? */
 		if (!sk) {
 			if (err_hook)
-				(*err_hook)(skb);
+				(*err_hook)(skb, -ECONNREFUSED);
 			continue;
 		}
@@ -753,7 +777,7 @@ static int kauditd_send_queue(struct sock *sk, u32 portid,
 		    rc == -ECONNREFUSED || rc == -EPERM) {
 			sk = NULL;
 			if (err_hook)
-				(*err_hook)(skb, rc);
 			if (rc == -EAGAIN)
 				rc = 0;
 			/* continue to drain the queue */
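The err_hook now carries the send error, so the hold path can tell a busy auditd (-EAGAIN, requeue on the bounded retry queue) from a vanished one (anything else, hold queue). A rough userspace model of that dispatch, with queues reduced to counters; the limits and the -EAGAIN special case mirror the hunks above, everything else is a stand-in:

#include <stdio.h>

#define EAGAIN 11   /* value as in Linux <errno.h>, spelled out here */

static unsigned int backlog_limit = 64;   /* audit_backlog_limit stand-in */
static unsigned int hold_len, retry_len;  /* queues reduced to counters */

static void hold_record(int error)
{
        /* -EAGAIN: the daemon is alive but busy, use the bounded retry queue */
        if (error == -EAGAIN) {
                if (!backlog_limit || retry_len < backlog_limit) {
                        retry_len++;
                        return;
                }
                puts("kauditd retry queue overflow");   /* then drop */
                return;
        }
        /* anything else: the daemon is gone, fall back to the hold queue */
        if (!backlog_limit || hold_len < backlog_limit) {
                hold_len++;
                return;
        }
        puts("kauditd hold queue overflow");            /* then drop */
}

int main(void)
{
        hold_record(-EAGAIN);   /* lands on the retry queue */
        hold_record(-111);      /* e.g. -ECONNREFUSED: lands on the hold queue */
        printf("retry=%u hold=%u\n", retry_len, hold_len);
        return 0;
}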


@@ -39,23 +39,19 @@ ssize_t pm_show_wakelocks(char *buf, bool show_active)
 {
 	struct rb_node *node;
 	struct wakelock *wl;
-	char *str = buf;
-	char *end = buf + PAGE_SIZE;
+	int len = 0;
 	mutex_lock(&wakelocks_lock);
 	for (node = rb_first(&wakelocks_tree); node; node = rb_next(node)) {
 		wl = rb_entry(node, struct wakelock, node);
 		if (wl->ws->active == show_active)
-			str += scnprintf(str, end - str, "%s ", wl->name);
+			len += sysfs_emit_at(buf, len, "%s ", wl->name);
 	}
-	if (str > buf)
-		str--;
-	str += scnprintf(str, end - str, "\n");
+	len += sysfs_emit_at(buf, len, "\n");
 	mutex_unlock(&wakelocks_lock);
-	return (str - buf);
+	return len;
 }
 #if CONFIG_PM_WAKELOCKS_LIMIT > 0


@@ -5391,6 +5391,11 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
 		struct hci_ev_le_advertising_info *ev = ptr;
 		s8 rssi;
+		if (ptr > (void *)skb_tail_pointer(skb) - sizeof(*ev)) {
+			bt_dev_err(hdev, "Malicious advertising data.");
+			break;
+		}
 		if (ev->length <= HCI_MAX_AD_LENGTH &&
 		    ev->data + ev->length <= skb_tail_pointer(skb)) {
 			rssi = ev->data[ev->length];
@@ -5402,11 +5407,6 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
 		}
 		ptr += sizeof(*ev) + ev->length + 1;
-
-		if (ptr > (void *) skb_tail_pointer(skb) - sizeof(*ev)) {
-			bt_dev_err(hdev, "Malicious advertising data. Stopping processing");
-			break;
-		}
 	}
 	hci_dev_unlock(hdev);
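Moving the check to the top of the loop validates the fixed-size header before any of its fields are read; the old placement checked only after the current iteration had already dereferenced ev. A compact userspace sketch of the corrected parse order, with a simplified two-byte header rather than the real hci_ev_le_advertising_info layout:

#include <stdio.h>

struct adv_info {
        unsigned char evt_type;  /* simplified fixed header */
        unsigned char length;
        unsigned char data[];
};

static void parse(const unsigned char *ptr, const unsigned char *tail)
{
        while (ptr < tail) {
                const struct adv_info *ev = (const void *)ptr;

                /* validate the fixed header *before* reading ev->length */
                if (ptr > tail - sizeof(*ev)) {
                        fprintf(stderr, "malicious advertising data\n");
                        break;
                }
                /* then validate the variable part plus the trailing RSSI byte */
                if (ev->data + ev->length + 1 > tail)
                        break;
                printf("report: %u data bytes, rssi %d\n",
                       ev->length, (signed char)ev->data[ev->length]);
                ptr += sizeof(*ev) + ev->length + 1;
        }
}

int main(void)
{
        /* one valid report followed by a truncated header */
        unsigned char buf[] = { 0x00, 2, 0xaa, 0xbb, 0xf0, 0x00 };
        parse(buf, buf + sizeof(buf));
        return 0;
}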


@@ -182,12 +182,23 @@ static const struct seq_operations softnet_seq_ops = {
 	.show  = softnet_seq_show,
 };
-static void *ptype_get_idx(loff_t pos)
+static void *ptype_get_idx(struct seq_file *seq, loff_t pos)
 {
+	struct list_head *ptype_list = NULL;
 	struct packet_type *pt = NULL;
+	struct net_device *dev;
 	loff_t i = 0;
 	int t;
+	for_each_netdev_rcu(seq_file_net(seq), dev) {
+		ptype_list = &dev->ptype_all;
+		list_for_each_entry_rcu(pt, ptype_list, list) {
+			if (i == pos)
+				return pt;
+			++i;
+		}
+	}
+
 	list_for_each_entry_rcu(pt, &ptype_all, list) {
 		if (i == pos)
 			return pt;
@@ -208,22 +219,40 @@ static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
 	__acquires(RCU)
 {
 	rcu_read_lock();
-	return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
+	return *pos ? ptype_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
 }
 static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
+	struct net_device *dev;
 	struct packet_type *pt;
 	struct list_head *nxt;
 	int hash;
 	++*pos;
 	if (v == SEQ_START_TOKEN)
-		return ptype_get_idx(0);
+		return ptype_get_idx(seq, 0);
 	pt = v;
 	nxt = pt->list.next;
+	if (pt->dev) {
+		if (nxt != &pt->dev->ptype_all)
+			goto found;
+
+		dev = pt->dev;
+		for_each_netdev_continue_rcu(seq_file_net(seq), dev) {
+			if (!list_empty(&dev->ptype_all)) {
+				nxt = dev->ptype_all.next;
+				goto found;
+			}
+		}
+
+		nxt = ptype_all.next;
+		goto ptype_all;
+	}
+
 	if (pt->type == htons(ETH_P_ALL)) {
+ptype_all:
 		if (nxt != &ptype_all)
 			goto found;
 		hash = 0;
@@ -252,7 +281,8 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
 	if (v == SEQ_START_TOKEN)
 		seq_puts(seq, "Type Device      Function\n");
-	else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
+	else if ((!pt->af_packet_net || net_eq(pt->af_packet_net, seq_file_net(seq))) &&
+		 (!pt->dev || net_eq(dev_net(pt->dev), seq_file_net(seq)))) {
 		if (pt->type == htons(ETH_P_ALL))
 			seq_puts(seq, "ALL ");
 		else


@@ -2942,9 +2942,9 @@ static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
 {
 	struct net *net = sock_net(skb->sk);
 	const struct rtnl_link_ops *ops;
-	const struct rtnl_link_ops *m_ops = NULL;
+	const struct rtnl_link_ops *m_ops;
 	struct net_device *dev;
-	struct net_device *master_dev = NULL;
+	struct net_device *master_dev;
 	struct ifinfomsg *ifm;
 	char kind[MODULE_NAME_LEN];
 	char ifname[IFNAMSIZ];
@@ -2979,6 +2979,8 @@ static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
 		dev = NULL;
 	}
+	master_dev = NULL;
+	m_ops = NULL;
 	if (dev) {
 		master_dev = netdev_master_upper_dev_get(dev);
 		if (master_dev)


@@ -1459,7 +1459,7 @@ static int nl802154_send_key(struct sk_buff *msg, u32 cmd, u32 portid,
 	hdr = nl802154hdr_put(msg, portid, seq, flags, cmd);
 	if (!hdr)
-		return -1;
+		return -ENOBUFS;
 	if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex))
 		goto nla_put_failure;
@@ -1650,7 +1650,7 @@ static int nl802154_send_device(struct sk_buff *msg, u32 cmd, u32 portid,
 	hdr = nl802154hdr_put(msg, portid, seq, flags, cmd);
 	if (!hdr)
-		return -1;
+		return -ENOBUFS;
 	if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex))
 		goto nla_put_failure;
@@ -1828,7 +1828,7 @@ static int nl802154_send_devkey(struct sk_buff *msg, u32 cmd, u32 portid,
 	hdr = nl802154hdr_put(msg, portid, seq, flags, cmd);
 	if (!hdr)
-		return -1;
+		return -ENOBUFS;
 	if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex))
 		goto nla_put_failure;
@@ -2005,7 +2005,7 @@ static int nl802154_send_seclevel(struct sk_buff *msg, u32 cmd, u32 portid,
 	hdr = nl802154hdr_put(msg, portid, seq, flags, cmd);
 	if (!hdr)
-		return -1;
+		return -ENOBUFS;
 	if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex))
 		goto nla_put_failure;


@@ -160,12 +160,19 @@ int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
 	iph->daddr    = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
 	iph->saddr    = saddr;
 	iph->protocol = sk->sk_protocol;
-	if (ip_dont_fragment(sk, &rt->dst)) {
+	/* Do not bother generating IPID for small packets (eg SYNACK) */
+	if (skb->len <= IPV4_MIN_MTU || ip_dont_fragment(sk, &rt->dst)) {
 		iph->frag_off = htons(IP_DF);
 		iph->id = 0;
 	} else {
 		iph->frag_off = 0;
-		__ip_select_ident(net, iph, 1);
+		/* TCP packets here are SYNACK with fat IPv4/TCP options.
+		 * Avoid using the hashed IP ident generator.
+		 */
+		if (sk->sk_protocol == IPPROTO_TCP)
+			iph->id = (__force __be16)prandom_u32();
+		else
+			__ip_select_ident(net, iph, 1);
 	}
 	if (opt && opt->opt.optlen) {


@@ -225,7 +225,8 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
 			continue;
 		}
-		if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
+		if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif &&
+		    sk->sk_bound_dev_if != inet_sdif(skb))
 			continue;
 		sock_hold(sk);


@@ -725,6 +725,7 @@ static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	int ret = -EINVAL;
 	int chk_addr_ret;
+	lock_sock(sk);
 	if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_in))
 		goto out;
@@ -744,7 +745,9 @@ static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 		inet->inet_saddr = 0;  /* Use device */
 	sk_dst_reset(sk);
 	ret = 0;
-out:	return ret;
+out:
+	release_sock(sk);
+	return ret;
 }
 /*
/* /*

View File

@@ -968,6 +968,8 @@ enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer)
 static void tcp_internal_pacing(struct sock *sk, const struct sk_buff *skb)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+	ktime_t expire, now;
 	u64 len_ns;
 	u32 rate;
@@ -979,12 +981,28 @@ static void tcp_internal_pacing(struct sock *sk, const struct sk_buff *skb)
 	len_ns = (u64)skb->len * NSEC_PER_SEC;
 	do_div(len_ns, rate);
-	hrtimer_start(&tcp_sk(sk)->pacing_timer,
-		      ktime_add_ns(ktime_get(), len_ns),
+
+	now = ktime_get();
+	/* If hrtimer is already armed, then our caller has not
+	 * used tcp_pacing_check().
+	 */
+	if (unlikely(hrtimer_is_queued(&tp->pacing_timer))) {
+		expire = hrtimer_get_softexpires(&tp->pacing_timer);
+		if (ktime_after(expire, now))
+			now = expire;
+		if (hrtimer_try_to_cancel(&tp->pacing_timer) == 1)
+			__sock_put(sk);
+	}
+	hrtimer_start(&tp->pacing_timer, ktime_add_ns(now, len_ns),
 		      HRTIMER_MODE_ABS_PINNED_SOFT);
 	sock_hold(sk);
 }
+static bool tcp_pacing_check(const struct sock *sk)
+{
+	return tcp_needs_internal_pacing(sk) &&
+	       hrtimer_is_queued(&tcp_sk(sk)->pacing_timer);
+}
+
 static void tcp_update_skb_after_send(struct tcp_sock *tp, struct sk_buff *skb)
 {
 	skb->skb_mstamp = tp->tcp_mstamp;
@@ -2121,6 +2139,9 @@ static int tcp_mtu_probe(struct sock *sk)
 	if (!tcp_can_coalesce_send_queue_head(sk, probe_size))
 		return -1;
+	if (tcp_pacing_check(sk))
+		return -1;
+
 	/* We're allowed to probe. Build it now. */
 	nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false);
 	if (!nskb)
@@ -2194,12 +2215,6 @@ static int tcp_mtu_probe(struct sock *sk)
 	return -1;
 }
-static bool tcp_pacing_check(const struct sock *sk)
-{
-	return tcp_needs_internal_pacing(sk) &&
-	       hrtimer_is_queued(&tcp_sk(sk)->pacing_timer);
-}
-
 /* TCP Small Queues :
  * Control number of packets in qdisc/devices to two packets / or ~1 ms.
  * (These limits are doubled for retransmits)
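The rearm logic guarantees the pacing timer never moves backwards: if a timer is already queued, the new expiry is based on the queued expiry when that lies in the future, and the extra socket reference is released once the old timer is cancelled. The scheduling rule in isolation, with plain integers standing in for ktime_t (an illustrative sketch, not the kernel code):

#include <stdio.h>

static long arm_pacing(long now, long len_ns, int queued, long queued_expire)
{
        long start = now;

        if (queued && queued_expire > now)
                start = queued_expire;  /* never reschedule earlier than
                                           the already-armed expiry */
        return start + len_ns;          /* new absolute expiry */
}

int main(void)
{
        /* timer queued to fire at 120: the new expiry is 170, not 150 */
        printf("%ld\n", arm_pacing(100, 50, 1, 120));
        return 0;
}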


@@ -112,7 +112,7 @@ void fib6_update_sernum(struct net *net, struct fib6_info *f6i)
 	fn = rcu_dereference_protected(f6i->fib6_node,
 			lockdep_is_held(&f6i->fib6_table->tb6_lock));
 	if (fn)
-		fn->fn_sernum = fib6_new_sernum(net);
+		WRITE_ONCE(fn->fn_sernum, fib6_new_sernum(net));
 }
 /*
@@ -544,12 +544,13 @@ static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb,
 		spin_unlock_bh(&table->tb6_lock);
 		if (res > 0) {
 			cb->args[4] = 1;
-			cb->args[5] = w->root->fn_sernum;
+			cb->args[5] = READ_ONCE(w->root->fn_sernum);
 		}
 	} else {
-		if (cb->args[5] != w->root->fn_sernum) {
+		int sernum = READ_ONCE(w->root->fn_sernum);
+
+		if (cb->args[5] != sernum) {
 			/* Begin at the root if the tree changed */
-			cb->args[5] = w->root->fn_sernum;
+			cb->args[5] = sernum;
 			w->state = FWS_INIT;
 			w->node = w->root;
 			w->skip = w->count;
@@ -1203,7 +1204,7 @@ static void __fib6_update_sernum_upto_root(struct fib6_info *rt,
 	/* paired with smp_rmb() in rt6_get_cookie_safe() */
 	smp_wmb();
 	while (fn) {
-		fn->fn_sernum = sernum;
+		WRITE_ONCE(fn->fn_sernum, sernum);
 		fn = rcu_dereference_protected(fn->parent,
 				lockdep_is_held(&rt->fib6_table->tb6_lock));
 	}
@@ -1983,8 +1984,8 @@ static int fib6_clean_node(struct fib6_walker *w)
 	};
 	if (c->sernum != FIB6_NO_SERNUM_CHANGE &&
-	    w->node->fn_sernum != c->sernum)
-		w->node->fn_sernum = c->sernum;
+	    READ_ONCE(w->node->fn_sernum) != c->sernum)
+		WRITE_ONCE(w->node->fn_sernum, c->sernum);
 	if (!c->func) {
 		WARN_ON_ONCE(c->sernum == FIB6_NO_SERNUM_CHANGE);
@@ -2332,7 +2333,7 @@ static void ipv6_route_seq_setup_walk(struct ipv6_route_iter *iter,
 	iter->w.state = FWS_INIT;
 	iter->w.node = iter->w.root;
 	iter->w.args = iter;
-	iter->sernum = iter->w.root->fn_sernum;
+	iter->sernum = READ_ONCE(iter->w.root->fn_sernum);
 	INIT_LIST_HEAD(&iter->w.lh);
 	fib6_walker_link(net, &iter->w);
 }
@@ -2360,8 +2361,10 @@ static struct fib6_table *ipv6_route_seq_next_table(struct fib6_table *tbl,
 static void ipv6_route_check_sernum(struct ipv6_route_iter *iter)
 {
-	if (iter->sernum != iter->w.root->fn_sernum) {
-		iter->sernum = iter->w.root->fn_sernum;
+	int sernum = READ_ONCE(iter->w.root->fn_sernum);
+
+	if (iter->sernum != sernum) {
+		iter->sernum = sernum;
 		iter->w.state = FWS_INIT;
 		iter->w.node = iter->w.root;
 		WARN_ON(iter->w.skip);
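Every access to fn_sernum is now annotated because walkers read it locklessly while writers update it under tb6_lock; READ_ONCE()/WRITE_ONCE() keep the compiler from tearing, caching, or re-reading the value. A userspace analogue in which relaxed C11 atomics play the role of the kernel macros (illustrative only):

#include <stdatomic.h>

static atomic_int fn_sernum;   /* the generation counter */

/* writer side: runs under the table lock in the kernel */
static void writer_bump(int new_sernum)
{
        atomic_store_explicit(&fn_sernum, new_sernum, memory_order_relaxed);
}

/* reader side: lockless, under RCU in the kernel */
static int reader_snapshot(void)
{
        return atomic_load_explicit(&fn_sernum, memory_order_relaxed);
}

int main(void)
{
        writer_bump(42);
        return reader_snapshot() == 42 ? 0 : 1;
}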


@@ -1005,14 +1005,14 @@ int ip6_tnl_xmit_ctl(struct ip6_tnl *t,
 		if (unlikely(!ipv6_chk_addr_and_flags(net, laddr, ldev, false,
 						      0, IFA_F_TENTATIVE)))
-			pr_warn("%s xmit: Local address not yet configured!\n",
-				p->name);
+			pr_warn_ratelimited("%s xmit: Local address not yet configured!\n",
+					    p->name);
 		else if (!(p->flags & IP6_TNL_F_ALLOW_LOCAL_REMOTE) &&
 			 !ipv6_addr_is_multicast(raddr) &&
 			 unlikely(ipv6_chk_addr_and_flags(net, raddr, ldev,
 							  true, 0, IFA_F_TENTATIVE)))
-			pr_warn("%s xmit: Routing loop! Remote address found on this node!\n",
-				p->name);
+			pr_warn_ratelimited("%s xmit: Routing loop! Remote address found on this node!\n",
+					    p->name);
 		else
 			ret = 1;
 		rcu_read_unlock();


@@ -2320,7 +2320,7 @@ static void ip6_link_failure(struct sk_buff *skb)
 		if (from) {
 			fn = rcu_dereference(from->fib6_node);
 			if (fn && (rt->rt6i_flags & RTF_DEFAULT))
-				fn->fn_sernum = -1;
+				WRITE_ONCE(fn->fn_sernum, -1);
 		}
 	}
 	rcu_read_unlock();


@@ -38,12 +38,12 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
 				 struct nf_conntrack_tuple *tuple,
 				 const struct nf_nat_range2 *range,
 				 enum nf_nat_manip_type maniptype,
-				 const struct nf_conn *ct,
-				 u16 *rover)
+				 const struct nf_conn *ct)
 {
-	unsigned int range_size, min, max, i;
+	unsigned int range_size, min, max, i, attempts;
 	__be16 *portptr;
-	u_int16_t off;
+	u16 off;
+	static const unsigned int max_attempts = 128;
 	if (maniptype == NF_NAT_MANIP_SRC)
 		portptr = &tuple->src.u.all;
@@ -86,18 +86,31 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
 	} else if (range->flags & NF_NAT_RANGE_PROTO_OFFSET) {
 		off = (ntohs(*portptr) - ntohs(range->base_proto.all));
 	} else {
-		off = *rover;
+		off = prandom_u32();
 	}
-	for (i = 0; ; ++off) {
+	attempts = range_size;
+	if (attempts > max_attempts)
+		attempts = max_attempts;
+
+	/* We are in softirq; doing a search of the entire range risks
+	 * soft lockup when all tuples are already used.
+	 *
+	 * If we can't find any free port from first offset, pick a new
+	 * one and try again, with ever smaller search window.
+	 */
+another_round:
+	for (i = 0; i < attempts; i++, off++) {
 		*portptr = htons(min + off % range_size);
-		if (++i != range_size && nf_nat_used_tuple(tuple, ct))
-			continue;
-		if (!(range->flags & (NF_NAT_RANGE_PROTO_RANDOM_ALL|
-				      NF_NAT_RANGE_PROTO_OFFSET)))
-			*rover = off;
-		return;
+		if (!nf_nat_used_tuple(tuple, ct))
+			return;
 	}
+
+	if (attempts >= range_size || attempts < 16)
+		return;
+	attempts /= 2;
+	off = prandom_u32();
+	goto another_round;
 }
 EXPORT_SYMBOL_GPL(nf_nat_l4proto_unique_tuple);
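The comment in the hunk states the constraint: this runs in softirq context, so an exhaustive scan of a fully used range could soft-lock the CPU. The replacement bounds each scan at 128 candidates and retries from fresh random offsets with a halving window. The same search extracted into a standalone sketch; in_use() is a hypothetical stand-in for nf_nat_used_tuple():

#include <stdlib.h>

/* bounded scans from random offsets, halving the budget after each round */
static int pick_port(unsigned int min, unsigned int range_size,
                     int (*in_use)(unsigned int port))
{
        unsigned int attempts = range_size < 128 ? range_size : 128;
        unsigned int off = (unsigned int)rand();
        unsigned int i;

another_round:
        for (i = 0; i < attempts; i++, off++) {
                unsigned int port = min + off % range_size;

                if (!in_use(port))
                        return (int)port;
        }
        if (attempts >= range_size || attempts < 16)
                return -1;              /* give up: clash not resolved */
        attempts /= 2;                  /* shrink the search window... */
        off = (unsigned int)rand();     /* ...and restart at a fresh offset */
        goto another_round;
}

static int busy_below_1000(unsigned int port)
{
        return port < 1000;
}

int main(void)
{
        return pick_port(512, 1024, busy_below_1000) >= 0 ? 0 : 1;
}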


@@ -18,8 +18,6 @@
 #include <net/netfilter/nf_nat_l3proto.h>
 #include <net/netfilter/nf_nat_l4proto.h>
-static u_int16_t dccp_port_rover;
-
 static void
 dccp_unique_tuple(const struct nf_nat_l3proto *l3proto,
 		  struct nf_conntrack_tuple *tuple,
@@ -27,8 +25,7 @@ dccp_unique_tuple(const struct nf_nat_l3proto *l3proto,
 		  enum nf_nat_manip_type maniptype,
 		  const struct nf_conn *ct)
 {
-	nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct,
-				    &dccp_port_rover);
+	nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct);
 }
 static bool
static bool static bool


@@ -12,8 +12,6 @@
 #include <net/netfilter/nf_nat_l4proto.h>
-static u_int16_t nf_sctp_port_rover;
-
 static void
 sctp_unique_tuple(const struct nf_nat_l3proto *l3proto,
 		  struct nf_conntrack_tuple *tuple,
@@ -21,8 +19,7 @@ sctp_unique_tuple(const struct nf_nat_l3proto *l3proto,
 		  enum nf_nat_manip_type maniptype,
 		  const struct nf_conn *ct)
 {
-	nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct,
-				    &nf_sctp_port_rover);
+	nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct);
 }
 static bool
static bool static bool


@@ -18,8 +18,6 @@
 #include <net/netfilter/nf_nat_l4proto.h>
 #include <net/netfilter/nf_nat_core.h>
-static u16 tcp_port_rover;
-
 static void
 tcp_unique_tuple(const struct nf_nat_l3proto *l3proto,
 		 struct nf_conntrack_tuple *tuple,
@@ -27,8 +25,7 @@ tcp_unique_tuple(const struct nf_nat_l3proto *l3proto,
 		 enum nf_nat_manip_type maniptype,
 		 const struct nf_conn *ct)
 {
-	nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct,
-				    &tcp_port_rover);
+	nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct);
 }
 static bool
static bool static bool


@@ -17,8 +17,6 @@
 #include <net/netfilter/nf_nat_l3proto.h>
 #include <net/netfilter/nf_nat_l4proto.h>
-static u16 udp_port_rover;
-
 static void
 udp_unique_tuple(const struct nf_nat_l3proto *l3proto,
 		 struct nf_conntrack_tuple *tuple,
@@ -26,8 +24,7 @@ udp_unique_tuple(const struct nf_nat_l3proto *l3proto,
 		 enum nf_nat_manip_type maniptype,
 		 const struct nf_conn *ct)
 {
-	nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct,
-				    &udp_port_rover);
+	nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct);
 }
 static void
@@ -78,8 +75,6 @@ static bool udp_manip_pkt(struct sk_buff *skb,
 }
 #ifdef CONFIG_NF_NAT_PROTO_UDPLITE
-static u16 udplite_port_rover;
-
 static bool udplite_manip_pkt(struct sk_buff *skb,
 			      const struct nf_nat_l3proto *l3proto,
 			      unsigned int iphdroff, unsigned int hdroff,
@@ -103,8 +98,7 @@ udplite_unique_tuple(const struct nf_nat_l3proto *l3proto,
 			 enum nf_nat_manip_type maniptype,
 			 const struct nf_conn *ct)
 {
-	nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct,
-				    &udplite_port_rover);
+	nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct);
 }
 const struct nf_nat_l4proto nf_nat_l4proto_udplite = {


@@ -194,6 +194,9 @@ static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt,
 					struct sk_buff *skb,
 					unsigned int *l4csum_offset)
 {
+	if (pkt->xt.fragoff)
+		return -1;
+
 	switch (pkt->tprot) {
 	case IPPROTO_TCP:
 		*l4csum_offset = offsetof(struct tcphdr, check);


@@ -1716,6 +1716,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
 		match->prot_hook.dev = po->prot_hook.dev;
 		match->prot_hook.func = packet_rcv_fanout;
 		match->prot_hook.af_packet_priv = match;
+		match->prot_hook.af_packet_net = read_pnet(&match->net);
 		match->prot_hook.id_match = match_fanout_group;
 		list_add(&match->list, &fanout_list);
 	}
@@ -1729,7 +1730,10 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
 		err = -ENOSPC;
 		if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
 			__dev_remove_pack(&po->prot_hook);
-			po->fanout = match;
+
+			/* Paired with packet_setsockopt(PACKET_FANOUT_DATA) */
+			WRITE_ONCE(po->fanout, match);
+
 			po->rollover = rollover;
 			rollover = NULL;
 			refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
@@ -3294,6 +3298,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
 	po->prot_hook.func = packet_rcv_spkt;
 	po->prot_hook.af_packet_priv = sk;
+	po->prot_hook.af_packet_net = sock_net(sk);
 	if (proto) {
 		po->prot_hook.type = proto;
@@ -3875,7 +3880,8 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
 	}
 	case PACKET_FANOUT_DATA:
 	{
-		if (!po->fanout)
+		/* Paired with the WRITE_ONCE() in fanout_add() */
+		if (!READ_ONCE(po->fanout))
 			return -EINVAL;
 		return fanout_set_data(po, optval, optlen);


@@ -2106,6 +2106,7 @@ static void alc1220_fixup_gb_x570(struct hda_codec *codec,
 {
 	static const hda_nid_t conn1[] = { 0x0c };
 	static const struct coef_fw gb_x570_coefs[] = {
+		WRITE_COEF(0x07, 0x03c0),
 		WRITE_COEF(0x1a, 0x01c1),
 		WRITE_COEF(0x1b, 0x0202),
 		WRITE_COEF(0x43, 0x3005),
@@ -2532,7 +2533,8 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
 	SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_GB_X570),
-	SND_PCI_QUIRK(0x1458, 0xa0ce, "Gigabyte X570 Aorus Xtreme", ALC1220_FIXUP_CLEVO_P950),
+	SND_PCI_QUIRK(0x1458, 0xa0ce, "Gigabyte X570 Aorus Xtreme", ALC1220_FIXUP_GB_X570),
+	SND_PCI_QUIRK(0x1458, 0xa0d5, "Gigabyte X570S Aorus Master", ALC1220_FIXUP_GB_X570),
 	SND_PCI_QUIRK(0x1462, 0x11f7, "MSI-GE63", ALC1220_FIXUP_CLEVO_P950),
 	SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950),
 	SND_PCI_QUIRK(0x1462, 0x1229, "MSI-GP73", ALC1220_FIXUP_CLEVO_P950),
@@ -2607,6 +2609,7 @@ static const struct hda_model_fixup alc882_fixup_models[] = {
 	{.id = ALC882_FIXUP_NO_PRIMARY_HP, .name = "no-primary-hp"},
 	{.id = ALC887_FIXUP_ASUS_BASS, .name = "asus-bass"},
 	{.id = ALC1220_FIXUP_GB_DUAL_CODECS, .name = "dual-codecs"},
+	{.id = ALC1220_FIXUP_GB_X570, .name = "gb-x570"},
 	{.id = ALC1220_FIXUP_CLEVO_P950, .name = "clevo-p950"},
 	{}
 };


@@ -1541,6 +1541,8 @@ static int cpcap_codec_probe(struct platform_device *pdev)
 {
 	struct device_node *codec_node =
 		of_get_child_by_name(pdev->dev.parent->of_node, "audio-codec");
+	if (!codec_node)
+		return -ENODEV;
 	pdev->dev.of_node = codec_node;


@@ -64,7 +64,8 @@ static int speaker_gain_control_put(struct snd_kcontrol *kcontrol,
 	struct snd_soc_component *c = snd_soc_kcontrol_component(kcontrol);
 	struct max9759 *priv = snd_soc_component_get_drvdata(c);
-	if (ucontrol->value.integer.value[0] > 3)
+	if (ucontrol->value.integer.value[0] < 0 ||
+	    ucontrol->value.integer.value[0] > 3)
 		return -EINVAL;
 	priv->gain = ucontrol->value.integer.value[0];


@@ -90,16 +90,21 @@ static int pcm030_fabric_probe(struct platform_device *op)
 		dev_err(&op->dev, "platform_device_alloc() failed\n");
 	ret = platform_device_add(pdata->codec_device);
-	if (ret)
+	if (ret) {
 		dev_err(&op->dev, "platform_device_add() failed: %d\n", ret);
+		platform_device_put(pdata->codec_device);
+	}
 	ret = snd_soc_register_card(card);
-	if (ret)
+	if (ret) {
 		dev_err(&op->dev, "snd_soc_register_card() failed: %d\n", ret);
+		platform_device_del(pdata->codec_device);
+		platform_device_put(pdata->codec_device);
+	}
 	platform_set_drvdata(op, pdata);
 	return ret;
 }
 static int pcm030_fabric_remove(struct platform_device *op)


@@ -322,13 +322,27 @@ int snd_soc_put_volsw(struct snd_kcontrol *kcontrol,
 	if (sign_bit)
 		mask = BIT(sign_bit + 1) - 1;
-	val = ((ucontrol->value.integer.value[0] + min) & mask);
+	val = ucontrol->value.integer.value[0];
+	if (mc->platform_max && val > mc->platform_max)
+		return -EINVAL;
+	if (val > max - min)
+		return -EINVAL;
+	if (val < 0)
+		return -EINVAL;
+	val = (val + min) & mask;
 	if (invert)
 		val = max - val;
 	val_mask = mask << shift;
 	val = val << shift;
 	if (snd_soc_volsw_is_stereo(mc)) {
-		val2 = ((ucontrol->value.integer.value[1] + min) & mask);
+		val2 = ucontrol->value.integer.value[1];
+		if (mc->platform_max && val2 > mc->platform_max)
+			return -EINVAL;
+		if (val2 > max - min)
+			return -EINVAL;
+		if (val2 < 0)
+			return -EINVAL;
+		val2 = (val2 + min) & mask;
 		if (invert)
 			val2 = max - val2;
 		if (reg == reg2) {
@@ -422,8 +436,15 @@ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol,
 	int err = 0;
 	unsigned int val, val_mask, val2 = 0;
+	val = ucontrol->value.integer.value[0];
+	if (mc->platform_max && val > mc->platform_max)
+		return -EINVAL;
+	if (val > max - min)
+		return -EINVAL;
+	if (val < 0)
+		return -EINVAL;
+
 	val_mask = mask << shift;
-	val = (ucontrol->value.integer.value[0] + min) & mask;
+	val = (val + min) & mask;
 	val = val << shift;
 	err = snd_soc_component_update_bits(component, reg, val_mask, val);
@@ -889,6 +910,8 @@ int snd_soc_put_xr_sx(struct snd_kcontrol *kcontrol,
 	unsigned int i, regval, regmask;
 	int err;
+	if (val < mc->min || val > mc->max)
+		return -EINVAL;
 	if (invert)
 		val = max - val;
 	val &= mask;
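All three hunks apply the same rule: validate the raw userspace value against 0..(max - min) and platform_max before biasing by min and masking, so an out-of-range value fails with -EINVAL instead of silently wrapping into the register. The check in isolation (a hypothetical helper, not the ASoC API):

#include <errno.h>

/* reject a raw control value before it is biased by 'min' and masked */
static int check_ctl_val(long val, long min, long max, long platform_max)
{
        if (platform_max && val > platform_max)
                return -EINVAL;
        if (val < 0 || val > max - min)
                return -EINVAL;
        return 0;   /* safe to apply: ((val + min) & mask) cannot escape */
}

int main(void)
{
        /* e.g. a control spanning -50..50 takes raw values 0..100 */
        return check_ctl_val(120, -50, 50, 0) == -EINVAL ? 0 : 1;
}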


@@ -11,7 +11,7 @@ all:
 	@for DIR in $(SUBDIRS); do		\
 		BUILD_TARGET=$(OUTPUT)/$$DIR;	\
 		mkdir $$BUILD_TARGET  -p;	\
-		make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\
+		$(MAKE) OUTPUT=$$BUILD_TARGET -C $$DIR $@;\
 		if [ -e $$DIR/$(TEST_PROGS) ]; then \
 			rsync -a $$DIR/$(TEST_PROGS) $$BUILD_TARGET/; \
 		fi \
@@ -32,6 +32,6 @@ override define CLEAN
 	@for DIR in $(SUBDIRS); do		\
 		BUILD_TARGET=$(OUTPUT)/$$DIR;	\
 		mkdir $$BUILD_TARGET  -p;	\
-		make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\
+		$(MAKE) OUTPUT=$$BUILD_TARGET -C $$DIR $@;\
 	done
 endef