Merge android-4.19.26 (c97d2b5) into msm-4.19

* refs/heads/tmp-c97d2b5:
  Linux 4.19.26
  net: phylink: avoid resolving link state too early
  pinctrl: max77620: Use define directive for max77620_pinconf_param values
  udlfb: handle unplug properly
  netfilter: ipt_CLUSTERIP: fix sleep-in-atomic bug in clusterip_config_entry_put()
  netfilter: nfnetlink_osf: add missing fmatch check
  netfilter: ipv6: Don't preserve original oif for loopback address
  netfilter: nft_compat: use-after-free when deleting targets
  netfilter: nf_tables: fix flush after rule deletion in the same batch
  Revert "bridge: do not add port to router list when receives query with source 0.0.0.0"
  staging: erofs: unzip_vle_lz4.c,utils.c: rectify BUG_ONs
  staging: erofs: unzip_{pagevec.h,vle.c}: rectify BUG_ONs
  staging: erofs: {dir,inode,super}.c: rectify BUG_ONs
  staging: erofs: add a full barrier in erofs_workgroup_unfreeze
  staging: erofs: fix `erofs_workgroup_{try_to_freeze, unfreeze}'
  staging: erofs: atomic_cond_read_relaxed on ref-locked workgroup
  staging: erofs: remove the redundant d_rehash() for the root dentry
  staging: erofs: drop multiref support temporarily
  staging: erofs: replace BUG_ON with DBG_BUGON in data.c
  staging: erofs: complete error handing of z_erofs_do_read_page
  staging: erofs: fix a bug when appling cache strategy
  net: avoid false positives in untrusted gso validation
  net: validate untrusted gso packets without csum offload
  kvm: x86: Return LA57 feature based on hardware capability
  mac80211: allocate tailroom for forwarded mesh packets
  drm/amd/display: Fix MST reboot/poweroff sequence
  drm/i915/fbdev: Actually configure untiled displays
  gpu: drm: radeon: Set DPM_FLAG_NEVER_SKIP when enabling PM-runtime
  drm/amdgpu: Set DPM_FLAG_NEVER_SKIP when enabling PM-runtime
  ARC: define ARCH_SLAB_MINALIGN = 8
  ARC: U-boot: check arguments paranoidly
  ARCv2: Enable unaligned access in early ASM code
  parisc: Fix ptrace syscall number modification
  KEYS: always initialize keyring_index_key::desc_len
  KEYS: user: Align the payload buffer
  RDMA/srp: Rework SCSI device reset handling
  net/mlx5e: XDP, fix redirect resources availability check
  net_sched: fix two more memory leaks in cls_tcindex
  net_sched: fix a memory leak in cls_tcindex
  net_sched: fix a race condition in tcindex_destroy()
  sit: check if IPv6 enabled before calling ip6_err_gen_icmpv6_unreach()
  geneve: should not call rt6_lookup() when ipv6 was disabled
  net: socket: make bond ioctls go through compat_ifreq_ioctl()
  net: socket: fix SIOCGIFNAME in compat
  Revert "kill dev_ifsioc()"
  Revert "socket: fix struct ifreq size in compat ioctl"
  team: avoid complex list operations in team_nl_cmd_options_set()
  sctp: set stream ext to NULL after freeing it in sctp_stream_outq_migrate
  sctp: call gso_reset_checksum when computing checksum in sctp_gso_segment
  net: sfp: do not probe SFP module before we're attached
  net/packet: fix 4gb buffer limit due to overflow check
  net/mlx5e: Don't overwrite pedit action when multiple pedit used
  net/mlx4_en: Force CHECKSUM_NONE for short ethernet frames
  net: ena: fix race between link up and device initalization
  ipv6: propagate genlmsg_reply return code
  inet_diag: fix reporting cgroup classid and fallback to priority
  batman-adv: fix uninit-value in batadv_interface_tx()
  isdn: avm: Fix string plus integer warning from Clang
  net/mlx5e: Fix wrong (zero) TX drop counter indication for representor
  selftests: forwarding: Add a test case for externally learned FDB entries
  mlxsw: spectrum_switchdev: Do not treat static FDB entries as sticky
  net: bridge: Mark FDB entries that were added by user as such
  mlxsw: pci: Return error on PCI reset timeout
  dpaa_eth: NETIF_F_LLTX requires to do our own update of trans_start
  bpf: bpf_setsockopt: reset sock dst on SO_MARK changes
  leds: lp5523: fix a missing check of return value of lp55xx_read
  hwmon: (tmp421) Correct the misspelling of the tmp442 compatible attribute in OF device ID table
  atm: he: fix sign-extension overflow on large shift
  selftests/bpf: retry tests that expect build-id
  bpf: zero out build_id for BPF_STACK_BUILD_ID_IP
  bpf: don't assume build-id length is always 20 bytes
  afs: Fix key refcounting in file locking code
  afs: Don't set vnode->cb_s_break in afs_validate()
  selftests: tc-testing: fix parsing of ife type
  selftests: tc-testing: fix tunnel_key failure if dst_port is unspecified
  selftests: tc-testing: drop test on missing tunnel key id
  pvcalls-front: fix potential null dereference
  drm/sun4i: backend: add missing of_node_puts
  vhost: return EINVAL if iovecs size does not match the message size
  drm/amd/display: fix PME notification not working in RV desktop
  drm/amdkfd: Don't assign dGPUs to APU topology devices
  drm/meson: add missing of_node_put
  always clear the X2APIC_ENABLE bit for PV guest
  netfilter: nft_flow_offload: fix checking method of conntrack helper
  scsi: cxgb4i: add wait_for_completion()
  scsi: ufs: Fix geometry descriptor size
  scsi: qedi: Add ep_state for login completion on un-reachable targets
  scsi: ufs: Fix system suspend status
  scsi: tcmu: avoid cmd/qfull timers updated whenever a new cmd comes
  isdn: i4l: isdn_tty: Fix some concurrency double-free bugs
  net: stmmac: Prevent RX starvation in stmmac_napi_poll()
  net: stmmac: Fix the logic of checking if RX Watchdog must be enabled
  net: stmmac: Check if CBS is supported before configuring
  net: stmmac: dwxgmac2: Only clear interrupts that are active
  net: stmmac: Fix PCI module removal leak
  acpi/nfit: Fix race accessing memdev in nfit_get_smbios_id()
  powerpc/8xx: fix setting of pagetable for Abatron BDI debug tool.
  RDMA/mthca: Clear QP objects during their allocation
  netfilter: nft_flow_offload: fix interaction with vrf slave device
  bpf: fix panic in stack_map_get_build_id() on i386 and arm32
  pvcalls-front: Avoid get_free_pages(GFP_KERNEL) under spinlock
  bpf: correctly set initial window on active Fast Open sender
  netfilter: nft_flow_offload: Fix reverse route lookup
  MIPS: jazz: fix 64bit build
  include/linux/compiler*.h: fix OPTIMIZER_HIDE_VAR
  scsi: isci: initialize shost fully before calling scsi_add_host()
  scsi: qla4xxx: check return code of qla4xxx_copy_from_fwddb_param
  netfilter: nf_tables: fix leaking object reference count
  selftests: forwarding: Add a test for VLAN deletion
  mlxsw: spectrum_acl: Add cleanup after C-TCAM update error condition
  xprtrdma: Double free in rpcrdma_sendctxs_create()
  MIPS: ath79: Enable OF serial ports in the default config
  net/mlx4: Get rid of page operation after dma_alloc_coherent
  watchdog: mt7621_wdt/rt2880_wdt: Fix compilation problem
  selftests/bpf: Test [::] -> [::1] rewrite in sys_sendmsg in test_sock_addr
  bpf: Fix [::] -> [::1] rewrite in sys_sendmsg
  net: hns: Fix use after free identified by SLUB debug
  qed: Fix qed_ll2_post_rx_buffer_notify_fw() by adding a write memory barrier
  qed: Fix qed_chain_set_prod() for PBL chains with non power of 2 page count
  xen/pvcalls: remove set but not used variable 'intf'
  mfd: mc13xxx: Fix a missing check of a register-read failure
  mfd: tps65218: Use devm_regmap_add_irq_chip and clean up error path in probe()
  mfd: cros_ec_dev: Add missing mfd_remove_devices() call in remove
  mfd: axp20x: Add supported cells for AXP803
  mfd: axp20x: Re-align MFD cell entries
  mfd: axp20x: Add AC power supply cell for AXP813
  mfd: wm5110: Add missing ASRC rate register
  mfd: qcom_rpm: write fw_version to CTRL_REG
  mfd: bd9571mwv: Add volatile register to make DVFS work
  mfd: ab8500-core: Return zero in get_register_interruptible()
  mfd: mt6397: Do not call irq_domain_remove if PMIC unsupported
  mfd: db8500-prcmu: Fix some section annotations
  mfd: twl-core: Fix section annotations on {,un}protect_pm_master
  pvcalls-back: set -ENOTCONN in pvcalls_conn_back_read
  pvcalls-front: properly allocate sk
  pvcalls-front: don't try to free unallocated rings
  pvcalls-front: read all data before closing the connection
  mfd: ti_am335x_tscadc: Use PLATFORM_DEVID_AUTO while registering mfd cells
  backlight: pwm_bl: Fix devicetree parsing with auto-generated brightness tables
  KEYS: allow reaching the keys quotas exactly
  ALSA: hda/realtek: Disable PC beep in passthrough on alc285
  ALSA: hda/realtek - Headset microphone and internal speaker support for System76 oryp5
  proc, oom: do not report alien mms when setting oom_score_adj
  numa: change get_mempolicy() to use nr_node_ids instead of MAX_NUMNODES
  ceph: avoid repeatedly adding inode to mdsc->snap_flush_list
  libceph: handle an empty authorize reply
  mac80211: Free mpath object when rhashtable insertion fails
  mac80211: Use linked list instead of rhashtable walk for mesh tables
  mac80211: Restore vif beacon interval if start ap fails
  gpio: pxa: avoid attempting to set pin direction via pinctrl on MMP2
  gpio: MT7621: use a per instance irq_chip structure
  MIPS: eBPF: Always return sign extended 32b values
  tracing: Fix number of entries in trace header
  ARM: 8834/1: Fix: kprobes: optimized kprobes illegal instruction

Change-Id: Ie585d8274f881ac87155e9deda341c43cd8923b4
Signed-off-by: Ivaylo Georgiev <irgeorgiev@codeaurora.org>
153 changed files with 1488 additions and 1006 deletions


@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 25
SUBLEVEL = 26
EXTRAVERSION =
NAME = "People's Front"


@@ -52,6 +52,17 @@
#define cache_line_size() SMP_CACHE_BYTES
#define ARCH_DMA_MINALIGN SMP_CACHE_BYTES
/*
* Make sure slab-allocated buffers are 64-bit aligned when atomic64_t uses
* ARCv2 64-bit atomics (LLOCKD/SCONDD). This guarantees runtime 64-bit
* alignment for any atomic64_t embedded in a buffer.
* Default ARCH_SLAB_MINALIGN is __alignof__(long long) which has a relaxed
* value of 4 (and not 8) in ARC ABI.
*/
#if defined(CONFIG_ARC_HAS_LL64) && defined(CONFIG_ARC_HAS_LLSC)
#define ARCH_SLAB_MINALIGN 8
#endif
extern void arc_cache_init(void);
extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
extern void read_decode_cache_bcr(void);
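A minimal sketch of the failure mode the new ARCH_SLAB_MINALIGN prevents (hypothetical struct, not from the patch): the ARC ABI relaxes __alignof__(long long) to 4, so without the stricter slab minimum a kmalloc()'d object embedding an atomic64_t can land on a 4-byte boundary, which LLOCKD/SCONDD cannot operate on.

    struct stats {
            u32             seq;
            atomic64_t      bytes;  /* must be 8-byte aligned at runtime */
    };

    static void stats_account(struct stats *s, u64 n)
    {
            /* Safe only because ARCH_SLAB_MINALIGN == 8 guarantees the
             * kmalloc()'d object, and the atomic64_t inside it, is
             * 8-byte aligned for the LLOCKD/SCONDD sequence below. */
            atomic64_add(n, &s->bytes);
    }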


@@ -17,6 +17,7 @@
#include <asm/entry.h>
#include <asm/arcregs.h>
#include <asm/cache.h>
#include <asm/irqflags.h>
.macro CPU_EARLY_SETUP
@@ -47,6 +48,15 @@
sr r5, [ARC_REG_DC_CTRL]
1:
#ifdef CONFIG_ISA_ARCV2
; Unaligned access is disabled at reset, so re-enable early as
; gcc 7.3.1 (ARC GNU 2018.03) onwards generates unaligned access
; by default
lr r5, [status32]
bset r5, r5, STATUS_AD_BIT
kflag r5
#endif
.endm
.section .init.text, "ax",@progbits
@@ -93,9 +103,9 @@ ENTRY(stext)
#ifdef CONFIG_ARC_UBOOT_SUPPORT
; Uboot - kernel ABI
; r0 = [0] No uboot interaction, [1] cmdline in r2, [2] DTB in r2
; r1 = magic number (board identity, unused as of now
; r1 = magic number (always zero as of now)
; r2 = pointer to uboot provided cmdline or external DTB in mem
; These are handled later in setup_arch()
; These are handled later in handle_uboot_args()
st r0, [@uboot_tag]
st r2, [@uboot_arg]
#endif


@@ -449,43 +449,80 @@ void setup_processor(void)
arc_chk_core_config();
}
static inline int is_kernel(unsigned long addr)
static inline bool uboot_arg_invalid(unsigned long addr)
{
if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
return 1;
return 0;
/*
* Check that it is an untranslated address (although MMU is not enabled
* yet, it being a high address ensures this is not by fluke)
*/
if (addr < PAGE_OFFSET)
return true;
/* Check that address doesn't clobber resident kernel image */
return addr >= (unsigned long)_stext && addr <= (unsigned long)_end;
}
#define IGNORE_ARGS "Ignore U-boot args: "
/* uboot_tag values for U-boot - kernel ABI revision 0; see head.S */
#define UBOOT_TAG_NONE 0
#define UBOOT_TAG_CMDLINE 1
#define UBOOT_TAG_DTB 2
void __init handle_uboot_args(void)
{
bool use_embedded_dtb = true;
bool append_cmdline = false;
#ifdef CONFIG_ARC_UBOOT_SUPPORT
/* check that we know this tag */
if (uboot_tag != UBOOT_TAG_NONE &&
uboot_tag != UBOOT_TAG_CMDLINE &&
uboot_tag != UBOOT_TAG_DTB) {
pr_warn(IGNORE_ARGS "invalid uboot tag: '%08x'\n", uboot_tag);
goto ignore_uboot_args;
}
if (uboot_tag != UBOOT_TAG_NONE &&
uboot_arg_invalid((unsigned long)uboot_arg)) {
pr_warn(IGNORE_ARGS "invalid uboot arg: '%px'\n", uboot_arg);
goto ignore_uboot_args;
}
/* see if U-boot passed an external Device Tree blob */
if (uboot_tag == UBOOT_TAG_DTB) {
machine_desc = setup_machine_fdt((void *)uboot_arg);
/* external Device Tree blob is invalid - use embedded one */
use_embedded_dtb = !machine_desc;
}
if (uboot_tag == UBOOT_TAG_CMDLINE)
append_cmdline = true;
ignore_uboot_args:
#endif
if (use_embedded_dtb) {
machine_desc = setup_machine_fdt(__dtb_start);
if (!machine_desc)
panic("Embedded DT invalid\n");
}
/*
* NOTE: @boot_command_line is populated by setup_machine_fdt() so this
* append processing can only happen after.
*/
if (append_cmdline) {
/* Ensure a whitespace between the 2 cmdlines */
strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
strlcat(boot_command_line, uboot_arg, COMMAND_LINE_SIZE);
}
}
void __init setup_arch(char **cmdline_p)
{
#ifdef CONFIG_ARC_UBOOT_SUPPORT
/* make sure that uboot passed pointer to cmdline/dtb is valid */
if (uboot_tag && is_kernel((unsigned long)uboot_arg))
panic("Invalid uboot arg\n");
/* See if u-boot passed an external Device Tree blob */
machine_desc = setup_machine_fdt(uboot_arg); /* uboot_tag == 2 */
if (!machine_desc)
#endif
{
/* No, so try the embedded one */
machine_desc = setup_machine_fdt(__dtb_start);
if (!machine_desc)
panic("Embedded DT invalid\n");
/*
* If we are here, it is established that @uboot_arg didn't
* point to DT blob. Instead if u-boot says it is cmdline,
* append to embedded DT cmdline.
* setup_machine_fdt() would have populated @boot_command_line
*/
if (uboot_tag == 1) {
/* Ensure a whitespace between the 2 cmdlines */
strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
strlcat(boot_command_line, uboot_arg,
COMMAND_LINE_SIZE);
}
}
handle_uboot_args();
/* Save unparsed command line copy for /proc/cmdline */
*cmdline_p = boot_command_line;
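A worked example of the append path above, with hypothetical values: if the embedded DT supplies boot_command_line = "console=ttyS0,115200" and U-boot passes uboot_tag == UBOOT_TAG_CMDLINE with uboot_arg pointing at "root=/dev/mmcblk0p1 rw", the two strlcat() calls in handle_uboot_args() leave

    boot_command_line == "console=ttyS0,115200 root=/dev/mmcblk0p1 rw"

truncated to COMMAND_LINE_SIZE if necessary, and that combined string is what ends up in /proc/cmdline.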


@@ -247,7 +247,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *or
}
/* Copy arch-dep-instance from template. */
memcpy(code, (unsigned char *)optprobe_template_entry,
memcpy(code, (unsigned long *)&optprobe_template_entry,
TMPL_END_IDX * sizeof(kprobe_opcode_t));
/* Adjust buffer according to instruction. */


@@ -71,6 +71,7 @@ CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_SERIAL_8250_PCI is not set
CONFIG_SERIAL_8250_NR_UARTS=1
CONFIG_SERIAL_8250_RUNTIME_UARTS=1
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_AR933X=y
CONFIG_SERIAL_AR933X_CONSOLE=y
# CONFIG_HW_RANDOM is not set


@@ -74,14 +74,15 @@ static int __init vdma_init(void)
get_order(VDMA_PGTBL_SIZE));
BUG_ON(!pgtbl);
dma_cache_wback_inv((unsigned long)pgtbl, VDMA_PGTBL_SIZE);
pgtbl = (VDMA_PGTBL_ENTRY *)KSEG1ADDR(pgtbl);
pgtbl = (VDMA_PGTBL_ENTRY *)CKSEG1ADDR((unsigned long)pgtbl);
/*
* Clear the R4030 translation table
*/
vdma_pgtbl_init();
r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE, CPHYSADDR(pgtbl));
r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE,
CPHYSADDR((unsigned long)pgtbl));
r4030_write_reg32(JAZZ_R4030_TRSTBL_LIM, VDMA_PGTBL_SIZE);
r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0);


@@ -343,12 +343,15 @@ static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg)
const struct bpf_prog *prog = ctx->skf;
int stack_adjust = ctx->stack_size;
int store_offset = stack_adjust - 8;
enum reg_val_type td;
int r0 = MIPS_R_V0;
if (dest_reg == MIPS_R_RA &&
get_reg_val_type(ctx, prog->len, BPF_REG_0) == REG_32BIT_ZERO_EX)
if (dest_reg == MIPS_R_RA) {
/* Don't let zero extended value escape. */
emit_instr(ctx, sll, r0, r0, 0);
td = get_reg_val_type(ctx, prog->len, BPF_REG_0);
if (td == REG_64BIT || td == REG_32BIT_ZERO_EX)
emit_instr(ctx, sll, r0, r0, 0);
}
if (ctx->flags & EBPF_SAVE_RA) {
emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP);


@@ -308,15 +308,29 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
long do_syscall_trace_enter(struct pt_regs *regs)
{
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
tracehook_report_syscall_entry(regs)) {
if (test_thread_flag(TIF_SYSCALL_TRACE)) {
int rc = tracehook_report_syscall_entry(regs);
/*
* Tracing decided this syscall should not happen or the
* debugger stored an invalid system call number. Skip
* the system call and the system call restart handling.
* As tracesys_next does not set %r28 to -ENOSYS
* when %r20 is set to -1, initialize it here.
*/
regs->gr[20] = -1UL;
goto out;
regs->gr[28] = -ENOSYS;
if (rc) {
/*
* A nonzero return code from
* tracehook_report_syscall_entry() tells us
* to prevent the syscall execution. Skip
* the syscall call and the syscall restart handling.
*
* Note that the tracer may also just change
* regs->gr[20] to an invalid syscall number,
* that is handled by tracesys_next.
*/
regs->gr[20] = -1UL;
return -1;
}
}
/* Do the secure computing check after ptrace. */
@@ -340,7 +354,6 @@ long do_syscall_trace_enter(struct pt_regs *regs)
regs->gr[24] & 0xffffffff,
regs->gr[23] & 0xffffffff);
out:
/*
* Sign extend the syscall number to 64bit since it may have been
* modified by a compat ptrace call


@@ -919,11 +919,12 @@ start_here:
/* set up the PTE pointers for the Abatron bdiGDB.
*/
tovirt(r6,r6)
lis r5, abatron_pteptrs@h
ori r5, r5, abatron_pteptrs@l
stw r5, 0xf0(0) /* Must match your Abatron config file */
tophys(r5,r5)
lis r6, swapper_pg_dir@h
ori r6, r6, swapper_pg_dir@l
stw r6, 0(r5)
/* Now turn on the MMU for real! */


@@ -337,6 +337,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0;
unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0;
unsigned f_la57 = 0;
/* cpuid 1.edx */
const u32 kvm_cpuid_1_edx_x86_features =
@@ -491,7 +492,10 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
// TSC_ADJUST is emulated
entry->ebx |= F(TSC_ADJUST);
entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
f_la57 = entry->ecx & F(LA57);
cpuid_mask(&entry->ecx, CPUID_7_ECX);
/* Set LA57 based on hardware capability. */
entry->ecx |= f_la57;
entry->ecx |= f_umip;
/* PKU is not yet implemented for shadow paging. */
if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))


@@ -899,10 +899,7 @@ static u64 xen_read_msr_safe(unsigned int msr, int *err)
val = native_read_msr_safe(msr, err);
switch (msr) {
case MSR_IA32_APICBASE:
#ifdef CONFIG_X86_X2APIC
if (!(cpuid_ecx(1) & (1 << (X86_FEATURE_X2APIC & 31))))
#endif
val &= ~X2APIC_ENABLE;
val &= ~X2APIC_ENABLE;
break;
}
return val;


@@ -719,6 +719,7 @@ int nfit_get_smbios_id(u32 device_handle, u16 *flags)
struct acpi_nfit_memory_map *memdev;
struct acpi_nfit_desc *acpi_desc;
struct nfit_mem *nfit_mem;
u16 physical_id;
mutex_lock(&acpi_desc_lock);
list_for_each_entry(acpi_desc, &acpi_descs, list) {
@@ -726,10 +727,11 @@ int nfit_get_smbios_id(u32 device_handle, u16 *flags)
list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
memdev = __to_nfit_memdev(nfit_mem);
if (memdev->device_handle == device_handle) {
*flags = memdev->flags;
physical_id = memdev->physical_id;
mutex_unlock(&acpi_desc->init_mutex);
mutex_unlock(&acpi_desc_lock);
*flags = memdev->flags;
return memdev->physical_id;
return physical_id;
}
}
mutex_unlock(&acpi_desc->init_mutex);


@@ -717,7 +717,7 @@ static int he_init_cs_block_rcm(struct he_dev *he_dev)
instead of '/ 512', use '>> 9' to prevent a call
to divdu3 on x86 platforms
*/
rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;
rate_cps = (unsigned long long) (1UL << exp) * (man + 512) >> 9;
if (rate_cps < 10)
rate_cps = 10; /* 2.2.1 minimum payload rate is 10 cps */
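The 1UL is the whole fix: the shift is otherwise performed on a 32-bit signed int and sign-extended when widened to unsigned long long. A minimal demonstration of the bug class, assuming a 32-bit int (exp can be large here):

    unsigned long long bad  = (unsigned long long)(1 << 31);
                                            /* 0xffffffff80000000 */
    unsigned long long good = (unsigned long long)(1UL << 31);
                                            /* 0x0000000080000000 */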


@@ -30,6 +30,7 @@
#define GPIO_REG_EDGE 0xA0
struct mtk_gc {
struct irq_chip irq_chip;
struct gpio_chip chip;
spinlock_t lock;
int bank;
@@ -189,13 +190,6 @@ mediatek_gpio_irq_type(struct irq_data *d, unsigned int type)
return 0;
}
static struct irq_chip mediatek_gpio_irq_chip = {
.irq_unmask = mediatek_gpio_irq_unmask,
.irq_mask = mediatek_gpio_irq_mask,
.irq_mask_ack = mediatek_gpio_irq_mask,
.irq_set_type = mediatek_gpio_irq_type,
};
static int
mediatek_gpio_xlate(struct gpio_chip *chip,
const struct of_phandle_args *spec, u32 *flags)
@@ -254,6 +248,13 @@ mediatek_gpio_bank_probe(struct device *dev,
return ret;
}
rg->irq_chip.name = dev_name(dev);
rg->irq_chip.parent_device = dev;
rg->irq_chip.irq_unmask = mediatek_gpio_irq_unmask;
rg->irq_chip.irq_mask = mediatek_gpio_irq_mask;
rg->irq_chip.irq_mask_ack = mediatek_gpio_irq_mask;
rg->irq_chip.irq_set_type = mediatek_gpio_irq_type;
if (mtk->gpio_irq) {
/*
* Manually request the irq here instead of passing
@@ -270,14 +271,14 @@ mediatek_gpio_bank_probe(struct device *dev,
return ret;
}
ret = gpiochip_irqchip_add(&rg->chip, &mediatek_gpio_irq_chip,
ret = gpiochip_irqchip_add(&rg->chip, &rg->irq_chip,
0, handle_simple_irq, IRQ_TYPE_NONE);
if (ret) {
dev_err(dev, "failed to add gpiochip_irqchip\n");
return ret;
}
gpiochip_set_chained_irqchip(&rg->chip, &mediatek_gpio_irq_chip,
gpiochip_set_chained_irqchip(&rg->chip, &rg->irq_chip,
mtk->gpio_irq, NULL);
}
@@ -310,7 +311,6 @@ mediatek_gpio_probe(struct platform_device *pdev)
mtk->gpio_irq = irq_of_parse_and_map(np, 0);
mtk->dev = dev;
platform_set_drvdata(pdev, mtk);
mediatek_gpio_irq_chip.name = dev_name(dev);
for (i = 0; i < MTK_BANK_CNT; i++) {
ret = mediatek_gpio_bank_probe(dev, np, i);


@@ -245,6 +245,7 @@ static bool pxa_gpio_has_pinctrl(void)
{
switch (gpio_type) {
case PXA3XX_GPIO:
case MMP2_GPIO:
return false;
default:


@@ -159,6 +159,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
}
if (amdgpu_device_is_px(dev)) {
dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
pm_runtime_use_autosuspend(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
pm_runtime_set_active(dev->dev);


@@ -1072,8 +1072,6 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
* the GPU device is not already present in the topology device
* list then return NULL. This means a new topology device has to
* be created for this GPU.
* TODO: Rather than assiging @gpu to first topology device withtout
* gpu attached, it will better to have more stringent check.
*/
static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
{
@@ -1081,12 +1079,20 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
struct kfd_topology_device *out_dev = NULL;
down_write(&topology_lock);
list_for_each_entry(dev, &topology_device_list, list)
list_for_each_entry(dev, &topology_device_list, list) {
/* Discrete GPUs need their own topology device list
* entries. Don't assign them to CPU/APU nodes.
*/
if (!gpu->device_info->needs_iommu_device &&
dev->node_props.cpu_cores_count)
continue;
if (!dev->gpu && (dev->node_props.simd_count > 0)) {
dev->gpu = gpu;
out_dev = dev;
break;
}
}
up_write(&topology_lock);
return out_dev;
}


@@ -624,12 +624,13 @@ static int dm_suspend(void *handle)
struct amdgpu_display_manager *dm = &adev->dm;
int ret = 0;
WARN_ON(adev->dm.cached_state);
adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
s3_handle_mst(adev->ddev, true);
amdgpu_dm_irq_suspend(adev);
WARN_ON(adev->dm.cached_state);
adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);


@@ -1000,7 +1000,7 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio);
if (num_audio == 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
if (num_audio >= 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
/*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
/* un-mute audio */
@@ -1017,6 +1017,8 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
pipe_ctx->stream_res.stream_enc, true);
if (pipe_ctx->stream_res.audio) {
struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
if (option != KEEP_ACQUIRED_RESOURCE ||
!dc->debug.az_endpoint_mute_only) {
/*only disable az_endpoint if power down or free*/
@@ -1036,6 +1038,9 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false);
pipe_ctx->stream_res.audio = NULL;
}
if (pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
/*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
/* TODO: notify audio driver for if audio modes list changed
* add audio mode list change flag */


@@ -334,8 +334,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
bool *enabled, int width, int height)
{
struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
unsigned long conn_configured, conn_seq, mask;
unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
unsigned long conn_configured, conn_seq;
int i, j;
bool *save_enabled;
bool fallback = true, ret = true;
@@ -353,10 +353,9 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
drm_modeset_backoff(&ctx);
memcpy(save_enabled, enabled, count);
mask = GENMASK(count - 1, 0);
conn_seq = GENMASK(count - 1, 0);
conn_configured = 0;
retry:
conn_seq = conn_configured;
for (i = 0; i < count; i++) {
struct drm_fb_helper_connector *fb_conn;
struct drm_connector *connector;
@@ -369,7 +368,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
if (conn_configured & BIT(i))
continue;
if (conn_seq == 0 && !connector->has_tile)
/* First pass, only consider tiled connectors */
if (conn_seq == GENMASK(count - 1, 0) && !connector->has_tile)
continue;
if (connector->status == connector_status_connected)
@@ -473,8 +473,10 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
conn_configured |= BIT(i);
}
if ((conn_configured & mask) != mask && conn_configured != conn_seq)
if (conn_configured != conn_seq) { /* repeat until no more are found */
conn_seq = conn_configured;
goto retry;
}
/*
* If the BIOS didn't enable everything it could, fall back to have the


@@ -368,8 +368,10 @@ static int meson_probe_remote(struct platform_device *pdev,
remote_node = of_graph_get_remote_port_parent(ep);
if (!remote_node ||
remote_node == parent || /* Ignore parent endpoint */
!of_device_is_available(remote_node))
!of_device_is_available(remote_node)) {
of_node_put(remote_node);
continue;
}
count += meson_probe_remote(pdev, match, remote, remote_node);
@@ -388,10 +390,13 @@ static int meson_drv_probe(struct platform_device *pdev)
for_each_endpoint_of_node(np, ep) {
remote = of_graph_get_remote_port_parent(ep);
if (!remote || !of_device_is_available(remote))
if (!remote || !of_device_is_available(remote)) {
of_node_put(remote);
continue;
}
count += meson_probe_remote(pdev, &match, np, remote);
of_node_put(remote);
}
if (count && !match)


@@ -172,6 +172,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
}
if (radeon_is_px(dev)) {
dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
pm_runtime_use_autosuspend(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
pm_runtime_set_active(dev->dev);


@@ -717,17 +717,18 @@ static struct sun4i_frontend *sun4i_backend_find_frontend(struct sun4i_drv *drv,
remote = of_graph_get_remote_port_parent(ep);
if (!remote)
continue;
of_node_put(remote);
/* does this node match any registered engines? */
list_for_each_entry(frontend, &drv->frontend_list, list) {
if (remote == frontend->node) {
of_node_put(remote);
of_node_put(port);
of_node_put(ep);
return frontend;
}
}
}
of_node_put(port);
return ERR_PTR(-EINVAL);
}


@@ -88,7 +88,7 @@ static const struct of_device_id tmp421_of_match[] = {
.data = (void *)2
},
{
.compatible = "ti,tmp422",
.compatible = "ti,tmp442",
.data = (void *)3
},
{ },


@@ -533,7 +533,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
{
struct mthca_ucontext *context;
qp = kmalloc(sizeof *qp, GFP_KERNEL);
qp = kzalloc(sizeof(*qp), GFP_KERNEL);
if (!qp)
return ERR_PTR(-ENOMEM);
@@ -599,7 +599,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
if (pd->uobject)
return ERR_PTR(-EINVAL);
qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL);
qp = kzalloc(sizeof(struct mthca_sqp), GFP_KERNEL);
if (!qp)
return ERR_PTR(-ENOMEM);


@@ -2951,7 +2951,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
{
struct srp_target_port *target = host_to_target(scmnd->device->host);
struct srp_rdma_ch *ch;
int i, j;
u8 status;
shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
@@ -2963,15 +2962,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
if (status)
return FAILED;
for (i = 0; i < target->ch_count; i++) {
ch = &target->ch[i];
for (j = 0; j < target->req_ring_size; ++j) {
struct srp_request *req = &ch->req_ring[j];
srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
}
}
return SUCCESS;
}


@@ -423,7 +423,7 @@ void b1_parse_version(avmctrl_info *cinfo)
int i, j;
for (j = 0; j < AVM_MAXVERSION; j++)
cinfo->version[j] = "\0\0" + 1;
cinfo->version[j] = "";
for (i = 0, j = 0;
j < AVM_MAXVERSION && i < cinfo->versionlen;
j++, i += cinfo->versionbuf[i] + 1)


@@ -1456,15 +1456,19 @@ isdn_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
{
modem_info *info = (modem_info *) tty->driver_data;
mutex_lock(&modem_info_mutex);
if (!old_termios)
isdn_tty_change_speed(info);
else {
if (tty->termios.c_cflag == old_termios->c_cflag &&
tty->termios.c_ispeed == old_termios->c_ispeed &&
tty->termios.c_ospeed == old_termios->c_ospeed)
tty->termios.c_ospeed == old_termios->c_ospeed) {
mutex_unlock(&modem_info_mutex);
return;
}
isdn_tty_change_speed(info);
}
mutex_unlock(&modem_info_mutex);
}
/*


@@ -318,7 +318,9 @@ static int lp5523_init_program_engine(struct lp55xx_chip *chip)
/* Let the programs run for couple of ms and check the engine status */
usleep_range(3000, 6000);
lp55xx_read(chip, LP5523_REG_STATUS, &status);
ret = lp55xx_read(chip, LP5523_REG_STATUS, &status);
if (ret)
return ret;
status &= LP5523_ENG_STATUS_MASK;
if (status != LP5523_ENG_STATUS_MASK) {


@@ -261,7 +261,7 @@ static int get_register_interruptible(struct ab8500 *ab8500, u8 bank,
mutex_unlock(&ab8500->lock);
dev_vdbg(ab8500->dev, "rd: addr %#x => data %#x\n", addr, ret);
return ret;
return (ret < 0) ? ret : 0;
}
static int ab8500_get_register(struct device *dev, u8 bank,
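For context, a sketch of the calling convention this fixes (caller code hypothetical): the abx500 register-read API hands the value back through a pointer and is expected to return 0 on success, so returning the raw register value made every successful non-zero read look like an error to callers that test for non-zero:

    u8 val;
    int err = abx500_get_register_interruptible(dev, bank, addr, &val);
    if (err)        /* previously also taken after a good non-zero read */
            return err;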


@@ -640,9 +640,9 @@ static const struct mfd_cell axp221_cells[] = {
static const struct mfd_cell axp223_cells[] = {
{
.name = "axp221-pek",
.num_resources = ARRAY_SIZE(axp22x_pek_resources),
.resources = axp22x_pek_resources,
.name = "axp221-pek",
.num_resources = ARRAY_SIZE(axp22x_pek_resources),
.resources = axp22x_pek_resources,
}, {
.name = "axp22x-adc",
.of_compatible = "x-powers,axp221-adc",
@@ -650,7 +650,7 @@ static const struct mfd_cell axp223_cells[] = {
.name = "axp20x-battery-power-supply",
.of_compatible = "x-powers,axp221-battery-power-supply",
}, {
.name = "axp20x-regulator",
.name = "axp20x-regulator",
}, {
.name = "axp20x-ac-power-supply",
.of_compatible = "x-powers,axp221-ac-power-supply",
@@ -666,9 +666,9 @@ static const struct mfd_cell axp223_cells[] = {
static const struct mfd_cell axp152_cells[] = {
{
.name = "axp20x-pek",
.num_resources = ARRAY_SIZE(axp152_pek_resources),
.resources = axp152_pek_resources,
.name = "axp20x-pek",
.num_resources = ARRAY_SIZE(axp152_pek_resources),
.resources = axp152_pek_resources,
},
};
@@ -697,87 +697,101 @@ static const struct resource axp288_charger_resources[] = {
static const struct mfd_cell axp288_cells[] = {
{
.name = "axp288_adc",
.num_resources = ARRAY_SIZE(axp288_adc_resources),
.resources = axp288_adc_resources,
},
{
.name = "axp288_extcon",
.num_resources = ARRAY_SIZE(axp288_extcon_resources),
.resources = axp288_extcon_resources,
},
{
.name = "axp288_charger",
.num_resources = ARRAY_SIZE(axp288_charger_resources),
.resources = axp288_charger_resources,
},
{
.name = "axp288_fuel_gauge",
.num_resources = ARRAY_SIZE(axp288_fuel_gauge_resources),
.resources = axp288_fuel_gauge_resources,
},
{
.name = "axp221-pek",
.num_resources = ARRAY_SIZE(axp288_power_button_resources),
.resources = axp288_power_button_resources,
},
{
.name = "axp288_pmic_acpi",
.name = "axp288_adc",
.num_resources = ARRAY_SIZE(axp288_adc_resources),
.resources = axp288_adc_resources,
}, {
.name = "axp288_extcon",
.num_resources = ARRAY_SIZE(axp288_extcon_resources),
.resources = axp288_extcon_resources,
}, {
.name = "axp288_charger",
.num_resources = ARRAY_SIZE(axp288_charger_resources),
.resources = axp288_charger_resources,
}, {
.name = "axp288_fuel_gauge",
.num_resources = ARRAY_SIZE(axp288_fuel_gauge_resources),
.resources = axp288_fuel_gauge_resources,
}, {
.name = "axp221-pek",
.num_resources = ARRAY_SIZE(axp288_power_button_resources),
.resources = axp288_power_button_resources,
}, {
.name = "axp288_pmic_acpi",
},
};
static const struct mfd_cell axp803_cells[] = {
{
.name = "axp221-pek",
.num_resources = ARRAY_SIZE(axp803_pek_resources),
.resources = axp803_pek_resources,
.name = "axp221-pek",
.num_resources = ARRAY_SIZE(axp803_pek_resources),
.resources = axp803_pek_resources,
}, {
.name = "axp20x-gpio",
.of_compatible = "x-powers,axp813-gpio",
}, {
.name = "axp813-adc",
.of_compatible = "x-powers,axp813-adc",
}, {
.name = "axp20x-battery-power-supply",
.of_compatible = "x-powers,axp813-battery-power-supply",
}, {
.name = "axp20x-ac-power-supply",
.of_compatible = "x-powers,axp813-ac-power-supply",
.num_resources = ARRAY_SIZE(axp20x_ac_power_supply_resources),
.resources = axp20x_ac_power_supply_resources,
},
{ .name = "axp20x-regulator" },
{ .name = "axp20x-regulator" },
};
static const struct mfd_cell axp806_self_working_cells[] = {
{
.name = "axp221-pek",
.num_resources = ARRAY_SIZE(axp806_pek_resources),
.resources = axp806_pek_resources,
.name = "axp221-pek",
.num_resources = ARRAY_SIZE(axp806_pek_resources),
.resources = axp806_pek_resources,
},
{ .name = "axp20x-regulator" },
{ .name = "axp20x-regulator" },
};
static const struct mfd_cell axp806_cells[] = {
{
.id = 2,
.name = "axp20x-regulator",
.id = 2,
.name = "axp20x-regulator",
},
};
static const struct mfd_cell axp809_cells[] = {
{
.name = "axp221-pek",
.num_resources = ARRAY_SIZE(axp809_pek_resources),
.resources = axp809_pek_resources,
.name = "axp221-pek",
.num_resources = ARRAY_SIZE(axp809_pek_resources),
.resources = axp809_pek_resources,
}, {
.id = 1,
.name = "axp20x-regulator",
.id = 1,
.name = "axp20x-regulator",
},
};
static const struct mfd_cell axp813_cells[] = {
{
.name = "axp221-pek",
.num_resources = ARRAY_SIZE(axp803_pek_resources),
.resources = axp803_pek_resources,
.name = "axp221-pek",
.num_resources = ARRAY_SIZE(axp803_pek_resources),
.resources = axp803_pek_resources,
}, {
.name = "axp20x-regulator",
.name = "axp20x-regulator",
}, {
.name = "axp20x-gpio",
.of_compatible = "x-powers,axp813-gpio",
.name = "axp20x-gpio",
.of_compatible = "x-powers,axp813-gpio",
}, {
.name = "axp813-adc",
.of_compatible = "x-powers,axp813-adc",
.name = "axp813-adc",
.of_compatible = "x-powers,axp813-adc",
}, {
.name = "axp20x-battery-power-supply",
.of_compatible = "x-powers,axp813-battery-power-supply",
}, {
.name = "axp20x-ac-power-supply",
.of_compatible = "x-powers,axp813-ac-power-supply",
.num_resources = ARRAY_SIZE(axp20x_ac_power_supply_resources),
.resources = axp20x_ac_power_supply_resources,
},
};


@@ -59,6 +59,7 @@ static const struct regmap_access_table bd9571mwv_writable_table = {
};
static const struct regmap_range bd9571mwv_volatile_yes_ranges[] = {
regmap_reg_range(BD9571MWV_DVFS_MONIVDAC, BD9571MWV_DVFS_MONIVDAC),
regmap_reg_range(BD9571MWV_GPIO_IN, BD9571MWV_GPIO_IN),
regmap_reg_range(BD9571MWV_GPIO_INT, BD9571MWV_GPIO_INT),
regmap_reg_range(BD9571MWV_INT_INTREQ, BD9571MWV_INT_INTREQ),


@@ -499,6 +499,7 @@ static int ec_device_remove(struct platform_device *pdev)
cros_ec_debugfs_remove(ec);
mfd_remove_devices(ec->dev);
cdev_del(&ec->cdev);
device_unregister(&ec->class_dev);
return 0;


@@ -2584,7 +2584,7 @@ static struct irq_chip prcmu_irq_chip = {
.irq_unmask = prcmu_irq_unmask,
};
static __init char *fw_project_name(u32 project)
static char *fw_project_name(u32 project)
{
switch (project) {
case PRCMU_FW_PROJECT_U8500:
@@ -2732,7 +2732,7 @@ void __init db8500_prcmu_early_init(u32 phy_base, u32 size)
INIT_WORK(&mb0_transfer.mask_work, prcmu_mask_work);
}
static void __init init_prcm_registers(void)
static void init_prcm_registers(void)
{
u32 val;


@@ -274,7 +274,9 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode,
mc13xxx->adcflags |= MC13XXX_ADC_WORKING;
mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0);
ret = mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0);
if (ret)
goto out;
adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2;
adc1 = MC13XXX_ADC1_ADEN | MC13XXX_ADC1_ADTRIGIGN | MC13XXX_ADC1_ASC;


@@ -329,8 +329,7 @@ static int mt6397_probe(struct platform_device *pdev)
default:
dev_err(&pdev->dev, "unsupported chip: %d\n", id);
ret = -ENODEV;
break;
return -ENODEV;
}
if (ret) {


@@ -638,6 +638,10 @@ static int qcom_rpm_probe(struct platform_device *pdev)
return -EFAULT;
}
writel(fw_version[0], RPM_CTRL_REG(rpm, 0));
writel(fw_version[1], RPM_CTRL_REG(rpm, 1));
writel(fw_version[2], RPM_CTRL_REG(rpm, 2));
dev_info(&pdev->dev, "RPM firmware %u.%u.%u\n", fw_version[0],
fw_version[1],
fw_version[2]);


@@ -264,8 +264,9 @@ static int ti_tscadc_probe(struct platform_device *pdev)
cell->pdata_size = sizeof(tscadc);
}
err = mfd_add_devices(&pdev->dev, pdev->id, tscadc->cells,
tscadc->used_cells, NULL, 0, NULL);
err = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO,
tscadc->cells, tscadc->used_cells, NULL,
0, NULL);
if (err < 0)
goto err_disable_clk;


@@ -235,9 +235,9 @@ static int tps65218_probe(struct i2c_client *client,
mutex_init(&tps->tps_lock);
ret = regmap_add_irq_chip(tps->regmap, tps->irq,
IRQF_ONESHOT, 0, &tps65218_irq_chip,
&tps->irq_data);
ret = devm_regmap_add_irq_chip(&client->dev, tps->regmap, tps->irq,
IRQF_ONESHOT, 0, &tps65218_irq_chip,
&tps->irq_data);
if (ret < 0)
return ret;
@@ -253,26 +253,9 @@ static int tps65218_probe(struct i2c_client *client,
ARRAY_SIZE(tps65218_cells), NULL, 0,
regmap_irq_get_domain(tps->irq_data));
if (ret < 0)
goto err_irq;
return 0;
err_irq:
regmap_del_irq_chip(tps->irq, tps->irq_data);
return ret;
}
static int tps65218_remove(struct i2c_client *client)
{
struct tps65218 *tps = i2c_get_clientdata(client);
regmap_del_irq_chip(tps->irq, tps->irq_data);
return 0;
}
static const struct i2c_device_id tps65218_id_table[] = {
{ "tps65218", TPS65218 },
{ },
@@ -285,7 +268,6 @@ static struct i2c_driver tps65218_driver = {
.of_match_table = of_tps65218_match_table,
},
.probe = tps65218_probe,
.remove = tps65218_remove,
.id_table = tps65218_id_table,
};


@@ -979,7 +979,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
* letting it generate the right frequencies for USB, MADC, and
* other purposes.
*/
static inline int __init protect_pm_master(void)
static inline int protect_pm_master(void)
{
int e = 0;
@@ -988,7 +988,7 @@ static inline int __init protect_pm_master(void)
return e;
}
static inline int __init unprotect_pm_master(void)
static inline int unprotect_pm_master(void)
{
int e = 0;


@@ -1618,6 +1618,7 @@ static const struct reg_default wm5110_reg_default[] = {
{ 0x00000ECD, 0x0000 }, /* R3789 - HPLPF4_2 */
{ 0x00000EE0, 0x0000 }, /* R3808 - ASRC_ENABLE */
{ 0x00000EE2, 0x0000 }, /* R3810 - ASRC_RATE1 */
{ 0x00000EE3, 0x4000 }, /* R3811 - ASRC_RATE2 */
{ 0x00000EF0, 0x0000 }, /* R3824 - ISRC 1 CTRL 1 */
{ 0x00000EF1, 0x0000 }, /* R3825 - ISRC 1 CTRL 2 */
{ 0x00000EF2, 0x0000 }, /* R3826 - ISRC 1 CTRL 3 */
@@ -2869,6 +2870,7 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_ASRC_ENABLE:
case ARIZONA_ASRC_STATUS:
case ARIZONA_ASRC_RATE1:
case ARIZONA_ASRC_RATE2:
case ARIZONA_ISRC_1_CTRL_1:
case ARIZONA_ISRC_1_CTRL_2:
case ARIZONA_ISRC_1_CTRL_3:


@@ -2595,11 +2595,6 @@ static int ena_restore_device(struct ena_adapter *adapter)
goto err_device_destroy;
}
clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
/* Make sure we don't have a race with AENQ Links state handler */
if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
netif_carrier_on(adapter->netdev);
rc = ena_enable_msix_and_set_admin_interrupts(adapter,
adapter->num_queues);
if (rc) {
@@ -2616,6 +2611,11 @@ static int ena_restore_device(struct ena_adapter *adapter)
}
set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
netif_carrier_on(adapter->netdev);
mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
dev_err(&pdev->dev, "Device reset completed successfully\n");


@@ -2052,6 +2052,7 @@ static int dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
bool nonlinear = skb_is_nonlinear(skb);
struct rtnl_link_stats64 *percpu_stats;
struct dpaa_percpu_priv *percpu_priv;
struct netdev_queue *txq;
struct dpaa_priv *priv;
struct qm_fd fd;
int offset = 0;
@@ -2101,6 +2102,11 @@ static int dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
if (unlikely(err < 0))
goto skb_to_fd_failed;
txq = netdev_get_tx_queue(net_dev, queue_mapping);
/* LLTX requires to do our own update of trans_start */
txq->trans_start = jiffies;
if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD);
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;


@@ -147,12 +147,10 @@ static void hns_ae_put_handle(struct hnae_handle *handle)
struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
int i;
vf_cb->mac_cb = NULL;
kfree(vf_cb);
for (i = 0; i < handle->q_num; i++)
hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0;
kfree(vf_cb);
}
static int hns_ae_wait_flow_down(struct hnae_handle *handle)


@@ -620,6 +620,8 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
}
#endif
#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
/* We reach this function only after checking that any of
* the (IPv4 | IPv6) bits are set in cqe->status.
*/
@@ -627,9 +629,20 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
netdev_features_t dev_features)
{
__wsum hw_checksum = 0;
void *hdr;
void *hdr = (u8 *)va + sizeof(struct ethhdr);
/* CQE csum doesn't cover padding octets in short ethernet
* frames. And the pad field is appended prior to calculating
* and appending the FCS field.
*
* Detecting these padded frames requires verifying and parsing
* IP headers, so we simply force all those small frames to skip
* checksum complete.
*/
if (short_frame(skb->len))
return -EINVAL;
hdr = (u8 *)va + sizeof(struct ethhdr);
hw_checksum = csum_unfold((__force __sum16)cqe->checksum);
if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&
@@ -822,6 +835,11 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
skb_record_rx_queue(skb, cq_ring);
if (likely(dev->features & NETIF_F_RXCSUM)) {
/* TODO: For IP non TCP/UDP packets when csum complete is
* not an option (not supported or any other reason) we can
* actually check cqe IPOK status bit and report
* CHECKSUM_UNNECESSARY rather than CHECKSUM_NONE
*/
if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
MLX4_CQE_STATUS_UDP)) &&
(cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
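For reference, the short_frame() cutoff above works out to 64 bytes: ETH_ZLEN (60, the minimum frame length without FCS) plus ETH_FCS_LEN (4). Assuming skb->len here includes the FCS, as the use of ETH_FCS_LEN implies:

    short_frame(64)  /* true: payload <= 46 bytes, pad octets possible */
    short_frame(65)  /* false: payload >= 47 bytes, never padded */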


@@ -57,12 +57,12 @@ static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chu
int i;
if (chunk->nsg > 0)
pci_unmap_sg(dev->persist->pdev, chunk->mem, chunk->npages,
pci_unmap_sg(dev->persist->pdev, chunk->sg, chunk->npages,
PCI_DMA_BIDIRECTIONAL);
for (i = 0; i < chunk->npages; ++i)
__free_pages(sg_page(&chunk->mem[i]),
get_order(chunk->mem[i].length));
__free_pages(sg_page(&chunk->sg[i]),
get_order(chunk->sg[i].length));
}
static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
@@ -71,9 +71,9 @@ static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *
for (i = 0; i < chunk->npages; ++i)
dma_free_coherent(&dev->persist->pdev->dev,
chunk->mem[i].length,
lowmem_page_address(sg_page(&chunk->mem[i])),
sg_dma_address(&chunk->mem[i]));
chunk->buf[i].size,
chunk->buf[i].addr,
chunk->buf[i].dma_addr);
}
void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
@@ -111,22 +111,21 @@ static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order,
return 0;
}
static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
int order, gfp_t gfp_mask)
static int mlx4_alloc_icm_coherent(struct device *dev, struct mlx4_icm_buf *buf,
int order, gfp_t gfp_mask)
{
void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order,
&sg_dma_address(mem), gfp_mask);
if (!buf)
buf->addr = dma_alloc_coherent(dev, PAGE_SIZE << order,
&buf->dma_addr, gfp_mask);
if (!buf->addr)
return -ENOMEM;
if (offset_in_page(buf)) {
dma_free_coherent(dev, PAGE_SIZE << order,
buf, sg_dma_address(mem));
if (offset_in_page(buf->addr)) {
dma_free_coherent(dev, PAGE_SIZE << order, buf->addr,
buf->dma_addr);
return -ENOMEM;
}
sg_set_buf(mem, buf, PAGE_SIZE << order);
sg_dma_len(mem) = PAGE_SIZE << order;
buf->size = PAGE_SIZE << order;
return 0;
}
@@ -159,21 +158,21 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
while (npages > 0) {
if (!chunk) {
chunk = kmalloc_node(sizeof(*chunk),
chunk = kzalloc_node(sizeof(*chunk),
gfp_mask & ~(__GFP_HIGHMEM |
__GFP_NOWARN),
dev->numa_node);
if (!chunk) {
chunk = kmalloc(sizeof(*chunk),
chunk = kzalloc(sizeof(*chunk),
gfp_mask & ~(__GFP_HIGHMEM |
__GFP_NOWARN));
if (!chunk)
goto fail;
}
chunk->coherent = coherent;
sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
chunk->npages = 0;
chunk->nsg = 0;
if (!coherent)
sg_init_table(chunk->sg, MLX4_ICM_CHUNK_LEN);
list_add_tail(&chunk->list, &icm->chunk_list);
}
@@ -186,10 +185,10 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
if (coherent)
ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev,
&chunk->mem[chunk->npages],
cur_order, mask);
&chunk->buf[chunk->npages],
cur_order, mask);
else
ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
ret = mlx4_alloc_icm_pages(&chunk->sg[chunk->npages],
cur_order, mask,
dev->numa_node);
@@ -205,7 +204,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
if (coherent)
++chunk->nsg;
else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->sg,
chunk->npages,
PCI_DMA_BIDIRECTIONAL);
@@ -220,7 +219,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
}
if (!coherent && chunk) {
chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->sg,
chunk->npages,
PCI_DMA_BIDIRECTIONAL);
@@ -320,7 +319,7 @@ void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj,
u64 idx;
struct mlx4_icm_chunk *chunk;
struct mlx4_icm *icm;
struct page *page = NULL;
void *addr = NULL;
if (!table->lowmem)
return NULL;
@@ -336,28 +335,49 @@ void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj,
list_for_each_entry(chunk, &icm->chunk_list, list) {
for (i = 0; i < chunk->npages; ++i) {
if (dma_handle && dma_offset >= 0) {
if (sg_dma_len(&chunk->mem[i]) > dma_offset)
*dma_handle = sg_dma_address(&chunk->mem[i]) +
dma_offset;
dma_offset -= sg_dma_len(&chunk->mem[i]);
dma_addr_t dma_addr;
size_t len;
if (table->coherent) {
len = chunk->buf[i].size;
dma_addr = chunk->buf[i].dma_addr;
addr = chunk->buf[i].addr;
} else {
struct page *page;
len = sg_dma_len(&chunk->sg[i]);
dma_addr = sg_dma_address(&chunk->sg[i]);
/* XXX: we should never do this for highmem
* allocation. This function either needs
* to be split, or the kernel virtual address
* return needs to be made optional.
*/
page = sg_page(&chunk->sg[i]);
addr = lowmem_page_address(page);
}
if (dma_handle && dma_offset >= 0) {
if (len > dma_offset)
*dma_handle = dma_addr + dma_offset;
dma_offset -= len;
}
/*
* DMA mapping can merge pages but not split them,
* so if we found the page, dma_handle has already
* been assigned to.
*/
if (chunk->mem[i].length > offset) {
page = sg_page(&chunk->mem[i]);
if (len > offset)
goto out;
}
offset -= chunk->mem[i].length;
offset -= len;
}
}
addr = NULL;
out:
mutex_unlock(&table->mutex);
return page ? lowmem_page_address(page) + offset : NULL;
return addr ? addr + offset : NULL;
}
int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,


@@ -47,11 +47,21 @@ enum {
MLX4_ICM_PAGE_SIZE = 1 << MLX4_ICM_PAGE_SHIFT,
};
struct mlx4_icm_buf {
void *addr;
size_t size;
dma_addr_t dma_addr;
};
struct mlx4_icm_chunk {
struct list_head list;
int npages;
int nsg;
struct scatterlist mem[MLX4_ICM_CHUNK_LEN];
bool coherent;
union {
struct scatterlist sg[MLX4_ICM_CHUNK_LEN];
struct mlx4_icm_buf buf[MLX4_ICM_CHUNK_LEN];
};
};
struct mlx4_icm {
@@ -114,12 +124,18 @@ static inline void mlx4_icm_next(struct mlx4_icm_iter *iter)
static inline dma_addr_t mlx4_icm_addr(struct mlx4_icm_iter *iter)
{
return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
if (iter->chunk->coherent)
return iter->chunk->buf[iter->page_idx].dma_addr;
else
return sg_dma_address(&iter->chunk->sg[iter->page_idx]);
}
static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter)
{
return sg_dma_len(&iter->chunk->mem[iter->page_idx]);
if (iter->chunk->coherent)
return iter->chunk->buf[iter->page_idx].size;
else
return sg_dma_len(&iter->chunk->sg[iter->page_idx]);
}
int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);


@@ -633,6 +633,7 @@ enum {
MLX5E_STATE_ASYNC_EVENTS_ENABLED,
MLX5E_STATE_OPENED,
MLX5E_STATE_DESTROYING,
MLX5E_STATE_XDP_TX_ENABLED,
};
struct mlx5e_rqt {


@@ -262,7 +262,8 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
int sq_num;
int i;
if (unlikely(!test_bit(MLX5E_STATE_OPENED, &priv->state)))
/* this flag is sufficient, no need to test internal sq state */
if (unlikely(!mlx5e_xdp_tx_is_enabled(priv)))
return -ENETDOWN;
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
@@ -275,9 +276,6 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
sq = &priv->channels.c[sq_num]->xdpsq;
if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
return -ENETDOWN;
for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
struct mlx5e_xdp_info xdpi;


@@ -49,6 +49,23 @@ bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi);
int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
u32 flags);
static inline void mlx5e_xdp_tx_enable(struct mlx5e_priv *priv)
{
set_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
}
static inline void mlx5e_xdp_tx_disable(struct mlx5e_priv *priv)
{
clear_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
/* let other device's napi(s) see our new state */
synchronize_rcu();
}
static inline bool mlx5e_xdp_tx_is_enabled(struct mlx5e_priv *priv)
{
return test_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
}
static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
{
struct mlx5_wq_cyc *wq = &sq->wq;


@@ -2890,6 +2890,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
mlx5e_build_tx2sq_maps(priv);
mlx5e_activate_channels(&priv->channels);
mlx5e_xdp_tx_enable(priv);
netif_tx_start_all_queues(priv->netdev);
if (MLX5_ESWITCH_MANAGER(priv->mdev))
@@ -2911,6 +2912,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
*/
netif_tx_stop_all_queues(priv->netdev);
netif_tx_disable(priv->netdev);
mlx5e_xdp_tx_disable(priv);
mlx5e_deactivate_channels(&priv->channels);
}


@@ -144,6 +144,7 @@ static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
s->tx_packets += sq_stats->packets;
s->tx_bytes += sq_stats->bytes;
s->tx_queue_dropped += sq_stats->dropped;
}
}
}


@@ -96,6 +96,7 @@ struct mlx5e_tc_flow_parse_attr {
struct ip_tunnel_info tun_info;
struct mlx5_flow_spec spec;
int num_mod_hdr_actions;
int max_mod_hdr_actions;
void *mod_hdr_actions;
int mirred_ifindex;
};
@@ -1742,9 +1743,9 @@ static struct mlx5_fields fields[] = {
OFFLOAD(UDP_DPORT, 2, udp.dest, 0),
};
/* On input attr->num_mod_hdr_actions tells how many HW actions can be parsed at
* max from the SW pedit action. On success, it says how many HW actions were
* actually parsed.
/* On input attr->max_mod_hdr_actions tells how many HW actions can be parsed at
* max from the SW pedit action. On success, attr->num_mod_hdr_actions
* says how many HW actions were actually parsed.
*/
static int offload_pedit_fields(struct pedit_headers *masks,
struct pedit_headers *vals,
@@ -1767,9 +1768,11 @@ static int offload_pedit_fields(struct pedit_headers *masks,
add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD];
action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
action = parse_attr->mod_hdr_actions;
max_actions = parse_attr->num_mod_hdr_actions;
nactions = 0;
action = parse_attr->mod_hdr_actions +
parse_attr->num_mod_hdr_actions * action_size;
max_actions = parse_attr->max_mod_hdr_actions;
nactions = parse_attr->num_mod_hdr_actions;
for (i = 0; i < ARRAY_SIZE(fields); i++) {
f = &fields[i];
@@ -1874,7 +1877,7 @@ static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
if (!parse_attr->mod_hdr_actions)
return -ENOMEM;
parse_attr->num_mod_hdr_actions = max_actions;
parse_attr->max_mod_hdr_actions = max_actions;
return 0;
}
@@ -1918,9 +1921,11 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv,
goto out_err;
}
err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
if (err)
goto out_err;
if (!parse_attr->mod_hdr_actions) {
err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
if (err)
goto out_err;
}
err = offload_pedit_fields(masks, vals, parse_attr);
if (err < 0)


@@ -1367,10 +1367,10 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
break;
return 0;
cond_resched();
} while (time_before(jiffies, end));
return 0;
return -EBUSY;
}
static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)


@@ -75,7 +75,15 @@ mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
act_set = mlxsw_afa_block_first_set(rulei->act_block);
mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
if (err)
goto err_ptce2_write;
return 0;
err_ptce2_write:
cregion->ops->entry_remove(cregion, centry);
return err;
}
static void


@@ -1209,7 +1209,7 @@ mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
{
return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG;
}
static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
@@ -1221,7 +1221,7 @@ static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
const char *mac, u16 fid, bool adding,
enum mlxsw_reg_sfd_rec_action action,
bool dynamic)
enum mlxsw_reg_sfd_rec_policy policy)
{
char *sfd_pl;
u8 num_rec;
@@ -1232,8 +1232,7 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
return -ENOMEM;
mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
mac, fid, action, local_port);
mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, action, local_port);
num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
if (err)
@@ -1252,7 +1251,8 @@ static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
bool dynamic)
{
return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);
MLXSW_REG_SFD_REC_ACTION_NOP,
mlxsw_sp_sfd_rec_policy(dynamic));
}
int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
@@ -1260,7 +1260,7 @@ int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
{
return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
false);
MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY);
}
static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,


@@ -1592,6 +1592,10 @@ static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
rx_prod.bd_prod = cpu_to_le16(bd_prod);
rx_prod.cqe_prod = cpu_to_le16(cq_prod);
/* Make sure chain element is updated before ringing the doorbell */
dma_wmb();
DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
}

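The qed_ll2 hunk adds the canonical producer/doorbell ordering rule: the chain element must be visible to the device before the doorbell write that tells it to look. A reduced sketch of the rule, with placeholder names for the descriptor slot and doorbell register:

#include <asm/barrier.h>
#include <linux/io.h>
#include <linux/types.h>

/* Publish a descriptor, then ring the doorbell - never the reverse. */
static void publish_then_ring(u32 *slot, u32 val,
                              void __iomem *doorbell, u32 db_val)
{
        *slot = val;              /* 1. update the DMA-visible element */
        dma_wmb();                /* 2. order the store before the MMIO write */
        writel(db_val, doorbell); /* 3. only now notify the hardware */
}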

@@ -260,6 +260,7 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
struct stmmac_extra_stats *x, u32 chan)
{
u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan));
u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
int ret = 0;
/* ABNORMAL interrupts */
@@ -279,8 +280,7 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
x->normal_irq_n++;
if (likely(intr_status & XGMAC_RI)) {
u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
if (likely(value & XGMAC_RIE)) {
if (likely(intr_en & XGMAC_RIE)) {
x->rx_normal_irq_n++;
ret |= handle_rx;
}
@@ -292,7 +292,7 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
}
/* Clear interrupts */
writel(~0x0, ioaddr + XGMAC_DMA_CH_STATUS(chan));
writel(intr_en & intr_status, ioaddr + XGMAC_DMA_CH_STATUS(chan));
return ret;
}


@@ -3522,27 +3522,28 @@ static int stmmac_napi_poll(struct napi_struct *napi, int budget)
struct stmmac_channel *ch =
container_of(napi, struct stmmac_channel, napi);
struct stmmac_priv *priv = ch->priv_data;
int work_done = 0, work_rem = budget;
int work_done, rx_done = 0, tx_done = 0;
u32 chan = ch->index;
priv->xstats.napi_poll++;
if (ch->has_tx) {
int done = stmmac_tx_clean(priv, work_rem, chan);
work_done += done;
work_rem -= done;
}
if (ch->has_rx) {
int done = stmmac_rx(priv, work_rem, chan);
work_done += done;
work_rem -= done;
}
if (ch->has_tx)
tx_done = stmmac_tx_clean(priv, budget, chan);
if (ch->has_rx)
rx_done = stmmac_rx(priv, budget, chan);
work_done = max(rx_done, tx_done);
work_done = min(work_done, budget);
if (work_done < budget && napi_complete_done(napi, work_done))
stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
if (work_done < budget && napi_complete_done(napi, work_done)) {
int stat;
stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
stat = stmmac_dma_interrupt_status(priv, priv->ioaddr,
&priv->xstats, chan);
if (stat && napi_reschedule(napi))
stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
}
return work_done;
}
@@ -4191,6 +4192,18 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
return ret;
}
/* Rx Watchdog is available in the COREs newer than the 3.40.
* In some cases, for example on buggy HW, this feature
* has to be disabled and this can be done by passing the
* riwt_off field from the platform.
*/
if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
(priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
priv->use_riwt = 1;
dev_info(priv->device,
"Enable RX Mitigation via HW Watchdog Timer\n");
}
return 0;
}
@@ -4323,18 +4336,6 @@ int stmmac_dvr_probe(struct device *device,
if (flow_ctrl)
priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
/* Rx Watchdog is available in the COREs newer than the 3.40.
* In some cases, for example on buggy HW, this feature
* has to be disabled and this can be done by passing the
* riwt_off field from the platform.
*/
if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
(priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
priv->use_riwt = 1;
dev_info(priv->device,
"Enable RX Mitigation via HW Watchdog Timer\n");
}
/* Setup channels NAPI */
maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);

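The reworked stmmac_napi_poll() above follows the standard NAPI contract: clamp the reported work to the budget, and after napi_complete_done() re-check the hardware status so an event that raced in between the last poll and completion is not lost. A condensed sketch of that contract, with the driver-specific handlers left as hypothetical prototypes:

#include <linux/netdevice.h>

/* Hypothetical driver hooks (prototypes only). */
int handle_rx(struct napi_struct *napi, int budget);
int handle_tx(struct napi_struct *napi, int budget);
bool irq_status_pending(struct napi_struct *napi);
void mask_device_irq(struct napi_struct *napi);
void unmask_device_irq(struct napi_struct *napi);

static int example_napi_poll(struct napi_struct *napi, int budget)
{
        int tx_done = handle_tx(napi, budget);
        int rx_done = handle_rx(napi, budget);
        /* Never report more work than the budget we were given. */
        int work_done = min(max(rx_done, tx_done), budget);

        if (work_done < budget && napi_complete_done(napi, work_done)) {
                unmask_device_irq(napi);
                /* If status raced in before completion, poll again
                 * with the device interrupt masked.
                 */
                if (irq_status_pending(napi) && napi_reschedule(napi))
                        mask_device_irq(napi);
        }
        return work_done;
}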

@@ -299,7 +299,17 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
*/
static void stmmac_pci_remove(struct pci_dev *pdev)
{
int i;
stmmac_dvr_remove(&pdev->dev);
for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
if (pci_resource_len(pdev, i) == 0)
continue;
pcim_iounmap_regions(pdev, BIT(i));
break;
}
pci_disable_device(pdev);
}


@@ -301,6 +301,8 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
/* Queue 0 is not AVB capable */
if (queue <= 0 || queue >= tx_queues_count)
return -EINVAL;
if (!priv->dma_cap.av)
return -EOPNOTSUPP;
if (priv->speed != SPEED_100 && priv->speed != SPEED_1000)
return -EOPNOTSUPP;


@@ -1406,9 +1406,13 @@ static void geneve_link_config(struct net_device *dev,
}
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6: {
struct rt6_info *rt = rt6_lookup(geneve->net,
&info->key.u.ipv6.dst, NULL, 0,
NULL, 0);
struct rt6_info *rt;
if (!__in6_dev_get(dev))
break;
rt = rt6_lookup(geneve->net, &info->key.u.ipv6.dst, NULL, 0,
NULL, 0);
if (rt && rt->dst.dev)
ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV6_HLEN;


@@ -502,6 +502,17 @@ static void phylink_run_resolve(struct phylink *pl)
queue_work(system_power_efficient_wq, &pl->resolve);
}
static void phylink_run_resolve_and_disable(struct phylink *pl, int bit)
{
unsigned long state = pl->phylink_disable_state;
set_bit(bit, &pl->phylink_disable_state);
if (state == 0) {
queue_work(system_power_efficient_wq, &pl->resolve);
flush_work(&pl->resolve);
}
}
static void phylink_fixed_poll(struct timer_list *t)
{
struct phylink *pl = container_of(t, struct phylink, link_poll);
@@ -955,9 +966,7 @@ void phylink_stop(struct phylink *pl)
if (pl->link_an_mode == MLO_AN_FIXED && !IS_ERR(pl->link_gpio))
del_timer_sync(&pl->link_poll);
set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
queue_work(system_power_efficient_wq, &pl->resolve);
flush_work(&pl->resolve);
phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_STOPPED);
}
EXPORT_SYMBOL_GPL(phylink_stop);
@@ -1664,9 +1673,7 @@ static void phylink_sfp_link_down(void *upstream)
ASSERT_RTNL();
set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state);
queue_work(system_power_efficient_wq, &pl->resolve);
flush_work(&pl->resolve);
phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_LINK);
}
static void phylink_sfp_link_up(void *upstream)


@@ -347,6 +347,7 @@ static int sfp_register_bus(struct sfp_bus *bus)
return ret;
}
}
bus->socket_ops->attach(bus->sfp);
if (bus->started)
bus->socket_ops->start(bus->sfp);
bus->netdev->sfp_bus = bus;
@@ -362,6 +363,7 @@ static void sfp_unregister_bus(struct sfp_bus *bus)
if (bus->registered) {
if (bus->started)
bus->socket_ops->stop(bus->sfp);
bus->socket_ops->detach(bus->sfp);
if (bus->phydev && ops && ops->disconnect_phy)
ops->disconnect_phy(bus->upstream);
}


@@ -184,6 +184,7 @@ struct sfp {
struct gpio_desc *gpio[GPIO_MAX];
bool attached;
unsigned int state;
struct delayed_work poll;
struct delayed_work timeout;
@@ -1475,7 +1476,7 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
*/
switch (sfp->sm_mod_state) {
default:
if (event == SFP_E_INSERT) {
if (event == SFP_E_INSERT && sfp->attached) {
sfp_module_tx_disable(sfp);
sfp_sm_ins_next(sfp, SFP_MOD_PROBE, T_PROBE_INIT);
}
@@ -1607,6 +1608,19 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
mutex_unlock(&sfp->sm_mutex);
}
static void sfp_attach(struct sfp *sfp)
{
sfp->attached = true;
if (sfp->state & SFP_F_PRESENT)
sfp_sm_event(sfp, SFP_E_INSERT);
}
static void sfp_detach(struct sfp *sfp)
{
sfp->attached = false;
sfp_sm_event(sfp, SFP_E_REMOVE);
}
static void sfp_start(struct sfp *sfp)
{
sfp_sm_event(sfp, SFP_E_DEV_UP);
@@ -1667,6 +1681,8 @@ static int sfp_module_eeprom(struct sfp *sfp, struct ethtool_eeprom *ee,
}
static const struct sfp_socket_ops sfp_module_ops = {
.attach = sfp_attach,
.detach = sfp_detach,
.start = sfp_start,
.stop = sfp_stop,
.module_info = sfp_module_info,
@@ -1834,10 +1850,6 @@ static int sfp_probe(struct platform_device *pdev)
dev_info(sfp->dev, "Host maximum power %u.%uW\n",
sfp->max_power_mW / 1000, (sfp->max_power_mW / 100) % 10);
sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops);
if (!sfp->sfp_bus)
return -ENOMEM;
/* Get the initial state, and always signal TX disable,
* since the network interface will not be up.
*/
@@ -1848,10 +1860,6 @@ static int sfp_probe(struct platform_device *pdev)
sfp->state |= SFP_F_RATE_SELECT;
sfp_set_state(sfp, sfp->state);
sfp_module_tx_disable(sfp);
rtnl_lock();
if (sfp->state & SFP_F_PRESENT)
sfp_sm_event(sfp, SFP_E_INSERT);
rtnl_unlock();
for (i = 0; i < GPIO_MAX; i++) {
if (gpio_flags[i] != GPIOD_IN || !sfp->gpio[i])
@@ -1884,6 +1892,10 @@ static int sfp_probe(struct platform_device *pdev)
dev_warn(sfp->dev,
"No tx_disable pin: SFP modules will always be emitting.\n");
sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops);
if (!sfp->sfp_bus)
return -ENOMEM;
return 0;
}


@@ -7,6 +7,8 @@
struct sfp;
struct sfp_socket_ops {
void (*attach)(struct sfp *sfp);
void (*detach)(struct sfp *sfp);
void (*start)(struct sfp *sfp);
void (*stop)(struct sfp *sfp);
int (*module_info)(struct sfp *sfp, struct ethtool_modinfo *modinfo);

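Taken together, the three sfp hunks gate hotplug handling on a new attached flag: probe no longer fires SFP_E_INSERT directly, the state machine ignores insertion until the flag is set, and sfp_attach() replays the current presence state once the upstream side has bound through the new attach/detach socket ops. The essence of the gate, as a sketch with hypothetical names:

#include <linux/types.h>

struct dev_state {
        bool attached;
        bool present;
};

void deliver_insert_event(struct dev_state *s);  /* hypothetical consumer */

/* Event source: swallow events until a consumer is attached. */
static void on_hw_presence(struct dev_state *s, bool present)
{
        s->present = present;
        if (s->attached && present)
                deliver_insert_event(s);
}

/* Consumer binding: replay the state that may have been swallowed. */
static void consumer_attach(struct dev_state *s)
{
        s->attached = true;
        if (s->present)
                deliver_insert_event(s);
}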

@@ -256,17 +256,6 @@ static void __team_option_inst_mark_removed_port(struct team *team,
}
}
static bool __team_option_inst_tmp_find(const struct list_head *opts,
const struct team_option_inst *needle)
{
struct team_option_inst *opt_inst;
list_for_each_entry(opt_inst, opts, tmp_list)
if (opt_inst == needle)
return true;
return false;
}
static int __team_options_register(struct team *team,
const struct team_option *option,
size_t option_count)
@@ -2463,7 +2452,6 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
int err = 0;
int i;
struct nlattr *nl_option;
LIST_HEAD(opt_inst_list);
rtnl_lock();
@@ -2483,6 +2471,7 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
struct nlattr *attr;
struct nlattr *attr_data;
LIST_HEAD(opt_inst_list);
enum team_option_type opt_type;
int opt_port_ifindex = 0; /* != 0 for per-port options */
u32 opt_array_index = 0;
@@ -2587,23 +2576,17 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
if (err)
goto team_put;
opt_inst->changed = true;
/* dumb/evil user-space can send us duplicate opt,
* keep only the last one
*/
if (__team_option_inst_tmp_find(&opt_inst_list,
opt_inst))
continue;
list_add(&opt_inst->tmp_list, &opt_inst_list);
}
if (!opt_found) {
err = -ENOENT;
goto team_put;
}
}
err = team_nl_send_event_options_get(team, &opt_inst_list);
err = team_nl_send_event_options_get(team, &opt_inst_list);
if (err)
break;
}
team_put:
team_nl_team_put(team);

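The team fix above is, at heart, a scoping change: opt_inst_list used to live outside the outer nl_option loop, so tmp_list entries from a previous iteration lingered and had to be guarded against with __team_option_inst_tmp_find(). Declaring the list head inside the loop gives every iteration a fresh, empty list, which is what makes the duplicate scan removable. A toy sketch of the pattern, assuming a made-up struct opt with a tmp_list member:

#include <linux/list.h>

struct opt {
        struct list_head tmp_list;
        int val;
};

static void process_in_rounds(struct opt *opts, int rounds, int per_round)
{
        int i, j;

        for (i = 0; i < rounds; i++) {
                LIST_HEAD(batch);       /* fresh head: nothing carries over */

                for (j = 0; j < per_round; j++)
                        list_add_tail(&opts[i * per_round + j].tmp_list,
                                      &batch);
                /* consume 'batch' here; it dies with the iteration, so
                 * no stale tmp_list linkage can survive into the next.
                 */
        }
}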

@@ -34,14 +34,12 @@ enum max77620_pin_ppdrv {
MAX77620_PIN_PP_DRV,
};
enum max77620_pinconf_param {
MAX77620_ACTIVE_FPS_SOURCE = PIN_CONFIG_END + 1,
MAX77620_ACTIVE_FPS_POWER_ON_SLOTS,
MAX77620_ACTIVE_FPS_POWER_DOWN_SLOTS,
MAX77620_SUSPEND_FPS_SOURCE,
MAX77620_SUSPEND_FPS_POWER_ON_SLOTS,
MAX77620_SUSPEND_FPS_POWER_DOWN_SLOTS,
};
#define MAX77620_ACTIVE_FPS_SOURCE (PIN_CONFIG_END + 1)
#define MAX77620_ACTIVE_FPS_POWER_ON_SLOTS (PIN_CONFIG_END + 2)
#define MAX77620_ACTIVE_FPS_POWER_DOWN_SLOTS (PIN_CONFIG_END + 3)
#define MAX77620_SUSPEND_FPS_SOURCE (PIN_CONFIG_END + 4)
#define MAX77620_SUSPEND_FPS_POWER_ON_SLOTS (PIN_CONFIG_END + 5)
#define MAX77620_SUSPEND_FPS_POWER_DOWN_SLOTS (PIN_CONFIG_END + 6)
struct max77620_pin_function {
const char *name;

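The max77620 hunk converts the driver-private pinconf parameters from a driver-local enum to plain #defines. The values still extend the generic pin_config_param space from PIN_CONFIG_END + 1 upward, but as bare integers they no longer form a second enum type, which is what Clang's implicit enum-conversion warnings were objecting to. The shape of the pattern, on a made-up framework enum:

/* Framework-owned enum: a driver cannot add members to it. */
enum framework_param {
        FRAMEWORK_PARAM_A,
        FRAMEWORK_PARAM_B,
        FRAMEWORK_PARAM_END,
};

/* Driver-private extensions as defines: same integer space,
 * but no second enum type for the compiler to warn about.
 */
#define DRIVER_PARAM_X  (FRAMEWORK_PARAM_END + 1)
#define DRIVER_PARAM_Y  (FRAMEWORK_PARAM_END + 2)

static int handle_param(unsigned int param)
{
        switch (param) {
        case FRAMEWORK_PARAM_A:
                return 0;
        case DRIVER_PARAM_X:    /* mixes freely with the enum cases */
                return 1;
        default:
                return -1;
        }
}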

@@ -1144,7 +1144,7 @@ static void ddp_clear_map(struct cxgbi_device *cdev, struct cxgbi_ppm *ppm,
}
static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
unsigned int tid, int pg_idx, bool reply)
unsigned int tid, int pg_idx)
{
struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
GFP_KERNEL);
@@ -1160,7 +1160,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
req = (struct cpl_set_tcb_field *)skb->head;
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
req->reply = V_NO_REPLY(reply ? 0 : 1);
req->reply = V_NO_REPLY(1);
req->cpu_idx = 0;
req->word = htons(31);
req->mask = cpu_to_be64(0xF0000000);
@@ -1177,11 +1177,10 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
* @tid: connection id
* @hcrc: header digest enabled
* @dcrc: data digest enabled
* @reply: request reply from h/w
* set up the iscsi digest settings for a connection identified by tid
*/
static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
int hcrc, int dcrc, int reply)
int hcrc, int dcrc)
{
struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
GFP_KERNEL);
@@ -1197,7 +1196,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
req = (struct cpl_set_tcb_field *)skb->head;
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
req->reply = V_NO_REPLY(reply ? 0 : 1);
req->reply = V_NO_REPLY(1);
req->cpu_idx = 0;
req->word = htons(31);
req->mask = cpu_to_be64(0x0F000000);


@@ -1517,16 +1517,22 @@ static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
struct cxgbi_sock *csk;
csk = lookup_tid(t, tid);
if (!csk)
if (!csk) {
pr_err("can't find conn. for tid %u.\n", tid);
return;
}
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
"csk 0x%p,%u,%lx,%u, status 0x%x.\n",
csk, csk->state, csk->flags, csk->tid, rpl->status);
if (rpl->status != CPL_ERR_NONE)
if (rpl->status != CPL_ERR_NONE) {
pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
csk, tid, rpl->status);
csk->err = -EINVAL;
}
complete(&csk->cmpl);
__kfree_skb(skb);
}
@@ -1903,7 +1909,7 @@ static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
}
static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
int pg_idx, bool reply)
int pg_idx)
{
struct sk_buff *skb;
struct cpl_set_tcb_field *req;
@@ -1919,7 +1925,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
req = (struct cpl_set_tcb_field *)skb->head;
INIT_TP_WR(req, csk->tid);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
req->word_cookie = htons(0);
req->mask = cpu_to_be64(0x3 << 8);
req->val = cpu_to_be64(pg_idx << 8);
@@ -1928,12 +1934,15 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
"csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);
reinit_completion(&csk->cmpl);
cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
return 0;
wait_for_completion(&csk->cmpl);
return csk->err;
}
static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
int hcrc, int dcrc, int reply)
int hcrc, int dcrc)
{
struct sk_buff *skb;
struct cpl_set_tcb_field *req;
@@ -1951,7 +1960,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
req = (struct cpl_set_tcb_field *)skb->head;
INIT_TP_WR(req, tid);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
req->word_cookie = htons(0);
req->mask = cpu_to_be64(0x3 << 4);
req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
@@ -1961,8 +1970,11 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
"csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);
reinit_completion(&csk->cmpl);
cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
return 0;
wait_for_completion(&csk->cmpl);
return csk->err;
}
static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev)

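Across the cxgbi hunks the unused reply parameter is dropped everywhere, and in the cxgb4i variant the request now always asks the hardware for a reply (NO_REPLY_V(0)): the submitter blocks on a per-socket completion, and do_set_tcb_rpl() records the status in csk->err before completing it, so setup errors finally propagate to the caller. A minimal sketch of that synchronous handshake, with send_fw_request() standing in for the offload-send call:

#include <linux/completion.h>
#include <linux/errno.h>

struct conn {
        struct completion cmpl;
        int err;
};

void send_fw_request(struct conn *c);   /* hypothetical submit path */

/* Submitter: arm the completion, post the request, wait for the reply. */
static int setup_and_wait(struct conn *c)
{
        reinit_completion(&c->cmpl);
        send_fw_request(c);
        wait_for_completion(&c->cmpl);
        return c->err;          /* filled in by the reply path below */
}

/* Reply path: record the hardware status, then wake the submitter. */
static void on_fw_reply(struct conn *c, int status)
{
        c->err = status ? -EINVAL : 0;
        complete(&c->cmpl);
}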

@@ -573,6 +573,7 @@ static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev)
skb_queue_head_init(&csk->receive_queue);
skb_queue_head_init(&csk->write_queue);
timer_setup(&csk->retry_timer, NULL, 0);
init_completion(&csk->cmpl);
rwlock_init(&csk->callback_lock);
csk->cdev = cdev;
csk->flags = 0;
@@ -2252,14 +2253,14 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
if (!err && conn->hdrdgst_en)
err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
conn->hdrdgst_en,
conn->datadgst_en, 0);
conn->datadgst_en);
break;
case ISCSI_PARAM_DATADGST_EN:
err = iscsi_set_param(cls_conn, param, buf, buflen);
if (!err && conn->datadgst_en)
err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
conn->hdrdgst_en,
conn->datadgst_en, 0);
conn->datadgst_en);
break;
case ISCSI_PARAM_MAX_R2T:
return iscsi_tcp_set_max_r2t(conn, buf);
@@ -2385,7 +2386,7 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
ppm = csk->cdev->cdev2ppm(csk->cdev);
err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid,
ppm->tformat.pgsz_idx_dflt, 0);
ppm->tformat.pgsz_idx_dflt);
if (err < 0)
return err;


@@ -146,6 +146,7 @@ struct cxgbi_sock {
struct sk_buff_head receive_queue;
struct sk_buff_head write_queue;
struct timer_list retry_timer;
struct completion cmpl;
int err;
rwlock_t callback_lock;
void *user_data;
@@ -487,9 +488,9 @@ struct cxgbi_device {
struct cxgbi_ppm *,
struct cxgbi_task_tag_info *);
int (*csk_ddp_setup_digest)(struct cxgbi_sock *,
unsigned int, int, int, int);
unsigned int, int, int);
int (*csk_ddp_setup_pgidx)(struct cxgbi_sock *,
unsigned int, int, bool);
unsigned int, int);
void (*csk_release_offload_resources)(struct cxgbi_sock *);
int (*csk_rx_pdu_ready)(struct cxgbi_sock *, struct sk_buff *);


@@ -588,6 +588,13 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
shost->max_lun = ~0;
shost->max_cmd_len = MAX_COMMAND_SIZE;
/* turn on DIF support */
scsi_host_set_prot(shost,
SHOST_DIF_TYPE1_PROTECTION |
SHOST_DIF_TYPE2_PROTECTION |
SHOST_DIF_TYPE3_PROTECTION);
scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
err = scsi_add_host(shost, &pdev->dev);
if (err)
goto err_shost;
@@ -675,13 +682,6 @@ static int isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_host_alloc;
}
pci_info->hosts[i] = h;
/* turn on DIF support */
scsi_host_set_prot(to_shost(h),
SHOST_DIF_TYPE1_PROTECTION |
SHOST_DIF_TYPE2_PROTECTION |
SHOST_DIF_TYPE3_PROTECTION);
scsi_host_set_guard(to_shost(h), SHOST_DIX_GUARD_CRC);
}
err = isci_setup_interrupts(pdev);


@@ -954,6 +954,7 @@ static int qedi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
qedi_ep = ep->dd_data;
if (qedi_ep->state == EP_STATE_IDLE ||
qedi_ep->state == EP_STATE_OFLDCONN_NONE ||
qedi_ep->state == EP_STATE_OFLDCONN_FAILED)
return -1;
@@ -1036,6 +1037,7 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
switch (qedi_ep->state) {
case EP_STATE_OFLDCONN_START:
case EP_STATE_OFLDCONN_NONE:
goto ep_release_conn;
case EP_STATE_OFLDCONN_FAILED:
break;
@@ -1226,6 +1228,7 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
if (!is_valid_ether_addr(&path_data->mac_addr[0])) {
QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n");
qedi_ep->state = EP_STATE_OFLDCONN_NONE;
ret = -EIO;
goto set_path_exit;
}


@@ -59,6 +59,7 @@ enum {
EP_STATE_OFLDCONN_FAILED = 0x2000,
EP_STATE_CONNECT_FAILED = 0x4000,
EP_STATE_DISCONN_TIMEDOUT = 0x8000,
EP_STATE_OFLDCONN_NONE = 0x10000,
};
struct qedi_conn;


@@ -7237,6 +7237,8 @@ static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha,
rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn,
fw_ddb_entry);
if (rc)
goto free_sess;
ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
__func__, fnode_sess->dev.kobj.name);


@@ -141,7 +141,7 @@ enum ufs_desc_def_size {
QUERY_DESC_CONFIGURATION_DEF_SIZE = 0x90,
QUERY_DESC_UNIT_DEF_SIZE = 0x23,
QUERY_DESC_INTERCONNECT_DEF_SIZE = 0x06,
QUERY_DESC_GEOMETRY_DEF_SIZE = 0x44,
QUERY_DESC_GEOMETRY_DEF_SIZE = 0x48,
QUERY_DESC_POWER_DEF_SIZE = 0x62,
QUERY_DESC_HEALTH_DEF_SIZE = 0x25,
};


@@ -10461,6 +10461,8 @@ int ufshcd_system_resume(struct ufs_hba *hba)
trace_ufshcd_system_resume(dev_name(hba->dev), ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
if (!ret)
hba->is_sys_suspended = false;
return ret;
}
EXPORT_SYMBOL(ufshcd_system_resume);


@@ -25,7 +25,7 @@ static inline void read_endio(struct bio *bio)
struct page *page = bvec->bv_page;
/* page is already locked */
BUG_ON(PageUptodate(page));
DBG_BUGON(PageUptodate(page));
if (unlikely(err))
SetPageError(page);
@@ -91,12 +91,12 @@ static int erofs_map_blocks_flatmode(struct inode *inode,
struct erofs_map_blocks *map,
int flags)
{
int err = 0;
erofs_blk_t nblocks, lastblk;
u64 offset = map->m_la;
struct erofs_vnode *vi = EROFS_V(inode);
trace_erofs_map_blocks_flatmode_enter(inode, map, flags);
BUG_ON(is_inode_layout_compression(inode));
nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
lastblk = nblocks - is_inode_layout_inline(inode);
@@ -123,18 +123,27 @@ static int erofs_map_blocks_flatmode(struct inode *inode,
map->m_plen = inode->i_size - offset;
/* inline data should locate in one meta block */
BUG_ON(erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE);
if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) {
DBG_BUGON(1);
err = -EIO;
goto err_out;
}
map->m_flags |= EROFS_MAP_META;
} else {
errln("internal error @ nid: %llu (size %llu), m_la 0x%llx",
vi->nid, inode->i_size, map->m_la);
BUG();
DBG_BUGON(1);
err = -EIO;
goto err_out;
}
out:
map->m_llen = map->m_plen;
err_out:
trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
return 0;
return err;
}
#ifdef CONFIG_EROFS_FS_ZIP
@@ -190,7 +199,7 @@ static inline struct bio *erofs_read_raw_page(
erofs_off_t current_block = (erofs_off_t)page->index;
int err;
BUG_ON(!nblocks);
DBG_BUGON(!nblocks);
if (PageUptodate(page)) {
err = 0;
@@ -233,7 +242,7 @@ static inline struct bio *erofs_read_raw_page(
}
/* for RAW access mode, m_plen must be equal to m_llen */
BUG_ON(map.m_plen != map.m_llen);
DBG_BUGON(map.m_plen != map.m_llen);
blknr = erofs_blknr(map.m_pa);
blkoff = erofs_blkoff(map.m_pa);
@@ -243,7 +252,7 @@ static inline struct bio *erofs_read_raw_page(
void *vsrc, *vto;
struct page *ipage;
BUG_ON(map.m_plen > PAGE_SIZE);
DBG_BUGON(map.m_plen > PAGE_SIZE);
ipage = erofs_get_meta_page(inode->i_sb, blknr, 0);
@@ -270,7 +279,7 @@ static inline struct bio *erofs_read_raw_page(
}
/* pa must be block-aligned for raw reading */
BUG_ON(erofs_blkoff(map.m_pa) != 0);
DBG_BUGON(erofs_blkoff(map.m_pa));
/* max # of continuous pages */
if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE))
@@ -331,7 +340,7 @@ static int erofs_raw_access_readpage(struct file *file, struct page *page)
if (IS_ERR(bio))
return PTR_ERR(bio);
BUG_ON(bio != NULL); /* since we have only one bio -- must be NULL */
DBG_BUGON(bio); /* since we have only one bio -- must be NULL */
return 0;
}
@@ -369,7 +378,7 @@ static int erofs_raw_access_readpages(struct file *filp,
/* pages could still be locked */
put_page(page);
}
BUG_ON(!list_empty(pages));
DBG_BUGON(!list_empty(pages));
/* the rare case (end in gaps) */
if (unlikely(bio != NULL))


@@ -53,8 +53,11 @@ static int erofs_fill_dentries(struct dir_context *ctx,
strnlen(de_name, maxsize - nameoff) :
le16_to_cpu(de[1].nameoff) - nameoff;
/* the corrupted directory found */
BUG_ON(de_namelen < 0);
/* a corrupted entry is found */
if (unlikely(de_namelen < 0)) {
DBG_BUGON(1);
return -EIO;
}
#ifdef CONFIG_EROFS_FS_DEBUG
dbg_namelen = min(EROFS_NAME_LEN - 1, de_namelen);


@@ -132,7 +132,13 @@ static int fill_inline_data(struct inode *inode, void *data, unsigned m_pofs)
return -ENOMEM;
m_pofs += vi->inode_isize + vi->xattr_isize;
BUG_ON(m_pofs + inode->i_size > PAGE_SIZE);
/* inline symlink data shouldn't cross the page boundary either */
if (unlikely(m_pofs + inode->i_size > PAGE_SIZE)) {
DBG_BUGON(1);
kfree(lnk);
return -EIO;
}
/* get in-page inline data */
memcpy(lnk, data + m_pofs, inode->i_size);
@@ -170,7 +176,7 @@ static int fill_inode(struct inode *inode, int isdir)
return PTR_ERR(page);
}
BUG_ON(!PageUptodate(page));
DBG_BUGON(!PageUptodate(page));
data = page_address(page);
err = read_inode(inode, data + ofs);


@@ -184,50 +184,70 @@ struct erofs_workgroup {
#define EROFS_LOCKED_MAGIC (INT_MIN | 0xE0F510CCL)
static inline bool erofs_workgroup_try_to_freeze(
struct erofs_workgroup *grp, int v)
{
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
if (v != atomic_cmpxchg(&grp->refcount,
v, EROFS_LOCKED_MAGIC))
return false;
preempt_disable();
#else
preempt_disable();
if (atomic_read(&grp->refcount) != v) {
preempt_enable();
return false;
}
#endif
return true;
}
static inline void erofs_workgroup_unfreeze(
struct erofs_workgroup *grp, int v)
{
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
atomic_set(&grp->refcount, v);
#endif
preempt_enable();
}
#if defined(CONFIG_SMP)
static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
int val)
{
preempt_disable();
if (val != atomic_cmpxchg(&grp->refcount, val, EROFS_LOCKED_MAGIC)) {
preempt_enable();
return false;
}
return true;
}
static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
int orig_val)
{
/*
* other observers should notice all modifications
* in the freezing period.
*/
smp_mb();
atomic_set(&grp->refcount, orig_val);
preempt_enable();
}
static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
{
return atomic_cond_read_relaxed(&grp->refcount,
VAL != EROFS_LOCKED_MAGIC);
}
#else
static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
int val)
{
preempt_disable();
/* no need to spin on UP platforms, let's just disable preemption. */
if (val != atomic_read(&grp->refcount)) {
preempt_enable();
return false;
}
return true;
}
static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
int orig_val)
{
preempt_enable();
}
static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
{
int v = atomic_read(&grp->refcount);
/* workgroup is never frozen on uniprocessor systems */
DBG_BUGON(v == EROFS_LOCKED_MAGIC);
return v;
}
#endif
static inline bool erofs_workgroup_get(struct erofs_workgroup *grp, int *ocnt)
{
const int locked = (int)EROFS_LOCKED_MAGIC;
int o;
repeat:
o = atomic_read(&grp->refcount);
/* spin if it is temporarily locked at the reclaim path */
if (unlikely(o == locked)) {
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
do
cpu_relax();
while (atomic_read(&grp->refcount) == locked);
#endif
goto repeat;
}
o = erofs_wait_on_workgroup_freezed(grp);
if (unlikely(o <= 0))
return -1;

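The freeze/unfreeze pair above is effectively a tiny spinlock built on the refcount: cmpxchg to the EROFS_LOCKED_MAGIC sentinel takes the lock, the full smp_mb() in unfreeze publishes everything written while frozen before the restored count becomes visible, and waiters now spin with atomic_cond_read_relaxed() instead of an open-coded cpu_relax() loop. A reduced sketch of the protocol (preemption handling omitted; LOCKED_SENTINEL is a stand-in for the magic value):

#include <linux/atomic.h>
#include <linux/kernel.h>

#define LOCKED_SENTINEL INT_MIN

/* Take the "lock": succeeds only if the count is still what we expect. */
static inline bool try_freeze(atomic_t *cnt, int expected)
{
        return atomic_cmpxchg(cnt, expected, LOCKED_SENTINEL) == expected;
}

/* Release: the full barrier orders all stores made while frozen
 * before the restored count can be observed by a waiter.
 */
static inline void unfreeze(atomic_t *cnt, int restore)
{
        smp_mb();
        atomic_set(cnt, restore);
}

/* Wait until the sentinel disappears, then return the live count. */
static inline int wait_unfrozen(atomic_t *cnt)
{
        return atomic_cond_read_relaxed(cnt, VAL != LOCKED_SENTINEL);
}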

@@ -40,7 +40,6 @@ static int erofs_init_inode_cache(void)
static void erofs_exit_inode_cache(void)
{
BUG_ON(erofs_inode_cachep == NULL);
kmem_cache_destroy(erofs_inode_cachep);
}
@@ -265,8 +264,8 @@ static int managed_cache_releasepage(struct page *page, gfp_t gfp_mask)
int ret = 1; /* 0 - busy */
struct address_space *const mapping = page->mapping;
BUG_ON(!PageLocked(page));
BUG_ON(mapping->a_ops != &managed_cache_aops);
DBG_BUGON(!PageLocked(page));
DBG_BUGON(mapping->a_ops != &managed_cache_aops);
if (PagePrivate(page))
ret = erofs_try_to_free_cached_page(mapping, page);
@@ -279,10 +278,10 @@ static void managed_cache_invalidatepage(struct page *page,
{
const unsigned int stop = length + offset;
BUG_ON(!PageLocked(page));
DBG_BUGON(!PageLocked(page));
/* Check for overflow */
BUG_ON(stop > PAGE_SIZE || stop < length);
/* Check for potential overflow in debug mode */
DBG_BUGON(stop > PAGE_SIZE || stop < length);
if (offset == 0 && stop == PAGE_SIZE)
while (!managed_cache_releasepage(page, GFP_NOFS))
@@ -404,12 +403,6 @@ static int erofs_read_super(struct super_block *sb,
erofs_register_super(sb);
/*
* We already have a positive dentry, which was instantiated
* by d_make_root. Just need to d_rehash it.
*/
d_rehash(sb->s_root);
if (!silent)
infoln("mounted on %s with opts: %s.", dev_name,
(char *)data);
@@ -625,7 +618,7 @@ static int erofs_show_options(struct seq_file *seq, struct dentry *root)
static int erofs_remount(struct super_block *sb, int *flags, char *data)
{
BUG_ON(!sb_rdonly(sb));
DBG_BUGON(!sb_rdonly(sb));
*flags |= SB_RDONLY;
return 0;


@@ -150,7 +150,7 @@ z_erofs_pagevec_ctor_dequeue(struct z_erofs_pagevec_ctor *ctor,
erofs_vtptr_t t;
if (unlikely(ctor->index >= ctor->nr)) {
BUG_ON(ctor->next == NULL);
DBG_BUGON(!ctor->next);
z_erofs_pagevec_ctor_pagedown(ctor, true);
}


@@ -18,9 +18,6 @@ static struct kmem_cache *z_erofs_workgroup_cachep __read_mostly;
void z_erofs_exit_zip_subsystem(void)
{
BUG_ON(z_erofs_workqueue == NULL);
BUG_ON(z_erofs_workgroup_cachep == NULL);
destroy_workqueue(z_erofs_workqueue);
kmem_cache_destroy(z_erofs_workgroup_cachep);
}
@@ -293,12 +290,9 @@ z_erofs_vle_work_lookup(struct super_block *sb,
*grp_ret = grp = container_of(egrp,
struct z_erofs_vle_workgroup, obj);
#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
work = z_erofs_vle_grab_work(grp, pageofs);
/* if multiref is disabled, `primary' is always true */
primary = true;
#else
BUG();
#endif
DBG_BUGON(work->pageofs != pageofs);
@@ -365,12 +359,12 @@ z_erofs_vle_work_register(struct super_block *sb,
struct z_erofs_vle_workgroup *grp = *grp_ret;
struct z_erofs_vle_work *work;
#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
BUG_ON(grp != NULL);
#else
if (grp != NULL)
goto skip;
#endif
/* if multiref is disabled, grp should never be nullptr */
if (unlikely(grp)) {
DBG_BUGON(1);
return ERR_PTR(-EINVAL);
}
/* no available workgroup, let's allocate one */
grp = kmem_cache_zalloc(z_erofs_workgroup_cachep, GFP_NOFS);
if (unlikely(grp == NULL))
@@ -393,13 +387,7 @@ z_erofs_vle_work_register(struct super_block *sb,
*hosted = true;
newgrp = true;
#ifdef CONFIG_EROFS_FS_ZIP_MULTIREF
skip:
/* currently unimplemented */
BUG();
#else
work = z_erofs_vle_grab_primary_work(grp);
#endif
work->pageofs = pageofs;
mutex_init(&work->lock);
@@ -606,7 +594,7 @@ static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
enum z_erofs_page_type page_type;
unsigned cur, end, spiltted, index;
int err;
int err = 0;
/* register locked file pages as online pages in pack */
z_erofs_onlinepage_init(page);
@@ -624,7 +612,7 @@ static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
/* go ahead the next map_blocks */
debugln("%s: [out-of-range] pos %llu", __func__, offset + cur);
if (!z_erofs_vle_work_iter_end(builder))
if (z_erofs_vle_work_iter_end(builder))
fe->initial = false;
map->m_la = offset + cur;
@@ -633,12 +621,11 @@ static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
if (unlikely(err))
goto err_out;
/* deal with hole (FIXME! broken now) */
if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED)))
goto hitted;
DBG_BUGON(map->m_plen != 1 << sbi->clusterbits);
BUG_ON(erofs_blkoff(map->m_pa));
DBG_BUGON(erofs_blkoff(map->m_pa));
err = z_erofs_vle_work_iter_begin(builder, sb, map, &fe->owned_head);
if (unlikely(err))
@@ -683,7 +670,7 @@ static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
err = z_erofs_vle_work_add_page(builder,
newpage, Z_EROFS_PAGE_TYPE_EXCLUSIVE);
if (!err)
if (likely(!err))
goto retry;
}
@@ -694,9 +681,10 @@ static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
/* FIXME! avoid the last redundant fixup & endio */
z_erofs_onlinepage_fixup(page, index, true);
++spiltted;
/* also update nr_pages and increase queued_pages */
/* bump up the number of spiltted parts of a page */
++spiltted;
/* also update nr_pages */
work->nr_pages = max_t(pgoff_t, work->nr_pages, index + 1);
next_part:
/* can be used for verification */
@@ -706,16 +694,18 @@ static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
if (end > 0)
goto repeat;
out:
/* FIXME! avoid the last redundant fixup & endio */
z_erofs_onlinepage_endio(page);
debugln("%s, finish page: %pK spiltted: %u map->m_llen %llu",
__func__, page, spiltted, map->m_llen);
return 0;
err_out:
/* TODO: the missing error handling cases */
return err;
/* if some error occurred while processing this page */
err_out:
SetPageError(page);
goto out;
}
static void z_erofs_vle_unzip_kickoff(void *ptr, int bios)
@@ -752,7 +742,7 @@ static inline void z_erofs_vle_read_endio(struct bio *bio)
bool cachemngd = false;
DBG_BUGON(PageUptodate(page));
BUG_ON(page->mapping == NULL);
DBG_BUGON(!page->mapping);
#ifdef EROFS_FS_HAS_MANAGED_CACHE
if (unlikely(mngda == NULL && !z_erofs_is_stagingpage(page))) {
@@ -796,10 +786,8 @@ static int z_erofs_vle_unzip(struct super_block *sb,
const unsigned clusterpages = erofs_clusterpages(sbi);
struct z_erofs_pagevec_ctor ctor;
unsigned nr_pages;
#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
unsigned sparsemem_pages = 0;
#endif
unsigned int nr_pages;
unsigned int sparsemem_pages = 0;
struct page *pages_onstack[Z_EROFS_VLE_VMAP_ONSTACK_PAGES];
struct page **pages, **compressed_pages, *page;
unsigned i, llen;
@@ -811,12 +799,8 @@ static int z_erofs_vle_unzip(struct super_block *sb,
int err;
might_sleep();
#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
work = z_erofs_vle_grab_primary_work(grp);
#else
BUG();
#endif
BUG_ON(!READ_ONCE(work->nr_pages));
DBG_BUGON(!READ_ONCE(work->nr_pages));
mutex_lock(&work->lock);
nr_pages = work->nr_pages;
@@ -865,14 +849,12 @@ static int z_erofs_vle_unzip(struct super_block *sb,
else
pagenr = z_erofs_onlinepage_index(page);
BUG_ON(pagenr >= nr_pages);
DBG_BUGON(pagenr >= nr_pages);
DBG_BUGON(pages[pagenr]);
#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
BUG_ON(pages[pagenr] != NULL);
++sparsemem_pages;
#endif
pages[pagenr] = page;
}
sparsemem_pages = i;
z_erofs_pagevec_ctor_exit(&ctor, true);
@@ -891,9 +873,8 @@ static int z_erofs_vle_unzip(struct super_block *sb,
if (z_erofs_is_stagingpage(page))
continue;
#ifdef EROFS_FS_HAS_MANAGED_CACHE
else if (page->mapping == mngda) {
BUG_ON(PageLocked(page));
BUG_ON(!PageUptodate(page));
if (page->mapping == mngda) {
DBG_BUGON(!PageUptodate(page));
continue;
}
#endif
@@ -901,11 +882,9 @@ static int z_erofs_vle_unzip(struct super_block *sb,
/* only non-head page could be reused as a compressed page */
pagenr = z_erofs_onlinepage_index(page);
BUG_ON(pagenr >= nr_pages);
#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
BUG_ON(pages[pagenr] != NULL);
DBG_BUGON(pagenr >= nr_pages);
DBG_BUGON(pages[pagenr]);
++sparsemem_pages;
#endif
pages[pagenr] = page;
overlapped = true;
@@ -914,9 +893,6 @@ static int z_erofs_vle_unzip(struct super_block *sb,
llen = (nr_pages << PAGE_SHIFT) - work->pageofs;
if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) {
/* FIXME! this should be fixed in the future */
BUG_ON(grp->llen != llen);
err = z_erofs_vle_plain_copy(compressed_pages, clusterpages,
pages, nr_pages, work->pageofs);
goto out;
@@ -931,12 +907,8 @@ static int z_erofs_vle_unzip(struct super_block *sb,
if (err != -ENOTSUPP)
goto out_percpu;
#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
if (sparsemem_pages >= nr_pages) {
BUG_ON(sparsemem_pages > nr_pages);
if (sparsemem_pages >= nr_pages)
goto skip_allocpage;
}
#endif
for (i = 0; i < nr_pages; ++i) {
if (pages[i] != NULL)
@@ -945,9 +917,7 @@ static int z_erofs_vle_unzip(struct super_block *sb,
pages[i] = __stagingpage_alloc(page_pool, GFP_NOFS);
}
#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
skip_allocpage:
#endif
vout = erofs_vmap(pages, nr_pages);
err = z_erofs_vle_unzip_vmap(compressed_pages,
@@ -1031,7 +1001,7 @@ static void z_erofs_vle_unzip_wq(struct work_struct *work)
struct z_erofs_vle_unzip_io_sb, io.u.work);
LIST_HEAD(page_pool);
BUG_ON(iosb->io.head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
DBG_BUGON(iosb->io.head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &page_pool);
put_pages_list(&page_pool);
@@ -1360,7 +1330,6 @@ static inline int __z_erofs_vle_normalaccess_readpages(
continue;
}
BUG_ON(PagePrivate(page));
set_page_private(page, (unsigned long)head);
head = page;
}


@@ -47,13 +47,6 @@ static inline bool z_erofs_gather_if_stagingpage(struct list_head *page_pool,
#define Z_EROFS_VLE_INLINE_PAGEVECS 3
struct z_erofs_vle_work {
/* struct z_erofs_vle_work *left, *right; */
#ifdef CONFIG_EROFS_FS_ZIP_MULTIREF
struct list_head list;
atomic_t refcount;
#endif
struct mutex lock;
/* I: decompression offset in page */
@@ -107,10 +100,8 @@ static inline void z_erofs_vle_set_workgrp_fmt(
grp->flags = fmt | (grp->flags & ~Z_EROFS_VLE_WORKGRP_FMT_MASK);
}
#ifdef CONFIG_EROFS_FS_ZIP_MULTIREF
#error multiref decompression is unimplemented yet
#else
/* definitions if multiref is disabled */
#define z_erofs_vle_grab_primary_work(grp) (&(grp)->work)
#define z_erofs_vle_grab_work(grp, pageofs) (&(grp)->work)
#define z_erofs_vle_work_workgroup(wrk, primary) \
@@ -118,7 +109,6 @@ static inline void z_erofs_vle_set_workgrp_fmt(
struct z_erofs_vle_workgroup, work) : \
({ BUG(); (void *)NULL; }))
#endif
#define Z_EROFS_WORKGROUP_SIZE sizeof(struct z_erofs_vle_workgroup)


@@ -57,7 +57,7 @@ int z_erofs_vle_plain_copy(struct page **compressed_pages,
if (compressed_pages[j] != page)
continue;
BUG_ON(mirrored[j]);
DBG_BUGON(mirrored[j]);
memcpy(percpu_data + j * PAGE_SIZE, dst, PAGE_SIZE);
mirrored[j] = true;
break;


@@ -23,9 +23,6 @@ struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp)
list_del(&page->lru);
} else {
page = alloc_pages(gfp | __GFP_NOFAIL, 0);
BUG_ON(page == NULL);
BUG_ON(page->mapping != NULL);
}
return page;
}
@@ -60,7 +57,7 @@ struct erofs_workgroup *erofs_find_workgroup(
/* decrease refcount added by erofs_workgroup_put */
if (unlikely(oldcount == 1))
atomic_long_dec(&erofs_global_shrink_cnt);
BUG_ON(index != grp->index);
DBG_BUGON(index != grp->index);
}
rcu_read_unlock();
return grp;
@@ -73,8 +70,11 @@ int erofs_register_workgroup(struct super_block *sb,
struct erofs_sb_info *sbi;
int err;
/* grp->refcount should not < 1 */
BUG_ON(!atomic_read(&grp->refcount));
/* grp shouldn't be broken or used before */
if (unlikely(atomic_read(&grp->refcount) != 1)) {
DBG_BUGON(1);
return -EINVAL;
}
err = radix_tree_preload(GFP_NOFS);
if (err)


@@ -148,7 +148,7 @@ struct tcmu_dev {
size_t ring_size;
struct mutex cmdr_lock;
struct list_head cmdr_queue;
struct list_head qfull_queue;
uint32_t dbi_max;
uint32_t dbi_thresh;
@@ -159,6 +159,7 @@ struct tcmu_dev {
struct timer_list cmd_timer;
unsigned int cmd_time_out;
struct list_head inflight_queue;
struct timer_list qfull_timer;
int qfull_time_out;
@@ -179,7 +180,7 @@ struct tcmu_dev {
struct tcmu_cmd {
struct se_cmd *se_cmd;
struct tcmu_dev *tcmu_dev;
struct list_head cmdr_queue_entry;
struct list_head queue_entry;
uint16_t cmd_id;
@@ -192,6 +193,7 @@ struct tcmu_cmd {
unsigned long deadline;
#define TCMU_CMD_BIT_EXPIRED 0
#define TCMU_CMD_BIT_INFLIGHT 1
unsigned long flags;
};
/*
@@ -586,7 +588,7 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
if (!tcmu_cmd)
return NULL;
INIT_LIST_HEAD(&tcmu_cmd->cmdr_queue_entry);
INIT_LIST_HEAD(&tcmu_cmd->queue_entry);
tcmu_cmd->se_cmd = se_cmd;
tcmu_cmd->tcmu_dev = udev;
@@ -915,11 +917,13 @@ static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo,
return 0;
tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
mod_timer(timer, tcmu_cmd->deadline);
if (!timer_pending(timer))
mod_timer(timer, tcmu_cmd->deadline);
return 0;
}
static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd)
static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
{
struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
unsigned int tmo;
@@ -942,7 +946,7 @@ static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd)
if (ret)
return ret;
list_add_tail(&tcmu_cmd->cmdr_queue_entry, &udev->cmdr_queue);
list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue);
pr_debug("adding cmd %u on dev %s to ring space wait queue\n",
tcmu_cmd->cmd_id, udev->name);
return 0;
@@ -999,7 +1003,7 @@ static sense_reason_t queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, int *scsi_err)
base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt);
command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
if (!list_empty(&udev->cmdr_queue))
if (!list_empty(&udev->qfull_queue))
goto queue;
mb = udev->mb_addr;
@@ -1096,13 +1100,16 @@ static sense_reason_t queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, int *scsi_err)
UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
tcmu_flush_dcache_range(mb, sizeof(*mb));
list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue);
set_bit(TCMU_CMD_BIT_INFLIGHT, &tcmu_cmd->flags);
/* TODO: only if FLUSH and FUA? */
uio_event_notify(&udev->uio_info);
return 0;
queue:
if (add_to_cmdr_queue(tcmu_cmd)) {
if (add_to_qfull_queue(tcmu_cmd)) {
*scsi_err = TCM_OUT_OF_RESOURCES;
return -1;
}
@@ -1145,6 +1152,8 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
goto out;
list_del_init(&cmd->queue_entry);
tcmu_cmd_reset_dbi_cur(cmd);
if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
@@ -1194,9 +1203,29 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
tcmu_free_cmd(cmd);
}
static void tcmu_set_next_deadline(struct list_head *queue,
struct timer_list *timer)
{
struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
unsigned long deadline = 0;
list_for_each_entry_safe(tcmu_cmd, tmp_cmd, queue, queue_entry) {
if (!time_after(jiffies, tcmu_cmd->deadline)) {
deadline = tcmu_cmd->deadline;
break;
}
}
if (deadline)
mod_timer(timer, deadline);
else
del_timer(timer);
}
static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
{
struct tcmu_mailbox *mb;
struct tcmu_cmd *cmd;
int handled = 0;
if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
@@ -1210,7 +1239,6 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {
struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
struct tcmu_cmd *cmd;
tcmu_flush_dcache_range(entry, sizeof(*entry));
@@ -1243,7 +1271,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
/* no more pending commands */
del_timer(&udev->cmd_timer);
if (list_empty(&udev->cmdr_queue)) {
if (list_empty(&udev->qfull_queue)) {
/*
* no more pending or waiting commands so try to
* reclaim blocks if needed.
@@ -1252,6 +1280,8 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
tcmu_global_max_blocks)
schedule_delayed_work(&tcmu_unmap_work, 0);
}
} else if (udev->cmd_time_out) {
tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
}
return handled;
@@ -1271,7 +1301,7 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
if (!time_after(jiffies, cmd->deadline))
return 0;
is_running = list_empty(&cmd->cmdr_queue_entry);
is_running = test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags);
se_cmd = cmd->se_cmd;
if (is_running) {
@@ -1288,12 +1318,11 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
*/
scsi_status = SAM_STAT_CHECK_CONDITION;
} else {
list_del_init(&cmd->cmdr_queue_entry);
idr_remove(&udev->commands, id);
tcmu_free_cmd(cmd);
scsi_status = SAM_STAT_TASK_SET_FULL;
}
list_del_init(&cmd->queue_entry);
pr_debug("Timing out cmd %u on dev %s that is %s.\n",
id, udev->name, is_running ? "inflight" : "queued");
@@ -1372,7 +1401,8 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
INIT_LIST_HEAD(&udev->node);
INIT_LIST_HEAD(&udev->timedout_entry);
INIT_LIST_HEAD(&udev->cmdr_queue);
INIT_LIST_HEAD(&udev->qfull_queue);
INIT_LIST_HEAD(&udev->inflight_queue);
idr_init(&udev->commands);
timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
@@ -1383,7 +1413,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
return &udev->se_dev;
}
static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail)
static bool run_qfull_queue(struct tcmu_dev *udev, bool fail)
{
struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
LIST_HEAD(cmds);
@@ -1391,15 +1421,15 @@ static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail)
sense_reason_t scsi_ret;
int ret;
if (list_empty(&udev->cmdr_queue))
if (list_empty(&udev->qfull_queue))
return true;
pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail);
list_splice_init(&udev->cmdr_queue, &cmds);
list_splice_init(&udev->qfull_queue, &cmds);
list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, cmdr_queue_entry) {
list_del_init(&tcmu_cmd->cmdr_queue_entry);
list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) {
list_del_init(&tcmu_cmd->queue_entry);
pr_debug("removing cmd %u on dev %s from queue\n",
tcmu_cmd->cmd_id, udev->name);
@@ -1437,14 +1467,13 @@ static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail)
* cmd was requeued, so just put all cmds back in
* the queue
*/
list_splice_tail(&cmds, &udev->cmdr_queue);
list_splice_tail(&cmds, &udev->qfull_queue);
drained = false;
goto done;
break;
}
}
if (list_empty(&udev->cmdr_queue))
del_timer(&udev->qfull_timer);
done:
tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
return drained;
}
@@ -1454,7 +1483,7 @@ static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
mutex_lock(&udev->cmdr_lock);
tcmu_handle_completions(udev);
run_cmdr_queue(udev, false);
run_qfull_queue(udev, false);
mutex_unlock(&udev->cmdr_lock);
return 0;
@@ -1982,7 +2011,7 @@ static void tcmu_block_dev(struct tcmu_dev *udev)
/* complete IO that has executed successfully */
tcmu_handle_completions(udev);
/* fail IO waiting to be queued */
run_cmdr_queue(udev, true);
run_qfull_queue(udev, true);
unlock:
mutex_unlock(&udev->cmdr_lock);
@@ -1997,7 +2026,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
mutex_lock(&udev->cmdr_lock);
idr_for_each_entry(&udev->commands, cmd, i) {
if (!list_empty(&cmd->cmdr_queue_entry))
if (!test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags))
continue;
pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n",
@@ -2006,6 +2035,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
idr_remove(&udev->commands, i);
if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
list_del_init(&cmd->queue_entry);
if (err_level == 1) {
/*
* Userspace was not able to start the
@@ -2666,6 +2696,10 @@ static void check_timedout_devices(void)
mutex_lock(&udev->cmdr_lock);
idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
mutex_unlock(&udev->cmdr_lock);
spin_lock_bh(&timed_out_udevs_lock);

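Two timer rules carry the tcmu rework above: when queueing, the timer is armed only if it is not already pending, so the oldest command's deadline keeps priority over newer arrivals; and after completions or expiry scans, tcmu_set_next_deadline() re-arms it to the earliest remaining deadline or deletes it once the queue drains. The arming side in isolation, as a sketch:

#include <linux/timer.h>

/* Arm for a new deadline without postponing an earlier pending one. */
static void arm_for_deadline(struct timer_list *timer, unsigned long deadline)
{
        if (!timer_pending(timer))
                mod_timer(timer, deadline);
}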

@@ -1035,8 +1035,10 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
int type, ret;
ret = copy_from_iter(&type, sizeof(type), from);
if (ret != sizeof(type))
if (ret != sizeof(type)) {
ret = -EINVAL;
goto done;
}
switch (type) {
case VHOST_IOTLB_MSG:
@@ -1055,8 +1057,10 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
iov_iter_advance(from, offset);
ret = copy_from_iter(&msg, sizeof(msg), from);
if (ret != sizeof(msg))
if (ret != sizeof(msg)) {
ret = -EINVAL;
goto done;
}
if (vhost_process_iotlb_msg(dev, &msg)) {
ret = -EFAULT;
goto done;


@@ -262,6 +262,16 @@ static int pwm_backlight_parse_dt(struct device *dev,
memset(data, 0, sizeof(*data));
/*
* These values are optional and set to 0 by default; the out values
* are modified only if a valid u32 value can be decoded.
*/
of_property_read_u32(node, "post-pwm-on-delay-ms",
&data->post_pwm_on_delay);
of_property_read_u32(node, "pwm-off-delay-ms", &data->pwm_off_delay);
data->enable_gpio = -EINVAL;
/*
* Determine the number of brightness levels, if this property is not
* set a default table of brightness levels will be used.
@@ -374,15 +384,6 @@ static int pwm_backlight_parse_dt(struct device *dev,
data->max_brightness--;
}
/*
* These values are optional and set to 0 by default; the out values
* are modified only if a valid u32 value can be decoded.
*/
of_property_read_u32(node, "post-pwm-on-delay-ms",
&data->post_pwm_on_delay);
of_property_read_u32(node, "pwm-off-delay-ms", &data->pwm_off_delay);
data->enable_gpio = -EINVAL;
return 0;
}


@@ -916,8 +916,6 @@ static int dlfb_ops_open(struct fb_info *info, int user)
dlfb->fb_count++;
kref_get(&dlfb->kref);
if (fb_defio && (info->fbdefio == NULL)) {
/* enable defio at last moment if not disabled by client */
@@ -940,14 +938,17 @@ static int dlfb_ops_open(struct fb_info *info, int user)
return 0;
}
/*
* Called when all client interfaces to start transactions have been disabled,
* and all references to our device instance (dlfb_data) are released.
* Every transaction must have a reference, so we know we are fully spun down
*/
static void dlfb_free(struct kref *kref)
static void dlfb_ops_destroy(struct fb_info *info)
{
struct dlfb_data *dlfb = container_of(kref, struct dlfb_data, kref);
struct dlfb_data *dlfb = info->par;
if (info->cmap.len != 0)
fb_dealloc_cmap(&info->cmap);
if (info->monspecs.modedb)
fb_destroy_modedb(info->monspecs.modedb);
vfree(info->screen_base);
fb_destroy_modelist(&info->modelist);
while (!list_empty(&dlfb->deferred_free)) {
struct dlfb_deferred_free *d = list_entry(dlfb->deferred_free.next, struct dlfb_deferred_free, list);
@@ -957,40 +958,13 @@ static void dlfb_free(struct kref *kref)
}
vfree(dlfb->backing_buffer);
kfree(dlfb->edid);
usb_put_dev(dlfb->udev);
kfree(dlfb);
/* Assume info structure is freed after this point */
framebuffer_release(info);
}
static void dlfb_free_framebuffer(struct dlfb_data *dlfb)
{
struct fb_info *info = dlfb->info;
if (info) {
unregister_framebuffer(info);
if (info->cmap.len != 0)
fb_dealloc_cmap(&info->cmap);
if (info->monspecs.modedb)
fb_destroy_modedb(info->monspecs.modedb);
vfree(info->screen_base);
fb_destroy_modelist(&info->modelist);
dlfb->info = NULL;
/* Assume info structure is freed after this point */
framebuffer_release(info);
}
/* ref taken in probe() as part of registering framebuffer */
kref_put(&dlfb->kref, dlfb_free);
}
static void dlfb_free_framebuffer_work(struct work_struct *work)
{
struct dlfb_data *dlfb = container_of(work, struct dlfb_data,
free_framebuffer_work.work);
dlfb_free_framebuffer(dlfb);
}
/*
* Assumes caller is holding info->lock mutex (for open and release at least)
*/
@@ -1000,10 +974,6 @@ static int dlfb_ops_release(struct fb_info *info, int user)
dlfb->fb_count--;
/* We can't free fb_info here - fbmem will touch it when we return */
if (dlfb->virtualized && (dlfb->fb_count == 0))
schedule_delayed_work(&dlfb->free_framebuffer_work, HZ);
if ((dlfb->fb_count == 0) && (info->fbdefio)) {
fb_deferred_io_cleanup(info);
kfree(info->fbdefio);
@@ -1013,8 +983,6 @@ static int dlfb_ops_release(struct fb_info *info, int user)
dev_dbg(info->dev, "release, user=%d count=%d\n", user, dlfb->fb_count);
kref_put(&dlfb->kref, dlfb_free);
return 0;
}
@@ -1172,6 +1140,7 @@ static struct fb_ops dlfb_ops = {
.fb_blank = dlfb_ops_blank,
.fb_check_var = dlfb_ops_check_var,
.fb_set_par = dlfb_ops_set_par,
.fb_destroy = dlfb_ops_destroy,
};
@@ -1615,12 +1584,13 @@ static int dlfb_parse_vendor_descriptor(struct dlfb_data *dlfb,
return true;
}
static void dlfb_init_framebuffer_work(struct work_struct *work);
static int dlfb_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
int i;
const struct device_attribute *attr;
struct dlfb_data *dlfb;
struct fb_info *info;
int retval = -ENOMEM;
struct usb_device *usbdev = interface_to_usbdev(intf);
@@ -1631,10 +1601,9 @@ static int dlfb_usb_probe(struct usb_interface *intf,
goto error;
}
kref_init(&dlfb->kref); /* matching kref_put in usb .disconnect fn */
INIT_LIST_HEAD(&dlfb->deferred_free);
dlfb->udev = usbdev;
dlfb->udev = usb_get_dev(usbdev);
usb_set_intfdata(intf, dlfb);
dev_dbg(&intf->dev, "console enable=%d\n", console);
@@ -1657,42 +1626,6 @@ static int dlfb_usb_probe(struct usb_interface *intf,
}
if (!dlfb_alloc_urb_list(dlfb, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
retval = -ENOMEM;
dev_err(&intf->dev, "unable to allocate urb list\n");
goto error;
}
kref_get(&dlfb->kref); /* matching kref_put in free_framebuffer_work */
/* We don't register a new USB class. Our client interface is dlfbev */
/* Workitem keep things fast & simple during USB enumeration */
INIT_DELAYED_WORK(&dlfb->init_framebuffer_work,
dlfb_init_framebuffer_work);
schedule_delayed_work(&dlfb->init_framebuffer_work, 0);
return 0;
error:
if (dlfb) {
kref_put(&dlfb->kref, dlfb_free); /* last ref from kref_init */
/* dev has been deallocated. Do not dereference */
}
return retval;
}
static void dlfb_init_framebuffer_work(struct work_struct *work)
{
int i, retval;
struct fb_info *info;
const struct device_attribute *attr;
struct dlfb_data *dlfb = container_of(work, struct dlfb_data,
init_framebuffer_work.work);
/* allocates framebuffer driver structure, not framebuffer memory */
info = framebuffer_alloc(0, &dlfb->udev->dev);
if (!info) {
@@ -1706,17 +1639,22 @@ static void dlfb_init_framebuffer_work(struct work_struct *work)
dlfb->ops = dlfb_ops;
info->fbops = &dlfb->ops;
INIT_LIST_HEAD(&info->modelist);
if (!dlfb_alloc_urb_list(dlfb, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
retval = -ENOMEM;
dev_err(&intf->dev, "unable to allocate urb list\n");
goto error;
}
/* We don't register a new USB class. Our client interface is dlfbev */
retval = fb_alloc_cmap(&info->cmap, 256, 0);
if (retval < 0) {
dev_err(info->device, "cmap allocation failed: %d\n", retval);
goto error;
}
INIT_DELAYED_WORK(&dlfb->free_framebuffer_work,
dlfb_free_framebuffer_work);
INIT_LIST_HEAD(&info->modelist);
retval = dlfb_setup_modes(dlfb, info, NULL, 0);
if (retval != 0) {
dev_err(info->device,
@@ -1760,10 +1698,16 @@ static void dlfb_init_framebuffer_work(struct work_struct *work)
dev_name(info->dev), info->var.xres, info->var.yres,
((dlfb->backing_buffer) ?
info->fix.smem_len * 2 : info->fix.smem_len) >> 10);
return;
return 0;
error:
dlfb_free_framebuffer(dlfb);
if (dlfb->info) {
dlfb_ops_destroy(dlfb->info);
} else if (dlfb) {
usb_put_dev(dlfb->udev);
kfree(dlfb);
}
return retval;
}
static void dlfb_usb_disconnect(struct usb_interface *intf)
@@ -1791,20 +1735,9 @@ static void dlfb_usb_disconnect(struct usb_interface *intf)
for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++)
device_remove_file(info->dev, &fb_device_attrs[i]);
device_remove_bin_file(info->dev, &edid_attr);
unlink_framebuffer(info);
}
usb_set_intfdata(intf, NULL);
dlfb->udev = NULL;
/* if clients still have us open, will be freed on last close */
if (dlfb->fb_count == 0)
schedule_delayed_work(&dlfb->free_framebuffer_work, 0);
/* release reference taken by kref_init in probe() */
kref_put(&dlfb->kref, dlfb_free);
/* consider dlfb_data freed */
unregister_framebuffer(info);
}
static struct usb_driver dlfb_driver = {


@@ -17,6 +17,7 @@
#include <linux/watchdog.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/mod_devicetable.h>
#include <asm/mach-ralink/ralink_regs.h>


@@ -18,6 +18,7 @@
#include <linux/watchdog.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/mod_devicetable.h>
#include <asm/mach-ralink/ralink_regs.h>


@@ -160,9 +160,10 @@ static void pvcalls_conn_back_read(void *opaque)
/* write the data, then modify the indexes */
virt_wmb();
if (ret < 0)
if (ret < 0) {
atomic_set(&map->read, 0);
intf->in_error = ret;
else
} else
intf->in_prod = prod + ret;
/* update the indexes, then notify the other end */
virt_wmb();
@@ -282,13 +283,11 @@ static int pvcalls_back_socket(struct xenbus_device *dev,
static void pvcalls_sk_state_change(struct sock *sock)
{
struct sock_mapping *map = sock->sk_user_data;
struct pvcalls_data_intf *intf;
if (map == NULL)
return;
intf = map->ring;
intf->in_error = -ENOTCONN;
atomic_inc(&map->read);
notify_remote_via_irq(map->irq);
}


@@ -31,6 +31,12 @@
#define PVCALLS_NR_RSP_PER_RING __CONST_RING_SIZE(xen_pvcalls, XEN_PAGE_SIZE)
#define PVCALLS_FRONT_MAX_SPIN 5000
static struct proto pvcalls_proto = {
.name = "PVCalls",
.owner = THIS_MODULE,
.obj_size = sizeof(struct sock),
};
struct pvcalls_bedata {
struct xen_pvcalls_front_ring ring;
grant_ref_t ref;
@@ -335,6 +341,42 @@ int pvcalls_front_socket(struct socket *sock)
return ret;
}
static void free_active_ring(struct sock_mapping *map)
{
if (!map->active.ring)
return;
free_pages((unsigned long)map->active.data.in,
map->active.ring->ring_order);
free_page((unsigned long)map->active.ring);
}
static int alloc_active_ring(struct sock_mapping *map)
{
void *bytes;
map->active.ring = (struct pvcalls_data_intf *)
get_zeroed_page(GFP_KERNEL);
if (!map->active.ring)
goto out;
map->active.ring->ring_order = PVCALLS_RING_ORDER;
bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
PVCALLS_RING_ORDER);
if (!bytes)
goto out;
map->active.data.in = bytes;
map->active.data.out = bytes +
XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
return 0;
out:
free_active_ring(map);
return -ENOMEM;
}
static int create_active(struct sock_mapping *map, int *evtchn)
{
void *bytes;
@@ -343,15 +385,7 @@ static int create_active(struct sock_mapping *map, int *evtchn)
 	*evtchn = -1;
 	init_waitqueue_head(&map->active.inflight_conn_req);
 
-	map->active.ring = (struct pvcalls_data_intf *)
-		__get_free_page(GFP_KERNEL | __GFP_ZERO);
-	if (map->active.ring == NULL)
-		goto out_error;
-	map->active.ring->ring_order = PVCALLS_RING_ORDER;
-	bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-					 PVCALLS_RING_ORDER);
-	if (bytes == NULL)
-		goto out_error;
+	bytes = map->active.data.in;
 
 	for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++)
 		map->active.ring->ref[i] = gnttab_grant_foreign_access(
 			pvcalls_front_dev->otherend_id,
@@ -361,10 +395,6 @@ static int create_active(struct sock_mapping *map, int *evtchn)
 			pvcalls_front_dev->otherend_id,
 			pfn_to_gfn(virt_to_pfn((void *)map->active.ring)), 0);
 
-	map->active.data.in = bytes;
-	map->active.data.out = bytes +
-		XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
-
 	ret = xenbus_alloc_evtchn(pvcalls_front_dev, evtchn);
 	if (ret)
 		goto out_error;
@@ -385,8 +415,6 @@ static int create_active(struct sock_mapping *map, int *evtchn)
 out_error:
 	if (*evtchn >= 0)
 		xenbus_free_evtchn(pvcalls_front_dev, *evtchn);
-	free_pages((unsigned long)map->active.data.in, PVCALLS_RING_ORDER);
-	free_page((unsigned long)map->active.ring);
 	return ret;
 }
@@ -406,17 +434,24 @@ int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
 		return PTR_ERR(map);
 
 	bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
+	ret = alloc_active_ring(map);
+	if (ret < 0) {
+		pvcalls_exit_sock(sock);
+		return ret;
+	}
 
 	spin_lock(&bedata->socket_lock);
 	ret = get_request(bedata, &req_id);
 	if (ret < 0) {
 		spin_unlock(&bedata->socket_lock);
+		free_active_ring(map);
 		pvcalls_exit_sock(sock);
 		return ret;
 	}
 	ret = create_active(map, &evtchn);
 	if (ret < 0) {
 		spin_unlock(&bedata->socket_lock);
+		free_active_ring(map);
 		pvcalls_exit_sock(sock);
 		return ret;
 	}
@@ -560,15 +595,13 @@ static int __read_ring(struct pvcalls_data_intf *intf,
 	error = intf->in_error;
 	/* get pointers before reading from the ring */
 	virt_rmb();
 
-	if (error < 0)
-		return error;
-
 	size = pvcalls_queued(prod, cons, array_size);
 	masked_prod = pvcalls_mask(prod, array_size);
 	masked_cons = pvcalls_mask(cons, array_size);
 
 	if (size == 0)
-		return 0;
+		return error ?: size;
 
 	if (len > size)
 		len = size;
@@ -780,25 +813,36 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
 		}
 	}
 
+	map2 = kzalloc(sizeof(*map2), GFP_KERNEL);
+	if (map2 == NULL) {
+		clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
+			  (void *)&map->passive.flags);
+		pvcalls_exit_sock(sock);
+		return -ENOMEM;
+	}
+	ret = alloc_active_ring(map2);
+	if (ret < 0) {
+		clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
+			  (void *)&map->passive.flags);
+		kfree(map2);
+		pvcalls_exit_sock(sock);
+		return ret;
+	}
 	spin_lock(&bedata->socket_lock);
 	ret = get_request(bedata, &req_id);
 	if (ret < 0) {
 		clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
 			  (void *)&map->passive.flags);
 		spin_unlock(&bedata->socket_lock);
+		free_active_ring(map2);
+		kfree(map2);
 		pvcalls_exit_sock(sock);
 		return ret;
 	}
-	map2 = kzalloc(sizeof(*map2), GFP_ATOMIC);
-	if (map2 == NULL) {
-		clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
-			  (void *)&map->passive.flags);
-		spin_unlock(&bedata->socket_lock);
-		pvcalls_exit_sock(sock);
-		return -ENOMEM;
-	}
+
 	ret = create_active(map2, &evtchn);
 	if (ret < 0) {
+		free_active_ring(map2);
 		kfree(map2);
 		clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
 			  (void *)&map->passive.flags);
@@ -839,7 +883,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
 
 received:
 	map2->sock = newsock;
-	newsock->sk = kzalloc(sizeof(*newsock->sk), GFP_KERNEL);
+	newsock->sk = sk_alloc(sock_net(sock->sk), PF_INET, GFP_KERNEL, &pvcalls_proto, false);
 	if (!newsock->sk) {
 		bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
 		map->passive.inflight_req_id = PVCALLS_INVALID_ID;
@@ -1032,8 +1076,8 @@ int pvcalls_front_release(struct socket *sock)
 		spin_lock(&bedata->socket_lock);
 		list_del(&map->list);
 		spin_unlock(&bedata->socket_lock);
-		if (READ_ONCE(map->passive.inflight_req_id) !=
-		    PVCALLS_INVALID_ID) {
+		if (READ_ONCE(map->passive.inflight_req_id) != PVCALLS_INVALID_ID &&
+		    READ_ONCE(map->passive.inflight_req_id) != 0) {
 			pvcalls_front_free_map(bedata,
 					       map->passive.accept_map);
 		}
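
Note: the common thread in the front-end hunks is moving every allocation that may sleep (the GFP_KERNEL ring pages, the map2 kzalloc) in front of bedata->socket_lock; the old code allocated with GFP_ATOMIC or __get_free_pages() while holding the spinlock, and free_active_ring() now unwinds the work on each error path. Separately, accept() creates newsock->sk with sk_alloc() against pvcalls_proto instead of a bare kzalloc(), so the sock is a properly initialised, refcounted struct sock. The shape of the locking fix, as a sketch with placeholder names:

	#include <linux/slab.h>
	#include <linux/spinlock.h>
	#include <linux/list.h>

	static LIST_HEAD(demo_list);
	static DEFINE_SPINLOCK(demo_lock);

	struct demo_conn {
		struct list_head link;
		void *ring;
	};

	static int demo_connect(struct demo_conn *c, size_t ring_len)
	{
		c->ring = kzalloc(ring_len, GFP_KERNEL);	/* may sleep: lock not held */
		if (!c->ring)
			return -ENOMEM;

		spin_lock(&demo_lock);
		list_add_tail(&c->link, &demo_list);	/* short, non-sleeping publish */
		spin_unlock(&demo_lock);
		return 0;	/* a later failure must unlock first, then kfree(c->ring) */
	}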

diff --git a/fs/afs/flock.c b/fs/afs/flock.c

@@ -208,7 +208,7 @@ void afs_lock_work(struct work_struct *work)
 		/* The new front of the queue now owns the state variables. */
 		next = list_entry(vnode->pending_locks.next,
 				  struct file_lock, fl_u.afs.link);
-		vnode->lock_key = afs_file_key(next->fl_file);
+		vnode->lock_key = key_get(afs_file_key(next->fl_file));
 		vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
 		vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB;
 		goto again;
@@ -413,7 +413,7 @@ static void afs_dequeue_lock(struct afs_vnode *vnode, struct file_lock *fl)
 	/* The new front of the queue now owns the state variables. */
 	next = list_entry(vnode->pending_locks.next,
 			  struct file_lock, fl_u.afs.link);
-	vnode->lock_key = afs_file_key(next->fl_file);
+	vnode->lock_key = key_get(afs_file_key(next->fl_file));
 	vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
 	vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB;
 	afs_lock_may_be_available(vnode);

diff --git a/fs/afs/inode.c b/fs/afs/inode.c

@@ -411,7 +411,6 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
 	} else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
 		valid = true;
 	} else {
-		vnode->cb_s_break = vnode->cb_interest->server->cb_s_break;
 		vnode->cb_v_break = vnode->volume->cb_v_break;
 		valid = false;
 	}
@@ -543,6 +542,8 @@ void afs_evict_inode(struct inode *inode)
 #endif
 
 	afs_put_permits(rcu_access_pointer(vnode->permit_cache));
+	key_put(vnode->lock_key);
+	vnode->lock_key = NULL;
 	_leave("");
 }
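
Note: the flock.c and inode.c changes pair up: vnode->lock_key now always owns a reference (key_get() when the key is cached, key_put() plus a NULL store when the inode is evicted). Since both helpers accept NULL, the rule collapses to a simple store/teardown discipline, sketched here with a hypothetical helper:

	#include <linux/key.h>

	/* Hypothetical helper: cache a key in a long-lived slot while holding
	 * a reference of our own; key_get() and key_put() are both NULL-safe.
	 */
	static void demo_cache_key(struct key **slot, struct key *k)
	{
		key_put(*slot);		/* drop the previously cached reference, if any */
		*slot = key_get(k);	/* pin the new key for the slot's lifetime */
	}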

diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c

@@ -616,7 +616,8 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
 		      capsnap->size);
 
 	spin_lock(&mdsc->snap_flush_lock);
-	list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
+	if (list_empty(&ci->i_snap_flush_item))
+		list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
 	spin_unlock(&mdsc->snap_flush_lock);
 	return 1;  /* caller may want to ceph_flush_snaps */
 }
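
Note: calling list_add_tail() on an entry that is already linked corrupts both lists, so the fix queues the inode for snap flushing only while i_snap_flush_item is unlinked. This works only if the dequeue side uses list_del_init(), so that list_empty() on the entry is meaningful again; the idiom in isolation:

	#include <linux/list.h>
	#include <linux/spinlock.h>

	static LIST_HEAD(demo_flush_list);
	static DEFINE_SPINLOCK(demo_flush_lock);

	struct demo_inode {
		struct list_head flush_item;	/* set up with INIT_LIST_HEAD() */
	};

	static void demo_queue_flush(struct demo_inode *di)
	{
		spin_lock(&demo_flush_lock);
		if (list_empty(&di->flush_item))	/* enqueue at most once */
			list_add_tail(&di->flush_item, &demo_flush_list);
		spin_unlock(&demo_flush_lock);
	}

	static void demo_dequeue_flush(struct demo_inode *di)
	{
		spin_lock(&demo_flush_lock);
		list_del_init(&di->flush_item);	/* re-init so list_empty() holds again */
		spin_unlock(&demo_flush_lock);
	}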

Some files were not shown because too many files have changed in this diff.