Merge 4.19.107 into android-4.19

Changes in 4.19.107
	iommu/qcom: Fix bogus detach logic
	ALSA: hda: Use scnprintf() for printing texts for sysfs/procfs
	ALSA: hda/realtek - Apply quirk for MSI GP63, too
	ALSA: hda/realtek - Apply quirk for yet another MSI laptop
	ASoC: sun8i-codec: Fix setting DAI data format
	ecryptfs: fix a memory leak bug in parse_tag_1_packet()
	ecryptfs: fix a memory leak bug in ecryptfs_init_messaging()
	thunderbolt: Prevent crash if non-active NVMem file is read
	USB: misc: iowarrior: add support for 2 OEMed devices
	USB: misc: iowarrior: add support for the 28 and 28L devices
	USB: misc: iowarrior: add support for the 100 device
	floppy: check FDC index for errors before assigning it
	vt: fix scrollback flushing on background consoles
	vt: selection, handle pending signals in paste_selection
	vt: vt_ioctl: fix race in VT_RESIZEX
	staging: android: ashmem: Disallow ashmem memory from being remapped
	staging: vt6656: fix sign of rx_dbm to bb_pre_ed_rssi.
	xhci: Force Maximum Packet size for Full-speed bulk devices to valid range.
	xhci: fix runtime pm enabling for quirky Intel hosts
	xhci: Fix memory leak when caching protocol extended capability PSI tables - take 2
	usb: host: xhci: update event ring dequeue pointer on purpose
	USB: core: add endpoint-blacklist quirk
	USB: quirks: blacklist duplicate ep on Sound Devices USBPre2
	usb: uas: fix a plug & unplug racing
	USB: Fix novation SourceControl XL after suspend
	USB: hub: Don't record a connect-change event during reset-resume
	USB: hub: Fix the broken detection of USB3 device in SMSC hub
	usb: dwc2: Fix SET/CLEAR_FEATURE and GET_STATUS flows
	usb: dwc3: gadget: Check for IOC/LST bit in TRB->ctrl fields
	staging: rtl8188eu: Fix potential security hole
	staging: rtl8188eu: Fix potential overuse of kernel memory
	staging: rtl8723bs: Fix potential security hole
	staging: rtl8723bs: Fix potential overuse of kernel memory
	powerpc/tm: Fix clearing MSR[TS] in current when reclaiming on signal delivery
	jbd2: fix ocfs2 corrupt when clearing block group bits
	x86/mce/amd: Publish the bank pointer only after setup has succeeded
	x86/mce/amd: Fix kobject lifetime
	x86/cpu/amd: Enable the fixed Instructions Retired counter IRPERF
	serial: 8250: Check UPF_IRQ_SHARED in advance
	tty/serial: atmel: manage shutdown in case of RS485 or ISO7816 mode
	tty: serial: imx: setup the correct sg entry for tx dma
	serdev: ttyport: restore client ops on deregistration
	MAINTAINERS: Update drm/i915 bug filing URL
	Revert "ipc,sem: remove uneeded sem_undo_list lock usage in exit_sem()"
	mm/memcontrol.c: lost css_put in memcg_expand_shrinker_maps()
	nvme-multipath: Fix memory leak with ana_log_buf
	genirq/irqdomain: Make sure all irq domain flags are distinct
	mm/vmscan.c: don't round up scan size for online memory cgroup
	drm/amdgpu/soc15: fix xclk for raven
	xhci: apply XHCI_PME_STUCK_QUIRK to Intel Comet Lake platforms
	KVM: nVMX: Don't emulate instructions in guest mode
	KVM: x86: don't notify userspace IOAPIC on edge-triggered interrupt EOI
	tty: serial: qcom_geni_serial: Fix UART hang
	tty: serial: qcom_geni_serial: Remove interrupt storm
	tty: serial: qcom_geni_serial: Remove use of *_relaxed() and mb()
	tty: serial: qcom_geni_serial: Remove set_rfr_wm() and related variables
	tty: serial: qcom_geni_serial: Remove xfer_mode variable
	tty: serial: qcom_geni_serial: Fix RX cancel command failure
	lib/stackdepot.c: fix global out-of-bounds in stack_slabs
	drm/nouveau/kms/gv100-: Re-set LUT after clearing for modesets
	ext4: fix a data race in EXT4_I(inode)->i_disksize
	ext4: add cond_resched() to __ext4_find_entry()
	ext4: fix potential race between online resizing and write operations
	ext4: fix potential race between s_group_info online resizing and access
	ext4: fix potential race between s_flex_groups online resizing and access
	ext4: fix mount failure with quota configured as module
	ext4: rename s_journal_flag_rwsem to s_writepages_rwsem
	ext4: fix race between writepages and enabling EXT4_EXTENTS_FL
	KVM: nVMX: Refactor IO bitmap checks into helper function
	KVM: nVMX: Check IO instruction VM-exit conditions
	KVM: nVMX: handle nested posted interrupts when apicv is disabled for L1
	KVM: apic: avoid calculating pending eoi from an uninitialized val
	btrfs: fix bytes_may_use underflow in prealloc error condtition
	btrfs: reset fs_root to NULL on error in open_ctree
	btrfs: do not check delayed items are empty for single transaction cleanup
	Btrfs: fix btrfs_wait_ordered_range() so that it waits for all ordered extents
	Revert "dmaengine: imx-sdma: Fix memory leak"
	scsi: Revert "RDMA/isert: Fix a recently introduced regression related to logout"
	scsi: Revert "target: iscsi: Wait for all commands to finish before freeing a session"
	usb: gadget: composite: Fix bMaxPower for SuperSpeedPlus
	usb: dwc2: Fix in ISOC request length checking
	staging: rtl8723bs: fix copy of overlapping memory
	staging: greybus: use after free in gb_audio_manager_remove_all()
	ecryptfs: replace BUG_ON with error handling code
	iommu/vt-d: Fix compile warning from intel-svm.h
	genirq/proc: Reject invalid affinity masks (again)
	bpf, offload: Replace bitwise AND by logical AND in bpf_prog_offload_info_fill
	ALSA: rawmidi: Avoid bit fields for state flags
	ALSA: seq: Avoid concurrent access to queue flags
	ALSA: seq: Fix concurrent access to queue current tick/time
	netfilter: xt_hashlimit: limit the max size of hashtable
	rxrpc: Fix call RCU cleanup using non-bh-safe locks
	ata: ahci: Add shutdown to freeze hardware resources of ahci
	xen: Enable interrupts when calling _cond_resched()
	s390/mm: Explicitly compare PAGE_DEFAULT_KEY against zero in storage_key_init_range
	Revert "char/random: silence a lockdep splat with printk()"
	Linux 4.19.107

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I74e3d49c54d4afcfa4049042163cb879c3de3100
Greg Kroah-Hartman committed on 2020-03-03 07:33:01 +01:00
103 changed files with 1171 additions and 581 deletions


@@ -7353,7 +7353,7 @@ M: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
M: Rodrigo Vivi <rodrigo.vivi@intel.com>
L: intel-gfx@lists.freedesktop.org
W: https://01.org/linuxgraphics/
B: https://01.org/linuxgraphics/documentation/how-report-bugs
B: https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs
C: irc://chat.freenode.net/intel-gfx
Q: http://patchwork.freedesktop.org/project/intel-gfx/
T: git git://anongit.freedesktop.org/drm-intel


@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 106
SUBLEVEL = 107
EXTRAVERSION =
NAME = "People's Front"


@@ -200,14 +200,27 @@ unsigned long get_tm_stackpointer(struct task_struct *tsk)
* normal/non-checkpointed stack pointer.
*/
unsigned long ret = tsk->thread.regs->gpr[1];
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BUG_ON(tsk != current);
if (MSR_TM_ACTIVE(tsk->thread.regs->msr)) {
preempt_disable();
tm_reclaim_current(TM_CAUSE_SIGNAL);
if (MSR_TM_TRANSACTIONAL(tsk->thread.regs->msr))
return tsk->thread.ckpt_regs.gpr[1];
ret = tsk->thread.ckpt_regs.gpr[1];
/*
* If we treclaim, we must clear the current thread's TM bits
* before re-enabling preemption. Otherwise we might be
* preempted and have the live MSR[TS] changed behind our back
* (tm_recheckpoint_new_task() would recheckpoint). Besides, we
* enter the signal handler in non-transactional state.
*/
tsk->thread.regs->msr &= ~MSR_TS_MASK;
preempt_enable();
}
#endif
return tsk->thread.regs->gpr[1];
return ret;
}


@@ -493,19 +493,11 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
*/
static int save_tm_user_regs(struct pt_regs *regs,
struct mcontext __user *frame,
struct mcontext __user *tm_frame, int sigret)
struct mcontext __user *tm_frame, int sigret,
unsigned long msr)
{
unsigned long msr = regs->msr;
WARN_ON(tm_suspend_disabled);
/* Remove TM bits from thread's MSR. The MSR in the sigcontext
* just indicates to userland that we were doing a transaction, but we
* don't want to return in transactional state. This also ensures
* that flush_fp_to_thread won't set TIF_RESTORE_TM again.
*/
regs->msr &= ~MSR_TS_MASK;
/* Save both sets of general registers */
if (save_general_regs(&current->thread.ckpt_regs, frame)
|| save_general_regs(regs, tm_frame))
@@ -916,6 +908,10 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
int sigret;
unsigned long tramp;
struct pt_regs *regs = tsk->thread.regs;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/* Save the thread's msr before get_tm_stackpointer() changes it */
unsigned long msr = regs->msr;
#endif
BUG_ON(tsk != current);
@@ -948,13 +944,13 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
tm_frame = &rt_sf->uc_transact.uc_mcontext;
if (MSR_TM_ACTIVE(regs->msr)) {
if (MSR_TM_ACTIVE(msr)) {
if (__put_user((unsigned long)&rt_sf->uc_transact,
&rt_sf->uc.uc_link) ||
__put_user((unsigned long)tm_frame,
&rt_sf->uc_transact.uc_regs))
goto badframe;
if (save_tm_user_regs(regs, frame, tm_frame, sigret))
if (save_tm_user_regs(regs, frame, tm_frame, sigret, msr))
goto badframe;
}
else
@@ -1365,6 +1361,10 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
int sigret;
unsigned long tramp;
struct pt_regs *regs = tsk->thread.regs;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/* Save the thread's msr before get_tm_stackpointer() changes it */
unsigned long msr = regs->msr;
#endif
BUG_ON(tsk != current);
@@ -1398,9 +1398,9 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
tm_mctx = &frame->mctx_transact;
if (MSR_TM_ACTIVE(regs->msr)) {
if (MSR_TM_ACTIVE(msr)) {
if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
sigret))
sigret, msr))
goto badframe;
}
else


@@ -196,7 +196,8 @@ static long setup_sigcontext(struct sigcontext __user *sc,
static long setup_tm_sigcontexts(struct sigcontext __user *sc,
struct sigcontext __user *tm_sc,
struct task_struct *tsk,
int signr, sigset_t *set, unsigned long handler)
int signr, sigset_t *set, unsigned long handler,
unsigned long msr)
{
/* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
* process never used altivec yet (MSR_VEC is zero in pt_regs of
@@ -211,12 +212,11 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
elf_vrreg_t __user *tm_v_regs = sigcontext_vmx_regs(tm_sc);
#endif
struct pt_regs *regs = tsk->thread.regs;
unsigned long msr = tsk->thread.regs->msr;
long err = 0;
BUG_ON(tsk != current);
BUG_ON(!MSR_TM_ACTIVE(regs->msr));
BUG_ON(!MSR_TM_ACTIVE(msr));
WARN_ON(tm_suspend_disabled);
@@ -226,13 +226,6 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
*/
msr |= tsk->thread.ckpt_regs.msr & (MSR_FP | MSR_VEC | MSR_VSX);
/* Remove TM bits from thread's MSR. The MSR in the sigcontext
* just indicates to userland that we were doing a transaction, but we
* don't want to return in transactional state. This also ensures
* that flush_fp_to_thread won't set TIF_RESTORE_TM again.
*/
regs->msr &= ~MSR_TS_MASK;
#ifdef CONFIG_ALTIVEC
err |= __put_user(v_regs, &sc->v_regs);
err |= __put_user(tm_v_regs, &tm_sc->v_regs);
@@ -803,6 +796,10 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
unsigned long newsp = 0;
long err = 0;
struct pt_regs *regs = tsk->thread.regs;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/* Save the thread's msr before get_tm_stackpointer() changes it */
unsigned long msr = regs->msr;
#endif
BUG_ON(tsk != current);
@@ -820,7 +817,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
err |= __put_user(0, &frame->uc.uc_flags);
err |= __save_altstack(&frame->uc.uc_stack, regs->gpr[1]);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (MSR_TM_ACTIVE(regs->msr)) {
if (MSR_TM_ACTIVE(msr)) {
/* The ucontext_t passed to userland points to the second
* ucontext_t (for transactional state) with its uc_link ptr.
*/
@@ -828,7 +825,8 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
err |= setup_tm_sigcontexts(&frame->uc.uc_mcontext,
&frame->uc_transact.uc_mcontext,
tsk, ksig->sig, NULL,
(unsigned long)ksig->ka.sa.sa_handler);
(unsigned long)ksig->ka.sa.sa_handler,
msr);
} else
#endif
{

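Taken together, the 32-bit and 64-bit signal hunks implement a single pattern: snapshot regs->msr before get_tm_stackpointer() reclaims the transaction (which now clears the live MSR[TS] bits), then make every later "were we transactional?" decision on the snapshot instead of the live register image. A minimal self-contained model of why the ordering matters (the mask value and helpers below are simplified stand-ins, not the real ppc definitions):

    #include <stdio.h>

    #define MSR_TS_MASK      0x3UL               /* stand-in for the TS bits */
    #define MSR_TM_ACTIVE(m) (((m) & MSR_TS_MASK) != 0)

    static unsigned long live_msr = 0x2;         /* transaction active */

    /* models get_tm_stackpointer(): reclaiming clears the live TS bits */
    static void reclaim(void)
    {
        live_msr &= ~MSR_TS_MASK;
    }

    int main(void)
    {
        unsigned long msr = live_msr;   /* snapshot first (the fix) */

        reclaim();                      /* live MSR[TS] is now clear */

        /* Testing live_msr at this point would wrongly take the
         * non-transactional path; the snapshot still says "active". */
        printf("transactional: %s\n", MSR_TM_ACTIVE(msr) ? "yes" : "no");
        return 0;
    }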

@@ -42,7 +42,7 @@ void __storage_key_init_range(unsigned long start, unsigned long end);
static inline void storage_key_init_range(unsigned long start, unsigned long end)
{
if (PAGE_DEFAULT_KEY)
if (PAGE_DEFAULT_KEY != 0)
__storage_key_init_range(start, end);
}


@@ -1040,7 +1040,7 @@ struct kvm_x86_ops {
void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
int (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);


@@ -455,6 +455,8 @@
#define MSR_K7_HWCR 0xc0010015
#define MSR_K7_HWCR_SMMLOCK_BIT 0
#define MSR_K7_HWCR_SMMLOCK BIT_ULL(MSR_K7_HWCR_SMMLOCK_BIT)
#define MSR_K7_HWCR_IRPERF_EN_BIT 30
#define MSR_K7_HWCR_IRPERF_EN BIT_ULL(MSR_K7_HWCR_IRPERF_EN_BIT)
#define MSR_K7_FID_VID_CTL 0xc0010041
#define MSR_K7_FID_VID_STATUS 0xc0010042


@@ -25,6 +25,7 @@
static const int amd_erratum_383[];
static const int amd_erratum_400[];
static const int amd_erratum_1054[];
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
/*
@@ -983,6 +984,15 @@ static void init_amd(struct cpuinfo_x86 *c)
/* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
if (!cpu_has(c, X86_FEATURE_XENPV))
set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
/*
* Turn on the Instructions Retired free counter on machines not
* susceptible to erratum #1054 "Instructions Retired Performance
* Counter May Be Inaccurate".
*/
if (cpu_has(c, X86_FEATURE_IRPERF) &&
!cpu_has_amd_erratum(c, amd_erratum_1054))
msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
}
#ifdef CONFIG_X86_32
@@ -1110,6 +1120,10 @@ static const int amd_erratum_400[] =
static const int amd_erratum_383[] =
AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
/* #1054: Instructions Retired Performance Counter May Be Inaccurate */
static const int amd_erratum_1054[] =
AMD_OSVW_ERRATUM(0, AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
{


@@ -1117,9 +1117,12 @@ static const struct sysfs_ops threshold_ops = {
.store = store,
};
static void threshold_block_release(struct kobject *kobj);
static struct kobj_type threshold_ktype = {
.sysfs_ops = &threshold_ops,
.default_attrs = default_attrs,
.release = threshold_block_release,
};
static const char *get_name(unsigned int bank, struct threshold_block *b)
@@ -1152,8 +1155,9 @@ static const char *get_name(unsigned int bank, struct threshold_block *b)
return buf_mcatype;
}
static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
unsigned int block, u32 address)
static int allocate_threshold_blocks(unsigned int cpu, struct threshold_bank *tb,
unsigned int bank, unsigned int block,
u32 address)
{
struct threshold_block *b = NULL;
u32 low, high;
@@ -1197,16 +1201,12 @@ static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
INIT_LIST_HEAD(&b->miscj);
if (per_cpu(threshold_banks, cpu)[bank]->blocks) {
list_add(&b->miscj,
&per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
} else {
per_cpu(threshold_banks, cpu)[bank]->blocks = b;
}
if (tb->blocks)
list_add(&b->miscj, &tb->blocks->miscj);
else
tb->blocks = b;
err = kobject_init_and_add(&b->kobj, &threshold_ktype,
per_cpu(threshold_banks, cpu)[bank]->kobj,
get_name(bank, b));
err = kobject_init_and_add(&b->kobj, &threshold_ktype, tb->kobj, get_name(bank, b));
if (err)
goto out_free;
recurse:
@@ -1214,7 +1214,7 @@ static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
if (!address)
return 0;
err = allocate_threshold_blocks(cpu, bank, block, address);
err = allocate_threshold_blocks(cpu, tb, bank, block, address);
if (err)
goto out_free;
@@ -1299,8 +1299,6 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
goto out_free;
}
per_cpu(threshold_banks, cpu)[bank] = b;
if (is_shared_bank(bank)) {
refcount_set(&b->cpus, 1);
@@ -1311,9 +1309,13 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
}
}
err = allocate_threshold_blocks(cpu, bank, 0, msr_ops.misc(bank));
if (!err)
goto out;
err = allocate_threshold_blocks(cpu, b, bank, 0, msr_ops.misc(bank));
if (err)
goto out_free;
per_cpu(threshold_banks, cpu)[bank] = b;
return 0;
out_free:
kfree(b);
@@ -1322,8 +1324,12 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
return err;
}
static void deallocate_threshold_block(unsigned int cpu,
unsigned int bank)
static void threshold_block_release(struct kobject *kobj)
{
kfree(to_block(kobj));
}
static void deallocate_threshold_block(unsigned int cpu, unsigned int bank)
{
struct threshold_block *pos = NULL;
struct threshold_block *tmp = NULL;
@@ -1333,13 +1339,11 @@ static void deallocate_threshold_block(unsigned int cpu,
return;
list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
kobject_put(&pos->kobj);
list_del(&pos->miscj);
kfree(pos);
kobject_put(&pos->kobj);
}
kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
kobject_put(&head->blocks->kobj);
}
static void __threshold_remove_blocks(struct threshold_bank *b)

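The structural change above follows a general kobject rule: an object embedding a struct kobject must be freed from its ktype->release() callback, since sysfs users can still hold references after the driver is done with it; a bare kfree() next to kobject_put() leaves a use-after-free window. A hedged sketch of the adopted pattern (names are illustrative, not the mainline ones):

    struct my_block {
        struct kobject kobj;
        /* driver data ... */
    };

    static void my_block_release(struct kobject *kobj)
    {
        /* runs only once the last reference is gone */
        kfree(container_of(kobj, struct my_block, kobj));
    }

    static struct kobj_type my_ktype = {
        .sysfs_ops = &my_sysfs_ops,
        .release   = my_block_release,
    };

    /* teardown: unlink while the object is certainly alive, then
     * drop the reference; release() performs the actual kfree() */
    list_del(&b->miscj);
    kobject_put(&b->kobj);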

@@ -427,7 +427,7 @@ void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu,
kvm_set_msi_irq(vcpu->kvm, entry, &irq);
if (irq.level && kvm_apic_match_dest(vcpu, NULL, 0,
if (irq.trig_mode && kvm_apic_match_dest(vcpu, NULL, 0,
irq.dest_id, irq.dest_mode))
__set_bit(irq.vector, ioapic_handled_vectors);
}


@@ -633,9 +633,11 @@ static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
{
u8 val;
if (pv_eoi_get_user(vcpu, &val) < 0)
if (pv_eoi_get_user(vcpu, &val) < 0) {
apic_debug("Can't read EOI MSR value: 0x%llx\n",
(unsigned long long)vcpu->arch.pv_eoi.msr_val);
return false;
}
return val & 0x1;
}
@@ -1060,11 +1062,8 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
apic_clear_vector(vector, apic->regs + APIC_TMR);
}
if (vcpu->arch.apicv_active)
kvm_x86_ops->deliver_posted_interrupt(vcpu, vector);
else {
if (kvm_x86_ops->deliver_posted_interrupt(vcpu, vector)) {
kvm_lapic_set_irr(vector, apic);
kvm_make_request(KVM_REQ_EVENT, vcpu);
kvm_vcpu_kick(vcpu);
}


@@ -5140,8 +5140,11 @@ static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
return;
}
static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
static int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
{
if (!vcpu->arch.apicv_active)
return -1;
kvm_lapic_set_irr(vec, vcpu->arch.apic);
smp_mb__after_atomic();
@@ -5150,6 +5153,8 @@ static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
kvm_cpu_get_apicid(vcpu->cpu));
else
kvm_vcpu_wake_up(vcpu);
return 0;
}
static bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)


@@ -5725,6 +5725,26 @@ static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
(ss.selector & SEGMENT_RPL_MASK));
}
static bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu,
unsigned int port, int size);
static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
unsigned long exit_qualification;
unsigned short port;
int size;
if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
port = exit_qualification >> 16;
size = (exit_qualification & 7) + 1;
return nested_vmx_check_io_bitmaps(vcpu, port, size);
}
/*
* Check if guest state is valid. Returns true if valid, false if
* not.
@@ -6264,24 +6284,29 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
* 2. If target vcpu isn't running(root mode), kick it to pick up the
* interrupt from PIR in next vmentry.
*/
static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
int r;
r = vmx_deliver_nested_posted_interrupt(vcpu, vector);
if (!r)
return;
return 0;
if (!vcpu->arch.apicv_active)
return -1;
if (pi_test_and_set_pir(vector, &vmx->pi_desc))
return;
return 0;
/* If a previous notification has sent the IPI, nothing to do. */
if (pi_test_and_set_on(&vmx->pi_desc))
return;
return 0;
if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false))
kvm_vcpu_kick(vcpu);
return 0;
}
/*
@@ -9469,23 +9494,17 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
static const int kvm_vmx_max_exit_handlers =
ARRAY_SIZE(kvm_vmx_exit_handlers);
static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
/*
* Return true if an IO instruction with the specified port and size should cause
* a VM-exit into L1.
*/
bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
int size)
{
unsigned long exit_qualification;
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
gpa_t bitmap, last_bitmap;
unsigned int port;
int size;
u8 b;
if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
port = exit_qualification >> 16;
size = (exit_qualification & 7) + 1;
last_bitmap = (gpa_t)-1;
b = -1;
@@ -13675,6 +13694,39 @@ static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
to_vmx(vcpu)->nested.sync_shadow_vmcs = true;
}
static int vmx_check_intercept_io(struct kvm_vcpu *vcpu,
struct x86_instruction_info *info)
{
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
unsigned short port;
bool intercept;
int size;
if (info->intercept == x86_intercept_in ||
info->intercept == x86_intercept_ins) {
port = info->src_val;
size = info->dst_bytes;
} else {
port = info->dst_val;
size = info->src_bytes;
}
/*
* If the 'use IO bitmaps' VM-execution control is 0, IO instruction
* VM-exits depend on the 'unconditional IO exiting' VM-execution
* control.
*
* Otherwise, IO instruction VM-exits are controlled by the IO bitmaps.
*/
if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
intercept = nested_cpu_has(vmcs12,
CPU_BASED_UNCOND_IO_EXITING);
else
intercept = nested_vmx_check_io_bitmaps(vcpu, port, size);
return intercept ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE;
}
static int vmx_check_intercept(struct kvm_vcpu *vcpu,
struct x86_instruction_info *info,
enum x86_intercept_stage stage)
@@ -13682,19 +13734,31 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
switch (info->intercept) {
/*
* RDPID causes #UD if disabled through secondary execution controls.
* Because it is marked as EmulateOnUD, we need to intercept it here.
*/
if (info->intercept == x86_intercept_rdtscp &&
!nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDTSCP)) {
ctxt->exception.vector = UD_VECTOR;
ctxt->exception.error_code_valid = false;
return X86EMUL_PROPAGATE_FAULT;
}
case x86_intercept_rdtscp:
if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDTSCP)) {
ctxt->exception.vector = UD_VECTOR;
ctxt->exception.error_code_valid = false;
return X86EMUL_PROPAGATE_FAULT;
}
break;
case x86_intercept_in:
case x86_intercept_ins:
case x86_intercept_out:
case x86_intercept_outs:
return vmx_check_intercept_io(vcpu, info);
/* TODO: check more intercepts... */
return X86EMUL_CONTINUE;
default:
break;
}
return X86EMUL_UNHANDLEABLE;
}
#ifdef CONFIG_X86_64

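For reference, the VMX IO bitmaps consulted above are two 4 KiB pages: bitmap A covers ports 0x0000-0x7fff, bitmap B covers 0x8000-0xffff, one bit per port, and a multi-byte access exits if any touched port's bit is set. A simplified userspace model of the lookup (the real nested_vmx_check_io_bitmaps() reads the guest pages through the GPAs in vmcs12 rather than plain pointers):

    #include <stdbool.h>
    #include <stdint.h>

    static bool io_intercepted(const uint8_t *bitmap_a, const uint8_t *bitmap_b,
                               uint16_t port, int size)
    {
        int i;

        for (i = 0; i < size; i++, port++) {
            const uint8_t *bm = port < 0x8000 ? bitmap_a : bitmap_b;
            uint16_t bit = port & 0x7fff;

            if (bm[bit / 8] & (1 << (bit % 8)))
                return true;   /* any accessed port set => VM-exit to L1 */
        }
        return false;
    }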

@@ -96,6 +96,7 @@ enum board_ids {
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static void ahci_remove_one(struct pci_dev *dev);
static void ahci_shutdown_one(struct pci_dev *dev);
static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
@@ -609,6 +610,7 @@ static struct pci_driver ahci_pci_driver = {
.id_table = ahci_pci_tbl,
.probe = ahci_init_one,
.remove = ahci_remove_one,
.shutdown = ahci_shutdown_one,
.driver = {
.pm = &ahci_pci_pm_ops,
},
@@ -1897,6 +1899,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
}
static void ahci_shutdown_one(struct pci_dev *pdev)
{
ata_pci_shutdown_one(pdev);
}
static void ahci_remove_one(struct pci_dev *pdev)
{
pm_runtime_get_noresume(&pdev->dev);


@@ -6780,6 +6780,26 @@ void ata_pci_remove_one(struct pci_dev *pdev)
ata_host_detach(host);
}
void ata_pci_shutdown_one(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
int i;
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
ap->pflags |= ATA_PFLAG_FROZEN;
/* Disable port interrupts */
if (ap->ops->freeze)
ap->ops->freeze(ap);
/* Stop the port DMA engines */
if (ap->ops->port_stop)
ap->ops->port_stop(ap);
}
}
/* move to PCI subsystem */
int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
{
@@ -7400,6 +7420,7 @@ EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_shutdown_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);

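The new hook rides the standard PCI shutdown path: the PCI core invokes a driver's .shutdown callback at reboot or kexec time, and its job is to quiesce DMA and interrupts without freeing state. A generic sketch of the wiring (driver names here are hypothetical):

    static void mydrv_shutdown(struct pci_dev *pdev)
    {
        /* stop DMA engines and mask interrupts; do not free state,
         * the machine is about to reboot or kexec into a new kernel */
    }

    static struct pci_driver mydrv_driver = {
        .name     = "mydrv",
        .id_table = mydrv_ids,
        .probe    = mydrv_probe,
        .remove   = mydrv_remove,
        .shutdown = mydrv_shutdown,
    };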

@@ -852,14 +852,17 @@ static void reset_fdc_info(int mode)
/* selects the fdc and drive, and enables the fdc's input/dma. */
static void set_fdc(int drive)
{
unsigned int new_fdc = fdc;
if (drive >= 0 && drive < N_DRIVE) {
fdc = FDC(drive);
new_fdc = FDC(drive);
current_drive = drive;
}
if (fdc != 1 && fdc != 0) {
if (new_fdc >= N_FDC) {
pr_info("bad fdc value\n");
return;
}
fdc = new_fdc;
set_dor(fdc, ~0, 8);
#if N_FDC > 1
set_dor(1 - fdc, ~8, 0);


@@ -1503,9 +1503,8 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
print_once = true;
#endif
if (__ratelimit(&unseeded_warning))
printk_deferred(KERN_NOTICE "random: %s called from %pS "
"with crng_init=%d\n", func_name, caller,
crng_init);
pr_notice("random: %s called from %pS with crng_init=%d\n",
func_name, caller, crng_init);
}
/*


@@ -738,8 +738,12 @@ static void sdma_start_desc(struct sdma_channel *sdmac)
return;
}
sdmac->desc = desc = to_sdma_desc(&vd->tx);
list_del(&vd->node);
/*
* Do not delete the node in desc_issued list in cyclic mode, otherwise
* the desc allocated will never be freed in vchan_dma_desc_free_list
*/
if (!(sdmac->flags & IMX_DMA_SG_LOOP))
list_del(&vd->node);
sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
@@ -1040,6 +1044,7 @@ static void sdma_channel_terminate_work(struct work_struct *work)
spin_lock_irqsave(&sdmac->vc.lock, flags);
vchan_get_all_descriptors(&sdmac->vc, &head);
sdmac->desc = NULL;
spin_unlock_irqrestore(&sdmac->vc.lock, flags);
vchan_dma_desc_free_list(&sdmac->vc, &head);
}
@@ -1047,19 +1052,11 @@ static void sdma_channel_terminate_work(struct work_struct *work)
static int sdma_disable_channel_async(struct dma_chan *chan)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
unsigned long flags;
spin_lock_irqsave(&sdmac->vc.lock, flags);
sdma_disable_channel(chan);
if (sdmac->desc) {
vchan_terminate_vdesc(&sdmac->desc->vd);
sdmac->desc = NULL;
if (sdmac->desc)
schedule_work(&sdmac->terminate_worker);
}
spin_unlock_irqrestore(&sdmac->vc.lock, flags);
return 0;
}

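The restored check matters because the virt-dma teardown path frees only what is still linked on the channel lists. A condensed sketch of that path (the helpers are the real ones from drivers/dma/virt-dma.h; error handling is elided):

    LIST_HEAD(head);
    unsigned long flags;

    spin_lock_irqsave(&vc->lock, flags);
    vchan_get_all_descriptors(vc, &head);   /* splices only linked nodes */
    spin_unlock_irqrestore(&vc->lock, flags);

    vchan_dma_desc_free_list(vc, &head);    /* a descriptor that was
                                             * list_del'ed earlier never
                                             * reaches here, so a cyclic
                                             * desc would simply leak */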

@@ -205,7 +205,12 @@ static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
static u32 soc15_get_xclk(struct amdgpu_device *adev)
{
return adev->clock.spll.reference_freq;
u32 reference_clock = adev->clock.spll.reference_freq;
if (adev->asic_type == CHIP_RAVEN)
return reference_clock / 4;
return reference_clock;
}


@@ -405,6 +405,8 @@ nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
asyw->clr.ntfy = armw->ntfy.handle != 0;
asyw->clr.sema = armw->sema.handle != 0;
asyw->clr.xlut = armw->xlut.handle != 0;
if (asyw->clr.xlut && asyw->visible)
asyw->set.xlut = asyw->xlut.handle != 0;
if (wndw->func->image_clr)
asyw->clr.image = armw->image.handle[0] != 0;
}


@@ -2584,6 +2584,17 @@ isert_wait4logout(struct isert_conn *isert_conn)
}
}
static void
isert_wait4cmds(struct iscsi_conn *conn)
{
isert_info("iscsi_conn %p\n", conn);
if (conn->sess) {
target_sess_cmd_list_set_waiting(conn->sess->se_sess);
target_wait_for_sess_cmds(conn->sess->se_sess);
}
}
/**
* isert_put_unsol_pending_cmds() - Drop commands waiting for
* unsolicitate dataout
@@ -2631,6 +2642,7 @@ static void isert_wait_conn(struct iscsi_conn *conn)
ib_drain_qp(isert_conn->qp);
isert_put_unsol_pending_cmds(conn);
isert_wait4cmds(conn);
isert_wait4logout(isert_conn);
queue_work(isert_release_wq, &isert_conn->release_work);


@@ -333,21 +333,19 @@ static void qcom_iommu_domain_free(struct iommu_domain *domain)
{
struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
if (WARN_ON(qcom_domain->iommu)) /* forgot to detach? */
return;
iommu_put_dma_cookie(domain);
/* NOTE: unmap can be called after client device is powered off,
* for example, with GPUs or anything involving dma-buf. So we
* cannot rely on the device_link. Make sure the IOMMU is on to
* avoid unclocked accesses in the TLB inv path:
*/
pm_runtime_get_sync(qcom_domain->iommu->dev);
free_io_pgtable_ops(qcom_domain->pgtbl_ops);
pm_runtime_put_sync(qcom_domain->iommu->dev);
if (qcom_domain->iommu) {
/*
* NOTE: unmap can be called after client device is powered
* off, for example, with GPUs or anything involving dma-buf.
* So we cannot rely on the device_link. Make sure the IOMMU
* is on to avoid unclocked accesses in the TLB inv path:
*/
pm_runtime_get_sync(qcom_domain->iommu->dev);
free_io_pgtable_ops(qcom_domain->pgtbl_ops);
pm_runtime_put_sync(qcom_domain->iommu->dev);
}
kfree(qcom_domain);
}
@@ -392,7 +390,7 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
unsigned i;
if (!qcom_domain->iommu)
if (WARN_ON(!qcom_domain->iommu))
return;
pm_runtime_get_sync(qcom_iommu->dev);
@@ -405,8 +403,6 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
ctx->domain = NULL;
}
pm_runtime_put_sync(qcom_iommu->dev);
qcom_domain->iommu = NULL;
}
static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,


@@ -569,6 +569,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
}
INIT_WORK(&ctrl->ana_work, nvme_ana_work);
kfree(ctrl->ana_log_buf);
ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL);
if (!ctrl->ana_log_buf) {
error = -ENOMEM;


@@ -350,8 +350,23 @@ static inline vm_flags_t calc_vm_may_flags(unsigned long prot)
_calc_vm_trans(prot, PROT_EXEC, VM_MAYEXEC);
}
static int ashmem_vmfile_mmap(struct file *file, struct vm_area_struct *vma)
{
/* do not allow to mmap ashmem backing shmem file directly */
return -EPERM;
}
static unsigned long
ashmem_vmfile_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags)
{
return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
}
static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
{
static struct file_operations vmfile_fops;
struct ashmem_area *asma = file->private_data;
int ret = 0;
@@ -392,6 +407,19 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
}
vmfile->f_mode |= FMODE_LSEEK;
asma->file = vmfile;
/*
* override mmap operation of the vmfile so that it can't be
* remapped which would lead to creation of a new vma with no
* asma permission checks. Have to override get_unmapped_area
* as well to prevent VM_BUG_ON check for f_ops modification.
*/
if (!vmfile_fops.mmap) {
vmfile_fops = *vmfile->f_op;
vmfile_fops.mmap = ashmem_vmfile_mmap;
vmfile_fops.get_unmapped_area =
ashmem_vmfile_get_unmapped_area;
}
vmfile->f_op = &vmfile_fops;
}
get_file(asma->file);

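In short, the driver clones shmem's file_operations once, patches the two entry points that could be used to map the backing file while bypassing ashmem's permission checks, and points every ashmem-backed vmfile at the patched copy. A condensed restatement of the copy-and-override idiom used above (vmfile_fops is a function-local static, so the clone happens once for all ashmem files):

    static struct file_operations vmfile_fops;       /* starts zeroed */

    if (!vmfile_fops.mmap) {                         /* first caller clones */
        vmfile_fops = *vmfile->f_op;                 /* copy shmem's ops */
        vmfile_fops.mmap = ashmem_vmfile_mmap;       /* direct mmap: -EPERM */
        vmfile_fops.get_unmapped_area =
            ashmem_vmfile_get_unmapped_area;         /* keep mmap() path valid */
    }
    vmfile->f_op = &vmfile_fops;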

@@ -89,8 +89,8 @@ void gb_audio_manager_remove_all(void)
list_for_each_entry_safe(module, next, &modules_list, list) {
list_del(&module->list);
kobject_put(&module->kobj);
ida_simple_remove(&module_id, module->id);
kobject_put(&module->kobj);
}
is_empty = list_empty(&modules_list);


@@ -2026,7 +2026,7 @@ static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
struct ieee_param *param;
uint ret = 0;
if (p->length < sizeof(struct ieee_param) || !p->pointer) {
if (!p->pointer || p->length != sizeof(struct ieee_param)) {
ret = -EINVAL;
goto out;
}
@@ -2819,7 +2819,7 @@ static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p)
goto out;
}
if (!p->pointer) {
if (!p->pointer || p->length != sizeof(struct ieee_param)) {
ret = -EINVAL;
goto out;
}


@@ -478,14 +478,13 @@ int rtl8723bs_xmit_thread(void *context)
s32 ret;
struct adapter *padapter;
struct xmit_priv *pxmitpriv;
u8 thread_name[20] = "RTWHALXT";
u8 thread_name[20];
ret = _SUCCESS;
padapter = context;
pxmitpriv = &padapter->xmitpriv;
rtw_sprintf(thread_name, 20, "%s-"ADPT_FMT, thread_name, ADPT_ARG(padapter));
rtw_sprintf(thread_name, 20, "RTWHALXT-" ADPT_FMT, ADPT_ARG(padapter));
thread_enter(thread_name);
DBG_871X("start "FUNC_ADPT_FMT"\n", FUNC_ADPT_ARG(padapter));


@@ -3400,7 +3400,7 @@ static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
/* down(&ieee->wx_sem); */
if (p->length < sizeof(struct ieee_param) || !p->pointer) {
if (!p->pointer || p->length != sizeof(struct ieee_param)) {
ret = -EINVAL;
goto out;
}
@@ -4236,7 +4236,7 @@ static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p)
/* if (p->length < sizeof(struct ieee_param) || !p->pointer) { */
if (!p->pointer) {
if (!p->pointer || p->length != sizeof(*param)) {
ret = -EINVAL;
goto out;
}


@@ -130,7 +130,7 @@ int vnt_rx_data(struct vnt_private *priv, struct vnt_rcb *ptr_rcb,
vnt_rf_rssi_to_dbm(priv, *rssi, &rx_dbm);
priv->bb_pre_ed_rssi = (u8)rx_dbm + 1;
priv->bb_pre_ed_rssi = (u8)-rx_dbm + 1;
priv->current_rssi = priv->bb_pre_ed_rssi;
skb_pull(skb, 8);


@@ -1157,9 +1157,7 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
hdr->cmdsn, be32_to_cpu(hdr->data_length), payload_length,
conn->cid);
if (target_get_sess_cmd(&cmd->se_cmd, true) < 0)
return iscsit_add_reject_cmd(cmd,
ISCSI_REASON_WAITING_FOR_LOGOUT, buf);
target_get_sess_cmd(&cmd->se_cmd, true);
cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd,
scsilun_to_int(&hdr->lun));
@@ -2000,9 +1998,7 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
conn->sess->se_sess, 0, DMA_NONE,
TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
if (target_get_sess_cmd(&cmd->se_cmd, true) < 0)
return iscsit_add_reject_cmd(cmd,
ISCSI_REASON_WAITING_FOR_LOGOUT, buf);
target_get_sess_cmd(&cmd->se_cmd, true);
/*
* TASK_REASSIGN for ERL=2 / connection stays inside of
@@ -4123,6 +4119,9 @@ int iscsit_close_connection(
iscsit_stop_nopin_response_timer(conn);
iscsit_stop_nopin_timer(conn);
if (conn->conn_transport->iscsit_wait_conn)
conn->conn_transport->iscsit_wait_conn(conn);
/*
* During Connection recovery drop unacknowledged out of order
* commands for this connection, and prepare the other commands
@@ -4205,11 +4204,6 @@ int iscsit_close_connection(
* must wait until they have completed.
*/
iscsit_check_conn_usage_count(conn);
target_sess_cmd_list_set_waiting(sess->se_sess);
target_wait_for_sess_cmds(sess->se_sess);
if (conn->conn_transport->iscsit_wait_conn)
conn->conn_transport->iscsit_wait_conn(conn);
ahash_request_free(conn->conn_tx_hash);
if (conn->conn_rx_hash) {


@@ -264,6 +264,12 @@ static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
return ret;
}
static int tb_switch_nvm_no_read(void *priv, unsigned int offset, void *val,
size_t bytes)
{
return -EPERM;
}
static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
size_t bytes)
{
@@ -309,6 +315,7 @@ static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id,
config.read_only = true;
} else {
config.name = "nvm_non_active";
config.reg_read = tb_switch_nvm_no_read;
config.reg_write = tb_switch_nvm_write;
config.root_only = true;
}


@@ -265,7 +265,6 @@ struct device *serdev_tty_port_register(struct tty_port *port,
struct device *parent,
struct tty_driver *drv, int idx)
{
const struct tty_port_client_operations *old_ops;
struct serdev_controller *ctrl;
struct serport *serport;
int ret;
@@ -289,7 +288,6 @@ struct device *serdev_tty_port_register(struct tty_port *port,
ctrl->ops = &ctrl_ops;
old_ops = port->client_ops;
port->client_ops = &client_ops;
port->client_data = ctrl;
@@ -302,7 +300,7 @@ struct device *serdev_tty_port_register(struct tty_port *port,
err_reset_data:
port->client_data = NULL;
port->client_ops = old_ops;
port->client_ops = &tty_port_default_client_ops;
serdev_controller_put(ctrl);
return ERR_PTR(ret);
@@ -317,8 +315,8 @@ int serdev_tty_port_unregister(struct tty_port *port)
return -ENODEV;
serdev_controller_remove(ctrl);
port->client_ops = NULL;
port->client_data = NULL;
port->client_ops = &tty_port_default_client_ops;
serdev_controller_put(ctrl);
return 0;


@@ -375,7 +375,6 @@ static int aspeed_vuart_probe(struct platform_device *pdev)
port.port.line = rc;
port.port.irq = irq_of_parse_and_map(np, 0);
port.port.irqflags = IRQF_SHARED;
port.port.handle_irq = aspeed_vuart_handle_irq;
port.port.iotype = UPIO_MEM;
port.port.type = PORT_16550A;


@@ -177,7 +177,7 @@ static int serial_link_irq_chain(struct uart_8250_port *up)
struct hlist_head *h;
struct hlist_node *n;
struct irq_info *i;
int ret, irq_flags = up->port.flags & UPF_SHARE_IRQ ? IRQF_SHARED : 0;
int ret;
mutex_lock(&hash_mutex);
@@ -212,9 +212,8 @@ static int serial_link_irq_chain(struct uart_8250_port *up)
INIT_LIST_HEAD(&up->list);
i->head = &up->list;
spin_unlock_irq(&i->lock);
irq_flags |= up->port.irqflags;
ret = request_irq(up->port.irq, serial8250_interrupt,
irq_flags, up->port.name, i);
up->port.irqflags, up->port.name, i);
if (ret < 0)
serial_do_unlink(i, up);
}


@@ -171,7 +171,6 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
port->type = type;
port->uartclk = clk;
port->irqflags |= IRQF_SHARED;
if (of_property_read_bool(np, "no-loopback-test"))
port->flags |= UPF_SKIP_TEST;


@@ -2253,6 +2253,10 @@ int serial8250_do_startup(struct uart_port *port)
}
}
/* Check if we need to have shared IRQs */
if (port->irq && (up->port.flags & UPF_SHARE_IRQ))
up->port.irqflags |= IRQF_SHARED;
if (port->irq && !(up->port.flags & UPF_NO_THRE_TEST)) {
unsigned char iir1;
/*

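The rule enforced by this pair of changes: every handler attached to a shared interrupt line must pass IRQF_SHARED (plus a unique dev_id) to request_irq(), and genirq rejects mismatched flags on a busy line with -EBUSY, so the flag has to be decided once, before the first request. A minimal sketch of the rule (irq number and names are made up):

    /* both users of line 4 agree on IRQF_SHARED: both succeed */
    ret = request_irq(4, uart_a_isr, IRQF_SHARED, "uart-a", dev_a);
    ret = request_irq(4, uart_b_isr, IRQF_SHARED, "uart-b", dev_b);

    /* a later request on the same busy line without IRQF_SHARED
     * fails with -EBUSY, as would mixing flags the other way */
    ret = request_irq(4, uart_c_isr, 0, "uart-c", dev_c);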

@@ -490,7 +490,8 @@ static void atmel_stop_tx(struct uart_port *port)
atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
if (atmel_uart_is_half_duplex(port))
atmel_start_rx(port);
if (!atomic_read(&atmel_port->tasklet_shutdown))
atmel_start_rx(port);
}


@@ -608,7 +608,7 @@ static void imx_uart_dma_tx(struct imx_port *sport)
sport->tx_bytes = uart_circ_chars_pending(xmit);
if (xmit->tail < xmit->head) {
if (xmit->tail < xmit->head || xmit->head == 0) {
sport->dma_tx_nents = 1;
sg_init_one(sgl, xmit->buf + xmit->tail, sport->tx_bytes);
} else {


@@ -85,7 +85,7 @@
#define DEF_FIFO_DEPTH_WORDS 16
#define DEF_TX_WM 2
#define DEF_FIFO_WIDTH_BITS 32
#define UART_CONSOLE_RX_WM 2
#define UART_RX_WM 2
#define MAX_LOOPBACK_CFG 3
#ifdef CONFIG_CONSOLE_POLL
@@ -101,10 +101,6 @@ struct qcom_geni_serial_port {
u32 tx_fifo_depth;
u32 tx_fifo_width;
u32 rx_fifo_depth;
u32 tx_wm;
u32 rx_wm;
u32 rx_rfr;
enum geni_se_xfer_mode xfer_mode;
bool setup;
int (*handle_rx)(struct uart_port *uport, u32 bytes, bool drop);
unsigned int baud;
@@ -125,6 +121,7 @@ static int handle_rx_console(struct uart_port *uport, u32 bytes, bool drop);
static int handle_rx_uart(struct uart_port *uport, u32 bytes, bool drop);
static unsigned int qcom_geni_serial_tx_empty(struct uart_port *port);
static void qcom_geni_serial_stop_rx(struct uart_port *uport);
static void qcom_geni_serial_handle_rx(struct uart_port *uport, bool drop);
static const unsigned long root_freq[] = {7372800, 14745600, 19200000, 29491200,
32000000, 48000000, 64000000, 80000000,
@@ -226,7 +223,7 @@ static unsigned int qcom_geni_serial_get_mctrl(struct uart_port *uport)
if (uart_console(uport)) {
mctrl |= TIOCM_CTS;
} else {
geni_ios = readl_relaxed(uport->membase + SE_GENI_IOS);
geni_ios = readl(uport->membase + SE_GENI_IOS);
if (!(geni_ios & IO2_DATA_IN))
mctrl |= TIOCM_CTS;
}
@@ -244,7 +241,7 @@ static void qcom_geni_serial_set_mctrl(struct uart_port *uport,
if (!(mctrl & TIOCM_RTS))
uart_manual_rfr = UART_MANUAL_RFR_EN | UART_RFR_NOT_READY;
writel_relaxed(uart_manual_rfr, uport->membase + SE_UART_MANUAL_RFR);
writel(uart_manual_rfr, uport->membase + SE_UART_MANUAL_RFR);
}
static const char *qcom_geni_serial_get_type(struct uart_port *uport)
@@ -273,9 +270,6 @@ static bool qcom_geni_serial_poll_bit(struct uart_port *uport,
unsigned int fifo_bits;
unsigned long timeout_us = 20000;
/* Ensure polling is not re-ordered before the prior writes/reads */
mb();
if (uport->private_data) {
port = to_dev_port(uport, uport);
baud = port->baud;
@@ -295,7 +289,7 @@ static bool qcom_geni_serial_poll_bit(struct uart_port *uport,
*/
timeout_us = DIV_ROUND_UP(timeout_us, 10) * 10;
while (timeout_us) {
reg = readl_relaxed(uport->membase + offset);
reg = readl(uport->membase + offset);
if ((bool)(reg & field) == set)
return true;
udelay(10);
@@ -308,7 +302,7 @@ static void qcom_geni_serial_setup_tx(struct uart_port *uport, u32 xmit_size)
{
u32 m_cmd;
writel_relaxed(xmit_size, uport->membase + SE_UART_TX_TRANS_LEN);
writel(xmit_size, uport->membase + SE_UART_TX_TRANS_LEN);
m_cmd = UART_START_TX << M_OPCODE_SHFT;
writel(m_cmd, uport->membase + SE_GENI_M_CMD0);
}
@@ -321,13 +315,13 @@ static void qcom_geni_serial_poll_tx_done(struct uart_port *uport)
done = qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
M_CMD_DONE_EN, true);
if (!done) {
writel_relaxed(M_GENI_CMD_ABORT, uport->membase +
writel(M_GENI_CMD_ABORT, uport->membase +
SE_GENI_M_CMD_CTRL_REG);
irq_clear |= M_CMD_ABORT_EN;
qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
M_CMD_ABORT_EN, true);
}
writel_relaxed(irq_clear, uport->membase + SE_GENI_M_IRQ_CLEAR);
writel(irq_clear, uport->membase + SE_GENI_M_IRQ_CLEAR);
}
static void qcom_geni_serial_abort_rx(struct uart_port *uport)
@@ -337,8 +331,8 @@ static void qcom_geni_serial_abort_rx(struct uart_port *uport)
writel(S_GENI_CMD_ABORT, uport->membase + SE_GENI_S_CMD_CTRL_REG);
qcom_geni_serial_poll_bit(uport, SE_GENI_S_CMD_CTRL_REG,
S_GENI_CMD_ABORT, false);
writel_relaxed(irq_clear, uport->membase + SE_GENI_S_IRQ_CLEAR);
writel_relaxed(FORCE_DEFAULT, uport->membase + GENI_FORCE_DEFAULT_REG);
writel(irq_clear, uport->membase + SE_GENI_S_IRQ_CLEAR);
writel(FORCE_DEFAULT, uport->membase + GENI_FORCE_DEFAULT_REG);
}
#ifdef CONFIG_CONSOLE_POLL
@@ -347,19 +341,13 @@ static int qcom_geni_serial_get_char(struct uart_port *uport)
u32 rx_fifo;
u32 status;
status = readl_relaxed(uport->membase + SE_GENI_M_IRQ_STATUS);
writel_relaxed(status, uport->membase + SE_GENI_M_IRQ_CLEAR);
status = readl(uport->membase + SE_GENI_M_IRQ_STATUS);
writel(status, uport->membase + SE_GENI_M_IRQ_CLEAR);
status = readl_relaxed(uport->membase + SE_GENI_S_IRQ_STATUS);
writel_relaxed(status, uport->membase + SE_GENI_S_IRQ_CLEAR);
status = readl(uport->membase + SE_GENI_S_IRQ_STATUS);
writel(status, uport->membase + SE_GENI_S_IRQ_CLEAR);
/*
* Ensure the writes to clear interrupts is not re-ordered after
* reading the data.
*/
mb();
status = readl_relaxed(uport->membase + SE_GENI_RX_FIFO_STATUS);
status = readl(uport->membase + SE_GENI_RX_FIFO_STATUS);
if (!(status & RX_FIFO_WC_MSK))
return NO_POLL_CHAR;
@@ -370,15 +358,12 @@ static int qcom_geni_serial_get_char(struct uart_port *uport)
static void qcom_geni_serial_poll_put_char(struct uart_port *uport,
unsigned char c)
{
struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
writel_relaxed(port->tx_wm, uport->membase + SE_GENI_TX_WATERMARK_REG);
writel(DEF_TX_WM, uport->membase + SE_GENI_TX_WATERMARK_REG);
qcom_geni_serial_setup_tx(uport, 1);
WARN_ON(!qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
M_TX_FIFO_WATERMARK_EN, true));
writel_relaxed(c, uport->membase + SE_GENI_TX_FIFOn);
writel_relaxed(M_TX_FIFO_WATERMARK_EN, uport->membase +
SE_GENI_M_IRQ_CLEAR);
writel(c, uport->membase + SE_GENI_TX_FIFOn);
writel(M_TX_FIFO_WATERMARK_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
qcom_geni_serial_poll_tx_done(uport);
}
#endif
@@ -386,7 +371,7 @@ static void qcom_geni_serial_poll_put_char(struct uart_port *uport,
#ifdef CONFIG_SERIAL_QCOM_GENI_CONSOLE
static void qcom_geni_serial_wr_char(struct uart_port *uport, int ch)
{
writel_relaxed(ch, uport->membase + SE_GENI_TX_FIFOn);
writel(ch, uport->membase + SE_GENI_TX_FIFOn);
}
static void
@@ -405,7 +390,7 @@ __qcom_geni_serial_console_write(struct uart_port *uport, const char *s,
bytes_to_send++;
}
writel_relaxed(DEF_TX_WM, uport->membase + SE_GENI_TX_WATERMARK_REG);
writel(DEF_TX_WM, uport->membase + SE_GENI_TX_WATERMARK_REG);
qcom_geni_serial_setup_tx(uport, bytes_to_send);
for (i = 0; i < count; ) {
size_t chars_to_write = 0;
@@ -423,7 +408,7 @@ __qcom_geni_serial_console_write(struct uart_port *uport, const char *s,
chars_to_write = min_t(size_t, count - i, avail / 2);
uart_console_write(uport, s + i, chars_to_write,
qcom_geni_serial_wr_char);
writel_relaxed(M_TX_FIFO_WATERMARK_EN, uport->membase +
writel(M_TX_FIFO_WATERMARK_EN, uport->membase +
SE_GENI_M_IRQ_CLEAR);
i += chars_to_write;
}
@@ -438,6 +423,7 @@ static void qcom_geni_serial_console_write(struct console *co, const char *s,
bool locked = true;
unsigned long flags;
u32 geni_status;
u32 irq_en;
WARN_ON(co->index < 0 || co->index >= GENI_UART_CONS_PORTS);
@@ -451,7 +437,7 @@ static void qcom_geni_serial_console_write(struct console *co, const char *s,
else
spin_lock_irqsave(&uport->lock, flags);
geni_status = readl_relaxed(uport->membase + SE_GENI_STATUS);
geni_status = readl(uport->membase + SE_GENI_STATUS);
/* Cancel the current write to log the fault */
if (!locked) {
@@ -461,17 +447,22 @@ static void qcom_geni_serial_console_write(struct console *co, const char *s,
geni_se_abort_m_cmd(&port->se);
qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
M_CMD_ABORT_EN, true);
writel_relaxed(M_CMD_ABORT_EN, uport->membase +
writel(M_CMD_ABORT_EN, uport->membase +
SE_GENI_M_IRQ_CLEAR);
}
writel_relaxed(M_CMD_CANCEL_EN, uport->membase +
SE_GENI_M_IRQ_CLEAR);
writel(M_CMD_CANCEL_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
} else if ((geni_status & M_GENI_CMD_ACTIVE) && !port->tx_remaining) {
/*
* It seems we can't interrupt existing transfers if all data
* has been sent, in which case we need to look for done first.
*/
qcom_geni_serial_poll_tx_done(uport);
if (uart_circ_chars_pending(&uport->state->xmit)) {
irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
writel(irq_en | M_TX_FIFO_WATERMARK_EN,
uport->membase + SE_GENI_M_IRQ_EN);
}
}
__qcom_geni_serial_console_write(uport, s, count);
@@ -556,29 +547,20 @@ static int handle_rx_uart(struct uart_port *uport, u32 bytes, bool drop)
static void qcom_geni_serial_start_tx(struct uart_port *uport)
{
u32 irq_en;
struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
u32 status;
if (port->xfer_mode == GENI_SE_FIFO) {
/*
* readl ensures reading & writing of IRQ_EN register
* is not re-ordered before checking the status of the
* Serial Engine.
*/
status = readl(uport->membase + SE_GENI_STATUS);
if (status & M_GENI_CMD_ACTIVE)
return;
status = readl(uport->membase + SE_GENI_STATUS);
if (status & M_GENI_CMD_ACTIVE)
return;
if (!qcom_geni_serial_tx_empty(uport))
return;
if (!qcom_geni_serial_tx_empty(uport))
return;
irq_en = readl_relaxed(uport->membase + SE_GENI_M_IRQ_EN);
irq_en |= M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN;
irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
irq_en |= M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN;
writel_relaxed(port->tx_wm, uport->membase +
SE_GENI_TX_WATERMARK_REG);
writel_relaxed(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
}
writel(DEF_TX_WM, uport->membase + SE_GENI_TX_WATERMARK_REG);
writel(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
}
static void qcom_geni_serial_stop_tx(struct uart_port *uport)
@@ -587,35 +569,24 @@ static void qcom_geni_serial_stop_tx(struct uart_port *uport)
u32 status;
struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
irq_en = readl_relaxed(uport->membase + SE_GENI_M_IRQ_EN);
irq_en &= ~M_CMD_DONE_EN;
if (port->xfer_mode == GENI_SE_FIFO) {
irq_en &= ~M_TX_FIFO_WATERMARK_EN;
writel_relaxed(0, uport->membase +
SE_GENI_TX_WATERMARK_REG);
}
writel_relaxed(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
status = readl_relaxed(uport->membase + SE_GENI_STATUS);
irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
irq_en &= ~(M_CMD_DONE_EN | M_TX_FIFO_WATERMARK_EN);
writel(0, uport->membase + SE_GENI_TX_WATERMARK_REG);
writel(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
status = readl(uport->membase + SE_GENI_STATUS);
/* Possible stop tx is called multiple times. */
if (!(status & M_GENI_CMD_ACTIVE))
return;
/*
* Ensure cancel command write is not re-ordered before checking
* the status of the Primary Sequencer.
*/
mb();
geni_se_cancel_m_cmd(&port->se);
if (!qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
M_CMD_CANCEL_EN, true)) {
geni_se_abort_m_cmd(&port->se);
qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
M_CMD_ABORT_EN, true);
writel_relaxed(M_CMD_ABORT_EN, uport->membase +
SE_GENI_M_IRQ_CLEAR);
writel(M_CMD_ABORT_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
}
writel_relaxed(M_CMD_CANCEL_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
writel(M_CMD_CANCEL_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
}
static void qcom_geni_serial_start_rx(struct uart_port *uport)
@@ -624,27 +595,19 @@ static void qcom_geni_serial_start_rx(struct uart_port *uport)
u32 status;
struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
status = readl_relaxed(uport->membase + SE_GENI_STATUS);
status = readl(uport->membase + SE_GENI_STATUS);
if (status & S_GENI_CMD_ACTIVE)
qcom_geni_serial_stop_rx(uport);
/*
* Ensure setup command write is not re-ordered before checking
* the status of the Secondary Sequencer.
*/
mb();
geni_se_setup_s_cmd(&port->se, UART_START_READ, 0);
if (port->xfer_mode == GENI_SE_FIFO) {
irq_en = readl_relaxed(uport->membase + SE_GENI_S_IRQ_EN);
irq_en |= S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN;
writel_relaxed(irq_en, uport->membase + SE_GENI_S_IRQ_EN);
irq_en = readl(uport->membase + SE_GENI_S_IRQ_EN);
irq_en |= S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN;
writel(irq_en, uport->membase + SE_GENI_S_IRQ_EN);
irq_en = readl_relaxed(uport->membase + SE_GENI_M_IRQ_EN);
irq_en |= M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN;
writel_relaxed(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
}
irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
irq_en |= M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN;
writel(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
}
static void qcom_geni_serial_stop_rx(struct uart_port *uport)
@@ -652,34 +615,35 @@ static void qcom_geni_serial_stop_rx(struct uart_port *uport)
u32 irq_en;
u32 status;
struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
u32 irq_clear = S_CMD_DONE_EN;
u32 s_irq_status;
if (port->xfer_mode == GENI_SE_FIFO) {
irq_en = readl_relaxed(uport->membase + SE_GENI_S_IRQ_EN);
irq_en &= ~(S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN);
writel_relaxed(irq_en, uport->membase + SE_GENI_S_IRQ_EN);
irq_en = readl(uport->membase + SE_GENI_S_IRQ_EN);
irq_en &= ~(S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN);
writel(irq_en, uport->membase + SE_GENI_S_IRQ_EN);
irq_en = readl_relaxed(uport->membase + SE_GENI_M_IRQ_EN);
irq_en &= ~(M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN);
writel_relaxed(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
}
irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
irq_en &= ~(M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN);
writel(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
status = readl_relaxed(uport->membase + SE_GENI_STATUS);
status = readl(uport->membase + SE_GENI_STATUS);
/* Possible stop rx is called multiple times. */
if (!(status & S_GENI_CMD_ACTIVE))
return;
/*
* Ensure cancel command write is not re-ordered before checking
* the status of the Secondary Sequencer.
*/
mb();
geni_se_cancel_s_cmd(&port->se);
qcom_geni_serial_poll_bit(uport, SE_GENI_S_CMD_CTRL_REG,
S_GENI_CMD_CANCEL, false);
status = readl_relaxed(uport->membase + SE_GENI_STATUS);
writel_relaxed(irq_clear, uport->membase + SE_GENI_S_IRQ_CLEAR);
qcom_geni_serial_poll_bit(uport, SE_GENI_S_IRQ_STATUS,
S_CMD_CANCEL_EN, true);
/*
* If timeout occurs secondary engine remains active
* and Abort sequence is executed.
*/
s_irq_status = readl(uport->membase + SE_GENI_S_IRQ_STATUS);
/* Flush the Rx buffer */
if (s_irq_status & S_RX_FIFO_LAST_EN)
qcom_geni_serial_handle_rx(uport, true);
writel(s_irq_status, uport->membase + SE_GENI_S_IRQ_CLEAR);
status = readl(uport->membase + SE_GENI_STATUS);
if (status & S_GENI_CMD_ACTIVE)
qcom_geni_serial_abort_rx(uport);
}
@@ -693,7 +657,7 @@ static void qcom_geni_serial_handle_rx(struct uart_port *uport, bool drop)
u32 total_bytes;
struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
status = readl_relaxed(uport->membase + SE_GENI_RX_FIFO_STATUS);
status = readl(uport->membase + SE_GENI_RX_FIFO_STATUS);
word_cnt = status & RX_FIFO_WC_MSK;
last_word_partial = status & RX_LAST;
last_word_byte_cnt = (status & RX_LAST_BYTE_VALID_MSK) >>
@@ -719,10 +683,11 @@ static void qcom_geni_serial_handle_tx(struct uart_port *uport, bool done,
size_t pending;
int i;
u32 status;
u32 irq_en;
unsigned int chunk;
int tail;
status = readl_relaxed(uport->membase + SE_GENI_TX_FIFO_STATUS);
status = readl(uport->membase + SE_GENI_TX_FIFO_STATUS);
/* Complete the current tx command before taking newly added data */
if (active)
@@ -747,6 +712,11 @@ static void qcom_geni_serial_handle_tx(struct uart_port *uport, bool done,
if (!port->tx_remaining) {
qcom_geni_serial_setup_tx(uport, pending);
port->tx_remaining = pending;
irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
if (!(irq_en & M_TX_FIFO_WATERMARK_EN))
writel(irq_en | M_TX_FIFO_WATERMARK_EN,
uport->membase + SE_GENI_M_IRQ_EN);
}
remaining = chunk;
@@ -770,7 +740,23 @@ static void qcom_geni_serial_handle_tx(struct uart_port *uport, bool done,
}
xmit->tail = tail & (UART_XMIT_SIZE - 1);
/*
* The tx fifo watermark is level triggered and latched. Though we had
* cleared it in qcom_geni_serial_isr it will have already reasserted
* so we must clear it again here after our writes.
*/
writel(M_TX_FIFO_WATERMARK_EN,
uport->membase + SE_GENI_M_IRQ_CLEAR);
out_write_wakeup:
if (!port->tx_remaining) {
irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
if (irq_en & M_TX_FIFO_WATERMARK_EN)
writel(irq_en & ~M_TX_FIFO_WATERMARK_EN,
uport->membase + SE_GENI_M_IRQ_EN);
}
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(uport);
}
@@ -791,12 +777,12 @@ static irqreturn_t qcom_geni_serial_isr(int isr, void *dev)
return IRQ_NONE;
spin_lock_irqsave(&uport->lock, flags);
m_irq_status = readl_relaxed(uport->membase + SE_GENI_M_IRQ_STATUS);
s_irq_status = readl_relaxed(uport->membase + SE_GENI_S_IRQ_STATUS);
geni_status = readl_relaxed(uport->membase + SE_GENI_STATUS);
m_irq_en = readl_relaxed(uport->membase + SE_GENI_M_IRQ_EN);
writel_relaxed(m_irq_status, uport->membase + SE_GENI_M_IRQ_CLEAR);
writel_relaxed(s_irq_status, uport->membase + SE_GENI_S_IRQ_CLEAR);
m_irq_status = readl(uport->membase + SE_GENI_M_IRQ_STATUS);
s_irq_status = readl(uport->membase + SE_GENI_S_IRQ_STATUS);
geni_status = readl(uport->membase + SE_GENI_STATUS);
m_irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
writel(m_irq_status, uport->membase + SE_GENI_M_IRQ_CLEAR);
writel(s_irq_status, uport->membase + SE_GENI_S_IRQ_CLEAR);
if (WARN_ON(m_irq_status & M_ILLEGAL_CMD_EN))
goto out_unlock;
@@ -806,8 +792,7 @@ static irqreturn_t qcom_geni_serial_isr(int isr, void *dev)
tty_insert_flip_char(tport, 0, TTY_OVERRUN);
}
if (m_irq_status & (M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN) &&
m_irq_en & (M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN))
if (m_irq_status & m_irq_en & (M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN))
qcom_geni_serial_handle_tx(uport, m_irq_status & M_CMD_DONE_EN,
geni_status & M_GENI_CMD_ACTIVE);
@@ -842,17 +827,6 @@ static void get_tx_fifo_size(struct qcom_geni_serial_port *port)
(port->tx_fifo_depth * port->tx_fifo_width) / BITS_PER_BYTE;
}
static void set_rfr_wm(struct qcom_geni_serial_port *port)
{
/*
* Set RFR (Flow off) to FIFO_DEPTH - 2.
* RX WM level at 10% RX_FIFO_DEPTH.
* TX WM level at 10% TX_FIFO_DEPTH.
*/
port->rx_rfr = port->rx_fifo_depth - 2;
port->rx_wm = UART_CONSOLE_RX_WM;
port->tx_wm = DEF_TX_WM;
}
static void qcom_geni_serial_shutdown(struct uart_port *uport)
{
@@ -891,21 +865,19 @@ static int qcom_geni_serial_port_setup(struct uart_port *uport)
get_tx_fifo_size(port);
set_rfr_wm(port);
writel_relaxed(rxstale, uport->membase + SE_UART_RX_STALE_CNT);
writel(rxstale, uport->membase + SE_UART_RX_STALE_CNT);
/*
* Make an unconditional cancel on the main sequencer to reset
* it else we could end up in data loss scenarios.
*/
port->xfer_mode = GENI_SE_FIFO;
if (uart_console(uport))
qcom_geni_serial_poll_tx_done(uport);
geni_se_config_packing(&port->se, BITS_PER_BYTE, port->tx_bytes_pw,
false, true, false);
geni_se_config_packing(&port->se, BITS_PER_BYTE, port->rx_bytes_pw,
false, false, true);
geni_se_init(&port->se, port->rx_wm, port->rx_rfr);
geni_se_select_mode(&port->se, port->xfer_mode);
geni_se_init(&port->se, UART_RX_WM, port->rx_fifo_depth - 2);
geni_se_select_mode(&port->se, GENI_SE_FIFO);
if (!uart_console(uport)) {
port->rx_fifo = devm_kcalloc(uport->dev,
port->rx_fifo_depth, sizeof(u32), GFP_KERNEL);
@@ -996,10 +968,10 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport,
ser_clk_cfg |= clk_div << CLK_DIV_SHFT;
/* parity */
tx_trans_cfg = readl_relaxed(uport->membase + SE_UART_TX_TRANS_CFG);
tx_parity_cfg = readl_relaxed(uport->membase + SE_UART_TX_PARITY_CFG);
rx_trans_cfg = readl_relaxed(uport->membase + SE_UART_RX_TRANS_CFG);
rx_parity_cfg = readl_relaxed(uport->membase + SE_UART_RX_PARITY_CFG);
tx_trans_cfg = readl(uport->membase + SE_UART_TX_TRANS_CFG);
tx_parity_cfg = readl(uport->membase + SE_UART_TX_PARITY_CFG);
rx_trans_cfg = readl(uport->membase + SE_UART_RX_TRANS_CFG);
rx_parity_cfg = readl(uport->membase + SE_UART_RX_PARITY_CFG);
if (termios->c_cflag & PARENB) {
tx_trans_cfg |= UART_TX_PAR_EN;
rx_trans_cfg |= UART_RX_PAR_EN;
@@ -1055,17 +1027,17 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport,
uart_update_timeout(uport, termios->c_cflag, baud);
if (!uart_console(uport))
writel_relaxed(port->loopback,
writel(port->loopback,
uport->membase + SE_UART_LOOPBACK_CFG);
writel_relaxed(tx_trans_cfg, uport->membase + SE_UART_TX_TRANS_CFG);
writel_relaxed(tx_parity_cfg, uport->membase + SE_UART_TX_PARITY_CFG);
writel_relaxed(rx_trans_cfg, uport->membase + SE_UART_RX_TRANS_CFG);
writel_relaxed(rx_parity_cfg, uport->membase + SE_UART_RX_PARITY_CFG);
writel_relaxed(bits_per_char, uport->membase + SE_UART_TX_WORD_LEN);
writel_relaxed(bits_per_char, uport->membase + SE_UART_RX_WORD_LEN);
writel_relaxed(stop_bit_len, uport->membase + SE_UART_TX_STOP_BIT_LEN);
writel_relaxed(ser_clk_cfg, uport->membase + GENI_SER_M_CLK_CFG);
writel_relaxed(ser_clk_cfg, uport->membase + GENI_SER_S_CLK_CFG);
writel(tx_trans_cfg, uport->membase + SE_UART_TX_TRANS_CFG);
writel(tx_parity_cfg, uport->membase + SE_UART_TX_PARITY_CFG);
writel(rx_trans_cfg, uport->membase + SE_UART_RX_TRANS_CFG);
writel(rx_parity_cfg, uport->membase + SE_UART_RX_PARITY_CFG);
writel(bits_per_char, uport->membase + SE_UART_TX_WORD_LEN);
writel(bits_per_char, uport->membase + SE_UART_RX_WORD_LEN);
writel(stop_bit_len, uport->membase + SE_UART_TX_STOP_BIT_LEN);
writel(ser_clk_cfg, uport->membase + GENI_SER_M_CLK_CFG);
writel(ser_clk_cfg, uport->membase + GENI_SER_S_CLK_CFG);
out_restart_rx:
qcom_geni_serial_start_rx(uport);
}
@@ -1156,13 +1128,13 @@ static int __init qcom_geni_serial_earlycon_setup(struct earlycon_device *dev,
geni_se_init(&se, DEF_FIFO_DEPTH_WORDS / 2, DEF_FIFO_DEPTH_WORDS - 2);
geni_se_select_mode(&se, GENI_SE_FIFO);
writel_relaxed(tx_trans_cfg, uport->membase + SE_UART_TX_TRANS_CFG);
writel_relaxed(tx_parity_cfg, uport->membase + SE_UART_TX_PARITY_CFG);
writel_relaxed(rx_trans_cfg, uport->membase + SE_UART_RX_TRANS_CFG);
writel_relaxed(rx_parity_cfg, uport->membase + SE_UART_RX_PARITY_CFG);
writel_relaxed(bits_per_char, uport->membase + SE_UART_TX_WORD_LEN);
writel_relaxed(bits_per_char, uport->membase + SE_UART_RX_WORD_LEN);
writel_relaxed(stop_bit_len, uport->membase + SE_UART_TX_STOP_BIT_LEN);
writel(tx_trans_cfg, uport->membase + SE_UART_TX_TRANS_CFG);
writel(tx_parity_cfg, uport->membase + SE_UART_TX_PARITY_CFG);
writel(rx_trans_cfg, uport->membase + SE_UART_RX_TRANS_CFG);
writel(rx_parity_cfg, uport->membase + SE_UART_RX_PARITY_CFG);
writel(bits_per_char, uport->membase + SE_UART_TX_WORD_LEN);
writel(bits_per_char, uport->membase + SE_UART_RX_WORD_LEN);
writel(stop_bit_len, uport->membase + SE_UART_TX_STOP_BIT_LEN);
dev->con->write = qcom_geni_serial_earlycon_write;
dev->con->setup = NULL;


@@ -52,10 +52,11 @@ static void tty_port_default_wakeup(struct tty_port *port)
}
}
static const struct tty_port_client_operations default_client_ops = {
const struct tty_port_client_operations tty_port_default_client_ops = {
.receive_buf = tty_port_default_receive_buf,
.write_wakeup = tty_port_default_wakeup,
};
EXPORT_SYMBOL_GPL(tty_port_default_client_ops);
void tty_port_init(struct tty_port *port)
{
@@ -68,7 +69,7 @@ void tty_port_init(struct tty_port *port)
spin_lock_init(&port->lock);
port->close_delay = (50 * HZ) / 100;
port->closing_wait = (3000 * HZ) / 100;
port->client_ops = &default_client_ops;
port->client_ops = &tty_port_default_client_ops;
kref_init(&port->kref);
}
EXPORT_SYMBOL(tty_port_init);
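
Exporting the default ops is what lets a client that swapped in its own tty_port_client_operations put things back when it detaches. A hedged sketch of that consumer side (the function name is hypothetical; the assignments mirror what the serdev fix in this series does):

static void example_client_detach(struct tty_port *port)
{
	/* restore the defaults so later users don't inherit stale ops */
	port->client_ops = &tty_port_default_client_ops;
	port->client_data = NULL;
}
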


@@ -27,6 +27,8 @@
#include <linux/console.h>
#include <linux/tty_flip.h>
#include <linux/sched/signal.h>
/* Don't take this from <ctype.h>: 011-015 on the screen aren't spaces */
#define isspace(c) ((c) == ' ')
@@ -337,6 +339,7 @@ int paste_selection(struct tty_struct *tty)
unsigned int count;
struct tty_ldisc *ld;
DECLARE_WAITQUEUE(wait, current);
int ret = 0;
console_lock();
poke_blanked_console();
@@ -350,6 +353,10 @@ int paste_selection(struct tty_struct *tty)
add_wait_queue(&vc->paste_wait, &wait);
while (sel_buffer && sel_buffer_lth > pasted) {
set_current_state(TASK_INTERRUPTIBLE);
if (signal_pending(current)) {
ret = -EINTR;
break;
}
if (tty_throttled(tty)) {
schedule();
continue;
@@ -365,5 +372,5 @@ int paste_selection(struct tty_struct *tty)
tty_buffer_unlock_exclusive(&vc->port);
tty_ldisc_deref(ld);
return 0;
return ret;
}
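
The change follows the canonical interruptible-wait shape: set the task state, check for pending signals before sleeping, and report -EINTR instead of looping forever on a buffer that can never be pasted. As a standalone sketch (queue and condition are placeholders):

static int wait_until_done(wait_queue_head_t *queue, bool (*condition)(void))
{
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	add_wait_queue(queue, &wait);
	while (!condition()) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current)) {
			ret = -EINTR;	/* bail out instead of spinning */
			break;
		}
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(queue, &wait);
	return ret;
}
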


@@ -936,10 +936,21 @@ static void flush_scrollback(struct vc_data *vc)
WARN_CONSOLE_UNLOCKED();
set_origin(vc);
if (vc->vc_sw->con_flush_scrollback)
if (vc->vc_sw->con_flush_scrollback) {
vc->vc_sw->con_flush_scrollback(vc);
else
} else if (con_is_visible(vc)) {
/*
* When no con_flush_scrollback method is provided then the
* legacy way for flushing the scrollback buffer is to use
* a side effect of the con_switch method. We do it only on
* the foreground console as background consoles have no
* scrollback buffers in that case and we obviously don't
* want to switch to them.
*/
hide_cursor(vc);
vc->vc_sw->con_switch(vc);
set_cursor(vc);
}
}
/*


@@ -876,15 +876,20 @@ int vt_ioctl(struct tty_struct *tty,
return -EINVAL;
for (i = 0; i < MAX_NR_CONSOLES; i++) {
struct vc_data *vcp;
if (!vc_cons[i].d)
continue;
console_lock();
if (v.v_vlin)
vc_cons[i].d->vc_scan_lines = v.v_vlin;
if (v.v_clin)
vc_cons[i].d->vc_font.height = v.v_clin;
vc_cons[i].d->vc_resize_user = 1;
vc_resize(vc_cons[i].d, v.v_cols, v.v_rows);
vcp = vc_cons[i].d;
if (vcp) {
if (v.v_vlin)
vcp->vc_scan_lines = v.v_vlin;
if (v.v_clin)
vcp->vc_font.height = v.v_clin;
vcp->vc_resize_user = 1;
vc_resize(vcp, v.v_cols, v.v_rows);
}
console_unlock();
}
break;
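
The essence of the VT_RESIZEX fix is that vc_cons[i].d may change between an unlocked check and its use, so the pointer is read exactly once under console_lock and every access goes through that snapshot. A reduced sketch of the pattern:

static void resize_one_console(int i, unsigned int cols, unsigned int rows)
{
	struct vc_data *vcp;

	console_lock();
	vcp = vc_cons[i].d;	/* single read, under the lock */
	if (vcp)		/* it may have vanished since any earlier check */
		vc_resize(vcp, cols, rows);
	console_unlock();
}
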


@@ -256,6 +256,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
struct usb_host_interface *ifp, int num_ep,
unsigned char *buffer, int size)
{
struct usb_device *udev = to_usb_device(ddev);
unsigned char *buffer0 = buffer;
struct usb_endpoint_descriptor *d;
struct usb_host_endpoint *endpoint;
@@ -297,6 +298,16 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
goto skip_to_next_endpoint_or_interface_descriptor;
}
/* Ignore blacklisted endpoints */
if (udev->quirks & USB_QUIRK_ENDPOINT_BLACKLIST) {
if (usb_endpoint_is_blacklisted(udev, ifp, d)) {
dev_warn(ddev, "config %d interface %d altsetting %d has a blacklisted endpoint with address 0x%X, skipping\n",
cfgno, inum, asnum,
d->bEndpointAddress);
goto skip_to_next_endpoint_or_interface_descriptor;
}
}
endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints];
++ifp->desc.bNumEndpoints;


@@ -37,7 +37,9 @@
#include "otg_whitelist.h"
#define USB_VENDOR_GENESYS_LOGIC 0x05e3
#define USB_VENDOR_SMSC 0x0424
#define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01
#define HUB_QUIRK_DISABLE_AUTOSUSPEND 0x02
#define USB_TP_TRANSMISSION_DELAY 40 /* ns */
#define USB_TP_TRANSMISSION_DELAY_MAX 65535 /* ns */
@@ -1191,11 +1193,6 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
#ifdef CONFIG_PM
udev->reset_resume = 1;
#endif
/* Don't set the change_bits when the device
* was powered off.
*/
if (test_bit(port1, hub->power_bits))
set_bit(port1, hub->change_bits);
} else {
/* The power session is gone; tell hub_wq */
@@ -1701,6 +1698,10 @@ static void hub_disconnect(struct usb_interface *intf)
kfree(hub->buffer);
pm_suspend_ignore_children(&intf->dev, false);
if (hub->quirk_disable_autosuspend)
usb_autopm_put_interface(intf);
kref_put(&hub->kref, hub_release);
}
@@ -1831,6 +1832,11 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
if (id->driver_info & HUB_QUIRK_CHECK_PORT_AUTOSUSPEND)
hub->quirk_check_port_auto_suspend = 1;
if (id->driver_info & HUB_QUIRK_DISABLE_AUTOSUSPEND) {
hub->quirk_disable_autosuspend = 1;
usb_autopm_get_interface(intf);
}
if (hub_configure(hub, &desc->endpoint[0].desc) >= 0)
return 0;
@@ -5415,6 +5421,10 @@ static void hub_event(struct work_struct *work)
}
static const struct usb_device_id hub_id_table[] = {
{ .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_CLASS,
.idVendor = USB_VENDOR_SMSC,
.bInterfaceClass = USB_CLASS_HUB,
.driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
{ .match_flags = USB_DEVICE_ID_MATCH_VENDOR
| USB_DEVICE_ID_MATCH_INT_CLASS,
.idVendor = USB_VENDOR_GENESYS_LOGIC,


@@ -61,6 +61,7 @@ struct usb_hub {
unsigned quiescing:1;
unsigned disconnected:1;
unsigned in_reset:1;
unsigned quirk_disable_autosuspend:1;
unsigned quirk_check_port_auto_suspend:1;


@@ -354,6 +354,10 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x0904, 0x6103), .driver_info =
USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
/* Sound Devices USBPre2 */
{ USB_DEVICE(0x0926, 0x0202), .driver_info =
USB_QUIRK_ENDPOINT_BLACKLIST },
/* Keytouch QWERTY Panel keyboard */
{ USB_DEVICE(0x0926, 0x3333), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
@@ -445,6 +449,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* INTEL VALUE SSD */
{ USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
/* novation SoundControl XL */
{ USB_DEVICE(0x1235, 0x0061), .driver_info = USB_QUIRK_RESET_RESUME },
{ } /* terminating entry must be last */
};
@@ -472,6 +479,39 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = {
{ } /* terminating entry must be last */
};
/*
* Entries for blacklisted endpoints that should be ignored when parsing
* configuration descriptors.
*
* Matched for devices with USB_QUIRK_ENDPOINT_BLACKLIST.
*/
static const struct usb_device_id usb_endpoint_blacklist[] = {
{ USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0202, 1), .driver_info = 0x85 },
{ }
};
bool usb_endpoint_is_blacklisted(struct usb_device *udev,
struct usb_host_interface *intf,
struct usb_endpoint_descriptor *epd)
{
const struct usb_device_id *id;
unsigned int address;
for (id = usb_endpoint_blacklist; id->match_flags; ++id) {
if (!usb_match_device(udev, id))
continue;
if (!usb_match_one_id_intf(udev, intf, id))
continue;
address = id->driver_info;
if (address == epd->bEndpointAddress)
return true;
}
return false;
}
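
Concretely, the one table entry above matches the Sound Devices USBPre2 (0926:0202) on interface 1 and stashes the offending endpoint address in driver_info, so a descriptor with bEndpointAddress 0x85 (endpoint 5, IN) on that interface is reported as blacklisted and skipped by the config-parsing hunk earlier in this series.
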
static bool usb_match_any_interface(struct usb_device *udev,
const struct usb_device_id *id)
{


@@ -37,6 +37,9 @@ extern void usb_authorize_interface(struct usb_interface *);
extern void usb_detect_quirks(struct usb_device *udev);
extern void usb_detect_interface_quirks(struct usb_device *udev);
extern void usb_release_quirk_list(void);
extern bool usb_endpoint_is_blacklisted(struct usb_device *udev,
struct usb_host_interface *intf,
struct usb_endpoint_descriptor *epd);
extern int usb_remove_device(struct usb_device *udev);
extern int usb_get_device_descriptor(struct usb_device *dev,


@@ -1004,11 +1004,6 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
else
packets = 1; /* send one packet if length is zero. */
if (hs_ep->isochronous && length > (hs_ep->mc * hs_ep->ep.maxpacket)) {
dev_err(hsotg->dev, "req length > maxpacket*mc\n");
return;
}
if (dir_in && index != 0)
if (hs_ep->isochronous)
epsize = DXEPTSIZ_MC(packets);
@@ -1312,6 +1307,13 @@ static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
req->actual = 0;
req->status = -EINPROGRESS;
/* Don't queue ISOC request if length greater than mps*mc */
if (hs_ep->isochronous &&
req->length > (hs_ep->mc * hs_ep->ep.maxpacket)) {
dev_err(hs->dev, "req length > maxpacket*mc\n");
return -EINVAL;
}
/* In DDMA mode for ISOC's don't queue request if length greater
* than descriptor limits.
*/
@@ -1542,6 +1544,7 @@ static int dwc2_hsotg_process_req_status(struct dwc2_hsotg *hsotg,
struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
struct dwc2_hsotg_ep *ep;
__le16 reply;
u16 status;
int ret;
dev_dbg(hsotg->dev, "%s: USB_REQ_GET_STATUS\n", __func__);
@@ -1553,11 +1556,10 @@ static int dwc2_hsotg_process_req_status(struct dwc2_hsotg *hsotg,
switch (ctrl->bRequestType & USB_RECIP_MASK) {
case USB_RECIP_DEVICE:
/*
* bit 0 => self powered
* bit 1 => remote wakeup
*/
reply = cpu_to_le16(0);
status = 1 << USB_DEVICE_SELF_POWERED;
status |= hsotg->remote_wakeup_allowed <<
USB_DEVICE_REMOTE_WAKEUP;
reply = cpu_to_le16(status);
break;
case USB_RECIP_INTERFACE:
@@ -1668,7 +1670,10 @@ static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
case USB_RECIP_DEVICE:
switch (wValue) {
case USB_DEVICE_REMOTE_WAKEUP:
hsotg->remote_wakeup_allowed = 1;
if (set)
hsotg->remote_wakeup_allowed = 1;
else
hsotg->remote_wakeup_allowed = 0;
break;
case USB_DEVICE_TEST_MODE:
@@ -1678,16 +1683,17 @@ static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
return -EINVAL;
hsotg->test_mode = wIndex >> 8;
ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
if (ret) {
dev_err(hsotg->dev,
"%s: failed to send reply\n", __func__);
return ret;
}
break;
default:
return -ENOENT;
}
ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
if (ret) {
dev_err(hsotg->dev,
"%s: failed to send reply\n", __func__);
return ret;
}
break;
case USB_RECIP_ENDPOINT:


@@ -2224,7 +2224,8 @@ static int dwc3_gadget_ep_reclaim_completed_trb(struct dwc3_ep *dep,
if (event->status & DEPEVT_STATUS_SHORT && !chain)
return 1;
if (event->status & DEPEVT_STATUS_IOC)
if ((trb->ctrl & DWC3_TRB_CTRL_IOC) ||
(trb->ctrl & DWC3_TRB_CTRL_LST))
return 1;
return 0;


@@ -437,12 +437,10 @@ static u8 encode_bMaxPower(enum usb_device_speed speed,
val = CONFIG_USB_GADGET_VBUS_DRAW;
if (!val)
return 0;
switch (speed) {
case USB_SPEED_SUPER:
return DIV_ROUND_UP(val, 8);
default:
if (speed < USB_SPEED_SUPER)
return DIV_ROUND_UP(val, 2);
}
else
return DIV_ROUND_UP(val, 8);
}
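
As a worked example of the encoding above: with CONFIG_USB_GADGET_VBUS_DRAW set to 500, any speed below SuperSpeed reports DIV_ROUND_UP(500, 2) = 250 units of 2 mA, while a SuperSpeed configuration drawing 900 reports DIV_ROUND_UP(900, 8) = 113 units of 8 mA; the switch-to-if rewrite keeps both encodings but treats every sub-SuperSpeed rate uniformly.
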
static int config_buf(struct usb_configuration *config,


@@ -55,6 +55,7 @@ static u8 usb_bos_descriptor [] = {
static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
u16 wLength)
{
struct xhci_port_cap *port_cap = NULL;
int i, ssa_count;
u32 temp;
u16 desc_size, ssp_cap_size, ssa_size = 0;
@@ -64,16 +65,24 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
ssp_cap_size = sizeof(usb_bos_descriptor) - desc_size;
/* does xhci support USB 3.1 Enhanced SuperSpeed */
if (xhci->usb3_rhub.min_rev >= 0x01) {
for (i = 0; i < xhci->num_port_caps; i++) {
if (xhci->port_caps[i].maj_rev == 0x03 &&
xhci->port_caps[i].min_rev >= 0x01) {
usb3_1 = true;
port_cap = &xhci->port_caps[i];
break;
}
}
if (usb3_1) {
/* does xhci provide a PSI table for SSA speed attributes? */
if (xhci->usb3_rhub.psi_count) {
if (port_cap->psi_count) {
/* two SSA entries for each unique PSI ID, RX and TX */
ssa_count = xhci->usb3_rhub.psi_uid_count * 2;
ssa_count = port_cap->psi_uid_count * 2;
ssa_size = ssa_count * sizeof(u32);
ssp_cap_size -= 16; /* skip copying the default SSA */
}
desc_size += ssp_cap_size;
usb3_1 = true;
}
memcpy(buf, &usb_bos_descriptor, min(desc_size, wLength));
@@ -99,7 +108,7 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
}
/* If PSI table exists, add the custom speed attributes from it */
if (usb3_1 && xhci->usb3_rhub.psi_count) {
if (usb3_1 && port_cap->psi_count) {
u32 ssp_cap_base, bm_attrib, psi, psi_mant, psi_exp;
int offset;
@@ -111,7 +120,7 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
/* attribute count SSAC bits 4:0 and ID count SSIC bits 8:5 */
bm_attrib = (ssa_count - 1) & 0x1f;
bm_attrib |= (xhci->usb3_rhub.psi_uid_count - 1) << 5;
bm_attrib |= (port_cap->psi_uid_count - 1) << 5;
put_unaligned_le32(bm_attrib, &buf[ssp_cap_base + 4]);
if (wLength < desc_size + ssa_size)
@@ -124,8 +133,8 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
* USB 3.1 requires two SSA entries (RX and TX) for every link
*/
offset = desc_size;
for (i = 0; i < xhci->usb3_rhub.psi_count; i++) {
psi = xhci->usb3_rhub.psi[i];
for (i = 0; i < port_cap->psi_count; i++) {
psi = port_cap->psi[i];
psi &= ~USB_SSP_SUBLINK_SPEED_RSVD;
psi_exp = XHCI_EXT_PORT_PSIE(psi);
psi_mant = XHCI_EXT_PORT_PSIM(psi);


@@ -1475,9 +1475,15 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
/* Allow 3 retries for everything but isoc, set CErr = 3 */
if (!usb_endpoint_xfer_isoc(&ep->desc))
err_count = 3;
/* Some devices get this wrong */
if (usb_endpoint_xfer_bulk(&ep->desc) && udev->speed == USB_SPEED_HIGH)
max_packet = 512;
/* HS bulk max packet should be 512, FS bulk supports 8, 16, 32 or 64 */
if (usb_endpoint_xfer_bulk(&ep->desc)) {
if (udev->speed == USB_SPEED_HIGH)
max_packet = 512;
if (udev->speed == USB_SPEED_FULL) {
max_packet = rounddown_pow_of_two(max_packet);
max_packet = clamp_val(max_packet, 8, 64);
}
}
/* xHCI 1.0 and 1.1 indicates that ctrl ep avg TRB Length should be 8 */
if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
avg_trb_len = 8;
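
A worked example of the new clamp: a full-speed device that wrongly advertises a bulk wMaxPacketSize of 1023 is first rounded down to the nearest power of two (512) and then clamped into the legal 8..64 range, ending at 64, while a legal value such as 32 passes through unchanged.
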
@@ -1909,17 +1915,17 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
xhci->usb3_rhub.num_ports = 0;
xhci->num_active_eps = 0;
kfree(xhci->usb2_rhub.ports);
kfree(xhci->usb2_rhub.psi);
kfree(xhci->usb3_rhub.ports);
kfree(xhci->usb3_rhub.psi);
kfree(xhci->hw_ports);
kfree(xhci->rh_bw);
kfree(xhci->ext_caps);
for (i = 0; i < xhci->num_port_caps; i++)
kfree(xhci->port_caps[i].psi);
kfree(xhci->port_caps);
xhci->num_port_caps = 0;
xhci->usb2_rhub.ports = NULL;
xhci->usb2_rhub.psi = NULL;
xhci->usb3_rhub.ports = NULL;
xhci->usb3_rhub.psi = NULL;
xhci->hw_ports = NULL;
xhci->rh_bw = NULL;
xhci->ext_caps = NULL;
@@ -2120,6 +2126,7 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
u8 major_revision, minor_revision;
struct xhci_hub *rhub;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
struct xhci_port_cap *port_cap;
temp = readl(addr);
major_revision = XHCI_EXT_PORT_MAJOR(temp);
@@ -2154,31 +2161,39 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
/* WTF? "Valid values are 1 to MaxPorts" */
return;
rhub->psi_count = XHCI_EXT_PORT_PSIC(temp);
if (rhub->psi_count) {
rhub->psi = kcalloc_node(rhub->psi_count, sizeof(*rhub->psi),
GFP_KERNEL, dev_to_node(dev));
if (!rhub->psi)
rhub->psi_count = 0;
port_cap = &xhci->port_caps[xhci->num_port_caps++];
if (xhci->num_port_caps > max_caps)
return;
rhub->psi_uid_count++;
for (i = 0; i < rhub->psi_count; i++) {
rhub->psi[i] = readl(addr + 4 + i);
port_cap->maj_rev = major_revision;
port_cap->min_rev = minor_revision;
port_cap->psi_count = XHCI_EXT_PORT_PSIC(temp);
if (port_cap->psi_count) {
port_cap->psi = kcalloc_node(port_cap->psi_count,
sizeof(*port_cap->psi),
GFP_KERNEL, dev_to_node(dev));
if (!port_cap->psi)
port_cap->psi_count = 0;
port_cap->psi_uid_count++;
for (i = 0; i < port_cap->psi_count; i++) {
port_cap->psi[i] = readl(addr + 4 + i);
/* count unique ID values, two consecutive entries can
* have the same ID if link is asymmetric
*/
if (i && (XHCI_EXT_PORT_PSIV(rhub->psi[i]) !=
XHCI_EXT_PORT_PSIV(rhub->psi[i - 1])))
rhub->psi_uid_count++;
if (i && (XHCI_EXT_PORT_PSIV(port_cap->psi[i]) !=
XHCI_EXT_PORT_PSIV(port_cap->psi[i - 1])))
port_cap->psi_uid_count++;
xhci_dbg(xhci, "PSIV:%d PSIE:%d PLT:%d PFD:%d LP:%d PSIM:%d\n",
XHCI_EXT_PORT_PSIV(rhub->psi[i]),
XHCI_EXT_PORT_PSIE(rhub->psi[i]),
XHCI_EXT_PORT_PLT(rhub->psi[i]),
XHCI_EXT_PORT_PFD(rhub->psi[i]),
XHCI_EXT_PORT_LP(rhub->psi[i]),
XHCI_EXT_PORT_PSIM(rhub->psi[i]));
XHCI_EXT_PORT_PSIV(port_cap->psi[i]),
XHCI_EXT_PORT_PSIE(port_cap->psi[i]),
XHCI_EXT_PORT_PLT(port_cap->psi[i]),
XHCI_EXT_PORT_PFD(port_cap->psi[i]),
XHCI_EXT_PORT_LP(port_cap->psi[i]),
XHCI_EXT_PORT_PSIM(port_cap->psi[i]));
}
}
/* cache usb2 port capabilities */
@@ -2225,6 +2240,7 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
continue;
}
hw_port->rhub = rhub;
hw_port->port_cap = port_cap;
rhub->num_ports++;
}
/* FIXME: Should we disable ports not in the Extended Capabilities? */
@@ -2315,6 +2331,11 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
if (!xhci->ext_caps)
return -ENOMEM;
xhci->port_caps = kcalloc_node(cap_count, sizeof(*xhci->port_caps),
flags, dev_to_node(dev));
if (!xhci->port_caps)
return -ENOMEM;
offset = cap_start;
while (offset) {


@@ -41,6 +41,7 @@
#define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8
#define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8
#define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0
#define PCI_DEVICE_ID_INTEL_CML_XHCI 0xa3af
#define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9
#define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba
@@ -179,7 +180,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI)) {
pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_CML_XHCI)) {
xhci->quirks |= XHCI_PME_STUCK_QUIRK;
}
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
@@ -283,6 +285,9 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
if (!usb_hcd_is_primary_hcd(hcd))
return 0;
if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
xhci_pme_acpi_rtd3_enable(pdev);
xhci_dbg(xhci, "Got SBRN %u\n", (unsigned int) xhci->sbrn);
/* Find any debug ports */
@@ -340,9 +345,6 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
HCC_MAX_PSA(xhci->hcc_params) >= 4)
xhci->shared_hcd->can_do_streams = 1;
if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
xhci_pme_acpi_rtd3_enable(dev);
/* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */
pm_runtime_put_noidle(&dev->dev);


@@ -2692,6 +2692,42 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
return 1;
}
/*
* Update Event Ring Dequeue Pointer:
* - When all events have finished
* - To avoid "Event Ring Full Error" condition
*/
static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
union xhci_trb *event_ring_deq)
{
u64 temp_64;
dma_addr_t deq;
temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
/* If necessary, update the HW's version of the event ring deq ptr. */
if (event_ring_deq != xhci->event_ring->dequeue) {
deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
xhci->event_ring->dequeue);
if (deq == 0)
xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr\n");
/*
* Per 4.9.4, Software writes to the ERDP register shall
* always advance the Event Ring Dequeue Pointer value.
*/
if ((temp_64 & (u64) ~ERST_PTR_MASK) ==
((u64) deq & (u64) ~ERST_PTR_MASK))
return;
/* Update HC event ring dequeue pointer */
temp_64 &= ERST_PTR_MASK;
temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
}
/* Clear the event handler busy flag (RW1C) */
temp_64 |= ERST_EHB;
xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
}
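
Factoring this out lets the interrupt handler below call it periodically, every TRBS_PER_SEGMENT / 2 processed events, rather than only once after the loop, so a long burst of events can no longer outrun the hardware's view of the dequeue pointer and trigger an Event Ring Full Error.
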
/*
* xHCI spec says we can get an interrupt, and if the HC has an error condition,
* we might get bad data out of the event ring. Section 4.10.2.7 has a list of
@@ -2703,9 +2739,9 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
union xhci_trb *event_ring_deq;
irqreturn_t ret = IRQ_NONE;
unsigned long flags;
dma_addr_t deq;
u64 temp_64;
u32 status;
int event_loop = 0;
spin_lock_irqsave(&xhci->lock, flags);
/* Check if the xHC generated the interrupt, or the irq is shared */
@@ -2759,24 +2795,14 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
/* FIXME this should be a delayed service routine
* that clears the EHB.
*/
while (xhci_handle_event(xhci) > 0) {}
temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
/* If necessary, update the HW's version of the event ring deq ptr. */
if (event_ring_deq != xhci->event_ring->dequeue) {
deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
xhci->event_ring->dequeue);
if (deq == 0)
xhci_warn(xhci, "WARN something wrong with SW event "
"ring dequeue ptr.\n");
/* Update HC event ring dequeue pointer */
temp_64 &= ERST_PTR_MASK;
temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
while (xhci_handle_event(xhci) > 0) {
if (event_loop++ < TRBS_PER_SEGMENT / 2)
continue;
xhci_update_erst_dequeue(xhci, event_ring_deq);
event_loop = 0;
}
/* Clear the event handler busy flag (RW1C); event ring is empty. */
temp_64 |= ERST_EHB;
xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
xhci_update_erst_dequeue(xhci, event_ring_deq);
ret = IRQ_HANDLED;
out:


@@ -1704,11 +1704,21 @@ static inline unsigned int hcd_index(struct usb_hcd *hcd)
else
return 1;
}
struct xhci_port_cap {
u32 *psi; /* array of protocol speed ID entries */
u8 psi_count;
u8 psi_uid_count;
u8 maj_rev;
u8 min_rev;
};
struct xhci_port {
__le32 __iomem *addr;
int hw_portnum;
int hcd_portnum;
struct xhci_hub *rhub;
struct xhci_port_cap *port_cap;
};
struct xhci_hub {
@@ -1718,9 +1728,6 @@ struct xhci_hub {
/* supported protocol extended capability values */
u8 maj_rev;
u8 min_rev;
u32 *psi; /* array of protocol speed ID entries */
u8 psi_count;
u8 psi_uid_count;
};
/* There is one xhci_hcd structure per controller */
@@ -1882,6 +1889,9 @@ struct xhci_hcd {
/* cached usb2 extended protocol capabilities */
u32 *ext_caps;
unsigned int num_ext_caps;
/* cached extended protocol port capabilities */
struct xhci_port_cap *port_caps;
unsigned int num_port_caps;
/* Compliance Mode Recovery Data */
struct timer_list comp_mode_recovery_timer;
u32 port_status_u0;


@@ -33,6 +33,14 @@
#define USB_DEVICE_ID_CODEMERCS_IOWPV2 0x1512
/* full speed iowarrior */
#define USB_DEVICE_ID_CODEMERCS_IOW56 0x1503
/* fuller speed iowarrior */
#define USB_DEVICE_ID_CODEMERCS_IOW28 0x1504
#define USB_DEVICE_ID_CODEMERCS_IOW28L 0x1505
#define USB_DEVICE_ID_CODEMERCS_IOW100 0x1506
/* OEMed devices */
#define USB_DEVICE_ID_CODEMERCS_IOW24SAG 0x158a
#define USB_DEVICE_ID_CODEMERCS_IOW56AM 0x158b
/* Get a minor range for your devices from the usb maintainer */
#ifdef CONFIG_USB_DYNAMIC_MINORS
@@ -137,6 +145,11 @@ static const struct usb_device_id iowarrior_ids[] = {
{USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOWPV1)},
{USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOWPV2)},
{USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW56)},
{USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW24SAG)},
{USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW56AM)},
{USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW28)},
{USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW28L)},
{USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW100)},
{} /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, iowarrior_ids);
@@ -361,6 +374,7 @@ static ssize_t iowarrior_write(struct file *file,
}
switch (dev->product_id) {
case USB_DEVICE_ID_CODEMERCS_IOW24:
case USB_DEVICE_ID_CODEMERCS_IOW24SAG:
case USB_DEVICE_ID_CODEMERCS_IOWPV1:
case USB_DEVICE_ID_CODEMERCS_IOWPV2:
case USB_DEVICE_ID_CODEMERCS_IOW40:
@@ -375,6 +389,10 @@ static ssize_t iowarrior_write(struct file *file,
goto exit;
break;
case USB_DEVICE_ID_CODEMERCS_IOW56:
case USB_DEVICE_ID_CODEMERCS_IOW56AM:
case USB_DEVICE_ID_CODEMERCS_IOW28:
case USB_DEVICE_ID_CODEMERCS_IOW28L:
case USB_DEVICE_ID_CODEMERCS_IOW100:
/* The IOW56 uses asynchronous IO and more urbs */
if (atomic_read(&dev->write_busy) == MAX_WRITES_IN_FLIGHT) {
/* Wait until we are below the limit for submitted urbs */
@@ -499,6 +517,7 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd,
switch (cmd) {
case IOW_WRITE:
if (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW24 ||
dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW24SAG ||
dev->product_id == USB_DEVICE_ID_CODEMERCS_IOWPV1 ||
dev->product_id == USB_DEVICE_ID_CODEMERCS_IOWPV2 ||
dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW40) {
@@ -782,7 +801,11 @@ static int iowarrior_probe(struct usb_interface *interface,
goto error;
}
if (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) {
if ((dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) ||
(dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56AM) ||
(dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28) ||
(dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28L) ||
(dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW100)) {
res = usb_find_last_int_out_endpoint(iface_desc,
&dev->int_out_endpoint);
if (res) {
@@ -795,7 +818,11 @@ static int iowarrior_probe(struct usb_interface *interface,
/* we have to check the report_size often, so remember it in the endianness suitable for our machine */
dev->report_size = usb_endpoint_maxp(dev->int_in_endpoint);
if ((dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) &&
(dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56))
((dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) ||
(dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56AM) ||
(dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28) ||
(dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28L) ||
(dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW100)))
/* IOWarrior56 has wMaxPacketSize different from report size */
dev->report_size = 7;


@@ -45,6 +45,7 @@ struct uas_dev_info {
struct scsi_cmnd *cmnd[MAX_CMNDS];
spinlock_t lock;
struct work_struct work;
struct work_struct scan_work; /* for async scanning */
};
enum {
@@ -114,6 +115,17 @@ static void uas_do_work(struct work_struct *work)
spin_unlock_irqrestore(&devinfo->lock, flags);
}
static void uas_scan_work(struct work_struct *work)
{
struct uas_dev_info *devinfo =
container_of(work, struct uas_dev_info, scan_work);
struct Scsi_Host *shost = usb_get_intfdata(devinfo->intf);
dev_dbg(&devinfo->intf->dev, "starting scan\n");
scsi_scan_host(shost);
dev_dbg(&devinfo->intf->dev, "scan complete\n");
}
static void uas_add_work(struct uas_cmd_info *cmdinfo)
{
struct scsi_pointer *scp = (void *)cmdinfo;
@@ -989,6 +1001,7 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
init_usb_anchor(&devinfo->data_urbs);
spin_lock_init(&devinfo->lock);
INIT_WORK(&devinfo->work, uas_do_work);
INIT_WORK(&devinfo->scan_work, uas_scan_work);
result = uas_configure_endpoints(devinfo);
if (result)
@@ -1005,7 +1018,9 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
if (result)
goto free_streams;
scsi_scan_host(shost);
/* Submit the delayed_work for SCSI-device scanning */
schedule_work(&devinfo->scan_work);
return result;
free_streams:
@@ -1173,6 +1188,12 @@ static void uas_disconnect(struct usb_interface *intf)
usb_kill_anchored_urbs(&devinfo->data_urbs);
uas_zap_pending(devinfo, DID_NO_CONNECT);
/*
* Prevent SCSI scanning (if it hasn't started yet)
* or wait for the SCSI-scanning routine to stop.
*/
cancel_work_sync(&devinfo->scan_work);
scsi_remove_host(shost);
uas_free_streams(devinfo);
scsi_host_put(shost);
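
Taken together, the three hunks form the usual deferred-work life cycle, summarized here as a comment-style sketch with the names used above:

/*
 *   probe:       INIT_WORK(&devinfo->scan_work, uas_scan_work);
 *                schedule_work(&devinfo->scan_work);
 *   disconnect:  cancel_work_sync(&devinfo->scan_work);
 *
 * cancel_work_sync() either removes a not-yet-run scan from the queue
 * or waits for a running uas_scan_work() to return, so
 * scsi_remove_host() can no longer race with scsi_scan_host().
 */
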


@@ -37,7 +37,9 @@ asmlinkage __visible void xen_maybe_preempt_hcall(void)
* cpu.
*/
__this_cpu_write(xen_in_preemptible_hcall, false);
_cond_resched();
local_irq_enable();
cond_resched();
local_irq_disable();
__this_cpu_write(xen_in_preemptible_hcall, true);
}
}
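
The point of the bracketing is that cond_resched() may actually schedule, and scheduling from a context with interrupts disabled is invalid; the removed _cond_resched() call merely bypassed the debug check rather than making the sleep safe, so the fix enables interrupts across the reschedule point and disables them again before re-entering the preemptible-hypercall state.
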


@@ -3153,6 +3153,7 @@ int open_ctree(struct super_block *sb,
if (IS_ERR(fs_info->fs_root)) {
err = PTR_ERR(fs_info->fs_root);
btrfs_warn(fs_info, "failed to read fs tree: %d", err);
fs_info->fs_root = NULL;
goto fail_qgroup;
}
@@ -4468,7 +4469,6 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
wake_up(&fs_info->transaction_wait);
btrfs_destroy_delayed_inodes(fs_info);
btrfs_assert_delayed_root_empty(fs_info);
btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
EXTENT_DIRTY);


@@ -10348,6 +10348,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_key ins;
u64 cur_offset = start;
u64 clear_offset = start;
u64 i_size;
u64 cur_bytes;
u64 last_alloc = (u64)-1;
@@ -10382,6 +10383,15 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
btrfs_end_transaction(trans);
break;
}
/*
* We've reserved this space, and thus converted it from
* ->bytes_may_use to ->bytes_reserved. Any error that happens
* from here on out we will only need to clear our reservation
* for the remaining unreserved area, so advance our
* clear_offset by our extent size.
*/
clear_offset += ins.offset;
btrfs_dec_block_group_reservations(fs_info, ins.objectid);
last_alloc = ins.offset;
@@ -10462,9 +10472,9 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
if (own_trans)
btrfs_end_transaction(trans);
}
if (cur_offset < end)
btrfs_free_reserved_data_space(inode, NULL, cur_offset,
end - cur_offset + 1);
if (clear_offset < end)
btrfs_free_reserved_data_space(inode, NULL, clear_offset,
end - clear_offset + 1);
return ret;
}

View File

@@ -712,10 +712,15 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
}
btrfs_start_ordered_extent(inode, ordered, 1);
end = ordered->file_offset;
/*
* If the ordered extent had an error save the error but don't
* exit without waiting first for all other ordered extents in
* the range to complete.
*/
if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
ret = -EIO;
btrfs_put_ordered_extent(ordered);
if (ret || end == 0 || end == start)
if (end == 0 || end == start)
break;
end--;
}


@@ -325,8 +325,10 @@ static int crypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
struct extent_crypt_result ecr;
int rc = 0;
BUG_ON(!crypt_stat || !crypt_stat->tfm
|| !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED));
if (!crypt_stat || !crypt_stat->tfm
|| !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED))
return -EINVAL;
if (unlikely(ecryptfs_verbosity > 0)) {
ecryptfs_printk(KERN_DEBUG, "Key size [%zd]; key:\n",
crypt_stat->key_size);


@@ -1318,7 +1318,7 @@ parse_tag_1_packet(struct ecryptfs_crypt_stat *crypt_stat,
printk(KERN_WARNING "Tag 1 packet contains key larger "
"than ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES\n");
rc = -EINVAL;
goto out;
goto out_free;
}
memcpy((*new_auth_tok)->session_key.encrypted_key,
&data[(*packet_size)], (body_size - (ECRYPTFS_SIG_SIZE + 2)));


@@ -392,6 +392,7 @@ int __init ecryptfs_init_messaging(void)
* ecryptfs_message_buf_len),
GFP_KERNEL);
if (!ecryptfs_msg_ctx_arr) {
kfree(ecryptfs_daemon_hash);
rc = -ENOMEM;
goto out;
}


@@ -270,6 +270,7 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
ext4_group_t ngroups = ext4_get_groups_count(sb);
struct ext4_group_desc *desc;
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct buffer_head *bh_p;
if (block_group >= ngroups) {
ext4_error(sb, "block_group >= groups_count - block_group = %u,"
@@ -280,7 +281,14 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
if (!sbi->s_group_desc[group_desc]) {
bh_p = sbi_array_rcu_deref(sbi, s_group_desc, group_desc);
/*
* sbi_array_rcu_deref returns with rcu unlocked, this is ok since
* the pointer being dereferenced won't be dereferenced again. By
* looking at the usage in add_new_gdb() the value isn't modified,
* just the pointer, and so it remains valid.
*/
if (!bh_p) {
ext4_error(sb, "Group descriptor not loaded - "
"block_group = %u, group_desc = %u, desc = %u",
block_group, group_desc, offset);
@@ -288,10 +296,10 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
}
desc = (struct ext4_group_desc *)(
(__u8 *)sbi->s_group_desc[group_desc]->b_data +
(__u8 *)bh_p->b_data +
offset * EXT4_DESC_SIZE(sb));
if (bh)
*bh = sbi->s_group_desc[group_desc];
*bh = bh_p;
return desc;
}


@@ -1381,7 +1381,7 @@ struct ext4_sb_info {
loff_t s_bitmap_maxbytes; /* max bytes for bitmap files */
struct buffer_head * s_sbh; /* Buffer containing the super block */
struct ext4_super_block *s_es; /* Pointer to the super block in the buffer */
struct buffer_head **s_group_desc;
struct buffer_head * __rcu *s_group_desc;
unsigned int s_mount_opt;
unsigned int s_mount_opt2;
unsigned int s_mount_flags;
@@ -1439,7 +1439,7 @@ struct ext4_sb_info {
#endif
/* for buddy allocator */
struct ext4_group_info ***s_group_info;
struct ext4_group_info ** __rcu *s_group_info;
struct inode *s_buddy_cache;
spinlock_t s_md_lock;
unsigned short *s_mb_offsets;
@@ -1489,7 +1489,7 @@ struct ext4_sb_info {
unsigned int s_extent_max_zeroout_kb;
unsigned int s_log_groups_per_flex;
struct flex_groups *s_flex_groups;
struct flex_groups * __rcu *s_flex_groups;
ext4_group_t s_flex_groups_allocated;
/* workqueue for reserved extent conversions (buffered io) */
@@ -1529,8 +1529,11 @@ struct ext4_sb_info {
struct ratelimit_state s_warning_ratelimit_state;
struct ratelimit_state s_msg_ratelimit_state;
/* Barrier between changing inodes' journal flags and writepages ops. */
struct percpu_rw_semaphore s_journal_flag_rwsem;
/*
* Barrier between writepages ops and changing any inode's JOURNAL_DATA
* or EXTENTS flag.
*/
struct percpu_rw_semaphore s_writepages_rwsem;
struct dax_device *s_daxdev;
};
@@ -1550,6 +1553,23 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count));
}
/*
* Returns: sbi->field[index]
* Used to access an array element from the following sbi fields which require
* rcu protection to avoid dereferencing an invalid pointer due to reassignment
* - s_group_desc
* - s_group_info
* - s_flex_groups
*/
#define sbi_array_rcu_deref(sbi, field, index) \
({ \
typeof(*((sbi)->field)) _v; \
rcu_read_lock(); \
_v = ((typeof(_v)*)rcu_dereference((sbi)->field))[index]; \
rcu_read_unlock(); \
_v; \
})
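
A hedged usage sketch of the reader side, given a field declared __rcu as in the hunks above; the element may be used after rcu_read_unlock() only because old arrays are retired through ext4_kvfree_array_rcu() rather than freed immediately:

static struct buffer_head *lookup_gdb(struct ext4_sb_info *sbi, int idx)
{
	/* safe snapshot of one element from the RCU-managed array */
	return sbi_array_rcu_deref(sbi, s_group_desc, idx);
}
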
/*
* Inode dynamic state flags
*/
@@ -2645,6 +2665,7 @@ extern int ext4_generic_delete_entry(handle_t *handle,
extern bool ext4_empty_dir(struct inode *inode);
/* resize.c */
extern void ext4_kvfree_array_rcu(void *to_free);
extern int ext4_group_add(struct super_block *sb,
struct ext4_new_group_data *input);
extern int ext4_group_extend(struct super_block *sb,
@@ -2892,13 +2913,13 @@ static inline
struct ext4_group_info *ext4_get_group_info(struct super_block *sb,
ext4_group_t group)
{
struct ext4_group_info ***grp_info;
struct ext4_group_info **grp_info;
long indexv, indexh;
BUG_ON(group >= EXT4_SB(sb)->s_groups_count);
grp_info = EXT4_SB(sb)->s_group_info;
indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb));
indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1);
return grp_info[indexv][indexh];
grp_info = sbi_array_rcu_deref(EXT4_SB(sb), s_group_info, indexv);
return grp_info[indexh];
}
/*
@@ -2948,7 +2969,7 @@ static inline void ext4_update_i_disksize(struct inode *inode, loff_t newsize)
!inode_is_locked(inode));
down_write(&EXT4_I(inode)->i_data_sem);
if (newsize > EXT4_I(inode)->i_disksize)
EXT4_I(inode)->i_disksize = newsize;
WRITE_ONCE(EXT4_I(inode)->i_disksize, newsize);
up_write(&EXT4_I(inode)->i_data_sem);
}
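
The WRITE_ONCE() here pairs with the READ_ONCE() added to mpage_map_and_submit_extent() later in this series: the writer still publishes i_disksize under i_data_sem, but the lockless reader now gets a single untorn load instead of a value the compiler is free to re-read or tear.
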


@@ -330,11 +330,13 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
percpu_counter_inc(&sbi->s_freeinodes_counter);
if (sbi->s_log_groups_per_flex) {
ext4_group_t f = ext4_flex_group(sbi, block_group);
struct flex_groups *fg;
atomic_inc(&sbi->s_flex_groups[f].free_inodes);
fg = sbi_array_rcu_deref(sbi, s_flex_groups,
ext4_flex_group(sbi, block_group));
atomic_inc(&fg->free_inodes);
if (is_directory)
atomic_dec(&sbi->s_flex_groups[f].used_dirs);
atomic_dec(&fg->used_dirs);
}
BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
fatal = ext4_handle_dirty_metadata(handle, NULL, bh2);
@@ -370,12 +372,13 @@ static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
int flex_size, struct orlov_stats *stats)
{
struct ext4_group_desc *desc;
struct flex_groups *flex_group = EXT4_SB(sb)->s_flex_groups;
if (flex_size > 1) {
stats->free_inodes = atomic_read(&flex_group[g].free_inodes);
stats->free_clusters = atomic64_read(&flex_group[g].free_clusters);
stats->used_dirs = atomic_read(&flex_group[g].used_dirs);
struct flex_groups *fg = sbi_array_rcu_deref(EXT4_SB(sb),
s_flex_groups, g);
stats->free_inodes = atomic_read(&fg->free_inodes);
stats->free_clusters = atomic64_read(&fg->free_clusters);
stats->used_dirs = atomic_read(&fg->used_dirs);
return;
}
@@ -1056,7 +1059,8 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
if (sbi->s_log_groups_per_flex) {
ext4_group_t f = ext4_flex_group(sbi, group);
atomic_inc(&sbi->s_flex_groups[f].used_dirs);
atomic_inc(&sbi_array_rcu_deref(sbi, s_flex_groups,
f)->used_dirs);
}
}
if (ext4_has_group_desc_csum(sb)) {
@@ -1079,7 +1083,8 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
if (sbi->s_log_groups_per_flex) {
flex_group = ext4_flex_group(sbi, group);
atomic_dec(&sbi->s_flex_groups[flex_group].free_inodes);
atomic_dec(&sbi_array_rcu_deref(sbi, s_flex_groups,
flex_group)->free_inodes);
}
inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);


@@ -2595,7 +2595,7 @@ static int mpage_map_and_submit_extent(handle_t *handle,
* truncate are avoided by checking i_size under i_data_sem.
*/
disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT;
if (disksize > EXT4_I(inode)->i_disksize) {
if (disksize > READ_ONCE(EXT4_I(inode)->i_disksize)) {
int err2;
loff_t i_size;
@@ -2756,7 +2756,7 @@ static int ext4_writepages(struct address_space *mapping,
if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
return -EIO;
percpu_down_read(&sbi->s_journal_flag_rwsem);
percpu_down_read(&sbi->s_writepages_rwsem);
trace_ext4_writepages(inode, wbc);
/*
@@ -2976,7 +2976,7 @@ static int ext4_writepages(struct address_space *mapping,
out_writepages:
trace_ext4_writepages_result(inode, wbc, ret,
nr_to_write - wbc->nr_to_write);
percpu_up_read(&sbi->s_journal_flag_rwsem);
percpu_up_read(&sbi->s_writepages_rwsem);
return ret;
}
@@ -2991,13 +2991,13 @@ static int ext4_dax_writepages(struct address_space *mapping,
if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
return -EIO;
percpu_down_read(&sbi->s_journal_flag_rwsem);
percpu_down_read(&sbi->s_writepages_rwsem);
trace_ext4_writepages(inode, wbc);
ret = dax_writeback_mapping_range(mapping, inode->i_sb->s_bdev, wbc);
trace_ext4_writepages_result(inode, wbc, ret,
nr_to_write - wbc->nr_to_write);
percpu_up_read(&sbi->s_journal_flag_rwsem);
percpu_up_read(&sbi->s_writepages_rwsem);
return ret;
}
@@ -6294,7 +6294,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
}
}
percpu_down_write(&sbi->s_journal_flag_rwsem);
percpu_down_write(&sbi->s_writepages_rwsem);
jbd2_journal_lock_updates(journal);
/*
@@ -6311,7 +6311,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
err = jbd2_journal_flush(journal);
if (err < 0) {
jbd2_journal_unlock_updates(journal);
percpu_up_write(&sbi->s_journal_flag_rwsem);
percpu_up_write(&sbi->s_writepages_rwsem);
return err;
}
ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
@@ -6319,7 +6319,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
ext4_set_aops(inode);
jbd2_journal_unlock_updates(journal);
percpu_up_write(&sbi->s_journal_flag_rwsem);
percpu_up_write(&sbi->s_writepages_rwsem);
if (val)
up_write(&EXT4_I(inode)->i_mmap_sem);


@@ -2356,7 +2356,7 @@ int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
unsigned size;
struct ext4_group_info ***new_groupinfo;
struct ext4_group_info ***old_groupinfo, ***new_groupinfo;
size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >>
EXT4_DESC_PER_BLOCK_BITS(sb);
@@ -2369,13 +2369,16 @@ int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
return -ENOMEM;
}
if (sbi->s_group_info) {
memcpy(new_groupinfo, sbi->s_group_info,
rcu_read_lock();
old_groupinfo = rcu_dereference(sbi->s_group_info);
if (old_groupinfo)
memcpy(new_groupinfo, old_groupinfo,
sbi->s_group_info_size * sizeof(*sbi->s_group_info));
kvfree(sbi->s_group_info);
}
sbi->s_group_info = new_groupinfo;
rcu_read_unlock();
rcu_assign_pointer(sbi->s_group_info, new_groupinfo);
sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
if (old_groupinfo)
ext4_kvfree_array_rcu(old_groupinfo);
ext4_debug("allocated s_groupinfo array for %d meta_bg's\n",
sbi->s_group_info_size);
return 0;
@@ -2387,6 +2390,7 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
{
int i;
int metalen = 0;
int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb);
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_group_info **meta_group_info;
struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
@@ -2405,12 +2409,12 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
"for a buddy group");
goto exit_meta_group_info;
}
sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] =
meta_group_info;
rcu_read_lock();
rcu_dereference(sbi->s_group_info)[idx] = meta_group_info;
rcu_read_unlock();
}
meta_group_info =
sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)];
meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx);
i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS);
@@ -2458,8 +2462,13 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
exit_group_info:
/* If a meta_group_info table has been allocated, release it now */
if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]);
sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = NULL;
struct ext4_group_info ***group_info;
rcu_read_lock();
group_info = rcu_dereference(sbi->s_group_info);
kfree(group_info[idx]);
group_info[idx] = NULL;
rcu_read_unlock();
}
exit_meta_group_info:
return -ENOMEM;
@@ -2472,6 +2481,7 @@ static int ext4_mb_init_backend(struct super_block *sb)
struct ext4_sb_info *sbi = EXT4_SB(sb);
int err;
struct ext4_group_desc *desc;
struct ext4_group_info ***group_info;
struct kmem_cache *cachep;
err = ext4_mb_alloc_groupinfo(sb, ngroups);
@@ -2506,11 +2516,16 @@ static int ext4_mb_init_backend(struct super_block *sb)
while (i-- > 0)
kmem_cache_free(cachep, ext4_get_group_info(sb, i));
i = sbi->s_group_info_size;
rcu_read_lock();
group_info = rcu_dereference(sbi->s_group_info);
while (i-- > 0)
kfree(sbi->s_group_info[i]);
kfree(group_info[i]);
rcu_read_unlock();
iput(sbi->s_buddy_cache);
err_freesgi:
kvfree(sbi->s_group_info);
rcu_read_lock();
kvfree(rcu_dereference(sbi->s_group_info));
rcu_read_unlock();
return -ENOMEM;
}
@@ -2699,7 +2714,7 @@ int ext4_mb_release(struct super_block *sb)
ext4_group_t ngroups = ext4_get_groups_count(sb);
ext4_group_t i;
int num_meta_group_infos;
struct ext4_group_info *grinfo;
struct ext4_group_info *grinfo, ***group_info;
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
@@ -2717,9 +2732,12 @@ int ext4_mb_release(struct super_block *sb)
num_meta_group_infos = (ngroups +
EXT4_DESC_PER_BLOCK(sb) - 1) >>
EXT4_DESC_PER_BLOCK_BITS(sb);
rcu_read_lock();
group_info = rcu_dereference(sbi->s_group_info);
for (i = 0; i < num_meta_group_infos; i++)
kfree(sbi->s_group_info[i]);
kvfree(sbi->s_group_info);
kfree(group_info[i]);
kvfree(group_info);
rcu_read_unlock();
}
kfree(sbi->s_mb_offsets);
kfree(sbi->s_mb_maxs);
@@ -3018,7 +3036,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
ext4_group_t flex_group = ext4_flex_group(sbi,
ac->ac_b_ex.fe_group);
atomic64_sub(ac->ac_b_ex.fe_len,
&sbi->s_flex_groups[flex_group].free_clusters);
&sbi_array_rcu_deref(sbi, s_flex_groups,
flex_group)->free_clusters);
}
err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
@@ -4912,7 +4931,8 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
if (sbi->s_log_groups_per_flex) {
ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
atomic64_add(count_clusters,
&sbi->s_flex_groups[flex_group].free_clusters);
&sbi_array_rcu_deref(sbi, s_flex_groups,
flex_group)->free_clusters);
}
if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
@@ -5061,7 +5081,8 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
if (sbi->s_log_groups_per_flex) {
ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
atomic64_add(clusters_freed,
&sbi->s_flex_groups[flex_group].free_clusters);
&sbi_array_rcu_deref(sbi, s_flex_groups,
flex_group)->free_clusters);
}
ext4_mb_unload_buddy(&e4b);


@@ -427,6 +427,7 @@ static int free_ext_block(handle_t *handle, struct inode *inode)
int ext4_ext_migrate(struct inode *inode)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
handle_t *handle;
int retval = 0, i;
__le32 *i_data;
@@ -451,6 +452,8 @@ int ext4_ext_migrate(struct inode *inode)
*/
return retval;
percpu_down_write(&sbi->s_writepages_rwsem);
/*
* Worst case we can touch the allocation bitmaps, a bgd
* block, and a block to link in the orphan list. We do need
@@ -461,7 +464,7 @@ int ext4_ext_migrate(struct inode *inode)
if (IS_ERR(handle)) {
retval = PTR_ERR(handle);
return retval;
goto out_unlock;
}
goal = (((inode->i_ino - 1) / EXT4_INODES_PER_GROUP(inode->i_sb)) *
EXT4_INODES_PER_GROUP(inode->i_sb)) + 1;
@@ -472,7 +475,7 @@ int ext4_ext_migrate(struct inode *inode)
if (IS_ERR(tmp_inode)) {
retval = PTR_ERR(tmp_inode);
ext4_journal_stop(handle);
return retval;
goto out_unlock;
}
i_size_write(tmp_inode, i_size_read(inode));
/*
@@ -514,7 +517,7 @@ int ext4_ext_migrate(struct inode *inode)
*/
ext4_orphan_del(NULL, tmp_inode);
retval = PTR_ERR(handle);
goto out;
goto out_tmp_inode;
}
ei = EXT4_I(inode);
@@ -595,10 +598,11 @@ int ext4_ext_migrate(struct inode *inode)
/* Reset the extent details */
ext4_ext_tree_init(handle, tmp_inode);
ext4_journal_stop(handle);
out:
out_tmp_inode:
unlock_new_inode(tmp_inode);
iput(tmp_inode);
out_unlock:
percpu_up_write(&sbi->s_writepages_rwsem);
return retval;
}
@@ -608,7 +612,8 @@ int ext4_ext_migrate(struct inode *inode)
int ext4_ind_migrate(struct inode *inode)
{
struct ext4_extent_header *eh;
struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct ext4_super_block *es = sbi->s_es;
struct ext4_inode_info *ei = EXT4_I(inode);
struct ext4_extent *ex;
unsigned int i, len;
@@ -632,9 +637,13 @@ int ext4_ind_migrate(struct inode *inode)
if (test_opt(inode->i_sb, DELALLOC))
ext4_alloc_da_blocks(inode);
percpu_down_write(&sbi->s_writepages_rwsem);
handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
if (IS_ERR(handle))
return PTR_ERR(handle);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
goto out_unlock;
}
down_write(&EXT4_I(inode)->i_data_sem);
ret = ext4_ext_check_inode(inode);
@@ -669,5 +678,7 @@ int ext4_ind_migrate(struct inode *inode)
errout:
ext4_journal_stop(handle);
up_write(&EXT4_I(inode)->i_data_sem);
out_unlock:
percpu_up_write(&sbi->s_writepages_rwsem);
return ret;
}


@@ -1506,6 +1506,7 @@ static struct buffer_head *__ext4_find_entry(struct inode *dir,
/*
* We deal with the read-ahead logic here.
*/
cond_resched();
if (ra_ptr >= ra_max) {
/* Refill the readahead buffer */
ra_ptr = 0;


@@ -17,6 +17,33 @@
#include "ext4_jbd2.h"
struct ext4_rcu_ptr {
struct rcu_head rcu;
void *ptr;
};
static void ext4_rcu_ptr_callback(struct rcu_head *head)
{
struct ext4_rcu_ptr *ptr;
ptr = container_of(head, struct ext4_rcu_ptr, rcu);
kvfree(ptr->ptr);
kfree(ptr);
}
void ext4_kvfree_array_rcu(void *to_free)
{
struct ext4_rcu_ptr *ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
if (ptr) {
ptr->ptr = to_free;
call_rcu(&ptr->rcu, ext4_rcu_ptr_callback);
return;
}
synchronize_rcu();
kvfree(to_free);
}
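
For orientation, the writer-side pattern these helpers enable looks roughly like the following sketch (the function name is hypothetical; the real callers are the mballoc hunks above and the resize hunks below):

static int grow_array_example(struct ext4_sb_info *sbi, int old_n, int new_n)
{
	struct buffer_head **old_arr, **new_arr;

	new_arr = kvzalloc(new_n * sizeof(*new_arr), GFP_KERNEL);
	if (!new_arr)
		return -ENOMEM;

	rcu_read_lock();
	old_arr = rcu_dereference(sbi->s_group_desc);
	memcpy(new_arr, old_arr, old_n * sizeof(*new_arr));
	rcu_read_unlock();

	rcu_assign_pointer(sbi->s_group_desc, new_arr);	/* publish */
	ext4_kvfree_array_rcu(old_arr);	/* free only after readers drain */
	return 0;
}
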
int ext4_resize_begin(struct super_block *sb)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
@@ -560,8 +587,8 @@ static int setup_new_flex_group_blocks(struct super_block *sb,
brelse(gdb);
goto out;
}
memcpy(gdb->b_data, sbi->s_group_desc[j]->b_data,
gdb->b_size);
memcpy(gdb->b_data, sbi_array_rcu_deref(sbi,
s_group_desc, j)->b_data, gdb->b_size);
set_buffer_uptodate(gdb);
err = ext4_handle_dirty_metadata(handle, NULL, gdb);
@@ -879,13 +906,15 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
}
brelse(dind);
o_group_desc = EXT4_SB(sb)->s_group_desc;
rcu_read_lock();
o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
memcpy(n_group_desc, o_group_desc,
EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
rcu_read_unlock();
n_group_desc[gdb_num] = gdb_bh;
EXT4_SB(sb)->s_group_desc = n_group_desc;
rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
EXT4_SB(sb)->s_gdb_count++;
kvfree(o_group_desc);
ext4_kvfree_array_rcu(o_group_desc);
le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
err = ext4_handle_dirty_super(handle, sb);
@@ -929,9 +958,11 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
return err;
}
o_group_desc = EXT4_SB(sb)->s_group_desc;
rcu_read_lock();
o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
memcpy(n_group_desc, o_group_desc,
EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
rcu_read_unlock();
n_group_desc[gdb_num] = gdb_bh;
BUFFER_TRACE(gdb_bh, "get_write_access");
@@ -942,9 +973,9 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
return err;
}
EXT4_SB(sb)->s_group_desc = n_group_desc;
rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
EXT4_SB(sb)->s_gdb_count++;
kvfree(o_group_desc);
ext4_kvfree_array_rcu(o_group_desc);
return err;
}
@@ -1210,7 +1241,8 @@ static int ext4_add_new_descs(handle_t *handle, struct super_block *sb,
* use non-sparse filesystems anymore. This is already checked above.
*/
if (gdb_off) {
gdb_bh = sbi->s_group_desc[gdb_num];
gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
gdb_num);
BUFFER_TRACE(gdb_bh, "get_write_access");
err = ext4_journal_get_write_access(handle, gdb_bh);
@@ -1292,7 +1324,7 @@ static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
/*
* get_write_access() has been called on gdb_bh by ext4_add_new_desc().
*/
gdb_bh = sbi->s_group_desc[gdb_num];
gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, gdb_num);
/* Update group descriptor block for new group */
gdp = (struct ext4_group_desc *)(gdb_bh->b_data +
gdb_off * EXT4_DESC_SIZE(sb));
@@ -1420,11 +1452,14 @@ static void ext4_update_super(struct super_block *sb,
percpu_counter_read(&sbi->s_freeclusters_counter));
if (ext4_has_feature_flex_bg(sb) && sbi->s_log_groups_per_flex) {
ext4_group_t flex_group;
struct flex_groups *fg;
flex_group = ext4_flex_group(sbi, group_data[0].group);
fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
atomic64_add(EXT4_NUM_B2C(sbi, free_blocks),
&sbi->s_flex_groups[flex_group].free_clusters);
&fg->free_clusters);
atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count,
&sbi->s_flex_groups[flex_group].free_inodes);
&fg->free_inodes);
}
/*
@@ -1519,7 +1554,8 @@ static int ext4_flex_group_add(struct super_block *sb,
for (; gdb_num <= gdb_num_end; gdb_num++) {
struct buffer_head *gdb_bh;
gdb_bh = sbi->s_group_desc[gdb_num];
gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
gdb_num);
if (old_gdb == gdb_bh->b_blocknr)
continue;
update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data,


@@ -970,6 +970,8 @@ static void ext4_put_super(struct super_block *sb)
 {
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	struct ext4_super_block *es = sbi->s_es;
+	struct buffer_head **group_desc;
+	struct flex_groups **flex_groups;
 	int aborted = 0;
 	int i, err;
@@ -1000,15 +1002,23 @@ static void ext4_put_super(struct super_block *sb)
 	if (!sb_rdonly(sb))
 		ext4_commit_super(sb, 1);
+	rcu_read_lock();
+	group_desc = rcu_dereference(sbi->s_group_desc);
 	for (i = 0; i < sbi->s_gdb_count; i++)
-		brelse(sbi->s_group_desc[i]);
-	kvfree(sbi->s_group_desc);
-	kvfree(sbi->s_flex_groups);
+		brelse(group_desc[i]);
+	kvfree(group_desc);
+	flex_groups = rcu_dereference(sbi->s_flex_groups);
+	if (flex_groups) {
+		for (i = 0; i < sbi->s_flex_groups_allocated; i++)
+			kvfree(flex_groups[i]);
+		kvfree(flex_groups);
+	}
+	rcu_read_unlock();
 	percpu_counter_destroy(&sbi->s_freeclusters_counter);
 	percpu_counter_destroy(&sbi->s_freeinodes_counter);
 	percpu_counter_destroy(&sbi->s_dirs_counter);
 	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
-	percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
+	percpu_free_rwsem(&sbi->s_writepages_rwsem);
 #ifdef CONFIG_QUOTA
 	for (i = 0; i < EXT4_MAXQUOTAS; i++)
 		kfree(get_qf_name(sb, sbi, i));
@@ -2355,8 +2365,8 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
 int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
 {
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
-	struct flex_groups *new_groups;
-	int size;
+	struct flex_groups **old_groups, **new_groups;
+	int size, i;
 	if (!sbi->s_log_groups_per_flex)
 		return 0;
@@ -2365,22 +2375,37 @@ int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
 	if (size <= sbi->s_flex_groups_allocated)
 		return 0;
-	size = roundup_pow_of_two(size * sizeof(struct flex_groups));
-	new_groups = kvzalloc(size, GFP_KERNEL);
+	new_groups = kvzalloc(roundup_pow_of_two(size *
+			      sizeof(*sbi->s_flex_groups)), GFP_KERNEL);
 	if (!new_groups) {
-		ext4_msg(sb, KERN_ERR, "not enough memory for %d flex groups",
-			 size / (int) sizeof(struct flex_groups));
+		ext4_msg(sb, KERN_ERR,
+			 "not enough memory for %d flex group pointers", size);
 		return -ENOMEM;
 	}
-	if (sbi->s_flex_groups) {
-		memcpy(new_groups, sbi->s_flex_groups,
-		       (sbi->s_flex_groups_allocated *
-			sizeof(struct flex_groups)));
-		kvfree(sbi->s_flex_groups);
+	for (i = sbi->s_flex_groups_allocated; i < size; i++) {
+		new_groups[i] = kvzalloc(roundup_pow_of_two(
+					 sizeof(struct flex_groups)),
+					 GFP_KERNEL);
+		if (!new_groups[i]) {
+			for (i--; i >= sbi->s_flex_groups_allocated; i--)
+				kvfree(new_groups[i]);
+			kvfree(new_groups);
+			ext4_msg(sb, KERN_ERR,
+				 "not enough memory for %d flex groups", size);
+			return -ENOMEM;
+		}
 	}
-	sbi->s_flex_groups = new_groups;
-	sbi->s_flex_groups_allocated = size / sizeof(struct flex_groups);
+	rcu_read_lock();
+	old_groups = rcu_dereference(sbi->s_flex_groups);
+	if (old_groups)
+		memcpy(new_groups, old_groups,
+		       (sbi->s_flex_groups_allocated *
+			sizeof(struct flex_groups *)));
+	rcu_read_unlock();
+	rcu_assign_pointer(sbi->s_flex_groups, new_groups);
+	sbi->s_flex_groups_allocated = size;
+	if (old_groups)
+		ext4_kvfree_array_rcu(old_groups);
 	return 0;
 }
@@ -2388,6 +2413,7 @@ static int ext4_fill_flex_info(struct super_block *sb)
 {
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	struct ext4_group_desc *gdp = NULL;
+	struct flex_groups *fg;
 	ext4_group_t flex_group;
 	int i, err;
@@ -2405,12 +2431,11 @@ static int ext4_fill_flex_info(struct super_block *sb)
 		gdp = ext4_get_group_desc(sb, i, NULL);
 		flex_group = ext4_flex_group(sbi, i);
-		atomic_add(ext4_free_inodes_count(sb, gdp),
-			   &sbi->s_flex_groups[flex_group].free_inodes);
+		fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
+		atomic_add(ext4_free_inodes_count(sb, gdp), &fg->free_inodes);
 		atomic64_add(ext4_free_group_clusters(sb, gdp),
-			     &sbi->s_flex_groups[flex_group].free_clusters);
-		atomic_add(ext4_used_dirs_count(sb, gdp),
-			   &sbi->s_flex_groups[flex_group].used_dirs);
+			     &fg->free_clusters);
+		atomic_add(ext4_used_dirs_count(sb, gdp), &fg->used_dirs);
 	}
 	return 1;
@@ -3000,7 +3025,7 @@ static int ext4_feature_set_ok(struct super_block *sb, int readonly)
 		return 0;
 	}
-#if !defined(CONFIG_QUOTA) || !defined(CONFIG_QFMT_V2)
+#if !IS_ENABLED(CONFIG_QUOTA) || !IS_ENABLED(CONFIG_QFMT_V2)
 	if (!readonly && (ext4_has_feature_quota(sb) ||
 			  ext4_has_feature_project(sb))) {
 		ext4_msg(sb, KERN_ERR,
@@ -3625,9 +3650,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 {
 	struct dax_device *dax_dev = fs_dax_get_by_bdev(sb->s_bdev);
 	char *orig_data = kstrdup(data, GFP_KERNEL);
-	struct buffer_head *bh;
+	struct buffer_head *bh, **group_desc;
 	struct ext4_super_block *es = NULL;
 	struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
+	struct flex_groups **flex_groups;
 	ext4_fsblk_t block;
 	ext4_fsblk_t sb_block = get_sb_block(&data);
 	ext4_fsblk_t logical_sb_block;
@@ -4280,9 +4306,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 			goto failed_mount;
 		}
 	}
-	sbi->s_group_desc = kvmalloc_array(db_count,
-					   sizeof(struct buffer_head *),
-					   GFP_KERNEL);
+	rcu_assign_pointer(sbi->s_group_desc,
+			   kvmalloc_array(db_count,
+					  sizeof(struct buffer_head *),
+					  GFP_KERNEL));
 	if (sbi->s_group_desc == NULL) {
 		ext4_msg(sb, KERN_ERR, "not enough memory");
 		ret = -ENOMEM;
@@ -4298,14 +4325,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	}
 	for (i = 0; i < db_count; i++) {
+		struct buffer_head *bh;
 		block = descriptor_loc(sb, logical_sb_block, i);
-		sbi->s_group_desc[i] = sb_bread_unmovable(sb, block);
-		if (!sbi->s_group_desc[i]) {
+		bh = sb_bread_unmovable(sb, block);
+		if (!bh) {
 			ext4_msg(sb, KERN_ERR,
 			       "can't read group descriptor %d", i);
 			db_count = i;
 			goto failed_mount2;
 		}
+		rcu_read_lock();
+		rcu_dereference(sbi->s_group_desc)[i] = bh;
+		rcu_read_unlock();
 	}
 	sbi->s_gdb_count = db_count;
 	if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
@@ -4586,7 +4618,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
 				  GFP_KERNEL);
 	if (!err)
-		err = percpu_init_rwsem(&sbi->s_journal_flag_rwsem);
+		err = percpu_init_rwsem(&sbi->s_writepages_rwsem);
 	if (err) {
 		ext4_msg(sb, KERN_ERR, "insufficient memory");
@@ -4674,13 +4706,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	ext4_unregister_li_request(sb);
 failed_mount6:
 	ext4_mb_release(sb);
-	if (sbi->s_flex_groups)
-		kvfree(sbi->s_flex_groups);
+	rcu_read_lock();
+	flex_groups = rcu_dereference(sbi->s_flex_groups);
+	if (flex_groups) {
+		for (i = 0; i < sbi->s_flex_groups_allocated; i++)
+			kvfree(flex_groups[i]);
+		kvfree(flex_groups);
+	}
+	rcu_read_unlock();
 	percpu_counter_destroy(&sbi->s_freeclusters_counter);
 	percpu_counter_destroy(&sbi->s_freeinodes_counter);
 	percpu_counter_destroy(&sbi->s_dirs_counter);
 	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
-	percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
+	percpu_free_rwsem(&sbi->s_writepages_rwsem);
 failed_mount5:
 	ext4_ext_release(sb);
 	ext4_release_system_zone(sb);
@@ -4711,9 +4749,12 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	if (sbi->s_mmp_tsk)
 		kthread_stop(sbi->s_mmp_tsk);
 failed_mount2:
+	rcu_read_lock();
+	group_desc = rcu_dereference(sbi->s_group_desc);
 	for (i = 0; i < db_count; i++)
-		brelse(sbi->s_group_desc[i]);
-	kvfree(sbi->s_group_desc);
+		brelse(group_desc[i]);
+	kvfree(group_desc);
+	rcu_read_unlock();
 failed_mount:
 	if (sbi->s_chksum_driver)
 		crypto_free_shash(sbi->s_chksum_driver);
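
Note: sbi_array_rcu_deref(), used throughout the hunks above, is a helper the same patch adds to fs/ext4/ext4.h (not shown in this excerpt). It is essentially the macro below: it fetches a single array element while inside an RCU read-side critical section, so a concurrent resize cannot free the array out from under the reader:

#define sbi_array_rcu_deref(sbi, field, index)				   \
({									   \
	typeof(*((sbi)->field)) _v;					   \
	rcu_read_lock();						   \
	_v = ((typeof((sbi)->field))rcu_dereference((sbi)->field))[index]; \
	rcu_read_unlock();						   \
	_v;								   \
})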


@@ -831,8 +831,6 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
 	char *frozen_buffer = NULL;
 	unsigned long start_lock, time_lock;
-	if (is_handle_aborted(handle))
-		return -EROFS;
 	journal = transaction->t_journal;
 	jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);
@@ -1084,6 +1082,9 @@ int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
 	struct journal_head *jh;
 	int rc;
+	if (is_handle_aborted(handle))
+		return -EROFS;
+
 	if (jbd2_write_access_granted(handle, bh, false))
 		return 0;
@@ -1221,6 +1222,9 @@ int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
 	struct journal_head *jh;
 	char *committed_data = NULL;
+	if (is_handle_aborted(handle))
+		return -EROFS;
+
 	if (jbd2_write_access_granted(handle, bh, true))
 		return 0;
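
Note: the check moves because jbd2_journal_get_write_access() has a fast path — when jbd2_write_access_granted() reports that access was already granted, it returns before do_get_write_access() ever runs, so the old location of the aborted-handle check could be bypassed and a caller could keep modifying buffers under an aborted journal. A sketch of the caller pattern this now protects (modify_block is illustrative):

#include <linux/jbd2.h>

static int modify_block(handle_t *handle, struct buffer_head *bh)
{
	int err;

	err = jbd2_journal_get_write_access(handle, bh);
	if (err)
		return err;	/* now -EROFS as soon as the handle is aborted */

	/* ... modify bh->b_data ... */
	return jbd2_journal_dirty_metadata(handle, bh);
}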


@@ -130,7 +130,7 @@ static inline int intel_svm_unbind_mm(struct device *dev, int pasid)
 	BUG();
 }
-static int intel_svm_is_pasid_valid(struct device *dev, int pasid)
+static inline int intel_svm_is_pasid_valid(struct device *dev, int pasid)
 {
 	return -EINVAL;
 }
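
Note: adding 'inline' here follows the standard rule for stubs defined in headers — a plain 'static' function is duplicated into every translation unit that includes the header and triggers -Wunused-function wherever it is not called, while 'static inline' is expected to go unused in most includers. Illustrative shape of such a fallback stub (hypothetical names):

/* example.h - fallback when the feature is compiled out */
#include <linux/errno.h>

#ifndef CONFIG_EXAMPLE_FEATURE
static inline int example_feature_query(int id)
{
	return -EINVAL;		/* feature not built in */
}
#endif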


@@ -188,7 +188,7 @@ enum {
 	IRQ_DOMAIN_FLAG_HIERARCHY	= (1 << 0),
 	/* Irq domain name was allocated in __irq_domain_add() */
-	IRQ_DOMAIN_NAME_ALLOCATED	= (1 << 6),
+	IRQ_DOMAIN_NAME_ALLOCATED	= (1 << 1),
 	/* Irq domain is an IPI domain with virq per cpu */
 	IRQ_DOMAIN_FLAG_IPI_PER_CPU	= (1 << 2),
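
Note: the renumbering is needed because IRQ_DOMAIN_NAME_ALLOCATED shared bit 6 with the then-new IRQ_DOMAIN_MSI_NOMASK_QUIRK flag, so testing either flag also matched the other; bit 1 was free. A standalone demonstration of why overlapping flag bits are dangerous (values illustrative, not the kernel's):

#include <stdio.h>

enum {
	FLAG_A       = 1 << 6,
	FLAG_B       = 1 << 6,	/* collides with FLAG_A */
	FLAG_B_FIXED = 1 << 1,	/* distinct bit, as in the fix above */
};

int main(void)
{
	unsigned int flags = FLAG_A;

	printf("FLAG_B set? %d\n", !!(flags & FLAG_B));		/* 1: false positive */
	printf("FLAG_B_FIXED set? %d\n", !!(flags & FLAG_B_FIXED));	/* 0 */
	return 0;
}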


@@ -1236,6 +1236,7 @@ struct pci_bits {
 };
 extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits);
+extern void ata_pci_shutdown_one(struct pci_dev *pdev);
 extern void ata_pci_remove_one(struct pci_dev *pdev);
 #ifdef CONFIG_PM


@@ -225,6 +225,8 @@ struct tty_port_client_operations {
 	void (*write_wakeup)(struct tty_port *port);
 };
 
+extern const struct tty_port_client_operations tty_port_default_client_ops;
+
 struct tty_port {
 	struct tty_bufhead buf;		/* Locked internally */
 	struct tty_struct *tty;		/* Back pointer */


@@ -69,4 +69,7 @@
 /* Hub needs extra delay after resetting its port. */
 #define USB_QUIRK_HUB_SLOW_RESET	BIT(14)
 
+/* device has blacklisted endpoints */
+#define USB_QUIRK_ENDPOINT_BLACKLIST	BIT(15)
+
 #endif /* __LINUX_USB_QUIRKS_H */


@@ -638,7 +638,6 @@ struct iscsi_reject {
 #define ISCSI_REASON_BOOKMARK_INVALID	9
 #define ISCSI_REASON_BOOKMARK_NO_RESOURCES	10
 #define ISCSI_REASON_NEGOTIATION_RESET	11
-#define ISCSI_REASON_WAITING_FOR_LOGOUT	12
 /* Max. number of Key=Value pairs in a text message */
 #define MAX_KEY_VALUE_PAIRS	8192


@@ -92,9 +92,9 @@ struct snd_rawmidi_substream {
 	struct list_head list;		/* list of all substream for given stream */
 	int stream;			/* direction */
 	int number;			/* substream number */
-	unsigned int opened: 1,		/* open flag */
-		     append: 1,		/* append flag (merge more streams) */
-		     active_sensing: 1; /* send active sensing when close */
+	bool opened;			/* open flag */
+	bool append;			/* append flag (merge more streams) */
+	bool active_sensing;		/* send active sensing when close */
 	int use_count;			/* use counter (for output) */
 	size_t bytes;
 	struct snd_rawmidi *rmidi;
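
Note: the bit-field-to-bool conversion above is a concurrency fix, not a style change. Adjacent C bit fields share one storage unit, so two threads updating different flags perform racing read-modify-write cycles on the same word and one update can be lost; separate bools are independently addressable. A standalone illustration (structs are illustrative, not the ALSA ones):

#include <stdbool.h>
#include <stdio.h>

struct flags_packed {
	unsigned int opened:1, append:1, active_sensing:1;	/* one shared word */
};

struct flags_plain {
	bool opened, append, active_sensing;	/* one byte each */
};

int main(void)
{
	printf("packed: %zu bytes\n", sizeof(struct flags_packed));
	printf("plain : %zu bytes\n", sizeof(struct flags_plain));
	return 0;
}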


@@ -2345,11 +2345,9 @@ void exit_sem(struct task_struct *tsk)
 		ipc_assert_locked_object(&sma->sem_perm);
 		list_del(&un->list_id);
-		/* we are the last process using this ulp, acquiring ulp->lock
-		 * isn't required. Besides that, we are also protected against
-		 * IPC_RMID as we hold sma->sem_perm lock now
-		 */
+		spin_lock(&ulp->lock);
 		list_del_rcu(&un->list_proc);
+		spin_unlock(&ulp->lock);
 		/* perform adjustments registered in un */
 		for (i = 0; i < sma->sem_nsems; i++) {


@@ -289,7 +289,7 @@ int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
 	ulen = info->jited_prog_len;
 	info->jited_prog_len = aux->offload->jited_len;
-	if (info->jited_prog_len & ulen) {
+	if (info->jited_prog_len && ulen) {
 		uinsns = u64_to_user_ptr(info->jited_prog_insns);
 		ulen = min_t(u32, info->jited_prog_len, ulen);
 		if (copy_to_user(uinsns, aux->offload->jited_image, ulen)) {
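
Note: a one-character fix — bitwise '&' only takes the branch when the two lengths happen to share a set bit, so valid length combinations were skipped; logical '&&' tests that both are non-zero, which is what was meant. Demonstration:

#include <stdio.h>

int main(void)
{
	unsigned int len = 4, ulen = 3;		/* both non-zero */

	printf("len & ulen  = %u\n", len & ulen);	/* 0: no common bits */
	printf("len && ulen = %d\n", len && ulen);	/* 1: both non-zero */
	return 0;
}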


@@ -126,8 +126,6 @@ static inline void unregister_handler_proc(unsigned int irq,
 extern bool irq_can_set_affinity_usr(unsigned int irq);
-extern int irq_select_affinity_usr(unsigned int irq);
-
 extern void irq_set_thread_affinity(struct irq_desc *desc);
 extern int irq_do_set_affinity(struct irq_data *data,


@@ -441,23 +441,9 @@ int irq_setup_affinity(struct irq_desc *desc)
 {
 	return irq_select_affinity(irq_desc_get_irq(desc));
 }
-#endif
+#endif /* CONFIG_AUTO_IRQ_AFFINITY */
+#endif /* CONFIG_SMP */
 
-/*
- * Called when a bogus affinity is set via /proc/irq
- */
-int irq_select_affinity_usr(unsigned int irq)
-{
-	struct irq_desc *desc = irq_to_desc(irq);
-	unsigned long flags;
-	int ret;
-
-	raw_spin_lock_irqsave(&desc->lock, flags);
-	ret = irq_setup_affinity(desc);
-	raw_spin_unlock_irqrestore(&desc->lock, flags);
-	return ret;
-}
-#endif
 
 /**
  * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt


@@ -115,6 +115,28 @@ static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
 	return show_irq_affinity(AFFINITY_LIST, m);
 }
+#ifndef CONFIG_AUTO_IRQ_AFFINITY
+static inline int irq_select_affinity_usr(unsigned int irq)
+{
+	/*
+	 * If the interrupt is started up already then this fails. The
+	 * interrupt is assigned to an online CPU already. There is no
+	 * point to move it around randomly. Tell user space that the
+	 * selected mask is bogus.
+	 *
+	 * If not then any change to the affinity is pointless because the
+	 * startup code invokes irq_setup_affinity() which will select
+	 * a online CPU anyway.
+	 */
+	return -EINVAL;
+}
+#else
+/* ALPHA magic affinity auto selector. Keep it for historical reasons. */
+static inline int irq_select_affinity_usr(unsigned int irq)
+{
+	return irq_select_affinity(irq);
+}
+#endif
 static ssize_t write_irq_affinity(int type, struct file *file,
 		const char __user *buffer, size_t count, loff_t *pos)


@@ -92,15 +92,19 @@ static bool init_stack_slab(void **prealloc)
 		return true;
 	if (stack_slabs[depot_index] == NULL) {
 		stack_slabs[depot_index] = *prealloc;
+		*prealloc = NULL;
 	} else {
-		stack_slabs[depot_index + 1] = *prealloc;
+		/* If this is the last depot slab, do not touch the next one. */
+		if (depot_index + 1 < STACK_ALLOC_MAX_SLABS) {
+			stack_slabs[depot_index + 1] = *prealloc;
+			*prealloc = NULL;
+		}
 		/*
 		 * This smp_store_release pairs with smp_load_acquire() from
 		 * |next_slab_inited| above and in depot_save_stack().
 		 */
 		smp_store_release(&next_slab_inited, 1);
 	}
-	*prealloc = NULL;
 	return true;
 }
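
Note: the added check keeps init_stack_slab() from writing one element past the end of the fixed-size stack_slabs[] array once the final slab is in use. The fixed invariant as a standalone sketch (array size and names illustrative):

#include <stdbool.h>
#include <stddef.h>

#define MAX_SLABS 8
static void *slabs[MAX_SLABS];

static bool install_prealloc(int idx, void **prealloc)
{
	if (slabs[idx] == NULL) {
		slabs[idx] = *prealloc;
		*prealloc = NULL;
	} else if (idx + 1 < MAX_SLABS) {	/* the added bounds check */
		slabs[idx + 1] = *prealloc;
		*prealloc = NULL;
	}
	/* else: last slab already in use; caller keeps the preallocation */
	return true;
}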


@@ -419,8 +419,10 @@ int memcg_expand_shrinker_maps(int new_id)
 		if (mem_cgroup_is_root(memcg))
 			continue;
 		ret = memcg_expand_one_shrinker_map(memcg, size, old_size);
-		if (ret)
+		if (ret) {
+			mem_cgroup_iter_break(NULL, memcg);
 			goto unlock;
+		}
 	}
 unlock:
 	if (!ret)
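
Note: mem_cgroup_iter() pins each memcg it returns and releases it on the next iteration, so leaving the walk early without mem_cgroup_iter_break() leaks a css reference — exactly what the added call plugs. The canonical loop shape (the walker function and its callback are illustrative):

#include <linux/memcontrol.h>

static int walk_all_memcgs(int (*fn)(struct mem_cgroup *))
{
	struct mem_cgroup *memcg;
	int ret = 0;

	for (memcg = mem_cgroup_iter(NULL, NULL, NULL); memcg;
	     memcg = mem_cgroup_iter(NULL, memcg, NULL)) {
		ret = fn(memcg);
		if (ret) {
			mem_cgroup_iter_break(NULL, memcg);	/* drop the pinned css */
			break;
		}
	}
	return ret;
}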


@@ -2448,10 +2448,13 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
 			/*
 			 * Scan types proportional to swappiness and
 			 * their relative recent reclaim efficiency.
-			 * Make sure we don't miss the last page
-			 * because of a round-off error.
+			 * Make sure we don't miss the last page on
+			 * the offlined memory cgroups because of a
+			 * round-off error.
 			 */
-			scan = DIV64_U64_ROUND_UP(scan * fraction[file],
+			scan = mem_cgroup_online(memcg) ?
+			       div64_u64(scan * fraction[file], denominator) :
+			       DIV64_U64_ROUND_UP(scan * fraction[file],
 						  denominator);
 			break;
 		case SCAN_FILE:
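
Note on the conditional rounding: DIV64_U64_ROUND_UP() guarantees that an offlined cgroup's last pages are still scanned, but for an online cgroup it inflates arbitrarily small proportional targets to at least one page, over-scanning tiny LRUs. The difference in miniature:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t scan = 1, fraction = 3, denominator = 4;	/* 3/4 of one page */

	printf("truncated : %llu\n", (unsigned long long)
	       (scan * fraction / denominator));			/* 0 */
	printf("rounded up: %llu\n", (unsigned long long)
	       ((scan * fraction + denominator - 1) / denominator));	/* 1 */
	return 0;
}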


@@ -845,6 +845,8 @@ hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
 	return hashlimit_mt_common(skb, par, hinfo, &info->cfg, 3);
 }
 
+#define HASHLIMIT_MAX_SIZE 1048576
+
 static int hashlimit_mt_check_common(const struct xt_mtchk_param *par,
 				     struct xt_hashlimit_htable **hinfo,
 				     struct hashlimit_cfg3 *cfg,
@@ -855,6 +857,14 @@ static int hashlimit_mt_check_common(const struct xt_mtchk_param *par,
 	if (cfg->gc_interval == 0 || cfg->expire == 0)
 		return -EINVAL;
+	if (cfg->size > HASHLIMIT_MAX_SIZE) {
+		cfg->size = HASHLIMIT_MAX_SIZE;
+		pr_info_ratelimited("size too large, truncated to %u\n", cfg->size);
+	}
+	if (cfg->max > HASHLIMIT_MAX_SIZE) {
+		cfg->max = HASHLIMIT_MAX_SIZE;
+		pr_info_ratelimited("max too large, truncated to %u\n", cfg->max);
+	}
 	if (par->family == NFPROTO_IPV4) {
 		if (cfg->srcmask > 32 || cfg->dstmask > 32)
 			return -EINVAL;
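
Note: rather than rejecting oversized user-supplied table parameters, the fix caps them at a sane maximum and logs the truncation (ratelimited, so a hostile ruleset cannot flood the log). A userspace model of the clamp (limit mirrors the hunk, function name illustrative):

#include <stdio.h>

#define MAX_HASH_SIZE 1048576u

static unsigned int clamp_size(unsigned int requested)
{
	if (requested > MAX_HASH_SIZE) {
		fprintf(stderr, "size too large, truncated to %u\n", MAX_HASH_SIZE);
		return MAX_HASH_SIZE;
	}
	return requested;
}

int main(void)
{
	printf("%u\n", clamp_size(1u << 31));	/* prints 1048576 */
	return 0;
}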


@@ -647,11 +647,11 @@ void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
}
/*
* Final call destruction under RCU.
* Final call destruction - but must be done in process context.
*/
static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
static void rxrpc_destroy_call(struct work_struct *work)
{
struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
struct rxrpc_call *call = container_of(work, struct rxrpc_call, processor);
struct rxrpc_net *rxnet = call->rxnet;
rxrpc_put_connection(call->conn);
@@ -663,6 +663,22 @@ static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
wake_up_var(&rxnet->nr_calls);
}
/*
* Final call destruction under RCU.
*/
static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
{
struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
if (in_softirq()) {
INIT_WORK(&call->processor, rxrpc_destroy_call);
if (!rxrpc_queue_work(&call->processor))
BUG();
} else {
rxrpc_destroy_call(&call->processor);
}
}
/*
* clean up a call
*/
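
Note: RCU callbacks may run in softirq context, where the teardown done here (dropping connections and peers) can sleep, so the callback now defers the real work to a workqueue when necessary; rxrpc_queue_work() is rxrpc's wrapper around queue_work(). Generic shape of the pattern (the obj type and helpers are illustrative):

#include <linux/interrupt.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct obj {
	struct rcu_head rcu;
	struct work_struct work;
};

static void obj_destroy(struct work_struct *work)
{
	struct obj *o = container_of(work, struct obj, work);

	/* ... teardown that may sleep ... */
	kfree(o);
}

static void obj_rcu_free(struct rcu_head *rcu)
{
	struct obj *o = container_of(rcu, struct obj, rcu);

	if (in_softirq()) {		/* cannot sleep here */
		INIT_WORK(&o->work, obj_destroy);
		schedule_work(&o->work);
	} else {
		obj_destroy(&o->work);
	}
}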


@@ -563,7 +563,7 @@ static int update_timestamp_of_queue(struct snd_seq_event *event,
 	event->queue = queue;
 	event->flags &= ~SNDRV_SEQ_TIME_STAMP_MASK;
 	if (real_time) {
-		event->time.time = snd_seq_timer_get_cur_time(q->timer);
+		event->time.time = snd_seq_timer_get_cur_time(q->timer, true);
 		event->flags |= SNDRV_SEQ_TIME_STAMP_REAL;
 	} else {
 		event->time.tick = snd_seq_timer_get_cur_tick(q->timer);
@@ -1642,7 +1642,7 @@ static int snd_seq_ioctl_get_queue_status(struct snd_seq_client *client,
 	tmr = queue->timer;
 	status->events = queue->tickq->cells + queue->timeq->cells;
-	status->time = snd_seq_timer_get_cur_time(tmr);
+	status->time = snd_seq_timer_get_cur_time(tmr, true);
 	status->tick = snd_seq_timer_get_cur_tick(tmr);
 	status->running = tmr->running;


@@ -251,6 +251,8 @@ void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
 {
 	unsigned long flags;
 	struct snd_seq_event_cell *cell;
+	snd_seq_tick_time_t cur_tick;
+	snd_seq_real_time_t cur_time;
 	if (q == NULL)
 		return;
@@ -267,17 +269,18 @@
       __again:
 	/* Process tick queue... */
+	cur_tick = snd_seq_timer_get_cur_tick(q->timer);
 	for (;;) {
-		cell = snd_seq_prioq_cell_out(q->tickq,
-					      &q->timer->tick.cur_tick);
+		cell = snd_seq_prioq_cell_out(q->tickq, &cur_tick);
 		if (!cell)
 			break;
 		snd_seq_dispatch_event(cell, atomic, hop);
 	}
 	/* Process time queue... */
+	cur_time = snd_seq_timer_get_cur_time(q->timer, false);
 	for (;;) {
-		cell = snd_seq_prioq_cell_out(q->timeq, &q->timer->cur_time);
+		cell = snd_seq_prioq_cell_out(q->timeq, &cur_time);
 		if (!cell)
 			break;
 		snd_seq_dispatch_event(cell, atomic, hop);
@@ -405,6 +408,7 @@ int snd_seq_queue_check_access(int queueid, int client)
 int snd_seq_queue_set_owner(int queueid, int client, int locked)
 {
 	struct snd_seq_queue *q = queueptr(queueid);
+	unsigned long flags;
 	if (q == NULL)
 		return -EINVAL;
@@ -414,8 +418,10 @@ int snd_seq_queue_set_owner(int queueid, int client, int locked)
 		return -EPERM;
 	}
+	spin_lock_irqsave(&q->owner_lock, flags);
 	q->locked = locked ? 1 : 0;
 	q->owner = client;
+	spin_unlock_irqrestore(&q->owner_lock, flags);
 	queue_access_unlock(q);
 	queuefree(q);
@@ -552,15 +558,17 @@ void snd_seq_queue_client_termination(int client)
 	unsigned long flags;
 	int i;
 	struct snd_seq_queue *q;
+	bool matched;
 	for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
 		if ((q = queueptr(i)) == NULL)
 			continue;
 		spin_lock_irqsave(&q->owner_lock, flags);
-		if (q->owner == client)
+		matched = (q->owner == client);
+		if (matched)
 			q->klocked = 1;
 		spin_unlock_irqrestore(&q->owner_lock, flags);
-		if (q->owner == client) {
+		if (matched) {
 			if (q->timer->running)
 				snd_seq_timer_stop(q->timer);
 			snd_seq_timer_reset(q->timer);
@@ -752,6 +760,8 @@ void snd_seq_info_queues_read(struct snd_info_entry *entry,
 	int i, bpm;
 	struct snd_seq_queue *q;
 	struct snd_seq_timer *tmr;
+	bool locked;
+	int owner;
 	for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
 		if ((q = queueptr(i)) == NULL)
@@ -763,9 +773,14 @@
 		else
 			bpm = 0;
+		spin_lock_irq(&q->owner_lock);
+		locked = q->locked;
+		owner = q->owner;
+		spin_unlock_irq(&q->owner_lock);
 		snd_iprintf(buffer, "queue %d: [%s]\n", q->queue, q->name);
-		snd_iprintf(buffer, "owned by client    : %d\n", q->owner);
-		snd_iprintf(buffer, "lock status        : %s\n", q->locked ? "Locked" : "Free");
+		snd_iprintf(buffer, "owned by client    : %d\n", owner);
+		snd_iprintf(buffer, "lock status        : %s\n", locked ? "Locked" : "Free");
 		snd_iprintf(buffer, "queued time events : %d\n", snd_seq_prioq_avail(q->timeq));
 		snd_iprintf(buffer, "queued tick events : %d\n", snd_seq_prioq_avail(q->tickq));
 		snd_iprintf(buffer, "timer state        : %s\n", tmr->running ? "Running" : "Stopped");
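
Note: every seq_queue.c hunk above applies the same idiom — take q->owner_lock, copy the shared fields into locals, drop the lock, then act on the consistent snapshot, so concurrent owner changes can no longer be observed half-way. Condensed (struct and function illustrative):

#include <linux/printk.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct queue {
	spinlock_t owner_lock;
	int owner;
	bool locked;
};

static void queue_report(struct queue *q)
{
	bool locked;
	int owner;

	spin_lock_irq(&q->owner_lock);
	locked = q->locked;		/* snapshot under the lock */
	owner = q->owner;
	spin_unlock_irq(&q->owner_lock);

	pr_info("owner %d, %s\n", owner, locked ? "Locked" : "Free");
}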


@@ -437,14 +437,15 @@ int snd_seq_timer_continue(struct snd_seq_timer *tmr)
 }
 /* return current 'real' time. use timeofday() to get better granularity. */
-snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
+snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr,
+					       bool adjust_ktime)
 {
 	snd_seq_real_time_t cur_time;
+	unsigned long flags;
+	spin_lock_irqsave(&tmr->lock, flags);
 	cur_time = tmr->cur_time;
-	if (tmr->running) {
+	if (adjust_ktime && tmr->running) {
 		struct timespec64 tm;
 		ktime_get_ts64(&tm);
@@ -461,7 +462,13 @@ snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
 			   high PPQ values) */
 snd_seq_tick_time_t snd_seq_timer_get_cur_tick(struct snd_seq_timer *tmr)
 {
-	return tmr->tick.cur_tick;
+	snd_seq_tick_time_t cur_tick;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tmr->lock, flags);
+	cur_tick = tmr->tick.cur_tick;
+	spin_unlock_irqrestore(&tmr->lock, flags);
+	return cur_tick;
 }


@@ -135,7 +135,8 @@ int snd_seq_timer_set_tempo_ppq(struct snd_seq_timer *tmr, int tempo, int ppq);
 int snd_seq_timer_set_position_tick(struct snd_seq_timer *tmr, snd_seq_tick_time_t position);
 int snd_seq_timer_set_position_time(struct snd_seq_timer *tmr, snd_seq_real_time_t position);
 int snd_seq_timer_set_skew(struct snd_seq_timer *tmr, unsigned int skew, unsigned int base);
-snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr);
+snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr,
+					       bool adjust_ktime);
 snd_seq_tick_time_t snd_seq_timer_get_cur_tick(struct snd_seq_timer *tmr);
 extern int seq_default_timer_class;


@@ -249,7 +249,7 @@ void snd_hdac_print_channel_allocation(int spk_alloc, char *buf, int buflen)
 	for (i = 0, j = 0; i < ARRAY_SIZE(cea_speaker_allocation_names); i++) {
 		if (spk_alloc & (1 << i))
-			j += snprintf(buf + j, buflen - j, " %s",
+			j += scnprintf(buf + j, buflen - j, " %s",
 				      cea_speaker_allocation_names[i]);
 	}
 	buf[j] = '\0';	/* necessary when j == 0 */


@@ -4019,7 +4019,7 @@ void snd_print_pcm_bits(int pcm, char *buf, int buflen)
 	for (i = 0, j = 0; i < ARRAY_SIZE(bits); i++)
 		if (pcm & (AC_SUPPCM_BITS_8 << i))
-			j += snprintf(buf + j, buflen - j, " %d", bits[i]);
+			j += scnprintf(buf + j, buflen - j, " %d", bits[i]);
 	buf[j] = '\0'; /* necessary when j == 0 */
 }


@@ -373,7 +373,7 @@ static void hdmi_print_pcm_rates(int pcm, char *buf, int buflen)
 	for (i = 0, j = 0; i < ARRAY_SIZE(alsa_rates); i++)
 		if (pcm & (1 << i))
-			j += snprintf(buf + j, buflen - j, " %d",
+			j += scnprintf(buf + j, buflen - j, " %d",
 				      alsa_rates[i]);
 	buf[j] = '\0';	/* necessary when j == 0 */
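
Note: the three hunks above fix the same off-by-return-value bug. snprintf() returns the length the output *would* have had, so the accumulating 'j += snprintf(buf + j, buflen - j, ...)' idiom can push j past buflen, after which 'buflen - j' goes negative (huge once converted to size_t). scnprintf() returns the number of characters actually written. A userspace model of the difference:

#include <stdarg.h>
#include <stdio.h>

/* model of the kernel's scnprintf() */
static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int i;

	if (size == 0)
		return 0;
	va_start(args, fmt);
	i = vsnprintf(buf, size, fmt, args);
	va_end(args);
	return i < (int)size ? i : (int)(size - 1);
}

int main(void)
{
	char buf[8];
	int j;

	j = snprintf(buf, sizeof(buf), "0123456789");
	printf("snprintf : j = %d (past the 8-byte buffer)\n", j);	/* 10 */

	j = my_scnprintf(buf, sizeof(buf), "0123456789");
	printf("scnprintf: j = %d (clamped)\n", j);			/* 7 */
	return 0;
}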

Some files were not shown because too many files have changed in this diff.