Merge 4.19.105 into android-4.19

Changes in 4.19.105
	Input: synaptics - switch T470s to RMI4 by default
	Input: synaptics - enable SMBus on ThinkPad L470
	Input: synaptics - remove the LEN0049 dmi id from topbuttonpad list
	ALSA: usb-audio: Fix UAC2/3 effect unit parsing
	ALSA: hda/realtek - Fix silent output on MSI-GL73
	ALSA: usb-audio: Apply sample rate quirk for Audioengine D1
	arm64: cpufeature: Set the FP/SIMD compat HWCAP bits properly
	arm64: nofpsmid: Handle TIF_FOREIGN_FPSTATE flag cleanly
	ALSA: usb-audio: sound: usb: usb true/false for bool return type
	ALSA: usb-audio: Add clock validity quirk for Denon MC7000/MCX8000
	ext4: don't assume that mmp_nodename/bdevname have NUL
	ext4: fix support for inode sizes > 1024 bytes
	ext4: fix checksum errors with indexed dirs
	ext4: add cond_resched() to ext4_protect_reserved_inode
	ext4: improve explanation of a mount failure caused by a misconfigured kernel
	Btrfs: fix race between using extent maps and merging them
	btrfs: ref-verify: fix memory leaks
	btrfs: print message when tree-log replay starts
	btrfs: log message when rw remount is attempted with unclean tree-log
	ARM: npcm: Bring back GPIOLIB support
	arm64: ssbs: Fix context-switch when SSBS is present on all CPUs
	KVM: nVMX: Use correct root level for nested EPT shadow page tables
	perf/x86/amd: Add missing L2 misses event spec to AMD Family 17h's event map
	nvme: fix the parameter order for nvme_get_log in nvme_get_fw_slot_info
	IB/hfi1: Acquire lock to release TID entries when user file is closed
	IB/hfi1: Close window for pq and request coliding
	IB/rdmavt: Reset all QPs when the device is shut down
	RDMA/core: Fix invalid memory access in spec_filter_size
	RDMA/hfi1: Fix memory leak in _dev_comp_vect_mappings_create
	RDMA/rxe: Fix soft lockup problem due to using tasklets in softirq
	RDMA/core: Fix protection fault in get_pkey_idx_qp_list
	s390/time: Fix clk type in get_tod_clock
	perf/x86/intel: Fix inaccurate period in context switch for auto-reload
	hwmon: (pmbus/ltc2978) Fix PMBus polling of MFR_COMMON definitions.
	NFSv4.1 make cachethis=no for writes
	jbd2: move the clearing of b_modified flag to the journal_unmap_buffer()
	jbd2: do not clear the BH_Mapped flag when forgetting a metadata buffer
	KVM: x86/mmu: Fix struct guest_walker arrays for 5-level paging
	Linux 4.19.105

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I411bf0e21a24cee878f1a114175090e8fe69be46
This commit is contained in:
Greg Kroah-Hartman
2020-02-20 08:14:21 +01:00
43 changed files with 404 additions and 201 deletions

View File

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 104
+SUBLEVEL = 105
 EXTRAVERSION =
 NAME = "People's Front"

View File

@@ -10,7 +10,7 @@ config ARCH_NPCM7XX
 	depends on ARCH_MULTI_V7
 	select PINCTRL_NPCM7XX
 	select NPCM7XX_TIMER
-	select ARCH_REQUIRE_GPIOLIB
+	select GPIOLIB
 	select CACHE_L2X0
 	select ARM_GIC
 	select HAVE_ARM_TWD if SMP

View File

@@ -42,9 +42,7 @@ EXPORT_SYMBOL_GPL(elf_hwcap);
#define COMPAT_ELF_HWCAP_DEFAULT \ #define COMPAT_ELF_HWCAP_DEFAULT \
(COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\ (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\ COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\ COMPAT_HWCAP_TLS|COMPAT_HWCAP_IDIV|\
COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
COMPAT_HWCAP_LPAE) COMPAT_HWCAP_LPAE)
unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT; unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
unsigned int compat_elf_hwcap2 __read_mostly; unsigned int compat_elf_hwcap2 __read_mostly;
@@ -1341,17 +1339,30 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
{}, {},
}; };
#define HWCAP_CAP(reg, field, s, min_value, cap_type, cap) \
{ \ #define HWCAP_CPUID_MATCH(reg, field, s, min_value) \
.desc = #cap, \
.type = ARM64_CPUCAP_SYSTEM_FEATURE, \
.matches = has_cpuid_feature, \ .matches = has_cpuid_feature, \
.sys_reg = reg, \ .sys_reg = reg, \
.field_pos = field, \ .field_pos = field, \
.sign = s, \ .sign = s, \
.min_field_value = min_value, \ .min_field_value = min_value, \
#define __HWCAP_CAP(name, cap_type, cap) \
.desc = name, \
.type = ARM64_CPUCAP_SYSTEM_FEATURE, \
.hwcap_type = cap_type, \ .hwcap_type = cap_type, \
.hwcap = cap, \ .hwcap = cap, \
#define HWCAP_CAP(reg, field, s, min_value, cap_type, cap) \
{ \
__HWCAP_CAP(#cap, cap_type, cap) \
HWCAP_CPUID_MATCH(reg, field, s, min_value) \
}
#define HWCAP_CAP_MATCH(match, cap_type, cap) \
{ \
__HWCAP_CAP(#cap, cap_type, cap) \
.matches = match, \
} }
static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
@@ -1387,8 +1398,35 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
{}, {},
}; };
#ifdef CONFIG_COMPAT
static bool compat_has_neon(const struct arm64_cpu_capabilities *cap, int scope)
{
/*
* Check that all of MVFR1_EL1.{SIMDSP, SIMDInt, SIMDLS} are available,
* in line with that of arm32 as in vfp_init(). We make sure that the
* check is future proof, by making sure value is non-zero.
*/
u32 mvfr1;
WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
if (scope == SCOPE_SYSTEM)
mvfr1 = read_sanitised_ftr_reg(SYS_MVFR1_EL1);
else
mvfr1 = read_sysreg_s(SYS_MVFR1_EL1);
return cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDSP_SHIFT) &&
cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDINT_SHIFT) &&
cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDLS_SHIFT);
}
#endif
static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = { static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
#ifdef CONFIG_COMPAT #ifdef CONFIG_COMPAT
HWCAP_CAP_MATCH(compat_has_neon, CAP_COMPAT_HWCAP, COMPAT_HWCAP_NEON),
HWCAP_CAP(SYS_MVFR1_EL1, MVFR1_SIMDFMAC_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv4),
/* Arm v8 mandates MVFR0.FPDP == {0, 2}. So, piggy back on this for the presence of VFP support */
HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFP),
HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv3),
HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL), HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES), HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1), HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),

View File

@@ -218,6 +218,7 @@ static void sve_free(struct task_struct *task)
static void task_fpsimd_load(void) static void task_fpsimd_load(void)
{ {
WARN_ON(!in_softirq() && !irqs_disabled()); WARN_ON(!in_softirq() && !irqs_disabled());
WARN_ON(!system_supports_fpsimd());
if (system_supports_sve() && test_thread_flag(TIF_SVE)) if (system_supports_sve() && test_thread_flag(TIF_SVE))
sve_load_state(sve_pffr(&current->thread), sve_load_state(sve_pffr(&current->thread),
@@ -238,6 +239,7 @@ void fpsimd_save(void)
struct user_fpsimd_state *st = __this_cpu_read(fpsimd_last_state.st); struct user_fpsimd_state *st = __this_cpu_read(fpsimd_last_state.st);
/* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */ /* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */
WARN_ON(!system_supports_fpsimd());
WARN_ON(!in_softirq() && !irqs_disabled()); WARN_ON(!in_softirq() && !irqs_disabled());
if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) { if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
@@ -977,6 +979,7 @@ void fpsimd_bind_task_to_cpu(void)
struct fpsimd_last_state_struct *last = struct fpsimd_last_state_struct *last =
this_cpu_ptr(&fpsimd_last_state); this_cpu_ptr(&fpsimd_last_state);
WARN_ON(!system_supports_fpsimd());
last->st = &current->thread.uw.fpsimd_state; last->st = &current->thread.uw.fpsimd_state;
current->thread.fpsimd_cpu = smp_processor_id(); current->thread.fpsimd_cpu = smp_processor_id();
@@ -996,6 +999,7 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st)
struct fpsimd_last_state_struct *last = struct fpsimd_last_state_struct *last =
this_cpu_ptr(&fpsimd_last_state); this_cpu_ptr(&fpsimd_last_state);
WARN_ON(!system_supports_fpsimd());
WARN_ON(!in_softirq() && !irqs_disabled()); WARN_ON(!in_softirq() && !irqs_disabled());
last->st = st; last->st = st;
@@ -1008,8 +1012,19 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st)
*/ */
void fpsimd_restore_current_state(void) void fpsimd_restore_current_state(void)
{ {
if (!system_supports_fpsimd()) /*
* For the tasks that were created before we detected the absence of
* FP/SIMD, the TIF_FOREIGN_FPSTATE could be set via fpsimd_thread_switch(),
* e.g, init. This could be then inherited by the children processes.
* If we later detect that the system doesn't support FP/SIMD,
* we must clear the flag for all the tasks to indicate that the
* FPSTATE is clean (as we can't have one) to avoid looping for ever in
* do_notify_resume().
*/
if (!system_supports_fpsimd()) {
clear_thread_flag(TIF_FOREIGN_FPSTATE);
return; return;
}
local_bh_disable(); local_bh_disable();
@@ -1028,7 +1043,7 @@ void fpsimd_restore_current_state(void)
*/ */
void fpsimd_update_current_state(struct user_fpsimd_state const *state) void fpsimd_update_current_state(struct user_fpsimd_state const *state)
{ {
if (!system_supports_fpsimd()) if (WARN_ON(!system_supports_fpsimd()))
return; return;
local_bh_disable(); local_bh_disable();
@@ -1055,6 +1070,7 @@ void fpsimd_flush_task_state(struct task_struct *t)
void fpsimd_flush_cpu_state(void) void fpsimd_flush_cpu_state(void)
{ {
WARN_ON(!system_supports_fpsimd());
__this_cpu_write(fpsimd_last_state.st, NULL); __this_cpu_write(fpsimd_last_state.st, NULL);
set_thread_flag(TIF_FOREIGN_FPSTATE); set_thread_flag(TIF_FOREIGN_FPSTATE);
} }

View File

@@ -424,6 +424,13 @@ static void ssbs_thread_switch(struct task_struct *next)
 	if (unlikely(next->flags & PF_KTHREAD))
 		return;
 
+	/*
+	 * If all CPUs implement the SSBS extension, then we just need to
+	 * context-switch the PSTATE field.
+	 */
+	if (cpu_have_feature(cpu_feature(SSBS)))
+		return;
+
 	/* If the mitigation is enabled, then we leave SSBS clear. */
 	if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
 	    test_tsk_thread_flag(next, TIF_SSBD))

View File

@@ -37,7 +37,15 @@
 /* Check whether the FP regs were dirtied while in the host-side run loop: */
 static bool __hyp_text update_fp_enabled(struct kvm_vcpu *vcpu)
 {
-	if (vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
+	/*
+	 * When the system doesn't support FP/SIMD, we cannot rely on
+	 * the _TIF_FOREIGN_FPSTATE flag. However, we always inject an
+	 * abort on the very first access to FP and thus we should never
+	 * see KVM_ARM64_FP_ENABLED. For added safety, make sure we always
+	 * trap the accesses.
+	 */
+	if (!system_supports_fpsimd() ||
+	    vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
 		vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
 				      KVM_ARM64_FP_HOST);

View File

@@ -155,7 +155,7 @@ static inline void get_tod_clock_ext(char *clk)
 static inline unsigned long long get_tod_clock(void)
 {
-	unsigned char clk[STORE_CLOCK_EXT_SIZE];
+	char clk[STORE_CLOCK_EXT_SIZE];
 
 	get_tod_clock_ext(clk);
 	return *((unsigned long long *)&clk[1]);

View File

@@ -245,6 +245,7 @@ static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] =
 	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0076,
 	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
 	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0xff60,
+	[PERF_COUNT_HW_CACHE_MISSES]		= 0x0964,
 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c2,
 	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c3,
 	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= 0x0287,

View File

@@ -1402,6 +1402,8 @@ intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
 	old = ((s64)(prev_raw_count << shift) >> shift);
 	local64_add(new - old + count * period, &event->count);
 
+	local64_set(&hwc->period_left, -new);
+
 	perf_event_update_userpage(event);
 
 	return 0;

View File

@@ -36,7 +36,7 @@
 	#define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
 	#define PT_HAVE_ACCESSED_DIRTY(mmu) true
 	#ifdef CONFIG_X86_64
-	#define PT_MAX_FULL_LEVELS 4
+	#define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL
 	#define CMPXCHG cmpxchg
 	#else
 	#define CMPXCHG cmpxchg64

View File

@@ -2968,6 +2968,9 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 static int get_ept_level(struct kvm_vcpu *vcpu)
 {
+	/* Nested EPT currently only supports 4-level walks. */
+	if (is_guest_mode(vcpu) && nested_cpu_has_ept(get_vmcs12(vcpu)))
+		return 4;
+
 	if (cpu_has_vmx_ept_5levels() && (cpuid_maxphyaddr(vcpu) > 48))
 		return 5;
 	return 4;

View File

@@ -89,8 +89,8 @@ enum chips { ltc2974, ltc2975, ltc2977, ltc2978, ltc2980, ltc3880, ltc3882,
 #define LTC_POLL_TIMEOUT		100	/* in milli-seconds */
 
-#define LTC_NOT_BUSY			BIT(5)
-#define LTC_NOT_PENDING			BIT(4)
+#define LTC_NOT_BUSY			BIT(6)
+#define LTC_NOT_PENDING			BIT(5)
 
 /*
  * LTC2978 clears peak data whenever the CLEAR_FAULTS command is executed, which

View File

@@ -336,22 +336,16 @@ static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp,
if (!new_pps) if (!new_pps)
return NULL; return NULL;
if (qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) { if (qp_attr_mask & IB_QP_PORT)
if (!qp_pps) { new_pps->main.port_num =
new_pps->main.port_num = qp_attr->port_num; (qp_pps) ? qp_pps->main.port_num : qp_attr->port_num;
new_pps->main.pkey_index = qp_attr->pkey_index; if (qp_attr_mask & IB_QP_PKEY_INDEX)
} else { new_pps->main.pkey_index = (qp_pps) ? qp_pps->main.pkey_index :
new_pps->main.port_num = (qp_attr_mask & IB_QP_PORT) ? qp_attr->pkey_index;
qp_attr->port_num : if ((qp_attr_mask & IB_QP_PKEY_INDEX) && (qp_attr_mask & IB_QP_PORT))
qp_pps->main.port_num;
new_pps->main.pkey_index =
(qp_attr_mask & IB_QP_PKEY_INDEX) ?
qp_attr->pkey_index :
qp_pps->main.pkey_index;
}
new_pps->main.state = IB_PORT_PKEY_VALID; new_pps->main.state = IB_PORT_PKEY_VALID;
} else if (qp_pps) {
if (!(qp_attr_mask & (IB_QP_PKEY_INDEX || IB_QP_PORT)) && qp_pps) {
new_pps->main.port_num = qp_pps->main.port_num; new_pps->main.port_num = qp_pps->main.port_num;
new_pps->main.pkey_index = qp_pps->main.pkey_index; new_pps->main.pkey_index = qp_pps->main.pkey_index;
if (qp_pps->main.state != IB_PORT_PKEY_NOT_VALID) if (qp_pps->main.state != IB_PORT_PKEY_NOT_VALID)

View File

@@ -2914,12 +2914,6 @@ static int kern_spec_to_ib_spec_action(struct ib_uverbs_file *ufile,
return 0; return 0;
} }
static size_t kern_spec_filter_sz(const struct ib_uverbs_flow_spec_hdr *spec)
{
/* Returns user space filter size, includes padding */
return (spec->size - sizeof(struct ib_uverbs_flow_spec_hdr)) / 2;
}
static ssize_t spec_filter_size(const void *kern_spec_filter, u16 kern_filter_size, static ssize_t spec_filter_size(const void *kern_spec_filter, u16 kern_filter_size,
u16 ib_real_filter_sz) u16 ib_real_filter_sz)
{ {
@@ -3063,11 +3057,16 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec, static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec,
union ib_flow_spec *ib_spec) union ib_flow_spec *ib_spec)
{ {
ssize_t kern_filter_sz; size_t kern_filter_sz;
void *kern_spec_mask; void *kern_spec_mask;
void *kern_spec_val; void *kern_spec_val;
kern_filter_sz = kern_spec_filter_sz(&kern_spec->hdr); if (check_sub_overflow((size_t)kern_spec->hdr.size,
sizeof(struct ib_uverbs_flow_spec_hdr),
&kern_filter_sz))
return -EINVAL;
kern_filter_sz /= 2;
kern_spec_val = (void *)kern_spec + kern_spec_val = (void *)kern_spec +
sizeof(struct ib_uverbs_flow_spec_hdr); sizeof(struct ib_uverbs_flow_spec_hdr);

View File

@@ -478,6 +478,8 @@ static int _dev_comp_vect_mappings_create(struct hfi1_devdata *dd,
 			   rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), i, cpu);
 	}
 
+	free_cpumask_var(available_cpus);
+	free_cpumask_var(non_intr_cpus);
 	return 0;
 
 fail:

View File

@@ -200,23 +200,24 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
fd = kzalloc(sizeof(*fd), GFP_KERNEL); fd = kzalloc(sizeof(*fd), GFP_KERNEL);
if (fd) { if (!fd || init_srcu_struct(&fd->pq_srcu))
fd->rec_cpu_num = -1; /* no cpu affinity by default */ goto nomem;
fd->mm = current->mm; spin_lock_init(&fd->pq_rcu_lock);
mmgrab(fd->mm); spin_lock_init(&fd->tid_lock);
fd->dd = dd; spin_lock_init(&fd->invalid_lock);
kobject_get(&fd->dd->kobj); fd->rec_cpu_num = -1; /* no cpu affinity by default */
fp->private_data = fd; fd->mm = current->mm;
} else { mmgrab(fd->mm);
fp->private_data = NULL; fd->dd = dd;
kobject_get(&fd->dd->kobj);
if (atomic_dec_and_test(&dd->user_refcount)) fp->private_data = fd;
complete(&dd->user_comp);
return -ENOMEM;
}
return 0; return 0;
nomem:
kfree(fd);
fp->private_data = NULL;
if (atomic_dec_and_test(&dd->user_refcount))
complete(&dd->user_comp);
return -ENOMEM;
} }
static long hfi1_file_ioctl(struct file *fp, unsigned int cmd, static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
@@ -301,21 +302,30 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from) static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
{ {
struct hfi1_filedata *fd = kiocb->ki_filp->private_data; struct hfi1_filedata *fd = kiocb->ki_filp->private_data;
struct hfi1_user_sdma_pkt_q *pq = fd->pq; struct hfi1_user_sdma_pkt_q *pq;
struct hfi1_user_sdma_comp_q *cq = fd->cq; struct hfi1_user_sdma_comp_q *cq = fd->cq;
int done = 0, reqs = 0; int done = 0, reqs = 0;
unsigned long dim = from->nr_segs; unsigned long dim = from->nr_segs;
int idx;
if (!cq || !pq) idx = srcu_read_lock(&fd->pq_srcu);
pq = srcu_dereference(fd->pq, &fd->pq_srcu);
if (!cq || !pq) {
srcu_read_unlock(&fd->pq_srcu, idx);
return -EIO; return -EIO;
}
if (!iter_is_iovec(from) || !dim) if (!iter_is_iovec(from) || !dim) {
srcu_read_unlock(&fd->pq_srcu, idx);
return -EINVAL; return -EINVAL;
}
trace_hfi1_sdma_request(fd->dd, fd->uctxt->ctxt, fd->subctxt, dim); trace_hfi1_sdma_request(fd->dd, fd->uctxt->ctxt, fd->subctxt, dim);
if (atomic_read(&pq->n_reqs) == pq->n_max_reqs) if (atomic_read(&pq->n_reqs) == pq->n_max_reqs) {
srcu_read_unlock(&fd->pq_srcu, idx);
return -ENOSPC; return -ENOSPC;
}
while (dim) { while (dim) {
int ret; int ret;
@@ -333,6 +343,7 @@ static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
reqs++; reqs++;
} }
srcu_read_unlock(&fd->pq_srcu, idx);
return reqs; return reqs;
} }
@@ -706,6 +717,7 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
if (atomic_dec_and_test(&dd->user_refcount)) if (atomic_dec_and_test(&dd->user_refcount))
complete(&dd->user_comp); complete(&dd->user_comp);
cleanup_srcu_struct(&fdata->pq_srcu);
kfree(fdata); kfree(fdata);
return 0; return 0;
} }

View File

@@ -1376,10 +1376,13 @@ struct mmu_rb_handler;
/* Private data for file operations */ /* Private data for file operations */
struct hfi1_filedata { struct hfi1_filedata {
struct srcu_struct pq_srcu;
struct hfi1_devdata *dd; struct hfi1_devdata *dd;
struct hfi1_ctxtdata *uctxt; struct hfi1_ctxtdata *uctxt;
struct hfi1_user_sdma_comp_q *cq; struct hfi1_user_sdma_comp_q *cq;
struct hfi1_user_sdma_pkt_q *pq; /* update side lock for SRCU */
spinlock_t pq_rcu_lock;
struct hfi1_user_sdma_pkt_q __rcu *pq;
u16 subctxt; u16 subctxt;
/* for cpu affinity; -1 if none */ /* for cpu affinity; -1 if none */
int rec_cpu_num; int rec_cpu_num;

View File

@@ -90,9 +90,6 @@ int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd,
struct hfi1_devdata *dd = uctxt->dd; struct hfi1_devdata *dd = uctxt->dd;
int ret = 0; int ret = 0;
spin_lock_init(&fd->tid_lock);
spin_lock_init(&fd->invalid_lock);
fd->entry_to_rb = kcalloc(uctxt->expected_count, fd->entry_to_rb = kcalloc(uctxt->expected_count,
sizeof(struct rb_node *), sizeof(struct rb_node *),
GFP_KERNEL); GFP_KERNEL);
@@ -165,10 +162,12 @@ void hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
if (fd->handler) { if (fd->handler) {
hfi1_mmu_rb_unregister(fd->handler); hfi1_mmu_rb_unregister(fd->handler);
} else { } else {
mutex_lock(&uctxt->exp_mutex);
if (!EXP_TID_SET_EMPTY(uctxt->tid_full_list)) if (!EXP_TID_SET_EMPTY(uctxt->tid_full_list))
unlock_exp_tids(uctxt, &uctxt->tid_full_list, fd); unlock_exp_tids(uctxt, &uctxt->tid_full_list, fd);
if (!EXP_TID_SET_EMPTY(uctxt->tid_used_list)) if (!EXP_TID_SET_EMPTY(uctxt->tid_used_list))
unlock_exp_tids(uctxt, &uctxt->tid_used_list, fd); unlock_exp_tids(uctxt, &uctxt->tid_used_list, fd);
mutex_unlock(&uctxt->exp_mutex);
} }
kfree(fd->invalid_tids); kfree(fd->invalid_tids);

View File

@@ -179,7 +179,6 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
pq = kzalloc(sizeof(*pq), GFP_KERNEL); pq = kzalloc(sizeof(*pq), GFP_KERNEL);
if (!pq) if (!pq)
return -ENOMEM; return -ENOMEM;
pq->dd = dd; pq->dd = dd;
pq->ctxt = uctxt->ctxt; pq->ctxt = uctxt->ctxt;
pq->subctxt = fd->subctxt; pq->subctxt = fd->subctxt;
@@ -236,7 +235,7 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
goto pq_mmu_fail; goto pq_mmu_fail;
} }
fd->pq = pq; rcu_assign_pointer(fd->pq, pq);
fd->cq = cq; fd->cq = cq;
return 0; return 0;
@@ -264,8 +263,14 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
trace_hfi1_sdma_user_free_queues(uctxt->dd, uctxt->ctxt, fd->subctxt); trace_hfi1_sdma_user_free_queues(uctxt->dd, uctxt->ctxt, fd->subctxt);
pq = fd->pq; spin_lock(&fd->pq_rcu_lock);
pq = srcu_dereference_check(fd->pq, &fd->pq_srcu,
lockdep_is_held(&fd->pq_rcu_lock));
if (pq) { if (pq) {
rcu_assign_pointer(fd->pq, NULL);
spin_unlock(&fd->pq_rcu_lock);
synchronize_srcu(&fd->pq_srcu);
/* at this point there can be no more new requests */
if (pq->handler) if (pq->handler)
hfi1_mmu_rb_unregister(pq->handler); hfi1_mmu_rb_unregister(pq->handler);
iowait_sdma_drain(&pq->busy); iowait_sdma_drain(&pq->busy);
@@ -277,7 +282,8 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
kfree(pq->req_in_use); kfree(pq->req_in_use);
kmem_cache_destroy(pq->txreq_cache); kmem_cache_destroy(pq->txreq_cache);
kfree(pq); kfree(pq);
fd->pq = NULL; } else {
spin_unlock(&fd->pq_rcu_lock);
} }
if (fd->cq) { if (fd->cq) {
vfree(fd->cq->comps); vfree(fd->cq->comps);
@@ -321,7 +327,8 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
{ {
int ret = 0, i; int ret = 0, i;
struct hfi1_ctxtdata *uctxt = fd->uctxt; struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_user_sdma_pkt_q *pq = fd->pq; struct hfi1_user_sdma_pkt_q *pq =
srcu_dereference(fd->pq, &fd->pq_srcu);
struct hfi1_user_sdma_comp_q *cq = fd->cq; struct hfi1_user_sdma_comp_q *cq = fd->cq;
struct hfi1_devdata *dd = pq->dd; struct hfi1_devdata *dd = pq->dd;
unsigned long idx = 0; unsigned long idx = 0;

View File

@@ -58,6 +58,8 @@
#include "trace.h" #include "trace.h"
static void rvt_rc_timeout(struct timer_list *t); static void rvt_rc_timeout(struct timer_list *t);
static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
enum ib_qp_type type);
/* /*
* Convert the AETH RNR timeout code into the number of microseconds. * Convert the AETH RNR timeout code into the number of microseconds.
@@ -268,40 +270,41 @@ int rvt_driver_qp_init(struct rvt_dev_info *rdi)
} }
/** /**
* free_all_qps - check for QPs still in use * rvt_free_qp_cb - callback function to reset a qp
* @qp: the qp to reset
* @v: a 64-bit value
*
* This function resets the qp and removes it from the
* qp hash table.
*/
static void rvt_free_qp_cb(struct rvt_qp *qp, u64 v)
{
unsigned int *qp_inuse = (unsigned int *)v;
struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
/* Reset the qp and remove it from the qp hash list */
rvt_reset_qp(rdi, qp, qp->ibqp.qp_type);
/* Increment the qp_inuse count */
(*qp_inuse)++;
}
/**
* rvt_free_all_qps - check for QPs still in use
* @rdi: rvt device info structure * @rdi: rvt device info structure
* *
* There should not be any QPs still in use. * There should not be any QPs still in use.
* Free memory for table. * Free memory for table.
* Return the number of QPs still in use.
*/ */
static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi) static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
{ {
unsigned long flags; unsigned int qp_inuse = 0;
struct rvt_qp *qp;
unsigned n, qp_inuse = 0;
spinlock_t *ql; /* work around too long line below */
if (rdi->driver_f.free_all_qps)
qp_inuse = rdi->driver_f.free_all_qps(rdi);
qp_inuse += rvt_mcast_tree_empty(rdi); qp_inuse += rvt_mcast_tree_empty(rdi);
if (!rdi->qp_dev) rvt_qp_iter(rdi, (u64)&qp_inuse, rvt_free_qp_cb);
return qp_inuse;
ql = &rdi->qp_dev->qpt_lock;
spin_lock_irqsave(ql, flags);
for (n = 0; n < rdi->qp_dev->qp_table_size; n++) {
qp = rcu_dereference_protected(rdi->qp_dev->qp_table[n],
lockdep_is_held(ql));
RCU_INIT_POINTER(rdi->qp_dev->qp_table[n], NULL);
for (; qp; qp = rcu_dereference_protected(qp->next,
lockdep_is_held(ql)))
qp_inuse++;
}
spin_unlock_irqrestore(ql, flags);
synchronize_rcu();
return qp_inuse; return qp_inuse;
} }
@@ -684,14 +687,14 @@ static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
} }
/** /**
* rvt_reset_qp - initialize the QP state to the reset state * _rvt_reset_qp - initialize the QP state to the reset state
* @qp: the QP to reset * @qp: the QP to reset
* @type: the QP type * @type: the QP type
* *
* r_lock, s_hlock, and s_lock are required to be held by the caller * r_lock, s_hlock, and s_lock are required to be held by the caller
*/ */
static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, static void _rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
enum ib_qp_type type) enum ib_qp_type type)
__must_hold(&qp->s_lock) __must_hold(&qp->s_lock)
__must_hold(&qp->s_hlock) __must_hold(&qp->s_hlock)
__must_hold(&qp->r_lock) __must_hold(&qp->r_lock)
@@ -737,6 +740,27 @@ static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
lockdep_assert_held(&qp->s_lock); lockdep_assert_held(&qp->s_lock);
} }
/**
* rvt_reset_qp - initialize the QP state to the reset state
* @rdi: the device info
* @qp: the QP to reset
* @type: the QP type
*
* This is the wrapper function to acquire the r_lock, s_hlock, and s_lock
* before calling _rvt_reset_qp().
*/
static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
enum ib_qp_type type)
{
spin_lock_irq(&qp->r_lock);
spin_lock(&qp->s_hlock);
spin_lock(&qp->s_lock);
_rvt_reset_qp(rdi, qp, type);
spin_unlock(&qp->s_lock);
spin_unlock(&qp->s_hlock);
spin_unlock_irq(&qp->r_lock);
}
/** rvt_free_qpn - Free a qpn from the bit map /** rvt_free_qpn - Free a qpn from the bit map
* @qpt: QP table * @qpt: QP table
* @qpn: queue pair number to free * @qpn: queue pair number to free
@@ -1285,7 +1309,7 @@ int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
switch (new_state) { switch (new_state) {
case IB_QPS_RESET: case IB_QPS_RESET:
if (qp->state != IB_QPS_RESET) if (qp->state != IB_QPS_RESET)
rvt_reset_qp(rdi, qp, ibqp->qp_type); _rvt_reset_qp(rdi, qp, ibqp->qp_type);
break; break;
case IB_QPS_RTR: case IB_QPS_RTR:
@@ -1434,13 +1458,7 @@ int rvt_destroy_qp(struct ib_qp *ibqp)
struct rvt_qp *qp = ibqp_to_rvtqp(ibqp); struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device); struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
spin_lock_irq(&qp->r_lock);
spin_lock(&qp->s_hlock);
spin_lock(&qp->s_lock);
rvt_reset_qp(rdi, qp, ibqp->qp_type); rvt_reset_qp(rdi, qp, ibqp->qp_type);
spin_unlock(&qp->s_lock);
spin_unlock(&qp->s_hlock);
spin_unlock_irq(&qp->r_lock);
wait_event(qp->wait, !atomic_read(&qp->refcount)); wait_event(qp->wait, !atomic_read(&qp->refcount));
/* qpn is now available for use again */ /* qpn is now available for use again */

View File

@@ -329,7 +329,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
 			qp->comp.psn = pkt->psn;
 			if (qp->req.wait_psn) {
 				qp->req.wait_psn = 0;
-				rxe_run_task(&qp->req.task, 1);
+				rxe_run_task(&qp->req.task, 0);
 			}
 		}
 		return COMPST_ERROR_RETRY;
@@ -457,7 +457,7 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
 	 */
 	if (qp->req.wait_fence) {
 		qp->req.wait_fence = 0;
-		rxe_run_task(&qp->req.task, 1);
+		rxe_run_task(&qp->req.task, 0);
 	}
 }
@@ -473,7 +473,7 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
 		if (qp->req.need_rd_atomic) {
 			qp->comp.timeout_retry = 0;
 			qp->req.need_rd_atomic = 0;
-			rxe_run_task(&qp->req.task, 1);
+			rxe_run_task(&qp->req.task, 0);
 		}
 	}
@@ -719,7 +719,7 @@ int rxe_completer(void *arg)
 					  RXE_CNT_COMP_RETRY);
 			qp->req.need_retry = 1;
 			qp->comp.started_retry = 1;
-			rxe_run_task(&qp->req.task, 1);
+			rxe_run_task(&qp->req.task, 0);
 		}
 
 		if (pkt) {

View File

@@ -149,7 +149,6 @@ static const char * const topbuttonpad_pnp_ids[] = {
 	"LEN0042", /* Yoga */
 	"LEN0045",
 	"LEN0047",
-	"LEN0049",
 	"LEN2000", /* S540 */
 	"LEN2001", /* Edge E431 */
 	"LEN2002", /* Edge E531 */
@@ -169,9 +168,11 @@ static const char * const smbus_pnp_ids[] = {
 	/* all of the topbuttonpad_pnp_ids are valid, we just add some extras */
 	"LEN0048", /* X1 Carbon 3 */
 	"LEN0046", /* X250 */
+	"LEN0049", /* Yoga 11e */
 	"LEN004a", /* W541 */
 	"LEN005b", /* P50 */
 	"LEN005e", /* T560 */
+	"LEN006c", /* T470s */
 	"LEN0071", /* T480 */
 	"LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
 	"LEN0073", /* X1 Carbon G5 (Elantech) */
@@ -182,6 +183,7 @@ static const char * const smbus_pnp_ids[] = {
 	"LEN0097", /* X280 -> ALPS trackpoint */
 	"LEN009b", /* T580 */
 	"LEN200f", /* T450s */
+	"LEN2044", /* L470 */
 	"LEN2054", /* E480 */
 	"LEN2055", /* E580 */
 	"SYN3052", /* HP EliteBook 840 G4 */

View File

@@ -3449,7 +3449,7 @@ static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
if (!log) if (!log)
return; return;
if (nvme_get_log(ctrl, NVME_NSID_ALL, 0, NVME_LOG_FW_SLOT, log, if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, log,
sizeof(*log), 0)) sizeof(*log), 0))
dev_warn(ctrl->device, "Get FW SLOT INFO log error\n"); dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
kfree(log); kfree(log);

View File

@@ -3117,6 +3117,7 @@ int open_ctree(struct super_block *sb,
/* do not make disk changes in broken FS or nologreplay is given */ /* do not make disk changes in broken FS or nologreplay is given */
if (btrfs_super_log_root(disk_super) != 0 && if (btrfs_super_log_root(disk_super) != 0 &&
!btrfs_test_opt(fs_info, NOLOGREPLAY)) { !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
btrfs_info(fs_info, "start tree-log replay");
ret = btrfs_replay_log(fs_info, fs_devices); ret = btrfs_replay_log(fs_info, fs_devices);
if (ret) { if (ret) {
err = ret; err = ret;

View File

@@ -228,6 +228,17 @@ static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
struct extent_map *merge = NULL; struct extent_map *merge = NULL;
struct rb_node *rb; struct rb_node *rb;
/*
* We can't modify an extent map that is in the tree and that is being
* used by another task, as it can cause that other task to see it in
* inconsistent state during the merging. We always have 1 reference for
* the tree and 1 for this task (which is unpinning the extent map or
* clearing the logging flag), so anything > 2 means it's being used by
* other tasks too.
*/
if (refcount_read(&em->refs) > 2)
return;
if (em->start != 0) { if (em->start != 0) {
rb = rb_prev(&em->rb_node); rb = rb_prev(&em->rb_node);
if (rb) if (rb)

View File

@@ -747,6 +747,7 @@ int btrfs_ref_tree_mod(struct btrfs_root *root, u64 bytenr, u64 num_bytes,
*/ */
be = add_block_entry(root->fs_info, bytenr, num_bytes, ref_root); be = add_block_entry(root->fs_info, bytenr, num_bytes, ref_root);
if (IS_ERR(be)) { if (IS_ERR(be)) {
kfree(ref);
kfree(ra); kfree(ra);
ret = PTR_ERR(be); ret = PTR_ERR(be);
goto out; goto out;
@@ -760,6 +761,8 @@ int btrfs_ref_tree_mod(struct btrfs_root *root, u64 bytenr, u64 num_bytes,
"re-allocated a block that still has references to it!"); "re-allocated a block that still has references to it!");
dump_block_entry(fs_info, be); dump_block_entry(fs_info, be);
dump_ref_action(fs_info, ra); dump_ref_action(fs_info, ra);
kfree(ref);
kfree(ra);
goto out_unlock; goto out_unlock;
} }
@@ -822,6 +825,7 @@ int btrfs_ref_tree_mod(struct btrfs_root *root, u64 bytenr, u64 num_bytes,
"dropping a ref for a existing root that doesn't have a ref on the block"); "dropping a ref for a existing root that doesn't have a ref on the block");
dump_block_entry(fs_info, be); dump_block_entry(fs_info, be);
dump_ref_action(fs_info, ra); dump_ref_action(fs_info, ra);
kfree(ref);
kfree(ra); kfree(ra);
goto out_unlock; goto out_unlock;
} }
@@ -837,6 +841,7 @@ int btrfs_ref_tree_mod(struct btrfs_root *root, u64 bytenr, u64 num_bytes,
"attempting to add another ref for an existing ref on a tree block"); "attempting to add another ref for an existing ref on a tree block");
dump_block_entry(fs_info, be); dump_block_entry(fs_info, be);
dump_ref_action(fs_info, ra); dump_ref_action(fs_info, ra);
kfree(ref);
kfree(ra); kfree(ra);
goto out_unlock; goto out_unlock;
} }

View File

@@ -1857,6 +1857,8 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
} }
if (btrfs_super_log_root(fs_info->super_copy) != 0) { if (btrfs_super_log_root(fs_info->super_copy) != 0) {
btrfs_warn(fs_info,
"mount required to replay tree-log, cannot remount read-write");
ret = -EINVAL; ret = -EINVAL;
goto restore; goto restore;
} }

View File

@@ -203,6 +203,7 @@ static int ext4_protect_reserved_inode(struct super_block *sb,
return PTR_ERR(inode); return PTR_ERR(inode);
num = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits; num = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
while (i < num) { while (i < num) {
cond_resched();
map.m_lblk = i; map.m_lblk = i;
map.m_len = num - i; map.m_len = num - i;
n = ext4_map_blocks(NULL, inode, &map, 0); n = ext4_map_blocks(NULL, inode, &map, 0);

View File

@@ -127,12 +127,14 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
if (err != ERR_BAD_DX_DIR) { if (err != ERR_BAD_DX_DIR) {
return err; return err;
} }
/* /* Can we just clear INDEX flag to ignore htree information? */
* We don't set the inode dirty flag since it's not if (!ext4_has_metadata_csum(sb)) {
* critical that it get flushed back to the disk. /*
*/ * We don't set the inode dirty flag since it's not
ext4_clear_inode_flag(file_inode(file), * critical that it gets flushed back to the disk.
EXT4_INODE_INDEX); */
ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
}
} }
if (ext4_has_inline_data(inode)) { if (ext4_has_inline_data(inode)) {

View File

@@ -2468,8 +2468,11 @@ void ext4_insert_dentry(struct inode *inode,
struct ext4_filename *fname); struct ext4_filename *fname);
static inline void ext4_update_dx_flag(struct inode *inode) static inline void ext4_update_dx_flag(struct inode *inode)
{ {
if (!ext4_has_feature_dir_index(inode->i_sb)) if (!ext4_has_feature_dir_index(inode->i_sb)) {
/* ext4_iget() should have caught this... */
WARN_ON_ONCE(ext4_has_feature_metadata_csum(inode->i_sb));
ext4_clear_inode_flag(inode, EXT4_INODE_INDEX); ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
}
} }
static const unsigned char ext4_filetype_table[] = { static const unsigned char ext4_filetype_table[] = {
DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK

View File

@@ -5052,6 +5052,18 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
ret = -EFSCORRUPTED; ret = -EFSCORRUPTED;
goto bad_inode; goto bad_inode;
} }
/*
* If dir_index is not enabled but there's dir with INDEX flag set,
* we'd normally treat htree data as empty space. But with metadata
* checksumming that corrupts checksums so forbid that.
*/
if (!ext4_has_feature_dir_index(sb) && ext4_has_metadata_csum(sb) &&
ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) {
ext4_error_inode(inode, function, line, 0,
"iget: Dir with htree data on filesystem without dir_index feature.");
ret = -EFSCORRUPTED;
goto bad_inode;
}
ei->i_disksize = inode->i_size; ei->i_disksize = inode->i_size;
#ifdef CONFIG_QUOTA #ifdef CONFIG_QUOTA
ei->i_reserved_quota = 0; ei->i_reserved_quota = 0;

View File

@@ -120,10 +120,10 @@ void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
{ {
__ext4_warning(sb, function, line, "%s", msg); __ext4_warning(sb, function, line, "%s", msg);
__ext4_warning(sb, function, line, __ext4_warning(sb, function, line,
"MMP failure info: last update time: %llu, last update " "MMP failure info: last update time: %llu, last update node: %.*s, last update device: %.*s",
"node: %s, last update device: %s", (unsigned long long)le64_to_cpu(mmp->mmp_time),
(long long unsigned int) le64_to_cpu(mmp->mmp_time), (int)sizeof(mmp->mmp_nodename), mmp->mmp_nodename,
mmp->mmp_nodename, mmp->mmp_bdevname); (int)sizeof(mmp->mmp_bdevname), mmp->mmp_bdevname);
} }
/* /*
@@ -154,6 +154,7 @@ static int kmmpd(void *data)
mmp_check_interval = max(EXT4_MMP_CHECK_MULT * mmp_update_interval, mmp_check_interval = max(EXT4_MMP_CHECK_MULT * mmp_update_interval,
EXT4_MMP_MIN_CHECK_INTERVAL); EXT4_MMP_MIN_CHECK_INTERVAL);
mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval); mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval);
BUILD_BUG_ON(sizeof(mmp->mmp_bdevname) < BDEVNAME_SIZE);
bdevname(bh->b_bdev, mmp->mmp_bdevname); bdevname(bh->b_bdev, mmp->mmp_bdevname);
memcpy(mmp->mmp_nodename, init_utsname()->nodename, memcpy(mmp->mmp_nodename, init_utsname()->nodename,
@@ -375,7 +376,8 @@ int ext4_multi_mount_protect(struct super_block *sb,
/* /*
* Start a kernel thread to update the MMP block periodically. * Start a kernel thread to update the MMP block periodically.
*/ */
EXT4_SB(sb)->s_mmp_tsk = kthread_run(kmmpd, mmpd_data, "kmmpd-%s", EXT4_SB(sb)->s_mmp_tsk = kthread_run(kmmpd, mmpd_data, "kmmpd-%.*s",
(int)sizeof(mmp->mmp_bdevname),
bdevname(bh->b_bdev, bdevname(bh->b_bdev,
mmp->mmp_bdevname)); mmp->mmp_bdevname));
if (IS_ERR(EXT4_SB(sb)->s_mmp_tsk)) { if (IS_ERR(EXT4_SB(sb)->s_mmp_tsk)) {

View File

@@ -2214,6 +2214,13 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
retval = ext4_dx_add_entry(handle, &fname, dir, inode); retval = ext4_dx_add_entry(handle, &fname, dir, inode);
if (!retval || (retval != ERR_BAD_DX_DIR)) if (!retval || (retval != ERR_BAD_DX_DIR))
goto out; goto out;
/* Can we just ignore htree data? */
if (ext4_has_metadata_csum(sb)) {
EXT4_ERROR_INODE(dir,
"Directory has corrupted htree index.");
retval = -EFSCORRUPTED;
goto out;
}
ext4_clear_inode_flag(dir, EXT4_INODE_INDEX); ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
dx_fallback++; dx_fallback++;
ext4_mark_inode_dirty(handle, dir); ext4_mark_inode_dirty(handle, dir);

View File

@@ -3000,17 +3000,11 @@ static int ext4_feature_set_ok(struct super_block *sb, int readonly)
return 0; return 0;
} }
#ifndef CONFIG_QUOTA #if !defined(CONFIG_QUOTA) || !defined(CONFIG_QFMT_V2)
if (ext4_has_feature_quota(sb) && !readonly) { if (!readonly && (ext4_has_feature_quota(sb) ||
ext4_has_feature_project(sb))) {
ext4_msg(sb, KERN_ERR, ext4_msg(sb, KERN_ERR,
"Filesystem with quota feature cannot be mounted RDWR " "The kernel was not built with CONFIG_QUOTA and CONFIG_QFMT_V2");
"without CONFIG_QUOTA");
return 0;
}
if (ext4_has_feature_project(sb) && !readonly) {
ext4_msg(sb, KERN_ERR,
"Filesystem with project quota feature cannot be mounted RDWR "
"without CONFIG_QUOTA");
return 0; return 0;
} }
#endif /* CONFIG_QUOTA */ #endif /* CONFIG_QUOTA */
@@ -3804,6 +3798,15 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
*/ */
sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT; sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
if (blocksize < EXT4_MIN_BLOCK_SIZE ||
blocksize > EXT4_MAX_BLOCK_SIZE) {
ext4_msg(sb, KERN_ERR,
"Unsupported filesystem blocksize %d (%d log_block_size)",
blocksize, le32_to_cpu(es->s_log_block_size));
goto failed_mount;
}
if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) { if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE; sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO; sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO;
@@ -3821,6 +3824,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
ext4_msg(sb, KERN_ERR, ext4_msg(sb, KERN_ERR,
"unsupported inode size: %d", "unsupported inode size: %d",
sbi->s_inode_size); sbi->s_inode_size);
ext4_msg(sb, KERN_ERR, "blocksize: %d", blocksize);
goto failed_mount; goto failed_mount;
} }
/* /*
@@ -4021,14 +4025,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
if (!ext4_feature_set_ok(sb, (sb_rdonly(sb)))) if (!ext4_feature_set_ok(sb, (sb_rdonly(sb))))
goto failed_mount; goto failed_mount;
blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
if (blocksize < EXT4_MIN_BLOCK_SIZE ||
blocksize > EXT4_MAX_BLOCK_SIZE) {
ext4_msg(sb, KERN_ERR,
"Unsupported filesystem blocksize %d (%d log_block_size)",
blocksize, le32_to_cpu(es->s_log_block_size));
goto failed_mount;
}
if (le32_to_cpu(es->s_log_block_size) > if (le32_to_cpu(es->s_log_block_size) >
(EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) { (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
ext4_msg(sb, KERN_ERR, ext4_msg(sb, KERN_ERR,

View File

@@ -971,29 +971,33 @@ void jbd2_journal_commit_transaction(journal_t *journal)
* it. */ * it. */
/* /*
* A buffer which has been freed while still being journaled by * A buffer which has been freed while still being journaled
* a previous transaction. * by a previous transaction, refile the buffer to BJ_Forget of
*/ * the running transaction. If the just committed transaction
if (buffer_freed(bh)) { * contains "add to orphan" operation, we can completely
* invalidate the buffer now. We are rather through in that
* since the buffer may be still accessible when blocksize <
* pagesize and it is attached to the last partial page.
*/
if (buffer_freed(bh) && !jh->b_next_transaction) {
struct address_space *mapping;
clear_buffer_freed(bh);
clear_buffer_jbddirty(bh);
/* /*
* If the running transaction is the one containing * Block device buffers need to stay mapped all the
* "add to orphan" operation (b_next_transaction != * time, so it is enough to clear buffer_jbddirty and
* NULL), we have to wait for that transaction to * buffer_freed bits. For the file mapping buffers (i.e.
* commit before we can really get rid of the buffer. * journalled data) we need to unmap buffer and clear
* So just clear b_modified to not confuse transaction * more bits. We also need to be careful about the check
* credit accounting and refile the buffer to * because the data page mapping can get cleared under
* BJ_Forget of the running transaction. If the just * out hands, which alse need not to clear more bits
* committed transaction contains "add to orphan" * because the page and buffers will be freed and can
* operation, we can completely invalidate the buffer * never be reused once we are done with them.
* now. We are rather through in that since the
* buffer may be still accessible when blocksize <
* pagesize and it is attached to the last partial
* page.
*/ */
jh->b_modified = 0; mapping = READ_ONCE(bh->b_page->mapping);
if (!jh->b_next_transaction) { if (mapping && !sb_is_blkdev_sb(mapping->host->i_sb)) {
clear_buffer_freed(bh);
clear_buffer_jbddirty(bh);
clear_buffer_mapped(bh); clear_buffer_mapped(bh);
clear_buffer_new(bh); clear_buffer_new(bh);
clear_buffer_req(bh); clear_buffer_req(bh);

View File

@@ -2228,14 +2228,16 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
return -EBUSY; return -EBUSY;
} }
/* /*
* OK, buffer won't be reachable after truncate. We just set * OK, buffer won't be reachable after truncate. We just clear
* j_next_transaction to the running transaction (if there is * b_modified to not confuse transaction credit accounting, and
* one) and mark buffer as freed so that commit code knows it * set j_next_transaction to the running transaction (if there
* should clear dirty bits when it is done with the buffer. * is one) and mark buffer as freed so that commit code knows
* it should clear dirty bits when it is done with the buffer.
*/ */
set_buffer_freed(bh); set_buffer_freed(bh);
if (journal->j_running_transaction && buffer_jbddirty(bh)) if (journal->j_running_transaction && buffer_jbddirty(bh))
jh->b_next_transaction = journal->j_running_transaction; jh->b_next_transaction = journal->j_running_transaction;
jh->b_modified = 0;
jbd2_journal_put_journal_head(jh); jbd2_journal_put_journal_head(jh);
spin_unlock(&journal->j_list_lock); spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh); jbd_unlock_bh_state(bh);

View File

@@ -5117,7 +5117,7 @@ static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
hdr->timestamp = jiffies; hdr->timestamp = jiffies;
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE]; msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 1, 0); nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0);
nfs4_state_protect_write(server->nfs_client, clnt, msg, hdr); nfs4_state_protect_write(server->nfs_client, clnt, msg, hdr);
} }

View File

@@ -2442,6 +2442,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD), SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE), SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS), SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD), SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS), SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3), SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),

View File

@@ -165,8 +165,34 @@ static int uac_clock_selector_set_val(struct snd_usb_audio *chip, int selector_i
return ret; return ret;
} }
/*
* Assume the clock is valid if clock source supports only one single sample
* rate, the terminal is connected directly to it (there is no clock selector)
* and clock type is internal. This is to deal with some Denon DJ controllers
* that always reports that clock is invalid.
*/
static bool uac_clock_source_is_valid_quirk(struct snd_usb_audio *chip,
struct audioformat *fmt,
int source_id)
{
if (fmt->protocol == UAC_VERSION_2) {
struct uac_clock_source_descriptor *cs_desc =
snd_usb_find_clock_source(chip->ctrl_intf, source_id);
if (!cs_desc)
return false;
return (fmt->nr_rates == 1 &&
(fmt->clock & 0xff) == cs_desc->bClockID &&
(cs_desc->bmAttributes & 0x3) !=
UAC_CLOCK_SOURCE_TYPE_EXT);
}
return false;
}
static bool uac_clock_source_is_valid(struct snd_usb_audio *chip, static bool uac_clock_source_is_valid(struct snd_usb_audio *chip,
int protocol, struct audioformat *fmt,
int source_id) int source_id)
{ {
int err; int err;
@@ -174,26 +200,26 @@ static bool uac_clock_source_is_valid(struct snd_usb_audio *chip,
struct usb_device *dev = chip->dev; struct usb_device *dev = chip->dev;
u32 bmControls; u32 bmControls;
if (protocol == UAC_VERSION_3) { if (fmt->protocol == UAC_VERSION_3) {
struct uac3_clock_source_descriptor *cs_desc = struct uac3_clock_source_descriptor *cs_desc =
snd_usb_find_clock_source_v3(chip->ctrl_intf, source_id); snd_usb_find_clock_source_v3(chip->ctrl_intf, source_id);
if (!cs_desc) if (!cs_desc)
return 0; return false;
bmControls = le32_to_cpu(cs_desc->bmControls); bmControls = le32_to_cpu(cs_desc->bmControls);
} else { /* UAC_VERSION_1/2 */ } else { /* UAC_VERSION_1/2 */
struct uac_clock_source_descriptor *cs_desc = struct uac_clock_source_descriptor *cs_desc =
snd_usb_find_clock_source(chip->ctrl_intf, source_id); snd_usb_find_clock_source(chip->ctrl_intf, source_id);
if (!cs_desc) if (!cs_desc)
return 0; return false;
bmControls = cs_desc->bmControls; bmControls = cs_desc->bmControls;
} }
/* If a clock source can't tell us whether it's valid, we assume it is */ /* If a clock source can't tell us whether it's valid, we assume it is */
if (!uac_v2v3_control_is_readable(bmControls, if (!uac_v2v3_control_is_readable(bmControls,
UAC2_CS_CONTROL_CLOCK_VALID)) UAC2_CS_CONTROL_CLOCK_VALID))
return 1; return true;
err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR, err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR,
USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN, USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
@@ -205,13 +231,17 @@ static bool uac_clock_source_is_valid(struct snd_usb_audio *chip,
dev_warn(&dev->dev, dev_warn(&dev->dev,
"%s(): cannot get clock validity for id %d\n", "%s(): cannot get clock validity for id %d\n",
__func__, source_id); __func__, source_id);
return 0; return false;
} }
return !!data; if (data)
return true;
else
return uac_clock_source_is_valid_quirk(chip, fmt, source_id);
} }
static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id, static int __uac_clock_find_source(struct snd_usb_audio *chip,
struct audioformat *fmt, int entity_id,
unsigned long *visited, bool validate) unsigned long *visited, bool validate)
{ {
struct uac_clock_source_descriptor *source; struct uac_clock_source_descriptor *source;
@@ -231,7 +261,7 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id,
source = snd_usb_find_clock_source(chip->ctrl_intf, entity_id); source = snd_usb_find_clock_source(chip->ctrl_intf, entity_id);
if (source) { if (source) {
entity_id = source->bClockID; entity_id = source->bClockID;
if (validate && !uac_clock_source_is_valid(chip, UAC_VERSION_2, if (validate && !uac_clock_source_is_valid(chip, fmt,
entity_id)) { entity_id)) {
usb_audio_err(chip, usb_audio_err(chip,
"clock source %d is not valid, cannot use\n", "clock source %d is not valid, cannot use\n",
@@ -262,8 +292,9 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id,
} }
cur = ret; cur = ret;
ret = __uac_clock_find_source(chip, selector->baCSourceID[ret - 1], ret = __uac_clock_find_source(chip, fmt,
visited, validate); selector->baCSourceID[ret - 1],
visited, validate);
if (!validate || ret > 0 || !chip->autoclock) if (!validate || ret > 0 || !chip->autoclock)
return ret; return ret;
@@ -274,8 +305,9 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id,
if (i == cur) if (i == cur)
continue; continue;
ret = __uac_clock_find_source(chip, selector->baCSourceID[i - 1], ret = __uac_clock_find_source(chip, fmt,
visited, true); selector->baCSourceID[i - 1],
visited, true);
if (ret < 0) if (ret < 0)
continue; continue;
@@ -295,14 +327,16 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id,
/* FIXME: multipliers only act as pass-thru element for now */ /* FIXME: multipliers only act as pass-thru element for now */
multiplier = snd_usb_find_clock_multiplier(chip->ctrl_intf, entity_id); multiplier = snd_usb_find_clock_multiplier(chip->ctrl_intf, entity_id);
if (multiplier) if (multiplier)
return __uac_clock_find_source(chip, multiplier->bCSourceID, return __uac_clock_find_source(chip, fmt,
visited, validate); multiplier->bCSourceID,
visited, validate);
return -EINVAL; return -EINVAL;
} }
static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id, static int __uac3_clock_find_source(struct snd_usb_audio *chip,
unsigned long *visited, bool validate) struct audioformat *fmt, int entity_id,
unsigned long *visited, bool validate)
{ {
struct uac3_clock_source_descriptor *source; struct uac3_clock_source_descriptor *source;
struct uac3_clock_selector_descriptor *selector; struct uac3_clock_selector_descriptor *selector;
@@ -321,7 +355,7 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id,
source = snd_usb_find_clock_source_v3(chip->ctrl_intf, entity_id); source = snd_usb_find_clock_source_v3(chip->ctrl_intf, entity_id);
if (source) { if (source) {
entity_id = source->bClockID; entity_id = source->bClockID;
if (validate && !uac_clock_source_is_valid(chip, UAC_VERSION_3, if (validate && !uac_clock_source_is_valid(chip, fmt,
entity_id)) { entity_id)) {
usb_audio_err(chip, usb_audio_err(chip,
"clock source %d is not valid, cannot use\n", "clock source %d is not valid, cannot use\n",
@@ -352,7 +386,8 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id,
} }
cur = ret; cur = ret;
ret = __uac3_clock_find_source(chip, selector->baCSourceID[ret - 1], ret = __uac3_clock_find_source(chip, fmt,
selector->baCSourceID[ret - 1],
visited, validate); visited, validate);
if (!validate || ret > 0 || !chip->autoclock) if (!validate || ret > 0 || !chip->autoclock)
return ret; return ret;
@@ -364,8 +399,9 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id,
if (i == cur) if (i == cur)
continue; continue;
ret = __uac3_clock_find_source(chip, selector->baCSourceID[i - 1], ret = __uac3_clock_find_source(chip, fmt,
visited, true); selector->baCSourceID[i - 1],
visited, true);
if (ret < 0) if (ret < 0)
continue; continue;
@@ -386,7 +422,8 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id,
multiplier = snd_usb_find_clock_multiplier_v3(chip->ctrl_intf, multiplier = snd_usb_find_clock_multiplier_v3(chip->ctrl_intf,
entity_id); entity_id);
if (multiplier) if (multiplier)
return __uac3_clock_find_source(chip, multiplier->bCSourceID, return __uac3_clock_find_source(chip, fmt,
multiplier->bCSourceID,
visited, validate); visited, validate);
return -EINVAL; return -EINVAL;
@@ -403,18 +440,18 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id,
* *
* Returns the clock source UnitID (>=0) on success, or an error. * Returns the clock source UnitID (>=0) on success, or an error.
*/ */
int snd_usb_clock_find_source(struct snd_usb_audio *chip, int protocol, int snd_usb_clock_find_source(struct snd_usb_audio *chip,
int entity_id, bool validate) struct audioformat *fmt, bool validate)
{ {
DECLARE_BITMAP(visited, 256); DECLARE_BITMAP(visited, 256);
memset(visited, 0, sizeof(visited)); memset(visited, 0, sizeof(visited));
switch (protocol) { switch (fmt->protocol) {
case UAC_VERSION_2: case UAC_VERSION_2:
return __uac_clock_find_source(chip, entity_id, visited, return __uac_clock_find_source(chip, fmt, fmt->clock, visited,
validate); validate);
case UAC_VERSION_3: case UAC_VERSION_3:
return __uac3_clock_find_source(chip, entity_id, visited, return __uac3_clock_find_source(chip, fmt, fmt->clock, visited,
validate); validate);
default: default:
return -EINVAL; return -EINVAL;
@@ -515,8 +552,7 @@ static int set_sample_rate_v2v3(struct snd_usb_audio *chip, int iface,
* automatic clock selection if the current clock is not * automatic clock selection if the current clock is not
* valid. * valid.
*/ */
clock = snd_usb_clock_find_source(chip, fmt->protocol, clock = snd_usb_clock_find_source(chip, fmt, true);
fmt->clock, true);
if (clock < 0) { if (clock < 0) {
/* We did not find a valid clock, but that might be /* We did not find a valid clock, but that might be
* because the current sample rate does not match an * because the current sample rate does not match an
@@ -524,8 +560,7 @@ static int set_sample_rate_v2v3(struct snd_usb_audio *chip, int iface,
* and we will do another validation after setting the * and we will do another validation after setting the
* rate. * rate.
*/ */
clock = snd_usb_clock_find_source(chip, fmt->protocol, clock = snd_usb_clock_find_source(chip, fmt, false);
fmt->clock, false);
if (clock < 0) if (clock < 0)
return clock; return clock;
} }
@@ -591,7 +626,7 @@ static int set_sample_rate_v2v3(struct snd_usb_audio *chip, int iface,
validation: validation:
/* validate clock after rate change */ /* validate clock after rate change */
if (!uac_clock_source_is_valid(chip, fmt->protocol, clock)) if (!uac_clock_source_is_valid(chip, fmt, clock))
return -ENXIO; return -ENXIO;
return 0; return 0;
} }

View File

@@ -6,7 +6,7 @@ int snd_usb_init_sample_rate(struct snd_usb_audio *chip, int iface,
struct usb_host_interface *alts, struct usb_host_interface *alts,
struct audioformat *fmt, int rate); struct audioformat *fmt, int rate);
int snd_usb_clock_find_source(struct snd_usb_audio *chip, int protocol, int snd_usb_clock_find_source(struct snd_usb_audio *chip,
int entity_id, bool validate); struct audioformat *fmt, bool validate);
#endif /* __USBAUDIO_CLOCK_H */ #endif /* __USBAUDIO_CLOCK_H */

View File

@@ -306,8 +306,7 @@ static int parse_audio_format_rates_v2v3(struct snd_usb_audio *chip,
struct usb_device *dev = chip->dev; struct usb_device *dev = chip->dev;
unsigned char tmp[2], *data; unsigned char tmp[2], *data;
int nr_triplets, data_size, ret = 0; int nr_triplets, data_size, ret = 0;
int clock = snd_usb_clock_find_source(chip, fp->protocol, int clock = snd_usb_clock_find_source(chip, fp, false);
fp->clock, false);
if (clock < 0) { if (clock < 0) {
dev_err(&dev->dev, dev_err(&dev->dev,

View File

@@ -912,6 +912,15 @@ static int parse_term_proc_unit(struct mixer_build *state,
return 0; return 0;
} }
static int parse_term_effect_unit(struct mixer_build *state,
struct usb_audio_term *term,
void *p1, int id)
{
term->type = UAC3_EFFECT_UNIT << 16; /* virtual type */
term->id = id;
return 0;
}
static int parse_term_uac2_clock_source(struct mixer_build *state, static int parse_term_uac2_clock_source(struct mixer_build *state,
struct usb_audio_term *term, struct usb_audio_term *term,
void *p1, int id) void *p1, int id)
@@ -996,8 +1005,7 @@ static int __check_input_term(struct mixer_build *state, int id,
UAC3_PROCESSING_UNIT); UAC3_PROCESSING_UNIT);
case PTYPE(UAC_VERSION_2, UAC2_EFFECT_UNIT): case PTYPE(UAC_VERSION_2, UAC2_EFFECT_UNIT):
case PTYPE(UAC_VERSION_3, UAC3_EFFECT_UNIT): case PTYPE(UAC_VERSION_3, UAC3_EFFECT_UNIT):
return parse_term_proc_unit(state, term, p1, id, return parse_term_effect_unit(state, term, p1, id);
UAC3_EFFECT_UNIT);
case PTYPE(UAC_VERSION_1, UAC1_EXTENSION_UNIT): case PTYPE(UAC_VERSION_1, UAC1_EXTENSION_UNIT):
case PTYPE(UAC_VERSION_2, UAC2_EXTENSION_UNIT_V2): case PTYPE(UAC_VERSION_2, UAC2_EXTENSION_UNIT_V2):
case PTYPE(UAC_VERSION_3, UAC3_EXTENSION_UNIT): case PTYPE(UAC_VERSION_3, UAC3_EXTENSION_UNIT):

View File

@@ -1182,6 +1182,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
case USB_ID(0x1395, 0x740a): /* Sennheiser DECT */ case USB_ID(0x1395, 0x740a): /* Sennheiser DECT */
case USB_ID(0x1901, 0x0191): /* GE B850V3 CP2114 audio interface */ case USB_ID(0x1901, 0x0191): /* GE B850V3 CP2114 audio interface */
case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */ case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */
case USB_ID(0x2912, 0x30c8): /* Audioengine D1 */
return true; return true;
} }