Merge tag 'ASB-2024-10-05_4.19-stable' of https://android.googlesource.com/kernel/common into android13-4.19-kona
https://source.android.com/docs/security/bulletin/2024-10-01

* tag 'ASB-2024-10-05_4.19-stable' of https://android.googlesource.com/kernel/common: (99 commits)
  Linux 4.19.322
  Revert "parisc: Use irq_enter_rcu() to fix warning at kernel/context_tracking.c:367"
  netns: restore ops before calling ops_exit_list
  cx82310_eth: fix error return code in cx82310_bind()
  net, sunrpc: Remap EPERM in case of connection failure in xs_tcp_setup_socket
  rtmutex: Drop rt_mutex::wait_lock before scheduling
  drm/i915/fence: Mark debug_fence_free() with __maybe_unused
  drm/i915/fence: Mark debug_fence_init_onstack() with __maybe_unused
  ACPI: processor: Fix memory leaks in error paths of processor_add()
  ACPI: processor: Return an error if acpi_processor_get_info() fails in processor_add()
  ila: call nf_unregister_net_hooks() sooner
  netns: add pre_exit method to struct pernet_operations
  nilfs2: protect references to superblock parameters exposed in sysfs
  nilfs2: replace snprintf in show functions with sysfs_emit
  tracing: Avoid possible softlockup in tracing_iter_reset()
  ring-buffer: Rename ring_buffer_read() to read_buffer_iter_advance()
  uprobes: Use kzalloc to allocate xol area
  clocksource/drivers/imx-tpm: Fix next event not taking effect sometime
  clocksource/drivers/imx-tpm: Fix return -ETIME when delta exceeds INT_MAX
  VMCI: Fix use-after-free when removing resource in vmci_resource_remove()
  ...

Conflicts:
    drivers/clk/qcom/clk-alpha-pll.c

Change-Id: I79078f7d518fa7e6a2b373df48acbd6f9f9ba30b
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 321
+SUBLEVEL = 322
 EXTRAVERSION =
 NAME = "People's Front"
@@ -524,7 +524,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
 
     old_regs = set_irq_regs(regs);
     local_irq_disable();
-    irq_enter_rcu();
+    irq_enter();
 
     eirr_val = mfctl(23) & cpu_eiem & per_cpu(local_ack_eiem, cpu);
     if (!eirr_val)
@@ -559,7 +559,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
 #endif /* CONFIG_IRQSTACKS */
 
  out:
-    irq_exit_rcu();
+    irq_exit();
     set_irq_regs(old_regs);
     return;
@@ -391,6 +391,7 @@ int setup_one_line(struct line *lines, int n, char *init,
             parse_chan_pair(NULL, line, n, opts, error_out);
             err = 0;
         }
+        *error_out = "configured as 'none'";
     } else {
         char *new = kstrdup(init, GFP_KERNEL);
         if (!new) {
@@ -414,6 +415,7 @@ int setup_one_line(struct line *lines, int n, char *init,
             }
         }
         if (err) {
+            *error_out = "failed to parse channel pair";
             line->init_str = NULL;
             line->valid = 0;
             kfree(new);
@@ -227,6 +227,7 @@ bool bio_integrity_prep(struct bio *bio)
     unsigned int bytes, offset, i;
     unsigned int intervals;
     blk_status_t status;
+    gfp_t gfp = GFP_NOIO;
 
     if (!bi)
         return true;
@@ -249,12 +250,20 @@ bool bio_integrity_prep(struct bio *bio)
         if (!bi->profile->generate_fn ||
             !(bi->flags & BLK_INTEGRITY_GENERATE))
             return true;
+
+        /*
+         * Zero the memory allocated to not leak uninitialized kernel
+         * memory to disk. For PI this only affects the app tag, but
+         * for non-integrity metadata it affects the entire metadata
+         * buffer.
+         */
+        gfp |= __GFP_ZERO;
     }
     intervals = bio_integrity_intervals(bi, bio_sectors(bio));
 
     /* Allocate kernel buffer for protection data */
     len = intervals * bi->tuple_size;
-    buf = kmalloc(len, GFP_NOIO | q->bounce_gfp);
+    buf = kmalloc(len, gfp | q->bounce_gfp);
     status = BLK_STS_RESOURCE;
     if (unlikely(buf == NULL)) {
         printk(KERN_ERR "could not allocate integrity buffer\n");
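The hunk above collects allocation flags in a local `gfp_t` and ORs in `__GFP_ZERO` only when the buffer might reach stable storage partially uninitialized. A minimal userspace sketch of the same idiom; the `MYGFP_*` names and `my_alloc()`/`alloc_metadata()` helpers are illustrative stand-ins, not kernel APIs:

```c
#include <stdlib.h>
#include <string.h>

#define MYGFP_NOIO  0x1
#define MYGFP_ZERO  0x2

static void *my_alloc(size_t len, unsigned flags)
{
	void *p = malloc(len);

	/* Zero only when asked, mirroring __GFP_ZERO. */
	if (p && (flags & MYGFP_ZERO))
		memset(p, 0, len);
	return p;
}

void *alloc_metadata(size_t len, int device_overwrites_all)
{
	unsigned gfp = MYGFP_NOIO;

	/*
	 * If the consumer will not overwrite every byte, zero the buffer
	 * up front so stale heap contents can never leak out.
	 */
	if (!device_overwrites_all)
		gfp |= MYGFP_ZERO;

	return my_alloc(len, gfp);
}
```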
@@ -391,7 +391,7 @@ static int acpi_processor_add(struct acpi_device *device,
 
     result = acpi_processor_get_info(device);
     if (result) /* Processor is not physically present or unavailable */
-        return 0;
+        goto err_clear_driver_data;
 
     BUG_ON(pr->id >= nr_cpu_ids);
 
@@ -406,7 +406,7 @@ static int acpi_processor_add(struct acpi_device *device,
             "BIOS reported wrong ACPI id %d for the processor\n",
             pr->id);
         /* Give up, but do not abort the namespace scan. */
-        goto err;
+        goto err_clear_driver_data;
     }
     /*
      * processor_device_array is not cleared on errors to allow buggy BIOS
@@ -418,12 +418,12 @@ static int acpi_processor_add(struct acpi_device *device,
     dev = get_cpu_device(pr->id);
     if (!dev) {
         result = -ENODEV;
-        goto err;
+        goto err_clear_per_cpu;
     }
 
     result = acpi_bind_one(dev, device);
     if (result)
-        goto err;
+        goto err_clear_per_cpu;
 
     pr->dev = dev;
 
@@ -434,10 +434,11 @@ static int acpi_processor_add(struct acpi_device *device,
     dev_err(dev, "Processor driver could not be attached\n");
     acpi_unbind_one(dev);
 
- err:
-    free_cpumask_var(pr->throttling.shared_cpu_map);
-    device->driver_data = NULL;
+ err_clear_per_cpu:
     per_cpu(processors, pr->id) = NULL;
+ err_clear_driver_data:
+    device->driver_data = NULL;
+    free_cpumask_var(pr->throttling.shared_cpu_map);
+ err_free_pr:
     kfree(pr);
     return result;
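The reworked error path splits one catch-all `err:` label into ordered labels, each undoing exactly the setup completed before the failure point. A generic sketch of the unwind idiom under assumed names (`ctx`, `setup()` are hypothetical):

```c
#include <stdlib.h>

struct ctx { void *a, *b, *c; };

/*
 * Labels run in reverse order of setup: a failure at any step frees
 * exactly what was already allocated, and nothing more. On success
 * the caller owns all three buffers.
 */
int setup(struct ctx *ctx)
{
	ctx->a = malloc(16);
	if (!ctx->a)
		goto err_out;

	ctx->b = malloc(16);
	if (!ctx->b)
		goto err_free_a;

	ctx->c = malloc(16);
	if (!ctx->c)
		goto err_free_b;

	return 0;

err_free_b:
	free(ctx->b);
err_free_a:
	free(ctx->a);
err_out:
	return -1;
}
```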
@@ -6221,8 +6221,10 @@ struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
     }
 
     dr = devres_alloc(ata_devres_release, 0, GFP_KERNEL);
-    if (!dr)
+    if (!dr) {
+        kfree(host);
         goto err_out;
+    }
 
     devres_add(dev, dr);
     dev_set_drvdata(dev, host);
@@ -537,7 +537,8 @@ static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc)
 
         while (sg_len) {
             /* table overflow should never happen */
-            BUG_ON (pi++ >= MAX_DCMDS);
+            if (WARN_ON_ONCE(pi >= MAX_DCMDS))
+                return AC_ERR_SYSTEM;
 
             len = (sg_len < MAX_DBDMA_SEG) ? sg_len : MAX_DBDMA_SEG;
             table->command = cpu_to_le16(write ? OUTPUT_MORE: INPUT_MORE);
@@ -549,11 +550,13 @@ static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc)
             addr += len;
             sg_len -= len;
             ++table;
+            ++pi;
         }
     }
 
     /* Should never happen according to Tejun */
-    BUG_ON(!pi);
+    if (WARN_ON_ONCE(!pi))
+        return AC_ERR_SYSTEM;
 
     /* Convert the last command to an input/output */
     table--;
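Both hunks replace a machine-killing `BUG_ON()` with `WARN_ON_ONCE()` plus a graceful error return, failing only the one request. A sketch of that conversion; `my_warn_on_once()` is a crude stand-in (it leans on a GNU C statement expression, which the kernel's own macro also uses):

```c
#include <stdio.h>

static int warned;
#define my_warn_on_once(cond) \
	({ int __c = !!(cond); \
	   if (__c && !warned) { warned = 1; fprintf(stderr, "warn: %s\n", #cond); } \
	   __c; })

#define MAX_SLOTS 4

/* Overflow used to be a hard assert; now it fails just this request. */
int fill_table(int *table, int n)
{
	if (my_warn_on_once(n > MAX_SLOTS))
		return -1;
	for (int i = 0; i < n; i++)
		table[i] = i;
	return 0;
}
```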
@@ -559,6 +559,7 @@ void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
     grp->id = grp;
     if (id)
         grp->id = id;
+    grp->color = 0;
 
     spin_lock_irqsave(&dev->devres_lock, flags);
     add_dr(dev, &grp->node[0]);
@@ -44,7 +44,7 @@
 
 #define PLL_USER_CTL(p)        ((p)->offset + (p)->regs[PLL_OFF_USER_CTL])
 #define PLL_POST_DIV_SHIFT    8
-#define PLL_POST_DIV_MASK(p)    GENMASK((p)->width, 0)
+#define PLL_POST_DIV_MASK(p)    GENMASK((p)->width - 1, 0)
 #define PLL_ALPHA_EN        BIT(24)
 #define PLL_ALPHA_MODE        BIT(25)
 #define PLL_VCO_SHIFT        20
@@ -96,20 +96,28 @@ static int __init tpm_clocksource_init(unsigned long rate)
 static int tpm_set_next_event(unsigned long delta,
                 struct clock_event_device *evt)
 {
-    unsigned long next, now;
+    unsigned long next, prev, now;
 
-    next = tpm_read_counter();
-    next += delta;
+    prev = tpm_read_counter();
+    next = prev + delta;
     writel(next, timer_base + TPM_C0V);
     now = tpm_read_counter();
 
+    /*
+     * Need to wait CNT increase at least 1 cycle to make sure
+     * the C0V has been updated into HW.
+     */
+    if ((next & 0xffffffff) != readl(timer_base + TPM_C0V))
+        while (now == tpm_read_counter())
+            ;
+
     /*
      * NOTE: We observed in a very small probability, the bus fabric
      * contention between GPU and A7 may results a few cycles delay
      * of writing CNT registers which may cause the min_delta event got
      * missed, so we need add a ETIME check here in case it happened.
      */
-    return (int)(next - now) <= 0 ? -ETIME : 0;
+    return (now - prev) >= delta ? -ETIME : 0;
 }
 
 static int tpm_set_state_oneshot(struct clock_event_device *evt)
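The key change in the return statement is using unsigned "elapsed >= delta" arithmetic instead of a signed cast, which stays correct even when delta exceeds INT_MAX and across counter wraparound. An illustrative sketch (function-pointer parameters stand in for the real register accessors):

```c
#include <stdint.h>

/* Decide whether a one-shot deadline was already missed by the time
 * the compare register was programmed. Illustrative only. */
int program_oneshot(uint32_t (*read_counter)(void),
		    void (*write_compare)(uint32_t),
		    uint32_t delta)
{
	uint32_t prev = read_counter();
	uint32_t next = prev + delta;	/* wraps naturally at 2^32 */
	uint32_t now;

	write_compare(next);
	now = read_counter();

	/*
	 * Unsigned (now - prev) >= delta is well-defined for any delta,
	 * unlike the old signed (int)(next - now) test, which misfires
	 * once delta exceeds INT_MAX.
	 */
	return (now - prev) >= delta ? -1 /* -ETIME */ : 0;
}
```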
@@ -100,6 +100,7 @@ struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock)
     amdgpu_afmt_calc_cts(clock, &res.cts_32khz, &res.n_32khz, 32000);
     amdgpu_afmt_calc_cts(clock, &res.cts_44_1khz, &res.n_44_1khz, 44100);
     amdgpu_afmt_calc_cts(clock, &res.cts_48khz, &res.n_48khz, 48000);
+    res.clock = clock;
 
     return res;
 }

@@ -1625,6 +1625,8 @@ int amdgpu_atombios_init_mc_reg_table(struct amdgpu_device *adev,
                     (u32)le32_to_cpu(*((u32 *)reg_data + j));
                 j++;
             } else if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_EQU_PREV) {
+                if (i == 0)
+                    continue;
                 reg_table->mc_reg_table_entry[num_ranges].mc_data[i] =
                     reg_table->mc_reg_table_entry[num_ranges].mc_data[i - 1];
             }
@@ -212,6 +212,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
         struct amdgpu_firmware_info *ucode;
 
         id = fw_type_convert(cgs_device, type);
+        if (id >= AMDGPU_UCODE_ID_MAXIMUM)
+            return -EINVAL;
+
         ucode = &adev->firmware.ucode[id];
         if (ucode->fw == NULL)
             return -EINVAL;

@@ -497,8 +497,9 @@ static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
                     size_t size, loff_t *pos)
 {
     struct amdgpu_ring *ring = file_inode(f)->i_private;
-    int r, i;
     uint32_t value, result, early[3];
+    loff_t i;
+    int r;
 
     if (*pos & 3 || size & 3)
         return -EINVAL;
@@ -42,8 +42,6 @@
 #define CRAT_OEMTABLEID_LENGTH    8
 #define CRAT_RESERVED_LENGTH    6
 
-#define CRAT_OEMID_64BIT_MASK ((1ULL << (CRAT_OEMID_LENGTH * 8)) - 1)
-
 /* Compute Unit flags */
 #define COMPUTE_UNIT_CPU    (1 << 0)  /* Create Virtual CRAT for CPU */
 #define COMPUTE_UNIT_GPU    (1 << 1)  /* Create Virtual CRAT for GPU */

@@ -863,8 +863,7 @@ static void kfd_update_system_properties(void)
     dev = list_last_entry(&topology_device_list,
             struct kfd_topology_device, list);
     if (dev) {
-        sys_props.platform_id =
-            (*((uint64_t *)dev->oem_id)) & CRAT_OEMID_64BIT_MASK;
+        sys_props.platform_id = dev->oem_id64;
         sys_props.platform_oem = *((uint64_t *)dev->oem_table_id);
         sys_props.platform_rev = dev->oem_revision;
     }

@@ -164,7 +164,10 @@ struct kfd_topology_device {
     struct attribute    attr_gpuid;
     struct attribute    attr_name;
     struct attribute    attr_props;
-    uint8_t            oem_id[CRAT_OEMID_LENGTH];
+    union {
+        uint8_t            oem_id[CRAT_OEMID_LENGTH];
+        uint64_t        oem_id64;
+    };
     uint8_t            oem_table_id[CRAT_OEMTABLEID_LENGTH];
     uint32_t        oem_revision;
 };
@@ -41,7 +41,7 @@ static inline void debug_fence_init(struct i915_sw_fence *fence)
     debug_object_init(fence, &i915_sw_fence_debug_descr);
 }
 
-static inline void debug_fence_init_onstack(struct i915_sw_fence *fence)
+static inline __maybe_unused void debug_fence_init_onstack(struct i915_sw_fence *fence)
 {
     debug_object_init_on_stack(fence, &i915_sw_fence_debug_descr);
 }
@@ -67,7 +67,7 @@ static inline void debug_fence_destroy(struct i915_sw_fence *fence)
     debug_object_destroy(fence, &i915_sw_fence_debug_descr);
 }
 
-static inline void debug_fence_free(struct i915_sw_fence *fence)
+static inline __maybe_unused void debug_fence_free(struct i915_sw_fence *fence)
 {
     debug_object_free(fence, &i915_sw_fence_debug_descr);
     smp_wmb(); /* flush the change in state before reallocation */
@@ -84,7 +84,7 @@ static inline void debug_fence_init(struct i915_sw_fence *fence)
 {
 }
 
-static inline void debug_fence_init_onstack(struct i915_sw_fence *fence)
+static inline __maybe_unused void debug_fence_init_onstack(struct i915_sw_fence *fence)
 {
 }
 
@@ -105,7 +105,7 @@ static inline void debug_fence_destroy(struct i915_sw_fence *fence)
 {
 }
 
-static inline void debug_fence_free(struct i915_sw_fence *fence)
+static inline __maybe_unused void debug_fence_free(struct i915_sw_fence *fence)
 {
 }
@@ -104,7 +104,7 @@ static void cougar_fix_g6_mapping(struct hid_device *hdev)
 static __u8 *cougar_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                  unsigned int *rsize)
 {
-    if (rdesc[2] == 0x09 && rdesc[3] == 0x02 &&
+    if (*rsize >= 117 && rdesc[2] == 0x09 && rdesc[3] == 0x02 &&
         (rdesc[115] | rdesc[116] << 8) >= HID_MAX_USAGES) {
         hid_info(hdev,
              "usage count exceeds max: fixing up report descriptor\n");
@@ -1973,6 +1973,7 @@ static int vmbus_acpi_add(struct acpi_device *device)
     vmbus_acpi_remove(device);
     return ret_val;
 }
+EXPORT_SYMBOL_GPL(vmbus_device_unregister);
 
 static const struct acpi_device_id vmbus_acpi_device_ids[] = {
     {"VMBUS", 0},
@@ -184,7 +184,7 @@ static ssize_t adc128_set_in(struct device *dev, struct device_attribute *attr,
 
     mutex_lock(&data->update_lock);
     /* 10 mV LSB on limit registers */
-    regval = clamp_val(DIV_ROUND_CLOSEST(val, 10), 0, 255);
+    regval = DIV_ROUND_CLOSEST(clamp_val(val, 0, 2550), 10);
     data->in[index][nr] = regval << 4;
     reg = index == 1 ? ADC128_REG_IN_MIN(nr) : ADC128_REG_IN_MAX(nr);
     i2c_smbus_write_byte_data(data->client, reg, regval);
@@ -222,7 +222,7 @@ static ssize_t adc128_set_temp(struct device *dev,
         return err;
 
     mutex_lock(&data->update_lock);
-    regval = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127);
+    regval = DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), 1000);
     data->temp[index] = regval << 1;
     i2c_smbus_write_byte_data(data->client,
                   index == 1 ? ADC128_REG_TEMP_MAX

@@ -310,7 +310,8 @@ static ssize_t set_tcrit2(struct device *dev, struct device_attribute *attr,
     if (ret < 0)
         return ret;
 
-    val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, index ? 255 : 127);
+    val = DIV_ROUND_CLOSEST(clamp_val(val, 0, (index ? 255 : 127) * 1000),
+                1000);
 
     mutex_lock(&data->update_lock);
     data->tcrit2[index] = val;
@@ -359,7 +360,7 @@ static ssize_t set_tcrit1(struct device *dev, struct device_attribute *attr,
     if (ret < 0)
         return ret;
 
-    val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 255);
+    val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 255000), 1000);
 
     mutex_lock(&data->update_lock);
     data->tcrit1[index] = val;
@@ -400,7 +401,7 @@ static ssize_t set_tcrit1_hyst(struct device *dev,
     if (ret < 0)
         return ret;
 
-    val = DIV_ROUND_CLOSEST(val, 1000);
+    val = DIV_ROUND_CLOSEST(clamp_val(val, -255000, 255000), 1000);
     val = clamp_val((int)data->tcrit1[index] - val, 0, 31);
 
     mutex_lock(&data->update_lock);
@@ -440,7 +441,7 @@ static ssize_t set_offset(struct device *dev, struct device_attribute *attr,
         return ret;
 
     /* Accuracy is 1/2 degrees C */
-    val = clamp_val(DIV_ROUND_CLOSEST(val, 500), -128, 127);
+    val = DIV_ROUND_CLOSEST(clamp_val(val, -64000, 63500), 500);
 
     mutex_lock(&data->update_lock);
     data->toffset[index] = val;

@@ -2264,7 +2264,7 @@ store_temp_offset(struct device *dev, struct device_attribute *attr,
     if (err < 0)
         return err;
 
-    val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127);
+    val = DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), 1000);
 
     mutex_lock(&data->update_lock);
     data->temp_offset[nr] = val;

@@ -1519,7 +1519,7 @@ store_target_temp(struct device *dev, struct device_attribute *attr,
     if (err < 0)
         return err;
 
-    val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 127);
+    val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 127000), 1000);
 
     mutex_lock(&data->update_lock);
     data->target_temp[nr] = val;
@@ -1545,7 +1545,7 @@ store_tolerance(struct device *dev, struct device_attribute *attr,
         return err;
 
     /* Limit the temp to 0C - 15C */
-    val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 15);
+    val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 15000), 1000);
 
     mutex_lock(&data->update_lock);
     if (sio_data->kind == nct6775 || sio_data->kind == nct6776) {
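Every hwmon hunk above applies the same fix: divide-then-clamp lets an extreme user-supplied value overflow inside `DIV_ROUND_CLOSEST` before the clamp ever runs; clamping the raw millidegree value first keeps the arithmetic in range. A compilable sketch with simplified macros (the kernel's real macros are more careful):

```c
#include <stdio.h>
#include <limits.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))
#define clamp_val(v, lo, hi) ((v) < (lo) ? (lo) : (v) > (hi) ? (hi) : (v))

int main(void)
{
	long val = LONG_MAX;	/* hostile sysfs input */

	/* Old order: (val + 500) overflows signed long — undefined
	 * behavior; on typical targets it wraps negative and the clamp
	 * then stores -128 instead of 127. */
	long bad = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128L, 127L);

	/* New order: clamp the raw value into a representable range
	 * first, then scale; the result is always well-defined. */
	long good = DIV_ROUND_CLOSEST(clamp_val(val, -128000L, 127000L), 1000L);

	printf("bad=%ld good=%ld\n", bad, good);
	return 0;
}
```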
@@ -159,7 +159,7 @@ struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
 
     ret = dma_get_slave_caps(chan, &caps);
     if (ret < 0)
-        goto err_free;
+        goto err_release;
 
     /* Needs to be aligned to the maximum of the minimums */
     if (caps.src_addr_widths)
@@ -184,6 +184,8 @@ struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
 
     return &dmaengine_buffer->queue.buffer;
 
+err_release:
+    dma_release_channel(chan);
 err_free:
     kfree(dmaengine_buffer);
     return ERR_PTR(ret);
@@ -640,17 +640,17 @@ static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
         break;
     case IIO_VAL_INT_PLUS_MICRO:
         if (scale_val2 < 0)
-            *processed = -raw64 * scale_val;
+            *processed = -raw64 * scale_val * scale;
         else
-            *processed = raw64 * scale_val;
+            *processed = raw64 * scale_val * scale;
         *processed += div_s64(raw64 * (s64)scale_val2 * scale,
                       1000000LL);
         break;
     case IIO_VAL_INT_PLUS_NANO:
         if (scale_val2 < 0)
-            *processed = -raw64 * scale_val;
+            *processed = -raw64 * scale_val * scale;
         else
-            *processed = raw64 * scale_val;
+            *processed = raw64 * scale_val * scale;
         *processed += div_s64(raw64 * (s64)scale_val2 * scale,
                       1000000000LL);
         break;
@@ -429,6 +429,20 @@ static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
         return -EINVAL;
     }
 
+    /*
+     * Limit number of contacts to a reasonable value (100). This
+     * ensures that we need less than 2 pages for struct input_mt
+     * (we are not using in-kernel slot assignment so not going to
+     * allocate memory for the "red" table), and we should have no
+     * trouble getting this much memory.
+     */
+    if (code == ABS_MT_SLOT && max > 99) {
+        printk(KERN_DEBUG
+               "%s: unreasonably large number of slots requested: %d\n",
+               UINPUT_NAME, max);
+        return -EINVAL;
+    }
+
     return 0;
 }
@@ -1292,7 +1292,7 @@ int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
      */
     writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);
 
-    while (qi->desc_status[wait_index] != QI_DONE) {
+    while (READ_ONCE(qi->desc_status[wait_index]) != QI_DONE) {
         /*
          * We will leave the interrupts disabled, to prevent interrupt
          * context to queue another cmd while a cmd is already submitted
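`READ_ONCE()` matters here because a plain load in a busy-wait loop may be hoisted out by the compiler, spinning forever on a stale value. A minimal C11 sketch of the same discipline using a relaxed atomic load in place of the kernel macro:

```c
#include <stdatomic.h>

/* Spin until another context sets *status to 1. The relaxed atomic
 * load forces a fresh read each iteration, as READ_ONCE() does. */
void wait_for_done(atomic_int *status)
{
	while (atomic_load_explicit(status, memory_order_relaxed) != 1)
		; /* the kernel would cpu_relax() here */
}
```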
@@ -346,6 +346,10 @@ static struct irq_chip armada_370_xp_irq_chip = {
 static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
                       unsigned int virq, irq_hw_number_t hw)
 {
+    /* IRQs 0 and 1 cannot be mapped, they are handled internally */
+    if (hw <= 1)
+        return -EINVAL;
+
     armada_370_xp_irq_mask(irq_get_irq_data(virq));
     if (!is_percpu_irq(hw))
         writel(hw, per_cpu_int_base +
@@ -431,8 +431,11 @@ static int camss_of_parse_endpoint_node(struct device *dev,
     struct v4l2_fwnode_bus_mipi_csi2 *mipi_csi2;
     struct v4l2_fwnode_endpoint vep = { { 0 } };
     unsigned int i;
+    int ret;
 
-    v4l2_fwnode_endpoint_parse(of_fwnode_handle(node), &vep);
+    ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(node), &vep);
+    if (ret)
+        return ret;
 
     csd->interface.csiphy_id = vep.base.port;
@@ -860,16 +860,26 @@ static int uvc_parse_streaming(struct uvc_device *dev,
         goto error;
     }
 
-    size = nformats * sizeof(*format) + nframes * sizeof(*frame)
-         + nintervals * sizeof(*interval);
+    /*
+     * Allocate memory for the formats, the frames and the intervals,
+     * plus any required padding to guarantee that everything has the
+     * correct alignment.
+     */
+    size = nformats * sizeof(*format);
+    size = ALIGN(size, __alignof__(*frame)) + nframes * sizeof(*frame);
+    size = ALIGN(size, __alignof__(*interval))
+         + nintervals * sizeof(*interval);
 
     format = kzalloc(size, GFP_KERNEL);
-    if (format == NULL) {
+    if (!format) {
         ret = -ENOMEM;
         goto error;
     }
 
-    frame = (struct uvc_frame *)&format[nformats];
-    interval = (u32 *)&frame[nframes];
+    frame = (void *)format + nformats * sizeof(*format);
+    frame = PTR_ALIGN(frame, __alignof__(*frame));
+    interval = (void *)frame + nframes * sizeof(*frame);
+    interval = PTR_ALIGN(interval, __alignof__(*interval));
 
     streaming->format = format;
     streaming->nformats = nformats;
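The pattern above packs three arrays into one allocation but pads between regions so each array starts at its natural alignment. A userspace sketch of the same layout technique; `ALIGN_UP`/`PTR_ALIGN_UP` and the struct names are illustrative, not the kernel macros:

```c
#include <stdint.h>
#include <stdlib.h>

#define ALIGN_UP(x, a)     (((x) + (a) - 1) & ~((uintptr_t)(a) - 1))
#define PTR_ALIGN_UP(p, a) ((void *)ALIGN_UP((uintptr_t)(p), (a)))

struct fmt { long x; };
struct frm { double y; };

/* One allocation, three arrays, correct alignment for each region
 * (a is assumed to be a power of two, as alignments always are). */
void *alloc_packed(size_t nfmt, size_t nfrm, size_t nint,
		   struct fmt **fmt, struct frm **frm, uint32_t **ivl)
{
	size_t size = nfmt * sizeof(**fmt);

	size = ALIGN_UP(size, _Alignof(struct frm)) + nfrm * sizeof(**frm);
	size = ALIGN_UP(size, _Alignof(uint32_t)) + nint * sizeof(**ivl);

	char *base = calloc(1, size);
	if (!base)
		return NULL;

	*fmt = (struct fmt *)base;
	*frm = PTR_ALIGN_UP(base + nfmt * sizeof(**fmt), _Alignof(struct frm));
	*ivl = PTR_ALIGN_UP((char *)(*frm + nfrm), _Alignof(uint32_t));
	return base;
}
```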
@@ -152,7 +152,8 @@ void vmci_resource_remove(struct vmci_resource *resource)
     spin_lock(&vmci_resource_table.lock);
 
     hlist_for_each_entry(r, &vmci_resource_table.entries[idx], node) {
-        if (vmci_handle_is_equal(r->handle, resource->handle)) {
+        if (vmci_handle_is_equal(r->handle, resource->handle) &&
+            resource->type == r->type) {
             hlist_del_init_rcu(&r->node);
             break;
         }
@@ -2857,8 +2857,8 @@ static int dw_mci_init_slot(struct dw_mci *host)
     if (host->use_dma == TRANS_MODE_IDMAC) {
         mmc->max_segs = host->ring_size;
         mmc->max_blk_size = 65535;
-        mmc->max_seg_size = 0x1000;
-        mmc->max_req_size = mmc->max_seg_size * host->ring_size;
+        mmc->max_req_size = DW_MCI_DESC_DATA_LENGTH * host->ring_size;
+        mmc->max_seg_size = mmc->max_req_size;
         mmc->max_blk_count = mmc->max_req_size / 512;
     } else if (host->use_dma == TRANS_MODE_EDMAC) {
         mmc->max_segs = 64;
@@ -38,7 +38,7 @@
 #define VSC73XX_BLOCK_ANALYZER    0x2 /* Only subblock 0 */
 #define VSC73XX_BLOCK_MII    0x3 /* Subblocks 0 and 1 */
 #define VSC73XX_BLOCK_MEMINIT    0x3 /* Only subblock 2 */
-#define VSC73XX_BLOCK_CAPTURE    0x4 /* Only subblock 2 */
+#define VSC73XX_BLOCK_CAPTURE    0x4 /* Subblocks 0-4, 6, 7 */
 #define VSC73XX_BLOCK_ARBITER    0x5 /* Only subblock 0 */
 #define VSC73XX_BLOCK_SYSTEM    0x7 /* Only subblock 0 */
@@ -385,13 +385,19 @@ static int vsc73xx_is_addr_valid(u8 block, u8 subblock)
         break;
 
     case VSC73XX_BLOCK_MII:
-    case VSC73XX_BLOCK_CAPTURE:
     case VSC73XX_BLOCK_ARBITER:
         switch (subblock) {
         case 0 ... 1:
             return 1;
         }
         break;
+    case VSC73XX_BLOCK_CAPTURE:
+        switch (subblock) {
+        case 0 ... 4:
+        case 6 ... 7:
+            return 1;
+        }
+        break;
     }
 
     return 0;
@@ -6554,10 +6554,20 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt)
 
 static void igb_tsync_interrupt(struct igb_adapter *adapter)
 {
+    const u32 mask = (TSINTR_SYS_WRAP | E1000_TSICR_TXTS |
+              TSINTR_TT0 | TSINTR_TT1 |
+              TSINTR_AUTT0 | TSINTR_AUTT1);
     struct e1000_hw *hw = &adapter->hw;
     u32 tsicr = rd32(E1000_TSICR);
     struct ptp_clock_event event;
 
+    if (hw->mac.type == e1000_82580) {
+        /* 82580 has a hardware bug that requires an explicit
+         * write to clear the TimeSync interrupt cause.
+         */
+        wr32(E1000_TSICR, tsicr & mask);
+    }
+
     if (tsicr & TSINTR_SYS_WRAP) {
         event.type = PTP_CLOCK_PPS;
         if (adapter->ptp_caps.pps)
@@ -2072,12 +2072,13 @@ void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
 static void
 mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
                 const char *mac, u16 vid,
-                struct net_device *dev)
+                struct net_device *dev, bool offloaded)
 {
     struct switchdev_notifier_fdb_info info;
 
     info.addr = mac;
     info.vid = vid;
+    info.offloaded = offloaded;
     call_switchdev_notifiers(type, dev, &info.info);
 }
 
@@ -2129,7 +2130,7 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
     if (!do_notification)
         return;
     type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
-    mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev);
+    mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);
 
     return;
 
@@ -2189,7 +2190,7 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
     if (!do_notification)
         return;
     type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
-    mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev);
+    mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);
 
     return;
 
@@ -2294,7 +2295,7 @@ static void mlxsw_sp_switchdev_event_work(struct work_struct *work)
             break;
         mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
                         fdb_info->addr,
-                        fdb_info->vid, dev);
+                        fdb_info->vid, dev, true);
         break;
     case SWITCHDEV_FDB_DEL_TO_DEVICE:
         fdb_info = &switchdev_work->fdb_info;
@@ -2728,6 +2728,7 @@ rocker_fdb_offload_notify(struct rocker_port *rocker_port,
 
     info.addr = recv_info->addr;
     info.vid = recv_info->vid;
+    info.offloaded = true;
     call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
                  rocker_port->dev, &info.info);
 }
@@ -338,6 +338,7 @@ static int ch9200_bind(struct usbnet *dev, struct usb_interface *intf)
 {
     int retval = 0;
     unsigned char data[2];
+    u8 addr[ETH_ALEN];
 
     retval = usbnet_get_endpoints(dev, intf);
     if (retval)
@@ -385,7 +386,8 @@ static int ch9200_bind(struct usbnet *dev, struct usb_interface *intf)
     retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_CTRL, data, 0x02,
                    CONTROL_TIMEOUT_MS);
 
-    retval = get_mac_address(dev, dev->net->dev_addr);
+    retval = get_mac_address(dev, addr);
+    eth_hw_addr_set(dev->net, addr);
 
     return retval;
 }
@@ -52,6 +52,11 @@ enum cx82310_status {
 #define CX82310_MTU    1514
 #define CMD_EP        0x01
 
+struct cx82310_priv {
+    struct work_struct reenable_work;
+    struct usbnet *dev;
+};
+
 /*
  * execute control command
  *  - optionally send some data (command parameters)
@@ -127,6 +132,23 @@ static int cx82310_cmd(struct usbnet *dev, enum cx82310_cmd cmd, bool reply,
     return ret;
 }
 
+static int cx82310_enable_ethernet(struct usbnet *dev)
+{
+    int ret = cx82310_cmd(dev, CMD_ETHERNET_MODE, true, "\x01", 1, NULL, 0);
+
+    if (ret)
+        netdev_err(dev->net, "unable to enable ethernet mode: %d\n",
+               ret);
+    return ret;
+}
+
+static void cx82310_reenable_work(struct work_struct *work)
+{
+    struct cx82310_priv *priv = container_of(work, struct cx82310_priv,
+                         reenable_work);
+    cx82310_enable_ethernet(priv->dev);
+}
+
 #define partial_len    data[0]        /* length of partial packet data */
 #define partial_rem    data[1]        /* remaining (missing) data length */
 #define partial_data    data[2]        /* partial packet data */
@@ -138,6 +160,8 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
     struct usb_device *udev = dev->udev;
     u8 link[3];
     int timeout = 50;
+    struct cx82310_priv *priv;
+    u8 addr[ETH_ALEN];
 
     /* avoid ADSL modems - continue only if iProduct is "USB NET CARD" */
     if (usb_string(udev, udev->descriptor.iProduct, buf, sizeof(buf)) > 0
@@ -164,6 +188,15 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
     if (!dev->partial_data)
         return -ENOMEM;
 
+    priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+    if (!priv) {
+        ret = -ENOMEM;
+        goto err_partial;
+    }
+    dev->driver_priv = priv;
+    INIT_WORK(&priv->reenable_work, cx82310_reenable_work);
+    priv->dev = dev;
+
     /* wait for firmware to become ready (indicated by the link being up) */
     while (--timeout) {
         ret = cx82310_cmd(dev, CMD_GET_LINK_STATUS, true, NULL, 0,
@@ -180,20 +213,17 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
     }
 
     /* enable ethernet mode (?) */
-    ret = cx82310_cmd(dev, CMD_ETHERNET_MODE, true, "\x01", 1, NULL, 0);
-    if (ret) {
-        dev_err(&udev->dev, "unable to enable ethernet mode: %d\n",
-            ret);
+    ret = cx82310_enable_ethernet(dev);
+    if (ret)
         goto err;
-    }
 
     /* get the MAC address */
-    ret = cx82310_cmd(dev, CMD_GET_MAC_ADDR, true, NULL, 0,
-              dev->net->dev_addr, ETH_ALEN);
+    ret = cx82310_cmd(dev, CMD_GET_MAC_ADDR, true, NULL, 0, addr, ETH_ALEN);
     if (ret) {
         dev_err(&udev->dev, "unable to read MAC address: %d\n", ret);
         goto err;
     }
+    eth_hw_addr_set(dev->net, addr);
 
     /* start (does not seem to have any effect?) */
     ret = cx82310_cmd(dev, CMD_START, false, NULL, 0, NULL, 0);
@@ -202,13 +232,19 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
 
     return 0;
 err:
+    kfree(dev->driver_priv);
+err_partial:
     kfree((void *)dev->partial_data);
     return ret;
 }
 
 static void cx82310_unbind(struct usbnet *dev, struct usb_interface *intf)
 {
+    struct cx82310_priv *priv = dev->driver_priv;
+
     kfree((void *)dev->partial_data);
+    cancel_work_sync(&priv->reenable_work);
+    kfree(dev->driver_priv);
 }
 
 /*
@@ -223,6 +259,7 @@ static int cx82310_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 {
     int len;
     struct sk_buff *skb2;
+    struct cx82310_priv *priv = dev->driver_priv;
 
     /*
      * If the last skb ended with an incomplete packet, this skb contains
@@ -257,7 +294,10 @@ static int cx82310_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
             break;
         }
 
-        if (len > CX82310_MTU) {
+        if (len == 0xffff) {
+            netdev_info(dev->net, "router was rebooted, re-enabling ethernet mode");
+            schedule_work(&priv->reenable_work);
+        } else if (len > CX82310_MTU) {
             dev_err(&dev->udev->dev, "RX packet too long: %d B\n",
                 len);
             return 0;
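Note the shape of the fix: the receive path only detects the reboot and schedules work; the slow control transfer that re-enables ethernet mode runs later from the work item, where sleeping is allowed. A loose userspace analogy of deferring blocking work out of a hot path (a thread stands in for `schedule_work()`; the names are hypothetical):

```c
#include <pthread.h>

struct reenable_ctx {
	void (*reenable)(void *dev);	/* may sleep */
	void *dev;
};

static void *worker(void *arg)
{
	struct reenable_ctx *ctx = arg;

	ctx->reenable(ctx->dev);	/* safe in worker context */
	return NULL;
}

/* The detection path must not block, so hand off and return at once. */
void on_rx_detect_reboot(struct reenable_ctx *ctx)
{
	pthread_t t;

	if (pthread_create(&t, NULL, worker, ctx) == 0)
		pthread_detach(&t);
}
```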
@@ -407,8 +407,8 @@ static int ipheth_close(struct net_device *net)
 {
     struct ipheth_device *dev = netdev_priv(net);
 
-    cancel_delayed_work_sync(&dev->carrier_work);
     netif_stop_queue(net);
+    cancel_delayed_work_sync(&dev->carrier_work);
     return 0;
 }
 
@@ -497,7 +497,7 @@ static int ipheth_probe(struct usb_interface *intf,
 
     netdev->netdev_ops = &ipheth_netdev_ops;
     netdev->watchdog_timeo = IPHETH_TX_TIMEOUT;
-    strcpy(netdev->name, "eth%d");
+    strscpy(netdev->name, "eth%d", sizeof(netdev->name));
 
     dev = netdev_priv(netdev);
     dev->udev = udev;
@@ -1139,8 +1139,7 @@ static int kaweth_probe(
         goto err_all_but_rxbuf;
 
     memcpy(netdev->broadcast, &bcast_addr, sizeof(bcast_addr));
-    memcpy(netdev->dev_addr, &kaweth->configuration.hw_addr,
-           sizeof(kaweth->configuration.hw_addr));
+    eth_hw_addr_set(netdev, (u8 *)&kaweth->configuration.hw_addr);
 
     netdev->netdev_ops = &kaweth_netdev_ops;
     netdev->watchdog_timeo = KAWETH_TX_TIMEOUT;
@@ -493,17 +493,19 @@ static const struct net_device_ops mcs7830_netdev_ops = {
 static int mcs7830_bind(struct usbnet *dev, struct usb_interface *udev)
 {
     struct net_device *net = dev->net;
+    u8 addr[ETH_ALEN];
     int ret;
     int retry;
 
     /* Initial startup: Gather MAC address setting from EEPROM */
     ret = -EINVAL;
     for (retry = 0; retry < 5 && ret; retry++)
-        ret = mcs7830_hif_get_mac_address(dev, net->dev_addr);
+        ret = mcs7830_hif_get_mac_address(dev, addr);
     if (ret) {
         dev_warn(&dev->udev->dev, "Cannot read MAC address\n");
         goto out;
     }
+    eth_hw_addr_set(net, addr);
 
     mcs7830_data_set_multicast(net);
@@ -1390,6 +1390,7 @@ static const struct usb_device_id products[] = {
     {QMI_FIXED_INTF(0x2692, 0x9025, 4)},    /* Cellient MPL200 (rebranded Qualcomm 05c6:9025) */
     {QMI_QUIRK_SET_DTR(0x1546, 0x1342, 4)},    /* u-blox LARA-L6 */
     {QMI_QUIRK_SET_DTR(0x33f8, 0x0104, 4)}, /* Rolling RW101 RMNET */
+    {QMI_FIXED_INTF(0x2dee, 0x4d22, 5)},    /* MeiG Smart SRM825L */
 
     /* 4. Gobi 1000 devices */
     {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},    /* Acer Gobi Modem Device */
@@ -686,6 +686,7 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
         0x00, 0x00, SIERRA_NET_HIP_MSYNC_ID, 0x00};
     static const u8 shdwn_tmplate[sizeof(priv->shdwn_msg)] = {
         0x00, 0x00, SIERRA_NET_HIP_SHUTD_ID, 0x00};
+    u8 mod[2];
 
     dev_dbg(&dev->udev->dev, "%s", __func__);
 
@@ -715,8 +716,9 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
     dev->net->netdev_ops = &sierra_net_device_ops;
 
     /* change MAC addr to include, ifacenum, and to be unique */
-    dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
-    dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
+    mod[0] = atomic_inc_return(&iface_counter);
+    mod[1] = ifacenum;
+    dev_addr_mod(dev->net, ETH_ALEN - 2, mod, 2);
 
     /* prepare shutdown message template */
     memcpy(priv->shdwn_msg, shdwn_tmplate, sizeof(priv->shdwn_msg));
@@ -326,6 +326,7 @@ static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf)
 {
     struct net_device *netdev;
     struct mii_if_info *mii;
+    u8 addr[ETH_ALEN];
     int ret;
 
     ret = usbnet_get_endpoints(dev, intf);
@@ -356,11 +357,12 @@ static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf)
      * EEPROM automatically to PAR. In case there is no EEPROM externally,
      * a default MAC address is stored in PAR for making chip work properly.
      */
-    if (sr_read(dev, SR_PAR, ETH_ALEN, netdev->dev_addr) < 0) {
+    if (sr_read(dev, SR_PAR, ETH_ALEN, addr) < 0) {
         netdev_err(netdev, "Error reading MAC address\n");
         ret = -ENODEV;
         goto out;
     }
+    eth_hw_addr_set(netdev, addr);
 
     /* power up and reset phy */
     sr_write_reg(dev, SR_PRR, PRR_PHY_RST);
@@ -732,6 +732,7 @@ static int sr9800_bind(struct usbnet *dev, struct usb_interface *intf)
     struct sr_data *data = (struct sr_data *)&dev->data;
     u16 led01_mux, led23_mux;
     int ret, embd_phy;
+    u8 addr[ETH_ALEN];
     u32 phyid;
     u16 rx_ctl;
 
@@ -757,12 +758,12 @@ static int sr9800_bind(struct usbnet *dev, struct usb_interface *intf)
     }
 
     /* Get the MAC address */
-    ret = sr_read_cmd(dev, SR_CMD_READ_NODE_ID, 0, 0, ETH_ALEN,
-              dev->net->dev_addr);
+    ret = sr_read_cmd(dev, SR_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, addr);
     if (ret < 0) {
         netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret);
         return ret;
     }
+    eth_hw_addr_set(dev->net, addr);
     netdev_dbg(dev->net, "mac addr : %pM\n", dev->net->dev_addr);
 
     /* Initialize MII structure */
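The same conversion recurs across all the USB network drivers in this merge: `netdev->dev_addr` is no longer written directly; the hardware address is read into a stack buffer and committed via `eth_hw_addr_set()`. A simplified sketch of the pattern (types and helpers are stand-ins, not the real net core API):

```c
#define ETH_ALEN 6

struct net_device_ish {
	unsigned char dev_addr[ETH_ALEN]; /* effectively const to drivers */
};

/* Single commit point; the kernel's version also notifies address
 * trackers, which is why scattered direct writes had to go. */
static void eth_hw_addr_set_ish(struct net_device_ish *nd,
				const unsigned char *a)
{
	for (int i = 0; i < ETH_ALEN; i++)
		nd->dev_addr[i] = a[i];
}

int bind_ish(struct net_device_ish *nd,
	     int (*read_mac)(unsigned char *buf, int len))
{
	unsigned char addr[ETH_ALEN];

	if (read_mac(addr, ETH_ALEN) < 0)
		return -1;	/* dev_addr was never touched on failure */
	eth_hw_addr_set_ish(nd, addr);
	return 0;
}
```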
@@ -79,9 +79,6 @@
 
 /*-------------------------------------------------------------------------*/
 
-// randomly generated ethernet address
-static u8    node_id [ETH_ALEN];
-
 /* use ethtool to change the level for any given device */
 static int msg_level = -1;
 module_param (msg_level, int, 0);
@@ -163,12 +160,13 @@ EXPORT_SYMBOL_GPL(usbnet_get_endpoints);
 
 int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
 {
+    u8 addr[ETH_ALEN];
     int        tmp = -1, ret;
     unsigned char    buf [13];
 
     ret = usb_string(dev->udev, iMACAddress, buf, sizeof buf);
     if (ret == 12)
-        tmp = hex2bin(dev->net->dev_addr, buf, 6);
+        tmp = hex2bin(addr, buf, 6);
     if (tmp < 0) {
         dev_dbg(&dev->udev->dev,
             "bad MAC string %d fetch, %d\n", iMACAddress, tmp);
@@ -176,6 +174,7 @@ int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
         ret = -EINVAL;
         return ret;
     }
+    eth_hw_addr_set(dev->net, addr);
     return 0;
 }
 EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr);
@@ -1729,8 +1728,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
     dev->interrupt_count = 0;
 
     dev->net = net;
-    strcpy (net->name, "usb%d");
-    memcpy (net->dev_addr, node_id, sizeof node_id);
+    strscpy(net->name, "usb%d", sizeof(net->name));
 
     /* rx and tx sides can use different message sizes;
      * bind() should set rx_urb_size in that case.
@@ -1756,13 +1754,13 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
     if ((dev->driver_info->flags & FLAG_ETHER) != 0 &&
         ((dev->driver_info->flags & FLAG_POINTTOPOINT) == 0 ||
          (net->dev_addr [0] & 0x02) == 0))
-        strcpy (net->name, "eth%d");
+        strscpy(net->name, "eth%d", sizeof(net->name));
     /* WLAN devices should always be named "wlan%d" */
     if ((dev->driver_info->flags & FLAG_WLAN) != 0)
-        strcpy(net->name, "wlan%d");
+        strscpy(net->name, "wlan%d", sizeof(net->name));
     /* WWAN devices should always be named "wwan%d" */
     if ((dev->driver_info->flags & FLAG_WWAN) != 0)
-        strcpy(net->name, "wwan%d");
+        strscpy(net->name, "wwan%d", sizeof(net->name));
 
     /* devices that cannot do ARP */
     if ((dev->driver_info->flags & FLAG_NOARP) != 0)
@@ -1804,9 +1802,9 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
         goto out4;
     }
 
-    /* let userspace know we have a random address */
-    if (ether_addr_equal(net->dev_addr, node_id))
-        net->addr_assign_type = NET_ADDR_RANDOM;
+    /* this flags the device for user space */
+    if (!is_valid_ether_addr(net->dev_addr))
+        eth_hw_addr_random(net);
 
     if ((dev->driver_info->flags & FLAG_WLAN) != 0)
         SET_NETDEV_DEVTYPE(net, &wlan_type);
@@ -2216,7 +2214,6 @@ static int __init usbnet_init(void)
     BUILD_BUG_ON(
         FIELD_SIZEOF(struct sk_buff, cb) < sizeof(struct skb_data));
 
-    eth_random_addr(node_id);
     return 0;
 }
 module_init(usbnet_init);
@@ -1428,7 +1428,7 @@ static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
         return false;
 }
 
-static void virtnet_poll_cleantx(struct receive_queue *rq)
+static void virtnet_poll_cleantx(struct receive_queue *rq, int budget)
 {
     struct virtnet_info *vi = rq->vq->vdev->priv;
     unsigned int index = vq2rxq(rq->vq);
@@ -1439,7 +1439,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
         return;
 
     if (__netif_tx_trylock(txq)) {
-        free_old_xmit_skbs(sq, true);
+        free_old_xmit_skbs(sq, !!budget);
         __netif_tx_unlock(txq);
     }
 
@@ -1456,7 +1456,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
     unsigned int received;
     unsigned int xdp_xmit = 0;
 
-    virtnet_poll_cleantx(rq);
+    virtnet_poll_cleantx(rq, budget);
 
     received = virtnet_receive(rq, budget, &xdp_xmit);
 
@@ -1526,7 +1526,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
     txq = netdev_get_tx_queue(vi->dev, index);
     __netif_tx_lock(txq, raw_smp_processor_id());
     virtqueue_disable_cb(sq->vq);
-    free_old_xmit_skbs(sq, true);
+    free_old_xmit_skbs(sq, !!budget);
 
     opaque = virtqueue_enable_cb_prepare(sq->vq);
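The rationale, if it matches the upstream fix this appears to backport (an assumption; the hunk itself does not say), is that a NAPI poll invoked with budget 0 comes from an atomic netpoll-style caller, where the "in NAPI" cleanup path is unsafe; `!!budget` threads that distinction down to the free routine. A tiny self-contained sketch of the convention (all names hypothetical):

```c
#include <stdbool.h>
#include <stdio.h>

struct queue { int pending; };

/* Heavier cleanup that is only safe in full NAPI context. */
static void free_done_buffers(struct queue *q, bool in_napi)
{
	q->pending = 0;
	printf("cleanup, in_napi=%d\n", in_napi);
}

static int poll(struct queue *q, int budget)
{
	/* !!budget collapses any positive budget to true */
	free_done_buffers(q, !!budget);
	return 0;
}

int main(void)
{
	struct queue q = { .pending = 3 };

	poll(&q, 64);	/* normal NAPI poll */
	poll(&q, 0);	/* atomic, netpoll-style call */
	return 0;
}
```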
@@ -1091,6 +1091,7 @@ static int ieee_hw_init(struct ieee80211_hw *hw)
     ieee80211_hw_set(hw, AMPDU_AGGREGATION);
     ieee80211_hw_set(hw, SIGNAL_DBM);
     ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+    ieee80211_hw_set(hw, MFP_CAPABLE);
 
     hw->extra_tx_headroom = brcms_c_get_header_len();
     hw->queues = N_TX_QUEUES;
@@ -1306,6 +1306,9 @@ mwifiex_get_priv_by_id(struct mwifiex_adapter *adapter,
 
     for (i = 0; i < adapter->priv_num; i++) {
         if (adapter->priv[i]) {
+            if (adapter->priv[i]->bss_mode == NL80211_IFTYPE_UNSPECIFIED)
+                continue;
+
             if ((adapter->priv[i]->bss_num == bss_num) &&
                 (adapter->priv[i]->bss_type == bss_type))
                 break;
@@ -668,13 +668,13 @@ void nvmem_device_put(struct nvmem_device *nvmem)
 EXPORT_SYMBOL_GPL(nvmem_device_put);
 
 /**
- * devm_nvmem_device_get() - Get nvmem cell of device form a given id
+ * devm_nvmem_device_get() - Get nvmem device of device form a given id
  *
  * @dev: Device that requests the nvmem device.
  * @id: name id for the requested nvmem device.
  *
- * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_cell
- * on success. The nvmem_cell will be freed by the automatically once the
+ * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
+ * on success. The nvmem_device will be freed by the automatically once the
  * device is freed.
  */
 struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
@@ -418,7 +418,8 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
     struct device_node *p;
     const __be32 *addr;
     u32 intsize;
-    int i, res;
+    int i, res, addr_len;
+    __be32 addr_buf[3] = { 0 };
 
     pr_debug("of_irq_parse_one: dev=%pOF, index=%d\n", device, index);
 
@@ -427,13 +428,19 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
         return of_irq_parse_oldworld(device, index, out_irq);
 
     /* Get the reg property (if any) */
-    addr = of_get_property(device, "reg", NULL);
+    addr = of_get_property(device, "reg", &addr_len);
+
+    /* Prevent out-of-bounds read in case of longer interrupt parent address size */
+    if (addr_len > (3 * sizeof(__be32)))
+        addr_len = 3 * sizeof(__be32);
+    if (addr)
+        memcpy(addr_buf, addr, addr_len);
 
     /* Try the new-style interrupts-extended first */
     res = of_parse_phandle_with_args(device, "interrupts-extended",
                     "#interrupt-cells", index, out_irq);
     if (!res)
-        return of_irq_parse_raw(addr, out_irq);
+        return of_irq_parse_raw(addr_buf, out_irq);
 
     /* Look for the interrupt parent. */
     p = of_irq_find_parent(device);
@@ -463,7 +470,7 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
 
     /* Check if there are any interrupt-map translations to process */
-    res = of_irq_parse_raw(addr, out_irq);
+    res = of_irq_parse_raw(addr_buf, out_irq);
 out:
     of_node_put(p);
     return res;
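The fix copies an untrusted, variably-sized property through a fixed, zero-initialized bounce buffer so downstream code can never read past the expected three cells. A generic sketch of the bounce-buffer idiom (names are illustrative):

```c
#include <string.h>
#include <stdint.h>

#define MAX_CELLS 3

/* Consumers expect up to MAX_CELLS cells, but the source property may
 * be longer or absent; clamp and copy, then hand out only the buffer. */
void parse_with_bounce(const uint32_t *prop, int prop_len_bytes)
{
	uint32_t buf[MAX_CELLS] = { 0 };

	if (prop_len_bytes > (int)sizeof(buf))
		prop_len_bytes = sizeof(buf);	/* never trust input sizes */
	if (prop)
		memcpy(buf, prop, prop_len_bytes);

	/* ... downstream parsing sees `buf`, never `prop` ... */
}
```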
@@ -35,7 +35,6 @@ static void pnv_php_disable_irq(struct pnv_php_slot *php_slot,
                 bool disable_device)
 {
     struct pci_dev *pdev = php_slot->pdev;
-    int irq = php_slot->irq;
     u16 ctrl;
 
     if (php_slot->irq > 0) {
@@ -54,7 +53,7 @@ static void pnv_php_disable_irq(struct pnv_php_slot *php_slot,
         php_slot->wq = NULL;
     }
 
-    if (disable_device || irq > 0) {
+    if (disable_device) {
         if (pdev->msix_enabled)
             pci_disable_msix(pdev);
         else if (pdev->msi_enabled)
@@ -4958,10 +4958,12 @@ static void pci_bus_lock(struct pci_bus *bus)
 {
     struct pci_dev *dev;
 
+    pci_dev_lock(bus->self);
     list_for_each_entry(dev, &bus->devices, bus_list) {
-        pci_dev_lock(dev);
         if (dev->subordinate)
             pci_bus_lock(dev->subordinate);
+        else
+            pci_dev_lock(dev);
     }
 }
 
@@ -4973,8 +4975,10 @@ static void pci_bus_unlock(struct pci_bus *bus)
     list_for_each_entry(dev, &bus->devices, bus_list) {
         if (dev->subordinate)
             pci_bus_unlock(dev->subordinate);
-        pci_dev_unlock(dev);
+        else
+            pci_dev_unlock(dev);
     }
+    pci_dev_unlock(bus->self);
 }
 
 /* Return 1 on successful lock, 0 on contention */
@@ -4982,15 +4986,15 @@ static int pci_bus_trylock(struct pci_bus *bus)
 {
     struct pci_dev *dev;
 
+    if (!pci_dev_trylock(bus->self))
+        return 0;
+
     list_for_each_entry(dev, &bus->devices, bus_list) {
-        if (!pci_dev_trylock(dev))
-            goto unlock;
         if (dev->subordinate) {
-            if (!pci_bus_trylock(dev->subordinate)) {
-                pci_dev_unlock(dev);
+            if (!pci_bus_trylock(dev->subordinate))
                 goto unlock;
-            }
-        }
+        } else if (!pci_dev_trylock(dev))
+            goto unlock;
     }
     return 1;
 
@@ -4998,8 +5002,10 @@ static int pci_bus_trylock(struct pci_bus *bus)
     list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
         if (dev->subordinate)
             pci_bus_unlock(dev->subordinate);
-        pci_dev_unlock(dev);
+        else
+            pci_dev_unlock(dev);
     }
+    pci_dev_unlock(bus->self);
     return 0;
 }
 
@@ -5031,9 +5037,10 @@ static void pci_slot_lock(struct pci_slot *slot)
     list_for_each_entry(dev, &slot->bus->devices, bus_list) {
         if (!dev->slot || dev->slot != slot)
             continue;
-        pci_dev_lock(dev);
         if (dev->subordinate)
             pci_bus_lock(dev->subordinate);
+        else
+            pci_dev_lock(dev);
     }
 }
 
@@ -5059,14 +5066,13 @@ static int pci_slot_trylock(struct pci_slot *slot)
     list_for_each_entry(dev, &slot->bus->devices, bus_list) {
         if (!dev->slot || dev->slot != slot)
             continue;
-        if (!pci_dev_trylock(dev))
-            goto unlock;
         if (dev->subordinate) {
-            if (!pci_bus_trylock(dev->subordinate)) {
-                pci_dev_unlock(dev);
+            if (!pci_bus_trylock(dev->subordinate))
                 goto unlock;
-            }
-        }
+        } else if (!pci_dev_trylock(dev))
+            goto unlock;
     }
     return 1;
 
@@ -5077,6 +5083,7 @@ static int pci_slot_trylock(struct pci_slot *slot)
             continue;
         if (dev->subordinate)
             pci_bus_unlock(dev->subordinate);
+        else
             pci_dev_unlock(dev);
     }
     return 0;
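The rework turns the bus locking into a strict pre-order walk: lock the bridge, then recurse into its secondary bus, locking each leaf exactly once; unlock mirrors the walk in reverse. A compact sketch of hierarchical lock ordering (pthread mutexes stand in for the PCI device locks):

```c
#include <pthread.h>

struct node {
	pthread_mutex_t lock;
	struct node **children;
	int nchildren;
};

/* Pre-order: take the parent before descending, so every thread
 * acquires locks in the same global order and deadlock is impossible. */
void tree_lock(struct node *n)
{
	pthread_mutex_lock(&n->lock);
	for (int i = 0; i < n->nchildren; i++)
		tree_lock(n->children[i]);
}

/* Release in exact reverse of acquisition. */
void tree_unlock(struct node *n)
{
	for (int i = n->nchildren - 1; i >= 0; i--)
		tree_unlock(n->children[i]);
	pthread_mutex_unlock(&n->lock);
}
```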
@@ -636,11 +636,11 @@ static int yenta_search_one_res(struct resource *root, struct resource *res,
         start = PCIBIOS_MIN_CARDBUS_IO;
         end = ~0U;
     } else {
-        unsigned long avail = root->end - root->start;
+        unsigned long avail = resource_size(root);
         int i;
         size = BRIDGE_MEM_MAX;
-        if (size > avail/8) {
-            size = (avail+1)/8;
+        if (size > (avail - 1) / 8) {
+            size = avail / 8;
             /* round size down to next power of 2 */
             i = 0;
             while ((size /= 2) != 0)
@@ -613,7 +613,10 @@ static int __init dell_smbios_init(void)
     return 0;
 
 fail_sysfs:
-    free_group(platform_device);
+    if (!wmi)
+        exit_dell_smbios_wmi();
+    if (!smm)
+        exit_dell_smbios_smm();
 
 fail_create_group:
     platform_device_del(platform_device);
@@ -104,10 +104,11 @@ static void hv_uio_channel_cb(void *context)
 
 /*
  * Callback from vmbus_event when channel is rescinded.
+ * It is meant for rescind of primary channels only.
  */
 static void hv_uio_rescind(struct vmbus_channel *channel)
 {
-    struct hv_device *hv_dev = channel->primary_channel->device_obj;
+    struct hv_device *hv_dev = channel->device_obj;
     struct hv_uio_private_data *pdata = hv_get_drvdata(hv_dev);
 
     /*
@@ -118,6 +119,14 @@ static void hv_uio_rescind(struct vmbus_channel *channel)
 
     /* Wake up reader */
     uio_event_notify(&pdata->info);
+
+    /*
+     * With rescind callback registered, rescind path will not unregister the device
+     * from vmbus when the primary channel is rescinded.
+     * Without it, rescind handling is incomplete and next onoffer msg does not come.
+     * Unregister the device from vmbus here.
+     */
+    vmbus_device_unregister(channel->device_obj);
 }
 
 /* Sysfs API to allow mmap of the ring buffers
@@ -252,24 +252,25 @@ static int st_dwc3_probe(struct platform_device *pdev)
     if (!child) {
         dev_err(&pdev->dev, "failed to find dwc3 core node\n");
         ret = -ENODEV;
-        goto undo_softreset;
+        goto err_node_put;
     }
 
     /* Allocate and initialize the core */
     ret = of_platform_populate(node, NULL, NULL, dev);
     if (ret) {
         dev_err(dev, "failed to add dwc3 core\n");
-        goto undo_softreset;
+        goto err_node_put;
     }
 
     child_pdev = of_find_device_by_node(child);
     if (!child_pdev) {
         dev_err(dev, "failed to find dwc3 core device\n");
         ret = -ENODEV;
-        goto undo_softreset;
+        goto depopulate;
     }
 
     dwc3_data->dr_mode = usb_get_dr_mode(&child_pdev->dev);
+    of_node_put(child);
 
     /*
      * Configure the USB port as device or host according to the static
@@ -280,6 +281,7 @@ static int st_dwc3_probe(struct platform_device *pdev)
     ret = st_dwc3_drd_init(dwc3_data);
     if (ret) {
         dev_err(dev, "drd initialisation failed\n");
+        of_platform_depopulate(dev);
         goto undo_softreset;
     }
 
@@ -289,6 +291,10 @@ static int st_dwc3_probe(struct platform_device *pdev)
     platform_set_drvdata(pdev, dwc3_data);
     return 0;
 
+depopulate:
+    of_platform_depopulate(dev);
+err_node_put:
+    of_node_put(child);
 undo_softreset:
     reset_control_assert(dwc3_data->rstc_rst);
 undo_powerdown:
@@ -144,53 +144,62 @@ static int tweak_set_configuration_cmd(struct urb *urb)
     if (err && err != -ENODEV)
         dev_err(&sdev->udev->dev, "can't set config #%d, error %d\n",
             config, err);
-    return 0;
+    return err;
 }
 
 static int tweak_reset_device_cmd(struct urb *urb)
 {
     struct stub_priv *priv = (struct stub_priv *) urb->context;
     struct stub_device *sdev = priv->sdev;
+    int err;
 
     dev_info(&urb->dev->dev, "usb_queue_reset_device\n");
 
-    if (usb_lock_device_for_reset(sdev->udev, NULL) < 0) {
+    err = usb_lock_device_for_reset(sdev->udev, NULL);
+    if (err < 0) {
         dev_err(&urb->dev->dev, "could not obtain lock to reset device\n");
-        return 0;
+        return err;
     }
-    usb_reset_device(sdev->udev);
+    err = usb_reset_device(sdev->udev);
     usb_unlock_device(sdev->udev);
 
-    return 0;
+    return err;
 }
 
 /*
  * clear_halt, set_interface, and set_configuration require special tricks.
+ * Returns 1 if request was tweaked, 0 otherwise.
  */
-static void tweak_special_requests(struct urb *urb)
+static int tweak_special_requests(struct urb *urb)
 {
+    int err;
+
     if (!urb || !urb->setup_packet)
-        return;
+        return 0;
 
     if (usb_pipetype(urb->pipe) != PIPE_CONTROL)
-        return;
+        return 0;
 
     if (is_clear_halt_cmd(urb))
         /* tweak clear_halt */
-        tweak_clear_halt_cmd(urb);
+        err = tweak_clear_halt_cmd(urb);
 
     else if (is_set_interface_cmd(urb))
         /* tweak set_interface */
-        tweak_set_interface_cmd(urb);
+        err = tweak_set_interface_cmd(urb);
 
     else if (is_set_configuration_cmd(urb))
         /* tweak set_configuration */
-        tweak_set_configuration_cmd(urb);
+        err = tweak_set_configuration_cmd(urb);
 
     else if (is_reset_device_cmd(urb))
-        tweak_reset_device_cmd(urb);
-    else
+        err = tweak_reset_device_cmd(urb);
+    else {
         usbip_dbg_stub_rx("no need to tweak\n");
+        return 0;
+    }
+
+    return !err;
 }
 
 /*
@@ -468,6 +477,7 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
     int support_sg = 1;
     int np = 0;
     int ret, i;
+    int is_tweaked;
 
     if (pipe == -1)
         return;
@@ -580,8 +590,11 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
         priv->urbs[i]->pipe = pipe;
         priv->urbs[i]->complete = stub_complete;
 
-        /* no need to submit an intercepted request, but harmless? */
-        tweak_special_requests(priv->urbs[i]);
+        /*
+         * all URBs belong to a single PDU, so a global is_tweaked flag is
+         * enough
+         */
+        is_tweaked = tweak_special_requests(priv->urbs[i]);
 
         masking_bogus_flags(priv->urbs[i]);
     }
@@ -594,6 +607,7 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
 
     /* urb is now ready to submit */
     for (i = 0; i < priv->num_urbs; i++) {
+        if (!is_tweaked) {
             ret = usb_submit_urb(priv->urbs[i], GFP_KERNEL);
 
             if (ret == 0)
@@ -611,6 +625,15 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
             usbip_event_add(ud, SDEV_EVENT_ERROR_SUBMIT);
             break;
         }
+        } else {
+            /*
+             * An identical URB was already submitted in
+             * tweak_special_requests(). Skip submitting this URB to not
+             * duplicate the request.
+             */
+            priv->urbs[i]->status = 0;
+            stub_complete(priv->urbs[i]);
+        }
     }
 
     usbip_dbg_stub_rx("Leave\n");
@@ -8409,7 +8409,15 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
 			/* We don't care about errors in readahead. */
 			if (ret < 0)
 				continue;
-			BUG_ON(refs == 0);
+
+			/*
+			 * This could be racey, it's conceivable that we raced and end
+			 * up with a bogus refs count, if that's the case just skip, if
+			 * we are actually corrupt we will notice when we look up
+			 * everything again with our locks.
+			 */
+			if (refs == 0)
+				continue;

 			if (wc->stage == DROP_REFERENCE) {
 				if (refs == 1)
@@ -8468,7 +8476,7 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
 	if (lookup_info &&
 	    ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
 	     (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
-		BUG_ON(!path->locks[level]);
+		ASSERT(path->locks[level]);
 		ret = btrfs_lookup_extent_info(trans, fs_info,
 					       eb->start, level, 1,
 					       &wc->refs[level],
@@ -8476,7 +8484,11 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
 		BUG_ON(ret == -ENOMEM);
 		if (ret)
 			return ret;
-		BUG_ON(wc->refs[level] == 0);
+		if (unlikely(wc->refs[level] == 0)) {
+			btrfs_err(fs_info, "bytenr %llu has 0 references, expect > 0",
+				  eb->start);
+			return -EUCLEAN;
+		}
 	}

 	if (wc->stage == DROP_REFERENCE) {
@@ -8492,7 +8504,7 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,

 	/* wc->stage == UPDATE_BACKREF */
 	if (!(wc->flags[level] & flag)) {
-		BUG_ON(!path->locks[level]);
+		ASSERT(path->locks[level]);
 		ret = btrfs_inc_ref(trans, root, eb, 1);
 		BUG_ON(ret); /* -ENOMEM */
 		ret = btrfs_dec_ref(trans, root, eb, 0);
@@ -8584,8 +8596,9 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
 		goto out_unlock;

 	if (unlikely(wc->refs[level - 1] == 0)) {
-		btrfs_err(fs_info, "Missing references.");
-		ret = -EIO;
+		btrfs_err(fs_info, "bytenr %llu has 0 references, expect > 0",
+			  bytenr);
+		ret = -EUCLEAN;
 		goto out_unlock;
 	}
 	*lookup_info = 0;
@@ -8753,7 +8766,12 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
 			path->locks[level] = 0;
 			return ret;
 		}
-		BUG_ON(wc->refs[level] == 0);
+		if (unlikely(wc->refs[level] == 0)) {
+			btrfs_tree_unlock_rw(eb, path->locks[level]);
+			btrfs_err(fs_info, "bytenr %llu has 0 references, expect > 0",
+				  eb->start);
+			return -EUCLEAN;
+		}
 		if (wc->refs[level] == 1) {
 			btrfs_tree_unlock_rw(eb, path->locks[level]);
 			path->locks[level] = 0;

@@ -5869,7 +5869,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
 	struct inode *inode;
 	struct btrfs_root *root = BTRFS_I(dir)->root;
 	struct btrfs_root *sub_root = root;
-	struct btrfs_key location;
+	struct btrfs_key location = { 0 };
 	u8 di_type = 0;
 	int index;
 	int ret = 0;

@@ -1451,8 +1451,11 @@ static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
 alloc:
 	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
 	old_blkaddr = dn->data_blkaddr;
-	f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
-				&sum, seg_type, NULL, false);
+	err = f2fs_allocate_data_block(sbi, NULL, old_blkaddr,
+				&dn->data_blkaddr, &sum, seg_type, NULL, false);
+	if (err)
+		return err;
+
 	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
 		invalidate_mapping_pages(META_MAPPING(sbi),
 				old_blkaddr, old_blkaddr);

@@ -3356,7 +3356,7 @@ void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
 			block_t old_addr, block_t new_addr,
 			unsigned char version, bool recover_curseg,
 			bool recover_newaddr);
-void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
+int f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
 			block_t old_blkaddr, block_t *new_blkaddr,
 			struct f2fs_summary *sum, int type,
 			struct f2fs_io_info *fio, bool add_list);

@@ -2279,8 +2279,11 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
 	case F2FS_GOING_DOWN_METASYNC:
 		/* do checkpoint only */
 		ret = f2fs_sync_fs(sb, 1);
-		if (ret)
+		if (ret) {
+			if (ret == -EIO)
+				ret = 0;
 			goto out;
+		}
 		f2fs_stop_checkpoint(sbi, false);
 		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
 		break;
@@ -2299,6 +2302,8 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
 		set_sbi_flag(sbi, SBI_IS_DIRTY);
 		/* do checkpoint only */
 		ret = f2fs_sync_fs(sb, 1);
+		if (ret == -EIO)
+			ret = 0;
 		goto out;
 	default:
 		ret = -EINVAL;

@@ -863,8 +863,14 @@ static int move_data_block(struct inode *inode, block_t bidx,
 		}
 	}

-	f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
-				&sum, CURSEG_COLD_DATA, NULL, false);
+	/* allocate block address */
+	err = f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
+				&sum, CURSEG_COLD_DATA, NULL, false);
+	if (err) {
+		f2fs_put_page(mpage, 1);
+		/* filesystem should shutdown, no need to recovery block */
+		goto up_out;
+	}

 	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
 				newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);

@@ -492,6 +492,9 @@ int f2fs_commit_inmem_pages(struct inode *inode)
  */
 void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
 {
+	if (f2fs_cp_error(sbi))
+		return;
+
 	if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
 		f2fs_show_injection_info(sbi, FAULT_CHECKPOINT);
 		f2fs_stop_checkpoint(sbi, false);
@@ -2390,7 +2393,7 @@ static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
  * Find a new segment from the free segments bitmap to right order
  * This function should be returned with success, otherwise BUG
  */
-static void get_new_segment(struct f2fs_sb_info *sbi,
+static int get_new_segment(struct f2fs_sb_info *sbi,
 			unsigned int *newseg, bool new_sec, int dir)
 {
 	struct free_segmap_info *free_i = FREE_I(sbi);
@@ -2402,6 +2405,7 @@ static void get_new_segment(struct f2fs_sb_info *sbi,
 	bool init = true;
 	int go_left = 0;
 	int i;
+	int ret = 0;

 	spin_lock(&free_i->segmap_lock);

@@ -2417,7 +2421,10 @@ static void get_new_segment(struct f2fs_sb_info *sbi,
 	if (dir == ALLOC_RIGHT) {
 		secno = find_next_zero_bit(free_i->free_secmap,
 						MAIN_SECS(sbi), 0);
-		f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi));
+		if (secno >= MAIN_SECS(sbi)) {
+			ret = -ENOSPC;
+			goto out_unlock;
+		}
 	} else {
 		go_left = 1;
 		left_start = hint - 1;
@@ -2433,7 +2440,10 @@ static void get_new_segment(struct f2fs_sb_info *sbi,
 			}
 			left_start = find_next_zero_bit(free_i->free_secmap,
 						MAIN_SECS(sbi), 0);
-			f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi));
+			if (left_start >= MAIN_SECS(sbi)) {
+				ret = -ENOSPC;
+				goto out_unlock;
+			}
 			break;
 		}
 		secno = left_start;
@@ -2474,7 +2484,14 @@ static void get_new_segment(struct f2fs_sb_info *sbi,
 	f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
 	__set_inuse(sbi, segno);
 	*newseg = segno;
+out_unlock:
 	spin_unlock(&free_i->segmap_lock);
+
+	if (ret) {
+		f2fs_stop_checkpoint(sbi, false);
+		f2fs_bug_on(sbi, 1);
+	}
+	return ret;
 }

 static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
@@ -2482,6 +2499,10 @@ static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
 	struct curseg_info *curseg = CURSEG_I(sbi, type);
 	struct summary_footer *sum_footer;

+	/* only happen when get_new_segment() fails */
+	if (curseg->next_segno == NULL_SEGNO)
+		return;
+
 	curseg->segno = curseg->next_segno;
 	curseg->zone = GET_ZONE_FROM_SEG(sbi, curseg->segno);
 	curseg->next_blkoff = 0;
@@ -2538,7 +2559,11 @@ static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
 		dir = ALLOC_RIGHT;

 	segno = __get_next_segno(sbi, type);
-	get_new_segment(sbi, &segno, new_sec, dir);
+	if (get_new_segment(sbi, &segno, new_sec, dir)) {
+		curseg->segno = NULL_SEGNO;
+		return;
+	}
+
 	curseg->next_segno = segno;
 	reset_curseg(sbi, type, 1);
 	curseg->alloc_type = LFS;
@@ -3098,7 +3123,7 @@ static int __get_segment_type(struct f2fs_io_info *fio)
 	return type;
 }

-void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
+int f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
 		block_t old_blkaddr, block_t *new_blkaddr,
 		struct f2fs_summary *sum, int type,
 		struct f2fs_io_info *fio, bool add_list)
@@ -3132,6 +3157,9 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
 	mutex_lock(&curseg->curseg_mutex);
 	down_write(&sit_i->sentry_lock);

+	if (curseg->segno == NULL_SEGNO)
+		goto out_err;
+
 	*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

 	f2fs_wait_discard_bio(sbi, *new_blkaddr);
@@ -3188,7 +3216,6 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
 	}

 	mutex_unlock(&curseg->curseg_mutex);
-
 	up_read(&SM_I(sbi)->curseg_lock);

 	if (IS_DATASEG(type))
@@ -3196,6 +3223,19 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,

 	if (put_pin_sem)
 		up_read(&sbi->pin_sem);
+	return 0;
+out_err:
+	*new_blkaddr = NULL_ADDR;
+
+	up_write(&sit_i->sentry_lock);
+	mutex_unlock(&curseg->curseg_mutex);
+	up_read(&SM_I(sbi)->curseg_lock);
+	if (IS_DATASEG(type))
+		up_write(&sbi->node_write);
+
+	if (put_pin_sem)
+		up_read(&sbi->pin_sem);
+	return -ENOSPC;
 }

 static void update_device_state(struct f2fs_io_info *fio)
@@ -3226,9 +3266,19 @@ static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)

 	if (keep_order)
 		down_read(&fio->sbi->io_order_lock);

 reallocate:
-	f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
-			&fio->new_blkaddr, sum, type, fio, true);
+	if (f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
+			&fio->new_blkaddr, sum, type, fio, true)) {
+		if (fscrypt_inode_uses_fs_layer_crypto(fio->page->mapping->host))
+			fscrypt_finalize_bounce_page(&fio->encrypted_page);
+		if (PageWriteback(fio->page))
+			end_page_writeback(fio->page);
+		if (f2fs_in_warm_node_list(fio->sbi, fio->page))
+			f2fs_del_fsync_node_entry(fio->sbi, fio->page);
+		goto out;
+	}

 	if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO)
 		invalidate_mapping_pages(META_MAPPING(fio->sbi),
 				fio->old_blkaddr, fio->old_blkaddr);
@@ -3241,7 +3291,7 @@ static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
 	}

 	update_device_state(fio);
-
+out:
 	if (keep_order)
 		up_read(&fio->sbi->io_order_lock);
 }

@@ -79,7 +79,7 @@ ssize_t fuse_getxattr(struct inode *inode, const char *name, void *value,
 	}
 	ret = fuse_simple_request(fc, &args);
 	if (!ret && !size)
-		ret = min_t(ssize_t, outarg.size, XATTR_SIZE_MAX);
+		ret = min_t(size_t, outarg.size, XATTR_SIZE_MAX);
 	if (ret == -ENOSYS) {
 		fc->no_getxattr = 1;
 		ret = -EOPNOTSUPP;
@@ -141,7 +141,7 @@ ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size)
 	}
 	ret = fuse_simple_request(fc, &args);
 	if (!ret && !size)
-		ret = min_t(ssize_t, outarg.size, XATTR_LIST_MAX);
+		ret = min_t(size_t, outarg.size, XATTR_LIST_MAX);
 	if (ret > 0 && size)
 		ret = fuse_verify_xattr_list(list, ret);
 	if (ret == -ENOSYS) {

@@ -708,6 +708,33 @@ static void nilfs_finish_roll_forward(struct the_nilfs *nilfs,
 	brelse(bh);
 }

+/**
+ * nilfs_abort_roll_forward - cleaning up after a failed rollforward recovery
+ * @nilfs: nilfs object
+ */
+static void nilfs_abort_roll_forward(struct the_nilfs *nilfs)
+{
+	struct nilfs_inode_info *ii, *n;
+	LIST_HEAD(head);
+
+	/* Abandon inodes that have read recovery data */
+	spin_lock(&nilfs->ns_inode_lock);
+	list_splice_init(&nilfs->ns_dirty_files, &head);
+	spin_unlock(&nilfs->ns_inode_lock);
+	if (list_empty(&head))
+		return;
+
+	set_nilfs_purging(nilfs);
+	list_for_each_entry_safe(ii, n, &head, i_dirty) {
+		spin_lock(&nilfs->ns_inode_lock);
+		list_del_init(&ii->i_dirty);
+		spin_unlock(&nilfs->ns_inode_lock);
+
+		iput(&ii->vfs_inode);
+	}
+	clear_nilfs_purging(nilfs);
+}
+
 /**
  * nilfs_salvage_orphan_logs - salvage logs written after the latest checkpoint
  * @nilfs: nilfs object
@@ -766,15 +793,19 @@ int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs,
 		if (unlikely(err)) {
 			nilfs_err(sb, "error %d writing segment for recovery",
 				  err);
-			goto failed;
+			goto put_root;
 		}

 		nilfs_finish_roll_forward(nilfs, ri);
 	}

-failed:
+put_root:
 	nilfs_put_root(root);
 	return err;
+
+failed:
+	nilfs_abort_roll_forward(nilfs);
+	goto put_root;
 }

 /**

@@ -1828,6 +1828,9 @@ static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
 	nilfs_abort_logs(&logs, ret ? : err);

 	list_splice_tail_init(&sci->sc_segbufs, &logs);
+	if (list_empty(&logs))
+		return; /* if the first segment buffer preparation failed */
+
 	nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
 	nilfs_free_incomplete_logs(&logs, nilfs);

@@ -2072,7 +2075,7 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)

 	err = nilfs_segctor_begin_construction(sci, nilfs);
 	if (unlikely(err))
-		goto out;
+		goto failed;

 	/* Update time stamp */
 	sci->sc_seg_ctime = ktime_get_real_seconds();
@@ -2135,10 +2138,9 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
 	return err;

 failed_to_write:
-	if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
-		nilfs_redirty_inodes(&sci->sc_dirty_files);
-
 failed:
+	if (mode == SC_LSEG_SR && nilfs_sc_cstage_get(sci) >= NILFS_ST_IFILE)
+		nilfs_redirty_inodes(&sci->sc_dirty_files);
 	if (nilfs_doing_gc())
 		nilfs_redirty_inodes(&sci->sc_gc_inodes);
 	nilfs_segctor_abort_construction(sci, nilfs, err);

@@ -108,7 +108,7 @@ static ssize_t
 nilfs_snapshot_inodes_count_show(struct nilfs_snapshot_attr *attr,
 				 struct nilfs_root *root, char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, "%llu\n",
+	return sysfs_emit(buf, "%llu\n",
 			(unsigned long long)atomic64_read(&root->inodes_count));
 }

@@ -116,7 +116,7 @@ static ssize_t
 nilfs_snapshot_blocks_count_show(struct nilfs_snapshot_attr *attr,
 				 struct nilfs_root *root, char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, "%llu\n",
+	return sysfs_emit(buf, "%llu\n",
 			(unsigned long long)atomic64_read(&root->blocks_count));
 }

@@ -129,7 +129,7 @@ static ssize_t
 nilfs_snapshot_README_show(struct nilfs_snapshot_attr *attr,
 			   struct nilfs_root *root, char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, snapshot_readme_str);
+	return sysfs_emit(buf, snapshot_readme_str);
 }

 NILFS_SNAPSHOT_RO_ATTR(inodes_count);
@@ -230,7 +230,7 @@ static ssize_t
 nilfs_mounted_snapshots_README_show(struct nilfs_mounted_snapshots_attr *attr,
 				    struct the_nilfs *nilfs, char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, mounted_snapshots_readme_str);
+	return sysfs_emit(buf, mounted_snapshots_readme_str);
 }

 NILFS_MOUNTED_SNAPSHOTS_RO_ATTR(README);
@@ -268,7 +268,7 @@ nilfs_checkpoints_checkpoints_number_show(struct nilfs_checkpoints_attr *attr,

 	ncheckpoints = cpstat.cs_ncps;

-	return snprintf(buf, PAGE_SIZE, "%llu\n", ncheckpoints);
+	return sysfs_emit(buf, "%llu\n", ncheckpoints);
 }

 static ssize_t
@@ -291,7 +291,7 @@ nilfs_checkpoints_snapshots_number_show(struct nilfs_checkpoints_attr *attr,

 	nsnapshots = cpstat.cs_nsss;

-	return snprintf(buf, PAGE_SIZE, "%llu\n", nsnapshots);
+	return sysfs_emit(buf, "%llu\n", nsnapshots);
 }

 static ssize_t
@@ -305,7 +305,7 @@ nilfs_checkpoints_last_seg_checkpoint_show(struct nilfs_checkpoints_attr *attr,
 	last_cno = nilfs->ns_last_cno;
 	spin_unlock(&nilfs->ns_last_segment_lock);

-	return snprintf(buf, PAGE_SIZE, "%llu\n", last_cno);
+	return sysfs_emit(buf, "%llu\n", last_cno);
 }

 static ssize_t
@@ -319,7 +319,7 @@ nilfs_checkpoints_next_checkpoint_show(struct nilfs_checkpoints_attr *attr,
 	cno = nilfs->ns_cno;
 	up_read(&nilfs->ns_segctor_sem);

-	return snprintf(buf, PAGE_SIZE, "%llu\n", cno);
+	return sysfs_emit(buf, "%llu\n", cno);
 }

 static const char checkpoints_readme_str[] =
@@ -335,7 +335,7 @@ static ssize_t
 nilfs_checkpoints_README_show(struct nilfs_checkpoints_attr *attr,
 			      struct the_nilfs *nilfs, char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, checkpoints_readme_str);
+	return sysfs_emit(buf, checkpoints_readme_str);
 }

 NILFS_CHECKPOINTS_RO_ATTR(checkpoints_number);
@@ -366,7 +366,7 @@ nilfs_segments_segments_number_show(struct nilfs_segments_attr *attr,
 				    struct the_nilfs *nilfs,
 				    char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, "%lu\n", nilfs->ns_nsegments);
+	return sysfs_emit(buf, "%lu\n", nilfs->ns_nsegments);
 }

 static ssize_t
@@ -374,7 +374,7 @@ nilfs_segments_blocks_per_segment_show(struct nilfs_segments_attr *attr,
 				       struct the_nilfs *nilfs,
 				       char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, "%lu\n", nilfs->ns_blocks_per_segment);
+	return sysfs_emit(buf, "%lu\n", nilfs->ns_blocks_per_segment);
 }

 static ssize_t
@@ -388,7 +388,7 @@ nilfs_segments_clean_segments_show(struct nilfs_segments_attr *attr,
 	ncleansegs = nilfs_sufile_get_ncleansegs(nilfs->ns_sufile);
 	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);

-	return snprintf(buf, PAGE_SIZE, "%lu\n", ncleansegs);
+	return sysfs_emit(buf, "%lu\n", ncleansegs);
 }

 static ssize_t
@@ -408,7 +408,7 @@ nilfs_segments_dirty_segments_show(struct nilfs_segments_attr *attr,
 		return err;
 	}

-	return snprintf(buf, PAGE_SIZE, "%llu\n", sustat.ss_ndirtysegs);
+	return sysfs_emit(buf, "%llu\n", sustat.ss_ndirtysegs);
 }

 static const char segments_readme_str[] =
@@ -424,7 +424,7 @@ nilfs_segments_README_show(struct nilfs_segments_attr *attr,
 			   struct the_nilfs *nilfs,
 			   char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, segments_readme_str);
+	return sysfs_emit(buf, segments_readme_str);
 }

 NILFS_SEGMENTS_RO_ATTR(segments_number);
@@ -461,7 +461,7 @@ nilfs_segctor_last_pseg_block_show(struct nilfs_segctor_attr *attr,
 	last_pseg = nilfs->ns_last_pseg;
 	spin_unlock(&nilfs->ns_last_segment_lock);

-	return snprintf(buf, PAGE_SIZE, "%llu\n",
+	return sysfs_emit(buf, "%llu\n",
 			(unsigned long long)last_pseg);
 }

@@ -476,7 +476,7 @@ nilfs_segctor_last_seg_sequence_show(struct nilfs_segctor_attr *attr,
 	last_seq = nilfs->ns_last_seq;
 	spin_unlock(&nilfs->ns_last_segment_lock);

-	return snprintf(buf, PAGE_SIZE, "%llu\n", last_seq);
+	return sysfs_emit(buf, "%llu\n", last_seq);
 }

 static ssize_t
@@ -490,7 +490,7 @@ nilfs_segctor_last_seg_checkpoint_show(struct nilfs_segctor_attr *attr,
 	last_cno = nilfs->ns_last_cno;
 	spin_unlock(&nilfs->ns_last_segment_lock);

-	return snprintf(buf, PAGE_SIZE, "%llu\n", last_cno);
+	return sysfs_emit(buf, "%llu\n", last_cno);
 }

 static ssize_t
@@ -504,7 +504,7 @@ nilfs_segctor_current_seg_sequence_show(struct nilfs_segctor_attr *attr,
 	seg_seq = nilfs->ns_seg_seq;
 	up_read(&nilfs->ns_segctor_sem);

-	return snprintf(buf, PAGE_SIZE, "%llu\n", seg_seq);
+	return sysfs_emit(buf, "%llu\n", seg_seq);
 }

 static ssize_t
@@ -518,7 +518,7 @@ nilfs_segctor_current_last_full_seg_show(struct nilfs_segctor_attr *attr,
 	segnum = nilfs->ns_segnum;
 	up_read(&nilfs->ns_segctor_sem);

-	return snprintf(buf, PAGE_SIZE, "%llu\n", segnum);
+	return sysfs_emit(buf, "%llu\n", segnum);
 }

 static ssize_t
@@ -532,7 +532,7 @@ nilfs_segctor_next_full_seg_show(struct nilfs_segctor_attr *attr,
 	nextnum = nilfs->ns_nextnum;
 	up_read(&nilfs->ns_segctor_sem);

-	return snprintf(buf, PAGE_SIZE, "%llu\n", nextnum);
+	return sysfs_emit(buf, "%llu\n", nextnum);
 }

 static ssize_t
@@ -546,7 +546,7 @@ nilfs_segctor_next_pseg_offset_show(struct nilfs_segctor_attr *attr,
 	pseg_offset = nilfs->ns_pseg_offset;
 	up_read(&nilfs->ns_segctor_sem);

-	return snprintf(buf, PAGE_SIZE, "%lu\n", pseg_offset);
+	return sysfs_emit(buf, "%lu\n", pseg_offset);
 }

 static ssize_t
@@ -560,7 +560,7 @@ nilfs_segctor_next_checkpoint_show(struct nilfs_segctor_attr *attr,
 	cno = nilfs->ns_cno;
 	up_read(&nilfs->ns_segctor_sem);

-	return snprintf(buf, PAGE_SIZE, "%llu\n", cno);
+	return sysfs_emit(buf, "%llu\n", cno);
 }

 static ssize_t
@@ -588,7 +588,7 @@ nilfs_segctor_last_seg_write_time_secs_show(struct nilfs_segctor_attr *attr,
 	ctime = nilfs->ns_ctime;
 	up_read(&nilfs->ns_segctor_sem);

-	return snprintf(buf, PAGE_SIZE, "%llu\n", ctime);
+	return sysfs_emit(buf, "%llu\n", ctime);
 }

 static ssize_t
@@ -616,7 +616,7 @@ nilfs_segctor_last_nongc_write_time_secs_show(struct nilfs_segctor_attr *attr,
 	nongc_ctime = nilfs->ns_nongc_ctime;
 	up_read(&nilfs->ns_segctor_sem);

-	return snprintf(buf, PAGE_SIZE, "%llu\n", nongc_ctime);
+	return sysfs_emit(buf, "%llu\n", nongc_ctime);
 }

 static ssize_t
@@ -630,7 +630,7 @@ nilfs_segctor_dirty_data_blocks_count_show(struct nilfs_segctor_attr *attr,
 	ndirtyblks = atomic_read(&nilfs->ns_ndirtyblks);
 	up_read(&nilfs->ns_segctor_sem);

-	return snprintf(buf, PAGE_SIZE, "%u\n", ndirtyblks);
+	return sysfs_emit(buf, "%u\n", ndirtyblks);
 }

 static const char segctor_readme_str[] =
@@ -667,7 +667,7 @@ static ssize_t
 nilfs_segctor_README_show(struct nilfs_segctor_attr *attr,
 			  struct the_nilfs *nilfs, char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, segctor_readme_str);
+	return sysfs_emit(buf, segctor_readme_str);
 }

 NILFS_SEGCTOR_RO_ATTR(last_pseg_block);
@@ -736,7 +736,7 @@ nilfs_superblock_sb_write_time_secs_show(struct nilfs_superblock_attr *attr,
 	sbwtime = nilfs->ns_sbwtime;
 	up_read(&nilfs->ns_sem);

-	return snprintf(buf, PAGE_SIZE, "%llu\n", sbwtime);
+	return sysfs_emit(buf, "%llu\n", sbwtime);
 }

 static ssize_t
@@ -750,7 +750,7 @@ nilfs_superblock_sb_write_count_show(struct nilfs_superblock_attr *attr,
 	sbwcount = nilfs->ns_sbwcount;
 	up_read(&nilfs->ns_sem);

-	return snprintf(buf, PAGE_SIZE, "%u\n", sbwcount);
+	return sysfs_emit(buf, "%u\n", sbwcount);
 }

 static ssize_t
@@ -764,7 +764,7 @@ nilfs_superblock_sb_update_frequency_show(struct nilfs_superblock_attr *attr,
 	sb_update_freq = nilfs->ns_sb_update_freq;
 	up_read(&nilfs->ns_sem);

-	return snprintf(buf, PAGE_SIZE, "%u\n", sb_update_freq);
+	return sysfs_emit(buf, "%u\n", sb_update_freq);
 }

 static ssize_t
@@ -812,7 +812,7 @@ static ssize_t
 nilfs_superblock_README_show(struct nilfs_superblock_attr *attr,
 			     struct the_nilfs *nilfs, char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, sb_readme_str);
+	return sysfs_emit(buf, sb_readme_str);
 }

 NILFS_SUPERBLOCK_RO_ATTR(sb_write_time);
@@ -843,11 +843,17 @@ ssize_t nilfs_dev_revision_show(struct nilfs_dev_attr *attr,
 				struct the_nilfs *nilfs,
 				char *buf)
 {
-	struct nilfs_super_block **sbp = nilfs->ns_sbp;
-	u32 major = le32_to_cpu(sbp[0]->s_rev_level);
-	u16 minor = le16_to_cpu(sbp[0]->s_minor_rev_level);
+	struct nilfs_super_block *raw_sb;
+	u32 major;
+	u16 minor;

-	return snprintf(buf, PAGE_SIZE, "%d.%d\n", major, minor);
+	down_read(&nilfs->ns_sem);
+	raw_sb = nilfs->ns_sbp[0];
+	major = le32_to_cpu(raw_sb->s_rev_level);
+	minor = le16_to_cpu(raw_sb->s_minor_rev_level);
+	up_read(&nilfs->ns_sem);
+
+	return sysfs_emit(buf, "%d.%d\n", major, minor);
 }

 static
@@ -855,7 +861,7 @@ ssize_t nilfs_dev_blocksize_show(struct nilfs_dev_attr *attr,
 				 struct the_nilfs *nilfs,
 				 char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, "%u\n", nilfs->ns_blocksize);
+	return sysfs_emit(buf, "%u\n", nilfs->ns_blocksize);
 }

 static
@@ -863,10 +869,15 @@ ssize_t nilfs_dev_device_size_show(struct nilfs_dev_attr *attr,
 				   struct the_nilfs *nilfs,
 				   char *buf)
 {
-	struct nilfs_super_block **sbp = nilfs->ns_sbp;
-	u64 dev_size = le64_to_cpu(sbp[0]->s_dev_size);
+	struct nilfs_super_block *raw_sb;
+	u64 dev_size;

-	return snprintf(buf, PAGE_SIZE, "%llu\n", dev_size);
+	down_read(&nilfs->ns_sem);
+	raw_sb = nilfs->ns_sbp[0];
+	dev_size = le64_to_cpu(raw_sb->s_dev_size);
+	up_read(&nilfs->ns_sem);
+
+	return sysfs_emit(buf, "%llu\n", dev_size);
 }

 static
@@ -877,7 +888,7 @@ ssize_t nilfs_dev_free_blocks_show(struct nilfs_dev_attr *attr,
 	sector_t free_blocks = 0;

 	nilfs_count_free_blocks(nilfs, &free_blocks);
-	return snprintf(buf, PAGE_SIZE, "%llu\n",
+	return sysfs_emit(buf, "%llu\n",
 			(unsigned long long)free_blocks);
 }

@@ -886,9 +897,15 @@ ssize_t nilfs_dev_uuid_show(struct nilfs_dev_attr *attr,
 			    struct the_nilfs *nilfs,
 			    char *buf)
 {
-	struct nilfs_super_block **sbp = nilfs->ns_sbp;
+	struct nilfs_super_block *raw_sb;
+	ssize_t len;

-	return snprintf(buf, PAGE_SIZE, "%pUb\n", sbp[0]->s_uuid);
+	down_read(&nilfs->ns_sem);
+	raw_sb = nilfs->ns_sbp[0];
+	len = sysfs_emit(buf, "%pUb\n", raw_sb->s_uuid);
+	up_read(&nilfs->ns_sem);
+
+	return len;
 }

 static
@@ -896,10 +913,16 @@ ssize_t nilfs_dev_volume_name_show(struct nilfs_dev_attr *attr,
 				   struct the_nilfs *nilfs,
 				   char *buf)
 {
-	struct nilfs_super_block **sbp = nilfs->ns_sbp;
+	struct nilfs_super_block *raw_sb;
+	ssize_t len;

-	return scnprintf(buf, sizeof(sbp[0]->s_volume_name), "%s\n",
-			 sbp[0]->s_volume_name);
+	down_read(&nilfs->ns_sem);
+	raw_sb = nilfs->ns_sbp[0];
+	len = scnprintf(buf, sizeof(raw_sb->s_volume_name), "%s\n",
+			raw_sb->s_volume_name);
+	up_read(&nilfs->ns_sem);
+
+	return len;
 }

 static const char dev_readme_str[] =
@@ -916,7 +939,7 @@ static ssize_t nilfs_dev_README_show(struct nilfs_dev_attr *attr,
 				     struct the_nilfs *nilfs,
 				     char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, dev_readme_str);
+	return sysfs_emit(buf, dev_readme_str);
 }

 NILFS_DEV_RO_ATTR(revision);
@@ -1060,7 +1083,7 @@ void nilfs_sysfs_delete_device_group(struct the_nilfs *nilfs)
 static ssize_t nilfs_feature_revision_show(struct kobject *kobj,
 					   struct attribute *attr, char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, "%d.%d\n",
+	return sysfs_emit(buf, "%d.%d\n",
 			NILFS_CURRENT_REV, NILFS_MINOR_REV);
 }

@@ -1073,7 +1096,7 @@ static ssize_t nilfs_feature_README_show(struct kobject *kobj,
 					 struct attribute *attr,
 					 char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, features_readme_str);
+	return sysfs_emit(buf, features_readme_str);
 }

 NILFS_FEATURE_RO_ATTR(revision);

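All of the nilfs2 sysfs conversions above follow one pattern: sysfs_emit(buf, fmt, ...) replaces snprintf(buf, PAGE_SIZE, fmt, ...). sysfs_emit() is purpose-built for show() callbacks: it knows the buffer is a full page and warns when handed a pointer that is not page-aligned, so misuse is caught rather than silently truncated. A minimal sketch of the conversion, with a made-up attribute (demo_value is illustrative, not nilfs code):

    static int demo_value;

    static ssize_t demo_value_show(struct kobject *kobj,
                                   struct kobj_attribute *attr, char *buf)
    {
            /* old style: return snprintf(buf, PAGE_SIZE, "%d\n", demo_value); */
            return sysfs_emit(buf, "%d\n", demo_value);
    }
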
@@ -289,8 +289,13 @@ int squashfs_read_inode(struct inode *inode, long long ino)
 		if (err < 0)
 			goto failed_read;

-		set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
 		inode->i_size = le32_to_cpu(sqsh_ino->symlink_size);
+		if (inode->i_size > PAGE_SIZE) {
+			ERROR("Corrupted symlink\n");
+			return -EINVAL;
+		}
+
+		set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
 		inode->i_op = &squashfs_symlink_inode_ops;
 		inode_nohighmem(inode);
 		inode->i_data.a_ops = &squashfs_symlink_aops;

@@ -86,6 +86,13 @@ enum {
 #define UDF_MAX_LVID_NESTING 1000

 enum { UDF_MAX_LINKS = 0xffff };
+/*
+ * We limit filesize to 4TB. This is arbitrary as the on-disk format supports
+ * more but because the file space is described by a linked list of extents,
+ * each of which can have at most 1GB, the creation and handling of extents
+ * gets unusably slow beyond certain point...
+ */
+#define UDF_MAX_FILESIZE (1ULL << 42)

 /* These are the "meat" - everything else is stuffing */
 static int udf_fill_super(struct super_block *, void *, int);
@@ -1047,12 +1054,19 @@ static int udf_fill_partdesc_info(struct super_block *sb,
 	struct udf_part_map *map;
 	struct udf_sb_info *sbi = UDF_SB(sb);
 	struct partitionHeaderDesc *phd;
+	u32 sum;
 	int err;

 	map = &sbi->s_partmaps[p_index];

 	map->s_partition_len = le32_to_cpu(p->partitionLength); /* blocks */
 	map->s_partition_root = le32_to_cpu(p->partitionStartingLocation);
+	if (check_add_overflow(map->s_partition_root, map->s_partition_len,
+			       &sum)) {
+		udf_err(sb, "Partition %d has invalid location %u + %u\n",
+			p_index, map->s_partition_root, map->s_partition_len);
+		return -EFSCORRUPTED;
+	}

 	if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_READ_ONLY))
 		map->s_partition_flags |= UDF_PART_FLAG_READ_ONLY;
@@ -1108,6 +1122,14 @@ static int udf_fill_partdesc_info(struct super_block *sb,
 		bitmap->s_extPosition = le32_to_cpu(
 				phd->unallocSpaceBitmap.extPosition);
 		map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_BITMAP;
+		/* Check whether math over bitmap won't overflow. */
+		if (check_add_overflow(map->s_partition_len,
+				       sizeof(struct spaceBitmapDesc) << 3,
+				       &sum)) {
+			udf_err(sb, "Partition %d is too long (%u)\n", p_index,
+				map->s_partition_len);
+			return -EFSCORRUPTED;
+		}
 		udf_debug("unallocSpaceBitmap (part %d) @ %u\n",
 			  p_index, bitmap->s_extPosition);
 	}
@@ -2307,7 +2329,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
 		ret = -ENOMEM;
 		goto error_out;
 	}
-	sb->s_maxbytes = MAX_LFS_FILESIZE;
+	sb->s_maxbytes = UDF_MAX_FILESIZE;
 	sb->s_max_links = UDF_MAX_LINKS;
 	return 0;

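Both new UDF validations lean on check_add_overflow(a, b, &sum) from <linux/overflow.h>: it computes a + b into sum and returns true only when the addition wrapped, so a corrupt partition descriptor is rejected before the bogus sum can feed later block arithmetic. A rough illustration with made-up numbers (not from the filesystem code):

    u32 sum;

    /* 4000000000 + 500000000 exceeds U32_MAX (4294967295), so this
     * returns true; sum holds the wrapped value, which we discard. */
    if (check_add_overflow(4000000000u, 500000000u, &sum))
            return -EFSCORRUPTED;
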
@@ -135,8 +135,7 @@ void ring_buffer_read_finish(struct ring_buffer_iter *iter);

 struct ring_buffer_event *
 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts);
-struct ring_buffer_event *
-ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts);
+void ring_buffer_iter_advance(struct ring_buffer_iter *iter);
 void ring_buffer_iter_reset(struct ring_buffer_iter *iter);
 int ring_buffer_iter_empty(struct ring_buffer_iter *iter);

@@ -351,8 +351,13 @@ struct pernet_operations {
 	 * synchronize_rcu() related to these pernet_operations,
 	 * instead of separate synchronize_rcu() for every net.
 	 * Please, avoid synchronize_rcu() at all, where it's possible.
+	 *
+	 * Note that a combination of pre_exit() and exit() can
+	 * be used, since a synchronize_rcu() is guaranteed between
+	 * the calls.
 	 */
 	int (*init)(struct net *net);
+	void (*pre_exit)(struct net *net);
 	void (*exit)(struct net *net);
 	void (*exit_batch)(struct list_head *net_exit_list);
 	unsigned int *id;

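The comment above is the whole contract: the core runs a synchronize_rcu() between the pre_exit() pass and the exit() pass, so pre_exit() can unpublish per-net state (unregister hooks, stop new lookups) and exit() can free that state without racing RCU readers. A hedged sketch of a user, with hypothetical demo_* names (the real user in this series is ila, further down):

    static __net_exit void demo_pre_exit_net(struct net *net)
    {
            /* stop new work, e.g. nf_unregister_net_hooks(net, ...) */
    }

    static __net_exit void demo_exit_net(struct net *net)
    {
            /* an RCU grace period has passed; safe to free per-net state */
    }

    static struct pernet_operations demo_net_ops = {
            .pre_exit = demo_pre_exit_net,
            .exit     = demo_exit_net,
    };
    /* registered as usual, e.g. register_pernet_subsys(&demo_net_ops) */
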
@@ -155,7 +155,8 @@ struct switchdev_notifier_fdb_info {
 	struct switchdev_notifier_info info; /* must be first */
 	const unsigned char *addr;
 	u16 vid;
-	bool added_by_user;
+	u8 added_by_user:1,
+	   offloaded:1;
 };

 static inline struct net_device *

@@ -43,6 +43,7 @@ enum {
 #define NTF_PROXY	0x08	/* == ATF_PUBL */
 #define NTF_EXT_LEARNED	0x10
 #define NTF_OFFLOADED	0x20
+#define NTF_STICKY	0x40
 #define NTF_ROUTER	0x80

 /*

@@ -1762,9 +1762,9 @@ int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
 		RCU_INIT_POINTER(scgrp->subsys[ssid], NULL);
 		rcu_assign_pointer(dcgrp->subsys[ssid], css);
 		ss->root = dst_root;
-		css->cgroup = dcgrp;

 		spin_lock_irq(&css_set_lock);
+		css->cgroup = dcgrp;
 		WARN_ON(!list_empty(&dcgrp->e_csets[ss->id]));
 		list_for_each_entry_safe(cset, cset_pos, &scgrp->e_csets[ss->id],
 					 e_cset_node[ss->id]) {

@@ -1187,7 +1187,7 @@ static struct xol_area *__create_xol_area(unsigned long vaddr)
 	uprobe_opcode_t insn = UPROBE_SWBP_INSN;
 	struct xol_area *area;

-	area = kmalloc(sizeof(*area), GFP_KERNEL);
+	area = kzalloc(sizeof(*area), GFP_KERNEL);
 	if (unlikely(!area))
 		goto out;

@@ -1197,7 +1197,6 @@ static struct xol_area *__create_xol_area(unsigned long vaddr)
 		goto free_area;

 	area->xol_mapping.name = "[uprobes]";
-	area->xol_mapping.fault = NULL;
 	area->xol_mapping.pages = area->pages;
 	area->pages[0] = alloc_page(GFP_HIGHUSER);
 	if (!area->pages[0])

@@ -1205,6 +1205,7 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
 }

 static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
+				     struct rt_mutex *lock,
 				     struct rt_mutex_waiter *w)
 {
 	/*
@@ -1214,6 +1215,7 @@ static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
 	if (res != -EDEADLOCK || detect_deadlock)
 		return;

+	raw_spin_unlock_irq(&lock->wait_lock);
 	/*
 	 * Yell lowdly and stop the task right here.
 	 */
@@ -1269,7 +1271,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 	if (unlikely(ret)) {
 		__set_current_state(TASK_RUNNING);
 		remove_waiter(lock, &waiter);
-		rt_mutex_handle_deadlock(ret, chwalk, &waiter);
+		rt_mutex_handle_deadlock(ret, chwalk, lock, &waiter);
 	}

 	/*

@@ -824,6 +824,7 @@ int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)

 	queue_work_on(cpu, system_wq, &sscs.work);
 	wait_for_completion(&sscs.done);
+	destroy_work_on_stack(&sscs.work);

 	return sscs.ret;
 }

@@ -4379,35 +4379,24 @@ ring_buffer_read_finish(struct ring_buffer_iter *iter)
 EXPORT_SYMBOL_GPL(ring_buffer_read_finish);

 /**
- * ring_buffer_read - read the next item in the ring buffer by the iterator
+ * ring_buffer_iter_advance - advance the iterator to the next location
  * @iter: The ring buffer iterator
- * @ts: The time stamp of the event read.
  *
- * This reads the next event in the ring buffer and increments the iterator.
+ * Move the location of the iterator such that the next read will
+ * be the next location of the iterator.
  */
-struct ring_buffer_event *
-ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
+void ring_buffer_iter_advance(struct ring_buffer_iter *iter)
 {
-	struct ring_buffer_event *event;
 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
 	unsigned long flags;

 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
- again:
-	event = rb_iter_peek(iter, ts);
-	if (!event)
-		goto out;
-
-	if (event->type_len == RINGBUF_TYPE_PADDING)
-		goto again;
-
 	rb_advance_iter(iter);
- out:
-	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

-	return event;
+	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 }
-EXPORT_SYMBOL_GPL(ring_buffer_read);
+EXPORT_SYMBOL_GPL(ring_buffer_iter_advance);

 /**
  * ring_buffer_size - return the size of the ring buffer (in bytes)

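Callers of the removed ring_buffer_read() now split it into a peek plus an explicit advance, as the trace.c hunks below show. A minimal sketch of the consuming pattern, assuming a valid iterator (illustrative, not copied from the tracer):

    struct ring_buffer_event *event;
    u64 ts;

    event = ring_buffer_iter_peek(iter, &ts);
    if (event) {
            /* process the event, then move the iterator forward */
            ring_buffer_iter_advance(iter);
    }
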
@@ -3096,7 +3096,7 @@ static void trace_iterator_increment(struct trace_iterator *iter)

 	iter->idx++;
 	if (buf_iter)
-		ring_buffer_read(buf_iter, NULL);
+		ring_buffer_iter_advance(buf_iter);
 }

 static struct trace_entry *
@@ -3256,7 +3256,9 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu)
 		if (ts >= iter->trace_buffer->time_start)
 			break;
 		entries++;
-		ring_buffer_read(buf_iter, NULL);
+		ring_buffer_iter_advance(buf_iter);
+		/* This could be a big loop */
+		cond_resched();
 	}

 	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;

@@ -726,7 +726,7 @@ get_return_for_leaf(struct trace_iterator *iter,

 	/* this is a leaf, now advance the iterator */
 	if (ring_iter)
-		ring_buffer_read(ring_iter, NULL);
+		ring_buffer_iter_advance(ring_iter);

 	return next;
 }

@@ -151,7 +151,7 @@ static int br_switchdev_event(struct notifier_block *unused,
 			break;
 		}
 		br_fdb_offloaded_set(br, p, fdb_info->addr,
-				     fdb_info->vid);
+				     fdb_info->vid, true);
 		break;
 	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
 		fdb_info = ptr;
@@ -163,7 +163,7 @@ static int br_switchdev_event(struct notifier_block *unused,
 	case SWITCHDEV_FDB_OFFLOADED:
 		fdb_info = ptr;
 		br_fdb_offloaded_set(br, p, fdb_info->addr,
-				     fdb_info->vid);
+				     fdb_info->vid, fdb_info->offloaded);
 		break;
 	}

@@ -80,7 +80,8 @@ static inline unsigned long hold_time(const struct net_bridge *br)
 static inline int has_expired(const struct net_bridge *br,
 			      const struct net_bridge_fdb_entry *fdb)
 {
-	return !fdb->is_static && !fdb->added_by_external_learn &&
+	return !test_bit(BR_FDB_STATIC, &fdb->flags) &&
+	       !test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags) &&
 		time_before_eq(fdb->updated + hold_time(br), jiffies);
 }

@@ -202,7 +203,7 @@ static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f,
 {
 	trace_fdb_delete(br, f);

-	if (f->is_static)
+	if (test_bit(BR_FDB_STATIC, &f->flags))
 		fdb_del_hw_addr(br, f->key.addr.addr);

 	hlist_del_init_rcu(&f->fdb_node);
@@ -229,7 +230,7 @@ static void fdb_delete_local(struct net_bridge *br,
 		if (op != p && ether_addr_equal(op->dev->dev_addr, addr) &&
 		    (!vid || br_vlan_find(vg, vid))) {
 			f->dst = op;
-			f->added_by_user = 0;
+			clear_bit(BR_FDB_ADDED_BY_USER, &f->flags);
 			return;
 		}
 	}
@@ -240,7 +241,7 @@ static void fdb_delete_local(struct net_bridge *br,
 	if (p && ether_addr_equal(br->dev->dev_addr, addr) &&
 	    (!vid || (v && br_vlan_should_use(v)))) {
 		f->dst = NULL;
-		f->added_by_user = 0;
+		clear_bit(BR_FDB_ADDED_BY_USER, &f->flags);
 		return;
 	}

@@ -255,7 +256,8 @@ void br_fdb_find_delete_local(struct net_bridge *br,

 	spin_lock_bh(&br->hash_lock);
 	f = br_fdb_find(br, addr, vid);
-	if (f && f->is_local && !f->added_by_user && f->dst == p)
+	if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
+	    !test_bit(BR_FDB_ADDED_BY_USER, &f->flags) && f->dst == p)
 		fdb_delete_local(br, p, f);
 	spin_unlock_bh(&br->hash_lock);
 }
@@ -270,7 +272,8 @@ void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
 	spin_lock_bh(&br->hash_lock);
 	vg = nbp_vlan_group(p);
 	hlist_for_each_entry(f, &br->fdb_list, fdb_node) {
-		if (f->dst == p && f->is_local && !f->added_by_user) {
+		if (f->dst == p && test_bit(BR_FDB_LOCAL, &f->flags) &&
+		    !test_bit(BR_FDB_ADDED_BY_USER, &f->flags)) {
 			/* delete old one */
 			fdb_delete_local(br, p, f);

@@ -311,7 +314,8 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)

 	/* If old entry was unassociated with any port, then delete it. */
 	f = br_fdb_find(br, br->dev->dev_addr, 0);
-	if (f && f->is_local && !f->dst && !f->added_by_user)
+	if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
+	    !f->dst && !test_bit(BR_FDB_ADDED_BY_USER, &f->flags))
 		fdb_delete_local(br, NULL, f);

 	fdb_insert(br, NULL, newaddr, 0);
@@ -326,7 +330,8 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
 		if (!br_vlan_should_use(v))
 			continue;
 		f = br_fdb_find(br, br->dev->dev_addr, v->vid);
-		if (f && f->is_local && !f->dst && !f->added_by_user)
+		if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
+		    !f->dst && !test_bit(BR_FDB_ADDED_BY_USER, &f->flags))
 			fdb_delete_local(br, NULL, f);
 		fdb_insert(br, NULL, newaddr, v->vid);
 	}
@@ -351,7 +356,8 @@ void br_fdb_cleanup(struct work_struct *work)
 	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
 		unsigned long this_timer;

-		if (f->is_static || f->added_by_external_learn)
+		if (test_bit(BR_FDB_STATIC, &f->flags) ||
+		    test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &f->flags))
 			continue;
 		this_timer = f->updated + delay;
 		if (time_after(this_timer, now)) {
@@ -378,7 +384,7 @@ void br_fdb_flush(struct net_bridge *br)

 	spin_lock_bh(&br->hash_lock);
 	hlist_for_each_entry_safe(f, tmp, &br->fdb_list, fdb_node) {
-		if (!f->is_static)
+		if (!test_bit(BR_FDB_STATIC, &f->flags))
 			fdb_delete(br, f, true);
 	}
 	spin_unlock_bh(&br->hash_lock);
@@ -402,10 +408,11 @@ void br_fdb_delete_by_port(struct net_bridge *br,
 			continue;

 		if (!do_all)
-			if (f->is_static || (vid && f->key.vlan_id != vid))
+			if (test_bit(BR_FDB_STATIC, &f->flags) ||
+			    (vid && f->key.vlan_id != vid))
 				continue;

-		if (f->is_local)
+		if (test_bit(BR_FDB_LOCAL, &f->flags))
 			fdb_delete_local(br, p, f);
 		else
 			fdb_delete(br, f, true);
@@ -474,8 +481,8 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf,
 			fe->port_no = f->dst->port_no;
 			fe->port_hi = f->dst->port_no >> 8;

-			fe->is_local = f->is_local;
-			if (!f->is_static)
+			fe->is_local = test_bit(BR_FDB_LOCAL, &f->flags);
+			if (!test_bit(BR_FDB_STATIC, &f->flags))
 				fe->ageing_timer_value = jiffies_delta_to_clock_t(jiffies - f->updated);
 			++fe;
 			++num;
@@ -499,10 +506,11 @@ static struct net_bridge_fdb_entry *fdb_create(struct net_bridge *br,
 		memcpy(fdb->key.addr.addr, addr, ETH_ALEN);
 		fdb->dst = source;
 		fdb->key.vlan_id = vid;
-		fdb->is_local = is_local;
-		fdb->is_static = is_static;
-		fdb->added_by_user = 0;
-		fdb->added_by_external_learn = 0;
+		fdb->flags = 0;
+		if (is_local)
+			set_bit(BR_FDB_LOCAL, &fdb->flags);
+		if (is_static)
+			set_bit(BR_FDB_STATIC, &fdb->flags);
 		fdb->offloaded = 0;
 		fdb->updated = fdb->used = jiffies;
 		if (rhashtable_lookup_insert_fast(&br->fdb_hash_tbl,
@@ -530,7 +538,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
 		/* it is okay to have multiple ports with same
 		 * address, just use the first one.
 		 */
-		if (fdb->is_local)
+		if (test_bit(BR_FDB_LOCAL, &fdb->flags))
 			return 0;
 		br_warn(br, "adding interface %s with same address as a received packet (addr:%pM, vlan:%u)\n",
 		       source ? source->dev->name : br->dev->name, addr, vid);
@@ -576,7 +584,7 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
 	fdb = fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
 	if (likely(fdb)) {
 		/* attempt to update an entry for a local interface */
-		if (unlikely(fdb->is_local)) {
+		if (unlikely(test_bit(BR_FDB_LOCAL, &fdb->flags))) {
 			if (net_ratelimit())
 				br_warn(br, "received packet on %s with own address as source address (addr:%pM, vlan:%u)\n",
 					source->dev->name, addr, vid);
@@ -584,17 +592,18 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
 			unsigned long now = jiffies;

 			/* fastpath: update of existing entry */
-			if (unlikely(source != fdb->dst)) {
+			if (unlikely(source != fdb->dst &&
+				     !test_bit(BR_FDB_STICKY, &fdb->flags))) {
 				fdb->dst = source;
 				fdb_modified = true;
 				/* Take over HW learned entry */
-				if (unlikely(fdb->added_by_external_learn))
-					fdb->added_by_external_learn = 0;
+				test_and_clear_bit(BR_FDB_ADDED_BY_EXT_LEARN,
+						   &fdb->flags);
 			}
 			if (now != fdb->updated)
 				fdb->updated = now;
 			if (unlikely(added_by_user))
-				fdb->added_by_user = 1;
+				set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
 			if (unlikely(fdb_modified)) {
 				trace_br_fdb_update(br, source, addr, vid, added_by_user);
 				fdb_notify(br, fdb, RTM_NEWNEIGH, true);
@@ -605,7 +614,7 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
 		fdb = fdb_create(br, source, addr, vid, 0, 0);
 		if (fdb) {
 			if (unlikely(added_by_user))
-				fdb->added_by_user = 1;
+				set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
 			trace_br_fdb_update(br, source, addr, vid,
 					    added_by_user);
 			fdb_notify(br, fdb, RTM_NEWNEIGH, true);
@@ -620,9 +629,9 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
 static int fdb_to_nud(const struct net_bridge *br,
 		      const struct net_bridge_fdb_entry *fdb)
 {
-	if (fdb->is_local)
+	if (test_bit(BR_FDB_LOCAL, &fdb->flags))
 		return NUD_PERMANENT;
-	else if (fdb->is_static)
+	else if (test_bit(BR_FDB_STATIC, &fdb->flags))
 		return NUD_NOARP;
 	else if (has_expired(br, fdb))
 		return NUD_STALE;
@@ -654,8 +663,10 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,

 	if (fdb->offloaded)
 		ndm->ndm_flags |= NTF_OFFLOADED;
-	if (fdb->added_by_external_learn)
+	if (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
 		ndm->ndm_flags |= NTF_EXT_LEARNED;
+	if (test_bit(BR_FDB_STICKY, &fdb->flags))
+		ndm->ndm_flags |= NTF_STICKY;

 	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->key.addr))
 		goto nla_put_failure;
@@ -772,8 +783,10 @@ int br_fdb_dump(struct sk_buff *skb,

 /* Update (create or replace) forwarding database entry */
 static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
-			 const __u8 *addr, __u16 state, __u16 flags, __u16 vid)
+			 const u8 *addr, u16 state, u16 flags, u16 vid,
+			 u8 ndm_flags)
 {
+	bool is_sticky = !!(ndm_flags & NTF_STICKY);
 	struct net_bridge_fdb_entry *fdb;
 	bool modified = false;

@@ -789,6 +802,9 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
 		return -EINVAL;
 	}

+	if (is_sticky && (state & NUD_PERMANENT))
+		return -EINVAL;
+
 	fdb = br_fdb_find(br, addr, vid);
 	if (fdb == NULL) {
 		if (!(flags & NLM_F_CREATE))
@@ -811,28 +827,28 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,

 	if (fdb_to_nud(br, fdb) != state) {
 		if (state & NUD_PERMANENT) {
-			fdb->is_local = 1;
-			if (!fdb->is_static) {
-				fdb->is_static = 1;
+			set_bit(BR_FDB_LOCAL, &fdb->flags);
+			if (!test_and_set_bit(BR_FDB_STATIC, &fdb->flags))
 				fdb_add_hw_addr(br, addr);
-			}
 		} else if (state & NUD_NOARP) {
-			fdb->is_local = 0;
-			if (!fdb->is_static) {
-				fdb->is_static = 1;
+			clear_bit(BR_FDB_LOCAL, &fdb->flags);
+			if (!test_and_set_bit(BR_FDB_STATIC, &fdb->flags))
 				fdb_add_hw_addr(br, addr);
-			}
 		} else {
-			fdb->is_local = 0;
-			if (fdb->is_static) {
-				fdb->is_static = 0;
+			clear_bit(BR_FDB_LOCAL, &fdb->flags);
+			if (test_and_clear_bit(BR_FDB_STATIC, &fdb->flags))
 				fdb_del_hw_addr(br, addr);
-			}
 		}

 		modified = true;
 	}
-	fdb->added_by_user = 1;
+
+	if (is_sticky != test_bit(BR_FDB_STICKY, &fdb->flags)) {
+		change_bit(BR_FDB_STICKY, &fdb->flags);
+		modified = true;
+	}
+
+	set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);

 	fdb->used = jiffies;
 	if (modified) {
@@ -865,7 +881,7 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
 	} else {
 		spin_lock_bh(&br->hash_lock);
 		err = fdb_add_entry(br, p, addr, ndm->ndm_state,
-				    nlh_flags, vid);
+				    nlh_flags, vid, ndm->ndm_flags);
 		spin_unlock_bh(&br->hash_lock);
 	}

@@ -1028,7 +1044,7 @@ int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p)
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
 		/* We only care for static entries */
-		if (!f->is_static)
+		if (!test_bit(BR_FDB_STATIC, &f->flags))
 			continue;
 		err = dev_uc_add(p->dev, f->key.addr.addr);
 		if (err)
@@ -1042,7 +1058,7 @@ int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p)
 rollback:
 	hlist_for_each_entry_rcu(tmp, &br->fdb_list, fdb_node) {
 		/* We only care for static entries */
-		if (!tmp->is_static)
+		if (!test_bit(BR_FDB_STATIC, &tmp->flags))
 			continue;
 		if (tmp == f)
 			break;
@@ -1061,7 +1077,7 @@ void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p)
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
 		/* We only care for static entries */
-		if (!f->is_static)
+		if (!test_bit(BR_FDB_STATIC, &f->flags))
 			continue;

 		dev_uc_del(p->dev, f->key.addr.addr);
@@ -1089,8 +1105,8 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
 			goto err_unlock;
 		}
 		if (swdev_notify)
-			fdb->added_by_user = 1;
-		fdb->added_by_external_learn = 1;
+			set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
+		set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags);
 		fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
 	} else {
 		fdb->updated = jiffies;
@@ -1100,17 +1116,15 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
 			modified = true;
 		}

-		if (fdb->added_by_external_learn) {
+		if (test_and_set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) {
 			/* Refresh entry */
 			fdb->used = jiffies;
-		} else if (!fdb->added_by_user) {
-			/* Take over SW learned entry */
-			fdb->added_by_external_learn = 1;
+		} else {
 			modified = true;
 		}

 		if (swdev_notify)
-			fdb->added_by_user = 1;
+			set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);

 		if (modified)
 			fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
@@ -1132,7 +1146,7 @@ int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
 	spin_lock_bh(&br->hash_lock);

 	fdb = br_fdb_find(br, addr, vid);
-	if (fdb && fdb->added_by_external_learn)
+	if (fdb && test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
 		fdb_delete(br, fdb, swdev_notify);
 	else
 		err = -ENOENT;
@@ -1143,7 +1157,7 @@ int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
 }

 void br_fdb_offloaded_set(struct net_bridge *br, struct net_bridge_port *p,
-			  const unsigned char *addr, u16 vid)
+			  const unsigned char *addr, u16 vid, bool offloaded)
 {
 	struct net_bridge_fdb_entry *fdb;

@@ -1151,7 +1165,7 @@ void br_fdb_offloaded_set(struct net_bridge *br, struct net_bridge_port *p,

 	fdb = br_fdb_find(br, addr, vid);
 	if (fdb)
-		fdb->offloaded = 1;
+		fdb->offloaded = offloaded;

 	spin_unlock_bh(&br->hash_lock);
 }

@@ -163,7 +163,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
 	if (dst) {
 		unsigned long now = jiffies;

-		if (dst->is_local)
+		if (test_bit(BR_FDB_LOCAL, &dst->flags))
 			return br_pass_frame_up(skb);

 		if (now != dst->used)

@@ -168,6 +168,15 @@ struct net_bridge_vlan_group {
 	u16 pvid;
 };

+/* bridge fdb flags */
+enum {
+	BR_FDB_LOCAL,
+	BR_FDB_STATIC,
+	BR_FDB_STICKY,
+	BR_FDB_ADDED_BY_USER,
+	BR_FDB_ADDED_BY_EXT_LEARN,
+};
+
 struct net_bridge_fdb_key {
 	mac_addr addr;
 	u16 vlan_id;
@@ -179,11 +188,8 @@ struct net_bridge_fdb_entry {

 	struct net_bridge_fdb_key key;
 	struct hlist_node fdb_node;
-	unsigned char is_local:1,
-		      is_static:1,
-		      added_by_user:1,
-		      added_by_external_learn:1,
-		      offloaded:1;
+	unsigned long flags;
+	unsigned char offloaded:1;

 	/* write-heavy members should not affect lookups */
 	unsigned long updated ____cacheline_aligned_in_smp;
@@ -564,7 +570,7 @@ int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
 			      const unsigned char *addr, u16 vid,
 			      bool swdev_notify);
 void br_fdb_offloaded_set(struct net_bridge *br, struct net_bridge_port *p,
-			  const unsigned char *addr, u16 vid);
+			  const unsigned char *addr, u16 vid, bool offloaded);

 /* br_forward.c */
 enum br_pkt_type {

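The point of replacing the bitfields with one unsigned long plus the enum above is atomicity: set_bit()/clear_bit()/test_bit() are atomic bitops, so fdb flags can be flipped from concurrent contexts without taking the bridge hash lock, and test_and_set_bit() folds check-then-set into a single step (as fdb_add_entry() now does). A standalone sketch of the idiom, with illustrative demo_* names:

    enum { DEMO_LOCAL, DEMO_STATIC };       /* bit indexes */

    struct demo_entry {
            unsigned long flags;            /* bitops operate on this word */
    };

    static void demo(struct demo_entry *e)
    {
            set_bit(DEMO_STATIC, &e->flags);
            if (!test_and_set_bit(DEMO_LOCAL, &e->flags))
                    ;       /* bit was clear before: first setter wins */
            clear_bit(DEMO_STATIC, &e->flags);
    }
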
@@ -103,7 +103,7 @@ int br_switchdev_set_port_flag(struct net_bridge_port *p,
 static void
 br_switchdev_fdb_call_notifiers(bool adding, const unsigned char *mac,
 				u16 vid, struct net_device *dev,
-				bool added_by_user)
+				bool added_by_user, bool offloaded)
 {
 	struct switchdev_notifier_fdb_info info;
 	unsigned long notifier_type;
@@ -111,6 +111,7 @@ br_switchdev_fdb_call_notifiers(bool adding, const unsigned char *mac,
 	info.addr = mac;
 	info.vid = vid;
 	info.added_by_user = added_by_user;
+	info.offloaded = offloaded;
 	notifier_type = adding ? SWITCHDEV_FDB_ADD_TO_DEVICE : SWITCHDEV_FDB_DEL_TO_DEVICE;
 	call_switchdev_notifiers(notifier_type, dev, &info.info);
 }
@@ -126,13 +127,17 @@ br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
 		br_switchdev_fdb_call_notifiers(false, fdb->key.addr.addr,
 						fdb->key.vlan_id,
 						fdb->dst->dev,
-						fdb->added_by_user);
+						test_bit(BR_FDB_ADDED_BY_USER,
+							 &fdb->flags),
+						fdb->offloaded);
 		break;
 	case RTM_NEWNEIGH:
 		br_switchdev_fdb_call_notifiers(true, fdb->key.addr.addr,
 						fdb->key.vlan_id,
 						fdb->dst->dev,
-						fdb->added_by_user);
+						test_bit(BR_FDB_ADDED_BY_USER,
+							 &fdb->flags),
+						fdb->offloaded);
 		break;
 	}
 }

@@ -1422,6 +1422,10 @@ static void bcm_notify(struct bcm_sock *bo, unsigned long msg,

 		/* remove device reference, if this is our bound device */
 		if (bo->bound && bo->ifindex == dev->ifindex) {
+#if IS_ENABLED(CONFIG_PROC_FS)
+			if (sock_net(sk)->can.bcmproc_dir && bo->bcm_proc_read)
+				remove_proc_entry(bo->procname, sock_net(sk)->can.bcmproc_dir);
+#endif
 			bo->bound   = 0;
 			bo->ifindex = 0;
 			notify_enodev = 1;

@@ -154,6 +154,17 @@ static void ops_free(const struct pernet_operations *ops, struct net *net)
}
}

static void ops_pre_exit_list(const struct pernet_operations *ops,
struct list_head *net_exit_list)
{
struct net *net;

if (ops->pre_exit) {
list_for_each_entry(net, net_exit_list, exit_list)
ops->pre_exit(net);
}
}

static void ops_exit_list(const struct pernet_operations *ops,
struct list_head *net_exit_list)
{

@@ -341,6 +352,12 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
*/
list_add(&net->exit_list, &net_exit_list);
saved_ops = ops;
list_for_each_entry_continue_reverse(ops, &pernet_list, list)
ops_pre_exit_list(ops, &net_exit_list);

synchronize_rcu();

ops = saved_ops;
list_for_each_entry_continue_reverse(ops, &pernet_list, list)
ops_exit_list(ops, &net_exit_list);

@@ -554,10 +571,15 @@ static void cleanup_net(struct work_struct *work)
list_add_tail(&net->exit_list, &net_exit_list);
}

/* Run all of the network namespace pre_exit methods */
list_for_each_entry_reverse(ops, &pernet_list, list)
ops_pre_exit_list(ops, &net_exit_list);

/*
* Another CPU might be rcu-iterating the list, wait for it.
* This needs to be before calling the exit() notifiers, so
* the rcu_barrier() below isn't sufficient alone.
* Also the pre_exit() and exit() methods need this barrier.
*/
synchronize_rcu();

@@ -977,6 +999,8 @@ static int __register_pernet_operations(struct list_head *list,
out_undo:
/* If I have an error cleanup all namespaces I initialized */
list_del(&ops->list);
ops_pre_exit_list(ops, &net_exit_list);
synchronize_rcu();
ops_exit_list(ops, &net_exit_list);
ops_free_list(ops, &net_exit_list);
return error;

@@ -991,6 +1015,8 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
/* See comment in __register_pernet_operations() */
for_each_net(net)
list_add_tail(&net->exit_list, &net_exit_list);
ops_pre_exit_list(ops, &net_exit_list);
synchronize_rcu();
ops_exit_list(ops, &net_exit_list);
ops_free_list(ops, &net_exit_list);
}

@@ -1015,6 +1041,8 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
} else {
LIST_HEAD(net_exit_list);
list_add(&init_net.exit_list, &net_exit_list);
ops_pre_exit_list(ops, &net_exit_list);
synchronize_rcu();
ops_exit_list(ops, &net_exit_list);
ops_free_list(ops, &net_exit_list);
}
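With ops_pre_exit_list() in place, namespace teardown becomes a three-step sequence: every pre_exit() method runs first, then one synchronize_rcu(), then every exit() method. A subsystem can therefore detach itself from packet-processing paths in pre_exit() and free its state in exit(), knowing no RCU reader can still see the detached hooks. A minimal sketch of a pernet_operations using the new method; the foo_* names are hypothetical:

#include <net/net_namespace.h>

static __net_init int foo_init_net(struct net *net)
{
	/* allocate per-namespace state, register hooks */
	return 0;
}

static __net_exit void foo_pre_exit_net(struct net *net)
{
	/* phase 1: stop new work arriving, e.g. unregister netfilter
	 * hooks or notifiers; readers may still be in flight */
}

static __net_exit void foo_exit_net(struct net *net)
{
	/* phase 2: runs after the core's synchronize_rcu(), so it is
	 * now safe to free the state those readers were using */
}

static struct pernet_operations foo_net_ops = {
	.init	  = foo_init_net,
	.pre_exit = foo_pre_exit_net,
	.exit	  = foo_exit_net,
};

The ila hunks further down in this diff follow exactly this split: the netfilter hooks are unregistered in pre_exit, and the rhashtable is freed in exit.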
@@ -1464,6 +1464,7 @@ static void dsa_slave_switchdev_event_work(struct work_struct *work)
netdev_dbg(dev, "fdb add failed err=%d\n", err);
break;
}
fdb_info->offloaded = true;
call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev,
&fdb_info->info);
break;
@@ -118,6 +118,7 @@ int ila_lwt_init(void);
void ila_lwt_fini(void);

int ila_xlat_init_net(struct net *net);
void ila_xlat_pre_exit_net(struct net *net);
void ila_xlat_exit_net(struct net *net);

int ila_xlat_nl_cmd_add_mapping(struct sk_buff *skb, struct genl_info *info);

@@ -70,6 +70,11 @@ static __net_init int ila_init_net(struct net *net)
return err;
}

static __net_exit void ila_pre_exit_net(struct net *net)
{
ila_xlat_pre_exit_net(net);
}

static __net_exit void ila_exit_net(struct net *net)
{
ila_xlat_exit_net(net);

@@ -77,6 +82,7 @@ static __net_exit void ila_exit_net(struct net *net)

static struct pernet_operations ila_net_ops = {
.init = ila_init_net,
.pre_exit = ila_pre_exit_net,
.exit = ila_exit_net,
.id = &ila_net_id,
.size = sizeof(struct ila_net),

@@ -627,6 +627,15 @@ int ila_xlat_init_net(struct net *net)
return 0;
}

void ila_xlat_pre_exit_net(struct net *net)
{
struct ila_net *ilan = net_generic(net, ila_net_id);

if (ilan->xlat.hooks_registered)
nf_unregister_net_hooks(net, ila_nf_hook_ops,
ARRAY_SIZE(ila_nf_hook_ops));
}

void ila_xlat_exit_net(struct net *net)
{
struct ila_net *ilan = net_generic(net, ila_net_id);

@@ -634,10 +643,6 @@ void ila_xlat_exit_net(struct net *net)
rhashtable_free_and_destroy(&ilan->xlat.rhash_table, ila_free_cb, NULL);

free_bucket_spinlocks(ilan->xlat.locks);

if (ilan->xlat.hooks_registered)
nf_unregister_net_hooks(net, ila_nf_hook_ops,
ARRAY_SIZE(ila_nf_hook_ops));
}

static int ila_xlat_addr(struct sk_buff *skb, bool sir2ila)
@@ -309,7 +309,6 @@ insert_tree(struct net *net,
struct nf_conncount_rb *rbconn;
struct nf_conncount_tuple *conn;
unsigned int count = 0, gc_count = 0;
u8 keylen = data->keylen;
bool do_gc = true;

spin_lock_bh(&nf_conncount_locks[hash]);

@@ -321,7 +320,7 @@ insert_tree(struct net *net,
rbconn = rb_entry(*rbnode, struct nf_conncount_rb, node);

parent = *rbnode;
diff = key_diff(key, rbconn->key, keylen);
diff = key_diff(key, rbconn->key, data->keylen);
if (diff < 0) {
rbnode = &((*rbnode)->rb_left);
} else if (diff > 0) {

@@ -366,7 +365,7 @@ insert_tree(struct net *net,
conn->tuple = *tuple;
conn->zone = *zone;
memcpy(rbconn->key, key, sizeof(u32) * keylen);
memcpy(rbconn->key, key, sizeof(u32) * data->keylen);

nf_conncount_list_init(&rbconn->list);
list_add(&conn->node, &rbconn->list.head);

@@ -391,7 +390,6 @@ count_tree(struct net *net,
struct rb_node *parent;
struct nf_conncount_rb *rbconn;
unsigned int hash;
u8 keylen = data->keylen;

hash = jhash2(key, data->keylen, conncount_rnd) % CONNCOUNT_SLOTS;
root = &data->root[hash];

@@ -402,7 +400,7 @@ count_tree(struct net *net,
rbconn = rb_entry(parent, struct nf_conncount_rb, node);

diff = key_diff(key, rbconn->key, keylen);
diff = key_diff(key, rbconn->key, data->keylen);
if (diff < 0) {
parent = rcu_dereference_raw(parent->rb_left);
} else if (diff > 0) {
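insert_tree() and count_tree() must hash and compare with one agreed key length; the hunks above drop the locally cached keylen variables in favour of reading data->keylen at each use, so the two call sites cannot drift apart. A stand-alone sketch of the lookup arithmetic they share (the slot count and function names here are illustrative only):

#include <linux/jhash.h>
#include <linux/string.h>
#include <linux/types.h>

#define SLOTS 256U	/* stands in for CONNCOUNT_SLOTS */

/* Pick the tree: jhash2() over keylen u32 words, as in count_tree(). */
static unsigned int slot_for_key(const u32 *key, u8 keylen, u32 seed)
{
	return jhash2(key, keylen, seed) % SLOTS;
}

/* Order within the tree: byte comparison over the same keylen words,
 * the role key_diff() plays in the hunks above. */
static int key_cmp(const u32 *a, const u32 *b, u8 keylen)
{
	return memcmp(a, b, sizeof(u32) * keylen);
}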
@@ -510,8 +510,8 @@ void rfkill_remove_epo_lock(void)
/**
* rfkill_is_epo_lock_active - returns true EPO is active
*
* Returns 0 (false) if there is NOT an active EPO contidion,
* and 1 (true) if there is an active EPO contition, which
* Returns 0 (false) if there is NOT an active EPO condition,
* and 1 (true) if there is an active EPO condition, which
* locks all radios in one of the BLOCKED states.
*
* Can be called in atomic context.
@@ -697,11 +697,10 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)

err = qdisc_enqueue(skb, q->qdisc, &to_free);
kfree_skb_list(to_free);
if (err != NET_XMIT_SUCCESS &&
net_xmit_drop_count(err)) {
if (err != NET_XMIT_SUCCESS) {
if (net_xmit_drop_count(err))
qdisc_qstats_drop(sch);
qdisc_tree_reduce_backlog(sch, 1,
pkt_len);
qdisc_tree_reduce_backlog(sch, 1, pkt_len);
}
goto tfifo_dequeue;
}
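The restructured branch separates two pieces of bookkeeping that the old condition conflated: the parent qdisc must shrink its backlog for every failed child enqueue, while the drop counter should move only when net_xmit_drop_count() classifies the return value as a genuine drop (it reports 0 for __NET_XMIT_STOLEN results, where the packet was consumed elsewhere rather than lost). A sketch of that rule in isolation, assuming those helper semantics and a hypothetical wrapper name:

#include <net/sch_generic.h>

static void account_child_enqueue(struct Qdisc *sch, int err,
				  unsigned int pkt_len)
{
	if (err == NET_XMIT_SUCCESS)
		return;
	/* count real drops only; stolen packets were accounted by the
	 * action that consumed them */
	if (net_xmit_drop_count(err))
		qdisc_qstats_drop(sch);
	/* but the packet left this qdisc either way */
	qdisc_tree_reduce_backlog(sch, 1, pkt_len);
}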
@@ -2466,6 +2466,13 @@ static void xs_tcp_setup_socket(struct work_struct *work)
case -EALREADY:
xprt_unlock_connect(xprt, transport);
return;
case -EPERM:
/* Happens, for instance, if a BPF program is preventing
* the connect. Remap the error so upper layers can better
* deal with it.
*/
status = -ECONNREFUSED;
/* fall through */
case -EINVAL:
/* Happens, for instance, if the user specified a link
* local IPv6 address without a scope-id.
@@ -610,9 +610,6 @@ static void init_peercred(struct sock *sk)

static void copy_peercred(struct sock *sk, struct sock *peersk)
{
const struct cred *old_cred;
struct pid *old_pid;

if (sk < peersk) {
spin_lock(&sk->sk_peer_lock);
spin_lock_nested(&peersk->sk_peer_lock, SINGLE_DEPTH_NESTING);

@@ -620,16 +617,12 @@ static void copy_peercred(struct sock *sk, struct sock *peersk)
spin_lock(&peersk->sk_peer_lock);
spin_lock_nested(&sk->sk_peer_lock, SINGLE_DEPTH_NESTING);
}
old_pid = sk->sk_peer_pid;
old_cred = sk->sk_peer_cred;

sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);

spin_unlock(&sk->sk_peer_lock);
spin_unlock(&peersk->sk_peer_lock);

put_pid(old_pid);
put_cred(old_cred);
}

static int unix_listen(struct socket *sock, int backlog)
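The copy_peercred() hunks drop the old_pid/old_cred bookkeeping, which the change treats as unnecessary because the destination socket has no previous peer credentials, while keeping the address-ordered double lock. Taking the two sk_peer_lock spinlocks in pointer order gives every CPU one global locking order and so rules out ABBA deadlock when two sockets are cross-copied concurrently. The pattern in isolation; the helper name is illustrative and the caller must pass two distinct locks:

#include <linux/spinlock.h>

static void lock_peer_pair(spinlock_t *a, spinlock_t *b)
{
	/* lower address first: one global order, so two threads locking
	 * the same pair from opposite ends cannot deadlock */
	if (a < b) {
		spin_lock(a);
		spin_lock_nested(b, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock(b);
		spin_lock_nested(a, SINGLE_DEPTH_NESTING);
	}
}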
@@ -1593,6 +1593,10 @@ int __aafs_profile_mkdir(struct aa_profile *profile, struct dentry *parent)
struct aa_profile *p;
p = aa_deref_parent(profile);
dent = prof_dir(p);
if (!dent) {
error = -ENOENT;
goto fail2;
}
/* adding to parent that previously didn't have children */
dent = aafs_create_dir("profiles", dent);
if (IS_ERR(dent))
Some files were not shown because too many files have changed in this diff.