Merge 4.19.246 into android-4.19-stable
Changes in 4.19.246
x86/pci/xen: Disable PCI/MSI[-X] masking for XEN_HVM guests
staging: rtl8723bs: prevent ->Ssid overflow in rtw_wx_set_scan()
tcp: change source port randomizarion at connect() time
secure_seq: use the 64 bits of the siphash for port offset calculation
ACPI: sysfs: Make sparse happy about address space in use
ACPI: sysfs: Fix BERT error region memory mapping
net: af_key: check encryption module availability consistency
net: ftgmac100: Disable hardware checksum on AST2600
i2c: ismt: Provide a DMA buffer for Interrupt Cause Logging
drivers: i2c: thunderx: Allow driver to work with ACPI defined TWSI controllers
assoc_array: Fix BUG_ON during garbage collect
cfg80211: set custom regdomain after wiphy registration
libtraceevent: Fix build with binutils 2.35
perf bench: Share some global variables to fix build with gcc 10
perf tests bp_account: Make global variable static
drm/i915: Fix -Wstringop-overflow warning in call to intel_read_wm_latency()
block-map: add __GFP_ZERO flag for alloc_page in function bio_copy_kern
exec: Force single empty string when argv is empty
netfilter: conntrack: re-fetch conntrack after insertion
zsmalloc: fix races between asynchronous zspage free and page migration
dm integrity: fix error code in dm_integrity_ctr()
dm crypt: make printing of the key constant-time
dm stats: add cond_resched when looping over entries
dm verity: set DM_TARGET_IMMUTABLE feature flag
HID: multitouch: Add support for Google Whiskers Touchpad
tpm: Fix buffer access in tpm2_get_tpm_pt()
tpm: ibmvtpm: Correct the return value in tpm_ibmvtpm_probe()
docs: submitting-patches: Fix crossref to 'The canonical patch format'
NFSD: Fix possible sleep during nfsd4_release_lockowner()
bpf: Enlarge offset check value to INT_MAX in bpf_skb_{load,store}_bytes
Linux 4.19.246
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ibb2e43911c3fcd82ee8158ebfe7bc369e12aa872
---
@@ -133,7 +133,7 @@ as you intend it to.
 
 The maintainer will thank you if you write your patch description in a
 form which can be easily pulled into Linux's source code management
-system, ``git``, as a "commit log". See :ref:`explicit_in_reply_to`.
+system, ``git``, as a "commit log". See :ref:`the_canonical_patch_format`.
 
 Solve only one problem per patch. If your description starts to get
 long, that's a sign that you probably need to split up your patch.
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 245
+SUBLEVEL = 246
 EXTRAVERSION =
 NAME = "People's Front"
 
@@ -441,6 +441,11 @@ void __init xen_msi_init(void)
 
     x86_msi.setup_msi_irqs = xen_hvm_setup_msi_irqs;
     x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
+    /*
+     * With XEN PIRQ/Eventchannels in use PCI/MSI[-X] masking is solely
+     * controlled by the hypervisor.
+     */
+    pci_msi_ignore_mask = 1;
 }
 #endif
 
@@ -1532,7 +1532,7 @@ struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
         if (bytes > len)
             bytes = len;
 
-        page = alloc_page(q->bounce_gfp | gfp_mask);
+        page = alloc_page(q->bounce_gfp | __GFP_ZERO | gfp_mask);
         if (!page)
             goto cleanup;
 
@@ -439,18 +439,29 @@ static ssize_t acpi_data_show(struct file *filp, struct kobject *kobj,
 {
     struct acpi_data_attr *data_attr;
     void __iomem *base;
-    ssize_t rc;
+    ssize_t size;
 
     data_attr = container_of(bin_attr, struct acpi_data_attr, attr);
+    size = data_attr->attr.size;
+
+    if (offset < 0)
+        return -EINVAL;
+
+    if (offset >= size)
+        return 0;
+
+    if (count > size - offset)
+        count = size - offset;
 
-    base = acpi_os_map_memory(data_attr->addr, data_attr->attr.size);
+    base = acpi_os_map_iomem(data_attr->addr, size);
     if (!base)
         return -ENOMEM;
-    rc = memory_read_from_buffer(buf, count, &offset, base,
-                                 data_attr->attr.size);
-    acpi_os_unmap_memory(base, data_attr->attr.size);
 
-    return rc;
+    memcpy_fromio(buf, base + offset, count);
+
+    acpi_os_unmap_iomem(base, size);
+
+    return count;
 }
 
 static int acpi_bert_data_init(void *th, struct acpi_data_attr *data_attr)
@@ -717,7 +717,16 @@ ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id, u32 *value,
     if (!rc) {
         out = (struct tpm2_get_cap_out *)
             &buf.data[TPM_HEADER_SIZE];
-        *value = be32_to_cpu(out->value);
+        /*
+         * To prevent failing boot up of some systems, Infineon TPM2.0
+         * returns SUCCESS on TPM2_Startup in field upgrade mode. Also
+         * the TPM2_Getcapability command returns a zero length list
+         * in field upgrade mode.
+         */
+        if (be32_to_cpu(out->property_cnt) > 0)
+            *value = be32_to_cpu(out->value);
+        else
+            rc = -ENODATA;
     }
     tpm_buf_destroy(&buf);
     return rc;
@@ -692,6 +692,7 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
     if (!wait_event_timeout(ibmvtpm->crq_queue.wq,
                             ibmvtpm->rtce_buf != NULL,
                             HZ)) {
+        rc = -ENODEV;
         dev_err(dev, "CRQ response timed out\n");
         goto init_irq_cleanup;
     }
@@ -2814,7 +2814,7 @@ hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
 }
 
 static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
-                                  uint16_t wm[8])
+                                  uint16_t wm[])
 {
     if (INTEL_GEN(dev_priv) >= 9) {
         uint32_t val;
@@ -2111,6 +2111,9 @@ static const struct hid_device_id mt_devices[] = {
     { .driver_data = MT_CLS_GOOGLE,
         HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_GOOGLE,
             USB_DEVICE_ID_GOOGLE_TOUCH_ROSE) },
+    { .driver_data = MT_CLS_GOOGLE,
+        HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_GOOGLE,
+            USB_DEVICE_ID_GOOGLE_WHISKERS) },
 
     /* Generic MT device */
     { HID_DEVICE(HID_BUS_ANY, HID_GROUP_MULTITOUCH, HID_ANY_ID, HID_ANY_ID) },
@@ -80,6 +80,7 @@
 
 #define ISMT_DESC_ENTRIES   2   /* number of descriptor entries */
 #define ISMT_MAX_RETRIES    3   /* number of SMBus retries to attempt */
+#define ISMT_LOG_ENTRIES    3   /* number of interrupt cause log entries */
 
 /* Hardware Descriptor Constants - Control Field */
 #define ISMT_DESC_CWRL  0x01    /* Command/Write Length */
@@ -173,6 +174,8 @@ struct ismt_priv {
     u8 head;                                /* ring buffer head pointer */
     struct completion cmp;                  /* interrupt completion */
     u8 buffer[I2C_SMBUS_BLOCK_MAX + 16];    /* temp R/W data buffer */
+    dma_addr_t log_dma;
+    u32 *log;
 };
 
 /**
@@ -406,6 +409,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
     memset(desc, 0, sizeof(struct ismt_desc));
     desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, read_write);
 
+    /* Always clear the log entries */
+    memset(priv->log, 0, ISMT_LOG_ENTRIES * sizeof(u32));
+
     /* Initialize common control bits */
     if (likely(pci_dev_msi_enabled(priv->pci_dev)))
         desc->control = ISMT_DESC_INT | ISMT_DESC_FAIR;
@@ -695,6 +701,8 @@ static void ismt_hw_init(struct ismt_priv *priv)
     /* initialize the Master Descriptor Base Address (MDBA) */
     writeq(priv->io_rng_dma, priv->smba + ISMT_MSTR_MDBA);
 
+    writeq(priv->log_dma, priv->smba + ISMT_GR_SMTICL);
+
     /* initialize the Master Control Register (MCTRL) */
     writel(ISMT_MCTRL_MEIE, priv->smba + ISMT_MSTR_MCTRL);
 
@@ -784,6 +792,12 @@ static int ismt_dev_init(struct ismt_priv *priv)
     priv->head = 0;
     init_completion(&priv->cmp);
 
+    priv->log = dmam_alloc_coherent(&priv->pci_dev->dev,
+                                    ISMT_LOG_ENTRIES * sizeof(u32),
+                                    &priv->log_dma, GFP_KERNEL);
+    if (!priv->log)
+        return -ENOMEM;
+
     return 0;
 }
 
@@ -208,6 +208,7 @@ static int thunder_i2c_probe_pci(struct pci_dev *pdev,
     i2c->adap.bus_recovery_info = &octeon_i2c_recovery_info;
     i2c->adap.dev.parent = dev;
     i2c->adap.dev.of_node = pdev->dev.of_node;
+    i2c->adap.dev.fwnode = dev->fwnode;
     snprintf(i2c->adap.name, sizeof(i2c->adap.name),
              "Cavium ThunderX i2c adapter at %s", dev_name(dev));
     i2c_set_adapdata(&i2c->adap, i2c);
@@ -2940,6 +2940,11 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
     return DM_MAPIO_SUBMITTED;
 }
 
+static char hex2asc(unsigned char c)
+{
+    return c + '0' + ((unsigned)(9 - c) >> 4 & 0x27);
+}
+
 static void crypt_status(struct dm_target *ti, status_type_t type,
                          unsigned status_flags, char *result, unsigned maxlen)
 {
@@ -2958,9 +2963,12 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
         if (cc->key_size > 0) {
             if (cc->key_string)
                 DMEMIT(":%u:%s", cc->key_size, cc->key_string);
-            else
-                for (i = 0; i < cc->key_size; i++)
-                    DMEMIT("%02x", cc->key[i]);
+            else {
+                for (i = 0; i < cc->key_size; i++) {
+                    DMEMIT("%c%c", hex2asc(cc->key[i] >> 4),
+                           hex2asc(cc->key[i] & 0xf));
+                }
+            }
         } else
             DMEMIT("-");
 
@@ -3565,8 +3565,6 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
     }
 
     if (should_write_sb) {
-        int r;
-
         init_journal(ic, 0, ic->journal_sections, 0);
         r = dm_integrity_failed(ic);
         if (unlikely(r)) {
@@ -224,6 +224,7 @@ void dm_stats_cleanup(struct dm_stats *stats)
                        atomic_read(&shared->in_flight[READ]),
                        atomic_read(&shared->in_flight[WRITE]));
             }
+            cond_resched();
         }
         dm_stat_free(&s->rcu_head);
     }
@@ -313,6 +314,7 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
     for (ni = 0; ni < n_entries; ni++) {
         atomic_set(&s->stat_shared[ni].in_flight[READ], 0);
         atomic_set(&s->stat_shared[ni].in_flight[WRITE], 0);
+        cond_resched();
     }
 
     if (s->n_histogram_entries) {
@@ -325,6 +327,7 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
         for (ni = 0; ni < n_entries; ni++) {
             s->stat_shared[ni].tmp.histogram = hi;
             hi += s->n_histogram_entries + 1;
+            cond_resched();
         }
     }
 
@@ -345,6 +348,7 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
             for (ni = 0; ni < n_entries; ni++) {
                 p[ni].histogram = hi;
                 hi += s->n_histogram_entries + 1;
+                cond_resched();
             }
         }
     }
@@ -474,6 +478,7 @@ static int dm_stats_list(struct dm_stats *stats, const char *program,
             }
             DMEMIT("\n");
         }
+        cond_resched();
     }
     mutex_unlock(&stats->mutex);
 
@@ -750,6 +755,7 @@ static void __dm_stat_clear(struct dm_stat *s, size_t idx_start, size_t idx_end,
                 local_irq_enable();
             }
         }
+        cond_resched();
     }
 }
 
@@ -865,6 +871,8 @@ static int dm_stats_print(struct dm_stats *stats, int id,
 
         if (unlikely(sz + 1 >= maxlen))
             goto buffer_overflow;
+
+        cond_resched();
     }
 
     if (clear)
@@ -1198,6 +1198,7 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
 
 static struct target_type verity_target = {
     .name       = "verity",
+    .features   = DM_TARGET_IMMUTABLE,
     .version    = {1, 4, 0},
     .module     = THIS_MODULE,
     .ctr        = verity_ctr,
@@ -1869,6 +1869,11 @@ static int ftgmac100_probe(struct platform_device *pdev)
     /* AST2400 doesn't have working HW checksum generation */
     if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac")))
         netdev->hw_features &= ~NETIF_F_HW_CSUM;
+
+    /* AST2600 tx checksum with NCSI is broken */
+    if (priv->use_ncsi && of_device_is_compatible(np, "aspeed,ast2600-mac"))
+        netdev->hw_features &= ~NETIF_F_HW_CSUM;
+
     if (np && of_get_property(np, "no-hw-checksum", NULL))
         netdev->hw_features &= ~(NETIF_F_HW_CSUM | NETIF_F_RXCSUM);
     netdev->features |= netdev->hw_features;
@@ -1359,9 +1359,11 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
 
                 sec_len = *(pos++); len-= 1;
 
-                if (sec_len>0 && sec_len<=len) {
+                if (sec_len > 0 &&
+                    sec_len <= len &&
+                    sec_len <= 32) {
                     ssid[ssid_index].SsidLength = sec_len;
-                    memcpy(ssid[ssid_index].Ssid, pos, ssid[ssid_index].SsidLength);
+                    memcpy(ssid[ssid_index].Ssid, pos, sec_len);
                     /* DBG_871X("%s COMBO_SCAN with specific ssid:%s, %d\n", __func__ */
                     /* , ssid[ssid_index].Ssid, ssid[ssid_index].SsidLength); */
                     ssid_index++;
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1805,6 +1805,9 @@ static int __do_execve_file(int fd, struct filename *filename,
         goto out_unmark;
 
     bprm->argc = count(argv, MAX_ARG_STRINGS);
+    if (bprm->argc == 0)
+        pr_warn_once("process '%s' launched '%s' with NULL argv: empty string added\n",
+                     current->comm, bprm->filename);
     if ((retval = bprm->argc) < 0)
         goto out;
 
@@ -1829,6 +1832,20 @@ static int __do_execve_file(int fd, struct filename *filename,
     if (retval < 0)
         goto out;
 
+    /*
+     * When argv is empty, add an empty string ("") as argv[0] to
+     * ensure confused userspace programs that start processing
+     * from argv[1] won't end up walking envp. See also
+     * bprm_stack_limits().
+     */
+    if (bprm->argc == 0) {
+        const char *argv[] = { "", NULL };
+        retval = copy_strings_kernel(1, argv, bprm);
+        if (retval < 0)
+            goto out;
+        bprm->argc = 1;
+    }
+
     retval = exec_binprm(bprm);
     if (retval < 0)
         goto out;
@@ -6401,16 +6401,12 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp,
         if (sop->so_is_open_owner || !same_owner_str(sop, owner))
             continue;
 
-        /* see if there are still any locks associated with it */
-        lo = lockowner(sop);
-        list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
-            if (check_for_locks(stp->st_stid.sc_file, lo)) {
-                status = nfserr_locks_held;
-                spin_unlock(&clp->cl_lock);
-                return status;
-            }
+        if (atomic_read(&sop->so_count) != 1) {
+            spin_unlock(&clp->cl_lock);
+            return nfserr_locks_held;
         }
 
+        lo = lockowner(sop);
         nfs4_get_stateowner(sop);
         break;
     }
@@ -407,7 +407,7 @@ static inline void sk_rcv_saddr_set(struct sock *sk, __be32 addr)
 }
 
 int __inet_hash_connect(struct inet_timewait_death_row *death_row,
-                        struct sock *sk, u32 port_offset,
+                        struct sock *sk, u64 port_offset,
                         int (*check_established)(struct inet_timewait_death_row *,
                                                  struct sock *, __u16,
                                                  struct inet_timewait_sock **));
@@ -58,8 +58,13 @@ static inline int nf_conntrack_confirm(struct sk_buff *skb)
     int ret = NF_ACCEPT;
 
     if (ct) {
-        if (!nf_ct_is_confirmed(ct))
+        if (!nf_ct_is_confirmed(ct)) {
             ret = __nf_conntrack_confirm(skb);
+
+            if (ret == NF_ACCEPT)
+                ct = (struct nf_conn *)skb_nfct(skb);
+        }
+
         if (likely(ret == NF_ACCEPT))
             nf_ct_deliver_cached_events(ct);
     }
@@ -4,8 +4,8 @@
 
 #include <linux/types.h>
 
-u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
-u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
+u64 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
+u64 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
                                __be16 dport);
 u32 secure_tcp_seq(__be32 saddr, __be32 daddr,
                    __be16 sport, __be16 dport);
@@ -1465,6 +1465,7 @@ int assoc_array_gc(struct assoc_array *array,
     struct assoc_array_ptr *cursor, *ptr;
     struct assoc_array_ptr *new_root, *new_parent, **new_ptr_pp;
     unsigned long nr_leaves_on_tree;
+    bool retained;
     int keylen, slot, nr_free, next_slot, i;
 
     pr_devel("-->%s()\n", __func__);
@@ -1541,6 +1542,7 @@ int assoc_array_gc(struct assoc_array *array,
         goto descend;
     }
 
+retry_compress:
     pr_devel("-- compress node %p --\n", new_n);
 
     /* Count up the number of empty slots in this node and work out the
@@ -1558,6 +1560,7 @@ int assoc_array_gc(struct assoc_array *array,
     pr_devel("free=%d, leaves=%lu\n", nr_free, new_n->nr_leaves_on_branch);
 
     /* See what we can fold in */
+    retained = false;
     next_slot = 0;
     for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
         struct assoc_array_shortcut *s;
@@ -1607,9 +1610,14 @@ int assoc_array_gc(struct assoc_array *array,
             pr_devel("[%d] retain node %lu/%d [nx %d]\n",
                      slot, child->nr_leaves_on_branch, nr_free + 1,
                      next_slot);
+            retained = true;
         }
     }
 
+    if (retained && new_n->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT) {
+        pr_devel("internal nodes remain despite enough space, retrying\n");
+        goto retry_compress;
+    }
     pr_devel("after: %lu\n", new_n->nr_leaves_on_branch);
 
     nr_leaves_on_tree = new_n->nr_leaves_on_branch;
@@ -1812,11 +1812,40 @@ static enum fullness_group putback_zspage(struct size_class *class,
  */
 static void lock_zspage(struct zspage *zspage)
 {
-    struct page *page = get_first_page(zspage);
+    struct page *curr_page, *page;
 
-    do {
-        lock_page(page);
-    } while ((page = get_next_page(page)) != NULL);
+    /*
+     * Pages we haven't locked yet can be migrated off the list while we're
+     * trying to lock them, so we need to be careful and only attempt to
+     * lock each page under migrate_read_lock(). Otherwise, the page we lock
+     * may no longer belong to the zspage. This means that we may wait for
+     * the wrong page to unlock, so we must take a reference to the page
+     * prior to waiting for it to unlock outside migrate_read_lock().
+     */
+    while (1) {
+        migrate_read_lock(zspage);
+        page = get_first_page(zspage);
+        if (trylock_page(page))
+            break;
+        get_page(page);
+        migrate_read_unlock(zspage);
+        wait_on_page_locked(page);
+        put_page(page);
+    }
+
+    curr_page = page;
+    while ((page = get_next_page(curr_page))) {
+        if (trylock_page(page)) {
+            curr_page = page;
+        } else {
+            get_page(page);
+            migrate_read_unlock(zspage);
+            wait_on_page_locked(page);
+            put_page(page);
+            migrate_read_lock(zspage);
+        }
+    }
+    migrate_read_unlock(zspage);
 }
 
 static struct dentry *zs_mount(struct file_system_type *fs_type,
@@ -1666,7 +1666,7 @@ BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset,
 
     if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
         return -EINVAL;
-    if (unlikely(offset > 0xffff))
+    if (unlikely(offset > INT_MAX))
         return -EFAULT;
     if (unlikely(bpf_try_make_writable(skb, offset + len)))
         return -EFAULT;
@@ -1701,7 +1701,7 @@ BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset,
 {
     void *ptr;
 
-    if (unlikely(offset > 0xffff))
+    if (unlikely(offset > INT_MAX))
         goto err_clear;
 
     ptr = skb_header_pointer(skb, offset, len, to);
@@ -96,7 +96,7 @@ u32 secure_tcpv6_seq(const __be32 *saddr, const __be32 *daddr,
 }
 EXPORT_SYMBOL(secure_tcpv6_seq);
 
-u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
+u64 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
                                __be16 dport)
 {
     const struct {
@@ -146,7 +146,7 @@ u32 secure_tcp_seq(__be32 saddr, __be32 daddr,
 }
 EXPORT_SYMBOL_GPL(secure_tcp_seq);
 
-u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
+u64 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
 {
     net_secret_init();
     return siphash_4u32((__force u32)saddr, (__force u32)daddr,
@@ -507,7 +507,7 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
     return -EADDRNOTAVAIL;
 }
 
-static u32 inet_sk_port_offset(const struct sock *sk)
+static u64 inet_sk_port_offset(const struct sock *sk)
 {
     const struct inet_sock *inet = inet_sk(sk);
 
@@ -714,8 +714,19 @@ void inet_unhash(struct sock *sk)
 }
 EXPORT_SYMBOL_GPL(inet_unhash);
 
+/* RFC 6056 3.3.4. Algorithm 4: Double-Hash Port Selection Algorithm
+ * Note that we use 32bit integers (vs RFC 'short integers')
+ * because 2^16 is not a multiple of num_ephemeral and this
+ * property might be used by clever attacker.
+ * RFC claims using TABLE_LENGTH=10 buckets gives an improvement,
+ * we use 256 instead to really give more isolation and
+ * privacy, this only consumes 1 KB of kernel memory.
+ */
+#define INET_TABLE_PERTURB_SHIFT 8
+static u32 table_perturb[1 << INET_TABLE_PERTURB_SHIFT];
+
 int __inet_hash_connect(struct inet_timewait_death_row *death_row,
-        struct sock *sk, u32 port_offset,
+        struct sock *sk, u64 port_offset,
         int (*check_established)(struct inet_timewait_death_row *,
             struct sock *, __u16, struct inet_timewait_sock **))
 {
@@ -727,7 +738,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
     struct inet_bind_bucket *tb;
     u32 remaining, offset;
     int ret, i, low, high;
-    static u32 hint;
+    u32 index;
 
     if (port) {
         head = &hinfo->bhash[inet_bhashfn(net, port,
@@ -752,7 +763,12 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
     if (likely(remaining > 1))
         remaining &= ~1U;
 
-    offset = (hint + port_offset) % remaining;
+    net_get_random_once(table_perturb, sizeof(table_perturb));
+    index = hash_32(port_offset, INET_TABLE_PERTURB_SHIFT);
+
+    offset = READ_ONCE(table_perturb[index]) + port_offset;
+    offset %= remaining;
+
     /* In first pass we try ports of @low parity.
      * inet_csk_get_port() does the opposite choice.
      */
@@ -805,7 +821,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
     return -EADDRNOTAVAIL;
 
 ok:
-    hint += i + 2;
+    WRITE_ONCE(table_perturb[index], READ_ONCE(table_perturb[index]) + i + 2);
 
     /* Head lock still held and bh's disabled */
     inet_bind_hash(sk, tb, port);
@@ -828,7 +844,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
 int inet_hash_connect(struct inet_timewait_death_row *death_row,
                       struct sock *sk)
 {
-    u32 port_offset = 0;
+    u64 port_offset = 0;
 
     if (!inet_sk(sk)->inet_num)
         port_offset = inet_sk_port_offset(sk);
@@ -311,7 +311,7 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
     return -EADDRNOTAVAIL;
 }
 
-static u32 inet6_sk_port_offset(const struct sock *sk)
+static u64 inet6_sk_port_offset(const struct sock *sk)
 {
     const struct inet_sock *inet = inet_sk(sk);
 
@@ -323,7 +323,7 @@ static u32 inet6_sk_port_offset(const struct sock *sk)
 int inet6_hash_connect(struct inet_timewait_death_row *death_row,
                        struct sock *sk)
 {
-    u32 port_offset = 0;
+    u64 port_offset = 0;
 
     if (!inet_sk(sk)->inet_num)
         port_offset = inet6_sk_port_offset(sk);
@@ -2910,7 +2910,7 @@ static int count_ah_combs(const struct xfrm_tmpl *t)
             break;
         if (!aalg->pfkey_supported)
             continue;
-        if (aalg_tmpl_set(t, aalg))
+        if (aalg_tmpl_set(t, aalg) && aalg->available)
             sz += sizeof(struct sadb_comb);
     }
     return sz + sizeof(struct sadb_prop);
@@ -2928,7 +2928,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
         if (!ealg->pfkey_supported)
             continue;
 
-        if (!(ealg_tmpl_set(t, ealg)))
+        if (!(ealg_tmpl_set(t, ealg) && ealg->available))
             continue;
 
         for (k = 1; ; k++) {
@@ -2939,7 +2939,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
             if (!aalg->pfkey_supported)
                 continue;
 
-            if (aalg_tmpl_set(t, aalg))
+            if (aalg_tmpl_set(t, aalg) && aalg->available)
                 sz += sizeof(struct sadb_comb);
         }
     }
@@ -4,6 +4,7 @@
  * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014 Intel Mobile Communications GmbH
  * Copyright 2015-2017 Intel Deutschland GmbH
+ * Copyright (C) 2018-2021 Intel Corporation
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -835,9 +836,6 @@ int wiphy_register(struct wiphy *wiphy)
         return res;
     }
 
-    /* set up regulatory info */
-    wiphy_regulatory_register(wiphy);
-
     list_add_rcu(&rdev->list, &cfg80211_rdev_list);
     cfg80211_rdev_list_generation++;
 
@@ -851,6 +849,9 @@ int wiphy_register(struct wiphy *wiphy)
     cfg80211_debugfs_rdev_add(rdev);
     nl80211_notify_wiphy(rdev, NL80211_CMD_NEW_WIPHY);
 
+    /* set up regulatory info */
+    wiphy_regulatory_register(wiphy);
+
     if (wiphy->regulatory_flags & REGULATORY_CUSTOM_REG) {
         struct regulatory_request request;
 
@@ -3756,6 +3756,7 @@ void wiphy_regulatory_register(struct wiphy *wiphy)
 
     wiphy_update_regulatory(wiphy, lr->initiator);
     wiphy_all_share_dfs_chan_state(wiphy);
+    reg_process_self_managed_hints();
 }
 
 void wiphy_regulatory_deregister(struct wiphy *wiphy)
@@ -263,7 +263,7 @@ define do_generate_dynamic_list_file
         xargs echo "U w W" | tr 'w ' 'W\n' | sort -u | xargs echo`;\
     if [ "$$symbol_type" = "U W" ];then \
         (echo '{'; \
-        $(NM) -u -D $1 | awk 'NF>1 {print "\t"$$2";"}' | sort -u;\
+        $(NM) -u -D $1 | awk 'NF>1 {sub("@.*", "", $$2); print "\t"$$2";"}' | sort -u;\
         echo '};'; \
         ) > $2; \
     else \
@@ -2,6 +2,10 @@
 #ifndef BENCH_H
 #define BENCH_H
 
+#include <sys/time.h>
+
+extern struct timeval bench__start, bench__end, bench__runtime;
+
 /*
  * The madvise transparent hugepage constants were added in glibc
  * 2.13. For compatibility with older versions of glibc, define these
@@ -35,7 +35,7 @@ static unsigned int nfutexes = 1024;
 static bool fshared = false, done = false, silent = false;
 static int futex_flag = 0;
 
-struct timeval start, end, runtime;
+struct timeval bench__start, bench__end, bench__runtime;
 static pthread_mutex_t thread_lock;
 static unsigned int threads_starting;
 static struct stats throughput_stats;
@@ -101,8 +101,8 @@ static void toggle_done(int sig __maybe_unused,
 {
     /* inform all threads that we're done for the day */
     done = true;
-    gettimeofday(&end, NULL);
-    timersub(&end, &start, &runtime);
+    gettimeofday(&bench__end, NULL);
+    timersub(&bench__end, &bench__start, &bench__runtime);
 }
 
 static void print_summary(void)
@@ -112,7 +112,7 @@ static void print_summary(void)
 
     printf("%sAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n",
            !silent ? "\n" : "", avg, rel_stddev_stats(stddev, avg),
-           (int) runtime.tv_sec);
+           (int)bench__runtime.tv_sec);
 }
 
 int bench_futex_hash(int argc, const char **argv)
@@ -159,7 +159,7 @@ int bench_futex_hash(int argc, const char **argv)
 
     threads_starting = nthreads;
     pthread_attr_init(&thread_attr);
-    gettimeofday(&start, NULL);
+    gettimeofday(&bench__start, NULL);
     for (i = 0; i < nthreads; i++) {
         worker[i].tid = i;
         worker[i].futex = calloc(nfutexes, sizeof(*worker[i].futex));
@@ -202,7 +202,7 @@ int bench_futex_hash(int argc, const char **argv)
     pthread_mutex_destroy(&thread_lock);
 
     for (i = 0; i < nthreads; i++) {
-        unsigned long t = worker[i].ops/runtime.tv_sec;
+        unsigned long t = worker[i].ops / bench__runtime.tv_sec;
         update_stats(&throughput_stats, t);
         if (!silent) {
             if (nfutexes == 1)
@@ -35,7 +35,6 @@ static bool silent = false, multi = false;
 static bool done = false, fshared = false;
 static unsigned int nthreads = 0;
 static int futex_flag = 0;
-struct timeval start, end, runtime;
 static pthread_mutex_t thread_lock;
 static unsigned int threads_starting;
 static struct stats throughput_stats;
@@ -62,7 +61,7 @@ static void print_summary(void)
 
     printf("%sAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n",
            !silent ? "\n" : "", avg, rel_stddev_stats(stddev, avg),
-           (int) runtime.tv_sec);
+           (int)bench__runtime.tv_sec);
 }
 
 static void toggle_done(int sig __maybe_unused,
@@ -71,8 +70,8 @@ static void toggle_done(int sig __maybe_unused,
 {
     /* inform all threads that we're done for the day */
     done = true;
-    gettimeofday(&end, NULL);
-    timersub(&end, &start, &runtime);
+    gettimeofday(&bench__end, NULL);
+    timersub(&bench__end, &bench__start, &bench__runtime);
 }
 
 static void *workerfn(void *arg)
@@ -183,7 +182,7 @@ int bench_futex_lock_pi(int argc, const char **argv)
 
     threads_starting = nthreads;
    pthread_attr_init(&thread_attr);
-    gettimeofday(&start, NULL);
+    gettimeofday(&bench__start, NULL);
 
     create_threads(worker, thread_attr, cpu);
     pthread_attr_destroy(&thread_attr);
@@ -209,7 +208,7 @@ int bench_futex_lock_pi(int argc, const char **argv)
     pthread_mutex_destroy(&thread_lock);
 
     for (i = 0; i < nthreads; i++) {
-        unsigned long t = worker[i].ops/runtime.tv_sec;
+        unsigned long t = worker[i].ops / bench__runtime.tv_sec;
 
         update_stats(&throughput_stats, t);
         if (!silent)
@@ -22,7 +22,7 @@
 #include "perf.h"
 #include "cloexec.h"
 
-volatile long the_var;
+static volatile long the_var;
 
 static noinline int test_function(void)
 {