Merge 4.19.310 into android-4.19-stable
Changes in 4.19.310
        net: usb: lan78xx: Remove lots of set but unused 'ret' variables
        lan78xx: Fix white space and style issues
        lan78xx: Add missing return code checks
        lan78xx: Fix partial packet errors on suspend/resume
        lan78xx: Fix race conditions in suspend/resume handling
        net: lan78xx: fix runtime PM count underflow on link stop
        net: move definition of pcpu_lstats to header file
        geneve: make sure to pull inner header in geneve_rx()
        net/ipv6: avoid possible UAF in ip6_route_mpath_notify()
        net/rds: fix WARNING in rds_conn_connect_if_down
        netfilter: nf_conntrack_h323: Add protection for bmp length out of range
        netrom: Fix a data-race around sysctl_netrom_default_path_quality
        netrom: Fix a data-race around sysctl_netrom_obsolescence_count_initialiser
        netrom: Fix data-races around sysctl_netrom_network_ttl_initialiser
        netrom: Fix a data-race around sysctl_netrom_transport_timeout
        netrom: Fix a data-race around sysctl_netrom_transport_maximum_tries
        netrom: Fix a data-race around sysctl_netrom_transport_acknowledge_delay
        netrom: Fix a data-race around sysctl_netrom_transport_busy_delay
        netrom: Fix a data-race around sysctl_netrom_transport_requested_window_size
        netrom: Fix a data-race around sysctl_netrom_transport_no_activity_timeout
        netrom: Fix a data-race around sysctl_netrom_routing_control
        netrom: Fix a data-race around sysctl_netrom_link_fails_count
        netrom: Fix data-races around sysctl_net_busy_read
        btrfs: ref-verify: free ref cache before clearing mount opt
        tools/selftest/vm: allow choosing mem size and page size in map_hugetlb
        selftests: mm: fix map_hugetlb failure on 64K page size systems
        um: allow not setting extra rpaths in the linux binary
        Input: i8042 - fix strange behavior of touchpad on Clevo NS70PU
        hv_netvsc: Make netvsc/VF binding check both MAC and serial number
        hv_netvsc: use netif_is_bond_master() instead of open code
        hv_netvsc: Register VF in netvsc_probe if NET_DEVICE_REGISTER missed
        y2038: rusage: use __kernel_old_timeval
        getrusage: add the "signal_struct *sig" local variable
        getrusage: move thread_group_cputime_adjusted() outside of lock_task_sighand()
        getrusage: use __for_each_thread()
        getrusage: use sig->stats_lock rather than lock_task_sighand()
        selftests/vm: fix display of page size in map_hugetlb
        selftests/vm: fix map_hugetlb length used for testing read and write
        Linux 4.19.310

Change-Id: Ic5b82f32496ce273cea3aa7db028cb6aa911da6f
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 309
+SUBLEVEL = 310
 EXTRAVERSION =
 NAME = "People's Front"
 
@@ -964,7 +964,7 @@ put_tv32(struct timeval32 __user *o, struct timespec64 *i)
 }
 
 static inline long
-put_tv_to_tv32(struct timeval32 __user *o, struct timeval *i)
+put_tv_to_tv32(struct timeval32 __user *o, struct __kernel_old_timeval *i)
 {
         return copy_to_user(o, &(struct timeval32){
                         .tv_sec = i->tv_sec,
@@ -88,6 +88,19 @@ config LD_SCRIPT_DYN
         depends on !LD_SCRIPT_STATIC
         select MODULE_REL_CRCS if MODVERSIONS
 
+config LD_SCRIPT_DYN_RPATH
+        bool "set rpath in the binary" if EXPERT
+        default y
+        depends on LD_SCRIPT_DYN
+        help
+          Add /lib (and /lib64 for 64-bit) to the linux binary's rpath
+          explicitly.
+
+          You may need to turn this off if compiling for nix systems
+          that have their libraries in random /nix directories and
+          might otherwise unexpected use libraries from /lib or /lib64
+          instead of the desired ones.
+
 config HOSTFS
         tristate "Host filesystem"
         help
@@ -119,7 +119,8 @@ archheaders:
 archprepare: include/generated/user_constants.h
 
 LINK-$(CONFIG_LD_SCRIPT_STATIC) += -static
-LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib $(call cc-option, -no-pie)
+LINK-$(CONFIG_LD_SCRIPT_DYN) += $(call cc-option, -no-pie)
+LINK-$(CONFIG_LD_SCRIPT_DYN_RPATH) += -Wl,-rpath,/lib
 
 CFLAGS_NO_HARDENING := $(call cc-option, -fno-PIC,) $(call cc-option, -fno-pic,) \
         $(call cc-option, -fno-stack-protector,) \
@@ -51,7 +51,7 @@ ELF_FORMAT := elf64-x86-64
 
 # Not on all 64-bit distros /lib is a symlink to /lib64. PLD is an example.
 
-LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib64
+LINK-$(CONFIG_LD_SCRIPT_DYN_RPATH) += -Wl,-rpath,/lib64
 LINK-y += -m64
 
 # Do unit-at-a-time unconditionally on x86_64, following the host
@@ -1183,6 +1183,12 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
                                         SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOLOOP |
                                         SERIO_QUIRK_NOPNP)
         },
+        {
+                .matches = {
+                        DMI_MATCH(DMI_BOARD_NAME, "NS5x_7xPU"),
+                },
+                .driver_data = (void *)(SERIO_QUIRK_NOAUX)
+        },
         {
                 .matches = {
                         DMI_MATCH(DMI_BOARD_NAME, "NJ50_70CU"),
@@ -217,7 +217,7 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
         struct metadata_dst *tun_dst = NULL;
         struct pcpu_sw_netstats *stats;
         unsigned int len;
-        int err = 0;
+        int nh, err = 0;
         void *oiph;
 
         if (ip_tunnel_collect_metadata() || gs->collect_md) {
@@ -261,9 +261,23 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
                 goto drop;
         }
 
-        oiph = skb_network_header(skb);
+        /* Save offset of outer header relative to skb->head,
+         * because we are going to reset the network header to the inner header
+         * and might change skb->head.
+         */
+        nh = skb_network_header(skb) - skb->head;
+
         skb_reset_network_header(skb);
 
+        if (!pskb_inet_may_pull(skb)) {
+                DEV_STATS_INC(geneve->dev, rx_length_errors);
+                DEV_STATS_INC(geneve->dev, rx_errors);
+                goto drop;
+        }
+
+        /* Get the outer header. */
+        oiph = skb->head + nh;
+
         if (geneve_get_sk_family(gs) == AF_INET)
                 err = IP_ECN_decapsulate(oiph, skb);
 #if IS_ENABLED(CONFIG_IPV6)
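Note (illustrative, not part of the patch): the geneve hunk stops caching oiph as a raw pointer because pskb_inet_may_pull() may reallocate the header area and move skb->head, which would leave any previously computed header pointer dangling. A minimal sketch of the same save-offset / pull / recompute pattern; handle_inner() is a made-up placeholder, while pskb_inet_may_pull() and the skb fields are the real interfaces used above:

    /* Sketch only - not from the patch. */
    static int rx_example(struct sk_buff *skb)
    {
            int nh;
            void *outer;

            nh = skb_network_header(skb) - skb->head;  /* offset survives a realloc */
            skb_reset_network_header(skb);

            if (!pskb_inet_may_pull(skb))              /* may change skb->head */
                    return -EINVAL;

            outer = skb->head + nh;                    /* recompute after the pull */
            return handle_inner(skb, outer);           /* hypothetical helper */
    }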
@@ -54,6 +54,10 @@
 #define LINKCHANGE_INT (2 * HZ)
 #define VF_TAKEOVER_INT (HZ / 10)
 
+/* Macros to define the context of vf registration */
+#define VF_REG_IN_PROBE         1
+#define VF_REG_IN_NOTIFIER      2
+
 static unsigned int ring_size __ro_after_init = 128;
 module_param(ring_size, uint, 0444);
 MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
@@ -2025,7 +2029,7 @@ static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb)
 }
 
 static int netvsc_vf_join(struct net_device *vf_netdev,
-                          struct net_device *ndev)
+                          struct net_device *ndev, int context)
 {
         struct net_device_context *ndev_ctx = netdev_priv(ndev);
         int ret;
@@ -2048,7 +2052,11 @@ static int netvsc_vf_join(struct net_device *vf_netdev,
                 goto upper_link_failed;
         }
 
-        schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
+        /* If this registration is called from probe context vf_takeover
+         * is taken care of later in probe itself.
+         */
+        if (context == VF_REG_IN_NOTIFIER)
+                schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
 
         call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);
 
@@ -2141,8 +2149,17 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
                 if (!ndev_ctx->vf_alloc)
                         continue;
 
-                if (ndev_ctx->vf_serial == serial)
-                        return hv_get_drvdata(ndev_ctx->device_ctx);
+                if (ndev_ctx->vf_serial != serial)
+                        continue;
+
+                ndev = hv_get_drvdata(ndev_ctx->device_ctx);
+                if (ndev->addr_len != vf_netdev->addr_len ||
+                    memcmp(ndev->perm_addr, vf_netdev->perm_addr,
+                           ndev->addr_len) != 0)
+                        continue;
+
+                return ndev;
+
         }
 
         /* Fallback path to check synthetic vf with help of mac addr.
@@ -2177,7 +2194,7 @@ static int netvsc_prepare_bonding(struct net_device *vf_netdev)
         return NOTIFY_DONE;
 }
 
-static int netvsc_register_vf(struct net_device *vf_netdev)
+static int netvsc_register_vf(struct net_device *vf_netdev, int context)
 {
         struct net_device_context *net_device_ctx;
         struct netvsc_device *netvsc_dev;
@@ -2216,7 +2233,7 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
 
         netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
 
-        if (netvsc_vf_join(vf_netdev, ndev) != 0)
+        if (netvsc_vf_join(vf_netdev, ndev, context) != 0)
                 return NOTIFY_DONE;
 
         dev_hold(vf_netdev);
@@ -2276,10 +2293,31 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev)
         return NOTIFY_OK;
 }
 
+static int check_dev_is_matching_vf(struct net_device *event_ndev)
+{
+        /* Skip NetVSC interfaces */
+        if (event_ndev->netdev_ops == &device_ops)
+                return -ENODEV;
+
+        /* Avoid non-Ethernet type devices */
+        if (event_ndev->type != ARPHRD_ETHER)
+                return -ENODEV;
+
+        /* Avoid Vlan dev with same MAC registering as VF */
+        if (is_vlan_dev(event_ndev))
+                return -ENODEV;
+
+        /* Avoid Bonding master dev with same MAC registering as VF */
+        if (netif_is_bond_master(event_ndev))
+                return -ENODEV;
+
+        return 0;
+}
+
 static int netvsc_probe(struct hv_device *dev,
                         const struct hv_vmbus_device_id *dev_id)
 {
-        struct net_device *net = NULL;
+        struct net_device *net = NULL, *vf_netdev;
         struct net_device_context *net_device_ctx;
         struct netvsc_device_info *device_info = NULL;
         struct netvsc_device *nvdev;
@@ -2382,6 +2420,30 @@ static int netvsc_probe(struct hv_device *dev,
         }
 
         list_add(&net_device_ctx->list, &netvsc_dev_list);
+
+        /* When the hv_netvsc driver is unloaded and reloaded, the
+         * NET_DEVICE_REGISTER for the vf device is replayed before probe
+         * is complete. This is because register_netdevice_notifier() gets
+         * registered before vmbus_driver_register() so that callback func
+         * is set before probe and we don't miss events like NETDEV_POST_INIT
+         * So, in this section we try to register the matching vf device that
+         * is present as a netdevice, knowing that its register call is not
+         * processed in the netvsc_netdev_notifier(as probing is progress and
+         * get_netvsc_byslot fails).
+         */
+        for_each_netdev(dev_net(net), vf_netdev) {
+                ret = check_dev_is_matching_vf(vf_netdev);
+                if (ret != 0)
+                        continue;
+
+                if (net != get_netvsc_byslot(vf_netdev))
+                        continue;
+
+                netvsc_prepare_bonding(vf_netdev);
+                netvsc_register_vf(vf_netdev, VF_REG_IN_PROBE);
+                __netvsc_vf_setup(net, vf_netdev);
+                break;
+        }
         rtnl_unlock();
 
         kfree(device_info);
@@ -2474,29 +2536,17 @@ static int netvsc_netdev_event(struct notifier_block *this,
                                unsigned long event, void *ptr)
 {
         struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
+        int ret = 0;
 
-        /* Skip our own events */
-        if (event_dev->netdev_ops == &device_ops)
-                return NOTIFY_DONE;
-
-        /* Avoid non-Ethernet type devices */
-        if (event_dev->type != ARPHRD_ETHER)
-                return NOTIFY_DONE;
-
-        /* Avoid Vlan dev with same MAC registering as VF */
-        if (is_vlan_dev(event_dev))
-                return NOTIFY_DONE;
-
-        /* Avoid Bonding master dev with same MAC registering as VF */
-        if ((event_dev->priv_flags & IFF_BONDING) &&
-            (event_dev->flags & IFF_MASTER))
+        ret = check_dev_is_matching_vf(event_dev);
+        if (ret != 0)
                 return NOTIFY_DONE;
 
         switch (event) {
         case NETDEV_POST_INIT:
                 return netvsc_prepare_bonding(event_dev);
         case NETDEV_REGISTER:
-                return netvsc_register_vf(event_dev);
+                return netvsc_register_vf(event_dev, VF_REG_IN_NOTIFIER);
         case NETDEV_UNREGISTER:
                 return netvsc_unregister_vf(event_dev);
         case NETDEV_UP:
@@ -59,12 +59,6 @@
 #include <net/net_namespace.h>
 #include <linux/u64_stats_sync.h>
 
-struct pcpu_lstats {
-        u64                     packets;
-        u64                     bytes;
-        struct u64_stats_sync   syncp;
-};
-
 /* The higher levels take care of making this non-reentrant (it's
  * called with bh's disabled).
  */
@@ -6,12 +6,6 @@
 #include <linux/if_arp.h>
 #include <net/rtnetlink.h>
 
-struct pcpu_lstats {
-        u64 packets;
-        u64 bytes;
-        struct u64_stats_sync syncp;
-};
-
 static netdev_tx_t nlmon_xmit(struct sk_buff *skb, struct net_device *dev)
 {
         int len = skb->len;
[File diff suppressed because it is too large]
@@ -11,12 +11,6 @@
 #define DEFAULT_MTU (VIRTIO_VSOCK_MAX_PKT_BUF_SIZE + \
                      sizeof(struct af_vsockmon_hdr))
 
-struct pcpu_lstats {
-        u64 rx_packets;
-        u64 rx_bytes;
-        struct u64_stats_sync syncp;
-};
-
 static int vsockmon_dev_init(struct net_device *dev)
 {
         dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
@@ -56,8 +50,8 @@ static netdev_tx_t vsockmon_xmit(struct sk_buff *skb, struct net_device *dev)
         struct pcpu_lstats *stats = this_cpu_ptr(dev->lstats);
 
         u64_stats_update_begin(&stats->syncp);
-        stats->rx_bytes += len;
-        stats->rx_packets++;
+        stats->bytes += len;
+        stats->packets++;
         u64_stats_update_end(&stats->syncp);
 
         dev_kfree_skb(skb);
@@ -80,8 +74,8 @@ vsockmon_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 
                 do {
                         start = u64_stats_fetch_begin_irq(&vstats->syncp);
-                        tbytes = vstats->rx_bytes;
-                        tpackets = vstats->rx_packets;
+                        tbytes = vstats->bytes;
+                        tpackets = vstats->packets;
                 } while (u64_stats_fetch_retry_irq(&vstats->syncp, start));
 
                 packets += tpackets;
@@ -891,8 +891,10 @@ int btrfs_ref_tree_mod(struct btrfs_root *root, u64 bytenr, u64 num_bytes,
 out_unlock:
         spin_unlock(&root->fs_info->ref_verify_lock);
 out:
-        if (ret)
+        if (ret) {
+                btrfs_free_ref_cache(fs_info);
                 btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
+        }
         return ret;
 }
 
@@ -1021,8 +1023,8 @@ int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info)
                 }
         }
         if (ret) {
-                btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
                 btrfs_free_ref_cache(fs_info);
+                btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
         }
         btrfs_free_path(path);
         return ret;
@@ -2427,6 +2427,12 @@ struct pcpu_sw_netstats {
         struct u64_stats_sync   syncp;
 };
 
+struct pcpu_lstats {
+        u64 packets;
+        u64 bytes;
+        struct u64_stats_sync syncp;
+};
+
 #define __netdev_alloc_pcpu_stats(type, gfp)                            \
 ({                                                                      \
         typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
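With struct pcpu_lstats defined once in netdevice.h, the loopback, nlmon and vsockmon hunks above can drop their private copies and share the same field names, which is why vsockmon's rx_bytes/rx_packets accesses are renamed. A hedged sketch of the usual allocate-and-bump pattern for these per-cpu counters (example_xmit() is a made-up name; the helpers and fields are the ones used in the hunks):

    /* In the driver's ndo_init():
     *     dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
     */
    static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            struct pcpu_lstats *stats = this_cpu_ptr(dev->lstats);
            int len = skb->len;

            u64_stats_update_begin(&stats->syncp);
            stats->bytes += len;
            stats->packets++;
            u64_stats_update_end(&stats->syncp);

            dev_kfree_skb(skb);
            return NETDEV_TX_OK;
    }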
@@ -22,8 +22,8 @@
 #define RUSAGE_THREAD   1               /* only the calling thread */
 
 struct  rusage {
-        struct timeval ru_utime;        /* user time used */
-        struct timeval ru_stime;        /* system time used */
+        struct __kernel_old_timeval ru_utime;   /* user time used */
+        struct __kernel_old_timeval ru_stime;   /* system time used */
         __kernel_long_t ru_maxrss;      /* maximum resident set size */
         __kernel_long_t ru_ixrss;       /* integral shared memory size */
         __kernel_long_t ru_idrss;       /* integral unshared data size */
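For context (an assumption based on the y2038 UAPI work, not shown in this diff): struct __kernel_old_timeval keeps the same two-long layout as the userspace struct timeval it replaces, so the rusage ABI does not change; only the in-kernel type name stops being the ambiguous timeval. Roughly the uapi definition this hunk relies on:

    /* Sketch of the uapi type, for reference only. */
    struct __kernel_old_timeval {
            __kernel_long_t tv_sec;         /* seconds */
            __kernel_long_t tv_usec;        /* microseconds */
    };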
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1725,73 +1725,86 @@ void getrusage(struct task_struct *p, int who, struct rusage *r)
         struct task_struct *t;
         unsigned long flags;
         u64 tgutime, tgstime, utime, stime;
-        unsigned long maxrss = 0;
+        unsigned long maxrss;
+        struct mm_struct *mm;
+        struct signal_struct *sig = p->signal;
+        unsigned int seq = 0;
 
-        memset((char *)r, 0, sizeof (*r));
+retry:
+        memset(r, 0, sizeof(*r));
         utime = stime = 0;
+        maxrss = 0;
 
         if (who == RUSAGE_THREAD) {
                 task_cputime_adjusted(current, &utime, &stime);
                 accumulate_thread_rusage(p, r);
-                maxrss = p->signal->maxrss;
-                goto out;
+                maxrss = sig->maxrss;
+                goto out_thread;
         }
 
-        if (!lock_task_sighand(p, &flags))
-                return;
+        flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
 
         switch (who) {
         case RUSAGE_BOTH:
         case RUSAGE_CHILDREN:
-                utime = p->signal->cutime;
-                stime = p->signal->cstime;
-                r->ru_nvcsw = p->signal->cnvcsw;
-                r->ru_nivcsw = p->signal->cnivcsw;
-                r->ru_minflt = p->signal->cmin_flt;
-                r->ru_majflt = p->signal->cmaj_flt;
-                r->ru_inblock = p->signal->cinblock;
-                r->ru_oublock = p->signal->coublock;
-                maxrss = p->signal->cmaxrss;
+                utime = sig->cutime;
+                stime = sig->cstime;
+                r->ru_nvcsw = sig->cnvcsw;
+                r->ru_nivcsw = sig->cnivcsw;
+                r->ru_minflt = sig->cmin_flt;
+                r->ru_majflt = sig->cmaj_flt;
+                r->ru_inblock = sig->cinblock;
+                r->ru_oublock = sig->coublock;
+                maxrss = sig->cmaxrss;
 
                 if (who == RUSAGE_CHILDREN)
                         break;
 
         case RUSAGE_SELF:
-                thread_group_cputime_adjusted(p, &tgutime, &tgstime);
-                utime += tgutime;
-                stime += tgstime;
-                r->ru_nvcsw += p->signal->nvcsw;
-                r->ru_nivcsw += p->signal->nivcsw;
-                r->ru_minflt += p->signal->min_flt;
-                r->ru_majflt += p->signal->maj_flt;
-                r->ru_inblock += p->signal->inblock;
-                r->ru_oublock += p->signal->oublock;
-                if (maxrss < p->signal->maxrss)
-                        maxrss = p->signal->maxrss;
-                t = p;
-                do {
+                r->ru_nvcsw += sig->nvcsw;
+                r->ru_nivcsw += sig->nivcsw;
+                r->ru_minflt += sig->min_flt;
+                r->ru_majflt += sig->maj_flt;
+                r->ru_inblock += sig->inblock;
+                r->ru_oublock += sig->oublock;
+                if (maxrss < sig->maxrss)
+                        maxrss = sig->maxrss;
+
+                rcu_read_lock();
+                __for_each_thread(sig, t)
                         accumulate_thread_rusage(t, r);
-                } while_each_thread(p, t);
+                rcu_read_unlock();
+
                 break;
 
         default:
                 BUG();
         }
-        unlock_task_sighand(p, &flags);
 
-out:
-        r->ru_utime = ns_to_timeval(utime);
-        r->ru_stime = ns_to_timeval(stime);
-
-        if (who != RUSAGE_CHILDREN) {
-                struct mm_struct *mm = get_task_mm(p);
-
-                if (mm) {
-                        setmax_mm_hiwater_rss(&maxrss, mm);
-                        mmput(mm);
-                }
+        if (need_seqretry(&sig->stats_lock, seq)) {
+                seq = 1;
+                goto retry;
         }
+        done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
+
+        if (who == RUSAGE_CHILDREN)
+                goto out_children;
+
+        thread_group_cputime_adjusted(p, &tgutime, &tgstime);
+        utime += tgutime;
+        stime += tgstime;
+
+out_thread:
+        mm = get_task_mm(p);
+        if (mm) {
+                setmax_mm_hiwater_rss(&maxrss, mm);
+                mmput(mm);
+        }
+
+out_children:
         r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
+        r->ru_utime = ns_to_kernel_old_timeval(utime);
+        r->ru_stime = ns_to_kernel_old_timeval(stime);
 }
 
 SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
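Aside (not from the patch): the rewritten getrusage() uses the kernel's lockless-reader pattern around a seqcount-protected stats block instead of taking the sighand lock. A stripped-down sketch of that retry loop, with a hypothetical copy_stats() standing in for the per-field copies above:

    /* Sketch only; copy_stats() is made up. The first pass is lockless;
     * if a writer raced with us, need_seqretry() forces a second pass
     * that takes stats_lock for real.
     */
    static void read_sig_stats(struct signal_struct *sig)
    {
            unsigned int seq = 0;
            unsigned long flags;

    retry:
            flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
            copy_stats(sig);
            if (need_seqretry(&sig->stats_lock, seq)) {
                    seq = 1;        /* second pass takes the lock */
                    goto retry;
            }
            done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
    }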
@@ -4495,25 +4495,19 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
         err_nh = NULL;
         list_for_each_entry(nh, &rt6_nh_list, next) {
                 err = __ip6_ins_rt(nh->fib6_info, info, extack);
-                fib6_info_release(nh->fib6_info);
-
-                if (!err) {
-                        /* save reference to last route successfully inserted */
-                        rt_last = nh->fib6_info;
-
-                        /* save reference to first route for notification */
-                        if (!rt_notif)
-                                rt_notif = nh->fib6_info;
-                }
-
-                /* nh->fib6_info is used or freed at this point, reset to NULL*/
-                nh->fib6_info = NULL;
                 if (err) {
                         if (replace && nhn)
                                 ip6_print_replace_route_err(&rt6_nh_list);
                         err_nh = nh;
                         goto add_errout;
                 }
 
+                /* save reference to last route successfully inserted */
+                rt_last = nh->fib6_info;
+
+                /* save reference to first route for notification */
+                if (!rt_notif)
+                        rt_notif = nh->fib6_info;
+
                 /* Because each route is added like a single route we remove
                  * these flags after the first nexthop: if there is a collision,
@@ -4551,8 +4545,7 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
 
 cleanup:
         list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
-                if (nh->fib6_info)
-                        fib6_info_release(nh->fib6_info);
+                fib6_info_release(nh->fib6_info);
                 list_del(&nh->next);
                 kfree(nh);
         }
@@ -536,6 +536,8 @@ static int decode_seq(struct bitstr *bs, const struct field_t *f,
         /* Get fields bitmap */
         if (nf_h323_error_boundary(bs, 0, f->sz))
                 return H323_ERROR_BOUND;
+        if (f->sz > 32)
+                return H323_ERROR_RANGE;
         bmp = get_bitmap(bs, f->sz);
         if (base)
                 *(unsigned int *)base = bmp;
@@ -592,6 +594,8 @@ static int decode_seq(struct bitstr *bs, const struct field_t *f,
         bmp2_len = get_bits(bs, 7) + 1;
         if (nf_h323_error_boundary(bs, 0, bmp2_len))
                 return H323_ERROR_BOUND;
+        if (bmp2_len > 32)
+                return H323_ERROR_RANGE;
         bmp2 = get_bitmap(bs, bmp2_len);
         bmp |= bmp2 >> f->sz;
         if (base)
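Why 32 is the limit (editorial note, not in the patch): the decoder's bitmap fits in a single 32-bit value, so a declared bitmap length above 32 would ask get_bitmap() for more bits than the type holds and then feed an out-of-range count into the shift in `bmp |= bmp2 >> f->sz`. As a worked example, a malformed message with f->sz = 40 would otherwise request a 40-bit bitmap and perform a shift wider than the 32-bit operand; the new H323_ERROR_RANGE checks reject such input before either can happen.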
@@ -456,16 +456,16 @@ static int nr_create(struct net *net, struct socket *sock, int protocol,
         nr_init_timers(sk);
 
         nr->t1     =
-                msecs_to_jiffies(sysctl_netrom_transport_timeout);
+                msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_timeout));
         nr->t2     =
-                msecs_to_jiffies(sysctl_netrom_transport_acknowledge_delay);
+                msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_acknowledge_delay));
         nr->n2     =
-                msecs_to_jiffies(sysctl_netrom_transport_maximum_tries);
+                msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_maximum_tries));
         nr->t4     =
-                msecs_to_jiffies(sysctl_netrom_transport_busy_delay);
+                msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_busy_delay));
         nr->idle   =
-                msecs_to_jiffies(sysctl_netrom_transport_no_activity_timeout);
-        nr->window = sysctl_netrom_transport_requested_window_size;
+                msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_no_activity_timeout));
+        nr->window = READ_ONCE(sysctl_netrom_transport_requested_window_size);
 
         nr->bpqext = 1;
         nr->state  = NR_STATE_0;
@@ -957,7 +957,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
          * G8PZT's Xrouter which is sending packets with command type 7
          * as an extension of the protocol.
          */
-        if (sysctl_netrom_reset_circuit &&
+        if (READ_ONCE(sysctl_netrom_reset_circuit) &&
             (frametype != NR_RESET || flags != 0))
                 nr_transmit_reset(skb, 1);
 
@@ -84,7 +84,7 @@ static int nr_header(struct sk_buff *skb, struct net_device *dev,
         buff[6] |= AX25_SSSID_SPARE;
         buff    += AX25_ADDR_LEN;
 
-        *buff++ = sysctl_netrom_network_ttl_initialiser;
+        *buff++ = READ_ONCE(sysctl_netrom_network_ttl_initialiser);
 
         *buff++ = NR_PROTO_IP;
         *buff++ = NR_PROTO_IP;
@@ -100,7 +100,7 @@ static int nr_state1_machine(struct sock *sk, struct sk_buff *skb,
                 break;
 
         case NR_RESET:
-                if (sysctl_netrom_reset_circuit)
+                if (READ_ONCE(sysctl_netrom_reset_circuit))
                         nr_disconnect(sk, ECONNRESET);
                 break;
 
@@ -131,7 +131,7 @@ static int nr_state2_machine(struct sock *sk, struct sk_buff *skb,
                 break;
 
         case NR_RESET:
-                if (sysctl_netrom_reset_circuit)
+                if (READ_ONCE(sysctl_netrom_reset_circuit))
                         nr_disconnect(sk, ECONNRESET);
                 break;
 
@@ -266,7 +266,7 @@ static int nr_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype
                 break;
 
         case NR_RESET:
-                if (sysctl_netrom_reset_circuit)
+                if (READ_ONCE(sysctl_netrom_reset_circuit))
                         nr_disconnect(sk, ECONNRESET);
                 break;
 
@@ -207,7 +207,7 @@ void nr_transmit_buffer(struct sock *sk, struct sk_buff *skb)
         dptr[6] |= AX25_SSSID_SPARE;
         dptr += AX25_ADDR_LEN;
 
-        *dptr++ = sysctl_netrom_network_ttl_initialiser;
+        *dptr++ = READ_ONCE(sysctl_netrom_network_ttl_initialiser);
 
         if (!nr_route_frame(skb, NULL)) {
                 kfree_skb(skb);
@@ -156,7 +156,7 @@ static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic,
                 nr_neigh->digipeat = NULL;
                 nr_neigh->ax25     = NULL;
                 nr_neigh->dev      = dev;
-                nr_neigh->quality  = sysctl_netrom_default_path_quality;
+                nr_neigh->quality  = READ_ONCE(sysctl_netrom_default_path_quality);
                 nr_neigh->locked   = 0;
                 nr_neigh->count    = 0;
                 nr_neigh->number   = nr_neigh_no++;
@@ -728,7 +728,7 @@ void nr_link_failed(ax25_cb *ax25, int reason)
         nr_neigh->ax25 = NULL;
         ax25_cb_put(ax25);
 
-        if (++nr_neigh->failed < sysctl_netrom_link_fails_count) {
+        if (++nr_neigh->failed < READ_ONCE(sysctl_netrom_link_fails_count)) {
                 nr_neigh_put(nr_neigh);
                 return;
         }
@@ -766,7 +766,7 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
         if (ax25 != NULL) {
                 ret = nr_add_node(nr_src, "", &ax25->dest_addr, ax25->digipeat,
                                   ax25->ax25_dev->dev, 0,
-                                  sysctl_netrom_obsolescence_count_initialiser);
+                                  READ_ONCE(sysctl_netrom_obsolescence_count_initialiser));
                 if (ret)
                         return ret;
         }
@@ -780,7 +780,7 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
                         return ret;
         }
 
-        if (!sysctl_netrom_routing_control && ax25 != NULL)
+        if (!READ_ONCE(sysctl_netrom_routing_control) && ax25 != NULL)
                 return 0;
 
         /* Its Time-To-Live has expired */
@@ -185,7 +185,8 @@ void nr_write_internal(struct sock *sk, int frametype)
                 *dptr++ = nr->my_id;
                 *dptr++ = frametype;
                 *dptr++ = nr->window;
-                if (nr->bpqext) *dptr++ = sysctl_netrom_network_ttl_initialiser;
+                if (nr->bpqext)
+                        *dptr++ = READ_ONCE(sysctl_netrom_network_ttl_initialiser);
                 break;
 
         case NR_DISCREQ:
@@ -239,7 +240,7 @@ void __nr_transmit_reply(struct sk_buff *skb, int mine, unsigned char cmdflags)
         dptr[6] |= AX25_SSSID_SPARE;
         dptr += AX25_ADDR_LEN;
 
-        *dptr++ = sysctl_netrom_network_ttl_initialiser;
+        *dptr++ = READ_ONCE(sysctl_netrom_network_ttl_initialiser);
 
         if (mine) {
                 *dptr++ = 0;
@@ -278,6 +278,9 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
                         put_page(sg_page(&sg[i]));
                 kfree(sg);
                 ret = PTR_ERR(trans_private);
+                /* Trigger connection so that its ready for the next retry */
+                if (ret == -ENODEV)
+                        rds_conn_connect_if_down(cp->cp_conn);
                 goto out;
         }
 
@@ -1300,12 +1300,8 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
 
         /* Parse any control messages the user may have included. */
         ret = rds_cmsg_send(rs, rm, msg, &allocated_mr, &vct);
-        if (ret) {
-                /* Trigger connection so that its ready for the next retry */
-                if (ret == -EAGAIN)
-                        rds_conn_connect_if_down(conn);
+        if (ret)
                 goto out;
-        }
 
         if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
                 printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
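Read together, the two RDS hunks relocate the "kick the connection so the next attempt can succeed" call: rds_sendmsg() no longer calls rds_conn_connect_if_down() on -EAGAIN from rds_cmsg_send(), and __rds_rdma_map() now makes the call on -ENODEV, i.e. only once MR registration has actually failed because the transport/connection is not available. (Editorial summary of the hunks above, not additional patch content.)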
@@ -15,6 +15,7 @@
 #include <unistd.h>
 #include <sys/mman.h>
 #include <fcntl.h>
+#include "vm_util.h"
 
 #define LENGTH (256UL*1024*1024)
 #define PROTECTION (PROT_READ | PROT_WRITE)
@@ -23,6 +24,14 @@
 #define MAP_HUGETLB 0x40000     /* arch specific */
 #endif
 
+#ifndef MAP_HUGE_SHIFT
+#define MAP_HUGE_SHIFT 26
+#endif
+
+#ifndef MAP_HUGE_MASK
+#define MAP_HUGE_MASK 0x3f
+#endif
+
 /* Only ia64 requires this */
 #ifdef __ia64__
 #define ADDR (void *)(0x8000000000000000UL)
@@ -37,20 +46,20 @@ static void check_bytes(char *addr)
         printf("First hex is %x\n", *((unsigned int *)addr));
 }
 
-static void write_bytes(char *addr)
+static void write_bytes(char *addr, size_t length)
 {
         unsigned long i;
 
-        for (i = 0; i < LENGTH; i++)
+        for (i = 0; i < length; i++)
                 *(addr + i) = (char)i;
 }
 
-static int read_bytes(char *addr)
+static int read_bytes(char *addr, size_t length)
 {
         unsigned long i;
 
         check_bytes(addr);
-        for (i = 0; i < LENGTH; i++)
+        for (i = 0; i < length; i++)
                 if (*(addr + i) != (char)i) {
                         printf("Mismatch at %lu\n", i);
                         return 1;
@@ -58,12 +67,35 @@ static int read_bytes(char *addr)
         return 0;
 }
 
-int main(void)
+int main(int argc, char **argv)
 {
         void *addr;
         int ret;
+        size_t hugepage_size;
+        size_t length = LENGTH;
+        int flags = FLAGS;
+        int shift = 0;
 
-        addr = mmap(ADDR, LENGTH, PROTECTION, FLAGS, -1, 0);
+        hugepage_size = default_huge_page_size();
+        /* munmap with fail if the length is not page aligned */
+        if (hugepage_size > length)
+                length = hugepage_size;
+
+        if (argc > 1)
+                length = atol(argv[1]) << 20;
+        if (argc > 2) {
+                shift = atoi(argv[2]);
+                if (shift)
+                        flags |= (shift & MAP_HUGE_MASK) << MAP_HUGE_SHIFT;
+        }
+
+        if (shift)
+                printf("%u kB hugepages\n", 1 << (shift - 10));
+        else
+                printf("Default size hugepages\n");
+        printf("Mapping %lu Mbytes\n", (unsigned long)length >> 20);
+
+        addr = mmap(ADDR, length, PROTECTION, flags, -1, 0);
         if (addr == MAP_FAILED) {
                 perror("mmap");
                 exit(1);
@@ -71,11 +103,11 @@ int main(void)
 
         printf("Returned address is %p\n", addr);
         check_bytes(addr);
-        write_bytes(addr);
-        ret = read_bytes(addr);
+        write_bytes(addr, length);
+        ret = read_bytes(addr, length);
 
         /* munmap() length of MAP_HUGETLB memory must be hugepage aligned */
-        if (munmap(addr, LENGTH)) {
+        if (munmap(addr, length)) {
                 perror("munmap");
                 exit(1);
         }
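Usage note (not part of the diff): with these changes the selftest accepts an optional size in megabytes and an optional huge page shift, e.g. running `./map_hugetlb 128 21` maps 128 MB using 2 MB pages (the test prints 1 << (21 - 10) = 2048 kB hugepages), while running it with no arguments keeps the old 256 MB default, rounded up to the system's default huge page size so that munmap() stays hugepage-aligned on 64K-page systems.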