Merge 4.19.66 into android-4.19-q
Changes in 4.19.66:
    scsi: fcoe: Embed fc_rport_priv in fcoe_rport structure
    gcc-9: don't warn about uninitialized variable
    driver core: Establish order of operations for device_add and device_del via bitflag
    drivers/base: Introduce kill_device()
    libnvdimm/bus: Prevent duplicate device_unregister() calls
    libnvdimm/region: Register badblocks before namespaces
    libnvdimm/bus: Prepare the nd_ioctl() path to be re-entrant
    libnvdimm/bus: Fix wait_nvdimm_bus_probe_idle() ABBA deadlock
    HID: wacom: fix bit shift for Cintiq Companion 2
    HID: Add quirk for HP X1200 PIXART OEM mouse
    IB: directly cast the sockaddr union to sockaddr
    atm: iphase: Fix Spectre v1 vulnerability
    bnx2x: Disable multi-cos feature.
    ife: error out when nla attributes are empty
    ip6_gre: reload ipv6h in prepare_ip6gre_xmit_ipv6
    ip6_tunnel: fix possible use-after-free on xmit
    ipip: validate header length in ipip_tunnel_xmit
    mlxsw: spectrum: Fix error path in mlxsw_sp_module_init()
    mvpp2: fix panic on module removal
    mvpp2: refactor MTU change code
    net: bridge: delete local fdb on device init failure
    net: bridge: mcast: don't delete permanent entries when fast leave is enabled
    net: fix ifindex collision during namespace removal
    net/mlx5e: always initialize frag->last_in_page
    net/mlx5: Use reversed order when unregister devices
    net: phylink: Fix flow control for fixed-link
    net: qualcomm: rmnet: Fix incorrect UL checksum offload logic
    net: sched: Fix a possible null-pointer dereference in dequeue_func()
    net sched: update vlan action for batched events operations
    net: sched: use temporary variable for actions indexes
    net/smc: do not schedule tx_work in SMC_CLOSED state
    NFC: nfcmrvl: fix gpio-handling regression
    ocelot: Cancel delayed work before wq destruction
    tipc: compat: allow tipc commands without arguments
    tun: mark small packets as owned by the tap sock
    net/mlx5: Fix modify_cq_in alignment
    net/mlx5e: Prevent encap flow counter update async to user query
    r8169: don't use MSI before RTL8168d
    compat_ioctl: pppoe: fix PPPOEIOCSFWD handling
    cgroup: Call cgroup_release() before __exit_signal()
    cgroup: Implement css_task_iter_skip()
    cgroup: Include dying leaders with live threads in PROCS iterations
    cgroup: css_task_iter_skip()'d iterators must be advanced before accessed
    cgroup: Fix css_task_iter_advance_css_set() cset skip condition
    spi: bcm2835: Fix 3-wire mode if DMA is enabled
    Linux 4.19.66

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I58f1b77149c4f3285db3e64c05240080b7425ae4

diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 65
+SUBLEVEL = 66
 EXTRAVERSION =
 NAME = "People's Front"
 
@@ -63,6 +63,7 @@
 #include <asm/byteorder.h>
 #include <linux/vmalloc.h>
 #include <linux/jiffies.h>
+#include <linux/nospec.h>
 #include "iphase.h"
 #include "suni.h"
 #define swap_byte_order(x) (((x & 0xff) << 8) | ((x & 0xff00) >> 8))
@@ -2760,8 +2761,11 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
 	}
 	if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT;
 	board = ia_cmds.status;
-	if ((board < 0) || (board > iadev_count))
-		board = 0;
+
+	if ((board < 0) || (board > iadev_count))
+		board = 0;
+	board = array_index_nospec(board, iadev_count + 1);
+
 	iadev = ia_dev[board];
 	switch (ia_cmds.cmd) {
 	case MEMDUMP:
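
Note: array_index_nospec() clamps `board` so the ia_dev[board] load that
follows cannot be steered past the array bound under speculation. A minimal
user-space sketch of the branch-free mask trick used by the generic helper
in include/linux/nospec.h (the helper names here are local to the example,
and the arithmetic right shift of a negative value is assumed to
sign-extend, as the kernel assumes):

    #include <assert.h>

    #define BITS_PER_LONG (sizeof(long) * 8)

    /* All-ones when index < size, all-zeroes otherwise, with no branch
     * that the CPU could speculate past. */
    static unsigned long index_mask_nospec(unsigned long index, unsigned long size)
    {
        return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1);
    }

    static unsigned long index_nospec(unsigned long index, unsigned long size)
    {
        return index & index_mask_nospec(index, size);
    }

    int main(void)
    {
        assert(index_nospec(3, 8) == 3);    /* in range: value unchanged */
        assert(index_nospec(8, 8) == 0);    /* out of range: forced to 0 */
        assert(index_nospec(-1UL, 8) == 0); /* wrapped negative: forced to 0 */
        return 0;
    }
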
@@ -66,6 +66,9 @@ struct driver_private {
  * probed first.
  * @device - pointer back to the struct device that this structure is
  * associated with.
+ * @dead - This device is currently either in the process of or has been
+ * removed from the system. Any asynchronous events scheduled for this
+ * device should exit without taking any action.
  *
  * Nothing outside of the driver core should ever touch these fields.
  */
@@ -76,6 +79,7 @@ struct device_private {
 	struct klist_node knode_bus;
 	struct list_head deferred_probe;
 	struct device *device;
+	u8 dead:1;
 };
 #define to_device_private_parent(obj)	\
 	container_of(obj, struct device_private, knode_parent)
@@ -2031,6 +2031,24 @@ void put_device(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(put_device);
 
+bool kill_device(struct device *dev)
+{
+	/*
+	 * Require the device lock and set the "dead" flag to guarantee that
+	 * the update behavior is consistent with the other bitfields near
+	 * it and that we cannot have an asynchronous probe routine trying
+	 * to run while we are tearing out the bus/class/sysfs from
+	 * underneath the device.
+	 */
+	lockdep_assert_held(&dev->mutex);
+
+	if (dev->p->dead)
+		return false;
+	dev->p->dead = true;
+	return true;
+}
+EXPORT_SYMBOL_GPL(kill_device);
+
 /**
  * device_del - delete device from system.
  * @dev: device.
@@ -2050,6 +2068,10 @@ void device_del(struct device *dev)
 	struct kobject *glue_dir = NULL;
 	struct class_interface *class_intf;
 
+	device_lock(dev);
+	kill_device(dev);
+	device_unlock(dev);
+
 	/* Notify clients of device removal. This call must come
 	 * before dpm_sysfs_remove().
 	 */
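
Note: kill_device() turns "has teardown started?" into a test-and-set under
the device lock, so every unregistration path agrees on a single winner (the
nvdimm hunks later in this series rely on exactly that). A user-space
analogue, with a pthread mutex standing in for the device lock and
illustrative names throughout:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct fake_device {
        pthread_mutex_t mutex;  /* plays the role of the device lock */
        bool dead;
    };

    /* Caller must hold dev->mutex, mirroring lockdep_assert_held(). */
    static bool fake_kill_device(struct fake_device *dev)
    {
        if (dev->dead)
            return false;
        dev->dead = true;
        return true;
    }

    static void *unregister_path(void *arg)
    {
        struct fake_device *dev = arg;
        bool killed;

        pthread_mutex_lock(&dev->mutex);
        killed = fake_kill_device(dev);
        pthread_mutex_unlock(&dev->mutex);
        if (killed)
            puts("this caller won the race and runs the teardown once");
        return NULL;
    }

    int main(void)
    {
        struct fake_device dev = { PTHREAD_MUTEX_INITIALIZER, false };
        pthread_t a, b;

        pthread_create(&a, NULL, unregister_path, &dev);
        pthread_create(&b, NULL, unregister_path, &dev);
        pthread_join(a, NULL);
        pthread_join(b, NULL);  /* exactly one line is printed */
        return 0;
    }
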
@@ -725,15 +725,6 @@ static int __device_attach_driver(struct device_driver *drv, void *_data)
 	bool async_allowed;
 	int ret;
 
-	/*
-	 * Check if device has already been claimed. This may
-	 * happen with driver loading, device discovery/registration,
-	 * and deferred probe processing happens all at once with
-	 * multiple threads.
-	 */
-	if (dev->driver)
-		return -EBUSY;
-
 	ret = driver_match_device(drv, dev);
 	if (ret == 0) {
 		/* no match */
@@ -768,6 +759,15 @@ static void __device_attach_async_helper(void *_dev, async_cookie_t cookie)
 
 	device_lock(dev);
 
+	/*
+	 * Check if device has already been removed or claimed. This may
+	 * happen with driver loading, device discovery/registration,
+	 * and deferred probe processing happens all at once with
+	 * multiple threads.
+	 */
+	if (dev->p->dead || dev->driver)
+		goto out_unlock;
+
 	if (dev->parent)
 		pm_runtime_get_sync(dev->parent);
 
@@ -778,7 +778,7 @@ static void __device_attach_async_helper(void *_dev, async_cookie_t cookie)
 
 	if (dev->parent)
 		pm_runtime_put(dev->parent);
-
+out_unlock:
 	device_unlock(dev);
 
 	put_device(dev);
@@ -891,7 +891,7 @@ static int __driver_attach(struct device *dev, void *data)
 	if (dev->parent && dev->bus->need_parent_lock)
 		device_lock(dev->parent);
 	device_lock(dev);
-	if (!dev->driver)
+	if (!dev->p->dead && !dev->driver)
 		driver_probe_device(drv, dev);
 	device_unlock(dev);
 	if (dev->parent && dev->bus->need_parent_lock)
@@ -559,6 +559,7 @@
 #define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A	0x0b4a
 #define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE	0x134a
 #define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A	0x094a
+#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641	0x0641
 
 #define USB_VENDOR_ID_HUION	0x256c
 #define USB_DEVICE_ID_HUION_TABLET	0x006e
@@ -94,6 +94,7 @@ static const struct hid_device_id hid_quirks[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A), HID_QUIRK_ALWAYS_POLL },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A), HID_QUIRK_ALWAYS_POLL },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641), HID_QUIRK_ALWAYS_POLL },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6680), HID_QUIRK_MULTI_INPUT },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_INNOMEDIA, USB_DEVICE_ID_INNEX_GENESIS_ATARI), HID_QUIRK_MULTI_INPUT },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X), HID_QUIRK_MULTI_INPUT },
@@ -537,14 +537,14 @@ static int wacom_intuos_pad(struct wacom_wac *wacom)
 		 */
 		buttons = (data[4] << 1) | (data[3] & 0x01);
 	} else if (features->type == CINTIQ_COMPANION_2) {
-		/* d-pad right  -> data[4] & 0x10
-		 * d-pad up     -> data[4] & 0x20
-		 * d-pad left   -> data[4] & 0x40
-		 * d-pad down   -> data[4] & 0x80
-		 * d-pad center -> data[3] & 0x01
+		/* d-pad right  -> data[2] & 0x10
+		 * d-pad up     -> data[2] & 0x20
+		 * d-pad left   -> data[2] & 0x40
+		 * d-pad down   -> data[2] & 0x80
+		 * d-pad center -> data[1] & 0x01
 		 */
 		buttons = ((data[2] >> 4) << 7) |
-			  ((data[1] & 0x04) << 6) |
+			  ((data[1] & 0x04) << 4) |
 			  ((data[2] & 0x0F) << 2) |
 			  (data[1] & 0x03);
 	} else if (features->type >= INTUOS5S && features->type <= INTUOSPL) {
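
Note: the corrected shift moves the data[1] feature bit to bit 6; the old
<< 6 landed it on bit 8, inside the field contributed by (data[2] >> 4) << 7.
A small self-check of the packing, with the occupied masks derived from the
expression above:

    #include <assert.h>

    int main(void)
    {
        unsigned hi  = 0xFu << 7;   /* (data[2] >> 4) << 7   : bits 7-10 */
        unsigned mid = 0x04u << 4;  /* (data[1] & 0x04) << 4 : bit 6     */
        unsigned low = 0x0Fu << 2;  /* (data[2] & 0x0F) << 2 : bits 2-5  */
        unsigned lsb = 0x03u;       /* (data[1] & 0x03)      : bits 0-1  */
        unsigned old = 0x04u << 6;  /* the buggy shift       : bit 8     */

        assert((hi & mid) == 0 && (hi & low) == 0 && (mid & low) == 0);
        assert((hi | mid | low | lsb) == 0x7FF); /* contiguous bits 0-10 */
        assert((old & hi) != 0); /* old code collided with data[2]'s field */
        return 0;
    }
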
@@ -185,7 +185,7 @@ static int i2c_generic_bus_free(struct i2c_adapter *adap)
 int i2c_generic_scl_recovery(struct i2c_adapter *adap)
 {
 	struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
-	int i = 0, scl = 1, ret;
+	int i = 0, scl = 1, ret = 0;
 
 	if (bri->prepare_recovery)
 		bri->prepare_recovery(adap);
@@ -1232,7 +1232,6 @@ static int roce_resolve_route_from_path(struct sa_path_rec *rec,
 {
 	struct rdma_dev_addr dev_addr = {};
 	union {
-		struct sockaddr _sockaddr;
 		struct sockaddr_in _sockaddr_in;
 		struct sockaddr_in6 _sockaddr_in6;
 	} sgid_addr, dgid_addr;
@@ -1249,12 +1248,12 @@ static int roce_resolve_route_from_path(struct sa_path_rec *rec,
 	 */
 	dev_addr.net = &init_net;
 
-	rdma_gid2ip(&sgid_addr._sockaddr, &rec->sgid);
-	rdma_gid2ip(&dgid_addr._sockaddr, &rec->dgid);
+	rdma_gid2ip((struct sockaddr *)&sgid_addr, &rec->sgid);
+	rdma_gid2ip((struct sockaddr *)&dgid_addr, &rec->dgid);
 
 	/* validate the route */
-	ret = rdma_resolve_ip_route(&sgid_addr._sockaddr,
-				    &dgid_addr._sockaddr, &dev_addr);
+	ret = rdma_resolve_ip_route((struct sockaddr *)&sgid_addr,
+				    (struct sockaddr *)&dgid_addr, &dev_addr);
 	if (ret)
 		return ret;
 
@@ -1936,8 +1936,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
 	}
 
 	/* select a non-FCoE queue */
-	return fallback(dev, skb, NULL) %
-	       (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
+	return fallback(dev, skb, NULL) % (BNX2X_NUM_ETH_QUEUES(bp));
 }
 
 void bnx2x_set_num_queues(struct bnx2x *bp)
@@ -3501,6 +3501,7 @@ static int mvpp2_set_mac_address(struct net_device *dev, void *p)
 static int mvpp2_change_mtu(struct net_device *dev, int mtu)
 {
 	struct mvpp2_port *port = netdev_priv(dev);
+	bool running = netif_running(dev);
 	int err;
 
 	if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
@@ -3509,40 +3510,24 @@ static int mvpp2_change_mtu(struct net_device *dev, int mtu)
 		mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
 	}
 
-	if (!netif_running(dev)) {
-		err = mvpp2_bm_update_mtu(dev, mtu);
-		if (!err) {
-			port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
-			return 0;
-		}
-
-		/* Reconfigure BM to the original MTU */
-		err = mvpp2_bm_update_mtu(dev, dev->mtu);
-		if (err)
-			goto log_error;
-	}
-
-	mvpp2_stop_dev(port);
+	if (running)
+		mvpp2_stop_dev(port);
 
 	err = mvpp2_bm_update_mtu(dev, mtu);
-	if (!err) {
+	if (err) {
+		netdev_err(dev, "failed to change MTU\n");
+		/* Reconfigure BM to the original MTU */
+		mvpp2_bm_update_mtu(dev, dev->mtu);
+	} else {
 		port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
-		goto out_start;
 	}
 
-	/* Reconfigure BM to the original MTU */
-	err = mvpp2_bm_update_mtu(dev, dev->mtu);
-	if (err)
-		goto log_error;
-
-out_start:
-	mvpp2_start_dev(port);
-	mvpp2_egress_enable(port);
-	mvpp2_ingress_enable(port);
-
-	return 0;
-log_error:
-	netdev_err(dev, "failed to change MTU\n");
+	if (running) {
+		mvpp2_start_dev(port);
+		mvpp2_egress_enable(port);
+		mvpp2_ingress_enable(port);
+	}
 
 	return err;
 }
 
@@ -5358,9 +5343,6 @@ static int mvpp2_remove(struct platform_device *pdev)
 
 	mvpp2_dbgfs_cleanup(priv);
 
-	flush_workqueue(priv->stats_queue);
-	destroy_workqueue(priv->stats_queue);
-
 	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
 		if (priv->port_list[i]) {
 			mutex_destroy(&priv->port_list[i]->gather_stats_lock);
@@ -5369,6 +5351,8 @@ static int mvpp2_remove(struct platform_device *pdev)
 		i++;
 	}
 
+	destroy_workqueue(priv->stats_queue);
+
 	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
 		struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
 
@@ -307,7 +307,7 @@ void mlx5_unregister_device(struct mlx5_core_dev *dev)
 	struct mlx5_interface *intf;
 
 	mutex_lock(&mlx5_intf_mutex);
-	list_for_each_entry(intf, &intf_list, list)
+	list_for_each_entry_reverse(intf, &intf_list, list)
 		mlx5_remove_device(intf, priv);
 	list_del(&priv->dev_list);
 	mutex_unlock(&mlx5_intf_mutex);
@@ -420,12 +420,11 @@ static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix)
 
 static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
 {
-	struct mlx5e_wqe_frag_info next_frag, *prev;
+	struct mlx5e_wqe_frag_info next_frag = {};
+	struct mlx5e_wqe_frag_info *prev = NULL;
 	int i;
 
 	next_frag.di = &rq->wqe.di[0];
-	next_frag.offset = 0;
-	prev = NULL;
 
 	for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) {
 		struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
@@ -992,13 +992,13 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
 void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
 {
 	struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
-	u64 bytes, packets, lastuse = 0;
 	struct mlx5e_tc_flow *flow;
 	struct mlx5e_encap_entry *e;
 	struct mlx5_fc *counter;
 	struct neigh_table *tbl;
 	bool neigh_used = false;
 	struct neighbour *n;
+	u64 lastuse;
 
 	if (m_neigh->family == AF_INET)
 		tbl = &arp_tbl;
@@ -1015,7 +1015,7 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
 	list_for_each_entry(flow, &e->flows, encap) {
 		if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
 			counter = mlx5_flow_rule_counter(flow->rule[0]);
-			mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
+			lastuse = mlx5_fc_query_lastuse(counter);
 			if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
 				neigh_used = true;
 				break;
@@ -321,6 +321,11 @@ int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
 }
 EXPORT_SYMBOL(mlx5_fc_query);
 
+u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter)
+{
+	return counter->cache.lastuse;
+}
+
 void mlx5_fc_query_cached(struct mlx5_fc *counter,
 			  u64 *bytes, u64 *packets, u64 *lastuse)
 {
@@ -5032,7 +5032,7 @@ static int __init mlxsw_sp_module_init(void)
 	return 0;
 
 err_sp2_pci_driver_register:
-	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
+	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
 err_sp1_pci_driver_register:
 	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
 err_sp2_core_driver_register:
@@ -1767,6 +1767,7 @@ EXPORT_SYMBOL(ocelot_init);
 
 void ocelot_deinit(struct ocelot *ocelot)
 {
+	cancel_delayed_work(&ocelot->stats_work);
 	destroy_workqueue(ocelot->stats_queue);
 	mutex_destroy(&ocelot->stats_lock);
 }
@@ -59,7 +59,7 @@ struct rmnet_map_dl_csum_trailer {
 struct rmnet_map_ul_csum_header {
 	__be16 csum_start_offset;
 	u16 csum_insert_offset:14;
-	u16 udp_ip4_ind:1;
+	u16 udp_ind:1;
 	u16 csum_enabled:1;
 } __aligned(1);
 
@@ -215,9 +215,9 @@ rmnet_map_ipv4_ul_csum_header(void *iphdr,
 	ul_header->csum_insert_offset = skb->csum_offset;
 	ul_header->csum_enabled = 1;
 	if (ip4h->protocol == IPPROTO_UDP)
-		ul_header->udp_ip4_ind = 1;
+		ul_header->udp_ind = 1;
 	else
-		ul_header->udp_ip4_ind = 0;
+		ul_header->udp_ind = 0;
 
 	/* Changing remaining fields to network order */
 	hdr++;
@@ -248,6 +248,7 @@ rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
 			      struct rmnet_map_ul_csum_header *ul_header,
 			      struct sk_buff *skb)
 {
+	struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
 	__be16 *hdr = (__be16 *)ul_header, offset;
 
 	offset = htons((__force u16)(skb_transport_header(skb) -
@@ -255,7 +256,11 @@ rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
 	ul_header->csum_start_offset = offset;
 	ul_header->csum_insert_offset = skb->csum_offset;
 	ul_header->csum_enabled = 1;
-	ul_header->udp_ip4_ind = 0;
+
+	if (ip6h->nexthdr == IPPROTO_UDP)
+		ul_header->udp_ind = 1;
+	else
+		ul_header->udp_ind = 0;
 
 	/* Changing remaining fields to network order */
 	hdr++;
@@ -428,7 +433,7 @@ void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
 		ul_header->csum_start_offset = 0;
 		ul_header->csum_insert_offset = 0;
 		ul_header->csum_enabled = 0;
-		ul_header->udp_ip4_ind = 0;
+		ul_header->udp_ind = 0;
 
 		priv->stats.csum_sw++;
 	}
@@ -7239,13 +7239,18 @@ static int rtl_alloc_irq(struct rtl8169_private *tp)
 {
 	unsigned int flags;
 
-	if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
+	switch (tp->mac_version) {
+	case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
 		RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
 		RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable);
 		RTL_W8(tp, Cfg9346, Cfg9346_Lock);
+		/* fall through */
+	case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_24:
 		flags = PCI_IRQ_LEGACY;
-	} else {
+		break;
+	default:
 		flags = PCI_IRQ_ALL_TYPES;
+		break;
 	}
 
 	return pci_alloc_irq_vectors(tp->pci_dev, 1, 1, flags);
@@ -226,6 +226,8 @@ static int phylink_parse_fixedlink(struct phylink *pl,
 			 __ETHTOOL_LINK_MODE_MASK_NBITS, true);
 	linkmode_zero(pl->supported);
 	phylink_set(pl->supported, MII);
+	phylink_set(pl->supported, Pause);
+	phylink_set(pl->supported, Asym_Pause);
 	if (s) {
 		__set_bit(s->bit, pl->supported);
 	} else {
@@ -1120,6 +1120,9 @@ static const struct proto_ops pppoe_ops = {
 	.recvmsg = pppoe_recvmsg,
 	.mmap = sock_no_mmap,
 	.ioctl = pppox_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = pppox_compat_ioctl,
+#endif
 };
 
 static const struct pppox_proto pppoe_proto = {
@@ -22,6 +22,7 @@
 #include <linux/string.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
+#include <linux/compat.h>
 #include <linux/errno.h>
 #include <linux/netdevice.h>
 #include <linux/net.h>
@@ -103,6 +104,18 @@ int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 
 EXPORT_SYMBOL(pppox_ioctl);
 
+#ifdef CONFIG_COMPAT
+int pppox_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+{
+	if (cmd == PPPOEIOCSFWD32)
+		cmd = PPPOEIOCSFWD;
+
+	return pppox_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
+}
+
+EXPORT_SYMBOL(pppox_compat_ioctl);
+#endif
+
 static int pppox_create(struct net *net, struct socket *sock, int protocol,
 			int kern)
 {
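
Note: PPPOEIOCSFWD is _IOW(0xB1, 0, size_t), and _IOW() folds sizeof(type)
into the command number, so 32-bit userland computes a different value than
a 64-bit kernel expects; PPPOEIOCSFWD32 (defined in the if_pppox.h hunk
below) is that 32-bit encoding, remapped here before the pointer argument is
translated with compat_ptr(). A quick demonstration of the size dependency,
with uint64_t/uint32_t standing in for the two widths of size_t:

    #include <stdint.h>
    #include <stdio.h>
    #include <linux/ioctl.h>

    int main(void)
    {
        /* the two printed command numbers differ only in the size field */
        printf("64-bit encoding: %#x\n", (unsigned)_IOW(0xB1, 0, uint64_t));
        printf("32-bit encoding: %#x\n", (unsigned)_IOW(0xB1, 0, uint32_t));
        return 0;
    }
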
@@ -628,6 +628,9 @@ static const struct proto_ops pptp_ops = {
 	.recvmsg = sock_no_recvmsg,
 	.mmap = sock_no_mmap,
 	.ioctl = pppox_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = pppox_compat_ioctl,
+#endif
 };
 
 static const struct pppox_proto pppox_pptp_proto = {
@@ -1682,6 +1682,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
 
 	skb_reserve(skb, pad - delta);
 	skb_put(skb, len);
+	skb_set_owner_w(skb, tfile->socket.sk);
 	get_page(alloc_frag->page);
 	alloc_frag->offset += buflen;
 
@@ -244,7 +244,7 @@ void nfcmrvl_chip_reset(struct nfcmrvl_private *priv)
 	/* Reset possible fault of previous session */
 	clear_bit(NFCMRVL_PHY_ERROR, &priv->flags);
 
-	if (priv->config.reset_n_io) {
+	if (gpio_is_valid(priv->config.reset_n_io)) {
 		nfc_info(priv->dev, "reset the chip\n");
 		gpio_set_value(priv->config.reset_n_io, 0);
 		usleep_range(5000, 10000);
@@ -255,7 +255,7 @@ void nfcmrvl_chip_reset(struct nfcmrvl_private *priv)
 
 void nfcmrvl_chip_halt(struct nfcmrvl_private *priv)
 {
-	if (priv->config.reset_n_io)
+	if (gpio_is_valid(priv->config.reset_n_io))
 		gpio_set_value(priv->config.reset_n_io, 0);
 }
 
@@ -26,7 +26,7 @@
 static unsigned int hci_muxed;
 static unsigned int flow_control;
 static unsigned int break_control;
-static unsigned int reset_n_io;
+static int reset_n_io = -EINVAL;
 
 /*
 ** NFCMRVL NCI OPS
@@ -231,5 +231,5 @@ MODULE_PARM_DESC(break_control, "Tell if UART driver must drive break signal.");
 module_param(hci_muxed, uint, 0);
 MODULE_PARM_DESC(hci_muxed, "Tell if transport is muxed in HCI one.");
 
-module_param(reset_n_io, uint, 0);
+module_param(reset_n_io, int, 0);
 MODULE_PARM_DESC(reset_n_io, "GPIO that is wired to RESET_N signal.");
@@ -305,6 +305,7 @@ static int nfcmrvl_probe(struct usb_interface *intf,
 
 	/* No configuration for USB */
 	memset(&config, 0, sizeof(config));
+	config.reset_n_io = -EINVAL;
 
 	nfc_info(&udev->dev, "intf %p id %p\n", intf, id);
 
@@ -528,13 +528,38 @@ EXPORT_SYMBOL(nd_device_register);
 
 void nd_device_unregister(struct device *dev, enum nd_async_mode mode)
 {
+	bool killed;
+
 	switch (mode) {
 	case ND_ASYNC:
+		/*
+		 * In the async case this is being triggered with the
+		 * device lock held and the unregistration work needs to
+		 * be moved out of line iff this is thread has won the
+		 * race to schedule the deletion.
+		 */
+		if (!kill_device(dev))
+			return;
+
 		get_device(dev);
 		async_schedule_domain(nd_async_device_unregister, dev,
 				      &nd_async_domain);
 		break;
 	case ND_SYNC:
+		/*
+		 * In the sync case the device is being unregistered due
+		 * to a state change of the parent. Claim the kill state
+		 * to synchronize against other unregistration requests,
+		 * or otherwise let the async path handle it if the
+		 * unregistration was already queued.
+		 */
+		device_lock(dev);
+		killed = kill_device(dev);
+		device_unlock(dev);
+
+		if (!killed)
+			return;
+
 		nd_synchronize();
 		device_unregister(dev);
 		break;
@@ -840,10 +865,12 @@ void wait_nvdimm_bus_probe_idle(struct device *dev)
 	do {
 		if (nvdimm_bus->probe_active == 0)
 			break;
-		nvdimm_bus_unlock(&nvdimm_bus->dev);
+		nvdimm_bus_unlock(dev);
+		device_unlock(dev);
 		wait_event(nvdimm_bus->wait,
 			   nvdimm_bus->probe_active == 0);
-		nvdimm_bus_lock(&nvdimm_bus->dev);
+		device_lock(dev);
+		nvdimm_bus_lock(dev);
 	} while (true);
 }
 
@@ -926,20 +953,19 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
 		int read_only, unsigned int ioctl_cmd, unsigned long arg)
 {
 	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
-	static char out_env[ND_CMD_MAX_ENVELOPE];
-	static char in_env[ND_CMD_MAX_ENVELOPE];
 	const struct nd_cmd_desc *desc = NULL;
 	unsigned int cmd = _IOC_NR(ioctl_cmd);
 	struct device *dev = &nvdimm_bus->dev;
 	void __user *p = (void __user *) arg;
+	char *out_env = NULL, *in_env = NULL;
 	const char *cmd_name, *dimm_name;
 	u32 in_len = 0, out_len = 0;
 	unsigned int func = cmd;
 	unsigned long cmd_mask;
 	struct nd_cmd_pkg pkg;
 	int rc, i, cmd_rc;
+	void *buf = NULL;
 	u64 buf_len = 0;
-	void *buf;
 
 	if (nvdimm) {
 		desc = nd_cmd_dimm_desc(cmd);
@@ -970,7 +996,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
 	case ND_CMD_ARS_START:
 	case ND_CMD_CLEAR_ERROR:
 	case ND_CMD_CALL:
-		dev_dbg(&nvdimm_bus->dev, "'%s' command while read-only.\n",
+		dev_dbg(dev, "'%s' command while read-only.\n",
 			nvdimm ? nvdimm_cmd_name(cmd)
 			: nvdimm_bus_cmd_name(cmd));
 		return -EPERM;
@@ -979,6 +1005,9 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
 	}
 
 	/* process an input envelope */
+	in_env = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL);
+	if (!in_env)
+		return -ENOMEM;
 	for (i = 0; i < desc->in_num; i++) {
 		u32 in_size, copy;
 
@@ -986,14 +1015,17 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
 		if (in_size == UINT_MAX) {
 			dev_err(dev, "%s:%s unknown input size cmd: %s field: %d\n",
 					__func__, dimm_name, cmd_name, i);
-			return -ENXIO;
+			rc = -ENXIO;
+			goto out;
 		}
-		if (in_len < sizeof(in_env))
-			copy = min_t(u32, sizeof(in_env) - in_len, in_size);
+		if (in_len < ND_CMD_MAX_ENVELOPE)
+			copy = min_t(u32, ND_CMD_MAX_ENVELOPE - in_len, in_size);
 		else
 			copy = 0;
-		if (copy && copy_from_user(&in_env[in_len], p + in_len, copy))
-			return -EFAULT;
+		if (copy && copy_from_user(&in_env[in_len], p + in_len, copy)) {
+			rc = -EFAULT;
+			goto out;
+		}
 		in_len += in_size;
 	}
 
@@ -1005,6 +1037,12 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
 	}
 
 	/* process an output envelope */
+	out_env = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL);
+	if (!out_env) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
 	for (i = 0; i < desc->out_num; i++) {
 		u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i,
 				(u32 *) in_env, (u32 *) out_env, 0);
@@ -1013,15 +1051,18 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
 		if (out_size == UINT_MAX) {
 			dev_dbg(dev, "%s unknown output size cmd: %s field: %d\n",
 					dimm_name, cmd_name, i);
-			return -EFAULT;
+			rc = -EFAULT;
+			goto out;
 		}
-		if (out_len < sizeof(out_env))
-			copy = min_t(u32, sizeof(out_env) - out_len, out_size);
+		if (out_len < ND_CMD_MAX_ENVELOPE)
+			copy = min_t(u32, ND_CMD_MAX_ENVELOPE - out_len, out_size);
 		else
 			copy = 0;
 		if (copy && copy_from_user(&out_env[out_len],
-					p + in_len + out_len, copy))
-			return -EFAULT;
+					p + in_len + out_len, copy)) {
+			rc = -EFAULT;
+			goto out;
+		}
 		out_len += out_size;
 	}
 
@@ -1029,19 +1070,23 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
 	if (buf_len > ND_IOCTL_MAX_BUFLEN) {
 		dev_dbg(dev, "%s cmd: %s buf_len: %llu > %d\n", dimm_name,
 				cmd_name, buf_len, ND_IOCTL_MAX_BUFLEN);
-		return -EINVAL;
+		rc = -EINVAL;
+		goto out;
 	}
 
 	buf = vmalloc(buf_len);
-	if (!buf)
-		return -ENOMEM;
+	if (!buf) {
+		rc = -ENOMEM;
+		goto out;
+	}
 
 	if (copy_from_user(buf, p, buf_len)) {
 		rc = -EFAULT;
 		goto out;
 	}
 
-	nvdimm_bus_lock(&nvdimm_bus->dev);
+	device_lock(dev);
+	nvdimm_bus_lock(dev);
 	rc = nd_cmd_clear_to_send(nvdimm_bus, nvdimm, func, buf);
 	if (rc)
 		goto out_unlock;
@@ -1056,17 +1101,16 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
 		nvdimm_account_cleared_poison(nvdimm_bus, clear_err->address,
 				clear_err->cleared);
 	}
-	nvdimm_bus_unlock(&nvdimm_bus->dev);
 
 	if (copy_to_user(p, buf, buf_len))
 		rc = -EFAULT;
 
-	vfree(buf);
-	return rc;
-
- out_unlock:
-	nvdimm_bus_unlock(&nvdimm_bus->dev);
- out:
+out_unlock:
+	nvdimm_bus_unlock(dev);
+	device_unlock(dev);
+out:
+	kfree(in_env);
+	kfree(out_env);
 	vfree(buf);
 	return rc;
 }
@@ -42,17 +42,6 @@ static int nd_region_probe(struct device *dev)
 	if (rc)
 		return rc;
 
-	rc = nd_region_register_namespaces(nd_region, &err);
-	if (rc < 0)
-		return rc;
-
-	ndrd = dev_get_drvdata(dev);
-	ndrd->ns_active = rc;
-	ndrd->ns_count = rc + err;
-
-	if (rc && err && rc == err)
-		return -ENODEV;
-
 	if (is_nd_pmem(&nd_region->dev)) {
 		struct resource ndr_res;
 
@@ -68,6 +57,17 @@ static int nd_region_probe(struct device *dev)
 		nvdimm_badblocks_populate(nd_region, &nd_region->bb, &ndr_res);
 	}
 
+	rc = nd_region_register_namespaces(nd_region, &err);
+	if (rc < 0)
+		return rc;
+
+	ndrd = dev_get_drvdata(dev);
+	ndrd->ns_active = rc;
+	ndrd->ns_count = rc + err;
+
+	if (rc && err && rc == err)
+		return -ENODEV;
+
 	nd_region->btt_seed = nd_btt_create(nd_region);
 	nd_region->pfn_seed = nd_pfn_create(nd_region);
 	nd_region->dax_seed = nd_dax_create(nd_region);
@@ -425,10 +425,12 @@ static ssize_t available_size_show(struct device *dev,
 	 * memory nvdimm_bus_lock() is dropped, but that's userspace's
 	 * problem to not race itself.
 	 */
+	device_lock(dev);
 	nvdimm_bus_lock(dev);
 	wait_nvdimm_bus_probe_idle(dev);
 	available = nd_region_available_dpa(nd_region);
 	nvdimm_bus_unlock(dev);
+	device_unlock(dev);
 
 	return sprintf(buf, "%llu\n", available);
 }
@@ -440,10 +442,12 @@ static ssize_t max_available_extent_show(struct device *dev,
 	struct nd_region *nd_region = to_nd_region(dev);
 	unsigned long long available = 0;
 
+	device_lock(dev);
 	nvdimm_bus_lock(dev);
 	wait_nvdimm_bus_probe_idle(dev);
 	available = nd_region_allocatable_dpa(nd_region);
 	nvdimm_bus_unlock(dev);
+	device_unlock(dev);
 
 	return sprintf(buf, "%llu\n", available);
 }
@@ -2017,7 +2017,7 @@ EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac);
  */
 static inline struct fcoe_rport *fcoe_ctlr_rport(struct fc_rport_priv *rdata)
 {
-	return (struct fcoe_rport *)(rdata + 1);
+	return container_of(rdata, struct fcoe_rport, rdata);
 }
 
 /**
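
Note: with fc_rport_priv now embedded as the first member of struct
fcoe_rport (see the libfcoe.h hunk near the end of this diff), the old
cast-past-the-end arithmetic gives way to container_of(), which recovers the
outer structure from a pointer to the embedded member and stays correct even
if the member ever moves. A user-space sketch of the same recovery, with
illustrative type names:

    #include <stddef.h>
    #include <stdio.h>

    /* user-space equivalent of the kernel's container_of() */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct rdata { int port_id; };

    struct rport {
        struct rdata rdata;  /* embedded, as fc_rport_priv is in fcoe_rport */
        int vlan;
    };

    int main(void)
    {
        struct rport rp = { .rdata = { .port_id = 7 }, .vlan = 42 };
        struct rdata *inner = &rp.rdata;
        struct rport *outer = container_of(inner, struct rport, rdata);

        printf("vlan = %d\n", outer->vlan);  /* prints 42 */
        return 0;
    }
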
@@ -2281,7 +2281,7 @@ static void fcoe_ctlr_vn_start(struct fcoe_ctlr *fip)
  */
 static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
			      struct sk_buff *skb,
-			      struct fc_rport_priv *rdata)
+			      struct fcoe_rport *frport)
 {
 	struct fip_header *fiph;
 	struct fip_desc *desc = NULL;
@@ -2289,16 +2289,12 @@ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
 	struct fip_wwn_desc *wwn = NULL;
 	struct fip_vn_desc *vn = NULL;
 	struct fip_size_desc *size = NULL;
-	struct fcoe_rport *frport;
 	size_t rlen;
 	size_t dlen;
 	u32 desc_mask = 0;
 	u32 dtype;
 	u8 sub;
 
-	memset(rdata, 0, sizeof(*rdata) + sizeof(*frport));
-	frport = fcoe_ctlr_rport(rdata);
-
 	fiph = (struct fip_header *)skb->data;
 	frport->flags = ntohs(fiph->fip_flags);
 
@@ -2361,15 +2357,17 @@ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
 			if (dlen != sizeof(struct fip_wwn_desc))
 				goto len_err;
 			wwn = (struct fip_wwn_desc *)desc;
-			rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn);
+			frport->rdata.ids.node_name =
+				get_unaligned_be64(&wwn->fd_wwn);
 			break;
 		case FIP_DT_VN_ID:
 			if (dlen != sizeof(struct fip_vn_desc))
 				goto len_err;
 			vn = (struct fip_vn_desc *)desc;
 			memcpy(frport->vn_mac, vn->fd_mac, ETH_ALEN);
-			rdata->ids.port_id = ntoh24(vn->fd_fc_id);
-			rdata->ids.port_name = get_unaligned_be64(&vn->fd_wwpn);
+			frport->rdata.ids.port_id = ntoh24(vn->fd_fc_id);
+			frport->rdata.ids.port_name =
+				get_unaligned_be64(&vn->fd_wwpn);
 			break;
 		case FIP_DT_FC4F:
 			if (dlen != sizeof(struct fip_fc4_feat))
@@ -2750,10 +2748,7 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
 {
 	struct fip_header *fiph;
 	enum fip_vn2vn_subcode sub;
-	struct {
-		struct fc_rport_priv rdata;
-		struct fcoe_rport frport;
-	} buf;
+	struct fcoe_rport frport = { };
 	int rc, vlan_id = 0;
 
 	fiph = (struct fip_header *)skb->data;
@@ -2769,7 +2764,7 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
 		goto drop;
 	}
 
-	rc = fcoe_ctlr_vn_parse(fip, skb, &buf.rdata);
+	rc = fcoe_ctlr_vn_parse(fip, skb, &frport);
 	if (rc) {
 		LIBFCOE_FIP_DBG(fip, "vn_recv vn_parse error %d\n", rc);
 		goto drop;
@@ -2778,19 +2773,19 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
 	mutex_lock(&fip->ctlr_mutex);
 	switch (sub) {
 	case FIP_SC_VN_PROBE_REQ:
-		fcoe_ctlr_vn_probe_req(fip, &buf.rdata);
+		fcoe_ctlr_vn_probe_req(fip, &frport.rdata);
 		break;
 	case FIP_SC_VN_PROBE_REP:
-		fcoe_ctlr_vn_probe_reply(fip, &buf.rdata);
+		fcoe_ctlr_vn_probe_reply(fip, &frport.rdata);
 		break;
 	case FIP_SC_VN_CLAIM_NOTIFY:
-		fcoe_ctlr_vn_claim_notify(fip, &buf.rdata);
+		fcoe_ctlr_vn_claim_notify(fip, &frport.rdata);
 		break;
 	case FIP_SC_VN_CLAIM_REP:
-		fcoe_ctlr_vn_claim_resp(fip, &buf.rdata);
+		fcoe_ctlr_vn_claim_resp(fip, &frport.rdata);
 		break;
 	case FIP_SC_VN_BEACON:
-		fcoe_ctlr_vn_beacon(fip, &buf.rdata);
+		fcoe_ctlr_vn_beacon(fip, &frport.rdata);
 		break;
 	default:
 		LIBFCOE_FIP_DBG(fip, "vn_recv unknown subcode %d\n", sub);
@@ -2814,22 +2809,18 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
  */
 static int fcoe_ctlr_vlan_parse(struct fcoe_ctlr *fip,
				struct sk_buff *skb,
-				struct fc_rport_priv *rdata)
+				struct fcoe_rport *frport)
 {
 	struct fip_header *fiph;
 	struct fip_desc *desc = NULL;
 	struct fip_mac_desc *macd = NULL;
 	struct fip_wwn_desc *wwn = NULL;
-	struct fcoe_rport *frport;
 	size_t rlen;
 	size_t dlen;
 	u32 desc_mask = 0;
 	u32 dtype;
 	u8 sub;
 
-	memset(rdata, 0, sizeof(*rdata) + sizeof(*frport));
-	frport = fcoe_ctlr_rport(rdata);
-
 	fiph = (struct fip_header *)skb->data;
 	frport->flags = ntohs(fiph->fip_flags);
 
@@ -2883,7 +2874,8 @@ static int fcoe_ctlr_vlan_parse(struct fcoe_ctlr *fip,
 			if (dlen != sizeof(struct fip_wwn_desc))
 				goto len_err;
 			wwn = (struct fip_wwn_desc *)desc;
-			rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn);
+			frport->rdata.ids.node_name =
+				get_unaligned_be64(&wwn->fd_wwn);
 			break;
 		default:
 			LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x "
@@ -2994,22 +2986,19 @@ static int fcoe_ctlr_vlan_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
 {
 	struct fip_header *fiph;
 	enum fip_vlan_subcode sub;
-	struct {
-		struct fc_rport_priv rdata;
-		struct fcoe_rport frport;
-	} buf;
+	struct fcoe_rport frport = { };
 	int rc;
 
 	fiph = (struct fip_header *)skb->data;
 	sub = fiph->fip_subcode;
-	rc = fcoe_ctlr_vlan_parse(fip, skb, &buf.rdata);
+	rc = fcoe_ctlr_vlan_parse(fip, skb, &frport);
 	if (rc) {
 		LIBFCOE_FIP_DBG(fip, "vlan_recv vlan_parse error %d\n", rc);
 		goto drop;
 	}
 	mutex_lock(&fip->ctlr_mutex);
 	if (sub == FIP_SC_VL_REQ)
-		fcoe_ctlr_vlan_disc_reply(fip, &buf.rdata);
+		fcoe_ctlr_vlan_disc_reply(fip, &frport.rdata);
 	mutex_unlock(&fip->ctlr_mutex);
 
 drop:
@@ -140,6 +140,7 @@ EXPORT_SYMBOL(fc_rport_lookup);
 struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id)
 {
 	struct fc_rport_priv *rdata;
+	size_t rport_priv_size = sizeof(*rdata);
 
 	lockdep_assert_held(&lport->disc.disc_mutex);
 
@@ -147,7 +148,9 @@ struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id)
 	if (rdata)
 		return rdata;
 
-	rdata = kzalloc(sizeof(*rdata) + lport->rport_priv_size, GFP_KERNEL);
+	if (lport->rport_priv_size > 0)
+		rport_priv_size = lport->rport_priv_size;
+	rdata = kzalloc(rport_priv_size, GFP_KERNEL);
 	if (!rdata)
 		return NULL;
 
@@ -554,7 +554,8 @@ static int bcm2835_spi_transfer_one(struct spi_master *master,
 	bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv);
 
 	/* handle all the 3-wire mode */
-	if ((spi->mode & SPI_3WIRE) && (tfr->rx_buf))
+	if (spi->mode & SPI_3WIRE && tfr->rx_buf &&
+	    tfr->rx_buf != master->dummy_rx)
 		cs |= BCM2835_SPI_CS_REN;
 	else
 		cs &= ~BCM2835_SPI_CS_REN;
@@ -894,9 +894,6 @@ COMPATIBLE_IOCTL(PPPIOCDISCONN)
 COMPATIBLE_IOCTL(PPPIOCATTCHAN)
 COMPATIBLE_IOCTL(PPPIOCGCHAN)
 COMPATIBLE_IOCTL(PPPIOCGL2TPSTATS)
-/* PPPOX */
-COMPATIBLE_IOCTL(PPPOEIOCSFWD)
-COMPATIBLE_IOCTL(PPPOEIOCDFWD)
 /* Big A */
 /* sparc only */
 /* Big Q for sound/OSS */
@@ -209,6 +209,7 @@ struct css_set {
 	 */
 	struct list_head tasks;
 	struct list_head mg_tasks;
+	struct list_head dying_tasks;
 
 	/* all css_task_iters currently walking this cset */
 	struct list_head task_iters;
@@ -43,6 +43,9 @@
 /* walk all threaded css_sets in the domain */
 #define CSS_TASK_ITER_THREADED	(1U << 1)
 
+/* internal flags */
+#define CSS_TASK_ITER_SKIPPED	(1U << 16)
+
 /* a css_task_iter should be treated as an opaque object */
 struct css_task_iter {
 	struct cgroup_subsys *ss;
@@ -57,6 +60,7 @@ struct css_task_iter {
 	struct list_head *task_pos;
 	struct list_head *tasks_head;
 	struct list_head *mg_tasks_head;
+	struct list_head *dying_tasks_head;
 
 	struct css_set *cur_cset;
 	struct css_set *cur_dcset;
diff --git a/include/linux/device.h b/include/linux/device.h
@@ -1332,6 +1332,7 @@ extern int (*platform_notify_remove)(struct device *dev);
  */
 extern struct device *get_device(struct device *dev);
 extern void put_device(struct device *dev);
+extern bool kill_device(struct device *dev);
 
 #ifdef CONFIG_DEVTMPFS
 extern int devtmpfs_create_node(struct device *dev);
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
@@ -84,6 +84,9 @@ extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
 extern void unregister_pppox_proto(int proto_num);
 extern void pppox_unbind_sock(struct sock *sk);/* delete ppp-channel binding */
 extern int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+extern int pppox_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+
+#define PPPOEIOCSFWD32    _IOW(0xB1 ,0, compat_size_t)
 
 /* PPPoX socket states */
 enum {
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
@@ -188,6 +188,7 @@ int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler,
 struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_handle *handler);
 struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
 void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
+u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter);
 void mlx5_fc_query_cached(struct mlx5_fc *counter,
 			  u64 *bytes, u64 *packets, u64 *lastuse);
 int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
@@ -5623,7 +5623,12 @@ struct mlx5_ifc_modify_cq_in_bits {
 
 	struct mlx5_ifc_cqc_bits cq_context;
 
-	u8         reserved_at_280[0x600];
+	u8         reserved_at_280[0x60];
+
+	u8         cq_umem_valid[0x1];
+	u8         reserved_at_2e1[0x1f];
+
+	u8         reserved_at_300[0x580];
 
 	u8         pas[0][0x40];
 };
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
@@ -241,6 +241,7 @@ struct fcoe_fcf {
  * @vn_mac: VN_Node assigned MAC address for data
  */
 struct fcoe_rport {
+	struct fc_rport_priv rdata;
 	unsigned long time;
 	u16 fcoe_len;
 	u16 flags;
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
@@ -213,7 +213,8 @@ static struct cftype cgroup_base_files[];
 
 static int cgroup_apply_control(struct cgroup *cgrp);
 static void cgroup_finalize_control(struct cgroup *cgrp, int ret);
-static void css_task_iter_advance(struct css_task_iter *it);
+static void css_task_iter_skip(struct css_task_iter *it,
+			       struct task_struct *task);
 static int cgroup_destroy_locked(struct cgroup *cgrp);
 static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
 					      struct cgroup_subsys *ss);
@@ -673,6 +674,7 @@ struct css_set init_css_set = {
 	.dom_cset		= &init_css_set,
 	.tasks			= LIST_HEAD_INIT(init_css_set.tasks),
 	.mg_tasks		= LIST_HEAD_INIT(init_css_set.mg_tasks),
+	.dying_tasks		= LIST_HEAD_INIT(init_css_set.dying_tasks),
 	.task_iters		= LIST_HEAD_INIT(init_css_set.task_iters),
 	.threaded_csets		= LIST_HEAD_INIT(init_css_set.threaded_csets),
 	.cgrp_links		= LIST_HEAD_INIT(init_css_set.cgrp_links),
@@ -776,6 +778,21 @@ static void css_set_update_populated(struct css_set *cset, bool populated)
 			cgroup_update_populated(link->cgrp, populated);
 }
 
+/*
+ * @task is leaving, advance task iterators which are pointing to it so
+ * that they can resume at the next position. Advancing an iterator might
+ * remove it from the list, use safe walk. See css_task_iter_skip() for
+ * details.
+ */
+static void css_set_skip_task_iters(struct css_set *cset,
+				    struct task_struct *task)
+{
+	struct css_task_iter *it, *pos;
+
+	list_for_each_entry_safe(it, pos, &cset->task_iters, iters_node)
+		css_task_iter_skip(it, task);
+}
+
 /**
  * css_set_move_task - move a task from one css_set to another
  * @task: task being moved
@@ -801,22 +818,9 @@ static void css_set_move_task(struct task_struct *task,
 		css_set_update_populated(to_cset, true);
 
 	if (from_cset) {
-		struct css_task_iter *it, *pos;
-
 		WARN_ON_ONCE(list_empty(&task->cg_list));
 
-		/*
-		 * @task is leaving, advance task iterators which are
-		 * pointing to it so that they can resume at the next
-		 * position. Advancing an iterator might remove it from
-		 * the list, use safe walk. See css_task_iter_advance*()
-		 * for details.
-		 */
-		list_for_each_entry_safe(it, pos, &from_cset->task_iters,
-					 iters_node)
-			if (it->task_pos == &task->cg_list)
-				css_task_iter_advance(it);
-
+		css_set_skip_task_iters(from_cset, task);
 		list_del_init(&task->cg_list);
 		if (!css_set_populated(from_cset))
 			css_set_update_populated(from_cset, false);
@@ -1143,6 +1147,7 @@ static struct css_set *find_css_set(struct css_set *old_cset,
 	cset->dom_cset = cset;
 	INIT_LIST_HEAD(&cset->tasks);
 	INIT_LIST_HEAD(&cset->mg_tasks);
+	INIT_LIST_HEAD(&cset->dying_tasks);
 	INIT_LIST_HEAD(&cset->task_iters);
 	INIT_LIST_HEAD(&cset->threaded_csets);
 	INIT_HLIST_NODE(&cset->hlist);
@@ -4235,15 +4240,18 @@ static void css_task_iter_advance_css_set(struct css_task_iter *it)
 			it->task_pos = NULL;
 			return;
 		}
-	} while (!css_set_populated(cset));
+	} while (!css_set_populated(cset) && list_empty(&cset->dying_tasks));
 
 	if (!list_empty(&cset->tasks))
 		it->task_pos = cset->tasks.next;
-	else
+	else if (!list_empty(&cset->mg_tasks))
 		it->task_pos = cset->mg_tasks.next;
+	else
+		it->task_pos = cset->dying_tasks.next;
 
 	it->tasks_head = &cset->tasks;
 	it->mg_tasks_head = &cset->mg_tasks;
+	it->dying_tasks_head = &cset->dying_tasks;
 
 	/*
 	 * We don't keep css_sets locked across iteration steps and thus
@@ -4269,9 +4277,20 @@ static void css_task_iter_advance_css_set(struct css_task_iter *it)
 	list_add(&it->iters_node, &cset->task_iters);
 }
 
+static void css_task_iter_skip(struct css_task_iter *it,
+			       struct task_struct *task)
+{
+	lockdep_assert_held(&css_set_lock);
+
+	if (it->task_pos == &task->cg_list) {
+		it->task_pos = it->task_pos->next;
+		it->flags |= CSS_TASK_ITER_SKIPPED;
+	}
+}
+
 static void css_task_iter_advance(struct css_task_iter *it)
 {
-	struct list_head *next;
+	struct task_struct *task;
 
 	lockdep_assert_held(&css_set_lock);
 repeat:
@@ -4281,25 +4300,40 @@ static void css_task_iter_advance(struct css_task_iter *it)
 		 * consumed first and then ->mg_tasks. After ->mg_tasks,
 		 * we move onto the next cset.
 		 */
-		next = it->task_pos->next;
-
-		if (next == it->tasks_head)
-			next = it->mg_tasks_head->next;
+		if (it->flags & CSS_TASK_ITER_SKIPPED)
+			it->flags &= ~CSS_TASK_ITER_SKIPPED;
+		else
+			it->task_pos = it->task_pos->next;
 
-		if (next == it->mg_tasks_head)
+		if (it->task_pos == it->tasks_head)
+			it->task_pos = it->mg_tasks_head->next;
+		if (it->task_pos == it->mg_tasks_head)
+			it->task_pos = it->dying_tasks_head->next;
+		if (it->task_pos == it->dying_tasks_head)
 			css_task_iter_advance_css_set(it);
-		else
-			it->task_pos = next;
 	} else {
 		/* called from start, proceed to the first cset */
 		css_task_iter_advance_css_set(it);
 	}
 
-	/* if PROCS, skip over tasks which aren't group leaders */
-	if ((it->flags & CSS_TASK_ITER_PROCS) && it->task_pos &&
-	    !thread_group_leader(list_entry(it->task_pos, struct task_struct,
-					    cg_list)))
-		goto repeat;
+	if (!it->task_pos)
+		return;
+
+	task = list_entry(it->task_pos, struct task_struct, cg_list);
+
+	if (it->flags & CSS_TASK_ITER_PROCS) {
+		/* if PROCS, skip over tasks which aren't group leaders */
+		if (!thread_group_leader(task))
+			goto repeat;
+
+		/* and dying leaders w/o live member threads */
+		if (!atomic_read(&task->signal->live))
+			goto repeat;
+	} else {
+		/* skip all dying ones */
+		if (task->flags & PF_EXITING)
+			goto repeat;
+	}
 }
 
 /**
@@ -4355,6 +4389,10 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it)
 
 	spin_lock_irq(&css_set_lock);
 
+	/* @it may be half-advanced by skips, finish advancing */
+	if (it->flags & CSS_TASK_ITER_SKIPPED)
+		css_task_iter_advance(it);
+
 	if (it->task_pos) {
 		it->cur_task = list_entry(it->task_pos, struct task_struct,
 					  cg_list);
@@ -5790,6 +5828,7 @@ void cgroup_exit(struct task_struct *tsk)
 	if (!list_empty(&tsk->cg_list)) {
 		spin_lock_irq(&css_set_lock);
 		css_set_move_task(tsk, cset, NULL, false);
+		list_add_tail(&tsk->cg_list, &cset->dying_tasks);
 		cset->nr_tasks--;
 		spin_unlock_irq(&css_set_lock);
 	} else {
@@ -5810,6 +5849,13 @@ void cgroup_release(struct task_struct *task)
 	do_each_subsys_mask(ss, ssid, have_release_callback) {
 		ss->release(task);
 	} while_each_subsys_mask();
+
+	if (use_task_css_set_links) {
+		spin_lock_irq(&css_set_lock);
+		css_set_skip_task_iters(task_css_set(task), task);
+		list_del_init(&task->cg_list);
+		spin_unlock_irq(&css_set_lock);
+	}
 }
 
 void cgroup_free(struct task_struct *task)
diff --git a/kernel/exit.c b/kernel/exit.c
@@ -194,6 +194,7 @@ void release_task(struct task_struct *p)
 	rcu_read_unlock();
 
 	proc_flush_task(p);
+	cgroup_release(p);
 
 	write_lock_irq(&tasklist_lock);
 	ptrace_release_task(p);
@@ -219,7 +220,6 @@ void release_task(struct task_struct *p)
 	}
 
 	write_unlock_irq(&tasklist_lock);
-	cgroup_release(p);
 	release_thread(p);
 	call_rcu(&p->rcu, delayed_put_task_struct);
 
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
@@ -1621,6 +1621,9 @@ br_multicast_leave_group(struct net_bridge *br,
 		if (!br_port_group_equal(p, port, src))
 			continue;
 
+		if (p->flags & MDB_PG_FLAGS_PERMANENT)
+			break;
+
 		rcu_assign_pointer(*pp, p->next);
 		hlist_del_init(&p->mglist);
 		del_timer(&p->timer);
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
@@ -677,6 +677,11 @@ void br_vlan_flush(struct net_bridge *br)
 
 	ASSERT_RTNL();
 
+	/* delete auto-added default pvid local fdb before flushing vlans
+	 * otherwise it will be leaked on bridge device init failure
+	 */
+	br_fdb_delete_by_port(br, NULL, 0, 1);
+
 	vg = br_vlan_group(br);
 	__vlan_flush(vg);
 	RCU_INIT_POINTER(br->vlgrp, NULL);
diff --git a/net/core/dev.c b/net/core/dev.c
@@ -9510,6 +9510,8 @@ static void __net_exit default_device_exit(struct net *net)
 
 		/* Push remaining network devices to init_net */
 		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
+		if (__dev_get_by_name(&init_net, fb_name))
+			snprintf(fb_name, IFNAMSIZ, "dev%%d");
 		err = dev_change_net_namespace(dev, &init_net, fb_name);
 		if (err) {
 			pr_emerg("%s: failed to move %s to init_net: %d\n",
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
@@ -281,6 +281,9 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb,
 	const struct iphdr  *tiph = &tunnel->parms.iph;
 	u8 ipproto;
 
+	if (!pskb_inet_may_pull(skb))
+		goto tx_error;
+
 	switch (skb->protocol) {
 	case htons(ETH_P_IP):
 		ipproto = IPPROTO_IPIP;
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
@@ -680,12 +680,13 @@ static int prepare_ip6gre_xmit_ipv6(struct sk_buff *skb,
 				    struct flowi6 *fl6, __u8 *dsfield,
 				    int *encap_limit)
 {
-	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+	struct ipv6hdr *ipv6h;
 	struct ip6_tnl *t = netdev_priv(dev);
 	__u16 offset;
 
 	offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
 	/* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
+	ipv6h = ipv6_hdr(skb);
 
 	if (offset > 0) {
 		struct ipv6_tlv_tnl_enc_lim *tel;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
@@ -1283,12 +1283,11 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
+	dsfield = INET_ECN_encapsulate(dsfield, ipv4_get_dsfield(iph));
 
 	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
 		return -1;
 
-	dsfield = INET_ECN_encapsulate(dsfield, ipv4_get_dsfield(iph));
-
 	skb_set_inner_ipproto(skb, IPPROTO_IPIP);
 
 	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
@@ -1372,12 +1371,11 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
+	dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h));
 
 	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
 		return -1;
 
-	dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h));
-
 	skb_set_inner_ipproto(skb, IPPROTO_IPV6);
 
 	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
@@ -1686,6 +1686,9 @@ static const struct proto_ops pppol2tp_ops = {
 	.recvmsg	= pppol2tp_recvmsg,
 	.mmap		= sock_no_mmap,
 	.ioctl		= pppox_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= pppox_compat_ioctl,
+#endif
 };
 
 static const struct pppox_proto pppol2tp_proto = {
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
@@ -287,6 +287,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
 	struct tcf_bpf *prog;
 	bool is_bpf, is_ebpf;
 	int ret, res = 0;
+	u32 index;
 
 	if (!nla)
 		return -EINVAL;
@@ -299,13 +300,13 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
 		return -EINVAL;
 
 	parm = nla_data(tb[TCA_ACT_BPF_PARMS]);
-
-	ret = tcf_idr_check_alloc(tn, &parm->index, act, bind);
+	index = parm->index;
+	ret = tcf_idr_check_alloc(tn, &index, act, bind);
 	if (!ret) {
-		ret = tcf_idr_create(tn, parm->index, est, act,
+		ret = tcf_idr_create(tn, index, est, act,
 				     &act_bpf_ops, bind, true);
 		if (ret < 0) {
-			tcf_idr_cleanup(tn, parm->index);
+			tcf_idr_cleanup(tn, index);
 			return ret;
 		}
 
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
@@ -104,6 +104,7 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
 	struct tcf_connmark_info *ci;
 	struct tc_connmark *parm;
 	int ret = 0;
+	u32 index;
 
 	if (!nla)
 		return -EINVAL;
@@ -117,13 +118,13 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
 		return -EINVAL;
 
 	parm = nla_data(tb[TCA_CONNMARK_PARMS]);
-
-	ret = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+	index = parm->index;
+	ret = tcf_idr_check_alloc(tn, &index, a, bind);
 	if (!ret) {
-		ret = tcf_idr_create(tn, parm->index, est, a,
+		ret = tcf_idr_create(tn, index, est, a,
 				     &act_connmark_ops, bind, false);
 		if (ret) {
-			tcf_idr_cleanup(tn, parm->index);
+			tcf_idr_cleanup(tn, index);
 			return ret;
 		}
 
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
@@ -55,6 +55,7 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla,
 	struct tc_csum *parm;
 	struct tcf_csum *p;
 	int ret = 0, err;
+	u32 index;
 
 	if (nla == NULL)
 		return -EINVAL;
@@ -66,13 +67,13 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla,
 	if (tb[TCA_CSUM_PARMS] == NULL)
 		return -EINVAL;
 	parm = nla_data(tb[TCA_CSUM_PARMS]);
-
-	err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+	index = parm->index;
+	err = tcf_idr_check_alloc(tn, &index, a, bind);
 	if (!err) {
-		ret = tcf_idr_create(tn, parm->index, est, a,
+		ret = tcf_idr_create(tn, index, est, a,
 				     &act_csum_ops, bind, true);
 		if (ret) {
-			tcf_idr_cleanup(tn, parm->index);
+			tcf_idr_cleanup(tn, index);
 			return ret;
 		}
 		ret = ACT_P_CREATED;
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
@@ -64,6 +64,7 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
 	struct tc_gact *parm;
 	struct tcf_gact *gact;
 	int ret = 0;
+	u32 index;
 	int err;
 #ifdef CONFIG_GACT_PROB
 	struct tc_gact_p *p_parm = NULL;
@@ -79,6 +80,7 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
 	if (tb[TCA_GACT_PARMS] == NULL)
 		return -EINVAL;
 	parm = nla_data(tb[TCA_GACT_PARMS]);
+	index = parm->index;
 
 #ifndef CONFIG_GACT_PROB
 	if (tb[TCA_GACT_PROB] != NULL)
@@ -91,12 +93,12 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
 	}
 #endif
 
-	err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+	err = tcf_idr_check_alloc(tn, &index, a, bind);
 	if (!err) {
-		ret = tcf_idr_create(tn, parm->index, est, a,
+		ret = tcf_idr_create(tn, index, est, a,
 				     &act_gact_ops, bind, true);
 		if (ret) {
-			tcf_idr_cleanup(tn, parm->index);
+			tcf_idr_cleanup(tn, index);
 			return ret;
 		}
 		ret = ACT_P_CREATED;
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
@@ -482,8 +482,14 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
 	u8 *saddr = NULL;
 	bool exists = false;
 	int ret = 0;
+	u32 index;
 	int err;
 
+	if (!nla) {
+		NL_SET_ERR_MSG_MOD(extack, "IFE requires attributes to be passed");
+		return -EINVAL;
+	}
+
 	err = nla_parse_nested(tb, TCA_IFE_MAX, nla, ife_policy, NULL);
 	if (err < 0)
 		return err;
@@ -504,7 +510,8 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
 	if (!p)
 		return -ENOMEM;
 
-	err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+	index = parm->index;
+	err = tcf_idr_check_alloc(tn, &index, a, bind);
 	if (err < 0) {
 		kfree(p);
 		return err;
@@ -516,10 +523,10 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
 	}
 
 	if (!exists) {
-		ret = tcf_idr_create(tn, parm->index, est, a, &act_ife_ops,
+		ret = tcf_idr_create(tn, index, est, a, &act_ife_ops,
 				     bind, true);
 		if (ret) {
-			tcf_idr_cleanup(tn, parm->index);
+			tcf_idr_cleanup(tn, index);
 			kfree(p);
 			return ret;
 		}
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
@@ -104,6 +104,7 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
 	struct net_device *dev;
 	bool exists = false;
 	int ret, err;
+	u32 index;
 
 	if (!nla) {
 		NL_SET_ERR_MSG_MOD(extack, "Mirred requires attributes to be passed");
@@ -117,8 +118,8 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
 		return -EINVAL;
 	}
 	parm = nla_data(tb[TCA_MIRRED_PARMS]);
-
-	err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+	index = parm->index;
+	err = tcf_idr_check_alloc(tn, &index, a, bind);
 	if (err < 0)
 		return err;
 	exists = err;
@@ -135,21 +136,21 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
 		if (exists)
 			tcf_idr_release(*a, bind);
 		else
-			tcf_idr_cleanup(tn, parm->index);
+			tcf_idr_cleanup(tn, index);
 		NL_SET_ERR_MSG_MOD(extack, "Unknown mirred option");
 		return -EINVAL;
 	}
 
 	if (!exists) {
 		if (!parm->ifindex) {
-			tcf_idr_cleanup(tn, parm->index);
+			tcf_idr_cleanup(tn, index);
 			NL_SET_ERR_MSG_MOD(extack, "Specified device does not exist");
 			return -EINVAL;
 		}
-		ret = tcf_idr_create(tn, parm->index, est, a,
+		ret = tcf_idr_create(tn, index, est, a,
 				     &act_mirred_ops, bind, true);
 		if (ret) {
-			tcf_idr_cleanup(tn, parm->index);
+			tcf_idr_cleanup(tn, index);
 			return ret;
 		}
 		ret = ACT_P_CREATED;
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
@@ -45,6 +45,7 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
 	struct tc_nat *parm;
 	int ret = 0, err;
 	struct tcf_nat *p;
+	u32 index;
 
 	if (nla == NULL)
 		return -EINVAL;
@@ -56,13 +57,13 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
 	if (tb[TCA_NAT_PARMS] == NULL)
 		return -EINVAL;
 	parm = nla_data(tb[TCA_NAT_PARMS]);
-
-	err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+	index = parm->index;
+	err = tcf_idr_check_alloc(tn, &index, a, bind);
 	if (!err) {
-		ret = tcf_idr_create(tn, parm->index, est, a,
+		ret = tcf_idr_create(tn, index, est, a,
 				     &act_nat_ops, bind, false);
 		if (ret) {
-			tcf_idr_cleanup(tn, parm->index);
+			tcf_idr_cleanup(tn, index);
 			return ret;
 		}
 		ret = ACT_P_CREATED;
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
@@ -149,6 +149,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
 	struct tcf_pedit *p;
 	int ret = 0, err;
 	int ksize;
+	u32 index;
 
 	if (!nla) {
 		NL_SET_ERR_MSG_MOD(extack, "Pedit requires attributes to be passed");
@@ -178,18 +179,19 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
 	if (IS_ERR(keys_ex))
 		return PTR_ERR(keys_ex);
 
-	err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+	index = parm->index;
+	err = tcf_idr_check_alloc(tn, &index, a, bind);
 	if (!err) {
 		if (!parm->nkeys) {
-			tcf_idr_cleanup(tn, parm->index);
+			tcf_idr_cleanup(tn, index);
 			NL_SET_ERR_MSG_MOD(extack, "Pedit requires keys to be passed");
 			ret = -EINVAL;
 			goto out_free;
 		}
-		ret = tcf_idr_create(tn, parm->index, est, a,
+		ret = tcf_idr_create(tn, index, est, a,
 				     &act_pedit_ops, bind, false);
 		if (ret) {
-			tcf_idr_cleanup(tn, parm->index);
+			tcf_idr_cleanup(tn, index);
 			goto out_free;
 		}
 		ret = ACT_P_CREATED;
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
@@ -85,6 +85,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
 	struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
 	struct tc_action_net *tn = net_generic(net, police_net_id);
 	bool exists = false;
+	u32 index;
 	int size;
 
 	if (nla == NULL)
@@ -101,7 +102,8 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
 		return -EINVAL;
 
 	parm = nla_data(tb[TCA_POLICE_TBF]);
-	err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+	index = parm->index;
+	err = tcf_idr_check_alloc(tn, &index, a, bind);
 	if (err < 0)
 		return err;
 	exists = err;
@@ -109,10 +111,10 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
 		return 0;
 
 	if (!exists) {
-		ret = tcf_idr_create(tn, parm->index, NULL, a,
+		ret = tcf_idr_create(tn, index, NULL, a,
 				     &act_police_ops, bind, false);
 		if (ret) {
-			tcf_idr_cleanup(tn, parm->index);
+			tcf_idr_cleanup(tn, index);
 			return ret;
 		}
 		ret = ACT_P_CREATED;
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
@@ -43,7 +43,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
 	struct tc_action_net *tn = net_generic(net, sample_net_id);
 	struct nlattr *tb[TCA_SAMPLE_MAX + 1];
 	struct psample_group *psample_group;
-	u32 psample_group_num, rate;
+	u32 psample_group_num, rate, index;
 	struct tc_sample *parm;
 	struct tcf_sample *s;
 	bool exists = false;
@@ -59,8 +59,8 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
 		return -EINVAL;
 
 	parm = nla_data(tb[TCA_SAMPLE_PARMS]);
-
-	err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+	index = parm->index;
+	err = tcf_idr_check_alloc(tn, &index, a, bind);
 	if (err < 0)
 		return err;
 	exists = err;
@@ -68,10 +68,10 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
 		return 0;
 
 	if (!exists) {
-		ret = tcf_idr_create(tn, parm->index, est, a,
+		ret = tcf_idr_create(tn, index, est, a,
 				     &act_sample_ops, bind, true);
 		if (ret) {
-			tcf_idr_cleanup(tn, parm->index);
+			tcf_idr_cleanup(tn, index);
 			return ret;
 		}
 		ret = ACT_P_CREATED;
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
@@ -88,6 +88,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
 	struct tcf_defact *d;
 	bool exists = false;
 	int ret = 0, err;
+	u32 index;
 
 	if (nla == NULL)
 		return -EINVAL;
@@ -100,7 +101,8 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
 		return -EINVAL;
 
 	parm = nla_data(tb[TCA_DEF_PARMS]);
-	err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+	index = parm->index;
+	err = tcf_idr_check_alloc(tn, &index, a, bind);
 	if (err < 0)
 		return err;
 	exists = err;
@@ -111,15 +113,15 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
 		if (exists)
 			tcf_idr_release(*a, bind);
 		else
-			tcf_idr_cleanup(tn, parm->index);
+			tcf_idr_cleanup(tn, index);
 		return -EINVAL;
 	}
 
 	if (!exists) {
-		ret = tcf_idr_create(tn, parm->index, est, a,
+		ret = tcf_idr_create(tn, index, est, a,
 				     &act_simp_ops, bind, false);
 		if (ret) {
-			tcf_idr_cleanup(tn, parm->index);
+			tcf_idr_cleanup(tn, index);
 			return ret;
 		}
 
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
@@ -107,6 +107,7 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
 	u16 *queue_mapping = NULL, *ptype = NULL;
 	bool exists = false;
 	int ret = 0, err;
+	u32 index;
 
 	if (nla == NULL)
 		return -EINVAL;
@@ -153,8 +154,8 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
 	}
 
 	parm = nla_data(tb[TCA_SKBEDIT_PARMS]);
-
-	err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+	index = parm->index;
+	err = tcf_idr_check_alloc(tn, &index, a, bind);
 	if (err < 0)
 		return err;
 	exists = err;
@@ -165,15 +166,15 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
 		if (exists)
 			tcf_idr_release(*a, bind);
 		else
-			tcf_idr_cleanup(tn, parm->index);
+			tcf_idr_cleanup(tn, index);
 		return -EINVAL;
 	}
 
 	if (!exists) {
-		ret = tcf_idr_create(tn, parm->index, est, a,
+		ret = tcf_idr_create(tn, index, est, a,
 				     &act_skbedit_ops, bind, true);
 		if (ret) {
-			tcf_idr_cleanup(tn, parm->index);
+			tcf_idr_cleanup(tn, index);
 			return ret;
 		}
 
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
@@ -88,12 +88,12 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
 	struct nlattr *tb[TCA_SKBMOD_MAX + 1];
 	struct tcf_skbmod_params *p, *p_old;
 	struct tc_skbmod *parm;
+	u32 lflags = 0, index;
 	struct tcf_skbmod *d;
 	bool exists = false;
 	u8 *daddr = NULL;
 	u8 *saddr = NULL;
 	u16 eth_type = 0;
-	u32 lflags = 0;
 	int ret = 0, err;
 
 	if (!nla)
@@ -122,10 +122,11 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
 	}
 
 	parm = nla_data(tb[TCA_SKBMOD_PARMS]);
+	index = parm->index;
 	if (parm->flags & SKBMOD_F_SWAPMAC)
 		lflags = SKBMOD_F_SWAPMAC;
 
-	err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+	err = tcf_idr_check_alloc(tn, &index, a, bind);
 	if (err < 0)
 		return err;
 	exists = err;
@@ -136,15 +137,15 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
 		if (exists)
 			tcf_idr_release(*a, bind);
 		else
-			tcf_idr_cleanup(tn, parm->index);
+			tcf_idr_cleanup(tn, index);
 		return -EINVAL;
 	}
 
 	if (!exists) {
-		ret = tcf_idr_create(tn, parm->index, est, a,
+		ret = tcf_idr_create(tn, index, est, a,
 				     &act_skbmod_ops, bind, true);
 		if (ret) {
-			tcf_idr_cleanup(tn, parm->index);
+			tcf_idr_cleanup(tn, index);
 			return ret;
 		}
 
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
@@ -224,6 +224,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
 	__be16 flags;
 	u8 tos, ttl;
 	int ret = 0;
+	u32 index;
 	int err;
 
 	if (!nla) {
@@ -244,7 +245,8 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
 	}
 
 	parm = nla_data(tb[TCA_TUNNEL_KEY_PARMS]);
-	err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+	index = parm->index;
+	err = tcf_idr_check_alloc(tn, &index, a, bind);
 	if (err < 0)
 		return err;
 	exists = err;
@@ -338,7 +340,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
 	}
 
 	if (!exists) {
-		ret = tcf_idr_create(tn, parm->index, est, a,
+		ret = tcf_idr_create(tn, index, est, a,
 				     &act_tunnel_key_ops, bind, true);
 		if (ret) {
 			NL_SET_ERR_MSG(extack, "Cannot create TC IDR");
@@ -384,7 +386,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
 	if (exists)
 		tcf_idr_release(*a, bind);
 	else
-		tcf_idr_cleanup(tn, parm->index);
+		tcf_idr_cleanup(tn, index);
 	return ret;
 }
 
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
@@ -118,6 +118,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
 	u8 push_prio = 0;
 	bool exists = false;
 	int ret = 0, err;
+	u32 index;
 
 	if (!nla)
 		return -EINVAL;
@@ -129,7 +130,8 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
 	if (!tb[TCA_VLAN_PARMS])
 		return -EINVAL;
 	parm = nla_data(tb[TCA_VLAN_PARMS]);
-	err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+	index = parm->index;
+	err = tcf_idr_check_alloc(tn, &index, a, bind);
 	if (err < 0)
 		return err;
 	exists = err;
@@ -145,7 +147,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
 			if (exists)
 				tcf_idr_release(*a, bind);
 			else
-				tcf_idr_cleanup(tn, parm->index);
+				tcf_idr_cleanup(tn, index);
 			return -EINVAL;
 		}
 		push_vid = nla_get_u16(tb[TCA_VLAN_PUSH_VLAN_ID]);
@@ -153,7 +155,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
 			if (exists)
 				tcf_idr_release(*a, bind);
 			else
-				tcf_idr_cleanup(tn, parm->index);
+				tcf_idr_cleanup(tn, index);
 			return -ERANGE;
 		}
 
@@ -167,7 +169,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
 				if (exists)
 					tcf_idr_release(*a, bind);
 				else
-					tcf_idr_cleanup(tn, parm->index);
+					tcf_idr_cleanup(tn, index);
 				return -EPROTONOSUPPORT;
 			}
 		} else {
@@ -181,16 +183,16 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
 		if (exists)
 			tcf_idr_release(*a, bind);
 		else
-			tcf_idr_cleanup(tn, parm->index);
+			tcf_idr_cleanup(tn, index);
 		return -EINVAL;
 	}
 	action = parm->v_action;
 
 	if (!exists) {
-		ret = tcf_idr_create(tn, parm->index, est, a,
+		ret = tcf_idr_create(tn, index, est, a,
 				     &act_vlan_ops, bind, true);
 		if (ret) {
-			tcf_idr_cleanup(tn, parm->index);
+			tcf_idr_cleanup(tn, index);
 			return ret;
 		}
 
@@ -296,6 +298,14 @@ static int tcf_vlan_search(struct net *net, struct tc_action **a, u32 index,
 	return tcf_idr_search(tn, a, index);
 }
 
+static size_t tcf_vlan_get_fill_size(const struct tc_action *act)
+{
+	return nla_total_size(sizeof(struct tc_vlan))
+		+ nla_total_size(sizeof(u16)) /* TCA_VLAN_PUSH_VLAN_ID */
+		+ nla_total_size(sizeof(u16)) /* TCA_VLAN_PUSH_VLAN_PROTOCOL */
+		+ nla_total_size(sizeof(u8)); /* TCA_VLAN_PUSH_VLAN_PRIORITY */
+}
+
 static struct tc_action_ops act_vlan_ops = {
 	.kind		= "vlan",
 	.type		= TCA_ACT_VLAN,
@@ -305,6 +315,7 @@ static struct tc_action_ops act_vlan_ops = {
 	.init		= tcf_vlan_init,
 	.cleanup	= tcf_vlan_cleanup,
 	.walk		= tcf_vlan_walker,
+	.get_fill_size	= tcf_vlan_get_fill_size,
 	.lookup		= tcf_vlan_search,
 	.size		= sizeof(struct tcf_vlan),
 };
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
@@ -71,10 +71,10 @@ static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
 	struct Qdisc *sch = ctx;
 	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
 
-	if (skb)
+	if (skb) {
 		sch->qstats.backlog -= qdisc_pkt_len(skb);
-
-	prefetch(&skb->end); /* we'll need skb_shinfo() */
+		prefetch(&skb->end); /* we'll need skb_shinfo() */
+	}
 	return skb;
 }
 
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
@@ -1680,14 +1680,18 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
 		}
 		break;
 	case TCP_NODELAY:
-		if (sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) {
+		if (sk->sk_state != SMC_INIT &&
+		    sk->sk_state != SMC_LISTEN &&
+		    sk->sk_state != SMC_CLOSED) {
 			if (val && !smc->use_fallback)
 				mod_delayed_work(system_wq, &smc->conn.tx_work,
 						 0);
 		}
 		break;
 	case TCP_CORK:
-		if (sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) {
+		if (sk->sk_state != SMC_INIT &&
+		    sk->sk_state != SMC_LISTEN &&
+		    sk->sk_state != SMC_CLOSED) {
 			if (!val && !smc->use_fallback)
 				mod_delayed_work(system_wq, &smc->conn.tx_work,
 						 0);
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
@@ -55,6 +55,7 @@ struct tipc_nl_compat_msg {
 	int rep_type;
 	int rep_size;
 	int req_type;
+	int req_size;
 	struct net *net;
 	struct sk_buff *rep;
 	struct tlv_desc *req;
@@ -257,7 +258,8 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
 	int err;
 	struct sk_buff *arg;
 
-	if (msg->req_type && !TLV_CHECK_TYPE(msg->req, msg->req_type))
+	if (msg->req_type && (!msg->req_size ||
+			      !TLV_CHECK_TYPE(msg->req, msg->req_type)))
 		return -EINVAL;
 
 	msg->rep = tipc_tlv_alloc(msg->rep_size);
@@ -354,7 +356,8 @@ static int tipc_nl_compat_doit(struct tipc_nl_compat_cmd_doit *cmd,
 {
 	int err;
 
-	if (msg->req_type && !TLV_CHECK_TYPE(msg->req, msg->req_type))
+	if (msg->req_type && (!msg->req_size ||
+			      !TLV_CHECK_TYPE(msg->req, msg->req_type)))
 		return -EINVAL;
 
 	err = __tipc_nl_compat_doit(cmd, msg);
@@ -1276,8 +1279,8 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
 		goto send;
 	}
 
-	len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
-	if (!len || !TLV_OK(msg.req, len)) {
+	msg.req_size = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
+	if (msg.req_size && !TLV_OK(msg.req, msg.req_size)) {
 		msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
 		err = -EOPNOTSUPP;
 		goto send;