Merge 4.19.206 into android-4.19-stable
Changes in 4.19.206
	net: qrtr: fix another OOB Read in qrtr_endpoint_post
	bpf: Do not use ax register in interpreter on div/mod
	bpf: Fix 32 bit src register truncation on div/mod
	bpf: Fix truncation handling for mod32 dst reg wrt zero
	ARC: Fix CONFIG_STACKDEPOT
	netfilter: conntrack: collect all entries in one cycle
	once: Fix panic when module unload
	can: usb: esd_usb2: esd_usb2_rx_event(): fix the interchange of the CAN RX and TX error counters
	Revert "USB: serial: ch341: fix character loss at high transfer rates"
	USB: serial: option: add new VID/PID to support Fibocom FG150
	usb: dwc3: gadget: Fix dwc3_calc_trbs_left()
	usb: dwc3: gadget: Stop EP0 transfers during pullup disable
	IB/hfi1: Fix possible null-pointer dereference in _extend_sdma_tx_descs()
	e1000e: Fix the max snoop/no-snoop latency for 10M
	ip_gre: add validation for csum_start
	xgene-v2: Fix a resource leak in the error handling path of 'xge_probe()'
	net: marvell: fix MVNETA_TX_IN_PRGRS bit number
	net: hns3: fix get wrong pfc_en when query PFC configuration
	usb: gadget: u_audio: fix race condition on endpoint stop
	opp: remove WARN when no valid OPPs remain
	virtio: Improve vq->broken access to avoid any compiler optimization
	virtio_pci: Support surprise removal of virtio pci device
	vringh: Use wiov->used to check for read/write desc order
	qed: qed ll2 race condition fixes
	qed: Fix null-pointer dereference in qed_rdma_create_qp()
	drm: Copy drm_wait_vblank to user before returning
	drm/nouveau/disp: power down unused DP links during init
	net/rds: dma_map_sg is entitled to merge entries
	vt_kdsetmode: extend console locking
	fbmem: add margin check to fb_check_caps()
	KVM: x86/mmu: Treat NX as used (not reserved) for all !TDP shadow MMUs
	Revert "floppy: reintroduce O_NDELAY fix"
	net: don't unconditionally copy_from_user a struct ifreq for socket ioctls
	Linux 4.19.206

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I04e05680c5e311bc4cd79daae49d654b66f774a0
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 205
SUBLEVEL = 206
EXTRAVERSION =
NAME = "People's Front"

@@ -92,6 +92,8 @@ SECTIONS
CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
*(.fixup)
*(.gnu.warning)
}

@@ -4557,7 +4557,16 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
void
reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
{
bool uses_nx = context->nx || context->base_role.smep_andnot_wp;
/*
* KVM uses NX when TDP is disabled to handle a variety of scenarios,
* notably for huge SPTEs if iTLB multi-hit mitigation is enabled and
* to generate correct permissions for CR0.WP=0/CR4.SMEP=1/EFER.NX=0.
* The iTLB multi-hit workaround can be toggled at any time, so assume
* NX can be used by any non-nested shadow MMU to avoid having to reset
* MMU contexts. Note, KVM forces EFER.NX=1 when TDP is disabled.
*/
bool uses_nx = context->nx || !tdp_enabled ||
context->base_role.smep_andnot_wp;
struct rsvd_bits_validate *shadow_zero_check;
int i;
@@ -4074,6 +4074,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
if (UFDCS->rawcmd == 1)
UFDCS->rawcmd = 2;

if (!(mode & FMODE_NDELAY)) {
if (mode & (FMODE_READ|FMODE_WRITE)) {
UDRS->last_checked = 0;
clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
@@ -4083,13 +4084,11 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
goto out;
}

res = -EROFS;

if ((mode & FMODE_WRITE) &&
!test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
goto out;

}
mutex_unlock(&open_lock);
mutex_unlock(&floppy_mutex);
return 0;

@@ -855,8 +855,6 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
req.request.sequence = req32.request.sequence;
req.request.signal = req32.request.signal;
err = drm_ioctl_kernel(file, drm_wait_vblank_ioctl, &req, DRM_UNLOCKED);
if (err)
return err;

req32.reply.type = req.reply.type;
req32.reply.sequence = req.reply.sequence;
@@ -865,7 +863,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
if (copy_to_user(argp, &req32, sizeof(req32)))
return -EFAULT;

return 0;
return err;
}

#if defined(CONFIG_X86)
@@ -419,7 +419,7 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps)
return ret;
}

static void
void
nvkm_dp_disable(struct nvkm_outp *outp, struct nvkm_ior *ior)
{
struct nvkm_dp *dp = nvkm_dp(outp);

@@ -32,6 +32,7 @@ struct nvkm_dp {

int nvkm_dp_new(struct nvkm_disp *, int index, struct dcb_output *,
struct nvkm_outp **);
void nvkm_dp_disable(struct nvkm_outp *, struct nvkm_ior *);

/* DPCD Receiver Capabilities */
#define DPCD_RC00_DPCD_REV 0x00000

@@ -22,6 +22,7 @@
* Authors: Ben Skeggs
*/
#include "outp.h"
#include "dp.h"
#include "ior.h"

#include <subdev/bios.h>
@@ -216,6 +217,14 @@ nvkm_outp_init_route(struct nvkm_outp *outp)
if (!ior->arm.head || ior->arm.proto != proto) {
OUTP_DBG(outp, "no heads (%x %d %d)", ior->arm.head,
ior->arm.proto, proto);

/* The EFI GOP driver on Ampere can leave unused DP links routed,
* which we don't expect. The DisableLT IED script *should* get
* us back to where we need to be.
*/
if (ior->func->route.get && !ior->arm.head && outp->info.type == DCB_OUTPUT_DP)
nvkm_dp_disable(outp, ior);

return;
}
@@ -3055,6 +3055,7 @@ static void __sdma_process_event(struct sdma_engine *sde,
static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
{
int i;
struct sdma_desc *descp;

/* Handle last descriptor */
if (unlikely((tx->num_desc == (MAX_DESC - 1)))) {
@@ -3075,12 +3076,10 @@ static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
if (unlikely(tx->num_desc == MAX_DESC))
goto enomem;

tx->descp = kmalloc_array(
MAX_DESC,
sizeof(struct sdma_desc),
GFP_ATOMIC);
if (!tx->descp)
descp = kmalloc_array(MAX_DESC, sizeof(struct sdma_desc), GFP_ATOMIC);
if (!descp)
goto enomem;
tx->descp = descp;

/* reserve last descriptor for coalescing */
tx->desc_limit = MAX_DESC - 1;

@@ -236,8 +236,8 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
if (id == ESD_EV_CAN_ERROR_EXT) {
u8 state = msg->msg.rx.data[0];
u8 ecc = msg->msg.rx.data[1];
u8 txerr = msg->msg.rx.data[2];
u8 rxerr = msg->msg.rx.data[3];
u8 rxerr = msg->msg.rx.data[2];
u8 txerr = msg->msg.rx.data[3];

skb = alloc_can_err_skb(priv->netdev, &cf);
if (skb == NULL) {

@@ -691,11 +691,13 @@ static int xge_probe(struct platform_device *pdev)
ret = register_netdev(ndev);
if (ret) {
netdev_err(ndev, "Failed to register netdev\n");
goto err;
goto err_mdio_remove;
}

return 0;

err_mdio_remove:
xge_mdio_remove(ndev);
err:
free_netdev(ndev);
@@ -204,21 +204,12 @@ static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
u64 requests[HNAE3_MAX_TC], indications[HNAE3_MAX_TC];
struct hclge_vport *vport = hclge_get_vport(h);
struct hclge_dev *hdev = vport->back;
u8 i, j, pfc_map, *prio_tc;
int ret;
u8 i;

memset(pfc, 0, sizeof(*pfc));
pfc->pfc_cap = hdev->pfc_max;
prio_tc = hdev->tm_info.prio_tc;
pfc_map = hdev->tm_info.hw_pfc_map;

/* Pfc setting is based on TC */
for (i = 0; i < hdev->tm_info.num_tc; i++) {
for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
if ((prio_tc[j] == i) && (pfc_map & BIT(i)))
pfc->pfc_en |= BIT(j);
}
}
pfc->pfc_en = hdev->tm_info.pfc_en;

ret = hclge_pfc_tx_stats_get(hdev, requests);
if (ret)

@@ -995,6 +995,8 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
{
u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
u16 max_ltr_enc_d = 0; /* maximum LTR decoded by platform */
u16 lat_enc_d = 0; /* latency decoded */
u16 lat_enc = 0; /* latency encoded */

if (link) {
@@ -1048,7 +1050,17 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop);
max_ltr_enc = max_t(u16, max_snoop, max_nosnoop);

if (lat_enc > max_ltr_enc)
lat_enc_d = (lat_enc & E1000_LTRV_VALUE_MASK) *
(1U << (E1000_LTRV_SCALE_FACTOR *
((lat_enc & E1000_LTRV_SCALE_MASK)
>> E1000_LTRV_SCALE_SHIFT)));

max_ltr_enc_d = (max_ltr_enc & E1000_LTRV_VALUE_MASK) *
(1U << (E1000_LTRV_SCALE_FACTOR *
((max_ltr_enc & E1000_LTRV_SCALE_MASK)
>> E1000_LTRV_SCALE_SHIFT)));

if (lat_enc_d > max_ltr_enc_d)
lat_enc = max_ltr_enc;
}

@@ -274,8 +274,11 @@

/* Latency Tolerance Reporting */
#define E1000_LTRV 0x000F8
#define E1000_LTRV_VALUE_MASK 0x000003FF
#define E1000_LTRV_SCALE_MAX 5
#define E1000_LTRV_SCALE_FACTOR 5
#define E1000_LTRV_SCALE_SHIFT 10
#define E1000_LTRV_SCALE_MASK 0x00001C00
#define E1000_LTRV_REQ_SHIFT 15
#define E1000_LTRV_NOSNOOP_SHIFT 16
#define E1000_LTRV_SEND (1 << 30)

@@ -100,7 +100,7 @@
#define MVNETA_DESC_SWAP BIT(6)
#define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22)
#define MVNETA_PORT_STATUS 0x2444
#define MVNETA_TX_IN_PRGRS BIT(1)
#define MVNETA_TX_IN_PRGRS BIT(0)
#define MVNETA_TX_FIFO_EMPTY BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE 0x247c
#define MVNETA_SERDES_CFG 0x24A0
@@ -354,6 +354,9 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
unsigned long flags;
int rc = -EINVAL;

if (!p_ll2_conn)
return rc;

spin_lock_irqsave(&p_tx->lock, flags);
if (p_tx->b_completing_packet) {
rc = -EBUSY;
@@ -527,7 +530,16 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
unsigned long flags = 0;
int rc = 0;

if (!p_ll2_conn)
return rc;

spin_lock_irqsave(&p_rx->lock, flags);

if (!QED_LL2_RX_REGISTERED(p_ll2_conn)) {
spin_unlock_irqrestore(&p_rx->lock, flags);
return 0;
}

cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);

@@ -848,6 +860,9 @@ static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
int rc;

if (!p_ll2_conn)
return 0;

if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
return 0;

@@ -871,6 +886,9 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
u16 new_idx = 0, num_bds = 0;
int rc;

if (!p_ll2_conn)
return 0;

if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
return 0;

@@ -1628,6 +1646,8 @@ int qed_ll2_post_rx_buffer(void *cxt,
if (!p_ll2_conn)
return -EINVAL;
p_rx = &p_ll2_conn->rx_queue;
if (!p_rx->set_prod_addr)
return -EIO;

spin_lock_irqsave(&p_rx->lock, flags);
if (!list_empty(&p_rx->free_descq))

@@ -1244,8 +1244,7 @@ qed_rdma_create_qp(void *rdma_cxt,

if (!rdma_cxt || !in_params || !out_params ||
!p_hwfn->p_rdma_info->active) {
DP_ERR(p_hwfn->cdev,
"qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
pr_err("qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
rdma_cxt, in_params, out_params);
return NULL;
}

@@ -423,8 +423,9 @@ static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
}
}

/* There should be one of more OPP defined */
if (WARN_ON(!count)) {
/* There should be one or more OPPs defined */
if (!count) {
dev_err(dev, "%s: no supported OPPs", __func__);
ret = -ENOENT;
goto put_opp_table;
}
@@ -484,16 +484,19 @@ int vt_ioctl(struct tty_struct *tty,
ret = -EINVAL;
goto out;
}
/* FIXME: this needs the console lock extending */
if (vc->vc_mode == (unsigned char) arg)
console_lock();
if (vc->vc_mode == (unsigned char) arg) {
console_unlock();
break;
}
vc->vc_mode = (unsigned char) arg;
if (console != fg_console)
if (console != fg_console) {
console_unlock();
break;
}
/*
* explicitly blank/unblank the screen if switching modes
*/
console_lock();
if (arg == KD_TEXT)
do_unblank_screen(1);
else

@@ -894,19 +894,19 @@ static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index)

static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
{
struct dwc3_trb *tmp;
u8 trbs_left;

/*
* If enqueue & dequeue are equal than it is either full or empty.
*
* One way to know for sure is if the TRB right before us has HWO bit
* set or not. If it has, then we're definitely full and can't fit any
* more transfers in our ring.
* If the enqueue & dequeue are equal then the TRB ring is either full
* or empty. It's considered full when there are DWC3_TRB_NUM-1 of TRBs
* pending to be processed by the driver.
*/
if (dep->trb_enqueue == dep->trb_dequeue) {
tmp = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
if (tmp->ctrl & DWC3_TRB_CTRL_HWO)
/*
* If there is any request remained in the started_list at
* this point, that means there is no TRB available.
*/
if (!list_empty(&dep->started_list))
return 0;

return DWC3_TRB_NUM - 1;
@@ -1805,10 +1805,8 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)

ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
if (ret == 0) {
dev_err(dwc->dev, "timed out waiting for SETUP phase\n");
return -ETIMEDOUT;
}
if (ret == 0)
dev_warn(dwc->dev, "timed out waiting for SETUP phase\n");
}

spin_lock_irqsave(&dwc->lock, flags);
@@ -1946,6 +1944,7 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
/* begin to receive SETUP packets */
dwc->ep0state = EP0_SETUP_PHASE;
dwc->link_state = DWC3_LINK_STATE_SS_DIS;
dwc->delayed_status = false;
dwc3_ep0_out_start(dwc);

dwc3_gadget_enable_irq(dwc);

@@ -349,8 +349,6 @@ static inline void free_ep(struct uac_rtd_params *prm, struct usb_ep *ep)
if (!prm->ep_enabled)
return;

prm->ep_enabled = false;

audio_dev = uac->audio_dev;
params = &audio_dev->params;

@@ -368,11 +366,12 @@ static inline void free_ep(struct uac_rtd_params *prm, struct usb_ep *ep)
}
}

prm->ep_enabled = false;

if (usb_ep_disable(ep))
dev_err(uac->card->dev, "%s:%d Error!\n", __func__, __LINE__);
}

int u_audio_start_capture(struct g_audio *audio_dev)
{
struct snd_uac_chip *uac = audio_dev->uac;
@@ -625,7 +625,6 @@ static struct usb_serial_driver ch341_device = {
.owner = THIS_MODULE,
.name = "ch341-uart",
},
.bulk_in_size = 512,
.id_table = id_table,
.num_ports = 1,
.open = ch341_open,

@@ -2074,6 +2074,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(4) | RSVD(5) },
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */
.driver_info = RSVD(6) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
{ USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */

@@ -330,7 +330,7 @@ __vringh_iov(struct vringh *vrh, u16 i,
iov = wiov;
else {
iov = riov;
if (unlikely(wiov && wiov->i)) {
if (unlikely(wiov && wiov->used)) {
vringh_bad("Readable desc %p after writable",
&descs[i]);
err = -EINVAL;

@@ -991,6 +991,10 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
goto done;
}

/* bitfill_aligned() assumes that it's at least 8x8 */
if (var->xres < 8 || var->yres < 8)
return -EINVAL;

ret = info->fbops->fb_check_var(var, info);

if (ret)
@@ -579,6 +579,13 @@ static void virtio_pci_remove(struct pci_dev *pci_dev)
struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
struct device *dev = get_device(&vp_dev->vdev.dev);

/*
* Device is marked broken on surprise removal so that virtio upper
* layers can abort any ongoing operation.
*/
if (!pci_device_is_present(pci_dev))
virtio_break_device(&vp_dev->vdev);

pci_disable_sriov(pci_dev);

unregister_virtio_device(&vp_dev->vdev);

@@ -1197,7 +1197,7 @@ bool virtqueue_is_broken(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);

return vq->broken;
return READ_ONCE(vq->broken);
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);

@@ -1211,7 +1211,9 @@ void virtio_break_device(struct virtio_device *dev)

list_for_each_entry(_vq, &dev->vqs, list) {
struct vring_virtqueue *vq = to_vvq(_vq);
vq->broken = true;

/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
WRITE_ONCE(vq->broken, true);
}
}
EXPORT_SYMBOL_GPL(virtio_break_device);
@@ -77,6 +77,14 @@ struct sock_reuseport;

/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */

#define BPF_ALU_REG(CLASS, OP, DST, SRC) \
((struct bpf_insn) { \
.code = CLASS | BPF_OP(OP) | BPF_X, \
.dst_reg = DST, \
.src_reg = SRC, \
.off = 0, \
.imm = 0 })

#define BPF_ALU64_REG(OP, DST, SRC) \
((struct bpf_insn) { \
.code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \
@@ -123,6 +131,14 @@ struct sock_reuseport;

/* Short form of mov, dst_reg = src_reg */

#define BPF_MOV_REG(CLASS, DST, SRC) \
((struct bpf_insn) { \
.code = CLASS | BPF_MOV | BPF_X, \
.dst_reg = DST, \
.src_reg = SRC, \
.off = 0, \
.imm = 0 })

#define BPF_MOV64_REG(DST, SRC) \
((struct bpf_insn) { \
.code = BPF_ALU64 | BPF_MOV | BPF_X, \
@@ -157,6 +173,14 @@ struct sock_reuseport;
.off = 0, \
.imm = IMM })

#define BPF_RAW_REG(insn, DST, SRC) \
((struct bpf_insn) { \
.code = (insn).code, \
.dst_reg = DST, \
.src_reg = SRC, \
.off = (insn).off, \
.imm = (insn).imm })

/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
#define BPF_LD_IMM64(DST, IMM) \
BPF_LD_IMM64_RAW(DST, 0, IMM)
@@ -3641,6 +3641,10 @@ int netdev_rx_handler_register(struct net_device *dev,
void netdev_rx_handler_unregister(struct net_device *dev);

bool dev_valid_name(const char *name);
static inline bool is_socket_ioctl_cmd(unsigned int cmd)
{
return _IOC_TYPE(cmd) == SOCK_IOC_TYPE;
}
int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr,
bool *need_copyout);
int dev_ifconf(struct net *net, struct ifconf *, int);

@@ -7,7 +7,7 @@

bool __do_once_start(bool *done, unsigned long *flags);
void __do_once_done(bool *done, struct static_key_true *once_key,
unsigned long *flags);
unsigned long *flags, struct module *mod);

/* Call a function exactly once. The idea of DO_ONCE() is to perform
* a function call such as initialization of random seeds, etc, only
@@ -46,7 +46,7 @@ void __do_once_done(bool *done, struct static_key_true *once_key,
if (unlikely(___ret)) { \
func(__VA_ARGS__); \
__do_once_done(&___done, &___once_key, \
&___flags); \
&___flags, THIS_MODULE); \
} \
} \
___ret; \
@@ -714,9 +714,6 @@ static int bpf_jit_blind_insn(const struct bpf_insn *from,
* below.
*
* Constant blinding is only used by JITs, not in the interpreter.
* The interpreter uses AX in some occasions as a local temporary
* register e.g. in DIV or MOD instructions.
*
* In restricted circumstances, the verifier can also use the AX
* register for rewrites as long as they do not interfere with
* the above cases!
@@ -1066,6 +1063,7 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
#undef BPF_INSN_3_LBL
#undef BPF_INSN_2_LBL
u32 tail_call_cnt = 0;
u64 tmp;

#define CONT ({ insn++; goto select_insn; })
#define CONT_JMP ({ insn++; goto select_insn; })
@@ -1126,36 +1124,36 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
(*(s64 *) &DST) >>= IMM;
CONT;
ALU64_MOD_X:
div64_u64_rem(DST, SRC, &AX);
DST = AX;
div64_u64_rem(DST, SRC, &tmp);
DST = tmp;
CONT;
ALU_MOD_X:
AX = (u32) DST;
DST = do_div(AX, (u32) SRC);
tmp = (u32) DST;
DST = do_div(tmp, (u32) SRC);
CONT;
ALU64_MOD_K:
div64_u64_rem(DST, IMM, &AX);
DST = AX;
div64_u64_rem(DST, IMM, &tmp);
DST = tmp;
CONT;
ALU_MOD_K:
AX = (u32) DST;
DST = do_div(AX, (u32) IMM);
tmp = (u32) DST;
DST = do_div(tmp, (u32) IMM);
CONT;
ALU64_DIV_X:
DST = div64_u64(DST, SRC);
CONT;
ALU_DIV_X:
AX = (u32) DST;
do_div(AX, (u32) SRC);
DST = (u32) AX;
tmp = (u32) DST;
do_div(tmp, (u32) SRC);
DST = (u32) tmp;
CONT;
ALU64_DIV_K:
DST = div64_u64(DST, IMM);
CONT;
ALU_DIV_K:
AX = (u32) DST;
do_div(AX, (u32) IMM);
DST = (u32) AX;
tmp = (u32) DST;
do_div(tmp, (u32) IMM);
DST = (u32) tmp;
CONT;
ALU_END_TO_BE:
switch (IMM) {
@@ -6177,28 +6177,29 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
struct bpf_insn mask_and_div[] = {
BPF_MOV32_REG(insn->src_reg, insn->src_reg),
/* Rx div 0 -> 0 */
BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2),
BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
BPF_MOV_REG(BPF_CLASS(insn->code), BPF_REG_AX, insn->src_reg),
/* [R,W]x div 0 -> 0 */
BPF_JMP_IMM(BPF_JEQ, BPF_REG_AX, 0, 2),
BPF_RAW_REG(*insn, insn->dst_reg, BPF_REG_AX),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
*insn,
BPF_ALU_REG(BPF_CLASS(insn->code), BPF_XOR, insn->dst_reg, insn->dst_reg),
};
struct bpf_insn mask_and_mod[] = {
BPF_MOV32_REG(insn->src_reg, insn->src_reg),
/* Rx mod 0 -> Rx */
BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1),
*insn,
BPF_MOV_REG(BPF_CLASS(insn->code), BPF_REG_AX, insn->src_reg),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_AX, 0, 1 + (is64 ? 0 : 1)),
BPF_RAW_REG(*insn, insn->dst_reg, BPF_REG_AX),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_MOV32_REG(insn->dst_reg, insn->dst_reg),
};
struct bpf_insn *patchlet;

if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
patchlet = mask_and_div + (is64 ? 1 : 0);
cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0);
patchlet = mask_and_div;
cnt = ARRAY_SIZE(mask_and_div);
} else {
patchlet = mask_and_mod + (is64 ? 1 : 0);
cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0);
patchlet = mask_and_mod;
cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 2 : 0);
}

new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
lib/once.c
@@ -3,10 +3,12 @@
#include <linux/spinlock.h>
#include <linux/once.h>
#include <linux/random.h>
#include <linux/module.h>

struct once_work {
struct work_struct work;
struct static_key_true *key;
struct module *module;
};

static void once_deferred(struct work_struct *w)
@@ -16,10 +18,11 @@ static void once_deferred(struct work_struct *w)
work = container_of(w, struct once_work, work);
BUG_ON(!static_key_enabled(work->key));
static_branch_disable(work->key);
module_put(work->module);
kfree(work);
}

static void once_disable_jump(struct static_key_true *key)
static void once_disable_jump(struct static_key_true *key, struct module *mod)
{
struct once_work *w;

@@ -29,6 +32,8 @@ static void once_disable_jump(struct static_key_true *key)

INIT_WORK(&w->work, once_deferred);
w->key = key;
w->module = mod;
__module_get(mod);
schedule_work(&w->work);
}

@@ -53,11 +58,11 @@ bool __do_once_start(bool *done, unsigned long *flags)
EXPORT_SYMBOL(__do_once_start);

void __do_once_done(bool *done, struct static_key_true *once_key,
unsigned long *flags)
unsigned long *flags, struct module *mod)
__releases(once_lock)
{
*done = true;
spin_unlock_irqrestore(&once_lock, *flags);
once_disable_jump(once_key);
once_disable_jump(once_key, mod);
}
EXPORT_SYMBOL(__do_once_done);
@@ -449,6 +449,8 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,

static int gre_handle_offloads(struct sk_buff *skb, bool csum)
{
if (csum && skb_checksum_start(skb) < skb->data)
return -EINVAL;
return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
}
@@ -70,10 +70,9 @@ EXPORT_SYMBOL_GPL(nf_conntrack_hash);

struct conntrack_gc_work {
struct delayed_work dwork;
u32 last_bucket;
u32 next_bucket;
bool exiting;
bool early_drop;
long next_gc_run;
};

static __read_mostly struct kmem_cache *nf_conntrack_cachep;
@@ -81,12 +80,8 @@ static __read_mostly spinlock_t nf_conntrack_locks_all_lock;
static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
static __read_mostly bool nf_conntrack_locks_all;

/* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of table */
#define GC_MAX_BUCKETS_DIV 128u
/* upper bound of full table scan */
#define GC_MAX_SCAN_JIFFIES (16u * HZ)
/* desired ratio of entries found to be expired */
#define GC_EVICT_RATIO 50u
#define GC_SCAN_INTERVAL (120u * HZ)
#define GC_SCAN_MAX_DURATION msecs_to_jiffies(10)

static struct conntrack_gc_work conntrack_gc_work;

@@ -1198,17 +1193,13 @@ static void nf_ct_offload_timeout(struct nf_conn *ct)

static void gc_worker(struct work_struct *work)
{
unsigned int min_interval = max(HZ / GC_MAX_BUCKETS_DIV, 1u);
unsigned int i, goal, buckets = 0, expired_count = 0;
unsigned int nf_conntrack_max95 = 0;
unsigned long end_time = jiffies + GC_SCAN_MAX_DURATION;
unsigned int i, hashsz, nf_conntrack_max95 = 0;
unsigned long next_run = GC_SCAN_INTERVAL;
struct conntrack_gc_work *gc_work;
unsigned int ratio, scanned = 0;
unsigned long next_run;

gc_work = container_of(work, struct conntrack_gc_work, dwork.work);

goal = nf_conntrack_htable_size / GC_MAX_BUCKETS_DIV;
i = gc_work->last_bucket;
i = gc_work->next_bucket;
if (gc_work->early_drop)
nf_conntrack_max95 = nf_conntrack_max / 100u * 95u;

@@ -1216,22 +1207,21 @@ static void gc_worker(struct work_struct *work)
struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_head *ct_hash;
struct hlist_nulls_node *n;
unsigned int hashsz;
struct nf_conn *tmp;

i++;
rcu_read_lock();

nf_conntrack_get_ht(&ct_hash, &hashsz);
if (i >= hashsz)
i = 0;
if (i >= hashsz) {
rcu_read_unlock();
break;
}

hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[i], hnnode) {
struct net *net;

tmp = nf_ct_tuplehash_to_ctrack(h);

scanned++;
if (test_bit(IPS_OFFLOAD_BIT, &tmp->status)) {
nf_ct_offload_timeout(tmp);
continue;
@@ -1239,7 +1229,6 @@ static void gc_worker(struct work_struct *work)

if (nf_ct_is_expired(tmp)) {
nf_ct_gc_expired(tmp);
expired_count++;
continue;
}

@@ -1271,7 +1260,14 @@ static void gc_worker(struct work_struct *work)
*/
rcu_read_unlock();
cond_resched();
} while (++buckets < goal);
i++;

if (time_after(jiffies, end_time) && i < hashsz) {
gc_work->next_bucket = i;
next_run = 0;
break;
}
} while (i < hashsz);

if (gc_work->exiting)
return;
@@ -1282,40 +1278,17 @@ static void gc_worker(struct work_struct *work)
*
* This worker is only here to reap expired entries when system went
* idle after a busy period.
*
* The heuristics below are supposed to balance conflicting goals:
*
* 1. Minimize time until we notice a stale entry
* 2. Maximize scan intervals to not waste cycles
*
* Normally, expire ratio will be close to 0.
*
* As soon as a sizeable fraction of the entries have expired
* increase scan frequency.
*/
ratio = scanned ? expired_count * 100 / scanned : 0;
if (ratio > GC_EVICT_RATIO) {
gc_work->next_gc_run = min_interval;
} else {
unsigned int max = GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV;

BUILD_BUG_ON((GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV) == 0);

gc_work->next_gc_run += min_interval;
if (gc_work->next_gc_run > max)
gc_work->next_gc_run = max;
}

next_run = gc_work->next_gc_run;
gc_work->last_bucket = i;
if (next_run) {
gc_work->early_drop = false;
gc_work->next_bucket = 0;
}
queue_delayed_work(system_power_efficient_wq, &gc_work->dwork, next_run);
}

static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
{
INIT_DEFERRABLE_WORK(&gc_work->dwork, gc_worker);
gc_work->next_gc_run = HZ;
gc_work->exiting = false;
}
@@ -321,7 +321,7 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
goto err;
}

if (len != ALIGN(size, 4) + hdrlen)
if (!size || len != ALIGN(size, 4) + hdrlen)
goto err;

if (cb->dst_port != QRTR_PORT_CTRL && cb->type != QRTR_TYPE_DATA)

@@ -111,9 +111,9 @@ static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr)
cpu_relax();
}

ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_len,
ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_dma_len,
&off, PAGE_SIZE);
if (unlikely(ret != ibmr->sg_len))
if (unlikely(ret != ibmr->sg_dma_len))
return ret < 0 ? ret : -EINVAL;

/* Perform a WR for the fast_reg_mr. Each individual page
@@ -1030,7 +1030,7 @@ static long sock_do_ioctl(struct net *net, struct socket *sock,
rtnl_unlock();
if (!err && copy_to_user(argp, &ifc, sizeof(struct ifconf)))
err = -EFAULT;
} else {
} else if (is_socket_ioctl_cmd(cmd)) {
struct ifreq ifr;
bool need_copyout;
if (copy_from_user(&ifr, argp, sizeof(struct ifreq)))
@@ -1039,6 +1039,8 @@ static long sock_do_ioctl(struct net *net, struct socket *sock,
if (!err && need_copyout)
if (copy_to_user(argp, &ifr, sizeof(struct ifreq)))
return -EFAULT;
} else {
err = -ENOTTY;
}
return err;
}
@@ -3064,6 +3066,8 @@ static int compat_ifr_data_ioctl(struct net *net, unsigned int cmd,
struct ifreq ifreq;
u32 data32;

if (!is_socket_ioctl_cmd(cmd))
return -ENOTTY;
if (copy_from_user(ifreq.ifr_name, u_ifreq32->ifr_name, IFNAMSIZ))
return -EFAULT;
if (get_user(data32, &u_ifreq32->ifr_data))