Merge 4.19.274 into android-4.19-stable

Changes in 4.19.274
	wifi: rtl8xxxu: gen2: Turn on the rate control
	powerpc: dts: t208x: Mark MAC1 and MAC2 as 10G
	random: always mix cycle counter in add_latent_entropy()
	can: kvaser_usb: hydra: help gcc-13 to figure out cmd_len
	powerpc: dts: t208x: Disable 10G on MAC1 and MAC2
	alarmtimer: Prevent starvation by small intervals and SIG_IGN
	drm/i915/gvt: fix double free bug in split_2MB_gtt_entry
	mac80211: mesh: embedd mesh_paths and mpp_paths into ieee80211_if_mesh
	uaccess: Add speculation barrier to copy_from_user()
	wifi: mwifiex: Add missing compatible string for SD8787
	ext4: Fix function prototype mismatch for ext4_feat_ktype
	bpf: add missing header file include
	Linux 4.19.274

Change-Id: Ibf649340dee25d21c329d09a1f19454dfd2e5e7f
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
This commit is contained in:
Greg Kroah-Hartman
2023-02-25 15:35:59 +00:00
17 changed files with 251 additions and 113 deletions

View File

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0 # SPDX-License-Identifier: GPL-2.0
VERSION = 4 VERSION = 4
PATCHLEVEL = 19 PATCHLEVEL = 19
SUBLEVEL = 273 SUBLEVEL = 274
EXTRAVERSION = EXTRAVERSION =
NAME = "People's Front" NAME = "People's Front"

View File

@@ -0,0 +1,44 @@
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
* QorIQ FMan v3 10g port #2 device tree stub [ controller @ offset 0x400000 ]
*
* Copyright 2022 Sean Anderson <sean.anderson@seco.com>
* Copyright 2012 - 2015 Freescale Semiconductor Inc.
*/
fman@400000 {
fman0_rx_0x08: port@88000 {
cell-index = <0x8>;
compatible = "fsl,fman-v3-port-rx";
reg = <0x88000 0x1000>;
fsl,fman-10g-port;
};
fman0_tx_0x28: port@a8000 {
cell-index = <0x28>;
compatible = "fsl,fman-v3-port-tx";
reg = <0xa8000 0x1000>;
fsl,fman-10g-port;
};
ethernet@e0000 {
cell-index = <0>;
compatible = "fsl,fman-memac";
reg = <0xe0000 0x1000>;
fsl,fman-ports = <&fman0_rx_0x08 &fman0_tx_0x28>;
ptp-timer = <&ptp_timer0>;
pcsphy-handle = <&pcsphy0>;
};
mdio@e1000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe1000 0x1000>;
fsl,erratum-a011043; /* must ignore read errors */
pcsphy0: ethernet-phy@0 {
reg = <0x0>;
};
};
};

View File

@@ -0,0 +1,44 @@
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
* QorIQ FMan v3 10g port #3 device tree stub [ controller @ offset 0x400000 ]
*
* Copyright 2022 Sean Anderson <sean.anderson@seco.com>
* Copyright 2012 - 2015 Freescale Semiconductor Inc.
*/
fman@400000 {
fman0_rx_0x09: port@89000 {
cell-index = <0x9>;
compatible = "fsl,fman-v3-port-rx";
reg = <0x89000 0x1000>;
fsl,fman-10g-port;
};
fman0_tx_0x29: port@a9000 {
cell-index = <0x29>;
compatible = "fsl,fman-v3-port-tx";
reg = <0xa9000 0x1000>;
fsl,fman-10g-port;
};
ethernet@e2000 {
cell-index = <1>;
compatible = "fsl,fman-memac";
reg = <0xe2000 0x1000>;
fsl,fman-ports = <&fman0_rx_0x09 &fman0_tx_0x29>;
ptp-timer = <&ptp_timer0>;
pcsphy-handle = <&pcsphy1>;
};
mdio@e3000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe3000 0x1000>;
fsl,erratum-a011043; /* must ignore read errors */
pcsphy1: ethernet-phy@0 {
reg = <0x0>;
};
};
};

View File

@@ -631,8 +631,8 @@
/include/ "qoriq-bman1.dtsi" /include/ "qoriq-bman1.dtsi"
/include/ "qoriq-fman3-0.dtsi" /include/ "qoriq-fman3-0.dtsi"
/include/ "qoriq-fman3-0-1g-0.dtsi" /include/ "qoriq-fman3-0-10g-2.dtsi"
/include/ "qoriq-fman3-0-1g-1.dtsi" /include/ "qoriq-fman3-0-10g-3.dtsi"
/include/ "qoriq-fman3-0-1g-2.dtsi" /include/ "qoriq-fman3-0-1g-2.dtsi"
/include/ "qoriq-fman3-0-1g-3.dtsi" /include/ "qoriq-fman3-0-1g-3.dtsi"
/include/ "qoriq-fman3-0-1g-4.dtsi" /include/ "qoriq-fman3-0-1g-4.dtsi"
@@ -681,3 +681,19 @@
interrupts = <16 2 1 9>; interrupts = <16 2 1 9>;
}; };
}; };
&fman0_rx_0x08 {
/delete-property/ fsl,fman-10g-port;
};
&fman0_tx_0x28 {
/delete-property/ fsl,fman-10g-port;
};
&fman0_rx_0x09 {
/delete-property/ fsl,fman-10g-port;
};
&fman0_tx_0x29 {
/delete-property/ fsl,fman-10g-port;
};

View File

@@ -1155,10 +1155,8 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
for_each_shadow_entry(sub_spt, &sub_se, sub_index) { for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
start_gfn + sub_index, PAGE_SIZE, &dma_addr); start_gfn + sub_index, PAGE_SIZE, &dma_addr);
if (ret) { if (ret)
ppgtt_invalidate_spt(spt); goto err;
return ret;
}
sub_se.val64 = se->val64; sub_se.val64 = se->val64;
/* Copy the PAT field from PDE. */ /* Copy the PAT field from PDE. */
@@ -1177,6 +1175,17 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
ops->set_pfn(se, sub_spt->shadow_page.mfn); ops->set_pfn(se, sub_spt->shadow_page.mfn);
ppgtt_set_shadow_entry(spt, se, index); ppgtt_set_shadow_entry(spt, se, index);
return 0; return 0;
err:
/* Cancel the existing address mappings of DMA addr. */
for_each_present_shadow_entry(sub_spt, &sub_se, sub_index) {
gvt_vdbg_mm("invalidate 4K entry\n");
ppgtt_invalidate_pte(sub_spt, &sub_se);
}
/* Release the new allocated spt. */
trace_spt_change(sub_spt->vgpu->id, "release", sub_spt,
sub_spt->guest_page.gfn, sub_spt->shadow_page.type);
ppgtt_free_spt(sub_spt);
return ret;
} }
static int split_64KB_gtt_entry(struct intel_vgpu *vgpu, static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,

View File

@@ -518,6 +518,7 @@ static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev,
u8 cmd_no, int channel) u8 cmd_no, int channel)
{ {
struct kvaser_cmd *cmd; struct kvaser_cmd *cmd;
size_t cmd_len;
int err; int err;
cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL); cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
@@ -525,6 +526,7 @@ static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev,
return -ENOMEM; return -ENOMEM;
cmd->header.cmd_no = cmd_no; cmd->header.cmd_no = cmd_no;
cmd_len = kvaser_usb_hydra_cmd_size(cmd);
if (channel < 0) { if (channel < 0) {
kvaser_usb_hydra_set_cmd_dest_he kvaser_usb_hydra_set_cmd_dest_he
(cmd, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL); (cmd, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL);
@@ -541,7 +543,7 @@ static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev,
kvaser_usb_hydra_set_cmd_transid kvaser_usb_hydra_set_cmd_transid
(cmd, kvaser_usb_hydra_get_next_transid(dev)); (cmd, kvaser_usb_hydra_get_next_transid(dev));
err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
if (err) if (err)
goto end; goto end;
@@ -557,6 +559,7 @@ kvaser_usb_hydra_send_simple_cmd_async(struct kvaser_usb_net_priv *priv,
{ {
struct kvaser_cmd *cmd; struct kvaser_cmd *cmd;
struct kvaser_usb *dev = priv->dev; struct kvaser_usb *dev = priv->dev;
size_t cmd_len;
int err; int err;
cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_ATOMIC); cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_ATOMIC);
@@ -564,14 +567,14 @@ kvaser_usb_hydra_send_simple_cmd_async(struct kvaser_usb_net_priv *priv,
return -ENOMEM; return -ENOMEM;
cmd->header.cmd_no = cmd_no; cmd->header.cmd_no = cmd_no;
cmd_len = kvaser_usb_hydra_cmd_size(cmd);
kvaser_usb_hydra_set_cmd_dest_he kvaser_usb_hydra_set_cmd_dest_he
(cmd, dev->card_data.hydra.channel_to_he[priv->channel]); (cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
kvaser_usb_hydra_set_cmd_transid kvaser_usb_hydra_set_cmd_transid
(cmd, kvaser_usb_hydra_get_next_transid(dev)); (cmd, kvaser_usb_hydra_get_next_transid(dev));
err = kvaser_usb_send_cmd_async(priv, cmd, err = kvaser_usb_send_cmd_async(priv, cmd, cmd_len);
kvaser_usb_hydra_cmd_size(cmd));
if (err) if (err)
kfree(cmd); kfree(cmd);
@@ -715,6 +718,7 @@ static int kvaser_usb_hydra_get_single_capability(struct kvaser_usb *dev,
{ {
struct kvaser_usb_dev_card_data *card_data = &dev->card_data; struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
struct kvaser_cmd *cmd; struct kvaser_cmd *cmd;
size_t cmd_len;
u32 value = 0; u32 value = 0;
u32 mask = 0; u32 mask = 0;
u16 cap_cmd_res; u16 cap_cmd_res;
@@ -726,13 +730,14 @@ static int kvaser_usb_hydra_get_single_capability(struct kvaser_usb *dev,
return -ENOMEM; return -ENOMEM;
cmd->header.cmd_no = CMD_GET_CAPABILITIES_REQ; cmd->header.cmd_no = CMD_GET_CAPABILITIES_REQ;
cmd_len = kvaser_usb_hydra_cmd_size(cmd);
cmd->cap_req.cap_cmd = cpu_to_le16(cap_cmd_req); cmd->cap_req.cap_cmd = cpu_to_le16(cap_cmd_req);
kvaser_usb_hydra_set_cmd_dest_he(cmd, card_data->hydra.sysdbg_he); kvaser_usb_hydra_set_cmd_dest_he(cmd, card_data->hydra.sysdbg_he);
kvaser_usb_hydra_set_cmd_transid kvaser_usb_hydra_set_cmd_transid
(cmd, kvaser_usb_hydra_get_next_transid(dev)); (cmd, kvaser_usb_hydra_get_next_transid(dev));
err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
if (err) if (err)
goto end; goto end;
@@ -1555,6 +1560,7 @@ static int kvaser_usb_hydra_get_busparams(struct kvaser_usb_net_priv *priv,
struct kvaser_usb *dev = priv->dev; struct kvaser_usb *dev = priv->dev;
struct kvaser_usb_net_hydra_priv *hydra = priv->sub_priv; struct kvaser_usb_net_hydra_priv *hydra = priv->sub_priv;
struct kvaser_cmd *cmd; struct kvaser_cmd *cmd;
size_t cmd_len;
int err; int err;
if (!hydra) if (!hydra)
@@ -1565,6 +1571,7 @@ static int kvaser_usb_hydra_get_busparams(struct kvaser_usb_net_priv *priv,
return -ENOMEM; return -ENOMEM;
cmd->header.cmd_no = CMD_GET_BUSPARAMS_REQ; cmd->header.cmd_no = CMD_GET_BUSPARAMS_REQ;
cmd_len = kvaser_usb_hydra_cmd_size(cmd);
kvaser_usb_hydra_set_cmd_dest_he kvaser_usb_hydra_set_cmd_dest_he
(cmd, dev->card_data.hydra.channel_to_he[priv->channel]); (cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
kvaser_usb_hydra_set_cmd_transid kvaser_usb_hydra_set_cmd_transid
@@ -1574,7 +1581,7 @@ static int kvaser_usb_hydra_get_busparams(struct kvaser_usb_net_priv *priv,
reinit_completion(&priv->get_busparams_comp); reinit_completion(&priv->get_busparams_comp);
err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
if (err) if (err)
return err; return err;
@@ -1601,6 +1608,7 @@ static int kvaser_usb_hydra_set_bittiming(const struct net_device *netdev,
struct kvaser_cmd *cmd; struct kvaser_cmd *cmd;
struct kvaser_usb_net_priv *priv = netdev_priv(netdev); struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
struct kvaser_usb *dev = priv->dev; struct kvaser_usb *dev = priv->dev;
size_t cmd_len;
int err; int err;
cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL); cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
@@ -1608,6 +1616,7 @@ static int kvaser_usb_hydra_set_bittiming(const struct net_device *netdev,
return -ENOMEM; return -ENOMEM;
cmd->header.cmd_no = CMD_SET_BUSPARAMS_REQ; cmd->header.cmd_no = CMD_SET_BUSPARAMS_REQ;
cmd_len = kvaser_usb_hydra_cmd_size(cmd);
memcpy(&cmd->set_busparams_req.busparams_nominal, busparams, memcpy(&cmd->set_busparams_req.busparams_nominal, busparams,
sizeof(cmd->set_busparams_req.busparams_nominal)); sizeof(cmd->set_busparams_req.busparams_nominal));
@@ -1616,7 +1625,7 @@ static int kvaser_usb_hydra_set_bittiming(const struct net_device *netdev,
kvaser_usb_hydra_set_cmd_transid kvaser_usb_hydra_set_cmd_transid
(cmd, kvaser_usb_hydra_get_next_transid(dev)); (cmd, kvaser_usb_hydra_get_next_transid(dev));
err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
kfree(cmd); kfree(cmd);
@@ -1629,6 +1638,7 @@ static int kvaser_usb_hydra_set_data_bittiming(const struct net_device *netdev,
struct kvaser_cmd *cmd; struct kvaser_cmd *cmd;
struct kvaser_usb_net_priv *priv = netdev_priv(netdev); struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
struct kvaser_usb *dev = priv->dev; struct kvaser_usb *dev = priv->dev;
size_t cmd_len;
int err; int err;
cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL); cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
@@ -1636,6 +1646,7 @@ static int kvaser_usb_hydra_set_data_bittiming(const struct net_device *netdev,
return -ENOMEM; return -ENOMEM;
cmd->header.cmd_no = CMD_SET_BUSPARAMS_FD_REQ; cmd->header.cmd_no = CMD_SET_BUSPARAMS_FD_REQ;
cmd_len = kvaser_usb_hydra_cmd_size(cmd);
memcpy(&cmd->set_busparams_req.busparams_data, busparams, memcpy(&cmd->set_busparams_req.busparams_data, busparams,
sizeof(cmd->set_busparams_req.busparams_data)); sizeof(cmd->set_busparams_req.busparams_data));
@@ -1653,7 +1664,7 @@ static int kvaser_usb_hydra_set_data_bittiming(const struct net_device *netdev,
kvaser_usb_hydra_set_cmd_transid kvaser_usb_hydra_set_cmd_transid
(cmd, kvaser_usb_hydra_get_next_transid(dev)); (cmd, kvaser_usb_hydra_get_next_transid(dev));
err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
kfree(cmd); kfree(cmd);
@@ -1781,6 +1792,7 @@ static int kvaser_usb_hydra_get_software_info(struct kvaser_usb *dev)
static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev) static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev)
{ {
struct kvaser_cmd *cmd; struct kvaser_cmd *cmd;
size_t cmd_len;
int err; int err;
u32 flags; u32 flags;
struct kvaser_usb_dev_card_data *card_data = &dev->card_data; struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
@@ -1790,6 +1802,7 @@ static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev)
return -ENOMEM; return -ENOMEM;
cmd->header.cmd_no = CMD_GET_SOFTWARE_DETAILS_REQ; cmd->header.cmd_no = CMD_GET_SOFTWARE_DETAILS_REQ;
cmd_len = kvaser_usb_hydra_cmd_size(cmd);
cmd->sw_detail_req.use_ext_cmd = 1; cmd->sw_detail_req.use_ext_cmd = 1;
kvaser_usb_hydra_set_cmd_dest_he kvaser_usb_hydra_set_cmd_dest_he
(cmd, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL); (cmd, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL);
@@ -1797,7 +1810,7 @@ static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev)
kvaser_usb_hydra_set_cmd_transid kvaser_usb_hydra_set_cmd_transid
(cmd, kvaser_usb_hydra_get_next_transid(dev)); (cmd, kvaser_usb_hydra_get_next_transid(dev));
err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
if (err) if (err)
goto end; goto end;
@@ -1913,6 +1926,7 @@ static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv)
{ {
struct kvaser_usb *dev = priv->dev; struct kvaser_usb *dev = priv->dev;
struct kvaser_cmd *cmd; struct kvaser_cmd *cmd;
size_t cmd_len;
int err; int err;
if ((priv->can.ctrlmode & if ((priv->can.ctrlmode &
@@ -1928,6 +1942,7 @@ static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv)
return -ENOMEM; return -ENOMEM;
cmd->header.cmd_no = CMD_SET_DRIVERMODE_REQ; cmd->header.cmd_no = CMD_SET_DRIVERMODE_REQ;
cmd_len = kvaser_usb_hydra_cmd_size(cmd);
kvaser_usb_hydra_set_cmd_dest_he kvaser_usb_hydra_set_cmd_dest_he
(cmd, dev->card_data.hydra.channel_to_he[priv->channel]); (cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
kvaser_usb_hydra_set_cmd_transid kvaser_usb_hydra_set_cmd_transid
@@ -1937,7 +1952,7 @@ static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv)
else else
cmd->set_ctrlmode.mode = KVASER_USB_HYDRA_CTRLMODE_NORMAL; cmd->set_ctrlmode.mode = KVASER_USB_HYDRA_CTRLMODE_NORMAL;
err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
kfree(cmd); kfree(cmd);
return err; return err;

View File

@@ -58,6 +58,7 @@ static struct memory_type_mapping mem_type_mapping_tbl[] = {
}; };
static const struct of_device_id mwifiex_sdio_of_match_table[] = { static const struct of_device_id mwifiex_sdio_of_match_table[] = {
{ .compatible = "marvell,sd8787" },
{ .compatible = "marvell,sd8897" }, { .compatible = "marvell,sd8897" },
{ .compatible = "marvell,sd8997" }, { .compatible = "marvell,sd8997" },
{ } { }

View File

@@ -4375,12 +4375,9 @@ void rtl8xxxu_gen1_report_connect(struct rtl8xxxu_priv *priv,
void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv, void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv,
u8 macid, bool connect) u8 macid, bool connect)
{ {
#ifdef RTL8XXXU_GEN2_REPORT_CONNECT
/* /*
* Barry Day reports this causes issues with 8192eu and 8723bu * The firmware turns on the rate control when it knows it's
* devices reconnecting. The reason for this is unclear, but * connected to a network.
* until it is better understood, leave the code in place but
* disabled, so it is not lost.
*/ */
struct h2c_cmd h2c; struct h2c_cmd h2c;
@@ -4393,7 +4390,6 @@ void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv,
h2c.media_status_rpt.parm &= ~BIT(0); h2c.media_status_rpt.parm &= ~BIT(0);
rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.media_status_rpt)); rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.media_status_rpt));
#endif
} }
void rtl8xxxu_gen1_init_aggregation(struct rtl8xxxu_priv *priv) void rtl8xxxu_gen1_init_aggregation(struct rtl8xxxu_priv *priv)

View File

@@ -369,6 +369,11 @@ static void ext4_sb_release(struct kobject *kobj)
complete(&sbi->s_kobj_unregister); complete(&sbi->s_kobj_unregister);
} }
static void ext4_feat_release(struct kobject *kobj)
{
kfree(kobj);
}
static const struct sysfs_ops ext4_attr_ops = { static const struct sysfs_ops ext4_attr_ops = {
.show = ext4_attr_show, .show = ext4_attr_show,
.store = ext4_attr_store, .store = ext4_attr_store,
@@ -383,7 +388,7 @@ static struct kobj_type ext4_sb_ktype = {
static struct kobj_type ext4_feat_ktype = { static struct kobj_type ext4_feat_ktype = {
.default_attrs = ext4_feat_attrs, .default_attrs = ext4_feat_attrs,
.sysfs_ops = &ext4_attr_ops, .sysfs_ops = &ext4_attr_ops,
.release = (void (*)(struct kobject *))kfree, .release = ext4_feat_release,
}; };
static struct kobject *ext4_root; static struct kobject *ext4_root;

View File

@@ -9,6 +9,10 @@
struct task_struct; struct task_struct;
#ifndef barrier_nospec
# define barrier_nospec() do { } while (0)
#endif
/** /**
* array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
* @index: array element index * @index: array element index

View File

@@ -19,14 +19,14 @@ void add_input_randomness(unsigned int type, unsigned int code,
void add_interrupt_randomness(int irq) __latent_entropy; void add_interrupt_randomness(int irq) __latent_entropy;
void add_hwgenerator_randomness(const char *buf, size_t len, size_t entropy); void add_hwgenerator_randomness(const char *buf, size_t len, size_t entropy);
#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
static inline void add_latent_entropy(void) static inline void add_latent_entropy(void)
{ {
#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy)); add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
}
#else #else
static inline void add_latent_entropy(void) { } add_device_randomness(NULL, 0);
#endif #endif
}
void get_random_bytes(void *buf, int len); void get_random_bytes(void *buf, int len);
size_t __must_check get_random_bytes_arch(void *buf, size_t len); size_t __must_check get_random_bytes_arch(void *buf, size_t len);

View File

@@ -32,6 +32,7 @@
#include <linux/kallsyms.h> #include <linux/kallsyms.h>
#include <linux/rcupdate.h> #include <linux/rcupdate.h>
#include <linux/perf_event.h> #include <linux/perf_event.h>
#include <linux/nospec.h>
#include <asm/barrier.h> #include <asm/barrier.h>
#include <asm/unaligned.h> #include <asm/unaligned.h>
@@ -1382,9 +1383,7 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
* reuse preexisting logic from Spectre v1 mitigation that * reuse preexisting logic from Spectre v1 mitigation that
* happens to produce the required code on x86 for v4 as well. * happens to produce the required code on x86 for v4 as well.
*/ */
#ifdef CONFIG_X86
barrier_nospec(); barrier_nospec();
#endif
CONT; CONT;
#define LDST(SIZEOP, SIZE) \ #define LDST(SIZEOP, SIZE) \
STX_MEM_##SIZEOP: \ STX_MEM_##SIZEOP: \

View File

@@ -476,11 +476,35 @@ u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval)
} }
EXPORT_SYMBOL_GPL(alarm_forward); EXPORT_SYMBOL_GPL(alarm_forward);
u64 alarm_forward_now(struct alarm *alarm, ktime_t interval) static u64 __alarm_forward_now(struct alarm *alarm, ktime_t interval, bool throttle)
{ {
struct alarm_base *base = &alarm_bases[alarm->type]; struct alarm_base *base = &alarm_bases[alarm->type];
ktime_t now = base->gettime();
return alarm_forward(alarm, base->gettime(), interval); if (IS_ENABLED(CONFIG_HIGH_RES_TIMERS) && throttle) {
/*
* Same issue as with posix_timer_fn(). Timers which are
* periodic but the signal is ignored can starve the system
* with a very small interval. The real fix which was
* promised in the context of posix_timer_fn() never
* materialized, but someone should really work on it.
*
* To prevent DOS fake @now to be 1 jiffie out which keeps
* the overrun accounting correct but creates an
* inconsistency vs. timer_gettime(2).
*/
ktime_t kj = NSEC_PER_SEC / HZ;
if (interval < kj)
now = ktime_add(now, kj);
}
return alarm_forward(alarm, now, interval);
}
u64 alarm_forward_now(struct alarm *alarm, ktime_t interval)
{
return __alarm_forward_now(alarm, interval, false);
} }
EXPORT_SYMBOL_GPL(alarm_forward_now); EXPORT_SYMBOL_GPL(alarm_forward_now);
@@ -554,9 +578,10 @@ static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm,
if (posix_timer_event(ptr, si_private) && ptr->it_interval) { if (posix_timer_event(ptr, si_private) && ptr->it_interval) {
/* /*
* Handle ignored signals and rearm the timer. This will go * Handle ignored signals and rearm the timer. This will go
* away once we handle ignored signals proper. * away once we handle ignored signals proper. Ensure that
* small intervals cannot starve the system.
*/ */
ptr->it_overrun += alarm_forward_now(alarm, ptr->it_interval); ptr->it_overrun += __alarm_forward_now(alarm, ptr->it_interval, true);
++ptr->it_requeue_pending; ++ptr->it_requeue_pending;
ptr->it_active = 1; ptr->it_active = 1;
result = ALARMTIMER_RESTART; result = ALARMTIMER_RESTART;

View File

@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 // SPDX-License-Identifier: GPL-2.0
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <linux/bitops.h> #include <linux/bitops.h>
#include <linux/nospec.h>
/* out-of-line parts */ /* out-of-line parts */
@@ -10,6 +11,12 @@ unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n
unsigned long res = n; unsigned long res = n;
might_fault(); might_fault();
if (likely(access_ok(VERIFY_READ, from, n))) { if (likely(access_ok(VERIFY_READ, from, n))) {
/*
* Ensure that bad access_ok() speculation will not
* lead to nasty side effects *after* the copy is
* finished:
*/
barrier_nospec();
kasan_check_write(to, n); kasan_check_write(to, n);
res = raw_copy_from_user(to, from, n); res = raw_copy_from_user(to, from, n);
} }

View File

@@ -627,6 +627,26 @@ struct mesh_csa_settings {
struct cfg80211_csa_settings settings; struct cfg80211_csa_settings settings;
}; };
/**
* struct mesh_table
*
* @known_gates: list of known mesh gates and their mpaths by the station. The
* gate's mpath may or may not be resolved and active.
* @gates_lock: protects updates to known_gates
* @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr
* @walk_head: linked list containing all mesh_path objects
* @walk_lock: lock protecting walk_head
* @entries: number of entries in the table
*/
struct mesh_table {
struct hlist_head known_gates;
spinlock_t gates_lock;
struct rhashtable rhead;
struct hlist_head walk_head;
spinlock_t walk_lock;
atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */
};
struct ieee80211_if_mesh { struct ieee80211_if_mesh {
struct timer_list housekeeping_timer; struct timer_list housekeeping_timer;
struct timer_list mesh_path_timer; struct timer_list mesh_path_timer;
@@ -701,8 +721,8 @@ struct ieee80211_if_mesh {
/* offset from skb->data while building IE */ /* offset from skb->data while building IE */
int meshconf_offset; int meshconf_offset;
struct mesh_table *mesh_paths; struct mesh_table mesh_paths;
struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */ struct mesh_table mpp_paths; /* Store paths for MPP&MAP */
int mesh_paths_generation; int mesh_paths_generation;
int mpp_paths_generation; int mpp_paths_generation;
}; };

View File

@@ -128,26 +128,6 @@ struct mesh_path {
bool is_gate; bool is_gate;
}; };
/**
* struct mesh_table
*
* @known_gates: list of known mesh gates and their mpaths by the station. The
* gate's mpath may or may not be resolved and active.
* @gates_lock: protects updates to known_gates
* @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr
* @walk_head: linked list containing all mesh_path objects
* @walk_lock: lock protecting walk_head
* @entries: number of entries in the table
*/
struct mesh_table {
struct hlist_head known_gates;
spinlock_t gates_lock;
struct rhashtable rhead;
struct hlist_head walk_head;
spinlock_t walk_lock;
atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */
};
/* Recent multicast cache */ /* Recent multicast cache */
/* RMC_BUCKETS must be a power of 2, maximum 256 */ /* RMC_BUCKETS must be a power of 2, maximum 256 */
#define RMC_BUCKETS 256 #define RMC_BUCKETS 256
@@ -300,7 +280,7 @@ int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta); void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta);
void mesh_path_flush_pending(struct mesh_path *mpath); void mesh_path_flush_pending(struct mesh_path *mpath);
void mesh_path_tx_pending(struct mesh_path *mpath); void mesh_path_tx_pending(struct mesh_path *mpath);
int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata); void mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata);
void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata); void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata);
int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr); int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr);
void mesh_path_timer(struct timer_list *t); void mesh_path_timer(struct timer_list *t);

View File

@@ -50,32 +50,24 @@ static void mesh_path_rht_free(void *ptr, void *tblptr)
mesh_path_free_rcu(tbl, mpath); mesh_path_free_rcu(tbl, mpath);
} }
static struct mesh_table *mesh_table_alloc(void) static void mesh_table_init(struct mesh_table *tbl)
{ {
struct mesh_table *newtbl; INIT_HLIST_HEAD(&tbl->known_gates);
INIT_HLIST_HEAD(&tbl->walk_head);
atomic_set(&tbl->entries, 0);
spin_lock_init(&tbl->gates_lock);
spin_lock_init(&tbl->walk_lock);
newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC); /* rhashtable_init() may fail only in case of wrong
if (!newtbl) * mesh_rht_params
return NULL; */
WARN_ON(rhashtable_init(&tbl->rhead, &mesh_rht_params));
INIT_HLIST_HEAD(&newtbl->known_gates);
INIT_HLIST_HEAD(&newtbl->walk_head);
atomic_set(&newtbl->entries, 0);
spin_lock_init(&newtbl->gates_lock);
spin_lock_init(&newtbl->walk_lock);
if (rhashtable_init(&newtbl->rhead, &mesh_rht_params)) {
kfree(newtbl);
return NULL;
}
return newtbl;
} }
static void mesh_table_free(struct mesh_table *tbl) static void mesh_table_free(struct mesh_table *tbl)
{ {
rhashtable_free_and_destroy(&tbl->rhead, rhashtable_free_and_destroy(&tbl->rhead,
mesh_path_rht_free, tbl); mesh_path_rht_free, tbl);
kfree(tbl);
} }
/** /**
@@ -243,13 +235,13 @@ static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
struct mesh_path * struct mesh_path *
mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst) mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{ {
return mpath_lookup(sdata->u.mesh.mesh_paths, dst, sdata); return mpath_lookup(&sdata->u.mesh.mesh_paths, dst, sdata);
} }
struct mesh_path * struct mesh_path *
mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst) mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{ {
return mpath_lookup(sdata->u.mesh.mpp_paths, dst, sdata); return mpath_lookup(&sdata->u.mesh.mpp_paths, dst, sdata);
} }
static struct mesh_path * static struct mesh_path *
@@ -286,7 +278,7 @@ __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
struct mesh_path * struct mesh_path *
mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx) mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{ {
return __mesh_path_lookup_by_idx(sdata->u.mesh.mesh_paths, idx); return __mesh_path_lookup_by_idx(&sdata->u.mesh.mesh_paths, idx);
} }
/** /**
@@ -301,7 +293,7 @@ mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
struct mesh_path * struct mesh_path *
mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx) mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{ {
return __mesh_path_lookup_by_idx(sdata->u.mesh.mpp_paths, idx); return __mesh_path_lookup_by_idx(&sdata->u.mesh.mpp_paths, idx);
} }
/** /**
@@ -314,7 +306,7 @@ int mesh_path_add_gate(struct mesh_path *mpath)
int err; int err;
rcu_read_lock(); rcu_read_lock();
tbl = mpath->sdata->u.mesh.mesh_paths; tbl = &mpath->sdata->u.mesh.mesh_paths;
spin_lock_bh(&mpath->state_lock); spin_lock_bh(&mpath->state_lock);
if (mpath->is_gate) { if (mpath->is_gate) {
@@ -424,7 +416,7 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
if (!new_mpath) if (!new_mpath)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
tbl = sdata->u.mesh.mesh_paths; tbl = &sdata->u.mesh.mesh_paths;
spin_lock_bh(&tbl->walk_lock); spin_lock_bh(&tbl->walk_lock);
do { do {
ret = rhashtable_lookup_insert_fast(&tbl->rhead, ret = rhashtable_lookup_insert_fast(&tbl->rhead,
@@ -473,7 +465,7 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
return -ENOMEM; return -ENOMEM;
memcpy(new_mpath->mpp, mpp, ETH_ALEN); memcpy(new_mpath->mpp, mpp, ETH_ALEN);
tbl = sdata->u.mesh.mpp_paths; tbl = &sdata->u.mesh.mpp_paths;
spin_lock_bh(&tbl->walk_lock); spin_lock_bh(&tbl->walk_lock);
ret = rhashtable_lookup_insert_fast(&tbl->rhead, ret = rhashtable_lookup_insert_fast(&tbl->rhead,
@@ -502,7 +494,7 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
void mesh_plink_broken(struct sta_info *sta) void mesh_plink_broken(struct sta_info *sta)
{ {
struct ieee80211_sub_if_data *sdata = sta->sdata; struct ieee80211_sub_if_data *sdata = sta->sdata;
struct mesh_table *tbl = sdata->u.mesh.mesh_paths; struct mesh_table *tbl = &sdata->u.mesh.mesh_paths;
static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct mesh_path *mpath; struct mesh_path *mpath;
@@ -561,7 +553,7 @@ static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
void mesh_path_flush_by_nexthop(struct sta_info *sta) void mesh_path_flush_by_nexthop(struct sta_info *sta)
{ {
struct ieee80211_sub_if_data *sdata = sta->sdata; struct ieee80211_sub_if_data *sdata = sta->sdata;
struct mesh_table *tbl = sdata->u.mesh.mesh_paths; struct mesh_table *tbl = &sdata->u.mesh.mesh_paths;
struct mesh_path *mpath; struct mesh_path *mpath;
struct hlist_node *n; struct hlist_node *n;
@@ -576,7 +568,7 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata, static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
const u8 *proxy) const u8 *proxy)
{ {
struct mesh_table *tbl = sdata->u.mesh.mpp_paths; struct mesh_table *tbl = &sdata->u.mesh.mpp_paths;
struct mesh_path *mpath; struct mesh_path *mpath;
struct hlist_node *n; struct hlist_node *n;
@@ -610,8 +602,8 @@ static void table_flush_by_iface(struct mesh_table *tbl)
*/ */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata) void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{ {
table_flush_by_iface(sdata->u.mesh.mesh_paths); table_flush_by_iface(&sdata->u.mesh.mesh_paths);
table_flush_by_iface(sdata->u.mesh.mpp_paths); table_flush_by_iface(&sdata->u.mesh.mpp_paths);
} }
/** /**
@@ -657,7 +649,7 @@ int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
/* flush relevant mpp entries first */ /* flush relevant mpp entries first */
mpp_flush_by_proxy(sdata, addr); mpp_flush_by_proxy(sdata, addr);
err = table_path_del(sdata->u.mesh.mesh_paths, sdata, addr); err = table_path_del(&sdata->u.mesh.mesh_paths, sdata, addr);
sdata->u.mesh.mesh_paths_generation++; sdata->u.mesh.mesh_paths_generation++;
return err; return err;
} }
@@ -695,7 +687,7 @@ int mesh_path_send_to_gates(struct mesh_path *mpath)
struct mesh_path *gate; struct mesh_path *gate;
bool copy = false; bool copy = false;
tbl = sdata->u.mesh.mesh_paths; tbl = &sdata->u.mesh.mesh_paths;
rcu_read_lock(); rcu_read_lock();
hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) { hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
@@ -775,29 +767,10 @@ void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
mesh_path_tx_pending(mpath); mesh_path_tx_pending(mpath);
} }
int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata) void mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
{ {
struct mesh_table *tbl_path, *tbl_mpp; mesh_table_init(&sdata->u.mesh.mesh_paths);
int ret; mesh_table_init(&sdata->u.mesh.mpp_paths);
tbl_path = mesh_table_alloc();
if (!tbl_path)
return -ENOMEM;
tbl_mpp = mesh_table_alloc();
if (!tbl_mpp) {
ret = -ENOMEM;
goto free_path;
}
sdata->u.mesh.mesh_paths = tbl_path;
sdata->u.mesh.mpp_paths = tbl_mpp;
return 0;
free_path:
mesh_table_free(tbl_path);
return ret;
} }
static static
@@ -819,12 +792,12 @@ void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
void mesh_path_expire(struct ieee80211_sub_if_data *sdata) void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{ {
mesh_path_tbl_expire(sdata, sdata->u.mesh.mesh_paths); mesh_path_tbl_expire(sdata, &sdata->u.mesh.mesh_paths);
mesh_path_tbl_expire(sdata, sdata->u.mesh.mpp_paths); mesh_path_tbl_expire(sdata, &sdata->u.mesh.mpp_paths);
} }
void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata) void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata)
{ {
mesh_table_free(sdata->u.mesh.mesh_paths); mesh_table_free(&sdata->u.mesh.mesh_paths);
mesh_table_free(sdata->u.mesh.mpp_paths); mesh_table_free(&sdata->u.mesh.mpp_paths);
} }