Merge 4.19.91 into android-4.19
Changes in 4.19.91
inet: protect against too small mtu values.
mqprio: Fix out-of-bounds access in mqprio_dump
net: bridge: deny dev_set_mac_address() when unregistering
net: dsa: fix flow dissection on Tx path
net: ethernet: ti: cpsw: fix extra rx interrupt
net: sched: fix dump qlen for sch_mq/sch_mqprio with NOLOCK subqueues
net: thunderx: start phy before starting autonegotiation
openvswitch: support asymmetric conntrack
tcp: md5: fix potential overestimation of TCP option space
tipc: fix ordering of tipc module init and exit routine
net/mlx5e: Query global pause state before setting prio2buffer
tcp: fix rejected syncookies due to stale timestamps
tcp: tighten acceptance of ACKs not matching a child socket
tcp: Protect accesses to .ts_recent_stamp with {READ,WRITE}_ONCE()
Revert "arm64: preempt: Fix big-endian when checking preempt count in assembly"
mmc: block: Make card_busy_detect() a bit more generic
mmc: block: Add CMD13 polling for MMC IOCTLS with R1B response
PCI/PM: Always return devices to D0 when thawing
PCI: pciehp: Avoid returning prematurely from sysfs requests
PCI: Fix Intel ACS quirk UPDCR register address
PCI/MSI: Fix incorrect MSI-X masking on resume
PCI: Apply Cavium ACS quirk to ThunderX2 and ThunderX3
xtensa: fix TLB sanity checker
rpmsg: glink: Set tail pointer to 0 at end of FIFO
rpmsg: glink: Fix reuse intents memory leak issue
rpmsg: glink: Fix use after free in open_ack TIMEOUT case
rpmsg: glink: Put an extra reference during cleanup
rpmsg: glink: Fix rpmsg_register_device err handling
rpmsg: glink: Don't send pending rx_done during remove
rpmsg: glink: Free pending deferred work on remove
cifs: smbd: Return -EAGAIN when transport is reconnecting
cifs: smbd: Add messages on RDMA session destroy and reconnection
cifs: smbd: Return -EINVAL when the number of iovs exceeds SMBDIRECT_MAX_SGE
cifs: Don't display RDMA transport on reconnect
CIFS: Respect O_SYNC and O_DIRECT flags during reconnect
CIFS: Close open handle after interrupted close
ARM: dts: s3c64xx: Fix init order of clock providers
ARM: tegra: Fix FLOW_CTLR_HALT register clobbering by tegra_resume()
vfio/pci: call irq_bypass_unregister_producer() before freeing irq
dma-buf: Fix memory leak in sync_file_merge()
drm: meson: venc: cvbs: fix CVBS mode matching
dm mpath: remove harmful bio-based optimization
dm btree: increase rebalance threshold in __rebalance2()
scsi: iscsi: Fix a potential deadlock in the timeout handler
scsi: qla2xxx: Change discovery state before PLOGI
drm/radeon: fix r1xx/r2xx register checker for POT textures
xhci: fix USB3 device initiated resume race with roothub autosuspend
Linux 4.19.91
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I974d1578d54f93e1c442e09685ddc2fdf373c441
---
diff --git a/Makefile b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 90
+SUBLEVEL = 91
 EXTRAVERSION =
 NAME = "People's Front"
 
@@ -165,6 +165,10 @@
 	};
 };
 
+&clocks {
+	clocks = <&fin_pll>;
+};
+
 &sdhci0 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&sd0_clk>, <&sd0_cmd>, <&sd0_cd>, <&sd0_bus4>;
@@ -69,6 +69,10 @@
 	};
 };
 
+&clocks {
+	clocks = <&fin_pll>;
+};
+
 &sdhci0 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&sd0_clk>, <&sd0_cmd>, <&sd0_cd>, <&sd0_bus4>;
@@ -56,16 +56,16 @@ ENTRY(tegra_resume)
 	cmp	r6, #TEGRA20
 	beq	1f				@ Yes
 	/* Clear the flow controller flags for this CPU. */
-	cpu_to_csr_reg	r1, r0
+	cpu_to_csr_reg	r3, r0
 	mov32	r2, TEGRA_FLOW_CTRL_BASE
-	ldr	r1, [r2, r1]
+	ldr	r1, [r2, r3]
 	/* Clear event & intr flag */
 	orr	r1, r1, \
 		#FLOW_CTRL_CSR_INTR_FLAG | FLOW_CTRL_CSR_EVENT_FLAG
 	movw	r0, #0x3FFD	@ enable, cluster_switch, immed, bitmaps
 				@ & ext flags for CPU power mgnt
 	bic	r1, r1, r0
-	str	r1, [r2]
+	str	r1, [r2, r3]
 1:
 
 	mov32	r9, 0xc09
@@ -692,9 +692,11 @@ USER(\label, ic	ivau, \tmp2)	// invalidate I line PoU
 	.macro		if_will_cond_yield_neon
 #ifdef CONFIG_PREEMPT
 	get_thread_info	x0
-	ldr		x0, [x0, #TSK_TI_PREEMPT]
-	sub		x0, x0, #PREEMPT_DISABLE_OFFSET
-	cbz		x0, .Lyield_\@
+	ldr		w1, [x0, #TSK_TI_PREEMPT]
+	ldr		x0, [x0, #TSK_TI_FLAGS]
+	cmp		w1, #PREEMPT_DISABLE_OFFSET
+	csel		x0, x0, xzr, eq
+	tbnz		x0, #TIF_NEED_RESCHED, .Lyield_\@	// needs rescheduling?
 	/* fall through to endif_yield_neon */
 	.subsection	1
 .Lyield_\@ :
@@ -644,8 +644,10 @@ el1_irq:
 	irq_handler
 
 #ifdef CONFIG_PREEMPT
-	ldr	x24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
-	cbnz	x24, 1f				// preempt count != 0
+	ldr	w24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
+	cbnz	w24, 1f				// preempt count != 0
+	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get flags
+	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
 	bl	el1_preempt
 1:
 #endif
@@ -216,6 +216,8 @@ static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
 	unsigned tlbidx = w | (e << PAGE_SHIFT);
 	unsigned r0 = dtlb ?
 		read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx);
+	unsigned r1 = dtlb ?
+		read_dtlb_translation(tlbidx) : read_itlb_translation(tlbidx);
 	unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT);
 	unsigned pte = get_pte_for_vaddr(vpn);
 	unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK;
@@ -231,8 +233,6 @@ static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
 	}
 
 	if (tlb_asid == mm_asid) {
-		unsigned r1 = dtlb ? read_dtlb_translation(tlbidx) :
-			read_itlb_translation(tlbidx);
 		if ((pte ^ r1) & PAGE_MASK) {
 			pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n",
 					dtlb ? 'D' : 'I', w, e, r0, r1, pte);
@@ -230,7 +230,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
 	a_fences = get_fences(a, &a_num_fences);
 	b_fences = get_fences(b, &b_num_fences);
 	if (a_num_fences > INT_MAX - b_num_fences)
-		return NULL;
+		goto err;
 
 	num_fences = a_num_fences + b_num_fences;
 
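The sync_file_merge() fix routes the overflow check through the function's cleanup label instead of returning NULL directly, so resources gathered before the check are released on every exit path. A minimal standalone sketch of the goto-cleanup idiom it relies on (illustrative names, not kernel code):

    #include <limits.h>
    #include <stdlib.h>
    #include <string.h>

    struct merged {            /* stand-in for the object err must release */
        int *vals;
        int n;
    };

    static struct merged *merge_arrays(const int *a, int na,
                                       const int *b, int nb)
    {
        struct merged *m = calloc(1, sizeof(*m));

        if (!m)
            return NULL;

        /* Overflow guard: exit through cleanup, never a bare return. */
        if (na > INT_MAX - nb)
            goto err;   /* the dma-buf bug was an early "return NULL" here */

        m->n = na + nb;
        m->vals = malloc((size_t)m->n * sizeof(*m->vals));
        if (!m->vals)
            goto err;

        memcpy(m->vals, a, (size_t)na * sizeof(*a));
        memcpy(m->vals + na, b, (size_t)nb * sizeof(*b));
        return m;

    err:
        free(m->vals);          /* NULL-safe: calloc zeroed the struct */
        free(m);
        return NULL;
    }

    int main(void)
    {
        int a[] = { 1, 2 }, b[] = { 3 };
        struct merged *m = merge_arrays(a, 2, b, 1);

        if (m) {
            free(m->vals);
            free(m);
        }
        return 0;
    }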
@@ -75,6 +75,25 @@ struct meson_cvbs_mode meson_cvbs_modes[MESON_CVBS_MODES_COUNT] = {
 	},
 };
 
+static const struct meson_cvbs_mode *
+meson_cvbs_get_mode(const struct drm_display_mode *req_mode)
+{
+	int i;
+
+	for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) {
+		struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i];
+
+		if (drm_mode_match(req_mode, &meson_mode->mode,
+				   DRM_MODE_MATCH_TIMINGS |
+				   DRM_MODE_MATCH_CLOCK |
+				   DRM_MODE_MATCH_FLAGS |
+				   DRM_MODE_MATCH_3D_FLAGS))
+			return meson_mode;
+	}
+
+	return NULL;
+}
+
 /* Connector */
 
 static void meson_cvbs_connector_destroy(struct drm_connector *connector)
@@ -147,14 +166,8 @@ static int meson_venc_cvbs_encoder_atomic_check(struct drm_encoder *encoder,
 					struct drm_crtc_state *crtc_state,
 					struct drm_connector_state *conn_state)
 {
-	int i;
-
-	for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) {
-		struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i];
-
-		if (drm_mode_equal(&crtc_state->mode, &meson_mode->mode))
-			return 0;
-	}
+	if (meson_cvbs_get_mode(&crtc_state->mode))
+		return 0;
 
 	return -EINVAL;
 }
@@ -192,24 +205,17 @@ static void meson_venc_cvbs_encoder_mode_set(struct drm_encoder *encoder,
 				   struct drm_display_mode *mode,
 				   struct drm_display_mode *adjusted_mode)
 {
+	const struct meson_cvbs_mode *meson_mode = meson_cvbs_get_mode(mode);
 	struct meson_venc_cvbs *meson_venc_cvbs =
 					encoder_to_meson_venc_cvbs(encoder);
 	struct meson_drm *priv = meson_venc_cvbs->priv;
-	int i;
 
-	for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) {
-		struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i];
+	if (meson_mode) {
+		meson_venci_cvbs_mode_set(priv, meson_mode->enci);
 
-		if (drm_mode_equal(mode, &meson_mode->mode)) {
-			meson_venci_cvbs_mode_set(priv,
-						  meson_mode->enci);
-
-			/* Setup 27MHz vclk2 for ENCI and VDAC */
-			meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS,
-					 MESON_VCLK_CVBS, MESON_VCLK_CVBS,
-					 MESON_VCLK_CVBS, true);
-			break;
-		}
+		/* Setup 27MHz vclk2 for ENCI and VDAC */
+		meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS, MESON_VCLK_CVBS,
+				 MESON_VCLK_CVBS, MESON_VCLK_CVBS, true);
 	}
 }
 
@@ -1820,8 +1820,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 			track->textures[i].use_pitch = 1;
 		} else {
 			track->textures[i].use_pitch = 0;
-			track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
-			track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
+			track->textures[i].width = 1 << ((idx_value & RADEON_TXFORMAT_WIDTH_MASK) >> RADEON_TXFORMAT_WIDTH_SHIFT);
+			track->textures[i].height = 1 << ((idx_value & RADEON_TXFORMAT_HEIGHT_MASK) >> RADEON_TXFORMAT_HEIGHT_SHIFT);
 		}
 		if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
 			track->textures[i].tex_coord_type = 2;
@@ -476,8 +476,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 			track->textures[i].use_pitch = 1;
 		} else {
 			track->textures[i].use_pitch = 0;
-			track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
-			track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
+			track->textures[i].width = 1 << ((idx_value & RADEON_TXFORMAT_WIDTH_MASK) >> RADEON_TXFORMAT_WIDTH_SHIFT);
+			track->textures[i].height = 1 << ((idx_value & RADEON_TXFORMAT_HEIGHT_MASK) >> RADEON_TXFORMAT_HEIGHT_SHIFT);
 		}
 		if (idx_value & R200_TXFORMAT_LOOKUP_DISABLE)
 			track->textures[i].lookup_disable = true;
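Both radeon hunks swap shift-then-mask for mask-then-shift. That only matters because the WIDTH/HEIGHT masks appear to be defined in place (already shifted into field position); shifting the register down first leaves such a mask misaligned, and the computed power-of-two texture size comes out wrong. A small self-contained check (field layout assumed for illustration, not taken from the radeon headers):

    #include <assert.h>
    #include <stdint.h>

    /* Illustrative layout: a 4-bit field at bits 11:8 whose mask is
     * defined in place (already shifted), i.e. 0x0f00 rather than 0xf. */
    #define WIDTH_SHIFT 8
    #define WIDTH_MASK  (0xfu << WIDTH_SHIFT)

    int main(void)
    {
        uint32_t reg = 5u << WIDTH_SHIFT;       /* field value 5 -> width 32 */

        /* Wrong: shifting first misaligns the in-place mask. */
        uint32_t bad  = 1u << ((reg >> WIDTH_SHIFT) & WIDTH_MASK);
        /* Right: mask in place, then shift the field down. */
        uint32_t good = 1u << ((reg & WIDTH_MASK) >> WIDTH_SHIFT);

        assert(good == 32);
        assert(bad == 1);                        /* (5 & 0xf00) == 0 here */
        return 0;
    }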
@@ -609,45 +609,10 @@ static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
 	return pgpath;
 }
 
-static struct pgpath *__map_bio_fast(struct multipath *m, struct bio *bio)
-{
-	struct pgpath *pgpath;
-	unsigned long flags;
-
-	/* Do we need to select a new pgpath? */
-	/*
-	 * FIXME: currently only switching path if no path (due to failure, etc)
-	 * - which negates the point of using a path selector
-	 */
-	pgpath = READ_ONCE(m->current_pgpath);
-	if (!pgpath)
-		pgpath = choose_pgpath(m, bio->bi_iter.bi_size);
-
-	if (!pgpath) {
-		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
-			/* Queue for the daemon to resubmit */
-			spin_lock_irqsave(&m->lock, flags);
-			bio_list_add(&m->queued_bios, bio);
-			spin_unlock_irqrestore(&m->lock, flags);
-			queue_work(kmultipathd, &m->process_queued_bios);
-
-			return ERR_PTR(-EAGAIN);
-		}
-		return NULL;
-	}
-
-	return pgpath;
-}
-
 static int __multipath_map_bio(struct multipath *m, struct bio *bio,
 			       struct dm_mpath_io *mpio)
 {
-	struct pgpath *pgpath;
-
-	if (!m->hw_handler_name)
-		pgpath = __map_bio_fast(m, bio);
-	else
-		pgpath = __map_bio(m, bio);
+	struct pgpath *pgpath = __map_bio(m, bio);
 
 	if (IS_ERR(pgpath))
 		return DM_MAPIO_SUBMITTED;
@@ -203,7 +203,13 @@ static void __rebalance2(struct dm_btree_info *info, struct btree_node *parent,
 	struct btree_node *right = r->n;
 	uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
 	uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
-	unsigned threshold = 2 * merge_threshold(left) + 1;
+	/*
+	 * Ensure the number of entries in each child will be greater
+	 * than or equal to (max_entries / 3 + 1), so no matter which
+	 * child is used for removal, the number will still be not
+	 * less than (max_entries / 3).
+	 */
+	unsigned int threshold = 2 * (merge_threshold(left) + 1);
 
 	if (nr_left + nr_right < threshold) {
 		/*
@@ -409,38 +409,6 @@ static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
 	return 0;
 }
 
-static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
-				       u32 retries_max)
-{
-	int err;
-	u32 retry_count = 0;
-
-	if (!status || !retries_max)
-		return -EINVAL;
-
-	do {
-		err = __mmc_send_status(card, status, 5);
-		if (err)
-			break;
-
-		if (!R1_STATUS(*status) &&
-		    (R1_CURRENT_STATE(*status) != R1_STATE_PRG))
-			break; /* RPMB programming operation complete */
-
-		/*
-		 * Rechedule to give the MMC device a chance to continue
-		 * processing the previous command without being polled too
-		 * frequently.
-		 */
-		usleep_range(1000, 5000);
-	} while (++retry_count < retries_max);
-
-	if (retry_count == retries_max)
-		err = -EPERM;
-
-	return err;
-}
-
 static int ioctl_do_sanitize(struct mmc_card *card)
 {
 	int err;
@@ -469,6 +437,58 @@ static int ioctl_do_sanitize(struct mmc_card *card)
 	return err;
 }
 
+static inline bool mmc_blk_in_tran_state(u32 status)
+{
+	/*
+	 * Some cards mishandle the status bits, so make sure to check both the
+	 * busy indication and the card state.
+	 */
+	return status & R1_READY_FOR_DATA &&
+	       (R1_CURRENT_STATE(status) == R1_STATE_TRAN);
+}
+
+static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
+			    u32 *resp_errs)
+{
+	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
+	int err = 0;
+	u32 status;
+
+	do {
+		bool done = time_after(jiffies, timeout);
+
+		err = __mmc_send_status(card, &status, 5);
+		if (err) {
+			dev_err(mmc_dev(card->host),
+				"error %d requesting status\n", err);
+			return err;
+		}
+
+		/* Accumulate any response error bits seen */
+		if (resp_errs)
+			*resp_errs |= status;
+
+		/*
+		 * Timeout if the device never becomes ready for data and never
+		 * leaves the program state.
+		 */
+		if (done) {
+			dev_err(mmc_dev(card->host),
+				"Card stuck in wrong state! %s status: %#x\n",
+				__func__, status);
+			return -ETIMEDOUT;
+		}
+
+		/*
+		 * Some cards mishandle the status bits,
+		 * so make sure to check both the busy
+		 * indication and the card state.
+		 */
+	} while (!mmc_blk_in_tran_state(status));
+
+	return err;
+}
+
 static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
 			       struct mmc_blk_ioc_data *idata)
 {
@@ -478,7 +498,6 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
 	struct scatterlist sg;
 	int err;
 	unsigned int target_part;
-	u32 status = 0;
 
 	if (!card || !md || !idata)
 		return -EINVAL;
@@ -612,16 +631,12 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
 
 	memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));
 
-	if (idata->rpmb) {
+	if (idata->rpmb || (cmd.flags & MMC_RSP_R1B)) {
 		/*
-		 * Ensure RPMB command has completed by polling CMD13
+		 * Ensure RPMB/R1B command has completed by polling CMD13
 		 * "Send Status".
 		 */
-		err = ioctl_rpmb_card_status_poll(card, &status, 5);
-		if (err)
-			dev_err(mmc_dev(card->host),
-					"%s: Card Status=0x%08X, error %d\n",
-					__func__, status, err);
+		err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, NULL);
 	}
 
 	return err;
@@ -971,58 +986,6 @@ static unsigned int mmc_blk_data_timeout_ms(struct mmc_host *host,
 	return ms;
 }
 
-static inline bool mmc_blk_in_tran_state(u32 status)
-{
-	/*
-	 * Some cards mishandle the status bits, so make sure to check both the
-	 * busy indication and the card state.
-	 */
-	return status & R1_READY_FOR_DATA &&
-	       (R1_CURRENT_STATE(status) == R1_STATE_TRAN);
-}
-
-static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
-			    struct request *req, u32 *resp_errs)
-{
-	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
-	int err = 0;
-	u32 status;
-
-	do {
-		bool done = time_after(jiffies, timeout);
-
-		err = __mmc_send_status(card, &status, 5);
-		if (err) {
-			pr_err("%s: error %d requesting status\n",
-			       req->rq_disk->disk_name, err);
-			return err;
-		}
-
-		/* Accumulate any response error bits seen */
-		if (resp_errs)
-			*resp_errs |= status;
-
-		/*
-		 * Timeout if the device never becomes ready for data and never
-		 * leaves the program state.
-		 */
-		if (done) {
-			pr_err("%s: Card stuck in wrong state! %s %s status: %#x\n",
-			       mmc_hostname(card->host),
-			       req->rq_disk->disk_name, __func__, status);
-			return -ETIMEDOUT;
-		}
-
-		/*
-		 * Some cards mishandle the status bits,
-		 * so make sure to check both the busy
-		 * indication and the card state.
-		 */
-	} while (!mmc_blk_in_tran_state(status));
-
-	return err;
-}
-
 static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
 			 int type)
 {
@@ -1678,7 +1641,7 @@ static int mmc_blk_fix_state(struct mmc_card *card, struct request *req)
 
 	mmc_blk_send_stop(card, timeout);
 
-	err = card_busy_detect(card, timeout, req, NULL);
+	err = card_busy_detect(card, timeout, NULL);
 
 	mmc_retune_release(card->host);
 
@@ -1902,7 +1865,7 @@ static int mmc_blk_card_busy(struct mmc_card *card, struct request *req)
 	if (mmc_host_is_spi(card->host) || rq_data_dir(req) == READ)
 		return 0;
 
-	err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, req, &status);
+	err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, &status);
 
 	/*
 	 * Do not assume data transferred correctly if there are any error bits
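The generalized card_busy_detect() samples the deadline before each CMD13 "Send Status" poll and only acts on it after the read, so even a zero or already-expired timeout still gets one status poll before -ETIMEDOUT is reported. A rough userspace sketch of that poll-with-deadline shape (simulated status read, not the MMC API):

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    /* Stand-in for CMD13: reports whether the simulated card is ready. */
    static bool read_status(int *calls)
    {
        return ++*calls >= 3;                   /* ready on the third poll */
    }

    static int busy_detect(double timeout_s)
    {
        clock_t deadline = clock() + (clock_t)(timeout_s * CLOCKS_PER_SEC);
        int calls = 0;

        for (;;) {
            /* Deadline sampled before the (possibly slow) status read. */
            bool expired = clock() > deadline;
            bool ready = read_status(&calls);

            if (expired)
                return -1;                      /* -ETIMEDOUT analogue */
            if (ready)
                return 0;
        }
    }

    int main(void)
    {
        printf("busy_detect: %d\n", busy_detect(1.0));
        return 0;
    }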
@@ -1118,7 +1118,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
 			       phy_interface_mode(lmac->lmac_type)))
 		return -ENODEV;
 
-	phy_start_aneg(lmac->phydev);
+	phy_start(lmac->phydev);
 	return 0;
 }
 
@@ -155,8 +155,11 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
 		}
 
 		if (port_buffer->buffer[i].size <
-		    (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT)))
+		    (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT))) {
+			pr_err("buffer_size[%d]=%d is not enough for lossless buffer\n",
+			       i, port_buffer->buffer[i].size);
 			return -ENOMEM;
+		}
 
 		port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff;
 		port_buffer->buffer[i].xon =
@@ -232,6 +235,26 @@ static int update_buffer_lossy(unsigned int max_mtu,
 	return 0;
 }
 
+static int fill_pfc_en(struct mlx5_core_dev *mdev, u8 *pfc_en)
+{
+	u32 g_rx_pause, g_tx_pause;
+	int err;
+
+	err = mlx5_query_port_pause(mdev, &g_rx_pause, &g_tx_pause);
+	if (err)
+		return err;
+
+	/* If global pause enabled, set all active buffers to lossless.
+	 * Otherwise, check PFC setting.
+	 */
+	if (g_rx_pause || g_tx_pause)
+		*pfc_en = 0xff;
+	else
+		err = mlx5_query_port_pfc(mdev, pfc_en, NULL);
+
+	return err;
+}
+
 #define MINIMUM_MAX_MTU 9216
 int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
 				    u32 change, unsigned int mtu,
@@ -277,7 +300,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
 
 	if (change & MLX5E_PORT_BUFFER_PRIO2BUFFER) {
 		update_prio2buffer = true;
-		err = mlx5_query_port_pfc(priv->mdev, &curr_pfc_en, NULL);
+		err = fill_pfc_en(priv->mdev, &curr_pfc_en);
 		if (err)
 			return err;
 
@@ -954,8 +954,8 @@ static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
 {
 	struct cpsw_common *cpsw = dev_id;
 
-	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);
 	writel(0, &cpsw->wr_regs->rx_en);
+	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);
 
 	if (cpsw->quirk_irq) {
 		disable_irq_nosync(cpsw->irqs_table[0]);
@@ -106,6 +106,7 @@ struct slot {
  *	that has not yet been cleared by the user
  * @pending_events: used by the IRQ handler to save events retrieved from the
  *	Slot Status register for later consumption by the IRQ thread
+ * @ist_running: flag to keep user request waiting while IRQ thread is running
  * @request_result: result of last user request submitted to the IRQ thread
  * @requester: wait queue to wake up on completion of user request,
  *	used for synchronous slot enable/disable request via sysfs
@@ -125,6 +126,7 @@ struct controller {
 	unsigned int notification_enabled:1;
 	unsigned int power_fault_detected;
 	atomic_t pending_events;
+	unsigned int ist_running;
 	int request_result;
 	wait_queue_head_t requester;
 };
@@ -383,7 +383,8 @@ int pciehp_sysfs_enable_slot(struct slot *p_slot)
 		ctrl->request_result = -ENODEV;
 		pciehp_request(ctrl, PCI_EXP_SLTSTA_PDC);
 		wait_event(ctrl->requester,
-			   !atomic_read(&ctrl->pending_events));
+			   !atomic_read(&ctrl->pending_events) &&
+			   !ctrl->ist_running);
 		return ctrl->request_result;
 	case POWERON_STATE:
 		ctrl_info(ctrl, "Slot(%s): Already in powering on state\n",
@@ -416,7 +417,8 @@ int pciehp_sysfs_disable_slot(struct slot *p_slot)
 		mutex_unlock(&p_slot->lock);
 		pciehp_request(ctrl, DISABLE_SLOT);
 		wait_event(ctrl->requester,
-			   !atomic_read(&ctrl->pending_events));
+			   !atomic_read(&ctrl->pending_events) &&
+			   !ctrl->ist_running);
 		return ctrl->request_result;
 	case POWEROFF_STATE:
 		ctrl_info(ctrl, "Slot(%s): Already in powering off state\n",
@@ -620,6 +620,7 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
 	irqreturn_t ret;
 	u32 events;
 
+	ctrl->ist_running = true;
 	pci_config_pm_runtime_get(pdev);
 
 	/* rerun pciehp_isr() if the port was inaccessible on interrupt */
@@ -666,6 +667,7 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
 	up_read(&ctrl->reset_lock);
 
 	pci_config_pm_runtime_put(pdev);
+	ctrl->ist_running = false;
 	wake_up(&ctrl->requester);
 	return IRQ_HANDLED;
 }
@@ -211,7 +211,7 @@ u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag)
 		return 0;
 
 	mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
-	if (flag)
+	if (flag & PCI_MSIX_ENTRY_CTRL_MASKBIT)
 		mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
 	writel(mask_bits, pci_msix_desc_addr(desc) + PCI_MSIX_ENTRY_VECTOR_CTRL);
 
@@ -1042,17 +1042,22 @@ static int pci_pm_thaw_noirq(struct device *dev)
 			return error;
 	}
 
-	if (pci_has_legacy_pm_support(pci_dev))
-		return pci_legacy_resume_early(dev);
-
 	/*
-	 * pci_restore_state() requires the device to be in D0 (because of MSI
-	 * restoration among other things), so force it into D0 in case the
-	 * driver's "freeze" callbacks put it into a low-power state directly.
+	 * Both the legacy ->resume_early() and the new pm->thaw_noirq()
+	 * callbacks assume the device has been returned to D0 and its
+	 * config state has been restored.
+	 *
+	 * In addition, pci_restore_state() restores MSI-X state in MMIO
+	 * space, which requires the device to be in D0, so return it to D0
+	 * in case the driver's "freeze" callbacks put it into a low-power
+	 * state.
 	 */
 	pci_set_power_state(pci_dev, PCI_D0);
 	pci_restore_state(pci_dev);
 
+	if (pci_has_legacy_pm_support(pci_dev))
+		return pci_legacy_resume_early(dev);
+
 	if (drv && drv->pm && drv->pm->thaw_noirq)
 		error = drv->pm->thaw_noirq(dev);
 
@@ -4219,15 +4219,21 @@ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
 
 static bool pci_quirk_cavium_acs_match(struct pci_dev *dev)
 {
+	if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
+		return false;
+
+	switch (dev->device) {
 	/*
-	 * Effectively selects all downstream ports for whole ThunderX 1
-	 * family by 0xf800 mask (which represents 8 SoCs), while the lower
-	 * bits of device ID are used to indicate which subdevice is used
-	 * within the SoC.
+	 * Effectively selects all downstream ports for whole ThunderX1
+	 * (which represents 8 SoCs).
 	 */
-	return (pci_is_pcie(dev) &&
-		(pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) &&
-		((dev->device & 0xf800) == 0xa000));
+	case 0xa000 ... 0xa7ff: /* ThunderX1 */
+	case 0xaf84:  /* ThunderX2 */
+	case 0xb884:  /* ThunderX3 */
+		return true;
+	default:
+		return false;
+	}
 }
 
 static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags)
@@ -4576,7 +4582,7 @@ int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags)
 #define INTEL_BSPR_REG_BPPD  (1 << 9)
 
 /* Upstream Peer Decode Configuration Register */
-#define INTEL_UPDCR_REG 0x1114
+#define INTEL_UPDCR_REG 0x1014
 /* 5:0 Peer Decode Enable bits */
 #define INTEL_UPDCR_REG_MASK 0x3f
 
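The reworked Cavium match replaces the 0xf800 mask arithmetic with a switch over device IDs, using case ranges for the ThunderX1 block. A compilable sketch of the same shape (device IDs copied from the hunk; case ranges are a GNU extension supported by GCC and Clang):

    #include <stdbool.h>
    #include <stdio.h>

    static bool cavium_acs_match(unsigned int device)
    {
        switch (device) {
        case 0xa000 ... 0xa7ff:   /* ThunderX1 family, 8 SoCs in one range */
        case 0xaf84:              /* ThunderX2 */
        case 0xb884:              /* ThunderX3 */
            return true;
        default:
            return false;
        }
    }

    int main(void)
    {
        printf("%d %d %d\n", cavium_acs_match(0xa123),
               cavium_acs_match(0xaf84), cavium_acs_match(0x1234));
        return 0;
    }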
@@ -241,10 +241,31 @@ static void qcom_glink_channel_release(struct kref *ref)
 {
 	struct glink_channel *channel = container_of(ref, struct glink_channel,
 						     refcount);
+	struct glink_core_rx_intent *intent;
+	struct glink_core_rx_intent *tmp;
 	unsigned long flags;
+	int iid;
+
+	/* cancel pending rx_done work */
+	cancel_work_sync(&channel->intent_work);
 
 	spin_lock_irqsave(&channel->intent_lock, flags);
+	/* Free all non-reuse intents pending rx_done work */
+	list_for_each_entry_safe(intent, tmp, &channel->done_intents, node) {
+		if (!intent->reuse) {
+			kfree(intent->data);
+			kfree(intent);
+		}
+	}
+
+	idr_for_each_entry(&channel->liids, tmp, iid) {
+		kfree(tmp->data);
+		kfree(tmp);
+	}
 	idr_destroy(&channel->liids);
+
+	idr_for_each_entry(&channel->riids, tmp, iid)
+		kfree(tmp);
 	idr_destroy(&channel->riids);
 	spin_unlock_irqrestore(&channel->intent_lock, flags);
 
@@ -1097,13 +1118,12 @@ static int qcom_glink_create_remote(struct qcom_glink *glink,
 close_link:
 	/*
 	 * Send a close request to "undo" our open-ack. The close-ack will
-	 * release the last reference.
+	 * release qcom_glink_send_open_req() reference and the last reference
+	 * will be relesed after receiving remote_close or transport unregister
+	 * by calling qcom_glink_native_remove().
 	 */
 	qcom_glink_send_close_req(glink, channel);
 
-	/* Release qcom_glink_send_open_req() reference */
-	kref_put(&channel->refcount, qcom_glink_channel_release);
-
 	return ret;
 }
 
@@ -1418,15 +1438,13 @@ static int qcom_glink_rx_open(struct qcom_glink *glink, unsigned int rcid,
 
 		ret = rpmsg_register_device(rpdev);
 		if (ret)
-			goto free_rpdev;
+			goto rcid_remove;
 
 		channel->rpdev = rpdev;
 	}
 
 	return 0;
 
-free_rpdev:
-	kfree(rpdev);
 rcid_remove:
 	spin_lock_irqsave(&glink->idr_lock, flags);
 	idr_remove(&glink->rcids, channel->rcid);
@@ -1547,6 +1565,18 @@ static void qcom_glink_work(struct work_struct *work)
 	}
 }
 
+static void qcom_glink_cancel_rx_work(struct qcom_glink *glink)
+{
+	struct glink_defer_cmd *dcmd;
+	struct glink_defer_cmd *tmp;
+
+	/* cancel any pending deferred rx_work */
+	cancel_work_sync(&glink->rx_work);
+
+	list_for_each_entry_safe(dcmd, tmp, &glink->rx_queue, node)
+		kfree(dcmd);
+}
+
 struct qcom_glink *qcom_glink_native_probe(struct device *dev,
 					   unsigned long features,
 					   struct qcom_glink_pipe *rx,
@@ -1622,23 +1652,24 @@ void qcom_glink_native_remove(struct qcom_glink *glink)
 	struct glink_channel *channel;
 	int cid;
 	int ret;
-	unsigned long flags;
 
 	disable_irq(glink->irq);
-	cancel_work_sync(&glink->rx_work);
+	qcom_glink_cancel_rx_work(glink);
 
 	ret = device_for_each_child(glink->dev, NULL, qcom_glink_remove_device);
 	if (ret)
 		dev_warn(glink->dev, "Can't remove GLINK devices: %d\n", ret);
 
-	spin_lock_irqsave(&glink->idr_lock, flags);
 	/* Release any defunct local channels, waiting for close-ack */
 	idr_for_each_entry(&glink->lcids, channel, cid)
 		kref_put(&channel->refcount, qcom_glink_channel_release);
 
+	/* Release any defunct local channels, waiting for close-req */
+	idr_for_each_entry(&glink->rcids, channel, cid)
+		kref_put(&channel->refcount, qcom_glink_channel_release);
+
 	idr_destroy(&glink->lcids);
 	idr_destroy(&glink->rcids);
-	spin_unlock_irqrestore(&glink->idr_lock, flags);
 	mbox_free_channel(glink->mbox_chan);
 }
 EXPORT_SYMBOL_GPL(qcom_glink_native_remove);
@@ -105,7 +105,7 @@ static void glink_smem_rx_advance(struct qcom_glink_pipe *np,
 	tail = le32_to_cpu(*pipe->tail);
 
 	tail += count;
-	if (tail > pipe->native.length)
+	if (tail >= pipe->native.length)
 		tail -= pipe->native.length;
 
 	*pipe->tail = cpu_to_le32(tail);
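The one-character glink_smem change matters because with '>' a tail that lands exactly on the FIFO length is left one past the last valid index instead of wrapping to 0. A tiny check of the corrected wrap condition:

    #include <assert.h>

    #define FIFO_LEN 16u

    /* Advance a FIFO tail index by count, wrapping at the end. */
    static unsigned int advance(unsigned int tail, unsigned int count)
    {
        tail += count;
        if (tail >= FIFO_LEN)    /* the bug used '>', leaving tail == FIFO_LEN */
            tail -= FIFO_LEN;
        return tail;
    }

    int main(void)
    {
        assert(advance(12, 4) == 0);    /* exactly on the end must wrap to 0 */
        assert(advance(12, 3) == 15);   /* one short of the end stays valid  */
        return 0;
    }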
@@ -1983,7 +1983,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
 
 	ISCSI_DBG_EH(session, "scsi cmd %p timedout\n", sc);
 
-	spin_lock(&session->frwd_lock);
+	spin_lock_bh(&session->frwd_lock);
 	task = (struct iscsi_task *)sc->SCp.ptr;
 	if (!task) {
 		/*
@@ -2110,7 +2110,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
 done:
 	if (task)
 		task->last_timeout = jiffies;
-	spin_unlock(&session->frwd_lock);
+	spin_unlock_bh(&session->frwd_lock);
 	ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ?
 		     "timer reset" : "shutdown or nh");
 	return rc;
@@ -966,6 +966,7 @@ int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport)
 
 	e->u.fcport.fcport = fcport;
 	fcport->flags |= FCF_ASYNC_ACTIVE;
+	fcport->disc_state = DSC_LOGIN_PEND;
 	return qla2x00_post_work(vha, e);
 }
 
@@ -868,6 +868,14 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
 			status |= USB_PORT_STAT_C_BH_RESET << 16;
 		if ((raw_port_status & PORT_CEC))
 			status |= USB_PORT_STAT_C_CONFIG_ERROR << 16;
+
+		/* USB3 remote wake resume signaling completed */
+		if (bus_state->port_remote_wakeup & (1 << wIndex) &&
+		    (raw_port_status & PORT_PLS_MASK) != XDEV_RESUME &&
+		    (raw_port_status & PORT_PLS_MASK) != XDEV_RECOVERY) {
+			bus_state->port_remote_wakeup &= ~(1 << wIndex);
+			usb_hcd_end_port_resume(&hcd->self, wIndex);
+		}
 	}
 
 	if (hcd->speed < HCD_USB3) {
@@ -1609,7 +1609,6 @@ static void handle_port_status(struct xhci_hcd *xhci,
 		slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1);
 		if (slot_id && xhci->devs[slot_id])
 			xhci->devs[slot_id]->flags |= VDEV_PORT_ERROR;
-		bus_state->port_remote_wakeup &= ~(1 << hcd_portnum);
 	}
 
 	if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_RESUME) {
@@ -1630,6 +1629,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
 			bus_state->port_remote_wakeup |= 1 << hcd_portnum;
 			xhci_test_and_clear_bit(xhci, port, PORT_PLC);
 			xhci_set_link_state(xhci, port, XDEV_U0);
+			usb_hcd_start_port_resume(&hcd->self, hcd_portnum);
 			/* Need to wait until the next link state change
 			 * indicates the device is actually in U0.
 			 */
@@ -1669,7 +1669,6 @@ static void handle_port_status(struct xhci_hcd *xhci,
 		if (slot_id && xhci->devs[slot_id])
 			xhci_ring_device(xhci, slot_id);
 		if (bus_state->port_remote_wakeup & (1 << hcd_portnum)) {
-			bus_state->port_remote_wakeup &= ~(1 << hcd_portnum);
 			xhci_test_and_clear_bit(xhci, port, PORT_PLC);
 			usb_wakeup_notification(hcd->self.root_hub,
 					hcd_portnum + 1);
@@ -297,8 +297,8 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
 	irq = pci_irq_vector(pdev, vector);
 
 	if (vdev->ctx[vector].trigger) {
-		free_irq(irq, vdev->ctx[vector].trigger);
 		irq_bypass_unregister_producer(&vdev->ctx[vector].producer);
+		free_irq(irq, vdev->ctx[vector].trigger);
 		kfree(vdev->ctx[vector].name);
 		eventfd_ctx_put(vdev->ctx[vector].trigger);
 		vdev->ctx[vector].trigger = NULL;
@@ -210,6 +210,11 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
 		if (!server->rdma)
 			goto skip_rdma;
 
+		if (!server->smbd_conn) {
+			seq_printf(m, "\nSMBDirect transport not available");
+			goto skip_rdma;
+		}
+
 		seq_printf(m, "\nSMBDirect (in hex) protocol version: %x "
 			"transport status: %x",
 			server->smbd_conn->protocol,
@@ -726,6 +726,13 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
 	if (backup_cred(cifs_sb))
 		create_options |= CREATE_OPEN_BACKUP_INTENT;
 
+	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
+	if (cfile->f_flags & O_SYNC)
+		create_options |= CREATE_WRITE_THROUGH;
+
+	if (cfile->f_flags & O_DIRECT)
+		create_options |= CREATE_NO_BUFFER;
+
 	if (server->ops->get_lease_key)
 		server->ops->get_lease_key(inode, &cfile->fid);
 
@@ -743,36 +743,67 @@ smb2_cancelled_close_fid(struct work_struct *work)
 	kfree(cancelled);
 }
 
+/* Caller should already has an extra reference to @tcon */
+static int
+__smb2_handle_cancelled_close(struct cifs_tcon *tcon, __u64 persistent_fid,
+			      __u64 volatile_fid)
+{
+	struct close_cancelled_open *cancelled;
+
+	cancelled = kzalloc(sizeof(*cancelled), GFP_KERNEL);
+	if (!cancelled)
+		return -ENOMEM;
+
+	cancelled->fid.persistent_fid = persistent_fid;
+	cancelled->fid.volatile_fid = volatile_fid;
+	cancelled->tcon = tcon;
+	INIT_WORK(&cancelled->work, smb2_cancelled_close_fid);
+	WARN_ON(queue_work(cifsiod_wq, &cancelled->work) == false);
+
+	return 0;
+}
+
+int
+smb2_handle_cancelled_close(struct cifs_tcon *tcon, __u64 persistent_fid,
+			    __u64 volatile_fid)
+{
+	int rc;
+
+	cifs_dbg(FYI, "%s: tc_count=%d\n", __func__, tcon->tc_count);
+	spin_lock(&cifs_tcp_ses_lock);
+	tcon->tc_count++;
+	spin_unlock(&cifs_tcp_ses_lock);
+
+	rc = __smb2_handle_cancelled_close(tcon, persistent_fid, volatile_fid);
+	if (rc)
+		cifs_put_tcon(tcon);
+
+	return rc;
+}
+
 int
 smb2_handle_cancelled_mid(char *buffer, struct TCP_Server_Info *server)
 {
 	struct smb2_sync_hdr *sync_hdr = (struct smb2_sync_hdr *)buffer;
 	struct smb2_create_rsp *rsp = (struct smb2_create_rsp *)buffer;
 	struct cifs_tcon *tcon;
-	struct close_cancelled_open *cancelled;
+	int rc;
 
 	if (sync_hdr->Command != SMB2_CREATE ||
 	    sync_hdr->Status != STATUS_SUCCESS)
 		return 0;
 
-	cancelled = kzalloc(sizeof(*cancelled), GFP_KERNEL);
-	if (!cancelled)
-		return -ENOMEM;
-
 	tcon = smb2_find_smb_tcon(server, sync_hdr->SessionId,
 				  sync_hdr->TreeId);
-	if (!tcon) {
-		kfree(cancelled);
+	if (!tcon)
 		return -ENOENT;
-	}
 
-	cancelled->fid.persistent_fid = rsp->PersistentFileId;
-	cancelled->fid.volatile_fid = rsp->VolatileFileId;
-	cancelled->tcon = tcon;
-	INIT_WORK(&cancelled->work, smb2_cancelled_close_fid);
-	queue_work(cifsiod_wq, &cancelled->work);
+	rc = __smb2_handle_cancelled_close(tcon, rsp->PersistentFileId,
+					   rsp->VolatileFileId);
+	if (rc)
+		cifs_put_tcon(tcon);
 
-	return 0;
+	return rc;
 }
 
 /**
@@ -2629,7 +2629,21 @@ int
 SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
 	   u64 persistent_fid, u64 volatile_fid)
 {
-	return SMB2_close_flags(xid, tcon, persistent_fid, volatile_fid, 0);
+	int rc;
+	int tmp_rc;
+
+	rc = SMB2_close_flags(xid, tcon, persistent_fid, volatile_fid, 0);
+
+	/* retry close in a worker thread if this one is interrupted */
+	if (rc == -EINTR) {
+		tmp_rc = smb2_handle_cancelled_close(tcon, persistent_fid,
+						     volatile_fid);
+		if (tmp_rc)
+			cifs_dbg(VFS, "handle cancelled close fid 0x%llx returned error %d\n",
+				 persistent_fid, tmp_rc);
+	}
+
+	return rc;
 }
 
 int
@@ -204,6 +204,9 @@ extern int SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
 extern int SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
 			     const u64 persistent_fid, const u64 volatile_fid,
 			     const __u8 oplock_level);
+extern int smb2_handle_cancelled_close(struct cifs_tcon *tcon,
+				       __u64 persistent_fid,
+				       __u64 volatile_fid);
 extern int smb2_handle_cancelled_mid(char *buffer,
 				     struct TCP_Server_Info *server);
 void smb2_cancelled_close_fid(struct work_struct *work);
@@ -1164,7 +1164,7 @@ static int smbd_post_send_data(
 
 	if (n_vec > SMBDIRECT_MAX_SGE) {
 		cifs_dbg(VFS, "Can't fit data to SGL, n_vec=%d\n", n_vec);
-		return -ENOMEM;
+		return -EINVAL;
 	}
 
 	sg_init_table(sgl, n_vec);
@@ -1491,6 +1491,7 @@ void smbd_destroy(struct smbd_connection *info)
 		info->transport_status == SMBD_DESTROYED);
 
 	destroy_workqueue(info->workqueue);
+	log_rdma_event(INFO, "rdma session destroyed\n");
 	kfree(info);
 }
 
@@ -1528,8 +1529,9 @@ int smbd_reconnect(struct TCP_Server_Info *server)
 	log_rdma_event(INFO, "creating rdma session\n");
 	server->smbd_conn = smbd_get_connection(
 		server, (struct sockaddr *) &server->dstaddr);
-	log_rdma_event(INFO, "created rdma session info=%p\n",
-		server->smbd_conn);
+
+	if (server->smbd_conn)
+		cifs_dbg(VFS, "RDMA transport re-established\n");
 
 	return server->smbd_conn ? 0 : -ENOENT;
 }
@@ -286,8 +286,11 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
 	int val = 1;
 	__be32 rfc1002_marker;
 
-	if (cifs_rdma_enabled(server) && server->smbd_conn) {
-		rc = smbd_send(server, num_rqst, rqst);
+	if (cifs_rdma_enabled(server)) {
+		/* return -EAGAIN when connecting or reconnecting */
+		rc = -EAGAIN;
+		if (server->smbd_conn)
+			rc = smbd_send(server, num_rqst, rqst);
 		goto smbd_done;
 	}
 	if (ssocket == NULL)
@@ -1834,6 +1834,11 @@ struct net_device {
 	unsigned char		if_port;
 	unsigned char		dma;
 
+	/* Note : dev->mtu is often read without holding a lock.
+	 * Writers usually hold RTNL.
+	 * It is recommended to use READ_ONCE() to annotate the reads,
+	 * and to use WRITE_ONCE() to annotate the writes.
+	 */
 	unsigned int		mtu;
 	unsigned int		min_mtu;
 	unsigned int		max_mtu;
@@ -96,4 +96,17 @@ static inline bool itimerspec64_valid(const struct itimerspec64 *its)
  */
 #define time_after32(a, b)	((s32)((u32)(b) - (u32)(a)) < 0)
 #define time_before32(b, a)	time_after32(a, b)
+
+/**
+ * time_between32 - check if a 32-bit timestamp is within a given time range
+ * @t:	the time which may be within [l,h]
+ * @l:	the lower bound of the range
+ * @h:	the higher bound of the range
+ *
+ * time_before32(t, l, h) returns true if @l <= @t <= @h. All operands are
+ * treated as 32-bit integers.
+ *
+ * Equivalent to !(time_before32(@t, @l) || time_after32(@t, @h)).
+ */
+#define time_between32(t, l, h) ((u32)(h) - (u32)(l) >= (u32)(t) - (u32)(l))
 #endif
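time_between32() reduces "l <= t <= h" on wrapping 32-bit timestamps to a single unsigned comparison: rebase both t and h to l, then compare. A standalone check that it behaves across the 2^32 wrap point (macro body copied from the hunk, with uint32_t standing in for u32):

    #include <assert.h>
    #include <stdint.h>

    #define time_between32(t, l, h) \
        ((uint32_t)(h) - (uint32_t)(l) >= (uint32_t)(t) - (uint32_t)(l))

    int main(void)
    {
        /* A window that straddles the 32-bit wrap point. */
        uint32_t l = UINT32_MAX - 5, h = l + 10;   /* h wraps around to 4 */

        assert(time_between32(l + 3, l, h));       /* inside, before wrap */
        assert(time_between32(2, l, h));           /* inside, after wrap  */
        assert(!time_between32(l - 1, l, h));      /* just below the window */
        assert(!time_between32(5, l, h));          /* just above the window */
        return 0;
    }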
@@ -692,4 +692,9 @@ int ip_misc_proc_init(void);
 int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family,
 				struct netlink_ext_ack *extack);
 
+static inline bool inetdev_valid_mtu(unsigned int mtu)
+{
+	return likely(mtu >= IPV4_MIN_MTU);
+}
+
 #endif /* _IP_H */
@@ -485,15 +485,16 @@ static inline void tcp_synq_overflow(const struct sock *sk)
 		reuse = rcu_dereference(sk->sk_reuseport_cb);
 		if (likely(reuse)) {
 			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
-			if (time_after32(now, last_overflow + HZ))
+			if (!time_between32(now, last_overflow,
+					    last_overflow + HZ))
 				WRITE_ONCE(reuse->synq_overflow_ts, now);
 			return;
 		}
 	}
 
-	last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
-	if (time_after32(now, last_overflow + HZ))
-		tcp_sk(sk)->rx_opt.ts_recent_stamp = now;
+	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
+	if (!time_between32(now, last_overflow, last_overflow + HZ))
+		WRITE_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp, now);
 }
 
 /* syncookies: no recent synqueue overflow on this listening socket? */
@@ -508,13 +509,23 @@ static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
 		reuse = rcu_dereference(sk->sk_reuseport_cb);
 		if (likely(reuse)) {
 			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
-			return time_after32(now, last_overflow +
-					    TCP_SYNCOOKIE_VALID);
+			return !time_between32(now, last_overflow - HZ,
+					       last_overflow +
+					       TCP_SYNCOOKIE_VALID);
 		}
 	}
 
-	last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
-	return time_after32(now, last_overflow + TCP_SYNCOOKIE_VALID);
+	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
+
+	/* If last_overflow <= jiffies <= last_overflow + TCP_SYNCOOKIE_VALID,
+	 * then we're under synflood. However, we have to use
+	 * 'last_overflow - HZ' as lower bound. That's because a concurrent
+	 * tcp_synq_overflow() could update .ts_recent_stamp after we read
+	 * jiffies but before we store .ts_recent_stamp into last_overflow,
+	 * which could lead to rejecting a valid syncookie.
+	 */
+	return !time_between32(now, last_overflow - HZ,
+			       last_overflow + TCP_SYNCOOKIE_VALID);
 }
 
 static inline u32 tcp_cookie_time(void)
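Editor's note: the point of replacing the one-sided time_after32() test with a two-sided time_between32() window is the stale-timestamp case: once .ts_recent_stamp is about 2^31 jiffies old, the signed comparison flips and the old code reports a "recent" synqueue overflow that never happened. A userspace sketch of both predicates; HZ and the 60*HZ validity window below are illustrative stand-ins, not the kernel's exact TCP_SYNCOOKIE_VALID value:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;
typedef int32_t s32;

#define time_after32(a, b)	((s32)((u32)(b) - (u32)(a)) < 0)
#define time_between32(t, l, h) ((u32)(h) - (u32)(l) >= (u32)(t) - (u32)(l))

#define HZ 1000u			/* illustrative tick rate */
#define SYNCOOKIE_VALID (60u * HZ)	/* stand-in validity window */

int main(void)
{
	u32 now = 0;
	u32 last_overflow = 0x70000000;	/* stamp roughly 2^31 jiffies stale */

	/* Old check: "no recent overflow" iff now is after last + VALID. */
	int old_ok = time_after32(now, last_overflow + SYNCOOKIE_VALID);
	/* New check: overflow is recent only inside [last - HZ, last + VALID]. */
	int new_ok = !time_between32(now, last_overflow - HZ,
				     last_overflow + SYNCOOKIE_VALID);

	printf("old says no recent overflow: %d\n", old_ok);	/* 0: wrong */
	printf("new says no recent overflow: %d\n", new_ok);	/* 1: right */
	return 0;
}

The -HZ slack on the lower bound covers the race the new comment describes: a concurrent tcp_synq_overflow() may store a timestamp slightly newer than the jiffies value this CPU already sampled.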
@@ -246,6 +246,12 @@ static int br_set_mac_address(struct net_device *dev, void *p)
 	if (!is_valid_ether_addr(addr->sa_data))
 		return -EADDRNOTAVAIL;
 
+	/* dev_set_mac_address() can be called by a master device on bridge's
+	 * NETDEV_UNREGISTER, but since it's being destroyed do nothing
+	 */
+	if (dev->reg_state != NETREG_REGISTERED)
+		return -EBUSY;
+
 	spin_lock_bh(&br->lock);
 	if (!ether_addr_equal(dev->dev_addr, addr->sa_data)) {
 		/* Mac address will be changed in br_stp_change_bridge_id(). */
@@ -7595,7 +7595,8 @@ int __dev_set_mtu(struct net_device *dev, int new_mtu)
 	if (ops->ndo_change_mtu)
 		return ops->ndo_change_mtu(dev, new_mtu);
 
-	dev->mtu = new_mtu;
+	/* Pairs with all the lockless reads of dev->mtu in the stack */
+	WRITE_ONCE(dev->mtu, new_mtu);
 	return 0;
 }
 EXPORT_SYMBOL(__dev_set_mtu);
@@ -630,9 +630,10 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
 		nhoff = skb_network_offset(skb);
 	hlen = skb_headlen(skb);
 #if IS_ENABLED(CONFIG_NET_DSA)
-	if (unlikely(skb->dev && netdev_uses_dsa(skb->dev))) {
+	if (unlikely(skb->dev && netdev_uses_dsa(skb->dev) &&
+		     proto == htons(ETH_P_XDSA))) {
 		const struct dsa_device_ops *ops;
-		int offset;
+		int offset = 0;
 
 		ops = skb->dev->dsa_ptr->tag_ops;
 		if (ops->flow_dissect &&
@@ -1441,11 +1441,6 @@ static void inetdev_changename(struct net_device *dev, struct in_device *in_dev)
 	}
 }
 
-static bool inetdev_valid_mtu(unsigned int mtu)
-{
-	return mtu >= IPV4_MIN_MTU;
-}
-
 static void inetdev_send_gratuitous_arp(struct net_device *dev,
 					struct in_device *in_dev)
 
@@ -1142,15 +1142,18 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
 		cork->addr = ipc->addr;
 	}
 
-	/*
-	 * We steal reference to this route, caller should not release it
-	 */
-	*rtp = NULL;
 	cork->fragsize = ip_sk_use_pmtu(sk) ?
-			 dst_mtu(&rt->dst) : rt->dst.dev->mtu;
+			 dst_mtu(&rt->dst) : READ_ONCE(rt->dst.dev->mtu);
+
+	if (!inetdev_valid_mtu(cork->fragsize))
+		return -ENETUNREACH;
+
 	cork->gso_size = ipc->gso_size;
+
 	cork->dst = &rt->dst;
+	/* We stole this route, caller should not release it. */
+	*rtp = NULL;
+
 	cork->length = 0;
 	cork->ttl = ipc->ttl;
 	cork->tos = ipc->tos;
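Editor's note: beside the READ_ONCE() annotation, the shape of this fix matters: the route reference in *rtp is now stolen only after the MTU check passes, so an early -ENETUNREACH return leaves the caller owning, and responsible for releasing, the route. A generic userspace sketch of that "validate first, transfer ownership last" pattern, with hypothetical types and names:

#include <stdio.h>
#include <stdlib.h>

#define IPV4_MIN_MTU 68u	/* same minimum the kernel check enforces */

struct route { unsigned int mtu; };	/* hypothetical stand-in type */

/* On failure the caller still owns *rtp; on success ownership moves here. */
static int setup_cork(struct route **rtp, struct route **cork_dst)
{
	struct route *rt = *rtp;

	if (rt->mtu < IPV4_MIN_MTU)
		return -1;	/* reject early: caller frees rt */

	*cork_dst = rt;		/* commit the route into the cork */
	*rtp = NULL;		/* only now steal the reference */
	return 0;
}

int main(void)
{
	struct route *rt = malloc(sizeof(*rt));
	struct route *cork_dst = NULL;

	if (!rt)
		return 1;
	rt->mtu = 0;	/* invalid: setup must fail without taking ownership */
	if (setup_cork(&rt, &cork_dst) < 0 && rt) {
		puts("setup failed, caller releases the route");
		free(rt);
	}
	return 0;
}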
@@ -740,8 +740,9 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb
 			min_t(unsigned int, eff_sacks,
 			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
 			      TCPOLEN_SACK_PERBLOCK);
-		size += TCPOLEN_SACK_BASE_ALIGNED +
-			opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
+		if (likely(opts->num_sack_blocks))
+			size += TCPOLEN_SACK_BASE_ALIGNED +
+				opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
 	}
 
 	return size;
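Editor's note: the overestimation fixed here is easy to see numerically. TCPOLEN_SACK_BASE_ALIGNED is 4 and TCPOLEN_SACK_PERBLOCK is 8, so when the remaining option space allows zero SACK blocks the old code still charged 4 bytes for an option that is never written. A quick arithmetic check:

#include <stdio.h>

#define TCPOLEN_SACK_BASE_ALIGNED 4u
#define TCPOLEN_SACK_PERBLOCK     8u

int main(void)
{
	unsigned int num_sack_blocks = 0;	/* no room left for SACK blocks */

	/* Old accounting: base is always counted, even with zero blocks. */
	unsigned int old_size = TCPOLEN_SACK_BASE_ALIGNED +
				num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
	/* New accounting: count the option only when blocks will be sent. */
	unsigned int new_size = num_sack_blocks ?
				TCPOLEN_SACK_BASE_ALIGNED +
				num_sack_blocks * TCPOLEN_SACK_PERBLOCK : 0;

	printf("old=%u new=%u\n", old_size, new_size);	/* old=4 new=0 */
	return 0;
}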
@@ -897,6 +897,17 @@ static int ovs_ct_nat(struct net *net, struct sw_flow_key *key,
 	}
 	err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range, maniptype);
 
+	if (err == NF_ACCEPT &&
+	    ct->status & IPS_SRC_NAT && ct->status & IPS_DST_NAT) {
+		if (maniptype == NF_NAT_MANIP_SRC)
+			maniptype = NF_NAT_MANIP_DST;
+		else
+			maniptype = NF_NAT_MANIP_SRC;
+
+		err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range,
+					 maniptype);
+	}
+
 	/* Mark NAT done if successful and update the flow key. */
 	if (err == NF_ACCEPT)
 		ovs_nat_update_key(key, skb, maniptype);
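Editor's note: the logic being added is that a connection carrying both source- and destination-NAT state (asymmetric conntrack) needs two translation passes, one per manip type; a single ovs_ct_nat_execute() call only applies one. A stripped-down sketch of the flip with stand-in types — the real code operates on nf_conn state and sk_buffs:

#include <stdio.h>

enum manip { MANIP_SRC, MANIP_DST };

#define IPS_SRC_NAT 0x1u
#define IPS_DST_NAT 0x2u

static int nat_execute(enum manip m)	/* stand-in for ovs_ct_nat_execute() */
{
	printf("apply %s translation\n", m == MANIP_SRC ? "source" : "dest");
	return 0;	/* stand-in for NF_ACCEPT */
}

static int do_nat(unsigned int ct_status, enum manip maniptype)
{
	int err = nat_execute(maniptype);

	/* Both directions are NATed: run the opposite manip type as well. */
	if (err == 0 && (ct_status & IPS_SRC_NAT) && (ct_status & IPS_DST_NAT)) {
		maniptype = (maniptype == MANIP_SRC) ? MANIP_DST : MANIP_SRC;
		err = nat_execute(maniptype);
	}
	return err;
}

int main(void)
{
	return do_nat(IPS_SRC_NAT | IPS_DST_NAT, MANIP_SRC);
}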
@@ -158,6 +158,7 @@ static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
 			__gnet_stats_copy_queue(&sch->qstats,
 						qdisc->cpu_qstats,
 						&qdisc->qstats, qlen);
+			sch->q.qlen		+= qlen;
 		} else {
 			sch->q.qlen		+= qdisc->q.qlen;
 			sch->bstats.bytes	+= qdisc->bstats.bytes;
@@ -413,6 +413,7 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
 			__gnet_stats_copy_queue(&sch->qstats,
 						qdisc->cpu_qstats,
 						&qdisc->qstats, qlen);
+			sch->q.qlen		+= qlen;
 		} else {
 			sch->q.qlen		+= qdisc->q.qlen;
 			sch->bstats.bytes	+= qdisc->bstats.bytes;
@@ -435,7 +436,7 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
 		opt.offset[tc] = dev->tc_to_txq[tc].offset;
 	}
 
-	if (nla_put(skb, TCA_OPTIONS, NLA_ALIGN(sizeof(opt)), &opt))
+	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
 		goto nla_put_failure;
 
 	if ((priv->flags & TC_MQPRIO_F_MODE) &&
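Editor's note: the out-of-bounds access fixed in the second hunk comes from the padding arithmetic. Netlink attribute payloads are padded to 4-byte multiples, and NLA_ALIGN() rounds up, so for a struct whose size is not already a multiple of 4 the old call told nla_put() to copy bytes past the end of opt; nla_put() pads the attribute itself, so the plain sizeof is correct. The 22-byte struct below is a hypothetical stand-in used just to show the rounding:

#include <stdio.h>

#define NLA_ALIGNTO 4u
#define NLA_ALIGN(len) (((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))

struct opt_demo { char bytes[22]; };	/* stand-in for a 22-byte option struct */

int main(void)
{
	/* Copying NLA_ALIGN(sizeof(opt)) bytes would read past the end of
	 * opt; the attribute padding is the receiver's job, not the copy's. */
	printf("sizeof=%zu aligned=%zu\n",
	       sizeof(struct opt_demo),
	       (size_t)NLA_ALIGN(sizeof(struct opt_demo)));	/* 22 vs 24 */
	return 0;
}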
@@ -120,14 +120,6 @@ static int __init tipc_init(void)
 	sysctl_tipc_rmem[1] = RCVBUF_DEF;
 	sysctl_tipc_rmem[2] = RCVBUF_MAX;
 
-	err = tipc_netlink_start();
-	if (err)
-		goto out_netlink;
-
-	err = tipc_netlink_compat_start();
-	if (err)
-		goto out_netlink_compat;
-
 	err = tipc_register_sysctl();
 	if (err)
 		goto out_sysctl;
@@ -148,8 +140,21 @@ static int __init tipc_init(void)
 	if (err)
 		goto out_bearer;
 
+	err = tipc_netlink_start();
+	if (err)
+		goto out_netlink;
+
+	err = tipc_netlink_compat_start();
+	if (err)
+		goto out_netlink_compat;
+
 	pr_info("Started in single node mode\n");
 	return 0;
 
+out_netlink_compat:
+	tipc_netlink_stop();
+out_netlink:
+	tipc_bearer_cleanup();
 out_bearer:
 	unregister_pernet_device(&tipc_topsrv_net_ops);
 out_pernet_topsrv:
@@ -159,22 +164,18 @@ static int __init tipc_init(void)
 out_pernet:
 	tipc_unregister_sysctl();
 out_sysctl:
-	tipc_netlink_compat_stop();
-out_netlink_compat:
-	tipc_netlink_stop();
-out_netlink:
 	pr_err("Unable to start in single node mode\n");
 	return err;
 }
 
 static void __exit tipc_exit(void)
 {
+	tipc_netlink_compat_stop();
+	tipc_netlink_stop();
 	tipc_bearer_cleanup();
 	unregister_pernet_device(&tipc_topsrv_net_ops);
 	tipc_socket_stop();
 	unregister_pernet_device(&tipc_net_ops);
-	tipc_netlink_stop();
-	tipc_netlink_compat_stop();
 	tipc_unregister_sysctl();
 
 	pr_info("Deactivated\n");
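Editor's note: the tipc hunks are a textbook case of keeping bring-up, error unwinding, and tear-down mirror images of one another: netlink is now started last, so it is stopped first both in the error labels and in tipc_exit(). A generic sketch of the pattern with placeholder subsystems:

#include <stdio.h>

static int start_a(void)  { puts("start a"); return 0; }
static void stop_a(void)  { puts("stop a"); }
static int start_b(void)  { puts("start b"); return 0; }
static void stop_b(void)  { puts("stop b"); }

static int init_mod(void)
{
	int err;

	err = start_a();
	if (err)
		goto out;
	err = start_b();	/* started last ... */
	if (err)
		goto out_a;
	return 0;

out_a:
	stop_a();	/* unwind labels mirror the setup order */
out:
	return err;
}

static void exit_mod(void)
{
	stop_b();	/* ... so it is stopped first on the way down */
	stop_a();
}

int main(void)
{
	if (!init_mod())
		exit_mod();
	return 0;
}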