Merge branch 'android-4.19-stable' of https://android.googlesource.com/kernel/common into android13-4.19-kona
* 'android-4.19-stable' of https://android.googlesource.com/kernel/common: (415 commits)
  Linux 4.19.318
  i2c: rcar: bring hardware to known state when probing
  nilfs2: fix kernel bug on rename operation of broken directory
  SUNRPC: Fix RPC client cleaned up the freed pipefs dentries
  tcp: avoid too many retransmit packets
  tcp: use signed arithmetic in tcp_rtx_probe0_timed_out()
  net: tcp: fix unexcepted socket die when snd_wnd is 0
  tcp: refactor tcp_retransmit_timer()
  libceph: fix race between delayed_work() and ceph_monc_stop()
  hpet: Support 32-bit userspace
  USB: core: Fix duplicate endpoint bug by clearing reserved bits in the descriptor
  usb: gadget: configfs: Prevent OOB read/write in usb_string_copy()
  USB: Add USB_QUIRK_NO_SET_INTF quirk for START BP-850k
  USB: serial: option: add Rolling RW350-GL variants
  USB: serial: option: add Netprisma LCUK54 series modules
  USB: serial: option: add support for Foxconn T99W651
  USB: serial: option: add Fibocom FM350-GL
  USB: serial: option: add Telit FN912 rmnet compositions
  USB: serial: option: add Telit generic core-dump composition
  ARM: davinci: Convert comma to semicolon
  ...

Conflicts:
	drivers/net/usb/ax88179_178a.c
	drivers/scsi/ufs/ufshcd.c

Change-Id: I63f3c3862218db4d5d13828c76e11f21da54ca42
@@ -1293,7 +1293,7 @@ bool current_cpuset_is_being_rebound(void)
 static int update_relax_domain_level(struct cpuset *cs, s64 val)
 {
 #ifdef CONFIG_SMP
-	if (val < -1 || val >= sched_domain_level_max)
+	if (val < -1 || val > sched_domain_level_max + 1)
 		return -EINVAL;
 #endif
 

@@ -170,6 +170,33 @@ static int kdb_read_get_key(char *buffer, size_t bufsize)
 	return key;
 }
 
+/**
+ * kdb_position_cursor() - Place cursor in the correct horizontal position
+ * @prompt: Nil-terminated string containing the prompt string
+ * @buffer: Nil-terminated string containing the entire command line
+ * @cp: Cursor position, pointer the character in buffer where the cursor
+ *      should be positioned.
+ *
+ * The cursor is positioned by sending a carriage-return and then printing
+ * the content of the line until we reach the correct cursor position.
+ *
+ * There is some additional fine detail here.
+ *
+ * Firstly, even though kdb_printf() will correctly format zero-width fields
+ * we want the second call to kdb_printf() to be conditional. That keeps things
+ * a little cleaner when LOGGING=1.
+ *
+ * Secondly, we can't combine everything into one call to kdb_printf() since
+ * that renders into a fixed length buffer and the combined print could result
+ * in unwanted truncation.
+ */
+static void kdb_position_cursor(char *prompt, char *buffer, char *cp)
+{
+	kdb_printf("\r%s", kdb_prompt_str);
+	if (cp > buffer)
+		kdb_printf("%.*s", (int)(cp - buffer), buffer);
+}
+
 /*
  * kdb_read
  *
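For readers without a kdb console at hand, the carriage-return trick described in the kerneldoc above can be reproduced in ordinary user space. The sketch below is illustrative only and assumes a VT100-style terminal; position_cursor(), the "kdb> " prompt and the sample line are inventions of this example, not part of the patch.

#include <stdio.h>

/* Reposition the terminal cursor the way kdb_position_cursor() does:
 * return to column 0, reprint the prompt, then reprint only the
 * characters that sit to the left of the logical cursor position. */
static void position_cursor(const char *prompt, const char *buffer,
			    const char *cp)
{
	printf("\r%s", prompt);
	if (cp > buffer)
		printf("%.*s", (int)(cp - buffer), buffer);
	fflush(stdout);
}

int main(void)
{
	const char *prompt = "kdb> ";
	char line[] = "md 0x1000";

	printf("%s%s", prompt, line);		/* draw the whole line */
	position_cursor(prompt, line, line + 3);	/* park cursor after "md " */
	getchar();				/* pause so the effect is visible */
	printf("\n");
	return 0;
}
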
@@ -208,7 +235,6 @@ static char *kdb_read(char *buffer, size_t bufsize)
 					 * and null byte */
 	char *lastchar;
 	char *p_tmp;
-	char tmp;
 	static char tmpbuffer[CMD_BUFLEN];
 	int len = strlen(buffer);
 	int len_tmp;
@@ -251,12 +277,8 @@ static char *kdb_read(char *buffer, size_t bufsize)
 			}
 			*(--lastchar) = '\0';
 			--cp;
-			kdb_printf("\b%s \r", cp);
-			tmp = *cp;
-			*cp = '\0';
-			kdb_printf(kdb_prompt_str);
-			kdb_printf("%s", buffer);
-			*cp = tmp;
+			kdb_printf("\b%s ", cp);
+			kdb_position_cursor(kdb_prompt_str, buffer, cp);
 		}
 		break;
 	case 10: /* new line */
@@ -278,19 +300,14 @@ static char *kdb_read(char *buffer, size_t bufsize)
 			memcpy(tmpbuffer, cp+1, lastchar - cp - 1);
 			memcpy(cp, tmpbuffer, lastchar - cp - 1);
 			*(--lastchar) = '\0';
-			kdb_printf("%s \r", cp);
-			tmp = *cp;
-			*cp = '\0';
-			kdb_printf(kdb_prompt_str);
-			kdb_printf("%s", buffer);
-			*cp = tmp;
+			kdb_printf("%s ", cp);
+			kdb_position_cursor(kdb_prompt_str, buffer, cp);
 		}
 		break;
 	case 1: /* Home */
 		if (cp > buffer) {
-			kdb_printf("\r");
-			kdb_printf(kdb_prompt_str);
 			cp = buffer;
+			kdb_position_cursor(kdb_prompt_str, buffer, cp);
 		}
 		break;
 	case 5: /* End */
@@ -306,11 +323,10 @@ static char *kdb_read(char *buffer, size_t bufsize)
 		}
 		break;
 	case 14: /* Down */
-		memset(tmpbuffer, ' ',
-		       strlen(kdb_prompt_str) + (lastchar-buffer));
-		*(tmpbuffer+strlen(kdb_prompt_str) +
-		  (lastchar-buffer)) = '\0';
-		kdb_printf("\r%s\r", tmpbuffer);
+	case 16: /* Up */
+		kdb_printf("\r%*c\r",
+			   (int)(strlen(kdb_prompt_str) + (lastchar - buffer)),
+			   ' ');
 		*lastchar = (char)key;
 		*(lastchar+1) = '\0';
 		return lastchar;
@@ -320,15 +336,6 @@ static char *kdb_read(char *buffer, size_t bufsize)
 			++cp;
 		}
 		break;
-	case 16: /* Up */
-		memset(tmpbuffer, ' ',
-		       strlen(kdb_prompt_str) + (lastchar-buffer));
-		*(tmpbuffer+strlen(kdb_prompt_str) +
-		  (lastchar-buffer)) = '\0';
-		kdb_printf("\r%s\r", tmpbuffer);
-		*lastchar = (char)key;
-		*(lastchar+1) = '\0';
-		return lastchar;
 	case 9: /* Tab */
 		if (tab < 2)
 			++tab;
@@ -372,15 +379,25 @@ static char *kdb_read(char *buffer, size_t bufsize)
 			kdb_printf("\n");
 			kdb_printf(kdb_prompt_str);
 			kdb_printf("%s", buffer);
+			if (cp != lastchar)
+				kdb_position_cursor(kdb_prompt_str, buffer, cp);
 		} else if (tab != 2 && count > 0) {
-			len_tmp = strlen(p_tmp);
-			strncpy(p_tmp+len_tmp, cp, lastchar-cp+1);
-			len_tmp = strlen(p_tmp);
-			strncpy(cp, p_tmp+len, len_tmp-len + 1);
-			len = len_tmp - len;
-			kdb_printf("%s", cp);
-			cp += len;
-			lastchar += len;
+			/* How many new characters do we want from tmpbuffer? */
+			len_tmp = strlen(p_tmp) - len;
+			if (lastchar + len_tmp >= bufend)
+				len_tmp = bufend - lastchar;
+
+			if (len_tmp) {
+				/* + 1 ensures the '\0' is memmove'd */
+				memmove(cp+len_tmp, cp, (lastchar-cp) + 1);
+				memcpy(cp, p_tmp+len, len_tmp);
+				kdb_printf("%s", cp);
+				cp += len_tmp;
+				lastchar += len_tmp;
+				if (cp != lastchar)
+					kdb_position_cursor(kdb_prompt_str,
+							    buffer, cp);
+			}
 		}
 		kdb_nextline = 1;	/* reset output line number */
 		break;
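The bounded tab-completion insert above (clamp len_tmp against bufend, memmove the tail together with its '\0', then memcpy the new characters) can be exercised in isolation. A minimal user-space sketch; insert_at_cursor() and the sample buffer are invented for illustration and only mirror the logic of the hunk.

#include <stdio.h>
#include <string.h>

/* Insert 'add' at position 'cp' inside 'buffer' without running past
 * 'bufend', mirroring the bounded memmove/memcpy sequence above.
 * Returns the number of characters actually inserted. */
static size_t insert_at_cursor(char *buffer, char *bufend, char *cp,
			       const char *add)
{
	char *lastchar = buffer + strlen(buffer);
	size_t len_tmp = strlen(add);

	if (lastchar + len_tmp >= bufend)
		len_tmp = bufend - lastchar;
	if (len_tmp) {
		/* + 1 so the terminating '\0' moves with the tail */
		memmove(cp + len_tmp, cp, (lastchar - cp) + 1);
		memcpy(cp, add, len_tmp);
	}
	return len_tmp;
}

int main(void)
{
	char buf[16] = "md sym";

	/* Try to complete "sym" with "bol_table" inside a 16-byte buffer,
	 * keeping two bytes in reserve the way kdb_read() does for "\n\0". */
	insert_at_cursor(buf, buf + sizeof(buf) - 2, buf + 6, "bol_table");
	printf("%s\n", buf);	/* completion is truncated safely to fit */
	return 0;
}
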
@@ -391,13 +408,9 @@ static char *kdb_read(char *buffer, size_t bufsize)
 			memcpy(cp+1, tmpbuffer, lastchar - cp);
 			*++lastchar = '\0';
 			*cp = key;
-			kdb_printf("%s\r", cp);
+			kdb_printf("%s", cp);
 			++cp;
-			tmp = *cp;
-			*cp = '\0';
-			kdb_printf(kdb_prompt_str);
-			kdb_printf("%s", buffer);
-			*cp = tmp;
+			kdb_position_cursor(kdb_prompt_str, buffer, cp);
 		} else {
 			*++lastchar = '\0';
 			*cp++ = key;

@@ -4776,6 +4776,7 @@ static int __perf_event_release_kernel(struct perf_event *event)
 again:
 	mutex_lock(&event->child_mutex);
 	list_for_each_entry(child, &event->child_list, child_list) {
+		void *var = NULL;
 
 		/*
 		 * Cannot change, child events are not migrated, see the
@@ -4816,11 +4817,23 @@ static int __perf_event_release_kernel(struct perf_event *event)
 			 * this can't be the last reference.
 			 */
 			put_event(event);
+		} else {
+			var = &ctx->refcount;
 		}
 
 		mutex_unlock(&event->child_mutex);
 		mutex_unlock(&ctx->mutex);
 		put_ctx(ctx);
+
+		if (var) {
+			/*
+			 * If perf_event_free_task() has deleted all events from the
+			 * ctx while the child_mutex got released above, make sure to
+			 * notify about the preceding put_ctx().
+			 */
+			smp_mb(); /* pairs with wait_var_event() */
+			wake_up_var(var);
+		}
 		goto again;
 	}
 	mutex_unlock(&event->child_mutex);

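The smp_mb()/wake_up_var() added above has a sleeping counterpart that calls wait_var_event() on the same address elsewhere in the perf code. A generic sketch of that pairing, with made-up names (pending, wait_side, release_side) rather than the actual perf call sites:

#include <linux/atomic.h>
#include <linux/wait_bit.h>

static atomic_t pending = ATOMIC_INIT(1);

/* Sleeper: blocks until 'pending' is observed as zero. */
static void wait_side(void)
{
	wait_var_event(&pending, atomic_read(&pending) == 0);
}

/* Waker: publish the state change, then wake any sleeper waiting on
 * the same address. The full barrier keeps the decrement ordered
 * before the wakeup, which is what the smp_mb() before wake_up_var()
 * in the hunk above is for. */
static void release_side(void)
{
	atomic_dec(&pending);
	smp_mb();	/* pairs with the condition re-check in wait_var_event() */
	wake_up_var(&pending);
}
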
@@ -499,6 +499,8 @@ void mm_update_next_owner(struct mm_struct *mm)
 	 * Search through everything else, we should not get here often.
 	 */
 	for_each_process(g) {
+		if (atomic_read(&mm->mm_users) <= 1)
+			break;
 		if (g->flags & PF_KTHREAD)
 			continue;
 		for_each_thread(g, c) {

@@ -19,7 +19,9 @@
 #include <linux/vmalloc.h>
 #include "gcov.h"
 
-#if (__GNUC__ >= 10)
+#if (__GNUC__ >= 14)
+#define GCOV_COUNTERS 9
+#elif (__GNUC__ >= 10)
 #define GCOV_COUNTERS 8
 #elif (__GNUC__ >= 7)
 #define GCOV_COUNTERS 9

@@ -70,6 +70,14 @@ static bool migrate_one_irq(struct irq_desc *desc)
 		return false;
 	}
 
+	/*
+	 * Complete an eventually pending irq move cleanup. If this
+	 * interrupt was moved in hard irq context, then the vectors need
+	 * to be cleaned up. It can't wait until this interrupt actually
+	 * happens and this CPU was involved.
+	 */
+	irq_force_complete_move(desc);
+
 	/*
 	 * No move required, if:
 	 *  - Interrupt is per cpu
@@ -88,14 +96,6 @@ static bool migrate_one_irq(struct irq_desc *desc)
 		return false;
 	}
 
-	/*
-	 * Complete an eventually pending irq move cleanup. If this
-	 * interrupt was moved in hard irq context, then the vectors need
-	 * to be cleaned up. It can't wait until this interrupt actually
-	 * happens and this CPU was involved.
-	 */
-	irq_force_complete_move(desc);
-
 	/*
 	 * If there is a setaffinity pending, then try to reuse the pending
 	 * mask, so the last change of the affinity does not get lost. If

@@ -245,6 +245,24 @@ STANDARD_PARAM_DEF(long, long, "%li", kstrtol);
 STANDARD_PARAM_DEF(ulong, unsigned long, "%lu", kstrtoul);
 STANDARD_PARAM_DEF(ullong, unsigned long long, "%llu", kstrtoull);
 
+int param_set_uint_minmax(const char *val, const struct kernel_param *kp,
+			  unsigned int min, unsigned int max)
+{
+	unsigned int num;
+	int ret;
+
+	if (!val)
+		return -EINVAL;
+	ret = kstrtouint(val, 0, &num);
+	if (ret)
+		return ret;
+	if (num < min || num > max)
+		return -EINVAL;
+	*((unsigned int *)kp->arg) = num;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(param_set_uint_minmax);
+
 int param_set_charp(const char *val, const struct kernel_param *kp)
 {
 	if (strlen(val) > 1024) {

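A hypothetical user of the newly exported helper: a module parameter that only accepts values in a fixed range. The parameter name io_threads, its bounds and the ops structure are invented for illustration, and the sketch assumes the helper's declaration is visible via linux/moduleparam.h; only param_set_uint_minmax() itself comes from the hunk above.

#include <linux/module.h>
#include <linux/moduleparam.h>

static unsigned int io_threads = 4;

/* Reject writes outside [1, 64] at cmdline/sysfs parse time. */
static int set_io_threads(const char *val, const struct kernel_param *kp)
{
	return param_set_uint_minmax(val, kp, 1, 64);
}

static const struct kernel_param_ops io_threads_ops = {
	.set = set_io_threads,
	.get = param_get_uint,
};

module_param_cb(io_threads, &io_threads_ops, &io_threads, 0644);
MODULE_PARM_DESC(io_threads, "number of worker threads (1-64)");
MODULE_LICENSE("GPL");	/* param_set_uint_minmax() is EXPORT_SYMBOL_GPL */
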
@@ -1334,7 +1334,8 @@ static bool rcu_torture_one_read(struct torture_random_state *trsp)
 	preempt_disable();
 	pipe_count = p->rtort_pipe_count;
 	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
-		/* Should not happen, but... */
+		// Should not happen in a correct RCU implementation,
+		// happens quite often for torture_type=busted.
 		pipe_count = RCU_TORTURE_PIPE_LEN;
 	}
 	completed = cur_ops->get_gp_seq();

@@ -1195,16 +1195,13 @@ static void set_domain_attribute(struct sched_domain *sd,
 	if (!attr || attr->relax_domain_level < 0) {
 		if (default_relax_domain_level < 0)
 			return;
-		else
-			request = default_relax_domain_level;
+		request = default_relax_domain_level;
 	} else
 		request = attr->relax_domain_level;
-	if (request < sd->level) {
+
+	if (sd->level >= request) {
 		/* Turn off idle balance on this domain: */
 		sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
-	} else {
-		/* Turn on idle balance on this domain: */
-		sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
-	}
 	}
 }
 

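Taken together with the cpuset hunk at the top (which now accepts values up to sched_domain_level_max + 1), the new comparison means a domain whose level equals the requested relax level also has its newidle/wake balancing turned off. A stand-alone illustration of just that comparison; the flag values and the loop are made up, only the level >= request test mirrors the hunk.

#include <stdio.h>

/* Illustrative flag values only; the real SD_* flags live in
 * include/linux/sched/topology.h and differ from these. */
#define SD_BALANCE_NEWIDLE	0x1
#define SD_BALANCE_WAKE		0x2

int main(void)
{
	int request = 2;	/* e.g. cpuset.sched_relax_domain_level = 2 */

	for (int level = 0; level < 4; level++) {
		unsigned int flags = SD_BALANCE_NEWIDLE | SD_BALANCE_WAKE;

		if (level >= request)	/* the new comparison from the hunk */
			flags &= ~(SD_BALANCE_WAKE | SD_BALANCE_NEWIDLE);
		printf("domain level %d: newidle/wake balancing %s\n",
		       level, flags ? "enabled" : "disabled");
	}
	return 0;
}
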
@@ -69,4 +69,5 @@ static void __exit preemptirq_delay_exit(void)
 
 module_init(preemptirq_delay_init)
 module_exit(preemptirq_delay_exit)
+MODULE_DESCRIPTION("Preempt / IRQ disable delay thread to test latency tracers");
 MODULE_LICENSE("GPL v2");

@@ -1131,6 +1131,11 @@ static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
  *
  * As a safety measure we check to make sure the data pages have not
  * been corrupted.
+ *
+ * Callers of this function need to guarantee that the list of pages doesn't get
+ * modified during the check. In particular, if it's possible that the function
+ * is invoked with concurrent readers which can swap in a new reader page then
+ * the caller should take cpu_buffer->reader_lock.
  */
 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
 {
@@ -1846,8 +1851,12 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
 		 */
 		synchronize_sched();
 		for_each_buffer_cpu(buffer, cpu) {
+			unsigned long flags;
+
 			cpu_buffer = buffer->buffers[cpu];
+			raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 			rb_check_pages(cpu_buffer);
+			raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 		}
 		atomic_dec(&buffer->record_disabled);
 	}