Merge branch 'lineage-20' of https://github.com/LineageOS/android_kernel_qcom_sm8250 into lineage-22.1
Change-Id: Ib5c234d95e3f41cfa458ac48d666fbab920b0b6e
@@ -71,6 +71,9 @@ int array_map_alloc_check(union bpf_attr *attr)
         * access the elements.
         */
        return -E2BIG;
+   /* percpu map value size is bound by PCPU_MIN_UNIT_SIZE */
+   if (percpu && round_up(attr->value_size, 8) > PCPU_MIN_UNIT_SIZE)
+       return -E2BIG;
 
    return 0;
 }

@@ -291,6 +291,9 @@ static int htab_map_alloc_check(union bpf_attr *attr)
         * kmalloc-able later in htab_map_update_elem()
         */
        return -E2BIG;
+   /* percpu map value size is bound by PCPU_MIN_UNIT_SIZE */
+   if (percpu && round_up(attr->value_size, 8) > PCPU_MIN_UNIT_SIZE)
+       return -E2BIG;
 
    return 0;
 }

@@ -629,7 +629,7 @@ static int trie_get_next_key(struct bpf_map *map, void *_key, void *_next_key)
    if (!key || key->prefixlen > trie->max_prefixlen)
        goto find_leftmost;
 
-   node_stack = kmalloc_array(trie->max_prefixlen,
+   node_stack = kmalloc_array(trie->max_prefixlen + 1,
                               sizeof(struct lpm_trie_node *),
                               GFP_ATOMIC | __GFP_NOWARN);
    if (!node_stack)

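Note on the hunk above (illustrative, not part of the patch): a full walk in trie_get_next_key() can push one node per prefix length from 0 up to max_prefixlen, i.e. max_prefixlen + 1 entries, so a stack sized max_prefixlen is one slot short. A minimal userspace C sketch of that count:

    /* Illustrative only: a root-to-leaf path whose leaf has
     * prefixlen == max_prefixlen visits max_prefixlen + 1 nodes,
     * so the last push would land one past a max_prefixlen-sized array.
     */
    #include <assert.h>

    int main(void)
    {
        const int max_prefixlen = 8;   /* e.g. an 8-bit key space */
        int pushed = 0;

        /* one node per prefix length 0, 1, ..., max_prefixlen */
        for (int plen = 0; plen <= max_prefixlen; plen++)
            pushed++;

        assert(pushed == max_prefixlen + 1);  /* needs max_prefixlen + 1 slots */
        return 0;
    }
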
@@ -6446,7 +6446,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
    /* 'struct bpf_verifier_env' can be global, but since it's not small,
     * allocate/free it every time bpf_check() is called
     */
-   env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
+   env = kvzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
    if (!env)
        return -ENOMEM;
    log = &env->log;

@@ -6573,6 +6573,6 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
    mutex_unlock(&bpf_verifier_lock);
    vfree(env->insn_aux_data);
 err_free_env:
-   kfree(env);
+   kvfree(env);
    return ret;
 }

@@ -1762,9 +1762,9 @@ int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
        RCU_INIT_POINTER(scgrp->subsys[ssid], NULL);
        rcu_assign_pointer(dcgrp->subsys[ssid], css);
        ss->root = dst_root;
-       css->cgroup = dcgrp;
 
        spin_lock_irq(&css_set_lock);
+       css->cgroup = dcgrp;
        WARN_ON(!list_empty(&dcgrp->e_csets[ss->id]));
        list_for_each_entry_safe(cset, cset_pos, &scgrp->e_csets[ss->id],
                                 e_cset_node[ss->id]) {

@@ -5370,7 +5370,7 @@ static bool cgroup_check_hierarchy_limits(struct cgroup *parent)
 {
    struct cgroup *cgroup;
    int ret = false;
-   int level = 1;
+   int level = 0;
 
    lockdep_assert_held(&cgroup_mutex);
 

@@ -5378,7 +5378,7 @@ static bool cgroup_check_hierarchy_limits(struct cgroup *parent)
        if (cgroup->nr_descendants >= cgroup->max_descendants)
            goto fail;
 
-       if (level > cgroup->max_depth)
+       if (level >= cgroup->max_depth)
            goto fail;
 
        level++;

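Note on the two hunks above (illustrative, not part of the patch): starting level at 0 and testing with >= rejects a new child at the same effective depth as the old level-starts-at-1 / > check, but the counter no longer has to climb past max_depth, so a max_depth of INT_MAX can no longer drive level into signed overflow. A small userspace check of the equivalence, with hypothetical helper names:

    /* Illustrative only: old_fail(level starting at 1) and
     * new_fail(level starting at 0) reject the same hierarchies.
     */
    #include <assert.h>
    #include <limits.h>

    static int old_fail(int level_from_1, int max_depth) { return level_from_1 > max_depth; }
    static int new_fail(int level_from_0, int max_depth) { return level_from_0 >= max_depth; }

    int main(void)
    {
        for (int depth = 1; depth < 64; depth++)
            for (int max_depth = 0; max_depth < 64; max_depth++)
                assert(old_fail(depth, max_depth) == new_fail(depth - 1, max_depth));

        /* Old scheme: with max_depth == INT_MAX the '>' check can never
         * fire, so level keeps incrementing toward overflow. The new
         * check fires at level == max_depth, which is representable. */
        assert(new_fail(INT_MAX, INT_MAX) == 1);
        return 0;
    }
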
@@ -22,6 +22,7 @@
  * distribution for more details.
  */
 
+#include "cgroup-internal.h"
 #include <linux/cpu.h>
 #include <linux/cpumask.h>
 #include <linux/cpuset.h>

@@ -2791,10 +2792,14 @@ int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
    if (!buf)
        goto out;
 
-   css = task_get_css(tsk, cpuset_cgrp_id);
-   retval = cgroup_path_ns(css->cgroup, buf, PATH_MAX,
-               current->nsproxy->cgroup_ns);
-   css_put(css);
+   rcu_read_lock();
+   spin_lock_irq(&css_set_lock);
+   css = task_css(tsk, cpuset_cgrp_id);
+   retval = cgroup_path_ns_locked(css->cgroup, buf, PATH_MAX,
+               current->nsproxy->cgroup_ns);
+   spin_unlock_irq(&css_set_lock);
+   rcu_read_unlock();
+
    if (retval >= PATH_MAX)
        retval = -ENAMETOOLONG;
    if (retval < 0)

@@ -192,7 +192,7 @@ static int kdb_read_get_key(char *buffer, size_t bufsize)
  */
 static void kdb_position_cursor(char *prompt, char *buffer, char *cp)
 {
-   kdb_printf("\r%s", kdb_prompt_str);
+   kdb_printf("\r%s", prompt);
    if (cp > buffer)
        kdb_printf("%.*s", (int)(cp - buffer), buffer);
 }

@@ -377,7 +377,7 @@ static char *kdb_read(char *buffer, size_t bufsize)
            if (i >= dtab_count)
                kdb_printf("...");
            kdb_printf("\n");
-           kdb_printf(kdb_prompt_str);
+           kdb_printf("%s", kdb_prompt_str);
            kdb_printf("%s", buffer);
            if (cp != lastchar)
                kdb_position_cursor(kdb_prompt_str, buffer, cp);

@@ -468,8 +468,8 @@ static char *kdb_read(char *buffer, size_t bufsize)
 char *kdb_getstr(char *buffer, size_t bufsize, const char *prompt)
 {
    if (prompt && kdb_prompt_str != prompt)
-       strncpy(kdb_prompt_str, prompt, CMD_BUFLEN);
-   kdb_printf(kdb_prompt_str);
+       strscpy(kdb_prompt_str, prompt, CMD_BUFLEN);
+   kdb_printf("%s", kdb_prompt_str);
    kdb_nextline = 1;   /* Prompt and input resets line number */
    return kdb_read(buffer, bufsize);
 }

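Note on the hunk above (illustrative, not part of the patch): strncpy() leaves the destination unterminated when the source fills the buffer, while strscpy() always NUL-terminates and reports truncation; printing through "%s" also keeps the prompt from ever being treated as a format string. A userspace sketch, with bounded_copy() as a simplified stand-in for the kernel's strscpy():

    /* Illustrative only: bounded_copy() mimics strscpy()'s guarantee
     * (always terminate, signal truncation); strncpy() gives neither.
     */
    #include <stdio.h>
    #include <string.h>

    static long bounded_copy(char *dst, const char *src, size_t size)
    {
        size_t len = strnlen(src, size);

        if (len == size) {              /* does not fit: truncate + terminate */
            memcpy(dst, src, size - 1);
            dst[size - 1] = '\0';
            return -1;                  /* kernel strscpy() returns -E2BIG here */
        }
        memcpy(dst, src, len + 1);      /* fits, including the terminator */
        return (long)len;
    }

    int main(void)
    {
        char small[8];

        strncpy(small, "0123456789", sizeof(small));   /* 8 bytes copied, no '\0' */
        bounded_copy(small, "0123456789", sizeof(small));
        printf("%s\n", small);          /* safely prints "0123456" */
        return 0;
    }
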
@@ -97,8 +97,8 @@ void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
 {
    struct dma_devres match_data = { size, vaddr, dma_handle };
 
-   dma_free_coherent(dev, size, vaddr, dma_handle);
    WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
+   dma_free_coherent(dev, size, vaddr, dma_handle);
 }
 EXPORT_SYMBOL(dmam_free_coherent);

@@ -3677,7 +3677,11 @@ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bo
    period = perf_calculate_period(event, nsec, count);
 
    delta = (s64)(period - hwc->sample_period);
-   delta = (delta + 7) / 8; /* low pass filter */
+   if (delta >= 0)
+       delta += 7;
+   else
+       delta -= 7;
+   delta /= 8; /* low pass filter */
 
    sample_period = hwc->sample_period + delta;
 

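Note on the hunk above (illustrative, not part of the patch): C division truncates toward zero, so the old (delta + 7) / 8 collapses small negative deltas to 0 and the sample period is never nudged downward; rounding away from zero on both signs preserves small adjustments in either direction. A quick userspace comparison:

    /* Illustrative only: old vs. new low-pass rounding of delta. */
    #include <stdio.h>

    static long long old_filter(long long delta)
    {
        return (delta + 7) / 8;
    }

    static long long new_filter(long long delta)
    {
        if (delta >= 0)
            delta += 7;
        else
            delta -= 7;
        return delta / 8;
    }

    int main(void)
    {
        long long samples[] = { 20, 3, 0, -3, -20 };

        for (int i = 0; i < 5; i++)
            printf("delta=%4lld  old=%3lld  new=%3lld\n",
                   samples[i], old_filter(samples[i]), new_filter(samples[i]));
        /* delta = -3: old -> 0 (adjustment lost), new -> -1 */
        return 0;
    }
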
@@ -5872,6 +5876,8 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
        return -EINVAL;
 
    nr_pages = vma_size / PAGE_SIZE;
+   if (nr_pages > INT_MAX)
+       return -ENOMEM;
 
    mutex_lock(&event->mmap_mutex);
    ret = -EINVAL;

@@ -121,7 +121,7 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
 
 static inline unsigned long perf_aux_size(struct ring_buffer *rb)
 {
-   return rb->aux_nr_pages << PAGE_SHIFT;
+   return (unsigned long)rb->aux_nr_pages << PAGE_SHIFT;
 }
 
 #define __DEFINE_OUTPUT_COPY_BODY(advance_buf, memcpy_func, ...) \

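Note on the hunk above (illustrative, not part of the patch): aux_nr_pages is an int, so shifting it by PAGE_SHIFT is evaluated in 32-bit arithmetic and overflows once the AUX area reaches 2 GiB with 4 KiB pages; casting to unsigned long first keeps the shift in 64-bit arithmetic on LP64 kernels. A userspace sketch (the PAGE_SHIFT value is an assumption for the example):

    /* Illustrative only: widen before shifting so the byte count
     * cannot overflow the int intermediate.
     */
    #include <stdio.h>

    #define PAGE_SHIFT 12   /* 4 KiB pages, assumed for this example */

    int main(void)
    {
        int nr_pages = 1 << 20;   /* 1Mi AUX pages = 4 GiB */

        /* Without the cast, nr_pages << PAGE_SHIFT is done as a 32-bit
         * int and overflows (that is the bug the patch fixes). */
        unsigned long bytes = (unsigned long)nr_pages << PAGE_SHIFT;

        printf("%lu bytes\n", bytes);                         /* 4294967296 */
        printf("fits in int? %s\n", bytes <= 0x7fffffff ? "yes" : "no");
        return 0;
    }
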
@@ -1187,7 +1187,7 @@ static struct xol_area *__create_xol_area(unsigned long vaddr)
    uprobe_opcode_t insn = UPROBE_SWBP_INSN;
    struct xol_area *area;
 
-   area = kmalloc(sizeof(*area), GFP_KERNEL);
+   area = kzalloc(sizeof(*area), GFP_KERNEL);
    if (unlikely(!area))
        goto out;
 

@@ -1197,9 +1197,8 @@ static struct xol_area *__create_xol_area(unsigned long vaddr)
        goto free_area;
 
    area->xol_mapping.name = "[uprobes]";
-   area->xol_mapping.fault = NULL;
    area->xol_mapping.pages = area->pages;
-   area->pages[0] = alloc_page(GFP_HIGHUSER);
+   area->pages[0] = alloc_page(GFP_HIGHUSER | __GFP_ZERO);
    if (!area->pages[0])
        goto free_bitmap;
    area->pages[1] = NULL;

@@ -697,10 +697,25 @@ int kthread_worker_fn(void *worker_ptr)
    spin_unlock_irq(&worker->lock);
 
    if (work) {
+       kthread_work_func_t func = work->func;
        __set_current_state(TASK_RUNNING);
+       trace_sched_kthread_work_execute_start(work);
        work->func(work);
-   } else if (!freezing(current))
+       /*
+        * Avoid dereferencing work after this point. The trace
+        * event only cares about the address.
+        */
+       trace_sched_kthread_work_execute_end(work, func);
+   } else if (!freezing(current)) {
        schedule();
+   } else {
+       /*
+        * Handle the case where the current remains
+        * TASK_INTERRUPTIBLE. try_to_freeze() expects
+        * the current to be TASK_RUNNING.
+        */
+       __set_current_state(TASK_RUNNING);
+   }
 
    try_to_freeze();
    cond_resched();

@@ -827,6 +842,8 @@ static void kthread_insert_work(struct kthread_worker *worker,
 {
    kthread_insert_work_sanity_check(worker, work);
 
+   trace_sched_kthread_work_queue_work(worker, work);
+
    list_add_tail(&work->node, pos);
    work->worker = worker;
    if (!worker->current_work && likely(worker->task))

@@ -1205,6 +1205,7 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
 }
 
 static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
+                                     struct rt_mutex *lock,
                                      struct rt_mutex_waiter *w)
 {
    /*

@@ -1214,6 +1215,7 @@ static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
    if (res != -EDEADLOCK || detect_deadlock)
        return;
 
+   raw_spin_unlock_irq(&lock->wait_lock);
    /*
     * Yell lowdly and stop the task right here.
     */

@@ -1269,7 +1271,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
    if (unlikely(ret)) {
        __set_current_state(TASK_RUNNING);
        remove_waiter(lock, &waiter);
-       rt_mutex_handle_deadlock(ret, chwalk, &waiter);
+       rt_mutex_handle_deadlock(ret, chwalk, lock, &waiter);
    }
 
    /*

@@ -1752,10 +1752,11 @@ struct sigqueue *sigqueue_alloc(void)
 
 void sigqueue_free(struct sigqueue *q)
 {
-   unsigned long flags;
    spinlock_t *lock = &current->sighand->siglock;
+   unsigned long flags;
 
-   BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
+   if (WARN_ON_ONCE(!(q->flags & SIGQUEUE_PREALLOC)))
+       return;
    /*
     * We must hold ->siglock while testing q->list
     * to serialize with collect_signal() or with

@@ -1783,7 +1784,10 @@ int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
    unsigned long flags;
    int ret, result;
 
-   BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
+   if (WARN_ON_ONCE(!(q->flags & SIGQUEUE_PREALLOC)))
+       return 0;
+   if (WARN_ON_ONCE(q->info.si_code != SI_TIMER))
+       return 0;
 
    ret = -1;
    rcu_read_lock();

@@ -1802,7 +1806,6 @@ int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
     * If an SI_TIMER entry is already queue just increment
     * the overrun count.
     */
-   BUG_ON(q->info.si_code != SI_TIMER);
    q->info.si_overrun++;
    result = TRACE_SIGNAL_ALREADY_PENDING;
    goto out;

@@ -824,6 +824,7 @@ int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
 
    queue_work_on(cpu, system_wq, &sscs.work);
    wait_for_completion(&sscs.done);
+   destroy_work_on_stack(&sscs.work);
 
    return sscs.ret;
 }

@@ -1188,6 +1188,8 @@ void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
    struct hrtimer_clock_base *base;
    unsigned long flags;
 
+   if (WARN_ON_ONCE(!timer->function))
+       return;
    /*
     * Check whether the HRTIMER_MODE_SOFT bit and hrtimer.is_soft
     * match.

@@ -686,17 +686,16 @@ static inline void process_adjtimex_modes(const struct timex *txc, s32 *time_tai
    }
 
    if (txc->modes & ADJ_MAXERROR)
-       time_maxerror = txc->maxerror;
+       time_maxerror = clamp(txc->maxerror, (__kernel_long_t)0, (__kernel_long_t)NTP_PHASE_LIMIT);
 
    if (txc->modes & ADJ_ESTERROR)
-       time_esterror = txc->esterror;
+       time_esterror = clamp(txc->esterror, (__kernel_long_t)0, (__kernel_long_t)NTP_PHASE_LIMIT);
 
    if (txc->modes & ADJ_TIMECONST) {
-       time_constant = txc->constant;
+       time_constant = clamp(txc->constant, (__kernel_long_t)0, (__kernel_long_t)MAXTC);
        if (!(time_status & STA_NANO))
            time_constant += 4;
-       time_constant = min(time_constant, (long)MAXTC);
-       time_constant = max(time_constant, 0l);
+       time_constant = clamp(time_constant, (long)0, (long)MAXTC);
    }
 
    if (txc->modes & ADJ_TAI &&

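Note on the hunk above (illustrative, not part of the patch): the old code stored the raw user-supplied values, so time_maxerror/time_esterror could start far beyond NTP_PHASE_LIMIT and later arithmetic on them could overflow; clamp() bounds them to [0, NTP_PHASE_LIMIT] up front, and the TIMECONST branch folds the old min()/max() pair into a single clamp(). A userspace sketch with a stand-in clamp() macro and an assumed value for the kernel constant:

    /* Illustrative only: clamp() confines untrusted adjtimex input
     * to the operating range before it is stored.
     */
    #include <stdio.h>

    #define clamp(val, lo, hi)  ((val) < (lo) ? (lo) : (val) > (hi) ? (hi) : (val))
    #define NTP_PHASE_LIMIT     16000000L   /* placeholder for the kernel constant */

    int main(void)
    {
        long from_user[] = { -5, 123, NTP_PHASE_LIMIT + 1 };

        for (int i = 0; i < 3; i++)
            printf("%ld -> %ld\n", from_user[i],
                   clamp(from_user[i], 0L, NTP_PHASE_LIMIT));
        /* out-of-range input can no longer be stored past the limit */
        return 0;
    }
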
@@ -303,6 +303,9 @@ static int pc_clock_settime(clockid_t id, const struct timespec64 *ts)
    struct posix_clock_desc cd;
    int err;
 
+   if (!timespec64_valid_strict(ts))
+       return -EINVAL;
+
    err = get_clock_desc(id, &cd);
    if (err)
        return err;

@@ -948,6 +948,30 @@ void hotplug_cpu__broadcast_tick_pull(int deadcpu)
    bc = tick_broadcast_device.evtdev;
 
    if (bc && broadcast_needs_cpu(bc, deadcpu)) {
+       /*
+        * If the broadcast force bit of the current CPU is set,
+        * then the current CPU has not yet reprogrammed the local
+        * timer device to avoid a ping-pong race. See
+        * ___tick_broadcast_oneshot_control().
+        *
+        * If the broadcast device is hrtimer based then
+        * programming the broadcast event below does not have any
+        * effect because the local clockevent device is not
+        * running and not programmed because the broadcast event
+        * is not earlier than the pending event of the local clock
+        * event device. As a consequence all CPUs waiting for a
+        * broadcast event are stuck forever.
+        *
+        * Detect this condition and reprogram the cpu local timer
+        * device to avoid the starvation.
+        */
+       if (tick_check_broadcast_expired()) {
+           struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
+
+           cpumask_clear_cpu(smp_processor_id(), tick_broadcast_force_mask);
+           tick_program_event(td->evtdev->next_event, 1);
+       }
+
        /* This moves the broadcast assignment to this CPU: */
        clockevents_program_event(bc, bc->next_event, 1);
    }

@@ -4379,35 +4379,24 @@ ring_buffer_read_finish(struct ring_buffer_iter *iter)
 EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
 
 /**
- * ring_buffer_read - read the next item in the ring buffer by the iterator
+ * ring_buffer_iter_advance - advance the iterator to the next location
  * @iter: The ring buffer iterator
- * @ts: The time stamp of the event read.
  *
- * This reads the next event in the ring buffer and increments the iterator.
+ * Move the location of the iterator such that the next read will
+ * be the next location of the iterator.
  */
-struct ring_buffer_event *
-ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
+void ring_buffer_iter_advance(struct ring_buffer_iter *iter)
 {
-   struct ring_buffer_event *event;
    struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
    unsigned long flags;
 
    raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
- again:
-   event = rb_iter_peek(iter, ts);
-   if (!event)
-       goto out;
-
-   if (event->type_len == RINGBUF_TYPE_PADDING)
-       goto again;
-
    rb_advance_iter(iter);
- out:
-   raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
-   return event;
+   raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 }
-EXPORT_SYMBOL_GPL(ring_buffer_read);
+EXPORT_SYMBOL_GPL(ring_buffer_iter_advance);
 
 /**
  * ring_buffer_size - return the size of the ring buffer (in bytes)

@@ -3096,7 +3096,7 @@ static void trace_iterator_increment(struct trace_iterator *iter)
 
    iter->idx++;
    if (buf_iter)
-       ring_buffer_read(buf_iter, NULL);
+       ring_buffer_iter_advance(buf_iter);
 }
 
 static struct trace_entry *

@@ -3256,7 +3256,9 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu)
        if (ts >= iter->trace_buffer->time_start)
            break;
        entries++;
-       ring_buffer_read(buf_iter, NULL);
+       ring_buffer_iter_advance(buf_iter);
+       /* This could be a big loop */
+       cond_resched();
    }
 
    per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;

@@ -726,7 +726,7 @@ get_return_for_leaf(struct trace_iterator *iter,
 
    /* this is a leaf, now advance the iterator */
    if (ring_iter)
-       ring_buffer_read(ring_iter, NULL);
+       ring_buffer_iter_advance(ring_iter);
 
    return next;
 }

@@ -1320,12 +1320,11 @@ static enum print_line_t trace_print_print(struct trace_iterator *iter,
 {
    struct print_entry *field;
    struct trace_seq *s = &iter->seq;
-   int max = iter->ent_size - offsetof(struct print_entry, buf);
 
    trace_assign_type(field, iter->ent);
 
    seq_print_ip_sym(s, field->ip, flags);
-   trace_seq_printf(s, ": %.*s", max, field->buf);
+   trace_seq_printf(s, ": %s", field->buf);
 
    return trace_handle_return(s);
 }

@@ -1334,11 +1333,10 @@ static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags,
                                         struct trace_event *event)
 {
    struct print_entry *field;
-   int max = iter->ent_size - offsetof(struct print_entry, buf);
 
    trace_assign_type(field, iter->ent);
 
-   trace_seq_printf(&iter->seq, "# %lx %.*s", field->ip, max, field->buf);
+   trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf);
 
    return trace_handle_return(&iter->seq);
 }

@@ -454,7 +454,7 @@ static struct tracing_map_elt *get_free_elt(struct tracing_map *map)
    struct tracing_map_elt *elt = NULL;
    int idx;
 
-   idx = atomic_inc_return(&map->next_elt);
+   idx = atomic_fetch_add_unless(&map->next_elt, 1, map->max_elts);
    if (idx < map->max_elts) {
        elt = *(TRACING_MAP_ELT(map->elts, idx));
        if (map->ops && map->ops->elt_init)

@@ -699,7 +699,7 @@ void tracing_map_clear(struct tracing_map *map)
 {
    unsigned int i;
 
-   atomic_set(&map->next_elt, -1);
+   atomic_set(&map->next_elt, 0);
    atomic64_set(&map->hits, 0);
    atomic64_set(&map->drops, 0);
 

@@ -783,7 +783,7 @@ struct tracing_map *tracing_map_create(unsigned int map_bits,
 
    map->map_bits = map_bits;
    map->max_elts = (1 << map_bits);
-   atomic_set(&map->next_elt, -1);
+   atomic_set(&map->next_elt, 0);
 
    map->map_size = (1 << (map_bits + 1));
    map->ops = ops;

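Note on the three tracing_map hunks above (illustrative, not part of the patch): atomic_inc_return() keeps counting past max_elts, so next_elt can eventually wrap, while atomic_fetch_add_unless() stops incrementing once the limit is reached (which is also why the initial value moves from -1 to 0). A userspace model of the saturating counter using C11 atomics, with fetch_add_unless() as a simplified stand-in for the kernel helper:

    /* Illustrative only: a "next element" counter that saturates at
     * max_elts instead of counting (and eventually wrapping) forever.
     */
    #include <stdatomic.h>
    #include <stdio.h>

    static int fetch_add_unless(atomic_int *v, int add, int unless)
    {
        int cur = atomic_load(v);

        while (cur != unless &&
               !atomic_compare_exchange_weak(v, &cur, cur + add))
            ;                       /* cur is reloaded on CAS failure */
        return cur;                 /* old value, as the kernel helper returns */
    }

    int main(void)
    {
        atomic_int next_elt = 0;    /* starts at 0 now, not -1 */
        const int max_elts = 4;

        for (int i = 0; i < 8; i++) {
            int idx = fetch_add_unless(&next_elt, 1, max_elts);
            printf("request %d -> %s\n", i,
                   idx < max_elts ? "got slot" : "map full");
        }
        return 0;
    }
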
@@ -91,11 +91,15 @@ static bool watchdog_check_timestamp(void)
    __this_cpu_write(last_timestamp, now);
    return true;
 }
-#else
-static inline bool watchdog_check_timestamp(void)
+
+static void watchdog_init_timestamp(void)
 {
-   return true;
+   __this_cpu_write(nmi_rearmed, 0);
+   __this_cpu_write(last_timestamp, ktime_get_mono_fast_ns());
 }
+#else
+static inline bool watchdog_check_timestamp(void) { return true; }
+static inline void watchdog_init_timestamp(void) { }
 #endif
 
 static struct perf_event_attr wd_hw_attr = {

@@ -195,6 +199,7 @@ void hardlockup_detector_perf_enable(void)
    if (!atomic_fetch_inc(&watchdog_cpus))
        pr_info("Enabled. Permanently consumes one hw-PMU counter.\n");
 
+   watchdog_init_timestamp();
    perf_event_enable(this_cpu_read(watchdog_ev));
 }