Reapply "perf: Disallow mis-matched inherited group reads"
This reverts commit 9c20bfd64d.
It's needed back in the tree as it fixes a real issue, and it turns out
to nicely fit into an unused slot within the structure, so there's no
ABI breakage.
Update the .xml file with the new structure field:
Leaf changes summary: 1 artifact changed
Changed leaf types summary: 1 leaf type changed
Removed/Changed/Added functions summary: 0 Removed, 0 Changed, 0 Added function
Removed/Changed/Added variables summary: 0 Removed, 0 Changed, 0 Added variable
'struct perf_event at perf_event.h:564:1' changed:
type size hasn't changed
1 data member insertion:
'unsigned int group_generation', at offset 1120 (in bits) at perf_event.h:601:1
1242 impacted interfaces
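The abidiff output above is what makes the reapply safe: the new member lands in an existing alignment hole, so the structure size and every later member offset are unchanged. A minimal userspace sketch of the same layout argument (toy structs on LP64, not the kernel's struct perf_event):

/*
 * Toy illustration only; these are not the kernel structures.  On LP64,
 * an int followed by a pointer leaves a 4-byte alignment hole, and a new
 * 4-byte member can fill it without changing size or later offsets.
 */
#include <assert.h>
#include <stddef.h>

struct before {
	int	group_caps;	/* 4 bytes, then a 4-byte hole... */
	void	*group_leader;	/* ...because the pointer is 8-byte aligned */
};

struct after {
	int		group_caps;
	unsigned int	group_generation;	/* absorbed by the former hole */
	void		*group_leader;
};

int main(void)
{
	assert(sizeof(struct before) == sizeof(struct after));
	assert(offsetof(struct before, group_leader) ==
	       offsetof(struct after, group_leader));
	return 0;
}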
Bug: 307236803
Change-Id: I921c19d115869d8b0517d44d271d56d557da4167
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -597,6 +597,9 @@ struct perf_event {
 	/* The cumulative AND of all event_caps for events in this group. */
 	int				group_caps;
 
+#ifndef __GENKSYMS__ /* ANDROID Bug: 307236803 to keep the crc preserved */
+	unsigned int			group_generation;
+#endif
 	struct perf_event		*group_leader;
 	struct pmu			*pmu;
 	void				*pmu_private;
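The #ifndef __GENKSYMS__ guard is the ANDROID-specific part of this hunk: genksyms computes the MODVERSIONS CRCs from the preprocessed type definitions, so a member it cannot see leaves the recorded CRCs, and hence the KMI, untouched, while normal builds (where __GENKSYMS__ is undefined) get the new field. A rough sketch of the general pattern, assuming, as verified above, that the member fits existing padding:

/* Sketch of the GKI pattern, not a real kernel structure. */
struct example {
	long	stable_member;
#ifndef __GENKSYMS__			/* hidden from CRC generation only */
	unsigned int	new_member;	/* must occupy an existing hole */
#endif
	void	*another_stable_member;
};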
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1848,6 +1848,7 @@ static void perf_group_attach(struct perf_event *event)
 
 	list_add_tail(&event->sibling_list, &group_leader->sibling_list);
 	group_leader->nr_siblings++;
+	group_leader->group_generation++;
 
 	perf_event__header_size(group_leader);
 
@@ -1918,6 +1919,7 @@ static void perf_group_detach(struct perf_event *event)
 	if (event->group_leader != event) {
 		list_del_init(&event->sibling_list);
 		event->group_leader->nr_siblings--;
+		event->group_leader->group_generation++;
 		goto out;
 	}
 
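Both membership paths now bump group_generation alongside nr_siblings. The counter matters because an equal sibling count alone cannot distinguish an untouched group from one that lost a member and gained another; the generation records every change. A toy model of the resulting check, with hypothetical types standing in for the perf internals:

/* Toy model, not kernel code: each membership change bumps a generation;
 * a child snapshots its parent's generation when cloned, so a matching
 * generation and sibling count mean the groups still mirror each other. */
struct group {
	unsigned int nr_siblings;
	unsigned int generation;
};

static void group_attach(struct group *g) { g->nr_siblings++; g->generation++; }
static void group_detach(struct group *g) { g->nr_siblings--; g->generation++; }

static int groups_match(const struct group *parent, const struct group *child)
{
	return parent->generation == child->generation &&
	       parent->nr_siblings == child->nr_siblings;
}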
@@ -4736,7 +4738,7 @@ static int __perf_read_group_add(struct perf_event *leader,
 					u64 read_format, u64 *values)
 {
 	struct perf_event_context *ctx = leader->ctx;
-	struct perf_event *sub;
+	struct perf_event *sub, *parent;
 	unsigned long flags;
 	int n = 1; /* skip @nr */
 	int ret;
@@ -4746,6 +4748,33 @@ static int __perf_read_group_add(struct perf_event *leader,
 		return ret;
 
 	raw_spin_lock_irqsave(&ctx->lock, flags);
+	/*
+	 * Verify the grouping between the parent and child (inherited)
+	 * events is still in tact.
+	 *
+	 * Specifically:
+	 *  - leader->ctx->lock pins leader->sibling_list
+	 *  - parent->child_mutex pins parent->child_list
+	 *  - parent->ctx->mutex pins parent->sibling_list
+	 *
+	 * Because parent->ctx != leader->ctx (and child_list nests inside
+	 * ctx->mutex), group destruction is not atomic between children, also
+	 * see perf_event_release_kernel(). Additionally, parent can grow the
+	 * group.
+	 *
+	 * Therefore it is possible to have parent and child groups in a
+	 * different configuration and summing over such a beast makes no sense
+	 * what so ever.
+	 *
+	 * Reject this.
+	 */
+	parent = leader->parent;
+	if (parent &&
+	    (parent->group_generation != leader->group_generation ||
+	     parent->nr_siblings != leader->nr_siblings)) {
+		ret = -ECHILD;
+		goto unlock;
+	}
 
 	/*
 	 * Since we co-schedule groups, {enabled,running} times of siblings
@@ -4775,8 +4804,9 @@ static int __perf_read_group_add(struct perf_event *leader,
 		values[n++] = primary_event_id(sub);
 	}
 
+unlock:
 	raw_spin_unlock_irqrestore(&ctx->lock, flags);
-	return 0;
+	return ret;
 }
 
 static int perf_read_group(struct perf_event *event,
@@ -4795,10 +4825,6 @@ static int perf_read_group(struct perf_event *event,
 
 	values[0] = 1 + leader->nr_siblings;
 
-	/*
-	 * By locking the child_mutex of the leader we effectively
-	 * lock the child list of all siblings.. XXX explain how.
-	 */
 	mutex_lock(&leader->child_mutex);
 
 	ret = __perf_read_group_add(leader, read_format, values);
@@ -11587,6 +11613,7 @@ static int inherit_group(struct perf_event *parent_event,
 		if (IS_ERR(child_ctr))
 			return PTR_ERR(child_ctr);
 	}
+	leader->group_generation = parent_event->group_generation;
 	return 0;
 }
 
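From userspace, the change is visible only as a new error: read() on a group leader whose inherited child groups have drifted out of sync now fails with ECHILD instead of returning silently mis-summed values. A hedged sketch of a caller that tolerates this; the event choices, buffer sizing, and error handling are illustrative, not taken from the patch:

/* Sketch: an inherited counter group read with PERF_FORMAT_GROUP. */
#include <errno.h>
#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int perf_open(struct perf_event_attr *attr, int group_fd)
{
	return (int)syscall(__NR_perf_event_open, attr, 0 /* this pid */,
			    -1 /* any cpu */, group_fd, 0);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t buf[8];	/* nr + 2 * (value, id) needs 5 u64s; 8 leaves slack */

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;
	attr.inherit = 1;	/* children inherit the group */
	attr.read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID;

	int leader = perf_open(&attr, -1);
	if (leader < 0) { perror("perf_event_open (leader)"); return 1; }

	attr.config = PERF_COUNT_SW_CONTEXT_SWITCHES;
	int sibling = perf_open(&attr, leader);
	if (sibling < 0) { perror("perf_event_open (sibling)"); return 1; }

	if (read(leader, buf, sizeof(buf)) < 0 && errno == ECHILD)
		fprintf(stderr, "group layout changed under us; skipping sample\n");

	close(sibling);
	close(leader);
	return 0;
}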