ANDROID: GKI: QoS: Prevent usage of dev_pm_qos_request as pm_qos_request

pm_qos_set_value_for_cpus expects the list in pm_qos_constraints to
contain structs of type pm_qos_request. However, requests from device
drivers populate the list with dev_pm_qos_request structs.
pm_qos_set_value_for_cpus updates the target_per_cpu array, and since
there is no way to access target_per_cpu for device driver requests, we
can skip updating target_per_cpu for such requests. This prevents the
current issue where pm_qos_set_value_for_cpus accesses
dev_pm_qos_request structs as if they were pm_qos_request structs.

Fixes: 723feab600 ("ANDROID: GKI: QoS: Enhance framework to support cpu/irq specific QoS requests")
Bug: 183959482
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Change-Id: Iaa8d349b3c1f9cd8357b2e7912b16aadef78165f
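
The mismatch is easiest to see from the struct layouts. Below is a minimal,
self-contained userspace sketch; the layouts are reduced stand-ins, not the
real kernel definitions, but they illustrate the same offset mismatch the
per-CPU walk trips over: deriving the container with container_of is only
valid when the plist_node really is embedded in a pm_qos_request.

/* Reduced stand-in layouts for illustration only. */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct plist_node { int prio; };

struct pm_qos_request {
	struct plist_node node;		/* node sits at offset 0 */
	unsigned long cpus_affine;
};

struct dev_pm_qos_request {
	int type;			/* pushes pnode to a nonzero offset */
	struct plist_node pnode;
	void *dev;
};

int main(void)
{
	struct dev_pm_qos_request dreq = { .type = 1, .pnode = { .prio = 100 } };

	/* What the per-CPU walk effectively did for every list node,
	 * regardless of which struct the node was embedded in: */
	struct pm_qos_request *bogus =
		container_of(&dreq.pnode, struct pm_qos_request, node);

	printf("dev request at %p, misderived pm_qos_request at %p\n",
	       (void *)&dreq, (void *)bogus);
	/* bogus->cpus_affine would read unrelated bytes of dreq. */
	return 0;
}

Compiled with any C compiler, this prints two different addresses: the
derived pm_qos_request pointer lands in the middle of the device request,
so any field read through it is garbage.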

@@ -143,11 +143,13 @@ static int apply_constraint(struct dev_pm_qos_request *req,
 			value = 0;
 
 		ret = pm_qos_update_target(&qos->resume_latency,
-					   &req->data.pnode, action, value);
+					   &req->data.pnode, action, value,
+					   true);
 		break;
 	case DEV_PM_QOS_LATENCY_TOLERANCE:
 		ret = pm_qos_update_target(&qos->latency_tolerance,
-					   &req->data.pnode, action, value);
+					   &req->data.pnode, action, value,
+					   true);
 		if (ret) {
 			value = pm_qos_read_value(&qos->latency_tolerance);
 			req->dev->power.set_latency_tolerance(req->dev, value);

@@ -136,7 +136,8 @@ static inline int dev_pm_qos_request_active(struct dev_pm_qos_request *req)
 }
 
 int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
-			 enum pm_qos_req_action action, int value);
+			 enum pm_qos_req_action action, int value,
+			 bool dev_req);
 bool pm_qos_update_flags(struct pm_qos_flags *pqf,
 			 struct pm_qos_flags_request *req,
 			 enum pm_qos_req_action action, s32 val);

@@ -265,12 +265,26 @@ static const struct file_operations pm_qos_debug_fops = {
 	.release = single_release,
 };
 
-static inline void pm_qos_set_value_for_cpus(struct pm_qos_constraints *c)
+static inline void pm_qos_set_value_for_cpus(struct pm_qos_constraints *c,
+					     bool dev_req)
 {
 	struct pm_qos_request *req = NULL;
 	int cpu;
 	s32 qos_val[NR_CPUS] = { [0 ... (NR_CPUS - 1)] = c->default_value };
 
+	/*
+	 * pm_qos_set_value_for_cpus expects all c->list elements to be of type
+	 * pm_qos_request, however requests from device will contain elements
+	 * of type dev_pm_qos_request.
+	 * pm_qos_constraints.target_per_cpu can be accessed only for
+	 * constraints associated with one of the pm_qos_class and present in
+	 * pm_qos_array. Device requests are not associated with any of
+	 * pm_qos_class, therefore their target_per_cpu cannot be accessed. We
+	 * can safely skip updating target_per_cpu for device requests.
+	 */
+	if (dev_req)
+		return;
+
 	plist_for_each_entry(req, &c->list, node) {
 		for_each_cpu(cpu, &req->cpus_affine) {
 			switch (c->type) {
@@ -304,7 +318,7 @@ static inline void pm_qos_set_value_for_cpus(struct pm_qos_constraints *c)
  * otherwise.
  */
 int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
-			 enum pm_qos_req_action action, int value)
+			 enum pm_qos_req_action action, int value, bool dev_req)
 {
 	unsigned long flags;
 	int prev_value, curr_value, new_value;
@@ -340,7 +354,7 @@ int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
 	curr_value = pm_qos_get_value(c);
 	pm_qos_set_value(c, curr_value);
-	pm_qos_set_value_for_cpus(c);
+	pm_qos_set_value_for_cpus(c, dev_req);
 
 	spin_unlock_irqrestore(&pm_qos_lock, flags);
@@ -485,7 +499,7 @@ static void __pm_qos_update_request(struct pm_qos_request *req,
 	if (new_value != req->node.prio)
 		pm_qos_update_target(
 			pm_qos_array[req->pm_qos_class]->constraints,
-			&req->node, PM_QOS_UPDATE_REQ, new_value);
+			&req->node, PM_QOS_UPDATE_REQ, new_value, false);
 }
 
 /**
@@ -519,7 +533,7 @@ static void pm_qos_irq_release(struct kref *ref)
 	spin_unlock_irqrestore(&pm_qos_lock, flags);
 
 	pm_qos_update_target(c, &req->node, PM_QOS_UPDATE_REQ,
-			     c->default_value);
+			     c->default_value, false);
 }
 
 static void pm_qos_irq_notify(struct irq_affinity_notify *notify,
@@ -535,7 +549,8 @@ static void pm_qos_irq_notify(struct irq_affinity_notify *notify,
 	cpumask_copy(&req->cpus_affine, mask);
 	spin_unlock_irqrestore(&pm_qos_lock, flags);
 
-	pm_qos_update_target(c, &req->node, PM_QOS_UPDATE_REQ, req->node.prio);
+	pm_qos_update_target(c, &req->node, PM_QOS_UPDATE_REQ, req->node.prio,
+			     false);
 }
 
 #endif
@@ -608,7 +623,7 @@ void pm_qos_add_request(struct pm_qos_request *req,
 	INIT_DELAYED_WORK(&req->work, pm_qos_work_fn);
 	trace_pm_qos_add_request(pm_qos_class, value);
 	pm_qos_update_target(pm_qos_array[pm_qos_class]->constraints,
-			     &req->node, PM_QOS_ADD_REQ, value);
+			     &req->node, PM_QOS_ADD_REQ, value, false);
 
 #ifdef CONFIG_SMP
 	if (req->type == PM_QOS_REQ_AFFINE_IRQ &&
@@ -623,7 +638,7 @@ void pm_qos_add_request(struct pm_qos_request *req,
 			cpumask_setall(&req->cpus_affine);
 			pm_qos_update_target(
 				pm_qos_array[pm_qos_class]->constraints,
-				&req->node, PM_QOS_UPDATE_REQ, value);
+				&req->node, PM_QOS_UPDATE_REQ, value, false);
 		}
 	}
 #endif
@@ -680,7 +695,7 @@ void pm_qos_update_request_timeout(struct pm_qos_request *req, s32 new_value,
 	if (new_value != req->node.prio)
 		pm_qos_update_target(
 			pm_qos_array[req->pm_qos_class]->constraints,
-			&req->node, PM_QOS_UPDATE_REQ, new_value);
+			&req->node, PM_QOS_UPDATE_REQ, new_value, false);
 
 	schedule_delayed_work(&req->work, usecs_to_jiffies(timeout_us));
 }
@@ -710,7 +725,7 @@ void pm_qos_remove_request(struct pm_qos_request *req)
 	trace_pm_qos_remove_request(req->pm_qos_class, PM_QOS_DEFAULT_VALUE);
 	pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints,
 			     &req->node, PM_QOS_REMOVE_REQ,
-			     PM_QOS_DEFAULT_VALUE);
+			     PM_QOS_DEFAULT_VALUE, false);
 	memset(req, 0, sizeof(*req));
 }
 EXPORT_SYMBOL_GPL(pm_qos_remove_request);
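
Taken together, the change threads a single dev_req flag from both entry
points down to the per-CPU pass. A compressed model of the resulting control
flow follows; the names mirror the kernel's, but the bodies are stand-ins and
the constraint/node arguments are elided:

#include <stdbool.h>
#include <stdio.h>

/* Reduced model: control flow only, no real constraint handling. */
static void pm_qos_set_value_for_cpus(bool dev_req)
{
	if (dev_req) {
		puts("device request: target_per_cpu update skipped");
		return;
	}
	puts("class request: target_per_cpu recomputed");
}

static void pm_qos_update_target(int value, bool dev_req)
{
	printf("aggregate target set to %d\n", value);	/* done for both */
	pm_qos_set_value_for_cpus(dev_req);
}

int main(void)
{
	/* class-request paths (pm_qos_add_request and friends) pass false */
	pm_qos_update_target(10, false);
	/* the device-driver path (apply_constraint) passes true */
	pm_qos_update_target(20, true);
	return 0;
}

The aggregate target is still maintained for device requests; only the
pm_qos_request-specific walk that refreshes target_per_cpu is skipped.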