Commit 112202d9 authored by Tejun Heo

workqueue: rename cpu_workqueue to pool_workqueue

workqueue has moved away from global_cwqs to worker_pools and with the
scheduled custom worker pools, workqueues will be associated with
pools which don't have anything to do with CPUs.  The workqueue code
went through a significant amount of changes recently and mass renaming
isn't likely to hurt much additionally.  Let's replace 'cpu' with
'pool' so that it reflects the current design.

* s/struct cpu_workqueue_struct/struct pool_workqueue/
* s/cpu_wq/pool_wq/
* s/cwq/pwq/

This patch is purely cosmetic.
Signed-off-by: Tejun Heo <tj@kernel.org>
parent 8d03ecfe
@@ -27,7 +27,7 @@ void delayed_work_timer_fn(unsigned long __data);
enum {
WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */
WORK_STRUCT_DELAYED_BIT = 1, /* work item is delayed */
WORK_STRUCT_CWQ_BIT = 2, /* data points to cwq */
WORK_STRUCT_PWQ_BIT = 2, /* data points to pwq */
WORK_STRUCT_LINKED_BIT = 3, /* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
WORK_STRUCT_STATIC_BIT = 4, /* static initializer (debugobjects) */
@@ -40,7 +40,7 @@ enum {
WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT,
WORK_STRUCT_DELAYED = 1 << WORK_STRUCT_DELAYED_BIT,
WORK_STRUCT_CWQ = 1 << WORK_STRUCT_CWQ_BIT,
WORK_STRUCT_PWQ = 1 << WORK_STRUCT_PWQ_BIT,
WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
WORK_STRUCT_STATIC = 1 << WORK_STRUCT_STATIC_BIT,
@@ -60,14 +60,14 @@ enum {
WORK_CPU_END = NR_CPUS + 1,
/*
* Reserve 7 bits off of cwq pointer w/ debugobjects turned
* off. This makes cwqs aligned to 256 bytes and allows 15
* workqueue flush colors.
* Reserve 7 bits off of pwq pointer w/ debugobjects turned off.
* This makes pwqs aligned to 256 bytes and allows 15 workqueue
* flush colors.
*/
WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT +
WORK_STRUCT_COLOR_BITS,
/* data contains off-queue information when !WORK_STRUCT_CWQ */
/* data contains off-queue information when !WORK_STRUCT_PWQ */
WORK_OFFQ_FLAG_BASE = WORK_STRUCT_FLAG_BITS,
WORK_OFFQ_CANCELING = (1 << WORK_OFFQ_FLAG_BASE),
......
@@ -27,7 +27,7 @@ DECLARE_EVENT_CLASS(workqueue_work,
/**
* workqueue_queue_work - called when a work gets queued
* @req_cpu: the requested cpu
* @cwq: pointer to struct cpu_workqueue_struct
* @pwq: pointer to struct pool_workqueue
* @work: pointer to struct work_struct
*
* This event occurs when a work is queued immediately or once a
@@ -36,10 +36,10 @@ DECLARE_EVENT_CLASS(workqueue_work,
*/
TRACE_EVENT(workqueue_queue_work,
TP_PROTO(unsigned int req_cpu, struct cpu_workqueue_struct *cwq,
TP_PROTO(unsigned int req_cpu, struct pool_workqueue *pwq,
struct work_struct *work),
TP_ARGS(req_cpu, cwq, work),
TP_ARGS(req_cpu, pwq, work),
TP_STRUCT__entry(
__field( void *, work )
@@ -52,9 +52,9 @@ TRACE_EVENT(workqueue_queue_work,
TP_fast_assign(
__entry->work = work;
__entry->function = work->func;
__entry->workqueue = cwq->wq;
__entry->workqueue = pwq->wq;
__entry->req_cpu = req_cpu;
__entry->cpu = cwq->pool->cpu;
__entry->cpu = pwq->pool->cpu;
),
TP_printk("work struct=%p function=%pf workqueue=%p req_cpu=%u cpu=%u",
......
@@ -154,11 +154,12 @@ struct worker_pool {
} ____cacheline_aligned_in_smp;
/*
* The per-CPU workqueue. The lower WORK_STRUCT_FLAG_BITS of
* work_struct->data are used for flags and thus cwqs need to be
* aligned at two's power of the number of flag bits.
* The per-pool workqueue. While queued, the lower WORK_STRUCT_FLAG_BITS
* of work_struct->data are used for flags and the remaining high bits
* point to the pwq; thus, pwqs need to be aligned at two's power of the
* number of flag bits.
*/
struct cpu_workqueue_struct {
struct pool_workqueue {
struct worker_pool *pool; /* I: the associated pool */
struct workqueue_struct *wq; /* I: the owning workqueue */
int work_color; /* L: current color */
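To make the alignment requirement above concrete, here is a minimal sketch that is not part of the patch; example_check_pwq_alignment() is an invented name and only restates the invariant in code:

/* Illustration only: a pwq pointer must have its low WORK_STRUCT_FLAG_BITS
 * clear, otherwise packing it into work->data next to the flag bits would
 * corrupt one or the other. */
static inline void example_check_pwq_alignment(struct pool_workqueue *pwq)
{
        WARN_ON_ONCE((unsigned long)pwq & ((1UL << WORK_STRUCT_FLAG_BITS) - 1));
}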
@@ -207,16 +208,16 @@ typedef unsigned long mayday_mask_t;
struct workqueue_struct {
unsigned int flags; /* W: WQ_* flags */
union {
struct cpu_workqueue_struct __percpu *pcpu;
struct cpu_workqueue_struct *single;
struct pool_workqueue __percpu *pcpu;
struct pool_workqueue *single;
unsigned long v;
} cpu_wq; /* I: cwq's */
} pool_wq; /* I: pwq's */
struct list_head list; /* W: list of all workqueues */
struct mutex flush_mutex; /* protects wq flushing */
int work_color; /* F: current work color */
int flush_color; /* F: current flush color */
atomic_t nr_cwqs_to_flush; /* flush in progress */
atomic_t nr_pwqs_to_flush; /* flush in progress */
struct wq_flusher *first_flusher; /* F: first flusher */
struct list_head flusher_queue; /* F: flush waiters */
struct list_head flusher_overflow; /* F: flush overflow list */
@@ -225,7 +226,7 @@ struct workqueue_struct {
struct worker *rescuer; /* I: rescue worker */
int nr_drainers; /* W: drain in progress */
int saved_max_active; /* W: saved cwq max_active */
int saved_max_active; /* W: saved pwq max_active */
#ifdef CONFIG_LOCKDEP
struct lockdep_map lockdep_map;
#endif
@@ -268,7 +269,7 @@ static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
return WORK_CPU_END;
}
static inline int __next_cwq_cpu(int cpu, const struct cpumask *mask,
static inline int __next_pwq_cpu(int cpu, const struct cpumask *mask,
struct workqueue_struct *wq)
{
return __next_wq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
@@ -284,7 +285,7 @@ static inline int __next_cwq_cpu(int cpu, const struct cpumask *mask,
*
* for_each_wq_cpu() : possible CPUs + WORK_CPU_UNBOUND
* for_each_online_wq_cpu() : online CPUs + WORK_CPU_UNBOUND
* for_each_cwq_cpu() : possible CPUs for bound workqueues,
* for_each_pwq_cpu() : possible CPUs for bound workqueues,
* WORK_CPU_UNBOUND for unbound workqueues
*/
#define for_each_wq_cpu(cpu) \
@@ -297,10 +298,10 @@ static inline int __next_cwq_cpu(int cpu, const struct cpumask *mask,
(cpu) < WORK_CPU_END; \
(cpu) = __next_wq_cpu((cpu), cpu_online_mask, 3))
#define for_each_cwq_cpu(cpu, wq) \
for ((cpu) = __next_cwq_cpu(-1, cpu_possible_mask, (wq)); \
#define for_each_pwq_cpu(cpu, wq) \
for ((cpu) = __next_pwq_cpu(-1, cpu_possible_mask, (wq)); \
(cpu) < WORK_CPU_END; \
(cpu) = __next_cwq_cpu((cpu), cpu_possible_mask, (wq)))
(cpu) = __next_pwq_cpu((cpu), cpu_possible_mask, (wq)))
#ifdef CONFIG_DEBUG_OBJECTS_WORK
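As a hedged usage sketch (not in the patch; example_count_active() is an invented helper and real callers would hold the relevant pool locks), the renamed iterator is typically paired with get_pwq():

/* Illustration only: walk every pwq of @wq and add up the currently active
 * work items.  Locking is omitted for brevity. */
static int example_count_active(struct workqueue_struct *wq)
{
        unsigned int cpu;
        int nr = 0;

        for_each_pwq_cpu(cpu, wq) {
                struct pool_workqueue *pwq = get_pwq(cpu, wq);

                if (pwq)
                        nr += pwq->nr_active;
        }
        return nr;
}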
@@ -479,14 +480,14 @@ static struct worker_pool *get_std_worker_pool(int cpu, bool highpri)
return &pools[highpri];
}
static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
struct workqueue_struct *wq)
static struct pool_workqueue *get_pwq(unsigned int cpu,
struct workqueue_struct *wq)
{
if (!(wq->flags & WQ_UNBOUND)) {
if (likely(cpu < nr_cpu_ids))
return per_cpu_ptr(wq->cpu_wq.pcpu, cpu);
return per_cpu_ptr(wq->pool_wq.pcpu, cpu);
} else if (likely(cpu == WORK_CPU_UNBOUND))
return wq->cpu_wq.single;
return wq->pool_wq.single;
return NULL;
}
@@ -507,18 +508,18 @@ static int work_next_color(int color)
}
/*
* While queued, %WORK_STRUCT_CWQ is set and non flag bits of a work's data
* contain the pointer to the queued cwq. Once execution starts, the flag
* While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data
* contain the pointer to the queued pwq. Once execution starts, the flag
* is cleared and the high bits contain OFFQ flags and pool ID.
*
* set_work_cwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
* and clear_work_data() can be used to set the cwq, pool or clear
* set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
* and clear_work_data() can be used to set the pwq, pool or clear
* work->data. These functions should only be called while the work is
* owned - ie. while the PENDING bit is set.
*
* get_work_pool() and get_work_cwq() can be used to obtain the pool or cwq
* get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
* corresponding to a work. Pool is available once the work has been
* queued anywhere after initialization until it is sync canceled. cwq is
* queued anywhere after initialization until it is sync canceled. pwq is
* available only while the work item is queued.
*
* %WORK_OFFQ_CANCELING is used to mark a work item which is being
@@ -533,12 +534,11 @@ static inline void set_work_data(struct work_struct *work, unsigned long data,
atomic_long_set(&work->data, data | flags | work_static(work));
}
static void set_work_cwq(struct work_struct *work,
struct cpu_workqueue_struct *cwq,
static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
unsigned long extra_flags)
{
set_work_data(work, (unsigned long)cwq,
WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags);
set_work_data(work, (unsigned long)pwq,
WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
}
static void set_work_pool_and_keep_pending(struct work_struct *work,
@@ -567,11 +567,11 @@ static void clear_work_data(struct work_struct *work)
set_work_data(work, WORK_STRUCT_NO_POOL, 0);
}
static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
static struct pool_workqueue *get_work_pwq(struct work_struct *work)
{
unsigned long data = atomic_long_read(&work->data);
if (data & WORK_STRUCT_CWQ)
if (data & WORK_STRUCT_PWQ)
return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
else
return NULL;
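The encoding that set_work_pwq() and get_work_pwq() rely on can be sketched in isolation; example_pack()/example_unpack() below are invented names and skip the atomic_long accessors and PENDING ownership rules of the real code:

/* Illustration only: one word carries the flag bits in its low
 * WORK_STRUCT_FLAG_BITS and, while WORK_STRUCT_PWQ is set, the suitably
 * aligned pwq pointer in the remaining high bits. */
static unsigned long example_pack(struct pool_workqueue *pwq, unsigned long flags)
{
        return (unsigned long)pwq | flags;
}

static struct pool_workqueue *example_unpack(unsigned long data)
{
        if (data & WORK_STRUCT_PWQ)
                return (struct pool_workqueue *)(data & WORK_STRUCT_WQ_DATA_MASK);
        return NULL;    /* off-queue: high bits hold OFFQ flags and the pool ID */
}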
@@ -589,8 +589,8 @@ static struct worker_pool *get_work_pool(struct work_struct *work)
struct worker_pool *pool;
int pool_id;
if (data & WORK_STRUCT_CWQ)
return ((struct cpu_workqueue_struct *)
if (data & WORK_STRUCT_PWQ)
return ((struct pool_workqueue *)
(data & WORK_STRUCT_WQ_DATA_MASK))->pool;
pool_id = data >> WORK_OFFQ_POOL_SHIFT;
@@ -613,8 +613,8 @@ static int get_work_pool_id(struct work_struct *work)
{
unsigned long data = atomic_long_read(&work->data);
if (data & WORK_STRUCT_CWQ)
return ((struct cpu_workqueue_struct *)
if (data & WORK_STRUCT_PWQ)
return ((struct pool_workqueue *)
(data & WORK_STRUCT_WQ_DATA_MASK))->pool->id;
return data >> WORK_OFFQ_POOL_SHIFT;
@@ -632,7 +632,7 @@ static bool work_is_canceling(struct work_struct *work)
{
unsigned long data = atomic_long_read(&work->data);
return !(data & WORK_STRUCT_CWQ) && (data & WORK_OFFQ_CANCELING);
return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
}
/*
@@ -961,67 +961,67 @@ static void move_linked_works(struct work_struct *work, struct list_head *head,
*nextp = n;
}
static void cwq_activate_delayed_work(struct work_struct *work)
static void pwq_activate_delayed_work(struct work_struct *work)
{
struct cpu_workqueue_struct *cwq = get_work_cwq(work);
struct pool_workqueue *pwq = get_work_pwq(work);
trace_workqueue_activate_work(work);
move_linked_works(work, &cwq->pool->worklist, NULL);
move_linked_works(work, &pwq->pool->worklist, NULL);
__clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
cwq->nr_active++;
pwq->nr_active++;
}
static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
{
struct work_struct *work = list_first_entry(&cwq->delayed_works,
struct work_struct *work = list_first_entry(&pwq->delayed_works,
struct work_struct, entry);
cwq_activate_delayed_work(work);
pwq_activate_delayed_work(work);
}
/**
* cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
* @cwq: cwq of interest
* pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
* @pwq: pwq of interest
* @color: color of work which left the queue
*
* A work either has completed or is removed from pending queue,
* decrement nr_in_flight of its cwq and handle workqueue flushing.
* decrement nr_in_flight of its pwq and handle workqueue flushing.
*
* CONTEXT:
* spin_lock_irq(pool->lock).
*/
static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
{
/* ignore uncolored works */
if (color == WORK_NO_COLOR)
return;
cwq->nr_in_flight[color]--;
pwq->nr_in_flight[color]--;
cwq->nr_active--;
if (!list_empty(&cwq->delayed_works)) {
pwq->nr_active--;
if (!list_empty(&pwq->delayed_works)) {
/* one down, submit a delayed one */
if (cwq->nr_active < cwq->max_active)
cwq_activate_first_delayed(cwq);
if (pwq->nr_active < pwq->max_active)
pwq_activate_first_delayed(pwq);
}
/* is flush in progress and are we at the flushing tip? */
if (likely(cwq->flush_color != color))
if (likely(pwq->flush_color != color))
return;
/* are there still in-flight works? */
if (cwq->nr_in_flight[color])
if (pwq->nr_in_flight[color])
return;
/* this cwq is done, clear flush_color */
cwq->flush_color = -1;
/* this pwq is done, clear flush_color */
pwq->flush_color = -1;
/*
* If this was the last cwq, wake up the first flusher. It
* If this was the last pwq, wake up the first flusher. It
* will handle the rest.
*/
if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
complete(&cwq->wq->first_flusher->done);
if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
complete(&pwq->wq->first_flusher->done);
}
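The max_active limit that gates this activation logic is a per-workqueue setting chosen at creation time. A caller-side sketch, using invented "example" names and the alloc_workqueue() signature assumed for this kernel version:

/* Illustration only: with max_active == 1, at most one item of this
 * workqueue is on pool->worklist per pwq; anything further waits on
 * pwq->delayed_works until pwq_dec_nr_in_flight() frees a slot. */
static struct workqueue_struct *example_wq;

static int __init example_init(void)
{
        example_wq = alloc_workqueue("example", 0, 1);  /* max_active = 1 */
        return example_wq ? 0 : -ENOMEM;
}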
/**
@@ -1053,7 +1053,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
unsigned long *flags)
{
struct worker_pool *pool;
struct cpu_workqueue_struct *cwq;
struct pool_workqueue *pwq;
local_irq_save(*flags);
@@ -1084,31 +1084,31 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
spin_lock(&pool->lock);
/*
* work->data is guaranteed to point to cwq only while the work
* item is queued on cwq->wq, and both updating work->data to point
* to cwq on queueing and to pool on dequeueing are done under
* cwq->pool->lock. This in turn guarantees that, if work->data
* points to cwq which is associated with a locked pool, the work
* work->data is guaranteed to point to pwq only while the work
* item is queued on pwq->wq, and both updating work->data to point
* to pwq on queueing and to pool on dequeueing are done under
* pwq->pool->lock. This in turn guarantees that, if work->data
* points to pwq which is associated with a locked pool, the work
* item is currently queued on that pool.
*/
cwq = get_work_cwq(work);
if (cwq && cwq->pool == pool) {
pwq = get_work_pwq(work);
if (pwq && pwq->pool == pool) {
debug_work_deactivate(work);
/*
* A delayed work item cannot be grabbed directly because
* it might have linked NO_COLOR work items which, if left
* on the delayed_list, will confuse cwq->nr_active
* on the delayed_list, will confuse pwq->nr_active
* management later on and cause stall. Make sure the work
* item is activated before grabbing.
*/
if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
cwq_activate_delayed_work(work);
pwq_activate_delayed_work(work);
list_del_init(&work->entry);
cwq_dec_nr_in_flight(get_work_cwq(work), get_work_color(work));
pwq_dec_nr_in_flight(get_work_pwq(work), get_work_color(work));
/* work->data points to cwq iff queued, point to pool */
/* work->data points to pwq iff queued, point to pool */
set_work_pool_and_keep_pending(work, pool->id);
spin_unlock(&pool->lock);
@@ -1125,25 +1125,24 @@ fail:
/**
* insert_work - insert a work into a pool
* @cwq: cwq @work belongs to
* @pwq: pwq @work belongs to
* @work: work to insert
* @head: insertion point
* @extra_flags: extra WORK_STRUCT_* flags to set
*
* Insert @work which belongs to @cwq after @head. @extra_flags is or'd to
* Insert @work which belongs to @pwq after @head. @extra_flags is or'd to
* work_struct flags.
*
* CONTEXT:
* spin_lock_irq(pool->lock).
*/
static void insert_work(struct cpu_workqueue_struct *cwq,
struct work_struct *work, struct list_head *head,
unsigned int extra_flags)
static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
struct list_head *head, unsigned int extra_flags)
{
struct worker_pool *pool = cwq->pool;
struct worker_pool *pool = pwq->pool;
/* we own @work, set data and link */
set_work_cwq(work, cwq, extra_flags);
set_work_pwq(work, pwq, extra_flags);
list_add_tail(&work->entry, head);
/*
@@ -1170,13 +1169,13 @@ static bool is_chained_work(struct workqueue_struct *wq)
* Return %true iff I'm a worker executing a work item on @wq. If
* I'm @worker, it's safe to dereference it without locking.
*/
return worker && worker->current_cwq->wq == wq;
return worker && worker->current_pwq->wq == wq;
}
static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
struct work_struct *work)
{
struct cpu_workqueue_struct *cwq;
struct pool_workqueue *pwq;
struct list_head *worklist;
unsigned int work_flags;
unsigned int req_cpu = cpu;
@@ -1196,7 +1195,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
WARN_ON_ONCE(!is_chained_work(wq)))
return;
/* determine the cwq to use */
/* determine the pwq to use */
if (!(wq->flags & WQ_UNBOUND)) {
struct worker_pool *last_pool;
@@ -1209,54 +1208,54 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
* work needs to be queued on that cpu to guarantee
* non-reentrancy.
*/
cwq = get_cwq(cpu, wq);
pwq = get_pwq(cpu, wq);
last_pool = get_work_pool(work);
if (last_pool && last_pool != cwq->pool) {
if (last_pool && last_pool != pwq->pool) {
struct worker *worker;
spin_lock(&last_pool->lock);
worker = find_worker_executing_work(last_pool, work);
if (worker && worker->current_cwq->wq == wq) {
cwq = get_cwq(last_pool->cpu, wq);
if (worker && worker->current_pwq->wq == wq) {
pwq = get_pwq(last_pool->cpu, wq);
} else {
/* meh... not running there, queue here */
spin_unlock(&last_pool->lock);
spin_lock(&cwq->pool->lock);
spin_lock(&pwq->pool->lock);
}
} else {
spin_lock(&cwq->pool->lock);
spin_lock(&pwq->pool->lock);
}
} else {
cwq = get_cwq(WORK_CPU_UNBOUND, wq);
spin_lock(&cwq->pool->lock);
pwq = get_pwq(WORK_CPU_UNBOUND, wq);
spin_lock(&pwq->pool->lock);
}
/* cwq determined, queue */
trace_workqueue_queue_work(req_cpu, cwq, work);
/* pwq determined, queue */
trace_workqueue_queue_work(req_cpu, pwq, work);
if (WARN_ON(!list_empty(&work->entry))) {
spin_unlock(&cwq->pool->lock);
spin_unlock(&pwq->pool->lock);
return;
}
cwq->nr_in_flight[cwq->work_color]++;
work_flags = work_color_to_flags(cwq->work_color);
pwq->nr_in_flight[pwq->work_color]++;
work_flags = work_color_to_flags(pwq->work_color);
if (likely(cwq->nr_active < cwq->max_active)) {
if (likely(pwq->nr_active < pwq->max_active)) {
trace_workqueue_activate_work(work);
cwq->nr_active++;
worklist = &cwq->pool->worklist;
pwq->nr_active++;
worklist = &pwq->pool->worklist;
} else {
work_flags |= WORK_STRUCT_DELAYED;
worklist = &cwq->delayed_works;
worklist = &pwq->delayed_works;
}
insert_work(cwq, work, worklist, work_flags);
insert_work(pwq, work, worklist, work_flags);
spin_unlock(&cwq->pool->lock);
spin_unlock(&pwq->pool->lock);
}
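None of this changes the queueing API itself; only the internal pwq lookup is renamed. A minimal caller sketch with invented example_* identifiers:

static void example_fn(struct work_struct *work)
{
        /* runs in worker context on whichever pool the item was queued to */
}

static DECLARE_WORK(example_work, example_fn);

static void example_submit(void)
{
        /* queue on the local CPU's pwq of a bound workqueue */
        queue_work(system_wq, &example_work);

        /*
         * queue_work_on(1, system_wq, &example_work) would target CPU 1's
         * pwq explicitly instead.
         */
}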
/**
@@ -1661,14 +1660,14 @@ static void rebind_workers(struct worker_pool *pool)
/*
* wq doesn't really matter but let's keep @worker->pool
* and @cwq->pool consistent for sanity.
* and @pwq->pool consistent for sanity.
*/
if (std_worker_pool_pri(worker->pool))
wq = system_highpri_wq;
else
wq = system_wq;
insert_work(get_cwq(pool->cpu, wq), rebind_work,
insert_work(get_pwq(pool->cpu, wq), rebind_work,
worker->scheduled.next,
work_color_to_flags(WORK_NO_COLOR));
}
@@ -1845,15 +1844,15 @@ static void idle_worker_timeout(unsigned long __pool)
static bool send_mayday(struct work_struct *work)
{
struct cpu_workqueue_struct *cwq = get_work_cwq(work);
struct workqueue_struct *wq = cwq->wq;
struct pool_workqueue *pwq = get_work_pwq(work);
struct workqueue_struct *wq = pwq->wq;
unsigned int cpu;
if (!(wq->flags & WQ_RESCUER))
return false;
/* mayday mayday mayday */
cpu = cwq->pool->cpu;
cpu = pwq->pool->cpu;
/* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */
if (cpu == WORK_CPU_UNBOUND)
cpu = 0;
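For context, only workqueues that own a rescuer reach this point; in caller terms that means the workqueue was allocated with WQ_MEM_RECLAIM, which is believed to set the internal WQ_RESCUER flag in this kernel version. A sketch with invented names:

/* Illustration only: WQ_MEM_RECLAIM gives the workqueue a rescuer thread,
 * which is what makes send_mayday() meaningful under memory pressure. */
static struct workqueue_struct *example_reclaim_wq;

static int example_reclaim_setup(void)
{
        example_reclaim_wq = alloc_workqueue("example_reclaim", WQ_MEM_RECLAIM, 0);
        return example_reclaim_wq ? 0 : -ENOMEM;
}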
@@ -2082,9 +2081,9 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
__releases(&pool->lock)
__acquires(&pool->lock)
{
struct cpu_workqueue_struct *cwq = get_work_cwq(work);
struct pool_workqueue *pwq = get_work_pwq(work);
struct worker_pool *pool = worker->pool;
bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
int work_color;
struct worker *collision;
#ifdef CONFIG_LOCKDEP
@@ -2125,7 +2124,7 @@ __acquires(&pool->lock)
hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
worker->current_work = work;
worker->current_func = work->func;
worker->current_cwq = cwq;
worker->current_pwq = pwq;
work_color = get_work_color(work);
list_del_init(&work->entry);
@@ -2154,7 +2153,7 @@ __acquires(&pool->lock)
spin_unlock_irq(&pool->lock);
lock_map_acquire_read(&cwq->wq->lockdep_map);
lock_map_acquire_read(&pwq->wq->lockdep_map);
lock_map_acquire(&lockdep_map);
trace_workqueue_execute_start(work);
worker->current_func(work);
@@ -2164,7 +2163,7 @@ __acquires(&pool->lock)
*/
trace_workqueue_execute_end(work);
lock_map_release(&lockdep_map);
lock_map_release(&cwq->wq->lockdep_map);
lock_map_release(&pwq->wq->lockdep_map);
if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
@@ -2185,8 +2184,8 @@ __acquires(&pool->lock)
hash_del(&worker->hentry);
worker->current_work = NULL;
worker->current_func = NULL;
worker->current_cwq = NULL;
cwq_dec_nr_in_flight(cwq, work_color);
worker->current_pwq = NULL;
pwq_dec_nr_in_flight(pwq, work_color);
}
/**
@@ -2353,8 +2352,8 @@ repeat:
*/
for_each_mayday_cpu(cpu, wq->mayday_mask) {
unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu;
struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq);
struct worker_pool *pool = cwq->pool;
struct pool_workqueue *pwq = get_pwq(tcpu, wq);
struct worker_pool *pool = pwq->pool;
struct work_struct *work, *n;
__set_current_state(TASK_RUNNING);
@@ -2370,7 +2369,7 @@ repeat:
*/
BUG_ON(!list_empty(&rescuer->scheduled));
list_for_each_entry_safe(work, n, &pool->worklist, entry)
if (get_work_cwq(work) == cwq)
if (get_work_pwq(work) == pwq)
move_linked_works(work, scheduled, &n);
process_scheduled_works(rescuer);
@@ -2405,7 +2404,7 @@ static void wq_barrier_func(struct work_struct *work)
/**
* insert_wq_barrier - insert a barrier work
* @cwq: cwq to insert barrier into
* @pwq: pwq to insert barrier into
* @barr: wq_barrier to insert
* @target: target work to attach @barr to
* @worker: worker currently executing @target, NULL if @target is not executing
@@ -2422,12 +2421,12 @@ static void wq_barrier_func(struct work_struct *work)
* after a work with LINKED flag set.
*
* Note that when @worker is non-NULL, @target may be modified
* underneath us, so we can't reliably determine cwq from @target.
* underneath us, so we can't reliably determine pwq from @target.
*
* CONTEXT:
* spin_lock_irq(pool->lock).
*/
static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
static void insert_wq_barrier(struct pool_workqueue *pwq,
struct wq_barrier *barr,
struct work_struct *target, struct worker *worker)
{
@@ -2460,23 +2459,23 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
}
debug_work_activate(&barr->work);
insert_work(cwq, &barr->work, head,
insert_work(pwq, &barr->work, head,
work_color_to_flags(WORK_NO_COLOR) | linked);
}