1 #ifndef __TOOLS_LINUX_WORKQUEUE_H
2 #define __TOOLS_LINUX_WORKQUEUE_H
4 #include <linux/list.h>
5 #include <linux/timer.h>
/* Opaque handle; the struct is defined by the workqueue implementation. */
8 struct workqueue_struct;
/* Callback signature every work item must provide. */
10 typedef void (*work_func_t)(struct work_struct *work);
/* Timer callback used by delayed work (armed in INIT_DELAYED_WORK below). */
11 void delayed_work_timer_fn(struct timer_list *);
/*
 * View the work item's state word as a bit array.
 * NOTE(review): the 'data' member itself is not visible in this chunk
 * (INIT_WORK touches data.counter, suggesting an atomic counter type) --
 * confirm against the full struct definition.
 */
13 #define work_data_bits(work) ((unsigned long *)(&(work)->data))
/*
 * State bits kept in work_struct::data -- presumably mirroring the kernel's
 * WORK_STRUCT_* flags; disabled (commented out) in this userspace shim and
 * retained only for reference.
 */
17 //WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */
18 //WORK_STRUCT_DELAYED_BIT = 1, /* work item is delayed */
20 //WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT,
21 //WORK_STRUCT_DELAYED = 1 << WORK_STRUCT_DELAYED_BIT,
/* work_struct member: list linkage for queueing (initialized by INIT_WORK). */
27 struct list_head entry;
/*
 * Initialize a work item with callback _func: clear the state word, reset
 * the list linkage, and install the callback.
 * NOTE(review): a multi-statement macro like this should be wrapped in
 * do { } while (0); the wrapper lines are not visible in this chunk --
 * confirm they exist upstream.
 */
31 #define INIT_WORK(_work, _func) \
33 (_work)->data.counter = 0; \
34 INIT_LIST_HEAD(&(_work)->entry); \
35 (_work)->func = (_func); \
/*
 * Members of struct delayed_work (the struct opener is outside this view).
 */
39 struct work_struct work; /* embedded work item; see to_delayed_work() */
40 struct timer_list timer; /* set up with delayed_work_timer_fn by INIT_DELAYED_WORK */
41 struct workqueue_struct *wq; /* NOTE(review): target queue for the timer path, presumably -- where it is assigned is not visible here */
/*
 * Initialize a delayed_work: set up the inner work item and its timer,
 * whose callback (delayed_work_timer_fn) runs when the delay elapses.
 * NOTE(review): do { } while (0) wrapper lines are not visible in this
 * chunk -- confirm upstream.
 */
44 #define INIT_DELAYED_WORK(_work, _func) \
46 INIT_WORK(&(_work)->work, (_func)); \
47 timer_setup(&(_work)->timer, delayed_work_timer_fn, 0); \
/*
 * Map a work_struct back to the delayed_work that embeds it. Only valid
 * for work items that actually live inside a struct delayed_work.
 */
50 static inline struct delayed_work *to_delayed_work(struct work_struct *work)
52 return container_of(work, struct delayed_work, work);
/*
 * Workqueue creation flags and limits (the enum opener and any earlier
 * members are outside this view).
 */
56 WQ_UNBOUND = 1 << 1, /* not bound to any cpu */
57 WQ_FREEZABLE = 1 << 2, /* freeze during suspend */
58 WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */
59 WQ_HIGHPRI = 1 << 4, /* high priority */
60 WQ_CPU_INTENSIVE = 1 << 5, /* cpu intensive workqueue */
61 WQ_SYSFS = 1 << 6, /* visible in sysfs, see wq_sysfs_register() */
64 * Per-cpu workqueues are generally preferred because they tend to
65 * show better performance thanks to cache locality. Per-cpu
66 * workqueues exclude the scheduler from choosing the CPU to
67 * execute the worker threads, which has an unfortunate side effect
68 * of increasing power consumption.
70 * The scheduler considers a CPU idle if it doesn't have any task
71 * to execute and tries to keep idle cores idle to conserve power;
72 * however, for example, a per-cpu work item scheduled from an
73 * interrupt handler on an idle CPU will force the scheduler to
74 * execute the work item on that CPU breaking the idleness, which in
75 * turn may lead to more scheduling choices which are sub-optimal
76 * in terms of power consumption.
78 * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
79 * but become unbound if workqueue.power_efficient kernel param is
80 * specified. Per-cpu workqueues which contribute significantly
81 * to power consumption are identified and
82 * marked with this flag and enabling the power_efficient mode
83 * leads to noticeable power saving at the cost of small
84 * performance disadvantage.
86 * http://thread.gmane.org/gmane.linux.kernel/1480396 (historical link)
88 WQ_POWER_EFFICIENT = 1 << 7,
90 __WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */
91 __WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */
92 __WQ_LEGACY = 1 << 18, /* internal: create*_workqueue() */
94 WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
95 WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */
96 WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2,
99 /* unbound wq's aren't per-cpu, scale max_active according to #cpus */
/* In this shim it is simply the global cap, not scaled by cpu count. */
100 #define WQ_UNBOUND_MAX_ACTIVE WQ_MAX_ACTIVE
/* System-wide workqueues; system_wq backs the schedule_work() helpers below. */
102 extern struct workqueue_struct *system_wq;
103 extern struct workqueue_struct *system_highpri_wq;
104 extern struct workqueue_struct *system_long_wq;
105 extern struct workqueue_struct *system_unbound_wq;
106 extern struct workqueue_struct *system_freezable_wq;
/*
 * Create a workqueue. fmt is a printf-style name format whose varargs start
 * at argument 4 (hence __printf(1, 4), skipping flags and max_active).
 */
108 extern struct workqueue_struct *
109 alloc_workqueue(const char *fmt, unsigned int flags,
110 int max_active, ...) __printf(1, 4);
/* Ordered workqueue: unbound, max_active of 1 -- work items run one at a time. */
112 #define alloc_ordered_workqueue(fmt, flags, args...) \
113 alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
/* Legacy constructors kept for old callers; all delegate to alloc_workqueue(). */
115 #define create_workqueue(name) \
116 alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
117 #define create_freezable_workqueue(name) \
118 alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND | \
119 WQ_MEM_RECLAIM, 1, (name))
120 #define create_singlethread_workqueue(name) \
121 alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name)
/* Tear down a workqueue; presumably drains pending work first -- confirm in impl. */
123 extern void destroy_workqueue(struct workqueue_struct *wq);
/* Workqueue attribute management. Caller frees with free_workqueue_attrs(). */
125 struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask);
126 void free_workqueue_attrs(struct workqueue_attrs *attrs);
127 int apply_workqueue_attrs(struct workqueue_struct *wq,
128 const struct workqueue_attrs *attrs);
/* Queueing: return false if the work was already pending, per kernel convention -- NOTE(review): confirm in this shim's implementation. */
130 extern bool queue_work(struct workqueue_struct *wq,
131 struct work_struct *work);
132 extern bool queue_delayed_work(struct workqueue_struct *wq,
133 struct delayed_work *work, unsigned long delay);
134 extern bool mod_delayed_work(struct workqueue_struct *wq,
135 struct delayed_work *dwork, unsigned long delay);
/* Flush all currently queued work; drain also waits out re-queued work. */
137 extern void flush_workqueue(struct workqueue_struct *wq);
138 extern void drain_workqueue(struct workqueue_struct *wq);
140 extern int schedule_on_each_cpu(work_func_t func);
/* Per-item flush/cancel helpers. */
142 extern bool flush_work(struct work_struct *work);
143 extern bool cancel_work_sync(struct work_struct *work);
145 extern bool flush_delayed_work(struct delayed_work *dwork);
146 extern bool cancel_delayed_work(struct delayed_work *dwork);
147 extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
/* NOTE(review): prototype continues on a line not visible in this chunk (second parameter is presumably 'int max_active') -- confirm. */
149 extern void workqueue_set_max_active(struct workqueue_struct *wq,
/* Introspection / diagnostics. */
151 extern bool current_is_workqueue_rescuer(void);
152 extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
153 extern unsigned int work_busy(struct work_struct *work);
154 extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
155 extern void print_worker_info(const char *log_lvl, struct task_struct *task);
156 extern void show_workqueue_state(void);
/*
 * Queue work on system_wq. The cpu argument is accepted for API
 * compatibility but ignored -- this shim has no per-cpu queues.
 */
158 static inline bool schedule_work_on(int cpu, struct work_struct *work)
160 return queue_work(system_wq, work);
/* Queue work on the default system workqueue. */
163 static inline bool schedule_work(struct work_struct *work)
165 return queue_work(system_wq, work);
/* Wait for everything currently queued on the default system workqueue. */
168 static inline void flush_scheduled_work(void)
170 flush_workqueue(system_wq);
/*
 * Queue delayed work on system_wq; the cpu argument is ignored in this shim.
 * NOTE(review): the signature continues on a line not visible in this chunk
 * (presumably 'unsigned long delay)') -- confirm.
 */
173 static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
176 return queue_delayed_work(system_wq, dwork, delay);
/*
 * Queue delayed work on the default system workqueue.
 * NOTE(review): the signature continues on a line not visible in this chunk
 * (presumably 'unsigned long delay)') -- confirm.
 */
179 static inline bool schedule_delayed_work(struct delayed_work *dwork,
182 return queue_delayed_work(system_wq, dwork, delay);
185 #endif /* __TOOLS_LINUX_WORKQUEUE_H */