LCOV - code coverage report
Current view: top level - include/linux - workqueue.h (source / functions)
Test:         btrfstest.info
Date:         2014-11-28

              Hit    Total    Coverage
Lines:          2        2     100.0 %
Functions:      0        0           -

          Line data    Source code
       1             : /*
       2             :  * workqueue.h --- work queue handling for Linux.
       3             :  */
       4             : 
       5             : #ifndef _LINUX_WORKQUEUE_H
       6             : #define _LINUX_WORKQUEUE_H
       7             : 
       8             : #include <linux/timer.h>
       9             : #include <linux/linkage.h>
      10             : #include <linux/bitops.h>
      11             : #include <linux/lockdep.h>
      12             : #include <linux/threads.h>
      13             : #include <linux/atomic.h>
      14             : #include <linux/cpumask.h>
      15             : 
      16             : struct workqueue_struct;
      17             : 
      18             : struct work_struct;
      19             : typedef void (*work_func_t)(struct work_struct *work);
      20             : void delayed_work_timer_fn(unsigned long __data);
      21             : 
      22             : /*
      23             :  * The first word is the work queue pointer and the flags rolled into
      24             :  * one
      25             :  */
      26             : #define work_data_bits(work) ((unsigned long *)(&(work)->data))
      27             : 
      28             : enum {
      29             :         WORK_STRUCT_PENDING_BIT = 0,    /* work item is pending execution */
      30             :         WORK_STRUCT_DELAYED_BIT = 1,    /* work item is delayed */
      31             :         WORK_STRUCT_PWQ_BIT     = 2,    /* data points to pwq */
      32             :         WORK_STRUCT_LINKED_BIT  = 3,    /* next work is linked to this one */
      33             : #ifdef CONFIG_DEBUG_OBJECTS_WORK
      34             :         WORK_STRUCT_STATIC_BIT  = 4,    /* static initializer (debugobjects) */
      35             :         WORK_STRUCT_COLOR_SHIFT = 5,    /* color for workqueue flushing */
      36             : #else
      37             :         WORK_STRUCT_COLOR_SHIFT = 4,    /* color for workqueue flushing */
      38             : #endif
      39             : 
      40             :         WORK_STRUCT_COLOR_BITS  = 4,
      41             : 
      42             :         WORK_STRUCT_PENDING     = 1 << WORK_STRUCT_PENDING_BIT,
      43             :         WORK_STRUCT_DELAYED     = 1 << WORK_STRUCT_DELAYED_BIT,
      44             :         WORK_STRUCT_PWQ         = 1 << WORK_STRUCT_PWQ_BIT,
      45             :         WORK_STRUCT_LINKED      = 1 << WORK_STRUCT_LINKED_BIT,
      46             : #ifdef CONFIG_DEBUG_OBJECTS_WORK
      47             :         WORK_STRUCT_STATIC      = 1 << WORK_STRUCT_STATIC_BIT,
      48             : #else
      49             :         WORK_STRUCT_STATIC      = 0,
      50             : #endif
      51             : 
      52             :         /*
       53             :          * The last color is "no color", used for works which don't
       54             :          * participate in workqueue flushing.
      55             :          */
      56             :         WORK_NR_COLORS          = (1 << WORK_STRUCT_COLOR_BITS) - 1,
      57             :         WORK_NO_COLOR           = WORK_NR_COLORS,
      58             : 
      59             :         /* not bound to any CPU, prefer the local CPU */
      60             :         WORK_CPU_UNBOUND        = NR_CPUS,
      61             : 
      62             :         /*
       63             :          * Reserve 8 bits off of pwq pointer w/ debugobjects turned off.
      64             :          * This makes pwqs aligned to 256 bytes and allows 15 workqueue
      65             :          * flush colors.
      66             :          */
      67             :         WORK_STRUCT_FLAG_BITS   = WORK_STRUCT_COLOR_SHIFT +
      68             :                                   WORK_STRUCT_COLOR_BITS,
      69             : 
      70             :         /* data contains off-queue information when !WORK_STRUCT_PWQ */
      71             :         WORK_OFFQ_FLAG_BASE     = WORK_STRUCT_COLOR_SHIFT,
      72             : 
      73             :         WORK_OFFQ_CANCELING     = (1 << WORK_OFFQ_FLAG_BASE),
      74             : 
      75             :         /*
      76             :          * When a work item is off queue, its high bits point to the last
      77             :          * pool it was on.  Cap at 31 bits and use the highest number to
      78             :          * indicate that no pool is associated.
      79             :          */
      80             :         WORK_OFFQ_FLAG_BITS     = 1,
      81             :         WORK_OFFQ_POOL_SHIFT    = WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
      82             :         WORK_OFFQ_LEFT          = BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
      83             :         WORK_OFFQ_POOL_BITS     = WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
      84             :         WORK_OFFQ_POOL_NONE     = (1LU << WORK_OFFQ_POOL_BITS) - 1,
      85             : 
      86             :         /* convenience constants */
      87             :         WORK_STRUCT_FLAG_MASK   = (1UL << WORK_STRUCT_FLAG_BITS) - 1,
      88             :         WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
      89             :         WORK_STRUCT_NO_POOL     = (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT,
      90             : 
      91             :         /* bit mask for work_busy() return values */
      92             :         WORK_BUSY_PENDING       = 1 << 0,
      93             :         WORK_BUSY_RUNNING       = 1 << 1,
      94             : 
      95             :         /* maximum string length for set_worker_desc() */
      96             :         WORKER_DESC_LEN         = 24,
      97             : };
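
/*
 * Illustrative layout sketch (editor's example, not part of the header):
 * with debugobjects off, WORK_STRUCT_COLOR_SHIFT = 4 and
 * WORK_STRUCT_COLOR_BITS = 4, so WORK_STRUCT_FLAG_BITS = 8 and
 * work->data is laid out as:
 *
 *   bits 0-3 : PENDING / DELAYED / PWQ / LINKED flags
 *   bits 4-7 : flush color
 *   bits 8.. : pwq pointer (if WORK_STRUCT_PWQ is set), or the
 *              off-queue flags and last-pool ID (WORK_OFFQ_*)
 */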
      98             : 
      99             : struct work_struct {
     100             :         atomic_long_t data;
     101             :         struct list_head entry;
     102             :         work_func_t func;
     103             : #ifdef CONFIG_LOCKDEP
     104             :         struct lockdep_map lockdep_map;
     105             : #endif
     106             : };
     107             : 
     108             : #define WORK_DATA_INIT()        ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL)
     109             : #define WORK_DATA_STATIC_INIT() \
     110             :         ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC)
     111             : 
     112             : struct delayed_work {
     113             :         struct work_struct work;
     114             :         struct timer_list timer;
     115             : 
     116             :         /* target workqueue and CPU ->timer uses to queue ->work */
     117             :         struct workqueue_struct *wq;
     118             :         int cpu;
     119             : };
     120             : 
     121             : /*
     122             :  * A struct for workqueue attributes.  This can be used to change
     123             :  * attributes of an unbound workqueue.
     124             :  *
     125             :  * Unlike other fields, ->no_numa isn't a property of a worker_pool.  It
      126             :  * only modifies how apply_workqueue_attrs() selects pools and thus doesn't
     127             :  * participate in pool hash calculations or equality comparisons.
     128             :  */
     129             : struct workqueue_attrs {
     130             :         int                     nice;           /* nice level */
     131             :         cpumask_var_t           cpumask;        /* allowed CPUs */
     132             :         bool                    no_numa;        /* disable NUMA affinity */
     133             : };
     134             : 
     135             : static inline struct delayed_work *to_delayed_work(struct work_struct *work)
     136             : {
     137             :         return container_of(work, struct delayed_work, work);
     138             : }
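
/*
 * Example (a sketch, not from this header): a delayed_work handler can
 * recover its embedding object with to_delayed_work() + container_of().
 * struct my_device, my_poll_fn and the field names are assumed.
 */
struct my_device {
        struct delayed_work     poll_work;
        int                     status;
};

static void my_poll_fn(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);
        struct my_device *dev = container_of(dwork, struct my_device,
                                             poll_work);

        /* ... poll the hardware and update dev->status ... */
}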
     139             : 
     140             : struct execute_work {
     141             :         struct work_struct work;
     142             : };
     143             : 
     144             : #ifdef CONFIG_LOCKDEP
     145             : /*
     146             :  * NB: because we have to copy the lockdep_map, setting _key
     147             :  * here is required, otherwise it could get initialised to the
     148             :  * copy of the lockdep_map!
     149             :  */
     150             : #define __WORK_INIT_LOCKDEP_MAP(n, k) \
     151             :         .lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
     152             : #else
     153             : #define __WORK_INIT_LOCKDEP_MAP(n, k)
     154             : #endif
     155             : 
     156             : #define __WORK_INITIALIZER(n, f) {                                      \
     157             :         .data = WORK_DATA_STATIC_INIT(),                                \
     158             :         .entry  = { &(n).entry, &(n).entry },                           \
     159             :         .func = (f),                                                    \
     160             :         __WORK_INIT_LOCKDEP_MAP(#n, &(n))                           \
     161             :         }
     162             : 
     163             : #define __DELAYED_WORK_INITIALIZER(n, f, tflags) {                      \
     164             :         .work = __WORK_INITIALIZER((n).work, (f)),                      \
     165             :         .timer = __TIMER_INITIALIZER(delayed_work_timer_fn,             \
     166             :                                      0, (unsigned long)&(n),                \
     167             :                                      (tflags) | TIMER_IRQSAFE),         \
     168             :         }
     169             : 
     170             : #define DECLARE_WORK(n, f)                                              \
     171             :         struct work_struct n = __WORK_INITIALIZER(n, f)
     172             : 
     173             : #define DECLARE_DELAYED_WORK(n, f)                                      \
     174             :         struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)
     175             : 
     176             : #define DECLARE_DEFERRABLE_WORK(n, f)                                   \
     177             :         struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)
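
/*
 * Example (sketch): compile-time declaration of a work item.  hello_fn
 * and hello_work are assumed names; the handler later runs in process
 * context once something calls schedule_work(&hello_work).
 */
static void hello_fn(struct work_struct *work)
{
        pr_info("hello from the workqueue\n");
}
static DECLARE_WORK(hello_work, hello_fn);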
     178             : 
     179             : #ifdef CONFIG_DEBUG_OBJECTS_WORK
     180             : extern void __init_work(struct work_struct *work, int onstack);
     181             : extern void destroy_work_on_stack(struct work_struct *work);
     182             : extern void destroy_delayed_work_on_stack(struct delayed_work *work);
     183             : static inline unsigned int work_static(struct work_struct *work)
     184             : {
     185             :         return *work_data_bits(work) & WORK_STRUCT_STATIC;
     186             : }
     187             : #else
     188             : static inline void __init_work(struct work_struct *work, int onstack) { }
     189             : static inline void destroy_work_on_stack(struct work_struct *work) { }
     190             : static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
     191             : static inline unsigned int work_static(struct work_struct *work) { return 0; }
     192             : #endif
     193             : 
     194             : /*
     195             :  * initialize all of a work item in one go
     196             :  *
     197             :  * NOTE! No point in using "atomic_long_set()": using a direct
     198             :  * assignment of the work data initializer allows the compiler
     199             :  * to generate better code.
     200             :  */
     201             : #ifdef CONFIG_LOCKDEP
     202             : #define __INIT_WORK(_work, _func, _onstack)                             \
     203             :         do {                                                            \
     204             :                 static struct lock_class_key __key;                     \
     205             :                                                                         \
     206             :                 __init_work((_work), _onstack);                         \
     207             :                 (_work)->data = (atomic_long_t) WORK_DATA_INIT();    \
     208             :                 lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0); \
     209             :                 INIT_LIST_HEAD(&(_work)->entry);                 \
     210             :                 (_work)->func = (_func);                             \
     211             :         } while (0)
     212             : #else
     213             : #define __INIT_WORK(_work, _func, _onstack)                             \
     214             :         do {                                                            \
     215             :                 __init_work((_work), _onstack);                         \
     216             :                 (_work)->data = (atomic_long_t) WORK_DATA_INIT();    \
     217             :                 INIT_LIST_HEAD(&(_work)->entry);                 \
     218             :                 (_work)->func = (_func);                             \
     219             :         } while (0)
     220             : #endif
     221             : 
     222             : #define INIT_WORK(_work, _func)                                         \
     223             :         do {                                                            \
     224             :                 __INIT_WORK((_work), (_func), 0);                       \
     225             :         } while (0)
     226             : 
     227             : #define INIT_WORK_ONSTACK(_work, _func)                                 \
     228             :         do {                                                            \
     229             :                 __INIT_WORK((_work), (_func), 1);                       \
     230             :         } while (0)
     231             : 
     232             : #define __INIT_DELAYED_WORK(_work, _func, _tflags)                      \
     233             :         do {                                                            \
     234             :                 INIT_WORK(&(_work)->work, (_func));                      \
     235             :                 __setup_timer(&(_work)->timer, delayed_work_timer_fn,    \
     236             :                               (unsigned long)(_work),                   \
     237             :                               (_tflags) | TIMER_IRQSAFE);               \
     238             :         } while (0)
     239             : 
     240             : #define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)              \
     241             :         do {                                                            \
     242             :                 INIT_WORK_ONSTACK(&(_work)->work, (_func));              \
     243             :                 __setup_timer_on_stack(&(_work)->timer,                  \
     244             :                                        delayed_work_timer_fn,           \
     245             :                                        (unsigned long)(_work),          \
     246             :                                        (_tflags) | TIMER_IRQSAFE);      \
     247             :         } while (0)
     248             : 
     249             : #define INIT_DELAYED_WORK(_work, _func)                                 \
     250             :         __INIT_DELAYED_WORK(_work, _func, 0)
     251             : 
     252             : #define INIT_DELAYED_WORK_ONSTACK(_work, _func)                         \
     253             :         __INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)
     254             : 
     255             : #define INIT_DEFERRABLE_WORK(_work, _func)                              \
     256             :         __INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)
     257             : 
     258             : #define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)                      \
     259             :         __INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)
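
/*
 * Example (sketch): run-time initialization of embedded work items,
 * e.g. from a driver's probe path.  struct my_ctx, my_work_fn and
 * my_dwork_fn are assumed names.
 */
struct my_ctx {
        struct work_struct      work;
        struct delayed_work     dwork;
};

static void my_ctx_setup(struct my_ctx *ctx)
{
        INIT_WORK(&ctx->work, my_work_fn);
        INIT_DELAYED_WORK(&ctx->dwork, my_dwork_fn);
}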
     260             : 
     261             : /**
     262             :  * work_pending - Find out whether a work item is currently pending
     263             :  * @work: The work item in question
     264             :  */
     265             : #define work_pending(work) \
     266             :         test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))
     267             : 
     268             : /**
     269             :  * delayed_work_pending - Find out whether a delayable work item is currently
     270             :  * pending
      271             :  * @w: The delayable work item in question
     272             :  */
     273             : #define delayed_work_pending(w) \
     274             :         work_pending(&(w)->work)
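
/*
 * Note (editor's sketch): work_pending() is only an advisory peek.  The
 * queueing functions already test-and-set the PENDING bit atomically,
 * so the check below is racy and queue_work() alone normally suffices.
 * my_wq and struct my_ctx are assumed names.
 */
static void my_kick(struct my_ctx *ctx)
{
        if (!work_pending(&ctx->work))  /* racy peek, illustration only */
                queue_work(my_wq, &ctx->work);
}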
     275             : 
     276             : /*
     277             :  * Workqueue flags and constants.  For details, please refer to
     278             :  * Documentation/workqueue.txt.
     279             :  */
     280             : enum {
     281             :         WQ_UNBOUND              = 1 << 1, /* not bound to any cpu */
     282             :         WQ_FREEZABLE            = 1 << 2, /* freeze during suspend */
     283             :         WQ_MEM_RECLAIM          = 1 << 3, /* may be used for memory reclaim */
     284             :         WQ_HIGHPRI              = 1 << 4, /* high priority */
     285             :         WQ_CPU_INTENSIVE        = 1 << 5, /* cpu intensive workqueue */
     286             :         WQ_SYSFS                = 1 << 6, /* visible in sysfs, see wq_sysfs_register() */
     287             : 
     288             :         /*
     289             :          * Per-cpu workqueues are generally preferred because they tend to
     290             :          * show better performance thanks to cache locality.  Per-cpu
     291             :          * workqueues exclude the scheduler from choosing the CPU to
     292             :          * execute the worker threads, which has an unfortunate side effect
     293             :          * of increasing power consumption.
     294             :          *
     295             :          * The scheduler considers a CPU idle if it doesn't have any task
     296             :          * to execute and tries to keep idle cores idle to conserve power;
     297             :          * however, for example, a per-cpu work item scheduled from an
     298             :          * interrupt handler on an idle CPU will force the scheduler to
      299             :          * execute the work item on that CPU breaking the idleness, which in
     300             :          * turn may lead to more scheduling choices which are sub-optimal
     301             :          * in terms of power consumption.
     302             :          *
     303             :          * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
     304             :          * but become unbound if workqueue.power_efficient kernel param is
      305             :          * specified.  Per-cpu workqueues which contribute significantly
      306             :          * to power consumption are identified and marked with this flag,
      307             :          * and enabling the power_efficient mode then leads to noticeable
      308             :          * power saving at the cost of a small performance penalty for
      309             :          * those workqueues.
     310             :          *
     311             :          * http://thread.gmane.org/gmane.linux.kernel/1480396
     312             :          */
     313             :         WQ_POWER_EFFICIENT      = 1 << 7,
     314             : 
     315             :         __WQ_DRAINING           = 1 << 16, /* internal: workqueue is draining */
     316             :         __WQ_ORDERED            = 1 << 17, /* internal: workqueue is ordered */
     317             : 
     318             :         WQ_MAX_ACTIVE           = 512,    /* I like 512, better ideas? */
     319             :         WQ_MAX_UNBOUND_PER_CPU  = 4,      /* 4 * #cpus for unbound wq */
     320             :         WQ_DFL_ACTIVE           = WQ_MAX_ACTIVE / 2,
     321             : };
     322             : 
     323             : /* unbound wq's aren't per-cpu, scale max_active according to #cpus */
     324             : #define WQ_UNBOUND_MAX_ACTIVE   \
     325             :         max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)
     326             : 
     327             : /*
     328             :  * System-wide workqueues which are always present.
     329             :  *
     330             :  * system_wq is the one used by schedule[_delayed]_work[_on]().
     331             :  * Multi-CPU multi-threaded.  There are users which expect relatively
     332             :  * short queue flush time.  Don't queue works which can run for too
     333             :  * long.
     334             :  *
     335             :  * system_highpri_wq is similar to system_wq but for work items which
     336             :  * require WQ_HIGHPRI.
     337             :  *
     338             :  * system_long_wq is similar to system_wq but may host long running
     339             :  * works.  Queue flushing might take relatively long.
     340             :  *
      341             :  * system_unbound_wq is an unbound workqueue.  Workers are not bound to
      342             :  * any specific CPU, are not concurrency managed, and all queued works
      343             :  * are executed immediately as long as the max_active limit is not
      344             :  * reached and resources are available.
     345             :  *
     346             :  * system_freezable_wq is equivalent to system_wq except that it's
     347             :  * freezable.
     348             :  *
      349             :  * *_power_efficient_wq are inclined towards saving power and are
      350             :  * converted into WQ_UNBOUND variants if 'wq_power_efficient' is enabled;
      351             :  * otherwise, they are the same as their non-power-efficient counterparts -
      352             :  * e.g. system_power_efficient_wq is identical to system_wq if
      353             :  * 'wq_power_efficient' is disabled.  See WQ_POWER_EFFICIENT for more info.
     354             :  */
     355             : extern struct workqueue_struct *system_wq;
     356             : extern struct workqueue_struct *system_highpri_wq;
     357             : extern struct workqueue_struct *system_long_wq;
     358             : extern struct workqueue_struct *system_unbound_wq;
     359             : extern struct workqueue_struct *system_freezable_wq;
     360             : extern struct workqueue_struct *system_power_efficient_wq;
     361             : extern struct workqueue_struct *system_freezable_power_efficient_wq;
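
/*
 * Example (sketch): choosing a system workqueue.  Short, quick items
 * can go to system_wq (via schedule_work()); items which may run long
 * belong on system_long_wq or system_unbound_wq instead.  struct my_ctx
 * is an assumed name.
 */
static void my_submit(struct my_ctx *ctx)
{
        queue_work(system_unbound_wq, &ctx->work);      /* may run long */
}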
     362             : 
     363             : extern struct workqueue_struct *
     364             : __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
     365             :         struct lock_class_key *key, const char *lock_name, ...) __printf(1, 6);
     366             : 
     367             : /**
     368             :  * alloc_workqueue - allocate a workqueue
     369             :  * @fmt: printf format for the name of the workqueue
     370             :  * @flags: WQ_* flags
     371             :  * @max_active: max in-flight work items, 0 for default
     372             :  * @args: args for @fmt
     373             :  *
     374             :  * Allocate a workqueue with the specified parameters.  For detailed
     375             :  * information on WQ_* flags, please refer to Documentation/workqueue.txt.
     376             :  *
      377             :  * The __lock_name macro dance is to guarantee that a single lock_class_key
      378             :  * doesn't end up with different names, which isn't allowed by lockdep.
     379             :  *
     380             :  * RETURNS:
     381             :  * Pointer to the allocated workqueue on success, %NULL on failure.
     382             :  */
     383             : #ifdef CONFIG_LOCKDEP
     384             : #define alloc_workqueue(fmt, flags, max_active, args...)                \
     385             : ({                                                                      \
     386             :         static struct lock_class_key __key;                             \
     387             :         const char *__lock_name;                                        \
     388             :                                                                         \
     389             :         __lock_name = #fmt#args;                                        \
     390             :                                                                         \
     391             :         __alloc_workqueue_key((fmt), (flags), (max_active),             \
     392             :                               &__key, __lock_name, ##args);         \
     393             : })
     394             : #else
     395             : #define alloc_workqueue(fmt, flags, max_active, args...)                \
     396             :         __alloc_workqueue_key((fmt), (flags), (max_active),             \
     397             :                               NULL, NULL, ##args)
     398             : #endif
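
/*
 * Example (sketch): allocating a dedicated workqueue.  my_wq and
 * my_init are assumed names; passing 0 for @max_active selects the
 * default (WQ_DFL_ACTIVE).
 */
static struct workqueue_struct *my_wq;

static int __init my_init(void)
{
        my_wq = alloc_workqueue("my_wq", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
        if (!my_wq)
                return -ENOMEM;
        return 0;
}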
     399             : 
     400             : /**
     401             :  * alloc_ordered_workqueue - allocate an ordered workqueue
     402             :  * @fmt: printf format for the name of the workqueue
     403             :  * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
     404             :  * @args: args for @fmt
     405             :  *
     406             :  * Allocate an ordered workqueue.  An ordered workqueue executes at
     407             :  * most one work item at any given time in the queued order.  They are
     408             :  * implemented as unbound workqueues with @max_active of one.
     409             :  *
     410             :  * RETURNS:
     411             :  * Pointer to the allocated workqueue on success, %NULL on failure.
     412             :  */
     413             : #define alloc_ordered_workqueue(fmt, flags, args...)                    \
     414             :         alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
     415             : 
     416             : #define create_workqueue(name)                                          \
     417             :         alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, (name))
     418             : #define create_freezable_workqueue(name)                                \
     419             :         alloc_workqueue("%s", WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, \
     420             :                         1, (name))
     421             : #define create_singlethread_workqueue(name)                             \
     422             :         alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name)
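
/*
 * Example (sketch): an ordered workqueue executes at most one item at a
 * time, in queueing order - useful when work items must never overlap.
 * my_ordered_wq is an assumed name.
 */
static struct workqueue_struct *my_ordered_wq;

static int my_ordered_init(void)
{
        my_ordered_wq = alloc_ordered_workqueue("my_ordered", WQ_MEM_RECLAIM);
        return my_ordered_wq ? 0 : -ENOMEM;
}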
     423             : 
     424             : extern void destroy_workqueue(struct workqueue_struct *wq);
     425             : 
     426             : struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask);
     427             : void free_workqueue_attrs(struct workqueue_attrs *attrs);
     428             : int apply_workqueue_attrs(struct workqueue_struct *wq,
     429             :                           const struct workqueue_attrs *attrs);
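
/*
 * Example (sketch): tuning an unbound workqueue with workqueue_attrs -
 * here pinning it to CPU 2 at a higher priority.  The wq argument must
 * be an unbound workqueue; function and variable names are assumed.
 */
static int my_tune_wq(struct workqueue_struct *unbound_wq)
{
        struct workqueue_attrs *attrs;
        int ret;

        attrs = alloc_workqueue_attrs(GFP_KERNEL);
        if (!attrs)
                return -ENOMEM;
        attrs->nice = -10;                      /* raise priority */
        cpumask_copy(attrs->cpumask, cpumask_of(2));
        ret = apply_workqueue_attrs(unbound_wq, attrs);
        free_workqueue_attrs(attrs);
        return ret;
}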
     430             : 
     431             : extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
     432             :                         struct work_struct *work);
     433             : extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
     434             :                         struct delayed_work *work, unsigned long delay);
     435             : extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
     436             :                         struct delayed_work *dwork, unsigned long delay);
     437             : 
     438             : extern void flush_workqueue(struct workqueue_struct *wq);
     439             : extern void drain_workqueue(struct workqueue_struct *wq);
     440             : extern void flush_scheduled_work(void);
     441             : 
     442             : extern int schedule_on_each_cpu(work_func_t func);
     443             : 
     444             : int execute_in_process_context(work_func_t fn, struct execute_work *);
     445             : 
     446             : extern bool flush_work(struct work_struct *work);
     447             : extern bool cancel_work_sync(struct work_struct *work);
     448             : 
     449             : extern bool flush_delayed_work(struct delayed_work *dwork);
     450             : extern bool cancel_delayed_work(struct delayed_work *dwork);
     451             : extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
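
/*
 * Example (sketch): orderly teardown - cancel items synchronously
 * before freeing their containers or destroying the workqueue they
 * were queued on.  struct my_ctx and my_wq are assumed names.
 */
static void my_teardown(struct my_ctx *ctx)
{
        cancel_delayed_work_sync(&ctx->dwork);
        cancel_work_sync(&ctx->work);
        destroy_workqueue(my_wq);
}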
     452             : 
     453             : extern void workqueue_set_max_active(struct workqueue_struct *wq,
     454             :                                      int max_active);
     455             : extern bool current_is_workqueue_rescuer(void);
     456             : extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
     457             : extern unsigned int work_busy(struct work_struct *work);
     458             : extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
     459             : extern void print_worker_info(const char *log_lvl, struct task_struct *task);
     460             : 
     461             : /**
     462             :  * queue_work - queue work on a workqueue
     463             :  * @wq: workqueue to use
     464             :  * @work: work to queue
     465             :  *
     466             :  * Returns %false if @work was already on a queue, %true otherwise.
     467             :  *
     468             :  * We queue the work to the CPU on which it was submitted, but if the CPU dies
     469             :  * it can be processed by another CPU.
     470             :  */
     471             : static inline bool queue_work(struct workqueue_struct *wq,
     472             :                               struct work_struct *work)
     473             : {
     474      364945 :         return queue_work_on(WORK_CPU_UNBOUND, wq, work);
     475             : }
     476             : 
     477             : /**
     478             :  * queue_delayed_work - queue work on a workqueue after delay
     479             :  * @wq: workqueue to use
     480             :  * @dwork: delayable work to queue
     481             :  * @delay: number of jiffies to wait before queueing
     482             :  *
     483             :  * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
     484             :  */
     485             : static inline bool queue_delayed_work(struct workqueue_struct *wq,
     486             :                                       struct delayed_work *dwork,
     487             :                                       unsigned long delay)
     488             : {
     489             :         return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
     490             : }
     491             : 
     492             : /**
     493             :  * mod_delayed_work - modify delay of or queue a delayed work
     494             :  * @wq: workqueue to use
     495             :  * @dwork: work to queue
     496             :  * @delay: number of jiffies to wait before queueing
     497             :  *
     498             :  * mod_delayed_work_on() on local CPU.
     499             :  */
     500             : static inline bool mod_delayed_work(struct workqueue_struct *wq,
     501             :                                     struct delayed_work *dwork,
     502             :                                     unsigned long delay)
     503             : {
     504             :         return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
     505             : }
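
/*
 * Example (sketch): debouncing with mod_delayed_work() - every event
 * (re)starts a 100ms timer, so the handler fires once per burst of
 * events.  struct my_ctx is an assumed name.
 */
static void my_event(struct my_ctx *ctx)
{
        mod_delayed_work(system_wq, &ctx->dwork, msecs_to_jiffies(100));
}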
     506             : 
     507             : /**
     508             :  * schedule_work_on - put work task on a specific cpu
     509             :  * @cpu: cpu to put the work task on
     510             :  * @work: job to be done
     511             :  *
      512             :  * This puts a job on a specific cpu.
     513             :  */
     514             : static inline bool schedule_work_on(int cpu, struct work_struct *work)
     515             : {
     516             :         return queue_work_on(cpu, system_wq, work);
     517             : }
     518             : 
     519             : /**
     520             :  * schedule_work - put work task in global workqueue
     521             :  * @work: job to be done
     522             :  *
     523             :  * Returns %false if @work was already on the kernel-global workqueue and
     524             :  * %true otherwise.
     525             :  *
     526             :  * This puts a job in the kernel-global workqueue if it was not already
     527             :  * queued and leaves it in the same position on the kernel-global
     528             :  * workqueue otherwise.
     529             :  */
     530             : static inline bool schedule_work(struct work_struct *work)
     531             : {
     532         256 :         return queue_work(system_wq, work);
     533             : }
     534             : 
     535             : /**
     536             :  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
     537             :  * @cpu: cpu to use
     538             :  * @dwork: job to be done
     539             :  * @delay: number of jiffies to wait
     540             :  *
     541             :  * After waiting for a given time this puts a job in the kernel-global
     542             :  * workqueue on the specified CPU.
     543             :  */
     544             : static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
     545             :                                             unsigned long delay)
     546             : {
     547             :         return queue_delayed_work_on(cpu, system_wq, dwork, delay);
     548             : }
     549             : 
     550             : /**
     551             :  * schedule_delayed_work - put work task in global workqueue after delay
     552             :  * @dwork: job to be done
     553             :  * @delay: number of jiffies to wait or 0 for immediate execution
     554             :  *
     555             :  * After waiting for a given time this puts a job in the kernel-global
     556             :  * workqueue.
     557             :  */
     558             : static inline bool schedule_delayed_work(struct delayed_work *dwork,
     559             :                                          unsigned long delay)
     560             : {
     561             :         return queue_delayed_work(system_wq, dwork, delay);
     562             : }
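
/*
 * Example (sketch): a self-rearming periodic job built on
 * schedule_delayed_work().  Runs roughly once per second until
 * cancel_delayed_work_sync() is called on it; the name is assumed.
 */
static void my_periodic_fn(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);

        /* ... do the periodic job ... */
        schedule_delayed_work(dwork, HZ);
}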
     563             : 
     564             : /**
     565             :  * keventd_up - is workqueue initialized yet?
     566             :  */
     567             : static inline bool keventd_up(void)
     568             : {
     569             :         return system_wq != NULL;
     570             : }
     571             : 
     572             : #ifndef CONFIG_SMP
     573             : static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
     574             : {
     575             :         return fn(arg);
     576             : }
     577             : #else
     578             : long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
     579             : #endif /* CONFIG_SMP */
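
/*
 * Example (sketch): work_on_cpu() runs a function synchronously on the
 * chosen CPU in process context and returns its value - handy for state
 * that must be touched from a particular CPU.  Names are assumed.
 */
static long my_probe_fn(void *arg)
{
        /* executes on the target CPU */
        return 0;
}

static long my_probe_cpu(int cpu)
{
        return work_on_cpu(cpu, my_probe_fn, NULL);
}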
     580             : 
     581             : #ifdef CONFIG_FREEZER
     582             : extern void freeze_workqueues_begin(void);
     583             : extern bool freeze_workqueues_busy(void);
     584             : extern void thaw_workqueues(void);
     585             : #endif /* CONFIG_FREEZER */
     586             : 
     587             : #ifdef CONFIG_SYSFS
     588             : int workqueue_sysfs_register(struct workqueue_struct *wq);
     589             : #else   /* CONFIG_SYSFS */
     590             : static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
     591             : { return 0; }
     592             : #endif  /* CONFIG_SYSFS */
     593             : 
     594             : #endif

Generated by: LCOV version 1.10