LCOV - code coverage report
Current view: top level - include/linux - rcupdate.h (source / functions)
Test: btrfstest.info                          Hit    Total    Coverage
Date: 2014-11-28                  Lines:        3        7      42.9 %
                                  Functions:    0        1       0.0 %

          Line data    Source code
       1             : /*
       2             :  * Read-Copy Update mechanism for mutual exclusion
       3             :  *
       4             :  * This program is free software; you can redistribute it and/or modify
       5             :  * it under the terms of the GNU General Public License as published by
       6             :  * the Free Software Foundation; either version 2 of the License, or
       7             :  * (at your option) any later version.
       8             :  *
       9             :  * This program is distributed in the hope that it will be useful,
      10             :  * but WITHOUT ANY WARRANTY; without even the implied warranty of
      11             :  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
      12             :  * GNU General Public License for more details.
      13             :  *
      14             :  * You should have received a copy of the GNU General Public License
      15             :  * along with this program; if not, you can access it online at
      16             :  * http://www.gnu.org/licenses/gpl-2.0.html.
      17             :  *
      18             :  * Copyright IBM Corporation, 2001
      19             :  *
      20             :  * Author: Dipankar Sarma <dipankar@in.ibm.com>
      21             :  *
      22             :  * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
      23             :  * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
      24             :  * Papers:
      25             :  * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
      26             :  * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
      27             :  *
      28             :  * For detailed explanation of Read-Copy Update mechanism see -
      29             :  *              http://lse.sourceforge.net/locking/rcupdate.html
      30             :  *
      31             :  */
      32             : 
      33             : #ifndef __LINUX_RCUPDATE_H
      34             : #define __LINUX_RCUPDATE_H
      35             : 
      36             : #include <linux/types.h>
      37             : #include <linux/cache.h>
      38             : #include <linux/spinlock.h>
      39             : #include <linux/threads.h>
      40             : #include <linux/cpumask.h>
      41             : #include <linux/seqlock.h>
      42             : #include <linux/lockdep.h>
      43             : #include <linux/completion.h>
      44             : #include <linux/debugobjects.h>
      45             : #include <linux/bug.h>
      46             : #include <linux/compiler.h>
      47             : #include <asm/barrier.h>
      48             : 
      49             : extern int rcu_expedited; /* for sysctl */
      50             : #ifdef CONFIG_RCU_TORTURE_TEST
      51             : extern int rcutorture_runnable; /* for sysctl */
      52             : #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */
      53             : 
      54             : enum rcutorture_type {
      55             :         RCU_FLAVOR,
      56             :         RCU_BH_FLAVOR,
      57             :         RCU_SCHED_FLAVOR,
      58             :         SRCU_FLAVOR,
      59             :         INVALID_RCU_FLAVOR
      60             : };
      61             : 
      62             : #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
      63             : void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
      64             :                             unsigned long *gpnum, unsigned long *completed);
      65             : void rcutorture_record_test_transition(void);
      66             : void rcutorture_record_progress(unsigned long vernum);
      67             : void do_trace_rcu_torture_read(const char *rcutorturename,
      68             :                                struct rcu_head *rhp,
      69             :                                unsigned long secs,
      70             :                                unsigned long c_old,
      71             :                                unsigned long c);
      72             : #else
      73             : static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
      74             :                                           int *flags,
      75             :                                           unsigned long *gpnum,
      76             :                                           unsigned long *completed)
      77             : {
      78             :         *flags = 0;
      79             :         *gpnum = 0;
      80             :         *completed = 0;
      81             : }
      82             : static inline void rcutorture_record_test_transition(void)
      83             : {
      84             : }
      85             : static inline void rcutorture_record_progress(unsigned long vernum)
      86             : {
      87             : }
      88             : #ifdef CONFIG_RCU_TRACE
      89             : void do_trace_rcu_torture_read(const char *rcutorturename,
      90             :                                struct rcu_head *rhp,
      91             :                                unsigned long secs,
      92             :                                unsigned long c_old,
      93             :                                unsigned long c);
      94             : #else
      95             : #define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
      96             :         do { } while (0)
      97             : #endif
      98             : #endif
      99             : 
     100             : #define UINT_CMP_GE(a, b)       (UINT_MAX / 2 >= (a) - (b))
     101             : #define UINT_CMP_LT(a, b)       (UINT_MAX / 2 < (a) - (b))
     102             : #define ULONG_CMP_GE(a, b)      (ULONG_MAX / 2 >= (a) - (b))
     103             : #define ULONG_CMP_LT(a, b)      (ULONG_MAX / 2 < (a) - (b))
     104             : #define ulong2long(a)           (*(long *)(&(a)))
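                      : 
                      : /*
                      :  * These compare modulo wraparound: "a" is treated as being at or after
                      :  * "b" when the unsigned difference a - b is small.  A worked example,
                      :  * with a counter that has just wrapped:
                      :  *
                      :  *	ULONG_CMP_GE(3UL, ULONG_MAX) is true, because 3UL - ULONG_MAX
                      :  *	wraps to 4, which is far below ULONG_MAX / 2, so 3 is "later".
                      :  */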
     105             : 
     106             : /* Exported common interfaces */
     107             : 
     108             : #ifdef CONFIG_PREEMPT_RCU
     109             : 
     110             : /**
     111             :  * call_rcu() - Queue an RCU callback for invocation after a grace period.
     112             :  * @head: structure to be used for queueing the RCU updates.
     113             :  * @func: actual callback function to be invoked after the grace period
     114             :  *
     115             :  * The callback function will be invoked some time after a full grace
     116             :  * period elapses, in other words after all pre-existing RCU read-side
     117             :  * critical sections have completed.  However, the callback function
     118             :  * might well execute concurrently with RCU read-side critical sections
     119             :  * that started after call_rcu() was invoked.  RCU read-side critical
     120             :  * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
     121             :  * and may be nested.
     122             :  *
     123             :  * Note that all CPUs must agree that the grace period extended beyond
      124             :  * all pre-existing RCU read-side critical sections.  On systems with more
     125             :  * than one CPU, this means that when "func()" is invoked, each CPU is
     126             :  * guaranteed to have executed a full memory barrier since the end of its
     127             :  * last RCU read-side critical section whose beginning preceded the call
     128             :  * to call_rcu().  It also means that each CPU executing an RCU read-side
     129             :  * critical section that continues beyond the start of "func()" must have
     130             :  * executed a memory barrier after the call_rcu() but before the beginning
     131             :  * of that RCU read-side critical section.  Note that these guarantees
     132             :  * include CPUs that are offline, idle, or executing in user mode, as
     133             :  * well as CPUs that are executing in the kernel.
     134             :  *
     135             :  * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
     136             :  * resulting RCU callback function "func()", then both CPU A and CPU B are
     137             :  * guaranteed to execute a full memory barrier during the time interval
     138             :  * between the call to call_rcu() and the invocation of "func()" -- even
     139             :  * if CPU A and CPU B are the same CPU (but again only if the system has
     140             :  * more than one CPU).
     141             :  */
     142             : void call_rcu(struct rcu_head *head,
     143             :               void (*func)(struct rcu_head *head));
     144             : 
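                      : /*
                      :  * A minimal usage sketch; struct foo, foo_reclaim(), and old_fp are
                      :  * hypothetical, not part of this header:
                      :  *
                      :  *	struct foo {
                      :  *		int a;
                      :  *		struct rcu_head rcu;
                      :  *	};
                      :  *
                      :  *	static void foo_reclaim(struct rcu_head *rp)
                      :  *	{
                      :  *		kfree(container_of(rp, struct foo, rcu));
                      :  *	}
                      :  *
                      :  *	call_rcu(&old_fp->rcu, foo_reclaim);
                      :  */
                      : 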
     145             : #else /* #ifdef CONFIG_PREEMPT_RCU */
     146             : 
     147             : /* In classic RCU, call_rcu() is just call_rcu_sched(). */
     148             : #define call_rcu        call_rcu_sched
     149             : 
     150             : #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
     151             : 
     152             : /**
      153             :  * call_rcu_bh() - Queue an RCU callback for invocation after a quicker grace period.
     154             :  * @head: structure to be used for queueing the RCU updates.
     155             :  * @func: actual callback function to be invoked after the grace period
     156             :  *
     157             :  * The callback function will be invoked some time after a full grace
     158             :  * period elapses, in other words after all currently executing RCU
     159             :  * read-side critical sections have completed. call_rcu_bh() assumes
     160             :  * that the read-side critical sections end on completion of a softirq
     161             :  * handler. This means that read-side critical sections in process
     162             :  * context must not be interrupted by softirqs. This interface is to be
     163             :  * used when most of the read-side critical sections are in softirq context.
     164             :  * RCU read-side critical sections are delimited by :
     165             :  *  - rcu_read_lock() and  rcu_read_unlock(), if in interrupt context.
     166             :  *  OR
     167             :  *  - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
     168             :  *  These may be nested.
     169             :  *
     170             :  * See the description of call_rcu() for more detailed information on
     171             :  * memory ordering guarantees.
     172             :  */
     173             : void call_rcu_bh(struct rcu_head *head,
     174             :                  void (*func)(struct rcu_head *head));
     175             : 
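                      : /*
                      :  * Hedged sketch of the matching read side; gp and do_something_with()
                      :  * are placeholders:
                      :  *
                      :  *	rcu_read_lock_bh();
                      :  *	p = rcu_dereference_bh(gp);
                      :  *	if (p)
                      :  *		do_something_with(p->a);
                      :  *	rcu_read_unlock_bh();
                      :  */
                      : 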
     176             : /**
      177             :  * call_rcu_sched() - Queue an RCU callback for invocation after sched grace period.
     178             :  * @head: structure to be used for queueing the RCU updates.
     179             :  * @func: actual callback function to be invoked after the grace period
     180             :  *
     181             :  * The callback function will be invoked some time after a full grace
     182             :  * period elapses, in other words after all currently executing RCU
     183             :  * read-side critical sections have completed. call_rcu_sched() assumes
     184             :  * that the read-side critical sections end on enabling of preemption
     185             :  * or on voluntary preemption.
     186             :  * RCU read-side critical sections are delimited by :
     187             :  *  - rcu_read_lock_sched() and  rcu_read_unlock_sched(),
     188             :  *  OR
     189             :  *  anything that disables preemption.
     190             :  *  These may be nested.
     191             :  *
     192             :  * See the description of call_rcu() for more detailed information on
     193             :  * memory ordering guarantees.
     194             :  */
     195             : void call_rcu_sched(struct rcu_head *head,
     196             :                     void (*func)(struct rcu_head *rcu));
     197             : 
     198             : void synchronize_sched(void);
     199             : 
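                      : /*
                      :  * Sketch of a typical RCU-sched update; foo_lock, the ->list linkage,
                      :  * and list_del_rcu() (from <linux/rculist.h>) are assumed context, and
                      :  * synchronize_sched() waits out all preempt-disabled readers before
                      :  * the kfree():
                      :  *
                      :  *	spin_lock(&foo_lock);
                      :  *	list_del_rcu(&p->list);
                      :  *	spin_unlock(&foo_lock);
                      :  *	synchronize_sched();
                      :  *	kfree(p);
                      :  */
                      : 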
     200             : #ifdef CONFIG_PREEMPT_RCU
     201             : 
     202             : void __rcu_read_lock(void);
     203             : void __rcu_read_unlock(void);
     204             : void rcu_read_unlock_special(struct task_struct *t);
     205             : void synchronize_rcu(void);
     206             : 
     207             : /*
     208             :  * Defined as a macro as it is a very low level header included from
     209             :  * areas that don't even know about current.  This gives the rcu_read_lock()
     210             :  * nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other
     211             :  * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
     212             :  */
     213             : #define rcu_preempt_depth() (current->rcu_read_lock_nesting)
     214             : 
     215             : #else /* #ifdef CONFIG_PREEMPT_RCU */
     216             : 
     217             : static inline void __rcu_read_lock(void)
     218             : {
     219     5611392 :         preempt_disable();
     220             : }
     221             : 
     222             : static inline void __rcu_read_unlock(void)
     223             : {
     224     5614468 :         preempt_enable();
     225             : }
     226             : 
     227           0 : static inline void synchronize_rcu(void)
     228             : {
     229         221 :         synchronize_sched();
     230           0 : }
     231             : 
     232             : static inline int rcu_preempt_depth(void)
     233             : {
     234             :         return 0;
     235             : }
     236             : 
     237             : #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
     238             : 
     239             : /* Internal to kernel */
     240             : void rcu_init(void);
     241             : void rcu_sched_qs(int cpu);
     242             : void rcu_bh_qs(int cpu);
     243             : void rcu_check_callbacks(int cpu, int user);
     244             : struct notifier_block;
     245             : void rcu_idle_enter(void);
     246             : void rcu_idle_exit(void);
     247             : void rcu_irq_enter(void);
     248             : void rcu_irq_exit(void);
     249             : 
     250             : #ifdef CONFIG_RCU_STALL_COMMON
     251             : void rcu_sysrq_start(void);
     252             : void rcu_sysrq_end(void);
     253             : #else /* #ifdef CONFIG_RCU_STALL_COMMON */
     254             : static inline void rcu_sysrq_start(void)
     255             : {
     256             : }
     257             : static inline void rcu_sysrq_end(void)
     258             : {
     259             : }
     260             : #endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */
     261             : 
     262             : #ifdef CONFIG_RCU_USER_QS
     263             : void rcu_user_enter(void);
     264             : void rcu_user_exit(void);
     265             : #else
     266             : static inline void rcu_user_enter(void) { }
     267             : static inline void rcu_user_exit(void) { }
     268             : static inline void rcu_user_hooks_switch(struct task_struct *prev,
     269             :                                          struct task_struct *next) { }
     270             : #endif /* CONFIG_RCU_USER_QS */
     271             : 
     272             : /**
     273             :  * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
     274             :  * @a: Code that RCU needs to pay attention to.
     275             :  *
     276             :  * RCU, RCU-bh, and RCU-sched read-side critical sections are forbidden
     277             :  * in the inner idle loop, that is, between the rcu_idle_enter() and
     278             :  * the rcu_idle_exit() -- RCU will happily ignore any such read-side
     279             :  * critical sections.  However, things like powertop need tracepoints
     280             :  * in the inner idle loop.
     281             :  *
     282             :  * This macro provides the way out:  RCU_NONIDLE(do_something_with_RCU())
      283             :  * will tell RCU that it needs to pay attention, invoke its argument
     284             :  * (in this example, a call to the do_something_with_RCU() function),
     285             :  * and then tell RCU to go back to ignoring this CPU.  It is permissible
     286             :  * to nest RCU_NONIDLE() wrappers, but the nesting level is currently
     287             :  * quite limited.  If deeper nesting is required, it will be necessary
     288             :  * to adjust DYNTICK_TASK_NESTING_VALUE accordingly.
     289             :  */
     290             : #define RCU_NONIDLE(a) \
     291             :         do { \
     292             :                 rcu_irq_enter(); \
     293             :                 do { a; } while (0); \
     294             :                 rcu_irq_exit(); \
     295             :         } while (0)
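                      : 
                      : /*
                      :  * For example, a tracepoint in the inner idle loop (trace_foo_event()
                      :  * is a made-up tracepoint) might be wrapped as:
                      :  *
                      :  *	RCU_NONIDLE(trace_foo_event(arg));
                      :  */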
     296             : 
     297             : #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP)
     298             : bool __rcu_is_watching(void);
     299             : #endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
     300             : 
     301             : /*
     302             :  * Infrastructure to implement the synchronize_() primitives in
     303             :  * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
     304             :  */
     305             : 
     306             : typedef void call_rcu_func_t(struct rcu_head *head,
     307             :                              void (*func)(struct rcu_head *head));
     308             : void wait_rcu_gp(call_rcu_func_t crf);
     309             : 
     310             : #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
     311             : #include <linux/rcutree.h>
     312             : #elif defined(CONFIG_TINY_RCU)
     313             : #include <linux/rcutiny.h>
     314             : #else
     315             : #error "Unknown RCU implementation specified to kernel configuration"
     316             : #endif
     317             : 
     318             : /*
     319             :  * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic
     320             :  * initialization and destruction of rcu_head on the stack. rcu_head structures
     321             :  * allocated dynamically in the heap or defined statically don't need any
     322             :  * initialization.
     323             :  */
     324             : #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
     325             : void init_rcu_head(struct rcu_head *head);
     326             : void destroy_rcu_head(struct rcu_head *head);
     327             : void init_rcu_head_on_stack(struct rcu_head *head);
     328             : void destroy_rcu_head_on_stack(struct rcu_head *head);
     329             : #else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
     330             : static inline void init_rcu_head(struct rcu_head *head)
     331             : {
     332             : }
     333             : 
     334             : static inline void destroy_rcu_head(struct rcu_head *head)
     335             : {
     336             : }
     337             : 
     338             : static inline void init_rcu_head_on_stack(struct rcu_head *head)
     339             : {
     340             : }
     341             : 
     342             : static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
     343             : {
     344             : }
     345             : #endif  /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
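                      : 
                      : /*
                      :  * On-stack sketch, loosely following the kernel's wakeme_after_rcu()
                      :  * pattern; wakeup_cb() and the done completion are illustrative, and
                      :  * the callback must not try to free the stack-resident rcu_head:
                      :  *
                      :  *	struct rcu_head head;
                      :  *
                      :  *	init_rcu_head_on_stack(&head);
                      :  *	call_rcu(&head, wakeup_cb);
                      :  *	wait_for_completion(&done);
                      :  *	destroy_rcu_head_on_stack(&head);
                      :  */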
     346             : 
     347             : #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
     348             : bool rcu_lockdep_current_cpu_online(void);
     349             : #else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
     350             : static inline bool rcu_lockdep_current_cpu_online(void)
     351             : {
      352             :         return true;
     353             : }
     354             : #endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
     355             : 
     356             : #ifdef CONFIG_DEBUG_LOCK_ALLOC
     357             : 
     358             : static inline void rcu_lock_acquire(struct lockdep_map *map)
     359             : {
     360             :         lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_);
     361             : }
     362             : 
     363             : static inline void rcu_lock_release(struct lockdep_map *map)
     364             : {
     365             :         lock_release(map, 1, _THIS_IP_);
     366             : }
     367             : 
     368             : extern struct lockdep_map rcu_lock_map;
     369             : extern struct lockdep_map rcu_bh_lock_map;
     370             : extern struct lockdep_map rcu_sched_lock_map;
     371             : extern struct lockdep_map rcu_callback_map;
     372             : int debug_lockdep_rcu_enabled(void);
     373             : 
     374             : /**
     375             :  * rcu_read_lock_held() - might we be in RCU read-side critical section?
     376             :  *
     377             :  * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
     378             :  * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
     379             :  * this assumes we are in an RCU read-side critical section unless it can
     380             :  * prove otherwise.  This is useful for debug checks in functions that
     381             :  * require that they be called within an RCU read-side critical section.
     382             :  *
     383             :  * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
     384             :  * and while lockdep is disabled.
     385             :  *
     386             :  * Note that rcu_read_lock() and the matching rcu_read_unlock() must
     387             :  * occur in the same context, for example, it is illegal to invoke
     388             :  * rcu_read_unlock() in process context if the matching rcu_read_lock()
     389             :  * was invoked from within an irq handler.
     390             :  *
     391             :  * Note that rcu_read_lock() is disallowed if the CPU is either idle or
     392             :  * offline from an RCU perspective, so check for those as well.
     393             :  */
     394             : static inline int rcu_read_lock_held(void)
     395             : {
     396             :         if (!debug_lockdep_rcu_enabled())
     397             :                 return 1;
     398             :         if (!rcu_is_watching())
     399             :                 return 0;
     400             :         if (!rcu_lockdep_current_cpu_online())
     401             :                 return 0;
     402             :         return lock_is_held(&rcu_lock_map);
     403             : }
     404             : 
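                      : /*
                      :  * Typical debug-check usage in a function that must run under
                      :  * rcu_read_lock() (foo_lookup() is hypothetical):
                      :  *
                      :  *	struct foo *foo_lookup(int key)
                      :  *	{
                      :  *		WARN_ON_ONCE(!rcu_read_lock_held());
                      :  *		...
                      :  *	}
                      :  */
                      : 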
     405             : /*
     406             :  * rcu_read_lock_bh_held() is defined out of line to avoid #include-file
     407             :  * hell.
     408             :  */
     409             : int rcu_read_lock_bh_held(void);
     410             : 
     411             : /**
     412             :  * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
     413             :  *
     414             :  * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
     415             :  * RCU-sched read-side critical section.  In absence of
     416             :  * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
     417             :  * critical section unless it can prove otherwise.  Note that disabling
     418             :  * of preemption (including disabling irqs) counts as an RCU-sched
     419             :  * read-side critical section.  This is useful for debug checks in functions
      420             :  * that require that they be called within an RCU-sched read-side
     421             :  * critical section.
     422             :  *
      423             :  * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
     424             :  * and while lockdep is disabled.
     425             :  *
     426             :  * Note that if the CPU is in the idle loop from an RCU point of
      427             :  * view (i.e., we are in the section between rcu_idle_enter() and
     428             :  * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
     429             :  * did an rcu_read_lock().  The reason for this is that RCU ignores CPUs
     430             :  * that are in such a section, considering these as in extended quiescent
     431             :  * state, so such a CPU is effectively never in an RCU read-side critical
     432             :  * section regardless of what RCU primitives it invokes.  This state of
     433             :  * affairs is required --- we need to keep an RCU-free window in idle
     434             :  * where the CPU may possibly enter into low power mode. This way we can
     435             :  * notice an extended quiescent state to other CPUs that started a grace
     436             :  * period. Otherwise we would delay any grace period as long as we run in
     437             :  * the idle task.
     438             :  *
      439             :  * Similarly, we avoid claiming an RCU-sched read lock held if the current
     440             :  * CPU is offline.
     441             :  */
     442             : #ifdef CONFIG_PREEMPT_COUNT
     443             : static inline int rcu_read_lock_sched_held(void)
     444             : {
     445             :         int lockdep_opinion = 0;
     446             : 
     447             :         if (!debug_lockdep_rcu_enabled())
     448             :                 return 1;
     449             :         if (!rcu_is_watching())
     450             :                 return 0;
     451             :         if (!rcu_lockdep_current_cpu_online())
     452             :                 return 0;
     453             :         if (debug_locks)
     454             :                 lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
     455             :         return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
     456             : }
     457             : #else /* #ifdef CONFIG_PREEMPT_COUNT */
     458             : static inline int rcu_read_lock_sched_held(void)
     459             : {
     460             :         return 1;
     461             : }
     462             : #endif /* #else #ifdef CONFIG_PREEMPT_COUNT */
     463             : 
     464             : #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
     465             : 
     466             : # define rcu_lock_acquire(a)            do { } while (0)
     467             : # define rcu_lock_release(a)            do { } while (0)
     468             : 
     469             : static inline int rcu_read_lock_held(void)
     470             : {
     471             :         return 1;
     472             : }
     473             : 
     474             : static inline int rcu_read_lock_bh_held(void)
     475             : {
     476             :         return 1;
     477             : }
     478             : 
     479             : #ifdef CONFIG_PREEMPT_COUNT
     480             : static inline int rcu_read_lock_sched_held(void)
     481             : {
     482             :         return preempt_count() != 0 || irqs_disabled();
     483             : }
     484             : #else /* #ifdef CONFIG_PREEMPT_COUNT */
     485             : static inline int rcu_read_lock_sched_held(void)
     486             : {
     487             :         return 1;
     488             : }
     489             : #endif /* #else #ifdef CONFIG_PREEMPT_COUNT */
     490             : 
     491             : #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
     492             : 
     493             : #ifdef CONFIG_PROVE_RCU
     494             : 
     495             : /**
     496             :  * rcu_lockdep_assert - emit lockdep splat if specified condition not met
     497             :  * @c: condition to check
     498             :  * @s: informative message
     499             :  */
     500             : #define rcu_lockdep_assert(c, s)                                        \
     501             :         do {                                                            \
     502             :                 static bool __section(.data.unlikely) __warned;         \
     503             :                 if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \
     504             :                         __warned = true;                                \
     505             :                         lockdep_rcu_suspicious(__FILE__, __LINE__, s);  \
     506             :                 }                                                       \
     507             :         } while (0)
     508             : 
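                      : /*
                      :  * For instance, a function callable under either rcu_read_lock() or a
                      :  * hypothetical mylock might assert:
                      :  *
                      :  *	rcu_lockdep_assert(rcu_read_lock_held() ||
                      :  *			   lockdep_is_held(&mylock),
                      :  *			   "need rcu_read_lock() or mylock");
                      :  */
                      : 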
     509             : #if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU)
     510             : static inline void rcu_preempt_sleep_check(void)
     511             : {
     512             :         rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
     513             :                            "Illegal context switch in RCU read-side critical section");
     514             : }
     515             : #else /* #ifdef CONFIG_PROVE_RCU */
     516             : static inline void rcu_preempt_sleep_check(void)
     517             : {
     518             : }
     519             : #endif /* #else #ifdef CONFIG_PROVE_RCU */
     520             : 
     521             : #define rcu_sleep_check()                                               \
     522             :         do {                                                            \
     523             :                 rcu_preempt_sleep_check();                              \
     524             :                 rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map), \
     525             :                                    "Illegal context switch in RCU-bh read-side critical section"); \
     526             :                 rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map),      \
     527             :                                    "Illegal context switch in RCU-sched read-side critical section"); \
     528             :         } while (0)
     529             : 
     530             : #else /* #ifdef CONFIG_PROVE_RCU */
     531             : 
     532             : #define rcu_lockdep_assert(c, s) do { } while (0)
     533             : #define rcu_sleep_check() do { } while (0)
     534             : 
     535             : #endif /* #else #ifdef CONFIG_PROVE_RCU */
     536             : 
     537             : /*
     538             :  * Helper functions for rcu_dereference_check(), rcu_dereference_protected()
     539             :  * and rcu_assign_pointer().  Some of these could be folded into their
     540             :  * callers, but they are left separate in order to ease introduction of
     541             :  * multiple flavors of pointers to match the multiple flavors of RCU
      542             :  * (e.g., __rcu_bh, __rcu_sched, and __srcu), should this make sense in
     543             :  * the future.
     544             :  */
     545             : 
     546             : #ifdef __CHECKER__
     547             : #define rcu_dereference_sparse(p, space) \
     548             :         ((void)(((typeof(*p) space *)p) == p))
     549             : #else /* #ifdef __CHECKER__ */
     550             : #define rcu_dereference_sparse(p, space)
     551             : #endif /* #else #ifdef __CHECKER__ */
     552             : 
     553             : #define __rcu_access_pointer(p, space) \
     554             : ({ \
     555             :         typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \
     556             :         rcu_dereference_sparse(p, space); \
     557             :         ((typeof(*p) __force __kernel *)(_________p1)); \
     558             : })
     559             : #define __rcu_dereference_check(p, c, space) \
     560             : ({ \
     561             :         typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \
     562             :         rcu_lockdep_assert(c, "suspicious rcu_dereference_check() usage"); \
     563             :         rcu_dereference_sparse(p, space); \
     564             :         smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
     565             :         ((typeof(*p) __force __kernel *)(_________p1)); \
     566             : })
     567             : #define __rcu_dereference_protected(p, c, space) \
     568             : ({ \
     569             :         rcu_lockdep_assert(c, "suspicious rcu_dereference_protected() usage"); \
     570             :         rcu_dereference_sparse(p, space); \
     571             :         ((typeof(*p) __force __kernel *)(p)); \
     572             : })
     573             : 
     574             : #define __rcu_access_index(p, space) \
     575             : ({ \
     576             :         typeof(p) _________p1 = ACCESS_ONCE(p); \
     577             :         rcu_dereference_sparse(p, space); \
     578             :         (_________p1); \
     579             : })
     580             : #define __rcu_dereference_index_check(p, c) \
     581             : ({ \
     582             :         typeof(p) _________p1 = ACCESS_ONCE(p); \
     583             :         rcu_lockdep_assert(c, \
     584             :                            "suspicious rcu_dereference_index_check() usage"); \
     585             :         smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
     586             :         (_________p1); \
     587             : })
     588             : 
     589             : /**
     590             :  * RCU_INITIALIZER() - statically initialize an RCU-protected global variable
     591             :  * @v: The value to statically initialize with.
     592             :  */
     593             : #define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)
     594             : 
     595             : /**
     596             :  * rcu_assign_pointer() - assign to RCU-protected pointer
     597             :  * @p: pointer to assign to
     598             :  * @v: value to assign (publish)
     599             :  *
     600             :  * Assigns the specified value to the specified RCU-protected
     601             :  * pointer, ensuring that any concurrent RCU readers will see
     602             :  * any prior initialization.
     603             :  *
     604             :  * Inserts memory barriers on architectures that require them
     605             :  * (which is most of them), and also prevents the compiler from
     606             :  * reordering the code that initializes the structure after the pointer
     607             :  * assignment.  More importantly, this call documents which pointers
     608             :  * will be dereferenced by RCU read-side code.
     609             :  *
     610             :  * In some special cases, you may use RCU_INIT_POINTER() instead
     611             :  * of rcu_assign_pointer().  RCU_INIT_POINTER() is a bit faster due
     612             :  * to the fact that it does not constrain either the CPU or the compiler.
     613             :  * That said, using RCU_INIT_POINTER() when you should have used
     614             :  * rcu_assign_pointer() is a very bad thing that results in
     615             :  * impossible-to-diagnose memory corruption.  So please be careful.
     616             :  * See the RCU_INIT_POINTER() comment header for details.
     617             :  *
     618             :  * Note that rcu_assign_pointer() evaluates each of its arguments only
     619             :  * once, appearances notwithstanding.  One of the "extra" evaluations
     620             :  * is in typeof() and the other visible only to sparse (__CHECKER__),
     621             :  * neither of which actually execute the argument.  As with most cpp
     622             :  * macros, this execute-arguments-only-once property is important, so
     623             :  * please be careful when making changes to rcu_assign_pointer() and the
     624             :  * other macros that it invokes.
     625             :  */
     626             : #define rcu_assign_pointer(p, v) smp_store_release(&p, RCU_INITIALIZER(v))
     627             : 
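                      : /*
                      :  * The classic publish pattern, assuming a global "struct foo __rcu *gp"
                      :  * defined elsewhere; readers that see the new gp are guaranteed to also
                      :  * see the initialized ->a and ->b:
                      :  *
                      :  *	p = kmalloc(sizeof(*p), GFP_KERNEL);
                      :  *	p->a = 1;
                      :  *	p->b = 2;
                      :  *	rcu_assign_pointer(gp, p);
                      :  */
                      : 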
     628             : /**
     629             :  * rcu_access_pointer() - fetch RCU pointer with no dereferencing
     630             :  * @p: The pointer to read
     631             :  *
     632             :  * Return the value of the specified RCU-protected pointer, but omit the
     633             :  * smp_read_barrier_depends() and keep the ACCESS_ONCE().  This is useful
     634             :  * when the value of this pointer is accessed, but the pointer is not
     635             :  * dereferenced, for example, when testing an RCU-protected pointer against
     636             :  * NULL.  Although rcu_access_pointer() may also be used in cases where
     637             :  * update-side locks prevent the value of the pointer from changing, you
     638             :  * should instead use rcu_dereference_protected() for this use case.
     639             :  *
     640             :  * It is also permissible to use rcu_access_pointer() when read-side
     641             :  * access to the pointer was removed at least one grace period ago, as
     642             :  * is the case in the context of the RCU callback that is freeing up
     643             :  * the data, or after a synchronize_rcu() returns.  This can be useful
     644             :  * when tearing down multi-linked structures after a grace period
     645             :  * has elapsed.
     646             :  */
     647             : #define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu)
     648             : 
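                      : /*
                      :  * For example, testing for NULL without dereferencing (gp as in the
                      :  * rcu_assign_pointer() sketch above):
                      :  *
                      :  *	if (!rcu_access_pointer(gp))
                      :  *		return -ENOENT;
                      :  */
                      : 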
     649             : /**
     650             :  * rcu_dereference_check() - rcu_dereference with debug checking
     651             :  * @p: The pointer to read, prior to dereferencing
     652             :  * @c: The conditions under which the dereference will take place
     653             :  *
     654             :  * Do an rcu_dereference(), but check that the conditions under which the
     655             :  * dereference will take place are correct.  Typically the conditions
     656             :  * indicate the various locking conditions that should be held at that
     657             :  * point.  The check should return true if the conditions are satisfied.
     658             :  * An implicit check for being in an RCU read-side critical section
     659             :  * (rcu_read_lock()) is included.
     660             :  *
     661             :  * For example:
     662             :  *
     663             :  *      bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock));
     664             :  *
     665             :  * could be used to indicate to lockdep that foo->bar may only be dereferenced
     666             :  * if either rcu_read_lock() is held, or that the lock required to replace
     667             :  * the bar struct at foo->bar is held.
     668             :  *
     669             :  * Note that the list of conditions may also include indications of when a lock
     670             :  * need not be held, for example during initialisation or destruction of the
     671             :  * target struct:
     672             :  *
     673             :  *      bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock) ||
     674             :  *                                            atomic_read(&foo->usage) == 0);
     675             :  *
     676             :  * Inserts memory barriers on architectures that require them
     677             :  * (currently only the Alpha), prevents the compiler from refetching
     678             :  * (and from merging fetches), and, more importantly, documents exactly
     679             :  * which pointers are protected by RCU and checks that the pointer is
     680             :  * annotated as __rcu.
     681             :  */
     682             : #define rcu_dereference_check(p, c) \
     683             :         __rcu_dereference_check((p), rcu_read_lock_held() || (c), __rcu)
     684             : 
     685             : /**
     686             :  * rcu_dereference_bh_check() - rcu_dereference_bh with debug checking
     687             :  * @p: The pointer to read, prior to dereferencing
     688             :  * @c: The conditions under which the dereference will take place
     689             :  *
     690             :  * This is the RCU-bh counterpart to rcu_dereference_check().
     691             :  */
     692             : #define rcu_dereference_bh_check(p, c) \
     693             :         __rcu_dereference_check((p), rcu_read_lock_bh_held() || (c), __rcu)
     694             : 
     695             : /**
     696             :  * rcu_dereference_sched_check() - rcu_dereference_sched with debug checking
     697             :  * @p: The pointer to read, prior to dereferencing
     698             :  * @c: The conditions under which the dereference will take place
     699             :  *
     700             :  * This is the RCU-sched counterpart to rcu_dereference_check().
     701             :  */
     702             : #define rcu_dereference_sched_check(p, c) \
     703             :         __rcu_dereference_check((p), rcu_read_lock_sched_held() || (c), \
     704             :                                 __rcu)
     705             : 
     706             : #define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/
     707             : 
     708             : /*
     709             :  * The tracing infrastructure traces RCU (we want that), but unfortunately
      710             :  * some of the RCU checks cause tracing to lock up the system.
     711             :  *
     712             :  * The tracing version of rcu_dereference_raw() must not call
     713             :  * rcu_read_lock_held().
     714             :  */
     715             : #define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu)
     716             : 
     717             : /**
     718             :  * rcu_access_index() - fetch RCU index with no dereferencing
     719             :  * @p: The index to read
     720             :  *
     721             :  * Return the value of the specified RCU-protected index, but omit the
     722             :  * smp_read_barrier_depends() and keep the ACCESS_ONCE().  This is useful
     723             :  * when the value of this index is accessed, but the index is not
     724             :  * dereferenced, for example, when testing an RCU-protected index against
     725             :  * -1.  Although rcu_access_index() may also be used in cases where
     726             :  * update-side locks prevent the value of the index from changing, you
     727             :  * should instead use rcu_dereference_index_protected() for this use case.
     728             :  */
     729             : #define rcu_access_index(p) __rcu_access_index((p), __rcu)
     730             : 
     731             : /**
     732             :  * rcu_dereference_index_check() - rcu_dereference for indices with debug checking
     733             :  * @p: The pointer to read, prior to dereferencing
     734             :  * @c: The conditions under which the dereference will take place
     735             :  *
     736             :  * Similar to rcu_dereference_check(), but omits the sparse checking.
     737             :  * This allows rcu_dereference_index_check() to be used on integers,
     738             :  * which can then be used as array indices.  Attempting to use
     739             :  * rcu_dereference_check() on an integer will give compiler warnings
     740             :  * because the sparse address-space mechanism relies on dereferencing
     741             :  * the RCU-protected pointer.  Dereferencing integers is not something
     742             :  * that even gcc will put up with.
     743             :  *
     744             :  * Note that this function does not implicitly check for RCU read-side
     745             :  * critical sections.  If this function gains lots of uses, it might
     746             :  * make sense to provide versions for each flavor of RCU, but it does
     747             :  * not make sense as of early 2010.
     748             :  */
     749             : #define rcu_dereference_index_check(p, c) \
     750             :         __rcu_dereference_index_check((p), (c))
     751             : 
     752             : /**
     753             :  * rcu_dereference_protected() - fetch RCU pointer when updates prevented
     754             :  * @p: The pointer to read, prior to dereferencing
     755             :  * @c: The conditions under which the dereference will take place
     756             :  *
     757             :  * Return the value of the specified RCU-protected pointer, but omit
     758             :  * both the smp_read_barrier_depends() and the ACCESS_ONCE().  This
     759             :  * is useful in cases where update-side locks prevent the value of the
     760             :  * pointer from changing.  Please note that this primitive does -not-
     761             :  * prevent the compiler from repeating this reference or combining it
     762             :  * with other references, so it should not be used without protection
     763             :  * of appropriate locks.
     764             :  *
     765             :  * This function is only for update-side use.  Using this function
     766             :  * when protected only by rcu_read_lock() will result in infrequent
     767             :  * but very ugly failures.
     768             :  */
     769             : #define rcu_dereference_protected(p, c) \
     770             :         __rcu_dereference_protected((p), (c), __rcu)
     771             : 
     772             : 
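                      : /*
                      :  * Update-side sketch, assuming updates to gp are serialized by a
                      :  * hypothetical gp_lock; the old structure may be freed only after a
                      :  * grace period:
                      :  *
                      :  *	spin_lock(&gp_lock);
                      :  *	old = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
                      :  *	rcu_assign_pointer(gp, new);
                      :  *	spin_unlock(&gp_lock);
                      :  *	synchronize_rcu();
                      :  *	kfree(old);
                      :  */
                      : 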
     773             : /**
     774             :  * rcu_dereference() - fetch RCU-protected pointer for dereferencing
     775             :  * @p: The pointer to read, prior to dereferencing
     776             :  *
     777             :  * This is a simple wrapper around rcu_dereference_check().
     778             :  */
     779             : #define rcu_dereference(p) rcu_dereference_check(p, 0)
     780             : 
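                      : /*
                      :  * Read-side counterpart to the rcu_assign_pointer() sketch above (gp
                      :  * and do_something_with() remain placeholders):
                      :  *
                      :  *	rcu_read_lock();
                      :  *	p = rcu_dereference(gp);
                      :  *	if (p)
                      :  *		do_something_with(p->a, p->b);
                      :  *	rcu_read_unlock();
                      :  */
                      : 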
     781             : /**
     782             :  * rcu_dereference_bh() - fetch an RCU-bh-protected pointer for dereferencing
     783             :  * @p: The pointer to read, prior to dereferencing
     784             :  *
     785             :  * Makes rcu_dereference_check() do the dirty work.
     786             :  */
     787             : #define rcu_dereference_bh(p) rcu_dereference_bh_check(p, 0)
     788             : 
     789             : /**
     790             :  * rcu_dereference_sched() - fetch RCU-sched-protected pointer for dereferencing
     791             :  * @p: The pointer to read, prior to dereferencing
     792             :  *
     793             :  * Makes rcu_dereference_check() do the dirty work.
     794             :  */
     795             : #define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0)
     796             : 
     797             : /**
     798             :  * rcu_read_lock() - mark the beginning of an RCU read-side critical section
     799             :  *
     800             :  * When synchronize_rcu() is invoked on one CPU while other CPUs
     801             :  * are within RCU read-side critical sections, then the
     802             :  * synchronize_rcu() is guaranteed to block until after all the other
     803             :  * CPUs exit their critical sections.  Similarly, if call_rcu() is invoked
     804             :  * on one CPU while other CPUs are within RCU read-side critical
     805             :  * sections, invocation of the corresponding RCU callback is deferred
      806             :  * until after all the other CPUs exit their critical sections.
     807             :  *
     808             :  * Note, however, that RCU callbacks are permitted to run concurrently
     809             :  * with new RCU read-side critical sections.  One way that this can happen
     810             :  * is via the following sequence of events: (1) CPU 0 enters an RCU
     811             :  * read-side critical section, (2) CPU 1 invokes call_rcu() to register
     812             :  * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
      813             :  * (4) CPU 2 enters an RCU read-side critical section, (5) the RCU
     814             :  * callback is invoked.  This is legal, because the RCU read-side critical
     815             :  * section that was running concurrently with the call_rcu() (and which
     816             :  * therefore might be referencing something that the corresponding RCU
     817             :  * callback would free up) has completed before the corresponding
     818             :  * RCU callback is invoked.
     819             :  *
     820             :  * RCU read-side critical sections may be nested.  Any deferred actions
     821             :  * will be deferred until the outermost RCU read-side critical section
     822             :  * completes.
     823             :  *
     824             :  * You can avoid reading and understanding the next paragraph by
     825             :  * following this rule: don't put anything in an rcu_read_lock() RCU
     826             :  * read-side critical section that would block in a !PREEMPT kernel.
     827             :  * But if you want the full story, read on!
     828             :  *
     829             :  * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU),
     830             :  * it is illegal to block while in an RCU read-side critical section.
     831             :  * In preemptible RCU implementations (TREE_PREEMPT_RCU) in CONFIG_PREEMPT
     832             :  * kernel builds, RCU read-side critical sections may be preempted,
     833             :  * but explicit blocking is illegal.  Finally, in preemptible RCU
     834             :  * implementations in real-time (with -rt patchset) kernel builds, RCU
     835             :  * read-side critical sections may be preempted and they may also block, but
     836             :  * only when acquiring spinlocks that are subject to priority inheritance.
     837             :  */
     838             : static inline void rcu_read_lock(void)
     839             : {
     840             :         __rcu_read_lock();
     841             :         __acquire(RCU);
     842             :         rcu_lock_acquire(&rcu_lock_map);
     843             :         rcu_lockdep_assert(rcu_is_watching(),
     844             :                            "rcu_read_lock() used illegally while idle");
     845             : }
     846             : 
     847             : /*
     848             :  * So where is rcu_write_lock()?  It does not exist, as there is no
     849             :  * way for writers to lock out RCU readers.  This is a feature, not
     850             :  * a bug -- this property is what provides RCU's performance benefits.
     851             :  * Of course, writers must coordinate with each other.  The normal
     852             :  * spinlock primitives work well for this, but any other technique may be
     853             :  * used as well.  RCU does not care how the writers keep out of each
     854             :  * others' way, as long as they do so.
     855             :  */
     856             : 
     857             : /**
     858             :  * rcu_read_unlock() - marks the end of an RCU read-side critical section.
     859             :  *
     860             :  * In most situations, rcu_read_unlock() is immune from deadlock.
     861             :  * However, in kernels built with CONFIG_RCU_BOOST, rcu_read_unlock()
     862             :  * is responsible for deboosting, which it does via rt_mutex_unlock().
     863             :  * Unfortunately, this function acquires the scheduler's runqueue and
     864             :  * priority-inheritance spinlocks.  This means that deadlock could result
     865             :  * if the caller of rcu_read_unlock() already holds one of these locks or
     866             :  * any lock that is ever acquired while holding them.
     867             :  *
     868             :  * That said, RCU readers are never priority boosted unless they were
     869             :  * preempted.  Therefore, one way to avoid deadlock is to make sure
     870             :  * that preemption never happens within any RCU read-side critical
     871             :  * section whose outermost rcu_read_unlock() is called with one of
     872             :  * rt_mutex_unlock()'s locks held.  Such preemption can be avoided in
     873             :  * a number of ways, for example, by invoking preempt_disable() before
      874             :  * the critical section's outermost rcu_read_lock().
     875             :  *
     876             :  * Given that the set of locks acquired by rt_mutex_unlock() might change
     877             :  * at any time, a somewhat more future-proofed approach is to make sure
      878             :  * that preemption never happens within any RCU read-side critical
     879             :  * section whose outermost rcu_read_unlock() is called with irqs disabled.
     880             :  * This approach relies on the fact that rt_mutex_unlock() currently only
     881             :  * acquires irq-disabled locks.
     882             :  *
     883             :  * The second of these two approaches is best in most situations,
     884             :  * however, the first approach can also be useful, at least to those
     885             :  * developers willing to keep abreast of the set of locks acquired by
     886             :  * rt_mutex_unlock().
     887             :  *
     888             :  * See rcu_read_lock() for more information.
     889             :  */
     890             : static inline void rcu_read_unlock(void)
     891             : {
     892             :         rcu_lockdep_assert(rcu_is_watching(),
     893             :                            "rcu_read_unlock() used illegally while idle");
     894             :         rcu_lock_release(&rcu_lock_map);
     895             :         __release(RCU);
     896             :         __rcu_read_unlock();
     897             : }
     898             : 
     899             : /**
     900             :  * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
     901             :  *
      902             :  * This is the equivalent of rcu_read_lock(), but to be used when updates
     903             :  * are being done using call_rcu_bh() or synchronize_rcu_bh(). Since
     904             :  * both call_rcu_bh() and synchronize_rcu_bh() consider completion of a
     905             :  * softirq handler to be a quiescent state, a process in RCU read-side
     906             :  * critical section must be protected by disabling softirqs. Read-side
     907             :  * critical sections in interrupt context can use just rcu_read_lock(),
     908             :  * though this should at least be commented to avoid confusing people
     909             :  * reading the code.
     910             :  *
     911             :  * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
     912             :  * must occur in the same context, for example, it is illegal to invoke
     913             :  * rcu_read_unlock_bh() from one task if the matching rcu_read_lock_bh()
     914             :  * was invoked from some other task.
     915             :  */
     916             : static inline void rcu_read_lock_bh(void)
     917             : {
     918             :         local_bh_disable();
     919             :         __acquire(RCU_BH);
     920             :         rcu_lock_acquire(&rcu_bh_lock_map);
     921             :         rcu_lockdep_assert(rcu_is_watching(),
     922             :                            "rcu_read_lock_bh() used illegally while idle");
     923             : }
     924             : 
     925             : /*
     926             :  * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
     927             :  *
     928             :  * See rcu_read_lock_bh() for more information.
     929             :  */
     930             : static inline void rcu_read_unlock_bh(void)
     931             : {
     932             :         rcu_lockdep_assert(rcu_is_watching(),
     933             :                            "rcu_read_unlock_bh() used illegally while idle");
     934             :         rcu_lock_release(&rcu_bh_lock_map);
     935             :         __release(RCU_BH);
     936             :         local_bh_enable();
     937             : }
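
For illustration only (gbl_ptr, gbl_lock, handle(), free_cb(), and the rcu
field are hypothetical names, not declared in this header), a reader/updater
pair using the RCU-bh flavor might look like this:

	/* Reader: softirqs disabled for the whole critical section. */
	rcu_read_lock_bh();
	p = rcu_dereference_bh(gbl_ptr);
	if (p)
		handle(p);
	rcu_read_unlock_bh();

	/* Updater: publish the new version, defer freeing the old one. */
	spin_lock(&gbl_lock);
	old = rcu_dereference_protected(gbl_ptr, lockdep_is_held(&gbl_lock));
	rcu_assign_pointer(gbl_ptr, new);
	spin_unlock(&gbl_lock);
	call_rcu_bh(&old->rcu, free_cb);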
     938             : 
     939             : /**
      940             :  * rcu_read_lock_sched() - mark the beginning of an RCU-sched critical section
     941             :  *
      942             :  * This is the equivalent of rcu_read_lock(), but is to be used when updates
     943             :  * are being done using call_rcu_sched() or synchronize_rcu_sched().
     944             :  * Read-side critical sections can also be introduced by anything that
     945             :  * disables preemption, including local_irq_disable() and friends.
     946             :  *
     947             :  * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched()
     948             :  * must occur in the same context, for example, it is illegal to invoke
     949             :  * rcu_read_unlock_sched() from process context if the matching
     950             :  * rcu_read_lock_sched() was invoked from an NMI handler.
     951             :  */
     952             : static inline void rcu_read_lock_sched(void)
     953             : {
     954             :         preempt_disable();
     955             :         __acquire(RCU_SCHED);
     956             :         rcu_lock_acquire(&rcu_sched_lock_map);
     957             :         rcu_lockdep_assert(rcu_is_watching(),
     958             :                            "rcu_read_lock_sched() used illegally while idle");
     959             : }
     960             : 
     961             : /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
     962             : static inline notrace void rcu_read_lock_sched_notrace(void)
     963             : {
     964           0 :         preempt_disable_notrace();
     965             :         __acquire(RCU_SCHED);
     966             : }
     967             : 
     968             : /*
      969             :  * rcu_read_unlock_sched - marks the end of an RCU-sched critical section
     970             :  *
      971             :  * See rcu_read_lock_sched() for more information.
     972             :  */
     973             : static inline void rcu_read_unlock_sched(void)
     974             : {
     975             :         rcu_lockdep_assert(rcu_is_watching(),
     976             :                            "rcu_read_unlock_sched() used illegally while idle");
     977             :         rcu_lock_release(&rcu_sched_lock_map);
     978             :         __release(RCU_SCHED);
     979             :         preempt_enable();
     980             : }
     981             : 
     982             : /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
     983             : static inline notrace void rcu_read_unlock_sched_notrace(void)
     984             : {
     985             :         __release(RCU_SCHED);
     986           0 :         preempt_enable_notrace();
     987             : }
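
Again as a hypothetical sketch (cfg, new_cfg, old_cfg, and use() are
illustrative names), RCU-sched readers pair with synchronize_sched() or
call_rcu_sched() on the update side:

	/* Reader: equivalent to any preemption-disabled region, but the
	 * explicit API documents intent and hooks into lockdep checking. */
	rcu_read_lock_sched();
	use(rcu_dereference_sched(cfg));
	rcu_read_unlock_sched();

	/* Updater: wait for all pre-existing RCU-sched readers to finish. */
	old_cfg = rcu_dereference_protected(cfg, 1);
	rcu_assign_pointer(cfg, new_cfg);
	synchronize_sched();
	kfree(old_cfg);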
     988             : 
     989             : /**
     990             :  * RCU_INIT_POINTER() - initialize an RCU protected pointer
     991             :  *
     992             :  * Initialize an RCU-protected pointer in special cases where readers
     993             :  * do not need ordering constraints on the CPU or the compiler.  These
     994             :  * special cases are:
     995             :  *
     996             :  * 1.   This use of RCU_INIT_POINTER() is NULLing out the pointer -or-
     997             :  * 2.   The caller has taken whatever steps are required to prevent
     998             :  *      RCU readers from concurrently accessing this pointer -or-
     999             :  * 3.   The referenced data structure has already been exposed to
    1000             :  *      readers either at compile time or via rcu_assign_pointer() -and-
    1001             :  *      a.      You have not made -any- reader-visible changes to
    1002             :  *              this structure since then -or-
    1003             :  *      b.      It is OK for readers accessing this structure from its
    1004             :  *              new location to see the old state of the structure.  (For
    1005             :  *              example, the changes were to statistical counters or to
    1006             :  *              other state where exact synchronization is not required.)
    1007             :  *
    1008             :  * Failure to follow these rules governing use of RCU_INIT_POINTER() will
     1009             :  * result in impossible-to-diagnose memory corruption.  That is, the
     1010             :  * structures will look OK in crash dumps, but any concurrent RCU readers
     1011             :  * might see pre-initialization values of the referenced data structure.  So
    1012             :  * please be very careful how you use RCU_INIT_POINTER()!!!
    1013             :  *
    1014             :  * If you are creating an RCU-protected linked structure that is accessed
    1015             :  * by a single external-to-structure RCU-protected pointer, then you may
    1016             :  * use RCU_INIT_POINTER() to initialize the internal RCU-protected
    1017             :  * pointers, but you must use rcu_assign_pointer() to initialize the
    1018             :  * external-to-structure pointer -after- you have completely initialized
    1019             :  * the reader-accessible portions of the linked structure.
    1020             :  *
    1021             :  * Note that unlike rcu_assign_pointer(), RCU_INIT_POINTER() provides no
    1022             :  * ordering guarantees for either the CPU or the compiler.
    1023             :  */
    1024             : #define RCU_INIT_POINTER(p, v) \
    1025             :         do { \
    1026             :                 p = RCU_INITIALIZER(v); \
    1027             :         } while (0)
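
A hypothetical sketch of the cases above (gp, p, and the structure fields are
illustrative names): case 1 NULLs out a pointer, while the internal-pointer
pattern initializes fields of a not-yet-visible structure and then publishes
it with rcu_assign_pointer():

	/* Case 1: NULLing out an RCU-protected pointer needs no ordering. */
	RCU_INIT_POINTER(gp, NULL);

	/* Case 2: the structure is not yet reader-visible, so its internal
	 * RCU-protected pointers may be initialized without ordering; the
	 * final, reader-visible publication still uses rcu_assign_pointer(). */
	p = kmalloc(sizeof(*p), GFP_KERNEL);
	p->a = 1;
	RCU_INIT_POINTER(p->next, NULL);
	rcu_assign_pointer(gp, p);	/* Readers now see fully initialized *p. */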
    1028             : 
    1029             : /**
    1030             :  * RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer
    1031             :  *
    1032             :  * GCC-style initialization for an RCU-protected pointer in a structure field.
    1033             :  */
    1034             : #define RCU_POINTER_INITIALIZER(p, v) \
    1035             :                 .p = RCU_INITIALIZER(v)
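
For example (struct foo, struct bar, and default_bar are hypothetical), this
initializes an RCU-protected structure field at compile time, where no
ordering is needed because initialization completes before any reader can run:

	struct foo {
		struct bar __rcu *barp;
	};

	static struct bar default_bar;

	static struct foo foo_init = {
		RCU_POINTER_INITIALIZER(barp, &default_bar),
	};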
    1036             : 
    1037             : /*
    1038             :  * Does the specified offset indicate that the corresponding rcu_head
    1039             :  * structure can be handled by kfree_rcu()?
    1040             :  */
    1041             : #define __is_kfree_rcu_offset(offset) ((offset) < 4096)
    1042             : 
    1043             : /*
    1044             :  * Helper macro for kfree_rcu() to prevent argument-expansion eyestrain.
    1045             :  */
    1046             : #define __kfree_rcu(head, offset) \
    1047             :         do { \
    1048             :                 BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \
    1049             :                 kfree_call_rcu(head, (void (*)(struct rcu_head *))(unsigned long)(offset)); \
    1050             :         } while (0)
    1051             : 
    1052             : /**
    1053             :  * kfree_rcu() - kfree an object after a grace period.
    1054             :  * @ptr:        pointer to kfree
    1055             :  * @rcu_head:   the name of the struct rcu_head within the type of @ptr.
    1056             :  *
     1057             :  * Many rcu callback functions just call kfree() on the base structure.
    1058             :  * These functions are trivial, but their size adds up, and furthermore
    1059             :  * when they are used in a kernel module, that module must invoke the
    1060             :  * high-latency rcu_barrier() function at module-unload time.
    1061             :  *
    1062             :  * The kfree_rcu() function handles this issue.  Rather than encoding a
    1063             :  * function address in the embedded rcu_head structure, kfree_rcu() instead
    1064             :  * encodes the offset of the rcu_head structure within the base structure.
    1065             :  * Because the functions are not allowed in the low-order 4096 bytes of
    1066             :  * kernel virtual memory, offsets up to 4095 bytes can be accommodated.
    1067             :  * If the offset is larger than 4095 bytes, a compile-time error will
    1068             :  * be generated in __kfree_rcu().  If this error is triggered, you can
    1069             :  * either fall back to use of call_rcu() or rearrange the structure to
    1070             :  * position the rcu_head structure into the first 4096 bytes.
    1071             :  *
    1072             :  * Note that the allowable offset might decrease in the future, for example,
    1073             :  * to allow something like kmem_cache_free_rcu().
    1074             :  *
    1075             :  * The BUILD_BUG_ON check must not involve any function calls, hence the
    1076             :  * checks are done in macros here.
    1077             :  */
    1078             : #define kfree_rcu(ptr, rcu_head)                                        \
    1079             :         __kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
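
As a hypothetical illustration (struct myobj and obj are not part of this
header), a structure with an embedded rcu_head near its start can be freed
with a single line instead of an open-coded callback:

	struct myobj {
		int key;
		struct rcu_head rcu;	/* offsetof(struct myobj, rcu) < 4096 */
		char payload[64];
	};

	/* Replaces call_rcu(&obj->rcu, cb) plus a callback whose only
	 * job is to compute the base pointer and call kfree() on it: */
	kfree_rcu(obj, rcu);		/* kfree(obj) after a grace period. */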
    1080             : 
    1081             : #if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL)
    1082             : static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
    1083             : {
    1084             :         *delta_jiffies = ULONG_MAX;
    1085             :         return 0;
    1086             : }
    1087             : #endif /* #if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL) */
    1088             : 
    1089             : #if defined(CONFIG_RCU_NOCB_CPU_ALL)
    1090             : static inline bool rcu_is_nocb_cpu(int cpu) { return true; }
    1091             : #elif defined(CONFIG_RCU_NOCB_CPU)
    1092             : bool rcu_is_nocb_cpu(int cpu);
    1093             : #else
    1094             : static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
    1095             : #endif
    1096             : 
    1097             : 
    1098             : /* Only for use by adaptive-ticks code. */
    1099             : #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
    1100             : bool rcu_sys_is_idle(void);
    1101             : void rcu_sysidle_force_exit(void);
    1102             : #else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
    1103             : 
    1104             : static inline bool rcu_sys_is_idle(void)
    1105             : {
    1106             :         return false;
    1107             : }
    1108             : 
    1109             : static inline void rcu_sysidle_force_exit(void)
    1110             : {
    1111             : }
    1112             : 
    1113             : #endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
    1114             : 
    1115             : 
    1116             : #endif /* __LINUX_RCUPDATE_H */

Generated by: LCOV version 1.10