/* Freezer declarations */

#ifndef FREEZER_H_INCLUDED
#define FREEZER_H_INCLUDED

#include <linux/debug_locks.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/atomic.h>

#ifdef CONFIG_FREEZER
extern atomic_t system_freezing_cnt;	/* nr of freezing conds in effect */
extern bool pm_freezing;		/* PM freezing in effect */
extern bool pm_nosig_freezing;		/* PM nosig freezing in effect */

/*
 * Timeout for stopping processes
 */
extern unsigned int freeze_timeout_msecs;

/*
 * Check if a process has been frozen
 */
static inline bool frozen(struct task_struct *p)
{
	return p->flags & PF_FROZEN;
}

extern bool freezing_slow_path(struct task_struct *p);

/*
 * Check if there is a request to freeze a process
 */
static inline bool freezing(struct task_struct *p)
{
	if (likely(!atomic_read(&system_freezing_cnt)))
		return false;
	return freezing_slow_path(p);
}

/* Takes and releases task alloc lock using task_lock() */
extern void __thaw_task(struct task_struct *t);

extern bool __refrigerator(bool check_kthr_stop);
extern int freeze_processes(void);
extern int freeze_kernel_threads(void);
extern void thaw_processes(void);
extern void thaw_kernel_threads(void);

/*
 * DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION
 * If try_to_freeze causes a lockdep warning it means the caller may deadlock
 */
static inline bool try_to_freeze_unsafe(void)
{
	might_sleep();
	if (likely(!freezing(current)))
		return false;
	return __refrigerator(false);
}

static inline bool try_to_freeze(void)
{
	if (!(current->flags & PF_NOFREEZE))
		debug_check_no_locks_held();
	return try_to_freeze_unsafe();
}
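
/*
 * Illustrative sketch (not part of this header's API): a freezable kernel
 * thread typically calls set_freezable() once and then try_to_freeze() at a
 * point where it holds no locks.  kthread_should_stop() is assumed from
 * <linux/kthread.h>, and my_do_work() is a hypothetical helper:
 *
 *	set_freezable();
 *	while (!kthread_should_stop()) {
 *		try_to_freeze();
 *		my_do_work();
 *	}
 */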

extern bool freeze_task(struct task_struct *p);
extern bool set_freezable(void);

#ifdef CONFIG_CGROUP_FREEZER
extern bool cgroup_freezing(struct task_struct *task);
#else /* !CONFIG_CGROUP_FREEZER */
static inline bool cgroup_freezing(struct task_struct *task)
{
	return false;
}
#endif /* !CONFIG_CGROUP_FREEZER */

/*
 * The PF_FREEZER_SKIP flag should be set by a vfork parent right before it
 * calls wait_for_completion(&vfork) and reset right after it returns from
 * this function.  Next, the parent should call try_to_freeze() to freeze
 * itself appropriately in case the child has exited before the freezing of
 * tasks is complete.  However, we don't want kernel threads to be frozen in
 * unexpected places, so we allow them to block freeze_processes() instead or
 * to set PF_NOFREEZE if needed.  Fortunately, in the ____call_usermodehelper()
 * case the parent won't really block freeze_processes(), since
 * ____call_usermodehelper() (the child) does a little before exec/exit and it
 * can't be frozen before waking up the parent.
 */
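
/*
 * Illustrative sketch of the protocol described above.  freezer_do_not_count()
 * and freezer_count(), defined below, implement exactly this set/clear of
 * PF_FREEZER_SKIP plus the follow-up try_to_freeze(); the completion named
 * "vfork" is an assumption for the example:
 *
 *	freezer_do_not_count();			sets PF_FREEZER_SKIP
 *	wait_for_completion(&vfork);
 *	freezer_count();			clears it, then try_to_freeze()
 */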

/**
 * freezer_do_not_count - tell freezer to ignore %current
 *
 * Tell freezers to ignore the current task when determining whether the
 * target frozen state is reached.  IOW, the current task will be
 * considered frozen enough by freezers.
 *
 * The caller shouldn't do anything which isn't allowed for a frozen task
 * until freezer_count() is called.  Usually, freezer[_do_not]_count() pair
 * wrap a scheduling operation and nothing much else.
 */
static inline void freezer_do_not_count(void)
{
	current->flags |= PF_FREEZER_SKIP;
}

/**
 * freezer_count - tell freezer to stop ignoring %current
 *
 * Undo freezer_do_not_count().  It tells freezers that %current should be
 * considered again and tries to freeze if a freezing condition is already in
 * effect.
 */
static inline void freezer_count(void)
{
	current->flags &= ~PF_FREEZER_SKIP;
	/*
	 * If freezing is in progress, the following paired with smp_mb()
	 * in freezer_should_skip() ensures that either we see %true
	 * freezing() or freezer_should_skip() sees !PF_FREEZER_SKIP.
	 */
	smp_mb();
	try_to_freeze();
}

/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
static inline void freezer_count_unsafe(void)
{
	current->flags &= ~PF_FREEZER_SKIP;
	smp_mb();
	try_to_freeze_unsafe();
}

/**
 * freezer_should_skip - whether to skip a task when determining if the
 *			 frozen state is reached
 * @p: task in question
 *
 * This function is used by freezers after establishing %true freezing() to
 * test whether a task should be skipped when determining whether the target
 * frozen state is reached.  IOW, if this function returns %true, @p is
 * considered frozen enough.
 */
static inline bool freezer_should_skip(struct task_struct *p)
{
	/*
	 * The following smp_mb() paired with the one in freezer_count()
	 * ensures that either freezer_count() sees %true freezing() or we
	 * see cleared %PF_FREEZER_SKIP and return %false.  This makes it
	 * impossible for a task to slip past the frozen-state test after
	 * clearing %PF_FREEZER_SKIP.
	 */
	smp_mb();
	return p->flags & PF_FREEZER_SKIP;
}
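
/*
 * Illustrative sketch of the freezer side, loosely modelled on the loop in
 * kernel/power/process.c: once freezing(p) is true, a task counts as "frozen
 * enough" when it is either actually frozen or has set PF_FREEZER_SKIP.
 * "todo" and the bare iteration over tasks are assumptions for the example:
 *
 *	for_each_process(p) {
 *		if (freezing(p) && !frozen(p) && !freezer_should_skip(p))
 *			todo++;
 *	}
 */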

/*
 * These functions are intended to be used whenever you want to allow a
 * sleeping task to be frozen.  Note that neither returns any clear
 * indication of whether a freeze event happened while in this function.
 */

/* Like schedule(), but should not block the freezer. */
static inline void freezable_schedule(void)
{
	freezer_do_not_count();
	schedule();
	freezer_count();
}
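
/*
 * Illustrative sketch: an open-coded wait loop that stays freezable by
 * sleeping through freezable_schedule() instead of schedule().  The wait
 * queue "my_waitqueue" and the condition "my_condition" are hypothetical:
 *
 *	DECLARE_WAITQUEUE(wait, current);
 *
 *	add_wait_queue(&my_waitqueue, &wait);
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (my_condition)
 *			break;
 *		freezable_schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *	remove_wait_queue(&my_waitqueue, &wait);
 */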

/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
static inline void freezable_schedule_unsafe(void)
{
	freezer_do_not_count();
	schedule();
	freezer_count_unsafe();
}

/*
 * Like schedule_timeout(), but should not block the freezer.  Do not
 * call this with locks held.
 */
static inline long freezable_schedule_timeout(long timeout)
{
	long __retval;
	freezer_do_not_count();
	__retval = schedule_timeout(timeout);
	freezer_count();
	return __retval;
}
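
/*
 * Illustrative sketch: sleep for roughly 100ms while remaining freezable.
 * msecs_to_jiffies() is assumed to come from <linux/jiffies.h>:
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	freezable_schedule_timeout(msecs_to_jiffies(100));
 */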

/*
 * Like schedule_timeout_interruptible(), but should not block the freezer.
 * Do not call this with locks held.
 */
static inline long freezable_schedule_timeout_interruptible(long timeout)
{
	long __retval;
	freezer_do_not_count();
	__retval = schedule_timeout_interruptible(timeout);
	freezer_count();
	return __retval;
}

/* Like schedule_timeout_killable(), but should not block the freezer. */
static inline long freezable_schedule_timeout_killable(long timeout)
{
	long __retval;
	freezer_do_not_count();
	__retval = schedule_timeout_killable(timeout);
	freezer_count();
	return __retval;
}

/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
static inline long freezable_schedule_timeout_killable_unsafe(long timeout)
{
	long __retval;
	freezer_do_not_count();
	__retval = schedule_timeout_killable(timeout);
	freezer_count_unsafe();
	return __retval;
}

/*
 * Like schedule_hrtimeout_range(), but should not block the freezer.  Do not
 * call this with locks held.
 */
static inline int freezable_schedule_hrtimeout_range(ktime_t *expires,
		unsigned long delta, const enum hrtimer_mode mode)
{
	int __retval;
	freezer_do_not_count();
	__retval = schedule_hrtimeout_range(expires, delta, mode);
	freezer_count();
	return __retval;
}

/*
 * Freezer-friendly wrappers around wait_event_interruptible(),
 * wait_event_killable() and wait_event_interruptible_timeout(), originally
 * defined in <linux/wait.h>
 */

#define wait_event_freezekillable(wq, condition)			\
({									\
	int __retval;							\
	freezer_do_not_count();						\
	__retval = wait_event_killable(wq, (condition));		\
	freezer_count();						\
	__retval;							\
})

/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
#define wait_event_freezekillable_unsafe(wq, condition)			\
({									\
	int __retval;							\
	freezer_do_not_count();						\
	__retval = wait_event_killable(wq, (condition));		\
	freezer_count_unsafe();						\
	__retval;							\
})

#define wait_event_freezable(wq, condition)				\
({									\
	int __retval;							\
	freezer_do_not_count();						\
	__retval = wait_event_interruptible(wq, (condition));		\
	freezer_count();						\
	__retval;							\
})
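
/*
 * Illustrative sketch of wait_event_freezable() in a freezable kernel
 * thread; kthread_should_stop() is assumed from <linux/kthread.h>, and the
 * wait queue, condition and work helpers are hypothetical:
 *
 *	set_freezable();
 *	while (!kthread_should_stop()) {
 *		wait_event_freezable(my_waitqueue,
 *				     my_work_pending() || kthread_should_stop());
 *		my_process_work();
 *	}
 */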

#define wait_event_freezable_timeout(wq, condition, timeout)		\
({									\
	long __retval = timeout;					\
	freezer_do_not_count();						\
	__retval = wait_event_interruptible_timeout(wq, (condition),	\
				__retval);				\
	freezer_count();						\
	__retval;							\
})
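
/*
 * Illustrative sketch for the timeout variant: as with
 * wait_event_interruptible_timeout(), the result is 0 on timeout, the
 * remaining jiffies if the condition became true, or -ERESTARTSYS if a
 * signal interrupted the wait.  "my_waitqueue", "done" and the helper are
 * hypothetical:
 *
 *	long left = wait_event_freezable_timeout(my_waitqueue, done, HZ);
 *
 *	if (left == 0)
 *		my_handle_timeout();
 */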

#define wait_event_freezable_exclusive(wq, condition)			\
({									\
	int __retval;							\
	freezer_do_not_count();						\
	__retval = wait_event_interruptible_exclusive(wq, condition);	\
	freezer_count();						\
	__retval;							\
})

#else /* !CONFIG_FREEZER */
static inline bool frozen(struct task_struct *p) { return false; }
static inline bool freezing(struct task_struct *p) { return false; }
static inline void __thaw_task(struct task_struct *t) {}

static inline bool __refrigerator(bool check_kthr_stop) { return false; }
static inline int freeze_processes(void) { return -ENOSYS; }
static inline int freeze_kernel_threads(void) { return -ENOSYS; }
static inline void thaw_processes(void) {}
static inline void thaw_kernel_threads(void) {}
static inline bool try_to_freeze_nowarn(void) { return false; }
static inline bool try_to_freeze(void) { return false; }

static inline void freezer_do_not_count(void) {}
static inline void freezer_count(void) {}
static inline bool freezer_should_skip(struct task_struct *p) { return false; }
static inline bool set_freezable(void) { return false; }

#define freezable_schedule()  schedule()

#define freezable_schedule_unsafe()  schedule()

#define freezable_schedule_timeout(timeout)  schedule_timeout(timeout)

#define freezable_schedule_timeout_interruptible(timeout)		\
	schedule_timeout_interruptible(timeout)

#define freezable_schedule_timeout_killable(timeout)			\
	schedule_timeout_killable(timeout)

#define freezable_schedule_timeout_killable_unsafe(timeout)		\
	schedule_timeout_killable(timeout)

#define freezable_schedule_hrtimeout_range(expires, delta, mode)	\
	schedule_hrtimeout_range(expires, delta, mode)

#define wait_event_freezable(wq, condition)				\
	wait_event_interruptible(wq, condition)

#define wait_event_freezable_timeout(wq, condition, timeout)		\
	wait_event_interruptible_timeout(wq, condition, timeout)

#define wait_event_freezable_exclusive(wq, condition)			\
	wait_event_interruptible_exclusive(wq, condition)

#define wait_event_freezekillable(wq, condition)			\
	wait_event_killable(wq, condition)

#define wait_event_freezekillable_unsafe(wq, condition)			\
	wait_event_killable(wq, condition)

#endif /* !CONFIG_FREEZER */

#endif /* FREEZER_H_INCLUDED */