1 : #ifndef _LINUX_KERNEL_H
2 : #define _LINUX_KERNEL_H
3 :
4 :
5 : #include <stdarg.h>
6 : #include <linux/linkage.h>
7 : #include <linux/stddef.h>
8 : #include <linux/types.h>
9 : #include <linux/compiler.h>
10 : #include <linux/bitops.h>
11 : #include <linux/log2.h>
12 : #include <linux/typecheck.h>
13 : #include <linux/printk.h>
14 : #include <linux/dynamic_debug.h>
15 : #include <asm/byteorder.h>
16 : #include <uapi/linux/kernel.h>
17 :
18 : #define USHRT_MAX ((u16)(~0U))
19 : #define SHRT_MAX ((s16)(USHRT_MAX>>1))
20 : #define SHRT_MIN ((s16)(-SHRT_MAX - 1))
21 : #define INT_MAX ((int)(~0U>>1))
22 : #define INT_MIN (-INT_MAX - 1)
23 : #define UINT_MAX (~0U)
24 : #define LONG_MAX ((long)(~0UL>>1))
25 : #define LONG_MIN (-LONG_MAX - 1)
26 : #define ULONG_MAX (~0UL)
27 : #define LLONG_MAX ((long long)(~0ULL>>1))
28 : #define LLONG_MIN (-LLONG_MAX - 1)
29 : #define ULLONG_MAX (~0ULL)
30 : #define SIZE_MAX (~(size_t)0)
31 :
32 : #define U8_MAX ((u8)~0U)
33 : #define S8_MAX ((s8)(U8_MAX>>1))
34 : #define S8_MIN ((s8)(-S8_MAX - 1))
35 : #define U16_MAX ((u16)~0U)
36 : #define S16_MAX ((s16)(U16_MAX>>1))
37 : #define S16_MIN ((s16)(-S16_MAX - 1))
38 : #define U32_MAX ((u32)~0U)
39 : #define S32_MAX ((s32)(U32_MAX>>1))
40 : #define S32_MIN ((s32)(-S32_MAX - 1))
41 : #define U64_MAX ((u64)~0ULL)
42 : #define S64_MAX ((s64)(U64_MAX>>1))
43 : #define S64_MIN ((s64)(-S64_MAX - 1))
44 :
45 : #define STACK_MAGIC 0xdeadbeef
46 :
47 : #define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))
48 :
49 : #define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
50 : #define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask))
51 : #define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
52 : #define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
53 :
54 : #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
55 :
56 : /*
57 : * This looks more complex than it should be. But we need to
58 : * get the type for the ~ right in round_down (it needs to be
59 : * as wide as the result!), and we want to evaluate the macro
60 : * arguments just once each.
61 : */
62 : #define __round_mask(x, y) ((__typeof__(x))((y)-1))
63 : #define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
64 : #define round_down(x, y) ((x) & ~__round_mask(x, y))
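/*
 * Illustrative sketch (not part of the original header): round_up() and
 * round_down() require 'y' to be a power of two, since they operate by
 * bit masking:
 *
 *	round_up(13, 8)   == 16
 *	round_down(13, 8) == 8
 *	round_up(16, 8)   == 16
 *
 * For an arbitrary (non-power-of-two) step, use roundup()/rounddown() below.
 */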
65 :
66 : #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
67 : #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
68 : #define DIV_ROUND_UP_ULL(ll,d) \
69 : ({ unsigned long long _tmp = (ll)+(d)-1; do_div(_tmp, d); _tmp; })
70 :
71 : #if BITS_PER_LONG == 32
72 : # define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP_ULL(ll, d)
73 : #else
74 : # define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP(ll,d)
75 : #endif
76 :
77 : /* The `const' in roundup() prevents gcc-3.3 from calling __divdi3 */
78 : #define roundup(x, y) ( \
79 : { \
80 : const typeof(y) __y = y; \
81 : (((x) + (__y - 1)) / __y) * __y; \
82 : } \
83 : )
84 : #define rounddown(x, y) ( \
85 : { \
86 : typeof(x) __x = (x); \
87 : __x - (__x % (y)); \
88 : } \
89 : )
90 :
91 : /*
92 : * Divide positive or negative dividend by positive divisor and round
93 : * to closest integer. Result is undefined for negative divisors and
94 : * for negative dividends if the divisor variable type is unsigned.
95 : */
96 : #define DIV_ROUND_CLOSEST(x, divisor)( \
97 : { \
98 : typeof(x) __x = x; \
99 : typeof(divisor) __d = divisor; \
100 : (((typeof(x))-1) > 0 || \
101 : ((typeof(divisor))-1) > 0 || (__x) > 0) ? \
102 : (((__x) + ((__d) / 2)) / (__d)) : \
103 : (((__x) - ((__d) / 2)) / (__d)); \
104 : } \
105 : )
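/*
 * Illustrative sketch (not part of the original header) of the rounding
 * behaviour documented above:
 *
 *	DIV_ROUND_UP(7, 2)        == 4
 *	DIV_ROUND_CLOSEST(7, 2)   == 4	// 3.5 rounds up
 *	DIV_ROUND_CLOSEST(5, 2)   == 3
 *	DIV_ROUND_CLOSEST(-7, 2)  == -4	// signed dividend, positive divisor
 */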
106 :
107 : /*
108 : * Multiplies an integer by a fraction, while avoiding unnecessary
109 : * overflow or loss of precision.
110 : */
111 : #define mult_frac(x, numer, denom)( \
112 : { \
113 : typeof(x) quot = (x) / (denom); \
114 : typeof(x) rem = (x) % (denom); \
115 : (quot * (numer)) + ((rem * (numer)) / (denom)); \
116 : } \
117 : )
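/*
 * Illustrative sketch (not part of the original header): compute x * 3 / 4
 * without overflowing the intermediate product:
 *
 *	u32 x = 0xf0000000;
 *	u32 y = mult_frac(x, 3, 4);	// 0xb4000000, exact
 *
 * The naive (x * 3) / 4 would overflow the 32-bit intermediate result.
 */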
118 :
119 :
120 : #define _RET_IP_ (unsigned long)__builtin_return_address(0)
121 : #define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })
122 :
123 : #ifdef CONFIG_LBDAF
124 : # include <asm/div64.h>
125 : # define sector_div(a, b) do_div(a, b)
126 : #else
127 : # define sector_div(n, b)( \
128 : { \
129 : int _res; \
130 : _res = (n) % (b); \
131 : (n) /= (b); \
132 : _res; \
133 : } \
134 : )
135 : #endif
136 :
137 : /**
138 : * upper_32_bits - return bits 32-63 of a number
139 : * @n: the number we're accessing
140 : *
141 : * A basic shift-right of a 64- or 32-bit quantity. Use this to suppress
142 : * the "right shift count >= width of type" warning when that quantity is
143 : * 32-bits.
144 : */
145 : #define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
146 :
147 : /**
148 : * lower_32_bits - return bits 0-31 of a number
149 : * @n: the number we're accessing
150 : */
151 : #define lower_32_bits(n) ((u32)(n))
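/*
 * Illustrative sketch (not part of the original header): split a 64-bit
 * address into its two 32-bit halves:
 *
 *	u64 addr = 0x123456789abcdef0ULL;
 *	u32 hi = upper_32_bits(addr);	// 0x12345678
 *	u32 lo = lower_32_bits(addr);	// 0x9abcdef0
 */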
152 :
153 : struct completion;
154 : struct pt_regs;
155 : struct user;
156 :
157 : #ifdef CONFIG_PREEMPT_VOLUNTARY
158 : extern int _cond_resched(void);
159 : # define might_resched() _cond_resched()
160 : #else
161 : # define might_resched() do { } while (0)
162 : #endif
163 :
164 : #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
165 : void __might_sleep(const char *file, int line, int preempt_offset);
166 : /**
167 : * might_sleep - annotation for functions that can sleep
168 : *
169 : * This macro will print a stack trace if it is executed in an atomic
170 : * context (spinlock, irq-handler, ...).
171 : *
172 : * This is a useful debugging aid for catching problems early, rather than
173 : * being bitten later when the calling function happens to sleep when it is
174 : * not supposed to.
175 : */
176 : # define might_sleep() \
177 : do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
178 : #else
179 : static inline void __might_sleep(const char *file, int line,
180 : int preempt_offset) { }
181 : # define might_sleep() do { might_resched(); } while (0)
182 : #endif
183 :
184 : #define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
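/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * helper annotated with might_sleep(), so that calling it from atomic
 * context is reported even on paths that happen not to block:
 *
 *	void *my_buf_alloc(size_t len)		// hypothetical helper
 *	{
 *		might_sleep();
 *		return kmalloc(len, GFP_KERNEL);
 *	}
 */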
185 :
186 : /*
187 : * abs() handles unsigned and signed longs, ints, shorts and chars. For all
188 : * input types abs() returns a signed long.
189 : * abs() should not be used for 64-bit types (s64, u64, long long) - use abs64()
190 : * for those.
191 : */
192 : #define abs(x) ({ \
193 : long ret; \
194 : if (sizeof(x) == sizeof(long)) { \
195 : long __x = (x); \
196 : ret = (__x < 0) ? -__x : __x; \
197 : } else { \
198 : int __x = (x); \
199 : ret = (__x < 0) ? -__x : __x; \
200 : } \
201 : ret; \
202 : })
203 :
204 : #define abs64(x) ({ \
205 : s64 __x = (x); \
206 : (__x < 0) ? -__x : __x; \
207 : })
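/*
 * Illustrative sketch (not part of the original header): abs() returns a
 * long, so 64-bit values must go through abs64() to avoid truncation on
 * 32-bit kernels:
 *
 *	long a = abs(-5);			// 5
 *	s64 b = abs64(-0x100000000LL);		// 0x100000000; abs() would truncate this on 32-bit
 */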
208 :
209 : /**
210 : * reciprocal_scale - "scale" a value into range [0, ep_ro)
211 : * @val: value
212 : * @ep_ro: right open interval endpoint
213 : *
214 : * Perform a "reciprocal multiplication" in order to "scale" a value into
215 : * range [0, ep_ro), where the upper interval endpoint is right-open.
216 : * This is useful, e.g., for accessing an index of an array containing
217 : * ep_ro elements. Think of it as a sort of modulus, except that the
218 : * result is not that of modulo. ;) Note that if the initial input is a
219 : * small value, the result will be 0.
220 : *
221 : * Return: a result based on val in interval [0, ep_ro).
222 : */
223 : static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
224 : {
225 : return (u32)(((u64) val * ep_ro) >> 32);
226 : }
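/*
 * Illustrative sketch (not part of the original header): map a 32-bit hash
 * onto a bucket index without a modulo operation (names are hypothetical):
 *
 *	u32 hash = my_hash_fn(key);		// hypothetical hash function
 *	u32 bucket = reciprocal_scale(hash, nr_buckets);
 *
 * The result is always < nr_buckets; small hash values map to bucket 0.
 */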
227 :
228 : #if defined(CONFIG_MMU) && \
229 : (defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP))
230 : void might_fault(void);
231 : #else
232 : static inline void might_fault(void) { }
233 : #endif
234 :
235 : extern struct atomic_notifier_head panic_notifier_list;
236 : extern long (*panic_blink)(int state);
237 : __printf(1, 2)
238 : void panic(const char *fmt, ...)
239 : __noreturn __cold;
240 : extern void oops_enter(void);
241 : extern void oops_exit(void);
242 : void print_oops_end_marker(void);
243 : extern int oops_may_print(void);
244 : void do_exit(long error_code)
245 : __noreturn;
246 : void complete_and_exit(struct completion *, long)
247 : __noreturn;
248 :
249 : /* Internal, do not use. */
250 : int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res);
251 : int __must_check _kstrtol(const char *s, unsigned int base, long *res);
252 :
253 : int __must_check kstrtoull(const char *s, unsigned int base, unsigned long long *res);
254 : int __must_check kstrtoll(const char *s, unsigned int base, long long *res);
255 :
256 : /**
257 : * kstrtoul - convert a string to an unsigned long
258 : * @s: The start of the string. The string must be null-terminated, and may also
259 : * include a single newline before its terminating null. The first character
260 : * may also be a plus sign, but not a minus sign.
261 : * @base: The number base to use. The maximum supported base is 16. If base is
262 : * given as 0, then the base of the string is automatically detected with the
263 : * conventional semantics - If it begins with 0x the number will be parsed as a
264 : * hexadecimal (case insensitive), if it otherwise begins with 0, it will be
265 : * parsed as an octal number. Otherwise it will be parsed as a decimal.
266 : * @res: Where to write the result of the conversion on success.
267 : *
268 : * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
269 : * Used as a replacement for the obsolete simple_strtoul. Return code must
270 : * be checked.
271 : */
272 : static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res)
273 : {
274 : /*
275 : * We want to shortcut function call, but
276 : * __builtin_types_compatible_p(unsigned long, unsigned long long) = 0.
277 : */
278 : if (sizeof(unsigned long) == sizeof(unsigned long long) &&
279 : __alignof__(unsigned long) == __alignof__(unsigned long long))
280 : return kstrtoull(s, base, (unsigned long long *)res);
281 : else
282 : return _kstrtoul(s, base, res);
283 : }
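/*
 * Illustrative sketch (not part of the original header): parse user-supplied
 * text with the mandatory return-code check:
 *
 *	unsigned long val;
 *	int ret = kstrtoul("0x1a\n", 0, &val);	// base auto-detected -> hex
 *	if (ret)
 *		return ret;			// -EINVAL or -ERANGE
 *	// val == 26
 */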
284 :
285 : /**
286 : * kstrtol - convert a string to a long
287 : * @s: The start of the string. The string must be null-terminated, and may also
288 : * include a single newline before its terminating null. The first character
289 : * may also be a plus sign or a minus sign.
290 : * @base: The number base to use. The maximum supported base is 16. If base is
291 : * given as 0, then the base of the string is automatically detected with the
292 : * conventional semantics - If it begins with 0x the number will be parsed as a
293 : * hexadecimal (case insensitive), if it otherwise begins with 0, it will be
294 : * parsed as an octal number. Otherwise it will be parsed as a decimal.
295 : * @res: Where to write the result of the conversion on success.
296 : *
297 : * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
298 : * Used as a replacement for the obsolete simple_strtol. Return code must
299 : * be checked.
300 : */
301 : static inline int __must_check kstrtol(const char *s, unsigned int base, long *res)
302 : {
303 : /*
304 : * We want to shortcut function call, but
305 : * __builtin_types_compatible_p(long, long long) = 0.
306 : */
307 : if (sizeof(long) == sizeof(long long) &&
308 : __alignof__(long) == __alignof__(long long))
309 : return kstrtoll(s, base, (long long *)res);
310 : else
311 : return _kstrtol(s, base, res);
312 : }
313 :
314 : int __must_check kstrtouint(const char *s, unsigned int base, unsigned int *res);
315 : int __must_check kstrtoint(const char *s, unsigned int base, int *res);
316 :
317 : static inline int __must_check kstrtou64(const char *s, unsigned int base, u64 *res)
318 : {
319 : return kstrtoull(s, base, res);
320 : }
321 :
322 : static inline int __must_check kstrtos64(const char *s, unsigned int base, s64 *res)
323 : {
324 : return kstrtoll(s, base, res);
325 : }
326 :
327 : static inline int __must_check kstrtou32(const char *s, unsigned int base, u32 *res)
328 : {
329 : return kstrtouint(s, base, res);
330 : }
331 :
332 : static inline int __must_check kstrtos32(const char *s, unsigned int base, s32 *res)
333 : {
334 : return kstrtoint(s, base, res);
335 : }
336 :
337 : int __must_check kstrtou16(const char *s, unsigned int base, u16 *res);
338 : int __must_check kstrtos16(const char *s, unsigned int base, s16 *res);
339 : int __must_check kstrtou8(const char *s, unsigned int base, u8 *res);
340 : int __must_check kstrtos8(const char *s, unsigned int base, s8 *res);
341 :
342 : int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res);
343 : int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res);
344 : int __must_check kstrtoul_from_user(const char __user *s, size_t count, unsigned int base, unsigned long *res);
345 : int __must_check kstrtol_from_user(const char __user *s, size_t count, unsigned int base, long *res);
346 : int __must_check kstrtouint_from_user(const char __user *s, size_t count, unsigned int base, unsigned int *res);
347 : int __must_check kstrtoint_from_user(const char __user *s, size_t count, unsigned int base, int *res);
348 : int __must_check kstrtou16_from_user(const char __user *s, size_t count, unsigned int base, u16 *res);
349 : int __must_check kstrtos16_from_user(const char __user *s, size_t count, unsigned int base, s16 *res);
350 : int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res);
351 : int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res);
352 :
353 : static inline int __must_check kstrtou64_from_user(const char __user *s, size_t count, unsigned int base, u64 *res)
354 : {
355 : return kstrtoull_from_user(s, count, base, res);
356 : }
357 :
358 : static inline int __must_check kstrtos64_from_user(const char __user *s, size_t count, unsigned int base, s64 *res)
359 : {
360 : return kstrtoll_from_user(s, count, base, res);
361 : }
362 :
363 : static inline int __must_check kstrtou32_from_user(const char __user *s, size_t count, unsigned int base, u32 *res)
364 : {
365 : return kstrtouint_from_user(s, count, base, res);
366 : }
367 :
368 : static inline int __must_check kstrtos32_from_user(const char __user *s, size_t count, unsigned int base, s32 *res)
369 : {
370 : return kstrtoint_from_user(s, count, base, res);
371 : }
372 :
373 : /* Obsolete, do not use. Use kstrto<foo> instead */
374 :
375 : extern unsigned long simple_strtoul(const char *,char **,unsigned int);
376 : extern long simple_strtol(const char *,char **,unsigned int);
377 : extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
378 : extern long long simple_strtoll(const char *,char **,unsigned int);
379 : #define strict_strtoul kstrtoul
380 : #define strict_strtol kstrtol
381 : #define strict_strtoull kstrtoull
382 : #define strict_strtoll kstrtoll
383 :
384 : extern int num_to_str(char *buf, int size, unsigned long long num);
385 :
386 : /* lib/printf utilities */
387 :
388 : extern __printf(2, 3) int sprintf(char *buf, const char * fmt, ...);
389 : extern __printf(2, 0) int vsprintf(char *buf, const char *, va_list);
390 : extern __printf(3, 4)
391 : int snprintf(char *buf, size_t size, const char *fmt, ...);
392 : extern __printf(3, 0)
393 : int vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
394 : extern __printf(3, 4)
395 : int scnprintf(char *buf, size_t size, const char *fmt, ...);
396 : extern __printf(3, 0)
397 : int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
398 : extern __printf(2, 3)
399 : char *kasprintf(gfp_t gfp, const char *fmt, ...);
400 : extern char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
401 :
402 : extern __scanf(2, 3)
403 : int sscanf(const char *, const char *, ...);
404 : extern __scanf(2, 0)
405 : int vsscanf(const char *, const char *, va_list);
406 :
407 : extern int get_option(char **str, int *pint);
408 : extern char *get_options(const char *str, int nints, int *ints);
409 : extern unsigned long long memparse(const char *ptr, char **retptr);
410 :
411 : extern int core_kernel_text(unsigned long addr);
412 : extern int core_kernel_data(unsigned long addr);
413 : extern int __kernel_text_address(unsigned long addr);
414 : extern int kernel_text_address(unsigned long addr);
415 : extern int func_ptr_is_kernel_text(void *ptr);
416 :
417 : struct pid;
418 : extern struct pid *session_of_pgrp(struct pid *pgrp);
419 :
420 : unsigned long int_sqrt(unsigned long);
421 :
422 : extern void bust_spinlocks(int yes);
423 : extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */
424 : extern int panic_timeout;
425 : extern int panic_on_oops;
426 : extern int panic_on_unrecovered_nmi;
427 : extern int panic_on_io_nmi;
428 : extern int sysctl_panic_on_stackoverflow;
429 : /*
430 : * Only to be used by arch init code. If the user overrode the default
431 : * CONFIG_PANIC_TIMEOUT, honor it.
432 : */
433 : static inline void set_arch_panic_timeout(int timeout, int arch_default_timeout)
434 : {
435 : if (panic_timeout == arch_default_timeout)
436 : panic_timeout = timeout;
437 : }
438 : extern const char *print_tainted(void);
439 : enum lockdep_ok {
440 : LOCKDEP_STILL_OK,
441 : LOCKDEP_NOW_UNRELIABLE
442 : };
443 : extern void add_taint(unsigned flag, enum lockdep_ok);
444 : extern int test_taint(unsigned flag);
445 : extern unsigned long get_taint(void);
446 : extern int root_mountflags;
447 :
448 : extern bool early_boot_irqs_disabled;
449 :
450 : /* Values used for system_state */
451 : extern enum system_states {
452 : SYSTEM_BOOTING,
453 : SYSTEM_RUNNING,
454 : SYSTEM_HALT,
455 : SYSTEM_POWER_OFF,
456 : SYSTEM_RESTART,
457 : } system_state;
458 :
459 : #define TAINT_PROPRIETARY_MODULE 0
460 : #define TAINT_FORCED_MODULE 1
461 : #define TAINT_CPU_OUT_OF_SPEC 2
462 : #define TAINT_FORCED_RMMOD 3
463 : #define TAINT_MACHINE_CHECK 4
464 : #define TAINT_BAD_PAGE 5
465 : #define TAINT_USER 6
466 : #define TAINT_DIE 7
467 : #define TAINT_OVERRIDDEN_ACPI_TABLE 8
468 : #define TAINT_WARN 9
469 : #define TAINT_CRAP 10
470 : #define TAINT_FIRMWARE_WORKAROUND 11
471 : #define TAINT_OOT_MODULE 12
472 : #define TAINT_UNSIGNED_MODULE 13
473 : #define TAINT_SOFTLOCKUP 14
474 :
475 : extern const char hex_asc[];
476 : #define hex_asc_lo(x) hex_asc[((x) & 0x0f)]
477 : #define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4]
478 :
479 : static inline char *hex_byte_pack(char *buf, u8 byte)
480 : {
481 : *buf++ = hex_asc_hi(byte);
482 : *buf++ = hex_asc_lo(byte);
483 : return buf;
484 : }
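/*
 * Illustrative sketch (not part of the original header): hex_byte_pack()
 * emits two lower-case hex digits and advances the buffer pointer:
 *
 *	char buf[3];
 *	char *p = hex_byte_pack(buf, 0xa5);	// buf now holds 'a', '5'
 *	*p = '\0';				// "a5"
 */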
485 :
486 : extern const char hex_asc_upper[];
487 : #define hex_asc_upper_lo(x) hex_asc_upper[((x) & 0x0f)]
488 : #define hex_asc_upper_hi(x) hex_asc_upper[((x) & 0xf0) >> 4]
489 :
490 : static inline char *hex_byte_pack_upper(char *buf, u8 byte)
491 : {
492 : *buf++ = hex_asc_upper_hi(byte);
493 : *buf++ = hex_asc_upper_lo(byte);
494 : return buf;
495 : }
496 :
497 : extern int hex_to_bin(char ch);
498 : extern int __must_check hex2bin(u8 *dst, const char *src, size_t count);
499 :
500 : bool mac_pton(const char *s, u8 *mac);
501 :
502 : /*
503 : * General tracing related utility functions - trace_printk(),
504 : * tracing_on/tracing_off and tracing_start()/tracing_stop
505 : *
506 : * Use tracing_on/tracing_off when you want to quickly turn on or off
507 : * tracing. It simply enables or disables the recording of the trace events.
508 : * This also corresponds to the user space /sys/kernel/debug/tracing/tracing_on
509 : * file, which gives a means for the kernel and userspace to interact.
510 : * Place a tracing_off() in the kernel where you want tracing to end.
511 : * From user space, examine the trace, and then echo 1 > tracing_on
512 : * to continue tracing.
513 : *
514 : * tracing_stop/tracing_start has slightly more overhead. It is used
515 : * by things like suspend to ram where disabling the recording of the
516 : * trace is not enough, but tracing must actually stop because things
517 : * like calling smp_processor_id() may crash the system.
518 : *
519 : * Most likely, you want to use tracing_on/tracing_off.
520 : */
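/*
 * Illustrative sketch (not part of the original header): freeze the ring
 * buffer as soon as a suspected problem is seen, so the events leading up
 * to it are preserved for inspection (the condition is hypothetical):
 *
 *	if (unlikely(data_looks_corrupted(obj))) {
 *		tracing_off();
 *		WARN_ON(1);
 *	}
 */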
521 : #ifdef CONFIG_RING_BUFFER
522 : /* tracing_off_permanent stops recording with no way to bring it back */
523 : void tracing_off_permanent(void);
524 : #else
525 : static inline void tracing_off_permanent(void) { }
526 : #endif
527 :
528 : enum ftrace_dump_mode {
529 : DUMP_NONE,
530 : DUMP_ALL,
531 : DUMP_ORIG,
532 : };
533 :
534 : #ifdef CONFIG_TRACING
535 : void tracing_on(void);
536 : void tracing_off(void);
537 : int tracing_is_on(void);
538 : void tracing_snapshot(void);
539 : void tracing_snapshot_alloc(void);
540 :
541 : extern void tracing_start(void);
542 : extern void tracing_stop(void);
543 :
544 : static inline __printf(1, 2)
545 : void ____trace_printk_check_format(const char *fmt, ...)
546 : {
547 : }
548 : #define __trace_printk_check_format(fmt, args...) \
549 : do { \
550 : if (0) \
551 : ____trace_printk_check_format(fmt, ##args); \
552 : } while (0)
553 :
554 : /**
555 : * trace_printk - printf formatting in the ftrace buffer
556 : * @fmt: the printf format for printing
557 : *
558 : * Note: __trace_printk is an internal function for trace_printk and
559 : * the @ip is passed in via the trace_printk macro.
560 : *
561 : * This function allows a kernel developer to debug fast path sections
562 : * that printk is not appropriate for. By scattering in various
563 : * printk like tracing in the code, a developer can quickly see
564 : * where problems are occurring.
565 : *
566 : * This is intended as a debugging tool for the developer only.
567 : * Please refrain from leaving trace_printks scattered around in
568 : * your code. (Extra memory is used for special buffers that are
569 : * allocated when trace_printk() is used)
570 : *
571 : * A little optimization trick is done here. If there's only one
572 : * argument, there's no need to scan the string for printf formats.
573 : * The trace_puts() will suffice. But how can we take advantage of
574 : * using trace_puts() when trace_printk() has only one argument?
575 : * By stringifying the args and checking the size we can tell
576 : * whether or not there are args. __stringify((__VA_ARGS__)) will
577 : * turn into "()\0" with a size of 3 when there are no args, anything
578 : * else will be bigger. All we need to do is define a string to this,
579 : * and then take its size and compare to 3. If it's bigger, use
580 : * do_trace_printk() otherwise, optimize it to trace_puts(). Then just
581 : * let gcc optimize the rest.
582 : */
583 :
584 : #define trace_printk(fmt, ...) \
585 : do { \
586 : char _______STR[] = __stringify((__VA_ARGS__)); \
587 : if (sizeof(_______STR) > 3) \
588 : do_trace_printk(fmt, ##__VA_ARGS__); \
589 : else \
590 : trace_puts(fmt); \
591 : } while (0)
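/*
 * Illustrative sketch (not part of the original header) of the size check
 * above:
 *
 *	trace_printk("in irq\n");	// __stringify(()) == "()", size 3 -> trace_puts()
 *	trace_printk("x=%d\n", x);	// __stringify((x)) == "(x)", size 4 -> do_trace_printk()
 */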
592 :
593 : #define do_trace_printk(fmt, args...) \
594 : do { \
595 : static const char *trace_printk_fmt \
596 : __attribute__((section("__trace_printk_fmt"))) = \
597 : __builtin_constant_p(fmt) ? fmt : NULL; \
598 : \
599 : __trace_printk_check_format(fmt, ##args); \
600 : \
601 : if (__builtin_constant_p(fmt)) \
602 : __trace_bprintk(_THIS_IP_, trace_printk_fmt, ##args); \
603 : else \
604 : __trace_printk(_THIS_IP_, fmt, ##args); \
605 : } while (0)
606 :
607 : extern __printf(2, 3)
608 : int __trace_bprintk(unsigned long ip, const char *fmt, ...);
609 :
610 : extern __printf(2, 3)
611 : int __trace_printk(unsigned long ip, const char *fmt, ...);
612 :
613 : /**
614 : * trace_puts - write a string into the ftrace buffer
615 : * @str: the string to record
616 : *
617 : * Note: __trace_bputs is an internal function for trace_puts and
618 : * the @ip is passed in via the trace_puts macro.
619 : *
620 : * This is similar to trace_printk() but is made for those really fast
621 : * paths where a developer wants the fewest "Heisenbug" effects, and
622 : * where even the processing of the print format is too much overhead.
623 : *
624 : * This function allows a kernel developer to debug fast path sections
625 : * that printk is not appropriate for. By scattering in various
626 : * printk like tracing in the code, a developer can quickly see
627 : * where problems are occurring.
628 : *
629 : * This is intended as a debugging tool for the developer only.
630 : * Please refrain from leaving trace_puts scattered around in
631 : * your code. (Extra memory is used for special buffers that are
632 : * allocated when trace_puts() is used)
633 : *
634 : * Returns: 0 if nothing was written, positive # if string was.
635 : * (1 when __trace_bputs is used, strlen(str) when __trace_puts is used)
636 : */
637 :
638 : #define trace_puts(str) ({ \
639 : static const char *trace_printk_fmt \
640 : __attribute__((section("__trace_printk_fmt"))) = \
641 : __builtin_constant_p(str) ? str : NULL; \
642 : \
643 : if (__builtin_constant_p(str)) \
644 : __trace_bputs(_THIS_IP_, trace_printk_fmt); \
645 : else \
646 : __trace_puts(_THIS_IP_, str, strlen(str)); \
647 : })
648 : extern int __trace_bputs(unsigned long ip, const char *str);
649 : extern int __trace_puts(unsigned long ip, const char *str, int size);
650 :
651 : extern void trace_dump_stack(int skip);
652 :
653 : /*
654 : * The double __builtin_constant_p is because gcc will give us an error
655 : * if we try to assign fmt to the static variable when fmt is not a
656 : * constant, even with the outer if statement.
657 : */
658 : #define ftrace_vprintk(fmt, vargs) \
659 : do { \
660 : if (__builtin_constant_p(fmt)) { \
661 : static const char *trace_printk_fmt \
662 : __attribute__((section("__trace_printk_fmt"))) = \
663 : __builtin_constant_p(fmt) ? fmt : NULL; \
664 : \
665 : __ftrace_vbprintk(_THIS_IP_, trace_printk_fmt, vargs); \
666 : } else \
667 : __ftrace_vprintk(_THIS_IP_, fmt, vargs); \
668 : } while (0)
669 :
670 : extern int
671 : __ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
672 :
673 : extern int
674 : __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
675 :
676 : extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
677 : #else
678 : static inline void tracing_start(void) { }
679 : static inline void tracing_stop(void) { }
680 : static inline void trace_dump_stack(int skip) { }
681 :
682 : static inline void tracing_on(void) { }
683 : static inline void tracing_off(void) { }
684 : static inline int tracing_is_on(void) { return 0; }
685 : static inline void tracing_snapshot(void) { }
686 : static inline void tracing_snapshot_alloc(void) { }
687 :
688 : static inline __printf(1, 2)
689 : int trace_printk(const char *fmt, ...)
690 : {
691 : return 0;
692 : }
693 : static inline int
694 : ftrace_vprintk(const char *fmt, va_list ap)
695 : {
696 : return 0;
697 : }
698 : static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
699 : #endif /* CONFIG_TRACING */
700 :
701 : /*
702 : * min()/max()/clamp() macros that also do
703 : * strict type-checking. See the
704 : * "unnecessary" pointer comparison.
705 : */
706 : #define min(x, y) ({ \
707 : typeof(x) _min1 = (x); \
708 : typeof(y) _min2 = (y); \
709 : (void) (&_min1 == &_min2); \
710 : _min1 < _min2 ? _min1 : _min2; })
711 :
712 : #define max(x, y) ({ \
713 : typeof(x) _max1 = (x); \
714 : typeof(y) _max2 = (y); \
715 : (void) (&_max1 == &_max2); \
716 : _max1 > _max2 ? _max1 : _max2; })
717 :
718 : #define min3(x, y, z) ({ \
719 : typeof(x) _min1 = (x); \
720 : typeof(y) _min2 = (y); \
721 : typeof(z) _min3 = (z); \
722 : (void) (&_min1 == &_min2); \
723 : (void) (&_min1 == &_min3); \
724 : _min1 < _min2 ? (_min1 < _min3 ? _min1 : _min3) : \
725 : (_min2 < _min3 ? _min2 : _min3); })
726 :
727 : #define max3(x, y, z) ({ \
728 : typeof(x) _max1 = (x); \
729 : typeof(y) _max2 = (y); \
730 : typeof(z) _max3 = (z); \
731 : (void) (&_max1 == &_max2); \
732 : (void) (&_max1 == &_max3); \
733 : _max1 > _max2 ? (_max1 > _max3 ? _max1 : _max3) : \
734 : (_max2 > _max3 ? _max2 : _max3); })
735 :
736 : /**
737 : * min_not_zero - return the minimum that is _not_ zero, unless both are zero
738 : * @x: value1
739 : * @y: value2
740 : */
741 : #define min_not_zero(x, y) ({ \
742 : typeof(x) __x = (x); \
743 : typeof(y) __y = (y); \
744 : __x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })
745 :
746 : /**
747 : * clamp - return a value clamped to a given range with strict typechecking
748 : * @val: current value
749 : * @min: minimum allowable value
750 : * @max: maximum allowable value
751 : *
752 : * This macro does strict typechecking of min/max to make sure they are of the
753 : * same type as val. See the unnecessary pointer comparisons.
754 : */
755 : #define clamp(val, min, max) ({ \
756 : typeof(val) __val = (val); \
757 : typeof(min) __min = (min); \
758 : typeof(max) __max = (max); \
759 : (void) (&__val == &__min); \
760 : (void) (&__val == &__max); \
761 : __val = __val < __min ? __min: __val; \
762 : __val > __max ? __max: __val; })
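/*
 * Illustrative sketch (not part of the original header): the "unnecessary"
 * pointer comparisons above make gcc warn when the argument types differ:
 *
 *	int a = -1;
 *	unsigned int b = 1;
 *	min(a, b);		// warning: comparison of distinct pointer types
 *	min_t(int, a, b);	// explicit type, no warning; result is -1
 *	clamp(a, 0, 10);	// 0
 */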
763 :
764 : /*
765 : * ...and if you can't take the strict
766 : * types, you can specify one yourself.
767 : *
768 : * Or not use min/max/clamp at all, of course.
769 : */
770 : #define min_t(type, x, y) ({ \
771 : type __min1 = (x); \
772 : type __min2 = (y); \
773 : __min1 < __min2 ? __min1: __min2; })
774 :
775 : #define max_t(type, x, y) ({ \
776 : type __max1 = (x); \
777 : type __max2 = (y); \
778 : __max1 > __max2 ? __max1: __max2; })
779 :
780 : /**
781 : * clamp_t - return a value clamped to a given range using a given type
782 : * @type: the type of variable to use
783 : * @val: current value
784 : * @min: minimum allowable value
785 : * @max: maximum allowable value
786 : *
787 : * This macro does no typechecking and uses temporary variables of type
788 : * 'type' to make all the comparisons.
789 : */
790 : #define clamp_t(type, val, min, max) ({ \
791 : type __val = (val); \
792 : type __min = (min); \
793 : type __max = (max); \
794 : __val = __val < __min ? __min: __val; \
795 : __val > __max ? __max: __val; })
796 :
797 : /**
798 : * clamp_val - return a value clamped to a given range using val's type
799 : * @val: current value
800 : * @min: minimum allowable value
801 : * @max: maximum allowable value
802 : *
803 : * This macro does no typechecking and uses temporary variables of whatever
804 : * type the input argument 'val' is. This is useful when val is an unsigned
805 : * type and min and max are literals that will otherwise be assigned a signed
806 : * integer type.
807 : */
808 : #define clamp_val(val, min, max) ({ \
809 : typeof(val) __val = (val); \
810 : typeof(val) __min = (min); \
811 : typeof(val) __max = (max); \
812 : __val = __val < __min ? __min: __val; \
813 : __val > __max ? __max: __val; })
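/*
 * Illustrative sketch (not part of the original header): clamp_val() keeps
 * the comparisons in the type of @val, which matters when @val is unsigned
 * and the limits are plain integer literals:
 *
 *	u8 len = 200;
 *	u8 a = clamp_val(len, 16, 64);		// 64, compared as u8
 *	u8 b = clamp_t(u8, len, 16, 64);	// equivalent spelling
 */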
814 :
815 :
816 : /*
817 : * swap - swap value of @a and @b
818 : */
819 : #define swap(a, b) \
820 : do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
821 :
822 : /**
823 : * container_of - cast a member of a structure out to the containing structure
824 : * @ptr: the pointer to the member.
825 : * @type: the type of the container struct this is embedded in.
826 : * @member: the name of the member within the struct.
827 : *
828 : */
829 : #define container_of(ptr, type, member) ({ \
830 : const typeof( ((type *)0)->member ) *__mptr = (ptr); \
831 : (type *)( (char *)__mptr - offsetof(type,member) );})
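/*
 * Illustrative sketch (not part of the original header): recover the
 * enclosing structure from a pointer to one of its members (the struct and
 * the member pointer are hypothetical):
 *
 *	struct my_dev {
 *		int id;
 *		struct list_head node;
 *	};
 *
 *	struct my_dev *dev = container_of(node_ptr, struct my_dev, node);
 */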
832 :
833 : /* Trap pasters of __FUNCTION__ at compile-time */
834 : #define __FUNCTION__ (__func__)
835 :
836 : /* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */
837 : #ifdef CONFIG_FTRACE_MCOUNT_RECORD
838 : # define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
839 : #endif
840 :
841 : /* Permissions on a sysfs file: you didn't miss the 0 prefix did you? */
842 : #define VERIFY_OCTAL_PERMISSIONS(perms) \
843 : (BUILD_BUG_ON_ZERO((perms) < 0) + \
844 : BUILD_BUG_ON_ZERO((perms) > 0777) + \
845 : /* User perms >= group perms >= other perms */ \
846 : BUILD_BUG_ON_ZERO(((perms) >> 6) < (((perms) >> 3) & 7)) + \
847 : BUILD_BUG_ON_ZERO((((perms) >> 3) & 7) < ((perms) & 7)) + \
848 : /* Other writable? Generally considered a bad idea. */ \
849 : BUILD_BUG_ON_ZERO((perms) & 2) + \
850 : (perms))
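/*
 * Illustrative sketch (not part of the original header): the checks above
 * accept conventional modes and break the build for suspicious ones:
 *
 *	VERIFY_OCTAL_PERMISSIONS(0644)	// ok
 *	VERIFY_OCTAL_PERMISSIONS(0666)	// build error: other-writable
 *	VERIFY_OCTAL_PERMISSIONS(0464)	// build error: group perms exceed user perms
 */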
851 : #endif