LCOV - code coverage report
Current view:  top level - include/linux - mm.h (source / functions)
Test:          btrfstest.info
Date:          2014-11-28

                 Hit    Total    Coverage
Lines:            12       14      85.7 %
Functions:         2        2     100.0 %

          Line data    Source code
       1             : #ifndef _LINUX_MM_H
       2             : #define _LINUX_MM_H
       3             : 
       4             : #include <linux/errno.h>
       5             : 
       6             : #ifdef __KERNEL__
       7             : 
       8             : #include <linux/mmdebug.h>
       9             : #include <linux/gfp.h>
      10             : #include <linux/bug.h>
      11             : #include <linux/list.h>
      12             : #include <linux/mmzone.h>
      13             : #include <linux/rbtree.h>
      14             : #include <linux/atomic.h>
      15             : #include <linux/debug_locks.h>
      16             : #include <linux/mm_types.h>
      17             : #include <linux/range.h>
      18             : #include <linux/pfn.h>
      19             : #include <linux/bit_spinlock.h>
      20             : #include <linux/shrinker.h>
      21             : 
      22             : struct mempolicy;
      23             : struct anon_vma;
      24             : struct anon_vma_chain;
      25             : struct file_ra_state;
      26             : struct user_struct;
      27             : struct writeback_control;
      28             : 
      29             : #ifndef CONFIG_NEED_MULTIPLE_NODES      /* Don't use mapnrs, do it properly */
      30             : extern unsigned long max_mapnr;
      31             : 
      32             : static inline void set_max_mapnr(unsigned long limit)
      33             : {
      34             :         max_mapnr = limit;
      35             : }
      36             : #else
      37             : static inline void set_max_mapnr(unsigned long limit) { }
      38             : #endif
      39             : 
      40             : extern unsigned long totalram_pages;
      41             : extern void * high_memory;
      42             : extern int page_cluster;
      43             : 
      44             : #ifdef CONFIG_SYSCTL
      45             : extern int sysctl_legacy_va_layout;
      46             : #else
      47             : #define sysctl_legacy_va_layout 0
      48             : #endif
      49             : 
      50             : #include <asm/page.h>
      51             : #include <asm/pgtable.h>
      52             : #include <asm/processor.h>
      53             : 
      54             : #ifndef __pa_symbol
      55             : #define __pa_symbol(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))
      56             : #endif
      57             : 
      58             : extern unsigned long sysctl_user_reserve_kbytes;
      59             : extern unsigned long sysctl_admin_reserve_kbytes;
      60             : 
      61             : extern int sysctl_overcommit_memory;
      62             : extern int sysctl_overcommit_ratio;
      63             : extern unsigned long sysctl_overcommit_kbytes;
      64             : 
      65             : extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *,
      66             :                                     size_t *, loff_t *);
      67             : extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
      68             :                                     size_t *, loff_t *);
      69             : 
      70             : #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
      71             : 
      72             : /* to align the pointer to the (next) page boundary */
      73             : #define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
      74             : 
      75             : /* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
      76             : #define PAGE_ALIGNED(addr)      IS_ALIGNED((unsigned long)addr, PAGE_SIZE)
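
PAGE_ALIGN() rounds an address up to the next page boundary; PAGE_ALIGNED() tests alignment. A minimal user-space sketch of the same arithmetic, assuming a hypothetical 4 KiB page size (the kernel takes PAGE_SIZE, ALIGN() and IS_ALIGNED() from its own headers; the MY_* names here are illustrative only):

#include <assert.h>
#include <stdio.h>

#define MY_PAGE_SIZE 4096UL                    /* assumed page size, for illustration */
#define MY_ALIGN(x, a)      (((x) + (a) - 1) & ~((a) - 1))
#define MY_IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

int main(void)
{
        unsigned long addr = 0x12345;

        /* round up to the next 4 KiB boundary: 0x12345 -> 0x13000 */
        assert(MY_ALIGN(addr, MY_PAGE_SIZE) == 0x13000);
        assert(!MY_IS_ALIGNED(addr, MY_PAGE_SIZE));
        assert(MY_IS_ALIGNED(0x13000UL, MY_PAGE_SIZE));

        printf("aligned: %#lx\n", MY_ALIGN(addr, MY_PAGE_SIZE));
        return 0;
}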
      77             : 
      78             : /*
      79             :  * Linux kernel virtual memory manager primitives.
      80             :  * The idea being to have a "virtual" mm in the same way
      81             :  * we have a virtual fs - giving a cleaner interface to the
      82             :  * mm details, and allowing different kinds of memory mappings
      83             :  * (from shared memory to executable loading to arbitrary
      84             :  * mmap() functions).
      85             :  */
      86             : 
      87             : extern struct kmem_cache *vm_area_cachep;
      88             : 
      89             : #ifndef CONFIG_MMU
      90             : extern struct rb_root nommu_region_tree;
      91             : extern struct rw_semaphore nommu_region_sem;
      92             : 
      93             : extern unsigned int kobjsize(const void *objp);
      94             : #endif
      95             : 
      96             : /*
      97             :  * vm_flags in vm_area_struct, see mm_types.h.
      98             :  */
      99             : #define VM_NONE         0x00000000
     100             : 
     101             : #define VM_READ         0x00000001      /* currently active flags */
     102             : #define VM_WRITE        0x00000002
     103             : #define VM_EXEC         0x00000004
     104             : #define VM_SHARED       0x00000008
     105             : 
     106             : /* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
     107             : #define VM_MAYREAD      0x00000010      /* limits for mprotect() etc */
     108             : #define VM_MAYWRITE     0x00000020
     109             : #define VM_MAYEXEC      0x00000040
     110             : #define VM_MAYSHARE     0x00000080
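
The comment above depends on each VM_MAY* bit being the matching VM_* bit shifted left by four. A small user-space check of that invariant, with the flag values copied from the definitions above:

#include <assert.h>

#define VM_READ     0x00000001
#define VM_WRITE    0x00000002
#define VM_EXEC     0x00000004
#define VM_MAYREAD  0x00000010
#define VM_MAYWRITE 0x00000020
#define VM_MAYEXEC  0x00000040

int main(void)
{
        /* mprotect() depends on this relationship for the r/w/x bits */
        assert((VM_MAYREAD  >> 4) == VM_READ);
        assert((VM_MAYWRITE >> 4) == VM_WRITE);
        assert((VM_MAYEXEC  >> 4) == VM_EXEC);
        return 0;
}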
     111             : 
     112             : #define VM_GROWSDOWN    0x00000100      /* general info on the segment */
     113             : #define VM_PFNMAP       0x00000400      /* Page-ranges managed without "struct page", just pure PFN */
     114             : #define VM_DENYWRITE    0x00000800      /* ETXTBSY on write attempts.. */
     115             : 
     116             : #define VM_LOCKED       0x00002000
     117             : #define VM_IO           0x00004000      /* Memory mapped I/O or similar */
     118             : 
     119             :                                         /* Used by sys_madvise() */
     120             : #define VM_SEQ_READ     0x00008000      /* App will access data sequentially */
     121             : #define VM_RAND_READ    0x00010000      /* App will not benefit from clustered reads */
     122             : 
     123             : #define VM_DONTCOPY     0x00020000      /* Do not copy this vma on fork */
     124             : #define VM_DONTEXPAND   0x00040000      /* Cannot expand with mremap() */
     125             : #define VM_ACCOUNT      0x00100000      /* Is a VM accounted object */
     126             : #define VM_NORESERVE    0x00200000      /* should the VM suppress accounting */
     127             : #define VM_HUGETLB      0x00400000      /* Huge TLB Page VM */
     128             : #define VM_NONLINEAR    0x00800000      /* Is non-linear (remap_file_pages) */
     129             : #define VM_ARCH_1       0x01000000      /* Architecture-specific flag */
     130             : #define VM_DONTDUMP     0x04000000      /* Do not include in the core dump */
     131             : 
     132             : #ifdef CONFIG_MEM_SOFT_DIRTY
     133             : # define VM_SOFTDIRTY   0x08000000      /* Not soft dirty clean area */
     134             : #else
     135             : # define VM_SOFTDIRTY   0
     136             : #endif
     137             : 
     138             : #define VM_MIXEDMAP     0x10000000      /* Can contain "struct page" and pure PFN pages */
     139             : #define VM_HUGEPAGE     0x20000000      /* MADV_HUGEPAGE marked this vma */
     140             : #define VM_NOHUGEPAGE   0x40000000      /* MADV_NOHUGEPAGE marked this vma */
     141             : #define VM_MERGEABLE    0x80000000      /* KSM may merge identical pages */
     142             : 
     143             : #if defined(CONFIG_X86)
     144             : # define VM_PAT         VM_ARCH_1       /* PAT reserves whole VMA at once (x86) */
     145             : #elif defined(CONFIG_PPC)
     146             : # define VM_SAO         VM_ARCH_1       /* Strong Access Ordering (powerpc) */
     147             : #elif defined(CONFIG_PARISC)
     148             : # define VM_GROWSUP     VM_ARCH_1
     149             : #elif defined(CONFIG_METAG)
     150             : # define VM_GROWSUP     VM_ARCH_1
     151             : #elif defined(CONFIG_IA64)
     152             : # define VM_GROWSUP     VM_ARCH_1
     153             : #elif !defined(CONFIG_MMU)
     154             : # define VM_MAPPED_COPY VM_ARCH_1       /* T if mapped copy of data (nommu mmap) */
     155             : #endif
     156             : 
     157             : #ifndef VM_GROWSUP
     158             : # define VM_GROWSUP     VM_NONE
     159             : #endif
     160             : 
     161             : /* Bits set in the VMA until the stack is in its final location */
     162             : #define VM_STACK_INCOMPLETE_SETUP       (VM_RAND_READ | VM_SEQ_READ)
     163             : 
     164             : #ifndef VM_STACK_DEFAULT_FLAGS          /* arch can override this */
     165             : #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
     166             : #endif
     167             : 
     168             : #ifdef CONFIG_STACK_GROWSUP
     169             : #define VM_STACK_FLAGS  (VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
     170             : #else
     171             : #define VM_STACK_FLAGS  (VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
     172             : #endif
     173             : 
     174             : /*
     175             :  * Special vmas that are non-mergable, non-mlock()able.
     176             :  * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
     177             :  */
     178             : #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
     179             : 
      180             : /* This mask defines which mm->def_flags a process can inherit from its parent */
     181             : #define VM_INIT_DEF_MASK        VM_NOHUGEPAGE
     182             : 
     183             : /*
     184             :  * mapping from the currently active vm_flags protection bits (the
     185             :  * low four bits) to a page protection mask..
     186             :  */
     187             : extern pgprot_t protection_map[16];
     188             : 
     189             : #define FAULT_FLAG_WRITE        0x01    /* Fault was a write access */
     190             : #define FAULT_FLAG_NONLINEAR    0x02    /* Fault was via a nonlinear mapping */
     191             : #define FAULT_FLAG_MKWRITE      0x04    /* Fault was mkwrite of existing pte */
     192             : #define FAULT_FLAG_ALLOW_RETRY  0x08    /* Retry fault if blocking */
     193             : #define FAULT_FLAG_RETRY_NOWAIT 0x10    /* Don't drop mmap_sem and wait when retrying */
     194             : #define FAULT_FLAG_KILLABLE     0x20    /* The fault task is in SIGKILL killable region */
     195             : #define FAULT_FLAG_TRIED        0x40    /* second try */
     196             : #define FAULT_FLAG_USER         0x80    /* The fault originated in userspace */
     197             : 
     198             : /*
      199             :  * vm_fault is filled by the pagefault handler and passed to the vma's
     200             :  * ->fault function. The vma's ->fault is responsible for returning a bitmask
     201             :  * of VM_FAULT_xxx flags that give details about how the fault was handled.
     202             :  *
     203             :  * pgoff should be used in favour of virtual_address, if possible. If pgoff
     204             :  * is used, one may implement ->remap_pages to get nonlinear mapping support.
     205             :  */
     206             : struct vm_fault {
     207             :         unsigned int flags;             /* FAULT_FLAG_xxx flags */
     208             :         pgoff_t pgoff;                  /* Logical page offset based on vma */
     209             :         void __user *virtual_address;   /* Faulting virtual address */
     210             : 
     211             :         struct page *page;              /* ->fault handlers should return a
     212             :                                          * page here, unless VM_FAULT_NOPAGE
     213             :                                          * is set (which is also implied by
     214             :                                          * VM_FAULT_ERROR).
     215             :                                          */
     216             :         /* for ->map_pages() only */
     217             :         pgoff_t max_pgoff;              /* map pages for offset from pgoff till
     218             :                                          * max_pgoff inclusive */
     219             :         pte_t *pte;                     /* pte entry associated with ->pgoff */
     220             : };
     221             : 
     222             : /*
     223             :  * These are the virtual MM functions - opening of an area, closing and
     224             :  * unmapping it (needed to keep files on disk up-to-date etc), pointer
     225             :  * to the functions called when a no-page or a wp-page exception occurs. 
     226             :  */
     227             : struct vm_operations_struct {
     228             :         void (*open)(struct vm_area_struct * area);
     229             :         void (*close)(struct vm_area_struct * area);
     230             :         int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
     231             :         void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf);
     232             : 
     233             :         /* notification that a previously read-only page is about to become
     234             :          * writable, if an error is returned it will cause a SIGBUS */
     235             :         int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);
     236             : 
     237             :         /* called by access_process_vm when get_user_pages() fails, typically
     238             :          * for use by special VMAs that can switch between memory and hardware
     239             :          */
     240             :         int (*access)(struct vm_area_struct *vma, unsigned long addr,
     241             :                       void *buf, int len, int write);
     242             : 
     243             :         /* Called by the /proc/PID/maps code to ask the vma whether it
     244             :          * has a special name.  Returning non-NULL will also cause this
     245             :          * vma to be dumped unconditionally. */
     246             :         const char *(*name)(struct vm_area_struct *vma);
     247             : 
     248             : #ifdef CONFIG_NUMA
     249             :         /*
     250             :          * set_policy() op must add a reference to any non-NULL @new mempolicy
     251             :          * to hold the policy upon return.  Caller should pass NULL @new to
     252             :          * remove a policy and fall back to surrounding context--i.e. do not
     253             :          * install a MPOL_DEFAULT policy, nor the task or system default
     254             :          * mempolicy.
     255             :          */
     256             :         int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
     257             : 
     258             :         /*
     259             :          * get_policy() op must add reference [mpol_get()] to any policy at
     260             :          * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
     261             :          * in mm/mempolicy.c will do this automatically.
     262             :          * get_policy() must NOT add a ref if the policy at (vma,addr) is not
     263             :          * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
     264             :          * If no [shared/vma] mempolicy exists at the addr, get_policy() op
     265             :          * must return NULL--i.e., do not "fallback" to task or system default
     266             :          * policy.
     267             :          */
     268             :         struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
     269             :                                         unsigned long addr);
     270             :         int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
     271             :                 const nodemask_t *to, unsigned long flags);
     272             : #endif
     273             :         /* called by sys_remap_file_pages() to populate non-linear mapping */
     274             :         int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
     275             :                            unsigned long size, pgoff_t pgoff);
     276             : };
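
As a hedged illustration of how these hooks fit together, below is a minimal, hypothetical driver-style vm_operations_struct whose ->fault handler returns a freshly allocated page through vmf->page. The example_* names are invented; this is a sketch for kernel-module context, not code from mm/.

#include <linux/mm.h>
#include <linux/gfp.h>

/* hypothetical fault handler: hand back a zeroed page for any offset */
static int example_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

        if (!page)
                return VM_FAULT_OOM;

        vmf->page = page;       /* the alloc_page() reference is returned with the page */
        return 0;
}

static const struct vm_operations_struct example_vm_ops = {
        .fault  = example_fault,
};

/* a driver's mmap() would install it with: vma->vm_ops = &example_vm_ops; */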
     277             : 
     278             : struct mmu_gather;
     279             : struct inode;
     280             : 
     281             : #define page_private(page)              ((page)->private)
     282             : #define set_page_private(page, v)       ((page)->private = (v))
     283             : 
      284             : /* Valid only while the page is in the free path or on a free_list */
     285             : static inline void set_freepage_migratetype(struct page *page, int migratetype)
     286             : {
     287             :         page->index = migratetype;
     288             : }
     289             : 
      290             : /* Valid only while the page is in the free path or on a free_list */
     291             : static inline int get_freepage_migratetype(struct page *page)
     292             : {
     293             :         return page->index;
     294             : }
     295             : 
     296             : /*
     297             :  * FIXME: take this include out, include page-flags.h in
     298             :  * files which need it (119 of them)
     299             :  */
     300             : #include <linux/page-flags.h>
     301             : #include <linux/huge_mm.h>
     302             : 
     303             : /*
     304             :  * Methods to modify the page usage count.
     305             :  *
     306             :  * What counts for a page usage:
     307             :  * - cache mapping   (page->mapping)
     308             :  * - private data    (page->private)
     309             :  * - page mapped in a task's page tables, each mapping
     310             :  *   is counted separately
     311             :  *
     312             :  * Also, many kernel routines increase the page count before a critical
     313             :  * routine so they can be sure the page doesn't go away from under them.
     314             :  */
     315             : 
     316             : /*
     317             :  * Drop a ref, return true if the refcount fell to zero (the page has no users)
     318             :  */
     319             : static inline int put_page_testzero(struct page *page)
     320             : {
     321             :         VM_BUG_ON_PAGE(atomic_read(&page->_count) == 0, page);
     322             :         return atomic_dec_and_test(&page->_count);
     323             : }
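
A hedged sketch of the caller pattern put_page_testzero() is designed for: drop the reference and free only when the count reaches zero. The real put_page() in mm/swap.c also handles compound and LRU pages, which this deliberately ignores; example_put_single_page() is an invented name.

#include <linux/mm.h>
#include <linux/gfp.h>

/* illustrative only: an order-0, non-compound, non-LRU page */
static void example_put_single_page(struct page *page)
{
        if (put_page_testzero(page))
                __free_pages(page, 0);
}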
     324             : 
     325             : /*
     326             :  * Try to grab a ref unless the page has a refcount of zero, return false if
     327             :  * that is the case.
     328             :  * This can be called when MMU is off so it must not access
     329             :  * any of the virtual mappings.
     330             :  */
     331          34 : static inline int get_page_unless_zero(struct page *page)
     332             : {
     333          34 :         return atomic_inc_not_zero(&page->_count);
     334             : }
     335             : 
     336             : /*
     337             :  * Try to drop a ref unless the page has a refcount of one, return false if
     338             :  * that is the case.
     339             :  * This is to make sure that the refcount won't become zero after this drop.
     340             :  * This can be called when MMU is off so it must not access
     341             :  * any of the virtual mappings.
     342             :  */
     343             : static inline int put_page_unless_one(struct page *page)
     344             : {
     345             :         return atomic_add_unless(&page->_count, -1, 1);
     346             : }
     347             : 
     348             : extern int page_is_ram(unsigned long pfn);
     349             : 
     350             : /* Support for virtually mapped pages */
     351             : struct page *vmalloc_to_page(const void *addr);
     352             : unsigned long vmalloc_to_pfn(const void *addr);
     353             : 
     354             : /*
     355             :  * Determine if an address is within the vmalloc range
     356             :  *
     357             :  * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
     358             :  * is no special casing required.
     359             :  */
     360             : static inline int is_vmalloc_addr(const void *x)
     361             : {
     362             : #ifdef CONFIG_MMU
     363         221 :         unsigned long addr = (unsigned long)x;
     364             : 
     365         221 :         return addr >= VMALLOC_START && addr < VMALLOC_END;
     366             : #else
     367             :         return 0;
     368             : #endif
     369             : }
     370             : #ifdef CONFIG_MMU
     371             : extern int is_vmalloc_or_module_addr(const void *x);
     372             : #else
     373             : static inline int is_vmalloc_or_module_addr(const void *x)
     374             : {
     375             :         return 0;
     376             : }
     377             : #endif
     378             : 
     379             : extern void kvfree(const void *addr);
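
kvfree() lets callers free a buffer without remembering whether it came from kmalloc() or vmalloc(). A hedged sketch of the dispatch it implies, built on is_vmalloc_addr() above (illustrative only; the kernel's own implementation lives in mm/util.c):

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* illustrative helper: free a buffer that came from kmalloc() or vmalloc() */
static void example_kvfree(const void *addr)
{
        if (is_vmalloc_addr(addr))
                vfree(addr);
        else
                kfree(addr);
}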
     380             : 
     381             : static inline void compound_lock(struct page *page)
     382             : {
     383             : #ifdef CONFIG_TRANSPARENT_HUGEPAGE
     384             :         VM_BUG_ON_PAGE(PageSlab(page), page);
     385             :         bit_spin_lock(PG_compound_lock, &page->flags);
     386             : #endif
     387             : }
     388             : 
     389             : static inline void compound_unlock(struct page *page)
     390             : {
     391             : #ifdef CONFIG_TRANSPARENT_HUGEPAGE
     392             :         VM_BUG_ON_PAGE(PageSlab(page), page);
     393             :         bit_spin_unlock(PG_compound_lock, &page->flags);
     394             : #endif
     395             : }
     396             : 
     397             : static inline unsigned long compound_lock_irqsave(struct page *page)
     398             : {
     399             :         unsigned long uninitialized_var(flags);
     400             : #ifdef CONFIG_TRANSPARENT_HUGEPAGE
     401             :         local_irq_save(flags);
     402             :         compound_lock(page);
     403             : #endif
     404             :         return flags;
     405             : }
     406             : 
     407             : static inline void compound_unlock_irqrestore(struct page *page,
     408             :                                               unsigned long flags)
     409             : {
     410             : #ifdef CONFIG_TRANSPARENT_HUGEPAGE
     411             :         compound_unlock(page);
     412             :         local_irq_restore(flags);
     413             : #endif
     414             : }
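
A hedged sketch of the save/restore pattern these helpers are meant for: bracket a section that must not race with a compound-page split, much like local_irq_save() around a spinlock. The function name is invented:

#include <linux/mm.h>

/* illustrative pattern around a compound head page */
static void example_with_compound_locked(struct page *head)
{
        unsigned long flags;

        flags = compound_lock_irqsave(head);
        /* ... adjust tail-page reference counts while a split is excluded ... */
        compound_unlock_irqrestore(head, flags);
}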
     415             : 
     416             : static inline struct page *compound_head_by_tail(struct page *tail)
     417             : {
     418             :         struct page *head = tail->first_page;
     419             : 
     420             :         /*
     421             :          * page->first_page may be a dangling pointer to an old
     422             :          * compound page, so recheck that it is still a tail
     423             :          * page before returning.
     424             :          */
     425             :         smp_rmb();
     426             :         if (likely(PageTail(tail)))
     427             :                 return head;
     428             :         return tail;
     429             : }
     430             : 
     431             : static inline struct page *compound_head(struct page *page)
     432             : {
     433             :         if (unlikely(PageTail(page)))
     434             :                 return compound_head_by_tail(page);
     435             :         return page;
     436             : }
     437             : 
     438             : /*
     439             :  * The atomic page->_mapcount, starts from -1: so that transitions
     440             :  * both from it and to it can be tracked, using atomic_inc_and_test
     441             :  * and atomic_add_negative(-1).
     442             :  */
     443             : static inline void page_mapcount_reset(struct page *page)
     444             : {
     445             :         atomic_set(&(page)->_mapcount, -1);
     446             : }
     447             : 
     448             : static inline int page_mapcount(struct page *page)
     449             : {
     450             :         return atomic_read(&(page)->_mapcount) + 1;
     451             : }
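
Because the stored value starts at -1, page_mapcount() reads back stored + 1, and page_mapped() (later in this header) is simply "stored value >= 0". A user-space mirror of that arithmetic, with a plain int standing in for the atomic:

#include <assert.h>

/* mirror of the _mapcount convention: the stored value starts at -1 */
static int mapcount_of(int stored)  { return stored + 1; }
static int is_mapped(int stored)    { return stored >= 0; }

int main(void)
{
        int stored = -1;                 /* page_mapcount_reset() */

        assert(mapcount_of(stored) == 0 && !is_mapped(stored));

        stored++;                        /* first mapping: atomic_inc_and_test() fires */
        assert(mapcount_of(stored) == 1 && is_mapped(stored));

        stored--;                        /* last unmap: atomic_add_negative(-1) fires */
        assert(mapcount_of(stored) == 0 && !is_mapped(stored));
        return 0;
}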
     452             : 
     453             : static inline int page_count(struct page *page)
     454             : {
     455             :         return atomic_read(&compound_head(page)->_count);
     456             : }
     457             : 
     458             : #ifdef CONFIG_HUGETLB_PAGE
     459             : extern int PageHeadHuge(struct page *page_head);
     460             : #else /* CONFIG_HUGETLB_PAGE */
     461             : static inline int PageHeadHuge(struct page *page_head)
     462             : {
     463             :         return 0;
     464             : }
     465             : #endif /* CONFIG_HUGETLB_PAGE */
     466             : 
     467             : static inline bool __compound_tail_refcounted(struct page *page)
     468             : {
     469             :         return !PageSlab(page) && !PageHeadHuge(page);
     470             : }
     471             : 
     472             : /*
     473             :  * This takes a head page as parameter and tells if the
     474             :  * tail page reference counting can be skipped.
     475             :  *
     476             :  * For this to be safe, PageSlab and PageHeadHuge must remain true on
     477             :  * any given page where they return true here, until all tail pins
     478             :  * have been released.
     479             :  */
     480             : static inline bool compound_tail_refcounted(struct page *page)
     481             : {
     482             :         VM_BUG_ON_PAGE(!PageHead(page), page);
     483             :         return __compound_tail_refcounted(page);
     484             : }
     485             : 
     486             : static inline void get_huge_page_tail(struct page *page)
     487             : {
     488             :         /*
     489             :          * __split_huge_page_refcount() cannot run from under us.
     490             :          */
     491             :         VM_BUG_ON_PAGE(!PageTail(page), page);
     492             :         VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
     493             :         VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page);
     494             :         if (compound_tail_refcounted(page->first_page))
     495             :                 atomic_inc(&page->_mapcount);
     496             : }
     497             : 
     498             : extern bool __get_page_tail(struct page *page);
     499             : 
     500     1949891 : static inline void get_page(struct page *page)
     501             : {
     502     1949891 :         if (unlikely(PageTail(page)))
     503           0 :                 if (likely(__get_page_tail(page)))
     504     1949897 :                         return;
     505             :         /*
     506             :          * Getting a normal page or the head of a compound page
     507             :          * requires to already have an elevated page->_count.
     508             :          */
     509     1949890 :         VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page);
     510     1949890 :         atomic_inc(&page->_count);
     511             : }
     512             : 
     513             : static inline struct page *virt_to_head_page(const void *x)
     514             : {
     515             :         struct page *page = virt_to_page(x);
     516             :         return compound_head(page);
     517             : }
     518             : 
     519             : /*
     520             :  * Setup the page count before being freed into the page allocator for
     521             :  * the first time (boot or memory hotplug)
     522             :  */
     523             : static inline void init_page_count(struct page *page)
     524             : {
     525             :         atomic_set(&page->_count, 1);
     526             : }
     527             : 
     528             : /*
     529             :  * PageBuddy() indicate that the page is free and in the buddy system
     530             :  * (see mm/page_alloc.c).
     531             :  *
     532             :  * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to
     533             :  * -2 so that an underflow of the page_mapcount() won't be mistaken
     534             :  * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very
     535             :  * efficiently by most CPU architectures.
     536             :  */
     537             : #define PAGE_BUDDY_MAPCOUNT_VALUE (-128)
     538             : 
     539             : static inline int PageBuddy(struct page *page)
     540             : {
     541             :         return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
     542             : }
     543             : 
     544             : static inline void __SetPageBuddy(struct page *page)
     545             : {
     546             :         VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
     547             :         atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
     548             : }
     549             : 
     550             : static inline void __ClearPageBuddy(struct page *page)
     551             : {
     552             :         VM_BUG_ON_PAGE(!PageBuddy(page), page);
     553             :         atomic_set(&page->_mapcount, -1);
     554             : }
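
A hedged sketch of how a buddy-style allocator could mark and unmark a free page with the helpers above, stashing the order via set_page_private() (illustrative only; the real code is in mm/page_alloc.c and the example_* names are invented):

#include <linux/mm.h>

/* illustrative: mark a page as a free buddy of the given order */
static void example_mark_free(struct page *page, unsigned int order)
{
        set_page_private(page, order);
        __SetPageBuddy(page);
}

/* illustrative: take the page back out of the free lists */
static unsigned int example_unmark_free(struct page *page)
{
        unsigned int order = page_private(page);

        __ClearPageBuddy(page);
        set_page_private(page, 0);
        return order;
}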
     555             : 
     556             : void put_page(struct page *page);
     557             : void put_pages_list(struct list_head *pages);
     558             : 
     559             : void split_page(struct page *page, unsigned int order);
     560             : int split_free_page(struct page *page);
     561             : 
     562             : /*
     563             :  * Compound pages have a destructor function.  Provide a
     564             :  * prototype for that function and accessor functions.
     565             :  * These are _only_ valid on the head of a PG_compound page.
     566             :  */
     567             : typedef void compound_page_dtor(struct page *);
     568             : 
     569             : static inline void set_compound_page_dtor(struct page *page,
     570             :                                                 compound_page_dtor *dtor)
     571             : {
     572             :         page[1].lru.next = (void *)dtor;
     573             : }
     574             : 
     575             : static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
     576             : {
     577             :         return (compound_page_dtor *)page[1].lru.next;
     578             : }
     579             : 
     580             : static inline int compound_order(struct page *page)
     581             : {
     582             :         if (!PageHead(page))
     583             :                 return 0;
     584             :         return (unsigned long)page[1].lru.prev;
     585             : }
     586             : 
     587             : static inline void set_compound_order(struct page *page, unsigned long order)
     588             : {
     589             :         page[1].lru.prev = (void *)order;
     590             : }
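
A hedged sketch of wiring up these accessors when preparing a compound head page: record a destructor in page[1].lru.next and the order in page[1].lru.prev. The real prep_compound_page() in mm/page_alloc.c also initialises the tail pages; the example_* names are invented.

#include <linux/mm.h>
#include <linux/gfp.h>

/* hypothetical destructor: free the whole compound allocation */
static void example_compound_dtor(struct page *page)
{
        __free_pages(page, compound_order(page));
}

/* illustrative: stash destructor and order in the first tail page */
static void example_prep_compound(struct page *head, unsigned long order)
{
        set_compound_page_dtor(head, example_compound_dtor);
        set_compound_order(head, order);
}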
     591             : 
     592             : #ifdef CONFIG_MMU
     593             : /*
     594             :  * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
      595             :  * servicing faults for write access.  In the normal case, we always want
     596             :  * pte_mkwrite.  But get_user_pages can cause write faults for mappings
     597             :  * that do not have writing enabled, when used by access_process_vm.
     598             :  */
     599             : static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
     600             : {
     601             :         if (likely(vma->vm_flags & VM_WRITE))
     602             :                 pte = pte_mkwrite(pte);
     603             :         return pte;
     604             : }
     605             : 
     606             : void do_set_pte(struct vm_area_struct *vma, unsigned long address,
     607             :                 struct page *page, pte_t *pte, bool write, bool anon);
     608             : #endif
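
A hedged sketch of the pattern maybe_mkwrite() exists for inside a fault handler: build the pte for the new page, and only for a write fault mark it dirty and, if the vma allows it, writable (compare do_set_pte(), declared just above; example_build_pte() is an invented name):

#include <linux/mm.h>

/* illustrative: construct the pte to install for a faulted-in page */
static pte_t example_build_pte(struct vm_area_struct *vma, struct page *page,
                               bool write)
{
        pte_t entry = mk_pte(page, vma->vm_page_prot);

        if (write)
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
        return entry;
}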
     609             : 
     610             : /*
     611             :  * Multiple processes may "see" the same page. E.g. for untouched
     612             :  * mappings of /dev/null, all processes see the same page full of
     613             :  * zeroes, and text pages of executables and shared libraries have
     614             :  * only one copy in memory, at most, normally.
     615             :  *
     616             :  * For the non-reserved pages, page_count(page) denotes a reference count.
     617             :  *   page_count() == 0 means the page is free. page->lru is then used for
     618             :  *   freelist management in the buddy allocator.
     619             :  *   page_count() > 0  means the page has been allocated.
     620             :  *
     621             :  * Pages are allocated by the slab allocator in order to provide memory
     622             :  * to kmalloc and kmem_cache_alloc. In this case, the management of the
     623             :  * page, and the fields in 'struct page' are the responsibility of mm/slab.c
     624             :  * unless a particular usage is carefully commented. (the responsibility of
     625             :  * freeing the kmalloc memory is the caller's, of course).
     626             :  *
     627             :  * A page may be used by anyone else who does a __get_free_page().
     628             :  * In this case, page_count still tracks the references, and should only
     629             :  * be used through the normal accessor functions. The top bits of page->flags
     630             :  * and page->virtual store page management information, but all other fields
     631             :  * are unused and could be used privately, carefully. The management of this
     632             :  * page is the responsibility of the one who allocated it, and those who have
     633             :  * subsequently been given references to it.
     634             :  *
     635             :  * The other pages (we may call them "pagecache pages") are completely
     636             :  * managed by the Linux memory manager: I/O, buffers, swapping etc.
     637             :  * The following discussion applies only to them.
     638             :  *
     639             :  * A pagecache page contains an opaque `private' member, which belongs to the
     640             :  * page's address_space. Usually, this is the address of a circular list of
     641             :  * the page's disk buffers. PG_private must be set to tell the VM to call
     642             :  * into the filesystem to release these pages.
     643             :  *
     644             :  * A page may belong to an inode's memory mapping. In this case, page->mapping
     645             :  * is the pointer to the inode, and page->index is the file offset of the page,
     646             :  * in units of PAGE_CACHE_SIZE.
     647             :  *
     648             :  * If pagecache pages are not associated with an inode, they are said to be
     649             :  * anonymous pages. These may become associated with the swapcache, and in that
     650             :  * case PG_swapcache is set, and page->private is an offset into the swapcache.
     651             :  *
     652             :  * In either case (swapcache or inode backed), the pagecache itself holds one
     653             :  * reference to the page. Setting PG_private should also increment the
      654             :  * refcount. Each user mapping also has a reference to the page.
     655             :  *
     656             :  * The pagecache pages are stored in a per-mapping radix tree, which is
     657             :  * rooted at mapping->page_tree, and indexed by offset.
     658             :  * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
     659             :  * lists, we instead now tag pages as dirty/writeback in the radix tree.
     660             :  *
     661             :  * All pagecache pages may be subject to I/O:
     662             :  * - inode pages may need to be read from disk,
     663             :  * - inode pages which have been modified and are MAP_SHARED may need
     664             :  *   to be written back to the inode on disk,
     665             :  * - anonymous pages (including MAP_PRIVATE file mappings) which have been
     666             :  *   modified may need to be swapped out to swap space and (later) to be read
     667             :  *   back into memory.
     668             :  */
     669             : 
     670             : /*
     671             :  * The zone field is never updated after free_area_init_core()
     672             :  * sets it, so none of the operations on it need to be atomic.
     673             :  */
     674             : 
     675             : /* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
     676             : #define SECTIONS_PGOFF          ((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
     677             : #define NODES_PGOFF             (SECTIONS_PGOFF - NODES_WIDTH)
     678             : #define ZONES_PGOFF             (NODES_PGOFF - ZONES_WIDTH)
     679             : #define LAST_CPUPID_PGOFF       (ZONES_PGOFF - LAST_CPUPID_WIDTH)
     680             : 
     681             : /*
     682             :  * Define the bit shifts to access each section.  For non-existent
     683             :  * sections we define the shift as 0; that plus a 0 mask ensures
     684             :  * the compiler will optimise away reference to them.
     685             :  */
     686             : #define SECTIONS_PGSHIFT        (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
     687             : #define NODES_PGSHIFT           (NODES_PGOFF * (NODES_WIDTH != 0))
     688             : #define ZONES_PGSHIFT           (ZONES_PGOFF * (ZONES_WIDTH != 0))
     689             : #define LAST_CPUPID_PGSHIFT     (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
     690             : 
     691             : /* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
     692             : #ifdef NODE_NOT_IN_PAGE_FLAGS
     693             : #define ZONEID_SHIFT            (SECTIONS_SHIFT + ZONES_SHIFT)
     694             : #define ZONEID_PGOFF            ((SECTIONS_PGOFF < ZONES_PGOFF)? \
     695             :                                                 SECTIONS_PGOFF : ZONES_PGOFF)
     696             : #else
     697             : #define ZONEID_SHIFT            (NODES_SHIFT + ZONES_SHIFT)
     698             : #define ZONEID_PGOFF            ((NODES_PGOFF < ZONES_PGOFF)? \
     699             :                                                 NODES_PGOFF : ZONES_PGOFF)
     700             : #endif
     701             : 
     702             : #define ZONEID_PGSHIFT          (ZONEID_PGOFF * (ZONEID_SHIFT != 0))
     703             : 
     704             : #if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
     705             : #error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
     706             : #endif
     707             : 
     708             : #define ZONES_MASK              ((1UL << ZONES_WIDTH) - 1)
     709             : #define NODES_MASK              ((1UL << NODES_WIDTH) - 1)
     710             : #define SECTIONS_MASK           ((1UL << SECTIONS_WIDTH) - 1)
     711             : #define LAST_CPUPID_MASK        ((1UL << LAST_CPUPID_SHIFT) - 1)
     712             : #define ZONEID_MASK             ((1UL << ZONEID_SHIFT) - 1)
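
A worked user-space example of the field-layout arithmetic, assuming hypothetical widths (SECTIONS_WIDTH = 0, NODES_WIDTH = 6, ZONES_WIDTH = 2); the real widths are configuration dependent:

#include <assert.h>
#include <stdio.h>

/* hypothetical widths, for illustration only */
#define SECTIONS_WIDTH  0
#define NODES_WIDTH     6
#define ZONES_WIDTH     2

#define BITS            (sizeof(unsigned long) * 8)
#define SECTIONS_PGOFF  (BITS - SECTIONS_WIDTH)
#define NODES_PGOFF     (SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF     (NODES_PGOFF - ZONES_WIDTH)

#define NODES_PGSHIFT   (NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT   (ZONES_PGOFF * (ZONES_WIDTH != 0))
#define NODES_MASK      ((1UL << NODES_WIDTH) - 1)
#define ZONES_MASK      ((1UL << ZONES_WIDTH) - 1)

int main(void)
{
        unsigned long flags = 0;
        unsigned long node = 3, zone = 2;

        /* pack, as set_page_node()/set_page_zone() do */
        flags |= (node & NODES_MASK) << NODES_PGSHIFT;
        flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;

        /* unpack, as page_to_nid()/page_zonenum() do */
        assert(((flags >> NODES_PGSHIFT) & NODES_MASK) == node);
        assert(((flags >> ZONES_PGSHIFT) & ZONES_MASK) == zone);

        printf("NODES_PGSHIFT=%lu ZONES_PGSHIFT=%lu\n",
               (unsigned long)NODES_PGSHIFT, (unsigned long)ZONES_PGSHIFT);
        return 0;
}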
     713             : 
     714             : static inline enum zone_type page_zonenum(const struct page *page)
     715             : {
     716             :         return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
     717             : }
     718             : 
     719             : #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
     720             : #define SECTION_IN_PAGE_FLAGS
     721             : #endif
     722             : 
     723             : /*
     724             :  * The identification function is mainly used by the buddy allocator for
     725             :  * determining if two pages could be buddies. We are not really identifying
     726             :  * the zone since we could be using the section number id if we do not have
     727             :  * node id available in page flags.
     728             :  * We only guarantee that it will return the same value for two combinable
     729             :  * pages in a zone.
     730             :  */
     731             : static inline int page_zone_id(struct page *page)
     732             : {
     733             :         return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
     734             : }
     735             : 
     736             : static inline int zone_to_nid(struct zone *zone)
     737             : {
     738             : #ifdef CONFIG_NUMA
     739             :         return zone->node;
     740             : #else
     741             :         return 0;
     742             : #endif
     743             : }
     744             : 
     745             : #ifdef NODE_NOT_IN_PAGE_FLAGS
     746             : extern int page_to_nid(const struct page *page);
     747             : #else
     748             : static inline int page_to_nid(const struct page *page)
     749             : {
     750             :         return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
     751             : }
     752             : #endif
     753             : 
     754             : #ifdef CONFIG_NUMA_BALANCING
     755             : static inline int cpu_pid_to_cpupid(int cpu, int pid)
     756             : {
     757             :         return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
     758             : }
     759             : 
     760             : static inline int cpupid_to_pid(int cpupid)
     761             : {
     762             :         return cpupid & LAST__PID_MASK;
     763             : }
     764             : 
     765             : static inline int cpupid_to_cpu(int cpupid)
     766             : {
     767             :         return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
     768             : }
     769             : 
     770             : static inline int cpupid_to_nid(int cpupid)
     771             : {
     772             :         return cpu_to_node(cpupid_to_cpu(cpupid));
     773             : }
     774             : 
     775             : static inline bool cpupid_pid_unset(int cpupid)
     776             : {
     777             :         return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
     778             : }
     779             : 
     780             : static inline bool cpupid_cpu_unset(int cpupid)
     781             : {
     782             :         return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
     783             : }
     784             : 
     785             : static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
     786             : {
     787             :         return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
     788             : }
     789             : 
     790             : #define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
     791             : #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
     792             : static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
     793             : {
     794             :         return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
     795             : }
     796             : 
     797             : static inline int page_cpupid_last(struct page *page)
     798             : {
     799             :         return page->_last_cpupid;
     800             : }
     801             : static inline void page_cpupid_reset_last(struct page *page)
     802             : {
     803             :         page->_last_cpupid = -1 & LAST_CPUPID_MASK;
     804             : }
     805             : #else
     806             : static inline int page_cpupid_last(struct page *page)
     807             : {
     808             :         return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
     809             : }
     810             : 
     811             : extern int page_cpupid_xchg_last(struct page *page, int cpupid);
     812             : 
     813             : static inline void page_cpupid_reset_last(struct page *page)
     814             : {
     815             :         int cpupid = (1 << LAST_CPUPID_SHIFT) - 1;
     816             : 
     817             :         page->flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
     818             :         page->flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
     819             : }
     820             : #endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
     821             : #else /* !CONFIG_NUMA_BALANCING */
     822             : static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
     823             : {
     824             :         return page_to_nid(page); /* XXX */
     825             : }
     826             : 
     827             : static inline int page_cpupid_last(struct page *page)
     828             : {
     829             :         return page_to_nid(page); /* XXX */
     830             : }
     831             : 
     832             : static inline int cpupid_to_nid(int cpupid)
     833             : {
     834             :         return -1;
     835             : }
     836             : 
     837             : static inline int cpupid_to_pid(int cpupid)
     838             : {
     839             :         return -1;
     840             : }
     841             : 
     842             : static inline int cpupid_to_cpu(int cpupid)
     843             : {
     844             :         return -1;
     845             : }
     846             : 
     847             : static inline int cpu_pid_to_cpupid(int nid, int pid)
     848             : {
     849             :         return -1;
     850             : }
     851             : 
     852             : static inline bool cpupid_pid_unset(int cpupid)
     853             : {
     854             :         return 1;
     855             : }
     856             : 
     857             : static inline void page_cpupid_reset_last(struct page *page)
     858             : {
     859             : }
     860             : 
     861             : static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
     862             : {
     863             :         return false;
     864             : }
     865             : #endif /* CONFIG_NUMA_BALANCING */
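
A user-space mirror of the cpupid packing used above, assuming illustrative 8-bit cpu and pid fields; the kernel's LAST__PID_SHIFT and the masks are configuration dependent:

#include <assert.h>

/* hypothetical field sizes, for illustration only */
#define PID_BITS   8
#define CPU_BITS   8
#define PID_SHIFT  PID_BITS
#define PID_MASK   ((1 << PID_BITS) - 1)
#define CPU_MASK   ((1 << CPU_BITS) - 1)

static int pack(int cpu, int pid) { return ((cpu & CPU_MASK) << PID_SHIFT) | (pid & PID_MASK); }
static int to_pid(int cpupid)     { return cpupid & PID_MASK; }
static int to_cpu(int cpupid)     { return (cpupid >> PID_SHIFT) & CPU_MASK; }
static int pid_unset(int cpupid)  { return to_pid(cpupid) == (-1 & PID_MASK); }

int main(void)
{
        int cpupid = pack(5, 1234);

        /* the round trip recovers the (truncated) cpu and pid */
        assert(to_cpu(cpupid) == 5);
        assert(to_pid(cpupid) == (1234 & PID_MASK));
        assert(!pid_unset(cpupid));
        assert(pid_unset(pack(0, -1)));
        return 0;
}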
     866             : 
     867             : static inline struct zone *page_zone(const struct page *page)
     868             : {
     869             :         return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
     870             : }
     871             : 
     872             : #ifdef SECTION_IN_PAGE_FLAGS
     873             : static inline void set_page_section(struct page *page, unsigned long section)
     874             : {
     875             :         page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
     876             :         page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
     877             : }
     878             : 
     879             : static inline unsigned long page_to_section(const struct page *page)
     880             : {
     881             :         return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
     882             : }
     883             : #endif
     884             : 
     885             : static inline void set_page_zone(struct page *page, enum zone_type zone)
     886             : {
     887             :         page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
     888             :         page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
     889             : }
     890             : 
     891             : static inline void set_page_node(struct page *page, unsigned long node)
     892             : {
     893             :         page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
     894             :         page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
     895             : }
     896             : 
     897             : static inline void set_page_links(struct page *page, enum zone_type zone,
     898             :         unsigned long node, unsigned long pfn)
     899             : {
     900             :         set_page_zone(page, zone);
     901             :         set_page_node(page, node);
     902             : #ifdef SECTION_IN_PAGE_FLAGS
     903             :         set_page_section(page, pfn_to_section_nr(pfn));
     904             : #endif
     905             : }
     906             : 
     907             : /*
     908             :  * Some inline functions in vmstat.h depend on page_zone()
     909             :  */
     910             : #include <linux/vmstat.h>
     911             : 
     912             : static __always_inline void *lowmem_page_address(const struct page *page)
     913             : {
     914    98214134 :         return __va(PFN_PHYS(page_to_pfn(page)));
     915             : }
     916             : 
     917             : #if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
     918             : #define HASHED_PAGE_VIRTUAL
     919             : #endif
     920             : 
     921             : #if defined(WANT_PAGE_VIRTUAL)
     922             : static inline void *page_address(const struct page *page)
     923             : {
     924             :         return page->virtual;
     925             : }
     926             : static inline void set_page_address(struct page *page, void *address)
     927             : {
     928             :         page->virtual = address;
     929             : }
     930             : #define page_address_init()  do { } while(0)
     931             : #endif
     932             : 
     933             : #if defined(HASHED_PAGE_VIRTUAL)
     934             : void *page_address(const struct page *page);
     935             : void set_page_address(struct page *page, void *virtual);
     936             : void page_address_init(void);
     937             : #endif
     938             : 
     939             : #if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
     940             : #define page_address(page) lowmem_page_address(page)
     941             : #define set_page_address(page, address)  do { } while(0)
     942             : #define page_address_init()  do { } while(0)
     943             : #endif
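
For lowmem pages the address really is pure arithmetic: __va(PFN_PHYS(pfn)) is the direct-map base plus pfn << PAGE_SHIFT. A user-space sketch with assumed constants (the MY_* values are illustrative, not any particular architecture's):

#include <assert.h>

/* hypothetical constants, for illustration only */
#define MY_PAGE_SHIFT   12
#define MY_PAGE_OFFSET  0xC0000000UL            /* assumed direct-map base */

/* mirror of __va(PFN_PHYS(pfn)) */
static unsigned long lowmem_addr_of_pfn(unsigned long pfn)
{
        return MY_PAGE_OFFSET + (pfn << MY_PAGE_SHIFT);
}

int main(void)
{
        /* pfn 0x1234 lands 0x1234000 bytes into the direct map */
        assert(lowmem_addr_of_pfn(0x1234) == MY_PAGE_OFFSET + 0x1234000UL);
        return 0;
}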
     944             : 
     945             : /*
     946             :  * On an anonymous page mapped into a user virtual memory area,
     947             :  * page->mapping points to its anon_vma, not to a struct address_space;
     948             :  * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
     949             :  *
     950             :  * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
     951             :  * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
     952             :  * and then page->mapping points, not to an anon_vma, but to a private
     953             :  * structure which KSM associates with that merged page.  See ksm.h.
     954             :  *
     955             :  * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
     956             :  *
     957             :  * Please note that, confusingly, "page_mapping" refers to the inode
     958             :  * address_space which maps the page from disk; whereas "page_mapped"
     959             :  * refers to user virtual address space into which the page is mapped.
     960             :  */
     961             : #define PAGE_MAPPING_ANON       1
     962             : #define PAGE_MAPPING_KSM        2
     963             : #define PAGE_MAPPING_FLAGS      (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
     964             : 
     965             : extern struct address_space *page_mapping(struct page *page);
     966             : 
     967             : /* Neutral page->mapping pointer to address_space or anon_vma or other */
     968             : static inline void *page_rmapping(struct page *page)
     969             : {
     970             :         return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
     971             : }
     972             : 
     973             : extern struct address_space *__page_file_mapping(struct page *);
     974             : 
     975             : static inline
     976             : struct address_space *page_file_mapping(struct page *page)
     977             : {
     978             :         if (unlikely(PageSwapCache(page)))
     979             :                 return __page_file_mapping(page);
     980             : 
     981             :         return page->mapping;
     982             : }
     983             : 
     984             : static inline int PageAnon(struct page *page)
     985             : {
     986             :         return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
     987             : }
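
The tagging works because the structures page->mapping can point to are at least word aligned, leaving the low two bits free for PAGE_MAPPING_ANON and PAGE_MAPPING_KSM. A user-space mirror of the tag-and-strip arithmetic, with an invented stand-in struct:

#include <assert.h>
#include <stdint.h>

#define PAGE_MAPPING_ANON   1
#define PAGE_MAPPING_KSM    2
#define PAGE_MAPPING_FLAGS  (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)

struct fake_anon_vma { int dummy; };

int main(void)
{
        static struct fake_anon_vma av;         /* word aligned, so the low bits are zero */
        uintptr_t mapping = (uintptr_t)&av | PAGE_MAPPING_ANON;

        /* PageAnon(): test the tag bit */
        assert(mapping & PAGE_MAPPING_ANON);

        /* page_rmapping(): strip the tag bits to recover the pointer */
        assert((struct fake_anon_vma *)(mapping & ~(uintptr_t)PAGE_MAPPING_FLAGS) == &av);
        return 0;
}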
     988             : 
     989             : /*
     990             :  * Return the pagecache index of the passed page.  Regular pagecache pages
     991             :  * use ->index whereas swapcache pages use ->private
     992             :  */
     993             : static inline pgoff_t page_index(struct page *page)
     994             : {
     995        4706 :         if (unlikely(PageSwapCache(page)))
     996           0 :                 return page_private(page);
     997        4706 :         return page->index;
     998             : }
     999             : 
    1000             : extern pgoff_t __page_file_index(struct page *page);
    1001             : 
    1002             : /*
    1003             :  * Return the file index of the page. Regular pagecache pages use ->index
    1004             :  * whereas swapcache pages use swp_offset(->private)
    1005             :  */
    1006             : static inline pgoff_t page_file_index(struct page *page)
    1007             : {
    1008             :         if (unlikely(PageSwapCache(page)))
    1009             :                 return __page_file_index(page);
    1010             : 
    1011             :         return page->index;
    1012             : }
    1013             : 
    1014             : /*
    1015             :  * Return true if this page is mapped into pagetables.
    1016             :  */
    1017             : static inline int page_mapped(struct page *page)
    1018             : {
    1019             :         return atomic_read(&(page)->_mapcount) >= 0;
    1020             : }
    1021             : 
    1022             : /*
    1023             :  * Different kinds of faults, as returned by handle_mm_fault().
    1024             :  * Used to decide whether a process gets delivered SIGBUS or
    1025             :  * just gets major/minor fault counters bumped up.
    1026             :  */
    1027             : 
    1028             : #define VM_FAULT_MINOR  0 /* For backwards compat. Remove me quickly. */
    1029             : 
    1030             : #define VM_FAULT_OOM    0x0001
    1031             : #define VM_FAULT_SIGBUS 0x0002
    1032             : #define VM_FAULT_MAJOR  0x0004
    1033             : #define VM_FAULT_WRITE  0x0008  /* Special case for get_user_pages */
    1034             : #define VM_FAULT_HWPOISON 0x0010        /* Hit poisoned small page */
    1035             : #define VM_FAULT_HWPOISON_LARGE 0x0020  /* Hit poisoned large page. Index encoded in upper bits */
    1036             : 
    1037             : #define VM_FAULT_NOPAGE 0x0100  /* ->fault installed the pte, did not return a page */
    1038             : #define VM_FAULT_LOCKED 0x0200  /* ->fault locked the returned page */
    1039             : #define VM_FAULT_RETRY  0x0400  /* ->fault blocked, must retry */
    1040             : #define VM_FAULT_FALLBACK 0x0800        /* huge page fault failed, fall back to small */
    1041             : 
    1042             : #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
    1043             : 
    1044             : #define VM_FAULT_ERROR  (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
    1045             :                          VM_FAULT_FALLBACK | VM_FAULT_HWPOISON_LARGE)
    1046             : 
    1047             : /* Encode/decode the hstate index for a hwpoisoned large page */
    1048             : #define VM_FAULT_SET_HINDEX(x) ((x) << 12)
    1049             : #define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf)
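
/*
 * Worked example (not part of mm.h): encoding and recovering the hstate
 * index of a hwpoisoned large page.  The index value 3 is arbitrary and
 * used only for illustration.
 */
static inline int vm_fault_hindex_example(void)
{
        int ret = VM_FAULT_HWPOISON_LARGE | VM_FAULT_SET_HINDEX(3);

        /* VM_FAULT_GET_HINDEX(ret) recovers the encoded index, 3 here. */
        return VM_FAULT_GET_HINDEX(ret);
}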
    1050             : 
    1051             : /*
    1052             :  * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
    1053             :  */
    1054             : extern void pagefault_out_of_memory(void);
    1055             : 
    1056             : #define offset_in_page(p)       ((unsigned long)(p) & ~PAGE_MASK)
    1057             : 
    1058             : /*
    1059             :  * Flags passed to show_mem() and show_free_areas() to suppress output in
    1060             :  * various contexts.
    1061             :  */
    1062             : #define SHOW_MEM_FILTER_NODES           (0x0001u)       /* disallowed nodes */
    1063             : 
    1064             : extern void show_free_areas(unsigned int flags);
    1065             : extern bool skip_free_areas_node(unsigned int flags, int nid);
    1066             : 
    1067             : int shmem_zero_setup(struct vm_area_struct *);
    1068             : #ifdef CONFIG_SHMEM
    1069             : bool shmem_mapping(struct address_space *mapping);
    1070             : #else
    1071             : static inline bool shmem_mapping(struct address_space *mapping)
    1072             : {
    1073             :         return false;
    1074             : }
    1075             : #endif
    1076             : 
    1077             : extern int can_do_mlock(void);
    1078             : extern int user_shm_lock(size_t, struct user_struct *);
    1079             : extern void user_shm_unlock(size_t, struct user_struct *);
    1080             : 
    1081             : /*
    1082             :  * Parameter block passed down to zap_pte_range in exceptional cases.
    1083             :  */
    1084             : struct zap_details {
    1085             :         struct vm_area_struct *nonlinear_vma;   /* Check page->index if set */
    1086             :         struct address_space *check_mapping;    /* Check page->mapping if set */
    1087             :         pgoff_t first_index;                    /* Lowest page->index to unmap */
    1088             :         pgoff_t last_index;                     /* Highest page->index to unmap */
    1089             : };
    1090             : 
    1091             : struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
    1092             :                 pte_t pte);
    1093             : 
    1094             : int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
    1095             :                 unsigned long size);
    1096             : void zap_page_range(struct vm_area_struct *vma, unsigned long address,
    1097             :                 unsigned long size, struct zap_details *);
    1098             : void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
    1099             :                 unsigned long start, unsigned long end);
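
/*
 * Usage sketch (not part of mm.h): zapping the PTEs of one VMA.  A NULL
 * zap_details zaps everything in the range; a populated one restricts the
 * zap, e.g. to pages of a particular mapping within an index window.  The
 * index bounds below are placeholders chosen for illustration.
 */
static inline void zap_vma_of_mapping_example(struct vm_area_struct *vma,
                                              struct address_space *mapping)
{
        struct zap_details details = {
                .check_mapping = mapping,       /* only pages of this mapping */
                .first_index   = 0,             /* lowest page->index to unmap */
                .last_index    = (pgoff_t)-1,   /* highest page->index to unmap */
        };

        zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
                       &details);
}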
    1100             : 
    1101             : /**
    1102             :  * mm_walk - callbacks for walk_page_range
    1103             :  * @pgd_entry: if set, called for each non-empty PGD (top-level) entry
    1104             :  * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
    1105             :  * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
    1106             :  *             this handler is required to be able to handle
    1107             :  *             pmd_trans_huge() pmds.  It may simply choose to call
    1108             :  *             split_huge_page() instead of handling them explicitly.
    1109             :  * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
    1110             :  * @pte_hole: if set, called for each hole at all levels
    1111             :  * @hugetlb_entry: if set, called for each hugetlb entry
    1112             :  *                 *Caution*: The caller must hold mmap_sem if @hugetlb_entry
    1113             :  *                            is used.
    1114             :  *
    1115             :  * (see walk_page_range for more details)
    1116             :  */
    1117             : struct mm_walk {
    1118             :         int (*pgd_entry)(pgd_t *pgd, unsigned long addr,
    1119             :                          unsigned long next, struct mm_walk *walk);
    1120             :         int (*pud_entry)(pud_t *pud, unsigned long addr,
    1121             :                          unsigned long next, struct mm_walk *walk);
    1122             :         int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
    1123             :                          unsigned long next, struct mm_walk *walk);
    1124             :         int (*pte_entry)(pte_t *pte, unsigned long addr,
    1125             :                          unsigned long next, struct mm_walk *walk);
    1126             :         int (*pte_hole)(unsigned long addr, unsigned long next,
    1127             :                         struct mm_walk *walk);
    1128             :         int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
    1129             :                              unsigned long addr, unsigned long next,
    1130             :                              struct mm_walk *walk);
    1131             :         struct mm_struct *mm;
    1132             :         void *private;
    1133             : };
    1134             : 
    1135             : int walk_page_range(unsigned long addr, unsigned long end,
    1136             :                 struct mm_walk *walk);
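
/*
 * Minimal sketch (not part of mm.h): counting present PTEs in a range with
 * walk_page_range().  count_pte_example() is a hypothetical callback, and
 * the caller is assumed to hold mm->mmap_sem for the duration of the walk.
 */
static int count_pte_example(pte_t *pte, unsigned long addr,
                             unsigned long next, struct mm_walk *walk)
{
        unsigned long *count = walk->private;

        if (pte_present(*pte))
                (*count)++;
        return 0;       /* a non-zero return would abort the walk */
}

static inline unsigned long count_present_ptes_example(struct mm_struct *mm,
                                                       unsigned long start,
                                                       unsigned long end)
{
        unsigned long count = 0;
        struct mm_walk walk = {
                .pte_entry = count_pte_example,
                .mm        = mm,
                .private   = &count,
        };

        walk_page_range(start, end, &walk);
        return count;
}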
    1137             : void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
    1138             :                 unsigned long end, unsigned long floor, unsigned long ceiling);
    1139             : int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
    1140             :                         struct vm_area_struct *vma);
    1141             : void unmap_mapping_range(struct address_space *mapping,
    1142             :                 loff_t const holebegin, loff_t const holelen, int even_cows);
    1143             : int follow_pfn(struct vm_area_struct *vma, unsigned long address,
    1144             :         unsigned long *pfn);
    1145             : int follow_phys(struct vm_area_struct *vma, unsigned long address,
    1146             :                 unsigned int flags, unsigned long *prot, resource_size_t *phys);
    1147             : int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
    1148             :                         void *buf, int len, int write);
    1149             : 
    1150             : static inline void unmap_shared_mapping_range(struct address_space *mapping,
    1151             :                 loff_t const holebegin, loff_t const holelen)
    1152             : {
    1153             :         unmap_mapping_range(mapping, holebegin, holelen, 0);
    1154             : }
    1155             : 
    1156             : extern void truncate_pagecache(struct inode *inode, loff_t new);
    1157             : extern void truncate_setsize(struct inode *inode, loff_t newsize);
    1158             : void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
    1159             : int truncate_inode_page(struct address_space *mapping, struct page *page);
    1160             : int generic_error_remove_page(struct address_space *mapping, struct page *page);
    1161             : int invalidate_inode_page(struct page *page);
    1162             : 
    1163             : #ifdef CONFIG_MMU
    1164             : extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
    1165             :                         unsigned long address, unsigned int flags);
    1166             : extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
    1167             :                             unsigned long address, unsigned int fault_flags);
    1168             : #else
    1169             : static inline int handle_mm_fault(struct mm_struct *mm,
    1170             :                         struct vm_area_struct *vma, unsigned long address,
    1171             :                         unsigned int flags)
    1172             : {
    1173             :         /* should never happen if there's no MMU */
    1174             :         BUG();
    1175             :         return VM_FAULT_SIGBUS;
    1176             : }
    1177             : static inline int fixup_user_fault(struct task_struct *tsk,
    1178             :                 struct mm_struct *mm, unsigned long address,
    1179             :                 unsigned int fault_flags)
    1180             : {
    1181             :         /* should never happen if there's no MMU */
    1182             :         BUG();
    1183             :         return -EFAULT;
    1184             : }
    1185             : #endif
    1186             : 
    1187             : extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
    1188             : extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
    1189             :                 void *buf, int len, int write);
    1190             : 
    1191             : long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
    1192             :                       unsigned long start, unsigned long nr_pages,
    1193             :                       unsigned int foll_flags, struct page **pages,
    1194             :                       struct vm_area_struct **vmas, int *nonblocking);
    1195             : long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
    1196             :                     unsigned long start, unsigned long nr_pages,
    1197             :                     int write, int force, struct page **pages,
    1198             :                     struct vm_area_struct **vmas);
    1199             : int get_user_pages_fast(unsigned long start, int nr_pages, int write,
    1200             :                         struct page **pages);
    1201             : struct kvec;
    1202             : int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
    1203             :                         struct page **pages);
    1204             : int get_kernel_page(unsigned long start, int write, struct page **pages);
    1205             : struct page *get_dump_page(unsigned long addr);
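
/*
 * Pinning sketch (not part of mm.h): grab one user page for writing with
 * get_user_pages() and let the caller release it later with put_page().
 * mmap_sem must be held for read across the call; uaddr is a placeholder
 * user address supplied by the caller.
 */
static inline int pin_one_user_page_example(struct task_struct *tsk,
                                            struct mm_struct *mm,
                                            unsigned long uaddr,
                                            struct page **pagep)
{
        long ret;

        down_read(&mm->mmap_sem);
        ret = get_user_pages(tsk, mm, uaddr, 1, 1 /* write */, 0 /* force */,
                             pagep, NULL);
        up_read(&mm->mmap_sem);

        return ret == 1 ? 0 : -EFAULT;
}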
    1206             : 
    1207             : extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
    1208             : extern void do_invalidatepage(struct page *page, unsigned int offset,
    1209             :                               unsigned int length);
    1210             : 
    1211             : int __set_page_dirty_nobuffers(struct page *page);
    1212             : int __set_page_dirty_no_writeback(struct page *page);
    1213             : int redirty_page_for_writepage(struct writeback_control *wbc,
    1214             :                                 struct page *page);
    1215             : void account_page_dirtied(struct page *page, struct address_space *mapping);
    1216             : void account_page_writeback(struct page *page);
    1217             : int set_page_dirty(struct page *page);
    1218             : int set_page_dirty_lock(struct page *page);
    1219             : int clear_page_dirty_for_io(struct page *page);
    1220             : int get_cmdline(struct task_struct *task, char *buffer, int buflen);
    1221             : 
    1222             : /* Is the vma a continuation of the stack vma above it? */
    1223             : static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
    1224             : {
    1225             :         return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
    1226             : }
    1227             : 
    1228             : static inline int stack_guard_page_start(struct vm_area_struct *vma,
    1229             :                                              unsigned long addr)
    1230             : {
    1231             :         return (vma->vm_flags & VM_GROWSDOWN) &&
    1232             :                 (vma->vm_start == addr) &&
    1233             :                 !vma_growsdown(vma->vm_prev, addr);
    1234             : }
    1235             : 
    1236             : /* Is the vma a continuation of the stack vma below it? */
    1237             : static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
    1238             : {
    1239             :         return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
    1240             : }
    1241             : 
    1242             : static inline int stack_guard_page_end(struct vm_area_struct *vma,
    1243             :                                            unsigned long addr)
    1244             : {
    1245             :         return (vma->vm_flags & VM_GROWSUP) &&
    1246             :                 (vma->vm_end == addr) &&
    1247             :                 !vma_growsup(vma->vm_next, addr);
    1248             : }
    1249             : 
    1250             : extern pid_t
    1251             : vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
    1252             : 
    1253             : extern unsigned long move_page_tables(struct vm_area_struct *vma,
    1254             :                 unsigned long old_addr, struct vm_area_struct *new_vma,
    1255             :                 unsigned long new_addr, unsigned long len,
    1256             :                 bool need_rmap_locks);
    1257             : extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
    1258             :                               unsigned long end, pgprot_t newprot,
    1259             :                               int dirty_accountable, int prot_numa);
    1260             : extern int mprotect_fixup(struct vm_area_struct *vma,
    1261             :                           struct vm_area_struct **pprev, unsigned long start,
    1262             :                           unsigned long end, unsigned long newflags);
    1263             : 
    1264             : /*
    1265             :  * Does not attempt to fault pages in; may return fewer pages than requested.
    1266             :  */
    1267             : int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
    1268             :                           struct page **pages);
    1269             : /*
    1270             :  * per-process (per-mm_struct) statistics.
    1271             :  */
    1272             : static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
    1273             : {
    1274             :         long val = atomic_long_read(&mm->rss_stat.count[member]);
    1275             : 
    1276             : #ifdef SPLIT_RSS_COUNTING
    1277             :         /*
    1278             :          * The counter is updated asynchronously and may temporarily go
    1279             :          * negative, which is never what users expect, so clamp it to zero.
    1280             :          */
    1281             :         if (val < 0)
    1282             :                 val = 0;
    1283             : #endif
    1284             :         return (unsigned long)val;
    1285             : }
    1286             : 
    1287             : static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
    1288             : {
    1289             :         atomic_long_add(value, &mm->rss_stat.count[member]);
    1290             : }
    1291             : 
    1292             : static inline void inc_mm_counter(struct mm_struct *mm, int member)
    1293             : {
    1294             :         atomic_long_inc(&mm->rss_stat.count[member]);
    1295             : }
    1296             : 
    1297             : static inline void dec_mm_counter(struct mm_struct *mm, int member)
    1298             : {
    1299             :         atomic_long_dec(&mm->rss_stat.count[member]);
    1300             : }
    1301             : 
    1302             : static inline unsigned long get_mm_rss(struct mm_struct *mm)
    1303             : {
    1304             :         return get_mm_counter(mm, MM_FILEPAGES) +
    1305             :                 get_mm_counter(mm, MM_ANONPAGES);
    1306             : }
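
/*
 * Illustrative helper (not part of mm.h): a common idiom for reporting an
 * mm's RSS in kilobytes, converting the page count via PAGE_SHIFT.
 */
static inline unsigned long get_mm_rss_kb_example(struct mm_struct *mm)
{
        return get_mm_rss(mm) << (PAGE_SHIFT - 10);
}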
    1307             : 
    1308             : static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
    1309             : {
    1310             :         return max(mm->hiwater_rss, get_mm_rss(mm));
    1311             : }
    1312             : 
    1313             : static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
    1314             : {
    1315             :         return max(mm->hiwater_vm, mm->total_vm);
    1316             : }
    1317             : 
    1318             : static inline void update_hiwater_rss(struct mm_struct *mm)
    1319             : {
    1320             :         unsigned long _rss = get_mm_rss(mm);
    1321             : 
    1322             :         if ((mm)->hiwater_rss < _rss)
    1323             :                 (mm)->hiwater_rss = _rss;
    1324             : }
    1325             : 
    1326             : static inline void update_hiwater_vm(struct mm_struct *mm)
    1327             : {
    1328             :         if (mm->hiwater_vm < mm->total_vm)
    1329             :                 mm->hiwater_vm = mm->total_vm;
    1330             : }
    1331             : 
    1332             : static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
    1333             :                                          struct mm_struct *mm)
    1334             : {
    1335             :         unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
    1336             : 
    1337             :         if (*maxrss < hiwater_rss)
    1338             :                 *maxrss = hiwater_rss;
    1339             : }
    1340             : 
    1341             : #if defined(SPLIT_RSS_COUNTING)
    1342             : void sync_mm_rss(struct mm_struct *mm);
    1343             : #else
    1344             : static inline void sync_mm_rss(struct mm_struct *mm)
    1345             : {
    1346             : }
    1347             : #endif
    1348             : 
    1349             : int vma_wants_writenotify(struct vm_area_struct *vma);
    1350             : 
    1351             : extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
    1352             :                                spinlock_t **ptl);
    1353             : static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
    1354             :                                     spinlock_t **ptl)
    1355             : {
    1356             :         pte_t *ptep;
    1357             :         __cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
    1358             :         return ptep;
    1359             : }
    1360             : 
    1361             : #ifdef __PAGETABLE_PUD_FOLDED
    1362             : static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
    1363             :                                                 unsigned long address)
    1364             : {
    1365             :         return 0;
    1366             : }
    1367             : #else
    1368             : int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
    1369             : #endif
    1370             : 
    1371             : #ifdef __PAGETABLE_PMD_FOLDED
    1372             : static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
    1373             :                                                 unsigned long address)
    1374             : {
    1375             :         return 0;
    1376             : }
    1377             : #else
    1378             : int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
    1379             : #endif
    1380             : 
    1381             : int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
    1382             :                 pmd_t *pmd, unsigned long address);
    1383             : int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);
    1384             : 
    1385             : /*
    1386             :  * The following ifdef is needed to get the 4level-fixup.h header to work.
    1387             :  * Remove it when 4level-fixup.h has been removed.
    1388             :  */
    1389             : #if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
    1390             : static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
    1391             : {
    1392             :         return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))?
    1393             :                 NULL: pud_offset(pgd, address);
    1394             : }
    1395             : 
    1396             : static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
    1397             : {
    1398             :         return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
    1399             :                 NULL: pmd_offset(pud, address);
    1400             : }
    1401             : #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
    1402             : 
    1403             : #if USE_SPLIT_PTE_PTLOCKS
    1404             : #if ALLOC_SPLIT_PTLOCKS
    1405             : void __init ptlock_cache_init(void);
    1406             : extern bool ptlock_alloc(struct page *page);
    1407             : extern void ptlock_free(struct page *page);
    1408             : 
    1409             : static inline spinlock_t *ptlock_ptr(struct page *page)
    1410             : {
    1411             :         return page->ptl;
    1412             : }
    1413             : #else /* ALLOC_SPLIT_PTLOCKS */
    1414             : static inline void ptlock_cache_init(void)
    1415             : {
    1416             : }
    1417             : 
    1418             : static inline bool ptlock_alloc(struct page *page)
    1419             : {
    1420             :         return true;
    1421             : }
    1422             : 
    1423             : static inline void ptlock_free(struct page *page)
    1424             : {
    1425             : }
    1426             : 
    1427             : static inline spinlock_t *ptlock_ptr(struct page *page)
    1428             : {
    1429             :         return &page->ptl;
    1430             : }
    1431             : #endif /* ALLOC_SPLIT_PTLOCKS */
    1432             : 
    1433             : static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
    1434             : {
    1435             :         return ptlock_ptr(pmd_page(*pmd));
    1436             : }
    1437             : 
    1438             : static inline bool ptlock_init(struct page *page)
    1439             : {
    1440             :         /*
    1441             :          * prep_new_page() initializes page->private (and therefore page->ptl)
    1442             :          * with 0.  Make sure nobody has taken it into use in the meantime.
    1443             :          *
    1444             :          * That can happen if an arch tries to use slab for page table allocation:
    1445             :          * slab code uses page->slab_cache and page->first_page (for tail
    1446             :          * pages), which share storage with page->ptl.
    1447             :          */
    1448             :         VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
    1449             :         if (!ptlock_alloc(page))
    1450             :                 return false;
    1451             :         spin_lock_init(ptlock_ptr(page));
    1452             :         return true;
    1453             : }
    1454             : 
    1455             : /* Reset page->mapping so free_pages_check won't complain. */
    1456             : static inline void pte_lock_deinit(struct page *page)
    1457             : {
    1458             :         page->mapping = NULL;
    1459             :         ptlock_free(page);
    1460             : }
    1461             : 
    1462             : #else   /* !USE_SPLIT_PTE_PTLOCKS */
    1463             : /*
    1464             :  * We use mm->page_table_lock to guard all pagetable pages of the mm.
    1465             :  */
    1466             : static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
    1467             : {
    1468             :         return &mm->page_table_lock;
    1469             : }
    1470             : static inline void ptlock_cache_init(void) {}
    1471             : static inline bool ptlock_init(struct page *page) { return true; }
    1472             : static inline void pte_lock_deinit(struct page *page) {}
    1473             : #endif /* USE_SPLIT_PTE_PTLOCKS */
    1474             : 
    1475             : static inline void pgtable_init(void)
    1476             : {
    1477             :         ptlock_cache_init();
    1478             :         pgtable_cache_init();
    1479             : }
    1480             : 
    1481             : static inline bool pgtable_page_ctor(struct page *page)
    1482             : {
    1483             :         inc_zone_page_state(page, NR_PAGETABLE);
    1484             :         return ptlock_init(page);
    1485             : }
    1486             : 
    1487             : static inline void pgtable_page_dtor(struct page *page)
    1488             : {
    1489             :         pte_lock_deinit(page);
    1490             :         dec_zone_page_state(page, NR_PAGETABLE);
    1491             : }
    1492             : 
    1493             : #define pte_offset_map_lock(mm, pmd, address, ptlp)     \
    1494             : ({                                                      \
    1495             :         spinlock_t *__ptl = pte_lockptr(mm, pmd);       \
    1496             :         pte_t *__pte = pte_offset_map(pmd, address);    \
    1497             :         *(ptlp) = __ptl;                                \
    1498             :         spin_lock(__ptl);                               \
    1499             :         __pte;                                          \
    1500             : })
    1501             : 
    1502             : #define pte_unmap_unlock(pte, ptl)      do {            \
    1503             :         spin_unlock(ptl);                               \
    1504             :         pte_unmap(pte);                                 \
    1505             : } while (0)
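
/*
 * Usage sketch (not part of mm.h): the canonical pairing of
 * pte_offset_map_lock() with pte_unmap_unlock(), assuming pmd already
 * points at a populated PTE page table.  The same unlock pattern applies
 * to a PTE obtained via get_locked_pte() above.
 */
static inline int pte_present_locked_example(struct mm_struct *mm, pmd_t *pmd,
                                             unsigned long addr)
{
        spinlock_t *ptl;
        pte_t *ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
        int present = pte_present(*ptep);

        pte_unmap_unlock(ptep, ptl);
        return present;
}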
    1506             : 
    1507             : #define pte_alloc_map(mm, vma, pmd, address)                            \
    1508             :         ((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, vma,    \
    1509             :                                                         pmd, address))? \
    1510             :          NULL: pte_offset_map(pmd, address))
    1511             : 
    1512             : #define pte_alloc_map_lock(mm, pmd, address, ptlp)      \
    1513             :         ((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, NULL,   \
    1514             :                                                         pmd, address))? \
    1515             :                 NULL: pte_offset_map_lock(mm, pmd, address, ptlp))
    1516             : 
    1517             : #define pte_alloc_kernel(pmd, address)                  \
    1518             :         ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
    1519             :                 NULL: pte_offset_kernel(pmd, address))
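
/*
 * Allocation sketch (not part of mm.h): walking down to (and allocating,
 * if necessary) the page table levels covering a user address, using the
 * pud_alloc()/pmd_alloc()/pte_alloc_map_lock() helpers above.  On success
 * the caller would finish with pte_unmap_unlock(*ptepp, *ptlp).
 */
static inline int alloc_locked_pte_example(struct mm_struct *mm,
                                           unsigned long addr,
                                           pte_t **ptepp, spinlock_t **ptlp)
{
        pgd_t *pgd = pgd_offset(mm, addr);
        pud_t *pud = pud_alloc(mm, pgd, addr);
        pmd_t *pmd;
        pte_t *pte;

        if (!pud)
                return -ENOMEM;
        pmd = pmd_alloc(mm, pud, addr);
        if (!pmd)
                return -ENOMEM;
        pte = pte_alloc_map_lock(mm, pmd, addr, ptlp);
        if (!pte)
                return -ENOMEM;
        *ptepp = pte;
        return 0;
}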
    1520             : 
    1521             : #if USE_SPLIT_PMD_PTLOCKS
    1522             : 
    1523             : static struct page *pmd_to_page(pmd_t *pmd)
    1524             : {
    1525             :         unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
    1526             :         return virt_to_page((void *)((unsigned long) pmd & mask));
    1527             : }
    1528             : 
    1529             : static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
    1530             : {
    1531             :         return ptlock_ptr(pmd_to_page(pmd));
    1532             : }
    1533             : 
    1534             : static inline bool pgtable_pmd_page_ctor(struct page *page)
    1535             : {
    1536             : #ifdef CONFIG_TRANSPARENT_HUGEPAGE
    1537             :         page->pmd_huge_pte = NULL;
    1538             : #endif
    1539             :         return ptlock_init(page);
    1540             : }
    1541             : 
    1542             : static inline void pgtable_pmd_page_dtor(struct page *page)
    1543             : {
    1544             : #ifdef CONFIG_TRANSPARENT_HUGEPAGE
    1545             :         VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
    1546             : #endif
    1547             :         ptlock_free(page);
    1548             : }
    1549             : 
    1550             : #define pmd_huge_pte(mm, pmd) (pmd_to_page(pmd)->pmd_huge_pte)
    1551             : 
    1552             : #else
    1553             : 
    1554             : static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
    1555             : {
    1556             :         return &mm->page_table_lock;
    1557             : }
    1558             : 
    1559             : static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; }
    1560             : static inline void pgtable_pmd_page_dtor(struct page *page) {}
    1561             : 
    1562             : #define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
    1563             : 
    1564             : #endif
    1565             : 
    1566             : static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
    1567             : {
    1568             :         spinlock_t *ptl = pmd_lockptr(mm, pmd);
    1569             :         spin_lock(ptl);
    1570             :         return ptl;
    1571             : }
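
/*
 * Usage sketch (not part of mm.h): pmd_lock() returns the PMD's page table
 * lock already held (the split lock or mm->page_table_lock, depending on
 * configuration); the caller releases it with spin_unlock().
 */
static inline int pmd_present_locked_example(struct mm_struct *mm, pmd_t *pmd)
{
        spinlock_t *ptl = pmd_lock(mm, pmd);
        int present = pmd_present(*pmd);

        spin_unlock(ptl);
        return present;
}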
    1572             : 
    1573             : extern void free_area_init(unsigned long * zones_size);
    1574             : extern void free_area_init_node(int nid, unsigned long * zones_size,
    1575             :                 unsigned long zone_start_pfn, unsigned long *zholes_size);
    1576             : extern void free_initmem(void);
    1577             : 
    1578             : /*
    1579             :  * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK)
    1580             :  * into the buddy system.  The freed pages will be poisoned with the pattern
    1581             :  * "poison" if that value is within the range [0, UCHAR_MAX].
    1582             :  * Returns the number of pages freed into the buddy system.
    1583             :  */
    1584             : extern unsigned long free_reserved_area(void *start, void *end,
    1585             :                                         int poison, char *s);
    1586             : 
    1587             : #ifdef  CONFIG_HIGHMEM
    1588             : /*
    1589             :  * Free a highmem page into the buddy system, adjusting totalhigh_pages
    1590             :  * and totalram_pages.
    1591             :  */
    1592             : extern void free_highmem_page(struct page *page);
    1593             : #endif
    1594             : 
    1595             : extern void adjust_managed_page_count(struct page *page, long count);
    1596             : extern void mem_init_print_info(const char *str);
    1597             : 
    1598             : /* Free the reserved page into the buddy system, so it gets managed. */
    1599             : static inline void __free_reserved_page(struct page *page)
    1600             : {
    1601             :         ClearPageReserved(page);
    1602             :         init_page_count(page);
    1603             :         __free_page(page);
    1604             : }
    1605             : 
    1606             : static inline void free_reserved_page(struct page *page)
    1607             : {
    1608             :         __free_reserved_page(page);
    1609             :         adjust_managed_page_count(page, 1);
    1610             : }
    1611             : 
    1612             : static inline void mark_page_reserved(struct page *page)
    1613             : {
    1614             :         SetPageReserved(page);
    1615             :         adjust_managed_page_count(page, -1);
    1616             : }
    1617             : 
    1618             : /*
    1619             :  * Default method to free all the __init memory into the buddy system.
    1620             :  * The freed pages will be poisoned with the pattern "poison" if that value
    1621             :  * is within the range [0, UCHAR_MAX].
    1622             :  * Returns the number of pages freed into the buddy system.
    1623             :  */
    1624             : static inline unsigned long free_initmem_default(int poison)
    1625             : {
    1626             :         extern char __init_begin[], __init_end[];
    1627             : 
    1628             :         return free_reserved_area(&__init_begin, &__init_end,
    1629             :                                   poison, "unused kernel");
    1630             : }
    1631             : 
    1632             : static inline unsigned long get_num_physpages(void)
    1633             : {
    1634             :         int nid;
    1635             :         unsigned long phys_pages = 0;
    1636             : 
    1637             :         for_each_online_node(nid)
    1638             :                 phys_pages += node_present_pages(nid);
    1639             : 
    1640             :         return phys_pages;
    1641             : }
    1642             : 
    1643             : #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
    1644             : /*
    1645             :  * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its
    1646             :  * zones, allocate the backing mem_map and account for memory holes in a more
    1647             :  * architecture-independent manner.  This is a substitute for creating the
    1648             :  * zone_sizes[] and zholes_size[] arrays and passing them to
    1649             :  * free_area_init_node().
    1650             :  *
    1651             :  * An architecture is expected to register the ranges of page frames backed by
    1652             :  * physical memory with memblock_add[_node]() before calling
    1653             :  * free_area_init_nodes(), passing in the PFN each zone ends at.  For basic
    1654             :  * usage, an architecture is expected to do something like:
    1655             :  *
    1656             :  * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
    1657             :  *                                                       max_highmem_pfn};
    1658             :  * for_each_valid_physical_page_range()
    1659             :  *      memblock_add_node(base, size, nid)
    1660             :  * free_area_init_nodes(max_zone_pfns);
    1661             :  *
    1662             :  * free_bootmem_with_active_regions() calls free_bootmem_node() for each
    1663             :  * registered physical page range.  Similarly
    1664             :  * sparse_memory_present_with_active_regions() calls memory_present() for
    1665             :  * each range when SPARSEMEM is enabled.
    1666             :  *
    1667             :  * See mm/page_alloc.c for more information on each function exposed by
    1668             :  * CONFIG_HAVE_MEMBLOCK_NODE_MAP.
    1669             :  */
    1670             : extern void free_area_init_nodes(unsigned long *max_zone_pfn);
    1671             : unsigned long node_map_pfn_alignment(void);
    1672             : unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
    1673             :                                                 unsigned long end_pfn);
    1674             : extern unsigned long absent_pages_in_range(unsigned long start_pfn,
    1675             :                                                 unsigned long end_pfn);
    1676             : extern void get_pfn_range_for_nid(unsigned int nid,
    1677             :                         unsigned long *start_pfn, unsigned long *end_pfn);
    1678             : extern unsigned long find_min_pfn_with_active_regions(void);
    1679             : extern void free_bootmem_with_active_regions(int nid,
    1680             :                                                 unsigned long max_low_pfn);
    1681             : extern void sparse_memory_present_with_active_regions(int nid);
    1682             : 
    1683             : #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
    1684             : 
    1685             : #if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
    1686             :     !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
    1687             : static inline int __early_pfn_to_nid(unsigned long pfn)
    1688             : {
    1689             :         return 0;
    1690             : }
    1691             : #else
    1692             : /* please see mm/page_alloc.c */
    1693             : extern int __meminit early_pfn_to_nid(unsigned long pfn);
    1694             : /* there is a per-arch backend function. */
    1695             : extern int __meminit __early_pfn_to_nid(unsigned long pfn);
    1696             : #endif
    1697             : 
    1698             : extern void set_dma_reserve(unsigned long new_dma_reserve);
    1699             : extern void memmap_init_zone(unsigned long, int, unsigned long,
    1700             :                                 unsigned long, enum memmap_context);
    1701             : extern void setup_per_zone_wmarks(void);
    1702             : extern int __meminit init_per_zone_wmark_min(void);
    1703             : extern void mem_init(void);
    1704             : extern void __init mmap_init(void);
    1705             : extern void show_mem(unsigned int flags);
    1706             : extern void si_meminfo(struct sysinfo * val);
    1707             : extern void si_meminfo_node(struct sysinfo *val, int nid);
    1708             : 
    1709             : extern __printf(3, 4)
    1710             : void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...);
    1711             : 
    1712             : extern void setup_per_cpu_pageset(void);
    1713             : 
    1714             : extern void zone_pcp_update(struct zone *zone);
    1715             : extern void zone_pcp_reset(struct zone *zone);
    1716             : 
    1717             : /* page_alloc.c */
    1718             : extern int min_free_kbytes;
    1719             : 
    1720             : /* nommu.c */
    1721             : extern atomic_long_t mmap_pages_allocated;
    1722             : extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
    1723             : 
    1724             : /* interval_tree.c */
    1725             : void vma_interval_tree_insert(struct vm_area_struct *node,
    1726             :                               struct rb_root *root);
    1727             : void vma_interval_tree_insert_after(struct vm_area_struct *node,
    1728             :                                     struct vm_area_struct *prev,
    1729             :                                     struct rb_root *root);
    1730             : void vma_interval_tree_remove(struct vm_area_struct *node,
    1731             :                               struct rb_root *root);
    1732             : struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root *root,
    1733             :                                 unsigned long start, unsigned long last);
    1734             : struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
    1735             :                                 unsigned long start, unsigned long last);
    1736             : 
    1737             : #define vma_interval_tree_foreach(vma, root, start, last)               \
    1738             :         for (vma = vma_interval_tree_iter_first(root, start, last);     \
    1739             :              vma; vma = vma_interval_tree_iter_next(vma, start, last))
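
/*
 * Iteration sketch (not part of mm.h): counting the VMAs in an interval
 * tree that overlap a page offset range.  The root is typically a file's
 * mapping->i_mmap, and the caller is assumed to hold the mapping's i_mmap
 * lock while iterating.
 */
static inline unsigned long count_overlapping_vmas_example(struct rb_root *root,
                                                           pgoff_t first,
                                                           pgoff_t last)
{
        struct vm_area_struct *vma;
        unsigned long nr = 0;

        vma_interval_tree_foreach(vma, root, first, last)
                nr++;
        return nr;
}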
    1740             : 
    1741             : static inline void vma_nonlinear_insert(struct vm_area_struct *vma,
    1742             :                                         struct list_head *list)
    1743             : {
    1744             :         list_add_tail(&vma->shared.nonlinear, list);
    1745             : }
    1746             : 
    1747             : void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
    1748             :                                    struct rb_root *root);
    1749             : void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
    1750             :                                    struct rb_root *root);
    1751             : struct anon_vma_chain *anon_vma_interval_tree_iter_first(
    1752             :         struct rb_root *root, unsigned long start, unsigned long last);
    1753             : struct anon_vma_chain *anon_vma_interval_tree_iter_next(
    1754             :         struct anon_vma_chain *node, unsigned long start, unsigned long last);
    1755             : #ifdef CONFIG_DEBUG_VM_RB
    1756             : void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
    1757             : #endif
    1758             : 
    1759             : #define anon_vma_interval_tree_foreach(avc, root, start, last)           \
    1760             :         for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
    1761             :              avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))
    1762             : 
    1763             : /* mmap.c */
    1764             : extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
    1765             : extern int vma_adjust(struct vm_area_struct *vma, unsigned long start,
    1766             :         unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert);
    1767             : extern struct vm_area_struct *vma_merge(struct mm_struct *,
    1768             :         struct vm_area_struct *prev, unsigned long addr, unsigned long end,
    1769             :         unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
    1770             :         struct mempolicy *);
    1771             : extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
    1772             : extern int split_vma(struct mm_struct *,
    1773             :         struct vm_area_struct *, unsigned long addr, int new_below);
    1774             : extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
    1775             : extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
    1776             :         struct rb_node **, struct rb_node *);
    1777             : extern void unlink_file_vma(struct vm_area_struct *);
    1778             : extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
    1779             :         unsigned long addr, unsigned long len, pgoff_t pgoff,
    1780             :         bool *need_rmap_locks);
    1781             : extern void exit_mmap(struct mm_struct *);
    1782             : 
    1783             : extern int mm_take_all_locks(struct mm_struct *mm);
    1784             : extern void mm_drop_all_locks(struct mm_struct *mm);
    1785             : 
    1786             : extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
    1787             : extern struct file *get_mm_exe_file(struct mm_struct *mm);
    1788             : 
    1789             : extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);
    1790             : extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
    1791             :                                    unsigned long addr, unsigned long len,
    1792             :                                    unsigned long flags,
    1793             :                                    const struct vm_special_mapping *spec);
    1794             : /* This is an obsolete alternative to _install_special_mapping. */
    1795             : extern int install_special_mapping(struct mm_struct *mm,
    1796             :                                    unsigned long addr, unsigned long len,
    1797             :                                    unsigned long flags, struct page **pages);
    1798             : 
    1799             : extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
    1800             : 
    1801             : extern unsigned long mmap_region(struct file *file, unsigned long addr,
    1802             :         unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
    1803             : extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
    1804             :         unsigned long len, unsigned long prot, unsigned long flags,
    1805             :         unsigned long pgoff, unsigned long *populate);
    1806             : extern int do_munmap(struct mm_struct *, unsigned long, size_t);
    1807             : 
    1808             : #ifdef CONFIG_MMU
    1809             : extern int __mm_populate(unsigned long addr, unsigned long len,
    1810             :                          int ignore_errors);
    1811             : static inline void mm_populate(unsigned long addr, unsigned long len)
    1812             : {
    1813             :         /* Ignore errors */
    1814             :         (void) __mm_populate(addr, len, 1);
    1815             : }
    1816             : #else
    1817             : static inline void mm_populate(unsigned long addr, unsigned long len) {}
    1818             : #endif
    1819             : 
    1820             : /* These take the mm semaphore themselves */
    1821             : extern unsigned long vm_brk(unsigned long, unsigned long);
    1822             : extern int vm_munmap(unsigned long, size_t);
    1823             : extern unsigned long vm_mmap(struct file *, unsigned long,
    1824             :         unsigned long, unsigned long,
    1825             :         unsigned long, unsigned long);
    1826             : 
    1827             : struct vm_unmapped_area_info {
    1828             : #define VM_UNMAPPED_AREA_TOPDOWN 1
    1829             :         unsigned long flags;
    1830             :         unsigned long length;
    1831             :         unsigned long low_limit;
    1832             :         unsigned long high_limit;
    1833             :         unsigned long align_mask;
    1834             :         unsigned long align_offset;
    1835             : };
    1836             : 
    1837             : extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
    1838             : extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
    1839             : 
    1840             : /*
    1841             :  * Search for an unmapped address range.
    1842             :  *
    1843             :  * We are looking for a range that:
    1844             :  * - does not intersect with any VMA;
    1845             :  * - is contained within the [low_limit, high_limit) interval;
    1846             :  * - is at least the desired size;
    1847             :  * - satisfies (begin_addr & align_mask) == (align_offset & align_mask).
    1848             :  */
    1849             : static inline unsigned long
    1850             : vm_unmapped_area(struct vm_unmapped_area_info *info)
    1851             : {
    1852             :         if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
    1853             :                 return unmapped_area(info);
    1854             :         else
    1855             :                 return unmapped_area_topdown(info);
    1856             : }
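
/*
 * Usage sketch (not part of mm.h): how an arch get_unmapped_area()
 * implementation might fill vm_unmapped_area_info for a bottom-up search
 * with no extra alignment constraints.  The limits below loosely mirror
 * the generic bottom-up layout and are shown only for illustration.
 */
static inline unsigned long bottom_up_unmapped_area_example(struct mm_struct *mm,
                                                            unsigned long len)
{
        struct vm_unmapped_area_info info = {
                .flags        = 0,              /* bottom-up search */
                .length       = len,
                .low_limit    = mm->mmap_base,
                .high_limit   = TASK_SIZE,
                .align_mask   = 0,
                .align_offset = 0,
        };

        return vm_unmapped_area(&info);
}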
    1857             : 
    1858             : /* truncate.c */
    1859             : extern void truncate_inode_pages(struct address_space *, loff_t);
    1860             : extern void truncate_inode_pages_range(struct address_space *,
    1861             :                                        loff_t lstart, loff_t lend);
    1862             : extern void truncate_inode_pages_final(struct address_space *);
    1863             : 
    1864             : /* generic vm_area_ops exported for stackable file systems */
    1865             : extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
    1866             : extern void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf);
    1867             : extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
    1868             : 
    1869             : /* mm/page-writeback.c */
    1870             : int write_one_page(struct page *page, int wait);
    1871             : void task_dirty_inc(struct task_struct *tsk);
    1872             : 
    1873             : /* readahead.c */
    1874             : #define VM_MAX_READAHEAD        128     /* kbytes */
    1875             : #define VM_MIN_READAHEAD        16      /* kbytes (includes current page) */
    1876             : 
    1877             : int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
    1878             :                         pgoff_t offset, unsigned long nr_to_read);
    1879             : 
    1880             : void page_cache_sync_readahead(struct address_space *mapping,
    1881             :                                struct file_ra_state *ra,
    1882             :                                struct file *filp,
    1883             :                                pgoff_t offset,
    1884             :                                unsigned long size);
    1885             : 
    1886             : void page_cache_async_readahead(struct address_space *mapping,
    1887             :                                 struct file_ra_state *ra,
    1888             :                                 struct file *filp,
    1889             :                                 struct page *pg,
    1890             :                                 pgoff_t offset,
    1891             :                                 unsigned long size);
    1892             : 
    1893             : unsigned long max_sane_readahead(unsigned long nr);
    1894             : 
    1895             : /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
    1896             : extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
    1897             : 
    1898             : /* CONFIG_STACK_GROWSUP still needs to grow downwards in some places */
    1899             : extern int expand_downwards(struct vm_area_struct *vma,
    1900             :                 unsigned long address);
    1901             : #if VM_GROWSUP
    1902             : extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
    1903             : #else
    1904             :   #define expand_upwards(vma, address) do { } while (0)
    1905             : #endif
    1906             : 
    1907             : /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
    1908             : extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
    1909             : extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
    1910             :                                              struct vm_area_struct **pprev);
    1911             : 
    1912             : /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
    1913             :    NULL if none.  Assume start_addr < end_addr. */
    1914             : static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
    1915             : {
    1916             :         struct vm_area_struct * vma = find_vma(mm,start_addr);
    1917             : 
    1918             :         if (vma && end_addr <= vma->vm_start)
    1919             :                 vma = NULL;
    1920             :         return vma;
    1921             : }
    1922             : 
    1923             : static inline unsigned long vma_pages(struct vm_area_struct *vma)
    1924             : {
    1925             :         return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
    1926             : }
    1927             : 
    1928             : /* Look up the first VMA which exactly matches the interval vm_start ... vm_end */
    1929             : static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
    1930             :                                 unsigned long vm_start, unsigned long vm_end)
    1931             : {
    1932             :         struct vm_area_struct *vma = find_vma(mm, vm_start);
    1933             : 
    1934             :         if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
    1935             :                 vma = NULL;
    1936             : 
    1937             :         return vma;
    1938             : }
    1939             : 
    1940             : #ifdef CONFIG_MMU
    1941             : pgprot_t vm_get_page_prot(unsigned long vm_flags);
    1942             : #else
    1943             : static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
    1944             : {
    1945             :         return __pgprot(0);
    1946             : }
    1947             : #endif
    1948             : 
    1949             : #ifdef CONFIG_NUMA_BALANCING
    1950             : unsigned long change_prot_numa(struct vm_area_struct *vma,
    1951             :                         unsigned long start, unsigned long end);
    1952             : #endif
    1953             : 
    1954             : struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
    1955             : int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
    1956             :                         unsigned long pfn, unsigned long size, pgprot_t);
    1957             : int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
    1958             : int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
    1959             :                         unsigned long pfn);
    1960             : int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
    1961             :                         unsigned long pfn);
    1962             : int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
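/*
 * Minimal sketch (not part of mm.h): a driver ->mmap() method exposing a
 * physical region to user space with remap_pfn_range().  The driver name
 * and 'mydev_phys_base' are hypothetical; vm_iomap_memory(vma,
 * mydev_phys_base, size) would be the simpler wrapper when the whole
 * region is mapped.
 */
static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long pfn = mydev_phys_base >> PAGE_SHIFT;	/* hypothetical base */

	return remap_pfn_range(vma, vma->vm_start, pfn, size,
			       vma->vm_page_prot);
}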
    1963             : 
    1964             : 
    1965             : struct page *follow_page_mask(struct vm_area_struct *vma,
    1966             :                               unsigned long address, unsigned int foll_flags,
    1967             :                               unsigned int *page_mask);
    1968             : 
    1969             : static inline struct page *follow_page(struct vm_area_struct *vma,
    1970             :                 unsigned long address, unsigned int foll_flags)
    1971             : {
    1972             :         unsigned int unused_page_mask;
    1973             :         return follow_page_mask(vma, address, foll_flags, &unused_page_mask);
    1974             : }
    1975             : 
    1976             : #define FOLL_WRITE      0x01    /* check pte is writable */
    1977             : #define FOLL_TOUCH      0x02    /* mark page accessed */
    1978             : #define FOLL_GET        0x04    /* do get_page on page */
    1979             : #define FOLL_DUMP       0x08    /* give error on hole if it would be zero */
    1980             : #define FOLL_FORCE      0x10    /* get_user_pages read/write w/o permission */
    1981             : #define FOLL_NOWAIT     0x20    /* if a disk transfer is needed, start the IO
    1982             :                                  * and return without waiting upon it */
    1983             : #define FOLL_MLOCK      0x40    /* mark page as mlocked */
    1984             : #define FOLL_SPLIT      0x80    /* don't return transhuge pages, split them */
    1985             : #define FOLL_HWPOISON   0x100   /* check page is hwpoisoned */
    1986             : #define FOLL_NUMA       0x200   /* force NUMA hinting page fault */
    1987             : #define FOLL_MIGRATION  0x400   /* wait for page to replace migration entry */
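/*
 * Minimal sketch (not part of mm.h): looking up and pinning the page
 * behind a user address with follow_page().  FOLL_GET takes a page
 * reference that the caller must drop; the function name is
 * hypothetical and mmap_sem must be held across the call.
 */
static int touch_user_page(struct vm_area_struct *vma, unsigned long address)
{
	struct page *page = follow_page(vma, address,
					FOLL_GET | FOLL_TOUCH);

	if (IS_ERR_OR_NULL(page))
		return page ? PTR_ERR(page) : -EFAULT;	/* hole or error */
	/* ... operate on the page here ... */
	put_page(page);					/* drop the FOLL_GET reference */
	return 0;
}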
    1988             : 
    1989             : typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
    1990             :                         void *data);
    1991             : extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
    1992             :                                unsigned long size, pte_fn_t fn, void *data);
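/*
 * Minimal sketch (not part of mm.h): a pte_fn_t callback walked over a
 * range with apply_to_page_range().  The callback and counter are
 * hypothetical; missing page tables in the range are allocated as the
 * walk proceeds.
 */
static int count_present_pte(pte_t *pte, pgtable_t token,
			     unsigned long addr, void *data)
{
	unsigned long *present = data;

	if (pte_present(*pte))
		(*present)++;
	return 0;	/* returning non-zero aborts the walk with that error */
}

/* usage: apply_to_page_range(mm, start, len, count_present_pte, &count); */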
    1993             : 
    1994             : #ifdef CONFIG_PROC_FS
    1995             : void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
    1996             : #else
    1997             : static inline void vm_stat_account(struct mm_struct *mm,
    1998             :                         unsigned long flags, struct file *file, long pages)
    1999             : {
    2000             :         mm->total_vm += pages;
    2001             : }
    2002             : #endif /* CONFIG_PROC_FS */
    2003             : 
    2004             : #ifdef CONFIG_DEBUG_PAGEALLOC
    2005             : extern void kernel_map_pages(struct page *page, int numpages, int enable);
    2006             : #ifdef CONFIG_HIBERNATION
    2007             : extern bool kernel_page_present(struct page *page);
    2008             : #endif /* CONFIG_HIBERNATION */
    2009             : #else
    2010             : static inline void
    2011             : kernel_map_pages(struct page *page, int numpages, int enable) {}
    2012             : #ifdef CONFIG_HIBERNATION
    2013             : static inline bool kernel_page_present(struct page *page) { return true; }
    2014             : #endif /* CONFIG_HIBERNATION */
    2015             : #endif
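/*
 * Minimal sketch (not part of mm.h): with CONFIG_DEBUG_PAGEALLOC the page
 * allocator unmaps pages from the kernel linear mapping on free and maps
 * them back on allocation, so use-after-free accesses fault immediately.
 */
	kernel_map_pages(page, 1 << order, 0);	/* freeing: unmap */
	kernel_map_pages(page, 1 << order, 1);	/* allocating: map back */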
    2016             : 
    2017             : #ifdef __HAVE_ARCH_GATE_AREA
    2018             : extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
    2019             : extern int in_gate_area_no_mm(unsigned long addr);
    2020             : extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
    2021             : #else
    2022             : static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
    2023             : {
    2024             :         return NULL;
    2025             : }
    2026             : static inline int in_gate_area_no_mm(unsigned long addr) { return 0; }
    2027             : static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
    2028             : {
    2029             :         return 0;
    2030             : }
    2031             : #endif  /* __HAVE_ARCH_GATE_AREA */
    2032             : 
    2033             : #ifdef CONFIG_SYSCTL
    2034             : extern int sysctl_drop_caches;
    2035             : int drop_caches_sysctl_handler(struct ctl_table *, int,
    2036             :                                         void __user *, size_t *, loff_t *);
    2037             : #endif
    2038             : 
    2039             : unsigned long shrink_slab(struct shrink_control *shrink,
    2040             :                           unsigned long nr_pages_scanned,
    2041             :                           unsigned long lru_pages);
    2042             : 
    2043             : #ifndef CONFIG_MMU
    2044             : #define randomize_va_space 0
    2045             : #else
    2046             : extern int randomize_va_space;
    2047             : #endif
    2048             : 
    2049             : const char * arch_vma_name(struct vm_area_struct *vma);
    2050             : void print_vma_addr(char *prefix, unsigned long rip);
    2051             : 
    2052             : void sparse_mem_maps_populate_node(struct page **map_map,
    2053             :                                    unsigned long pnum_begin,
    2054             :                                    unsigned long pnum_end,
    2055             :                                    unsigned long map_count,
    2056             :                                    int nodeid);
    2057             : 
    2058             : struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
    2059             : pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
    2060             : pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
    2061             : pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
    2062             : pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
    2063             : void *vmemmap_alloc_block(unsigned long size, int node);
    2064             : void *vmemmap_alloc_block_buf(unsigned long size, int node);
    2065             : void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
    2066             : int vmemmap_populate_basepages(unsigned long start, unsigned long end,
    2067             :                                int node);
    2068             : int vmemmap_populate(unsigned long start, unsigned long end, int node);
    2069             : void vmemmap_populate_print_last(void);
    2070             : #ifdef CONFIG_MEMORY_HOTPLUG
    2071             : void vmemmap_free(unsigned long start, unsigned long end);
    2072             : #endif
    2073             : void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
    2074             :                                   unsigned long size);
    2075             : 
    2076             : enum mf_flags {
    2077             :         MF_COUNT_INCREASED = 1 << 0,
    2078             :         MF_ACTION_REQUIRED = 1 << 1,
    2079             :         MF_MUST_KILL = 1 << 2,
    2080             :         MF_SOFT_OFFLINE = 1 << 3,
    2081             : };
    2082             : extern int memory_failure(unsigned long pfn, int trapno, int flags);
    2083             : extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
    2084             : extern int unpoison_memory(unsigned long pfn);
    2085             : extern int sysctl_memory_failure_early_kill;
    2086             : extern int sysctl_memory_failure_recovery;
    2087             : extern void shake_page(struct page *p, int access);
    2088             : extern atomic_long_t num_poisoned_pages;
    2089             : extern int soft_offline_page(struct page *page, int flags);
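/*
 * Minimal sketch (not part of mm.h): queueing handling of a corrupted
 * page from an interrupt-safe context.  'paddr' is a hypothetical
 * physical address taken from a hardware error report; the trap number
 * and flags here are placeholders (MF_ACTION_REQUIRED would be set if
 * the error was consumed synchronously).
 */
	memory_failure_queue(paddr >> PAGE_SHIFT, 0 /* trapno */, 0 /* flags */);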
    2090             : 
    2091             : #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
    2092             : extern void clear_huge_page(struct page *page,
    2093             :                             unsigned long addr,
    2094             :                             unsigned int pages_per_huge_page);
    2095             : extern void copy_user_huge_page(struct page *dst, struct page *src,
    2096             :                                 unsigned long addr, struct vm_area_struct *vma,
    2097             :                                 unsigned int pages_per_huge_page);
    2098             : #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
    2099             : 
    2100             : #ifdef CONFIG_DEBUG_PAGEALLOC
    2101             : extern unsigned int _debug_guardpage_minorder;
    2102             : 
    2103             : static inline unsigned int debug_guardpage_minorder(void)
    2104             : {
    2105             :         return _debug_guardpage_minorder;
    2106             : }
    2107             : 
    2108             : static inline bool page_is_guard(struct page *page)
    2109             : {
    2110             :         return test_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
    2111             : }
    2112             : #else
    2113             : static inline unsigned int debug_guardpage_minorder(void) { return 0; }
    2114             : static inline bool page_is_guard(struct page *page) { return false; }
    2115             : #endif /* CONFIG_DEBUG_PAGEALLOC */
    2116             : 
    2117             : #if MAX_NUMNODES > 1
    2118             : void __init setup_nr_node_ids(void);
    2119             : #else
    2120             : static inline void setup_nr_node_ids(void) {}
    2121             : #endif
    2122             : 
    2123             : #endif /* __KERNEL__ */
    2124             : #endif /* _LINUX_MM_H */

Generated by: LCOV version 1.10