LCOV - code coverage report
Current view: top level - fs/btrfs - delayed-inode.c (source / functions) Hit Total Coverage
Test: btrfstest.info Lines: 640 748 85.6 %
Date: 2014-11-28 Functions: 54 57 94.7 %

          Line data    Source code
       1             : /*
       2             :  * Copyright (C) 2011 Fujitsu.  All rights reserved.
       3             :  * Written by Miao Xie <miaox@cn.fujitsu.com>
       4             :  *
       5             :  * This program is free software; you can redistribute it and/or
       6             :  * modify it under the terms of the GNU General Public
       7             :  * License v2 as published by the Free Software Foundation.
       8             :  *
       9             :  * This program is distributed in the hope that it will be useful,
      10             :  * but WITHOUT ANY WARRANTY; without even the implied warranty of
      11             :  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
      12             :  * General Public License for more details.
      13             :  *
      14             :  * You should have received a copy of the GNU General Public
      15             :  * License along with this program; if not, write to the
      16             :  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
      17             :  * Boston, MA 021110-1307, USA.
      18             :  */
      19             : 
      20             : #include <linux/slab.h>
      21             : #include "delayed-inode.h"
      22             : #include "disk-io.h"
      23             : #include "transaction.h"
      24             : #include "ctree.h"
      25             : 
      26             : #define BTRFS_DELAYED_WRITEBACK         512
      27             : #define BTRFS_DELAYED_BACKGROUND        128
      28             : #define BTRFS_DELAYED_BATCH             16
      29             : 
      30             : static struct kmem_cache *delayed_node_cache;
      31             : 
      32           0 : int __init btrfs_delayed_inode_init(void)
      33             : {
      34           0 :         delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
      35             :                                         sizeof(struct btrfs_delayed_node),
      36             :                                         0,
      37             :                                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
      38             :                                         NULL);
      39           0 :         if (!delayed_node_cache)
      40             :                 return -ENOMEM;
      41           0 :         return 0;
      42             : }
      43             : 
      44           0 : void btrfs_delayed_inode_exit(void)
      45             : {
      46           0 :         if (delayed_node_cache)
      47           0 :                 kmem_cache_destroy(delayed_node_cache);
      48           0 : }
      49             : 
      50       22329 : static inline void btrfs_init_delayed_node(
      51             :                                 struct btrfs_delayed_node *delayed_node,
      52             :                                 struct btrfs_root *root, u64 inode_id)
      53             : {
      54       22329 :         delayed_node->root = root;
      55       22329 :         delayed_node->inode_id = inode_id;
      56             :         atomic_set(&delayed_node->refs, 0);
      57       22329 :         delayed_node->count = 0;
      58       22329 :         delayed_node->flags = 0;
      59       22329 :         delayed_node->ins_root = RB_ROOT;
      60       22329 :         delayed_node->del_root = RB_ROOT;
      61       22329 :         mutex_init(&delayed_node->mutex);
      62       22328 :         delayed_node->index_cnt = 0;
      63       22328 :         INIT_LIST_HEAD(&delayed_node->n_list);
      64       22328 :         INIT_LIST_HEAD(&delayed_node->p_list);
      65       22328 :         delayed_node->bytes_reserved = 0;
      66       22328 :         memset(&delayed_node->inode_item, 0, sizeof(delayed_node->inode_item));
      67       22328 : }
      68             : 
      69             : static inline int btrfs_is_continuous_delayed_item(
      70             :                                         struct btrfs_delayed_item *item1,
      71             :                                         struct btrfs_delayed_item *item2)
      72             : {
      73       39132 :         if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
      74       39132 :             item1->key.objectid == item2->key.objectid &&
      75       39132 :             item1->key.type == item2->key.type &&
      76       19566 :             item1->key.offset + 1 == item2->key.offset)
      77             :                 return 1;
      78             :         return 0;
      79             : }
      80             : 
      81             : static inline struct btrfs_delayed_root *btrfs_get_delayed_root(
      82             :                                                         struct btrfs_root *root)
      83             : {
      84      143294 :         return root->fs_info->delayed_root;
      85             : }
      86             : 
      87      237888 : static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode)
      88             : {
      89             :         struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
      90      237888 :         struct btrfs_root *root = btrfs_inode->root;
      91             :         u64 ino = btrfs_ino(inode);
      92             :         struct btrfs_delayed_node *node;
      93             : 
      94      237888 :         node = ACCESS_ONCE(btrfs_inode->delayed_node);
      95      237888 :         if (node) {
      96      209201 :                 atomic_inc(&node->refs);
      97      209197 :                 return node;
      98             :         }
      99             : 
     100             :         spin_lock(&root->inode_lock);
     101       28687 :         node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
     102       28687 :         if (node) {
     103           0 :                 if (btrfs_inode->delayed_node) {
     104           0 :                         atomic_inc(&node->refs); /* can be accessed */
     105           0 :                         BUG_ON(btrfs_inode->delayed_node != node);
     106             :                         spin_unlock(&root->inode_lock);
     107           0 :                         return node;
     108             :                 }
     109           0 :                 btrfs_inode->delayed_node = node;
     110             :                 /* can be accessed and cached in the inode */
     111           0 :                 atomic_add(2, &node->refs);
     112             :                 spin_unlock(&root->inode_lock);
     113           0 :                 return node;
     114             :         }
     115             :         spin_unlock(&root->inode_lock);
     116             : 
     117       28687 :         return NULL;
     118             : }
     119             : 
     120             : /* Will return either the node or PTR_ERR(-ENOMEM) */
     121      195283 : static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
     122             :                                                         struct inode *inode)
     123             : {
     124             :         struct btrfs_delayed_node *node;
     125             :         struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
     126      195283 :         struct btrfs_root *root = btrfs_inode->root;
     127             :         u64 ino = btrfs_ino(inode);
     128             :         int ret;
     129             : 
     130             : again:
     131      195283 :         node = btrfs_get_delayed_node(inode);
     132      195278 :         if (node)
     133             :                 return node;
     134             : 
     135       22329 :         node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS);
     136       22329 :         if (!node)
     137             :                 return ERR_PTR(-ENOMEM);
     138       22329 :         btrfs_init_delayed_node(node, root, ino);
     139             : 
     140             :         /* cached in the btrfs inode and can be accessed */
     141       22329 :         atomic_add(2, &node->refs);
     142             : 
     143       22329 :         ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
     144       22329 :         if (ret) {
     145           0 :                 kmem_cache_free(delayed_node_cache, node);
     146           0 :                 return ERR_PTR(ret);
     147             :         }
     148             : 
     149             :         spin_lock(&root->inode_lock);
     150       22329 :         ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
     151       22329 :         if (ret == -EEXIST) {
     152             :                 spin_unlock(&root->inode_lock);
     153           0 :                 kmem_cache_free(delayed_node_cache, node);
     154             :                 radix_tree_preload_end();
     155             :                 goto again;
     156             :         }
     157       22329 :         btrfs_inode->delayed_node = node;
     158             :         spin_unlock(&root->inode_lock);
     159             :         radix_tree_preload_end();
     160             : 
     161       22329 :         return node;
     162             : }
     163             : 
     164             : /*
     165             :  * Call it when holding delayed_node->mutex
     166             :  *
     167             :  * If mod = 1, add this node into the prepared list.
     168             :  */
     169      196450 : static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
     170             :                                      struct btrfs_delayed_node *node,
     171             :                                      int mod)
     172             : {
     173             :         spin_lock(&root->lock);
     174      196453 :         if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
     175      274566 :                 if (!list_empty(&node->p_list))
     176      137281 :                         list_move_tail(&node->p_list, &root->prepare_list);
     177           2 :                 else if (mod)
     178           1 :                         list_add_tail(&node->p_list, &root->prepare_list);
     179             :         } else {
     180       59170 :                 list_add_tail(&node->n_list, &root->node_list);
     181       59170 :                 list_add_tail(&node->p_list, &root->prepare_list);
     182       59170 :                 atomic_inc(&node->refs); /* inserted into list */
     183       59170 :                 root->nodes++;
     184             :                 set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
     185             :         }
     186             :         spin_unlock(&root->lock);
     187      196453 : }
     188             : 
     189             : /* Call it when holding delayed_node->mutex */
     190       87366 : static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
     191             :                                        struct btrfs_delayed_node *node)
     192             : {
     193             :         spin_lock(&root->lock);
     194       87368 :         if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
     195       59180 :                 root->nodes--;
     196       59180 :                 atomic_dec(&node->refs); /* not in the list */
     197       59180 :                 list_del_init(&node->n_list);
     198      118360 :                 if (!list_empty(&node->p_list))
     199             :                         list_del_init(&node->p_list);
     200             :                 clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
     201             :         }
     202             :         spin_unlock(&root->lock);
     203       87368 : }
     204             : 
     205        8765 : static struct btrfs_delayed_node *btrfs_first_delayed_node(
     206             :                         struct btrfs_delayed_root *delayed_root)
     207             : {
     208             :         struct list_head *p;
     209             :         struct btrfs_delayed_node *node = NULL;
     210             : 
     211             :         spin_lock(&delayed_root->lock);
     212       17530 :         if (list_empty(&delayed_root->node_list))
     213             :                 goto out;
     214             : 
     215             :         p = delayed_root->node_list.next;
     216        1535 :         node = list_entry(p, struct btrfs_delayed_node, n_list);
     217        1535 :         atomic_inc(&node->refs);
     218             : out:
     219             :         spin_unlock(&delayed_root->lock);
     220             : 
     221        8765 :         return node;
     222             : }
     223             : 
     224       25773 : static struct btrfs_delayed_node *btrfs_next_delayed_node(
     225             :                                                 struct btrfs_delayed_node *node)
     226             : {
     227             :         struct btrfs_delayed_root *delayed_root;
     228             :         struct list_head *p;
     229             :         struct btrfs_delayed_node *next = NULL;
     230             : 
     231       25773 :         delayed_root = node->root->fs_info->delayed_root;
     232             :         spin_lock(&delayed_root->lock);
     233       25773 :         if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
     234             :                 /* not in the list */
     235           2 :                 if (list_empty(&delayed_root->node_list))
     236             :                         goto out;
     237             :                 p = delayed_root->node_list.next;
     238       25772 :         } else if (list_is_last(&node->n_list, &delayed_root->node_list))
     239             :                 goto out;
     240             :         else
     241             :                 p = node->n_list.next;
     242             : 
     243       24254 :         next = list_entry(p, struct btrfs_delayed_node, n_list);
     244       24254 :         atomic_inc(&next->refs);
     245             : out:
     246             :         spin_unlock(&delayed_root->lock);
     247             : 
     248       25773 :         return next;
     249             : }
     250             : 
     251      283766 : static void __btrfs_release_delayed_node(
     252             :                                 struct btrfs_delayed_node *delayed_node,
     253             :                                 int mod)
     254             : {
     255             :         struct btrfs_delayed_root *delayed_root;
     256             : 
     257      283766 :         if (!delayed_node)
     258      283820 :                 return;
     259             : 
     260      283766 :         delayed_root = delayed_node->root->fs_info->delayed_root;
     261             : 
     262      283766 :         mutex_lock(&delayed_node->mutex);
     263      283818 :         if (delayed_node->count)
     264      196452 :                 btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
     265             :         else
     266       87366 :                 btrfs_dequeue_delayed_node(delayed_root, delayed_node);
     267      283821 :         mutex_unlock(&delayed_node->mutex);
     268             : 
     269      567638 :         if (atomic_dec_and_test(&delayed_node->refs)) {
     270             :                 bool free = false;
     271       22322 :                 struct btrfs_root *root = delayed_node->root;
     272             :                 spin_lock(&root->inode_lock);
     273       22322 :                 if (atomic_read(&delayed_node->refs) == 0) {
     274       22322 :                         radix_tree_delete(&root->delayed_nodes_tree,
     275       22322 :                                           delayed_node->inode_id);
     276             :                         free = true;
     277             :                 }
     278             :                 spin_unlock(&root->inode_lock);
     279       22322 :                 if (free)
     280       22322 :                         kmem_cache_free(delayed_node_cache, delayed_node);
     281             :         }
     282             : }
     283             : 
     284             : static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
     285             : {
     286      258567 :         __btrfs_release_delayed_node(node, 0);
     287             : }
     288             : 
     289       25262 : static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
     290             :                                         struct btrfs_delayed_root *delayed_root)
     291             : {
     292             :         struct list_head *p;
     293             :         struct btrfs_delayed_node *node = NULL;
     294             : 
     295             :         spin_lock(&delayed_root->lock);
     296       50526 :         if (list_empty(&delayed_root->prepare_list))
     297             :                 goto out;
     298             : 
     299             :         p = delayed_root->prepare_list.next;
     300             :         list_del_init(p);
     301       25252 :         node = list_entry(p, struct btrfs_delayed_node, p_list);
     302       25252 :         atomic_inc(&node->refs);
     303             : out:
     304             :         spin_unlock(&delayed_root->lock);
     305             : 
     306       25263 :         return node;
     307             : }
     308             : 
     309             : static inline void btrfs_release_prepared_delayed_node(
     310             :                                         struct btrfs_delayed_node *node)
     311             : {
     312       25200 :         __btrfs_release_delayed_node(node, 1);
     313             : }
     314             : 
     315       37951 : static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
     316             : {
     317             :         struct btrfs_delayed_item *item;
     318       37951 :         item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
     319       37950 :         if (item) {
     320       37950 :                 item->data_len = data_len;
     321       37950 :                 item->ins_or_del = 0;
     322       37950 :                 item->bytes_reserved = 0;
     323       37950 :                 item->delayed_node = NULL;
     324             :                 atomic_set(&item->refs, 1);
     325             :         }
     326       37950 :         return item;
     327             : }
     328             : 
     329             : /*
     330             :  * __btrfs_lookup_delayed_item - look up the delayed item by key
     331             :  * @delayed_node: pointer to the delayed node
     332             :  * @key:          the key to look up
     333             :  * @prev:         used to store the prev item if the right item isn't found
     334             :  * @next:         used to store the next item if the right item isn't found
     335             :  *
     336             :  * Note: if we don't find the right item, we will return the prev item and
     337             :  * the next item.
     338             :  */
     339       12323 : static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
     340             :                                 struct rb_root *root,
     341             :                                 struct btrfs_key *key,
     342             :                                 struct btrfs_delayed_item **prev,
     343             :                                 struct btrfs_delayed_item **next)
     344             : {
     345             :         struct rb_node *node, *prev_node = NULL;
     346             :         struct btrfs_delayed_item *delayed_item = NULL;
     347             :         int ret = 0;
     348             : 
     349       12323 :         node = root->rb_node;
     350             : 
     351       26863 :         while (node) {
     352             :                 delayed_item = rb_entry(node, struct btrfs_delayed_item,
     353             :                                         rb_node);
     354             :                 prev_node = node;
     355        3090 :                 ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
     356        3090 :                 if (ret < 0)
     357         433 :                         node = node->rb_right;
     358        2657 :                 else if (ret > 0)
     359        1784 :                         node = node->rb_left;
     360             :                 else
     361             :                         return delayed_item;
     362             :         }
     363             : 
     364       11450 :         if (prev) {
     365           0 :                 if (!prev_node)
     366           0 :                         *prev = NULL;
     367           0 :                 else if (ret < 0)
     368           0 :                         *prev = delayed_item;
     369           0 :                 else if ((node = rb_prev(prev_node)) != NULL) {
     370           0 :                         *prev = rb_entry(node, struct btrfs_delayed_item,
     371             :                                          rb_node);
     372             :                 } else
     373           0 :                         *prev = NULL;
     374             :         }
     375             : 
     376       11450 :         if (next) {
     377           0 :                 if (!prev_node)
     378           0 :                         *next = NULL;
     379           0 :                 else if (ret > 0)
     380           0 :                         *next = delayed_item;
     381           0 :                 else if ((node = rb_next(prev_node)) != NULL) {
     382           0 :                         *next = rb_entry(node, struct btrfs_delayed_item,
     383             :                                          rb_node);
     384             :                 } else
     385           0 :                         *next = NULL;
     386             :         }
     387             :         return NULL;
     388             : }
     389             : 
     390             : static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
     391             :                                         struct btrfs_delayed_node *delayed_node,
     392             :                                         struct btrfs_key *key)
     393             : {
     394             :         struct btrfs_delayed_item *item;
     395             : 
     396       12323 :         item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
     397             :                                            NULL, NULL);
     398             :         return item;
     399             : }
     400             : 
     401       37950 : static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
     402             :                                     struct btrfs_delayed_item *ins,
     403             :                                     int action)
     404             : {
     405             :         struct rb_node **p, *node;
     406             :         struct rb_node *parent_node = NULL;
     407             :         struct rb_root *root;
     408             :         struct btrfs_delayed_item *item;
     409             :         int cmp;
     410             : 
     411       37950 :         if (action == BTRFS_DELAYED_INSERTION_ITEM)
     412       26500 :                 root = &delayed_node->ins_root;
     413       11450 :         else if (action == BTRFS_DELAYED_DELETION_ITEM)
     414       11450 :                 root = &delayed_node->del_root;
     415             :         else
     416           0 :                 BUG();
     417       37950 :         p = &root->rb_node;
     418       37950 :         node = &ins->rb_node;
     419             : 
     420      228103 :         while (*p) {
     421             :                 parent_node = *p;
     422             :                 item = rb_entry(parent_node, struct btrfs_delayed_item,
     423             :                                  rb_node);
     424             : 
     425      152203 :                 cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
     426      152203 :                 if (cmp < 0)
     427      151902 :                         p = &(*p)->rb_right;
     428         301 :                 else if (cmp > 0)
     429         301 :                         p = &(*p)->rb_left;
     430             :                 else
     431             :                         return -EEXIST;
     432             :         }
     433             : 
     434             :         rb_link_node(node, parent_node, p);
     435       37950 :         rb_insert_color(node, root);
     436       37950 :         ins->delayed_node = delayed_node;
     437       37950 :         ins->ins_or_del = action;
     438             : 
     439       37950 :         if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
     440       26500 :             action == BTRFS_DELAYED_INSERTION_ITEM &&
     441       26500 :             ins->key.offset >= delayed_node->index_cnt)
     442       26500 :                         delayed_node->index_cnt = ins->key.offset + 1;
     443             : 
     444       37950 :         delayed_node->count++;
     445       37950 :         atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
     446       37951 :         return 0;
     447             : }
     448             : 
     449             : static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
     450             :                                               struct btrfs_delayed_item *item)
     451             : {
     452       26501 :         return __btrfs_add_delayed_item(node, item,
     453             :                                         BTRFS_DELAYED_INSERTION_ITEM);
     454             : }
     455             : 
     456             : static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
     457             :                                              struct btrfs_delayed_item *item)
     458             : {
     459       11450 :         return __btrfs_add_delayed_item(node, item,
     460             :                                         BTRFS_DELAYED_DELETION_ITEM);
     461             : }
     462             : 
     463      103387 : static void finish_one_item(struct btrfs_delayed_root *delayed_root)
     464             : {
     465      103387 :         int seq = atomic_inc_return(&delayed_root->items_seq);
     466      206766 :         if ((atomic_dec_return(&delayed_root->items) <
     467      103920 :             BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
     468             :             waitqueue_active(&delayed_root->wait))
     469          12 :                 wake_up(&delayed_root->wait);
     470      103381 : }
     471             : 
     472       37951 : static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
     473             : {
     474             :         struct rb_root *root;
     475             :         struct btrfs_delayed_root *delayed_root;
     476             : 
     477       37951 :         delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;
     478             : 
     479       37951 :         BUG_ON(!delayed_root);
     480       37951 :         BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
     481             :                delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);
     482             : 
     483       37951 :         if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
     484       26501 :                 root = &delayed_item->delayed_node->ins_root;
     485             :         else
     486       11450 :                 root = &delayed_item->delayed_node->del_root;
     487             : 
     488       37951 :         rb_erase(&delayed_item->rb_node, root);
     489       37951 :         delayed_item->delayed_node->count--;
     490             : 
     491       37951 :         finish_one_item(delayed_root);
     492       37952 : }
     493             : 
     494       37951 : static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
     495             : {
     496       37951 :         if (item) {
     497       37951 :                 __btrfs_remove_delayed_item(item);
     498       75904 :                 if (atomic_dec_and_test(&item->refs))
     499       37952 :                         kfree(item);
     500             :         }
     501       37952 : }
     502             : 
     503             : static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
     504             :                                         struct btrfs_delayed_node *delayed_node)
     505             : {
     506             :         struct rb_node *p;
     507             :         struct btrfs_delayed_item *item = NULL;
     508             : 
     509       89798 :         p = rb_first(&delayed_node->ins_root);
     510       89799 :         if (p)
     511             :                 item = rb_entry(p, struct btrfs_delayed_item, rb_node);
     512             : 
     513             :         return item;
     514             : }
     515             : 
     516             : static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
     517             :                                         struct btrfs_delayed_node *delayed_node)
     518             : {
     519             :         struct rb_node *p;
     520             :         struct btrfs_delayed_item *item = NULL;
     521             : 
     522       82832 :         p = rb_first(&delayed_node->del_root);
     523       82832 :         if (p)
     524             :                 item = rb_entry(p, struct btrfs_delayed_item, rb_node);
     525             : 
     526             :         return item;
     527             : }
     528             : 
     529             : static struct btrfs_delayed_item *__btrfs_next_delayed_item(
     530             :                                                 struct btrfs_delayed_item *item)
     531             : {
     532             :         struct rb_node *p;
     533             :         struct btrfs_delayed_item *next = NULL;
     534             : 
     535       38241 :         p = rb_next(&item->rb_node);
     536       38241 :         if (p)
     537             :                 next = rb_entry(p, struct btrfs_delayed_item, rb_node);
     538             : 
     539             :         return next;
     540             : }
     541             : 
     542       37947 : static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
     543       37947 :                                                struct btrfs_root *root,
     544             :                                                struct btrfs_delayed_item *item)
     545             : {
     546             :         struct btrfs_block_rsv *src_rsv;
     547             :         struct btrfs_block_rsv *dst_rsv;
     548             :         u64 num_bytes;
     549             :         int ret;
     550             : 
     551       37947 :         if (!trans->bytes_reserved)
     552             :                 return 0;
     553             : 
     554       37947 :         src_rsv = trans->block_rsv;
     555       37947 :         dst_rsv = &root->fs_info->delayed_block_rsv;
     556             : 
     557             :         num_bytes = btrfs_calc_trans_metadata_size(root, 1);
     558       37947 :         ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
     559       37951 :         if (!ret) {
     560       37951 :                 trace_btrfs_space_reservation(root->fs_info, "delayed_item",
     561             :                                               item->key.objectid,
     562             :                                               num_bytes, 1);
     563       37951 :                 item->bytes_reserved = num_bytes;
     564             :         }
     565             : 
     566             :         return ret;
     567             : }
     568             : 
     569       37951 : static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
     570             :                                                 struct btrfs_delayed_item *item)
     571             : {
     572             :         struct btrfs_block_rsv *rsv;
     573             : 
     574       37951 :         if (!item->bytes_reserved)
     575       37951 :                 return;
     576             : 
     577       37951 :         rsv = &root->fs_info->delayed_block_rsv;
     578       37951 :         trace_btrfs_space_reservation(root->fs_info, "delayed_item",
     579             :                                       item->key.objectid, item->bytes_reserved,
     580             :                                       0);
     581       37951 :         btrfs_block_rsv_release(root, rsv,
     582             :                                 item->bytes_reserved);
     583             : }
     584             : 
     585       59172 : static int btrfs_delayed_inode_reserve_metadata(
     586             :                                         struct btrfs_trans_handle *trans,
     587       59172 :                                         struct btrfs_root *root,
     588             :                                         struct inode *inode,
     589             :                                         struct btrfs_delayed_node *node)
     590             : {
     591             :         struct btrfs_block_rsv *src_rsv;
     592             :         struct btrfs_block_rsv *dst_rsv;
     593             :         u64 num_bytes;
     594             :         int ret;
     595             :         bool release = false;
     596             : 
     597       59172 :         src_rsv = trans->block_rsv;
     598       59172 :         dst_rsv = &root->fs_info->delayed_block_rsv;
     599             : 
     600             :         num_bytes = btrfs_calc_trans_metadata_size(root, 1);
     601             : 
     602             :         /*
     603             :          * btrfs_dirty_inode will update the inode under btrfs_join_transaction
     604             :          * which doesn't reserve space for speed.  This is a problem since we
     605             :          * still need to reserve space for this update, so try to reserve the
     606             :          * space.
     607             :          *
     608             :          * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
     609             :          * we're accounted for.
     610             :          */
     611       72197 :         if (!src_rsv || (!trans->bytes_reserved &&
     612       13025 :                          src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
     613        4797 :                 ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
     614             :                                           BTRFS_RESERVE_NO_FLUSH);
     615             :                 /*
     616             :                  * Since we're under a transaction reserve_metadata_bytes could
     617             :                  * try to commit the transaction which will make it return
     618             :                  * EAGAIN to make us stop the transaction we have, so return
     619             :                  * ENOSPC instead so that btrfs_dirty_inode knows what to do.
     620             :                  */
     621        4797 :                 if (ret == -EAGAIN)
     622             :                         ret = -ENOSPC;
     623        4797 :                 if (!ret) {
     624        4797 :                         node->bytes_reserved = num_bytes;
     625        4797 :                         trace_btrfs_space_reservation(root->fs_info,
     626             :                                                       "delayed_inode",
     627             :                                                       btrfs_ino(inode),
     628             :                                                       num_bytes, 1);
     629             :                 }
     630             :                 return ret;
     631       54375 :         } else if (src_rsv->type == BTRFS_BLOCK_RSV_DELALLOC) {
     632             :                 spin_lock(&BTRFS_I(inode)->lock);
     633       13025 :                 if (test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
     634       13025 :                                        &BTRFS_I(inode)->runtime_flags)) {
     635             :                         spin_unlock(&BTRFS_I(inode)->lock);
     636             :                         release = true;
     637             :                         goto migrate;
     638             :                 }
     639             :                 spin_unlock(&BTRFS_I(inode)->lock);
     640             : 
     641             :                 /* Ok we didn't have space pre-reserved.  This shouldn't happen
     642             :                  * too often but it can happen if we do delalloc to an existing
     643             :                  * inode which gets dirtied because of the time update, and then
     644             :                  * isn't touched again until after the transaction commits and
     645             :                  * then we try to write out the data.  First try to be nice and
     646             :                  * reserve something strictly for us.  If not be a pain and try
     647             :                  * to steal from the delalloc block rsv.
     648             :                  */
     649           8 :                 ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
     650             :                                           BTRFS_RESERVE_NO_FLUSH);
     651           8 :                 if (!ret)
     652             :                         goto out;
     653             : 
     654           0 :                 ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
     655           0 :                 if (!WARN_ON(ret))
     656             :                         goto out;
     657             : 
     658             :                 /*
     659             :                  * Ok this is a problem, let's just steal from the global rsv
     660             :                  * since this really shouldn't happen that often.
     661             :                  */
     662           0 :                 ret = btrfs_block_rsv_migrate(&root->fs_info->global_block_rsv,
     663             :                                               dst_rsv, num_bytes);
     664             :                 goto out;
     665             :         }
     666             : 
     667             : migrate:
     668       54367 :         ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
     669             : 
     670             : out:
     671             :         /*
     672             :          * Migrate only takes a reservation, it doesn't touch the size of the
     673             :          * block_rsv.  This is to simplify people who don't normally have things
     674             :          * migrated from their block rsv.  If they go to release their
     675             :          * reservation, that will decrease the size as well, so if migrate
     676             :          * reduced size we'd end up with a negative size.  But for the
     677             :          * delalloc_meta_reserved stuff we will only know to drop 1 reservation,
     678             :          * but we could in fact do this reserve/migrate dance several times
     679             :          * between the time we did the original reservation and we'd clean it
     680             :          * up.  So to take care of this, release the space for the meta
     681             :          * reservation here.  I think it may be time for a documentation page on
     682             :          * how block rsvs. work.
     683             :          */
     684       54375 :         if (!ret) {
     685       54375 :                 trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
     686             :                                               btrfs_ino(inode), num_bytes, 1);
     687       54375 :                 node->bytes_reserved = num_bytes;
     688             :         }
     689             : 
     690       54375 :         if (release) {
     691       13017 :                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
     692             :                                               btrfs_ino(inode), num_bytes, 0);
     693       13017 :                 btrfs_block_rsv_release(root, src_rsv, num_bytes);
     694             :         }
     695             : 
     696             :         return ret;
     697             : }
     698             : 
     699       59158 : static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
     700             :                                                 struct btrfs_delayed_node *node)
     701             : {
     702             :         struct btrfs_block_rsv *rsv;
     703             : 
     704       59158 :         if (!node->bytes_reserved)
     705       59180 :                 return;
     706             : 
     707       59159 :         rsv = &root->fs_info->delayed_block_rsv;
     708       59159 :         trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
     709             :                                       node->inode_id, node->bytes_reserved, 0);
     710       59160 :         btrfs_block_rsv_release(root, rsv,
     711             :                                 node->bytes_reserved);
     712       59181 :         node->bytes_reserved = 0;
     713             : }
     714             : 
     715             : /*
     716             :  * This helper will insert some continuous items into the same leaf according
     717             :  * to the free space of the leaf.
     718             :  */
     719        2584 : static int btrfs_batch_insert_items(struct btrfs_root *root,
     720             :                                     struct btrfs_path *path,
     721             :                                     struct btrfs_delayed_item *item)
     722             : {
     723       16083 :         struct btrfs_delayed_item *curr, *next;
     724             :         int free_space;
     725             :         int total_data_size = 0, total_size = 0;
     726             :         struct extent_buffer *leaf;
     727             :         char *data_ptr;
     728             :         struct btrfs_key *keys;
     729             :         u32 *data_size;
     730             :         struct list_head head;
     731             :         int slot;
     732             :         int nitems;
     733             :         int i;
     734             :         int ret = 0;
     735             : 
     736        2584 :         BUG_ON(!path->nodes[0]);
     737             : 
     738             :         leaf = path->nodes[0];
     739        2584 :         free_space = btrfs_leaf_free_space(root, leaf);
     740             :         INIT_LIST_HEAD(&head);
     741             : 
     742             :         next = item;
     743             :         nitems = 0;
     744             : 
     745             :         /*
     746             :          * count the number of the continuous items that we can insert in batch
     747             :          */
     748       18991 :         while (total_size + next->data_len + sizeof(struct btrfs_item) <=
     749             :                free_space) {
     750       16083 :                 total_data_size += next->data_len;
     751       16083 :                 total_size += next->data_len + sizeof(struct btrfs_item);
     752       16083 :                 list_add_tail(&next->tree_list, &head);
     753       16083 :                 nitems++;
     754             : 
     755             :                 curr = next;
     756             :                 next = __btrfs_next_delayed_item(curr);
     757       16083 :                 if (!next)
     758             :                         break;
     759             : 
     760       13861 :                 if (!btrfs_is_continuous_delayed_item(curr, next))
     761             :                         break;
     762             :         }
     763             : 
     764        2584 :         if (!nitems) {
     765             :                 ret = 0;
     766             :                 goto out;
     767             :         }
     768             : 
     769             :         /*
     770             :          * we need allocate some memory space, but it might cause the task
     771             :          * to sleep, so we set all locked nodes in the path to blocking locks
     772             :          * first.
     773             :          */
     774        2487 :         btrfs_set_path_blocking(path);
     775             : 
     776        2487 :         keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
     777        2487 :         if (!keys) {
     778             :                 ret = -ENOMEM;
     779             :                 goto out;
     780             :         }
     781             : 
     782             :         data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
     783        2487 :         if (!data_size) {
     784             :                 ret = -ENOMEM;
     785             :                 goto error;
     786             :         }
     787             : 
     788             :         /* get keys of all the delayed items */
     789             :         i = 0;
     790       18570 :         list_for_each_entry(next, &head, tree_list) {
     791       16083 :                 keys[i] = next->key;
     792       16083 :                 data_size[i] = next->data_len;
     793       16083 :                 i++;
     794             :         }
     795             : 
     796             :         /* reset all the locked nodes in the patch to spinning locks. */
     797        2487 :         btrfs_clear_path_blocking(path, NULL, 0);
     798             : 
     799             :         /* insert the keys of the items */
     800        2487 :         setup_items_for_insert(root, path, keys, data_size,
     801             :                                total_data_size, total_size, nitems);
     802             : 
     803             :         /* insert the dir index items */
     804        2487 :         slot = path->slots[0];
     805       18570 :         list_for_each_entry_safe(curr, next, &head, tree_list) {
     806       16083 :                 data_ptr = btrfs_item_ptr(leaf, slot, char);
     807       16083 :                 write_extent_buffer(leaf, &curr->data,
     808             :                                     (unsigned long)data_ptr,
     809       16083 :                                     curr->data_len);
     810       16083 :                 slot++;
     811             : 
     812       16083 :                 btrfs_delayed_item_release_metadata(root, curr);
     813             : 
     814       16083 :                 list_del(&curr->tree_list);
     815       16083 :                 btrfs_release_delayed_item(curr);
     816             :         }
     817             : 
     818             : error:
     819        2487 :         kfree(data_size);
     820        2487 :         kfree(keys);
     821             : out:
     822        2584 :         return ret;
     823             : }
     824             : 
     825             : /*
     826             :  * This helper can just do simple insertion that needn't extend item for new
     827             :  * data, such as directory name index insertion, inode insertion.
     828             :  */
     829        9546 : static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
     830             :                                      struct btrfs_root *root,
     831             :                                      struct btrfs_path *path,
     832        9546 :                                      struct btrfs_delayed_item *delayed_item)
     833             : {
     834             :         struct extent_buffer *leaf;
     835             :         char *ptr;
     836             :         int ret;
     837             : 
     838        9546 :         ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
     839             :                                       delayed_item->data_len);
     840        9546 :         if (ret < 0 && ret != -EEXIST)
     841             :                 return ret;
     842             : 
     843        9546 :         leaf = path->nodes[0];
     844             : 
     845       19092 :         ptr = btrfs_item_ptr(leaf, path->slots[0], char);
     846             : 
     847        9546 :         write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
     848        9546 :                             delayed_item->data_len);
     849        9546 :         btrfs_mark_buffer_dirty(leaf);
     850             : 
     851        9546 :         btrfs_delayed_item_release_metadata(root, delayed_item);
     852        9546 :         return 0;
     853             : }
     854             : 
     855             : /*
     856             :  * we insert an item first, then if there are some continuous items, we try
     857             :  * to insert those items into the same leaf.
     858             :  */
     859       52379 : static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
     860             :                                       struct btrfs_path *path,
     861             :                                       struct btrfs_root *root,
     862             :                                       struct btrfs_delayed_node *node)
     863             : {
     864             :         struct btrfs_delayed_item *curr, *prev;
     865             :         int ret = 0;
     866             : 
     867             : do_again:
     868       61925 :         mutex_lock(&node->mutex);
     869             :         curr = __btrfs_first_delayed_insertion_item(node);
     870       61933 :         if (!curr)
     871             :                 goto insert_end;
     872             : 
     873        9546 :         ret = btrfs_insert_delayed_item(trans, root, path, curr);
     874        9546 :         if (ret < 0) {
     875           0 :                 btrfs_release_path(path);
     876           0 :                 goto insert_end;
     877             :         }
     878             : 
     879             :         prev = curr;
     880             :         curr = __btrfs_next_delayed_item(prev);
     881       12210 :         if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
     882             :                 /* insert the continuous items into the same leaf */
     883        2584 :                 path->slots[0]++;
     884        2584 :                 btrfs_batch_insert_items(root, path, curr);
     885             :         }
     886        9546 :         btrfs_release_delayed_item(prev);
     887        9546 :         btrfs_mark_buffer_dirty(path->nodes[0]);
     888             : 
     889        9546 :         btrfs_release_path(path);
     890        9546 :         mutex_unlock(&node->mutex);
     891        9546 :         goto do_again;
     892             : 
     893             : insert_end:
     894       52387 :         mutex_unlock(&node->mutex);
     895       52391 :         return ret;
     896             : }
     897             : 
     898        2575 : static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
     899             :                                     struct btrfs_root *root,
     900             :                                     struct btrfs_path *path,
     901             :                                     struct btrfs_delayed_item *item)
     902             : {
     903        5345 :         struct btrfs_delayed_item *curr, *next;
     904        2575 :         struct extent_buffer *leaf;
     905             :         struct btrfs_key key;
     906             :         struct list_head head;
     907             :         int nitems, i, last_item;
     908             :         int ret = 0;
     909             : 
     910        2575 :         BUG_ON(!path->nodes[0]);
     911             : 
     912             :         leaf = path->nodes[0];
     913             : 
     914        2575 :         i = path->slots[0];
     915        2575 :         last_item = btrfs_header_nritems(leaf) - 1;
     916        2575 :         if (i > last_item)
     917             :                 return -ENOENT; /* FIXME: Is errno suitable? */
     918             : 
     919             :         next = item;
     920             :         INIT_LIST_HEAD(&head);
     921        2575 :         btrfs_item_key_to_cpu(leaf, &key, i);
     922             :         nitems = 0;
     923             :         /*
     924             :          * count the number of the dir index items that we can delete in batch
     925             :          */
     926        7920 :         while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
     927        5345 :                 list_add_tail(&next->tree_list, &head);
     928        5345 :                 nitems++;
     929             : 
     930             :                 curr = next;
     931             :                 next = __btrfs_next_delayed_item(curr);
     932        5345 :                 if (!next)
     933             :                         break;
     934             : 
     935        3041 :                 if (!btrfs_is_continuous_delayed_item(curr, next))
     936             :                         break;
     937             : 
     938        2783 :                 i++;
     939        2783 :                 if (i > last_item)
     940             :                         break;
     941        2770 :                 btrfs_item_key_to_cpu(leaf, &key, i);
     942             :         }
     943             : 
     944        2575 :         if (!nitems)
     945             :                 return 0;
     946             : 
     947        2575 :         ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
     948        2575 :         if (ret)
     949             :                 goto out;
     950             : 
     951        7920 :         list_for_each_entry_safe(curr, next, &head, tree_list) {
     952        5345 :                 btrfs_delayed_item_release_metadata(root, curr);
     953        5345 :                 list_del(&curr->tree_list);
     954        5345 :                 btrfs_release_delayed_item(curr);
     955             :         }
     956             : 
     957             : out:
     958        2575 :         return ret;
     959             : }
     960             : 
     961       52388 : static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
     962             :                                       struct btrfs_path *path,
     963             :                                       struct btrfs_root *root,
     964             :                                       struct btrfs_delayed_node *node)
     965             : {
     966             :         struct btrfs_delayed_item *curr, *prev;
     967             :         int ret = 0;
     968             : 
     969             : do_again:
     970       54963 :         mutex_lock(&node->mutex);
     971             :         curr = __btrfs_first_delayed_deletion_item(node);
     972       54966 :         if (!curr)
     973             :                 goto delete_fail;
     974             : 
     975        2575 :         ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
     976        2575 :         if (ret < 0)
     977             :                 goto delete_fail;
     978        2575 :         else if (ret > 0) {
     979             :                 /*
     980             :                  * can't find the item which the node points to, so this node
     981             :                  * is invalid, just drop it.
     982             :                  */
     983             :                 prev = curr;
     984             :                 curr = __btrfs_next_delayed_item(prev);
     985           0 :                 btrfs_release_delayed_item(prev);
     986             :                 ret = 0;
     987           0 :                 btrfs_release_path(path);
     988           0 :                 if (curr) {
     989           0 :                         mutex_unlock(&node->mutex);
     990           0 :                         goto do_again;
     991             :                 } else
     992             :                         goto delete_fail;
     993             :         }
     994             : 
     995        2575 :         btrfs_batch_delete_items(trans, root, path, curr);
     996        2575 :         btrfs_release_path(path);
     997        2575 :         mutex_unlock(&node->mutex);
     998        2575 :         goto do_again;
     999             : 
    1000             : delete_fail:
    1001       52391 :         btrfs_release_path(path);
    1002       52365 :         mutex_unlock(&node->mutex);
    1003       52393 :         return ret;
    1004             : }
    1005             : 
    1006       59181 : static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
    1007             : {
    1008             :         struct btrfs_delayed_root *delayed_root;
    1009             : 
    1010      118362 :         if (delayed_node &&
    1011             :             test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
    1012       59181 :                 BUG_ON(!delayed_node->root);
    1013             :                 clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
    1014       59180 :                 delayed_node->count--;
    1015             : 
    1016       59180 :                 delayed_root = delayed_node->root->fs_info->delayed_root;
    1017       59180 :                 finish_one_item(delayed_root);
    1018             :         }
    1019       59175 : }
    1020             : 
    1021        6256 : static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
    1022             : {
    1023             :         struct btrfs_delayed_root *delayed_root;
    1024             : 
    1025             :         ASSERT(delayed_node->root);
    1026             :         clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
    1027        6256 :         delayed_node->count--;
    1028             : 
    1029        6256 :         delayed_root = delayed_node->root->fs_info->delayed_root;
    1030        6256 :         finish_one_item(delayed_root);
    1031        6256 : }
    1032             : 
    1033       59179 : static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
    1034             :                                         struct btrfs_root *root,
    1035             :                                         struct btrfs_path *path,
    1036       59161 :                                         struct btrfs_delayed_node *node)
    1037             : {
    1038             :         struct btrfs_key key;
    1039             :         struct btrfs_inode_item *inode_item;
    1040        6256 :         struct extent_buffer *leaf;
    1041             :         int mod;
    1042             :         int ret;
    1043             : 
    1044       59179 :         key.objectid = node->inode_id;
    1045             :         btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
    1046       59179 :         key.offset = 0;
    1047             : 
    1048       59179 :         if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
    1049             :                 mod = -1;
    1050             :         else
    1051             :                 mod = 1;
    1052             : 
    1053       59179 :         ret = btrfs_lookup_inode(trans, root, path, &key, mod);
    1054       59142 :         if (ret > 0) {
    1055           0 :                 btrfs_release_path(path);
    1056           0 :                 return -ENOENT;
    1057       59142 :         } else if (ret < 0) {
    1058             :                 return ret;
    1059             :         }
    1060             : 
    1061       59142 :         leaf = path->nodes[0];
    1062      118277 :         inode_item = btrfs_item_ptr(leaf, path->slots[0],
    1063             :                                     struct btrfs_inode_item);
    1064       59135 :         write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
    1065             :                             sizeof(struct btrfs_inode_item));
    1066       59120 :         btrfs_mark_buffer_dirty(leaf);
    1067             : 
    1068       59179 :         if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
    1069             :                 goto no_iref;
    1070             : 
    1071        6256 :         path->slots[0]++;
    1072       12512 :         if (path->slots[0] >= btrfs_header_nritems(leaf))
    1073             :                 goto search;
    1074             : again:
    1075        6256 :         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
    1076        6256 :         if (key.objectid != node->inode_id)
    1077             :                 goto out;
    1078             : 
    1079        6256 :         if (key.type != BTRFS_INODE_REF_KEY &&
    1080             :             key.type != BTRFS_INODE_EXTREF_KEY)
    1081             :                 goto out;
    1082             : 
    1083             :         /*
    1084             :          * Delayed iref deletion is for the inode who has only one link,
    1085             :          * so there is only one iref. The case that several irefs are
    1086             :          * in the same item doesn't exist.
    1087             :          */
    1088             :         btrfs_del_item(trans, root, path);
    1089             : out:
    1090        6256 :         btrfs_release_delayed_iref(node);
    1091             : no_iref:
    1092       59179 :         btrfs_release_path(path);
    1093             : err_out:
    1094       59161 :         btrfs_delayed_inode_release_metadata(root, node);
    1095       59181 :         btrfs_release_delayed_inode(node);
    1096             : 
    1097       59174 :         return ret;
    1098             : 
    1099             : search:
    1100         151 :         btrfs_release_path(path);
    1101             : 
    1102             :         btrfs_set_key_type(&key, BTRFS_INODE_EXTREF_KEY);
    1103         151 :         key.offset = -1;
    1104         151 :         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
    1105         151 :         if (ret < 0)
    1106             :                 goto err_out;
    1107             :         ASSERT(ret);
    1108             : 
    1109             :         ret = 0;
    1110         151 :         leaf = path->nodes[0];
    1111         151 :         path->slots[0]--;
    1112         151 :         goto again;
    1113             : }
    1114             : 
    1115       52390 : static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
    1116             :                                              struct btrfs_root *root,
    1117             :                                              struct btrfs_path *path,
    1118             :                                              struct btrfs_delayed_node *node)
    1119             : {
    1120             :         int ret;
    1121             : 
    1122       52390 :         mutex_lock(&node->mutex);
    1123       52392 :         if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
    1124           1 :                 mutex_unlock(&node->mutex);
    1125           1 :                 return 0;
    1126             :         }
    1127             : 
    1128       52391 :         ret = __btrfs_update_delayed_inode(trans, root, path, node);
    1129       52385 :         mutex_unlock(&node->mutex);
    1130       52389 :         return ret;
    1131             : }
    1132             : 
    1133             : static inline int
    1134       52381 : __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
    1135             :                                    struct btrfs_path *path,
    1136             :                                    struct btrfs_delayed_node *node)
    1137             : {
    1138             :         int ret;
    1139             : 
    1140       52381 :         ret = btrfs_insert_delayed_items(trans, path, node->root, node);
    1141       52389 :         if (ret)
    1142             :                 return ret;
    1143             : 
    1144       52389 :         ret = btrfs_delete_delayed_items(trans, path, node->root, node);
    1145       52392 :         if (ret)
    1146             :                 return ret;
    1147             : 
    1148       52390 :         ret = btrfs_update_delayed_inode(trans, node->root, path, node);
    1149       52390 :         return ret;
    1150             : }
    1151             : 
    1152             : /*
    1153             :  * Called when committing the transaction.
    1154             :  * Returns 0 on success.
    1155             :  * Returns < 0 on error and returns with an aborted transaction with any
    1156             :  * outstanding delayed items cleaned up.
    1157             :  */
    1158        6667 : static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
    1159        6667 :                                      struct btrfs_root *root, int nr)
    1160             : {
    1161             :         struct btrfs_delayed_root *delayed_root;
    1162             :         struct btrfs_delayed_node *curr_node, *prev_node;
    1163             :         struct btrfs_path *path;
    1164             :         struct btrfs_block_rsv *block_rsv;
    1165             :         int ret = 0;
    1166        6667 :         bool count = (nr > 0);
    1167             : 
    1168        6667 :         if (trans->aborted)
    1169             :                 return -EIO;
    1170             : 
    1171        6667 :         path = btrfs_alloc_path();
    1172        6667 :         if (!path)
    1173             :                 return -ENOMEM;
    1174        6667 :         path->leave_spinning = 1;
    1175             : 
    1176        6667 :         block_rsv = trans->block_rsv;
    1177        6667 :         trans->block_rsv = &root->fs_info->delayed_block_rsv;
    1178             : 
    1179             :         delayed_root = btrfs_get_delayed_root(root);
    1180             : 
    1181        6667 :         curr_node = btrfs_first_delayed_node(delayed_root);
    1182       39107 :         while (curr_node && (!count || (count && nr--))) {
    1183       25773 :                 ret = __btrfs_commit_inode_delayed_items(trans, path,
    1184             :                                                          curr_node);
    1185       25773 :                 if (ret) {
    1186             :                         btrfs_release_delayed_node(curr_node);
    1187             :                         curr_node = NULL;
    1188           0 :                         btrfs_abort_transaction(trans, root, ret);
    1189           0 :                         break;
    1190             :                 }
    1191             : 
    1192             :                 prev_node = curr_node;
    1193       25773 :                 curr_node = btrfs_next_delayed_node(curr_node);
    1194             :                 btrfs_release_delayed_node(prev_node);
    1195             :         }
    1196             : 
    1197        6667 :         if (curr_node)
    1198             :                 btrfs_release_delayed_node(curr_node);
    1199        6667 :         btrfs_free_path(path);
    1200        6667 :         trans->block_rsv = block_rsv;
    1201             : 
    1202        6667 :         return ret;
    1203             : }
    1204             : 
    1205        6440 : int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
    1206             :                             struct btrfs_root *root)
    1207             : {
    1208        6440 :         return __btrfs_run_delayed_items(trans, root, -1);
    1209             : }
    1210             : 
    1211         227 : int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
    1212             :                                struct btrfs_root *root, int nr)
    1213             : {
    1214         227 :         return __btrfs_run_delayed_items(trans, root, nr);
    1215             : }
    1216             : 
    1217        1527 : int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
    1218             :                                      struct inode *inode)
    1219             : {
    1220        1527 :         struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
    1221             :         struct btrfs_path *path;
    1222             :         struct btrfs_block_rsv *block_rsv;
    1223             :         int ret;
    1224             : 
    1225        1527 :         if (!delayed_node)
    1226             :                 return 0;
    1227             : 
    1228        1527 :         mutex_lock(&delayed_node->mutex);
    1229        1527 :         if (!delayed_node->count) {
    1230         159 :                 mutex_unlock(&delayed_node->mutex);
    1231             :                 btrfs_release_delayed_node(delayed_node);
    1232         159 :                 return 0;
    1233             :         }
    1234        1368 :         mutex_unlock(&delayed_node->mutex);
    1235             : 
    1236        1368 :         path = btrfs_alloc_path();
    1237        1368 :         if (!path) {
    1238             :                 btrfs_release_delayed_node(delayed_node);
    1239           0 :                 return -ENOMEM;
    1240             :         }
    1241        1368 :         path->leave_spinning = 1;
    1242             : 
    1243        1368 :         block_rsv = trans->block_rsv;
    1244        1368 :         trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
    1245             : 
    1246        1368 :         ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
    1247             : 
    1248             :         btrfs_release_delayed_node(delayed_node);
    1249        1368 :         btrfs_free_path(path);
    1250        1368 :         trans->block_rsv = block_rsv;
    1251             : 
    1252        1368 :         return ret;
    1253             : }
    1254             : 
    1255        6907 : int btrfs_commit_inode_delayed_inode(struct inode *inode)
    1256             : {
    1257             :         struct btrfs_trans_handle *trans;
    1258        6907 :         struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
    1259             :         struct btrfs_path *path;
    1260             :         struct btrfs_block_rsv *block_rsv;
    1261             :         int ret;
    1262             : 
    1263        6907 :         if (!delayed_node)
    1264             :                 return 0;
    1265             : 
    1266        6793 :         mutex_lock(&delayed_node->mutex);
    1267        6793 :         if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
    1268           3 :                 mutex_unlock(&delayed_node->mutex);
    1269             :                 btrfs_release_delayed_node(delayed_node);
    1270           3 :                 return 0;
    1271             :         }
    1272        6790 :         mutex_unlock(&delayed_node->mutex);
    1273             : 
    1274        6790 :         trans = btrfs_join_transaction(delayed_node->root);
    1275        6790 :         if (IS_ERR(trans)) {
    1276           0 :                 ret = PTR_ERR(trans);
    1277           0 :                 goto out;
    1278             :         }
    1279             : 
    1280        6790 :         path = btrfs_alloc_path();
    1281        6790 :         if (!path) {
    1282             :                 ret = -ENOMEM;
    1283             :                 goto trans_out;
    1284             :         }
    1285        6790 :         path->leave_spinning = 1;
    1286             : 
    1287        6790 :         block_rsv = trans->block_rsv;
    1288        6790 :         trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
    1289             : 
    1290        6790 :         mutex_lock(&delayed_node->mutex);
    1291        6790 :         if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
    1292        6790 :                 ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
    1293             :                                                    path, delayed_node);
    1294             :         else
    1295             :                 ret = 0;
    1296        6790 :         mutex_unlock(&delayed_node->mutex);
    1297             : 
    1298        6790 :         btrfs_free_path(path);
    1299        6790 :         trans->block_rsv = block_rsv;
    1300             : trans_out:
    1301        6790 :         btrfs_end_transaction(trans, delayed_node->root);
    1302        6790 :         btrfs_btree_balance_dirty(delayed_node->root);
    1303             : out:
    1304             :         btrfs_release_delayed_node(delayed_node);
    1305             : 
    1306        6790 :         return ret;
    1307             : }
    1308             : 
    1309       25704 : void btrfs_remove_delayed_node(struct inode *inode)
    1310             : {
    1311             :         struct btrfs_delayed_node *delayed_node;
    1312             : 
    1313       25704 :         delayed_node = ACCESS_ONCE(BTRFS_I(inode)->delayed_node);
    1314       25704 :         if (!delayed_node)
    1315       25704 :                 return;
    1316             : 
    1317       22322 :         BTRFS_I(inode)->delayed_node = NULL;
    1318             :         btrfs_release_delayed_node(delayed_node);
    1319             : }
    1320             : 
    1321             : struct btrfs_async_delayed_work {
    1322             :         struct btrfs_delayed_root *delayed_root;
    1323             :         int nr;
    1324             :         struct btrfs_work work;
    1325             : };
    1326             : 
    1327        1596 : static void btrfs_async_run_delayed_root(struct btrfs_work *work)
    1328             : {
    1329             :         struct btrfs_async_delayed_work *async_work;
    1330             :         struct btrfs_delayed_root *delayed_root;
    1331             :         struct btrfs_trans_handle *trans;
    1332             :         struct btrfs_path *path;
    1333             :         struct btrfs_delayed_node *delayed_node = NULL;
    1334             :         struct btrfs_root *root;
    1335             :         struct btrfs_block_rsv *block_rsv;
    1336             :         int total_done = 0;
    1337             : 
    1338        1596 :         async_work = container_of(work, struct btrfs_async_delayed_work, work);
    1339        1596 :         delayed_root = async_work->delayed_root;
    1340             : 
    1341        1596 :         path = btrfs_alloc_path();
    1342        1596 :         if (!path)
    1343             :                 goto out;
    1344             : 
    1345             : again:
    1346       25464 :         if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND / 2)
    1347             :                 goto free_path;
    1348             : 
    1349       25262 :         delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
    1350       25263 :         if (!delayed_node)
    1351             :                 goto free_path;
    1352             : 
    1353       25252 :         path->leave_spinning = 1;
    1354       25252 :         root = delayed_node->root;
    1355             : 
    1356       25252 :         trans = btrfs_join_transaction(root);
    1357       25244 :         if (IS_ERR(trans))
    1358             :                 goto release_path;
    1359             : 
    1360       25244 :         block_rsv = trans->block_rsv;
    1361       25244 :         trans->block_rsv = &root->fs_info->delayed_block_rsv;
    1362             : 
    1363       25244 :         __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
    1364             : 
    1365       25250 :         trans->block_rsv = block_rsv;
    1366       25250 :         btrfs_end_transaction(trans, root);
    1367       25243 :         btrfs_btree_balance_dirty_nodelay(root);
    1368             : 
    1369             : release_path:
    1370       25236 :         btrfs_release_path(path);
    1371       25200 :         total_done++;
    1372             : 
    1373             :         btrfs_release_prepared_delayed_node(delayed_node);
    1374       25251 :         if (async_work->nr == 0 || total_done < async_work->nr)
    1375             :                 goto again;
    1376             : 
    1377             : free_path:
    1378        1596 :         btrfs_free_path(path);
    1379             : out:
    1380        1596 :         wake_up(&delayed_root->wait);
    1381        1596 :         kfree(async_work);
    1382        1596 : }
    1383             : 
    1384             : 
    1385        1596 : static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
    1386             :                                      struct btrfs_root *root, int nr)
    1387             : {
    1388             :         struct btrfs_async_delayed_work *async_work;
    1389             : 
    1390        1596 :         if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
    1391             :                 return 0;
    1392             : 
    1393             :         async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
    1394        1596 :         if (!async_work)
    1395             :                 return -ENOMEM;
    1396             : 
    1397        1596 :         async_work->delayed_root = delayed_root;
    1398        1596 :         btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper,
    1399             :                         btrfs_async_run_delayed_root, NULL, NULL);
    1400        1596 :         async_work->nr = nr;
    1401             : 
    1402        1596 :         btrfs_queue_work(root->fs_info->delayed_workers, &async_work->work);
    1403             :         return 0;
    1404             : }
    1405             : 
    1406        2098 : void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
    1407             : {
    1408             :         struct btrfs_delayed_root *delayed_root;
    1409             :         delayed_root = btrfs_get_delayed_root(root);
    1410        2098 :         WARN_ON(btrfs_first_delayed_node(delayed_root));
    1411        2098 : }
    1412             : 
    1413             : static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
    1414             : {
    1415             :         int val = atomic_read(&delayed_root->items_seq);
    1416             : 
    1417          28 :         if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
    1418             :                 return 1;
    1419             : 
    1420          20 :         if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
    1421             :                 return 1;
    1422             : 
    1423             :         return 0;
    1424             : }
    1425             : 
    1426      134529 : void btrfs_balance_delayed_items(struct btrfs_root *root)
    1427             : {
    1428             :         struct btrfs_delayed_root *delayed_root;
    1429             : 
    1430             :         delayed_root = btrfs_get_delayed_root(root);
    1431             : 
    1432      134529 :         if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
    1433             :                 return;
    1434             : 
    1435        1596 :         if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
    1436             :                 int seq;
    1437             :                 int ret;
    1438             : 
    1439             :                 seq = atomic_read(&delayed_root->items_seq);
    1440             : 
    1441           8 :                 ret = btrfs_wq_run_delayed_node(delayed_root, root, 0);
    1442           8 :                 if (ret)
    1443             :                         return;
    1444             : 
    1445          36 :                 wait_event_interruptible(delayed_root->wait,
    1446             :                                          could_end_wait(delayed_root, seq));
    1447             :                 return;
    1448             :         }
    1449             : 
    1450        1588 :         btrfs_wq_run_delayed_node(delayed_root, root, BTRFS_DELAYED_BATCH);
    1451             : }
    1452             : 
    1453             : /* Will return 0 or -ENOMEM */
    1454       53001 : int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
    1455             :                                    struct btrfs_root *root, const char *name,
    1456             :                                    int name_len, struct inode *dir,
    1457             :                                    struct btrfs_disk_key *disk_key, u8 type,
    1458             :                                    u64 index)
    1459             : {
    1460             :         struct btrfs_delayed_node *delayed_node;
    1461             :         struct btrfs_delayed_item *delayed_item;
    1462             :         struct btrfs_dir_item *dir_item;
    1463             :         int ret;
    1464             : 
    1465       26501 :         delayed_node = btrfs_get_or_create_delayed_node(dir);
    1466       26501 :         if (IS_ERR(delayed_node))
    1467           0 :                 return PTR_ERR(delayed_node);
    1468             : 
    1469       26501 :         delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
    1470       26500 :         if (!delayed_item) {
    1471             :                 ret = -ENOMEM;
    1472             :                 goto release_node;
    1473             :         }
    1474             : 
    1475       26500 :         delayed_item->key.objectid = btrfs_ino(dir);
    1476             :         btrfs_set_key_type(&delayed_item->key, BTRFS_DIR_INDEX_KEY);
    1477       26500 :         delayed_item->key.offset = index;
    1478             : 
    1479             :         dir_item = (struct btrfs_dir_item *)delayed_item->data;
    1480       26500 :         dir_item->location = *disk_key;
    1481       26500 :         btrfs_set_stack_dir_transid(dir_item, trans->transid);
    1482             :         btrfs_set_stack_dir_data_len(dir_item, 0);
    1483       26500 :         btrfs_set_stack_dir_name_len(dir_item, name_len);
    1484             :         btrfs_set_stack_dir_type(dir_item, type);
    1485       26500 :         memcpy((char *)(dir_item + 1), name, name_len);
    1486             : 
    1487       26500 :         ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item);
    1488             :         /*
    1489             :          * we have reserved enough space when we start a new transaction,
    1490             :          * so reserving metadata failure is impossible
    1491             :          */
    1492       26501 :         BUG_ON(ret);
    1493             : 
    1494             : 
    1495       26501 :         mutex_lock(&delayed_node->mutex);
    1496             :         ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
    1497       26501 :         if (unlikely(ret)) {
    1498           0 :                 btrfs_err(root->fs_info, "err add delayed dir index item(name: %.*s) "
    1499             :                                 "into the insertion tree of the delayed node"
    1500             :                                 "(root id: %llu, inode id: %llu, errno: %d)",
    1501             :                                 name_len, name, delayed_node->root->objectid,
    1502             :                                 delayed_node->inode_id, ret);
    1503           0 :                 BUG();
    1504             :         }
    1505       26501 :         mutex_unlock(&delayed_node->mutex);
    1506             : 
    1507             : release_node:
    1508             :         btrfs_release_delayed_node(delayed_node);
    1509       26501 :         return ret;
    1510             : }
    1511             : 
    1512       12323 : static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root,
    1513             :                                                struct btrfs_delayed_node *node,
    1514             :                                                struct btrfs_key *key)
    1515             : {
    1516         873 :         struct btrfs_delayed_item *item;
    1517             : 
    1518       12323 :         mutex_lock(&node->mutex);
    1519             :         item = __btrfs_lookup_delayed_insertion_item(node, key);
    1520       12323 :         if (!item) {
    1521       11450 :                 mutex_unlock(&node->mutex);
    1522       11450 :                 return 1;
    1523             :         }
    1524             : 
    1525         873 :         btrfs_delayed_item_release_metadata(root, item);
    1526         873 :         btrfs_release_delayed_item(item);
    1527         873 :         mutex_unlock(&node->mutex);
    1528         873 :         return 0;
    1529             : }
    1530             : 
    1531       23773 : int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
    1532             :                                    struct btrfs_root *root, struct inode *dir,
    1533             :                                    u64 index)
    1534             : {
    1535             :         struct btrfs_delayed_node *node;
    1536             :         struct btrfs_delayed_item *item;
    1537             :         struct btrfs_key item_key;
    1538             :         int ret;
    1539             : 
    1540       12323 :         node = btrfs_get_or_create_delayed_node(dir);
    1541       12323 :         if (IS_ERR(node))
    1542           0 :                 return PTR_ERR(node);
    1543             : 
    1544       12323 :         item_key.objectid = btrfs_ino(dir);
    1545             :         btrfs_set_key_type(&item_key, BTRFS_DIR_INDEX_KEY);
    1546       12323 :         item_key.offset = index;
    1547             : 
    1548       12323 :         ret = btrfs_delete_delayed_insertion_item(root, node, &item_key);
    1549       12323 :         if (!ret)
    1550             :                 goto end;
    1551             : 
    1552       11450 :         item = btrfs_alloc_delayed_item(0);
    1553       11450 :         if (!item) {
    1554             :                 ret = -ENOMEM;
    1555             :                 goto end;
    1556             :         }
    1557             : 
    1558       11450 :         item->key = item_key;
    1559             : 
    1560       11450 :         ret = btrfs_delayed_item_reserve_metadata(trans, root, item);
    1561             :         /*
    1562             :          * we have reserved enough space when we start a new transaction,
    1563             :          * so reserving metadata failure is impossible.
    1564             :          */
    1565       11450 :         BUG_ON(ret);
    1566             : 
    1567       11450 :         mutex_lock(&node->mutex);
    1568             :         ret = __btrfs_add_delayed_deletion_item(node, item);
    1569       11450 :         if (unlikely(ret)) {
    1570           0 :                 btrfs_err(root->fs_info, "err add delayed dir index item(index: %llu) "
    1571             :                                 "into the deletion tree of the delayed node"
    1572             :                                 "(root id: %llu, inode id: %llu, errno: %d)",
    1573             :                                 index, node->root->objectid, node->inode_id,
    1574             :                                 ret);
    1575           0 :                 BUG();
    1576             :         }
    1577       11450 :         mutex_unlock(&node->mutex);
    1578             : end:
    1579             :         btrfs_release_delayed_node(node);
    1580       12323 :         return ret;
    1581             : }
    1582             : 
    1583         185 : int btrfs_inode_delayed_dir_index_count(struct inode *inode)
    1584             : {
    1585         185 :         struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
    1586             : 
    1587         185 :         if (!delayed_node)
    1588             :                 return -ENOENT;
    1589             : 
    1590             :         /*
    1591             :          * Since we have held i_mutex of this directory, it is impossible that
    1592             :          * a new directory index is added into the delayed node and index_cnt
    1593             :          * is updated now. So we needn't lock the delayed node.
    1594             :          */
    1595          61 :         if (!delayed_node->index_cnt) {
    1596             :                 btrfs_release_delayed_node(delayed_node);
    1597          61 :                 return -EINVAL;
    1598             :         }
    1599             : 
    1600           0 :         BTRFS_I(inode)->index_cnt = delayed_node->index_cnt;
    1601             :         btrfs_release_delayed_node(delayed_node);
    1602           0 :         return 0;
    1603             : }
    1604             : 
    1605       22061 : void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
    1606             :                              struct list_head *del_list)
    1607             : {
    1608             :         struct btrfs_delayed_node *delayed_node;
    1609             :         struct btrfs_delayed_item *item;
    1610             : 
    1611       22061 :         delayed_node = btrfs_get_delayed_node(inode);
    1612       22061 :         if (!delayed_node)
    1613       22061 :                 return;
    1614             : 
    1615       21073 :         mutex_lock(&delayed_node->mutex);
    1616             :         item = __btrfs_first_delayed_insertion_item(delayed_node);
    1617       43112 :         while (item) {
    1618         966 :                 atomic_inc(&item->refs);
    1619         966 :                 list_add_tail(&item->readdir_list, ins_list);
    1620             :                 item = __btrfs_next_delayed_item(item);
    1621             :         }
    1622             : 
    1623             :         item = __btrfs_first_delayed_deletion_item(delayed_node);
    1624       42342 :         while (item) {
    1625         196 :                 atomic_inc(&item->refs);
    1626         196 :                 list_add_tail(&item->readdir_list, del_list);
    1627             :                 item = __btrfs_next_delayed_item(item);
    1628             :         }
    1629       21073 :         mutex_unlock(&delayed_node->mutex);
    1630             :         /*
    1631             :          * This delayed node is still cached in the btrfs inode, so refs
    1632             :          * must be > 1 now, and we needn't check it is going to be freed
    1633             :          * or not.
    1634             :          *
    1635             :          * Besides that, this function is used to read dir, we do not
    1636             :          * insert/delete delayed items in this period. So we also needn't
    1637             :          * requeue or dequeue this delayed node.
    1638             :          */
    1639       21073 :         atomic_dec(&delayed_node->refs);
    1640             : }
    1641             : 
    1642       22061 : void btrfs_put_delayed_items(struct list_head *ins_list,
    1643             :                              struct list_head *del_list)
    1644             : {
    1645             :         struct btrfs_delayed_item *curr, *next;
    1646             : 
    1647       22061 :         list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
    1648           0 :                 list_del(&curr->readdir_list);
    1649           0 :                 if (atomic_dec_and_test(&curr->refs))
    1650           0 :                         kfree(curr);
    1651             :         }
    1652             : 
    1653       22140 :         list_for_each_entry_safe(curr, next, del_list, readdir_list) {
    1654          79 :                 list_del(&curr->readdir_list);
    1655         158 :                 if (atomic_dec_and_test(&curr->refs))
    1656           0 :                         kfree(curr);
    1657             :         }
    1658       22061 : }
    1659             : 
    1660      146477 : int btrfs_should_delete_dir_index(struct list_head *del_list,
    1661             :                                   u64 index)
    1662             : {
    1663             :         struct btrfs_delayed_item *curr, *next;
    1664             :         int ret;
    1665             : 
    1666      146477 :         if (list_empty(del_list))
    1667             :                 return 0;
    1668             : 
    1669         392 :         list_for_each_entry_safe(curr, next, del_list, readdir_list) {
    1670         392 :                 if (curr->key.offset > index)
    1671             :                         break;
    1672             : 
    1673         117 :                 list_del(&curr->readdir_list);
    1674         117 :                 ret = (curr->key.offset == index);
    1675             : 
    1676         234 :                 if (atomic_dec_and_test(&curr->refs))
    1677           0 :                         kfree(curr);
    1678             : 
    1679         117 :                 if (ret)
    1680             :                         return 1;
    1681             :                 else
    1682           0 :                         continue;
    1683             :         }
    1684             :         return 0;
    1685             : }
    1686             : 
    1687             : /*
    1688             :  * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
    1689             :  *
    1690             :  */
    1691       22057 : int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
    1692             :                                     struct list_head *ins_list)
    1693             : {
    1694             :         struct btrfs_dir_item *di;
    1695             :         struct btrfs_delayed_item *curr, *next;
    1696             :         struct btrfs_key location;
    1697             :         char *name;
    1698             :         int name_len;
    1699             :         int over = 0;
    1700             :         unsigned char d_type;
    1701             : 
    1702       22057 :         if (list_empty(ins_list))
    1703             :                 return 0;
    1704             : 
    1705             :         /*
    1706             :          * Changing the data of the delayed item is impossible. So
    1707             :          * we needn't lock them. And we have held i_mutex of the
    1708             :          * directory, nobody can delete any directory indexes now.
    1709             :          */
    1710        1638 :         list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
    1711         966 :                 list_del(&curr->readdir_list);
    1712             : 
    1713         966 :                 if (curr->key.offset < ctx->pos) {
    1714         826 :                         if (atomic_dec_and_test(&curr->refs))
    1715           0 :                                 kfree(curr);
    1716         413 :                         continue;
    1717             :                 }
    1718             : 
    1719         553 :                 ctx->pos = curr->key.offset;
    1720             : 
    1721             :                 di = (struct btrfs_dir_item *)curr->data;
    1722         553 :                 name = (char *)(di + 1);
    1723         553 :                 name_len = btrfs_stack_dir_name_len(di);
    1724             : 
    1725         553 :                 d_type = btrfs_filetype_table[di->type];
    1726             :                 btrfs_disk_key_to_cpu(&location, &di->location);
    1727             : 
    1728         553 :                 over = !dir_emit(ctx, name, name_len,
    1729             :                                location.objectid, d_type);
    1730             : 
    1731        1106 :                 if (atomic_dec_and_test(&curr->refs))
    1732           0 :                         kfree(curr);
    1733             : 
    1734         553 :                 if (over)
    1735             :                         return 1;
    1736             :         }
    1737             :         return 0;
    1738             : }
    1739             : 
    1740      150201 : static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
    1741             :                                   struct btrfs_inode_item *inode_item,
    1742             :                                   struct inode *inode)
    1743             : {
    1744             :         btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
    1745             :         btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
    1746      150196 :         btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
    1747      150196 :         btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
    1748      150196 :         btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
    1749      150196 :         btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
    1750      150203 :         btrfs_set_stack_inode_generation(inode_item,
    1751             :                                          BTRFS_I(inode)->generation);
    1752      150203 :         btrfs_set_stack_inode_sequence(inode_item, inode->i_version);
    1753      150203 :         btrfs_set_stack_inode_transid(inode_item, trans->transid);
    1754      150203 :         btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
    1755      150203 :         btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
    1756             :         btrfs_set_stack_inode_block_group(inode_item, 0);
    1757             : 
    1758      150203 :         btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item),
    1759      150203 :                                      inode->i_atime.tv_sec);
    1760      150203 :         btrfs_set_stack_timespec_nsec(btrfs_inode_atime(inode_item),
    1761      150203 :                                       inode->i_atime.tv_nsec);
    1762             : 
    1763      150203 :         btrfs_set_stack_timespec_sec(btrfs_inode_mtime(inode_item),
    1764      150203 :                                      inode->i_mtime.tv_sec);
    1765      150203 :         btrfs_set_stack_timespec_nsec(btrfs_inode_mtime(inode_item),
    1766      150203 :                                       inode->i_mtime.tv_nsec);
    1767             : 
    1768      150203 :         btrfs_set_stack_timespec_sec(btrfs_inode_ctime(inode_item),
    1769      150203 :                                      inode->i_ctime.tv_sec);
    1770      150203 :         btrfs_set_stack_timespec_nsec(btrfs_inode_ctime(inode_item),
    1771      150203 :                                       inode->i_ctime.tv_nsec);
    1772      150203 : }
    1773             : 
    1774        5018 : int btrfs_fill_inode(struct inode *inode, u32 *rdev)
    1775             : {
    1776             :         struct btrfs_delayed_node *delayed_node;
    1777             :         struct btrfs_inode_item *inode_item;
    1778             :         struct btrfs_timespec *tspec;
    1779             : 
    1780        5018 :         delayed_node = btrfs_get_delayed_node(inode);
    1781        5018 :         if (!delayed_node)
    1782             :                 return -ENOENT;
    1783             : 
    1784           0 :         mutex_lock(&delayed_node->mutex);
    1785           0 :         if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
    1786           0 :                 mutex_unlock(&delayed_node->mutex);
    1787             :                 btrfs_release_delayed_node(delayed_node);
    1788           0 :                 return -ENOENT;
    1789             :         }
    1790             : 
    1791           0 :         inode_item = &delayed_node->inode_item;
    1792             : 
    1793             :         i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
    1794             :         i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
    1795             :         btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item));
    1796           0 :         inode->i_mode = btrfs_stack_inode_mode(inode_item);
    1797           0 :         set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
    1798           0 :         inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
    1799           0 :         BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
    1800           0 :         inode->i_version = btrfs_stack_inode_sequence(inode_item);
    1801           0 :         inode->i_rdev = 0;
    1802           0 :         *rdev = btrfs_stack_inode_rdev(inode_item);
    1803           0 :         BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);
    1804             : 
    1805             :         tspec = btrfs_inode_atime(inode_item);
    1806           0 :         inode->i_atime.tv_sec = btrfs_stack_timespec_sec(tspec);
    1807           0 :         inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
    1808             : 
    1809             :         tspec = btrfs_inode_mtime(inode_item);
    1810           0 :         inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(tspec);
    1811           0 :         inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
    1812             : 
    1813             :         tspec = btrfs_inode_ctime(inode_item);
    1814           0 :         inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(tspec);
    1815           0 :         inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
    1816             : 
    1817           0 :         inode->i_generation = BTRFS_I(inode)->generation;
    1818           0 :         BTRFS_I(inode)->index_cnt = (u64)-1;
    1819             : 
    1820           0 :         mutex_unlock(&delayed_node->mutex);
    1821             :         btrfs_release_delayed_node(delayed_node);
    1822           0 :         return 0;
    1823             : }
    1824             : 
    1825      209375 : int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
    1826             :                                struct btrfs_root *root, struct inode *inode)
    1827             : {
    1828             :         struct btrfs_delayed_node *delayed_node;
    1829             :         int ret = 0;
    1830             : 
    1831      150203 :         delayed_node = btrfs_get_or_create_delayed_node(inode);
    1832      150199 :         if (IS_ERR(delayed_node))
    1833           0 :                 return PTR_ERR(delayed_node);
    1834             : 
    1835      150199 :         mutex_lock(&delayed_node->mutex);
    1836      150204 :         if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
    1837       91032 :                 fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
    1838       91031 :                 goto release_node;
    1839             :         }
    1840             : 
    1841       59172 :         ret = btrfs_delayed_inode_reserve_metadata(trans, root, inode,
    1842             :                                                    delayed_node);
    1843       59172 :         if (ret)
    1844             :                 goto release_node;
    1845             : 
    1846       59172 :         fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
    1847             :         set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
    1848       59172 :         delayed_node->count++;
    1849       59172 :         atomic_inc(&root->fs_info->delayed_root->items);
    1850             : release_node:
    1851      150200 :         mutex_unlock(&delayed_node->mutex);
    1852             :         btrfs_release_delayed_node(delayed_node);
    1853      150204 :         return ret;
    1854             : }
    1855             : 
    1856        6256 : int btrfs_delayed_delete_inode_ref(struct inode *inode)
    1857             : {
    1858             :         struct btrfs_delayed_node *delayed_node;
    1859             : 
    1860        6256 :         delayed_node = btrfs_get_or_create_delayed_node(inode);
    1861        6256 :         if (IS_ERR(delayed_node))
    1862           0 :                 return PTR_ERR(delayed_node);
    1863             : 
    1864             :         /*
    1865             :          * We don't reserve space for inode ref deletion is because:
    1866             :          * - We ONLY do async inode ref deletion for the inode who has only
    1867             :          *   one link(i_nlink == 1), it means there is only one inode ref.
    1868             :          *   And in most case, the inode ref and the inode item are in the
    1869             :          *   same leaf, and we will deal with them at the same time.
    1870             :          *   Since we are sure we will reserve the space for the inode item,
    1871             :          *   it is unnecessary to reserve space for inode ref deletion.
    1872             :          * - If the inode ref and the inode item are not in the same leaf,
    1873             :          *   We also needn't worry about enospc problem, because we reserve
    1874             :          *   much more space for the inode update than it needs.
    1875             :          * - At the worst, we can steal some space from the global reservation.
    1876             :          *   It is very rare.
    1877             :          */
    1878        6256 :         mutex_lock(&delayed_node->mutex);
    1879        6256 :         if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
    1880             :                 goto release_node;
    1881             : 
    1882             :         set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
    1883        6256 :         delayed_node->count++;
    1884        6256 :         atomic_inc(&BTRFS_I(inode)->root->fs_info->delayed_root->items);
    1885             : release_node:
    1886        6256 :         mutex_unlock(&delayed_node->mutex);
    1887             :         btrfs_release_delayed_node(delayed_node);
    1888        6256 :         return 0;
    1889             : }
    1890             : 
    1891        6793 : static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
    1892             : {
    1893        6793 :         struct btrfs_root *root = delayed_node->root;
    1894        6105 :         struct btrfs_delayed_item *curr_item, *prev_item;
    1895             : 
    1896        6793 :         mutex_lock(&delayed_node->mutex);
    1897             :         curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
    1898       13586 :         while (curr_item) {
    1899           0 :                 btrfs_delayed_item_release_metadata(root, curr_item);
    1900             :                 prev_item = curr_item;
    1901             :                 curr_item = __btrfs_next_delayed_item(prev_item);
    1902           0 :                 btrfs_release_delayed_item(prev_item);
    1903             :         }
    1904             : 
    1905             :         curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
    1906       19691 :         while (curr_item) {
    1907        6105 :                 btrfs_delayed_item_release_metadata(root, curr_item);
    1908             :                 prev_item = curr_item;
    1909             :                 curr_item = __btrfs_next_delayed_item(prev_item);
    1910        6105 :                 btrfs_release_delayed_item(prev_item);
    1911             :         }
    1912             : 
    1913        6793 :         if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
    1914           0 :                 btrfs_release_delayed_iref(delayed_node);
    1915             : 
    1916        6793 :         if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
    1917           0 :                 btrfs_delayed_inode_release_metadata(root, delayed_node);
    1918           0 :                 btrfs_release_delayed_inode(delayed_node);
    1919             :         }
    1920        6793 :         mutex_unlock(&delayed_node->mutex);
    1921        6793 : }
    1922             : 
    1923        6907 : void btrfs_kill_delayed_inode_items(struct inode *inode)
    1924             : {
    1925             :         struct btrfs_delayed_node *delayed_node;
    1926             : 
    1927        6907 :         delayed_node = btrfs_get_delayed_node(inode);
    1928        6907 :         if (!delayed_node)
    1929        6907 :                 return;
    1930             : 
    1931        6793 :         __btrfs_kill_delayed_node(delayed_node);
    1932             :         btrfs_release_delayed_node(delayed_node);
    1933             : }
    1934             : 
    1935          11 : void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
    1936             : {
    1937             :         u64 inode_id = 0;
    1938             :         struct btrfs_delayed_node *delayed_nodes[8];
    1939             :         int i, n;
    1940             : 
    1941             :         while (1) {
    1942             :                 spin_lock(&root->inode_lock);
    1943          11 :                 n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
    1944             :                                            (void **)delayed_nodes, inode_id,
    1945             :                                            ARRAY_SIZE(delayed_nodes));
    1946          11 :                 if (!n) {
    1947             :                         spin_unlock(&root->inode_lock);
    1948             :                         break;
    1949             :                 }
    1950             : 
    1951           0 :                 inode_id = delayed_nodes[n - 1]->inode_id + 1;
    1952             : 
    1953           0 :                 for (i = 0; i < n; i++)
    1954           0 :                         atomic_inc(&delayed_nodes[i]->refs);
    1955             :                 spin_unlock(&root->inode_lock);
    1956             : 
    1957           0 :                 for (i = 0; i < n; i++) {
    1958           0 :                         __btrfs_kill_delayed_node(delayed_nodes[i]);
    1959           0 :                         btrfs_release_delayed_node(delayed_nodes[i]);
    1960             :                 }
    1961             :         }
    1962          11 : }
    1963             : 
    1964           0 : void btrfs_destroy_delayed_inodes(struct btrfs_root *root)
    1965             : {
    1966             :         struct btrfs_delayed_root *delayed_root;
    1967             :         struct btrfs_delayed_node *curr_node, *prev_node;
    1968             : 
    1969             :         delayed_root = btrfs_get_delayed_root(root);
    1970             : 
    1971           0 :         curr_node = btrfs_first_delayed_node(delayed_root);
    1972           0 :         while (curr_node) {
    1973           0 :                 __btrfs_kill_delayed_node(curr_node);
    1974             : 
    1975             :                 prev_node = curr_node;
    1976           0 :                 curr_node = btrfs_next_delayed_node(curr_node);
    1977             :                 btrfs_release_delayed_node(prev_node);
    1978             :         }
    1979           0 : }
    1980             : 

Generated by: LCOV version 1.10