Btrfs: don't pin down freed tree log blocks immediately

There won't be that many of them, and setting bits on the extent io tree is
a hefty cost for something we have to wait on.  So instead hold a reference
to the freed extent buffers and only pin them down in one pass when the
transaction commits.  Thanks,

Signed-off-by: Josef Bacik <jbacik@fusionio.com>
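---

Not part of the commit itself: below is a minimal userspace sketch of the
defer-and-drain pattern the patch uses, included only for illustration.
It is plain C with made-up names (defer_block, drain_freed_blocks,
pin_block) and a simple singly linked list rather than btrfs code; it
shows why the loop added to btrfs_prepare_extent_commit() pops one entry
at a time and drops the spinlock before calling btrfs_pin_extent(), which
may block.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* one deferred block; stands in for the held extent_buffer */
struct freed_block {
	unsigned long long start;
	unsigned long long len;
	struct freed_block *next;
};

static pthread_mutex_t freed_lock = PTHREAD_MUTEX_INITIALIZER;
static struct freed_block *freed_list;

/* free path: just queue the block, no expensive work here */
static void defer_block(unsigned long long start, unsigned long long len)
{
	struct freed_block *b = malloc(sizeof(*b));

	if (!b)
		return;
	b->start = start;
	b->len = len;
	pthread_mutex_lock(&freed_lock);
	b->next = freed_list;
	freed_list = b;
	pthread_mutex_unlock(&freed_lock);
}

/* stand-in for the per-block work done at commit time (may block) */
static void pin_block(struct freed_block *b)
{
	printf("pinning %llu+%llu\n", b->start, b->len);
}

/* commit path: pop one entry at a time, work on it with the lock dropped */
static void drain_freed_blocks(void)
{
	pthread_mutex_lock(&freed_lock);
	while (freed_list) {
		struct freed_block *b = freed_list;

		freed_list = b->next;
		pthread_mutex_unlock(&freed_lock);
		pin_block(b);
		free(b);
		pthread_mutex_lock(&freed_lock);
	}
	pthread_mutex_unlock(&freed_lock);
}

int main(void)
{
	defer_block(4096, 4096);
	defer_block(8192, 4096);
	drain_freed_blocks();
	return 0;
}

Dropping the lock inside the loop also keeps each lock hold short no
matter how long the list has grown by commit time.
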
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 37b8c72..1db1034 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1378,6 +1378,14 @@
 	 */
 	struct list_head ordered_operations;
 
+	spinlock_t freed_ebs_lock;
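+	/*
+	 * Extent buffers freed from the tree log during the current
+	 * transaction; they get pinned down in one pass at commit time
+	 * instead of at free time.  Protected by freed_ebs_lock.
+	 */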
+	struct list_head freed_ebs;
+
 	/*
 	 * there is a pool of worker threads for checksumming during writes
 	 * and a pool for checksumming after reads.  This is because readers
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 604678dd..d1ef429 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2031,6 +2031,7 @@
 	spin_lock_init(&fs_info->defrag_inodes_lock);
 	spin_lock_init(&fs_info->free_chunk_lock);
 	spin_lock_init(&fs_info->tree_mod_seq_lock);
+	spin_lock_init(&fs_info->freed_ebs_lock);
 	rwlock_init(&fs_info->tree_mod_log_lock);
 	mutex_init(&fs_info->reloc_mutex);
 
@@ -2038,6 +2039,7 @@
 	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
 	INIT_LIST_HEAD(&fs_info->space_info);
 	INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
+	INIT_LIST_HEAD(&fs_info->freed_ebs);
 	btrfs_mapping_init(&fs_info->mapping_tree);
 	btrfs_init_block_rsv(&fs_info->global_block_rsv,
 			     BTRFS_BLOCK_RSV_GLOBAL);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 6f2d283..e8c3fa6 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -4927,6 +4927,19 @@
 	struct btrfs_caching_control *next;
 	struct btrfs_caching_control *caching_ctl;
 	struct btrfs_block_group_cache *cache;
+	struct extent_buffer *eb;
+
+	spin_lock(&fs_info->freed_ebs_lock);
+	while (!list_empty(&fs_info->freed_ebs)) {
+		eb = list_first_entry(&fs_info->freed_ebs,
+				      struct extent_buffer, free_list);
+		list_del_init(&eb->free_list);
+		spin_unlock(&fs_info->freed_ebs_lock);
+		btrfs_pin_extent(root, eb->start, eb->len, 1);
+		free_extent_buffer(eb);
+		spin_lock(&fs_info->freed_ebs_lock);
+	}
+	spin_unlock(&fs_info->freed_ebs_lock);
 
 	down_write(&fs_info->extent_commit_sem);
 
@@ -5346,6 +5359,7 @@
 			   u64 parent, int last_ref)
 {
 	struct btrfs_block_group_cache *cache = NULL;
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	int ret;
 
 	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
@@ -5370,6 +5384,19 @@
 		}
 
 		if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
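+			/*
+			 * Freed tree log blocks are not pinned here; stash
+			 * the eb on fs_info->freed_ebs so it gets pinned in
+			 * one pass when the transaction commits.
+			 */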
+			if (root->objectid == BTRFS_TREE_LOG_OBJECTID) {
+				spin_lock(&fs_info->freed_ebs_lock);
+				extent_buffer_get(buf);
+				list_add_tail(&buf->free_list,
+					      &fs_info->freed_ebs);
+				spin_unlock(&fs_info->freed_ebs_lock);
+				goto out;
+			}
 			pin_down_extent(root, cache, buf->start, buf->len, 1);
 			goto out;
 		}
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index e2c0f69..9e48413 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -4083,6 +4083,7 @@
 	atomic_set(&eb->spinning_readers, 0);
 	atomic_set(&eb->spinning_writers, 0);
 	INIT_LIST_HEAD(&eb->dirty_list);
+	INIT_LIST_HEAD(&eb->free_list);
 	eb->lock_nested = 0;
 	init_waitqueue_head(&eb->write_lock_wq);
 	init_waitqueue_head(&eb->read_lock_wq);
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index cc01dce..96b672a 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -136,6 +136,7 @@
 	atomic_t io_pages;
 	int read_mirror;
 	struct list_head leak_list;
+	struct list_head free_list;
 	struct list_head dirty_list;
 	struct rcu_head rcu_head;
 	pid_t lock_owner;