// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2007 Oracle. All rights reserved. */ #include <asm/unaligned.h> #include "messages.h" #include "ctree.h" #include "accessors.h" static bool check_setget_bounds(const struct extent_buffer *eb, const void *ptr, unsigned off, int size) { const unsigned long member_offset = (unsigned long)ptr + off; if (unlikely(member_offset + size > eb->len)) { btrfs_warn(eb->fs_info, "bad eb member %s: ptr 0x%lx start %llu member offset %lu size %d", (member_offset > eb->len ? "start" : "end"), (unsigned long)ptr, eb->start, member_offset, size); return false; } return true; } void btrfs_init_map_token(struct btrfs_map_token *token, struct extent_buffer *eb) { token->eb = eb; token->kaddr = page_address(eb->pages[0]); token->offset = 0; } /* * Macro templates that define helpers to read/write extent buffer data of a * given size, that are also used via ctree.h for access to item members by * specialized helpers. * * Generic helpers: * - btrfs_set_8 (for 8/16/32/64) * - btrfs_get_8 (for 8/16/32/64) * * Generic helpers with a token (cached address of the most recently accessed * page): * - btrfs_set_token_8 (for 8/16/32/64) * - btrfs_get_token_8 (for 8/16/32/64) * * The set/get functions handle data spanning two pages transparently, in case * metadata block size is larger than page. Every pointer to metadata items is * an offset into the extent buffer page array, cast to a specific type. This * gives us all the type checking. * * The extent buffer pages stored in the array pages do not form a contiguous * phyusical range, but the API functions assume the linear offset to the range * from 0 to metadata node size. */ #define DEFINE_BTRFS_SETGET_BITS(bits) \ u##bits btrfs_get_token_##bits(struct btrfs_map_token *token, \ const void *ptr, unsigned long off) \ { \ const unsigned long member_offset = (unsigned long)ptr + off; \ const unsigned long idx = get_eb_page_index(member_offset); \ const unsigned long oip = get_eb_offset_in_page(token->eb, \ member_offset); \ const int size = sizeof(u##bits); \ u8 lebytes[sizeof(u##bits)]; \ const int part = PAGE_SIZE - oip; \ \ ASSERT(token); \ ASSERT(token->kaddr); \ ASSERT(check_setget_bounds(token->eb, ptr, off, size)); \ if (token->offset <= member_offset && \ member_offset + size <= token->offset + PAGE_SIZE) { \ return get_unaligned_le##bits(token->kaddr + oip); \ } \ token->kaddr = page_address(token->eb->pages[idx]); \ token->offset = idx << PAGE_SHIFT; \ if (INLINE_EXTENT_BUFFER_PAGES == 1 || oip + size <= PAGE_SIZE ) \ return get_unaligned_le##bits(token->kaddr + oip); \ \ memcpy(lebytes, token->kaddr + oip, part); \ token->kaddr = page_address(token->eb->pages[idx + 1]); \ token->offset = (idx + 1) << PAGE_SHIFT; \ memcpy(lebytes + part, token->kaddr, size - part); \ return get_unaligned_le##bits(lebytes); \ } \ u##bits btrfs_get_##bits(const struct extent_buffer *eb, \ const void *ptr, unsigned long off) \ { \ const unsigned long member_offset = (unsigned long)ptr + off; \ const unsigned long oip = get_eb_offset_in_page(eb, member_offset); \ const unsigned long idx = get_eb_page_index(member_offset); \ char *kaddr = page_address(eb->pages[idx]); \ const int size = sizeof(u##bits); \ const int part = PAGE_SIZE - oip; \ u8 lebytes[sizeof(u##bits)]; \ \ ASSERT(check_setget_bounds(eb, ptr, off, size)); \ if (INLINE_EXTENT_BUFFER_PAGES == 1 || oip + size <= PAGE_SIZE) \ return get_unaligned_le##bits(kaddr + oip); \ \ memcpy(lebytes, kaddr + oip, part); \ kaddr = page_address(eb->pages[idx + 1]); \ memcpy(lebytes 
+ part, kaddr, size - part); \ return get_unaligned_le##bits(lebytes); \ } \ void btrfs_set_token_##bits(struct btrfs_map_token *token, \ const void *ptr, unsigned long off, \ u##bits val) \ { \ const unsigned long member_offset = (unsigned long)ptr + off; \ const unsigned long idx = get_eb_page_index(member_offset); \ const unsigned long oip = get_eb_offset_in_page(token->eb, \ member_offset); \ const int size = sizeof(u##bits); \ u8 lebytes[sizeof(u##bits)]; \ const int part = PAGE_SIZE - oip; \ \ ASSERT(token); \ ASSERT(token->kaddr); \ ASSERT(check_setget_bounds(token->eb, ptr, off, size)); \ if (token->offset <= member_offset && \ member_offset + size <= token->offset + PAGE_SIZE) { \ put_unaligned_le##bits(val, token->kaddr + oip); \ return; \ } \ token->kaddr = page_address(token->eb->pages[idx]); \ token->offset = idx << PAGE_SHIFT; \ if (INLINE_EXTENT_BUFFER_PAGES == 1 || oip + size <= PAGE_SIZE) { \ put_unaligned_le##bits(val, token->kaddr + oip); \ return; \ } \ put_unaligned_le##bits(val, lebytes); \ memcpy(token->kaddr + oip, lebytes, part); \ token->kaddr = page_address(token->eb->pages[idx + 1]); \ token->offset = (idx + 1) << PAGE_SHIFT; \ memcpy(token->kaddr, lebytes + part, size - part); \ } \ void btrfs_set_##bits(const struct extent_buffer *eb, void *ptr, \ unsigned long off, u##bits val) \ { \ const unsigned long member_offset = (unsigned long)ptr + off; \ const unsigned long oip = get_eb_offset_in_page(eb, member_offset); \ const unsigned long idx = get_eb_page_index(member_offset); \ char *kaddr = page_address(eb->pages[idx]); \ const int size = sizeof(u##bits); \ const int part = PAGE_SIZE - oip; \ u8 lebytes[sizeof(u##bits)]; \ \ ASSERT(check_setget_bounds(eb, ptr, off, size)); \ if (INLINE_EXTENT_BUFFER_PAGES == 1 || oip + size <= PAGE_SIZE) { \ put_unaligned_le##bits(val, kaddr + oip); \ return; \ } \ \ put_unaligned_le##bits(val, lebytes); \ memcpy(kaddr + oip, lebytes, part); \ kaddr = page_address(eb->pages[idx + 1]); \ memcpy(kaddr, lebytes + part, size - part); \ } DEFINE_BTRFS_SETGET_BITS(8) DEFINE_BTRFS_SETGET_BITS(16) DEFINE_BTRFS_SETGET_BITS(32) DEFINE_BTRFS_SETGET_BITS(64) void btrfs_node_key(const struct extent_buffer *eb, struct btrfs_disk_key *disk_key, int nr) { unsigned long ptr = btrfs_node_key_ptr_offset(eb, nr); read_eb_member(eb, (struct btrfs_key_ptr *)ptr, struct btrfs_key_ptr, key, disk_key); }
repo_name: linux-master
file_path: fs/btrfs/accessors.c
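As context for the DEFINE_BTRFS_SETGET_BITS() helpers in accessors.c above, the following standalone C sketch illustrates the split-copy technique used when a little-endian member straddles a page boundary: copy the tail of one page and the head of the next into a small buffer, then decode. It is a userspace illustration only; FAKE_PAGE_SIZE, read_le32_at() and get_unaligned_le32_sketch() are invented names, not kernel API.

/*
 * Userspace sketch of the cross-page read used by the extent buffer
 * get/set helpers. All names here are illustrative.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define FAKE_PAGE_SIZE 4096u

/* Decode a little-endian u32 from an unaligned byte buffer. */
static uint32_t get_unaligned_le32_sketch(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

/*
 * Read a u32 at @offset from a metadata range backed by an array of
 * discontiguous pages, splitting the copy when the value straddles a
 * page boundary -- the same idea as the generated btrfs_get_32().
 */
static uint32_t read_le32_at(uint8_t **pages, unsigned long offset)
{
	const unsigned long idx = offset / FAKE_PAGE_SIZE;
	const unsigned long oip = offset % FAKE_PAGE_SIZE;	/* offset in page */
	const unsigned int size = sizeof(uint32_t);
	uint8_t lebytes[sizeof(uint32_t)];

	if (oip + size <= FAKE_PAGE_SIZE)
		return get_unaligned_le32_sketch(pages[idx] + oip);

	/* Value crosses into the next page: copy the two parts. */
	const unsigned int part = FAKE_PAGE_SIZE - oip;

	memcpy(lebytes, pages[idx] + oip, part);
	memcpy(lebytes + part, pages[idx + 1], size - part);
	return get_unaligned_le32_sketch(lebytes);
}

int main(void)
{
	uint8_t *pages[2] = {
		calloc(1, FAKE_PAGE_SIZE),
		calloc(1, FAKE_PAGE_SIZE),
	};

	if (!pages[0] || !pages[1])
		return 1;

	/* Place 0x11223344 (little-endian) so it straddles the boundary. */
	pages[0][FAKE_PAGE_SIZE - 2] = 0x44;
	pages[0][FAKE_PAGE_SIZE - 1] = 0x33;
	pages[1][0] = 0x22;
	pages[1][1] = 0x11;

	printf("0x%08x\n", read_le32_at(pages, FAKE_PAGE_SIZE - 2));
	free(pages[0]);
	free(pages[1]);
	return 0;
}

The token variants in accessors.c add one more step on top of this: they cache the address and offset of the most recently touched page so repeated accesses within the same page skip the index lookup.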
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2009 Oracle. All rights reserved. */ #include <linux/sched.h> #include <linux/slab.h> #include <linux/sort.h> #include "messages.h" #include "ctree.h" #include "delayed-ref.h" #include "transaction.h" #include "qgroup.h" #include "space-info.h" #include "tree-mod-log.h" #include "fs.h" struct kmem_cache *btrfs_delayed_ref_head_cachep; struct kmem_cache *btrfs_delayed_tree_ref_cachep; struct kmem_cache *btrfs_delayed_data_ref_cachep; struct kmem_cache *btrfs_delayed_extent_op_cachep; /* * delayed back reference update tracking. For subvolume trees * we queue up extent allocations and backref maintenance for * delayed processing. This avoids deep call chains where we * add extents in the middle of btrfs_search_slot, and it allows * us to buffer up frequently modified backrefs in an rb tree instead * of hammering updates on the extent allocation tree. */ bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info) { struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv; struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; bool ret = false; u64 reserved; spin_lock(&global_rsv->lock); reserved = global_rsv->reserved; spin_unlock(&global_rsv->lock); /* * Since the global reserve is just kind of magic we don't really want * to rely on it to save our bacon, so if our size is more than the * delayed_refs_rsv and the global rsv then it's time to think about * bailing. */ spin_lock(&delayed_refs_rsv->lock); reserved += delayed_refs_rsv->reserved; if (delayed_refs_rsv->size >= reserved) ret = true; spin_unlock(&delayed_refs_rsv->lock); return ret; } /* * Release a ref head's reservation. * * @fs_info: the filesystem * @nr: number of items to drop * * Drops the delayed ref head's count from the delayed refs rsv and free any * excess reservation we had. */ void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr) { struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv; const u64 num_bytes = btrfs_calc_delayed_ref_bytes(fs_info, nr); u64 released = 0; released = btrfs_block_rsv_release(fs_info, block_rsv, num_bytes, NULL); if (released) trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv", 0, released, 0); } /* * Adjust the size of the delayed refs rsv. * * This is to be called anytime we may have adjusted trans->delayed_ref_updates, * it'll calculate the additional size and add it to the delayed_refs_rsv. */ void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans) { struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv; u64 num_bytes; if (!trans->delayed_ref_updates) return; num_bytes = btrfs_calc_delayed_ref_bytes(fs_info, trans->delayed_ref_updates); spin_lock(&delayed_rsv->lock); delayed_rsv->size += num_bytes; delayed_rsv->full = false; spin_unlock(&delayed_rsv->lock); trans->delayed_ref_updates = 0; } /* * Transfer bytes to our delayed refs rsv. * * @fs_info: the filesystem * @src: source block rsv to transfer from * @num_bytes: number of bytes to transfer * * This transfers up to the num_bytes amount from the src rsv to the * delayed_refs_rsv. Any extra bytes are returned to the space info. 
*/ void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info, struct btrfs_block_rsv *src, u64 num_bytes) { struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv; u64 to_free = 0; spin_lock(&src->lock); src->reserved -= num_bytes; src->size -= num_bytes; spin_unlock(&src->lock); spin_lock(&delayed_refs_rsv->lock); if (delayed_refs_rsv->size > delayed_refs_rsv->reserved) { u64 delta = delayed_refs_rsv->size - delayed_refs_rsv->reserved; if (num_bytes > delta) { to_free = num_bytes - delta; num_bytes = delta; } } else { to_free = num_bytes; num_bytes = 0; } if (num_bytes) delayed_refs_rsv->reserved += num_bytes; if (delayed_refs_rsv->reserved >= delayed_refs_rsv->size) delayed_refs_rsv->full = true; spin_unlock(&delayed_refs_rsv->lock); if (num_bytes) trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv", 0, num_bytes, 1); if (to_free) btrfs_space_info_free_bytes_may_use(fs_info, delayed_refs_rsv->space_info, to_free); } /* * Refill based on our delayed refs usage. * * @fs_info: the filesystem * @flush: control how we can flush for this reservation. * * This will refill the delayed block_rsv up to 1 items size worth of space and * will return -ENOSPC if we can't make the reservation. */ int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info, enum btrfs_reserve_flush_enum flush) { struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv; u64 limit = btrfs_calc_delayed_ref_bytes(fs_info, 1); u64 num_bytes = 0; int ret = -ENOSPC; spin_lock(&block_rsv->lock); if (block_rsv->reserved < block_rsv->size) { num_bytes = block_rsv->size - block_rsv->reserved; num_bytes = min(num_bytes, limit); } spin_unlock(&block_rsv->lock); if (!num_bytes) return 0; ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, num_bytes, flush); if (ret) return ret; btrfs_block_rsv_add_bytes(block_rsv, num_bytes, false); trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv", 0, num_bytes, 1); return 0; } /* * compare two delayed tree backrefs with same bytenr and type */ static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref1, struct btrfs_delayed_tree_ref *ref2) { if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) { if (ref1->root < ref2->root) return -1; if (ref1->root > ref2->root) return 1; } else { if (ref1->parent < ref2->parent) return -1; if (ref1->parent > ref2->parent) return 1; } return 0; } /* * compare two delayed data backrefs with same bytenr and type */ static int comp_data_refs(struct btrfs_delayed_data_ref *ref1, struct btrfs_delayed_data_ref *ref2) { if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) { if (ref1->root < ref2->root) return -1; if (ref1->root > ref2->root) return 1; if (ref1->objectid < ref2->objectid) return -1; if (ref1->objectid > ref2->objectid) return 1; if (ref1->offset < ref2->offset) return -1; if (ref1->offset > ref2->offset) return 1; } else { if (ref1->parent < ref2->parent) return -1; if (ref1->parent > ref2->parent) return 1; } return 0; } static int comp_refs(struct btrfs_delayed_ref_node *ref1, struct btrfs_delayed_ref_node *ref2, bool check_seq) { int ret = 0; if (ref1->type < ref2->type) return -1; if (ref1->type > ref2->type) return 1; if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY || ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) ret = comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref1), btrfs_delayed_node_to_tree_ref(ref2)); else ret = comp_data_refs(btrfs_delayed_node_to_data_ref(ref1), btrfs_delayed_node_to_data_ref(ref2)); if (ret) return ret; if (check_seq) { if (ref1->seq < ref2->seq) return -1; if (ref1->seq > ref2->seq) 
return 1; } return 0; } /* insert a new ref to head ref rbtree */ static struct btrfs_delayed_ref_head *htree_insert(struct rb_root_cached *root, struct rb_node *node) { struct rb_node **p = &root->rb_root.rb_node; struct rb_node *parent_node = NULL; struct btrfs_delayed_ref_head *entry; struct btrfs_delayed_ref_head *ins; u64 bytenr; bool leftmost = true; ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node); bytenr = ins->bytenr; while (*p) { parent_node = *p; entry = rb_entry(parent_node, struct btrfs_delayed_ref_head, href_node); if (bytenr < entry->bytenr) { p = &(*p)->rb_left; } else if (bytenr > entry->bytenr) { p = &(*p)->rb_right; leftmost = false; } else { return entry; } } rb_link_node(node, parent_node, p); rb_insert_color_cached(node, root, leftmost); return NULL; } static struct btrfs_delayed_ref_node* tree_insert(struct rb_root_cached *root, struct btrfs_delayed_ref_node *ins) { struct rb_node **p = &root->rb_root.rb_node; struct rb_node *node = &ins->ref_node; struct rb_node *parent_node = NULL; struct btrfs_delayed_ref_node *entry; bool leftmost = true; while (*p) { int comp; parent_node = *p; entry = rb_entry(parent_node, struct btrfs_delayed_ref_node, ref_node); comp = comp_refs(ins, entry, true); if (comp < 0) { p = &(*p)->rb_left; } else if (comp > 0) { p = &(*p)->rb_right; leftmost = false; } else { return entry; } } rb_link_node(node, parent_node, p); rb_insert_color_cached(node, root, leftmost); return NULL; } static struct btrfs_delayed_ref_head *find_first_ref_head( struct btrfs_delayed_ref_root *dr) { struct rb_node *n; struct btrfs_delayed_ref_head *entry; n = rb_first_cached(&dr->href_root); if (!n) return NULL; entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node); return entry; } /* * Find a head entry based on bytenr. This returns the delayed ref head if it * was able to find one, or NULL if nothing was in that spot. If return_bigger * is given, the next bigger entry is returned if no exact match is found. 
*/ static struct btrfs_delayed_ref_head *find_ref_head( struct btrfs_delayed_ref_root *dr, u64 bytenr, bool return_bigger) { struct rb_root *root = &dr->href_root.rb_root; struct rb_node *n; struct btrfs_delayed_ref_head *entry; n = root->rb_node; entry = NULL; while (n) { entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node); if (bytenr < entry->bytenr) n = n->rb_left; else if (bytenr > entry->bytenr) n = n->rb_right; else return entry; } if (entry && return_bigger) { if (bytenr > entry->bytenr) { n = rb_next(&entry->href_node); if (!n) return NULL; entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node); } return entry; } return NULL; } int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs, struct btrfs_delayed_ref_head *head) { lockdep_assert_held(&delayed_refs->lock); if (mutex_trylock(&head->mutex)) return 0; refcount_inc(&head->refs); spin_unlock(&delayed_refs->lock); mutex_lock(&head->mutex); spin_lock(&delayed_refs->lock); if (RB_EMPTY_NODE(&head->href_node)) { mutex_unlock(&head->mutex); btrfs_put_delayed_ref_head(head); return -EAGAIN; } btrfs_put_delayed_ref_head(head); return 0; } static inline void drop_delayed_ref(struct btrfs_delayed_ref_root *delayed_refs, struct btrfs_delayed_ref_head *head, struct btrfs_delayed_ref_node *ref) { lockdep_assert_held(&head->lock); rb_erase_cached(&ref->ref_node, &head->ref_tree); RB_CLEAR_NODE(&ref->ref_node); if (!list_empty(&ref->add_list)) list_del(&ref->add_list); btrfs_put_delayed_ref(ref); atomic_dec(&delayed_refs->num_entries); } static bool merge_ref(struct btrfs_delayed_ref_root *delayed_refs, struct btrfs_delayed_ref_head *head, struct btrfs_delayed_ref_node *ref, u64 seq) { struct btrfs_delayed_ref_node *next; struct rb_node *node = rb_next(&ref->ref_node); bool done = false; while (!done && node) { int mod; next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node); node = rb_next(node); if (seq && next->seq >= seq) break; if (comp_refs(ref, next, false)) break; if (ref->action == next->action) { mod = next->ref_mod; } else { if (ref->ref_mod < next->ref_mod) { swap(ref, next); done = true; } mod = -next->ref_mod; } drop_delayed_ref(delayed_refs, head, next); ref->ref_mod += mod; if (ref->ref_mod == 0) { drop_delayed_ref(delayed_refs, head, ref); done = true; } else { /* * Can't have multiples of the same ref on a tree block. */ WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY || ref->type == BTRFS_SHARED_BLOCK_REF_KEY); } } return done; } void btrfs_merge_delayed_refs(struct btrfs_fs_info *fs_info, struct btrfs_delayed_ref_root *delayed_refs, struct btrfs_delayed_ref_head *head) { struct btrfs_delayed_ref_node *ref; struct rb_node *node; u64 seq = 0; lockdep_assert_held(&head->lock); if (RB_EMPTY_ROOT(&head->ref_tree.rb_root)) return; /* We don't have too many refs to merge for data. 
*/ if (head->is_data) return; seq = btrfs_tree_mod_log_lowest_seq(fs_info); again: for (node = rb_first_cached(&head->ref_tree); node; node = rb_next(node)) { ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node); if (seq && ref->seq >= seq) continue; if (merge_ref(delayed_refs, head, ref, seq)) goto again; } } int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq) { int ret = 0; u64 min_seq = btrfs_tree_mod_log_lowest_seq(fs_info); if (min_seq != 0 && seq >= min_seq) { btrfs_debug(fs_info, "holding back delayed_ref %llu, lowest is %llu", seq, min_seq); ret = 1; } return ret; } struct btrfs_delayed_ref_head *btrfs_select_ref_head( struct btrfs_delayed_ref_root *delayed_refs) { struct btrfs_delayed_ref_head *head; lockdep_assert_held(&delayed_refs->lock); again: head = find_ref_head(delayed_refs, delayed_refs->run_delayed_start, true); if (!head && delayed_refs->run_delayed_start != 0) { delayed_refs->run_delayed_start = 0; head = find_first_ref_head(delayed_refs); } if (!head) return NULL; while (head->processing) { struct rb_node *node; node = rb_next(&head->href_node); if (!node) { if (delayed_refs->run_delayed_start == 0) return NULL; delayed_refs->run_delayed_start = 0; goto again; } head = rb_entry(node, struct btrfs_delayed_ref_head, href_node); } head->processing = true; WARN_ON(delayed_refs->num_heads_ready == 0); delayed_refs->num_heads_ready--; delayed_refs->run_delayed_start = head->bytenr + head->num_bytes; return head; } void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs, struct btrfs_delayed_ref_head *head) { lockdep_assert_held(&delayed_refs->lock); lockdep_assert_held(&head->lock); rb_erase_cached(&head->href_node, &delayed_refs->href_root); RB_CLEAR_NODE(&head->href_node); atomic_dec(&delayed_refs->num_entries); delayed_refs->num_heads--; if (!head->processing) delayed_refs->num_heads_ready--; } /* * Helper to insert the ref_node to the tail or merge with tail. * * Return false if the ref was inserted. * Return true if the ref was merged into an existing one (and therefore can be * freed by the caller). 
*/ static bool insert_delayed_ref(struct btrfs_delayed_ref_root *root, struct btrfs_delayed_ref_head *href, struct btrfs_delayed_ref_node *ref) { struct btrfs_delayed_ref_node *exist; int mod; spin_lock(&href->lock); exist = tree_insert(&href->ref_tree, ref); if (!exist) { if (ref->action == BTRFS_ADD_DELAYED_REF) list_add_tail(&ref->add_list, &href->ref_add_list); atomic_inc(&root->num_entries); spin_unlock(&href->lock); return false; } /* Now we are sure we can merge */ if (exist->action == ref->action) { mod = ref->ref_mod; } else { /* Need to change action */ if (exist->ref_mod < ref->ref_mod) { exist->action = ref->action; mod = -exist->ref_mod; exist->ref_mod = ref->ref_mod; if (ref->action == BTRFS_ADD_DELAYED_REF) list_add_tail(&exist->add_list, &href->ref_add_list); else if (ref->action == BTRFS_DROP_DELAYED_REF) { ASSERT(!list_empty(&exist->add_list)); list_del(&exist->add_list); } else { ASSERT(0); } } else mod = -ref->ref_mod; } exist->ref_mod += mod; /* remove existing tail if its ref_mod is zero */ if (exist->ref_mod == 0) drop_delayed_ref(root, href, exist); spin_unlock(&href->lock); return true; } /* * helper function to update the accounting in the head ref * existing and update must have the same bytenr */ static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_head *existing, struct btrfs_delayed_ref_head *update) { struct btrfs_delayed_ref_root *delayed_refs = &trans->transaction->delayed_refs; struct btrfs_fs_info *fs_info = trans->fs_info; int old_ref_mod; BUG_ON(existing->is_data != update->is_data); spin_lock(&existing->lock); if (update->must_insert_reserved) { /* if the extent was freed and then * reallocated before the delayed ref * entries were processed, we can end up * with an existing head ref without * the must_insert_reserved flag set. * Set it again here */ existing->must_insert_reserved = update->must_insert_reserved; /* * update the num_bytes so we make sure the accounting * is done correctly */ existing->num_bytes = update->num_bytes; } if (update->extent_op) { if (!existing->extent_op) { existing->extent_op = update->extent_op; } else { if (update->extent_op->update_key) { memcpy(&existing->extent_op->key, &update->extent_op->key, sizeof(update->extent_op->key)); existing->extent_op->update_key = true; } if (update->extent_op->update_flags) { existing->extent_op->flags_to_set |= update->extent_op->flags_to_set; existing->extent_op->update_flags = true; } btrfs_free_delayed_extent_op(update->extent_op); } } /* * update the reference mod on the head to reflect this new operation, * only need the lock for this case cause we could be processing it * currently, for refs we just added we know we're a-ok. */ old_ref_mod = existing->total_ref_mod; existing->ref_mod += update->ref_mod; existing->total_ref_mod += update->ref_mod; /* * If we are going to from a positive ref mod to a negative or vice * versa we need to make sure to adjust pending_csums accordingly. 
*/ if (existing->is_data) { u64 csum_leaves = btrfs_csum_bytes_to_leaves(fs_info, existing->num_bytes); if (existing->total_ref_mod >= 0 && old_ref_mod < 0) { delayed_refs->pending_csums -= existing->num_bytes; btrfs_delayed_refs_rsv_release(fs_info, csum_leaves); } if (existing->total_ref_mod < 0 && old_ref_mod >= 0) { delayed_refs->pending_csums += existing->num_bytes; trans->delayed_ref_updates += csum_leaves; } } spin_unlock(&existing->lock); } static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref, struct btrfs_qgroup_extent_record *qrecord, u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved, int action, bool is_data, bool is_system) { int count_mod = 1; bool must_insert_reserved = false; /* If reserved is provided, it must be a data extent. */ BUG_ON(!is_data && reserved); switch (action) { case BTRFS_UPDATE_DELAYED_HEAD: count_mod = 0; break; case BTRFS_DROP_DELAYED_REF: /* * The head node stores the sum of all the mods, so dropping a ref * should drop the sum in the head node by one. */ count_mod = -1; break; case BTRFS_ADD_DELAYED_EXTENT: /* * BTRFS_ADD_DELAYED_EXTENT means that we need to update the * reserved accounting when the extent is finally added, or if a * later modification deletes the delayed ref without ever * inserting the extent into the extent allocation tree. * ref->must_insert_reserved is the flag used to record that * accounting mods are required. * * Once we record must_insert_reserved, switch the action to * BTRFS_ADD_DELAYED_REF because other special casing is not * required. */ must_insert_reserved = true; break; } refcount_set(&head_ref->refs, 1); head_ref->bytenr = bytenr; head_ref->num_bytes = num_bytes; head_ref->ref_mod = count_mod; head_ref->must_insert_reserved = must_insert_reserved; head_ref->is_data = is_data; head_ref->is_system = is_system; head_ref->ref_tree = RB_ROOT_CACHED; INIT_LIST_HEAD(&head_ref->ref_add_list); RB_CLEAR_NODE(&head_ref->href_node); head_ref->processing = false; head_ref->total_ref_mod = count_mod; spin_lock_init(&head_ref->lock); mutex_init(&head_ref->mutex); if (qrecord) { if (ref_root && reserved) { qrecord->data_rsv = reserved; qrecord->data_rsv_refroot = ref_root; } qrecord->bytenr = bytenr; qrecord->num_bytes = num_bytes; qrecord->old_roots = NULL; } } /* * helper function to actually insert a head node into the rbtree. * this does all the dirty work in terms of maintaining the correct * overall modification count. 
*/ static noinline struct btrfs_delayed_ref_head * add_delayed_ref_head(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_head *head_ref, struct btrfs_qgroup_extent_record *qrecord, int action, bool *qrecord_inserted_ret) { struct btrfs_delayed_ref_head *existing; struct btrfs_delayed_ref_root *delayed_refs; bool qrecord_inserted = false; delayed_refs = &trans->transaction->delayed_refs; /* Record qgroup extent info if provided */ if (qrecord) { if (btrfs_qgroup_trace_extent_nolock(trans->fs_info, delayed_refs, qrecord)) kfree(qrecord); else qrecord_inserted = true; } trace_add_delayed_ref_head(trans->fs_info, head_ref, action); existing = htree_insert(&delayed_refs->href_root, &head_ref->href_node); if (existing) { update_existing_head_ref(trans, existing, head_ref); /* * we've updated the existing ref, free the newly * allocated ref */ kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref); head_ref = existing; } else { if (head_ref->is_data && head_ref->ref_mod < 0) { delayed_refs->pending_csums += head_ref->num_bytes; trans->delayed_ref_updates += btrfs_csum_bytes_to_leaves(trans->fs_info, head_ref->num_bytes); } delayed_refs->num_heads++; delayed_refs->num_heads_ready++; atomic_inc(&delayed_refs->num_entries); trans->delayed_ref_updates++; } if (qrecord_inserted_ret) *qrecord_inserted_ret = qrecord_inserted; return head_ref; } /* * init_delayed_ref_common - Initialize the structure which represents a * modification to a an extent. * * @fs_info: Internal to the mounted filesystem mount structure. * * @ref: The structure which is going to be initialized. * * @bytenr: The logical address of the extent for which a modification is * going to be recorded. * * @num_bytes: Size of the extent whose modification is being recorded. * * @ref_root: The id of the root where this modification has originated, this * can be either one of the well-known metadata trees or the * subvolume id which references this extent. * * @action: Can be one of BTRFS_ADD_DELAYED_REF/BTRFS_DROP_DELAYED_REF or * BTRFS_ADD_DELAYED_EXTENT * * @ref_type: Holds the type of the extent which is being recorded, can be * one of BTRFS_SHARED_BLOCK_REF_KEY/BTRFS_TREE_BLOCK_REF_KEY * when recording a metadata extent or BTRFS_SHARED_DATA_REF_KEY/ * BTRFS_EXTENT_DATA_REF_KEY when recording data extent */ static void init_delayed_ref_common(struct btrfs_fs_info *fs_info, struct btrfs_delayed_ref_node *ref, u64 bytenr, u64 num_bytes, u64 ref_root, int action, u8 ref_type) { u64 seq = 0; if (action == BTRFS_ADD_DELAYED_EXTENT) action = BTRFS_ADD_DELAYED_REF; if (is_fstree(ref_root)) seq = atomic64_read(&fs_info->tree_mod_seq); refcount_set(&ref->refs, 1); ref->bytenr = bytenr; ref->num_bytes = num_bytes; ref->ref_mod = 1; ref->action = action; ref->seq = seq; ref->type = ref_type; RB_CLEAR_NODE(&ref->ref_node); INIT_LIST_HEAD(&ref->add_list); } /* * add a delayed tree ref. This does all of the accounting required * to make sure the delayed ref is eventually processed before this * transaction commits. 
*/ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans, struct btrfs_ref *generic_ref, struct btrfs_delayed_extent_op *extent_op) { struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_delayed_tree_ref *ref; struct btrfs_delayed_ref_head *head_ref; struct btrfs_delayed_ref_root *delayed_refs; struct btrfs_qgroup_extent_record *record = NULL; bool qrecord_inserted; bool is_system; bool merged; int action = generic_ref->action; int level = generic_ref->tree_ref.level; u64 bytenr = generic_ref->bytenr; u64 num_bytes = generic_ref->len; u64 parent = generic_ref->parent; u8 ref_type; is_system = (generic_ref->tree_ref.owning_root == BTRFS_CHUNK_TREE_OBJECTID); ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action); ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS); if (!ref) return -ENOMEM; head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS); if (!head_ref) { kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref); return -ENOMEM; } if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) && !generic_ref->skip_qgroup) { record = kzalloc(sizeof(*record), GFP_NOFS); if (!record) { kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref); kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref); return -ENOMEM; } } if (parent) ref_type = BTRFS_SHARED_BLOCK_REF_KEY; else ref_type = BTRFS_TREE_BLOCK_REF_KEY; init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes, generic_ref->tree_ref.owning_root, action, ref_type); ref->root = generic_ref->tree_ref.owning_root; ref->parent = parent; ref->level = level; init_delayed_ref_head(head_ref, record, bytenr, num_bytes, generic_ref->tree_ref.owning_root, 0, action, false, is_system); head_ref->extent_op = extent_op; delayed_refs = &trans->transaction->delayed_refs; spin_lock(&delayed_refs->lock); /* * insert both the head node and the new ref without dropping * the spin lock */ head_ref = add_delayed_ref_head(trans, head_ref, record, action, &qrecord_inserted); merged = insert_delayed_ref(delayed_refs, head_ref, &ref->node); spin_unlock(&delayed_refs->lock); /* * Need to update the delayed_refs_rsv with any changes we may have * made. */ btrfs_update_delayed_refs_rsv(trans); trace_add_delayed_tree_ref(fs_info, &ref->node, ref, action == BTRFS_ADD_DELAYED_EXTENT ? BTRFS_ADD_DELAYED_REF : action); if (merged) kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref); if (qrecord_inserted) btrfs_qgroup_trace_extent_post(trans, record); return 0; } /* * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref. 
*/ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans, struct btrfs_ref *generic_ref, u64 reserved) { struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_delayed_data_ref *ref; struct btrfs_delayed_ref_head *head_ref; struct btrfs_delayed_ref_root *delayed_refs; struct btrfs_qgroup_extent_record *record = NULL; bool qrecord_inserted; int action = generic_ref->action; bool merged; u64 bytenr = generic_ref->bytenr; u64 num_bytes = generic_ref->len; u64 parent = generic_ref->parent; u64 ref_root = generic_ref->data_ref.owning_root; u64 owner = generic_ref->data_ref.ino; u64 offset = generic_ref->data_ref.offset; u8 ref_type; ASSERT(generic_ref->type == BTRFS_REF_DATA && action); ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS); if (!ref) return -ENOMEM; if (parent) ref_type = BTRFS_SHARED_DATA_REF_KEY; else ref_type = BTRFS_EXTENT_DATA_REF_KEY; init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes, ref_root, action, ref_type); ref->root = ref_root; ref->parent = parent; ref->objectid = owner; ref->offset = offset; head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS); if (!head_ref) { kmem_cache_free(btrfs_delayed_data_ref_cachep, ref); return -ENOMEM; } if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) && !generic_ref->skip_qgroup) { record = kzalloc(sizeof(*record), GFP_NOFS); if (!record) { kmem_cache_free(btrfs_delayed_data_ref_cachep, ref); kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref); return -ENOMEM; } } init_delayed_ref_head(head_ref, record, bytenr, num_bytes, ref_root, reserved, action, true, false); head_ref->extent_op = NULL; delayed_refs = &trans->transaction->delayed_refs; spin_lock(&delayed_refs->lock); /* * insert both the head node and the new ref without dropping * the spin lock */ head_ref = add_delayed_ref_head(trans, head_ref, record, action, &qrecord_inserted); merged = insert_delayed_ref(delayed_refs, head_ref, &ref->node); spin_unlock(&delayed_refs->lock); /* * Need to update the delayed_refs_rsv with any changes we may have * made. */ btrfs_update_delayed_refs_rsv(trans); trace_add_delayed_data_ref(trans->fs_info, &ref->node, ref, action == BTRFS_ADD_DELAYED_EXTENT ? BTRFS_ADD_DELAYED_REF : action); if (merged) kmem_cache_free(btrfs_delayed_data_ref_cachep, ref); if (qrecord_inserted) return btrfs_qgroup_trace_extent_post(trans, record); return 0; } int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans, u64 bytenr, u64 num_bytes, struct btrfs_delayed_extent_op *extent_op) { struct btrfs_delayed_ref_head *head_ref; struct btrfs_delayed_ref_root *delayed_refs; head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS); if (!head_ref) return -ENOMEM; init_delayed_ref_head(head_ref, NULL, bytenr, num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD, false, false); head_ref->extent_op = extent_op; delayed_refs = &trans->transaction->delayed_refs; spin_lock(&delayed_refs->lock); add_delayed_ref_head(trans, head_ref, NULL, BTRFS_UPDATE_DELAYED_HEAD, NULL); spin_unlock(&delayed_refs->lock); /* * Need to update the delayed_refs_rsv with any changes we may have * made. */ btrfs_update_delayed_refs_rsv(trans); return 0; } /* * This does a simple search for the head node for a given extent. Returns the * head node if found, or NULL if not. 
*/ struct btrfs_delayed_ref_head * btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr) { lockdep_assert_held(&delayed_refs->lock); return find_ref_head(delayed_refs, bytenr, false); } void __cold btrfs_delayed_ref_exit(void) { kmem_cache_destroy(btrfs_delayed_ref_head_cachep); kmem_cache_destroy(btrfs_delayed_tree_ref_cachep); kmem_cache_destroy(btrfs_delayed_data_ref_cachep); kmem_cache_destroy(btrfs_delayed_extent_op_cachep); } int __init btrfs_delayed_ref_init(void) { btrfs_delayed_ref_head_cachep = kmem_cache_create( "btrfs_delayed_ref_head", sizeof(struct btrfs_delayed_ref_head), 0, SLAB_MEM_SPREAD, NULL); if (!btrfs_delayed_ref_head_cachep) goto fail; btrfs_delayed_tree_ref_cachep = kmem_cache_create( "btrfs_delayed_tree_ref", sizeof(struct btrfs_delayed_tree_ref), 0, SLAB_MEM_SPREAD, NULL); if (!btrfs_delayed_tree_ref_cachep) goto fail; btrfs_delayed_data_ref_cachep = kmem_cache_create( "btrfs_delayed_data_ref", sizeof(struct btrfs_delayed_data_ref), 0, SLAB_MEM_SPREAD, NULL); if (!btrfs_delayed_data_ref_cachep) goto fail; btrfs_delayed_extent_op_cachep = kmem_cache_create( "btrfs_delayed_extent_op", sizeof(struct btrfs_delayed_extent_op), 0, SLAB_MEM_SPREAD, NULL); if (!btrfs_delayed_extent_op_cachep) goto fail; return 0; fail: btrfs_delayed_ref_exit(); return -ENOMEM; }
repo_name: linux-master
file_path: fs/btrfs/delayed-ref.c
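To make the accounting in insert_delayed_ref() and merge_ref() above easier to follow, here is a small standalone C sketch of how opposing ADD/DROP modifications to the same extent collapse into a single signed ref_mod count. The sketch_ref type and merge_ref_mod() are invented names for illustration; only the arithmetic mirrors the kernel functions.

/*
 * Userspace sketch of the delayed-ref merge arithmetic. ADD and DROP
 * refs for the same extent cancel out; the surviving action is the one
 * with the larger modification count. Names are illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

enum sketch_action { SKETCH_ADD_REF, SKETCH_DROP_REF };

struct sketch_ref {
	enum sketch_action action;
	int ref_mod;		/* always positive; direction comes from action */
};

/*
 * Merge @new_ref into @exist. Returns true when @exist cancels out
 * completely (ref_mod reached zero) and would be dropped from the tree.
 */
static bool merge_ref_mod(struct sketch_ref *exist,
			  const struct sketch_ref *new_ref)
{
	int mod;

	if (exist->action == new_ref->action) {
		/* Same direction: counts simply accumulate. */
		mod = new_ref->ref_mod;
	} else if (exist->ref_mod < new_ref->ref_mod) {
		/* Opposite direction, new ref dominates: flip the action. */
		mod = -exist->ref_mod;
		exist->action = new_ref->action;
		exist->ref_mod = new_ref->ref_mod;
	} else {
		/* Opposite direction, existing ref dominates: subtract. */
		mod = -new_ref->ref_mod;
	}

	exist->ref_mod += mod;
	return exist->ref_mod == 0;
}

int main(void)
{
	struct sketch_ref exist = { SKETCH_ADD_REF, 2 };
	struct sketch_ref drop = { SKETCH_DROP_REF, 2 };

	if (merge_ref_mod(&exist, &drop))
		printf("refs cancelled, node can be dropped\n");
	else
		printf("surviving action %d, ref_mod %d\n",
		       exist.action, exist.ref_mod);
	return 0;
}

The same idea drives the head ref: update_existing_head_ref() folds every node's contribution into head->total_ref_mod, and a sign change on that total is what triggers the pending_csums adjustment shown above.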
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2011, 2012 STRATO. All rights reserved. */ #include <linux/blkdev.h> #include <linux/ratelimit.h> #include <linux/sched/mm.h> #include <crypto/hash.h> #include "ctree.h" #include "discard.h" #include "volumes.h" #include "disk-io.h" #include "ordered-data.h" #include "transaction.h" #include "backref.h" #include "extent_io.h" #include "dev-replace.h" #include "check-integrity.h" #include "raid56.h" #include "block-group.h" #include "zoned.h" #include "fs.h" #include "accessors.h" #include "file-item.h" #include "scrub.h" /* * This is only the first step towards a full-features scrub. It reads all * extent and super block and verifies the checksums. In case a bad checksum * is found or the extent cannot be read, good data will be written back if * any can be found. * * Future enhancements: * - In case an unrepairable extent is encountered, track which files are * affected and report them * - track and record media errors, throw out bad devices * - add a mode to also read unallocated space */ struct scrub_ctx; /* * The following value only influences the performance. * * This detemines how many stripes would be submitted in one go, * which is 512KiB (BTRFS_STRIPE_LEN * SCRUB_STRIPES_PER_GROUP). */ #define SCRUB_STRIPES_PER_GROUP 8 /* * How many groups we have for each sctx. * * This would be 8M per device, the same value as the old scrub in-flight bios * size limit. */ #define SCRUB_GROUPS_PER_SCTX 16 #define SCRUB_TOTAL_STRIPES (SCRUB_GROUPS_PER_SCTX * SCRUB_STRIPES_PER_GROUP) /* * The following value times PAGE_SIZE needs to be large enough to match the * largest node/leaf/sector size that shall be supported. */ #define SCRUB_MAX_SECTORS_PER_BLOCK (BTRFS_MAX_METADATA_BLOCKSIZE / SZ_4K) /* Represent one sector and its needed info to verify the content. */ struct scrub_sector_verification { bool is_metadata; union { /* * Csum pointer for data csum verification. Should point to a * sector csum inside scrub_stripe::csums. * * NULL if this data sector has no csum. */ u8 *csum; /* * Extra info for metadata verification. All sectors inside a * tree block share the same generation. */ u64 generation; }; }; enum scrub_stripe_flags { /* Set when @mirror_num, @dev, @physical and @logical are set. */ SCRUB_STRIPE_FLAG_INITIALIZED, /* Set when the read-repair is finished. */ SCRUB_STRIPE_FLAG_REPAIR_DONE, /* * Set for data stripes if it's triggered from P/Q stripe. * During such scrub, we should not report errors in data stripes, nor * update the accounting. */ SCRUB_STRIPE_FLAG_NO_REPORT, }; #define SCRUB_STRIPE_PAGES (BTRFS_STRIPE_LEN / PAGE_SIZE) /* * Represent one contiguous range with a length of BTRFS_STRIPE_LEN. */ struct scrub_stripe { struct scrub_ctx *sctx; struct btrfs_block_group *bg; struct page *pages[SCRUB_STRIPE_PAGES]; struct scrub_sector_verification *sectors; struct btrfs_device *dev; u64 logical; u64 physical; u16 mirror_num; /* Should be BTRFS_STRIPE_LEN / sectorsize. */ u16 nr_sectors; /* * How many data/meta extents are in this stripe. Only for scrub status * reporting purposes. */ u16 nr_data_extents; u16 nr_meta_extents; atomic_t pending_io; wait_queue_head_t io_wait; wait_queue_head_t repair_wait; /* * Indicate the states of the stripe. Bits are defined in * scrub_stripe_flags enum. */ unsigned long state; /* Indicate which sectors are covered by extent items. */ unsigned long extent_sector_bitmap; /* * The errors hit during the initial read of the stripe. * * Would be utilized for error reporting and repair. 
* * The remaining init_nr_* records the number of errors hit, only used * by error reporting. */ unsigned long init_error_bitmap; unsigned int init_nr_io_errors; unsigned int init_nr_csum_errors; unsigned int init_nr_meta_errors; /* * The following error bitmaps are all for the current status. * Every time we submit a new read, these bitmaps may be updated. * * error_bitmap = io_error_bitmap | csum_error_bitmap | meta_error_bitmap; * * IO and csum errors can happen for both metadata and data. */ unsigned long error_bitmap; unsigned long io_error_bitmap; unsigned long csum_error_bitmap; unsigned long meta_error_bitmap; /* For writeback (repair or replace) error reporting. */ unsigned long write_error_bitmap; /* Writeback can be concurrent, thus we need to protect the bitmap. */ spinlock_t write_error_lock; /* * Checksum for the whole stripe if this stripe is inside a data block * group. */ u8 *csums; struct work_struct work; }; struct scrub_ctx { struct scrub_stripe stripes[SCRUB_TOTAL_STRIPES]; struct scrub_stripe *raid56_data_stripes; struct btrfs_fs_info *fs_info; struct btrfs_path extent_path; struct btrfs_path csum_path; int first_free; int cur_stripe; atomic_t cancel_req; int readonly; int sectors_per_bio; /* State of IO submission throttling affecting the associated device */ ktime_t throttle_deadline; u64 throttle_sent; int is_dev_replace; u64 write_pointer; struct mutex wr_lock; struct btrfs_device *wr_tgtdev; /* * statistics */ struct btrfs_scrub_progress stat; spinlock_t stat_lock; /* * Use a ref counter to avoid use-after-free issues. Scrub workers * decrement bios_in_flight and workers_pending and then do a wakeup * on the list_wait wait queue. We must ensure the main scrub task * doesn't free the scrub context before or while the workers are * doing the wakeup() call. 
*/ refcount_t refs; }; struct scrub_warning { struct btrfs_path *path; u64 extent_item_size; const char *errstr; u64 physical; u64 logical; struct btrfs_device *dev; }; static void release_scrub_stripe(struct scrub_stripe *stripe) { if (!stripe) return; for (int i = 0; i < SCRUB_STRIPE_PAGES; i++) { if (stripe->pages[i]) __free_page(stripe->pages[i]); stripe->pages[i] = NULL; } kfree(stripe->sectors); kfree(stripe->csums); stripe->sectors = NULL; stripe->csums = NULL; stripe->sctx = NULL; stripe->state = 0; } static int init_scrub_stripe(struct btrfs_fs_info *fs_info, struct scrub_stripe *stripe) { int ret; memset(stripe, 0, sizeof(*stripe)); stripe->nr_sectors = BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits; stripe->state = 0; init_waitqueue_head(&stripe->io_wait); init_waitqueue_head(&stripe->repair_wait); atomic_set(&stripe->pending_io, 0); spin_lock_init(&stripe->write_error_lock); ret = btrfs_alloc_page_array(SCRUB_STRIPE_PAGES, stripe->pages); if (ret < 0) goto error; stripe->sectors = kcalloc(stripe->nr_sectors, sizeof(struct scrub_sector_verification), GFP_KERNEL); if (!stripe->sectors) goto error; stripe->csums = kcalloc(BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits, fs_info->csum_size, GFP_KERNEL); if (!stripe->csums) goto error; return 0; error: release_scrub_stripe(stripe); return -ENOMEM; } static void wait_scrub_stripe_io(struct scrub_stripe *stripe) { wait_event(stripe->io_wait, atomic_read(&stripe->pending_io) == 0); } static void scrub_put_ctx(struct scrub_ctx *sctx); static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info) { while (atomic_read(&fs_info->scrub_pause_req)) { mutex_unlock(&fs_info->scrub_lock); wait_event(fs_info->scrub_pause_wait, atomic_read(&fs_info->scrub_pause_req) == 0); mutex_lock(&fs_info->scrub_lock); } } static void scrub_pause_on(struct btrfs_fs_info *fs_info) { atomic_inc(&fs_info->scrubs_paused); wake_up(&fs_info->scrub_pause_wait); } static void scrub_pause_off(struct btrfs_fs_info *fs_info) { mutex_lock(&fs_info->scrub_lock); __scrub_blocked_if_needed(fs_info); atomic_dec(&fs_info->scrubs_paused); mutex_unlock(&fs_info->scrub_lock); wake_up(&fs_info->scrub_pause_wait); } static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info) { scrub_pause_on(fs_info); scrub_pause_off(fs_info); } static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx) { int i; if (!sctx) return; for (i = 0; i < SCRUB_TOTAL_STRIPES; i++) release_scrub_stripe(&sctx->stripes[i]); kvfree(sctx); } static void scrub_put_ctx(struct scrub_ctx *sctx) { if (refcount_dec_and_test(&sctx->refs)) scrub_free_ctx(sctx); } static noinline_for_stack struct scrub_ctx *scrub_setup_ctx( struct btrfs_fs_info *fs_info, int is_dev_replace) { struct scrub_ctx *sctx; int i; /* Since sctx has inline 128 stripes, it can go beyond 64K easily. Use * kvzalloc(). 
*/ sctx = kvzalloc(sizeof(*sctx), GFP_KERNEL); if (!sctx) goto nomem; refcount_set(&sctx->refs, 1); sctx->is_dev_replace = is_dev_replace; sctx->fs_info = fs_info; sctx->extent_path.search_commit_root = 1; sctx->extent_path.skip_locking = 1; sctx->csum_path.search_commit_root = 1; sctx->csum_path.skip_locking = 1; for (i = 0; i < SCRUB_TOTAL_STRIPES; i++) { int ret; ret = init_scrub_stripe(fs_info, &sctx->stripes[i]); if (ret < 0) goto nomem; sctx->stripes[i].sctx = sctx; } sctx->first_free = 0; atomic_set(&sctx->cancel_req, 0); spin_lock_init(&sctx->stat_lock); sctx->throttle_deadline = 0; mutex_init(&sctx->wr_lock); if (is_dev_replace) { WARN_ON(!fs_info->dev_replace.tgtdev); sctx->wr_tgtdev = fs_info->dev_replace.tgtdev; } return sctx; nomem: scrub_free_ctx(sctx); return ERR_PTR(-ENOMEM); } static int scrub_print_warning_inode(u64 inum, u64 offset, u64 num_bytes, u64 root, void *warn_ctx) { u32 nlink; int ret; int i; unsigned nofs_flag; struct extent_buffer *eb; struct btrfs_inode_item *inode_item; struct scrub_warning *swarn = warn_ctx; struct btrfs_fs_info *fs_info = swarn->dev->fs_info; struct inode_fs_paths *ipath = NULL; struct btrfs_root *local_root; struct btrfs_key key; local_root = btrfs_get_fs_root(fs_info, root, true); if (IS_ERR(local_root)) { ret = PTR_ERR(local_root); goto err; } /* * this makes the path point to (inum INODE_ITEM ioff) */ key.objectid = inum; key.type = BTRFS_INODE_ITEM_KEY; key.offset = 0; ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0); if (ret) { btrfs_put_root(local_root); btrfs_release_path(swarn->path); goto err; } eb = swarn->path->nodes[0]; inode_item = btrfs_item_ptr(eb, swarn->path->slots[0], struct btrfs_inode_item); nlink = btrfs_inode_nlink(eb, inode_item); btrfs_release_path(swarn->path); /* * init_path might indirectly call vmalloc, or use GFP_KERNEL. Scrub * uses GFP_NOFS in this context, so we keep it consistent but it does * not seem to be strictly necessary. */ nofs_flag = memalloc_nofs_save(); ipath = init_ipath(4096, local_root, swarn->path); memalloc_nofs_restore(nofs_flag); if (IS_ERR(ipath)) { btrfs_put_root(local_root); ret = PTR_ERR(ipath); ipath = NULL; goto err; } ret = paths_from_inode(inum, ipath); if (ret < 0) goto err; /* * we deliberately ignore the bit ipath might have been too small to * hold all of the paths here */ for (i = 0; i < ipath->fspath->elem_cnt; ++i) btrfs_warn_in_rcu(fs_info, "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %u, links %u (path: %s)", swarn->errstr, swarn->logical, btrfs_dev_name(swarn->dev), swarn->physical, root, inum, offset, fs_info->sectorsize, nlink, (char *)(unsigned long)ipath->fspath->val[i]); btrfs_put_root(local_root); free_ipath(ipath); return 0; err: btrfs_warn_in_rcu(fs_info, "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d", swarn->errstr, swarn->logical, btrfs_dev_name(swarn->dev), swarn->physical, root, inum, offset, ret); free_ipath(ipath); return 0; } static void scrub_print_common_warning(const char *errstr, struct btrfs_device *dev, bool is_super, u64 logical, u64 physical) { struct btrfs_fs_info *fs_info = dev->fs_info; struct btrfs_path *path; struct btrfs_key found_key; struct extent_buffer *eb; struct btrfs_extent_item *ei; struct scrub_warning swarn; u64 flags = 0; u32 item_size; int ret; /* Super block error, no need to search extent tree. 
*/ if (is_super) { btrfs_warn_in_rcu(fs_info, "%s on device %s, physical %llu", errstr, btrfs_dev_name(dev), physical); return; } path = btrfs_alloc_path(); if (!path) return; swarn.physical = physical; swarn.logical = logical; swarn.errstr = errstr; swarn.dev = NULL; ret = extent_from_logical(fs_info, swarn.logical, path, &found_key, &flags); if (ret < 0) goto out; swarn.extent_item_size = found_key.offset; eb = path->nodes[0]; ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item); item_size = btrfs_item_size(eb, path->slots[0]); if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { unsigned long ptr = 0; u8 ref_level; u64 ref_root; while (true) { ret = tree_backref_for_extent(&ptr, eb, &found_key, ei, item_size, &ref_root, &ref_level); if (ret < 0) { btrfs_warn(fs_info, "failed to resolve tree backref for logical %llu: %d", swarn.logical, ret); break; } if (ret > 0) break; btrfs_warn_in_rcu(fs_info, "%s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu", errstr, swarn.logical, btrfs_dev_name(dev), swarn.physical, (ref_level ? "node" : "leaf"), ref_level, ref_root); } btrfs_release_path(path); } else { struct btrfs_backref_walk_ctx ctx = { 0 }; btrfs_release_path(path); ctx.bytenr = found_key.objectid; ctx.extent_item_pos = swarn.logical - found_key.objectid; ctx.fs_info = fs_info; swarn.path = path; swarn.dev = dev; iterate_extent_inodes(&ctx, true, scrub_print_warning_inode, &swarn); } out: btrfs_free_path(path); } static int fill_writer_pointer_gap(struct scrub_ctx *sctx, u64 physical) { int ret = 0; u64 length; if (!btrfs_is_zoned(sctx->fs_info)) return 0; if (!btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) return 0; if (sctx->write_pointer < physical) { length = physical - sctx->write_pointer; ret = btrfs_zoned_issue_zeroout(sctx->wr_tgtdev, sctx->write_pointer, length); if (!ret) sctx->write_pointer = physical; } return ret; } static struct page *scrub_stripe_get_page(struct scrub_stripe *stripe, int sector_nr) { struct btrfs_fs_info *fs_info = stripe->bg->fs_info; int page_index = (sector_nr << fs_info->sectorsize_bits) >> PAGE_SHIFT; return stripe->pages[page_index]; } static unsigned int scrub_stripe_get_page_offset(struct scrub_stripe *stripe, int sector_nr) { struct btrfs_fs_info *fs_info = stripe->bg->fs_info; return offset_in_page(sector_nr << fs_info->sectorsize_bits); } static void scrub_verify_one_metadata(struct scrub_stripe *stripe, int sector_nr) { struct btrfs_fs_info *fs_info = stripe->bg->fs_info; const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits; const u64 logical = stripe->logical + (sector_nr << fs_info->sectorsize_bits); const struct page *first_page = scrub_stripe_get_page(stripe, sector_nr); const unsigned int first_off = scrub_stripe_get_page_offset(stripe, sector_nr); SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); u8 on_disk_csum[BTRFS_CSUM_SIZE]; u8 calculated_csum[BTRFS_CSUM_SIZE]; struct btrfs_header *header; /* * Here we don't have a good way to attach the pages (and subpages) * to a dummy extent buffer, thus we have to directly grab the members * from pages. 
*/ header = (struct btrfs_header *)(page_address(first_page) + first_off); memcpy(on_disk_csum, header->csum, fs_info->csum_size); if (logical != btrfs_stack_header_bytenr(header)) { bitmap_set(&stripe->csum_error_bitmap, sector_nr, sectors_per_tree); bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree); btrfs_warn_rl(fs_info, "tree block %llu mirror %u has bad bytenr, has %llu want %llu", logical, stripe->mirror_num, btrfs_stack_header_bytenr(header), logical); return; } if (memcmp(header->fsid, fs_info->fs_devices->metadata_uuid, BTRFS_FSID_SIZE) != 0) { bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree); bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree); btrfs_warn_rl(fs_info, "tree block %llu mirror %u has bad fsid, has %pU want %pU", logical, stripe->mirror_num, header->fsid, fs_info->fs_devices->fsid); return; } if (memcmp(header->chunk_tree_uuid, fs_info->chunk_tree_uuid, BTRFS_UUID_SIZE) != 0) { bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree); bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree); btrfs_warn_rl(fs_info, "tree block %llu mirror %u has bad chunk tree uuid, has %pU want %pU", logical, stripe->mirror_num, header->chunk_tree_uuid, fs_info->chunk_tree_uuid); return; } /* Now check tree block csum. */ shash->tfm = fs_info->csum_shash; crypto_shash_init(shash); crypto_shash_update(shash, page_address(first_page) + first_off + BTRFS_CSUM_SIZE, fs_info->sectorsize - BTRFS_CSUM_SIZE); for (int i = sector_nr + 1; i < sector_nr + sectors_per_tree; i++) { struct page *page = scrub_stripe_get_page(stripe, i); unsigned int page_off = scrub_stripe_get_page_offset(stripe, i); crypto_shash_update(shash, page_address(page) + page_off, fs_info->sectorsize); } crypto_shash_final(shash, calculated_csum); if (memcmp(calculated_csum, on_disk_csum, fs_info->csum_size) != 0) { bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree); bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree); btrfs_warn_rl(fs_info, "tree block %llu mirror %u has bad csum, has " CSUM_FMT " want " CSUM_FMT, logical, stripe->mirror_num, CSUM_FMT_VALUE(fs_info->csum_size, on_disk_csum), CSUM_FMT_VALUE(fs_info->csum_size, calculated_csum)); return; } if (stripe->sectors[sector_nr].generation != btrfs_stack_header_generation(header)) { bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree); bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree); btrfs_warn_rl(fs_info, "tree block %llu mirror %u has bad generation, has %llu want %llu", logical, stripe->mirror_num, btrfs_stack_header_generation(header), stripe->sectors[sector_nr].generation); return; } bitmap_clear(&stripe->error_bitmap, sector_nr, sectors_per_tree); bitmap_clear(&stripe->csum_error_bitmap, sector_nr, sectors_per_tree); bitmap_clear(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree); } static void scrub_verify_one_sector(struct scrub_stripe *stripe, int sector_nr) { struct btrfs_fs_info *fs_info = stripe->bg->fs_info; struct scrub_sector_verification *sector = &stripe->sectors[sector_nr]; const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits; struct page *page = scrub_stripe_get_page(stripe, sector_nr); unsigned int pgoff = scrub_stripe_get_page_offset(stripe, sector_nr); u8 csum_buf[BTRFS_CSUM_SIZE]; int ret; ASSERT(sector_nr >= 0 && sector_nr < stripe->nr_sectors); /* Sector not utilized, skip it. */ if (!test_bit(sector_nr, &stripe->extent_sector_bitmap)) return; /* IO error, no need to check. 
*/ if (test_bit(sector_nr, &stripe->io_error_bitmap)) return; /* Metadata, verify the full tree block. */ if (sector->is_metadata) { /* * Check if the tree block crosses the stripe boudary. If * crossed the boundary, we cannot verify it but only give a * warning. * * This can only happen on a very old filesystem where chunks * are not ensured to be stripe aligned. */ if (unlikely(sector_nr + sectors_per_tree > stripe->nr_sectors)) { btrfs_warn_rl(fs_info, "tree block at %llu crosses stripe boundary %llu", stripe->logical + (sector_nr << fs_info->sectorsize_bits), stripe->logical); return; } scrub_verify_one_metadata(stripe, sector_nr); return; } /* * Data is easier, we just verify the data csum (if we have it). For * cases without csum, we have no other choice but to trust it. */ if (!sector->csum) { clear_bit(sector_nr, &stripe->error_bitmap); return; } ret = btrfs_check_sector_csum(fs_info, page, pgoff, csum_buf, sector->csum); if (ret < 0) { set_bit(sector_nr, &stripe->csum_error_bitmap); set_bit(sector_nr, &stripe->error_bitmap); } else { clear_bit(sector_nr, &stripe->csum_error_bitmap); clear_bit(sector_nr, &stripe->error_bitmap); } } /* Verify specified sectors of a stripe. */ static void scrub_verify_one_stripe(struct scrub_stripe *stripe, unsigned long bitmap) { struct btrfs_fs_info *fs_info = stripe->bg->fs_info; const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits; int sector_nr; for_each_set_bit(sector_nr, &bitmap, stripe->nr_sectors) { scrub_verify_one_sector(stripe, sector_nr); if (stripe->sectors[sector_nr].is_metadata) sector_nr += sectors_per_tree - 1; } } static int calc_sector_number(struct scrub_stripe *stripe, struct bio_vec *first_bvec) { int i; for (i = 0; i < stripe->nr_sectors; i++) { if (scrub_stripe_get_page(stripe, i) == first_bvec->bv_page && scrub_stripe_get_page_offset(stripe, i) == first_bvec->bv_offset) break; } ASSERT(i < stripe->nr_sectors); return i; } /* * Repair read is different to the regular read: * * - Only reads the failed sectors * - May have extra blocksize limits */ static void scrub_repair_read_endio(struct btrfs_bio *bbio) { struct scrub_stripe *stripe = bbio->private; struct btrfs_fs_info *fs_info = stripe->bg->fs_info; struct bio_vec *bvec; int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio)); u32 bio_size = 0; int i; ASSERT(sector_nr < stripe->nr_sectors); bio_for_each_bvec_all(bvec, &bbio->bio, i) bio_size += bvec->bv_len; if (bbio->bio.bi_status) { bitmap_set(&stripe->io_error_bitmap, sector_nr, bio_size >> fs_info->sectorsize_bits); bitmap_set(&stripe->error_bitmap, sector_nr, bio_size >> fs_info->sectorsize_bits); } else { bitmap_clear(&stripe->io_error_bitmap, sector_nr, bio_size >> fs_info->sectorsize_bits); } bio_put(&bbio->bio); if (atomic_dec_and_test(&stripe->pending_io)) wake_up(&stripe->io_wait); } static int calc_next_mirror(int mirror, int num_copies) { ASSERT(mirror <= num_copies); return (mirror + 1 > num_copies) ? 
1 : mirror + 1; } static void scrub_stripe_submit_repair_read(struct scrub_stripe *stripe, int mirror, int blocksize, bool wait) { struct btrfs_fs_info *fs_info = stripe->bg->fs_info; struct btrfs_bio *bbio = NULL; const unsigned long old_error_bitmap = stripe->error_bitmap; int i; ASSERT(stripe->mirror_num >= 1); ASSERT(atomic_read(&stripe->pending_io) == 0); for_each_set_bit(i, &old_error_bitmap, stripe->nr_sectors) { struct page *page; int pgoff; int ret; page = scrub_stripe_get_page(stripe, i); pgoff = scrub_stripe_get_page_offset(stripe, i); /* The current sector cannot be merged, submit the bio. */ if (bbio && ((i > 0 && !test_bit(i - 1, &stripe->error_bitmap)) || bbio->bio.bi_iter.bi_size >= blocksize)) { ASSERT(bbio->bio.bi_iter.bi_size); atomic_inc(&stripe->pending_io); btrfs_submit_bio(bbio, mirror); if (wait) wait_scrub_stripe_io(stripe); bbio = NULL; } if (!bbio) { bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_READ, fs_info, scrub_repair_read_endio, stripe); bbio->bio.bi_iter.bi_sector = (stripe->logical + (i << fs_info->sectorsize_bits)) >> SECTOR_SHIFT; } ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff); ASSERT(ret == fs_info->sectorsize); } if (bbio) { ASSERT(bbio->bio.bi_iter.bi_size); atomic_inc(&stripe->pending_io); btrfs_submit_bio(bbio, mirror); if (wait) wait_scrub_stripe_io(stripe); } } static void scrub_stripe_report_errors(struct scrub_ctx *sctx, struct scrub_stripe *stripe) { static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST); struct btrfs_fs_info *fs_info = sctx->fs_info; struct btrfs_device *dev = NULL; u64 physical = 0; int nr_data_sectors = 0; int nr_meta_sectors = 0; int nr_nodatacsum_sectors = 0; int nr_repaired_sectors = 0; int sector_nr; if (test_bit(SCRUB_STRIPE_FLAG_NO_REPORT, &stripe->state)) return; /* * Init needed infos for error reporting. * * Although our scrub_stripe infrastucture is mostly based on btrfs_submit_bio() * thus no need for dev/physical, error reporting still needs dev and physical. */ if (!bitmap_empty(&stripe->init_error_bitmap, stripe->nr_sectors)) { u64 mapped_len = fs_info->sectorsize; struct btrfs_io_context *bioc = NULL; int stripe_index = stripe->mirror_num - 1; int ret; /* For scrub, our mirror_num should always start at 1. */ ASSERT(stripe->mirror_num >= 1); ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, stripe->logical, &mapped_len, &bioc, NULL, NULL, 1); /* * If we failed, dev will be NULL, and later detailed reports * will just be skipped. */ if (ret < 0) goto skip; physical = bioc->stripes[stripe_index].physical; dev = bioc->stripes[stripe_index].dev; btrfs_put_bioc(bioc); } skip: for_each_set_bit(sector_nr, &stripe->extent_sector_bitmap, stripe->nr_sectors) { bool repaired = false; if (stripe->sectors[sector_nr].is_metadata) { nr_meta_sectors++; } else { nr_data_sectors++; if (!stripe->sectors[sector_nr].csum) nr_nodatacsum_sectors++; } if (test_bit(sector_nr, &stripe->init_error_bitmap) && !test_bit(sector_nr, &stripe->error_bitmap)) { nr_repaired_sectors++; repaired = true; } /* Good sector from the beginning, nothing need to be done. */ if (!test_bit(sector_nr, &stripe->init_error_bitmap)) continue; /* * Report error for the corrupted sectors. If repaired, just * output the message of repaired message. 
*/ if (repaired) { if (dev) { btrfs_err_rl_in_rcu(fs_info, "fixed up error at logical %llu on dev %s physical %llu", stripe->logical, btrfs_dev_name(dev), physical); } else { btrfs_err_rl_in_rcu(fs_info, "fixed up error at logical %llu on mirror %u", stripe->logical, stripe->mirror_num); } continue; } /* The remaining are all for unrepaired. */ if (dev) { btrfs_err_rl_in_rcu(fs_info, "unable to fixup (regular) error at logical %llu on dev %s physical %llu", stripe->logical, btrfs_dev_name(dev), physical); } else { btrfs_err_rl_in_rcu(fs_info, "unable to fixup (regular) error at logical %llu on mirror %u", stripe->logical, stripe->mirror_num); } if (test_bit(sector_nr, &stripe->io_error_bitmap)) if (__ratelimit(&rs) && dev) scrub_print_common_warning("i/o error", dev, false, stripe->logical, physical); if (test_bit(sector_nr, &stripe->csum_error_bitmap)) if (__ratelimit(&rs) && dev) scrub_print_common_warning("checksum error", dev, false, stripe->logical, physical); if (test_bit(sector_nr, &stripe->meta_error_bitmap)) if (__ratelimit(&rs) && dev) scrub_print_common_warning("header error", dev, false, stripe->logical, physical); } spin_lock(&sctx->stat_lock); sctx->stat.data_extents_scrubbed += stripe->nr_data_extents; sctx->stat.tree_extents_scrubbed += stripe->nr_meta_extents; sctx->stat.data_bytes_scrubbed += nr_data_sectors << fs_info->sectorsize_bits; sctx->stat.tree_bytes_scrubbed += nr_meta_sectors << fs_info->sectorsize_bits; sctx->stat.no_csum += nr_nodatacsum_sectors; sctx->stat.read_errors += stripe->init_nr_io_errors; sctx->stat.csum_errors += stripe->init_nr_csum_errors; sctx->stat.verify_errors += stripe->init_nr_meta_errors; sctx->stat.uncorrectable_errors += bitmap_weight(&stripe->error_bitmap, stripe->nr_sectors); sctx->stat.corrected_errors += nr_repaired_sectors; spin_unlock(&sctx->stat_lock); } static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *stripe, unsigned long write_bitmap, bool dev_replace); /* * The main entrance for all read related scrub work, including: * * - Wait for the initial read to finish * - Verify and locate any bad sectors * - Go through the remaining mirrors and try to read as large blocksize as * possible * - Go through all mirrors (including the failed mirror) sector-by-sector * - Submit writeback for repaired sectors * * Writeback for dev-replace does not happen here, it needs extra * synchronization for zoned devices. */ static void scrub_stripe_read_repair_worker(struct work_struct *work) { struct scrub_stripe *stripe = container_of(work, struct scrub_stripe, work); struct scrub_ctx *sctx = stripe->sctx; struct btrfs_fs_info *fs_info = sctx->fs_info; int num_copies = btrfs_num_copies(fs_info, stripe->bg->start, stripe->bg->length); int mirror; int i; ASSERT(stripe->mirror_num > 0); wait_scrub_stripe_io(stripe); scrub_verify_one_stripe(stripe, stripe->extent_sector_bitmap); /* Save the initial failed bitmap for later repair and report usage. */ stripe->init_error_bitmap = stripe->error_bitmap; stripe->init_nr_io_errors = bitmap_weight(&stripe->io_error_bitmap, stripe->nr_sectors); stripe->init_nr_csum_errors = bitmap_weight(&stripe->csum_error_bitmap, stripe->nr_sectors); stripe->init_nr_meta_errors = bitmap_weight(&stripe->meta_error_bitmap, stripe->nr_sectors); if (bitmap_empty(&stripe->init_error_bitmap, stripe->nr_sectors)) goto out; /* * Try all remaining mirrors. * * Here we still try to read as large block as possible, as this is * faster and we have extra safety nets to rely on. 
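* For illustration (the numbers are made up): with num_copies == 3 and the initial mirror being 2, calc_next_mirror() walks to mirror 3 and then mirror 1, and the loop stops before revisiting mirror 2, so every other copy is tried exactly once at the large blocksize.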
*/ for (mirror = calc_next_mirror(stripe->mirror_num, num_copies); mirror != stripe->mirror_num; mirror = calc_next_mirror(mirror, num_copies)) { const unsigned long old_error_bitmap = stripe->error_bitmap; scrub_stripe_submit_repair_read(stripe, mirror, BTRFS_STRIPE_LEN, false); wait_scrub_stripe_io(stripe); scrub_verify_one_stripe(stripe, old_error_bitmap); if (bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors)) goto out; } /* * Last safety net, try re-checking all mirrors, including the failed * one, sector-by-sector. * * As if one sector failed the drive's internal csum, the whole read * containing the offending sector would be marked as error. * Thus here we do sector-by-sector read. * * This can be slow, thus we only try it as the last resort. */ for (i = 0, mirror = stripe->mirror_num; i < num_copies; i++, mirror = calc_next_mirror(mirror, num_copies)) { const unsigned long old_error_bitmap = stripe->error_bitmap; scrub_stripe_submit_repair_read(stripe, mirror, fs_info->sectorsize, true); wait_scrub_stripe_io(stripe); scrub_verify_one_stripe(stripe, old_error_bitmap); if (bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors)) goto out; } out: /* * Submit the repaired sectors. For zoned case, we cannot do repair * in-place, but queue the bg to be relocated. */ if (btrfs_is_zoned(fs_info)) { if (!bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors)) btrfs_repair_one_zone(fs_info, sctx->stripes[0].bg->start); } else if (!sctx->readonly) { unsigned long repaired; bitmap_andnot(&repaired, &stripe->init_error_bitmap, &stripe->error_bitmap, stripe->nr_sectors); scrub_write_sectors(sctx, stripe, repaired, false); wait_scrub_stripe_io(stripe); } scrub_stripe_report_errors(sctx, stripe); set_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state); wake_up(&stripe->repair_wait); } static void scrub_read_endio(struct btrfs_bio *bbio) { struct scrub_stripe *stripe = bbio->private; if (bbio->bio.bi_status) { bitmap_set(&stripe->io_error_bitmap, 0, stripe->nr_sectors); bitmap_set(&stripe->error_bitmap, 0, stripe->nr_sectors); } else { bitmap_clear(&stripe->io_error_bitmap, 0, stripe->nr_sectors); } bio_put(&bbio->bio); if (atomic_dec_and_test(&stripe->pending_io)) { wake_up(&stripe->io_wait); INIT_WORK(&stripe->work, scrub_stripe_read_repair_worker); queue_work(stripe->bg->fs_info->scrub_workers, &stripe->work); } } static void scrub_write_endio(struct btrfs_bio *bbio) { struct scrub_stripe *stripe = bbio->private; struct btrfs_fs_info *fs_info = stripe->bg->fs_info; struct bio_vec *bvec; int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio)); u32 bio_size = 0; int i; bio_for_each_bvec_all(bvec, &bbio->bio, i) bio_size += bvec->bv_len; if (bbio->bio.bi_status) { unsigned long flags; spin_lock_irqsave(&stripe->write_error_lock, flags); bitmap_set(&stripe->write_error_bitmap, sector_nr, bio_size >> fs_info->sectorsize_bits); spin_unlock_irqrestore(&stripe->write_error_lock, flags); } bio_put(&bbio->bio); if (atomic_dec_and_test(&stripe->pending_io)) wake_up(&stripe->io_wait); } static void scrub_submit_write_bio(struct scrub_ctx *sctx, struct scrub_stripe *stripe, struct btrfs_bio *bbio, bool dev_replace) { struct btrfs_fs_info *fs_info = sctx->fs_info; u32 bio_len = bbio->bio.bi_iter.bi_size; u32 bio_off = (bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT) - stripe->logical; fill_writer_pointer_gap(sctx, stripe->physical + bio_off); atomic_inc(&stripe->pending_io); btrfs_submit_repair_write(bbio, stripe->mirror_num, dev_replace); if (!btrfs_is_zoned(fs_info)) return; /* * For 
zoned writeback, queue depth must be 1, thus we must wait for * the write to finish before the next write. */ wait_scrub_stripe_io(stripe); /* * And also need to update the write pointer if write finished * successfully. */ if (!test_bit(bio_off >> fs_info->sectorsize_bits, &stripe->write_error_bitmap)) sctx->write_pointer += bio_len; } /* * Submit the write bio(s) for the sectors specified by @write_bitmap. * * Here we utilize btrfs_submit_repair_write(), which has some extra benefits: * * - Only needs logical bytenr and mirror_num * Just like the scrub read path * * - Would only result in writes to the specified mirror * Unlike the regular writeback path, which would write back to all stripes * * - Handle dev-replace and read-repair writeback differently */ static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *stripe, unsigned long write_bitmap, bool dev_replace) { struct btrfs_fs_info *fs_info = stripe->bg->fs_info; struct btrfs_bio *bbio = NULL; int sector_nr; for_each_set_bit(sector_nr, &write_bitmap, stripe->nr_sectors) { struct page *page = scrub_stripe_get_page(stripe, sector_nr); unsigned int pgoff = scrub_stripe_get_page_offset(stripe, sector_nr); int ret; /* We should only writeback sectors covered by an extent. */ ASSERT(test_bit(sector_nr, &stripe->extent_sector_bitmap)); /* Cannot merge with previous sector, submit the current one. */ if (bbio && sector_nr && !test_bit(sector_nr - 1, &write_bitmap)) { scrub_submit_write_bio(sctx, stripe, bbio, dev_replace); bbio = NULL; } if (!bbio) { bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_WRITE, fs_info, scrub_write_endio, stripe); bbio->bio.bi_iter.bi_sector = (stripe->logical + (sector_nr << fs_info->sectorsize_bits)) >> SECTOR_SHIFT; } ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff); ASSERT(ret == fs_info->sectorsize); } if (bbio) scrub_submit_write_bio(sctx, stripe, bbio, dev_replace); } /* * Throttling of IO submission, bandwidth-limit based, the timeslice is 1 * second. Limit can be set via /sys/fs/UUID/devinfo/devid/scrub_speed_max. */ static void scrub_throttle_dev_io(struct scrub_ctx *sctx, struct btrfs_device *device, unsigned int bio_size) { const int time_slice = 1000; s64 delta; ktime_t now; u32 div; u64 bwlimit; bwlimit = READ_ONCE(device->scrub_speed_max); if (bwlimit == 0) return; /* * Slice is divided into intervals when the IO is submitted, adjust by * bwlimit and maximum of 64 intervals. */ div = max_t(u32, 1, (u32)(bwlimit / (16 * 1024 * 1024))); div = min_t(u32, 64, div); /* Start new epoch, set deadline */ now = ktime_get(); if (sctx->throttle_deadline == 0) { sctx->throttle_deadline = ktime_add_ms(now, time_slice / div); sctx->throttle_sent = 0; } /* Still in the time to send? */ if (ktime_before(now, sctx->throttle_deadline)) { /* If current bio is within the limit, send it */ sctx->throttle_sent += bio_size; if (sctx->throttle_sent <= div_u64(bwlimit, div)) return; /* We're over the limit, sleep until the rest of the slice */ delta = ktime_ms_delta(sctx->throttle_deadline, now); } else { /* New request after deadline, start new epoch */ delta = 0; } if (delta) { long timeout; timeout = div_u64(delta * HZ, 1000); schedule_timeout_interruptible(timeout); } /* Next call will start the deadline period */ sctx->throttle_deadline = 0; } /* * Given a physical address, this will calculate it's * logical offset. if this is a parity stripe, it will return * the most left data stripe's logical offset. * * return 0 if it is a data stripe, 1 means parity stripe. 
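* * A made-up example: on a 3-device RAID5 chunk (data_stripes == 2), a physical address at the very start of device 0's stripe maps to logical offset 0 inside the chunk and the function returns 0 (data). The same physical offset on device 2 holds the parity of that first full stripe, so the function returns 1 and @stripe_start is set to the full stripe's start (offset 0).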
*/ static int get_raid56_logic_offset(u64 physical, int num, struct map_lookup *map, u64 *offset, u64 *stripe_start) { int i; int j = 0; u64 last_offset; const int data_stripes = nr_data_stripes(map); last_offset = (physical - map->stripes[num].physical) * data_stripes; if (stripe_start) *stripe_start = last_offset; *offset = last_offset; for (i = 0; i < data_stripes; i++) { u32 stripe_nr; u32 stripe_index; u32 rot; *offset = last_offset + btrfs_stripe_nr_to_offset(i); stripe_nr = (u32)(*offset >> BTRFS_STRIPE_LEN_SHIFT) / data_stripes; /* Work out the disk rotation on this stripe-set */ rot = stripe_nr % map->num_stripes; /* Calculate which stripe this data is located on */ rot += i; stripe_index = rot % map->num_stripes; if (stripe_index == num) return 0; if (stripe_index < num) j++; } *offset = last_offset + btrfs_stripe_nr_to_offset(j); return 1; } /* * Return 0 if the extent item range covers any byte of the range. * Return <0 if the extent item is before @search_start. * Return >0 if the extent item is after @search_start + @search_len. */ static int compare_extent_item_range(struct btrfs_path *path, u64 search_start, u64 search_len) { struct btrfs_fs_info *fs_info = path->nodes[0]->fs_info; u64 len; struct btrfs_key key; btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); ASSERT(key.type == BTRFS_EXTENT_ITEM_KEY || key.type == BTRFS_METADATA_ITEM_KEY); if (key.type == BTRFS_METADATA_ITEM_KEY) len = fs_info->nodesize; else len = key.offset; if (key.objectid + len <= search_start) return -1; if (key.objectid >= search_start + search_len) return 1; return 0; } /* * Locate one extent item which covers any byte in range * [@search_start, @search_start + @search_len) * * If the path is not initialized, we will initialize the search by doing * a btrfs_search_slot(). * If the path is already initialized, we will use the path as the initial * slot, to avoid duplicated btrfs_search_slot() calls. * * NOTE: If an extent item starts before @search_start, we will still * return the extent item. This is for data extents crossing the stripe boundary. * * Return 0 if we found such an extent item, and @path will point to the extent item. * Return >0 if no such extent item can be found, and @path will be released. * Return <0 if we hit a fatal error, and @path will be released. */ static int find_first_extent_item(struct btrfs_root *extent_root, struct btrfs_path *path, u64 search_start, u64 search_len) { struct btrfs_fs_info *fs_info = extent_root->fs_info; struct btrfs_key key; int ret; /* Continue using the existing path */ if (path->nodes[0]) goto search_forward; if (btrfs_fs_incompat(fs_info, SKINNY_METADATA)) key.type = BTRFS_METADATA_ITEM_KEY; else key.type = BTRFS_EXTENT_ITEM_KEY; key.objectid = search_start; key.offset = (u64)-1; ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0); if (ret < 0) return ret; ASSERT(ret > 0); /* * Here we intentionally pass 0 as @min_objectid, as there could be * an extent item starting before @search_start. */ ret = btrfs_previous_extent_item(extent_root, path, 0); if (ret < 0) return ret; /* * No matter whether we have found an extent item, the next loop will * properly do every check on the key.
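* * For example (made-up numbers): when searching [1M, 1M + 64K), the slot may point at an EXTENT_ITEM keyed at 1M - 16K with a 32K length; it starts before the range but still overlaps it, so compare_extent_item_range() returns 0 and the item is returned to the caller.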
*/ search_forward: while (true) { btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); if (key.objectid >= search_start + search_len) break; if (key.type != BTRFS_METADATA_ITEM_KEY && key.type != BTRFS_EXTENT_ITEM_KEY) goto next; ret = compare_extent_item_range(path, search_start, search_len); if (ret == 0) return ret; if (ret > 0) break; next: path->slots[0]++; if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { ret = btrfs_next_leaf(extent_root, path); if (ret) { /* Either no more item or fatal error */ btrfs_release_path(path); return ret; } } } btrfs_release_path(path); return 1; } static void get_extent_info(struct btrfs_path *path, u64 *extent_start_ret, u64 *size_ret, u64 *flags_ret, u64 *generation_ret) { struct btrfs_key key; struct btrfs_extent_item *ei; btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); ASSERT(key.type == BTRFS_METADATA_ITEM_KEY || key.type == BTRFS_EXTENT_ITEM_KEY); *extent_start_ret = key.objectid; if (key.type == BTRFS_METADATA_ITEM_KEY) *size_ret = path->nodes[0]->fs_info->nodesize; else *size_ret = key.offset; ei = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_extent_item); *flags_ret = btrfs_extent_flags(path->nodes[0], ei); *generation_ret = btrfs_extent_generation(path->nodes[0], ei); } static int sync_write_pointer_for_zoned(struct scrub_ctx *sctx, u64 logical, u64 physical, u64 physical_end) { struct btrfs_fs_info *fs_info = sctx->fs_info; int ret = 0; if (!btrfs_is_zoned(fs_info)) return 0; mutex_lock(&sctx->wr_lock); if (sctx->write_pointer < physical_end) { ret = btrfs_sync_zone_write_pointer(sctx->wr_tgtdev, logical, physical, sctx->write_pointer); if (ret) btrfs_err(fs_info, "zoned: failed to recover write pointer"); } mutex_unlock(&sctx->wr_lock); btrfs_dev_clear_zone_empty(sctx->wr_tgtdev, physical); return ret; } static void fill_one_extent_info(struct btrfs_fs_info *fs_info, struct scrub_stripe *stripe, u64 extent_start, u64 extent_len, u64 extent_flags, u64 extent_gen) { for (u64 cur_logical = max(stripe->logical, extent_start); cur_logical < min(stripe->logical + BTRFS_STRIPE_LEN, extent_start + extent_len); cur_logical += fs_info->sectorsize) { const int nr_sector = (cur_logical - stripe->logical) >> fs_info->sectorsize_bits; struct scrub_sector_verification *sector = &stripe->sectors[nr_sector]; set_bit(nr_sector, &stripe->extent_sector_bitmap); if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { sector->is_metadata = true; sector->generation = extent_gen; } } } static void scrub_stripe_reset_bitmaps(struct scrub_stripe *stripe) { stripe->extent_sector_bitmap = 0; stripe->init_error_bitmap = 0; stripe->init_nr_io_errors = 0; stripe->init_nr_csum_errors = 0; stripe->init_nr_meta_errors = 0; stripe->error_bitmap = 0; stripe->io_error_bitmap = 0; stripe->csum_error_bitmap = 0; stripe->meta_error_bitmap = 0; } /* * Locate one stripe which has at least one extent in its range. * * Return 0 if found such stripe, and store its info into @stripe. * Return >0 if there is no such stripe in the specified range. * Return <0 for error. 
*/ static int scrub_find_fill_first_stripe(struct btrfs_block_group *bg, struct btrfs_path *extent_path, struct btrfs_path *csum_path, struct btrfs_device *dev, u64 physical, int mirror_num, u64 logical_start, u32 logical_len, struct scrub_stripe *stripe) { struct btrfs_fs_info *fs_info = bg->fs_info; struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bg->start); struct btrfs_root *csum_root = btrfs_csum_root(fs_info, bg->start); const u64 logical_end = logical_start + logical_len; u64 cur_logical = logical_start; u64 stripe_end; u64 extent_start; u64 extent_len; u64 extent_flags; u64 extent_gen; int ret; memset(stripe->sectors, 0, sizeof(struct scrub_sector_verification) * stripe->nr_sectors); scrub_stripe_reset_bitmaps(stripe); /* The range must be inside the bg. */ ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length); ret = find_first_extent_item(extent_root, extent_path, logical_start, logical_len); /* Either error or not found. */ if (ret) goto out; get_extent_info(extent_path, &extent_start, &extent_len, &extent_flags, &extent_gen); if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) stripe->nr_meta_extents++; if (extent_flags & BTRFS_EXTENT_FLAG_DATA) stripe->nr_data_extents++; cur_logical = max(extent_start, cur_logical); /* * Round down to stripe boundary. * * The extra calculation against bg->start is to handle block groups * whose logical bytenr is not BTRFS_STRIPE_LEN aligned. */ stripe->logical = round_down(cur_logical - bg->start, BTRFS_STRIPE_LEN) + bg->start; stripe->physical = physical + stripe->logical - logical_start; stripe->dev = dev; stripe->bg = bg; stripe->mirror_num = mirror_num; stripe_end = stripe->logical + BTRFS_STRIPE_LEN - 1; /* Fill the first extent info into stripe->sectors[] array. */ fill_one_extent_info(fs_info, stripe, extent_start, extent_len, extent_flags, extent_gen); cur_logical = extent_start + extent_len; /* Fill the extent info for the remaining sectors. */ while (cur_logical <= stripe_end) { ret = find_first_extent_item(extent_root, extent_path, cur_logical, stripe_end - cur_logical + 1); if (ret < 0) goto out; if (ret > 0) { ret = 0; break; } get_extent_info(extent_path, &extent_start, &extent_len, &extent_flags, &extent_gen); if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) stripe->nr_meta_extents++; if (extent_flags & BTRFS_EXTENT_FLAG_DATA) stripe->nr_data_extents++; fill_one_extent_info(fs_info, stripe, extent_start, extent_len, extent_flags, extent_gen); cur_logical = extent_start + extent_len; } /* Now fill the data csum. */ if (bg->flags & BTRFS_BLOCK_GROUP_DATA) { int sector_nr; unsigned long csum_bitmap = 0; /* Csum space should have already been allocated. */ ASSERT(stripe->csums); /* * Our csum bitmap should be large enough, as BTRFS_STRIPE_LEN * should contain at most 16 sectors. 
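* (With BTRFS_STRIPE_LEN being 64K and the smallest supported sectorsize being 4K, that is at most 64K / 4K = 16 bits, which fits in an unsigned long on both 32-bit and 64-bit hosts.)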
*/ ASSERT(BITS_PER_LONG >= BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits); ret = btrfs_lookup_csums_bitmap(csum_root, csum_path, stripe->logical, stripe_end, stripe->csums, &csum_bitmap); if (ret < 0) goto out; if (ret > 0) ret = 0; for_each_set_bit(sector_nr, &csum_bitmap, stripe->nr_sectors) { stripe->sectors[sector_nr].csum = stripe->csums + sector_nr * fs_info->csum_size; } } set_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state); out: return ret; } static void scrub_reset_stripe(struct scrub_stripe *stripe) { scrub_stripe_reset_bitmaps(stripe); stripe->nr_meta_extents = 0; stripe->nr_data_extents = 0; stripe->state = 0; for (int i = 0; i < stripe->nr_sectors; i++) { stripe->sectors[i].is_metadata = false; stripe->sectors[i].csum = NULL; stripe->sectors[i].generation = 0; } } static void scrub_submit_initial_read(struct scrub_ctx *sctx, struct scrub_stripe *stripe) { struct btrfs_fs_info *fs_info = sctx->fs_info; struct btrfs_bio *bbio; int mirror = stripe->mirror_num; ASSERT(stripe->bg); ASSERT(stripe->mirror_num > 0); ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state)); bbio = btrfs_bio_alloc(SCRUB_STRIPE_PAGES, REQ_OP_READ, fs_info, scrub_read_endio, stripe); /* Read the whole stripe. */ bbio->bio.bi_iter.bi_sector = stripe->logical >> SECTOR_SHIFT; for (int i = 0; i < BTRFS_STRIPE_LEN >> PAGE_SHIFT; i++) { int ret; ret = bio_add_page(&bbio->bio, stripe->pages[i], PAGE_SIZE, 0); /* We should have allocated enough bio vectors. */ ASSERT(ret == PAGE_SIZE); } atomic_inc(&stripe->pending_io); /* * For dev-replace, either user asks to avoid the source dev, or * the device is missing, we try the next mirror instead. */ if (sctx->is_dev_replace && (fs_info->dev_replace.cont_reading_from_srcdev_mode == BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID || !stripe->dev->bdev)) { int num_copies = btrfs_num_copies(fs_info, stripe->bg->start, stripe->bg->length); mirror = calc_next_mirror(mirror, num_copies); } btrfs_submit_bio(bbio, mirror); } static bool stripe_has_metadata_error(struct scrub_stripe *stripe) { int i; for_each_set_bit(i, &stripe->error_bitmap, stripe->nr_sectors) { if (stripe->sectors[i].is_metadata) { struct btrfs_fs_info *fs_info = stripe->bg->fs_info; btrfs_err(fs_info, "stripe %llu has unrepaired metadata sector at %llu", stripe->logical, stripe->logical + (i << fs_info->sectorsize_bits)); return true; } } return false; } static void submit_initial_group_read(struct scrub_ctx *sctx, unsigned int first_slot, unsigned int nr_stripes) { struct blk_plug plug; ASSERT(first_slot < SCRUB_TOTAL_STRIPES); ASSERT(first_slot + nr_stripes <= SCRUB_TOTAL_STRIPES); scrub_throttle_dev_io(sctx, sctx->stripes[0].dev, btrfs_stripe_nr_to_offset(nr_stripes)); blk_start_plug(&plug); for (int i = 0; i < nr_stripes; i++) { struct scrub_stripe *stripe = &sctx->stripes[first_slot + i]; /* Those stripes should be initialized. */ ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state)); scrub_submit_initial_read(sctx, stripe); } blk_finish_plug(&plug); } static int flush_scrub_stripes(struct scrub_ctx *sctx) { struct btrfs_fs_info *fs_info = sctx->fs_info; struct scrub_stripe *stripe; const int nr_stripes = sctx->cur_stripe; int ret = 0; if (!nr_stripes) return 0; ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &sctx->stripes[0].state)); /* Submit the stripes which are populated but not submitted. 
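* E.g. (the group size here is only illustrative): if stripes are submitted in groups of 8 and 20 stripes have been queued, the first 16 were already sent when their groups filled up, so only the tail slots 16-19 still need to be submitted here.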
*/ if (nr_stripes % SCRUB_STRIPES_PER_GROUP) { const int first_slot = round_down(nr_stripes, SCRUB_STRIPES_PER_GROUP); submit_initial_group_read(sctx, first_slot, nr_stripes - first_slot); } for (int i = 0; i < nr_stripes; i++) { stripe = &sctx->stripes[i]; wait_event(stripe->repair_wait, test_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state)); } /* Submit for dev-replace. */ if (sctx->is_dev_replace) { /* * For dev-replace, if we know there is something wrong with * metadata, we should immedately abort. */ for (int i = 0; i < nr_stripes; i++) { if (stripe_has_metadata_error(&sctx->stripes[i])) { ret = -EIO; goto out; } } for (int i = 0; i < nr_stripes; i++) { unsigned long good; stripe = &sctx->stripes[i]; ASSERT(stripe->dev == fs_info->dev_replace.srcdev); bitmap_andnot(&good, &stripe->extent_sector_bitmap, &stripe->error_bitmap, stripe->nr_sectors); scrub_write_sectors(sctx, stripe, good, true); } } /* Wait for the above writebacks to finish. */ for (int i = 0; i < nr_stripes; i++) { stripe = &sctx->stripes[i]; wait_scrub_stripe_io(stripe); scrub_reset_stripe(stripe); } out: sctx->cur_stripe = 0; return ret; } static void raid56_scrub_wait_endio(struct bio *bio) { complete(bio->bi_private); } static int queue_scrub_stripe(struct scrub_ctx *sctx, struct btrfs_block_group *bg, struct btrfs_device *dev, int mirror_num, u64 logical, u32 length, u64 physical, u64 *found_logical_ret) { struct scrub_stripe *stripe; int ret; /* * There should always be one slot left, as caller filling the last * slot should flush them all. */ ASSERT(sctx->cur_stripe < SCRUB_TOTAL_STRIPES); stripe = &sctx->stripes[sctx->cur_stripe]; scrub_reset_stripe(stripe); ret = scrub_find_fill_first_stripe(bg, &sctx->extent_path, &sctx->csum_path, dev, physical, mirror_num, logical, length, stripe); /* Either >0 as no more extents or <0 for error. */ if (ret) return ret; if (found_logical_ret) *found_logical_ret = stripe->logical; sctx->cur_stripe++; /* We filled one group, submit it. */ if (sctx->cur_stripe % SCRUB_STRIPES_PER_GROUP == 0) { const int first_slot = sctx->cur_stripe - SCRUB_STRIPES_PER_GROUP; submit_initial_group_read(sctx, first_slot, SCRUB_STRIPES_PER_GROUP); } /* Last slot used, flush them all. */ if (sctx->cur_stripe == SCRUB_TOTAL_STRIPES) return flush_scrub_stripes(sctx); return 0; } static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx, struct btrfs_device *scrub_dev, struct btrfs_block_group *bg, struct map_lookup *map, u64 full_stripe_start) { DECLARE_COMPLETION_ONSTACK(io_done); struct btrfs_fs_info *fs_info = sctx->fs_info; struct btrfs_raid_bio *rbio; struct btrfs_io_context *bioc = NULL; struct btrfs_path extent_path = { 0 }; struct btrfs_path csum_path = { 0 }; struct bio *bio; struct scrub_stripe *stripe; bool all_empty = true; const int data_stripes = nr_data_stripes(map); unsigned long extent_bitmap = 0; u64 length = btrfs_stripe_nr_to_offset(data_stripes); int ret; ASSERT(sctx->raid56_data_stripes); /* * For data stripe search, we cannot re-use the same extent/csum paths, * as the data stripe bytenr may be smaller than previous extent. Thus * we have to use our own extent/csum paths. 
*/ extent_path.search_commit_root = 1; extent_path.skip_locking = 1; csum_path.search_commit_root = 1; csum_path.skip_locking = 1; for (int i = 0; i < data_stripes; i++) { int stripe_index; int rot; u64 physical; stripe = &sctx->raid56_data_stripes[i]; rot = div_u64(full_stripe_start - bg->start, data_stripes) >> BTRFS_STRIPE_LEN_SHIFT; stripe_index = (i + rot) % map->num_stripes; physical = map->stripes[stripe_index].physical + btrfs_stripe_nr_to_offset(rot); scrub_reset_stripe(stripe); set_bit(SCRUB_STRIPE_FLAG_NO_REPORT, &stripe->state); ret = scrub_find_fill_first_stripe(bg, &extent_path, &csum_path, map->stripes[stripe_index].dev, physical, 1, full_stripe_start + btrfs_stripe_nr_to_offset(i), BTRFS_STRIPE_LEN, stripe); if (ret < 0) goto out; /* * No extent in this data stripe, need to manually mark them * initialized to make later read submission happy. */ if (ret > 0) { stripe->logical = full_stripe_start + btrfs_stripe_nr_to_offset(i); stripe->dev = map->stripes[stripe_index].dev; stripe->mirror_num = 1; set_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state); } } /* Check if all data stripes are empty. */ for (int i = 0; i < data_stripes; i++) { stripe = &sctx->raid56_data_stripes[i]; if (!bitmap_empty(&stripe->extent_sector_bitmap, stripe->nr_sectors)) { all_empty = false; break; } } if (all_empty) { ret = 0; goto out; } for (int i = 0; i < data_stripes; i++) { stripe = &sctx->raid56_data_stripes[i]; scrub_submit_initial_read(sctx, stripe); } for (int i = 0; i < data_stripes; i++) { stripe = &sctx->raid56_data_stripes[i]; wait_event(stripe->repair_wait, test_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state)); } /* For now, no zoned support for RAID56. */ ASSERT(!btrfs_is_zoned(sctx->fs_info)); /* * Now all data stripes are properly verified. Check if we have any * unrepaired, if so abort immediately or we could further corrupt the * P/Q stripes. * * During the loop, also populate extent_bitmap. */ for (int i = 0; i < data_stripes; i++) { unsigned long error; stripe = &sctx->raid56_data_stripes[i]; /* * We should only check the errors where there is an extent. * As we may hit an empty data stripe while it's missing. */ bitmap_and(&error, &stripe->error_bitmap, &stripe->extent_sector_bitmap, stripe->nr_sectors); if (!bitmap_empty(&error, stripe->nr_sectors)) { btrfs_err(fs_info, "unrepaired sectors detected, full stripe %llu data stripe %u errors %*pbl", full_stripe_start, i, stripe->nr_sectors, &error); ret = -EIO; goto out; } bitmap_or(&extent_bitmap, &extent_bitmap, &stripe->extent_sector_bitmap, stripe->nr_sectors); } /* Now we can check and regenerate the P/Q stripe. */ bio = bio_alloc(NULL, 1, REQ_OP_READ, GFP_NOFS); bio->bi_iter.bi_sector = full_stripe_start >> SECTOR_SHIFT; bio->bi_private = &io_done; bio->bi_end_io = raid56_scrub_wait_endio; btrfs_bio_counter_inc_blocked(fs_info); ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, full_stripe_start, &length, &bioc, NULL, NULL, 1); if (ret < 0) { btrfs_put_bioc(bioc); btrfs_bio_counter_dec(fs_info); goto out; } rbio = raid56_parity_alloc_scrub_rbio(bio, bioc, scrub_dev, &extent_bitmap, BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits); btrfs_put_bioc(bioc); if (!rbio) { ret = -ENOMEM; btrfs_bio_counter_dec(fs_info); goto out; } /* Use the recovered stripes as cache to avoid read them from disk again. 
*/ for (int i = 0; i < data_stripes; i++) { stripe = &sctx->raid56_data_stripes[i]; raid56_parity_cache_data_pages(rbio, stripe->pages, full_stripe_start + (i << BTRFS_STRIPE_LEN_SHIFT)); } raid56_parity_submit_scrub_rbio(rbio); wait_for_completion_io(&io_done); ret = blk_status_to_errno(bio->bi_status); bio_put(bio); btrfs_bio_counter_dec(fs_info); btrfs_release_path(&extent_path); btrfs_release_path(&csum_path); out: return ret; } /* * Scrub one range which can only has simple mirror based profile. * (Including all range in SINGLE/DUP/RAID1/RAID1C*, and each stripe in * RAID0/RAID10). * * Since we may need to handle a subset of block group, we need @logical_start * and @logical_length parameter. */ static int scrub_simple_mirror(struct scrub_ctx *sctx, struct btrfs_block_group *bg, struct map_lookup *map, u64 logical_start, u64 logical_length, struct btrfs_device *device, u64 physical, int mirror_num) { struct btrfs_fs_info *fs_info = sctx->fs_info; const u64 logical_end = logical_start + logical_length; u64 cur_logical = logical_start; int ret; /* The range must be inside the bg */ ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length); /* Go through each extent items inside the logical range */ while (cur_logical < logical_end) { u64 found_logical; u64 cur_physical = physical + cur_logical - logical_start; /* Canceled? */ if (atomic_read(&fs_info->scrub_cancel_req) || atomic_read(&sctx->cancel_req)) { ret = -ECANCELED; break; } /* Paused? */ if (atomic_read(&fs_info->scrub_pause_req)) { /* Push queued extents */ scrub_blocked_if_needed(fs_info); } /* Block group removed? */ spin_lock(&bg->lock); if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags)) { spin_unlock(&bg->lock); ret = 0; break; } spin_unlock(&bg->lock); ret = queue_scrub_stripe(sctx, bg, device, mirror_num, cur_logical, logical_end - cur_logical, cur_physical, &found_logical); if (ret > 0) { /* No more extent, just update the accounting */ sctx->stat.last_physical = physical + logical_length; ret = 0; break; } if (ret < 0) break; cur_logical = found_logical + BTRFS_STRIPE_LEN; /* Don't hold CPU for too long time */ cond_resched(); } return ret; } /* Calculate the full stripe length for simple stripe based profiles */ static u64 simple_stripe_full_stripe_len(const struct map_lookup *map) { ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)); return btrfs_stripe_nr_to_offset(map->num_stripes / map->sub_stripes); } /* Get the logical bytenr for the stripe */ static u64 simple_stripe_get_logical(struct map_lookup *map, struct btrfs_block_group *bg, int stripe_index) { ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)); ASSERT(stripe_index < map->num_stripes); /* * (stripe_index / sub_stripes) gives how many data stripes we need to * skip. */ return btrfs_stripe_nr_to_offset(stripe_index / map->sub_stripes) + bg->start; } /* Get the mirror number for the stripe */ static int simple_stripe_mirror_num(struct map_lookup *map, int stripe_index) { ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)); ASSERT(stripe_index < map->num_stripes); /* For RAID0, it's fixed to 1, for RAID10 it's 0,1,0,1... 
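* (stripe_index % sub_stripes gives the 0,1,0,1... pattern above; mirror numbers are 1-based, hence the +1, so RAID10 stripes map to mirrors 1,2,1,2,... while RAID0, with sub_stripes == 1, always maps to mirror 1).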
*/ return stripe_index % map->sub_stripes + 1; } static int scrub_simple_stripe(struct scrub_ctx *sctx, struct btrfs_block_group *bg, struct map_lookup *map, struct btrfs_device *device, int stripe_index) { const u64 logical_increment = simple_stripe_full_stripe_len(map); const u64 orig_logical = simple_stripe_get_logical(map, bg, stripe_index); const u64 orig_physical = map->stripes[stripe_index].physical; const int mirror_num = simple_stripe_mirror_num(map, stripe_index); u64 cur_logical = orig_logical; u64 cur_physical = orig_physical; int ret = 0; while (cur_logical < bg->start + bg->length) { /* * Inside each stripe, RAID0 is just SINGLE, and RAID10 is * just RAID1, so we can reuse scrub_simple_mirror() to scrub * this stripe. */ ret = scrub_simple_mirror(sctx, bg, map, cur_logical, BTRFS_STRIPE_LEN, device, cur_physical, mirror_num); if (ret) return ret; /* Skip to next stripe which belongs to the target device */ cur_logical += logical_increment; /* For physical offset, we just go to next stripe */ cur_physical += BTRFS_STRIPE_LEN; } return ret; } static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, struct btrfs_block_group *bg, struct extent_map *em, struct btrfs_device *scrub_dev, int stripe_index) { struct btrfs_fs_info *fs_info = sctx->fs_info; struct map_lookup *map = em->map_lookup; const u64 profile = map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK; const u64 chunk_logical = bg->start; int ret; int ret2; u64 physical = map->stripes[stripe_index].physical; const u64 dev_stripe_len = btrfs_calc_stripe_length(em); const u64 physical_end = physical + dev_stripe_len; u64 logical; u64 logic_end; /* The logical increment after finishing one stripe */ u64 increment; /* Offset inside the chunk */ u64 offset; u64 stripe_logical; int stop_loop = 0; /* Extent_path should be released by now. */ ASSERT(sctx->extent_path.nodes[0] == NULL); scrub_blocked_if_needed(fs_info); if (sctx->is_dev_replace && btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) { mutex_lock(&sctx->wr_lock); sctx->write_pointer = physical; mutex_unlock(&sctx->wr_lock); } /* Prepare the extra data stripes used by RAID56. */ if (profile & BTRFS_BLOCK_GROUP_RAID56_MASK) { ASSERT(sctx->raid56_data_stripes == NULL); sctx->raid56_data_stripes = kcalloc(nr_data_stripes(map), sizeof(struct scrub_stripe), GFP_KERNEL); if (!sctx->raid56_data_stripes) { ret = -ENOMEM; goto out; } for (int i = 0; i < nr_data_stripes(map); i++) { ret = init_scrub_stripe(fs_info, &sctx->raid56_data_stripes[i]); if (ret < 0) goto out; sctx->raid56_data_stripes[i].bg = bg; sctx->raid56_data_stripes[i].sctx = sctx; } } /* * There used to be a big double loop to handle all profiles using the * same routine, which grows larger and more gross over time. * * So here we handle each profile differently, so simpler profiles * have simpler scrubbing function. */ if (!(profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10 | BTRFS_BLOCK_GROUP_RAID56_MASK))) { /* * Above check rules out all complex profile, the remaining * profiles are SINGLE|DUP|RAID1|RAID1C*, which is simple * mirrored duplication without stripe. * * Only @physical and @mirror_num needs to calculated using * @stripe_index. 
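* E.g. for a DUP chunk (a made-up case): stripe_index 1 scrubs the second copy, so @physical is map->stripes[1].physical and @mirror_num is 2, while the logical range is the whole block group either way.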
*/ ret = scrub_simple_mirror(sctx, bg, map, bg->start, bg->length, scrub_dev, map->stripes[stripe_index].physical, stripe_index + 1); offset = 0; goto out; } if (profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) { ret = scrub_simple_stripe(sctx, bg, map, scrub_dev, stripe_index); offset = btrfs_stripe_nr_to_offset(stripe_index / map->sub_stripes); goto out; } /* Only RAID56 goes through the old code */ ASSERT(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK); ret = 0; /* Calculate the logical end of the stripe */ get_raid56_logic_offset(physical_end, stripe_index, map, &logic_end, NULL); logic_end += chunk_logical; /* Initialize @offset in case we need to go to out: label */ get_raid56_logic_offset(physical, stripe_index, map, &offset, NULL); increment = btrfs_stripe_nr_to_offset(nr_data_stripes(map)); /* * Due to the rotation, for RAID56 it's better to iterate each stripe * using their physical offset. */ while (physical < physical_end) { ret = get_raid56_logic_offset(physical, stripe_index, map, &logical, &stripe_logical); logical += chunk_logical; if (ret) { /* it is parity strip */ stripe_logical += chunk_logical; ret = scrub_raid56_parity_stripe(sctx, scrub_dev, bg, map, stripe_logical); if (ret) goto out; goto next; } /* * Now we're at a data stripe, scrub each extents in the range. * * At this stage, if we ignore the repair part, inside each data * stripe it is no different than SINGLE profile. * We can reuse scrub_simple_mirror() here, as the repair part * is still based on @mirror_num. */ ret = scrub_simple_mirror(sctx, bg, map, logical, BTRFS_STRIPE_LEN, scrub_dev, physical, 1); if (ret < 0) goto out; next: logical += increment; physical += BTRFS_STRIPE_LEN; spin_lock(&sctx->stat_lock); if (stop_loop) sctx->stat.last_physical = map->stripes[stripe_index].physical + dev_stripe_len; else sctx->stat.last_physical = physical; spin_unlock(&sctx->stat_lock); if (stop_loop) break; } out: ret2 = flush_scrub_stripes(sctx); if (!ret) ret = ret2; btrfs_release_path(&sctx->extent_path); btrfs_release_path(&sctx->csum_path); if (sctx->raid56_data_stripes) { for (int i = 0; i < nr_data_stripes(map); i++) release_scrub_stripe(&sctx->raid56_data_stripes[i]); kfree(sctx->raid56_data_stripes); sctx->raid56_data_stripes = NULL; } if (sctx->is_dev_replace && ret >= 0) { int ret2; ret2 = sync_write_pointer_for_zoned(sctx, chunk_logical + offset, map->stripes[stripe_index].physical, physical_end); if (ret2) ret = ret2; } return ret < 0 ? ret : 0; } static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx, struct btrfs_block_group *bg, struct btrfs_device *scrub_dev, u64 dev_offset, u64 dev_extent_len) { struct btrfs_fs_info *fs_info = sctx->fs_info; struct extent_map_tree *map_tree = &fs_info->mapping_tree; struct map_lookup *map; struct extent_map *em; int i; int ret = 0; read_lock(&map_tree->lock); em = lookup_extent_mapping(map_tree, bg->start, bg->length); read_unlock(&map_tree->lock); if (!em) { /* * Might have been an unused block group deleted by the cleaner * kthread or relocation. 
*/ spin_lock(&bg->lock); if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags)) ret = -EINVAL; spin_unlock(&bg->lock); return ret; } if (em->start != bg->start) goto out; if (em->len < dev_extent_len) goto out; map = em->map_lookup; for (i = 0; i < map->num_stripes; ++i) { if (map->stripes[i].dev->bdev == scrub_dev->bdev && map->stripes[i].physical == dev_offset) { ret = scrub_stripe(sctx, bg, em, scrub_dev, i); if (ret) goto out; } } out: free_extent_map(em); return ret; } static int finish_extent_writes_for_zoned(struct btrfs_root *root, struct btrfs_block_group *cache) { struct btrfs_fs_info *fs_info = cache->fs_info; struct btrfs_trans_handle *trans; if (!btrfs_is_zoned(fs_info)) return 0; btrfs_wait_block_group_reservations(cache); btrfs_wait_nocow_writers(cache); btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start, cache->length); trans = btrfs_join_transaction(root); if (IS_ERR(trans)) return PTR_ERR(trans); return btrfs_commit_transaction(trans); } static noinline_for_stack int scrub_enumerate_chunks(struct scrub_ctx *sctx, struct btrfs_device *scrub_dev, u64 start, u64 end) { struct btrfs_dev_extent *dev_extent = NULL; struct btrfs_path *path; struct btrfs_fs_info *fs_info = sctx->fs_info; struct btrfs_root *root = fs_info->dev_root; u64 chunk_offset; int ret = 0; int ro_set; int slot; struct extent_buffer *l; struct btrfs_key key; struct btrfs_key found_key; struct btrfs_block_group *cache; struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; path = btrfs_alloc_path(); if (!path) return -ENOMEM; path->reada = READA_FORWARD; path->search_commit_root = 1; path->skip_locking = 1; key.objectid = scrub_dev->devid; key.offset = 0ull; key.type = BTRFS_DEV_EXTENT_KEY; while (1) { u64 dev_extent_len; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) break; if (ret > 0) { if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { ret = btrfs_next_leaf(root, path); if (ret < 0) break; if (ret > 0) { ret = 0; break; } } else { ret = 0; } } l = path->nodes[0]; slot = path->slots[0]; btrfs_item_key_to_cpu(l, &found_key, slot); if (found_key.objectid != scrub_dev->devid) break; if (found_key.type != BTRFS_DEV_EXTENT_KEY) break; if (found_key.offset >= end) break; if (found_key.offset < key.offset) break; dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); dev_extent_len = btrfs_dev_extent_length(l, dev_extent); if (found_key.offset + dev_extent_len <= start) goto skip; chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent); /* * get a reference on the corresponding block group to prevent * the chunk from going away while we scrub it */ cache = btrfs_lookup_block_group(fs_info, chunk_offset); /* some chunks are removed but not committed to disk yet, * continue scrubbing */ if (!cache) goto skip; ASSERT(cache->start <= chunk_offset); /* * We are using the commit root to search for device extents, so * that means we could have found a device extent item from a * block group that was deleted in the current transaction. The * logical start offset of the deleted block group, stored at * @chunk_offset, might be part of the logical address range of * a new block group (which uses different physical extents). * In this case btrfs_lookup_block_group() has returned the new * block group, and its start address is less than @chunk_offset. * * We skip such new block groups, because it's pointless to * process them, as we won't find their extents because we search * for them using the commit root of the extent tree. 
For a device * replace it's also fine to skip it, we won't miss copying them * to the target device because we have the write duplication * setup through the regular write path (by btrfs_map_block()), * and we have committed a transaction when we started the device * replace, right after setting up the device replace state. */ if (cache->start < chunk_offset) { btrfs_put_block_group(cache); goto skip; } if (sctx->is_dev_replace && btrfs_is_zoned(fs_info)) { if (!test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags)) { btrfs_put_block_group(cache); goto skip; } } /* * Make sure that while we are scrubbing the corresponding block * group doesn't get its logical address and its device extents * reused for another block group, which can possibly be of a * different type and different profile. We do this to prevent * false error detections and crashes due to bogus attempts to * repair extents. */ spin_lock(&cache->lock); if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags)) { spin_unlock(&cache->lock); btrfs_put_block_group(cache); goto skip; } btrfs_freeze_block_group(cache); spin_unlock(&cache->lock); /* * we need call btrfs_inc_block_group_ro() with scrubs_paused, * to avoid deadlock caused by: * btrfs_inc_block_group_ro() * -> btrfs_wait_for_commit() * -> btrfs_commit_transaction() * -> btrfs_scrub_pause() */ scrub_pause_on(fs_info); /* * Don't do chunk preallocation for scrub. * * This is especially important for SYSTEM bgs, or we can hit * -EFBIG from btrfs_finish_chunk_alloc() like: * 1. The only SYSTEM bg is marked RO. * Since SYSTEM bg is small, that's pretty common. * 2. New SYSTEM bg will be allocated * Due to regular version will allocate new chunk. * 3. New SYSTEM bg is empty and will get cleaned up * Before cleanup really happens, it's marked RO again. * 4. Empty SYSTEM bg get scrubbed * We go back to 2. * * This can easily boost the amount of SYSTEM chunks if cleaner * thread can't be triggered fast enough, and use up all space * of btrfs_super_block::sys_chunk_array * * While for dev replace, we need to try our best to mark block * group RO, to prevent race between: * - Write duplication * Contains latest data * - Scrub copy * Contains data from commit tree * * If target block group is not marked RO, nocow writes can * be overwritten by scrub copy, causing data corruption. * So for dev-replace, it's not allowed to continue if a block * group is not RO. */ ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace); if (!ret && sctx->is_dev_replace) { ret = finish_extent_writes_for_zoned(root, cache); if (ret) { btrfs_dec_block_group_ro(cache); scrub_pause_off(fs_info); btrfs_put_block_group(cache); break; } } if (ret == 0) { ro_set = 1; } else if (ret == -ENOSPC && !sctx->is_dev_replace && !(cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK)) { /* * btrfs_inc_block_group_ro return -ENOSPC when it * failed in creating new chunk for metadata. * It is not a problem for scrub, because * metadata are always cowed, and our scrub paused * commit_transactions. * * For RAID56 chunks, we have to mark them read-only * for scrub, as later we would use our own cache * out of RAID56 realm. * Thus we want the RAID56 bg to be marked RO to * prevent RMW from screwing up out cache. 
*/ ro_set = 0; } else if (ret == -ETXTBSY) { btrfs_warn(fs_info, "skipping scrub of block group %llu due to active swapfile", cache->start); scrub_pause_off(fs_info); ret = 0; goto skip_unfreeze; } else { btrfs_warn(fs_info, "failed setting block group ro: %d", ret); btrfs_unfreeze_block_group(cache); btrfs_put_block_group(cache); scrub_pause_off(fs_info); break; } /* * Now the target block is marked RO, wait for nocow writes to * finish before dev-replace. * COW is fine, as COW never overwrites extents in commit tree. */ if (sctx->is_dev_replace) { btrfs_wait_nocow_writers(cache); btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start, cache->length); } scrub_pause_off(fs_info); down_write(&dev_replace->rwsem); dev_replace->cursor_right = found_key.offset + dev_extent_len; dev_replace->cursor_left = found_key.offset; dev_replace->item_needs_writeback = 1; up_write(&dev_replace->rwsem); ret = scrub_chunk(sctx, cache, scrub_dev, found_key.offset, dev_extent_len); if (sctx->is_dev_replace && !btrfs_finish_block_group_to_copy(dev_replace->srcdev, cache, found_key.offset)) ro_set = 0; down_write(&dev_replace->rwsem); dev_replace->cursor_left = dev_replace->cursor_right; dev_replace->item_needs_writeback = 1; up_write(&dev_replace->rwsem); if (ro_set) btrfs_dec_block_group_ro(cache); /* * We might have prevented the cleaner kthread from deleting * this block group if it was already unused because we raced * and set it to RO mode first. So add it back to the unused * list, otherwise it might not ever be deleted unless a manual * balance is triggered or it becomes used and unused again. */ spin_lock(&cache->lock); if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags) && !cache->ro && cache->reserved == 0 && cache->used == 0) { spin_unlock(&cache->lock); if (btrfs_test_opt(fs_info, DISCARD_ASYNC)) btrfs_discard_queue_work(&fs_info->discard_ctl, cache); else btrfs_mark_bg_unused(cache); } else { spin_unlock(&cache->lock); } skip_unfreeze: btrfs_unfreeze_block_group(cache); btrfs_put_block_group(cache); if (ret) break; if (sctx->is_dev_replace && atomic64_read(&dev_replace->num_write_errors) > 0) { ret = -EIO; break; } if (sctx->stat.malloc_errors > 0) { ret = -ENOMEM; break; } skip: key.offset = found_key.offset + dev_extent_len; btrfs_release_path(path); } btrfs_free_path(path); return ret; } static int scrub_one_super(struct scrub_ctx *sctx, struct btrfs_device *dev, struct page *page, u64 physical, u64 generation) { struct btrfs_fs_info *fs_info = sctx->fs_info; struct bio_vec bvec; struct bio bio; struct btrfs_super_block *sb = page_address(page); int ret; bio_init(&bio, dev->bdev, &bvec, 1, REQ_OP_READ); bio.bi_iter.bi_sector = physical >> SECTOR_SHIFT; __bio_add_page(&bio, page, BTRFS_SUPER_INFO_SIZE, 0); ret = submit_bio_wait(&bio); bio_uninit(&bio); if (ret < 0) return ret; ret = btrfs_check_super_csum(fs_info, sb); if (ret != 0) { btrfs_err_rl(fs_info, "super block at physical %llu devid %llu has bad csum", physical, dev->devid); return -EIO; } if (btrfs_super_generation(sb) != generation) { btrfs_err_rl(fs_info, "super block at physical %llu devid %llu has bad generation %llu expect %llu", physical, dev->devid, btrfs_super_generation(sb), generation); return -EUCLEAN; } return btrfs_validate_super(fs_info, sb, -1); } static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx, struct btrfs_device *scrub_dev) { int i; u64 bytenr; u64 gen; int ret = 0; struct page *page; struct btrfs_fs_info *fs_info = sctx->fs_info; if (BTRFS_FS_ERROR(fs_info)) return -EROFS; page = 
alloc_page(GFP_KERNEL); if (!page) { spin_lock(&sctx->stat_lock); sctx->stat.malloc_errors++; spin_unlock(&sctx->stat_lock); return -ENOMEM; } /* Seed devices of a new filesystem has their own generation. */ if (scrub_dev->fs_devices != fs_info->fs_devices) gen = scrub_dev->generation; else gen = fs_info->last_trans_committed; for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { bytenr = btrfs_sb_offset(i); if (bytenr + BTRFS_SUPER_INFO_SIZE > scrub_dev->commit_total_bytes) break; if (!btrfs_check_super_location(scrub_dev, bytenr)) continue; ret = scrub_one_super(sctx, scrub_dev, page, bytenr, gen); if (ret) { spin_lock(&sctx->stat_lock); sctx->stat.super_errors++; spin_unlock(&sctx->stat_lock); } } __free_page(page); return 0; } static void scrub_workers_put(struct btrfs_fs_info *fs_info) { if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt, &fs_info->scrub_lock)) { struct workqueue_struct *scrub_workers = fs_info->scrub_workers; fs_info->scrub_workers = NULL; mutex_unlock(&fs_info->scrub_lock); if (scrub_workers) destroy_workqueue(scrub_workers); } } /* * get a reference count on fs_info->scrub_workers. start worker if necessary */ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info) { struct workqueue_struct *scrub_workers = NULL; unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND; int max_active = fs_info->thread_pool_size; int ret = -ENOMEM; if (refcount_inc_not_zero(&fs_info->scrub_workers_refcnt)) return 0; scrub_workers = alloc_workqueue("btrfs-scrub", flags, max_active); if (!scrub_workers) return -ENOMEM; mutex_lock(&fs_info->scrub_lock); if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) { ASSERT(fs_info->scrub_workers == NULL); fs_info->scrub_workers = scrub_workers; refcount_set(&fs_info->scrub_workers_refcnt, 1); mutex_unlock(&fs_info->scrub_lock); return 0; } /* Other thread raced in and created the workers for us */ refcount_inc(&fs_info->scrub_workers_refcnt); mutex_unlock(&fs_info->scrub_lock); ret = 0; destroy_workqueue(scrub_workers); return ret; } int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, u64 end, struct btrfs_scrub_progress *progress, int readonly, int is_dev_replace) { struct btrfs_dev_lookup_args args = { .devid = devid }; struct scrub_ctx *sctx; int ret; struct btrfs_device *dev; unsigned int nofs_flag; bool need_commit = false; if (btrfs_fs_closing(fs_info)) return -EAGAIN; /* At mount time we have ensured nodesize is in the range of [4K, 64K]. */ ASSERT(fs_info->nodesize <= BTRFS_STRIPE_LEN); /* * SCRUB_MAX_SECTORS_PER_BLOCK is calculated using the largest possible * value (max nodesize / min sectorsize), thus nodesize should always * be fine. 
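* E.g. with the 64K maximum nodesize and a 4K minimum sectorsize, SCRUB_MAX_SECTORS_PER_BLOCK works out to 16, so the assertion below reduces to nodesize <= 16 * sectorsize, which holds for every supported combination.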
*/ ASSERT(fs_info->nodesize <= SCRUB_MAX_SECTORS_PER_BLOCK << fs_info->sectorsize_bits); /* Allocate outside of device_list_mutex */ sctx = scrub_setup_ctx(fs_info, is_dev_replace); if (IS_ERR(sctx)) return PTR_ERR(sctx); ret = scrub_workers_get(fs_info); if (ret) goto out_free_ctx; mutex_lock(&fs_info->fs_devices->device_list_mutex); dev = btrfs_find_device(fs_info->fs_devices, &args); if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) && !is_dev_replace)) { mutex_unlock(&fs_info->fs_devices->device_list_mutex); ret = -ENODEV; goto out; } if (!is_dev_replace && !readonly && !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) { mutex_unlock(&fs_info->fs_devices->device_list_mutex); btrfs_err_in_rcu(fs_info, "scrub on devid %llu: filesystem on %s is not writable", devid, btrfs_dev_name(dev)); ret = -EROFS; goto out; } mutex_lock(&fs_info->scrub_lock); if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) || test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) { mutex_unlock(&fs_info->scrub_lock); mutex_unlock(&fs_info->fs_devices->device_list_mutex); ret = -EIO; goto out; } down_read(&fs_info->dev_replace.rwsem); if (dev->scrub_ctx || (!is_dev_replace && btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) { up_read(&fs_info->dev_replace.rwsem); mutex_unlock(&fs_info->scrub_lock); mutex_unlock(&fs_info->fs_devices->device_list_mutex); ret = -EINPROGRESS; goto out; } up_read(&fs_info->dev_replace.rwsem); sctx->readonly = readonly; dev->scrub_ctx = sctx; mutex_unlock(&fs_info->fs_devices->device_list_mutex); /* * checking @scrub_pause_req here, we can avoid * race between committing transaction and scrubbing. */ __scrub_blocked_if_needed(fs_info); atomic_inc(&fs_info->scrubs_running); mutex_unlock(&fs_info->scrub_lock); /* * In order to avoid deadlock with reclaim when there is a transaction * trying to pause scrub, make sure we use GFP_NOFS for all the * allocations done at btrfs_scrub_sectors() and scrub_sectors_for_parity() * invoked by our callees. The pausing request is done when the * transaction commit starts, and it blocks the transaction until scrub * is paused (done at specific points at scrub_stripe() or right above * before incrementing fs_info->scrubs_running). */ nofs_flag = memalloc_nofs_save(); if (!is_dev_replace) { u64 old_super_errors; spin_lock(&sctx->stat_lock); old_super_errors = sctx->stat.super_errors; spin_unlock(&sctx->stat_lock); btrfs_info(fs_info, "scrub: started on devid %llu", devid); /* * by holding device list mutex, we can * kick off writing super in log tree sync. */ mutex_lock(&fs_info->fs_devices->device_list_mutex); ret = scrub_supers(sctx, dev); mutex_unlock(&fs_info->fs_devices->device_list_mutex); spin_lock(&sctx->stat_lock); /* * Super block errors found, but we can not commit transaction * at current context, since btrfs_commit_transaction() needs * to pause the current running scrub (hold by ourselves). */ if (sctx->stat.super_errors > old_super_errors && !sctx->readonly) need_commit = true; spin_unlock(&sctx->stat_lock); } if (!ret) ret = scrub_enumerate_chunks(sctx, dev, start, end); memalloc_nofs_restore(nofs_flag); atomic_dec(&fs_info->scrubs_running); wake_up(&fs_info->scrub_pause_wait); if (progress) memcpy(progress, &sctx->stat, sizeof(*progress)); if (!is_dev_replace) btrfs_info(fs_info, "scrub: %s on devid %llu with status: %d", ret ? 
"not finished" : "finished", devid, ret); mutex_lock(&fs_info->scrub_lock); dev->scrub_ctx = NULL; mutex_unlock(&fs_info->scrub_lock); scrub_workers_put(fs_info); scrub_put_ctx(sctx); /* * We found some super block errors before, now try to force a * transaction commit, as scrub has finished. */ if (need_commit) { struct btrfs_trans_handle *trans; trans = btrfs_start_transaction(fs_info->tree_root, 0); if (IS_ERR(trans)) { ret = PTR_ERR(trans); btrfs_err(fs_info, "scrub: failed to start transaction to fix super block errors: %d", ret); return ret; } ret = btrfs_commit_transaction(trans); if (ret < 0) btrfs_err(fs_info, "scrub: failed to commit transaction to fix super block errors: %d", ret); } return ret; out: scrub_workers_put(fs_info); out_free_ctx: scrub_free_ctx(sctx); return ret; } void btrfs_scrub_pause(struct btrfs_fs_info *fs_info) { mutex_lock(&fs_info->scrub_lock); atomic_inc(&fs_info->scrub_pause_req); while (atomic_read(&fs_info->scrubs_paused) != atomic_read(&fs_info->scrubs_running)) { mutex_unlock(&fs_info->scrub_lock); wait_event(fs_info->scrub_pause_wait, atomic_read(&fs_info->scrubs_paused) == atomic_read(&fs_info->scrubs_running)); mutex_lock(&fs_info->scrub_lock); } mutex_unlock(&fs_info->scrub_lock); } void btrfs_scrub_continue(struct btrfs_fs_info *fs_info) { atomic_dec(&fs_info->scrub_pause_req); wake_up(&fs_info->scrub_pause_wait); } int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info) { mutex_lock(&fs_info->scrub_lock); if (!atomic_read(&fs_info->scrubs_running)) { mutex_unlock(&fs_info->scrub_lock); return -ENOTCONN; } atomic_inc(&fs_info->scrub_cancel_req); while (atomic_read(&fs_info->scrubs_running)) { mutex_unlock(&fs_info->scrub_lock); wait_event(fs_info->scrub_pause_wait, atomic_read(&fs_info->scrubs_running) == 0); mutex_lock(&fs_info->scrub_lock); } atomic_dec(&fs_info->scrub_cancel_req); mutex_unlock(&fs_info->scrub_lock); return 0; } int btrfs_scrub_cancel_dev(struct btrfs_device *dev) { struct btrfs_fs_info *fs_info = dev->fs_info; struct scrub_ctx *sctx; mutex_lock(&fs_info->scrub_lock); sctx = dev->scrub_ctx; if (!sctx) { mutex_unlock(&fs_info->scrub_lock); return -ENOTCONN; } atomic_inc(&sctx->cancel_req); while (dev->scrub_ctx) { mutex_unlock(&fs_info->scrub_lock); wait_event(fs_info->scrub_pause_wait, dev->scrub_ctx == NULL); mutex_lock(&fs_info->scrub_lock); } mutex_unlock(&fs_info->scrub_lock); return 0; } int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid, struct btrfs_scrub_progress *progress) { struct btrfs_dev_lookup_args args = { .devid = devid }; struct btrfs_device *dev; struct scrub_ctx *sctx = NULL; mutex_lock(&fs_info->fs_devices->device_list_mutex); dev = btrfs_find_device(fs_info->fs_devices, &args); if (dev) sctx = dev->scrub_ctx; if (sctx) memcpy(progress, &sctx->stat, sizeof(*progress)); mutex_unlock(&fs_info->fs_devices->device_list_mutex); return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV; }
linux-master
fs/btrfs/scrub.c
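The scrub code above coordinates pausing through counters (scrub_pause_req, scrubs_running, scrubs_paused) and the scrub_pause_wait wait queue: btrfs_scrub_pause() waits until every running scrub has parked itself, and workers park at __scrub_blocked_if_needed(). The following is a minimal userspace sketch of that handshake using pthreads; the struct, field names, and mutex/condvar stand-ins for the kernel's atomics and wait queue are illustrative assumptions, not the kernel API.

/*
 * Sketch of the scrub pause handshake: the pauser bumps pause_req and
 * waits until every running worker has parked (paused == running),
 * mirroring btrfs_scrub_pause(); workers park in the helper below,
 * mirroring __scrub_blocked_if_needed(). Hypothetical names throughout.
 */
#include <pthread.h>

struct scrub_sync {
	pthread_mutex_t lock;
	pthread_cond_t wait;	/* stands in for scrub_pause_wait */
	int pause_req;		/* ~ fs_info->scrub_pause_req */
	int running;		/* ~ fs_info->scrubs_running */
	int paused;		/* ~ fs_info->scrubs_paused */
};

#define SCRUB_SYNC_INIT \
	{ PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0, 0, 0 }

/* Transaction-commit side: forbid new progress, wait for all to park. */
static void scrub_pause(struct scrub_sync *s)
{
	pthread_mutex_lock(&s->lock);
	s->pause_req++;
	while (s->paused != s->running)
		pthread_cond_wait(&s->wait, &s->lock);
	pthread_mutex_unlock(&s->lock);
}

static void scrub_continue(struct scrub_sync *s)
{
	pthread_mutex_lock(&s->lock);
	s->pause_req--;
	pthread_cond_broadcast(&s->wait);
	pthread_mutex_unlock(&s->lock);
}

/* Worker side: called at safe pause points while running is elevated. */
static void scrub_blocked_if_needed(struct scrub_sync *s)
{
	pthread_mutex_lock(&s->lock);
	while (s->pause_req) {
		s->paused++;
		pthread_cond_broadcast(&s->wait);	/* let the pauser see us parked */
		while (s->pause_req)
			pthread_cond_wait(&s->wait, &s->lock);
		s->paused--;
	}
	pthread_mutex_unlock(&s->lock);
}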
// SPDX-License-Identifier: GPL-2.0 #include <linux/mm.h> #include "lru_cache.h" #include "messages.h" /* * Initialize a cache object. * * @cache: The cache. * @max_size: Maximum size (number of entries) for the cache. * Use 0 for unlimited size, it's the user's responsability to * trim the cache in that case. */ void btrfs_lru_cache_init(struct btrfs_lru_cache *cache, unsigned int max_size) { INIT_LIST_HEAD(&cache->lru_list); mt_init(&cache->entries); cache->size = 0; cache->max_size = max_size; } static struct btrfs_lru_cache_entry *match_entry(struct list_head *head, u64 key, u64 gen) { struct btrfs_lru_cache_entry *entry; list_for_each_entry(entry, head, list) { if (entry->key == key && entry->gen == gen) return entry; } return NULL; } /* * Lookup for an entry in the cache. * * @cache: The cache. * @key: The key of the entry we are looking for. * @gen: Generation associated to the key. * * Returns the entry associated with the key or NULL if none found. */ struct btrfs_lru_cache_entry *btrfs_lru_cache_lookup(struct btrfs_lru_cache *cache, u64 key, u64 gen) { struct list_head *head; struct btrfs_lru_cache_entry *entry; head = mtree_load(&cache->entries, key); if (!head) return NULL; entry = match_entry(head, key, gen); if (entry) list_move_tail(&entry->lru_list, &cache->lru_list); return entry; } /* * Remove an entry from the cache. * * @cache: The cache to remove from. * @entry: The entry to remove from the cache. * * Note: this also frees the memory used by the entry. */ void btrfs_lru_cache_remove(struct btrfs_lru_cache *cache, struct btrfs_lru_cache_entry *entry) { struct list_head *prev = entry->list.prev; ASSERT(cache->size > 0); ASSERT(!mtree_empty(&cache->entries)); list_del(&entry->list); list_del(&entry->lru_list); if (list_empty(prev)) { struct list_head *head; /* * If previous element in the list entry->list is now empty, it * means it's a head entry not pointing to any cached entries, * so remove it from the maple tree and free it. */ head = mtree_erase(&cache->entries, entry->key); ASSERT(head == prev); kfree(head); } kfree(entry); cache->size--; } /* * Store an entry in the cache. * * @cache: The cache. * @entry: The entry to store. * * Returns 0 on success and < 0 on error. */ int btrfs_lru_cache_store(struct btrfs_lru_cache *cache, struct btrfs_lru_cache_entry *new_entry, gfp_t gfp) { const u64 key = new_entry->key; struct list_head *head; int ret; head = kmalloc(sizeof(*head), gfp); if (!head) return -ENOMEM; ret = mtree_insert(&cache->entries, key, head, gfp); if (ret == 0) { INIT_LIST_HEAD(head); list_add_tail(&new_entry->list, head); } else if (ret == -EEXIST) { kfree(head); head = mtree_load(&cache->entries, key); ASSERT(head != NULL); if (match_entry(head, key, new_entry->gen) != NULL) return -EEXIST; list_add_tail(&new_entry->list, head); } else if (ret < 0) { kfree(head); return ret; } if (cache->max_size > 0 && cache->size == cache->max_size) { struct btrfs_lru_cache_entry *lru_entry; lru_entry = list_first_entry(&cache->lru_list, struct btrfs_lru_cache_entry, lru_list); btrfs_lru_cache_remove(cache, lru_entry); } list_add_tail(&new_entry->lru_list, &cache->lru_list); cache->size++; return 0; } /* * Empty a cache. * * @cache: The cache to empty. * * Removes all entries from the cache. 
*/ void btrfs_lru_cache_clear(struct btrfs_lru_cache *cache) { struct btrfs_lru_cache_entry *entry; struct btrfs_lru_cache_entry *tmp; list_for_each_entry_safe(entry, tmp, &cache->lru_list, lru_list) btrfs_lru_cache_remove(cache, entry); ASSERT(cache->size == 0); ASSERT(mtree_empty(&cache->entries)); }
linux-master
fs/btrfs/lru_cache.c
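lru_cache.c keys entries by a (key, generation) pair, refreshes an entry's position on lookup, and evicts the least recently used entry when a store would exceed max_size. Below is a compact userspace sketch of that store/lookup/evict behaviour; a plain doubly linked list with linear lookup replaces the maple tree purely for illustration, and all names are assumptions rather than the btrfs API.

/*
 * Userspace sketch of the LRU behaviour: lookups move a hit to the MRU
 * (tail) end, stores evict the LRU (head) entry when the cache is full.
 */
#include <stdint.h>
#include <stdlib.h>

struct lru_entry {
	uint64_t key;
	uint64_t gen;
	struct lru_entry *prev, *next;	/* LRU list, head = oldest */
};

struct lru_cache {
	struct lru_entry *head, *tail;
	unsigned int size, max_size;	/* max_size == 0 means unlimited */
};

static void lru_unlink(struct lru_cache *c, struct lru_entry *e)
{
	if (e->prev) e->prev->next = e->next; else c->head = e->next;
	if (e->next) e->next->prev = e->prev; else c->tail = e->prev;
	e->prev = e->next = NULL;
}

static void lru_link_tail(struct lru_cache *c, struct lru_entry *e)
{
	e->prev = c->tail;
	e->next = NULL;
	if (c->tail) c->tail->next = e; else c->head = e;
	c->tail = e;
}

/* Roughly btrfs_lru_cache_lookup(): a hit is refreshed to the MRU end. */
static struct lru_entry *lru_lookup(struct lru_cache *c, uint64_t key, uint64_t gen)
{
	for (struct lru_entry *e = c->head; e; e = e->next) {
		if (e->key == key && e->gen == gen) {
			lru_unlink(c, e);
			lru_link_tail(c, e);
			return e;
		}
	}
	return NULL;
}

/* Roughly btrfs_lru_cache_store(): evict the LRU head when full. */
static int lru_store(struct lru_cache *c, uint64_t key, uint64_t gen)
{
	struct lru_entry *e;

	if (lru_lookup(c, key, gen))
		return -1;				/* -EEXIST in the kernel code */
	if (c->max_size && c->size == c->max_size) {
		struct lru_entry *victim = c->head;

		lru_unlink(c, victim);
		free(victim);
		c->size--;
	}
	e = calloc(1, sizeof(*e));
	if (!e)
		return -1;
	e->key = key;
	e->gen = gen;
	lru_link_tail(c, e);
	c->size++;
	return 0;
}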
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2008 Oracle. All rights reserved. */ #include <linux/sched.h> #include <linux/pagemap.h> #include <linux/spinlock.h> #include <linux/page-flags.h> #include <asm/bug.h> #include "misc.h" #include "ctree.h" #include "extent_io.h" #include "locking.h" #include "accessors.h" /* * Lockdep class keys for extent_buffer->lock's in this root. For a given * eb, the lockdep key is determined by the btrfs_root it belongs to and * the level the eb occupies in the tree. * * Different roots are used for different purposes and may nest inside each * other and they require separate keysets. As lockdep keys should be * static, assign keysets according to the purpose of the root as indicated * by btrfs_root->root_key.objectid. This ensures that all special purpose * roots have separate keysets. * * Lock-nesting across peer nodes is always done with the immediate parent * node locked thus preventing deadlock. As lockdep doesn't know this, use * subclass to avoid triggering lockdep warning in such cases. * * The key is set by the readpage_end_io_hook after the buffer has passed * csum validation but before the pages are unlocked. It is also set by * btrfs_init_new_buffer on freshly allocated blocks. * * We also add a check to make sure the highest level of the tree is the * same as our lockdep setup here. If BTRFS_MAX_LEVEL changes, this code * needs update as well. */ #ifdef CONFIG_DEBUG_LOCK_ALLOC #if BTRFS_MAX_LEVEL != 8 #error #endif #define DEFINE_LEVEL(stem, level) \ .names[level] = "btrfs-" stem "-0" #level, #define DEFINE_NAME(stem) \ DEFINE_LEVEL(stem, 0) \ DEFINE_LEVEL(stem, 1) \ DEFINE_LEVEL(stem, 2) \ DEFINE_LEVEL(stem, 3) \ DEFINE_LEVEL(stem, 4) \ DEFINE_LEVEL(stem, 5) \ DEFINE_LEVEL(stem, 6) \ DEFINE_LEVEL(stem, 7) static struct btrfs_lockdep_keyset { u64 id; /* root objectid */ /* Longest entry: btrfs-block-group-00 */ char names[BTRFS_MAX_LEVEL][24]; struct lock_class_key keys[BTRFS_MAX_LEVEL]; } btrfs_lockdep_keysets[] = { { .id = BTRFS_ROOT_TREE_OBJECTID, DEFINE_NAME("root") }, { .id = BTRFS_EXTENT_TREE_OBJECTID, DEFINE_NAME("extent") }, { .id = BTRFS_CHUNK_TREE_OBJECTID, DEFINE_NAME("chunk") }, { .id = BTRFS_DEV_TREE_OBJECTID, DEFINE_NAME("dev") }, { .id = BTRFS_CSUM_TREE_OBJECTID, DEFINE_NAME("csum") }, { .id = BTRFS_QUOTA_TREE_OBJECTID, DEFINE_NAME("quota") }, { .id = BTRFS_TREE_LOG_OBJECTID, DEFINE_NAME("log") }, { .id = BTRFS_TREE_RELOC_OBJECTID, DEFINE_NAME("treloc") }, { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, DEFINE_NAME("dreloc") }, { .id = BTRFS_UUID_TREE_OBJECTID, DEFINE_NAME("uuid") }, { .id = BTRFS_FREE_SPACE_TREE_OBJECTID, DEFINE_NAME("free-space") }, { .id = BTRFS_BLOCK_GROUP_TREE_OBJECTID, DEFINE_NAME("block-group") }, { .id = 0, DEFINE_NAME("tree") }, }; #undef DEFINE_LEVEL #undef DEFINE_NAME void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb, int level) { struct btrfs_lockdep_keyset *ks; BUG_ON(level >= ARRAY_SIZE(ks->keys)); /* Find the matching keyset, id 0 is the default entry */ for (ks = btrfs_lockdep_keysets; ks->id; ks++) if (ks->id == objectid) break; lockdep_set_class_and_name(&eb->lock, &ks->keys[level], ks->names[level]); } void btrfs_maybe_reset_lockdep_class(struct btrfs_root *root, struct extent_buffer *eb) { if (test_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &root->state)) btrfs_set_buffer_lockdep_class(root->root_key.objectid, eb, btrfs_header_level(eb)); } #endif /* * Extent buffer locking * ===================== * * We use a rw_semaphore for tree locking, and the semantics are exactly 
the * same: * * - reader/writer exclusion * - writer/writer exclusion * - reader/reader sharing * - try-lock semantics for readers and writers * * The rwsem implementation does opportunistic spinning which reduces number of * times the locking task needs to sleep. */ /* * __btrfs_tree_read_lock - lock extent buffer for read * @eb: the eb to be locked * @nest: the nesting level to be used for lockdep * * This takes the read lock on the extent buffer, using the specified nesting * level for lockdep purposes. */ void __btrfs_tree_read_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest) { u64 start_ns = 0; if (trace_btrfs_tree_read_lock_enabled()) start_ns = ktime_get_ns(); down_read_nested(&eb->lock, nest); trace_btrfs_tree_read_lock(eb, start_ns); } void btrfs_tree_read_lock(struct extent_buffer *eb) { __btrfs_tree_read_lock(eb, BTRFS_NESTING_NORMAL); } /* * Try-lock for read. * * Return 1 if the rwlock has been taken, 0 otherwise */ int btrfs_try_tree_read_lock(struct extent_buffer *eb) { if (down_read_trylock(&eb->lock)) { trace_btrfs_try_tree_read_lock(eb); return 1; } return 0; } /* * Try-lock for write. * * Return 1 if the rwlock has been taken, 0 otherwise */ int btrfs_try_tree_write_lock(struct extent_buffer *eb) { if (down_write_trylock(&eb->lock)) { eb->lock_owner = current->pid; trace_btrfs_try_tree_write_lock(eb); return 1; } return 0; } /* * Release read lock. */ void btrfs_tree_read_unlock(struct extent_buffer *eb) { trace_btrfs_tree_read_unlock(eb); up_read(&eb->lock); } /* * __btrfs_tree_lock - lock eb for write * @eb: the eb to lock * @nest: the nesting to use for the lock * * Returns with the eb->lock write locked. */ void __btrfs_tree_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest) __acquires(&eb->lock) { u64 start_ns = 0; if (trace_btrfs_tree_lock_enabled()) start_ns = ktime_get_ns(); down_write_nested(&eb->lock, nest); eb->lock_owner = current->pid; trace_btrfs_tree_lock(eb, start_ns); } void btrfs_tree_lock(struct extent_buffer *eb) { __btrfs_tree_lock(eb, BTRFS_NESTING_NORMAL); } /* * Release the write lock. */ void btrfs_tree_unlock(struct extent_buffer *eb) { trace_btrfs_tree_unlock(eb); eb->lock_owner = 0; up_write(&eb->lock); } /* * This releases any locks held in the path starting at level and going all the * way up to the root. * * btrfs_search_slot will keep the lock held on higher nodes in a few corner * cases, such as COW of the block at slot zero in the node. This ignores * those rules, and it should only be called when there are no more updates to * be done higher up in the tree. */ void btrfs_unlock_up_safe(struct btrfs_path *path, int level) { int i; if (path->keep_locks) return; for (i = level; i < BTRFS_MAX_LEVEL; i++) { if (!path->nodes[i]) continue; if (!path->locks[i]) continue; btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]); path->locks[i] = 0; } } /* * Loop around taking references on and locking the root node of the tree until * we end up with a lock on the root node. * * Return: root extent buffer with write lock held */ struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root) { struct extent_buffer *eb; while (1) { eb = btrfs_root_node(root); btrfs_maybe_reset_lockdep_class(root, eb); btrfs_tree_lock(eb); if (eb == root->node) break; btrfs_tree_unlock(eb); free_extent_buffer(eb); } return eb; } /* * Loop around taking references on and locking the root node of the tree until * we end up with a lock on the root node. 
* * Return: root extent buffer with read lock held */ struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root) { struct extent_buffer *eb; while (1) { eb = btrfs_root_node(root); btrfs_maybe_reset_lockdep_class(root, eb); btrfs_tree_read_lock(eb); if (eb == root->node) break; btrfs_tree_read_unlock(eb); free_extent_buffer(eb); } return eb; } /* * Loop around taking references on and locking the root node of the tree in * nowait mode until we end up with a lock on the root node or returning to * avoid blocking. * * Return: root extent buffer with read lock held or -EAGAIN. */ struct extent_buffer *btrfs_try_read_lock_root_node(struct btrfs_root *root) { struct extent_buffer *eb; while (1) { eb = btrfs_root_node(root); if (!btrfs_try_tree_read_lock(eb)) { free_extent_buffer(eb); return ERR_PTR(-EAGAIN); } if (eb == root->node) break; btrfs_tree_read_unlock(eb); free_extent_buffer(eb); } return eb; } /* * DREW locks * ========== * * DREW stands for double-reader-writer-exclusion lock. It's used in situation * where you want to provide A-B exclusion but not AA or BB. * * Currently implementation gives more priority to reader. If a reader and a * writer both race to acquire their respective sides of the lock the writer * would yield its lock as soon as it detects a concurrent reader. Additionally * if there are pending readers no new writers would be allowed to come in and * acquire the lock. */ void btrfs_drew_lock_init(struct btrfs_drew_lock *lock) { atomic_set(&lock->readers, 0); atomic_set(&lock->writers, 0); init_waitqueue_head(&lock->pending_readers); init_waitqueue_head(&lock->pending_writers); } /* Return true if acquisition is successful, false otherwise */ bool btrfs_drew_try_write_lock(struct btrfs_drew_lock *lock) { if (atomic_read(&lock->readers)) return false; atomic_inc(&lock->writers); /* Ensure writers count is updated before we check for pending readers */ smp_mb__after_atomic(); if (atomic_read(&lock->readers)) { btrfs_drew_write_unlock(lock); return false; } return true; } void btrfs_drew_write_lock(struct btrfs_drew_lock *lock) { while (true) { if (btrfs_drew_try_write_lock(lock)) return; wait_event(lock->pending_writers, !atomic_read(&lock->readers)); } } void btrfs_drew_write_unlock(struct btrfs_drew_lock *lock) { atomic_dec(&lock->writers); cond_wake_up(&lock->pending_readers); } void btrfs_drew_read_lock(struct btrfs_drew_lock *lock) { atomic_inc(&lock->readers); /* * Ensure the pending reader count is perceieved BEFORE this reader * goes to sleep in case of active writers. This guarantees new writers * won't be allowed and that the current reader will be woken up when * the last active writer finishes its jobs. */ smp_mb__after_atomic(); wait_event(lock->pending_readers, atomic_read(&lock->writers) == 0); } void btrfs_drew_read_unlock(struct btrfs_drew_lock *lock) { /* * atomic_dec_and_test implies a full barrier, so woken up writers * are guaranteed to see the decrement */ if (atomic_dec_and_test(&lock->readers)) wake_up(&lock->pending_writers); }
linux-master
fs/btrfs/locking.c
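The DREW lock comment block in locking.c describes A-B exclusion without AA or BB exclusion, with readers given priority: a writer backs off as soon as it sees a concurrent reader, and pending readers keep new writers out. The sketch below illustrates that policy in userspace with a single mutex and condition variable instead of the kernel's atomics, memory barriers, and wait queues; the names and the simplified fairness behaviour are assumptions, not the btrfs implementation.

/*
 * Userspace sketch of a DREW-style lock: many "readers" (A side) or many
 * "writers" (B side) may hold it at once, but the two sides exclude each
 * other, and readers take priority over writers.
 */
#include <pthread.h>
#include <stdbool.h>

struct drew_lock {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int readers;		/* active A-side holders */
	int writers;		/* active B-side holders */
};

#define DREW_LOCK_INIT \
	{ PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0, 0 }

static bool drew_try_write_lock(struct drew_lock *d)
{
	bool ok = false;

	pthread_mutex_lock(&d->lock);
	if (d->readers == 0) {		/* writers yield to any reader */
		d->writers++;		/* BB sharing is allowed */
		ok = true;
	}
	pthread_mutex_unlock(&d->lock);
	return ok;
}

static void drew_write_lock(struct drew_lock *d)
{
	pthread_mutex_lock(&d->lock);
	while (d->readers)
		pthread_cond_wait(&d->cond, &d->lock);
	d->writers++;
	pthread_mutex_unlock(&d->lock);
}

static void drew_write_unlock(struct drew_lock *d)
{
	pthread_mutex_lock(&d->lock);
	if (--d->writers == 0)
		pthread_cond_broadcast(&d->cond);
	pthread_mutex_unlock(&d->lock);
}

static void drew_read_lock(struct drew_lock *d)
{
	pthread_mutex_lock(&d->lock);
	d->readers++;			/* registering first blocks new writers */
	while (d->writers)
		pthread_cond_wait(&d->cond, &d->lock);
	pthread_mutex_unlock(&d->lock);
}

static void drew_read_unlock(struct drew_lock *d)
{
	pthread_mutex_lock(&d->lock);
	if (--d->readers == 0)
		pthread_cond_broadcast(&d->cond);
	pthread_mutex_unlock(&d->lock);
}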
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2015 Facebook. All rights reserved. */ #include <linux/kernel.h> #include <linux/sched/mm.h> #include "messages.h" #include "ctree.h" #include "disk-io.h" #include "locking.h" #include "free-space-tree.h" #include "transaction.h" #include "block-group.h" #include "fs.h" #include "accessors.h" #include "extent-tree.h" #include "root-tree.h" static int __add_block_group_free_space(struct btrfs_trans_handle *trans, struct btrfs_block_group *block_group, struct btrfs_path *path); static struct btrfs_root *btrfs_free_space_root( struct btrfs_block_group *block_group) { struct btrfs_key key = { .objectid = BTRFS_FREE_SPACE_TREE_OBJECTID, .type = BTRFS_ROOT_ITEM_KEY, .offset = 0, }; if (btrfs_fs_incompat(block_group->fs_info, EXTENT_TREE_V2)) key.offset = block_group->global_root_id; return btrfs_global_root(block_group->fs_info, &key); } void set_free_space_tree_thresholds(struct btrfs_block_group *cache) { u32 bitmap_range; size_t bitmap_size; u64 num_bitmaps, total_bitmap_size; if (WARN_ON(cache->length == 0)) btrfs_warn(cache->fs_info, "block group %llu length is zero", cache->start); /* * We convert to bitmaps when the disk space required for using extents * exceeds that required for using bitmaps. */ bitmap_range = cache->fs_info->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS; num_bitmaps = div_u64(cache->length + bitmap_range - 1, bitmap_range); bitmap_size = sizeof(struct btrfs_item) + BTRFS_FREE_SPACE_BITMAP_SIZE; total_bitmap_size = num_bitmaps * bitmap_size; cache->bitmap_high_thresh = div_u64(total_bitmap_size, sizeof(struct btrfs_item)); /* * We allow for a small buffer between the high threshold and low * threshold to avoid thrashing back and forth between the two formats. */ if (cache->bitmap_high_thresh > 100) cache->bitmap_low_thresh = cache->bitmap_high_thresh - 100; else cache->bitmap_low_thresh = 0; } static int add_new_free_space_info(struct btrfs_trans_handle *trans, struct btrfs_block_group *block_group, struct btrfs_path *path) { struct btrfs_root *root = btrfs_free_space_root(block_group); struct btrfs_free_space_info *info; struct btrfs_key key; struct extent_buffer *leaf; int ret; key.objectid = block_group->start; key.type = BTRFS_FREE_SPACE_INFO_KEY; key.offset = block_group->length; ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*info)); if (ret) goto out; leaf = path->nodes[0]; info = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_free_space_info); btrfs_set_free_space_extent_count(leaf, info, 0); btrfs_set_free_space_flags(leaf, info, 0); btrfs_mark_buffer_dirty(leaf); ret = 0; out: btrfs_release_path(path); return ret; } EXPORT_FOR_TESTS struct btrfs_free_space_info *search_free_space_info( struct btrfs_trans_handle *trans, struct btrfs_block_group *block_group, struct btrfs_path *path, int cow) { struct btrfs_fs_info *fs_info = block_group->fs_info; struct btrfs_root *root = btrfs_free_space_root(block_group); struct btrfs_key key; int ret; key.objectid = block_group->start; key.type = BTRFS_FREE_SPACE_INFO_KEY; key.offset = block_group->length; ret = btrfs_search_slot(trans, root, &key, path, 0, cow); if (ret < 0) return ERR_PTR(ret); if (ret != 0) { btrfs_warn(fs_info, "missing free space info for %llu", block_group->start); ASSERT(0); return ERR_PTR(-ENOENT); } return btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_free_space_info); } /* * btrfs_search_slot() but we're looking for the greatest key less than the * passed key. 
*/ static int btrfs_search_prev_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_key *key, struct btrfs_path *p, int ins_len, int cow) { int ret; ret = btrfs_search_slot(trans, root, key, p, ins_len, cow); if (ret < 0) return ret; if (ret == 0) { ASSERT(0); return -EIO; } if (p->slots[0] == 0) { ASSERT(0); return -EIO; } p->slots[0]--; return 0; } static inline u32 free_space_bitmap_size(const struct btrfs_fs_info *fs_info, u64 size) { return DIV_ROUND_UP(size >> fs_info->sectorsize_bits, BITS_PER_BYTE); } static unsigned long *alloc_bitmap(u32 bitmap_size) { unsigned long *ret; unsigned int nofs_flag; u32 bitmap_rounded_size = round_up(bitmap_size, sizeof(unsigned long)); /* * GFP_NOFS doesn't work with kvmalloc(), but we really can't recurse * into the filesystem as the free space bitmap can be modified in the * critical section of a transaction commit. * * TODO: push the memalloc_nofs_{save,restore}() to the caller where we * know that recursion is unsafe. */ nofs_flag = memalloc_nofs_save(); ret = kvzalloc(bitmap_rounded_size, GFP_KERNEL); memalloc_nofs_restore(nofs_flag); return ret; } static void le_bitmap_set(unsigned long *map, unsigned int start, int len) { u8 *p = ((u8 *)map) + BIT_BYTE(start); const unsigned int size = start + len; int bits_to_set = BITS_PER_BYTE - (start % BITS_PER_BYTE); u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(start); while (len - bits_to_set >= 0) { *p |= mask_to_set; len -= bits_to_set; bits_to_set = BITS_PER_BYTE; mask_to_set = ~0; p++; } if (len) { mask_to_set &= BITMAP_LAST_BYTE_MASK(size); *p |= mask_to_set; } } EXPORT_FOR_TESTS int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans, struct btrfs_block_group *block_group, struct btrfs_path *path) { struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_root *root = btrfs_free_space_root(block_group); struct btrfs_free_space_info *info; struct btrfs_key key, found_key; struct extent_buffer *leaf; unsigned long *bitmap; char *bitmap_cursor; u64 start, end; u64 bitmap_range, i; u32 bitmap_size, flags, expected_extent_count; u32 extent_count = 0; int done = 0, nr; int ret; bitmap_size = free_space_bitmap_size(fs_info, block_group->length); bitmap = alloc_bitmap(bitmap_size); if (!bitmap) { ret = -ENOMEM; goto out; } start = block_group->start; end = block_group->start + block_group->length; key.objectid = end - 1; key.type = (u8)-1; key.offset = (u64)-1; while (!done) { ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1); if (ret) goto out; leaf = path->nodes[0]; nr = 0; path->slots[0]++; while (path->slots[0] > 0) { btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1); if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) { ASSERT(found_key.objectid == block_group->start); ASSERT(found_key.offset == block_group->length); done = 1; break; } else if (found_key.type == BTRFS_FREE_SPACE_EXTENT_KEY) { u64 first, last; ASSERT(found_key.objectid >= start); ASSERT(found_key.objectid < end); ASSERT(found_key.objectid + found_key.offset <= end); first = div_u64(found_key.objectid - start, fs_info->sectorsize); last = div_u64(found_key.objectid + found_key.offset - start, fs_info->sectorsize); le_bitmap_set(bitmap, first, last - first); extent_count++; nr++; path->slots[0]--; } else { ASSERT(0); } } ret = btrfs_del_items(trans, root, path, path->slots[0], nr); if (ret) goto out; btrfs_release_path(path); } info = search_free_space_info(trans, block_group, path, 1); if (IS_ERR(info)) { ret = PTR_ERR(info); goto out; } leaf = path->nodes[0]; flags = 
btrfs_free_space_flags(leaf, info); flags |= BTRFS_FREE_SPACE_USING_BITMAPS; btrfs_set_free_space_flags(leaf, info, flags); expected_extent_count = btrfs_free_space_extent_count(leaf, info); btrfs_mark_buffer_dirty(leaf); btrfs_release_path(path); if (extent_count != expected_extent_count) { btrfs_err(fs_info, "incorrect extent count for %llu; counted %u, expected %u", block_group->start, extent_count, expected_extent_count); ASSERT(0); ret = -EIO; goto out; } bitmap_cursor = (char *)bitmap; bitmap_range = fs_info->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS; i = start; while (i < end) { unsigned long ptr; u64 extent_size; u32 data_size; extent_size = min(end - i, bitmap_range); data_size = free_space_bitmap_size(fs_info, extent_size); key.objectid = i; key.type = BTRFS_FREE_SPACE_BITMAP_KEY; key.offset = extent_size; ret = btrfs_insert_empty_item(trans, root, path, &key, data_size); if (ret) goto out; leaf = path->nodes[0]; ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); write_extent_buffer(leaf, bitmap_cursor, ptr, data_size); btrfs_mark_buffer_dirty(leaf); btrfs_release_path(path); i += extent_size; bitmap_cursor += data_size; } ret = 0; out: kvfree(bitmap); if (ret) btrfs_abort_transaction(trans, ret); return ret; } EXPORT_FOR_TESTS int convert_free_space_to_extents(struct btrfs_trans_handle *trans, struct btrfs_block_group *block_group, struct btrfs_path *path) { struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_root *root = btrfs_free_space_root(block_group); struct btrfs_free_space_info *info; struct btrfs_key key, found_key; struct extent_buffer *leaf; unsigned long *bitmap; u64 start, end; u32 bitmap_size, flags, expected_extent_count; unsigned long nrbits, start_bit, end_bit; u32 extent_count = 0; int done = 0, nr; int ret; bitmap_size = free_space_bitmap_size(fs_info, block_group->length); bitmap = alloc_bitmap(bitmap_size); if (!bitmap) { ret = -ENOMEM; goto out; } start = block_group->start; end = block_group->start + block_group->length; key.objectid = end - 1; key.type = (u8)-1; key.offset = (u64)-1; while (!done) { ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1); if (ret) goto out; leaf = path->nodes[0]; nr = 0; path->slots[0]++; while (path->slots[0] > 0) { btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1); if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) { ASSERT(found_key.objectid == block_group->start); ASSERT(found_key.offset == block_group->length); done = 1; break; } else if (found_key.type == BTRFS_FREE_SPACE_BITMAP_KEY) { unsigned long ptr; char *bitmap_cursor; u32 bitmap_pos, data_size; ASSERT(found_key.objectid >= start); ASSERT(found_key.objectid < end); ASSERT(found_key.objectid + found_key.offset <= end); bitmap_pos = div_u64(found_key.objectid - start, fs_info->sectorsize * BITS_PER_BYTE); bitmap_cursor = ((char *)bitmap) + bitmap_pos; data_size = free_space_bitmap_size(fs_info, found_key.offset); ptr = btrfs_item_ptr_offset(leaf, path->slots[0] - 1); read_extent_buffer(leaf, bitmap_cursor, ptr, data_size); nr++; path->slots[0]--; } else { ASSERT(0); } } ret = btrfs_del_items(trans, root, path, path->slots[0], nr); if (ret) goto out; btrfs_release_path(path); } info = search_free_space_info(trans, block_group, path, 1); if (IS_ERR(info)) { ret = PTR_ERR(info); goto out; } leaf = path->nodes[0]; flags = btrfs_free_space_flags(leaf, info); flags &= ~BTRFS_FREE_SPACE_USING_BITMAPS; btrfs_set_free_space_flags(leaf, info, flags); expected_extent_count = btrfs_free_space_extent_count(leaf, info); btrfs_mark_buffer_dirty(leaf); 
btrfs_release_path(path); nrbits = block_group->length >> block_group->fs_info->sectorsize_bits; start_bit = find_next_bit_le(bitmap, nrbits, 0); while (start_bit < nrbits) { end_bit = find_next_zero_bit_le(bitmap, nrbits, start_bit); ASSERT(start_bit < end_bit); key.objectid = start + start_bit * block_group->fs_info->sectorsize; key.type = BTRFS_FREE_SPACE_EXTENT_KEY; key.offset = (end_bit - start_bit) * block_group->fs_info->sectorsize; ret = btrfs_insert_empty_item(trans, root, path, &key, 0); if (ret) goto out; btrfs_release_path(path); extent_count++; start_bit = find_next_bit_le(bitmap, nrbits, end_bit); } if (extent_count != expected_extent_count) { btrfs_err(fs_info, "incorrect extent count for %llu; counted %u, expected %u", block_group->start, extent_count, expected_extent_count); ASSERT(0); ret = -EIO; goto out; } ret = 0; out: kvfree(bitmap); if (ret) btrfs_abort_transaction(trans, ret); return ret; } static int update_free_space_extent_count(struct btrfs_trans_handle *trans, struct btrfs_block_group *block_group, struct btrfs_path *path, int new_extents) { struct btrfs_free_space_info *info; u32 flags; u32 extent_count; int ret = 0; if (new_extents == 0) return 0; info = search_free_space_info(trans, block_group, path, 1); if (IS_ERR(info)) { ret = PTR_ERR(info); goto out; } flags = btrfs_free_space_flags(path->nodes[0], info); extent_count = btrfs_free_space_extent_count(path->nodes[0], info); extent_count += new_extents; btrfs_set_free_space_extent_count(path->nodes[0], info, extent_count); btrfs_mark_buffer_dirty(path->nodes[0]); btrfs_release_path(path); if (!(flags & BTRFS_FREE_SPACE_USING_BITMAPS) && extent_count > block_group->bitmap_high_thresh) { ret = convert_free_space_to_bitmaps(trans, block_group, path); } else if ((flags & BTRFS_FREE_SPACE_USING_BITMAPS) && extent_count < block_group->bitmap_low_thresh) { ret = convert_free_space_to_extents(trans, block_group, path); } out: return ret; } EXPORT_FOR_TESTS int free_space_test_bit(struct btrfs_block_group *block_group, struct btrfs_path *path, u64 offset) { struct extent_buffer *leaf; struct btrfs_key key; u64 found_start, found_end; unsigned long ptr, i; leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); ASSERT(key.type == BTRFS_FREE_SPACE_BITMAP_KEY); found_start = key.objectid; found_end = key.objectid + key.offset; ASSERT(offset >= found_start && offset < found_end); ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); i = div_u64(offset - found_start, block_group->fs_info->sectorsize); return !!extent_buffer_test_bit(leaf, ptr, i); } static void free_space_set_bits(struct btrfs_block_group *block_group, struct btrfs_path *path, u64 *start, u64 *size, int bit) { struct btrfs_fs_info *fs_info = block_group->fs_info; struct extent_buffer *leaf; struct btrfs_key key; u64 end = *start + *size; u64 found_start, found_end; unsigned long ptr, first, last; leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); ASSERT(key.type == BTRFS_FREE_SPACE_BITMAP_KEY); found_start = key.objectid; found_end = key.objectid + key.offset; ASSERT(*start >= found_start && *start < found_end); ASSERT(end > found_start); if (end > found_end) end = found_end; ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); first = (*start - found_start) >> fs_info->sectorsize_bits; last = (end - found_start) >> fs_info->sectorsize_bits; if (bit) extent_buffer_bitmap_set(leaf, ptr, first, last - first); else extent_buffer_bitmap_clear(leaf, ptr, first, last - first); btrfs_mark_buffer_dirty(leaf); *size -= end - 
*start; *start = end; } /* * We can't use btrfs_next_item() in modify_free_space_bitmap() because * btrfs_next_leaf() doesn't get the path for writing. We can forgo the fancy * tree walking in btrfs_next_leaf() anyways because we know exactly what we're * looking for. */ static int free_space_next_bitmap(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *p) { struct btrfs_key key; if (p->slots[0] + 1 < btrfs_header_nritems(p->nodes[0])) { p->slots[0]++; return 0; } btrfs_item_key_to_cpu(p->nodes[0], &key, p->slots[0]); btrfs_release_path(p); key.objectid += key.offset; key.type = (u8)-1; key.offset = (u64)-1; return btrfs_search_prev_slot(trans, root, &key, p, 0, 1); } /* * If remove is 1, then we are removing free space, thus clearing bits in the * bitmap. If remove is 0, then we are adding free space, thus setting bits in * the bitmap. */ static int modify_free_space_bitmap(struct btrfs_trans_handle *trans, struct btrfs_block_group *block_group, struct btrfs_path *path, u64 start, u64 size, int remove) { struct btrfs_root *root = btrfs_free_space_root(block_group); struct btrfs_key key; u64 end = start + size; u64 cur_start, cur_size; int prev_bit, next_bit; int new_extents; int ret; /* * Read the bit for the block immediately before the extent of space if * that block is within the block group. */ if (start > block_group->start) { u64 prev_block = start - block_group->fs_info->sectorsize; key.objectid = prev_block; key.type = (u8)-1; key.offset = (u64)-1; ret = btrfs_search_prev_slot(trans, root, &key, path, 0, 1); if (ret) goto out; prev_bit = free_space_test_bit(block_group, path, prev_block); /* The previous block may have been in the previous bitmap. */ btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); if (start >= key.objectid + key.offset) { ret = free_space_next_bitmap(trans, root, path); if (ret) goto out; } } else { key.objectid = start; key.type = (u8)-1; key.offset = (u64)-1; ret = btrfs_search_prev_slot(trans, root, &key, path, 0, 1); if (ret) goto out; prev_bit = -1; } /* * Iterate over all of the bitmaps overlapped by the extent of space, * clearing/setting bits as required. */ cur_start = start; cur_size = size; while (1) { free_space_set_bits(block_group, path, &cur_start, &cur_size, !remove); if (cur_size == 0) break; ret = free_space_next_bitmap(trans, root, path); if (ret) goto out; } /* * Read the bit for the block immediately after the extent of space if * that block is within the block group. */ if (end < block_group->start + block_group->length) { /* The next block may be in the next bitmap. */ btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); if (end >= key.objectid + key.offset) { ret = free_space_next_bitmap(trans, root, path); if (ret) goto out; } next_bit = free_space_test_bit(block_group, path, end); } else { next_bit = -1; } if (remove) { new_extents = -1; if (prev_bit == 1) { /* Leftover on the left. */ new_extents++; } if (next_bit == 1) { /* Leftover on the right. */ new_extents++; } } else { new_extents = 1; if (prev_bit == 1) { /* Merging with neighbor on the left. */ new_extents--; } if (next_bit == 1) { /* Merging with neighbor on the right. 
*/ new_extents--; } } btrfs_release_path(path); ret = update_free_space_extent_count(trans, block_group, path, new_extents); out: return ret; } static int remove_free_space_extent(struct btrfs_trans_handle *trans, struct btrfs_block_group *block_group, struct btrfs_path *path, u64 start, u64 size) { struct btrfs_root *root = btrfs_free_space_root(block_group); struct btrfs_key key; u64 found_start, found_end; u64 end = start + size; int new_extents = -1; int ret; key.objectid = start; key.type = (u8)-1; key.offset = (u64)-1; ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1); if (ret) goto out; btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); ASSERT(key.type == BTRFS_FREE_SPACE_EXTENT_KEY); found_start = key.objectid; found_end = key.objectid + key.offset; ASSERT(start >= found_start && end <= found_end); /* * Okay, now that we've found the free space extent which contains the * free space that we are removing, there are four cases: * * 1. We're using the whole extent: delete the key we found and * decrement the free space extent count. * 2. We are using part of the extent starting at the beginning: delete * the key we found and insert a new key representing the leftover at * the end. There is no net change in the number of extents. * 3. We are using part of the extent ending at the end: delete the key * we found and insert a new key representing the leftover at the * beginning. There is no net change in the number of extents. * 4. We are using part of the extent in the middle: delete the key we * found and insert two new keys representing the leftovers on each * side. Where we used to have one extent, we now have two, so increment * the extent count. We may need to convert the block group to bitmaps * as a result. */ /* Delete the existing key (cases 1-4). */ ret = btrfs_del_item(trans, root, path); if (ret) goto out; /* Add a key for leftovers at the beginning (cases 3 and 4). */ if (start > found_start) { key.objectid = found_start; key.type = BTRFS_FREE_SPACE_EXTENT_KEY; key.offset = start - found_start; btrfs_release_path(path); ret = btrfs_insert_empty_item(trans, root, path, &key, 0); if (ret) goto out; new_extents++; } /* Add a key for leftovers at the end (cases 2 and 4). 
*/ if (end < found_end) { key.objectid = end; key.type = BTRFS_FREE_SPACE_EXTENT_KEY; key.offset = found_end - end; btrfs_release_path(path); ret = btrfs_insert_empty_item(trans, root, path, &key, 0); if (ret) goto out; new_extents++; } btrfs_release_path(path); ret = update_free_space_extent_count(trans, block_group, path, new_extents); out: return ret; } EXPORT_FOR_TESTS int __remove_from_free_space_tree(struct btrfs_trans_handle *trans, struct btrfs_block_group *block_group, struct btrfs_path *path, u64 start, u64 size) { struct btrfs_free_space_info *info; u32 flags; int ret; if (test_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &block_group->runtime_flags)) { ret = __add_block_group_free_space(trans, block_group, path); if (ret) return ret; } info = search_free_space_info(NULL, block_group, path, 0); if (IS_ERR(info)) return PTR_ERR(info); flags = btrfs_free_space_flags(path->nodes[0], info); btrfs_release_path(path); if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) { return modify_free_space_bitmap(trans, block_group, path, start, size, 1); } else { return remove_free_space_extent(trans, block_group, path, start, size); } } int remove_from_free_space_tree(struct btrfs_trans_handle *trans, u64 start, u64 size) { struct btrfs_block_group *block_group; struct btrfs_path *path; int ret; if (!btrfs_fs_compat_ro(trans->fs_info, FREE_SPACE_TREE)) return 0; path = btrfs_alloc_path(); if (!path) { ret = -ENOMEM; goto out; } block_group = btrfs_lookup_block_group(trans->fs_info, start); if (!block_group) { ASSERT(0); ret = -ENOENT; goto out; } mutex_lock(&block_group->free_space_lock); ret = __remove_from_free_space_tree(trans, block_group, path, start, size); mutex_unlock(&block_group->free_space_lock); btrfs_put_block_group(block_group); out: btrfs_free_path(path); if (ret) btrfs_abort_transaction(trans, ret); return ret; } static int add_free_space_extent(struct btrfs_trans_handle *trans, struct btrfs_block_group *block_group, struct btrfs_path *path, u64 start, u64 size) { struct btrfs_root *root = btrfs_free_space_root(block_group); struct btrfs_key key, new_key; u64 found_start, found_end; u64 end = start + size; int new_extents = 1; int ret; /* * We are adding a new extent of free space, but we need to merge * extents. There are four cases here: * * 1. The new extent does not have any immediate neighbors to merge * with: add the new key and increment the free space extent count. We * may need to convert the block group to bitmaps as a result. * 2. The new extent has an immediate neighbor before it: remove the * previous key and insert a new key combining both of them. There is no * net change in the number of extents. * 3. The new extent has an immediate neighbor after it: remove the next * key and insert a new key combining both of them. There is no net * change in the number of extents. * 4. The new extent has immediate neighbors on both sides: remove both * of the keys and insert a new key combining all of them. Where we used * to have two extents, we now have one, so decrement the extent count. */ new_key.objectid = start; new_key.type = BTRFS_FREE_SPACE_EXTENT_KEY; new_key.offset = size; /* Search for a neighbor on the left. 
*/ if (start == block_group->start) goto right; key.objectid = start - 1; key.type = (u8)-1; key.offset = (u64)-1; ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1); if (ret) goto out; btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); if (key.type != BTRFS_FREE_SPACE_EXTENT_KEY) { ASSERT(key.type == BTRFS_FREE_SPACE_INFO_KEY); btrfs_release_path(path); goto right; } found_start = key.objectid; found_end = key.objectid + key.offset; ASSERT(found_start >= block_group->start && found_end > block_group->start); ASSERT(found_start < start && found_end <= start); /* * Delete the neighbor on the left and absorb it into the new key (cases * 2 and 4). */ if (found_end == start) { ret = btrfs_del_item(trans, root, path); if (ret) goto out; new_key.objectid = found_start; new_key.offset += key.offset; new_extents--; } btrfs_release_path(path); right: /* Search for a neighbor on the right. */ if (end == block_group->start + block_group->length) goto insert; key.objectid = end; key.type = (u8)-1; key.offset = (u64)-1; ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1); if (ret) goto out; btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); if (key.type != BTRFS_FREE_SPACE_EXTENT_KEY) { ASSERT(key.type == BTRFS_FREE_SPACE_INFO_KEY); btrfs_release_path(path); goto insert; } found_start = key.objectid; found_end = key.objectid + key.offset; ASSERT(found_start >= block_group->start && found_end > block_group->start); ASSERT((found_start < start && found_end <= start) || (found_start >= end && found_end > end)); /* * Delete the neighbor on the right and absorb it into the new key * (cases 3 and 4). */ if (found_start == end) { ret = btrfs_del_item(trans, root, path); if (ret) goto out; new_key.offset += key.offset; new_extents--; } btrfs_release_path(path); insert: /* Insert the new key (cases 1-4). 
*/ ret = btrfs_insert_empty_item(trans, root, path, &new_key, 0); if (ret) goto out; btrfs_release_path(path); ret = update_free_space_extent_count(trans, block_group, path, new_extents); out: return ret; } EXPORT_FOR_TESTS int __add_to_free_space_tree(struct btrfs_trans_handle *trans, struct btrfs_block_group *block_group, struct btrfs_path *path, u64 start, u64 size) { struct btrfs_free_space_info *info; u32 flags; int ret; if (test_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &block_group->runtime_flags)) { ret = __add_block_group_free_space(trans, block_group, path); if (ret) return ret; } info = search_free_space_info(NULL, block_group, path, 0); if (IS_ERR(info)) return PTR_ERR(info); flags = btrfs_free_space_flags(path->nodes[0], info); btrfs_release_path(path); if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) { return modify_free_space_bitmap(trans, block_group, path, start, size, 0); } else { return add_free_space_extent(trans, block_group, path, start, size); } } int add_to_free_space_tree(struct btrfs_trans_handle *trans, u64 start, u64 size) { struct btrfs_block_group *block_group; struct btrfs_path *path; int ret; if (!btrfs_fs_compat_ro(trans->fs_info, FREE_SPACE_TREE)) return 0; path = btrfs_alloc_path(); if (!path) { ret = -ENOMEM; goto out; } block_group = btrfs_lookup_block_group(trans->fs_info, start); if (!block_group) { ASSERT(0); ret = -ENOENT; goto out; } mutex_lock(&block_group->free_space_lock); ret = __add_to_free_space_tree(trans, block_group, path, start, size); mutex_unlock(&block_group->free_space_lock); btrfs_put_block_group(block_group); out: btrfs_free_path(path); if (ret) btrfs_abort_transaction(trans, ret); return ret; } /* * Populate the free space tree by walking the extent tree. Operations on the * extent tree that happen as a result of writes to the free space tree will go * through the normal add/remove hooks. */ static int populate_free_space_tree(struct btrfs_trans_handle *trans, struct btrfs_block_group *block_group) { struct btrfs_root *extent_root; struct btrfs_path *path, *path2; struct btrfs_key key; u64 start, end; int ret; path = btrfs_alloc_path(); if (!path) return -ENOMEM; path->reada = READA_FORWARD; path2 = btrfs_alloc_path(); if (!path2) { btrfs_free_path(path); return -ENOMEM; } ret = add_new_free_space_info(trans, block_group, path2); if (ret) goto out; mutex_lock(&block_group->free_space_lock); /* * Iterate through all of the extent and metadata items in this block * group, adding the free space between them and the free space at the * end. Note that EXTENT_ITEM and METADATA_ITEM are less than * BLOCK_GROUP_ITEM, so an extent may precede the block group that it's * contained in. 
*/ key.objectid = block_group->start; key.type = BTRFS_EXTENT_ITEM_KEY; key.offset = 0; extent_root = btrfs_extent_root(trans->fs_info, key.objectid); ret = btrfs_search_slot_for_read(extent_root, &key, path, 1, 0); if (ret < 0) goto out_locked; ASSERT(ret == 0); start = block_group->start; end = block_group->start + block_group->length; while (1) { btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); if (key.type == BTRFS_EXTENT_ITEM_KEY || key.type == BTRFS_METADATA_ITEM_KEY) { if (key.objectid >= end) break; if (start < key.objectid) { ret = __add_to_free_space_tree(trans, block_group, path2, start, key.objectid - start); if (ret) goto out_locked; } start = key.objectid; if (key.type == BTRFS_METADATA_ITEM_KEY) start += trans->fs_info->nodesize; else start += key.offset; } else if (key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) { if (key.objectid != block_group->start) break; } ret = btrfs_next_item(extent_root, path); if (ret < 0) goto out_locked; if (ret) break; } if (start < end) { ret = __add_to_free_space_tree(trans, block_group, path2, start, end - start); if (ret) goto out_locked; } ret = 0; out_locked: mutex_unlock(&block_group->free_space_lock); out: btrfs_free_path(path2); btrfs_free_path(path); return ret; } int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info) { struct btrfs_trans_handle *trans; struct btrfs_root *tree_root = fs_info->tree_root; struct btrfs_root *free_space_root; struct btrfs_block_group *block_group; struct rb_node *node; int ret; trans = btrfs_start_transaction(tree_root, 0); if (IS_ERR(trans)) return PTR_ERR(trans); set_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags); set_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags); free_space_root = btrfs_create_tree(trans, BTRFS_FREE_SPACE_TREE_OBJECTID); if (IS_ERR(free_space_root)) { ret = PTR_ERR(free_space_root); goto abort; } ret = btrfs_global_root_insert(free_space_root); if (ret) { btrfs_put_root(free_space_root); goto abort; } node = rb_first_cached(&fs_info->block_group_cache_tree); while (node) { block_group = rb_entry(node, struct btrfs_block_group, cache_node); ret = populate_free_space_tree(trans, block_group); if (ret) goto abort; node = rb_next(node); } btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE); btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID); clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags); ret = btrfs_commit_transaction(trans); /* * Now that we've committed the transaction any reading of our commit * root will be safe, so we can cache from the free space tree now. 
*/ clear_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags); return ret; abort: clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags); clear_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags); btrfs_abort_transaction(trans, ret); btrfs_end_transaction(trans); return ret; } static int clear_free_space_tree(struct btrfs_trans_handle *trans, struct btrfs_root *root) { struct btrfs_path *path; struct btrfs_key key; int nr; int ret; path = btrfs_alloc_path(); if (!path) return -ENOMEM; key.objectid = 0; key.type = 0; key.offset = 0; while (1) { ret = btrfs_search_slot(trans, root, &key, path, -1, 1); if (ret < 0) goto out; nr = btrfs_header_nritems(path->nodes[0]); if (!nr) break; path->slots[0] = 0; ret = btrfs_del_items(trans, root, path, 0, nr); if (ret) goto out; btrfs_release_path(path); } ret = 0; out: btrfs_free_path(path); return ret; } int btrfs_delete_free_space_tree(struct btrfs_fs_info *fs_info) { struct btrfs_trans_handle *trans; struct btrfs_root *tree_root = fs_info->tree_root; struct btrfs_key key = { .objectid = BTRFS_FREE_SPACE_TREE_OBJECTID, .type = BTRFS_ROOT_ITEM_KEY, .offset = 0, }; struct btrfs_root *free_space_root = btrfs_global_root(fs_info, &key); int ret; trans = btrfs_start_transaction(tree_root, 0); if (IS_ERR(trans)) return PTR_ERR(trans); btrfs_clear_fs_compat_ro(fs_info, FREE_SPACE_TREE); btrfs_clear_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID); ret = clear_free_space_tree(trans, free_space_root); if (ret) goto abort; ret = btrfs_del_root(trans, &free_space_root->root_key); if (ret) goto abort; btrfs_global_root_delete(free_space_root); spin_lock(&fs_info->trans_lock); list_del(&free_space_root->dirty_list); spin_unlock(&fs_info->trans_lock); btrfs_tree_lock(free_space_root->node); btrfs_clear_buffer_dirty(trans, free_space_root->node); btrfs_tree_unlock(free_space_root->node); btrfs_free_tree_block(trans, btrfs_root_id(free_space_root), free_space_root->node, 0, 1); btrfs_put_root(free_space_root); return btrfs_commit_transaction(trans); abort: btrfs_abort_transaction(trans, ret); btrfs_end_transaction(trans); return ret; } int btrfs_rebuild_free_space_tree(struct btrfs_fs_info *fs_info) { struct btrfs_trans_handle *trans; struct btrfs_key key = { .objectid = BTRFS_FREE_SPACE_TREE_OBJECTID, .type = BTRFS_ROOT_ITEM_KEY, .offset = 0, }; struct btrfs_root *free_space_root = btrfs_global_root(fs_info, &key); struct rb_node *node; int ret; trans = btrfs_start_transaction(free_space_root, 1); if (IS_ERR(trans)) return PTR_ERR(trans); set_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags); set_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags); ret = clear_free_space_tree(trans, free_space_root); if (ret) goto abort; node = rb_first_cached(&fs_info->block_group_cache_tree); while (node) { struct btrfs_block_group *block_group; block_group = rb_entry(node, struct btrfs_block_group, cache_node); ret = populate_free_space_tree(trans, block_group); if (ret) goto abort; node = rb_next(node); } btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE); btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID); clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags); ret = btrfs_commit_transaction(trans); clear_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags); return ret; abort: btrfs_abort_transaction(trans, ret); btrfs_end_transaction(trans); return ret; } static int __add_block_group_free_space(struct btrfs_trans_handle *trans, struct btrfs_block_group *block_group, struct btrfs_path *path) { int ret; 
clear_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &block_group->runtime_flags); ret = add_new_free_space_info(trans, block_group, path); if (ret) return ret; return __add_to_free_space_tree(trans, block_group, path, block_group->start, block_group->length); } int add_block_group_free_space(struct btrfs_trans_handle *trans, struct btrfs_block_group *block_group) { struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_path *path = NULL; int ret = 0; if (!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) return 0; mutex_lock(&block_group->free_space_lock); if (!test_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &block_group->runtime_flags)) goto out; path = btrfs_alloc_path(); if (!path) { ret = -ENOMEM; goto out; } ret = __add_block_group_free_space(trans, block_group, path); out: btrfs_free_path(path); mutex_unlock(&block_group->free_space_lock); if (ret) btrfs_abort_transaction(trans, ret); return ret; } int remove_block_group_free_space(struct btrfs_trans_handle *trans, struct btrfs_block_group *block_group) { struct btrfs_root *root = btrfs_free_space_root(block_group); struct btrfs_path *path; struct btrfs_key key, found_key; struct extent_buffer *leaf; u64 start, end; int done = 0, nr; int ret; if (!btrfs_fs_compat_ro(trans->fs_info, FREE_SPACE_TREE)) return 0; if (test_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &block_group->runtime_flags)) { /* We never added this block group to the free space tree. */ return 0; } path = btrfs_alloc_path(); if (!path) { ret = -ENOMEM; goto out; } start = block_group->start; end = block_group->start + block_group->length; key.objectid = end - 1; key.type = (u8)-1; key.offset = (u64)-1; while (!done) { ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1); if (ret) goto out; leaf = path->nodes[0]; nr = 0; path->slots[0]++; while (path->slots[0] > 0) { btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1); if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) { ASSERT(found_key.objectid == block_group->start); ASSERT(found_key.offset == block_group->length); done = 1; nr++; path->slots[0]--; break; } else if (found_key.type == BTRFS_FREE_SPACE_EXTENT_KEY || found_key.type == BTRFS_FREE_SPACE_BITMAP_KEY) { ASSERT(found_key.objectid >= start); ASSERT(found_key.objectid < end); ASSERT(found_key.objectid + found_key.offset <= end); nr++; path->slots[0]--; } else { ASSERT(0); } } ret = btrfs_del_items(trans, root, path, path->slots[0], nr); if (ret) goto out; btrfs_release_path(path); } ret = 0; out: btrfs_free_path(path); if (ret) btrfs_abort_transaction(trans, ret); return ret; } static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl, struct btrfs_path *path, u32 expected_extent_count) { struct btrfs_block_group *block_group; struct btrfs_fs_info *fs_info; struct btrfs_root *root; struct btrfs_key key; int prev_bit = 0, bit; /* Initialize to silence GCC. 
*/ u64 extent_start = 0; u64 end, offset; u64 total_found = 0; u32 extent_count = 0; int ret; block_group = caching_ctl->block_group; fs_info = block_group->fs_info; root = btrfs_free_space_root(block_group); end = block_group->start + block_group->length; while (1) { ret = btrfs_next_item(root, path); if (ret < 0) goto out; if (ret) break; btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); if (key.type == BTRFS_FREE_SPACE_INFO_KEY) break; ASSERT(key.type == BTRFS_FREE_SPACE_BITMAP_KEY); ASSERT(key.objectid < end && key.objectid + key.offset <= end); offset = key.objectid; while (offset < key.objectid + key.offset) { bit = free_space_test_bit(block_group, path, offset); if (prev_bit == 0 && bit == 1) { extent_start = offset; } else if (prev_bit == 1 && bit == 0) { u64 space_added; ret = btrfs_add_new_free_space(block_group, extent_start, offset, &space_added); if (ret) goto out; total_found += space_added; if (total_found > CACHING_CTL_WAKE_UP) { total_found = 0; wake_up(&caching_ctl->wait); } extent_count++; } prev_bit = bit; offset += fs_info->sectorsize; } } if (prev_bit == 1) { ret = btrfs_add_new_free_space(block_group, extent_start, end, NULL); if (ret) goto out; extent_count++; } if (extent_count != expected_extent_count) { btrfs_err(fs_info, "incorrect extent count for %llu; counted %u, expected %u", block_group->start, extent_count, expected_extent_count); ASSERT(0); ret = -EIO; goto out; } ret = 0; out: return ret; } static int load_free_space_extents(struct btrfs_caching_control *caching_ctl, struct btrfs_path *path, u32 expected_extent_count) { struct btrfs_block_group *block_group; struct btrfs_fs_info *fs_info; struct btrfs_root *root; struct btrfs_key key; u64 end; u64 total_found = 0; u32 extent_count = 0; int ret; block_group = caching_ctl->block_group; fs_info = block_group->fs_info; root = btrfs_free_space_root(block_group); end = block_group->start + block_group->length; while (1) { u64 space_added; ret = btrfs_next_item(root, path); if (ret < 0) goto out; if (ret) break; btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); if (key.type == BTRFS_FREE_SPACE_INFO_KEY) break; ASSERT(key.type == BTRFS_FREE_SPACE_EXTENT_KEY); ASSERT(key.objectid < end && key.objectid + key.offset <= end); ret = btrfs_add_new_free_space(block_group, key.objectid, key.objectid + key.offset, &space_added); if (ret) goto out; total_found += space_added; if (total_found > CACHING_CTL_WAKE_UP) { total_found = 0; wake_up(&caching_ctl->wait); } extent_count++; } if (extent_count != expected_extent_count) { btrfs_err(fs_info, "incorrect extent count for %llu; counted %u, expected %u", block_group->start, extent_count, expected_extent_count); ASSERT(0); ret = -EIO; goto out; } ret = 0; out: return ret; } int load_free_space_tree(struct btrfs_caching_control *caching_ctl) { struct btrfs_block_group *block_group; struct btrfs_free_space_info *info; struct btrfs_path *path; u32 extent_count, flags; int ret; block_group = caching_ctl->block_group; path = btrfs_alloc_path(); if (!path) return -ENOMEM; /* * Just like caching_thread() doesn't want to deadlock on the extent * tree, we don't want to deadlock on the free space tree. 
*/ path->skip_locking = 1; path->search_commit_root = 1; path->reada = READA_FORWARD; info = search_free_space_info(NULL, block_group, path, 0); if (IS_ERR(info)) { ret = PTR_ERR(info); goto out; } extent_count = btrfs_free_space_extent_count(path->nodes[0], info); flags = btrfs_free_space_flags(path->nodes[0], info); /* * We left path pointing to the free space info item, so now * load_free_space_foo can just iterate through the free space tree from * there. */ if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) ret = load_free_space_bitmaps(caching_ctl, path, extent_count); else ret = load_free_space_extents(caching_ctl, path, extent_count); out: btrfs_free_path(path); return ret; }
linux-master
fs/btrfs/free-space-tree.c
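free-space-tree.c keeps free space either as (start, length) extent items or as bitmaps with one bit per sector, converting between the two when the extent count crosses the bitmap thresholds. The self-contained program below sketches just the data transformation behind convert_free_space_to_bitmaps() and convert_free_space_to_extents(), using in-memory arrays instead of tree items; the sector size, array sizes, and helper names are illustrative assumptions.

/*
 * Sketch of the two free-space representations: a list of free extents
 * vs. a bitmap with one bit per free sector, converted in both directions.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SECTORSIZE	4096ULL
#define NSECTORS	64		/* toy block group: 64 sectors */

struct free_extent { uint64_t start; uint64_t len; };

/* Set one bit per free sector, akin to le_bitmap_set() over the extents. */
static void extents_to_bitmap(const struct free_extent *ext, int n,
			      uint64_t bg_start, uint8_t *bitmap)
{
	memset(bitmap, 0, NSECTORS / 8);
	for (int i = 0; i < n; i++) {
		uint64_t first = (ext[i].start - bg_start) / SECTORSIZE;
		uint64_t count = ext[i].len / SECTORSIZE;

		for (uint64_t b = first; b < first + count; b++)
			bitmap[b / 8] |= 1u << (b % 8);
	}
}

static int test_bit(const uint8_t *bitmap, uint64_t b)
{
	return (bitmap[b / 8] >> (b % 8)) & 1;
}

/* Walk runs of set bits back into extents, akin to the find_next_bit_le() loop. */
static int bitmap_to_extents(const uint8_t *bitmap, uint64_t bg_start,
			     struct free_extent *out, int max)
{
	int n = 0;
	uint64_t b = 0;

	while (b < NSECTORS && n < max) {
		while (b < NSECTORS && !test_bit(bitmap, b))
			b++;
		if (b == NSECTORS)
			break;
		uint64_t start = b;

		while (b < NSECTORS && test_bit(bitmap, b))
			b++;
		out[n].start = bg_start + start * SECTORSIZE;
		out[n].len = (b - start) * SECTORSIZE;
		n++;
	}
	return n;
}

int main(void)
{
	const struct free_extent in[] = {
		{ .start = 0 * SECTORSIZE, .len = 3 * SECTORSIZE },
		{ .start = 10 * SECTORSIZE, .len = 5 * SECTORSIZE },
	};
	uint8_t bitmap[NSECTORS / 8];
	struct free_extent out[8];
	int n;

	extents_to_bitmap(in, 2, 0, bitmap);
	n = bitmap_to_extents(bitmap, 0, out, 8);
	for (int i = 0; i < n; i++)
		printf("free extent: start=%llu len=%llu\n",
		       (unsigned long long)out[i].start,
		       (unsigned long long)out[i].len);
	return 0;
}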
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2008 Oracle. All rights reserved. */ #include <linux/kernel.h> #include <linux/bio.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/pagemap.h> #include <linux/pagevec.h> #include <linux/highmem.h> #include <linux/kthread.h> #include <linux/time.h> #include <linux/init.h> #include <linux/string.h> #include <linux/backing-dev.h> #include <linux/writeback.h> #include <linux/psi.h> #include <linux/slab.h> #include <linux/sched/mm.h> #include <linux/log2.h> #include <crypto/hash.h> #include "misc.h" #include "ctree.h" #include "fs.h" #include "disk-io.h" #include "transaction.h" #include "btrfs_inode.h" #include "bio.h" #include "ordered-data.h" #include "compression.h" #include "extent_io.h" #include "extent_map.h" #include "subpage.h" #include "zoned.h" #include "file-item.h" #include "super.h" static struct bio_set btrfs_compressed_bioset; static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" }; const char* btrfs_compress_type2str(enum btrfs_compression_type type) { switch (type) { case BTRFS_COMPRESS_ZLIB: case BTRFS_COMPRESS_LZO: case BTRFS_COMPRESS_ZSTD: case BTRFS_COMPRESS_NONE: return btrfs_compress_types[type]; default: break; } return NULL; } static inline struct compressed_bio *to_compressed_bio(struct btrfs_bio *bbio) { return container_of(bbio, struct compressed_bio, bbio); } static struct compressed_bio *alloc_compressed_bio(struct btrfs_inode *inode, u64 start, blk_opf_t op, btrfs_bio_end_io_t end_io) { struct btrfs_bio *bbio; bbio = btrfs_bio(bio_alloc_bioset(NULL, BTRFS_MAX_COMPRESSED_PAGES, op, GFP_NOFS, &btrfs_compressed_bioset)); btrfs_bio_init(bbio, inode->root->fs_info, end_io, NULL); bbio->inode = inode; bbio->file_offset = start; return to_compressed_bio(bbio); } bool btrfs_compress_is_valid_type(const char *str, size_t len) { int i; for (i = 1; i < ARRAY_SIZE(btrfs_compress_types); i++) { size_t comp_len = strlen(btrfs_compress_types[i]); if (len < comp_len) continue; if (!strncmp(btrfs_compress_types[i], str, comp_len)) return true; } return false; } static int compression_compress_pages(int type, struct list_head *ws, struct address_space *mapping, u64 start, struct page **pages, unsigned long *out_pages, unsigned long *total_in, unsigned long *total_out) { switch (type) { case BTRFS_COMPRESS_ZLIB: return zlib_compress_pages(ws, mapping, start, pages, out_pages, total_in, total_out); case BTRFS_COMPRESS_LZO: return lzo_compress_pages(ws, mapping, start, pages, out_pages, total_in, total_out); case BTRFS_COMPRESS_ZSTD: return zstd_compress_pages(ws, mapping, start, pages, out_pages, total_in, total_out); case BTRFS_COMPRESS_NONE: default: /* * This can happen when compression races with remount setting * it to 'no compress', while caller doesn't call * inode_need_compress() to check if we really need to * compress. * * Not a big deal, just need to inform caller that we * haven't allocated any pages yet. */ *out_pages = 0; return -E2BIG; } } static int compression_decompress_bio(struct list_head *ws, struct compressed_bio *cb) { switch (cb->compress_type) { case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb); case BTRFS_COMPRESS_LZO: return lzo_decompress_bio(ws, cb); case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb); case BTRFS_COMPRESS_NONE: default: /* * This can't happen, the type is validated several times * before we get here. 
*/ BUG(); } } static int compression_decompress(int type, struct list_head *ws, const u8 *data_in, struct page *dest_page, unsigned long start_byte, size_t srclen, size_t destlen) { switch (type) { case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page, start_byte, srclen, destlen); case BTRFS_COMPRESS_LZO: return lzo_decompress(ws, data_in, dest_page, start_byte, srclen, destlen); case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page, start_byte, srclen, destlen); case BTRFS_COMPRESS_NONE: default: /* * This can't happen, the type is validated several times * before we get here. */ BUG(); } } static void btrfs_free_compressed_pages(struct compressed_bio *cb) { for (unsigned int i = 0; i < cb->nr_pages; i++) put_page(cb->compressed_pages[i]); kfree(cb->compressed_pages); } static int btrfs_decompress_bio(struct compressed_bio *cb); static void end_compressed_bio_read(struct btrfs_bio *bbio) { struct compressed_bio *cb = to_compressed_bio(bbio); blk_status_t status = bbio->bio.bi_status; if (!status) status = errno_to_blk_status(btrfs_decompress_bio(cb)); btrfs_free_compressed_pages(cb); btrfs_bio_end_io(cb->orig_bbio, status); bio_put(&bbio->bio); } /* * Clear the writeback bits on all of the file * pages for a compressed write */ static noinline void end_compressed_writeback(const struct compressed_bio *cb) { struct inode *inode = &cb->bbio.inode->vfs_inode; struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); unsigned long index = cb->start >> PAGE_SHIFT; unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT; struct folio_batch fbatch; const int errno = blk_status_to_errno(cb->bbio.bio.bi_status); int i; int ret; if (errno) mapping_set_error(inode->i_mapping, errno); folio_batch_init(&fbatch); while (index <= end_index) { ret = filemap_get_folios(inode->i_mapping, &index, end_index, &fbatch); if (ret == 0) return; for (i = 0; i < ret; i++) { struct folio *folio = fbatch.folios[i]; btrfs_page_clamp_clear_writeback(fs_info, &folio->page, cb->start, cb->len); } folio_batch_release(&fbatch); } /* the inode may be gone now */ } static void btrfs_finish_compressed_write_work(struct work_struct *work) { struct compressed_bio *cb = container_of(work, struct compressed_bio, write_end_work); btrfs_finish_ordered_extent(cb->bbio.ordered, NULL, cb->start, cb->len, cb->bbio.bio.bi_status == BLK_STS_OK); if (cb->writeback) end_compressed_writeback(cb); /* Note, our inode could be gone now */ btrfs_free_compressed_pages(cb); bio_put(&cb->bbio.bio); } /* * Do the cleanup once all the compressed pages hit the disk. This will clear * writeback on the file pages and free the compressed pages. * * This also calls the writeback end hooks for the file pages so that metadata * and checksums can be updated in the file. */ static void end_compressed_bio_write(struct btrfs_bio *bbio) { struct compressed_bio *cb = to_compressed_bio(bbio); struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info; queue_work(fs_info->compressed_write_workers, &cb->write_end_work); } static void btrfs_add_compressed_bio_pages(struct compressed_bio *cb) { struct bio *bio = &cb->bbio.bio; u32 offset = 0; while (offset < cb->compressed_len) { u32 len = min_t(u32, cb->compressed_len - offset, PAGE_SIZE); /* Maximum compressed extent is smaller than bio size limit. */ __bio_add_page(bio, cb->compressed_pages[offset >> PAGE_SHIFT], len, 0); offset += len; } } /* * worker function to build and submit bios for previously compressed pages. 
* The corresponding pages in the inode should be marked for writeback * and the compressed pages should have a reference on them for dropping * when the IO is complete. * * This also checksums the file bytes and gets things ready for * the end io hooks. */ void btrfs_submit_compressed_write(struct btrfs_ordered_extent *ordered, struct page **compressed_pages, unsigned int nr_pages, blk_opf_t write_flags, bool writeback) { struct btrfs_inode *inode = BTRFS_I(ordered->inode); struct btrfs_fs_info *fs_info = inode->root->fs_info; struct compressed_bio *cb; ASSERT(IS_ALIGNED(ordered->file_offset, fs_info->sectorsize)); ASSERT(IS_ALIGNED(ordered->num_bytes, fs_info->sectorsize)); cb = alloc_compressed_bio(inode, ordered->file_offset, REQ_OP_WRITE | write_flags, end_compressed_bio_write); cb->start = ordered->file_offset; cb->len = ordered->num_bytes; cb->compressed_pages = compressed_pages; cb->compressed_len = ordered->disk_num_bytes; cb->writeback = writeback; INIT_WORK(&cb->write_end_work, btrfs_finish_compressed_write_work); cb->nr_pages = nr_pages; cb->bbio.bio.bi_iter.bi_sector = ordered->disk_bytenr >> SECTOR_SHIFT; cb->bbio.ordered = ordered; btrfs_add_compressed_bio_pages(cb); btrfs_submit_bio(&cb->bbio, 0); } /* * Add extra pages in the same compressed file extent so that we don't need to * re-read the same extent again and again. * * NOTE: this won't work well for subpage, as for subpage read, we lock the * full page then submit bio for each compressed/regular extents. * * This means, if we have several sectors in the same page points to the same * on-disk compressed data, we will re-read the same extent many times and * this function can only help for the next page. */ static noinline int add_ra_bio_pages(struct inode *inode, u64 compressed_end, struct compressed_bio *cb, int *memstall, unsigned long *pflags) { struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); unsigned long end_index; struct bio *orig_bio = &cb->orig_bbio->bio; u64 cur = cb->orig_bbio->file_offset + orig_bio->bi_iter.bi_size; u64 isize = i_size_read(inode); int ret; struct page *page; struct extent_map *em; struct address_space *mapping = inode->i_mapping; struct extent_map_tree *em_tree; struct extent_io_tree *tree; int sectors_missed = 0; em_tree = &BTRFS_I(inode)->extent_tree; tree = &BTRFS_I(inode)->io_tree; if (isize == 0) return 0; /* * For current subpage support, we only support 64K page size, * which means maximum compressed extent size (128K) is just 2x page * size. * This makes readahead less effective, so here disable readahead for * subpage for now, until full compressed write is supported. */ if (btrfs_sb(inode->i_sb)->sectorsize < PAGE_SIZE) return 0; end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT; while (cur < compressed_end) { u64 page_end; u64 pg_index = cur >> PAGE_SHIFT; u32 add_size; if (pg_index > end_index) break; page = xa_load(&mapping->i_pages, pg_index); if (page && !xa_is_value(page)) { sectors_missed += (PAGE_SIZE - offset_in_page(cur)) >> fs_info->sectorsize_bits; /* Beyond threshold, no need to continue */ if (sectors_missed > 4) break; /* * Jump to next page start as we already have page for * current offset. 
*/ cur = (pg_index << PAGE_SHIFT) + PAGE_SIZE; continue; } page = __page_cache_alloc(mapping_gfp_constraint(mapping, ~__GFP_FS)); if (!page) break; if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) { put_page(page); /* There is already a page, skip to page end */ cur = (pg_index << PAGE_SHIFT) + PAGE_SIZE; continue; } if (!*memstall && PageWorkingset(page)) { psi_memstall_enter(pflags); *memstall = 1; } ret = set_page_extent_mapped(page); if (ret < 0) { unlock_page(page); put_page(page); break; } page_end = (pg_index << PAGE_SHIFT) + PAGE_SIZE - 1; lock_extent(tree, cur, page_end, NULL); read_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, cur, page_end + 1 - cur); read_unlock(&em_tree->lock); /* * At this point, we have a locked page in the page cache for * these bytes in the file. But, we have to make sure they map * to this compressed extent on disk. */ if (!em || cur < em->start || (cur + fs_info->sectorsize > extent_map_end(em)) || (em->block_start >> SECTOR_SHIFT) != orig_bio->bi_iter.bi_sector) { free_extent_map(em); unlock_extent(tree, cur, page_end, NULL); unlock_page(page); put_page(page); break; } free_extent_map(em); if (page->index == end_index) { size_t zero_offset = offset_in_page(isize); if (zero_offset) { int zeros; zeros = PAGE_SIZE - zero_offset; memzero_page(page, zero_offset, zeros); } } add_size = min(em->start + em->len, page_end + 1) - cur; ret = bio_add_page(orig_bio, page, add_size, offset_in_page(cur)); if (ret != add_size) { unlock_extent(tree, cur, page_end, NULL); unlock_page(page); put_page(page); break; } /* * If it's subpage, we also need to increase its * subpage::readers number, as at endio we will decrease * subpage::readers and to unlock the page. */ if (fs_info->sectorsize < PAGE_SIZE) btrfs_subpage_start_reader(fs_info, page, cur, add_size); put_page(page); cur += add_size; } return 0; } /* * for a compressed read, the bio we get passed has all the inode pages * in it. We don't actually do IO on those pages but allocate new ones * to hold the compressed pages on disk. 
* * bio->bi_iter.bi_sector points to the compressed extent on disk * bio->bi_io_vec points to all of the inode pages * * After the compressed pages are read, we copy the bytes into the * bio we were passed and then call the bio end_io calls */ void btrfs_submit_compressed_read(struct btrfs_bio *bbio) { struct btrfs_inode *inode = bbio->inode; struct btrfs_fs_info *fs_info = inode->root->fs_info; struct extent_map_tree *em_tree = &inode->extent_tree; struct compressed_bio *cb; unsigned int compressed_len; u64 file_offset = bbio->file_offset; u64 em_len; u64 em_start; struct extent_map *em; unsigned long pflags; int memstall = 0; blk_status_t ret; int ret2; /* we need the actual starting offset of this extent in the file */ read_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, file_offset, fs_info->sectorsize); read_unlock(&em_tree->lock); if (!em) { ret = BLK_STS_IOERR; goto out; } ASSERT(em->compress_type != BTRFS_COMPRESS_NONE); compressed_len = em->block_len; cb = alloc_compressed_bio(inode, file_offset, REQ_OP_READ, end_compressed_bio_read); cb->start = em->orig_start; em_len = em->len; em_start = em->start; cb->len = bbio->bio.bi_iter.bi_size; cb->compressed_len = compressed_len; cb->compress_type = em->compress_type; cb->orig_bbio = bbio; free_extent_map(em); cb->nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE); cb->compressed_pages = kcalloc(cb->nr_pages, sizeof(struct page *), GFP_NOFS); if (!cb->compressed_pages) { ret = BLK_STS_RESOURCE; goto out_free_bio; } ret2 = btrfs_alloc_page_array(cb->nr_pages, cb->compressed_pages); if (ret2) { ret = BLK_STS_RESOURCE; goto out_free_compressed_pages; } add_ra_bio_pages(&inode->vfs_inode, em_start + em_len, cb, &memstall, &pflags); /* include any pages we added in add_ra-bio_pages */ cb->len = bbio->bio.bi_iter.bi_size; cb->bbio.bio.bi_iter.bi_sector = bbio->bio.bi_iter.bi_sector; btrfs_add_compressed_bio_pages(cb); if (memstall) psi_memstall_leave(&pflags); btrfs_submit_bio(&cb->bbio, 0); return; out_free_compressed_pages: kfree(cb->compressed_pages); out_free_bio: bio_put(&cb->bbio.bio); out: btrfs_bio_end_io(bbio, ret); } /* * Heuristic uses systematic sampling to collect data from the input data * range, the logic can be tuned by the following constants: * * @SAMPLING_READ_SIZE - how many bytes will be copied from for each sample * @SAMPLING_INTERVAL - range from which the sampled data can be collected */ #define SAMPLING_READ_SIZE (16) #define SAMPLING_INTERVAL (256) /* * For statistical analysis of the input data we consider bytes that form a * Galois Field of 256 objects. Each object has an attribute count, ie. how * many times the object appeared in the sample. */ #define BUCKET_SIZE (256) /* * The size of the sample is based on a statistical sampling rule of thumb. * The common way is to perform sampling tests as long as the number of * elements in each cell is at least 5. * * Instead of 5, we choose 32 to obtain more accurate results. * If the data contain the maximum number of symbols, which is 256, we obtain a * sample size bound by 8192. * * For a sample of at most 8KB of data per data range: 16 consecutive bytes * from up to 512 locations. 
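 *
 * Worked out with the constants above and BTRFS_MAX_UNCOMPRESSED (128KiB):
 * 131072 / SAMPLING_INTERVAL = 512 sampling locations, and
 * 512 * SAMPLING_READ_SIZE = 8192 bytes, which is exactly the
 * MAX_SAMPLE_SIZE defined right below.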
*/ #define MAX_SAMPLE_SIZE (BTRFS_MAX_UNCOMPRESSED * \ SAMPLING_READ_SIZE / SAMPLING_INTERVAL) struct bucket_item { u32 count; }; struct heuristic_ws { /* Partial copy of input data */ u8 *sample; u32 sample_size; /* Buckets store counters for each byte value */ struct bucket_item *bucket; /* Sorting buffer */ struct bucket_item *bucket_b; struct list_head list; }; static struct workspace_manager heuristic_wsm; static void free_heuristic_ws(struct list_head *ws) { struct heuristic_ws *workspace; workspace = list_entry(ws, struct heuristic_ws, list); kvfree(workspace->sample); kfree(workspace->bucket); kfree(workspace->bucket_b); kfree(workspace); } static struct list_head *alloc_heuristic_ws(unsigned int level) { struct heuristic_ws *ws; ws = kzalloc(sizeof(*ws), GFP_KERNEL); if (!ws) return ERR_PTR(-ENOMEM); ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL); if (!ws->sample) goto fail; ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL); if (!ws->bucket) goto fail; ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL); if (!ws->bucket_b) goto fail; INIT_LIST_HEAD(&ws->list); return &ws->list; fail: free_heuristic_ws(&ws->list); return ERR_PTR(-ENOMEM); } const struct btrfs_compress_op btrfs_heuristic_compress = { .workspace_manager = &heuristic_wsm, }; static const struct btrfs_compress_op * const btrfs_compress_op[] = { /* The heuristic is represented as compression type 0 */ &btrfs_heuristic_compress, &btrfs_zlib_compress, &btrfs_lzo_compress, &btrfs_zstd_compress, }; static struct list_head *alloc_workspace(int type, unsigned int level) { switch (type) { case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(level); case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(level); case BTRFS_COMPRESS_LZO: return lzo_alloc_workspace(level); case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(level); default: /* * This can't happen, the type is validated several times * before we get here. */ BUG(); } } static void free_workspace(int type, struct list_head *ws) { switch (type) { case BTRFS_COMPRESS_NONE: return free_heuristic_ws(ws); case BTRFS_COMPRESS_ZLIB: return zlib_free_workspace(ws); case BTRFS_COMPRESS_LZO: return lzo_free_workspace(ws); case BTRFS_COMPRESS_ZSTD: return zstd_free_workspace(ws); default: /* * This can't happen, the type is validated several times * before we get here. */ BUG(); } } static void btrfs_init_workspace_manager(int type) { struct workspace_manager *wsm; struct list_head *workspace; wsm = btrfs_compress_op[type]->workspace_manager; INIT_LIST_HEAD(&wsm->idle_ws); spin_lock_init(&wsm->ws_lock); atomic_set(&wsm->total_ws, 0); init_waitqueue_head(&wsm->ws_wait); /* * Preallocate one workspace for each compression type so we can * guarantee forward progress in the worst case */ workspace = alloc_workspace(type, 0); if (IS_ERR(workspace)) { pr_warn( "BTRFS: cannot preallocate compression workspace, will try later\n"); } else { atomic_set(&wsm->total_ws, 1); wsm->free_ws = 1; list_add(workspace, &wsm->idle_ws); } } static void btrfs_cleanup_workspace_manager(int type) { struct workspace_manager *wsman; struct list_head *ws; wsman = btrfs_compress_op[type]->workspace_manager; while (!list_empty(&wsman->idle_ws)) { ws = wsman->idle_ws.next; list_del(ws); free_workspace(type, ws); atomic_dec(&wsman->total_ws); } } /* * This finds an available workspace or allocates a new one. * If it's not possible to allocate a new one, waits until there's one. * Preallocation makes a forward progress guarantees and we do not return * errors. 
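 *
 * Concretely (see the function body below): an idle workspace is reused when
 * one is on the list, a new one is allocated with memalloc_nofs_save() in
 * effect while the total count stays at or below num_online_cpus(), and past
 * that bound the caller sleeps on ws_wait until btrfs_put_workspace()
 * returns one to the idle list.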
*/ struct list_head *btrfs_get_workspace(int type, unsigned int level) { struct workspace_manager *wsm; struct list_head *workspace; int cpus = num_online_cpus(); unsigned nofs_flag; struct list_head *idle_ws; spinlock_t *ws_lock; atomic_t *total_ws; wait_queue_head_t *ws_wait; int *free_ws; wsm = btrfs_compress_op[type]->workspace_manager; idle_ws = &wsm->idle_ws; ws_lock = &wsm->ws_lock; total_ws = &wsm->total_ws; ws_wait = &wsm->ws_wait; free_ws = &wsm->free_ws; again: spin_lock(ws_lock); if (!list_empty(idle_ws)) { workspace = idle_ws->next; list_del(workspace); (*free_ws)--; spin_unlock(ws_lock); return workspace; } if (atomic_read(total_ws) > cpus) { DEFINE_WAIT(wait); spin_unlock(ws_lock); prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE); if (atomic_read(total_ws) > cpus && !*free_ws) schedule(); finish_wait(ws_wait, &wait); goto again; } atomic_inc(total_ws); spin_unlock(ws_lock); /* * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have * to turn it off here because we might get called from the restricted * context of btrfs_compress_bio/btrfs_compress_pages */ nofs_flag = memalloc_nofs_save(); workspace = alloc_workspace(type, level); memalloc_nofs_restore(nofs_flag); if (IS_ERR(workspace)) { atomic_dec(total_ws); wake_up(ws_wait); /* * Do not return the error but go back to waiting. There's a * workspace preallocated for each type and the compression * time is bounded so we get to a workspace eventually. This * makes our caller's life easier. * * To prevent silent and low-probability deadlocks (when the * initial preallocation fails), check if there are any * workspaces at all. */ if (atomic_read(total_ws) == 0) { static DEFINE_RATELIMIT_STATE(_rs, /* once per minute */ 60 * HZ, /* no burst */ 1); if (__ratelimit(&_rs)) { pr_warn("BTRFS: no compression workspaces, low memory, retrying\n"); } } goto again; } return workspace; } static struct list_head *get_workspace(int type, int level) { switch (type) { case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(type, level); case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(level); case BTRFS_COMPRESS_LZO: return btrfs_get_workspace(type, level); case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(level); default: /* * This can't happen, the type is validated several times * before we get here. */ BUG(); } } /* * put a workspace struct back on the list or free it if we have enough * idle ones sitting around */ void btrfs_put_workspace(int type, struct list_head *ws) { struct workspace_manager *wsm; struct list_head *idle_ws; spinlock_t *ws_lock; atomic_t *total_ws; wait_queue_head_t *ws_wait; int *free_ws; wsm = btrfs_compress_op[type]->workspace_manager; idle_ws = &wsm->idle_ws; ws_lock = &wsm->ws_lock; total_ws = &wsm->total_ws; ws_wait = &wsm->ws_wait; free_ws = &wsm->free_ws; spin_lock(ws_lock); if (*free_ws <= num_online_cpus()) { list_add(ws, idle_ws); (*free_ws)++; spin_unlock(ws_lock); goto wake; } spin_unlock(ws_lock); free_workspace(type, ws); atomic_dec(total_ws); wake: cond_wake_up(ws_wait); } static void put_workspace(int type, struct list_head *ws) { switch (type) { case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(type, ws); case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(type, ws); case BTRFS_COMPRESS_LZO: return btrfs_put_workspace(type, ws); case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(ws); default: /* * This can't happen, the type is validated several times * before we get here. 
*/ BUG(); } } /* * Adjust @level according to the limits of the compression algorithm or * fallback to default */ static unsigned int btrfs_compress_set_level(int type, unsigned level) { const struct btrfs_compress_op *ops = btrfs_compress_op[type]; if (level == 0) level = ops->default_level; else level = min(level, ops->max_level); return level; } /* * Given an address space and start and length, compress the bytes into @pages * that are allocated on demand. * * @type_level is encoded algorithm and level, where level 0 means whatever * default the algorithm chooses and is opaque here; * - compression algo are 0-3 * - the level are bits 4-7 * * @out_pages is an in/out parameter, holds maximum number of pages to allocate * and returns number of actually allocated pages * * @total_in is used to return the number of bytes actually read. It * may be smaller than the input length if we had to exit early because we * ran out of room in the pages array or because we cross the * max_out threshold. * * @total_out is an in/out parameter, must be set to the input length and will * be also used to return the total number of compressed bytes */ int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping, u64 start, struct page **pages, unsigned long *out_pages, unsigned long *total_in, unsigned long *total_out) { int type = btrfs_compress_type(type_level); int level = btrfs_compress_level(type_level); struct list_head *workspace; int ret; level = btrfs_compress_set_level(type, level); workspace = get_workspace(type, level); ret = compression_compress_pages(type, workspace, mapping, start, pages, out_pages, total_in, total_out); put_workspace(type, workspace); return ret; } static int btrfs_decompress_bio(struct compressed_bio *cb) { struct list_head *workspace; int ret; int type = cb->compress_type; workspace = get_workspace(type, 0); ret = compression_decompress_bio(workspace, cb); put_workspace(type, workspace); if (!ret) zero_fill_bio(&cb->orig_bbio->bio); return ret; } /* * a less complex decompression routine. Our compressed data fits in a * single page, and we want to read a single page out of it. * start_byte tells us the offset into the compressed data we're interested in */ int btrfs_decompress(int type, const u8 *data_in, struct page *dest_page, unsigned long start_byte, size_t srclen, size_t destlen) { struct list_head *workspace; int ret; workspace = get_workspace(type, 0); ret = compression_decompress(type, workspace, data_in, dest_page, start_byte, srclen, destlen); put_workspace(type, workspace); return ret; } int __init btrfs_init_compress(void) { if (bioset_init(&btrfs_compressed_bioset, BIO_POOL_SIZE, offsetof(struct compressed_bio, bbio.bio), BIOSET_NEED_BVECS)) return -ENOMEM; btrfs_init_workspace_manager(BTRFS_COMPRESS_NONE); btrfs_init_workspace_manager(BTRFS_COMPRESS_ZLIB); btrfs_init_workspace_manager(BTRFS_COMPRESS_LZO); zstd_init_workspace_manager(); return 0; } void __cold btrfs_exit_compress(void) { btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_NONE); btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB); btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO); zstd_cleanup_workspace_manager(); bioset_exit(&btrfs_compressed_bioset); } /* * Copy decompressed data from working buffer to pages. 
* * @buf: The decompressed data buffer * @buf_len: The decompressed data length * @decompressed: Number of bytes that are already decompressed inside the * compressed extent * @cb: The compressed extent descriptor * @orig_bio: The original bio that the caller wants to read for * * An easier to understand graph is like below: * * |<- orig_bio ->| |<- orig_bio->| * |<------- full decompressed extent ----->| * |<----------- @cb range ---->| * | |<-- @buf_len -->| * |<--- @decompressed --->| * * Note that, @cb can be a subpage of the full decompressed extent, but * @cb->start always has the same as the orig_file_offset value of the full * decompressed extent. * * When reading compressed extent, we have to read the full compressed extent, * while @orig_bio may only want part of the range. * Thus this function will ensure only data covered by @orig_bio will be copied * to. * * Return 0 if we have copied all needed contents for @orig_bio. * Return >0 if we need continue decompress. */ int btrfs_decompress_buf2page(const char *buf, u32 buf_len, struct compressed_bio *cb, u32 decompressed) { struct bio *orig_bio = &cb->orig_bbio->bio; /* Offset inside the full decompressed extent */ u32 cur_offset; cur_offset = decompressed; /* The main loop to do the copy */ while (cur_offset < decompressed + buf_len) { struct bio_vec bvec; size_t copy_len; u32 copy_start; /* Offset inside the full decompressed extent */ u32 bvec_offset; bvec = bio_iter_iovec(orig_bio, orig_bio->bi_iter); /* * cb->start may underflow, but subtracting that value can still * give us correct offset inside the full decompressed extent. */ bvec_offset = page_offset(bvec.bv_page) + bvec.bv_offset - cb->start; /* Haven't reached the bvec range, exit */ if (decompressed + buf_len <= bvec_offset) return 1; copy_start = max(cur_offset, bvec_offset); copy_len = min(bvec_offset + bvec.bv_len, decompressed + buf_len) - copy_start; ASSERT(copy_len); /* * Extra range check to ensure we didn't go beyond * @buf + @buf_len. */ ASSERT(copy_start - decompressed < buf_len); memcpy_to_page(bvec.bv_page, bvec.bv_offset, buf + copy_start - decompressed, copy_len); cur_offset += copy_len; bio_advance(orig_bio, copy_len); /* Finished the bio */ if (!orig_bio->bi_iter.bi_size) return 0; } return 1; } /* * Shannon Entropy calculation * * Pure byte distribution analysis fails to determine compressibility of data. * Try calculating entropy to estimate the average minimum number of bits * needed to encode the sampled data. * * For convenience, return the percentage of needed bits, instead of amount of * bits directly. * * @ENTROPY_LVL_ACEPTABLE - below that threshold, sample has low byte entropy * and can be compressible with high probability * * @ENTROPY_LVL_HIGH - data are not compressible with high probability * * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate. */ #define ENTROPY_LVL_ACEPTABLE (65) #define ENTROPY_LVL_HIGH (80) /* * For increasead precision in shannon_entropy calculation, * let's do pow(n, M) to save more digits after comma: * * - maximum int bit length is 64 * - ilog2(MAX_SAMPLE_SIZE) -> 13 * - 13 * 4 = 52 < 64 -> M = 4 * * So use pow(n, 4). 
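 *
 * With this scaling one unit is a quarter of a bit: shannon_entropy() below
 * uses entropy_max = 8 * ilog2_w(2) = 8 * ilog2(16) = 32 and reports the
 * estimate as a percentage of those 32 scaled units.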
*/ static inline u32 ilog2_w(u64 n) { return ilog2(n * n * n * n); } static u32 shannon_entropy(struct heuristic_ws *ws) { const u32 entropy_max = 8 * ilog2_w(2); u32 entropy_sum = 0; u32 p, p_base, sz_base; u32 i; sz_base = ilog2_w(ws->sample_size); for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) { p = ws->bucket[i].count; p_base = ilog2_w(p); entropy_sum += p * (sz_base - p_base); } entropy_sum /= ws->sample_size; return entropy_sum * 100 / entropy_max; } #define RADIX_BASE 4U #define COUNTERS_SIZE (1U << RADIX_BASE) static u8 get4bits(u64 num, int shift) { u8 low4bits; num >>= shift; /* Reverse order */ low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE); return low4bits; } /* * Use 4 bits as radix base * Use 16 u32 counters for calculating new position in buf array * * @array - array that will be sorted * @array_buf - buffer array to store sorting results * must be equal in size to @array * @num - array size */ static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf, int num) { u64 max_num; u64 buf_num; u32 counters[COUNTERS_SIZE]; u32 new_addr; u32 addr; int bitlen; int shift; int i; /* * Try avoid useless loop iterations for small numbers stored in big * counters. Example: 48 33 4 ... in 64bit array */ max_num = array[0].count; for (i = 1; i < num; i++) { buf_num = array[i].count; if (buf_num > max_num) max_num = buf_num; } buf_num = ilog2(max_num); bitlen = ALIGN(buf_num, RADIX_BASE * 2); shift = 0; while (shift < bitlen) { memset(counters, 0, sizeof(counters)); for (i = 0; i < num; i++) { buf_num = array[i].count; addr = get4bits(buf_num, shift); counters[addr]++; } for (i = 1; i < COUNTERS_SIZE; i++) counters[i] += counters[i - 1]; for (i = num - 1; i >= 0; i--) { buf_num = array[i].count; addr = get4bits(buf_num, shift); counters[addr]--; new_addr = counters[addr]; array_buf[new_addr] = array[i]; } shift += RADIX_BASE; /* * Normal radix expects to move data from a temporary array, to * the main one. But that requires some CPU time. Avoid that * by doing another sort iteration to original array instead of * memcpy() */ memset(counters, 0, sizeof(counters)); for (i = 0; i < num; i ++) { buf_num = array_buf[i].count; addr = get4bits(buf_num, shift); counters[addr]++; } for (i = 1; i < COUNTERS_SIZE; i++) counters[i] += counters[i - 1]; for (i = num - 1; i >= 0; i--) { buf_num = array_buf[i].count; addr = get4bits(buf_num, shift); counters[addr]--; new_addr = counters[addr]; array[new_addr] = array_buf[i]; } shift += RADIX_BASE; } } /* * Size of the core byte set - how many bytes cover 90% of the sample * * There are several types of structured binary data that use nearly all byte * values. The distribution can be uniform and counts in all buckets will be * nearly the same (eg. encrypted data). Unlikely to be compressible. * * Other possibility is normal (Gaussian) distribution, where the data could * be potentially compressible, but we have to take a few more steps to decide * how much. 
* * @BYTE_CORE_SET_LOW - main part of byte values repeated frequently, * compression algo can easy fix that * @BYTE_CORE_SET_HIGH - data have uniform distribution and with high * probability is not compressible */ #define BYTE_CORE_SET_LOW (64) #define BYTE_CORE_SET_HIGH (200) static int byte_core_set_size(struct heuristic_ws *ws) { u32 i; u32 coreset_sum = 0; const u32 core_set_threshold = ws->sample_size * 90 / 100; struct bucket_item *bucket = ws->bucket; /* Sort in reverse order */ radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE); for (i = 0; i < BYTE_CORE_SET_LOW; i++) coreset_sum += bucket[i].count; if (coreset_sum > core_set_threshold) return i; for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) { coreset_sum += bucket[i].count; if (coreset_sum > core_set_threshold) break; } return i; } /* * Count byte values in buckets. * This heuristic can detect textual data (configs, xml, json, html, etc). * Because in most text-like data byte set is restricted to limited number of * possible characters, and that restriction in most cases makes data easy to * compress. * * @BYTE_SET_THRESHOLD - consider all data within this byte set size: * less - compressible * more - need additional analysis */ #define BYTE_SET_THRESHOLD (64) static u32 byte_set_size(const struct heuristic_ws *ws) { u32 i; u32 byte_set_size = 0; for (i = 0; i < BYTE_SET_THRESHOLD; i++) { if (ws->bucket[i].count > 0) byte_set_size++; } /* * Continue collecting count of byte values in buckets. If the byte * set size is bigger then the threshold, it's pointless to continue, * the detection technique would fail for this type of data. */ for (; i < BUCKET_SIZE; i++) { if (ws->bucket[i].count > 0) { byte_set_size++; if (byte_set_size > BYTE_SET_THRESHOLD) return byte_set_size; } } return byte_set_size; } static bool sample_repeated_patterns(struct heuristic_ws *ws) { const u32 half_of_sample = ws->sample_size / 2; const u8 *data = ws->sample; return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0; } static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end, struct heuristic_ws *ws) { struct page *page; u64 index, index_end; u32 i, curr_sample_pos; u8 *in_data; /* * Compression handles the input data by chunks of 128KiB * (defined by BTRFS_MAX_UNCOMPRESSED) * * We do the same for the heuristic and loop over the whole range. * * MAX_SAMPLE_SIZE - calculated under assumption that heuristic will * process no more than BTRFS_MAX_UNCOMPRESSED at a time. */ if (end - start > BTRFS_MAX_UNCOMPRESSED) end = start + BTRFS_MAX_UNCOMPRESSED; index = start >> PAGE_SHIFT; index_end = end >> PAGE_SHIFT; /* Don't miss unaligned end */ if (!PAGE_ALIGNED(end)) index_end++; curr_sample_pos = 0; while (index < index_end) { page = find_get_page(inode->i_mapping, index); in_data = kmap_local_page(page); /* Handle case where the start is not aligned to PAGE_SIZE */ i = start % PAGE_SIZE; while (i < PAGE_SIZE - SAMPLING_READ_SIZE) { /* Don't sample any garbage from the last page */ if (start > end - SAMPLING_READ_SIZE) break; memcpy(&ws->sample[curr_sample_pos], &in_data[i], SAMPLING_READ_SIZE); i += SAMPLING_INTERVAL; start += SAMPLING_INTERVAL; curr_sample_pos += SAMPLING_READ_SIZE; } kunmap_local(in_data); put_page(page); index++; } ws->sample_size = curr_sample_pos; } /* * Compression heuristic. 
* * For now is's a naive and optimistic 'return true', we'll extend the logic to * quickly (compared to direct compression) detect data characteristics * (compressible/incompressible) to avoid wasting CPU time on incompressible * data. * * The following types of analysis can be performed: * - detect mostly zero data * - detect data with low "byte set" size (text, etc) * - detect data with low/high "core byte" set * * Return non-zero if the compression should be done, 0 otherwise. */ int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end) { struct list_head *ws_list = get_workspace(0, 0); struct heuristic_ws *ws; u32 i; u8 byte; int ret = 0; ws = list_entry(ws_list, struct heuristic_ws, list); heuristic_collect_sample(inode, start, end, ws); if (sample_repeated_patterns(ws)) { ret = 1; goto out; } memset(ws->bucket, 0, sizeof(*ws->bucket)*BUCKET_SIZE); for (i = 0; i < ws->sample_size; i++) { byte = ws->sample[i]; ws->bucket[byte].count++; } i = byte_set_size(ws); if (i < BYTE_SET_THRESHOLD) { ret = 2; goto out; } i = byte_core_set_size(ws); if (i <= BYTE_CORE_SET_LOW) { ret = 3; goto out; } if (i >= BYTE_CORE_SET_HIGH) { ret = 0; goto out; } i = shannon_entropy(ws); if (i <= ENTROPY_LVL_ACEPTABLE) { ret = 4; goto out; } /* * For the levels below ENTROPY_LVL_HIGH, additional analysis would be * needed to give green light to compression. * * For now just assume that compression at that level is not worth the * resources because: * * 1. it is possible to defrag the data later * * 2. the data would turn out to be hardly compressible, eg. 150 byte * values, every bucket has counter at level ~54. The heuristic would * be confused. This can happen when data have some internal repeated * patterns like "abbacbbc...". This can be detected by analyzing * pairs of bytes, which is too costly. */ if (i < ENTROPY_LVL_HIGH) { ret = 5; goto out; } else { ret = 0; goto out; } out: put_workspace(0, ws_list); return ret; } /* * Convert the compression suffix (eg. after "zlib" starting with ":") to * level, unrecognized string will set the default level */ unsigned int btrfs_compress_str2level(unsigned int type, const char *str) { unsigned int level = 0; int ret; if (!type) return 0; if (str[0] == ':') { ret = kstrtouint(str + 1, 10, &level); if (ret) level = 0; } level = btrfs_compress_set_level(type, level); return level; }
linux-master
fs/btrfs/compression.c
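btrfs_compress_heuristic() in the file above decides whether a range looks compressible from a byte histogram: a repeated-pattern check, the byte set size, the core byte set size, and finally a Shannon entropy estimate. The sketch below is a minimal user-space model of just the bucket counting, byte-set and entropy steps; the demo_* names, the fabricated "text-like" sample, and the use of floating-point log2() (where the kernel uses the integer ilog2_w() approximation) are assumptions made for illustration, not the kernel's implementation.

#include <math.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_BUCKET_SIZE	256
#define DEMO_BYTE_SET_THRESHOLD	64u
#define DEMO_ENTROPY_ACCEPTABLE	65u	/* percent of the 8-bit maximum */

/* Counterpart of byte_set_size(): how many distinct byte values occur. */
static unsigned int demo_byte_set_size(const uint32_t bucket[DEMO_BUCKET_SIZE])
{
	unsigned int i, n = 0;

	for (i = 0; i < DEMO_BUCKET_SIZE; i++)
		if (bucket[i])
			n++;
	return n;
}

/* Counterpart of shannon_entropy(): entropy as a percentage of 8 bits/byte. */
static unsigned int demo_entropy_percent(const uint32_t bucket[DEMO_BUCKET_SIZE],
					 size_t sample_size)
{
	double bits = 0.0;
	unsigned int i;

	for (i = 0; i < DEMO_BUCKET_SIZE; i++) {
		if (!bucket[i])
			continue;
		double p = (double)bucket[i] / (double)sample_size;

		bits -= p * log2(p);
	}
	return (unsigned int)(bits * 100.0 / 8.0);
}

int main(void)
{
	uint32_t bucket[DEMO_BUCKET_SIZE] = { 0 };
	uint8_t sample[4096];
	size_t i;

	/* Fabricated "text-like" sample: a ten symbol alphabet repeats. */
	for (i = 0; i < sizeof(sample); i++)
		sample[i] = "abcdefgh \n"[i % 10];

	/* Same idea as the bucket fill loop in btrfs_compress_heuristic(). */
	for (i = 0; i < sizeof(sample); i++)
		bucket[sample[i]]++;

	printf("byte set size: %u (compressible below %u)\n",
	       demo_byte_set_size(bucket), DEMO_BYTE_SET_THRESHOLD);
	printf("entropy: %u%% (compressible below %u%%)\n",
	       demo_entropy_percent(bucket, sizeof(sample)),
	       DEMO_ENTROPY_ACCEPTABLE);
	return 0;
}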
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2008 Oracle. All rights reserved. */ #include <linux/sched.h> #include <linux/slab.h> #include <linux/blkdev.h> #include <linux/list_sort.h> #include <linux/iversion.h> #include "misc.h" #include "ctree.h" #include "tree-log.h" #include "disk-io.h" #include "locking.h" #include "print-tree.h" #include "backref.h" #include "compression.h" #include "qgroup.h" #include "block-group.h" #include "space-info.h" #include "zoned.h" #include "inode-item.h" #include "fs.h" #include "accessors.h" #include "extent-tree.h" #include "root-tree.h" #include "dir-item.h" #include "file-item.h" #include "file.h" #include "orphan.h" #include "tree-checker.h" #define MAX_CONFLICT_INODES 10 /* magic values for the inode_only field in btrfs_log_inode: * * LOG_INODE_ALL means to log everything * LOG_INODE_EXISTS means to log just enough to recreate the inode * during log replay */ enum { LOG_INODE_ALL, LOG_INODE_EXISTS, }; /* * directory trouble cases * * 1) on rename or unlink, if the inode being unlinked isn't in the fsync * log, we must force a full commit before doing an fsync of the directory * where the unlink was done. * ---> record transid of last unlink/rename per directory * * mkdir foo/some_dir * normal commit * rename foo/some_dir foo2/some_dir * mkdir foo/some_dir * fsync foo/some_dir/some_file * * The fsync above will unlink the original some_dir without recording * it in its new location (foo2). After a crash, some_dir will be gone * unless the fsync of some_file forces a full commit * * 2) we must log any new names for any file or dir that is in the fsync * log. ---> check inode while renaming/linking. * * 2a) we must log any new names for any file or dir during rename * when the directory they are being removed from was logged. * ---> check inode and old parent dir during rename * * 2a is actually the more important variant. With the extra logging * a crash might unlink the old name without recreating the new one * * 3) after a crash, we must go through any directories with a link count * of zero and redo the rm -rf * * mkdir f1/foo * normal commit * rm -rf f1/foo * fsync(f1) * * The directory f1 was fully removed from the FS, but fsync was never * called on f1, only its parent dir. After a crash the rm -rf must * be replayed. This must be able to recurse down the entire * directory tree. The inode link count fixup code takes care of the * ugly details. */ /* * stages for the tree walking. The first * stage (0) is to only pin down the blocks we find * the second stage (1) is to make sure that all the inodes * we find in the log are created in the subvolume. * * The last stage is to deal with directories and links and extents * and all the other fun semantics */ enum { LOG_WALK_PIN_ONLY, LOG_WALK_REPLAY_INODES, LOG_WALK_REPLAY_DIR_INDEX, LOG_WALK_REPLAY_ALL, }; static int btrfs_log_inode(struct btrfs_trans_handle *trans, struct btrfs_inode *inode, int inode_only, struct btrfs_log_ctx *ctx); static int link_to_fixup_dir(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 objectid); static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_root *log, struct btrfs_path *path, u64 dirid, int del_all); static void wait_log_commit(struct btrfs_root *root, int transid); /* * tree logging is a special write ahead log used to make sure that * fsyncs and O_SYNCs can happen without doing full tree commits. 
* * Full tree commits are expensive because they require commonly * modified blocks to be recowed, creating many dirty pages in the * extent tree an 4x-6x higher write load than ext3. * * Instead of doing a tree commit on every fsync, we use the * key ranges and transaction ids to find items for a given file or directory * that have changed in this transaction. Those items are copied into * a special tree (one per subvolume root), that tree is written to disk * and then the fsync is considered complete. * * After a crash, items are copied out of the log-tree back into the * subvolume tree. Any file data extents found are recorded in the extent * allocation tree, and the log-tree freed. * * The log tree is read three times, once to pin down all the extents it is * using in ram and once, once to create all the inodes logged in the tree * and once to do all the other items. */ /* * start a sub transaction and setup the log tree * this increments the log tree writer count to make the people * syncing the tree wait for us to finish */ static int start_log_trans(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_log_ctx *ctx) { struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_root *tree_root = fs_info->tree_root; const bool zoned = btrfs_is_zoned(fs_info); int ret = 0; bool created = false; /* * First check if the log root tree was already created. If not, create * it before locking the root's log_mutex, just to keep lockdep happy. */ if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &tree_root->state)) { mutex_lock(&tree_root->log_mutex); if (!fs_info->log_root_tree) { ret = btrfs_init_log_root_tree(trans, fs_info); if (!ret) { set_bit(BTRFS_ROOT_HAS_LOG_TREE, &tree_root->state); created = true; } } mutex_unlock(&tree_root->log_mutex); if (ret) return ret; } mutex_lock(&root->log_mutex); again: if (root->log_root) { int index = (root->log_transid + 1) % 2; if (btrfs_need_log_full_commit(trans)) { ret = BTRFS_LOG_FORCE_COMMIT; goto out; } if (zoned && atomic_read(&root->log_commit[index])) { wait_log_commit(root, root->log_transid - 1); goto again; } if (!root->log_start_pid) { clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state); root->log_start_pid = current->pid; } else if (root->log_start_pid != current->pid) { set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state); } } else { /* * This means fs_info->log_root_tree was already created * for some other FS trees. Do the full commit not to mix * nodes from multiple log transactions to do sequential * writing. 
*/ if (zoned && !created) { ret = BTRFS_LOG_FORCE_COMMIT; goto out; } ret = btrfs_add_log_tree(trans, root); if (ret) goto out; set_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state); clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state); root->log_start_pid = current->pid; } atomic_inc(&root->log_writers); if (!ctx->logging_new_name) { int index = root->log_transid % 2; list_add_tail(&ctx->list, &root->log_ctxs[index]); ctx->log_transid = root->log_transid; } out: mutex_unlock(&root->log_mutex); return ret; } /* * returns 0 if there was a log transaction running and we were able * to join, or returns -ENOENT if there were not transactions * in progress */ static int join_running_log_trans(struct btrfs_root *root) { const bool zoned = btrfs_is_zoned(root->fs_info); int ret = -ENOENT; if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state)) return ret; mutex_lock(&root->log_mutex); again: if (root->log_root) { int index = (root->log_transid + 1) % 2; ret = 0; if (zoned && atomic_read(&root->log_commit[index])) { wait_log_commit(root, root->log_transid - 1); goto again; } atomic_inc(&root->log_writers); } mutex_unlock(&root->log_mutex); return ret; } /* * This either makes the current running log transaction wait * until you call btrfs_end_log_trans() or it makes any future * log transactions wait until you call btrfs_end_log_trans() */ void btrfs_pin_log_trans(struct btrfs_root *root) { atomic_inc(&root->log_writers); } /* * indicate we're done making changes to the log tree * and wake up anyone waiting to do a sync */ void btrfs_end_log_trans(struct btrfs_root *root) { if (atomic_dec_and_test(&root->log_writers)) { /* atomic_dec_and_test implies a barrier */ cond_wake_up_nomb(&root->log_writer_wait); } } /* * the walk control struct is used to pass state down the chain when * processing the log tree. The stage field tells us which part * of the log tree processing we are currently doing. The others * are state fields used for that specific part */ struct walk_control { /* should we free the extent on disk when done? This is used * at transaction commit time while freeing a log tree */ int free; /* pin only walk, we record which extents on disk belong to the * log trees */ int pin; /* what stage of the replay code we're currently in */ int stage; /* * Ignore any items from the inode currently being processed. Needs * to be set every time we find a BTRFS_INODE_ITEM_KEY and we are in * the LOG_WALK_REPLAY_INODES stage. */ bool ignore_cur_inode; /* the root we are currently replaying */ struct btrfs_root *replay_dest; /* the trans handle for the current replay */ struct btrfs_trans_handle *trans; /* the function that gets used to process blocks we find in the * tree. Note the extent_buffer might not be up to date when it is * passed in, and it must be checked or read if you need the data * inside it */ int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb, struct walk_control *wc, u64 gen, int level); }; /* * process_func used to pin down extents, write them or wait on them */ static int process_one_buffer(struct btrfs_root *log, struct extent_buffer *eb, struct walk_control *wc, u64 gen, int level) { struct btrfs_fs_info *fs_info = log->fs_info; int ret = 0; /* * If this fs is mixed then we need to be able to process the leaves to * pin down any logged extents, so we have to read the block. 
*/ if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) { struct btrfs_tree_parent_check check = { .level = level, .transid = gen }; ret = btrfs_read_extent_buffer(eb, &check); if (ret) return ret; } if (wc->pin) { ret = btrfs_pin_extent_for_log_replay(wc->trans, eb->start, eb->len); if (ret) return ret; if (btrfs_buffer_uptodate(eb, gen, 0) && btrfs_header_level(eb) == 0) ret = btrfs_exclude_logged_extents(eb); } return ret; } /* * Item overwrite used by replay and tree logging. eb, slot and key all refer * to the src data we are copying out. * * root is the tree we are copying into, and path is a scratch * path for use in this function (it should be released on entry and * will be released on exit). * * If the key is already in the destination tree the existing item is * overwritten. If the existing item isn't big enough, it is extended. * If it is too large, it is truncated. * * If the key isn't in the destination yet, a new item is inserted. */ static int overwrite_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct extent_buffer *eb, int slot, struct btrfs_key *key) { int ret; u32 item_size; u64 saved_i_size = 0; int save_old_i_size = 0; unsigned long src_ptr; unsigned long dst_ptr; bool inode_item = key->type == BTRFS_INODE_ITEM_KEY; /* * This is only used during log replay, so the root is always from a * fs/subvolume tree. In case we ever need to support a log root, then * we'll have to clone the leaf in the path, release the path and use * the leaf before writing into the log tree. See the comments at * copy_items() for more details. */ ASSERT(root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID); item_size = btrfs_item_size(eb, slot); src_ptr = btrfs_item_ptr_offset(eb, slot); /* Look for the key in the destination tree. */ ret = btrfs_search_slot(NULL, root, key, path, 0, 0); if (ret < 0) return ret; if (ret == 0) { char *src_copy; char *dst_copy; u32 dst_size = btrfs_item_size(path->nodes[0], path->slots[0]); if (dst_size != item_size) goto insert; if (item_size == 0) { btrfs_release_path(path); return 0; } dst_copy = kmalloc(item_size, GFP_NOFS); src_copy = kmalloc(item_size, GFP_NOFS); if (!dst_copy || !src_copy) { btrfs_release_path(path); kfree(dst_copy); kfree(src_copy); return -ENOMEM; } read_extent_buffer(eb, src_copy, src_ptr, item_size); dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); read_extent_buffer(path->nodes[0], dst_copy, dst_ptr, item_size); ret = memcmp(dst_copy, src_copy, item_size); kfree(dst_copy); kfree(src_copy); /* * they have the same contents, just return, this saves * us from cowing blocks in the destination tree and doing * extra writes that may not have been done by a previous * sync */ if (ret == 0) { btrfs_release_path(path); return 0; } /* * We need to load the old nbytes into the inode so when we * replay the extents we've logged we get the right nbytes. */ if (inode_item) { struct btrfs_inode_item *item; u64 nbytes; u32 mode; item = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_inode_item); nbytes = btrfs_inode_nbytes(path->nodes[0], item); item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item); btrfs_set_inode_nbytes(eb, item, nbytes); /* * If this is a directory we need to reset the i_size to * 0 so that we can set it up properly when replaying * the rest of the items in this log. 
*/ mode = btrfs_inode_mode(eb, item); if (S_ISDIR(mode)) btrfs_set_inode_size(eb, item, 0); } } else if (inode_item) { struct btrfs_inode_item *item; u32 mode; /* * New inode, set nbytes to 0 so that the nbytes comes out * properly when we replay the extents. */ item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item); btrfs_set_inode_nbytes(eb, item, 0); /* * If this is a directory we need to reset the i_size to 0 so * that we can set it up properly when replaying the rest of * the items in this log. */ mode = btrfs_inode_mode(eb, item); if (S_ISDIR(mode)) btrfs_set_inode_size(eb, item, 0); } insert: btrfs_release_path(path); /* try to insert the key into the destination tree */ path->skip_release_on_error = 1; ret = btrfs_insert_empty_item(trans, root, path, key, item_size); path->skip_release_on_error = 0; /* make sure any existing item is the correct size */ if (ret == -EEXIST || ret == -EOVERFLOW) { u32 found_size; found_size = btrfs_item_size(path->nodes[0], path->slots[0]); if (found_size > item_size) btrfs_truncate_item(path, item_size, 1); else if (found_size < item_size) btrfs_extend_item(path, item_size - found_size); } else if (ret) { return ret; } dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); /* don't overwrite an existing inode if the generation number * was logged as zero. This is done when the tree logging code * is just logging an inode to make sure it exists after recovery. * * Also, don't overwrite i_size on directories during replay. * log replay inserts and removes directory items based on the * state of the tree found in the subvolume, and i_size is modified * as it goes */ if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) { struct btrfs_inode_item *src_item; struct btrfs_inode_item *dst_item; src_item = (struct btrfs_inode_item *)src_ptr; dst_item = (struct btrfs_inode_item *)dst_ptr; if (btrfs_inode_generation(eb, src_item) == 0) { struct extent_buffer *dst_eb = path->nodes[0]; const u64 ino_size = btrfs_inode_size(eb, src_item); /* * For regular files an ino_size == 0 is used only when * logging that an inode exists, as part of a directory * fsync, and the inode wasn't fsynced before. In this * case don't set the size of the inode in the fs/subvol * tree, otherwise we would be throwing valid data away. 
*/ if (S_ISREG(btrfs_inode_mode(eb, src_item)) && S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) && ino_size != 0) btrfs_set_inode_size(dst_eb, dst_item, ino_size); goto no_copy; } if (S_ISDIR(btrfs_inode_mode(eb, src_item)) && S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) { save_old_i_size = 1; saved_i_size = btrfs_inode_size(path->nodes[0], dst_item); } } copy_extent_buffer(path->nodes[0], eb, dst_ptr, src_ptr, item_size); if (save_old_i_size) { struct btrfs_inode_item *dst_item; dst_item = (struct btrfs_inode_item *)dst_ptr; btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size); } /* make sure the generation is filled in */ if (key->type == BTRFS_INODE_ITEM_KEY) { struct btrfs_inode_item *dst_item; dst_item = (struct btrfs_inode_item *)dst_ptr; if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) { btrfs_set_inode_generation(path->nodes[0], dst_item, trans->transid); } } no_copy: btrfs_mark_buffer_dirty(path->nodes[0]); btrfs_release_path(path); return 0; } static int read_alloc_one_name(struct extent_buffer *eb, void *start, int len, struct fscrypt_str *name) { char *buf; buf = kmalloc(len, GFP_NOFS); if (!buf) return -ENOMEM; read_extent_buffer(eb, buf, (unsigned long)start, len); name->name = buf; name->len = len; return 0; } /* * simple helper to read an inode off the disk from a given root * This can only be called for subvolume roots and not for the log */ static noinline struct inode *read_one_inode(struct btrfs_root *root, u64 objectid) { struct inode *inode; inode = btrfs_iget(root->fs_info->sb, objectid, root); if (IS_ERR(inode)) inode = NULL; return inode; } /* replays a single extent in 'eb' at 'slot' with 'key' into the * subvolume 'root'. path is released on entry and should be released * on exit. * * extents in the log tree have not been allocated out of the extent * tree yet. So, this completes the allocation, taking a reference * as required if the extent already exists or creating a new extent * if it isn't in the extent allocation tree yet. * * The extent is inserted into the file, dropping any existing extents * from the file that overlap the new one. */ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct extent_buffer *eb, int slot, struct btrfs_key *key) { struct btrfs_drop_extents_args drop_args = { 0 }; struct btrfs_fs_info *fs_info = root->fs_info; int found_type; u64 extent_end; u64 start = key->offset; u64 nbytes = 0; struct btrfs_file_extent_item *item; struct inode *inode = NULL; unsigned long size; int ret = 0; item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); found_type = btrfs_file_extent_type(eb, item); if (found_type == BTRFS_FILE_EXTENT_REG || found_type == BTRFS_FILE_EXTENT_PREALLOC) { nbytes = btrfs_file_extent_num_bytes(eb, item); extent_end = start + nbytes; /* * We don't add to the inodes nbytes if we are prealloc or a * hole. */ if (btrfs_file_extent_disk_bytenr(eb, item) == 0) nbytes = 0; } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { size = btrfs_file_extent_ram_bytes(eb, item); nbytes = btrfs_file_extent_ram_bytes(eb, item); extent_end = ALIGN(start + size, fs_info->sectorsize); } else { ret = 0; goto out; } inode = read_one_inode(root, key->objectid); if (!inode) { ret = -EIO; goto out; } /* * first check to see if we already have this extent in the * file. This must be done before the btrfs_drop_extents run * so we don't try to drop this extent. 
*/ ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(BTRFS_I(inode)), start, 0); if (ret == 0 && (found_type == BTRFS_FILE_EXTENT_REG || found_type == BTRFS_FILE_EXTENT_PREALLOC)) { struct btrfs_file_extent_item cmp1; struct btrfs_file_extent_item cmp2; struct btrfs_file_extent_item *existing; struct extent_buffer *leaf; leaf = path->nodes[0]; existing = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); read_extent_buffer(eb, &cmp1, (unsigned long)item, sizeof(cmp1)); read_extent_buffer(leaf, &cmp2, (unsigned long)existing, sizeof(cmp2)); /* * we already have a pointer to this exact extent, * we don't have to do anything */ if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) { btrfs_release_path(path); goto out; } } btrfs_release_path(path); /* drop any overlapping extents */ drop_args.start = start; drop_args.end = extent_end; drop_args.drop_cache = true; ret = btrfs_drop_extents(trans, root, BTRFS_I(inode), &drop_args); if (ret) goto out; if (found_type == BTRFS_FILE_EXTENT_REG || found_type == BTRFS_FILE_EXTENT_PREALLOC) { u64 offset; unsigned long dest_offset; struct btrfs_key ins; if (btrfs_file_extent_disk_bytenr(eb, item) == 0 && btrfs_fs_incompat(fs_info, NO_HOLES)) goto update_inode; ret = btrfs_insert_empty_item(trans, root, path, key, sizeof(*item)); if (ret) goto out; dest_offset = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); copy_extent_buffer(path->nodes[0], eb, dest_offset, (unsigned long)item, sizeof(*item)); ins.objectid = btrfs_file_extent_disk_bytenr(eb, item); ins.offset = btrfs_file_extent_disk_num_bytes(eb, item); ins.type = BTRFS_EXTENT_ITEM_KEY; offset = key->offset - btrfs_file_extent_offset(eb, item); /* * Manually record dirty extent, as here we did a shallow * file extent item copy and skip normal backref update, * but modifying extent tree all by ourselves. * So need to manually record dirty extent for qgroup, * as the owner of the file extent changed from log tree * (doesn't affect qgroup) to fs/file tree(affects qgroup) */ ret = btrfs_qgroup_trace_extent(trans, btrfs_file_extent_disk_bytenr(eb, item), btrfs_file_extent_disk_num_bytes(eb, item)); if (ret < 0) goto out; if (ins.objectid > 0) { struct btrfs_ref ref = { 0 }; u64 csum_start; u64 csum_end; LIST_HEAD(ordered_sums); /* * is this extent already allocated in the extent * allocation tree? If so, just add a reference */ ret = btrfs_lookup_data_extent(fs_info, ins.objectid, ins.offset); if (ret < 0) { goto out; } else if (ret == 0) { btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, ins.objectid, ins.offset, 0); btrfs_init_data_ref(&ref, root->root_key.objectid, key->objectid, offset, 0, false); ret = btrfs_inc_extent_ref(trans, &ref); if (ret) goto out; } else { /* * insert the extent pointer in the extent * allocation tree */ ret = btrfs_alloc_logged_file_extent(trans, root->root_key.objectid, key->objectid, offset, &ins); if (ret) goto out; } btrfs_release_path(path); if (btrfs_file_extent_compression(eb, item)) { csum_start = ins.objectid; csum_end = csum_start + ins.offset; } else { csum_start = ins.objectid + btrfs_file_extent_offset(eb, item); csum_end = csum_start + btrfs_file_extent_num_bytes(eb, item); } ret = btrfs_lookup_csums_list(root->log_root, csum_start, csum_end - 1, &ordered_sums, 0, false); if (ret) goto out; /* * Now delete all existing cums in the csum root that * cover our range. 
We do this because we can have an * extent that is completely referenced by one file * extent item and partially referenced by another * file extent item (like after using the clone or * extent_same ioctls). In this case if we end up doing * the replay of the one that partially references the * extent first, and we do not do the csum deletion * below, we can get 2 csum items in the csum tree that * overlap each other. For example, imagine our log has * the two following file extent items: * * key (257 EXTENT_DATA 409600) * extent data disk byte 12845056 nr 102400 * extent data offset 20480 nr 20480 ram 102400 * * key (257 EXTENT_DATA 819200) * extent data disk byte 12845056 nr 102400 * extent data offset 0 nr 102400 ram 102400 * * Where the second one fully references the 100K extent * that starts at disk byte 12845056, and the log tree * has a single csum item that covers the entire range * of the extent: * * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100 * * After the first file extent item is replayed, the * csum tree gets the following csum item: * * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20 * * Which covers the 20K sub-range starting at offset 20K * of our extent. Now when we replay the second file * extent item, if we do not delete existing csum items * that cover any of its blocks, we end up getting two * csum items in our csum tree that overlap each other: * * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20 * * Which is a problem, because after this anyone trying * to lookup up for the checksum of any block of our * extent starting at an offset of 40K or higher, will * end up looking at the second csum item only, which * does not contain the checksum for any block starting * at offset 40K or higher of our extent. */ while (!list_empty(&ordered_sums)) { struct btrfs_ordered_sum *sums; struct btrfs_root *csum_root; sums = list_entry(ordered_sums.next, struct btrfs_ordered_sum, list); csum_root = btrfs_csum_root(fs_info, sums->logical); if (!ret) ret = btrfs_del_csums(trans, csum_root, sums->logical, sums->len); if (!ret) ret = btrfs_csum_file_blocks(trans, csum_root, sums); list_del(&sums->list); kfree(sums); } if (ret) goto out; } else { btrfs_release_path(path); } } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { /* inline extents are easy, we just overwrite them */ ret = overwrite_item(trans, root, path, eb, slot, key); if (ret) goto out; } ret = btrfs_inode_set_file_extent_range(BTRFS_I(inode), start, extent_end - start); if (ret) goto out; update_inode: btrfs_update_inode_bytes(BTRFS_I(inode), nbytes, drop_args.bytes_found); ret = btrfs_update_inode(trans, root, BTRFS_I(inode)); out: iput(inode); return ret; } static int unlink_inode_for_log_replay(struct btrfs_trans_handle *trans, struct btrfs_inode *dir, struct btrfs_inode *inode, const struct fscrypt_str *name) { int ret; ret = btrfs_unlink_inode(trans, dir, inode, name); if (ret) return ret; /* * Whenever we need to check if a name exists or not, we check the * fs/subvolume tree. So after an unlink we must run delayed items, so * that future checks for a name during log replay see that the name * does not exists anymore. */ return btrfs_run_delayed_items(trans); } /* * when cleaning up conflicts between the directory names in the * subvolume, directory names in the log and directory names in the * inode back references, we may have to unlink inodes from directories. 
 *
 * This is a helper function to do the unlink of a specific directory
 * item
 */
static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
                                      struct btrfs_path *path,
                                      struct btrfs_inode *dir,
                                      struct btrfs_dir_item *di)
{
        struct btrfs_root *root = dir->root;
        struct inode *inode;
        struct fscrypt_str name;
        struct extent_buffer *leaf;
        struct btrfs_key location;
        int ret;

        leaf = path->nodes[0];

        btrfs_dir_item_key_to_cpu(leaf, di, &location);
        ret = read_alloc_one_name(leaf, di + 1, btrfs_dir_name_len(leaf, di), &name);
        if (ret)
                return -ENOMEM;

        btrfs_release_path(path);

        inode = read_one_inode(root, location.objectid);
        if (!inode) {
                ret = -EIO;
                goto out;
        }

        ret = link_to_fixup_dir(trans, root, path, location.objectid);
        if (ret)
                goto out;

        ret = unlink_inode_for_log_replay(trans, dir, BTRFS_I(inode), &name);
out:
        kfree(name.name);
        iput(inode);
        return ret;
}

/*
 * See if a given name and sequence number found in an inode back reference are
 * already in a directory and correctly point to this inode.
 *
 * Returns: < 0 on error, 0 if the directory entry does not exist and 1 if it
 * exists.
 */
static noinline int inode_in_dir(struct btrfs_root *root,
                                 struct btrfs_path *path,
                                 u64 dirid, u64 objectid, u64 index,
                                 struct fscrypt_str *name)
{
        struct btrfs_dir_item *di;
        struct btrfs_key location;
        int ret = 0;

        di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
                                         index, name, 0);
        if (IS_ERR(di)) {
                ret = PTR_ERR(di);
                goto out;
        } else if (di) {
                btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
                if (location.objectid != objectid)
                        goto out;
        } else {
                goto out;
        }

        btrfs_release_path(path);
        di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, 0);
        if (IS_ERR(di)) {
                ret = PTR_ERR(di);
                goto out;
        } else if (di) {
                btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
                if (location.objectid == objectid)
                        ret = 1;
        }
out:
        btrfs_release_path(path);
        return ret;
}

/*
 * helper function to check a log tree for a named back reference in
 * an inode. This is used to decide if a back reference that is
 * found in the subvolume conflicts with what we find in the log.
 *
 * inode backreferences may have multiple refs in a single item,
 * during replay we process one reference at a time, and we don't
 * want to delete valid links to a file from the subvolume if that
 * link is also in the log.
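 *
 * Returns 1 if the name was found in the log tree back reference item, 0 if
 * it was not found (or the log has no matching ref item), and < 0 on error.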
*/ static noinline int backref_in_log(struct btrfs_root *log, struct btrfs_key *key, u64 ref_objectid, const struct fscrypt_str *name) { struct btrfs_path *path; int ret; path = btrfs_alloc_path(); if (!path) return -ENOMEM; ret = btrfs_search_slot(NULL, log, key, path, 0, 0); if (ret < 0) { goto out; } else if (ret == 1) { ret = 0; goto out; } if (key->type == BTRFS_INODE_EXTREF_KEY) ret = !!btrfs_find_name_in_ext_backref(path->nodes[0], path->slots[0], ref_objectid, name); else ret = !!btrfs_find_name_in_backref(path->nodes[0], path->slots[0], name); out: btrfs_free_path(path); return ret; } static inline int __add_inode_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_root *log_root, struct btrfs_inode *dir, struct btrfs_inode *inode, u64 inode_objectid, u64 parent_objectid, u64 ref_index, struct fscrypt_str *name) { int ret; struct extent_buffer *leaf; struct btrfs_dir_item *di; struct btrfs_key search_key; struct btrfs_inode_extref *extref; again: /* Search old style refs */ search_key.objectid = inode_objectid; search_key.type = BTRFS_INODE_REF_KEY; search_key.offset = parent_objectid; ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); if (ret == 0) { struct btrfs_inode_ref *victim_ref; unsigned long ptr; unsigned long ptr_end; leaf = path->nodes[0]; /* are we trying to overwrite a back ref for the root directory * if so, just jump out, we're done */ if (search_key.objectid == search_key.offset) return 1; /* check all the names in this back reference to see * if they are in the log. if so, we allow them to stay * otherwise they must be unlinked as a conflict */ ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); ptr_end = ptr + btrfs_item_size(leaf, path->slots[0]); while (ptr < ptr_end) { struct fscrypt_str victim_name; victim_ref = (struct btrfs_inode_ref *)ptr; ret = read_alloc_one_name(leaf, (victim_ref + 1), btrfs_inode_ref_name_len(leaf, victim_ref), &victim_name); if (ret) return ret; ret = backref_in_log(log_root, &search_key, parent_objectid, &victim_name); if (ret < 0) { kfree(victim_name.name); return ret; } else if (!ret) { inc_nlink(&inode->vfs_inode); btrfs_release_path(path); ret = unlink_inode_for_log_replay(trans, dir, inode, &victim_name); kfree(victim_name.name); if (ret) return ret; goto again; } kfree(victim_name.name); ptr = (unsigned long)(victim_ref + 1) + victim_name.len; } } btrfs_release_path(path); /* Same search but for extended refs */ extref = btrfs_lookup_inode_extref(NULL, root, path, name, inode_objectid, parent_objectid, 0, 0); if (IS_ERR(extref)) { return PTR_ERR(extref); } else if (extref) { u32 item_size; u32 cur_offset = 0; unsigned long base; struct inode *victim_parent; leaf = path->nodes[0]; item_size = btrfs_item_size(leaf, path->slots[0]); base = btrfs_item_ptr_offset(leaf, path->slots[0]); while (cur_offset < item_size) { struct fscrypt_str victim_name; extref = (struct btrfs_inode_extref *)(base + cur_offset); if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid) goto next; ret = read_alloc_one_name(leaf, &extref->name, btrfs_inode_extref_name_len(leaf, extref), &victim_name); if (ret) return ret; search_key.objectid = inode_objectid; search_key.type = BTRFS_INODE_EXTREF_KEY; search_key.offset = btrfs_extref_hash(parent_objectid, victim_name.name, victim_name.len); ret = backref_in_log(log_root, &search_key, parent_objectid, &victim_name); if (ret < 0) { kfree(victim_name.name); return ret; } else if (!ret) { ret = -ENOENT; victim_parent = read_one_inode(root, 
parent_objectid); if (victim_parent) { inc_nlink(&inode->vfs_inode); btrfs_release_path(path); ret = unlink_inode_for_log_replay(trans, BTRFS_I(victim_parent), inode, &victim_name); } iput(victim_parent); kfree(victim_name.name); if (ret) return ret; goto again; } kfree(victim_name.name); next: cur_offset += victim_name.len + sizeof(*extref); } } btrfs_release_path(path); /* look for a conflicting sequence number */ di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir), ref_index, name, 0); if (IS_ERR(di)) { return PTR_ERR(di); } else if (di) { ret = drop_one_dir_item(trans, path, dir, di); if (ret) return ret; } btrfs_release_path(path); /* look for a conflicting name */ di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir), name, 0); if (IS_ERR(di)) { return PTR_ERR(di); } else if (di) { ret = drop_one_dir_item(trans, path, dir, di); if (ret) return ret; } btrfs_release_path(path); return 0; } static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr, struct fscrypt_str *name, u64 *index, u64 *parent_objectid) { struct btrfs_inode_extref *extref; int ret; extref = (struct btrfs_inode_extref *)ref_ptr; ret = read_alloc_one_name(eb, &extref->name, btrfs_inode_extref_name_len(eb, extref), name); if (ret) return ret; if (index) *index = btrfs_inode_extref_index(eb, extref); if (parent_objectid) *parent_objectid = btrfs_inode_extref_parent(eb, extref); return 0; } static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr, struct fscrypt_str *name, u64 *index) { struct btrfs_inode_ref *ref; int ret; ref = (struct btrfs_inode_ref *)ref_ptr; ret = read_alloc_one_name(eb, ref + 1, btrfs_inode_ref_name_len(eb, ref), name); if (ret) return ret; if (index) *index = btrfs_inode_ref_index(eb, ref); return 0; } /* * Take an inode reference item from the log tree and iterate all names from the * inode reference item in the subvolume tree with the same key (if it exists). * For any name that is not in the inode reference item from the log tree, do a * proper unlink of that name (that is, remove its entry from the inode * reference item and both dir index keys). 
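 *
 * For example (hypothetical names): if the subvolume's reference item for the
 * inode lists the names "a" and "b" under the same parent directory, but the
 * log's reference item only lists "b", then "a" is unlinked from that parent
 * directory before the log item is copied over.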
*/ static int unlink_old_inode_refs(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_inode *inode, struct extent_buffer *log_eb, int log_slot, struct btrfs_key *key) { int ret; unsigned long ref_ptr; unsigned long ref_end; struct extent_buffer *eb; again: btrfs_release_path(path); ret = btrfs_search_slot(NULL, root, key, path, 0, 0); if (ret > 0) { ret = 0; goto out; } if (ret < 0) goto out; eb = path->nodes[0]; ref_ptr = btrfs_item_ptr_offset(eb, path->slots[0]); ref_end = ref_ptr + btrfs_item_size(eb, path->slots[0]); while (ref_ptr < ref_end) { struct fscrypt_str name; u64 parent_id; if (key->type == BTRFS_INODE_EXTREF_KEY) { ret = extref_get_fields(eb, ref_ptr, &name, NULL, &parent_id); } else { parent_id = key->offset; ret = ref_get_fields(eb, ref_ptr, &name, NULL); } if (ret) goto out; if (key->type == BTRFS_INODE_EXTREF_KEY) ret = !!btrfs_find_name_in_ext_backref(log_eb, log_slot, parent_id, &name); else ret = !!btrfs_find_name_in_backref(log_eb, log_slot, &name); if (!ret) { struct inode *dir; btrfs_release_path(path); dir = read_one_inode(root, parent_id); if (!dir) { ret = -ENOENT; kfree(name.name); goto out; } ret = unlink_inode_for_log_replay(trans, BTRFS_I(dir), inode, &name); kfree(name.name); iput(dir); if (ret) goto out; goto again; } kfree(name.name); ref_ptr += name.len; if (key->type == BTRFS_INODE_EXTREF_KEY) ref_ptr += sizeof(struct btrfs_inode_extref); else ref_ptr += sizeof(struct btrfs_inode_ref); } ret = 0; out: btrfs_release_path(path); return ret; } /* * replay one inode back reference item found in the log tree. * eb, slot and key refer to the buffer and key found in the log tree. * root is the destination we are replaying into, and path is for temp * use by this function. (it should be released on return). */ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_root *log, struct btrfs_path *path, struct extent_buffer *eb, int slot, struct btrfs_key *key) { struct inode *dir = NULL; struct inode *inode = NULL; unsigned long ref_ptr; unsigned long ref_end; struct fscrypt_str name; int ret; int log_ref_ver = 0; u64 parent_objectid; u64 inode_objectid; u64 ref_index = 0; int ref_struct_size; ref_ptr = btrfs_item_ptr_offset(eb, slot); ref_end = ref_ptr + btrfs_item_size(eb, slot); if (key->type == BTRFS_INODE_EXTREF_KEY) { struct btrfs_inode_extref *r; ref_struct_size = sizeof(struct btrfs_inode_extref); log_ref_ver = 1; r = (struct btrfs_inode_extref *)ref_ptr; parent_objectid = btrfs_inode_extref_parent(eb, r); } else { ref_struct_size = sizeof(struct btrfs_inode_ref); parent_objectid = key->offset; } inode_objectid = key->objectid; /* * it is possible that we didn't log all the parent directories * for a given inode. If we don't find the dir, just don't * copy the back ref in. The link count fixup code will take * care of the rest */ dir = read_one_inode(root, parent_objectid); if (!dir) { ret = -ENOENT; goto out; } inode = read_one_inode(root, inode_objectid); if (!inode) { ret = -EIO; goto out; } while (ref_ptr < ref_end) { if (log_ref_ver) { ret = extref_get_fields(eb, ref_ptr, &name, &ref_index, &parent_objectid); /* * parent object can change from one array * item to another. 
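                         * (Extrefs for different parent directories can be
                         * packed into the same item, so the parent inode is
                         * re-read on each iteration; dir is put and set to
                         * NULL at the end of the loop body when processing
                         * extrefs.)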
*/ if (!dir) dir = read_one_inode(root, parent_objectid); if (!dir) { ret = -ENOENT; goto out; } } else { ret = ref_get_fields(eb, ref_ptr, &name, &ref_index); } if (ret) goto out; ret = inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)), btrfs_ino(BTRFS_I(inode)), ref_index, &name); if (ret < 0) { goto out; } else if (ret == 0) { /* * look for a conflicting back reference in the * metadata. if we find one we have to unlink that name * of the file before we add our new link. Later on, we * overwrite any existing back reference, and we don't * want to create dangling pointers in the directory. */ ret = __add_inode_ref(trans, root, path, log, BTRFS_I(dir), BTRFS_I(inode), inode_objectid, parent_objectid, ref_index, &name); if (ret) { if (ret == 1) ret = 0; goto out; } /* insert our name */ ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), &name, 0, ref_index); if (ret) goto out; ret = btrfs_update_inode(trans, root, BTRFS_I(inode)); if (ret) goto out; } /* Else, ret == 1, we already have a perfect match, we're done. */ ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + name.len; kfree(name.name); name.name = NULL; if (log_ref_ver) { iput(dir); dir = NULL; } } /* * Before we overwrite the inode reference item in the subvolume tree * with the item from the log tree, we must unlink all names from the * parent directory that are in the subvolume's tree inode reference * item, otherwise we end up with an inconsistent subvolume tree where * dir index entries exist for a name but there is no inode reference * item with the same name. */ ret = unlink_old_inode_refs(trans, root, path, BTRFS_I(inode), eb, slot, key); if (ret) goto out; /* finally write the back reference in the inode */ ret = overwrite_item(trans, root, path, eb, slot, key); out: btrfs_release_path(path); kfree(name.name); iput(dir); iput(inode); return ret; } static int count_inode_extrefs(struct btrfs_root *root, struct btrfs_inode *inode, struct btrfs_path *path) { int ret = 0; int name_len; unsigned int nlink = 0; u32 item_size; u32 cur_offset = 0; u64 inode_objectid = btrfs_ino(inode); u64 offset = 0; unsigned long ptr; struct btrfs_inode_extref *extref; struct extent_buffer *leaf; while (1) { ret = btrfs_find_one_extref(root, inode_objectid, offset, path, &extref, &offset); if (ret) break; leaf = path->nodes[0]; item_size = btrfs_item_size(leaf, path->slots[0]); ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); cur_offset = 0; while (cur_offset < item_size) { extref = (struct btrfs_inode_extref *) (ptr + cur_offset); name_len = btrfs_inode_extref_name_len(leaf, extref); nlink++; cur_offset += name_len + sizeof(*extref); } offset++; btrfs_release_path(path); } btrfs_release_path(path); if (ret < 0 && ret != -ENOENT) return ret; return nlink; } static int count_inode_refs(struct btrfs_root *root, struct btrfs_inode *inode, struct btrfs_path *path) { int ret; struct btrfs_key key; unsigned int nlink = 0; unsigned long ptr; unsigned long ptr_end; int name_len; u64 ino = btrfs_ino(inode); key.objectid = ino; key.type = BTRFS_INODE_REF_KEY; key.offset = (u64)-1; while (1) { ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) break; if (ret > 0) { if (path->slots[0] == 0) break; path->slots[0]--; } process_slot: btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); if (key.objectid != ino || key.type != BTRFS_INODE_REF_KEY) break; ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); ptr_end = ptr + btrfs_item_size(path->nodes[0], path->slots[0]); while (ptr < ptr_end) { struct btrfs_inode_ref 
*ref; ref = (struct btrfs_inode_ref *)ptr; name_len = btrfs_inode_ref_name_len(path->nodes[0], ref); ptr = (unsigned long)(ref + 1) + name_len; nlink++; } if (key.offset == 0) break; if (path->slots[0] > 0) { path->slots[0]--; goto process_slot; } key.offset--; btrfs_release_path(path); } btrfs_release_path(path); return nlink; } /* * There are a few corners where the link count of the file can't * be properly maintained during replay. So, instead of adding * lots of complexity to the log code, we just scan the backrefs * for any file that has been through replay. * * The scan will update the link count on the inode to reflect the * number of back refs found. If it goes down to zero, the iput * will free the inode. */ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode) { struct btrfs_path *path; int ret; u64 nlink = 0; u64 ino = btrfs_ino(BTRFS_I(inode)); path = btrfs_alloc_path(); if (!path) return -ENOMEM; ret = count_inode_refs(root, BTRFS_I(inode), path); if (ret < 0) goto out; nlink = ret; ret = count_inode_extrefs(root, BTRFS_I(inode), path); if (ret < 0) goto out; nlink += ret; ret = 0; if (nlink != inode->i_nlink) { set_nlink(inode, nlink); ret = btrfs_update_inode(trans, root, BTRFS_I(inode)); if (ret) goto out; } BTRFS_I(inode)->index_cnt = (u64)-1; if (inode->i_nlink == 0) { if (S_ISDIR(inode->i_mode)) { ret = replay_dir_deletes(trans, root, NULL, path, ino, 1); if (ret) goto out; } ret = btrfs_insert_orphan_item(trans, root, ino); if (ret == -EEXIST) ret = 0; } out: btrfs_free_path(path); return ret; } static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path) { int ret; struct btrfs_key key; struct inode *inode; key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID; key.type = BTRFS_ORPHAN_ITEM_KEY; key.offset = (u64)-1; while (1) { ret = btrfs_search_slot(trans, root, &key, path, -1, 1); if (ret < 0) break; if (ret == 1) { ret = 0; if (path->slots[0] == 0) break; path->slots[0]--; } btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID || key.type != BTRFS_ORPHAN_ITEM_KEY) break; ret = btrfs_del_item(trans, root, path); if (ret) break; btrfs_release_path(path); inode = read_one_inode(root, key.offset); if (!inode) { ret = -EIO; break; } ret = fixup_inode_link_count(trans, root, inode); iput(inode); if (ret) break; /* * fixup on a directory may create new entries, * make sure we always look for the highset possible * offset */ key.offset = (u64)-1; } btrfs_release_path(path); return ret; } /* * record a given inode in the fixup dir so we can check its link * count when replay is done. 
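 * (The fixup dir is the set of BTRFS_ORPHAN_ITEM_KEY items stored under
 * BTRFS_TREE_LOG_FIXUP_OBJECTID, which fixup_inode_link_counts() walks once
 * replay has finished copying items in.)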
The link count is incremented here * so the inode won't go away until we check it */ static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 objectid) { struct btrfs_key key; int ret = 0; struct inode *inode; inode = read_one_inode(root, objectid); if (!inode) return -EIO; key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID; key.type = BTRFS_ORPHAN_ITEM_KEY; key.offset = objectid; ret = btrfs_insert_empty_item(trans, root, path, &key, 0); btrfs_release_path(path); if (ret == 0) { if (!inode->i_nlink) set_nlink(inode, 1); else inc_nlink(inode); ret = btrfs_update_inode(trans, root, BTRFS_I(inode)); } else if (ret == -EEXIST) { ret = 0; } iput(inode); return ret; } /* * when replaying the log for a directory, we only insert names * for inodes that actually exist. This means an fsync on a directory * does not implicitly fsync all the new files in it */ static noinline int insert_one_name(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 dirid, u64 index, const struct fscrypt_str *name, struct btrfs_key *location) { struct inode *inode; struct inode *dir; int ret; inode = read_one_inode(root, location->objectid); if (!inode) return -ENOENT; dir = read_one_inode(root, dirid); if (!dir) { iput(inode); return -EIO; } ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name, 1, index); /* FIXME, put inode into FIXUP list */ iput(inode); iput(dir); return ret; } static int delete_conflicting_dir_entry(struct btrfs_trans_handle *trans, struct btrfs_inode *dir, struct btrfs_path *path, struct btrfs_dir_item *dst_di, const struct btrfs_key *log_key, u8 log_flags, bool exists) { struct btrfs_key found_key; btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key); /* The existing dentry points to the same inode, don't delete it. */ if (found_key.objectid == log_key->objectid && found_key.type == log_key->type && found_key.offset == log_key->offset && btrfs_dir_flags(path->nodes[0], dst_di) == log_flags) return 1; /* * Don't drop the conflicting directory entry if the inode for the new * entry doesn't exist. */ if (!exists) return 0; return drop_one_dir_item(trans, path, dir, dst_di); } /* * take a single entry in a log directory item and replay it into * the subvolume. * * if a conflicting item exists in the subdirectory already, * the inode it points to is unlinked and put into the link count * fix up tree. * * If a name from the log points to a file or directory that does * not exist in the FS, it is skipped. fsyncs on directories * do not force down inodes inside that directory, just changes to the * names or unlinks in a directory. * * Returns < 0 on error, 0 if the name wasn't replayed (dentry points to a * non-existing inode) and 1 if the name was replayed. 
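 * (A return of 1 tells the caller, replay_one_dir_item(), that a new name was
 * inserted, so it may queue a link count fixup for non-directory inodes, see
 * below.)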
*/ static noinline int replay_one_name(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct extent_buffer *eb, struct btrfs_dir_item *di, struct btrfs_key *key) { struct fscrypt_str name; struct btrfs_dir_item *dir_dst_di; struct btrfs_dir_item *index_dst_di; bool dir_dst_matches = false; bool index_dst_matches = false; struct btrfs_key log_key; struct btrfs_key search_key; struct inode *dir; u8 log_flags; bool exists; int ret; bool update_size = true; bool name_added = false; dir = read_one_inode(root, key->objectid); if (!dir) return -EIO; ret = read_alloc_one_name(eb, di + 1, btrfs_dir_name_len(eb, di), &name); if (ret) goto out; log_flags = btrfs_dir_flags(eb, di); btrfs_dir_item_key_to_cpu(eb, di, &log_key); ret = btrfs_lookup_inode(trans, root, path, &log_key, 0); btrfs_release_path(path); if (ret < 0) goto out; exists = (ret == 0); ret = 0; dir_dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid, &name, 1); if (IS_ERR(dir_dst_di)) { ret = PTR_ERR(dir_dst_di); goto out; } else if (dir_dst_di) { ret = delete_conflicting_dir_entry(trans, BTRFS_I(dir), path, dir_dst_di, &log_key, log_flags, exists); if (ret < 0) goto out; dir_dst_matches = (ret == 1); } btrfs_release_path(path); index_dst_di = btrfs_lookup_dir_index_item(trans, root, path, key->objectid, key->offset, &name, 1); if (IS_ERR(index_dst_di)) { ret = PTR_ERR(index_dst_di); goto out; } else if (index_dst_di) { ret = delete_conflicting_dir_entry(trans, BTRFS_I(dir), path, index_dst_di, &log_key, log_flags, exists); if (ret < 0) goto out; index_dst_matches = (ret == 1); } btrfs_release_path(path); if (dir_dst_matches && index_dst_matches) { ret = 0; update_size = false; goto out; } /* * Check if the inode reference exists in the log for the given name, * inode and parent inode */ search_key.objectid = log_key.objectid; search_key.type = BTRFS_INODE_REF_KEY; search_key.offset = key->objectid; ret = backref_in_log(root->log_root, &search_key, 0, &name); if (ret < 0) { goto out; } else if (ret) { /* The dentry will be added later. */ ret = 0; update_size = false; goto out; } search_key.objectid = log_key.objectid; search_key.type = BTRFS_INODE_EXTREF_KEY; search_key.offset = key->objectid; ret = backref_in_log(root->log_root, &search_key, key->objectid, &name); if (ret < 0) { goto out; } else if (ret) { /* The dentry will be added later. */ ret = 0; update_size = false; goto out; } btrfs_release_path(path); ret = insert_one_name(trans, root, key->objectid, key->offset, &name, &log_key); if (ret && ret != -ENOENT && ret != -EEXIST) goto out; if (!ret) name_added = true; update_size = false; ret = 0; out: if (!ret && update_size) { btrfs_i_size_write(BTRFS_I(dir), dir->i_size + name.len * 2); ret = btrfs_update_inode(trans, root, BTRFS_I(dir)); } kfree(name.name); iput(dir); if (!ret && name_added) ret = 1; return ret; } /* Replay one dir item from a BTRFS_DIR_INDEX_KEY key. */ static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct extent_buffer *eb, int slot, struct btrfs_key *key) { int ret; struct btrfs_dir_item *di; /* We only log dir index keys, which only contain a single dir item. 
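         * Logs created by older kernels may also contain BTRFS_DIR_ITEM_KEY
         * items; those are not passed to this function and are ignored during
         * replay (see replay_one_buffer() and check_item_in_log()).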
*/ ASSERT(key->type == BTRFS_DIR_INDEX_KEY); di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item); ret = replay_one_name(trans, root, path, eb, di, key); if (ret < 0) return ret; /* * If this entry refers to a non-directory (directories can not have a * link count > 1) and it was added in the transaction that was not * committed, make sure we fixup the link count of the inode the entry * points to. Otherwise something like the following would result in a * directory pointing to an inode with a wrong link that does not account * for this dir entry: * * mkdir testdir * touch testdir/foo * touch testdir/bar * sync * * ln testdir/bar testdir/bar_link * ln testdir/foo testdir/foo_link * xfs_io -c "fsync" testdir/bar * * <power failure> * * mount fs, log replay happens * * File foo would remain with a link count of 1 when it has two entries * pointing to it in the directory testdir. This would make it impossible * to ever delete the parent directory has it would result in stale * dentries that can never be deleted. */ if (ret == 1 && btrfs_dir_ftype(eb, di) != BTRFS_FT_DIR) { struct btrfs_path *fixup_path; struct btrfs_key di_key; fixup_path = btrfs_alloc_path(); if (!fixup_path) return -ENOMEM; btrfs_dir_item_key_to_cpu(eb, di, &di_key); ret = link_to_fixup_dir(trans, root, fixup_path, di_key.objectid); btrfs_free_path(fixup_path); } return ret; } /* * directory replay has two parts. There are the standard directory * items in the log copied from the subvolume, and range items * created in the log while the subvolume was logged. * * The range items tell us which parts of the key space the log * is authoritative for. During replay, if a key in the subvolume * directory is in a logged range item, but not actually in the log * that means it was deleted from the directory before the fsync * and should be removed. */ static noinline int find_dir_range(struct btrfs_root *root, struct btrfs_path *path, u64 dirid, u64 *start_ret, u64 *end_ret) { struct btrfs_key key; u64 found_end; struct btrfs_dir_log_item *item; int ret; int nritems; if (*start_ret == (u64)-1) return 1; key.objectid = dirid; key.type = BTRFS_DIR_LOG_INDEX_KEY; key.offset = *start_ret; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto out; if (ret > 0) { if (path->slots[0] == 0) goto out; path->slots[0]--; } if (ret != 0) btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); if (key.type != BTRFS_DIR_LOG_INDEX_KEY || key.objectid != dirid) { ret = 1; goto next; } item = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_dir_log_item); found_end = btrfs_dir_log_end(path->nodes[0], item); if (*start_ret >= key.offset && *start_ret <= found_end) { ret = 0; *start_ret = key.offset; *end_ret = found_end; goto out; } ret = 1; next: /* check the next slot in the tree to see if it is a valid item */ nritems = btrfs_header_nritems(path->nodes[0]); path->slots[0]++; if (path->slots[0] >= nritems) { ret = btrfs_next_leaf(root, path); if (ret) goto out; } btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); if (key.type != BTRFS_DIR_LOG_INDEX_KEY || key.objectid != dirid) { ret = 1; goto out; } item = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_dir_log_item); found_end = btrfs_dir_log_end(path->nodes[0], item); *start_ret = key.offset; *end_ret = found_end; ret = 0; out: btrfs_release_path(path); return ret; } /* * this looks for a given directory item in the log. 
If the directory * item is not in the log, the item is removed and the inode it points * to is unlinked */ static noinline int check_item_in_log(struct btrfs_trans_handle *trans, struct btrfs_root *log, struct btrfs_path *path, struct btrfs_path *log_path, struct inode *dir, struct btrfs_key *dir_key) { struct btrfs_root *root = BTRFS_I(dir)->root; int ret; struct extent_buffer *eb; int slot; struct btrfs_dir_item *di; struct fscrypt_str name; struct inode *inode = NULL; struct btrfs_key location; /* * Currently we only log dir index keys. Even if we replay a log created * by an older kernel that logged both dir index and dir item keys, all * we need to do is process the dir index keys, we (and our caller) can * safely ignore dir item keys (key type BTRFS_DIR_ITEM_KEY). */ ASSERT(dir_key->type == BTRFS_DIR_INDEX_KEY); eb = path->nodes[0]; slot = path->slots[0]; di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item); ret = read_alloc_one_name(eb, di + 1, btrfs_dir_name_len(eb, di), &name); if (ret) goto out; if (log) { struct btrfs_dir_item *log_di; log_di = btrfs_lookup_dir_index_item(trans, log, log_path, dir_key->objectid, dir_key->offset, &name, 0); if (IS_ERR(log_di)) { ret = PTR_ERR(log_di); goto out; } else if (log_di) { /* The dentry exists in the log, we have nothing to do. */ ret = 0; goto out; } } btrfs_dir_item_key_to_cpu(eb, di, &location); btrfs_release_path(path); btrfs_release_path(log_path); inode = read_one_inode(root, location.objectid); if (!inode) { ret = -EIO; goto out; } ret = link_to_fixup_dir(trans, root, path, location.objectid); if (ret) goto out; inc_nlink(inode); ret = unlink_inode_for_log_replay(trans, BTRFS_I(dir), BTRFS_I(inode), &name); /* * Unlike dir item keys, dir index keys can only have one name (entry) in * them, as there are no key collisions since each key has a unique offset * (an index number), so we're done. */ out: btrfs_release_path(path); btrfs_release_path(log_path); kfree(name.name); iput(inode); return ret; } static int replay_xattr_deletes(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_root *log, struct btrfs_path *path, const u64 ino) { struct btrfs_key search_key; struct btrfs_path *log_path; int i; int nritems; int ret; log_path = btrfs_alloc_path(); if (!log_path) return -ENOMEM; search_key.objectid = ino; search_key.type = BTRFS_XATTR_ITEM_KEY; search_key.offset = 0; again: ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); if (ret < 0) goto out; process_leaf: nritems = btrfs_header_nritems(path->nodes[0]); for (i = path->slots[0]; i < nritems; i++) { struct btrfs_key key; struct btrfs_dir_item *di; struct btrfs_dir_item *log_di; u32 total_size; u32 cur; btrfs_item_key_to_cpu(path->nodes[0], &key, i); if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) { ret = 0; goto out; } di = btrfs_item_ptr(path->nodes[0], i, struct btrfs_dir_item); total_size = btrfs_item_size(path->nodes[0], i); cur = 0; while (cur < total_size) { u16 name_len = btrfs_dir_name_len(path->nodes[0], di); u16 data_len = btrfs_dir_data_len(path->nodes[0], di); u32 this_len = sizeof(*di) + name_len + data_len; char *name; name = kmalloc(name_len, GFP_NOFS); if (!name) { ret = -ENOMEM; goto out; } read_extent_buffer(path->nodes[0], name, (unsigned long)(di + 1), name_len); log_di = btrfs_lookup_xattr(NULL, log, log_path, ino, name, name_len, 0); btrfs_release_path(log_path); if (!log_di) { /* Doesn't exist in log tree, so delete it. 
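                                 * (It is looked up again below with mod -1,
                                 * i.e. for deletion, so that
                                 * btrfs_delete_one_dir_name() can remove just
                                 * this name from the xattr item.)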
*/ btrfs_release_path(path); di = btrfs_lookup_xattr(trans, root, path, ino, name, name_len, -1); kfree(name); if (IS_ERR(di)) { ret = PTR_ERR(di); goto out; } ASSERT(di); ret = btrfs_delete_one_dir_name(trans, root, path, di); if (ret) goto out; btrfs_release_path(path); search_key = key; goto again; } kfree(name); if (IS_ERR(log_di)) { ret = PTR_ERR(log_di); goto out; } cur += this_len; di = (struct btrfs_dir_item *)((char *)di + this_len); } } ret = btrfs_next_leaf(root, path); if (ret > 0) ret = 0; else if (ret == 0) goto process_leaf; out: btrfs_free_path(log_path); btrfs_release_path(path); return ret; } /* * deletion replay happens before we copy any new directory items * out of the log or out of backreferences from inodes. It * scans the log to find ranges of keys that log is authoritative for, * and then scans the directory to find items in those ranges that are * not present in the log. * * Anything we don't find in the log is unlinked and removed from the * directory. */ static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_root *log, struct btrfs_path *path, u64 dirid, int del_all) { u64 range_start; u64 range_end; int ret = 0; struct btrfs_key dir_key; struct btrfs_key found_key; struct btrfs_path *log_path; struct inode *dir; dir_key.objectid = dirid; dir_key.type = BTRFS_DIR_INDEX_KEY; log_path = btrfs_alloc_path(); if (!log_path) return -ENOMEM; dir = read_one_inode(root, dirid); /* it isn't an error if the inode isn't there, that can happen * because we replay the deletes before we copy in the inode item * from the log */ if (!dir) { btrfs_free_path(log_path); return 0; } range_start = 0; range_end = 0; while (1) { if (del_all) range_end = (u64)-1; else { ret = find_dir_range(log, path, dirid, &range_start, &range_end); if (ret < 0) goto out; else if (ret > 0) break; } dir_key.offset = range_start; while (1) { int nritems; ret = btrfs_search_slot(NULL, root, &dir_key, path, 0, 0); if (ret < 0) goto out; nritems = btrfs_header_nritems(path->nodes[0]); if (path->slots[0] >= nritems) { ret = btrfs_next_leaf(root, path); if (ret == 1) break; else if (ret < 0) goto out; } btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]); if (found_key.objectid != dirid || found_key.type != dir_key.type) { ret = 0; goto out; } if (found_key.offset > range_end) break; ret = check_item_in_log(trans, log, path, log_path, dir, &found_key); if (ret) goto out; if (found_key.offset == (u64)-1) break; dir_key.offset = found_key.offset + 1; } btrfs_release_path(path); if (range_end == (u64)-1) break; range_start = range_end + 1; } ret = 0; out: btrfs_release_path(path); btrfs_free_path(log_path); iput(dir); return ret; } /* * the process_func used to replay items from the log tree. This * gets called in two different stages. The first stage just looks * for inodes and makes sure they are all copied into the subvolume. * * The second stage copies all the other item types from the log into * the subvolume. The two stage approach is slower, but gets rid of * lots of complexity around inodes referencing other inodes that exist * only in the log (references come from either directory items or inode * back refs). 
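 *
 * (In the code the stages are distinguished via wc->stage, see the
 * LOG_WALK_REPLAY_* checks in replay_one_buffer() below.)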
*/ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb, struct walk_control *wc, u64 gen, int level) { int nritems; struct btrfs_tree_parent_check check = { .transid = gen, .level = level }; struct btrfs_path *path; struct btrfs_root *root = wc->replay_dest; struct btrfs_key key; int i; int ret; ret = btrfs_read_extent_buffer(eb, &check); if (ret) return ret; level = btrfs_header_level(eb); if (level != 0) return 0; path = btrfs_alloc_path(); if (!path) return -ENOMEM; nritems = btrfs_header_nritems(eb); for (i = 0; i < nritems; i++) { btrfs_item_key_to_cpu(eb, &key, i); /* inode keys are done during the first stage */ if (key.type == BTRFS_INODE_ITEM_KEY && wc->stage == LOG_WALK_REPLAY_INODES) { struct btrfs_inode_item *inode_item; u32 mode; inode_item = btrfs_item_ptr(eb, i, struct btrfs_inode_item); /* * If we have a tmpfile (O_TMPFILE) that got fsync'ed * and never got linked before the fsync, skip it, as * replaying it is pointless since it would be deleted * later. We skip logging tmpfiles, but it's always * possible we are replaying a log created with a kernel * that used to log tmpfiles. */ if (btrfs_inode_nlink(eb, inode_item) == 0) { wc->ignore_cur_inode = true; continue; } else { wc->ignore_cur_inode = false; } ret = replay_xattr_deletes(wc->trans, root, log, path, key.objectid); if (ret) break; mode = btrfs_inode_mode(eb, inode_item); if (S_ISDIR(mode)) { ret = replay_dir_deletes(wc->trans, root, log, path, key.objectid, 0); if (ret) break; } ret = overwrite_item(wc->trans, root, path, eb, i, &key); if (ret) break; /* * Before replaying extents, truncate the inode to its * size. We need to do it now and not after log replay * because before an fsync we can have prealloc extents * added beyond the inode's i_size. If we did it after, * through orphan cleanup for example, we would drop * those prealloc extents just after replaying them. */ if (S_ISREG(mode)) { struct btrfs_drop_extents_args drop_args = { 0 }; struct inode *inode; u64 from; inode = read_one_inode(root, key.objectid); if (!inode) { ret = -EIO; break; } from = ALIGN(i_size_read(inode), root->fs_info->sectorsize); drop_args.start = from; drop_args.end = (u64)-1; drop_args.drop_cache = true; ret = btrfs_drop_extents(wc->trans, root, BTRFS_I(inode), &drop_args); if (!ret) { inode_sub_bytes(inode, drop_args.bytes_found); /* Update the inode's nbytes. */ ret = btrfs_update_inode(wc->trans, root, BTRFS_I(inode)); } iput(inode); if (ret) break; } ret = link_to_fixup_dir(wc->trans, root, path, key.objectid); if (ret) break; } if (wc->ignore_cur_inode) continue; if (key.type == BTRFS_DIR_INDEX_KEY && wc->stage == LOG_WALK_REPLAY_DIR_INDEX) { ret = replay_one_dir_item(wc->trans, root, path, eb, i, &key); if (ret) break; } if (wc->stage < LOG_WALK_REPLAY_ALL) continue; /* these keys are simply copied */ if (key.type == BTRFS_XATTR_ITEM_KEY) { ret = overwrite_item(wc->trans, root, path, eb, i, &key); if (ret) break; } else if (key.type == BTRFS_INODE_REF_KEY || key.type == BTRFS_INODE_EXTREF_KEY) { ret = add_inode_ref(wc->trans, root, log, path, eb, i, &key); if (ret && ret != -ENOENT) break; ret = 0; } else if (key.type == BTRFS_EXTENT_DATA_KEY) { ret = replay_one_extent(wc->trans, root, path, eb, i, &key); if (ret) break; } /* * We don't log BTRFS_DIR_ITEM_KEY keys anymore, only the * BTRFS_DIR_INDEX_KEY items which we use to derive the * BTRFS_DIR_ITEM_KEY items. If we are replaying a log from an * older kernel with such keys, ignore them. 
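                 * (Each such entry should have a matching BTRFS_DIR_INDEX_KEY
                 * item in the log, and replaying that one recreates the dir
                 * item as well via btrfs_add_link().)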
*/ } btrfs_free_path(path); return ret; } /* * Correctly adjust the reserved bytes occupied by a log tree extent buffer */ static void unaccount_log_buffer(struct btrfs_fs_info *fs_info, u64 start) { struct btrfs_block_group *cache; cache = btrfs_lookup_block_group(fs_info, start); if (!cache) { btrfs_err(fs_info, "unable to find block group for %llu", start); return; } spin_lock(&cache->space_info->lock); spin_lock(&cache->lock); cache->reserved -= fs_info->nodesize; cache->space_info->bytes_reserved -= fs_info->nodesize; spin_unlock(&cache->lock); spin_unlock(&cache->space_info->lock); btrfs_put_block_group(cache); } static int clean_log_buffer(struct btrfs_trans_handle *trans, struct extent_buffer *eb) { int ret; btrfs_tree_lock(eb); btrfs_clear_buffer_dirty(trans, eb); wait_on_extent_buffer_writeback(eb); btrfs_tree_unlock(eb); if (trans) { ret = btrfs_pin_reserved_extent(trans, eb->start, eb->len); if (ret) return ret; btrfs_redirty_list_add(trans->transaction, eb); } else { unaccount_log_buffer(eb->fs_info, eb->start); } return 0; } static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int *level, struct walk_control *wc) { struct btrfs_fs_info *fs_info = root->fs_info; u64 bytenr; u64 ptr_gen; struct extent_buffer *next; struct extent_buffer *cur; int ret = 0; while (*level > 0) { struct btrfs_tree_parent_check check = { 0 }; cur = path->nodes[*level]; WARN_ON(btrfs_header_level(cur) != *level); if (path->slots[*level] >= btrfs_header_nritems(cur)) break; bytenr = btrfs_node_blockptr(cur, path->slots[*level]); ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]); check.transid = ptr_gen; check.level = *level - 1; check.has_first_key = true; btrfs_node_key_to_cpu(cur, &check.first_key, path->slots[*level]); next = btrfs_find_create_tree_block(fs_info, bytenr, btrfs_header_owner(cur), *level - 1); if (IS_ERR(next)) return PTR_ERR(next); if (*level == 1) { ret = wc->process_func(root, next, wc, ptr_gen, *level - 1); if (ret) { free_extent_buffer(next); return ret; } path->slots[*level]++; if (wc->free) { ret = btrfs_read_extent_buffer(next, &check); if (ret) { free_extent_buffer(next); return ret; } ret = clean_log_buffer(trans, next); if (ret) { free_extent_buffer(next); return ret; } } free_extent_buffer(next); continue; } ret = btrfs_read_extent_buffer(next, &check); if (ret) { free_extent_buffer(next); return ret; } if (path->nodes[*level-1]) free_extent_buffer(path->nodes[*level-1]); path->nodes[*level-1] = next; *level = btrfs_header_level(next); path->slots[*level] = 0; cond_resched(); } path->slots[*level] = btrfs_header_nritems(path->nodes[*level]); cond_resched(); return 0; } static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int *level, struct walk_control *wc) { int i; int slot; int ret; for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) { slot = path->slots[i]; if (slot + 1 < btrfs_header_nritems(path->nodes[i])) { path->slots[i]++; *level = i; WARN_ON(*level == 0); return 0; } else { ret = wc->process_func(root, path->nodes[*level], wc, btrfs_header_generation(path->nodes[*level]), *level); if (ret) return ret; if (wc->free) { ret = clean_log_buffer(trans, path->nodes[*level]); if (ret) return ret; } free_extent_buffer(path->nodes[*level]); path->nodes[*level] = NULL; *level = i + 1; } } return 1; } /* * drop the reference count on the tree rooted at 'snap'. 
This traverses * the tree freeing any blocks that have a ref count of zero after being * decremented. */ static int walk_log_tree(struct btrfs_trans_handle *trans, struct btrfs_root *log, struct walk_control *wc) { int ret = 0; int wret; int level; struct btrfs_path *path; int orig_level; path = btrfs_alloc_path(); if (!path) return -ENOMEM; level = btrfs_header_level(log->node); orig_level = level; path->nodes[level] = log->node; atomic_inc(&log->node->refs); path->slots[level] = 0; while (1) { wret = walk_down_log_tree(trans, log, path, &level, wc); if (wret > 0) break; if (wret < 0) { ret = wret; goto out; } wret = walk_up_log_tree(trans, log, path, &level, wc); if (wret > 0) break; if (wret < 0) { ret = wret; goto out; } } /* was the root node processed? if not, catch it here */ if (path->nodes[orig_level]) { ret = wc->process_func(log, path->nodes[orig_level], wc, btrfs_header_generation(path->nodes[orig_level]), orig_level); if (ret) goto out; if (wc->free) ret = clean_log_buffer(trans, path->nodes[orig_level]); } out: btrfs_free_path(path); return ret; } /* * helper function to update the item for a given subvolumes log root * in the tree of log roots */ static int update_log_root(struct btrfs_trans_handle *trans, struct btrfs_root *log, struct btrfs_root_item *root_item) { struct btrfs_fs_info *fs_info = log->fs_info; int ret; if (log->log_transid == 1) { /* insert root item on the first sync */ ret = btrfs_insert_root(trans, fs_info->log_root_tree, &log->root_key, root_item); } else { ret = btrfs_update_root(trans, fs_info->log_root_tree, &log->root_key, root_item); } return ret; } static void wait_log_commit(struct btrfs_root *root, int transid) { DEFINE_WAIT(wait); int index = transid % 2; /* * we only allow two pending log transactions at a time, * so we know that if ours is more than 2 older than the * current transaction, we're done */ for (;;) { prepare_to_wait(&root->log_commit_wait[index], &wait, TASK_UNINTERRUPTIBLE); if (!(root->log_transid_committed < transid && atomic_read(&root->log_commit[index]))) break; mutex_unlock(&root->log_mutex); schedule(); mutex_lock(&root->log_mutex); } finish_wait(&root->log_commit_wait[index], &wait); } static void wait_for_writer(struct btrfs_root *root) { DEFINE_WAIT(wait); for (;;) { prepare_to_wait(&root->log_writer_wait, &wait, TASK_UNINTERRUPTIBLE); if (!atomic_read(&root->log_writers)) break; mutex_unlock(&root->log_mutex); schedule(); mutex_lock(&root->log_mutex); } finish_wait(&root->log_writer_wait, &wait); } static inline void btrfs_remove_log_ctx(struct btrfs_root *root, struct btrfs_log_ctx *ctx) { mutex_lock(&root->log_mutex); list_del_init(&ctx->list); mutex_unlock(&root->log_mutex); } /* * Invoked in log mutex context, or be sure there is no other task which * can access the list. */ static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root, int index, int error) { struct btrfs_log_ctx *ctx; struct btrfs_log_ctx *safe; list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) { list_del_init(&ctx->list); ctx->log_ret = error; } } /* * btrfs_sync_log does sends a given tree log down to the disk and * updates the super blocks to record it. When this call is done, * you know that any inodes previously logged are safely on disk only * if it returns 0. * * Any other return value means you need to call btrfs_commit_transaction. * Some of the edge cases for fsyncing directories that have had unlinks * or renames done in the past mean that sometimes the only safe * fsync is to commit the whole FS. 
When btrfs_sync_log returns -EAGAIN, * that has happened. */ int btrfs_sync_log(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_log_ctx *ctx) { int index1; int index2; int mark; int ret; struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_root *log = root->log_root; struct btrfs_root *log_root_tree = fs_info->log_root_tree; struct btrfs_root_item new_root_item; int log_transid = 0; struct btrfs_log_ctx root_log_ctx; struct blk_plug plug; u64 log_root_start; u64 log_root_level; mutex_lock(&root->log_mutex); log_transid = ctx->log_transid; if (root->log_transid_committed >= log_transid) { mutex_unlock(&root->log_mutex); return ctx->log_ret; } index1 = log_transid % 2; if (atomic_read(&root->log_commit[index1])) { wait_log_commit(root, log_transid); mutex_unlock(&root->log_mutex); return ctx->log_ret; } ASSERT(log_transid == root->log_transid); atomic_set(&root->log_commit[index1], 1); /* wait for previous tree log sync to complete */ if (atomic_read(&root->log_commit[(index1 + 1) % 2])) wait_log_commit(root, log_transid - 1); while (1) { int batch = atomic_read(&root->log_batch); /* when we're on an ssd, just kick the log commit out */ if (!btrfs_test_opt(fs_info, SSD) && test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) { mutex_unlock(&root->log_mutex); schedule_timeout_uninterruptible(1); mutex_lock(&root->log_mutex); } wait_for_writer(root); if (batch == atomic_read(&root->log_batch)) break; } /* bail out if we need to do a full commit */ if (btrfs_need_log_full_commit(trans)) { ret = BTRFS_LOG_FORCE_COMMIT; mutex_unlock(&root->log_mutex); goto out; } if (log_transid % 2 == 0) mark = EXTENT_DIRTY; else mark = EXTENT_NEW; /* we start IO on all the marked extents here, but we don't actually * wait for them until later. */ blk_start_plug(&plug); ret = btrfs_write_marked_extents(fs_info, &log->dirty_log_pages, mark); /* * -EAGAIN happens when someone, e.g., a concurrent transaction * commit, writes a dirty extent in this tree-log commit. This * concurrent write will create a hole writing out the extents, * and we cannot proceed on a zoned filesystem, requiring * sequential writing. While we can bail out to a full commit * here, but we can continue hoping the concurrent writing fills * the hole. */ if (ret == -EAGAIN && btrfs_is_zoned(fs_info)) ret = 0; if (ret) { blk_finish_plug(&plug); btrfs_set_log_full_commit(trans); mutex_unlock(&root->log_mutex); goto out; } /* * We _must_ update under the root->log_mutex in order to make sure we * have a consistent view of the log root we are trying to commit at * this moment. * * We _must_ copy this into a local copy, because we are not holding the * log_root_tree->log_mutex yet. This is important because when we * commit the log_root_tree we must have a consistent view of the * log_root_tree when we update the super block to point at the * log_root_tree bytenr. If we update the log_root_tree here we'll race * with the commit and possibly point at the new block which we may not * have written out. */ btrfs_set_root_node(&log->root_item, log->node); memcpy(&new_root_item, &log->root_item, sizeof(new_root_item)); root->log_transid++; log->log_transid = root->log_transid; root->log_start_pid = 0; /* * IO has been started, blocks of the log tree have WRITTEN flag set * in their headers. new modifications of the log will be written to * new positions. so it's safe to allow log writers to go in. 
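         * (That is why root->log_mutex is dropped right below, before the
         * log root tree is updated and committed.)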
*/ mutex_unlock(&root->log_mutex); if (btrfs_is_zoned(fs_info)) { mutex_lock(&fs_info->tree_root->log_mutex); if (!log_root_tree->node) { ret = btrfs_alloc_log_tree_node(trans, log_root_tree); if (ret) { mutex_unlock(&fs_info->tree_root->log_mutex); blk_finish_plug(&plug); goto out; } } mutex_unlock(&fs_info->tree_root->log_mutex); } btrfs_init_log_ctx(&root_log_ctx, NULL); mutex_lock(&log_root_tree->log_mutex); index2 = log_root_tree->log_transid % 2; list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]); root_log_ctx.log_transid = log_root_tree->log_transid; /* * Now we are safe to update the log_root_tree because we're under the * log_mutex, and we're a current writer so we're holding the commit * open until we drop the log_mutex. */ ret = update_log_root(trans, log, &new_root_item); if (ret) { if (!list_empty(&root_log_ctx.list)) list_del_init(&root_log_ctx.list); blk_finish_plug(&plug); btrfs_set_log_full_commit(trans); if (ret != -ENOSPC) btrfs_err(fs_info, "failed to update log for root %llu ret %d", root->root_key.objectid, ret); btrfs_wait_tree_log_extents(log, mark); mutex_unlock(&log_root_tree->log_mutex); goto out; } if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) { blk_finish_plug(&plug); list_del_init(&root_log_ctx.list); mutex_unlock(&log_root_tree->log_mutex); ret = root_log_ctx.log_ret; goto out; } index2 = root_log_ctx.log_transid % 2; if (atomic_read(&log_root_tree->log_commit[index2])) { blk_finish_plug(&plug); ret = btrfs_wait_tree_log_extents(log, mark); wait_log_commit(log_root_tree, root_log_ctx.log_transid); mutex_unlock(&log_root_tree->log_mutex); if (!ret) ret = root_log_ctx.log_ret; goto out; } ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid); atomic_set(&log_root_tree->log_commit[index2], 1); if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) { wait_log_commit(log_root_tree, root_log_ctx.log_transid - 1); } /* * now that we've moved on to the tree of log tree roots, * check the full commit flag again */ if (btrfs_need_log_full_commit(trans)) { blk_finish_plug(&plug); btrfs_wait_tree_log_extents(log, mark); mutex_unlock(&log_root_tree->log_mutex); ret = BTRFS_LOG_FORCE_COMMIT; goto out_wake_log_root; } ret = btrfs_write_marked_extents(fs_info, &log_root_tree->dirty_log_pages, EXTENT_DIRTY | EXTENT_NEW); blk_finish_plug(&plug); /* * As described above, -EAGAIN indicates a hole in the extents. We * cannot wait for these write outs since the waiting cause a * deadlock. Bail out to the full commit instead. 
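         * (This is specific to zoned filesystems; on a non-zoned filesystem
         * -EAGAIN is handled like any other error here and also forces a full
         * transaction commit.)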
*/ if (ret == -EAGAIN && btrfs_is_zoned(fs_info)) { btrfs_set_log_full_commit(trans); btrfs_wait_tree_log_extents(log, mark); mutex_unlock(&log_root_tree->log_mutex); goto out_wake_log_root; } else if (ret) { btrfs_set_log_full_commit(trans); mutex_unlock(&log_root_tree->log_mutex); goto out_wake_log_root; } ret = btrfs_wait_tree_log_extents(log, mark); if (!ret) ret = btrfs_wait_tree_log_extents(log_root_tree, EXTENT_NEW | EXTENT_DIRTY); if (ret) { btrfs_set_log_full_commit(trans); mutex_unlock(&log_root_tree->log_mutex); goto out_wake_log_root; } log_root_start = log_root_tree->node->start; log_root_level = btrfs_header_level(log_root_tree->node); log_root_tree->log_transid++; mutex_unlock(&log_root_tree->log_mutex); /* * Here we are guaranteed that nobody is going to write the superblock * for the current transaction before us and that neither we do write * our superblock before the previous transaction finishes its commit * and writes its superblock, because: * * 1) We are holding a handle on the current transaction, so no body * can commit it until we release the handle; * * 2) Before writing our superblock we acquire the tree_log_mutex, so * if the previous transaction is still committing, and hasn't yet * written its superblock, we wait for it to do it, because a * transaction commit acquires the tree_log_mutex when the commit * begins and releases it only after writing its superblock. */ mutex_lock(&fs_info->tree_log_mutex); /* * The previous transaction writeout phase could have failed, and thus * marked the fs in an error state. We must not commit here, as we * could have updated our generation in the super_for_commit and * writing the super here would result in transid mismatches. If there * is an error here just bail. */ if (BTRFS_FS_ERROR(fs_info)) { ret = -EIO; btrfs_set_log_full_commit(trans); btrfs_abort_transaction(trans, ret); mutex_unlock(&fs_info->tree_log_mutex); goto out_wake_log_root; } btrfs_set_super_log_root(fs_info->super_for_commit, log_root_start); btrfs_set_super_log_root_level(fs_info->super_for_commit, log_root_level); ret = write_all_supers(fs_info, 1); mutex_unlock(&fs_info->tree_log_mutex); if (ret) { btrfs_set_log_full_commit(trans); btrfs_abort_transaction(trans, ret); goto out_wake_log_root; } /* * We know there can only be one task here, since we have not yet set * root->log_commit[index1] to 0 and any task attempting to sync the * log must wait for the previous log transaction to commit if it's * still in progress or wait for the current log transaction commit if * someone else already started it. We use <= and not < because the * first log transaction has an ID of 0. */ ASSERT(root->last_log_commit <= log_transid); root->last_log_commit = log_transid; out_wake_log_root: mutex_lock(&log_root_tree->log_mutex); btrfs_remove_all_log_ctxs(log_root_tree, index2, ret); log_root_tree->log_transid_committed++; atomic_set(&log_root_tree->log_commit[index2], 0); mutex_unlock(&log_root_tree->log_mutex); /* * The barrier before waitqueue_active (in cond_wake_up) is needed so * all the updates above are seen by the woken threads. It might not be * necessary, but proving that seems to be hard. */ cond_wake_up(&log_root_tree->log_commit_wait[index2]); out: mutex_lock(&root->log_mutex); btrfs_remove_all_log_ctxs(root, index1, ret); root->log_transid_committed++; atomic_set(&root->log_commit[index1], 0); mutex_unlock(&root->log_mutex); /* * The barrier before waitqueue_active (in cond_wake_up) is needed so * all the updates above are seen by the woken threads. 
It might not be * necessary, but proving that seems to be hard. */ cond_wake_up(&root->log_commit_wait[index1]); return ret; } static void free_log_tree(struct btrfs_trans_handle *trans, struct btrfs_root *log) { int ret; struct walk_control wc = { .free = 1, .process_func = process_one_buffer }; if (log->node) { ret = walk_log_tree(trans, log, &wc); if (ret) { /* * We weren't able to traverse the entire log tree, the * typical scenario is getting an -EIO when reading an * extent buffer of the tree, due to a previous writeback * failure of it. */ set_bit(BTRFS_FS_STATE_LOG_CLEANUP_ERROR, &log->fs_info->fs_state); /* * Some extent buffers of the log tree may still be dirty * and not yet written back to storage, because we may * have updates to a log tree without syncing a log tree, * such as during rename and link operations. So flush * them out and wait for their writeback to complete, so * that we properly cleanup their state and pages. */ btrfs_write_marked_extents(log->fs_info, &log->dirty_log_pages, EXTENT_DIRTY | EXTENT_NEW); btrfs_wait_tree_log_extents(log, EXTENT_DIRTY | EXTENT_NEW); if (trans) btrfs_abort_transaction(trans, ret); else btrfs_handle_fs_error(log->fs_info, ret, NULL); } } clear_extent_bits(&log->dirty_log_pages, 0, (u64)-1, EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT); extent_io_tree_release(&log->log_csum_range); btrfs_put_root(log); } /* * free all the extents used by the tree log. This should be called * at commit time of the full transaction */ int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root) { if (root->log_root) { free_log_tree(trans, root->log_root); root->log_root = NULL; clear_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state); } return 0; } int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info) { if (fs_info->log_root_tree) { free_log_tree(trans, fs_info->log_root_tree); fs_info->log_root_tree = NULL; clear_bit(BTRFS_ROOT_HAS_LOG_TREE, &fs_info->tree_root->state); } return 0; } /* * Check if an inode was logged in the current transaction. This correctly deals * with the case where the inode was logged but has a logged_trans of 0, which * happens if the inode is evicted and loaded again, as logged_trans is an in * memory only field (not persisted). * * Returns 1 if the inode was logged before in the transaction, 0 if it was not, * and < 0 on error. */ static int inode_logged(const struct btrfs_trans_handle *trans, struct btrfs_inode *inode, struct btrfs_path *path_in) { struct btrfs_path *path = path_in; struct btrfs_key key; int ret; if (inode->logged_trans == trans->transid) return 1; /* * If logged_trans is not 0, then we know the inode logged was not logged * in this transaction, so we can return false right away. */ if (inode->logged_trans > 0) return 0; /* * If no log tree was created for this root in this transaction, then * the inode can not have been logged in this transaction. In that case * set logged_trans to anything greater than 0 and less than the current * transaction's ID, to avoid the search below in a future call in case * a log tree gets created after this. */ if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &inode->root->state)) { inode->logged_trans = trans->transid - 1; return 0; } /* * We have a log tree and the inode's logged_trans is 0. We can't tell * for sure if the inode was logged before in this transaction by looking * only at logged_trans. 
We could be pessimistic and assume it was, but * that can lead to unnecessarily logging an inode during rename and link * operations, and then further updating the log in followup rename and * link operations, specially if it's a directory, which adds latency * visible to applications doing a series of rename or link operations. * * A logged_trans of 0 here can mean several things: * * 1) The inode was never logged since the filesystem was mounted, and may * or may have not been evicted and loaded again; * * 2) The inode was logged in a previous transaction, then evicted and * then loaded again; * * 3) The inode was logged in the current transaction, then evicted and * then loaded again. * * For cases 1) and 2) we don't want to return true, but we need to detect * case 3) and return true. So we do a search in the log root for the inode * item. */ key.objectid = btrfs_ino(inode); key.type = BTRFS_INODE_ITEM_KEY; key.offset = 0; if (!path) { path = btrfs_alloc_path(); if (!path) return -ENOMEM; } ret = btrfs_search_slot(NULL, inode->root->log_root, &key, path, 0, 0); if (path_in) btrfs_release_path(path); else btrfs_free_path(path); /* * Logging an inode always results in logging its inode item. So if we * did not find the item we know the inode was not logged for sure. */ if (ret < 0) { return ret; } else if (ret > 0) { /* * Set logged_trans to a value greater than 0 and less then the * current transaction to avoid doing the search in future calls. */ inode->logged_trans = trans->transid - 1; return 0; } /* * The inode was previously logged and then evicted, set logged_trans to * the current transacion's ID, to avoid future tree searches as long as * the inode is not evicted again. */ inode->logged_trans = trans->transid; /* * If it's a directory, then we must set last_dir_index_offset to the * maximum possible value, so that the next attempt to log the inode does * not skip checking if dir index keys found in modified subvolume tree * leaves have been logged before, otherwise it would result in attempts * to insert duplicate dir index keys in the log tree. This must be done * because last_dir_index_offset is an in-memory only field, not persisted * in the inode item or any other on-disk structure, so its value is lost * once the inode is evicted. */ if (S_ISDIR(inode->vfs_inode.i_mode)) inode->last_dir_index_offset = (u64)-1; return 1; } /* * Delete a directory entry from the log if it exists. * * Returns < 0 on error * 1 if the entry does not exists * 0 if the entry existed and was successfully deleted */ static int del_logged_dentry(struct btrfs_trans_handle *trans, struct btrfs_root *log, struct btrfs_path *path, u64 dir_ino, const struct fscrypt_str *name, u64 index) { struct btrfs_dir_item *di; /* * We only log dir index items of a directory, so we don't need to look * for dir item keys. */ di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino, index, name, -1); if (IS_ERR(di)) return PTR_ERR(di); else if (!di) return 1; /* * We do not need to update the size field of the directory's * inode item because on log replay we update the field to reflect * all existing entries in the directory (see overwrite_item()). */ return btrfs_delete_one_dir_name(trans, log, path, di); } /* * If both a file and directory are logged, and unlinks or renames are * mixed in, we have a few interesting corners: * * create file X in dir Y * link file X to X.link in dir Y * fsync file X * unlink file X but leave X.link * fsync dir Y * * After a crash we would expect only X.link to exist. 
But file X * didn't get fsync'd again so the log has back refs for X and X.link. * * We solve this by removing directory entries and inode backrefs from the * log when a file that was logged in the current transaction is * unlinked. Any later fsync will include the updated log entries, and * we'll be able to reconstruct the proper directory items from backrefs. * * This optimizations allows us to avoid relogging the entire inode * or the entire directory. */ void btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, struct btrfs_root *root, const struct fscrypt_str *name, struct btrfs_inode *dir, u64 index) { struct btrfs_path *path; int ret; ret = inode_logged(trans, dir, NULL); if (ret == 0) return; else if (ret < 0) { btrfs_set_log_full_commit(trans); return; } ret = join_running_log_trans(root); if (ret) return; mutex_lock(&dir->log_mutex); path = btrfs_alloc_path(); if (!path) { ret = -ENOMEM; goto out_unlock; } ret = del_logged_dentry(trans, root->log_root, path, btrfs_ino(dir), name, index); btrfs_free_path(path); out_unlock: mutex_unlock(&dir->log_mutex); if (ret < 0) btrfs_set_log_full_commit(trans); btrfs_end_log_trans(root); } /* see comments for btrfs_del_dir_entries_in_log */ void btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans, struct btrfs_root *root, const struct fscrypt_str *name, struct btrfs_inode *inode, u64 dirid) { struct btrfs_root *log; u64 index; int ret; ret = inode_logged(trans, inode, NULL); if (ret == 0) return; else if (ret < 0) { btrfs_set_log_full_commit(trans); return; } ret = join_running_log_trans(root); if (ret) return; log = root->log_root; mutex_lock(&inode->log_mutex); ret = btrfs_del_inode_ref(trans, log, name, btrfs_ino(inode), dirid, &index); mutex_unlock(&inode->log_mutex); if (ret < 0 && ret != -ENOENT) btrfs_set_log_full_commit(trans); btrfs_end_log_trans(root); } /* * creates a range item in the log for 'dirid'. first_offset and * last_offset tell us which parts of the key space the log should * be considered authoritative for. */ static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans, struct btrfs_root *log, struct btrfs_path *path, u64 dirid, u64 first_offset, u64 last_offset) { int ret; struct btrfs_key key; struct btrfs_dir_log_item *item; key.objectid = dirid; key.offset = first_offset; key.type = BTRFS_DIR_LOG_INDEX_KEY; ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item)); /* * -EEXIST is fine and can happen sporadically when we are logging a * directory and have concurrent insertions in the subvolume's tree for * items from other inodes and that result in pushing off some dir items * from one leaf to another in order to accommodate for the new items. * This results in logging the same dir index range key. */ if (ret && ret != -EEXIST) return ret; item = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_dir_log_item); if (ret == -EEXIST) { const u64 curr_end = btrfs_dir_log_end(path->nodes[0], item); /* * btrfs_del_dir_entries_in_log() might have been called during * an unlink between the initial insertion of this key and the * current update, or we might be logging a single entry deletion * during a rename, so set the new last_offset to the max value. 
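 */

/*
 * Illustrative sketch of what a BTRFS_DIR_LOG_INDEX_KEY range item, as
 * created by insert_dir_log_key(), expresses: the log is authoritative for
 * dir index values in [start, end], so during replay an index that lies
 * inside a logged range but has no logged dentry must have been deleted.
 * This is a userspace model only; struct dir_log_range and
 * index_was_deleted() are made-up names for the example.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct dir_log_range {
        uint64_t start;
        uint64_t end;   /* inclusive, like btrfs_dir_log_end() */
};

static bool covered_by_log(const struct dir_log_range *ranges, int nr,
                           uint64_t index)
{
        for (int i = 0; i < nr; i++)
                if (index >= ranges[i].start && index <= ranges[i].end)
                        return true;
        return false;
}

/* Replay-side rule: covered by a range but not logged => deleted. */
static bool index_was_deleted(const struct dir_log_range *ranges, int nr,
                              const uint64_t *logged, int nr_logged,
                              uint64_t index)
{
        if (!covered_by_log(ranges, nr, index))
                return false;   /* the log says nothing about this index */
        for (int i = 0; i < nr_logged; i++)
                if (logged[i] == index)
                        return false;
        return true;
}

int main(void)
{
        const struct dir_log_range ranges[] = { { 2, 10 } };
        const uint64_t logged[] = { 2, 3, 7 };

        /* Index 5 is inside [2, 10] but was not logged: treat it as deleted. */
        printf("index 5 deleted: %d\n",
               index_was_deleted(ranges, 1, logged, 3, 5));
        /* Index 20 is outside every range: the log is not authoritative. */
        printf("index 20 deleted: %d\n",
               index_was_deleted(ranges, 1, logged, 3, 20));
        return 0;
}

/*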
*/ last_offset = max(last_offset, curr_end); } btrfs_set_dir_log_end(path->nodes[0], item, last_offset); btrfs_mark_buffer_dirty(path->nodes[0]); btrfs_release_path(path); return 0; } static int flush_dir_items_batch(struct btrfs_trans_handle *trans, struct btrfs_inode *inode, struct extent_buffer *src, struct btrfs_path *dst_path, int start_slot, int count) { struct btrfs_root *log = inode->root->log_root; char *ins_data = NULL; struct btrfs_item_batch batch; struct extent_buffer *dst; unsigned long src_offset; unsigned long dst_offset; u64 last_index; struct btrfs_key key; u32 item_size; int ret; int i; ASSERT(count > 0); batch.nr = count; if (count == 1) { btrfs_item_key_to_cpu(src, &key, start_slot); item_size = btrfs_item_size(src, start_slot); batch.keys = &key; batch.data_sizes = &item_size; batch.total_data_size = item_size; } else { struct btrfs_key *ins_keys; u32 *ins_sizes; ins_data = kmalloc(count * sizeof(u32) + count * sizeof(struct btrfs_key), GFP_NOFS); if (!ins_data) return -ENOMEM; ins_sizes = (u32 *)ins_data; ins_keys = (struct btrfs_key *)(ins_data + count * sizeof(u32)); batch.keys = ins_keys; batch.data_sizes = ins_sizes; batch.total_data_size = 0; for (i = 0; i < count; i++) { const int slot = start_slot + i; btrfs_item_key_to_cpu(src, &ins_keys[i], slot); ins_sizes[i] = btrfs_item_size(src, slot); batch.total_data_size += ins_sizes[i]; } } ret = btrfs_insert_empty_items(trans, log, dst_path, &batch); if (ret) goto out; dst = dst_path->nodes[0]; /* * Copy all the items in bulk, in a single copy operation. Item data is * organized such that it's placed at the end of a leaf and from right * to left. For example, the data for the second item ends at an offset * that matches the offset where the data for the first item starts, the * data for the third item ends at an offset that matches the offset * where the data of the second items starts, and so on. * Therefore our source and destination start offsets for copy match the * offsets of the last items (highest slots). */ dst_offset = btrfs_item_ptr_offset(dst, dst_path->slots[0] + count - 1); src_offset = btrfs_item_ptr_offset(src, start_slot + count - 1); copy_extent_buffer(dst, src, dst_offset, src_offset, batch.total_data_size); btrfs_release_path(dst_path); last_index = batch.keys[count - 1].offset; ASSERT(last_index > inode->last_dir_index_offset); /* * If for some unexpected reason the last item's index is not greater * than the last index we logged, warn and force a transaction commit. */ if (WARN_ON(last_index <= inode->last_dir_index_offset)) ret = BTRFS_LOG_FORCE_COMMIT; else inode->last_dir_index_offset = last_index; if (btrfs_get_first_dir_index_to_log(inode) == 0) btrfs_set_first_dir_index_to_log(inode, batch.keys[0].offset); out: kfree(ins_data); return ret; } static int process_dir_items_leaf(struct btrfs_trans_handle *trans, struct btrfs_inode *inode, struct btrfs_path *path, struct btrfs_path *dst_path, struct btrfs_log_ctx *ctx, u64 *last_old_dentry_offset) { struct btrfs_root *log = inode->root->log_root; struct extent_buffer *src; const int nritems = btrfs_header_nritems(path->nodes[0]); const u64 ino = btrfs_ino(inode); bool last_found = false; int batch_start = 0; int batch_size = 0; int i; /* * We need to clone the leaf, release the read lock on it, and use the * clone before modifying the log tree. See the comment at copy_items() * about why we need to do this. 
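 */

/*
 * Userspace sketch of the bulk copy done in flush_dir_items_batch() above.
 * Item data in a btrfs leaf grows from the end of the leaf towards the
 * start, so the data of N consecutive items forms one contiguous region
 * whose lowest address is the data offset of the item in the highest slot.
 * A single memcpy() of total_data_size bytes therefore moves the whole
 * batch. The leaf layout below is heavily simplified and all names are
 * illustrative.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define LEAF_SIZE 64

struct demo_item {
        uint32_t offset;        /* offset of this item's data inside the leaf */
        uint32_t size;
};

/* Place an item's data right before the previous item's data. */
static void add_item(char *leaf, struct demo_item *items, int slot,
                     const char *data, uint32_t size)
{
        uint32_t prev_off = slot ? items[slot - 1].offset : LEAF_SIZE;

        items[slot].offset = prev_off - size;
        items[slot].size = size;
        memcpy(leaf + items[slot].offset, data, size);
}

int main(void)
{
        char src[LEAF_SIZE] = { 0 }, dst[LEAF_SIZE] = { 0 };
        struct demo_item items[3];
        uint32_t total = 0;

        add_item(src, items, 0, "AAAA", 4);
        add_item(src, items, 1, "BB", 2);
        add_item(src, items, 2, "CCCCC", 5);
        for (int i = 0; i < 3; i++)
                total += items[i].size;

        /* One copy moves the data of all three items at once. */
        memcpy(dst + items[2].offset, src + items[2].offset, total);

        printf("copied %u bytes: %.5s %.2s %.4s\n", total,
               dst + items[2].offset, dst + items[1].offset,
               dst + items[0].offset);
        return 0;
}

/*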
*/ src = btrfs_clone_extent_buffer(path->nodes[0]); if (!src) return -ENOMEM; i = path->slots[0]; btrfs_release_path(path); path->nodes[0] = src; path->slots[0] = i; for (; i < nritems; i++) { struct btrfs_dir_item *di; struct btrfs_key key; int ret; btrfs_item_key_to_cpu(src, &key, i); if (key.objectid != ino || key.type != BTRFS_DIR_INDEX_KEY) { last_found = true; break; } di = btrfs_item_ptr(src, i, struct btrfs_dir_item); /* * Skip ranges of items that consist only of dir item keys created * in past transactions. However if we find a gap, we must log a * dir index range item for that gap, so that index keys in that * gap are deleted during log replay. */ if (btrfs_dir_transid(src, di) < trans->transid) { if (key.offset > *last_old_dentry_offset + 1) { ret = insert_dir_log_key(trans, log, dst_path, ino, *last_old_dentry_offset + 1, key.offset - 1); if (ret < 0) return ret; } *last_old_dentry_offset = key.offset; continue; } /* If we logged this dir index item before, we can skip it. */ if (key.offset <= inode->last_dir_index_offset) continue; /* * We must make sure that when we log a directory entry, the * corresponding inode, after log replay, has a matching link * count. For example: * * touch foo * mkdir mydir * sync * ln foo mydir/bar * xfs_io -c "fsync" mydir * <crash> * <mount fs and log replay> * * Would result in a fsync log that when replayed, our file inode * would have a link count of 1, but we get two directory entries * pointing to the same inode. After removing one of the names, * it would not be possible to remove the other name, which * resulted always in stale file handle errors, and would not be * possible to rmdir the parent directory, since its i_size could * never be decremented to the value BTRFS_EMPTY_DIR_SIZE, * resulting in -ENOTEMPTY errors. */ if (!ctx->log_new_dentries) { struct btrfs_key di_key; btrfs_dir_item_key_to_cpu(src, di, &di_key); if (di_key.type != BTRFS_ROOT_ITEM_KEY) ctx->log_new_dentries = true; } if (batch_size == 0) batch_start = i; batch_size++; } if (batch_size > 0) { int ret; ret = flush_dir_items_batch(trans, inode, src, dst_path, batch_start, batch_size); if (ret < 0) return ret; } return last_found ? 1 : 0; } /* * log all the items included in the current transaction for a given * directory. This also creates the range items in the log tree required * to replay anything deleted before the fsync */ static noinline int log_dir_items(struct btrfs_trans_handle *trans, struct btrfs_inode *inode, struct btrfs_path *path, struct btrfs_path *dst_path, struct btrfs_log_ctx *ctx, u64 min_offset, u64 *last_offset_ret) { struct btrfs_key min_key; struct btrfs_root *root = inode->root; struct btrfs_root *log = root->log_root; int ret; u64 last_old_dentry_offset = min_offset - 1; u64 last_offset = (u64)-1; u64 ino = btrfs_ino(inode); min_key.objectid = ino; min_key.type = BTRFS_DIR_INDEX_KEY; min_key.offset = min_offset; ret = btrfs_search_forward(root, &min_key, path, trans->transid); /* * we didn't find anything from this transaction, see if there * is anything at all */ if (ret != 0 || min_key.objectid != ino || min_key.type != BTRFS_DIR_INDEX_KEY) { min_key.objectid = ino; min_key.type = BTRFS_DIR_INDEX_KEY; min_key.offset = (u64)-1; btrfs_release_path(path); ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0); if (ret < 0) { btrfs_release_path(path); return ret; } ret = btrfs_previous_item(root, path, ino, BTRFS_DIR_INDEX_KEY); /* if ret == 0 there are items for this type, * create a range to tell us the last key of this type. 
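 */

/*
 * Sketch of the gap handling in process_dir_items_leaf() above: when walking
 * dir index keys created in past transactions, a jump in the index values
 * means entries in between were deleted, so a range item covering the gap
 * has to be logged for replay to delete them too. Userspace model with
 * illustrative names only.
 */
#include <stdint.h>
#include <stdio.h>

/* Emit one BTRFS_DIR_LOG_INDEX_KEY-like range per gap in @indexes. */
static void log_gaps(const uint64_t *indexes, int nr, uint64_t first_index)
{
        uint64_t last = first_index - 1;

        for (int i = 0; i < nr; i++) {
                if (indexes[i] > last + 1)
                        printf("log range [%llu, %llu] (deleted entries)\n",
                               (unsigned long long)(last + 1),
                               (unsigned long long)(indexes[i] - 1));
                last = indexes[i];
        }
}

int main(void)
{
        /* Index 4, 5 and 8 were deleted at some point in the past. */
        const uint64_t indexes[] = { 2, 3, 6, 7, 9 };

        log_gaps(indexes, 5, 2);
        return 0;
}

/*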
* otherwise, there are no items in this directory after * *min_offset, and we create a range to indicate that. */ if (ret == 0) { struct btrfs_key tmp; btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]); if (tmp.type == BTRFS_DIR_INDEX_KEY) last_old_dentry_offset = tmp.offset; } else if (ret > 0) { ret = 0; } goto done; } /* go backward to find any previous key */ ret = btrfs_previous_item(root, path, ino, BTRFS_DIR_INDEX_KEY); if (ret == 0) { struct btrfs_key tmp; btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]); /* * The dir index key before the first one we found that needs to * be logged might be in a previous leaf, and there might be a * gap between these keys, meaning that we had deletions that * happened. So the key range item we log (key type * BTRFS_DIR_LOG_INDEX_KEY) must cover a range that starts at the * previous key's offset plus 1, so that those deletes are replayed. */ if (tmp.type == BTRFS_DIR_INDEX_KEY) last_old_dentry_offset = tmp.offset; } else if (ret < 0) { goto done; } btrfs_release_path(path); /* * Find the first key from this transaction again or the one we were at * in the loop below in case we had to reschedule. We may be logging the * directory without holding its VFS lock, which happen when logging new * dentries (through log_new_dir_dentries()) or in some cases when we * need to log the parent directory of an inode. This means a dir index * key might be deleted from the inode's root, and therefore we may not * find it anymore. If we can't find it, just move to the next key. We * can not bail out and ignore, because if we do that we will simply * not log dir index keys that come after the one that was just deleted * and we can end up logging a dir index range that ends at (u64)-1 * (@last_offset is initialized to that), resulting in removing dir * entries we should not remove at log replay time. */ search: ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0); if (ret > 0) { ret = btrfs_next_item(root, path); if (ret > 0) { /* There are no more keys in the inode's root. */ ret = 0; goto done; } } if (ret < 0) goto done; /* * we have a block from this transaction, log every item in it * from our directory */ while (1) { ret = process_dir_items_leaf(trans, inode, path, dst_path, ctx, &last_old_dentry_offset); if (ret != 0) { if (ret > 0) ret = 0; goto done; } path->slots[0] = btrfs_header_nritems(path->nodes[0]); /* * look ahead to the next item and see if it is also * from this directory and from this transaction */ ret = btrfs_next_leaf(root, path); if (ret) { if (ret == 1) { last_offset = (u64)-1; ret = 0; } goto done; } btrfs_item_key_to_cpu(path->nodes[0], &min_key, path->slots[0]); if (min_key.objectid != ino || min_key.type != BTRFS_DIR_INDEX_KEY) { last_offset = (u64)-1; goto done; } if (btrfs_header_generation(path->nodes[0]) != trans->transid) { /* * The next leaf was not changed in the current transaction * and has at least one dir index key. * We check for the next key because there might have been * one or more deletions between the last key we logged and * that next key. So the key range item we log (key type * BTRFS_DIR_LOG_INDEX_KEY) must end at the next key's * offset minus 1, so that those deletes are replayed. 
*/ last_offset = min_key.offset - 1; goto done; } if (need_resched()) { btrfs_release_path(path); cond_resched(); goto search; } } done: btrfs_release_path(path); btrfs_release_path(dst_path); if (ret == 0) { *last_offset_ret = last_offset; /* * In case the leaf was changed in the current transaction but * all its dir items are from a past transaction, the last item * in the leaf is a dir item and there's no gap between that last * dir item and the first one on the next leaf (which did not * change in the current transaction), then we don't need to log * a range, last_old_dentry_offset is == to last_offset. */ ASSERT(last_old_dentry_offset <= last_offset); if (last_old_dentry_offset < last_offset) ret = insert_dir_log_key(trans, log, path, ino, last_old_dentry_offset + 1, last_offset); } return ret; } /* * If the inode was logged before and it was evicted, then its * last_dir_index_offset is (u64)-1, so we don't the value of the last index * key offset. If that's the case, search for it and update the inode. This * is to avoid lookups in the log tree every time we try to insert a dir index * key from a leaf changed in the current transaction, and to allow us to always * do batch insertions of dir index keys. */ static int update_last_dir_index_offset(struct btrfs_inode *inode, struct btrfs_path *path, const struct btrfs_log_ctx *ctx) { const u64 ino = btrfs_ino(inode); struct btrfs_key key; int ret; lockdep_assert_held(&inode->log_mutex); if (inode->last_dir_index_offset != (u64)-1) return 0; if (!ctx->logged_before) { inode->last_dir_index_offset = BTRFS_DIR_START_INDEX - 1; return 0; } key.objectid = ino; key.type = BTRFS_DIR_INDEX_KEY; key.offset = (u64)-1; ret = btrfs_search_slot(NULL, inode->root->log_root, &key, path, 0, 0); /* * An error happened or we actually have an index key with an offset * value of (u64)-1. Bail out, we're done. */ if (ret <= 0) goto out; ret = 0; inode->last_dir_index_offset = BTRFS_DIR_START_INDEX - 1; /* * No dir index items, bail out and leave last_dir_index_offset with * the value right before the first valid index value. */ if (path->slots[0] == 0) goto out; /* * btrfs_search_slot() left us at one slot beyond the slot with the last * index key, or beyond the last key of the directory that is not an * index key. If we have an index key before, set last_dir_index_offset * to its offset value, otherwise leave it with a value right before the * first valid index value, as it means we have an empty directory. */ btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1); if (key.objectid == ino && key.type == BTRFS_DIR_INDEX_KEY) inode->last_dir_index_offset = key.offset; out: btrfs_release_path(path); return ret; } /* * logging directories is very similar to logging inodes, We find all the items * from the current transaction and write them to the log. * * The recovery code scans the directory in the subvolume, and if it finds a * key in the range logged that is not present in the log tree, then it means * that dir entry was unlinked during the transaction. * * In order for that scan to work, we must include one key smaller than * the smallest logged by this transaction and one key larger than the largest * key logged by this transaction. 
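 */

/*
 * Sketch of what update_last_dir_index_offset() above recovers after an
 * inode eviction: the offset of the highest dir index key already present
 * in the log, or a value just below the first valid index when nothing was
 * logged before. Userspace model; DEMO_DIR_START_INDEX and the array of
 * logged indexes stand in for BTRFS_DIR_START_INDEX and a log tree search.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_DIR_START_INDEX 2ULL

static uint64_t recover_last_dir_index(const uint64_t *logged_indexes, int nr,
                                        int logged_before)
{
        if (!logged_before || nr == 0)
                return DEMO_DIR_START_INDEX - 1;
        /* The log search lands past the last index key; take the previous one. */
        return logged_indexes[nr - 1];
}

int main(void)
{
        const uint64_t logged[] = { 2, 3, 7 };

        printf("resume after index %llu\n",
               (unsigned long long)recover_last_dir_index(logged, 3, 1));
        printf("fresh dir: resume after index %llu\n",
               (unsigned long long)recover_last_dir_index(NULL, 0, 0));
        return 0;
}

/*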
*/ static noinline int log_directory_changes(struct btrfs_trans_handle *trans, struct btrfs_inode *inode, struct btrfs_path *path, struct btrfs_path *dst_path, struct btrfs_log_ctx *ctx) { u64 min_key; u64 max_key; int ret; ret = update_last_dir_index_offset(inode, path, ctx); if (ret) return ret; min_key = BTRFS_DIR_START_INDEX; max_key = 0; while (1) { ret = log_dir_items(trans, inode, path, dst_path, ctx, min_key, &max_key); if (ret) return ret; if (max_key == (u64)-1) break; min_key = max_key + 1; } return 0; } /* * a helper function to drop items from the log before we relog an * inode. max_key_type indicates the highest item type to remove. * This cannot be run for file data extents because it does not * free the extents they point to. */ static int drop_inode_items(struct btrfs_trans_handle *trans, struct btrfs_root *log, struct btrfs_path *path, struct btrfs_inode *inode, int max_key_type) { int ret; struct btrfs_key key; struct btrfs_key found_key; int start_slot; key.objectid = btrfs_ino(inode); key.type = max_key_type; key.offset = (u64)-1; while (1) { ret = btrfs_search_slot(trans, log, &key, path, -1, 1); if (ret < 0) { break; } else if (ret > 0) { if (path->slots[0] == 0) break; path->slots[0]--; } btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]); if (found_key.objectid != key.objectid) break; found_key.offset = 0; found_key.type = 0; ret = btrfs_bin_search(path->nodes[0], 0, &found_key, &start_slot); if (ret < 0) break; ret = btrfs_del_items(trans, log, path, start_slot, path->slots[0] - start_slot + 1); /* * If start slot isn't 0 then we don't need to re-search, we've * found the last guy with the objectid in this tree. */ if (ret || start_slot != 0) break; btrfs_release_path(path); } btrfs_release_path(path); if (ret > 0) ret = 0; return ret; } static int truncate_inode_items(struct btrfs_trans_handle *trans, struct btrfs_root *log_root, struct btrfs_inode *inode, u64 new_size, u32 min_type) { struct btrfs_truncate_control control = { .new_size = new_size, .ino = btrfs_ino(inode), .min_type = min_type, .skip_ref_updates = true, }; return btrfs_truncate_inode_items(trans, log_root, &control); } static void fill_inode_item(struct btrfs_trans_handle *trans, struct extent_buffer *leaf, struct btrfs_inode_item *item, struct inode *inode, int log_inode_only, u64 logged_isize) { struct btrfs_map_token token; u64 flags; btrfs_init_map_token(&token, leaf); if (log_inode_only) { /* set the generation to zero so the recover code * can tell the difference between an logging * just to say 'this inode exists' and a logging * to say 'update this inode with these values' */ btrfs_set_token_inode_generation(&token, item, 0); btrfs_set_token_inode_size(&token, item, logged_isize); } else { btrfs_set_token_inode_generation(&token, item, BTRFS_I(inode)->generation); btrfs_set_token_inode_size(&token, item, inode->i_size); } btrfs_set_token_inode_uid(&token, item, i_uid_read(inode)); btrfs_set_token_inode_gid(&token, item, i_gid_read(inode)); btrfs_set_token_inode_mode(&token, item, inode->i_mode); btrfs_set_token_inode_nlink(&token, item, inode->i_nlink); btrfs_set_token_timespec_sec(&token, &item->atime, inode->i_atime.tv_sec); btrfs_set_token_timespec_nsec(&token, &item->atime, inode->i_atime.tv_nsec); btrfs_set_token_timespec_sec(&token, &item->mtime, inode->i_mtime.tv_sec); btrfs_set_token_timespec_nsec(&token, &item->mtime, inode->i_mtime.tv_nsec); btrfs_set_token_timespec_sec(&token, &item->ctime, inode_get_ctime(inode).tv_sec); btrfs_set_token_timespec_nsec(&token, 
&item->ctime, inode_get_ctime(inode).tv_nsec); /* * We do not need to set the nbytes field, in fact during a fast fsync * its value may not even be correct, since a fast fsync does not wait * for ordered extent completion, which is where we update nbytes, it * only waits for writeback to complete. During log replay as we find * file extent items and replay them, we adjust the nbytes field of the * inode item in subvolume tree as needed (see overwrite_item()). */ btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode)); btrfs_set_token_inode_transid(&token, item, trans->transid); btrfs_set_token_inode_rdev(&token, item, inode->i_rdev); flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags, BTRFS_I(inode)->ro_flags); btrfs_set_token_inode_flags(&token, item, flags); btrfs_set_token_inode_block_group(&token, item, 0); } static int log_inode_item(struct btrfs_trans_handle *trans, struct btrfs_root *log, struct btrfs_path *path, struct btrfs_inode *inode, bool inode_item_dropped) { struct btrfs_inode_item *inode_item; int ret; /* * If we are doing a fast fsync and the inode was logged before in the * current transaction, then we know the inode was previously logged and * it exists in the log tree. For performance reasons, in this case use * btrfs_search_slot() directly with ins_len set to 0 so that we never * attempt a write lock on the leaf's parent, which adds unnecessary lock * contention in case there are concurrent fsyncs for other inodes of the * same subvolume. Using btrfs_insert_empty_item() when the inode item * already exists can also result in unnecessarily splitting a leaf. */ if (!inode_item_dropped && inode->logged_trans == trans->transid) { ret = btrfs_search_slot(trans, log, &inode->location, path, 0, 1); ASSERT(ret <= 0); if (ret > 0) ret = -ENOENT; } else { /* * This means it is the first fsync in the current transaction, * so the inode item is not in the log and we need to insert it. * We can never get -EEXIST because we are only called for a fast * fsync and in case an inode eviction happens after the inode was * logged before in the current transaction, when we load again * the inode, we set BTRFS_INODE_NEEDS_FULL_SYNC on its runtime * flags and set ->logged_trans to 0. */ ret = btrfs_insert_empty_item(trans, log, path, &inode->location, sizeof(*inode_item)); ASSERT(ret != -EEXIST); } if (ret) return ret; inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_inode_item); fill_inode_item(trans, path->nodes[0], inode_item, &inode->vfs_inode, 0, 0); btrfs_release_path(path); return 0; } static int log_csums(struct btrfs_trans_handle *trans, struct btrfs_inode *inode, struct btrfs_root *log_root, struct btrfs_ordered_sum *sums) { const u64 lock_end = sums->logical + sums->len - 1; struct extent_state *cached_state = NULL; int ret; /* * If this inode was not used for reflink operations in the current * transaction with new extents, then do the fast path, no need to * worry about logging checksum items with overlapping ranges. */ if (inode->last_reflink_trans < trans->transid) return btrfs_csum_file_blocks(trans, log_root, sums); /* * Serialize logging for checksums. This is to avoid racing with the * same checksum being logged by another task that is logging another * file which happens to refer to the same extent as well. Such races * can leave checksum items in the log with overlapping ranges. 
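 */

/*
 * Sketch of the rule log_csums() relies on to keep checksum items in the
 * log from overlapping when extents are cloned: any existing range that
 * overlaps the new one is trimmed (possibly split) before the new range is
 * inserted. Userspace model with inclusive-exclusive [start, end) ranges;
 * all names are illustrative and no splitting/overflow corner cases of the
 * real csum code are modeled.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_RANGES 16

struct range {
        uint64_t start;
        uint64_t end;   /* exclusive */
};

struct range_set {
        struct range r[MAX_RANGES];
        int nr;
};

/* Remove [start, end) from every existing range, splitting where needed. */
static void del_range(struct range_set *s, uint64_t start, uint64_t end)
{
        struct range out[MAX_RANGES];
        int n = 0;

        for (int i = 0; i < s->nr; i++) {
                struct range cur = s->r[i];

                if (cur.end <= start || cur.start >= end) {
                        out[n++] = cur;                 /* no overlap */
                        continue;
                }
                if (cur.start < start)
                        out[n++] = (struct range){ cur.start, start };
                if (cur.end > end)
                        out[n++] = (struct range){ end, cur.end };
        }
        for (int i = 0; i < n; i++)
                s->r[i] = out[i];
        s->nr = n;
}

static void log_csum_range(struct range_set *s, uint64_t start, uint64_t len)
{
        del_range(s, start, start + len);       /* never leave overlaps behind */
        s->r[s->nr++] = (struct range){ start, start + len };
}

int main(void)
{
        struct range_set s = { .nr = 0 };

        log_csum_range(&s, 0, 16);      /* csums for a cloned sub-range */
        log_csum_range(&s, 8, 16);      /* larger range of the same extent */
        for (int i = 0; i < s.nr; i++)
                printf("[%llu, %llu)\n", (unsigned long long)s.r[i].start,
                       (unsigned long long)s.r[i].end);
        return 0;
}

/*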
*/ ret = lock_extent(&log_root->log_csum_range, sums->logical, lock_end, &cached_state); if (ret) return ret; /* * Due to extent cloning, we might have logged a csum item that covers a * subrange of a cloned extent, and later we can end up logging a csum * item for a larger subrange of the same extent or the entire range. * This would leave csum items in the log tree that cover the same range * and break the searches for checksums in the log tree, resulting in * some checksums missing in the fs/subvolume tree. So just delete (or * trim and adjust) any existing csum items in the log for this range. */ ret = btrfs_del_csums(trans, log_root, sums->logical, sums->len); if (!ret) ret = btrfs_csum_file_blocks(trans, log_root, sums); unlock_extent(&log_root->log_csum_range, sums->logical, lock_end, &cached_state); return ret; } static noinline int copy_items(struct btrfs_trans_handle *trans, struct btrfs_inode *inode, struct btrfs_path *dst_path, struct btrfs_path *src_path, int start_slot, int nr, int inode_only, u64 logged_isize) { struct btrfs_root *log = inode->root->log_root; struct btrfs_file_extent_item *extent; struct extent_buffer *src; int ret = 0; struct btrfs_key *ins_keys; u32 *ins_sizes; struct btrfs_item_batch batch; char *ins_data; int i; int dst_index; const bool skip_csum = (inode->flags & BTRFS_INODE_NODATASUM); const u64 i_size = i_size_read(&inode->vfs_inode); /* * To keep lockdep happy and avoid deadlocks, clone the source leaf and * use the clone. This is because otherwise we would be changing the log * tree, to insert items from the subvolume tree or insert csum items, * while holding a read lock on a leaf from the subvolume tree, which * creates a nasty lock dependency when COWing log tree nodes/leaves: * * 1) Modifying the log tree triggers an extent buffer allocation while * holding a write lock on a parent extent buffer from the log tree. * Allocating the pages for an extent buffer, or the extent buffer * struct, can trigger inode eviction and finally the inode eviction * will trigger a release/remove of a delayed node, which requires * taking the delayed node's mutex; * * 2) Allocating a metadata extent for a log tree can trigger the async * reclaim thread and make us wait for it to release enough space and * unblock our reservation ticket. The reclaim thread can start * flushing delayed items, and that in turn results in the need to * lock delayed node mutexes and in the need to write lock extent * buffers of a subvolume tree - all this while holding a write lock * on the parent extent buffer in the log tree. * * So one task in scenario 1) running in parallel with another task in * scenario 2) could lead to a deadlock, one wanting to lock a delayed * node mutex while having a read lock on a leaf from the subvolume, * while the other is holding the delayed node's mutex and wants to * write lock the same subvolume leaf for flushing delayed items. 
*/ src = btrfs_clone_extent_buffer(src_path->nodes[0]); if (!src) return -ENOMEM; i = src_path->slots[0]; btrfs_release_path(src_path); src_path->nodes[0] = src; src_path->slots[0] = i; ins_data = kmalloc(nr * sizeof(struct btrfs_key) + nr * sizeof(u32), GFP_NOFS); if (!ins_data) return -ENOMEM; ins_sizes = (u32 *)ins_data; ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32)); batch.keys = ins_keys; batch.data_sizes = ins_sizes; batch.total_data_size = 0; batch.nr = 0; dst_index = 0; for (i = 0; i < nr; i++) { const int src_slot = start_slot + i; struct btrfs_root *csum_root; struct btrfs_ordered_sum *sums; struct btrfs_ordered_sum *sums_next; LIST_HEAD(ordered_sums); u64 disk_bytenr; u64 disk_num_bytes; u64 extent_offset; u64 extent_num_bytes; bool is_old_extent; btrfs_item_key_to_cpu(src, &ins_keys[dst_index], src_slot); if (ins_keys[dst_index].type != BTRFS_EXTENT_DATA_KEY) goto add_to_batch; extent = btrfs_item_ptr(src, src_slot, struct btrfs_file_extent_item); is_old_extent = (btrfs_file_extent_generation(src, extent) < trans->transid); /* * Don't copy extents from past generations. That would make us * log a lot more metadata for common cases like doing only a * few random writes into a file and then fsync it for the first * time or after the full sync flag is set on the inode. We can * get leaves full of extent items, most of which are from past * generations, so we can skip them - as long as the inode has * not been the target of a reflink operation in this transaction, * as in that case it might have had file extent items with old * generations copied into it. We also must always log prealloc * extents that start at or beyond eof, otherwise we would lose * them on log replay. */ if (is_old_extent && ins_keys[dst_index].offset < i_size && inode->last_reflink_trans < trans->transid) continue; if (skip_csum) goto add_to_batch; /* Only regular extents have checksums. */ if (btrfs_file_extent_type(src, extent) != BTRFS_FILE_EXTENT_REG) goto add_to_batch; /* * If it's an extent created in a past transaction, then its * checksums are already accessible from the committed csum tree, * no need to log them. */ if (is_old_extent) goto add_to_batch; disk_bytenr = btrfs_file_extent_disk_bytenr(src, extent); /* If it's an explicit hole, there are no checksums. */ if (disk_bytenr == 0) goto add_to_batch; disk_num_bytes = btrfs_file_extent_disk_num_bytes(src, extent); if (btrfs_file_extent_compression(src, extent)) { extent_offset = 0; extent_num_bytes = disk_num_bytes; } else { extent_offset = btrfs_file_extent_offset(src, extent); extent_num_bytes = btrfs_file_extent_num_bytes(src, extent); } csum_root = btrfs_csum_root(trans->fs_info, disk_bytenr); disk_bytenr += extent_offset; ret = btrfs_lookup_csums_list(csum_root, disk_bytenr, disk_bytenr + extent_num_bytes - 1, &ordered_sums, 0, false); if (ret) goto out; list_for_each_entry_safe(sums, sums_next, &ordered_sums, list) { if (!ret) ret = log_csums(trans, inode, log, sums); list_del(&sums->list); kfree(sums); } if (ret) goto out; add_to_batch: ins_sizes[dst_index] = btrfs_item_size(src, src_slot); batch.total_data_size += ins_sizes[dst_index]; batch.nr++; dst_index++; } /* * We have a leaf full of old extent items that don't need to be logged, * so we don't need to do anything. 
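 */

/*
 * Sketch of the filtering done by the first loop of copy_items() above:
 * file extent items from past generations are skipped (unless they start at
 * or beyond i_size, or the inode was reflinked in this transaction), and the
 * batch of keys/sizes to insert is built only from what survives.
 * Userspace model; the item struct and helper names are illustrative.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_item {
        bool is_extent;
        uint64_t offset;        /* file offset for extent items */
        uint64_t generation;
        uint32_t size;
};

static int build_batch(const struct demo_item *items, int nr,
                       uint64_t transid, uint64_t i_size, bool reflinked,
                       uint32_t *total_size)
{
        int batched = 0;

        *total_size = 0;
        for (int i = 0; i < nr; i++) {
                const struct demo_item *it = &items[i];

                if (it->is_extent && it->generation < transid &&
                    it->offset < i_size && !reflinked)
                        continue;       /* old extent, already in the fs tree */
                *total_size += it->size;
                batched++;
        }
        return batched;
}

int main(void)
{
        const struct demo_item items[] = {
                { true,  0,      90, 53 },      /* old extent, skipped       */
                { true,  4096,  100, 53 },      /* new extent, logged        */
                { true,  65536,  90, 53 },      /* prealloc beyond EOF, kept */
                { false, 0,     100, 160 },     /* e.g. an inode item        */
        };
        uint32_t total;
        int nr = build_batch(items, 4, 100, 8192, false, &total);

        printf("batched %d items, %u bytes of data\n", nr, total);
        return 0;
}

/*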
*/ if (batch.nr == 0) goto out; ret = btrfs_insert_empty_items(trans, log, dst_path, &batch); if (ret) goto out; dst_index = 0; for (i = 0; i < nr; i++) { const int src_slot = start_slot + i; const int dst_slot = dst_path->slots[0] + dst_index; struct btrfs_key key; unsigned long src_offset; unsigned long dst_offset; /* * We're done, all the remaining items in the source leaf * correspond to old file extent items. */ if (dst_index >= batch.nr) break; btrfs_item_key_to_cpu(src, &key, src_slot); if (key.type != BTRFS_EXTENT_DATA_KEY) goto copy_item; extent = btrfs_item_ptr(src, src_slot, struct btrfs_file_extent_item); /* See the comment in the previous loop, same logic. */ if (btrfs_file_extent_generation(src, extent) < trans->transid && key.offset < i_size && inode->last_reflink_trans < trans->transid) continue; copy_item: dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0], dst_slot); src_offset = btrfs_item_ptr_offset(src, src_slot); if (key.type == BTRFS_INODE_ITEM_KEY) { struct btrfs_inode_item *inode_item; inode_item = btrfs_item_ptr(dst_path->nodes[0], dst_slot, struct btrfs_inode_item); fill_inode_item(trans, dst_path->nodes[0], inode_item, &inode->vfs_inode, inode_only == LOG_INODE_EXISTS, logged_isize); } else { copy_extent_buffer(dst_path->nodes[0], src, dst_offset, src_offset, ins_sizes[dst_index]); } dst_index++; } btrfs_mark_buffer_dirty(dst_path->nodes[0]); btrfs_release_path(dst_path); out: kfree(ins_data); return ret; } static int extent_cmp(void *priv, const struct list_head *a, const struct list_head *b) { const struct extent_map *em1, *em2; em1 = list_entry(a, struct extent_map, list); em2 = list_entry(b, struct extent_map, list); if (em1->start < em2->start) return -1; else if (em1->start > em2->start) return 1; return 0; } static int log_extent_csums(struct btrfs_trans_handle *trans, struct btrfs_inode *inode, struct btrfs_root *log_root, const struct extent_map *em, struct btrfs_log_ctx *ctx) { struct btrfs_ordered_extent *ordered; struct btrfs_root *csum_root; u64 csum_offset; u64 csum_len; u64 mod_start = em->mod_start; u64 mod_len = em->mod_len; LIST_HEAD(ordered_sums); int ret = 0; if (inode->flags & BTRFS_INODE_NODATASUM || test_bit(EXTENT_FLAG_PREALLOC, &em->flags) || em->block_start == EXTENT_MAP_HOLE) return 0; list_for_each_entry(ordered, &ctx->ordered_extents, log_list) { const u64 ordered_end = ordered->file_offset + ordered->num_bytes; const u64 mod_end = mod_start + mod_len; struct btrfs_ordered_sum *sums; if (mod_len == 0) break; if (ordered_end <= mod_start) continue; if (mod_end <= ordered->file_offset) break; /* * We are going to copy all the csums on this ordered extent, so * go ahead and adjust mod_start and mod_len in case this ordered * extent has already been logged. */ if (ordered->file_offset > mod_start) { if (ordered_end >= mod_end) mod_len = ordered->file_offset - mod_start; /* * If we have this case * * |--------- logged extent ---------| * |----- ordered extent ----| * * Just don't mess with mod_start and mod_len, we'll * just end up logging more csums than we need and it * will be ok. */ } else { if (ordered_end < mod_end) { mod_len = mod_end - ordered_end; mod_start = ordered_end; } else { mod_len = 0; } } /* * To keep us from looping for the above case of an ordered * extent that falls inside of the logged extent. 
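 */

/*
 * The modified extents collected for logging are sorted by file offset with
 * a comparator equivalent to extent_cmp() above. A minimal userspace
 * analogue using qsort() on an array instead of list_sort() on a list; the
 * struct and names are illustrative.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_em {
        uint64_t start;
        uint64_t len;
};

static int demo_extent_cmp(const void *a, const void *b)
{
        const struct demo_em *em1 = a, *em2 = b;

        if (em1->start < em2->start)
                return -1;
        if (em1->start > em2->start)
                return 1;
        return 0;
}

int main(void)
{
        struct demo_em ems[] = { { 8192, 4096 }, { 0, 4096 }, { 4096, 4096 } };

        qsort(ems, 3, sizeof(ems[0]), demo_extent_cmp);
        for (int i = 0; i < 3; i++)
                printf("extent at %llu\n", (unsigned long long)ems[i].start);
        return 0;
}

/*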
*/ if (test_and_set_bit(BTRFS_ORDERED_LOGGED_CSUM, &ordered->flags)) continue; list_for_each_entry(sums, &ordered->list, list) { ret = log_csums(trans, inode, log_root, sums); if (ret) return ret; } } /* We're done, found all csums in the ordered extents. */ if (mod_len == 0) return 0; /* If we're compressed we have to save the entire range of csums. */ if (em->compress_type) { csum_offset = 0; csum_len = max(em->block_len, em->orig_block_len); } else { csum_offset = mod_start - em->start; csum_len = mod_len; } /* block start is already adjusted for the file extent offset. */ csum_root = btrfs_csum_root(trans->fs_info, em->block_start); ret = btrfs_lookup_csums_list(csum_root, em->block_start + csum_offset, em->block_start + csum_offset + csum_len - 1, &ordered_sums, 0, false); if (ret) return ret; while (!list_empty(&ordered_sums)) { struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next, struct btrfs_ordered_sum, list); if (!ret) ret = log_csums(trans, inode, log_root, sums); list_del(&sums->list); kfree(sums); } return ret; } static int log_one_extent(struct btrfs_trans_handle *trans, struct btrfs_inode *inode, const struct extent_map *em, struct btrfs_path *path, struct btrfs_log_ctx *ctx) { struct btrfs_drop_extents_args drop_args = { 0 }; struct btrfs_root *log = inode->root->log_root; struct btrfs_file_extent_item fi = { 0 }; struct extent_buffer *leaf; struct btrfs_key key; u64 extent_offset = em->start - em->orig_start; u64 block_len; int ret; btrfs_set_stack_file_extent_generation(&fi, trans->transid); if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) btrfs_set_stack_file_extent_type(&fi, BTRFS_FILE_EXTENT_PREALLOC); else btrfs_set_stack_file_extent_type(&fi, BTRFS_FILE_EXTENT_REG); block_len = max(em->block_len, em->orig_block_len); if (em->compress_type != BTRFS_COMPRESS_NONE) { btrfs_set_stack_file_extent_disk_bytenr(&fi, em->block_start); btrfs_set_stack_file_extent_disk_num_bytes(&fi, block_len); } else if (em->block_start < EXTENT_MAP_LAST_BYTE) { btrfs_set_stack_file_extent_disk_bytenr(&fi, em->block_start - extent_offset); btrfs_set_stack_file_extent_disk_num_bytes(&fi, block_len); } btrfs_set_stack_file_extent_offset(&fi, extent_offset); btrfs_set_stack_file_extent_num_bytes(&fi, em->len); btrfs_set_stack_file_extent_ram_bytes(&fi, em->ram_bytes); btrfs_set_stack_file_extent_compression(&fi, em->compress_type); ret = log_extent_csums(trans, inode, log, em, ctx); if (ret) return ret; /* * If this is the first time we are logging the inode in the current * transaction, we can avoid btrfs_drop_extents(), which is expensive * because it does a deletion search, which always acquires write locks * for extent buffers at levels 2, 1 and 0. This not only wastes time * but also adds significant contention in a log tree, since log trees * are small, with a root at level 2 or 3 at most, due to their short * life span. 
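 */

/*
 * Sketch of how log_extent_csums() above shrinks the [mod_start, mod_len)
 * range as it finds ordered extents whose checksums are already being
 * logged, so that only the uncovered remainder is looked up in the csum
 * tree. Userspace model; the ordered extent list is just an array here and
 * the early-exit optimizations of the real code are omitted.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_ordered {
        uint64_t file_offset;
        uint64_t num_bytes;
};

static void trim_mod_range(const struct demo_ordered *ordered, int nr,
                           uint64_t *mod_start, uint64_t *mod_len)
{
        for (int i = 0; i < nr && *mod_len > 0; i++) {
                const uint64_t o_start = ordered[i].file_offset;
                const uint64_t o_end = o_start + ordered[i].num_bytes;
                const uint64_t mod_end = *mod_start + *mod_len;

                if (o_end <= *mod_start || o_start >= mod_end)
                        continue;       /* no overlap with this ordered extent */
                if (o_start > *mod_start) {
                        /* Ordered extent covers the tail (or sits inside). */
                        if (o_end >= mod_end)
                                *mod_len = o_start - *mod_start;
                } else if (o_end < mod_end) {
                        /* Ordered extent covers the head. */
                        *mod_len = mod_end - o_end;
                        *mod_start = o_end;
                } else {
                        *mod_len = 0;   /* fully covered, nothing left */
                }
        }
}

int main(void)
{
        const struct demo_ordered ordered[] = { { 0, 4096 }, { 12288, 4096 } };
        uint64_t mod_start = 0, mod_len = 16384;

        trim_mod_range(ordered, 2, &mod_start, &mod_len);
        printf("remaining range: start %llu len %llu\n",
               (unsigned long long)mod_start, (unsigned long long)mod_len);
        return 0;
}

/*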
*/ if (ctx->logged_before) { drop_args.path = path; drop_args.start = em->start; drop_args.end = em->start + em->len; drop_args.replace_extent = true; drop_args.extent_item_size = sizeof(fi); ret = btrfs_drop_extents(trans, log, inode, &drop_args); if (ret) return ret; } if (!drop_args.extent_inserted) { key.objectid = btrfs_ino(inode); key.type = BTRFS_EXTENT_DATA_KEY; key.offset = em->start; ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(fi)); if (ret) return ret; } leaf = path->nodes[0]; write_extent_buffer(leaf, &fi, btrfs_item_ptr_offset(leaf, path->slots[0]), sizeof(fi)); btrfs_mark_buffer_dirty(leaf); btrfs_release_path(path); return ret; } /* * Log all prealloc extents beyond the inode's i_size to make sure we do not * lose them after doing a full/fast fsync and replaying the log. We scan the * subvolume's root instead of iterating the inode's extent map tree because * otherwise we can log incorrect extent items based on extent map conversion. * That can happen due to the fact that extent maps are merged when they * are not in the extent map tree's list of modified extents. */ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans, struct btrfs_inode *inode, struct btrfs_path *path) { struct btrfs_root *root = inode->root; struct btrfs_key key; const u64 i_size = i_size_read(&inode->vfs_inode); const u64 ino = btrfs_ino(inode); struct btrfs_path *dst_path = NULL; bool dropped_extents = false; u64 truncate_offset = i_size; struct extent_buffer *leaf; int slot; int ins_nr = 0; int start_slot; int ret; if (!(inode->flags & BTRFS_INODE_PREALLOC)) return 0; key.objectid = ino; key.type = BTRFS_EXTENT_DATA_KEY; key.offset = i_size; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto out; /* * We must check if there is a prealloc extent that starts before the * i_size and crosses the i_size boundary. This is to ensure later we * truncate down to the end of that extent and not to the i_size, as * otherwise we end up losing part of the prealloc extent after a log * replay and with an implicit hole if there is another prealloc extent * that starts at an offset beyond i_size. */ ret = btrfs_previous_item(root, path, ino, BTRFS_EXTENT_DATA_KEY); if (ret < 0) goto out; if (ret == 0) { struct btrfs_file_extent_item *ei; leaf = path->nodes[0]; slot = path->slots[0]; ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); if (btrfs_file_extent_type(leaf, ei) == BTRFS_FILE_EXTENT_PREALLOC) { u64 extent_end; btrfs_item_key_to_cpu(leaf, &key, slot); extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, ei); if (extent_end > i_size) truncate_offset = extent_end; } } else { ret = 0; } while (true) { leaf = path->nodes[0]; slot = path->slots[0]; if (slot >= btrfs_header_nritems(leaf)) { if (ins_nr > 0) { ret = copy_items(trans, inode, dst_path, path, start_slot, ins_nr, 1, 0); if (ret < 0) goto out; ins_nr = 0; } ret = btrfs_next_leaf(root, path); if (ret < 0) goto out; if (ret > 0) { ret = 0; break; } continue; } btrfs_item_key_to_cpu(leaf, &key, slot); if (key.objectid > ino) break; if (WARN_ON_ONCE(key.objectid < ino) || key.type < BTRFS_EXTENT_DATA_KEY || key.offset < i_size) { path->slots[0]++; continue; } if (!dropped_extents) { /* * Avoid logging extent items logged in past fsync calls * and leading to duplicate keys in the log tree. 
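 */

/*
 * Sketch of how btrfs_log_prealloc_extents() above picks the offset used to
 * drop stale items from the log before re-logging prealloc extents beyond
 * i_size: normally i_size itself, but if a prealloc extent starts before
 * i_size and crosses it, the truncation point moves to that extent's end so
 * the crossing extent is not partially lost on replay. Userspace model with
 * illustrative names.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_prealloc {
        uint64_t offset;
        uint64_t num_bytes;
};

static uint64_t pick_truncate_offset(const struct demo_prealloc *prev_extent,
                                     uint64_t i_size)
{
        uint64_t truncate_offset = i_size;

        if (prev_extent) {
                const uint64_t end = prev_extent->offset + prev_extent->num_bytes;

                /* The extent right before i_size crosses the i_size boundary. */
                if (prev_extent->offset < i_size && end > i_size)
                        truncate_offset = end;
        }
        return truncate_offset;
}

int main(void)
{
        const struct demo_prealloc crossing = { 4096, 65536 };

        printf("truncate at %llu\n",
               (unsigned long long)pick_truncate_offset(&crossing, 8192));
        printf("truncate at %llu\n",
               (unsigned long long)pick_truncate_offset(NULL, 8192));
        return 0;
}

/*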
*/ ret = truncate_inode_items(trans, root->log_root, inode, truncate_offset, BTRFS_EXTENT_DATA_KEY); if (ret) goto out; dropped_extents = true; } if (ins_nr == 0) start_slot = slot; ins_nr++; path->slots[0]++; if (!dst_path) { dst_path = btrfs_alloc_path(); if (!dst_path) { ret = -ENOMEM; goto out; } } } if (ins_nr > 0) ret = copy_items(trans, inode, dst_path, path, start_slot, ins_nr, 1, 0); out: btrfs_release_path(path); btrfs_free_path(dst_path); return ret; } static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans, struct btrfs_inode *inode, struct btrfs_path *path, struct btrfs_log_ctx *ctx) { struct btrfs_ordered_extent *ordered; struct btrfs_ordered_extent *tmp; struct extent_map *em, *n; LIST_HEAD(extents); struct extent_map_tree *tree = &inode->extent_tree; int ret = 0; int num = 0; write_lock(&tree->lock); list_for_each_entry_safe(em, n, &tree->modified_extents, list) { list_del_init(&em->list); /* * Just an arbitrary number, this can be really CPU intensive * once we start getting a lot of extents, and really once we * have a bunch of extents we just want to commit since it will * be faster. */ if (++num > 32768) { list_del_init(&tree->modified_extents); ret = -EFBIG; goto process; } if (em->generation < trans->transid) continue; /* We log prealloc extents beyond eof later. */ if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) && em->start >= i_size_read(&inode->vfs_inode)) continue; /* Need a ref to keep it from getting evicted from cache */ refcount_inc(&em->refs); set_bit(EXTENT_FLAG_LOGGING, &em->flags); list_add_tail(&em->list, &extents); num++; } list_sort(NULL, &extents, extent_cmp); process: while (!list_empty(&extents)) { em = list_entry(extents.next, struct extent_map, list); list_del_init(&em->list); /* * If we had an error we just need to delete everybody from our * private list. */ if (ret) { clear_em_logging(tree, em); free_extent_map(em); continue; } write_unlock(&tree->lock); ret = log_one_extent(trans, inode, em, path, ctx); write_lock(&tree->lock); clear_em_logging(tree, em); free_extent_map(em); } WARN_ON(!list_empty(&extents)); write_unlock(&tree->lock); if (!ret) ret = btrfs_log_prealloc_extents(trans, inode, path); if (ret) return ret; /* * We have logged all extents successfully, now make sure the commit of * the current transaction waits for the ordered extents to complete * before it commits and wipes out the log trees, otherwise we would * lose data if an ordered extents completes after the transaction * commits and a power failure happens after the transaction commit. 
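 */

/*
 * Sketch of the safety valve at the top of btrfs_log_changed_extents()
 * above: past an arbitrary threshold of modified extents it is cheaper to
 * give up on per-extent logging and force a transaction commit, which the
 * kernel signals with -EFBIG. Userspace model; DEMO_MAX_EXTENTS and the
 * helper name are illustrative.
 */
#include <errno.h>
#include <stdio.h>

#define DEMO_MAX_EXTENTS 32768

/* Returns 0 if the extents should be logged one by one, -EFBIG otherwise. */
static int collect_modified_extents(int nr_modified, int *nr_to_log)
{
        if (nr_modified > DEMO_MAX_EXTENTS) {
                *nr_to_log = 0;
                return -EFBIG;  /* caller falls back to a full commit */
        }
        *nr_to_log = nr_modified;
        return 0;
}

int main(void)
{
        int to_log, ret;

        ret = collect_modified_extents(100, &to_log);
        printf("100 extents: ret %d, log %d\n", ret, to_log);
        ret = collect_modified_extents(50000, &to_log);
        printf("50000 extents: ret %d (force commit)\n", ret);
        return 0;
}

/*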
*/ list_for_each_entry_safe(ordered, tmp, &ctx->ordered_extents, log_list) { list_del_init(&ordered->log_list); set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags); if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) { spin_lock_irq(&inode->ordered_tree.lock); if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) { set_bit(BTRFS_ORDERED_PENDING, &ordered->flags); atomic_inc(&trans->transaction->pending_ordered); } spin_unlock_irq(&inode->ordered_tree.lock); } btrfs_put_ordered_extent(ordered); } return 0; } static int logged_inode_size(struct btrfs_root *log, struct btrfs_inode *inode, struct btrfs_path *path, u64 *size_ret) { struct btrfs_key key; int ret; key.objectid = btrfs_ino(inode); key.type = BTRFS_INODE_ITEM_KEY; key.offset = 0; ret = btrfs_search_slot(NULL, log, &key, path, 0, 0); if (ret < 0) { return ret; } else if (ret > 0) { *size_ret = 0; } else { struct btrfs_inode_item *item; item = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_inode_item); *size_ret = btrfs_inode_size(path->nodes[0], item); /* * If the in-memory inode's i_size is smaller then the inode * size stored in the btree, return the inode's i_size, so * that we get a correct inode size after replaying the log * when before a power failure we had a shrinking truncate * followed by addition of a new name (rename / new hard link). * Otherwise return the inode size from the btree, to avoid * data loss when replaying a log due to previously doing a * write that expands the inode's size and logging a new name * immediately after. */ if (*size_ret > inode->vfs_inode.i_size) *size_ret = inode->vfs_inode.i_size; } btrfs_release_path(path); return 0; } /* * At the moment we always log all xattrs. This is to figure out at log replay * time which xattrs must have their deletion replayed. If a xattr is missing * in the log tree and exists in the fs/subvol tree, we delete it. This is * because if a xattr is deleted, the inode is fsynced and a power failure * happens, causing the log to be replayed the next time the fs is mounted, * we want the xattr to not exist anymore (same behaviour as other filesystems * with a journal, ext3/4, xfs, f2fs, etc). 
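 */

/*
 * Sketch of the size selection in logged_inode_size() above: when the inode
 * item already exists in the log, the smaller of the logged size and the
 * current in-memory i_size is used, so a shrinking truncate followed by a
 * rename or new hard link is replayed with the truncated size. Userspace
 * model; names are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t demo_logged_inode_size(int found_in_log, uint64_t size_in_log,
                                        uint64_t in_memory_i_size)
{
        if (!found_in_log)
                return 0;
        if (size_in_log > in_memory_i_size)
                return in_memory_i_size;        /* shrinking truncate case */
        return size_in_log;
}

int main(void)
{
        /* Logged size 1 MiB, then truncated down to 4 KiB before a rename. */
        printf("logged size: %llu\n", (unsigned long long)
               demo_logged_inode_size(1, 1 << 20, 4096));
        /* A write extended the file after it was logged: keep the logged size. */
        printf("logged size: %llu\n", (unsigned long long)
               demo_logged_inode_size(1, 4096, 1 << 20));
        return 0;
}

/*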
*/ static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans, struct btrfs_inode *inode, struct btrfs_path *path, struct btrfs_path *dst_path) { struct btrfs_root *root = inode->root; int ret; struct btrfs_key key; const u64 ino = btrfs_ino(inode); int ins_nr = 0; int start_slot = 0; bool found_xattrs = false; if (test_bit(BTRFS_INODE_NO_XATTRS, &inode->runtime_flags)) return 0; key.objectid = ino; key.type = BTRFS_XATTR_ITEM_KEY; key.offset = 0; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) return ret; while (true) { int slot = path->slots[0]; struct extent_buffer *leaf = path->nodes[0]; int nritems = btrfs_header_nritems(leaf); if (slot >= nritems) { if (ins_nr > 0) { ret = copy_items(trans, inode, dst_path, path, start_slot, ins_nr, 1, 0); if (ret < 0) return ret; ins_nr = 0; } ret = btrfs_next_leaf(root, path); if (ret < 0) return ret; else if (ret > 0) break; continue; } btrfs_item_key_to_cpu(leaf, &key, slot); if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) break; if (ins_nr == 0) start_slot = slot; ins_nr++; path->slots[0]++; found_xattrs = true; cond_resched(); } if (ins_nr > 0) { ret = copy_items(trans, inode, dst_path, path, start_slot, ins_nr, 1, 0); if (ret < 0) return ret; } if (!found_xattrs) set_bit(BTRFS_INODE_NO_XATTRS, &inode->runtime_flags); return 0; } /* * When using the NO_HOLES feature if we punched a hole that causes the * deletion of entire leafs or all the extent items of the first leaf (the one * that contains the inode item and references) we may end up not processing * any extents, because there are no leafs with a generation matching the * current transaction that have extent items for our inode. So we need to find * if any holes exist and then log them. We also need to log holes after any * truncate operation that changes the inode's size. */ static int btrfs_log_holes(struct btrfs_trans_handle *trans, struct btrfs_inode *inode, struct btrfs_path *path) { struct btrfs_root *root = inode->root; struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_key key; const u64 ino = btrfs_ino(inode); const u64 i_size = i_size_read(&inode->vfs_inode); u64 prev_extent_end = 0; int ret; if (!btrfs_fs_incompat(fs_info, NO_HOLES) || i_size == 0) return 0; key.objectid = ino; key.type = BTRFS_EXTENT_DATA_KEY; key.offset = 0; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) return ret; while (true) { struct extent_buffer *leaf = path->nodes[0]; if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { ret = btrfs_next_leaf(root, path); if (ret < 0) return ret; if (ret > 0) { ret = 0; break; } leaf = path->nodes[0]; } btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) break; /* We have a hole, log it. */ if (prev_extent_end < key.offset) { const u64 hole_len = key.offset - prev_extent_end; /* * Release the path to avoid deadlocks with other code * paths that search the root while holding locks on * leafs from the log root. */ btrfs_release_path(path); ret = btrfs_insert_hole_extent(trans, root->log_root, ino, prev_extent_end, hole_len); if (ret < 0) return ret; /* * Search for the same key again in the root. Since it's * an extent item and we are holding the inode lock, the * key must still exist. If it doesn't just emit warning * and return an error to fall back to a transaction * commit. 
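 */

/*
 * Sketch of the hole detection performed by btrfs_log_holes() for NO_HOLES
 * filesystems: walk the file's extent items in order, and every gap between
 * the previous extent's end and the next extent's start (plus any gap before
 * i_size at the end) becomes an explicit hole in the log. Userspace model;
 * the extents are a plain array and all names are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_extent {
        uint64_t offset;
        uint64_t num_bytes;
};

#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

static void log_holes(const struct demo_extent *ext, int nr, uint64_t i_size,
                      uint64_t sectorsize)
{
        uint64_t prev_end = 0;

        for (int i = 0; i < nr; i++) {
                if (prev_end < ext[i].offset)
                        printf("hole [%llu, %llu)\n",
                               (unsigned long long)prev_end,
                               (unsigned long long)ext[i].offset);
                prev_end = ext[i].offset + ext[i].num_bytes;
        }
        if (prev_end < i_size)
                printf("trailing hole [%llu, %llu)\n",
                       (unsigned long long)prev_end,
                       (unsigned long long)(prev_end +
                                ALIGN_UP(i_size - prev_end, sectorsize)));
}

int main(void)
{
        const struct demo_extent ext[] = { { 0, 4096 }, { 16384, 4096 } };

        log_holes(ext, 2, 30000, 4096);
        return 0;
}

/*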
*/ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) return ret; if (WARN_ON(ret > 0)) return -ENOENT; leaf = path->nodes[0]; } prev_extent_end = btrfs_file_extent_end(path); path->slots[0]++; cond_resched(); } if (prev_extent_end < i_size) { u64 hole_len; btrfs_release_path(path); hole_len = ALIGN(i_size - prev_extent_end, fs_info->sectorsize); ret = btrfs_insert_hole_extent(trans, root->log_root, ino, prev_extent_end, hole_len); if (ret < 0) return ret; } return 0; } /* * When we are logging a new inode X, check if it doesn't have a reference that * matches the reference from some other inode Y created in a past transaction * and that was renamed in the current transaction. If we don't do this, then at * log replay time we can lose inode Y (and all its files if it's a directory): * * mkdir /mnt/x * echo "hello world" > /mnt/x/foobar * sync * mv /mnt/x /mnt/y * mkdir /mnt/x # or touch /mnt/x * xfs_io -c fsync /mnt/x * <power fail> * mount fs, trigger log replay * * After the log replay procedure, we would lose the first directory and all its * files (file foobar). * For the case where inode Y is not a directory we simply end up losing it: * * echo "123" > /mnt/foo * sync * mv /mnt/foo /mnt/bar * echo "abc" > /mnt/foo * xfs_io -c fsync /mnt/foo * <power fail> * * We also need this for cases where a snapshot entry is replaced by some other * entry (file or directory) otherwise we end up with an unreplayable log due to * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as * if it were a regular entry: * * mkdir /mnt/x * btrfs subvolume snapshot /mnt /mnt/x/snap * btrfs subvolume delete /mnt/x/snap * rmdir /mnt/x * mkdir /mnt/x * fsync /mnt/x or fsync some new file inside it * <power fail> * * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in * the same transaction. 
*/ static int btrfs_check_ref_name_override(struct extent_buffer *eb, const int slot, const struct btrfs_key *key, struct btrfs_inode *inode, u64 *other_ino, u64 *other_parent) { int ret; struct btrfs_path *search_path; char *name = NULL; u32 name_len = 0; u32 item_size = btrfs_item_size(eb, slot); u32 cur_offset = 0; unsigned long ptr = btrfs_item_ptr_offset(eb, slot); search_path = btrfs_alloc_path(); if (!search_path) return -ENOMEM; search_path->search_commit_root = 1; search_path->skip_locking = 1; while (cur_offset < item_size) { u64 parent; u32 this_name_len; u32 this_len; unsigned long name_ptr; struct btrfs_dir_item *di; struct fscrypt_str name_str; if (key->type == BTRFS_INODE_REF_KEY) { struct btrfs_inode_ref *iref; iref = (struct btrfs_inode_ref *)(ptr + cur_offset); parent = key->offset; this_name_len = btrfs_inode_ref_name_len(eb, iref); name_ptr = (unsigned long)(iref + 1); this_len = sizeof(*iref) + this_name_len; } else { struct btrfs_inode_extref *extref; extref = (struct btrfs_inode_extref *)(ptr + cur_offset); parent = btrfs_inode_extref_parent(eb, extref); this_name_len = btrfs_inode_extref_name_len(eb, extref); name_ptr = (unsigned long)&extref->name; this_len = sizeof(*extref) + this_name_len; } if (this_name_len > name_len) { char *new_name; new_name = krealloc(name, this_name_len, GFP_NOFS); if (!new_name) { ret = -ENOMEM; goto out; } name_len = this_name_len; name = new_name; } read_extent_buffer(eb, name, name_ptr, this_name_len); name_str.name = name; name_str.len = this_name_len; di = btrfs_lookup_dir_item(NULL, inode->root, search_path, parent, &name_str, 0); if (di && !IS_ERR(di)) { struct btrfs_key di_key; btrfs_dir_item_key_to_cpu(search_path->nodes[0], di, &di_key); if (di_key.type == BTRFS_INODE_ITEM_KEY) { if (di_key.objectid != key->objectid) { ret = 1; *other_ino = di_key.objectid; *other_parent = parent; } else { ret = 0; } } else { ret = -EAGAIN; } goto out; } else if (IS_ERR(di)) { ret = PTR_ERR(di); goto out; } btrfs_release_path(search_path); cur_offset += this_len; } ret = 0; out: btrfs_free_path(search_path); kfree(name); return ret; } /* * Check if we need to log an inode. This is used in contexts where while * logging an inode we need to log another inode (either that it exists or in * full mode). This is used instead of btrfs_inode_in_log() because the later * requires the inode to be in the log and have the log transaction committed, * while here we do not care if the log transaction was already committed - our * caller will commit the log later - and we want to avoid logging an inode * multiple times when multiple tasks have joined the same log transaction. */ static bool need_log_inode(const struct btrfs_trans_handle *trans, struct btrfs_inode *inode) { /* * If a directory was not modified, no dentries added or removed, we can * and should avoid logging it. */ if (S_ISDIR(inode->vfs_inode.i_mode) && inode->last_trans < trans->transid) return false; /* * If this inode does not have new/updated/deleted xattrs since the last * time it was logged and is flagged as logged in the current transaction, * we can skip logging it. As for new/deleted names, those are updated in * the log by link/unlink/rename operations. * In case the inode was logged and then evicted and reloaded, its * logged_trans will be 0, in which case we have to fully log it since * logged_trans is a transient field, not persisted. 
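 */

/*
 * Sketch of the item-payload walk done by btrfs_check_ref_name_override()
 * above: an inode ref item packs several variable-length records back to
 * back, each a fixed header followed by the name bytes, and the parser
 * advances by header size plus name length. Simplified userspace model with
 * a two-byte header holding only the name length; all names are illustrative.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_ref {
        uint16_t name_len;
        /* name bytes follow, not NUL terminated */
} __attribute__((packed));

static void walk_refs(const unsigned char *item, uint32_t item_size)
{
        uint32_t cur = 0;

        while (cur < item_size) {
                const struct demo_ref *ref = (const void *)(item + cur);
                const char *name = (const char *)(ref + 1);

                printf("ref name: %.*s\n", ref->name_len, name);
                cur += sizeof(*ref) + ref->name_len;
        }
}

int main(void)
{
        unsigned char item[64];
        uint32_t size = 0;
        const char *names[] = { "zz", "zz_link" };

        for (int i = 0; i < 2; i++) {
                struct demo_ref ref = { .name_len = (uint16_t)strlen(names[i]) };

                memcpy(item + size, &ref, sizeof(ref));
                memcpy(item + size + sizeof(ref), names[i], ref.name_len);
                size += sizeof(ref) + ref.name_len;
        }
        walk_refs(item, size);
        return 0;
}

/*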
*/ if (inode_logged(trans, inode, NULL) == 1 && !test_bit(BTRFS_INODE_COPY_EVERYTHING, &inode->runtime_flags)) return false; return true; } struct btrfs_dir_list { u64 ino; struct list_head list; }; /* * Log the inodes of the new dentries of a directory. * See process_dir_items_leaf() for details about why it is needed. * This is a recursive operation - if an existing dentry corresponds to a * directory, that directory's new entries are logged too (same behaviour as * ext3/4, xfs, f2fs, reiserfs, nilfs2). Note that when logging the inodes * the dentries point to we do not acquire their VFS lock, otherwise lockdep * complains about the following circular lock dependency / possible deadlock: * * CPU0 CPU1 * ---- ---- * lock(&type->i_mutex_dir_key#3/2); * lock(sb_internal#2); * lock(&type->i_mutex_dir_key#3/2); * lock(&sb->s_type->i_mutex_key#14); * * Where sb_internal is the lock (a counter that works as a lock) acquired by * sb_start_intwrite() in btrfs_start_transaction(). * Not acquiring the VFS lock of the inodes is still safe because: * * 1) For regular files we log with a mode of LOG_INODE_EXISTS. It's possible * that while logging the inode new references (names) are added or removed * from the inode, leaving the logged inode item with a link count that does * not match the number of logged inode reference items. This is fine because * at log replay time we compute the real number of links and correct the * link count in the inode item (see replay_one_buffer() and * link_to_fixup_dir()); * * 2) For directories we log with a mode of LOG_INODE_ALL. It's possible that * while logging the inode's items new index items (key type * BTRFS_DIR_INDEX_KEY) are added to fs/subvol tree and the logged inode item * has a size that doesn't match the sum of the lengths of all the logged * names - this is ok, not a problem, because at log replay time we set the * directory's i_size to the correct value (see replay_one_name() and * overwrite_item()). */ static int log_new_dir_dentries(struct btrfs_trans_handle *trans, struct btrfs_inode *start_inode, struct btrfs_log_ctx *ctx) { struct btrfs_root *root = start_inode->root; struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_path *path; LIST_HEAD(dir_list); struct btrfs_dir_list *dir_elem; u64 ino = btrfs_ino(start_inode); struct btrfs_inode *curr_inode = start_inode; int ret = 0; /* * If we are logging a new name, as part of a link or rename operation, * don't bother logging new dentries, as we just want to log the names * of an inode and that any new parents exist. */ if (ctx->logging_new_name) return 0; path = btrfs_alloc_path(); if (!path) return -ENOMEM; /* Pairs with btrfs_add_delayed_iput below. 
*/ ihold(&curr_inode->vfs_inode); while (true) { struct inode *vfs_inode; struct btrfs_key key; struct btrfs_key found_key; u64 next_index; bool continue_curr_inode = true; int iter_ret; key.objectid = ino; key.type = BTRFS_DIR_INDEX_KEY; key.offset = btrfs_get_first_dir_index_to_log(curr_inode); next_index = key.offset; again: btrfs_for_each_slot(root->log_root, &key, &found_key, path, iter_ret) { struct extent_buffer *leaf = path->nodes[0]; struct btrfs_dir_item *di; struct btrfs_key di_key; struct inode *di_inode; int log_mode = LOG_INODE_EXISTS; int type; if (found_key.objectid != ino || found_key.type != BTRFS_DIR_INDEX_KEY) { continue_curr_inode = false; break; } next_index = found_key.offset + 1; di = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item); type = btrfs_dir_ftype(leaf, di); if (btrfs_dir_transid(leaf, di) < trans->transid) continue; btrfs_dir_item_key_to_cpu(leaf, di, &di_key); if (di_key.type == BTRFS_ROOT_ITEM_KEY) continue; btrfs_release_path(path); di_inode = btrfs_iget(fs_info->sb, di_key.objectid, root); if (IS_ERR(di_inode)) { ret = PTR_ERR(di_inode); goto out; } if (!need_log_inode(trans, BTRFS_I(di_inode))) { btrfs_add_delayed_iput(BTRFS_I(di_inode)); break; } ctx->log_new_dentries = false; if (type == BTRFS_FT_DIR) log_mode = LOG_INODE_ALL; ret = btrfs_log_inode(trans, BTRFS_I(di_inode), log_mode, ctx); btrfs_add_delayed_iput(BTRFS_I(di_inode)); if (ret) goto out; if (ctx->log_new_dentries) { dir_elem = kmalloc(sizeof(*dir_elem), GFP_NOFS); if (!dir_elem) { ret = -ENOMEM; goto out; } dir_elem->ino = di_key.objectid; list_add_tail(&dir_elem->list, &dir_list); } break; } btrfs_release_path(path); if (iter_ret < 0) { ret = iter_ret; goto out; } else if (iter_ret > 0) { continue_curr_inode = false; } else { key = found_key; } if (continue_curr_inode && key.offset < (u64)-1) { key.offset++; goto again; } btrfs_set_first_dir_index_to_log(curr_inode, next_index); if (list_empty(&dir_list)) break; dir_elem = list_first_entry(&dir_list, struct btrfs_dir_list, list); ino = dir_elem->ino; list_del(&dir_elem->list); kfree(dir_elem); btrfs_add_delayed_iput(curr_inode); curr_inode = NULL; vfs_inode = btrfs_iget(fs_info->sb, ino, root); if (IS_ERR(vfs_inode)) { ret = PTR_ERR(vfs_inode); break; } curr_inode = BTRFS_I(vfs_inode); } out: btrfs_free_path(path); if (curr_inode) btrfs_add_delayed_iput(curr_inode); if (ret) { struct btrfs_dir_list *next; list_for_each_entry_safe(dir_elem, next, &dir_list, list) kfree(dir_elem); } return ret; } struct btrfs_ino_list { u64 ino; u64 parent; struct list_head list; }; static void free_conflicting_inodes(struct btrfs_log_ctx *ctx) { struct btrfs_ino_list *curr; struct btrfs_ino_list *next; list_for_each_entry_safe(curr, next, &ctx->conflict_inodes, list) { list_del(&curr->list); kfree(curr); } } static int conflicting_inode_is_dir(struct btrfs_root *root, u64 ino, struct btrfs_path *path) { struct btrfs_key key; int ret; key.objectid = ino; key.type = BTRFS_INODE_ITEM_KEY; key.offset = 0; path->search_commit_root = 1; path->skip_locking = 1; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (WARN_ON_ONCE(ret > 0)) { /* * We have previously found the inode through the commit root * so this should not happen. If it does, just error out and * fallback to a transaction commit. 
*/ ret = -ENOENT; } else if (ret == 0) { struct btrfs_inode_item *item; item = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_inode_item); if (S_ISDIR(btrfs_inode_mode(path->nodes[0], item))) ret = 1; } btrfs_release_path(path); path->search_commit_root = 0; path->skip_locking = 0; return ret; } static int add_conflicting_inode(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 ino, u64 parent, struct btrfs_log_ctx *ctx) { struct btrfs_ino_list *ino_elem; struct inode *inode; /* * It's rare to have a lot of conflicting inodes, in practice it is not * common to have more than 1 or 2. We don't want to collect too many, * as we could end up logging too many inodes (even if only in * LOG_INODE_EXISTS mode) and slow down other fsyncs or transaction * commits. */ if (ctx->num_conflict_inodes >= MAX_CONFLICT_INODES) return BTRFS_LOG_FORCE_COMMIT; inode = btrfs_iget(root->fs_info->sb, ino, root); /* * If the other inode that had a conflicting dir entry was deleted in * the current transaction then we either: * * 1) Log the parent directory (later after adding it to the list) if * the inode is a directory. This is because it may be a deleted * subvolume/snapshot or it may be a regular directory that had * deleted subvolumes/snapshots (or subdirectories that had them), * and at the moment we can't deal with dropping subvolumes/snapshots * during log replay. So we just log the parent, which will result in * a fallback to a transaction commit if we are dealing with those * cases (last_unlink_trans will match the current transaction); * * 2) Do nothing if it's not a directory. During log replay we simply * unlink the conflicting dentry from the parent directory and then * add the dentry for our inode. Like this we can avoid logging the * parent directory (and maybe fallback to a transaction commit in * case it has a last_unlink_trans == trans->transid, due to moving * some inode from it to some other directory). */ if (IS_ERR(inode)) { int ret = PTR_ERR(inode); if (ret != -ENOENT) return ret; ret = conflicting_inode_is_dir(root, ino, path); /* Not a directory or we got an error. */ if (ret <= 0) return ret; /* Conflicting inode is a directory, so we'll log its parent. */ ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS); if (!ino_elem) return -ENOMEM; ino_elem->ino = ino; ino_elem->parent = parent; list_add_tail(&ino_elem->list, &ctx->conflict_inodes); ctx->num_conflict_inodes++; return 0; } /* * If the inode was already logged skip it - otherwise we can hit an * infinite loop. Example: * * From the commit root (previous transaction) we have the following * inodes: * * inode 257 a directory * inode 258 with references "zz" and "zz_link" on inode 257 * inode 259 with reference "a" on inode 257 * * And in the current (uncommitted) transaction we have: * * inode 257 a directory, unchanged * inode 258 with references "a" and "a2" on inode 257 * inode 259 with reference "zz_link" on inode 257 * inode 261 with reference "zz" on inode 257 * * When logging inode 261 the following infinite loop could * happen if we don't skip already logged inodes: * * - we detect inode 258 as a conflicting inode, with inode 261 * on reference "zz", and log it; * * - we detect inode 259 as a conflicting inode, with inode 258 * on reference "a", and log it; * * - we detect inode 258 as a conflicting inode, with inode 259 * on reference "zz_link", and log it - again! After this we * repeat the above steps forever. 
* * Here we can use need_log_inode() because we only need to log the * inode in LOG_INODE_EXISTS mode and rename operations update the log, * so that the log ends up with the new name and without the old name. */ if (!need_log_inode(trans, BTRFS_I(inode))) { btrfs_add_delayed_iput(BTRFS_I(inode)); return 0; } btrfs_add_delayed_iput(BTRFS_I(inode)); ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS); if (!ino_elem) return -ENOMEM; ino_elem->ino = ino; ino_elem->parent = parent; list_add_tail(&ino_elem->list, &ctx->conflict_inodes); ctx->num_conflict_inodes++; return 0; } static int log_conflicting_inodes(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_log_ctx *ctx) { struct btrfs_fs_info *fs_info = root->fs_info; int ret = 0; /* * Conflicting inodes are logged by the first call to btrfs_log_inode(), * otherwise we could have unbounded recursion of btrfs_log_inode() * calls. This check guarantees we can have only 1 level of recursion. */ if (ctx->logging_conflict_inodes) return 0; ctx->logging_conflict_inodes = true; /* * New conflicting inodes may be found and added to the list while we * are logging a conflicting inode, so keep iterating while the list is * not empty. */ while (!list_empty(&ctx->conflict_inodes)) { struct btrfs_ino_list *curr; struct inode *inode; u64 ino; u64 parent; curr = list_first_entry(&ctx->conflict_inodes, struct btrfs_ino_list, list); ino = curr->ino; parent = curr->parent; list_del(&curr->list); kfree(curr); inode = btrfs_iget(fs_info->sb, ino, root); /* * If the other inode that had a conflicting dir entry was * deleted in the current transaction, we need to log its parent * directory. See the comment at add_conflicting_inode(). */ if (IS_ERR(inode)) { ret = PTR_ERR(inode); if (ret != -ENOENT) break; inode = btrfs_iget(fs_info->sb, parent, root); if (IS_ERR(inode)) { ret = PTR_ERR(inode); break; } /* * Always log the directory, we cannot make this * conditional on need_log_inode() because the directory * might have been logged in LOG_INODE_EXISTS mode or * the dir index of the conflicting inode is not in a * dir index key range logged for the directory. So we * must make sure the deletion is recorded. */ ret = btrfs_log_inode(trans, BTRFS_I(inode), LOG_INODE_ALL, ctx); btrfs_add_delayed_iput(BTRFS_I(inode)); if (ret) break; continue; } /* * Here we can use need_log_inode() because we only need to log * the inode in LOG_INODE_EXISTS mode and rename operations * update the log, so that the log ends up with the new name and * without the old name. * * We did this check at add_conflicting_inode(), but here we do * it again because if some other task logged the inode after * that, we can avoid doing it again. */ if (!need_log_inode(trans, BTRFS_I(inode))) { btrfs_add_delayed_iput(BTRFS_I(inode)); continue; } /* * We are safe logging the other inode without acquiring its * lock as long as we log with the LOG_INODE_EXISTS mode. We * are safe against concurrent renames of the other inode as * well because during a rename we pin the log and update the * log with the new name before we unpin it. 
*/ ret = btrfs_log_inode(trans, BTRFS_I(inode), LOG_INODE_EXISTS, ctx); btrfs_add_delayed_iput(BTRFS_I(inode)); if (ret) break; } ctx->logging_conflict_inodes = false; if (ret) free_conflicting_inodes(ctx); return ret; } static int copy_inode_items_to_log(struct btrfs_trans_handle *trans, struct btrfs_inode *inode, struct btrfs_key *min_key, const struct btrfs_key *max_key, struct btrfs_path *path, struct btrfs_path *dst_path, const u64 logged_isize, const int inode_only, struct btrfs_log_ctx *ctx, bool *need_log_inode_item) { const u64 i_size = i_size_read(&inode->vfs_inode); struct btrfs_root *root = inode->root; int ins_start_slot = 0; int ins_nr = 0; int ret; while (1) { ret = btrfs_search_forward(root, min_key, path, trans->transid); if (ret < 0) return ret; if (ret > 0) { ret = 0; break; } again: /* Note, ins_nr might be > 0 here, cleanup outside the loop */ if (min_key->objectid != max_key->objectid) break; if (min_key->type > max_key->type) break; if (min_key->type == BTRFS_INODE_ITEM_KEY) { *need_log_inode_item = false; } else if (min_key->type == BTRFS_EXTENT_DATA_KEY && min_key->offset >= i_size) { /* * Extents at and beyond eof are logged with * btrfs_log_prealloc_extents(). * Only regular files have BTRFS_EXTENT_DATA_KEY keys, * and no keys greater than that, so bail out. */ break; } else if ((min_key->type == BTRFS_INODE_REF_KEY || min_key->type == BTRFS_INODE_EXTREF_KEY) && (inode->generation == trans->transid || ctx->logging_conflict_inodes)) { u64 other_ino = 0; u64 other_parent = 0; ret = btrfs_check_ref_name_override(path->nodes[0], path->slots[0], min_key, inode, &other_ino, &other_parent); if (ret < 0) { return ret; } else if (ret > 0 && other_ino != btrfs_ino(BTRFS_I(ctx->inode))) { if (ins_nr > 0) { ins_nr++; } else { ins_nr = 1; ins_start_slot = path->slots[0]; } ret = copy_items(trans, inode, dst_path, path, ins_start_slot, ins_nr, inode_only, logged_isize); if (ret < 0) return ret; ins_nr = 0; btrfs_release_path(path); ret = add_conflicting_inode(trans, root, path, other_ino, other_parent, ctx); if (ret) return ret; goto next_key; } } else if (min_key->type == BTRFS_XATTR_ITEM_KEY) { /* Skip xattrs, logged later with btrfs_log_all_xattrs() */ if (ins_nr == 0) goto next_slot; ret = copy_items(trans, inode, dst_path, path, ins_start_slot, ins_nr, inode_only, logged_isize); if (ret < 0) return ret; ins_nr = 0; goto next_slot; } if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) { ins_nr++; goto next_slot; } else if (!ins_nr) { ins_start_slot = path->slots[0]; ins_nr = 1; goto next_slot; } ret = copy_items(trans, inode, dst_path, path, ins_start_slot, ins_nr, inode_only, logged_isize); if (ret < 0) return ret; ins_nr = 1; ins_start_slot = path->slots[0]; next_slot: path->slots[0]++; if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) { btrfs_item_key_to_cpu(path->nodes[0], min_key, path->slots[0]); goto again; } if (ins_nr) { ret = copy_items(trans, inode, dst_path, path, ins_start_slot, ins_nr, inode_only, logged_isize); if (ret < 0) return ret; ins_nr = 0; } btrfs_release_path(path); next_key: if (min_key->offset < (u64)-1) { min_key->offset++; } else if (min_key->type < max_key->type) { min_key->type++; min_key->offset = 0; } else { break; } /* * We may process many leaves full of items for our inode, so * avoid monopolizing a cpu for too long by rescheduling while * not holding locks on any tree. 
*/ cond_resched(); } if (ins_nr) { ret = copy_items(trans, inode, dst_path, path, ins_start_slot, ins_nr, inode_only, logged_isize); if (ret) return ret; } if (inode_only == LOG_INODE_ALL && S_ISREG(inode->vfs_inode.i_mode)) { /* * Release the path because otherwise we might attempt to double * lock the same leaf with btrfs_log_prealloc_extents() below. */ btrfs_release_path(path); ret = btrfs_log_prealloc_extents(trans, inode, dst_path); } return ret; } static int insert_delayed_items_batch(struct btrfs_trans_handle *trans, struct btrfs_root *log, struct btrfs_path *path, const struct btrfs_item_batch *batch, const struct btrfs_delayed_item *first_item) { const struct btrfs_delayed_item *curr = first_item; int ret; ret = btrfs_insert_empty_items(trans, log, path, batch); if (ret) return ret; for (int i = 0; i < batch->nr; i++) { char *data_ptr; data_ptr = btrfs_item_ptr(path->nodes[0], path->slots[0], char); write_extent_buffer(path->nodes[0], &curr->data, (unsigned long)data_ptr, curr->data_len); curr = list_next_entry(curr, log_list); path->slots[0]++; } btrfs_release_path(path); return 0; } static int log_delayed_insertion_items(struct btrfs_trans_handle *trans, struct btrfs_inode *inode, struct btrfs_path *path, const struct list_head *delayed_ins_list, struct btrfs_log_ctx *ctx) { /* 195 (4095 bytes of keys and sizes) fits in a single 4K page. */ const int max_batch_size = 195; const int leaf_data_size = BTRFS_LEAF_DATA_SIZE(trans->fs_info); const u64 ino = btrfs_ino(inode); struct btrfs_root *log = inode->root->log_root; struct btrfs_item_batch batch = { .nr = 0, .total_data_size = 0, }; const struct btrfs_delayed_item *first = NULL; const struct btrfs_delayed_item *curr; char *ins_data; struct btrfs_key *ins_keys; u32 *ins_sizes; u64 curr_batch_size = 0; int batch_idx = 0; int ret; /* We are adding dir index items to the log tree. */ lockdep_assert_held(&inode->log_mutex); /* * We collect delayed items before copying index keys from the subvolume * to the log tree. However just after we collected them, they may have * been flushed (all of them or just some of them), and therefore we * could have copied them from the subvolume tree to the log tree. * So find the first delayed item that was not yet logged (they are * sorted by index number). */ list_for_each_entry(curr, delayed_ins_list, log_list) { if (curr->index > inode->last_dir_index_offset) { first = curr; break; } } /* Empty list or all delayed items were already logged. 
*/ if (!first) return 0; ins_data = kmalloc(max_batch_size * sizeof(u32) + max_batch_size * sizeof(struct btrfs_key), GFP_NOFS); if (!ins_data) return -ENOMEM; ins_sizes = (u32 *)ins_data; batch.data_sizes = ins_sizes; ins_keys = (struct btrfs_key *)(ins_data + max_batch_size * sizeof(u32)); batch.keys = ins_keys; curr = first; while (!list_entry_is_head(curr, delayed_ins_list, log_list)) { const u32 curr_size = curr->data_len + sizeof(struct btrfs_item); if (curr_batch_size + curr_size > leaf_data_size || batch.nr == max_batch_size) { ret = insert_delayed_items_batch(trans, log, path, &batch, first); if (ret) goto out; batch_idx = 0; batch.nr = 0; batch.total_data_size = 0; curr_batch_size = 0; first = curr; } ins_sizes[batch_idx] = curr->data_len; ins_keys[batch_idx].objectid = ino; ins_keys[batch_idx].type = BTRFS_DIR_INDEX_KEY; ins_keys[batch_idx].offset = curr->index; curr_batch_size += curr_size; batch.total_data_size += curr->data_len; batch.nr++; batch_idx++; curr = list_next_entry(curr, log_list); } ASSERT(batch.nr >= 1); ret = insert_delayed_items_batch(trans, log, path, &batch, first); curr = list_last_entry(delayed_ins_list, struct btrfs_delayed_item, log_list); inode->last_dir_index_offset = curr->index; out: kfree(ins_data); return ret; } static int log_delayed_deletions_full(struct btrfs_trans_handle *trans, struct btrfs_inode *inode, struct btrfs_path *path, const struct list_head *delayed_del_list, struct btrfs_log_ctx *ctx) { const u64 ino = btrfs_ino(inode); const struct btrfs_delayed_item *curr; curr = list_first_entry(delayed_del_list, struct btrfs_delayed_item, log_list); while (!list_entry_is_head(curr, delayed_del_list, log_list)) { u64 first_dir_index = curr->index; u64 last_dir_index; const struct btrfs_delayed_item *next; int ret; /* * Find a range of consecutive dir index items to delete. Like * this we log a single dir range item spanning several contiguous * dir items instead of logging one range item per dir index item. 
*/ next = list_next_entry(curr, log_list); while (!list_entry_is_head(next, delayed_del_list, log_list)) { if (next->index != curr->index + 1) break; curr = next; next = list_next_entry(next, log_list); } last_dir_index = curr->index; ASSERT(last_dir_index >= first_dir_index); ret = insert_dir_log_key(trans, inode->root->log_root, path, ino, first_dir_index, last_dir_index); if (ret) return ret; curr = list_next_entry(curr, log_list); } return 0; } static int batch_delete_dir_index_items(struct btrfs_trans_handle *trans, struct btrfs_inode *inode, struct btrfs_path *path, struct btrfs_log_ctx *ctx, const struct list_head *delayed_del_list, const struct btrfs_delayed_item *first, const struct btrfs_delayed_item **last_ret) { const struct btrfs_delayed_item *next; struct extent_buffer *leaf = path->nodes[0]; const int last_slot = btrfs_header_nritems(leaf) - 1; int slot = path->slots[0] + 1; const u64 ino = btrfs_ino(inode); next = list_next_entry(first, log_list); while (slot < last_slot && !list_entry_is_head(next, delayed_del_list, log_list)) { struct btrfs_key key; btrfs_item_key_to_cpu(leaf, &key, slot); if (key.objectid != ino || key.type != BTRFS_DIR_INDEX_KEY || key.offset != next->index) break; slot++; *last_ret = next; next = list_next_entry(next, log_list); } return btrfs_del_items(trans, inode->root->log_root, path, path->slots[0], slot - path->slots[0]); } static int log_delayed_deletions_incremental(struct btrfs_trans_handle *trans, struct btrfs_inode *inode, struct btrfs_path *path, const struct list_head *delayed_del_list, struct btrfs_log_ctx *ctx) { struct btrfs_root *log = inode->root->log_root; const struct btrfs_delayed_item *curr; u64 last_range_start = 0; u64 last_range_end = 0; struct btrfs_key key; key.objectid = btrfs_ino(inode); key.type = BTRFS_DIR_INDEX_KEY; curr = list_first_entry(delayed_del_list, struct btrfs_delayed_item, log_list); while (!list_entry_is_head(curr, delayed_del_list, log_list)) { const struct btrfs_delayed_item *last = curr; u64 first_dir_index = curr->index; u64 last_dir_index; bool deleted_items = false; int ret; key.offset = curr->index; ret = btrfs_search_slot(trans, log, &key, path, -1, 1); if (ret < 0) { return ret; } else if (ret == 0) { ret = batch_delete_dir_index_items(trans, inode, path, ctx, delayed_del_list, curr, &last); if (ret) return ret; deleted_items = true; } btrfs_release_path(path); /* * If we deleted items from the leaf, it means we have a range * item logging their range, so no need to add one or update an * existing one. Otherwise we have to log a dir range item. */ if (deleted_items) goto next_batch; last_dir_index = last->index; ASSERT(last_dir_index >= first_dir_index); /* * If this range starts right after where the previous one ends, * then we want to reuse the previous range item and change its * end offset to the end of this range. This is just to minimize * leaf space usage, by avoiding adding a new range item. 
*/ if (last_range_end != 0 && first_dir_index == last_range_end + 1) first_dir_index = last_range_start; ret = insert_dir_log_key(trans, log, path, key.objectid, first_dir_index, last_dir_index); if (ret) return ret; last_range_start = first_dir_index; last_range_end = last_dir_index; next_batch: curr = list_next_entry(last, log_list); } return 0; } static int log_delayed_deletion_items(struct btrfs_trans_handle *trans, struct btrfs_inode *inode, struct btrfs_path *path, const struct list_head *delayed_del_list, struct btrfs_log_ctx *ctx) { /* * We are deleting dir index items from the log tree or adding range * items to it. */ lockdep_assert_held(&inode->log_mutex); if (list_empty(delayed_del_list)) return 0; if (ctx->logged_before) return log_delayed_deletions_incremental(trans, inode, path, delayed_del_list, ctx); return log_delayed_deletions_full(trans, inode, path, delayed_del_list, ctx); } /* * Similar logic as for log_new_dir_dentries(), but it iterates over the delayed * items instead of the subvolume tree. */ static int log_new_delayed_dentries(struct btrfs_trans_handle *trans, struct btrfs_inode *inode, const struct list_head *delayed_ins_list, struct btrfs_log_ctx *ctx) { const bool orig_log_new_dentries = ctx->log_new_dentries; struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_delayed_item *item; int ret = 0; /* * No need for the log mutex, plus to avoid potential deadlocks or * lockdep annotations due to nesting of delayed inode mutexes and log * mutexes. */ lockdep_assert_not_held(&inode->log_mutex); ASSERT(!ctx->logging_new_delayed_dentries); ctx->logging_new_delayed_dentries = true; list_for_each_entry(item, delayed_ins_list, log_list) { struct btrfs_dir_item *dir_item; struct inode *di_inode; struct btrfs_key key; int log_mode = LOG_INODE_EXISTS; dir_item = (struct btrfs_dir_item *)item->data; btrfs_disk_key_to_cpu(&key, &dir_item->location); if (key.type == BTRFS_ROOT_ITEM_KEY) continue; di_inode = btrfs_iget(fs_info->sb, key.objectid, inode->root); if (IS_ERR(di_inode)) { ret = PTR_ERR(di_inode); break; } if (!need_log_inode(trans, BTRFS_I(di_inode))) { btrfs_add_delayed_iput(BTRFS_I(di_inode)); continue; } if (btrfs_stack_dir_ftype(dir_item) == BTRFS_FT_DIR) log_mode = LOG_INODE_ALL; ctx->log_new_dentries = false; ret = btrfs_log_inode(trans, BTRFS_I(di_inode), log_mode, ctx); if (!ret && ctx->log_new_dentries) ret = log_new_dir_dentries(trans, BTRFS_I(di_inode), ctx); btrfs_add_delayed_iput(BTRFS_I(di_inode)); if (ret) break; } ctx->log_new_dentries = orig_log_new_dentries; ctx->logging_new_delayed_dentries = false; return ret; } /* log a single inode in the tree log. * At least one parent directory for this inode must exist in the tree * or be logged already. * * Any items from this inode changed by the current transaction are copied * to the log tree. An extra reference is taken on any extents in this * file, allowing us to avoid a whole pile of corner cases around logging * blocks that have been removed from the tree. * * See LOG_INODE_ALL and related defines for a description of what inode_only * does. * * This handles both files and directories. 
*/ static int btrfs_log_inode(struct btrfs_trans_handle *trans, struct btrfs_inode *inode, int inode_only, struct btrfs_log_ctx *ctx) { struct btrfs_path *path; struct btrfs_path *dst_path; struct btrfs_key min_key; struct btrfs_key max_key; struct btrfs_root *log = inode->root->log_root; int ret; bool fast_search = false; u64 ino = btrfs_ino(inode); struct extent_map_tree *em_tree = &inode->extent_tree; u64 logged_isize = 0; bool need_log_inode_item = true; bool xattrs_logged = false; bool inode_item_dropped = true; bool full_dir_logging = false; LIST_HEAD(delayed_ins_list); LIST_HEAD(delayed_del_list); path = btrfs_alloc_path(); if (!path) return -ENOMEM; dst_path = btrfs_alloc_path(); if (!dst_path) { btrfs_free_path(path); return -ENOMEM; } min_key.objectid = ino; min_key.type = BTRFS_INODE_ITEM_KEY; min_key.offset = 0; max_key.objectid = ino; /* today the code can only do partial logging of directories */ if (S_ISDIR(inode->vfs_inode.i_mode) || (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) && inode_only >= LOG_INODE_EXISTS)) max_key.type = BTRFS_XATTR_ITEM_KEY; else max_key.type = (u8)-1; max_key.offset = (u64)-1; if (S_ISDIR(inode->vfs_inode.i_mode) && inode_only == LOG_INODE_ALL) full_dir_logging = true; /* * If we are logging a directory while we are logging dentries of the * delayed items of some other inode, then we need to flush the delayed * items of this directory and not log the delayed items directly. This * is to prevent more than one level of recursion into btrfs_log_inode() * by having something like this: * * $ mkdir -p a/b/c/d/e/f/g/h/... * $ xfs_io -c "fsync" a * * Where all directories in the path did not exist before and are * created in the current transaction. * So in such a case we directly log the delayed items of the main * directory ("a") without flushing them first, while for each of its * subdirectories we flush their delayed items before logging them. * This prevents a potential unbounded recursion like this: * * btrfs_log_inode() * log_new_delayed_dentries() * btrfs_log_inode() * log_new_delayed_dentries() * btrfs_log_inode() * log_new_delayed_dentries() * (...) * * We have thresholds for the maximum number of delayed items to have in * memory, and once they are hit, the items are flushed asynchronously. * However the limit is quite high, so lets prevent deep levels of * recursion to happen by limiting the maximum depth to be 1. */ if (full_dir_logging && ctx->logging_new_delayed_dentries) { ret = btrfs_commit_inode_delayed_items(trans, inode); if (ret) goto out; } mutex_lock(&inode->log_mutex); /* * For symlinks, we must always log their content, which is stored in an * inline extent, otherwise we could end up with an empty symlink after * log replay, which is invalid on linux (symlink(2) returns -ENOENT if * one attempts to create an empty symlink). * We don't need to worry about flushing delalloc, because when we create * the inline extent when the symlink is created (we never have delalloc * for symlinks). */ if (S_ISLNK(inode->vfs_inode.i_mode)) inode_only = LOG_INODE_ALL; /* * Before logging the inode item, cache the value returned by * inode_logged(), because after that we have the need to figure out if * the inode was previously logged in this transaction. */ ret = inode_logged(trans, inode, path); if (ret < 0) goto out_unlock; ctx->logged_before = (ret == 1); ret = 0; /* * This is for cases where logging a directory could result in losing a * a file after replaying the log. 
For example, if we move a file from a * directory A to a directory B, then fsync directory A, we have no way * to known the file was moved from A to B, so logging just A would * result in losing the file after a log replay. */ if (full_dir_logging && inode->last_unlink_trans >= trans->transid) { ret = BTRFS_LOG_FORCE_COMMIT; goto out_unlock; } /* * a brute force approach to making sure we get the most uptodate * copies of everything. */ if (S_ISDIR(inode->vfs_inode.i_mode)) { clear_bit(BTRFS_INODE_COPY_EVERYTHING, &inode->runtime_flags); if (ctx->logged_before) ret = drop_inode_items(trans, log, path, inode, BTRFS_XATTR_ITEM_KEY); } else { if (inode_only == LOG_INODE_EXISTS && ctx->logged_before) { /* * Make sure the new inode item we write to the log has * the same isize as the current one (if it exists). * This is necessary to prevent data loss after log * replay, and also to prevent doing a wrong expanding * truncate - for e.g. create file, write 4K into offset * 0, fsync, write 4K into offset 4096, add hard link, * fsync some other file (to sync log), power fail - if * we use the inode's current i_size, after log replay * we get a 8Kb file, with the last 4Kb extent as a hole * (zeroes), as if an expanding truncate happened, * instead of getting a file of 4Kb only. */ ret = logged_inode_size(log, inode, path, &logged_isize); if (ret) goto out_unlock; } if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags)) { if (inode_only == LOG_INODE_EXISTS) { max_key.type = BTRFS_XATTR_ITEM_KEY; if (ctx->logged_before) ret = drop_inode_items(trans, log, path, inode, max_key.type); } else { clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags); clear_bit(BTRFS_INODE_COPY_EVERYTHING, &inode->runtime_flags); if (ctx->logged_before) ret = truncate_inode_items(trans, log, inode, 0, 0); } } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING, &inode->runtime_flags) || inode_only == LOG_INODE_EXISTS) { if (inode_only == LOG_INODE_ALL) fast_search = true; max_key.type = BTRFS_XATTR_ITEM_KEY; if (ctx->logged_before) ret = drop_inode_items(trans, log, path, inode, max_key.type); } else { if (inode_only == LOG_INODE_ALL) fast_search = true; inode_item_dropped = false; goto log_extents; } } if (ret) goto out_unlock; /* * If we are logging a directory in full mode, collect the delayed items * before iterating the subvolume tree, so that we don't miss any new * dir index items in case they get flushed while or right after we are * iterating the subvolume tree. */ if (full_dir_logging && !ctx->logging_new_delayed_dentries) btrfs_log_get_delayed_items(inode, &delayed_ins_list, &delayed_del_list); ret = copy_inode_items_to_log(trans, inode, &min_key, &max_key, path, dst_path, logged_isize, inode_only, ctx, &need_log_inode_item); if (ret) goto out_unlock; btrfs_release_path(path); btrfs_release_path(dst_path); ret = btrfs_log_all_xattrs(trans, inode, path, dst_path); if (ret) goto out_unlock; xattrs_logged = true; if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) { btrfs_release_path(path); btrfs_release_path(dst_path); ret = btrfs_log_holes(trans, inode, path); if (ret) goto out_unlock; } log_extents: btrfs_release_path(path); btrfs_release_path(dst_path); if (need_log_inode_item) { ret = log_inode_item(trans, log, dst_path, inode, inode_item_dropped); if (ret) goto out_unlock; /* * If we are doing a fast fsync and the inode was logged before * in this transaction, we don't need to log the xattrs because * they were logged before. 
If xattrs were added, changed or * deleted since the last time we logged the inode, then we have * already logged them because the inode had the runtime flag * BTRFS_INODE_COPY_EVERYTHING set. */ if (!xattrs_logged && inode->logged_trans < trans->transid) { ret = btrfs_log_all_xattrs(trans, inode, path, dst_path); if (ret) goto out_unlock; btrfs_release_path(path); } } if (fast_search) { ret = btrfs_log_changed_extents(trans, inode, dst_path, ctx); if (ret) goto out_unlock; } else if (inode_only == LOG_INODE_ALL) { struct extent_map *em, *n; write_lock(&em_tree->lock); list_for_each_entry_safe(em, n, &em_tree->modified_extents, list) list_del_init(&em->list); write_unlock(&em_tree->lock); } if (full_dir_logging) { ret = log_directory_changes(trans, inode, path, dst_path, ctx); if (ret) goto out_unlock; ret = log_delayed_insertion_items(trans, inode, path, &delayed_ins_list, ctx); if (ret) goto out_unlock; ret = log_delayed_deletion_items(trans, inode, path, &delayed_del_list, ctx); if (ret) goto out_unlock; } spin_lock(&inode->lock); inode->logged_trans = trans->transid; /* * Don't update last_log_commit if we logged that an inode exists. * We do this for three reasons: * * 1) We might have had buffered writes to this inode that were * flushed and had their ordered extents completed in this * transaction, but we did not previously log the inode with * LOG_INODE_ALL. Later the inode was evicted and after that * it was loaded again and this LOG_INODE_EXISTS log operation * happened. We must make sure that if an explicit fsync against * the inode is performed later, it logs the new extents, an * updated inode item, etc, and syncs the log. The same logic * applies to direct IO writes instead of buffered writes. * * 2) When we log the inode with LOG_INODE_EXISTS, its inode item * is logged with an i_size of 0 or whatever value was logged * before. If later the i_size of the inode is increased by a * truncate operation, the log is synced through an fsync of * some other inode and then finally an explicit fsync against * this inode is made, we must make sure this fsync logs the * inode with the new i_size, the hole between old i_size and * the new i_size, and syncs the log. * * 3) If we are logging that an ancestor inode exists as part of * logging a new name from a link or rename operation, don't update * its last_log_commit - otherwise if an explicit fsync is made * against an ancestor, the fsync considers the inode in the log * and doesn't sync the log, resulting in the ancestor missing after * a power failure unless the log was synced as part of an fsync * against any other unrelated inode. */ if (inode_only != LOG_INODE_EXISTS) inode->last_log_commit = inode->last_sub_trans; spin_unlock(&inode->lock); /* * Reset the last_reflink_trans so that the next fsync does not need to * go through the slower path when logging extents and their checksums. 
*/ if (inode_only == LOG_INODE_ALL) inode->last_reflink_trans = 0; out_unlock: mutex_unlock(&inode->log_mutex); out: btrfs_free_path(path); btrfs_free_path(dst_path); if (ret) free_conflicting_inodes(ctx); else ret = log_conflicting_inodes(trans, inode->root, ctx); if (full_dir_logging && !ctx->logging_new_delayed_dentries) { if (!ret) ret = log_new_delayed_dentries(trans, inode, &delayed_ins_list, ctx); btrfs_log_put_delayed_items(inode, &delayed_ins_list, &delayed_del_list); } return ret; } static int btrfs_log_all_parents(struct btrfs_trans_handle *trans, struct btrfs_inode *inode, struct btrfs_log_ctx *ctx) { struct btrfs_fs_info *fs_info = trans->fs_info; int ret; struct btrfs_path *path; struct btrfs_key key; struct btrfs_root *root = inode->root; const u64 ino = btrfs_ino(inode); path = btrfs_alloc_path(); if (!path) return -ENOMEM; path->skip_locking = 1; path->search_commit_root = 1; key.objectid = ino; key.type = BTRFS_INODE_REF_KEY; key.offset = 0; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto out; while (true) { struct extent_buffer *leaf = path->nodes[0]; int slot = path->slots[0]; u32 cur_offset = 0; u32 item_size; unsigned long ptr; if (slot >= btrfs_header_nritems(leaf)) { ret = btrfs_next_leaf(root, path); if (ret < 0) goto out; else if (ret > 0) break; continue; } btrfs_item_key_to_cpu(leaf, &key, slot); /* BTRFS_INODE_EXTREF_KEY is BTRFS_INODE_REF_KEY + 1 */ if (key.objectid != ino || key.type > BTRFS_INODE_EXTREF_KEY) break; item_size = btrfs_item_size(leaf, slot); ptr = btrfs_item_ptr_offset(leaf, slot); while (cur_offset < item_size) { struct btrfs_key inode_key; struct inode *dir_inode; inode_key.type = BTRFS_INODE_ITEM_KEY; inode_key.offset = 0; if (key.type == BTRFS_INODE_EXTREF_KEY) { struct btrfs_inode_extref *extref; extref = (struct btrfs_inode_extref *) (ptr + cur_offset); inode_key.objectid = btrfs_inode_extref_parent( leaf, extref); cur_offset += sizeof(*extref); cur_offset += btrfs_inode_extref_name_len(leaf, extref); } else { inode_key.objectid = key.offset; cur_offset = item_size; } dir_inode = btrfs_iget(fs_info->sb, inode_key.objectid, root); /* * If the parent inode was deleted, return an error to * fallback to a transaction commit. This is to prevent * getting an inode that was moved from one parent A to * a parent B, got its former parent A deleted and then * it got fsync'ed, from existing at both parents after * a log replay (and the old parent still existing). * Example: * * mkdir /mnt/A * mkdir /mnt/B * touch /mnt/B/bar * sync * mv /mnt/B/bar /mnt/A/bar * mv -T /mnt/A /mnt/B * fsync /mnt/B/bar * <power fail> * * If we ignore the old parent B which got deleted, * after a log replay we would have file bar linked * at both parents and the old parent B would still * exist. 
*/ if (IS_ERR(dir_inode)) { ret = PTR_ERR(dir_inode); goto out; } if (!need_log_inode(trans, BTRFS_I(dir_inode))) { btrfs_add_delayed_iput(BTRFS_I(dir_inode)); continue; } ctx->log_new_dentries = false; ret = btrfs_log_inode(trans, BTRFS_I(dir_inode), LOG_INODE_ALL, ctx); if (!ret && ctx->log_new_dentries) ret = log_new_dir_dentries(trans, BTRFS_I(dir_inode), ctx); btrfs_add_delayed_iput(BTRFS_I(dir_inode)); if (ret) goto out; } path->slots[0]++; } ret = 0; out: btrfs_free_path(path); return ret; } static int log_new_ancestors(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_log_ctx *ctx) { struct btrfs_key found_key; btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]); while (true) { struct btrfs_fs_info *fs_info = root->fs_info; struct extent_buffer *leaf; int slot; struct btrfs_key search_key; struct inode *inode; u64 ino; int ret = 0; btrfs_release_path(path); ino = found_key.offset; search_key.objectid = found_key.offset; search_key.type = BTRFS_INODE_ITEM_KEY; search_key.offset = 0; inode = btrfs_iget(fs_info->sb, ino, root); if (IS_ERR(inode)) return PTR_ERR(inode); if (BTRFS_I(inode)->generation >= trans->transid && need_log_inode(trans, BTRFS_I(inode))) ret = btrfs_log_inode(trans, BTRFS_I(inode), LOG_INODE_EXISTS, ctx); btrfs_add_delayed_iput(BTRFS_I(inode)); if (ret) return ret; if (search_key.objectid == BTRFS_FIRST_FREE_OBJECTID) break; search_key.type = BTRFS_INODE_REF_KEY; ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); if (ret < 0) return ret; leaf = path->nodes[0]; slot = path->slots[0]; if (slot >= btrfs_header_nritems(leaf)) { ret = btrfs_next_leaf(root, path); if (ret < 0) return ret; else if (ret > 0) return -ENOENT; leaf = path->nodes[0]; slot = path->slots[0]; } btrfs_item_key_to_cpu(leaf, &found_key, slot); if (found_key.objectid != search_key.objectid || found_key.type != BTRFS_INODE_REF_KEY) return -ENOENT; } return 0; } static int log_new_ancestors_fast(struct btrfs_trans_handle *trans, struct btrfs_inode *inode, struct dentry *parent, struct btrfs_log_ctx *ctx) { struct btrfs_root *root = inode->root; struct dentry *old_parent = NULL; struct super_block *sb = inode->vfs_inode.i_sb; int ret = 0; while (true) { if (!parent || d_really_is_negative(parent) || sb != parent->d_sb) break; inode = BTRFS_I(d_inode(parent)); if (root != inode->root) break; if (inode->generation >= trans->transid && need_log_inode(trans, inode)) { ret = btrfs_log_inode(trans, inode, LOG_INODE_EXISTS, ctx); if (ret) break; } if (IS_ROOT(parent)) break; parent = dget_parent(parent); dput(old_parent); old_parent = parent; } dput(old_parent); return ret; } static int log_all_new_ancestors(struct btrfs_trans_handle *trans, struct btrfs_inode *inode, struct dentry *parent, struct btrfs_log_ctx *ctx) { struct btrfs_root *root = inode->root; const u64 ino = btrfs_ino(inode); struct btrfs_path *path; struct btrfs_key search_key; int ret; /* * For a single hard link case, go through a fast path that does not * need to iterate the fs/subvolume tree. 
*/ if (inode->vfs_inode.i_nlink < 2) return log_new_ancestors_fast(trans, inode, parent, ctx); path = btrfs_alloc_path(); if (!path) return -ENOMEM; search_key.objectid = ino; search_key.type = BTRFS_INODE_REF_KEY; search_key.offset = 0; again: ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); if (ret < 0) goto out; if (ret == 0) path->slots[0]++; while (true) { struct extent_buffer *leaf = path->nodes[0]; int slot = path->slots[0]; struct btrfs_key found_key; if (slot >= btrfs_header_nritems(leaf)) { ret = btrfs_next_leaf(root, path); if (ret < 0) goto out; else if (ret > 0) break; continue; } btrfs_item_key_to_cpu(leaf, &found_key, slot); if (found_key.objectid != ino || found_key.type > BTRFS_INODE_EXTREF_KEY) break; /* * Don't deal with extended references because they are rare * cases and too complex to deal with (we would need to keep * track of which subitem we are processing for each item in * this loop, etc). So just return some error to fallback to * a transaction commit. */ if (found_key.type == BTRFS_INODE_EXTREF_KEY) { ret = -EMLINK; goto out; } /* * Logging ancestors needs to do more searches on the fs/subvol * tree, so it releases the path as needed to avoid deadlocks. * Keep track of the last inode ref key and resume from that key * after logging all new ancestors for the current hard link. */ memcpy(&search_key, &found_key, sizeof(search_key)); ret = log_new_ancestors(trans, root, path, ctx); if (ret) goto out; btrfs_release_path(path); goto again; } ret = 0; out: btrfs_free_path(path); return ret; } /* * helper function around btrfs_log_inode to make sure newly created * parent directories also end up in the log. A minimal inode and backref * only logging is done of any parent directories that are older than * the last committed transaction */ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, struct btrfs_inode *inode, struct dentry *parent, int inode_only, struct btrfs_log_ctx *ctx) { struct btrfs_root *root = inode->root; struct btrfs_fs_info *fs_info = root->fs_info; int ret = 0; bool log_dentries = false; if (btrfs_test_opt(fs_info, NOTREELOG)) { ret = BTRFS_LOG_FORCE_COMMIT; goto end_no_trans; } if (btrfs_root_refs(&root->root_item) == 0) { ret = BTRFS_LOG_FORCE_COMMIT; goto end_no_trans; } /* * Skip already logged inodes or inodes corresponding to tmpfiles * (since logging them is pointless, a link count of 0 means they * will never be accessible). */ if ((btrfs_inode_in_log(inode, trans->transid) && list_empty(&ctx->ordered_extents)) || inode->vfs_inode.i_nlink == 0) { ret = BTRFS_NO_LOG_SYNC; goto end_no_trans; } ret = start_log_trans(trans, root, ctx); if (ret) goto end_no_trans; ret = btrfs_log_inode(trans, inode, inode_only, ctx); if (ret) goto end_trans; /* * for regular files, if its inode is already on disk, we don't * have to worry about the parents at all. This is because * we can use the last_unlink_trans field to record renames * and other fun in this file. */ if (S_ISREG(inode->vfs_inode.i_mode) && inode->generation < trans->transid && inode->last_unlink_trans < trans->transid) { ret = 0; goto end_trans; } if (S_ISDIR(inode->vfs_inode.i_mode) && ctx->log_new_dentries) log_dentries = true; /* * On unlink we must make sure all our current and old parent directory * inodes are fully logged. This is to prevent leaving dangling * directory index entries in directories that were our parents but are * not anymore. 
Not doing this results in old parent directory being * impossible to delete after log replay (rmdir will always fail with * error -ENOTEMPTY). * * Example 1: * * mkdir testdir * touch testdir/foo * ln testdir/foo testdir/bar * sync * unlink testdir/bar * xfs_io -c fsync testdir/foo * <power failure> * mount fs, triggers log replay * * If we don't log the parent directory (testdir), after log replay the * directory still has an entry pointing to the file inode using the bar * name, but a matching BTRFS_INODE_[REF|EXTREF]_KEY does not exist and * the file inode has a link count of 1. * * Example 2: * * mkdir testdir * touch foo * ln foo testdir/foo2 * ln foo testdir/foo3 * sync * unlink testdir/foo3 * xfs_io -c fsync foo * <power failure> * mount fs, triggers log replay * * Similar as the first example, after log replay the parent directory * testdir still has an entry pointing to the inode file with name foo3 * but the file inode does not have a matching BTRFS_INODE_REF_KEY item * and has a link count of 2. */ if (inode->last_unlink_trans >= trans->transid) { ret = btrfs_log_all_parents(trans, inode, ctx); if (ret) goto end_trans; } ret = log_all_new_ancestors(trans, inode, parent, ctx); if (ret) goto end_trans; if (log_dentries) ret = log_new_dir_dentries(trans, inode, ctx); else ret = 0; end_trans: if (ret < 0) { btrfs_set_log_full_commit(trans); ret = BTRFS_LOG_FORCE_COMMIT; } if (ret) btrfs_remove_log_ctx(root, ctx); btrfs_end_log_trans(root); end_no_trans: return ret; } /* * it is not safe to log dentry if the chunk root has added new * chunks. This returns 0 if the dentry was logged, and 1 otherwise. * If this returns 1, you must commit the transaction to safely get your * data on disk. */ int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans, struct dentry *dentry, struct btrfs_log_ctx *ctx) { struct dentry *parent = dget_parent(dentry); int ret; ret = btrfs_log_inode_parent(trans, BTRFS_I(d_inode(dentry)), parent, LOG_INODE_ALL, ctx); dput(parent); return ret; } /* * should be called during mount to recover any replay any log trees * from the FS */ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree) { int ret; struct btrfs_path *path; struct btrfs_trans_handle *trans; struct btrfs_key key; struct btrfs_key found_key; struct btrfs_root *log; struct btrfs_fs_info *fs_info = log_root_tree->fs_info; struct walk_control wc = { .process_func = process_one_buffer, .stage = LOG_WALK_PIN_ONLY, }; path = btrfs_alloc_path(); if (!path) return -ENOMEM; set_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags); trans = btrfs_start_transaction(fs_info->tree_root, 0); if (IS_ERR(trans)) { ret = PTR_ERR(trans); goto error; } wc.trans = trans; wc.pin = 1; ret = walk_log_tree(trans, log_root_tree, &wc); if (ret) { btrfs_abort_transaction(trans, ret); goto error; } again: key.objectid = BTRFS_TREE_LOG_OBJECTID; key.offset = (u64)-1; key.type = BTRFS_ROOT_ITEM_KEY; while (1) { ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0); if (ret < 0) { btrfs_abort_transaction(trans, ret); goto error; } if (ret > 0) { if (path->slots[0] == 0) break; path->slots[0]--; } btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]); btrfs_release_path(path); if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID) break; log = btrfs_read_tree_root(log_root_tree, &found_key); if (IS_ERR(log)) { ret = PTR_ERR(log); btrfs_abort_transaction(trans, ret); goto error; } wc.replay_dest = btrfs_get_fs_root(fs_info, found_key.offset, true); if (IS_ERR(wc.replay_dest)) { ret = PTR_ERR(wc.replay_dest); /* * We 
didn't find the subvol, likely because it was * deleted. This is ok, simply skip this log and go to * the next one. * * We need to exclude the root because we can't have * other log replays overwriting this log as we'll read * it back in a few more times. This will keep our * block from being modified, and we'll just bail for * each subsequent pass. */ if (ret == -ENOENT) ret = btrfs_pin_extent_for_log_replay(trans, log->node->start, log->node->len); btrfs_put_root(log); if (!ret) goto next; btrfs_abort_transaction(trans, ret); goto error; } wc.replay_dest->log_root = log; ret = btrfs_record_root_in_trans(trans, wc.replay_dest); if (ret) /* The loop needs to continue due to the root refs */ btrfs_abort_transaction(trans, ret); else ret = walk_log_tree(trans, log, &wc); if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) { ret = fixup_inode_link_counts(trans, wc.replay_dest, path); if (ret) btrfs_abort_transaction(trans, ret); } if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) { struct btrfs_root *root = wc.replay_dest; btrfs_release_path(path); /* * We have just replayed everything, and the highest * objectid of fs roots probably has changed in case * some inode_item's got replayed. * * root->objectid_mutex is not acquired as log replay * could only happen during mount. */ ret = btrfs_init_root_free_objectid(root); if (ret) btrfs_abort_transaction(trans, ret); } wc.replay_dest->log_root = NULL; btrfs_put_root(wc.replay_dest); btrfs_put_root(log); if (ret) goto error; next: if (found_key.offset == 0) break; key.offset = found_key.offset - 1; } btrfs_release_path(path); /* step one is to pin it all, step two is to replay just inodes */ if (wc.pin) { wc.pin = 0; wc.process_func = replay_one_buffer; wc.stage = LOG_WALK_REPLAY_INODES; goto again; } /* step three is to replay everything */ if (wc.stage < LOG_WALK_REPLAY_ALL) { wc.stage++; goto again; } btrfs_free_path(path); /* step 4: commit the transaction, which also unpins the blocks */ ret = btrfs_commit_transaction(trans); if (ret) return ret; log_root_tree->log_root = NULL; clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags); btrfs_put_root(log_root_tree); return 0; error: if (wc.trans) btrfs_end_transaction(wc.trans); clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags); btrfs_free_path(path); return ret; } /* * there are some corner cases where we want to force a full * commit instead of allowing a directory to be logged. * * They revolve around files there were unlinked from the directory, and * this function updates the parent directory so that a full commit is * properly done if it is fsync'd later after the unlinks are done. * * Must be called before the unlink operations (updates to the subvolume tree, * inodes, etc) are done. */ void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans, struct btrfs_inode *dir, struct btrfs_inode *inode, bool for_rename) { /* * when we're logging a file, if it hasn't been renamed * or unlinked, and its inode is fully committed on disk, * we don't have to worry about walking up the directory chain * to log its parents. * * So, we use the last_unlink_trans field to put this transid * into the file. When the file is logged we check it and * don't log the parents if the file is fully on disk. 
*/ mutex_lock(&inode->log_mutex); inode->last_unlink_trans = trans->transid; mutex_unlock(&inode->log_mutex); if (!for_rename) return; /* * If this directory was already logged, any new names will be logged * with btrfs_log_new_name() and old names will be deleted from the log * tree with btrfs_del_dir_entries_in_log() or with * btrfs_del_inode_ref_in_log(). */ if (inode_logged(trans, dir, NULL) == 1) return; /* * If the inode we're about to unlink was logged before, the log will be * properly updated with the new name with btrfs_log_new_name() and the * old name removed with btrfs_del_dir_entries_in_log() or with * btrfs_del_inode_ref_in_log(). */ if (inode_logged(trans, inode, NULL) == 1) return; /* * when renaming files across directories, if the directory * there we're unlinking from gets fsync'd later on, there's * no way to find the destination directory later and fsync it * properly. So, we have to be conservative and force commits * so the new name gets discovered. */ mutex_lock(&dir->log_mutex); dir->last_unlink_trans = trans->transid; mutex_unlock(&dir->log_mutex); } /* * Make sure that if someone attempts to fsync the parent directory of a deleted * snapshot, it ends up triggering a transaction commit. This is to guarantee * that after replaying the log tree of the parent directory's root we will not * see the snapshot anymore and at log replay time we will not see any log tree * corresponding to the deleted snapshot's root, which could lead to replaying * it after replaying the log tree of the parent directory (which would replay * the snapshot delete operation). * * Must be called before the actual snapshot destroy operation (updates to the * parent root and tree of tree roots trees, etc) are done. */ void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans, struct btrfs_inode *dir) { mutex_lock(&dir->log_mutex); dir->last_unlink_trans = trans->transid; mutex_unlock(&dir->log_mutex); } /* * Update the log after adding a new name for an inode. * * @trans: Transaction handle. * @old_dentry: The dentry associated with the old name and the old * parent directory. * @old_dir: The inode of the previous parent directory for the case * of a rename. For a link operation, it must be NULL. * @old_dir_index: The index number associated with the old name, meaningful * only for rename operations (when @old_dir is not NULL). * Ignored for link operations. * @parent: The dentry associated with the directory under which the * new name is located. * * Call this after adding a new name for an inode, as a result of a link or * rename operation, and it will properly update the log to reflect the new name. */ void btrfs_log_new_name(struct btrfs_trans_handle *trans, struct dentry *old_dentry, struct btrfs_inode *old_dir, u64 old_dir_index, struct dentry *parent) { struct btrfs_inode *inode = BTRFS_I(d_inode(old_dentry)); struct btrfs_root *root = inode->root; struct btrfs_log_ctx ctx; bool log_pinned = false; int ret; /* * this will force the logging code to walk the dentry chain * up for the file */ if (!S_ISDIR(inode->vfs_inode.i_mode)) inode->last_unlink_trans = trans->transid; /* * if this inode hasn't been logged and directory we're renaming it * from hasn't been logged, we don't need to log it */ ret = inode_logged(trans, inode, NULL); if (ret < 0) { goto out; } else if (ret == 0) { if (!old_dir) return; /* * If the inode was not logged and we are doing a rename (old_dir is not * NULL), check if old_dir was logged - if it was not we can return and * do nothing. 
*/ ret = inode_logged(trans, old_dir, NULL); if (ret < 0) goto out; else if (ret == 0) return; } ret = 0; /* * If we are doing a rename (old_dir is not NULL) from a directory that * was previously logged, make sure that on log replay we get the old * dir entry deleted. This is needed because we will also log the new * name of the renamed inode, so we need to make sure that after log * replay we don't end up with both the new and old dir entries existing. */ if (old_dir && old_dir->logged_trans == trans->transid) { struct btrfs_root *log = old_dir->root->log_root; struct btrfs_path *path; struct fscrypt_name fname; ASSERT(old_dir_index >= BTRFS_DIR_START_INDEX); ret = fscrypt_setup_filename(&old_dir->vfs_inode, &old_dentry->d_name, 0, &fname); if (ret) goto out; /* * We have two inodes to update in the log, the old directory and * the inode that got renamed, so we must pin the log to prevent * anyone from syncing the log until we have updated both inodes * in the log. */ ret = join_running_log_trans(root); /* * At least one of the inodes was logged before, so this should * not fail, but if it does, it's not serious, just bail out and * mark the log for a full commit. */ if (WARN_ON_ONCE(ret < 0)) { fscrypt_free_filename(&fname); goto out; } log_pinned = true; path = btrfs_alloc_path(); if (!path) { ret = -ENOMEM; fscrypt_free_filename(&fname); goto out; } /* * Other concurrent task might be logging the old directory, * as it can be triggered when logging other inode that had or * still has a dentry in the old directory. We lock the old * directory's log_mutex to ensure the deletion of the old * name is persisted, because during directory logging we * delete all BTRFS_DIR_LOG_INDEX_KEY keys and the deletion of * the old name's dir index item is in the delayed items, so * it could be missed by an in progress directory logging. */ mutex_lock(&old_dir->log_mutex); ret = del_logged_dentry(trans, log, path, btrfs_ino(old_dir), &fname.disk_name, old_dir_index); if (ret > 0) { /* * The dentry does not exist in the log, so record its * deletion. */ btrfs_release_path(path); ret = insert_dir_log_key(trans, log, path, btrfs_ino(old_dir), old_dir_index, old_dir_index); } mutex_unlock(&old_dir->log_mutex); btrfs_free_path(path); fscrypt_free_filename(&fname); if (ret < 0) goto out; } btrfs_init_log_ctx(&ctx, &inode->vfs_inode); ctx.logging_new_name = true; /* * We don't care about the return value. If we fail to log the new name * then we know the next attempt to sync the log will fallback to a full * transaction commit (due to a call to btrfs_set_log_full_commit()), so * we don't need to worry about getting a log committed that has an * inconsistent state after a rename operation. */ btrfs_log_inode_parent(trans, inode, parent, LOG_INODE_EXISTS, &ctx); ASSERT(list_empty(&ctx.conflict_inodes)); out: /* * If an error happened mark the log for a full commit because it's not * consistent and up to date or we couldn't find out if one of the * inodes was logged before in this transaction. Do it before unpinning * the log, to avoid any races with someone else trying to commit it. */ if (ret < 0) btrfs_set_log_full_commit(trans); if (log_pinned) btrfs_end_log_trans(root); }
linux-master
fs/btrfs/tree-log.c
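A minimal userspace sketch (not part of the kernel sources) of Example 1 from the btrfs_log_inode_parent() comment above, assuming a btrfs filesystem mounted at /mnt; the directory and file names come from that comment's own example. It shows the operation order in which an unlink of a second name forces the subsequent fsync() to log the parent directory as well, not just the file.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	int fd;

	if (mkdir("/mnt/testdir", 0755) && errno != EEXIST) {
		perror("mkdir");
		return 1;
	}
	fd = open("/mnt/testdir/foo", O_CREAT | O_WRONLY, 0644);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Second name for the same inode, like "ln testdir/foo testdir/bar". */
	if (link("/mnt/testdir/foo", "/mnt/testdir/bar") && errno != EEXIST) {
		perror("link");
		return 1;
	}
	sync();		/* commit the current transaction */
	/* The unlink path records this via btrfs_record_unlink_dir(). */
	if (unlink("/mnt/testdir/bar")) {
		perror("unlink");
		return 1;
	}
	/*
	 * fsync() ends up in btrfs_log_inode_parent(); because the file's
	 * last_unlink_trans is now >= the running transaction's id,
	 * btrfs_log_all_parents() logs "testdir" too, so log replay cannot
	 * leave the stale "bar" dentry behind.
	 */
	if (fsync(fd))
		perror("fsync");
	close(fd);
	return 0;
}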
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2007 Oracle. All rights reserved. */ #include <linux/sched.h> #include <linux/sched/signal.h> #include <linux/pagemap.h> #include <linux/writeback.h> #include <linux/blkdev.h> #include <linux/sort.h> #include <linux/rcupdate.h> #include <linux/kthread.h> #include <linux/slab.h> #include <linux/ratelimit.h> #include <linux/percpu_counter.h> #include <linux/lockdep.h> #include <linux/crc32c.h> #include "ctree.h" #include "extent-tree.h" #include "tree-log.h" #include "disk-io.h" #include "print-tree.h" #include "volumes.h" #include "raid56.h" #include "locking.h" #include "free-space-cache.h" #include "free-space-tree.h" #include "sysfs.h" #include "qgroup.h" #include "ref-verify.h" #include "space-info.h" #include "block-rsv.h" #include "delalloc-space.h" #include "discard.h" #include "rcu-string.h" #include "zoned.h" #include "dev-replace.h" #include "fs.h" #include "accessors.h" #include "root-tree.h" #include "file-item.h" #include "orphan.h" #include "tree-checker.h" #undef SCRAMBLE_DELAYED_REFS static int __btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_node *node, u64 parent, u64 root_objectid, u64 owner_objectid, u64 owner_offset, int refs_to_drop, struct btrfs_delayed_extent_op *extra_op); static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op, struct extent_buffer *leaf, struct btrfs_extent_item *ei); static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans, u64 parent, u64 root_objectid, u64 flags, u64 owner, u64 offset, struct btrfs_key *ins, int ref_mod); static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_node *node, struct btrfs_delayed_extent_op *extent_op); static int find_next_key(struct btrfs_path *path, int level, struct btrfs_key *key); static int block_group_bits(struct btrfs_block_group *cache, u64 bits) { return (cache->flags & bits) == bits; } /* simple helper to search for an existing data extent at a given offset */ int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len) { struct btrfs_root *root = btrfs_extent_root(fs_info, start); int ret; struct btrfs_key key; struct btrfs_path *path; path = btrfs_alloc_path(); if (!path) return -ENOMEM; key.objectid = start; key.offset = len; key.type = BTRFS_EXTENT_ITEM_KEY; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); btrfs_free_path(path); return ret; } /* * helper function to lookup reference count and flags of a tree block. * * the head node for delayed ref is used to store the sum of all the * reference count modifications queued up in the rbtree. the head * node may also store the extent flags to set. This way you can check * to see what the reference count and extent flags would be if all of * the delayed refs are not processed. 
*/ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, u64 bytenr, u64 offset, int metadata, u64 *refs, u64 *flags) { struct btrfs_root *extent_root; struct btrfs_delayed_ref_head *head; struct btrfs_delayed_ref_root *delayed_refs; struct btrfs_path *path; struct btrfs_extent_item *ei; struct extent_buffer *leaf; struct btrfs_key key; u32 item_size; u64 num_refs; u64 extent_flags; int ret; /* * If we don't have skinny metadata, don't bother doing anything * different */ if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA)) { offset = fs_info->nodesize; metadata = 0; } path = btrfs_alloc_path(); if (!path) return -ENOMEM; if (!trans) { path->skip_locking = 1; path->search_commit_root = 1; } search_again: key.objectid = bytenr; key.offset = offset; if (metadata) key.type = BTRFS_METADATA_ITEM_KEY; else key.type = BTRFS_EXTENT_ITEM_KEY; extent_root = btrfs_extent_root(fs_info, bytenr); ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0); if (ret < 0) goto out_free; if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) { if (path->slots[0]) { path->slots[0]--; btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); if (key.objectid == bytenr && key.type == BTRFS_EXTENT_ITEM_KEY && key.offset == fs_info->nodesize) ret = 0; } } if (ret == 0) { leaf = path->nodes[0]; item_size = btrfs_item_size(leaf, path->slots[0]); if (item_size >= sizeof(*ei)) { ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); num_refs = btrfs_extent_refs(leaf, ei); extent_flags = btrfs_extent_flags(leaf, ei); } else { ret = -EUCLEAN; btrfs_err(fs_info, "unexpected extent item size, has %u expect >= %zu", item_size, sizeof(*ei)); if (trans) btrfs_abort_transaction(trans, ret); else btrfs_handle_fs_error(fs_info, ret, NULL); goto out_free; } BUG_ON(num_refs == 0); } else { num_refs = 0; extent_flags = 0; ret = 0; } if (!trans) goto out; delayed_refs = &trans->transaction->delayed_refs; spin_lock(&delayed_refs->lock); head = btrfs_find_delayed_ref_head(delayed_refs, bytenr); if (head) { if (!mutex_trylock(&head->mutex)) { refcount_inc(&head->refs); spin_unlock(&delayed_refs->lock); btrfs_release_path(path); /* * Mutex was contended, block until it's released and try * again */ mutex_lock(&head->mutex); mutex_unlock(&head->mutex); btrfs_put_delayed_ref_head(head); goto search_again; } spin_lock(&head->lock); if (head->extent_op && head->extent_op->update_flags) extent_flags |= head->extent_op->flags_to_set; else BUG_ON(num_refs == 0); num_refs += head->ref_mod; spin_unlock(&head->lock); mutex_unlock(&head->mutex); } spin_unlock(&delayed_refs->lock); out: WARN_ON(num_refs == 0); if (refs) *refs = num_refs; if (flags) *flags = extent_flags; out_free: btrfs_free_path(path); return ret; } /* * Back reference rules. Back refs have three main goals: * * 1) differentiate between all holders of references to an extent so that * when a reference is dropped we can make sure it was a valid reference * before freeing the extent. * * 2) Provide enough information to quickly find the holders of an extent * if we notice a given block is corrupted or bad. * * 3) Make it easy to migrate blocks for FS shrinking or storage pool * maintenance. This is actually the same as #2, but with a slightly * different use case. * * There are two kinds of back refs. The implicit back refs is optimized * for pointers in non-shared tree blocks. 
For a given pointer in a block, * back refs of this kind provide information about the block's owner tree * and the pointer's key. These information allow us to find the block by * b-tree searching. The full back refs is for pointers in tree blocks not * referenced by their owner trees. The location of tree block is recorded * in the back refs. Actually the full back refs is generic, and can be * used in all cases the implicit back refs is used. The major shortcoming * of the full back refs is its overhead. Every time a tree block gets * COWed, we have to update back refs entry for all pointers in it. * * For a newly allocated tree block, we use implicit back refs for * pointers in it. This means most tree related operations only involve * implicit back refs. For a tree block created in old transaction, the * only way to drop a reference to it is COW it. So we can detect the * event that tree block loses its owner tree's reference and do the * back refs conversion. * * When a tree block is COWed through a tree, there are four cases: * * The reference count of the block is one and the tree is the block's * owner tree. Nothing to do in this case. * * The reference count of the block is one and the tree is not the * block's owner tree. In this case, full back refs is used for pointers * in the block. Remove these full back refs, add implicit back refs for * every pointers in the new block. * * The reference count of the block is greater than one and the tree is * the block's owner tree. In this case, implicit back refs is used for * pointers in the block. Add full back refs for every pointers in the * block, increase lower level extents' reference counts. The original * implicit back refs are entailed to the new block. * * The reference count of the block is greater than one and the tree is * not the block's owner tree. Add implicit back refs for every pointer in * the new block, increase lower level extents' reference count. * * Back Reference Key composing: * * The key objectid corresponds to the first byte in the extent, * The key type is used to differentiate between types of back refs. * There are different meanings of the key offset for different types * of back refs. * * File extents can be referenced by: * * - multiple snapshots, subvolumes, or different generations in one subvol * - different files inside a single subvolume * - different offsets inside a file (bookend extents in file.c) * * The extent ref structure for the implicit back refs has fields for: * * - Objectid of the subvolume root * - objectid of the file holding the reference * - original offset in the file * - how many bookend extents * * The key offset for the implicit back refs is hash of the first * three fields. * * The extent ref structure for the full back refs has field for: * * - number of pointers in the tree leaf * * The key offset for the implicit back refs is the first byte of * the tree leaf * * When a file extent is allocated, The implicit back refs is used. * the fields are filled in: * * (root_key.objectid, inode objectid, offset in file, 1) * * When a file extent is removed file truncation, we find the * corresponding implicit back refs and check the following fields: * * (btrfs_header_owner(leaf), inode objectid, offset in file) * * Btree extents can be referenced by: * * - Different subvolumes * * Both the implicit back refs and the full back refs for tree blocks * only consist of key. The key offset for the implicit back refs is * objectid of block's owner tree. 
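 * (For example, a tree block at bytenr X owned by the fs tree, objectid 5,
 * carries the implicit back ref key (X, BTRFS_TREE_BLOCK_REF_KEY, 5).)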
The key offset for the full back refs * is the first byte of parent block. * * When implicit back refs is used, information about the lowest key and * level of the tree block are required. These information are stored in * tree block info structure. */ /* * is_data == BTRFS_REF_TYPE_BLOCK, tree block type is required, * is_data == BTRFS_REF_TYPE_DATA, data type is requiried, * is_data == BTRFS_REF_TYPE_ANY, either type is OK. */ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb, struct btrfs_extent_inline_ref *iref, enum btrfs_inline_ref_type is_data) { int type = btrfs_extent_inline_ref_type(eb, iref); u64 offset = btrfs_extent_inline_ref_offset(eb, iref); if (type == BTRFS_TREE_BLOCK_REF_KEY || type == BTRFS_SHARED_BLOCK_REF_KEY || type == BTRFS_SHARED_DATA_REF_KEY || type == BTRFS_EXTENT_DATA_REF_KEY) { if (is_data == BTRFS_REF_TYPE_BLOCK) { if (type == BTRFS_TREE_BLOCK_REF_KEY) return type; if (type == BTRFS_SHARED_BLOCK_REF_KEY) { ASSERT(eb->fs_info); /* * Every shared one has parent tree block, * which must be aligned to sector size. */ if (offset && IS_ALIGNED(offset, eb->fs_info->sectorsize)) return type; } } else if (is_data == BTRFS_REF_TYPE_DATA) { if (type == BTRFS_EXTENT_DATA_REF_KEY) return type; if (type == BTRFS_SHARED_DATA_REF_KEY) { ASSERT(eb->fs_info); /* * Every shared one has parent tree block, * which must be aligned to sector size. */ if (offset && IS_ALIGNED(offset, eb->fs_info->sectorsize)) return type; } } else { ASSERT(is_data == BTRFS_REF_TYPE_ANY); return type; } } WARN_ON(1); btrfs_print_leaf(eb); btrfs_err(eb->fs_info, "eb %llu iref 0x%lx invalid extent inline ref type %d", eb->start, (unsigned long)iref, type); return BTRFS_REF_TYPE_INVALID; } u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset) { u32 high_crc = ~(u32)0; u32 low_crc = ~(u32)0; __le64 lenum; lenum = cpu_to_le64(root_objectid); high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum)); lenum = cpu_to_le64(owner); low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum)); lenum = cpu_to_le64(offset); low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum)); return ((u64)high_crc << 31) ^ (u64)low_crc; } static u64 hash_extent_data_ref_item(struct extent_buffer *leaf, struct btrfs_extent_data_ref *ref) { return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref), btrfs_extent_data_ref_objectid(leaf, ref), btrfs_extent_data_ref_offset(leaf, ref)); } static int match_extent_data_ref(struct extent_buffer *leaf, struct btrfs_extent_data_ref *ref, u64 root_objectid, u64 owner, u64 offset) { if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid || btrfs_extent_data_ref_objectid(leaf, ref) != owner || btrfs_extent_data_ref_offset(leaf, ref) != offset) return 0; return 1; } static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans, struct btrfs_path *path, u64 bytenr, u64 parent, u64 root_objectid, u64 owner, u64 offset) { struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr); struct btrfs_key key; struct btrfs_extent_data_ref *ref; struct extent_buffer *leaf; u32 nritems; int ret; int recow; int err = -ENOENT; key.objectid = bytenr; if (parent) { key.type = BTRFS_SHARED_DATA_REF_KEY; key.offset = parent; } else { key.type = BTRFS_EXTENT_DATA_REF_KEY; key.offset = hash_extent_data_ref(root_objectid, owner, offset); } again: recow = 0; ret = btrfs_search_slot(trans, root, &key, path, -1, 1); if (ret < 0) { err = ret; goto fail; } if (parent) { if (!ret) return 0; goto fail; } leaf = path->nodes[0]; nritems = btrfs_header_nritems(leaf); 
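	/*
	 * The key offset is only a hash of (root objectid, inode objectid,
	 * file offset), so unrelated refs can collide on the same offset.
	 * Walk forward over the candidate EXTENT_DATA_REF items and compare
	 * the fields themselves (see match_extent_data_ref()).
	 */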
while (1) { if (path->slots[0] >= nritems) { ret = btrfs_next_leaf(root, path); if (ret < 0) err = ret; if (ret) goto fail; leaf = path->nodes[0]; nritems = btrfs_header_nritems(leaf); recow = 1; } btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); if (key.objectid != bytenr || key.type != BTRFS_EXTENT_DATA_REF_KEY) goto fail; ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_data_ref); if (match_extent_data_ref(leaf, ref, root_objectid, owner, offset)) { if (recow) { btrfs_release_path(path); goto again; } err = 0; break; } path->slots[0]++; } fail: return err; } static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans, struct btrfs_path *path, u64 bytenr, u64 parent, u64 root_objectid, u64 owner, u64 offset, int refs_to_add) { struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr); struct btrfs_key key; struct extent_buffer *leaf; u32 size; u32 num_refs; int ret; key.objectid = bytenr; if (parent) { key.type = BTRFS_SHARED_DATA_REF_KEY; key.offset = parent; size = sizeof(struct btrfs_shared_data_ref); } else { key.type = BTRFS_EXTENT_DATA_REF_KEY; key.offset = hash_extent_data_ref(root_objectid, owner, offset); size = sizeof(struct btrfs_extent_data_ref); } ret = btrfs_insert_empty_item(trans, root, path, &key, size); if (ret && ret != -EEXIST) goto fail; leaf = path->nodes[0]; if (parent) { struct btrfs_shared_data_ref *ref; ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_shared_data_ref); if (ret == 0) { btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add); } else { num_refs = btrfs_shared_data_ref_count(leaf, ref); num_refs += refs_to_add; btrfs_set_shared_data_ref_count(leaf, ref, num_refs); } } else { struct btrfs_extent_data_ref *ref; while (ret == -EEXIST) { ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_data_ref); if (match_extent_data_ref(leaf, ref, root_objectid, owner, offset)) break; btrfs_release_path(path); key.offset++; ret = btrfs_insert_empty_item(trans, root, path, &key, size); if (ret && ret != -EEXIST) goto fail; leaf = path->nodes[0]; } ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_data_ref); if (ret == 0) { btrfs_set_extent_data_ref_root(leaf, ref, root_objectid); btrfs_set_extent_data_ref_objectid(leaf, ref, owner); btrfs_set_extent_data_ref_offset(leaf, ref, offset); btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add); } else { num_refs = btrfs_extent_data_ref_count(leaf, ref); num_refs += refs_to_add; btrfs_set_extent_data_ref_count(leaf, ref, num_refs); } } btrfs_mark_buffer_dirty(leaf); ret = 0; fail: btrfs_release_path(path); return ret; } static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int refs_to_drop) { struct btrfs_key key; struct btrfs_extent_data_ref *ref1 = NULL; struct btrfs_shared_data_ref *ref2 = NULL; struct extent_buffer *leaf; u32 num_refs = 0; int ret = 0; leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); if (key.type == BTRFS_EXTENT_DATA_REF_KEY) { ref1 = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_data_ref); num_refs = btrfs_extent_data_ref_count(leaf, ref1); } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) { ref2 = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_shared_data_ref); num_refs = btrfs_shared_data_ref_count(leaf, ref2); } else { btrfs_err(trans->fs_info, "unrecognized backref key (%llu %u %llu)", key.objectid, key.type, key.offset); btrfs_abort_transaction(trans, -EUCLEAN); return -EUCLEAN; } BUG_ON(num_refs < refs_to_drop); 
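	/*
	 * Drop the requested number of references: delete the backref item
	 * entirely once the count reaches zero, otherwise store the reduced
	 * count back into the item.
	 */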
num_refs -= refs_to_drop; if (num_refs == 0) { ret = btrfs_del_item(trans, root, path); } else { if (key.type == BTRFS_EXTENT_DATA_REF_KEY) btrfs_set_extent_data_ref_count(leaf, ref1, num_refs); else if (key.type == BTRFS_SHARED_DATA_REF_KEY) btrfs_set_shared_data_ref_count(leaf, ref2, num_refs); btrfs_mark_buffer_dirty(leaf); } return ret; } static noinline u32 extent_data_ref_count(struct btrfs_path *path, struct btrfs_extent_inline_ref *iref) { struct btrfs_key key; struct extent_buffer *leaf; struct btrfs_extent_data_ref *ref1; struct btrfs_shared_data_ref *ref2; u32 num_refs = 0; int type; leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); if (iref) { /* * If type is invalid, we should have bailed out earlier than * this call. */ type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA); ASSERT(type != BTRFS_REF_TYPE_INVALID); if (type == BTRFS_EXTENT_DATA_REF_KEY) { ref1 = (struct btrfs_extent_data_ref *)(&iref->offset); num_refs = btrfs_extent_data_ref_count(leaf, ref1); } else { ref2 = (struct btrfs_shared_data_ref *)(iref + 1); num_refs = btrfs_shared_data_ref_count(leaf, ref2); } } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) { ref1 = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_data_ref); num_refs = btrfs_extent_data_ref_count(leaf, ref1); } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) { ref2 = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_shared_data_ref); num_refs = btrfs_shared_data_ref_count(leaf, ref2); } else { WARN_ON(1); } return num_refs; } static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans, struct btrfs_path *path, u64 bytenr, u64 parent, u64 root_objectid) { struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr); struct btrfs_key key; int ret; key.objectid = bytenr; if (parent) { key.type = BTRFS_SHARED_BLOCK_REF_KEY; key.offset = parent; } else { key.type = BTRFS_TREE_BLOCK_REF_KEY; key.offset = root_objectid; } ret = btrfs_search_slot(trans, root, &key, path, -1, 1); if (ret > 0) ret = -ENOENT; return ret; } static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans, struct btrfs_path *path, u64 bytenr, u64 parent, u64 root_objectid) { struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr); struct btrfs_key key; int ret; key.objectid = bytenr; if (parent) { key.type = BTRFS_SHARED_BLOCK_REF_KEY; key.offset = parent; } else { key.type = BTRFS_TREE_BLOCK_REF_KEY; key.offset = root_objectid; } ret = btrfs_insert_empty_item(trans, root, path, &key, 0); btrfs_release_path(path); return ret; } static inline int extent_ref_type(u64 parent, u64 owner) { int type; if (owner < BTRFS_FIRST_FREE_OBJECTID) { if (parent > 0) type = BTRFS_SHARED_BLOCK_REF_KEY; else type = BTRFS_TREE_BLOCK_REF_KEY; } else { if (parent > 0) type = BTRFS_SHARED_DATA_REF_KEY; else type = BTRFS_EXTENT_DATA_REF_KEY; } return type; } static int find_next_key(struct btrfs_path *path, int level, struct btrfs_key *key) { for (; level < BTRFS_MAX_LEVEL; level++) { if (!path->nodes[level]) break; if (path->slots[level] + 1 >= btrfs_header_nritems(path->nodes[level])) continue; if (level == 0) btrfs_item_key_to_cpu(path->nodes[level], key, path->slots[level] + 1); else btrfs_node_key_to_cpu(path->nodes[level], key, path->slots[level] + 1); return 0; } return 1; } /* * look for inline back ref. if back ref is found, *ref_ret is set * to the address of inline back ref, and 0 is returned. 
* * if back ref isn't found, *ref_ret is set to the address where it * should be inserted, and -ENOENT is returned. * * if insert is true and there are too many inline back refs, the path * points to the extent item, and -EAGAIN is returned. * * NOTE: inline back refs are ordered in the same way that back ref * items in the tree are ordered. */ static noinline_for_stack int lookup_inline_extent_backref(struct btrfs_trans_handle *trans, struct btrfs_path *path, struct btrfs_extent_inline_ref **ref_ret, u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid, u64 owner, u64 offset, int insert) { struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_root *root = btrfs_extent_root(fs_info, bytenr); struct btrfs_key key; struct extent_buffer *leaf; struct btrfs_extent_item *ei; struct btrfs_extent_inline_ref *iref; u64 flags; u64 item_size; unsigned long ptr; unsigned long end; int extra_size; int type; int want; int ret; int err = 0; bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA); int needed; key.objectid = bytenr; key.type = BTRFS_EXTENT_ITEM_KEY; key.offset = num_bytes; want = extent_ref_type(parent, owner); if (insert) { extra_size = btrfs_extent_inline_ref_size(want); path->search_for_extension = 1; path->keep_locks = 1; } else extra_size = -1; /* * Owner is our level, so we can just add one to get the level for the * block we are interested in. */ if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) { key.type = BTRFS_METADATA_ITEM_KEY; key.offset = owner; } again: ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1); if (ret < 0) { err = ret; goto out; } /* * We may be a newly converted file system which still has the old fat * extent entries for metadata, so try and see if we have one of those. */ if (ret > 0 && skinny_metadata) { skinny_metadata = false; if (path->slots[0]) { path->slots[0]--; btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); if (key.objectid == bytenr && key.type == BTRFS_EXTENT_ITEM_KEY && key.offset == num_bytes) ret = 0; } if (ret) { key.objectid = bytenr; key.type = BTRFS_EXTENT_ITEM_KEY; key.offset = num_bytes; btrfs_release_path(path); goto again; } } if (ret && !insert) { err = -ENOENT; goto out; } else if (WARN_ON(ret)) { btrfs_print_leaf(path->nodes[0]); btrfs_err(fs_info, "extent item not found for insert, bytenr %llu num_bytes %llu parent %llu root_objectid %llu owner %llu offset %llu", bytenr, num_bytes, parent, root_objectid, owner, offset); err = -EIO; goto out; } leaf = path->nodes[0]; item_size = btrfs_item_size(leaf, path->slots[0]); if (unlikely(item_size < sizeof(*ei))) { err = -EUCLEAN; btrfs_err(fs_info, "unexpected extent item size, has %llu expect >= %zu", item_size, sizeof(*ei)); btrfs_abort_transaction(trans, err); goto out; } ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); flags = btrfs_extent_flags(leaf, ei); ptr = (unsigned long)(ei + 1); end = (unsigned long)ei + item_size; if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) { ptr += sizeof(struct btrfs_tree_block_info); BUG_ON(ptr > end); } if (owner >= BTRFS_FIRST_FREE_OBJECTID) needed = BTRFS_REF_TYPE_DATA; else needed = BTRFS_REF_TYPE_BLOCK; err = -ENOENT; while (1) { if (ptr >= end) { if (ptr > end) { err = -EUCLEAN; btrfs_print_leaf(path->nodes[0]); btrfs_crit(fs_info, "overrun extent record at slot %d while looking for inline extent for root %llu owner %llu offset %llu parent %llu", path->slots[0], root_objectid, owner, offset, parent); } break; } iref = (struct btrfs_extent_inline_ref *)ptr; type 
= btrfs_get_extent_inline_ref_type(leaf, iref, needed); if (type == BTRFS_REF_TYPE_INVALID) { err = -EUCLEAN; goto out; } if (want < type) break; if (want > type) { ptr += btrfs_extent_inline_ref_size(type); continue; } if (type == BTRFS_EXTENT_DATA_REF_KEY) { struct btrfs_extent_data_ref *dref; dref = (struct btrfs_extent_data_ref *)(&iref->offset); if (match_extent_data_ref(leaf, dref, root_objectid, owner, offset)) { err = 0; break; } if (hash_extent_data_ref_item(leaf, dref) < hash_extent_data_ref(root_objectid, owner, offset)) break; } else { u64 ref_offset; ref_offset = btrfs_extent_inline_ref_offset(leaf, iref); if (parent > 0) { if (parent == ref_offset) { err = 0; break; } if (ref_offset < parent) break; } else { if (root_objectid == ref_offset) { err = 0; break; } if (ref_offset < root_objectid) break; } } ptr += btrfs_extent_inline_ref_size(type); } if (err == -ENOENT && insert) { if (item_size + extra_size >= BTRFS_MAX_EXTENT_ITEM_SIZE(root)) { err = -EAGAIN; goto out; } /* * To add new inline back ref, we have to make sure * there is no corresponding back ref item. * For simplicity, we just do not add new inline back * ref if there is any kind of item for this block */ if (find_next_key(path, 0, &key) == 0 && key.objectid == bytenr && key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) { err = -EAGAIN; goto out; } } *ref_ret = (struct btrfs_extent_inline_ref *)ptr; out: if (insert) { path->keep_locks = 0; path->search_for_extension = 0; btrfs_unlock_up_safe(path, 1); } return err; } /* * helper to add new inline back ref */ static noinline_for_stack void setup_inline_extent_backref(struct btrfs_fs_info *fs_info, struct btrfs_path *path, struct btrfs_extent_inline_ref *iref, u64 parent, u64 root_objectid, u64 owner, u64 offset, int refs_to_add, struct btrfs_delayed_extent_op *extent_op) { struct extent_buffer *leaf; struct btrfs_extent_item *ei; unsigned long ptr; unsigned long end; unsigned long item_offset; u64 refs; int size; int type; leaf = path->nodes[0]; ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); item_offset = (unsigned long)iref - (unsigned long)ei; type = extent_ref_type(parent, owner); size = btrfs_extent_inline_ref_size(type); btrfs_extend_item(path, size); ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); refs = btrfs_extent_refs(leaf, ei); refs += refs_to_add; btrfs_set_extent_refs(leaf, ei, refs); if (extent_op) __run_delayed_extent_op(extent_op, leaf, ei); ptr = (unsigned long)ei + item_offset; end = (unsigned long)ei + btrfs_item_size(leaf, path->slots[0]); if (ptr < end - size) memmove_extent_buffer(leaf, ptr + size, ptr, end - size - ptr); iref = (struct btrfs_extent_inline_ref *)ptr; btrfs_set_extent_inline_ref_type(leaf, iref, type); if (type == BTRFS_EXTENT_DATA_REF_KEY) { struct btrfs_extent_data_ref *dref; dref = (struct btrfs_extent_data_ref *)(&iref->offset); btrfs_set_extent_data_ref_root(leaf, dref, root_objectid); btrfs_set_extent_data_ref_objectid(leaf, dref, owner); btrfs_set_extent_data_ref_offset(leaf, dref, offset); btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add); } else if (type == BTRFS_SHARED_DATA_REF_KEY) { struct btrfs_shared_data_ref *sref; sref = (struct btrfs_shared_data_ref *)(iref + 1); btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add); btrfs_set_extent_inline_ref_offset(leaf, iref, parent); } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) { btrfs_set_extent_inline_ref_offset(leaf, iref, parent); } else { btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid); } 
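	/*
	 * The new inline ref bytes are fully initialized at this point; mark
	 * the leaf dirty so the enlarged extent item reaches disk with the
	 * rest of the transaction.
	 */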
btrfs_mark_buffer_dirty(leaf); } static int lookup_extent_backref(struct btrfs_trans_handle *trans, struct btrfs_path *path, struct btrfs_extent_inline_ref **ref_ret, u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid, u64 owner, u64 offset) { int ret; ret = lookup_inline_extent_backref(trans, path, ref_ret, bytenr, num_bytes, parent, root_objectid, owner, offset, 0); if (ret != -ENOENT) return ret; btrfs_release_path(path); *ref_ret = NULL; if (owner < BTRFS_FIRST_FREE_OBJECTID) { ret = lookup_tree_block_ref(trans, path, bytenr, parent, root_objectid); } else { ret = lookup_extent_data_ref(trans, path, bytenr, parent, root_objectid, owner, offset); } return ret; } /* * helper to update/remove inline back ref */ static noinline_for_stack int update_inline_extent_backref(struct btrfs_path *path, struct btrfs_extent_inline_ref *iref, int refs_to_mod, struct btrfs_delayed_extent_op *extent_op) { struct extent_buffer *leaf = path->nodes[0]; struct btrfs_fs_info *fs_info = leaf->fs_info; struct btrfs_extent_item *ei; struct btrfs_extent_data_ref *dref = NULL; struct btrfs_shared_data_ref *sref = NULL; unsigned long ptr; unsigned long end; u32 item_size; int size; int type; u64 refs; ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); refs = btrfs_extent_refs(leaf, ei); if (unlikely(refs_to_mod < 0 && refs + refs_to_mod <= 0)) { struct btrfs_key key; u32 extent_size; btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); if (key.type == BTRFS_METADATA_ITEM_KEY) extent_size = fs_info->nodesize; else extent_size = key.offset; btrfs_print_leaf(leaf); btrfs_err(fs_info, "invalid refs_to_mod for extent %llu num_bytes %u, has %d expect >= -%llu", key.objectid, extent_size, refs_to_mod, refs); return -EUCLEAN; } refs += refs_to_mod; btrfs_set_extent_refs(leaf, ei, refs); if (extent_op) __run_delayed_extent_op(extent_op, leaf, ei); type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_ANY); /* * Function btrfs_get_extent_inline_ref_type() has already printed * error messages. */ if (unlikely(type == BTRFS_REF_TYPE_INVALID)) return -EUCLEAN; if (type == BTRFS_EXTENT_DATA_REF_KEY) { dref = (struct btrfs_extent_data_ref *)(&iref->offset); refs = btrfs_extent_data_ref_count(leaf, dref); } else if (type == BTRFS_SHARED_DATA_REF_KEY) { sref = (struct btrfs_shared_data_ref *)(iref + 1); refs = btrfs_shared_data_ref_count(leaf, sref); } else { refs = 1; /* * For tree blocks we can only drop one ref for it, and tree * blocks should not have refs > 1. * * Furthermore if we're inserting a new inline backref, we * won't reach this path either. That would be * setup_inline_extent_backref(). 
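		 * So the only modification that can legitimately reach this
		 * branch for a tree block ref is a single drop (-1); anything
		 * else means the extent item is corrupted and is rejected
		 * with -EUCLEAN below.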
*/ if (unlikely(refs_to_mod != -1)) { struct btrfs_key key; btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); btrfs_print_leaf(leaf); btrfs_err(fs_info, "invalid refs_to_mod for tree block %llu, has %d expect -1", key.objectid, refs_to_mod); return -EUCLEAN; } } if (unlikely(refs_to_mod < 0 && refs < -refs_to_mod)) { struct btrfs_key key; u32 extent_size; btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); if (key.type == BTRFS_METADATA_ITEM_KEY) extent_size = fs_info->nodesize; else extent_size = key.offset; btrfs_print_leaf(leaf); btrfs_err(fs_info, "invalid refs_to_mod for backref entry, iref %lu extent %llu num_bytes %u, has %d expect >= -%llu", (unsigned long)iref, key.objectid, extent_size, refs_to_mod, refs); return -EUCLEAN; } refs += refs_to_mod; if (refs > 0) { if (type == BTRFS_EXTENT_DATA_REF_KEY) btrfs_set_extent_data_ref_count(leaf, dref, refs); else btrfs_set_shared_data_ref_count(leaf, sref, refs); } else { size = btrfs_extent_inline_ref_size(type); item_size = btrfs_item_size(leaf, path->slots[0]); ptr = (unsigned long)iref; end = (unsigned long)ei + item_size; if (ptr + size < end) memmove_extent_buffer(leaf, ptr, ptr + size, end - ptr - size); item_size -= size; btrfs_truncate_item(path, item_size, 1); } btrfs_mark_buffer_dirty(leaf); return 0; } static noinline_for_stack int insert_inline_extent_backref(struct btrfs_trans_handle *trans, struct btrfs_path *path, u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid, u64 owner, u64 offset, int refs_to_add, struct btrfs_delayed_extent_op *extent_op) { struct btrfs_extent_inline_ref *iref; int ret; ret = lookup_inline_extent_backref(trans, path, &iref, bytenr, num_bytes, parent, root_objectid, owner, offset, 1); if (ret == 0) { /* * We're adding refs to a tree block we already own, this * should not happen at all. */ if (owner < BTRFS_FIRST_FREE_OBJECTID) { btrfs_print_leaf(path->nodes[0]); btrfs_crit(trans->fs_info, "adding refs to an existing tree ref, bytenr %llu num_bytes %llu root_objectid %llu slot %u", bytenr, num_bytes, root_objectid, path->slots[0]); return -EUCLEAN; } ret = update_inline_extent_backref(path, iref, refs_to_add, extent_op); } else if (ret == -ENOENT) { setup_inline_extent_backref(trans->fs_info, path, iref, parent, root_objectid, owner, offset, refs_to_add, extent_op); ret = 0; } return ret; } static int remove_extent_backref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_extent_inline_ref *iref, int refs_to_drop, int is_data) { int ret = 0; BUG_ON(!is_data && refs_to_drop != 1); if (iref) ret = update_inline_extent_backref(path, iref, -refs_to_drop, NULL); else if (is_data) ret = remove_extent_data_ref(trans, root, path, refs_to_drop); else ret = btrfs_del_item(trans, root, path); return ret; } static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len, u64 *discarded_bytes) { int j, ret = 0; u64 bytes_left, end; u64 aligned_start = ALIGN(start, 1 << SECTOR_SHIFT); if (WARN_ON(start != aligned_start)) { len -= aligned_start - start; len = round_down(len, 1 << SECTOR_SHIFT); start = aligned_start; } *discarded_bytes = 0; if (!len) return 0; end = start + len; bytes_left = len; /* Skip any superblocks on this device. 
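	 * btrfs keeps up to BTRFS_SUPER_MIRROR_MAX superblock copies at fixed
	 * device offsets (64KiB, 64MiB and 256GiB); discarding them would
	 * wipe the recovery copies, so the discard range is split around
	 * each one.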
*/ for (j = 0; j < BTRFS_SUPER_MIRROR_MAX; j++) { u64 sb_start = btrfs_sb_offset(j); u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE; u64 size = sb_start - start; if (!in_range(sb_start, start, bytes_left) && !in_range(sb_end, start, bytes_left) && !in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE)) continue; /* * Superblock spans beginning of range. Adjust start and * try again. */ if (sb_start <= start) { start += sb_end - start; if (start > end) { bytes_left = 0; break; } bytes_left = end - start; continue; } if (size) { ret = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT, size >> SECTOR_SHIFT, GFP_NOFS); if (!ret) *discarded_bytes += size; else if (ret != -EOPNOTSUPP) return ret; } start = sb_end; if (start > end) { bytes_left = 0; break; } bytes_left = end - start; } if (bytes_left) { ret = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT, bytes_left >> SECTOR_SHIFT, GFP_NOFS); if (!ret) *discarded_bytes += bytes_left; } return ret; } static int do_discard_extent(struct btrfs_discard_stripe *stripe, u64 *bytes) { struct btrfs_device *dev = stripe->dev; struct btrfs_fs_info *fs_info = dev->fs_info; struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; u64 phys = stripe->physical; u64 len = stripe->length; u64 discarded = 0; int ret = 0; /* Zone reset on a zoned filesystem */ if (btrfs_can_zone_reset(dev, phys, len)) { u64 src_disc; ret = btrfs_reset_device_zone(dev, phys, len, &discarded); if (ret) goto out; if (!btrfs_dev_replace_is_ongoing(dev_replace) || dev != dev_replace->srcdev) goto out; src_disc = discarded; /* Send to replace target as well */ ret = btrfs_reset_device_zone(dev_replace->tgtdev, phys, len, &discarded); discarded += src_disc; } else if (bdev_max_discard_sectors(stripe->dev->bdev)) { ret = btrfs_issue_discard(dev->bdev, phys, len, &discarded); } else { ret = 0; *bytes = 0; } out: *bytes = discarded; return ret; } int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes, u64 *actual_bytes) { int ret = 0; u64 discarded_bytes = 0; u64 end = bytenr + num_bytes; u64 cur = bytenr; /* * Avoid races with device replace and make sure the devices in the * stripes don't go away while we are discarding. */ btrfs_bio_counter_inc_blocked(fs_info); while (cur < end) { struct btrfs_discard_stripe *stripes; unsigned int num_stripes; int i; num_bytes = end - cur; stripes = btrfs_map_discard(fs_info, cur, &num_bytes, &num_stripes); if (IS_ERR(stripes)) { ret = PTR_ERR(stripes); if (ret == -EOPNOTSUPP) ret = 0; break; } for (i = 0; i < num_stripes; i++) { struct btrfs_discard_stripe *stripe = stripes + i; u64 bytes; if (!stripe->dev->bdev) { ASSERT(btrfs_test_opt(fs_info, DEGRADED)); continue; } if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &stripe->dev->dev_state)) continue; ret = do_discard_extent(stripe, &bytes); if (ret) { /* * Keep going if discard is not supported by the * device. 
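				 * A single stripe's device returning
				 * -EOPNOTSUPP must not abort the whole
				 * extent: stripes on other devices may still
				 * be able to discard their part.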
*/ if (ret != -EOPNOTSUPP) break; ret = 0; } else { discarded_bytes += bytes; } } kfree(stripes); if (ret) break; cur += num_bytes; } btrfs_bio_counter_dec(fs_info); if (actual_bytes) *actual_bytes = discarded_bytes; return ret; } /* Can return -ENOMEM */ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, struct btrfs_ref *generic_ref) { struct btrfs_fs_info *fs_info = trans->fs_info; int ret; ASSERT(generic_ref->type != BTRFS_REF_NOT_SET && generic_ref->action); BUG_ON(generic_ref->type == BTRFS_REF_METADATA && generic_ref->tree_ref.owning_root == BTRFS_TREE_LOG_OBJECTID); if (generic_ref->type == BTRFS_REF_METADATA) ret = btrfs_add_delayed_tree_ref(trans, generic_ref, NULL); else ret = btrfs_add_delayed_data_ref(trans, generic_ref, 0); btrfs_ref_tree_mod(fs_info, generic_ref); return ret; } /* * __btrfs_inc_extent_ref - insert backreference for a given extent * * The counterpart is in __btrfs_free_extent(), with examples and more details * how it works. * * @trans: Handle of transaction * * @node: The delayed ref node used to get the bytenr/length for * extent whose references are incremented. * * @parent: If this is a shared extent (BTRFS_SHARED_DATA_REF_KEY/ * BTRFS_SHARED_BLOCK_REF_KEY) then it holds the logical * bytenr of the parent block. Since new extents are always * created with indirect references, this will only be the case * when relocating a shared extent. In that case, root_objectid * will be BTRFS_TREE_RELOC_OBJECTID. Otherwise, parent must * be 0 * * @root_objectid: The id of the root where this modification has originated, * this can be either one of the well-known metadata trees or * the subvolume id which references this extent. * * @owner: For data extents it is the inode number of the owning file. * For metadata extents this parameter holds the level in the * tree of the extent. * * @offset: For metadata extents the offset is ignored and is currently * always passed as 0. For data extents it is the fileoffset * this extent belongs to. * * @refs_to_add Number of references to add * * @extent_op Pointer to a structure, holding information necessary when * updating a tree block's flags * */ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_node *node, u64 parent, u64 root_objectid, u64 owner, u64 offset, int refs_to_add, struct btrfs_delayed_extent_op *extent_op) { struct btrfs_path *path; struct extent_buffer *leaf; struct btrfs_extent_item *item; struct btrfs_key key; u64 bytenr = node->bytenr; u64 num_bytes = node->num_bytes; u64 refs; int ret; path = btrfs_alloc_path(); if (!path) return -ENOMEM; /* this will setup the path even if it fails to insert the back ref */ ret = insert_inline_extent_backref(trans, path, bytenr, num_bytes, parent, root_objectid, owner, offset, refs_to_add, extent_op); if ((ret < 0 && ret != -EAGAIN) || !ret) goto out; /* * Ok we had -EAGAIN which means we didn't have space to insert and * inline extent ref, so just update the reference count and add a * normal backref. 
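	 * The keyed (non-inline) backref item itself is inserted further
	 * below, after the extent item's reference count has been bumped and
	 * the path released.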
*/ leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); refs = btrfs_extent_refs(leaf, item); btrfs_set_extent_refs(leaf, item, refs + refs_to_add); if (extent_op) __run_delayed_extent_op(extent_op, leaf, item); btrfs_mark_buffer_dirty(leaf); btrfs_release_path(path); /* now insert the actual backref */ if (owner < BTRFS_FIRST_FREE_OBJECTID) { BUG_ON(refs_to_add != 1); ret = insert_tree_block_ref(trans, path, bytenr, parent, root_objectid); } else { ret = insert_extent_data_ref(trans, path, bytenr, parent, root_objectid, owner, offset, refs_to_add); } if (ret) btrfs_abort_transaction(trans, ret); out: btrfs_free_path(path); return ret; } static int run_delayed_data_ref(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_node *node, struct btrfs_delayed_extent_op *extent_op, bool insert_reserved) { int ret = 0; struct btrfs_delayed_data_ref *ref; struct btrfs_key ins; u64 parent = 0; u64 ref_root = 0; u64 flags = 0; ins.objectid = node->bytenr; ins.offset = node->num_bytes; ins.type = BTRFS_EXTENT_ITEM_KEY; ref = btrfs_delayed_node_to_data_ref(node); trace_run_delayed_data_ref(trans->fs_info, node, ref, node->action); if (node->type == BTRFS_SHARED_DATA_REF_KEY) parent = ref->parent; ref_root = ref->root; if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) { if (extent_op) flags |= extent_op->flags_to_set; ret = alloc_reserved_file_extent(trans, parent, ref_root, flags, ref->objectid, ref->offset, &ins, node->ref_mod); } else if (node->action == BTRFS_ADD_DELAYED_REF) { ret = __btrfs_inc_extent_ref(trans, node, parent, ref_root, ref->objectid, ref->offset, node->ref_mod, extent_op); } else if (node->action == BTRFS_DROP_DELAYED_REF) { ret = __btrfs_free_extent(trans, node, parent, ref_root, ref->objectid, ref->offset, node->ref_mod, extent_op); } else { BUG(); } return ret; } static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op, struct extent_buffer *leaf, struct btrfs_extent_item *ei) { u64 flags = btrfs_extent_flags(leaf, ei); if (extent_op->update_flags) { flags |= extent_op->flags_to_set; btrfs_set_extent_flags(leaf, ei, flags); } if (extent_op->update_key) { struct btrfs_tree_block_info *bi; BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)); bi = (struct btrfs_tree_block_info *)(ei + 1); btrfs_set_tree_block_key(leaf, bi, &extent_op->key); } } static int run_delayed_extent_op(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_head *head, struct btrfs_delayed_extent_op *extent_op) { struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_root *root; struct btrfs_key key; struct btrfs_path *path; struct btrfs_extent_item *ei; struct extent_buffer *leaf; u32 item_size; int ret; int err = 0; int metadata = 1; if (TRANS_ABORTED(trans)) return 0; if (!btrfs_fs_incompat(fs_info, SKINNY_METADATA)) metadata = 0; path = btrfs_alloc_path(); if (!path) return -ENOMEM; key.objectid = head->bytenr; if (metadata) { key.type = BTRFS_METADATA_ITEM_KEY; key.offset = extent_op->level; } else { key.type = BTRFS_EXTENT_ITEM_KEY; key.offset = head->num_bytes; } root = btrfs_extent_root(fs_info, key.objectid); again: ret = btrfs_search_slot(trans, root, &key, path, 0, 1); if (ret < 0) { err = ret; goto out; } if (ret > 0) { if (metadata) { if (path->slots[0] > 0) { path->slots[0]--; btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); if (key.objectid == head->bytenr && key.type == BTRFS_EXTENT_ITEM_KEY && key.offset == head->num_bytes) ret = 0; } if (ret > 
0) { btrfs_release_path(path); metadata = 0; key.objectid = head->bytenr; key.offset = head->num_bytes; key.type = BTRFS_EXTENT_ITEM_KEY; goto again; } } else { err = -EIO; goto out; } } leaf = path->nodes[0]; item_size = btrfs_item_size(leaf, path->slots[0]); if (unlikely(item_size < sizeof(*ei))) { err = -EUCLEAN; btrfs_err(fs_info, "unexpected extent item size, has %u expect >= %zu", item_size, sizeof(*ei)); btrfs_abort_transaction(trans, err); goto out; } ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); __run_delayed_extent_op(extent_op, leaf, ei); btrfs_mark_buffer_dirty(leaf); out: btrfs_free_path(path); return err; } static int run_delayed_tree_ref(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_node *node, struct btrfs_delayed_extent_op *extent_op, bool insert_reserved) { int ret = 0; struct btrfs_delayed_tree_ref *ref; u64 parent = 0; u64 ref_root = 0; ref = btrfs_delayed_node_to_tree_ref(node); trace_run_delayed_tree_ref(trans->fs_info, node, ref, node->action); if (node->type == BTRFS_SHARED_BLOCK_REF_KEY) parent = ref->parent; ref_root = ref->root; if (node->ref_mod != 1) { btrfs_err(trans->fs_info, "btree block(%llu) has %d references rather than 1: action %d ref_root %llu parent %llu", node->bytenr, node->ref_mod, node->action, ref_root, parent); return -EIO; } if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) { BUG_ON(!extent_op || !extent_op->update_flags); ret = alloc_reserved_tree_block(trans, node, extent_op); } else if (node->action == BTRFS_ADD_DELAYED_REF) { ret = __btrfs_inc_extent_ref(trans, node, parent, ref_root, ref->level, 0, 1, extent_op); } else if (node->action == BTRFS_DROP_DELAYED_REF) { ret = __btrfs_free_extent(trans, node, parent, ref_root, ref->level, 0, 1, extent_op); } else { BUG(); } return ret; } /* helper function to actually process a single delayed ref entry */ static int run_one_delayed_ref(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_node *node, struct btrfs_delayed_extent_op *extent_op, bool insert_reserved) { int ret = 0; if (TRANS_ABORTED(trans)) { if (insert_reserved) btrfs_pin_extent(trans, node->bytenr, node->num_bytes, 1); return 0; } if (node->type == BTRFS_TREE_BLOCK_REF_KEY || node->type == BTRFS_SHARED_BLOCK_REF_KEY) ret = run_delayed_tree_ref(trans, node, extent_op, insert_reserved); else if (node->type == BTRFS_EXTENT_DATA_REF_KEY || node->type == BTRFS_SHARED_DATA_REF_KEY) ret = run_delayed_data_ref(trans, node, extent_op, insert_reserved); else BUG(); if (ret && insert_reserved) btrfs_pin_extent(trans, node->bytenr, node->num_bytes, 1); if (ret < 0) btrfs_err(trans->fs_info, "failed to run delayed ref for logical %llu num_bytes %llu type %u action %u ref_mod %d: %d", node->bytenr, node->num_bytes, node->type, node->action, node->ref_mod, ret); return ret; } static inline struct btrfs_delayed_ref_node * select_delayed_ref(struct btrfs_delayed_ref_head *head) { struct btrfs_delayed_ref_node *ref; if (RB_EMPTY_ROOT(&head->ref_tree.rb_root)) return NULL; /* * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first. * This is to prevent a ref count from going down to zero, which deletes * the extent item from the extent tree, when there still are references * to add, which would fail because they would not find the extent item. 
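	 * Example (illustrative): with a pending [-1, +1] pair on an extent
	 * whose committed ref count is 1, running the drop first would free
	 * the extent and make the following add fail; running the add first
	 * leaves the count at 1 once both have been applied.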
*/ if (!list_empty(&head->ref_add_list)) return list_first_entry(&head->ref_add_list, struct btrfs_delayed_ref_node, add_list); ref = rb_entry(rb_first_cached(&head->ref_tree), struct btrfs_delayed_ref_node, ref_node); ASSERT(list_empty(&ref->add_list)); return ref; } static void unselect_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, struct btrfs_delayed_ref_head *head) { spin_lock(&delayed_refs->lock); head->processing = false; delayed_refs->num_heads_ready++; spin_unlock(&delayed_refs->lock); btrfs_delayed_ref_unlock(head); } static struct btrfs_delayed_extent_op *cleanup_extent_op( struct btrfs_delayed_ref_head *head) { struct btrfs_delayed_extent_op *extent_op = head->extent_op; if (!extent_op) return NULL; if (head->must_insert_reserved) { head->extent_op = NULL; btrfs_free_delayed_extent_op(extent_op); return NULL; } return extent_op; } static int run_and_cleanup_extent_op(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_head *head) { struct btrfs_delayed_extent_op *extent_op; int ret; extent_op = cleanup_extent_op(head); if (!extent_op) return 0; head->extent_op = NULL; spin_unlock(&head->lock); ret = run_delayed_extent_op(trans, head, extent_op); btrfs_free_delayed_extent_op(extent_op); return ret ? ret : 1; } void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info, struct btrfs_delayed_ref_root *delayed_refs, struct btrfs_delayed_ref_head *head) { int nr_items = 1; /* Dropping this ref head update. */ /* * We had csum deletions accounted for in our delayed refs rsv, we need * to drop the csum leaves for this update from our delayed_refs_rsv. */ if (head->total_ref_mod < 0 && head->is_data) { spin_lock(&delayed_refs->lock); delayed_refs->pending_csums -= head->num_bytes; spin_unlock(&delayed_refs->lock); nr_items += btrfs_csum_bytes_to_leaves(fs_info, head->num_bytes); } btrfs_delayed_refs_rsv_release(fs_info, nr_items); } static int cleanup_ref_head(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_head *head) { struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_delayed_ref_root *delayed_refs; int ret; delayed_refs = &trans->transaction->delayed_refs; ret = run_and_cleanup_extent_op(trans, head); if (ret < 0) { unselect_delayed_ref_head(delayed_refs, head); btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret); return ret; } else if (ret) { return ret; } /* * Need to drop our head ref lock and re-acquire the delayed ref lock * and then re-check to make sure nobody got added. 
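	 * The locking order is delayed_refs->lock before head->lock, so the
	 * head lock has to be released first and retaken under the outer
	 * lock.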
*/ spin_unlock(&head->lock); spin_lock(&delayed_refs->lock); spin_lock(&head->lock); if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root) || head->extent_op) { spin_unlock(&head->lock); spin_unlock(&delayed_refs->lock); return 1; } btrfs_delete_ref_head(delayed_refs, head); spin_unlock(&head->lock); spin_unlock(&delayed_refs->lock); if (head->must_insert_reserved) { btrfs_pin_extent(trans, head->bytenr, head->num_bytes, 1); if (head->is_data) { struct btrfs_root *csum_root; csum_root = btrfs_csum_root(fs_info, head->bytenr); ret = btrfs_del_csums(trans, csum_root, head->bytenr, head->num_bytes); } } btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head); trace_run_delayed_ref_head(fs_info, head, 0); btrfs_delayed_ref_unlock(head); btrfs_put_delayed_ref_head(head); return ret; } static struct btrfs_delayed_ref_head *btrfs_obtain_ref_head( struct btrfs_trans_handle *trans) { struct btrfs_delayed_ref_root *delayed_refs = &trans->transaction->delayed_refs; struct btrfs_delayed_ref_head *head = NULL; int ret; spin_lock(&delayed_refs->lock); head = btrfs_select_ref_head(delayed_refs); if (!head) { spin_unlock(&delayed_refs->lock); return head; } /* * Grab the lock that says we are going to process all the refs for * this head */ ret = btrfs_delayed_ref_lock(delayed_refs, head); spin_unlock(&delayed_refs->lock); /* * We may have dropped the spin lock to get the head mutex lock, and * that might have given someone else time to free the head. If that's * true, it has been removed from our list and we can move on. */ if (ret == -EAGAIN) head = ERR_PTR(-EAGAIN); return head; } static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_head *locked_ref) { struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_delayed_ref_root *delayed_refs; struct btrfs_delayed_extent_op *extent_op; struct btrfs_delayed_ref_node *ref; bool must_insert_reserved; int ret; delayed_refs = &trans->transaction->delayed_refs; lockdep_assert_held(&locked_ref->mutex); lockdep_assert_held(&locked_ref->lock); while ((ref = select_delayed_ref(locked_ref))) { if (ref->seq && btrfs_check_delayed_seq(fs_info, ref->seq)) { spin_unlock(&locked_ref->lock); unselect_delayed_ref_head(delayed_refs, locked_ref); return -EAGAIN; } rb_erase_cached(&ref->ref_node, &locked_ref->ref_tree); RB_CLEAR_NODE(&ref->ref_node); if (!list_empty(&ref->add_list)) list_del(&ref->add_list); /* * When we play the delayed ref, also correct the ref_mod on * head */ switch (ref->action) { case BTRFS_ADD_DELAYED_REF: case BTRFS_ADD_DELAYED_EXTENT: locked_ref->ref_mod -= ref->ref_mod; break; case BTRFS_DROP_DELAYED_REF: locked_ref->ref_mod += ref->ref_mod; break; default: WARN_ON(1); } atomic_dec(&delayed_refs->num_entries); /* * Record the must_insert_reserved flag before we drop the * spin lock. */ must_insert_reserved = locked_ref->must_insert_reserved; locked_ref->must_insert_reserved = false; extent_op = locked_ref->extent_op; locked_ref->extent_op = NULL; spin_unlock(&locked_ref->lock); ret = run_one_delayed_ref(trans, ref, extent_op, must_insert_reserved); btrfs_free_delayed_extent_op(extent_op); if (ret) { unselect_delayed_ref_head(delayed_refs, locked_ref); btrfs_put_delayed_ref(ref); return ret; } btrfs_put_delayed_ref(ref); cond_resched(); spin_lock(&locked_ref->lock); btrfs_merge_delayed_refs(fs_info, delayed_refs, locked_ref); } return 0; } /* * Returns 0 on success or if called with an already aborted transaction. * Returns -ENOMEM or -EIO on failure and will abort the transaction. 
*/ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, unsigned long nr) { struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_delayed_ref_root *delayed_refs; struct btrfs_delayed_ref_head *locked_ref = NULL; int ret; unsigned long count = 0; delayed_refs = &trans->transaction->delayed_refs; do { if (!locked_ref) { locked_ref = btrfs_obtain_ref_head(trans); if (IS_ERR_OR_NULL(locked_ref)) { if (PTR_ERR(locked_ref) == -EAGAIN) { continue; } else { break; } } count++; } /* * We need to try and merge add/drops of the same ref since we * can run into issues with relocate dropping the implicit ref * and then it being added back again before the drop can * finish. If we merged anything we need to re-loop so we can * get a good ref. * Or we can get node references of the same type that weren't * merged when created due to bumps in the tree mod seq, and * we need to merge them to prevent adding an inline extent * backref before dropping it (triggering a BUG_ON at * insert_inline_extent_backref()). */ spin_lock(&locked_ref->lock); btrfs_merge_delayed_refs(fs_info, delayed_refs, locked_ref); ret = btrfs_run_delayed_refs_for_head(trans, locked_ref); if (ret < 0 && ret != -EAGAIN) { /* * Error, btrfs_run_delayed_refs_for_head already * unlocked everything so just bail out */ return ret; } else if (!ret) { /* * Success, perform the usual cleanup of a processed * head */ ret = cleanup_ref_head(trans, locked_ref); if (ret > 0 ) { /* We dropped our lock, we need to loop. */ ret = 0; continue; } else if (ret) { return ret; } } /* * Either success case or btrfs_run_delayed_refs_for_head * returned -EAGAIN, meaning we need to select another head */ locked_ref = NULL; cond_resched(); } while ((nr != -1 && count < nr) || locked_ref); return 0; } #ifdef SCRAMBLE_DELAYED_REFS /* * Normally delayed refs get processed in ascending bytenr order. This * correlates in most cases to the order added. To expose dependencies on this * order, we start to process the tree in the middle instead of the beginning */ static u64 find_middle(struct rb_root *root) { struct rb_node *n = root->rb_node; struct btrfs_delayed_ref_node *entry; int alt = 1; u64 middle; u64 first = 0, last = 0; n = rb_first(root); if (n) { entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node); first = entry->bytenr; } n = rb_last(root); if (n) { entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node); last = entry->bytenr; } n = root->rb_node; while (n) { entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node); WARN_ON(!entry->in_tree); middle = entry->bytenr; if (alt) n = n->rb_left; else n = n->rb_right; alt = 1 - alt; } return middle; } #endif /* * this starts processing the delayed reference count updates and * extent insertions we have queued up so far. count can be * 0, which means to process everything in the tree at the start * of the run (but not newly added entries), or it can be some target * number you'd like to process. 
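 * A count of (unsigned long)-1 means run until the delayed ref tree is
 * completely drained, including heads queued while we are running; this is
 * what the transaction commit path uses.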
* * Returns 0 on success or if called with an aborted transaction * Returns <0 on error and aborts the transaction */ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, unsigned long count) { struct btrfs_fs_info *fs_info = trans->fs_info; struct rb_node *node; struct btrfs_delayed_ref_root *delayed_refs; struct btrfs_delayed_ref_head *head; int ret; int run_all = count == (unsigned long)-1; /* We'll clean this up in btrfs_cleanup_transaction */ if (TRANS_ABORTED(trans)) return 0; if (test_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags)) return 0; delayed_refs = &trans->transaction->delayed_refs; if (count == 0) count = delayed_refs->num_heads_ready; again: #ifdef SCRAMBLE_DELAYED_REFS delayed_refs->run_delayed_start = find_middle(&delayed_refs->root); #endif ret = __btrfs_run_delayed_refs(trans, count); if (ret < 0) { btrfs_abort_transaction(trans, ret); return ret; } if (run_all) { btrfs_create_pending_block_groups(trans); spin_lock(&delayed_refs->lock); node = rb_first_cached(&delayed_refs->href_root); if (!node) { spin_unlock(&delayed_refs->lock); goto out; } head = rb_entry(node, struct btrfs_delayed_ref_head, href_node); refcount_inc(&head->refs); spin_unlock(&delayed_refs->lock); /* Mutex was contended, block until it's released and retry. */ mutex_lock(&head->mutex); mutex_unlock(&head->mutex); btrfs_put_delayed_ref_head(head); cond_resched(); goto again; } out: return 0; } int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans, struct extent_buffer *eb, u64 flags) { struct btrfs_delayed_extent_op *extent_op; int level = btrfs_header_level(eb); int ret; extent_op = btrfs_alloc_delayed_extent_op(); if (!extent_op) return -ENOMEM; extent_op->flags_to_set = flags; extent_op->update_flags = true; extent_op->update_key = false; extent_op->level = level; ret = btrfs_add_delayed_extent_op(trans, eb->start, eb->len, extent_op); if (ret) btrfs_free_delayed_extent_op(extent_op); return ret; } static noinline int check_delayed_ref(struct btrfs_root *root, struct btrfs_path *path, u64 objectid, u64 offset, u64 bytenr) { struct btrfs_delayed_ref_head *head; struct btrfs_delayed_ref_node *ref; struct btrfs_delayed_data_ref *data_ref; struct btrfs_delayed_ref_root *delayed_refs; struct btrfs_transaction *cur_trans; struct rb_node *node; int ret = 0; spin_lock(&root->fs_info->trans_lock); cur_trans = root->fs_info->running_transaction; if (cur_trans) refcount_inc(&cur_trans->use_count); spin_unlock(&root->fs_info->trans_lock); if (!cur_trans) return 0; delayed_refs = &cur_trans->delayed_refs; spin_lock(&delayed_refs->lock); head = btrfs_find_delayed_ref_head(delayed_refs, bytenr); if (!head) { spin_unlock(&delayed_refs->lock); btrfs_put_transaction(cur_trans); return 0; } if (!mutex_trylock(&head->mutex)) { if (path->nowait) { spin_unlock(&delayed_refs->lock); btrfs_put_transaction(cur_trans); return -EAGAIN; } refcount_inc(&head->refs); spin_unlock(&delayed_refs->lock); btrfs_release_path(path); /* * Mutex was contended, block until it's released and let * caller try again */ mutex_lock(&head->mutex); mutex_unlock(&head->mutex); btrfs_put_delayed_ref_head(head); btrfs_put_transaction(cur_trans); return -EAGAIN; } spin_unlock(&delayed_refs->lock); spin_lock(&head->lock); /* * XXX: We should replace this with a proper search function in the * future. 
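	 * For now walk every queued ref on this head: any ref that is not a
	 * plain EXTENT_DATA_REF for exactly (root, objectid, offset) proves
	 * that a cross reference exists.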
*/ for (node = rb_first_cached(&head->ref_tree); node; node = rb_next(node)) { ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node); /* If it's a shared ref we know a cross reference exists */ if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) { ret = 1; break; } data_ref = btrfs_delayed_node_to_data_ref(ref); /* * If our ref doesn't match the one we're currently looking at * then we have a cross reference. */ if (data_ref->root != root->root_key.objectid || data_ref->objectid != objectid || data_ref->offset != offset) { ret = 1; break; } } spin_unlock(&head->lock); mutex_unlock(&head->mutex); btrfs_put_transaction(cur_trans); return ret; } static noinline int check_committed_ref(struct btrfs_root *root, struct btrfs_path *path, u64 objectid, u64 offset, u64 bytenr, bool strict) { struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bytenr); struct extent_buffer *leaf; struct btrfs_extent_data_ref *ref; struct btrfs_extent_inline_ref *iref; struct btrfs_extent_item *ei; struct btrfs_key key; u32 item_size; int type; int ret; key.objectid = bytenr; key.offset = (u64)-1; key.type = BTRFS_EXTENT_ITEM_KEY; ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0); if (ret < 0) goto out; BUG_ON(ret == 0); /* Corruption */ ret = -ENOENT; if (path->slots[0] == 0) goto out; path->slots[0]--; leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY) goto out; ret = 1; item_size = btrfs_item_size(leaf, path->slots[0]); ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); /* If extent item has more than 1 inline ref then it's shared */ if (item_size != sizeof(*ei) + btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY)) goto out; /* * If extent created before last snapshot => it's shared unless the * snapshot has been deleted. Use the heuristic if strict is false. 
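	 * With strict == true the generation heuristic is skipped and the
	 * extent is only reported as not shared when its single inline
	 * EXTENT_DATA_REF, checked below, matches this root, inode and offset
	 * and accounts for all of the extent's references.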
*/ if (!strict && (btrfs_extent_generation(leaf, ei) <= btrfs_root_last_snapshot(&root->root_item))) goto out; iref = (struct btrfs_extent_inline_ref *)(ei + 1); /* If this extent has SHARED_DATA_REF then it's shared */ type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA); if (type != BTRFS_EXTENT_DATA_REF_KEY) goto out; ref = (struct btrfs_extent_data_ref *)(&iref->offset); if (btrfs_extent_refs(leaf, ei) != btrfs_extent_data_ref_count(leaf, ref) || btrfs_extent_data_ref_root(leaf, ref) != root->root_key.objectid || btrfs_extent_data_ref_objectid(leaf, ref) != objectid || btrfs_extent_data_ref_offset(leaf, ref) != offset) goto out; ret = 0; out: return ret; } int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset, u64 bytenr, bool strict, struct btrfs_path *path) { int ret; do { ret = check_committed_ref(root, path, objectid, offset, bytenr, strict); if (ret && ret != -ENOENT) goto out; ret = check_delayed_ref(root, path, objectid, offset, bytenr); } while (ret == -EAGAIN); out: btrfs_release_path(path); if (btrfs_is_data_reloc_root(root)) WARN_ON(ret > 0); return ret; } static int __btrfs_mod_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *buf, int full_backref, int inc) { struct btrfs_fs_info *fs_info = root->fs_info; u64 bytenr; u64 num_bytes; u64 parent; u64 ref_root; u32 nritems; struct btrfs_key key; struct btrfs_file_extent_item *fi; struct btrfs_ref generic_ref = { 0 }; bool for_reloc = btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC); int i; int action; int level; int ret = 0; if (btrfs_is_testing(fs_info)) return 0; ref_root = btrfs_header_owner(buf); nritems = btrfs_header_nritems(buf); level = btrfs_header_level(buf); if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && level == 0) return 0; if (full_backref) parent = buf->start; else parent = 0; if (inc) action = BTRFS_ADD_DELAYED_REF; else action = BTRFS_DROP_DELAYED_REF; for (i = 0; i < nritems; i++) { if (level == 0) { btrfs_item_key_to_cpu(buf, &key, i); if (key.type != BTRFS_EXTENT_DATA_KEY) continue; fi = btrfs_item_ptr(buf, i, struct btrfs_file_extent_item); if (btrfs_file_extent_type(buf, fi) == BTRFS_FILE_EXTENT_INLINE) continue; bytenr = btrfs_file_extent_disk_bytenr(buf, fi); if (bytenr == 0) continue; num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi); key.offset -= btrfs_file_extent_offset(buf, fi); btrfs_init_generic_ref(&generic_ref, action, bytenr, num_bytes, parent); btrfs_init_data_ref(&generic_ref, ref_root, key.objectid, key.offset, root->root_key.objectid, for_reloc); if (inc) ret = btrfs_inc_extent_ref(trans, &generic_ref); else ret = btrfs_free_extent(trans, &generic_ref); if (ret) goto fail; } else { bytenr = btrfs_node_blockptr(buf, i); num_bytes = fs_info->nodesize; btrfs_init_generic_ref(&generic_ref, action, bytenr, num_bytes, parent); btrfs_init_tree_ref(&generic_ref, level - 1, ref_root, root->root_key.objectid, for_reloc); if (inc) ret = btrfs_inc_extent_ref(trans, &generic_ref); else ret = btrfs_free_extent(trans, &generic_ref); if (ret) goto fail; } } return 0; fail: return ret; } int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *buf, int full_backref) { return __btrfs_mod_ref(trans, root, buf, full_backref, 1); } int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *buf, int full_backref) { return __btrfs_mod_ref(trans, root, buf, full_backref, 0); } static u64 get_alloc_profile_by_root(struct btrfs_root *root, int data) 
{ struct btrfs_fs_info *fs_info = root->fs_info; u64 flags; u64 ret; if (data) flags = BTRFS_BLOCK_GROUP_DATA; else if (root == fs_info->chunk_root) flags = BTRFS_BLOCK_GROUP_SYSTEM; else flags = BTRFS_BLOCK_GROUP_METADATA; ret = btrfs_get_alloc_profile(fs_info, flags); return ret; } static u64 first_logical_byte(struct btrfs_fs_info *fs_info) { struct rb_node *leftmost; u64 bytenr = 0; read_lock(&fs_info->block_group_cache_lock); /* Get the block group with the lowest logical start address. */ leftmost = rb_first_cached(&fs_info->block_group_cache_tree); if (leftmost) { struct btrfs_block_group *bg; bg = rb_entry(leftmost, struct btrfs_block_group, cache_node); bytenr = bg->start; } read_unlock(&fs_info->block_group_cache_lock); return bytenr; } static int pin_down_extent(struct btrfs_trans_handle *trans, struct btrfs_block_group *cache, u64 bytenr, u64 num_bytes, int reserved) { struct btrfs_fs_info *fs_info = cache->fs_info; spin_lock(&cache->space_info->lock); spin_lock(&cache->lock); cache->pinned += num_bytes; btrfs_space_info_update_bytes_pinned(fs_info, cache->space_info, num_bytes); if (reserved) { cache->reserved -= num_bytes; cache->space_info->bytes_reserved -= num_bytes; } spin_unlock(&cache->lock); spin_unlock(&cache->space_info->lock); set_extent_bit(&trans->transaction->pinned_extents, bytenr, bytenr + num_bytes - 1, EXTENT_DIRTY, NULL); return 0; } int btrfs_pin_extent(struct btrfs_trans_handle *trans, u64 bytenr, u64 num_bytes, int reserved) { struct btrfs_block_group *cache; cache = btrfs_lookup_block_group(trans->fs_info, bytenr); BUG_ON(!cache); /* Logic error */ pin_down_extent(trans, cache, bytenr, num_bytes, reserved); btrfs_put_block_group(cache); return 0; } /* * this function must be called within transaction */ int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans, u64 bytenr, u64 num_bytes) { struct btrfs_block_group *cache; int ret; cache = btrfs_lookup_block_group(trans->fs_info, bytenr); if (!cache) return -EINVAL; /* * Fully cache the free space first so that our pin removes the free space * from the cache. 
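 *
 * The ordering below matters; this is just a restatement of the steps
 * that follow, not a separate code path:
 *
 *   1) btrfs_cache_block_group(cache, true)  - wait for caching to finish
 *   2) pin_down_extent()                     - account the range as pinned
 *   3) btrfs_remove_free_space()             - drop the range from the free
 *                                              space cache so log replay
 *                                              cannot hand it out again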
*/ ret = btrfs_cache_block_group(cache, true); if (ret) goto out; pin_down_extent(trans, cache, bytenr, num_bytes, 0); /* remove us from the free space cache (if we're there at all) */ ret = btrfs_remove_free_space(cache, bytenr, num_bytes); out: btrfs_put_block_group(cache); return ret; } static int __exclude_logged_extent(struct btrfs_fs_info *fs_info, u64 start, u64 num_bytes) { int ret; struct btrfs_block_group *block_group; block_group = btrfs_lookup_block_group(fs_info, start); if (!block_group) return -EINVAL; ret = btrfs_cache_block_group(block_group, true); if (ret) goto out; ret = btrfs_remove_free_space(block_group, start, num_bytes); out: btrfs_put_block_group(block_group); return ret; } int btrfs_exclude_logged_extents(struct extent_buffer *eb) { struct btrfs_fs_info *fs_info = eb->fs_info; struct btrfs_file_extent_item *item; struct btrfs_key key; int found_type; int i; int ret = 0; if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS)) return 0; for (i = 0; i < btrfs_header_nritems(eb); i++) { btrfs_item_key_to_cpu(eb, &key, i); if (key.type != BTRFS_EXTENT_DATA_KEY) continue; item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item); found_type = btrfs_file_extent_type(eb, item); if (found_type == BTRFS_FILE_EXTENT_INLINE) continue; if (btrfs_file_extent_disk_bytenr(eb, item) == 0) continue; key.objectid = btrfs_file_extent_disk_bytenr(eb, item); key.offset = btrfs_file_extent_disk_num_bytes(eb, item); ret = __exclude_logged_extent(fs_info, key.objectid, key.offset); if (ret) break; } return ret; } static void btrfs_inc_block_group_reservations(struct btrfs_block_group *bg) { atomic_inc(&bg->reservations); } /* * Returns the free cluster for the given space info and sets empty_cluster to * what it should be based on the mount options. */ static struct btrfs_free_cluster * fetch_cluster_info(struct btrfs_fs_info *fs_info, struct btrfs_space_info *space_info, u64 *empty_cluster) { struct btrfs_free_cluster *ret = NULL; *empty_cluster = 0; if (btrfs_mixed_space_info(space_info)) return ret; if (space_info->flags & BTRFS_BLOCK_GROUP_METADATA) { ret = &fs_info->meta_alloc_cluster; if (btrfs_test_opt(fs_info, SSD)) *empty_cluster = SZ_2M; else *empty_cluster = SZ_64K; } else if ((space_info->flags & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(fs_info, SSD_SPREAD)) { *empty_cluster = SZ_2M; ret = &fs_info->data_alloc_cluster; } return ret; } static int unpin_extent_range(struct btrfs_fs_info *fs_info, u64 start, u64 end, const bool return_free_space) { struct btrfs_block_group *cache = NULL; struct btrfs_space_info *space_info; struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; struct btrfs_free_cluster *cluster = NULL; u64 len; u64 total_unpinned = 0; u64 empty_cluster = 0; bool readonly; while (start <= end) { readonly = false; if (!cache || start >= cache->start + cache->length) { if (cache) btrfs_put_block_group(cache); total_unpinned = 0; cache = btrfs_lookup_block_group(fs_info, start); BUG_ON(!cache); /* Logic error */ cluster = fetch_cluster_info(fs_info, cache->space_info, &empty_cluster); empty_cluster <<= 1; } len = cache->start + cache->length - start; len = min(len, end + 1 - start); if (return_free_space) btrfs_add_free_space(cache, start, len); start += len; total_unpinned += len; space_info = cache->space_info; /* * If this space cluster has been marked as fragmented and we've * unpinned enough in this block group to potentially allow a * cluster to be created inside of it go ahead and clear the * fragmented check. 
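 *
 * "Enough" is measured against empty_cluster, which was doubled when we
 * switched to this block group.  Purely illustrative numbers, assuming
 * an SSD metadata block group where fetch_cluster_info() picked SZ_2M:
 *
 *   empty_cluster = 2M << 1 = 4M
 *   total_unpinned reaches 6M  -> clear cluster->fragmented
 *   total_unpinned stays at 1M -> leave the flag set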
*/ if (cluster && cluster->fragmented && total_unpinned > empty_cluster) { spin_lock(&cluster->lock); cluster->fragmented = 0; spin_unlock(&cluster->lock); } spin_lock(&space_info->lock); spin_lock(&cache->lock); cache->pinned -= len; btrfs_space_info_update_bytes_pinned(fs_info, space_info, -len); space_info->max_extent_size = 0; if (cache->ro) { space_info->bytes_readonly += len; readonly = true; } else if (btrfs_is_zoned(fs_info)) { /* Need reset before reusing in a zoned block group */ space_info->bytes_zone_unusable += len; readonly = true; } spin_unlock(&cache->lock); if (!readonly && return_free_space && global_rsv->space_info == space_info) { spin_lock(&global_rsv->lock); if (!global_rsv->full) { u64 to_add = min(len, global_rsv->size - global_rsv->reserved); global_rsv->reserved += to_add; btrfs_space_info_update_bytes_may_use(fs_info, space_info, to_add); if (global_rsv->reserved >= global_rsv->size) global_rsv->full = 1; len -= to_add; } spin_unlock(&global_rsv->lock); } /* Add to any tickets we may have */ if (!readonly && return_free_space && len) btrfs_try_granting_tickets(fs_info, space_info); spin_unlock(&space_info->lock); } if (cache) btrfs_put_block_group(cache); return 0; } int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans) { struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_block_group *block_group, *tmp; struct list_head *deleted_bgs; struct extent_io_tree *unpin; u64 start; u64 end; int ret; unpin = &trans->transaction->pinned_extents; while (!TRANS_ABORTED(trans)) { struct extent_state *cached_state = NULL; mutex_lock(&fs_info->unused_bg_unpin_mutex); if (!find_first_extent_bit(unpin, 0, &start, &end, EXTENT_DIRTY, &cached_state)) { mutex_unlock(&fs_info->unused_bg_unpin_mutex); break; } if (btrfs_test_opt(fs_info, DISCARD_SYNC)) ret = btrfs_discard_extent(fs_info, start, end + 1 - start, NULL); clear_extent_dirty(unpin, start, end, &cached_state); unpin_extent_range(fs_info, start, end, true); mutex_unlock(&fs_info->unused_bg_unpin_mutex); free_extent_state(cached_state); cond_resched(); } if (btrfs_test_opt(fs_info, DISCARD_ASYNC)) { btrfs_discard_calc_delay(&fs_info->discard_ctl); btrfs_discard_schedule_work(&fs_info->discard_ctl, true); } /* * Transaction is finished. We don't need the lock anymore. We * do need to clean up the block groups in case of a transaction * abort. */ deleted_bgs = &trans->transaction->deleted_bgs; list_for_each_entry_safe(block_group, tmp, deleted_bgs, bg_list) { u64 trimmed = 0; ret = -EROFS; if (!TRANS_ABORTED(trans)) ret = btrfs_discard_extent(fs_info, block_group->start, block_group->length, &trimmed); list_del_init(&block_group->bg_list); btrfs_unfreeze_block_group(block_group); btrfs_put_block_group(block_group); if (ret) { const char *errstr = btrfs_decode_error(ret); btrfs_warn(fs_info, "discard failed while removing blockgroup: errno=%d %s", ret, errstr); } } return 0; } static int do_free_extent_accounting(struct btrfs_trans_handle *trans, u64 bytenr, u64 num_bytes, bool is_data) { int ret; if (is_data) { struct btrfs_root *csum_root; csum_root = btrfs_csum_root(trans->fs_info, bytenr); ret = btrfs_del_csums(trans, csum_root, bytenr, num_bytes); if (ret) { btrfs_abort_transaction(trans, ret); return ret; } } ret = add_to_free_space_tree(trans, bytenr, num_bytes); if (ret) { btrfs_abort_transaction(trans, ret); return ret; } ret = btrfs_update_block_group(trans, bytenr, num_bytes, false); if (ret) btrfs_abort_transaction(trans, ret); return ret; } #define abort_and_dump(trans, path, fmt, args...) 
\ ({ \ btrfs_abort_transaction(trans, -EUCLEAN); \ btrfs_print_leaf(path->nodes[0]); \ btrfs_crit(trans->fs_info, fmt, ##args); \ }) /* * Drop one or more refs of @node. * * 1. Locate the extent refs. * It's either inline in EXTENT/METADATA_ITEM or in keyed SHARED_* item. * Locate it, then reduce the refs number or remove the ref line completely. * * 2. Update the refs count in EXTENT/METADATA_ITEM * * Inline backref case: * * in extent tree we have: * * item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 16201 itemsize 82 * refs 2 gen 6 flags DATA * extent data backref root FS_TREE objectid 258 offset 0 count 1 * extent data backref root FS_TREE objectid 257 offset 0 count 1 * * This function gets called with: * * node->bytenr = 13631488 * node->num_bytes = 1048576 * root_objectid = FS_TREE * owner_objectid = 257 * owner_offset = 0 * refs_to_drop = 1 * * Then we should get some like: * * item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 16201 itemsize 82 * refs 1 gen 6 flags DATA * extent data backref root FS_TREE objectid 258 offset 0 count 1 * * Keyed backref case: * * in extent tree we have: * * item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 3971 itemsize 24 * refs 754 gen 6 flags DATA * [...] * item 2 key (13631488 EXTENT_DATA_REF <HASH>) itemoff 3915 itemsize 28 * extent data backref root FS_TREE objectid 866 offset 0 count 1 * * This function get called with: * * node->bytenr = 13631488 * node->num_bytes = 1048576 * root_objectid = FS_TREE * owner_objectid = 866 * owner_offset = 0 * refs_to_drop = 1 * * Then we should get some like: * * item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 3971 itemsize 24 * refs 753 gen 6 flags DATA * * And that (13631488 EXTENT_DATA_REF <HASH>) gets removed. */ static int __btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_node *node, u64 parent, u64 root_objectid, u64 owner_objectid, u64 owner_offset, int refs_to_drop, struct btrfs_delayed_extent_op *extent_op) { struct btrfs_fs_info *info = trans->fs_info; struct btrfs_key key; struct btrfs_path *path; struct btrfs_root *extent_root; struct extent_buffer *leaf; struct btrfs_extent_item *ei; struct btrfs_extent_inline_ref *iref; int ret; int is_data; int extent_slot = 0; int found_extent = 0; int num_to_del = 1; u32 item_size; u64 refs; u64 bytenr = node->bytenr; u64 num_bytes = node->num_bytes; bool skinny_metadata = btrfs_fs_incompat(info, SKINNY_METADATA); extent_root = btrfs_extent_root(info, bytenr); ASSERT(extent_root); path = btrfs_alloc_path(); if (!path) return -ENOMEM; is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID; if (!is_data && refs_to_drop != 1) { btrfs_crit(info, "invalid refs_to_drop, dropping more than 1 refs for tree block %llu refs_to_drop %u", node->bytenr, refs_to_drop); ret = -EINVAL; btrfs_abort_transaction(trans, ret); goto out; } if (is_data) skinny_metadata = false; ret = lookup_extent_backref(trans, path, &iref, bytenr, num_bytes, parent, root_objectid, owner_objectid, owner_offset); if (ret == 0) { /* * Either the inline backref or the SHARED_DATA_REF/ * SHARED_BLOCK_REF is found * * Here is a quick path to locate EXTENT/METADATA_ITEM. * It's possible the EXTENT/METADATA_ITEM is near current slot. 
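 *
 * Illustrative leaf layout for the quick path (slot numbers are made up,
 * in the style of the examples further above):
 *
 *   slot 3  (13631488 EXTENT_ITEM 1048576)      <- what we want
 *   slot 4  (13631488 EXTENT_DATA_REF <HASH>)   <- path->slots[0]
 *
 * Starting at slot 4 the backwards scan reaches slot 3, sees a matching
 * bytenr with an EXTENT_ITEM key and sets found_extent without a second
 * btrfs_search_slot().  The scan gives up once the bytenr stops matching
 * or after stepping back more than 5 slots.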
*/ extent_slot = path->slots[0]; while (extent_slot >= 0) { btrfs_item_key_to_cpu(path->nodes[0], &key, extent_slot); if (key.objectid != bytenr) break; if (key.type == BTRFS_EXTENT_ITEM_KEY && key.offset == num_bytes) { found_extent = 1; break; } if (key.type == BTRFS_METADATA_ITEM_KEY && key.offset == owner_objectid) { found_extent = 1; break; } /* Quick path didn't find the EXTEMT/METADATA_ITEM */ if (path->slots[0] - extent_slot > 5) break; extent_slot--; } if (!found_extent) { if (iref) { abort_and_dump(trans, path, "invalid iref slot %u, no EXTENT/METADATA_ITEM found but has inline extent ref", path->slots[0]); ret = -EUCLEAN; goto out; } /* Must be SHARED_* item, remove the backref first */ ret = remove_extent_backref(trans, extent_root, path, NULL, refs_to_drop, is_data); if (ret) { btrfs_abort_transaction(trans, ret); goto out; } btrfs_release_path(path); /* Slow path to locate EXTENT/METADATA_ITEM */ key.objectid = bytenr; key.type = BTRFS_EXTENT_ITEM_KEY; key.offset = num_bytes; if (!is_data && skinny_metadata) { key.type = BTRFS_METADATA_ITEM_KEY; key.offset = owner_objectid; } ret = btrfs_search_slot(trans, extent_root, &key, path, -1, 1); if (ret > 0 && skinny_metadata && path->slots[0]) { /* * Couldn't find our skinny metadata item, * see if we have ye olde extent item. */ path->slots[0]--; btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); if (key.objectid == bytenr && key.type == BTRFS_EXTENT_ITEM_KEY && key.offset == num_bytes) ret = 0; } if (ret > 0 && skinny_metadata) { skinny_metadata = false; key.objectid = bytenr; key.type = BTRFS_EXTENT_ITEM_KEY; key.offset = num_bytes; btrfs_release_path(path); ret = btrfs_search_slot(trans, extent_root, &key, path, -1, 1); } if (ret) { if (ret > 0) btrfs_print_leaf(path->nodes[0]); btrfs_err(info, "umm, got %d back from search, was looking for %llu, slot %d", ret, bytenr, path->slots[0]); } if (ret < 0) { btrfs_abort_transaction(trans, ret); goto out; } extent_slot = path->slots[0]; } } else if (WARN_ON(ret == -ENOENT)) { abort_and_dump(trans, path, "unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu slot %d", bytenr, parent, root_objectid, owner_objectid, owner_offset, path->slots[0]); goto out; } else { btrfs_abort_transaction(trans, ret); goto out; } leaf = path->nodes[0]; item_size = btrfs_item_size(leaf, extent_slot); if (unlikely(item_size < sizeof(*ei))) { ret = -EUCLEAN; btrfs_err(trans->fs_info, "unexpected extent item size, has %u expect >= %zu", item_size, sizeof(*ei)); btrfs_abort_transaction(trans, ret); goto out; } ei = btrfs_item_ptr(leaf, extent_slot, struct btrfs_extent_item); if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID && key.type == BTRFS_EXTENT_ITEM_KEY) { struct btrfs_tree_block_info *bi; if (item_size < sizeof(*ei) + sizeof(*bi)) { abort_and_dump(trans, path, "invalid extent item size for key (%llu, %u, %llu) slot %u owner %llu, has %u expect >= %zu", key.objectid, key.type, key.offset, path->slots[0], owner_objectid, item_size, sizeof(*ei) + sizeof(*bi)); ret = -EUCLEAN; goto out; } bi = (struct btrfs_tree_block_info *)(ei + 1); WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi)); } refs = btrfs_extent_refs(leaf, ei); if (refs < refs_to_drop) { abort_and_dump(trans, path, "trying to drop %d refs but we only have %llu for bytenr %llu slot %u", refs_to_drop, refs, bytenr, path->slots[0]); ret = -EUCLEAN; goto out; } refs -= refs_to_drop; if (refs > 0) { if (extent_op) __run_delayed_extent_op(extent_op, leaf, ei); /* * In the case of inline back ref, reference 
count will * be updated by remove_extent_backref */ if (iref) { if (!found_extent) { abort_and_dump(trans, path, "invalid iref, got inlined extent ref but no EXTENT/METADATA_ITEM found, slot %u", path->slots[0]); ret = -EUCLEAN; goto out; } } else { btrfs_set_extent_refs(leaf, ei, refs); btrfs_mark_buffer_dirty(leaf); } if (found_extent) { ret = remove_extent_backref(trans, extent_root, path, iref, refs_to_drop, is_data); if (ret) { btrfs_abort_transaction(trans, ret); goto out; } } } else { /* In this branch refs == 1 */ if (found_extent) { if (is_data && refs_to_drop != extent_data_ref_count(path, iref)) { abort_and_dump(trans, path, "invalid refs_to_drop, current refs %u refs_to_drop %u slot %u", extent_data_ref_count(path, iref), refs_to_drop, path->slots[0]); ret = -EUCLEAN; goto out; } if (iref) { if (path->slots[0] != extent_slot) { abort_and_dump(trans, path, "invalid iref, extent item key (%llu %u %llu) slot %u doesn't have wanted iref", key.objectid, key.type, key.offset, path->slots[0]); ret = -EUCLEAN; goto out; } } else { /* * No inline ref, we must be at SHARED_* item, * And it's single ref, it must be: * | extent_slot ||extent_slot + 1| * [ EXTENT/METADATA_ITEM ][ SHARED_* ITEM ] */ if (path->slots[0] != extent_slot + 1) { abort_and_dump(trans, path, "invalid SHARED_* item slot %u, previous item is not EXTENT/METADATA_ITEM", path->slots[0]); ret = -EUCLEAN; goto out; } path->slots[0] = extent_slot; num_to_del = 2; } } ret = btrfs_del_items(trans, extent_root, path, path->slots[0], num_to_del); if (ret) { btrfs_abort_transaction(trans, ret); goto out; } btrfs_release_path(path); ret = do_free_extent_accounting(trans, bytenr, num_bytes, is_data); } btrfs_release_path(path); out: btrfs_free_path(path); return ret; } /* * when we free an block, it is possible (and likely) that we free the last * delayed ref for that extent as well. This searches the delayed ref tree for * a given extent, and if there are no other delayed refs to be processed, it * removes it from the tree. */ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans, u64 bytenr) { struct btrfs_delayed_ref_head *head; struct btrfs_delayed_ref_root *delayed_refs; int ret = 0; delayed_refs = &trans->transaction->delayed_refs; spin_lock(&delayed_refs->lock); head = btrfs_find_delayed_ref_head(delayed_refs, bytenr); if (!head) goto out_delayed_unlock; spin_lock(&head->lock); if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root)) goto out; if (cleanup_extent_op(head) != NULL) goto out; /* * waiting for the lock here would deadlock. 
If someone else has it * locked they are already in the process of dropping it anyway */ if (!mutex_trylock(&head->mutex)) goto out; btrfs_delete_ref_head(delayed_refs, head); head->processing = false; spin_unlock(&head->lock); spin_unlock(&delayed_refs->lock); BUG_ON(head->extent_op); if (head->must_insert_reserved) ret = 1; btrfs_cleanup_ref_head_accounting(trans->fs_info, delayed_refs, head); mutex_unlock(&head->mutex); btrfs_put_delayed_ref_head(head); return ret; out: spin_unlock(&head->lock); out_delayed_unlock: spin_unlock(&delayed_refs->lock); return 0; } void btrfs_free_tree_block(struct btrfs_trans_handle *trans, u64 root_id, struct extent_buffer *buf, u64 parent, int last_ref) { struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_ref generic_ref = { 0 }; int ret; btrfs_init_generic_ref(&generic_ref, BTRFS_DROP_DELAYED_REF, buf->start, buf->len, parent); btrfs_init_tree_ref(&generic_ref, btrfs_header_level(buf), root_id, 0, false); if (root_id != BTRFS_TREE_LOG_OBJECTID) { btrfs_ref_tree_mod(fs_info, &generic_ref); ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, NULL); BUG_ON(ret); /* -ENOMEM */ } if (last_ref && btrfs_header_generation(buf) == trans->transid) { struct btrfs_block_group *cache; bool must_pin = false; if (root_id != BTRFS_TREE_LOG_OBJECTID) { ret = check_ref_cleanup(trans, buf->start); if (!ret) { btrfs_redirty_list_add(trans->transaction, buf); goto out; } } cache = btrfs_lookup_block_group(fs_info, buf->start); if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) { pin_down_extent(trans, cache, buf->start, buf->len, 1); btrfs_put_block_group(cache); goto out; } /* * If there are tree mod log users we may have recorded mod log * operations for this node. If we re-allocate this node we * could replay operations on this node that happened when it * existed in a completely different root. For example if it * was part of root A, then was reallocated to root B, and we * are doing a btrfs_old_search_slot(root b), we could replay * operations that happened when the block was part of root A, * giving us an inconsistent view of the btree. * * We are safe from races here because at this point no other * node or root points to this extent buffer, so if after this * check a new tree mod log user joins we will not have an * existing log of operations on this node that we have to * contend with. */ if (test_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags)) must_pin = true; if (must_pin || btrfs_is_zoned(fs_info)) { btrfs_redirty_list_add(trans->transaction, buf); pin_down_extent(trans, cache, buf->start, buf->len, 1); btrfs_put_block_group(cache); goto out; } WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)); btrfs_add_free_space(cache, buf->start, buf->len); btrfs_free_reserved_bytes(cache, buf->len, 0); btrfs_put_block_group(cache); trace_btrfs_reserved_extent_free(fs_info, buf->start, buf->len); } out: if (last_ref) { /* * Deleting the buffer, clear the corrupt flag since it doesn't * matter anymore. */ clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags); } } /* Can return -ENOMEM */ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref) { struct btrfs_fs_info *fs_info = trans->fs_info; int ret; if (btrfs_is_testing(fs_info)) return 0; /* * tree log blocks never actually go into the extent allocation * tree, just update pinning info and exit early. 
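 *
 * Summary of the three outcomes below (derived from the checks in this
 * function only):
 *
 *   owning_root == BTRFS_TREE_LOG_OBJECTID -> btrfs_pin_extent() and done
 *   BTRFS_REF_METADATA                     -> btrfs_add_delayed_tree_ref()
 *   BTRFS_REF_DATA                         -> btrfs_add_delayed_data_ref()
 *
 * Only the latter two cases are reported via btrfs_ref_tree_mod().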
*/ if ((ref->type == BTRFS_REF_METADATA && ref->tree_ref.owning_root == BTRFS_TREE_LOG_OBJECTID) || (ref->type == BTRFS_REF_DATA && ref->data_ref.owning_root == BTRFS_TREE_LOG_OBJECTID)) { /* unlocks the pinned mutex */ btrfs_pin_extent(trans, ref->bytenr, ref->len, 1); ret = 0; } else if (ref->type == BTRFS_REF_METADATA) { ret = btrfs_add_delayed_tree_ref(trans, ref, NULL); } else { ret = btrfs_add_delayed_data_ref(trans, ref, 0); } if (!((ref->type == BTRFS_REF_METADATA && ref->tree_ref.owning_root == BTRFS_TREE_LOG_OBJECTID) || (ref->type == BTRFS_REF_DATA && ref->data_ref.owning_root == BTRFS_TREE_LOG_OBJECTID))) btrfs_ref_tree_mod(fs_info, ref); return ret; } enum btrfs_loop_type { /* * Start caching block groups but do not wait for progress or for them * to be done. */ LOOP_CACHING_NOWAIT, /* * Wait for the block group free_space >= the space we're waiting for if * the block group isn't cached. */ LOOP_CACHING_WAIT, /* * Allow allocations to happen from block groups that do not yet have a * size classification. */ LOOP_UNSET_SIZE_CLASS, /* * Allocate a chunk and then retry the allocation. */ LOOP_ALLOC_CHUNK, /* * Ignore the size class restrictions for this allocation. */ LOOP_WRONG_SIZE_CLASS, /* * Ignore the empty size, only try to allocate the number of bytes * needed for this allocation. */ LOOP_NO_EMPTY_SIZE, }; static inline void btrfs_lock_block_group(struct btrfs_block_group *cache, int delalloc) { if (delalloc) down_read(&cache->data_rwsem); } static inline void btrfs_grab_block_group(struct btrfs_block_group *cache, int delalloc) { btrfs_get_block_group(cache); if (delalloc) down_read(&cache->data_rwsem); } static struct btrfs_block_group *btrfs_lock_cluster( struct btrfs_block_group *block_group, struct btrfs_free_cluster *cluster, int delalloc) __acquires(&cluster->refill_lock) { struct btrfs_block_group *used_bg = NULL; spin_lock(&cluster->refill_lock); while (1) { used_bg = cluster->block_group; if (!used_bg) return NULL; if (used_bg == block_group) return used_bg; btrfs_get_block_group(used_bg); if (!delalloc) return used_bg; if (down_read_trylock(&used_bg->data_rwsem)) return used_bg; spin_unlock(&cluster->refill_lock); /* We should only have one-level nested. */ down_read_nested(&used_bg->data_rwsem, SINGLE_DEPTH_NESTING); spin_lock(&cluster->refill_lock); if (used_bg == cluster->block_group) return used_bg; up_read(&used_bg->data_rwsem); btrfs_put_block_group(used_bg); } } static inline void btrfs_release_block_group(struct btrfs_block_group *cache, int delalloc) { if (delalloc) up_read(&cache->data_rwsem); btrfs_put_block_group(cache); } /* * Helper function for find_free_extent(). * * Return -ENOENT to inform caller that we need fallback to unclustered mode. * Return >0 to inform caller that we find nothing * Return 0 means we have found a location and set ffe_ctl->found_offset. 
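 *
 * How the caller consumes these values (see do_allocation_clustered()
 * further down):
 *
 *   0       -> ffe_ctl->found_offset is valid, allocation is done
 *   > 0     -> nothing usable here, move on to the next block group
 *   -ENOENT -> fall back to find_free_extent_unclustered()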
*/ static int find_free_extent_clustered(struct btrfs_block_group *bg, struct find_free_extent_ctl *ffe_ctl, struct btrfs_block_group **cluster_bg_ret) { struct btrfs_block_group *cluster_bg; struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr; u64 aligned_cluster; u64 offset; int ret; cluster_bg = btrfs_lock_cluster(bg, last_ptr, ffe_ctl->delalloc); if (!cluster_bg) goto refill_cluster; if (cluster_bg != bg && (cluster_bg->ro || !block_group_bits(cluster_bg, ffe_ctl->flags))) goto release_cluster; offset = btrfs_alloc_from_cluster(cluster_bg, last_ptr, ffe_ctl->num_bytes, cluster_bg->start, &ffe_ctl->max_extent_size); if (offset) { /* We have a block, we're done */ spin_unlock(&last_ptr->refill_lock); trace_btrfs_reserve_extent_cluster(cluster_bg, ffe_ctl); *cluster_bg_ret = cluster_bg; ffe_ctl->found_offset = offset; return 0; } WARN_ON(last_ptr->block_group != cluster_bg); release_cluster: /* * If we are on LOOP_NO_EMPTY_SIZE, we can't set up a new clusters, so * lets just skip it and let the allocator find whatever block it can * find. If we reach this point, we will have tried the cluster * allocator plenty of times and not have found anything, so we are * likely way too fragmented for the clustering stuff to find anything. * * However, if the cluster is taken from the current block group, * release the cluster first, so that we stand a better chance of * succeeding in the unclustered allocation. */ if (ffe_ctl->loop >= LOOP_NO_EMPTY_SIZE && cluster_bg != bg) { spin_unlock(&last_ptr->refill_lock); btrfs_release_block_group(cluster_bg, ffe_ctl->delalloc); return -ENOENT; } /* This cluster didn't work out, free it and start over */ btrfs_return_cluster_to_free_space(NULL, last_ptr); if (cluster_bg != bg) btrfs_release_block_group(cluster_bg, ffe_ctl->delalloc); refill_cluster: if (ffe_ctl->loop >= LOOP_NO_EMPTY_SIZE) { spin_unlock(&last_ptr->refill_lock); return -ENOENT; } aligned_cluster = max_t(u64, ffe_ctl->empty_cluster + ffe_ctl->empty_size, bg->full_stripe_len); ret = btrfs_find_space_cluster(bg, last_ptr, ffe_ctl->search_start, ffe_ctl->num_bytes, aligned_cluster); if (ret == 0) { /* Now pull our allocation out of this cluster */ offset = btrfs_alloc_from_cluster(bg, last_ptr, ffe_ctl->num_bytes, ffe_ctl->search_start, &ffe_ctl->max_extent_size); if (offset) { /* We found one, proceed */ spin_unlock(&last_ptr->refill_lock); ffe_ctl->found_offset = offset; trace_btrfs_reserve_extent_cluster(bg, ffe_ctl); return 0; } } /* * At this point we either didn't find a cluster or we weren't able to * allocate a block from our cluster. Free the cluster we've been * trying to use, and go to the next block group. */ btrfs_return_cluster_to_free_space(NULL, last_ptr); spin_unlock(&last_ptr->refill_lock); return 1; } /* * Return >0 to inform caller that we find nothing * Return 0 when we found an free extent and set ffe_ctrl->found_offset */ static int find_free_extent_unclustered(struct btrfs_block_group *bg, struct find_free_extent_ctl *ffe_ctl) { struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr; u64 offset; /* * We are doing an unclustered allocation, set the fragmented flag so * we don't bother trying to setup a cluster again until we get more * space. 
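 *
 * Note: the flag is cleared again in unpin_extent_range() once enough
 * pinned space has been returned to this block group (the total_unpinned
 * check earlier in this file), so "until we get more space" is meant
 * literally.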
*/ if (unlikely(last_ptr)) { spin_lock(&last_ptr->lock); last_ptr->fragmented = 1; spin_unlock(&last_ptr->lock); } if (ffe_ctl->cached) { struct btrfs_free_space_ctl *free_space_ctl; free_space_ctl = bg->free_space_ctl; spin_lock(&free_space_ctl->tree_lock); if (free_space_ctl->free_space < ffe_ctl->num_bytes + ffe_ctl->empty_cluster + ffe_ctl->empty_size) { ffe_ctl->total_free_space = max_t(u64, ffe_ctl->total_free_space, free_space_ctl->free_space); spin_unlock(&free_space_ctl->tree_lock); return 1; } spin_unlock(&free_space_ctl->tree_lock); } offset = btrfs_find_space_for_alloc(bg, ffe_ctl->search_start, ffe_ctl->num_bytes, ffe_ctl->empty_size, &ffe_ctl->max_extent_size); if (!offset) return 1; ffe_ctl->found_offset = offset; return 0; } static int do_allocation_clustered(struct btrfs_block_group *block_group, struct find_free_extent_ctl *ffe_ctl, struct btrfs_block_group **bg_ret) { int ret; /* We want to try and use the cluster allocator, so lets look there */ if (ffe_ctl->last_ptr && ffe_ctl->use_cluster) { ret = find_free_extent_clustered(block_group, ffe_ctl, bg_ret); if (ret >= 0) return ret; /* ret == -ENOENT case falls through */ } return find_free_extent_unclustered(block_group, ffe_ctl); } /* * Tree-log block group locking * ============================ * * fs_info::treelog_bg_lock protects the fs_info::treelog_bg which * indicates the starting address of a block group, which is reserved only * for tree-log metadata. * * Lock nesting * ============ * * space_info::lock * block_group::lock * fs_info::treelog_bg_lock */ /* * Simple allocator for sequential-only block group. It only allows sequential * allocation. No need to play with trees. This function also reserves the * bytes as in btrfs_add_reserved_bytes. */ static int do_allocation_zoned(struct btrfs_block_group *block_group, struct find_free_extent_ctl *ffe_ctl, struct btrfs_block_group **bg_ret) { struct btrfs_fs_info *fs_info = block_group->fs_info; struct btrfs_space_info *space_info = block_group->space_info; struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; u64 start = block_group->start; u64 num_bytes = ffe_ctl->num_bytes; u64 avail; u64 bytenr = block_group->start; u64 log_bytenr; u64 data_reloc_bytenr; int ret = 0; bool skip = false; ASSERT(btrfs_is_zoned(block_group->fs_info)); /* * Do not allow non-tree-log blocks in the dedicated tree-log block * group, and vice versa. */ spin_lock(&fs_info->treelog_bg_lock); log_bytenr = fs_info->treelog_bg; if (log_bytenr && ((ffe_ctl->for_treelog && bytenr != log_bytenr) || (!ffe_ctl->for_treelog && bytenr == log_bytenr))) skip = true; spin_unlock(&fs_info->treelog_bg_lock); if (skip) return 1; /* * Do not allow non-relocation blocks in the dedicated relocation block * group, and vice versa. */ spin_lock(&fs_info->relocation_bg_lock); data_reloc_bytenr = fs_info->data_reloc_bg; if (data_reloc_bytenr && ((ffe_ctl->for_data_reloc && bytenr != data_reloc_bytenr) || (!ffe_ctl->for_data_reloc && bytenr == data_reloc_bytenr))) skip = true; spin_unlock(&fs_info->relocation_bg_lock); if (skip) return 1; /* Check RO and no space case before trying to activate it */ spin_lock(&block_group->lock); if (block_group->ro || btrfs_zoned_bg_is_full(block_group)) { ret = 1; /* * May need to clear fs_info->{treelog,data_reloc}_bg. * Return the error after taking the locks. */ } spin_unlock(&block_group->lock); /* Metadata block group is activated at write time. 
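 *
 * Summary of the check below: only DATA block groups attempt
 * btrfs_zone_activate() here, and a failed activation makes us skip the
 * group (ret = 1).  Non-DATA block groups fall through and, per the note
 * above, get activated when they are written.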
*/ if (!ret && (block_group->flags & BTRFS_BLOCK_GROUP_DATA) && !btrfs_zone_activate(block_group)) { ret = 1; /* * May need to clear fs_info->{treelog,data_reloc}_bg. * Return the error after taking the locks. */ } spin_lock(&space_info->lock); spin_lock(&block_group->lock); spin_lock(&fs_info->treelog_bg_lock); spin_lock(&fs_info->relocation_bg_lock); if (ret) goto out; ASSERT(!ffe_ctl->for_treelog || block_group->start == fs_info->treelog_bg || fs_info->treelog_bg == 0); ASSERT(!ffe_ctl->for_data_reloc || block_group->start == fs_info->data_reloc_bg || fs_info->data_reloc_bg == 0); if (block_group->ro || (!ffe_ctl->for_data_reloc && test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags))) { ret = 1; goto out; } /* * Do not allow currently using block group to be tree-log dedicated * block group. */ if (ffe_ctl->for_treelog && !fs_info->treelog_bg && (block_group->used || block_group->reserved)) { ret = 1; goto out; } /* * Do not allow currently used block group to be the data relocation * dedicated block group. */ if (ffe_ctl->for_data_reloc && !fs_info->data_reloc_bg && (block_group->used || block_group->reserved)) { ret = 1; goto out; } WARN_ON_ONCE(block_group->alloc_offset > block_group->zone_capacity); avail = block_group->zone_capacity - block_group->alloc_offset; if (avail < num_bytes) { if (ffe_ctl->max_extent_size < avail) { /* * With sequential allocator, free space is always * contiguous */ ffe_ctl->max_extent_size = avail; ffe_ctl->total_free_space = avail; } ret = 1; goto out; } if (ffe_ctl->for_treelog && !fs_info->treelog_bg) fs_info->treelog_bg = block_group->start; if (ffe_ctl->for_data_reloc) { if (!fs_info->data_reloc_bg) fs_info->data_reloc_bg = block_group->start; /* * Do not allow allocations from this block group, unless it is * for data relocation. Compared to increasing the ->ro, setting * the ->zoned_data_reloc_ongoing flag still allows nocow * writers to come in. See btrfs_inc_nocow_writers(). * * We need to disable an allocation to avoid an allocation of * regular (non-relocation data) extent. With mix of relocation * extents and regular extents, we can dispatch WRITE commands * (for relocation extents) and ZONE APPEND commands (for * regular extents) at the same time to the same zone, which * easily break the write pointer. * * Also, this flag avoids this block group to be zone finished. */ set_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags); } ffe_ctl->found_offset = start + block_group->alloc_offset; block_group->alloc_offset += num_bytes; spin_lock(&ctl->tree_lock); ctl->free_space -= num_bytes; spin_unlock(&ctl->tree_lock); /* * We do not check if found_offset is aligned to stripesize. The * address is anyway rewritten when using zone append writing. 
*/ ffe_ctl->search_start = ffe_ctl->found_offset; out: if (ret && ffe_ctl->for_treelog) fs_info->treelog_bg = 0; if (ret && ffe_ctl->for_data_reloc) fs_info->data_reloc_bg = 0; spin_unlock(&fs_info->relocation_bg_lock); spin_unlock(&fs_info->treelog_bg_lock); spin_unlock(&block_group->lock); spin_unlock(&space_info->lock); return ret; } static int do_allocation(struct btrfs_block_group *block_group, struct find_free_extent_ctl *ffe_ctl, struct btrfs_block_group **bg_ret) { switch (ffe_ctl->policy) { case BTRFS_EXTENT_ALLOC_CLUSTERED: return do_allocation_clustered(block_group, ffe_ctl, bg_ret); case BTRFS_EXTENT_ALLOC_ZONED: return do_allocation_zoned(block_group, ffe_ctl, bg_ret); default: BUG(); } } static void release_block_group(struct btrfs_block_group *block_group, struct find_free_extent_ctl *ffe_ctl, int delalloc) { switch (ffe_ctl->policy) { case BTRFS_EXTENT_ALLOC_CLUSTERED: ffe_ctl->retry_uncached = false; break; case BTRFS_EXTENT_ALLOC_ZONED: /* Nothing to do */ break; default: BUG(); } BUG_ON(btrfs_bg_flags_to_raid_index(block_group->flags) != ffe_ctl->index); btrfs_release_block_group(block_group, delalloc); } static void found_extent_clustered(struct find_free_extent_ctl *ffe_ctl, struct btrfs_key *ins) { struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr; if (!ffe_ctl->use_cluster && last_ptr) { spin_lock(&last_ptr->lock); last_ptr->window_start = ins->objectid; spin_unlock(&last_ptr->lock); } } static void found_extent(struct find_free_extent_ctl *ffe_ctl, struct btrfs_key *ins) { switch (ffe_ctl->policy) { case BTRFS_EXTENT_ALLOC_CLUSTERED: found_extent_clustered(ffe_ctl, ins); break; case BTRFS_EXTENT_ALLOC_ZONED: /* Nothing to do */ break; default: BUG(); } } static int can_allocate_chunk_zoned(struct btrfs_fs_info *fs_info, struct find_free_extent_ctl *ffe_ctl) { /* Block group's activeness is not a requirement for METADATA block groups. */ if (!(ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA)) return 0; /* If we can activate new zone, just allocate a chunk and use it */ if (btrfs_can_activate_zone(fs_info->fs_devices, ffe_ctl->flags)) return 0; /* * We already reached the max active zones. Try to finish one block * group to make a room for a new block group. This is only possible * for a data block group because btrfs_zone_finish() may need to wait * for a running transaction which can cause a deadlock for metadata * allocation. */ if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA) { int ret = btrfs_zone_finish_one_bg(fs_info); if (ret == 1) return 0; else if (ret < 0) return ret; } /* * If we have enough free space left in an already active block group * and we can't activate any other zone now, do not allow allocating a * new chunk and let find_free_extent() retry with a smaller size. */ if (ffe_ctl->max_extent_size >= ffe_ctl->min_alloc_size) return -ENOSPC; /* * Even min_alloc_size is not left in any block groups. Since we cannot * activate a new block group, allocating it may not help. Let's tell a * caller to try again and hope it progress something by writing some * parts of the region. That is only possible for data block groups, * where a part of the region can be written. */ if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA) return -EAGAIN; /* * We cannot activate a new block group and no enough space left in any * block groups. So, allocating a new block group may not help. But, * there is nothing to do anyway, so let's go with it. 
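 *
 * Condensed view of the return values chosen above (data allocations
 * assumed where noted):
 *
 *   0       - metadata request, a new zone can be activated, or a data
 *             block group was just zone-finished: go allocate the chunk
 *   <0      - error from btrfs_zone_finish_one_bg(), passed through
 *   -ENOSPC - no new zone, but an active block group still has at least
 *             min_alloc_size free, so retry with a smaller size instead
 *   -EAGAIN - data request that may fit after part of the region has
 *             been written out
 *   0       - the fall-through right below: nothing better to do, try
 *             the chunk allocation anyway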
*/ return 0; } static int can_allocate_chunk(struct btrfs_fs_info *fs_info, struct find_free_extent_ctl *ffe_ctl) { switch (ffe_ctl->policy) { case BTRFS_EXTENT_ALLOC_CLUSTERED: return 0; case BTRFS_EXTENT_ALLOC_ZONED: return can_allocate_chunk_zoned(fs_info, ffe_ctl); default: BUG(); } } /* * Return >0 means caller needs to re-search for free extent * Return 0 means we have the needed free extent. * Return <0 means we failed to locate any free extent. */ static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info, struct btrfs_key *ins, struct find_free_extent_ctl *ffe_ctl, bool full_search) { struct btrfs_root *root = fs_info->chunk_root; int ret; if ((ffe_ctl->loop == LOOP_CACHING_NOWAIT) && ffe_ctl->have_caching_bg && !ffe_ctl->orig_have_caching_bg) ffe_ctl->orig_have_caching_bg = true; if (ins->objectid) { found_extent(ffe_ctl, ins); return 0; } if (ffe_ctl->loop >= LOOP_CACHING_WAIT && ffe_ctl->have_caching_bg) return 1; ffe_ctl->index++; if (ffe_ctl->index < BTRFS_NR_RAID_TYPES) return 1; /* See the comments for btrfs_loop_type for an explanation of the phases. */ if (ffe_ctl->loop < LOOP_NO_EMPTY_SIZE) { ffe_ctl->index = 0; /* * We want to skip the LOOP_CACHING_WAIT step if we don't have * any uncached bgs and we've already done a full search * through. */ if (ffe_ctl->loop == LOOP_CACHING_NOWAIT && (!ffe_ctl->orig_have_caching_bg && full_search)) ffe_ctl->loop++; ffe_ctl->loop++; if (ffe_ctl->loop == LOOP_ALLOC_CHUNK) { struct btrfs_trans_handle *trans; int exist = 0; /* Check if allocation policy allows to create a new chunk */ ret = can_allocate_chunk(fs_info, ffe_ctl); if (ret) return ret; trans = current->journal_info; if (trans) exist = 1; else trans = btrfs_join_transaction(root); if (IS_ERR(trans)) { ret = PTR_ERR(trans); return ret; } ret = btrfs_chunk_alloc(trans, ffe_ctl->flags, CHUNK_ALLOC_FORCE_FOR_EXTENT); /* Do not bail out on ENOSPC since we can do more. */ if (ret == -ENOSPC) { ret = 0; ffe_ctl->loop++; } else if (ret < 0) btrfs_abort_transaction(trans, ret); else ret = 0; if (!exist) btrfs_end_transaction(trans); if (ret) return ret; } if (ffe_ctl->loop == LOOP_NO_EMPTY_SIZE) { if (ffe_ctl->policy != BTRFS_EXTENT_ALLOC_CLUSTERED) return -ENOSPC; /* * Don't loop again if we already have no empty_size and * no empty_cluster. */ if (ffe_ctl->empty_size == 0 && ffe_ctl->empty_cluster == 0) return -ENOSPC; ffe_ctl->empty_size = 0; ffe_ctl->empty_cluster = 0; } return 1; } return -ENOSPC; } static bool find_free_extent_check_size_class(struct find_free_extent_ctl *ffe_ctl, struct btrfs_block_group *bg) { if (ffe_ctl->policy == BTRFS_EXTENT_ALLOC_ZONED) return true; if (!btrfs_block_group_should_use_size_class(bg)) return true; if (ffe_ctl->loop >= LOOP_WRONG_SIZE_CLASS) return true; if (ffe_ctl->loop >= LOOP_UNSET_SIZE_CLASS && bg->size_class == BTRFS_BG_SZ_NONE) return true; return ffe_ctl->size_class == bg->size_class; } static int prepare_allocation_clustered(struct btrfs_fs_info *fs_info, struct find_free_extent_ctl *ffe_ctl, struct btrfs_space_info *space_info, struct btrfs_key *ins) { /* * If our free space is heavily fragmented we may not be able to make * big contiguous allocations, so instead of doing the expensive search * for free space, simply return ENOSPC with our max_extent_size so we * can go ahead and search for a more manageable chunk. * * If our max_extent_size is large enough for our allocation simply * disable clustering since we will likely not be able to find enough * space to create a cluster and induce latency trying. 
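 *
 * Worked example with made-up sizes, space_info->max_extent_size == 1M:
 *
 *   num_bytes = 4M   -> return -ENOSPC right away, ins->offset = 1M so
 *                       the caller can retry with a smaller request
 *   num_bytes = 256K -> continue, but with ffe_ctl->use_cluster = false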
*/ if (space_info->max_extent_size) { spin_lock(&space_info->lock); if (space_info->max_extent_size && ffe_ctl->num_bytes > space_info->max_extent_size) { ins->offset = space_info->max_extent_size; spin_unlock(&space_info->lock); return -ENOSPC; } else if (space_info->max_extent_size) { ffe_ctl->use_cluster = false; } spin_unlock(&space_info->lock); } ffe_ctl->last_ptr = fetch_cluster_info(fs_info, space_info, &ffe_ctl->empty_cluster); if (ffe_ctl->last_ptr) { struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr; spin_lock(&last_ptr->lock); if (last_ptr->block_group) ffe_ctl->hint_byte = last_ptr->window_start; if (last_ptr->fragmented) { /* * We still set window_start so we can keep track of the * last place we found an allocation to try and save * some time. */ ffe_ctl->hint_byte = last_ptr->window_start; ffe_ctl->use_cluster = false; } spin_unlock(&last_ptr->lock); } return 0; } static int prepare_allocation(struct btrfs_fs_info *fs_info, struct find_free_extent_ctl *ffe_ctl, struct btrfs_space_info *space_info, struct btrfs_key *ins) { switch (ffe_ctl->policy) { case BTRFS_EXTENT_ALLOC_CLUSTERED: return prepare_allocation_clustered(fs_info, ffe_ctl, space_info, ins); case BTRFS_EXTENT_ALLOC_ZONED: if (ffe_ctl->for_treelog) { spin_lock(&fs_info->treelog_bg_lock); if (fs_info->treelog_bg) ffe_ctl->hint_byte = fs_info->treelog_bg; spin_unlock(&fs_info->treelog_bg_lock); } if (ffe_ctl->for_data_reloc) { spin_lock(&fs_info->relocation_bg_lock); if (fs_info->data_reloc_bg) ffe_ctl->hint_byte = fs_info->data_reloc_bg; spin_unlock(&fs_info->relocation_bg_lock); } return 0; default: BUG(); } } /* * walks the btree of allocated extents and find a hole of a given size. * The key ins is changed to record the hole: * ins->objectid == start position * ins->flags = BTRFS_EXTENT_ITEM_KEY * ins->offset == the size of the hole. * Any available blocks before search_start are skipped. * * If there is no suitable free space, we will record the max size of * the free space extent currently. 
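 *
 * For a successful 1M allocation at logical offset 13631488 (numbers are
 * hypothetical) the resulting key is:
 *
 *   ins->objectid = 13631488
 *   ins->type     = BTRFS_EXTENT_ITEM_KEY
 *   ins->offset   = 1048576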
* * The overall logic and call chain: * * find_free_extent() * |- Iterate through all block groups * | |- Get a valid block group * | |- Try to do clustered allocation in that block group * | |- Try to do unclustered allocation in that block group * | |- Check if the result is valid * | | |- If valid, then exit * | |- Jump to next block group * | * |- Push harder to find free extents * |- If not found, re-iterate all block groups */ static noinline int find_free_extent(struct btrfs_root *root, struct btrfs_key *ins, struct find_free_extent_ctl *ffe_ctl) { struct btrfs_fs_info *fs_info = root->fs_info; int ret = 0; int cache_block_group_error = 0; struct btrfs_block_group *block_group = NULL; struct btrfs_space_info *space_info; bool full_search = false; WARN_ON(ffe_ctl->num_bytes < fs_info->sectorsize); ffe_ctl->search_start = 0; /* For clustered allocation */ ffe_ctl->empty_cluster = 0; ffe_ctl->last_ptr = NULL; ffe_ctl->use_cluster = true; ffe_ctl->have_caching_bg = false; ffe_ctl->orig_have_caching_bg = false; ffe_ctl->index = btrfs_bg_flags_to_raid_index(ffe_ctl->flags); ffe_ctl->loop = 0; ffe_ctl->retry_uncached = false; ffe_ctl->cached = 0; ffe_ctl->max_extent_size = 0; ffe_ctl->total_free_space = 0; ffe_ctl->found_offset = 0; ffe_ctl->policy = BTRFS_EXTENT_ALLOC_CLUSTERED; ffe_ctl->size_class = btrfs_calc_block_group_size_class(ffe_ctl->num_bytes); if (btrfs_is_zoned(fs_info)) ffe_ctl->policy = BTRFS_EXTENT_ALLOC_ZONED; ins->type = BTRFS_EXTENT_ITEM_KEY; ins->objectid = 0; ins->offset = 0; trace_find_free_extent(root, ffe_ctl); space_info = btrfs_find_space_info(fs_info, ffe_ctl->flags); if (!space_info) { btrfs_err(fs_info, "No space info for %llu", ffe_ctl->flags); return -ENOSPC; } ret = prepare_allocation(fs_info, ffe_ctl, space_info, ins); if (ret < 0) return ret; ffe_ctl->search_start = max(ffe_ctl->search_start, first_logical_byte(fs_info)); ffe_ctl->search_start = max(ffe_ctl->search_start, ffe_ctl->hint_byte); if (ffe_ctl->search_start == ffe_ctl->hint_byte) { block_group = btrfs_lookup_block_group(fs_info, ffe_ctl->search_start); /* * we don't want to use the block group if it doesn't match our * allocation bits, or if its not cached. * * However if we are re-searching with an ideal block group * picked out then we don't care that the block group is cached. */ if (block_group && block_group_bits(block_group, ffe_ctl->flags) && block_group->cached != BTRFS_CACHE_NO) { down_read(&space_info->groups_sem); if (list_empty(&block_group->list) || block_group->ro) { /* * someone is removing this block group, * we can't jump into the have_block_group * target because our list pointers are not * valid */ btrfs_put_block_group(block_group); up_read(&space_info->groups_sem); } else { ffe_ctl->index = btrfs_bg_flags_to_raid_index( block_group->flags); btrfs_lock_block_group(block_group, ffe_ctl->delalloc); ffe_ctl->hinted = true; goto have_block_group; } } else if (block_group) { btrfs_put_block_group(block_group); } } search: trace_find_free_extent_search_loop(root, ffe_ctl); ffe_ctl->have_caching_bg = false; if (ffe_ctl->index == btrfs_bg_flags_to_raid_index(ffe_ctl->flags) || ffe_ctl->index == 0) full_search = true; down_read(&space_info->groups_sem); list_for_each_entry(block_group, &space_info->block_groups[ffe_ctl->index], list) { struct btrfs_block_group *bg_ret; ffe_ctl->hinted = false; /* If the block group is read-only, we can skip it entirely. 
*/ if (unlikely(block_group->ro)) { if (ffe_ctl->for_treelog) btrfs_clear_treelog_bg(block_group); if (ffe_ctl->for_data_reloc) btrfs_clear_data_reloc_bg(block_group); continue; } btrfs_grab_block_group(block_group, ffe_ctl->delalloc); ffe_ctl->search_start = block_group->start; /* * this can happen if we end up cycling through all the * raid types, but we want to make sure we only allocate * for the proper type. */ if (!block_group_bits(block_group, ffe_ctl->flags)) { u64 extra = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID56_MASK | BTRFS_BLOCK_GROUP_RAID10; /* * if they asked for extra copies and this block group * doesn't provide them, bail. This does allow us to * fill raid0 from raid1. */ if ((ffe_ctl->flags & extra) && !(block_group->flags & extra)) goto loop; /* * This block group has different flags than we want. * It's possible that we have MIXED_GROUP flag but no * block group is mixed. Just skip such block group. */ btrfs_release_block_group(block_group, ffe_ctl->delalloc); continue; } have_block_group: trace_find_free_extent_have_block_group(root, ffe_ctl, block_group); ffe_ctl->cached = btrfs_block_group_done(block_group); if (unlikely(!ffe_ctl->cached)) { ffe_ctl->have_caching_bg = true; ret = btrfs_cache_block_group(block_group, false); /* * If we get ENOMEM here or something else we want to * try other block groups, because it may not be fatal. * However if we can't find anything else we need to * save our return here so that we return the actual * error that caused problems, not ENOSPC. */ if (ret < 0) { if (!cache_block_group_error) cache_block_group_error = ret; ret = 0; goto loop; } ret = 0; } if (unlikely(block_group->cached == BTRFS_CACHE_ERROR)) { if (!cache_block_group_error) cache_block_group_error = -EIO; goto loop; } if (!find_free_extent_check_size_class(ffe_ctl, block_group)) goto loop; bg_ret = NULL; ret = do_allocation(block_group, ffe_ctl, &bg_ret); if (ret > 0) goto loop; if (bg_ret && bg_ret != block_group) { btrfs_release_block_group(block_group, ffe_ctl->delalloc); block_group = bg_ret; } /* Checks */ ffe_ctl->search_start = round_up(ffe_ctl->found_offset, fs_info->stripesize); /* move on to the next group */ if (ffe_ctl->search_start + ffe_ctl->num_bytes > block_group->start + block_group->length) { btrfs_add_free_space_unused(block_group, ffe_ctl->found_offset, ffe_ctl->num_bytes); goto loop; } if (ffe_ctl->found_offset < ffe_ctl->search_start) btrfs_add_free_space_unused(block_group, ffe_ctl->found_offset, ffe_ctl->search_start - ffe_ctl->found_offset); ret = btrfs_add_reserved_bytes(block_group, ffe_ctl->ram_bytes, ffe_ctl->num_bytes, ffe_ctl->delalloc, ffe_ctl->loop >= LOOP_WRONG_SIZE_CLASS); if (ret == -EAGAIN) { btrfs_add_free_space_unused(block_group, ffe_ctl->found_offset, ffe_ctl->num_bytes); goto loop; } btrfs_inc_block_group_reservations(block_group); /* we are all good, lets return */ ins->objectid = ffe_ctl->search_start; ins->offset = ffe_ctl->num_bytes; trace_btrfs_reserve_extent(block_group, ffe_ctl); btrfs_release_block_group(block_group, ffe_ctl->delalloc); break; loop: if (!ffe_ctl->cached && ffe_ctl->loop > LOOP_CACHING_NOWAIT && !ffe_ctl->retry_uncached) { ffe_ctl->retry_uncached = true; btrfs_wait_block_group_cache_progress(block_group, ffe_ctl->num_bytes + ffe_ctl->empty_cluster + ffe_ctl->empty_size); goto have_block_group; } release_block_group(block_group, ffe_ctl, ffe_ctl->delalloc); cond_resched(); } up_read(&space_info->groups_sem); ret = find_free_extent_update_loop(fs_info, ins, ffe_ctl, 
full_search); if (ret > 0) goto search; if (ret == -ENOSPC && !cache_block_group_error) { /* * Use ffe_ctl->total_free_space as fallback if we can't find * any contiguous hole. */ if (!ffe_ctl->max_extent_size) ffe_ctl->max_extent_size = ffe_ctl->total_free_space; spin_lock(&space_info->lock); space_info->max_extent_size = ffe_ctl->max_extent_size; spin_unlock(&space_info->lock); ins->offset = ffe_ctl->max_extent_size; } else if (ret == -ENOSPC) { ret = cache_block_group_error; } return ret; } /* * btrfs_reserve_extent - entry point to the extent allocator. Tries to find a * hole that is at least as big as @num_bytes. * * @root - The root that will contain this extent * * @ram_bytes - The amount of space in ram that @num_bytes take. This * is used for accounting purposes. This value differs * from @num_bytes only in the case of compressed extents. * * @num_bytes - Number of bytes to allocate on-disk. * * @min_alloc_size - Indicates the minimum amount of space that the * allocator should try to satisfy. In some cases * @num_bytes may be larger than what is required and if * the filesystem is fragmented then allocation fails. * However, the presence of @min_alloc_size gives a * chance to try and satisfy the smaller allocation. * * @empty_size - A hint that you plan on doing more COW. This is the * size in bytes the allocator should try to find free * next to the block it returns. This is just a hint and * may be ignored by the allocator. * * @hint_byte - Hint to the allocator to start searching above the byte * address passed. It might be ignored. * * @ins - This key is modified to record the found hole. It will * have the following values: * ins->objectid == start position * ins->flags = BTRFS_EXTENT_ITEM_KEY * ins->offset == the size of the hole. * * @is_data - Boolean flag indicating whether an extent is * allocated for data (true) or metadata (false) * * @delalloc - Boolean flag indicating whether this allocation is for * delalloc or not. If 'true' data_rwsem of block groups * is going to be acquired. * * * Returns 0 when an allocation succeeded or < 0 when an error occurred. In * case -ENOSPC is returned then @ins->offset will contain the size of the * largest available hole the allocator managed to find. 
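 *
 * Hedged usage sketch (the sizes, hint and flags are made up; this is
 * not a call site copied from the tree):
 *
 *   struct btrfs_key ins = { 0 };
 *   int ret;
 *
 *   ret = btrfs_reserve_extent(root, SZ_1M, SZ_1M, fs_info->sectorsize,
 *                              0, 0, &ins, 1, 1);
 *
 * On success ins.objectid/ins.offset describe the reserved range; on
 * -ENOSPC ins.offset reports the largest hole that was available.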
*/ int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes, u64 num_bytes, u64 min_alloc_size, u64 empty_size, u64 hint_byte, struct btrfs_key *ins, int is_data, int delalloc) { struct btrfs_fs_info *fs_info = root->fs_info; struct find_free_extent_ctl ffe_ctl = {}; bool final_tried = num_bytes == min_alloc_size; u64 flags; int ret; bool for_treelog = (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID); bool for_data_reloc = (btrfs_is_data_reloc_root(root) && is_data); flags = get_alloc_profile_by_root(root, is_data); again: WARN_ON(num_bytes < fs_info->sectorsize); ffe_ctl.ram_bytes = ram_bytes; ffe_ctl.num_bytes = num_bytes; ffe_ctl.min_alloc_size = min_alloc_size; ffe_ctl.empty_size = empty_size; ffe_ctl.flags = flags; ffe_ctl.delalloc = delalloc; ffe_ctl.hint_byte = hint_byte; ffe_ctl.for_treelog = for_treelog; ffe_ctl.for_data_reloc = for_data_reloc; ret = find_free_extent(root, ins, &ffe_ctl); if (!ret && !is_data) { btrfs_dec_block_group_reservations(fs_info, ins->objectid); } else if (ret == -ENOSPC) { if (!final_tried && ins->offset) { num_bytes = min(num_bytes >> 1, ins->offset); num_bytes = round_down(num_bytes, fs_info->sectorsize); num_bytes = max(num_bytes, min_alloc_size); ram_bytes = num_bytes; if (num_bytes == min_alloc_size) final_tried = true; goto again; } else if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) { struct btrfs_space_info *sinfo; sinfo = btrfs_find_space_info(fs_info, flags); btrfs_err(fs_info, "allocation failed flags %llu, wanted %llu tree-log %d, relocation: %d", flags, num_bytes, for_treelog, for_data_reloc); if (sinfo) btrfs_dump_space_info(fs_info, sinfo, num_bytes, 1); } } return ret; } int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len, int delalloc) { struct btrfs_block_group *cache; cache = btrfs_lookup_block_group(fs_info, start); if (!cache) { btrfs_err(fs_info, "Unable to find block group for %llu", start); return -ENOSPC; } btrfs_add_free_space(cache, start, len); btrfs_free_reserved_bytes(cache, len, delalloc); trace_btrfs_reserved_extent_free(fs_info, start, len); btrfs_put_block_group(cache); return 0; } int btrfs_pin_reserved_extent(struct btrfs_trans_handle *trans, u64 start, u64 len) { struct btrfs_block_group *cache; int ret = 0; cache = btrfs_lookup_block_group(trans->fs_info, start); if (!cache) { btrfs_err(trans->fs_info, "unable to find block group for %llu", start); return -ENOSPC; } ret = pin_down_extent(trans, cache, start, len, 1); btrfs_put_block_group(cache); return ret; } static int alloc_reserved_extent(struct btrfs_trans_handle *trans, u64 bytenr, u64 num_bytes) { struct btrfs_fs_info *fs_info = trans->fs_info; int ret; ret = remove_from_free_space_tree(trans, bytenr, num_bytes); if (ret) return ret; ret = btrfs_update_block_group(trans, bytenr, num_bytes, true); if (ret) { ASSERT(!ret); btrfs_err(fs_info, "update block group failed for %llu %llu", bytenr, num_bytes); return ret; } trace_btrfs_reserved_extent_alloc(fs_info, bytenr, num_bytes); return 0; } static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans, u64 parent, u64 root_objectid, u64 flags, u64 owner, u64 offset, struct btrfs_key *ins, int ref_mod) { struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_root *extent_root; int ret; struct btrfs_extent_item *extent_item; struct btrfs_extent_inline_ref *iref; struct btrfs_path *path; struct extent_buffer *leaf; int type; u32 size; if (parent > 0) type = BTRFS_SHARED_DATA_REF_KEY; else type = BTRFS_EXTENT_DATA_REF_KEY; size = sizeof(*extent_item) + 
btrfs_extent_inline_ref_size(type); path = btrfs_alloc_path(); if (!path) return -ENOMEM; extent_root = btrfs_extent_root(fs_info, ins->objectid); ret = btrfs_insert_empty_item(trans, extent_root, path, ins, size); if (ret) { btrfs_free_path(path); return ret; } leaf = path->nodes[0]; extent_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); btrfs_set_extent_refs(leaf, extent_item, ref_mod); btrfs_set_extent_generation(leaf, extent_item, trans->transid); btrfs_set_extent_flags(leaf, extent_item, flags | BTRFS_EXTENT_FLAG_DATA); iref = (struct btrfs_extent_inline_ref *)(extent_item + 1); btrfs_set_extent_inline_ref_type(leaf, iref, type); if (parent > 0) { struct btrfs_shared_data_ref *ref; ref = (struct btrfs_shared_data_ref *)(iref + 1); btrfs_set_extent_inline_ref_offset(leaf, iref, parent); btrfs_set_shared_data_ref_count(leaf, ref, ref_mod); } else { struct btrfs_extent_data_ref *ref; ref = (struct btrfs_extent_data_ref *)(&iref->offset); btrfs_set_extent_data_ref_root(leaf, ref, root_objectid); btrfs_set_extent_data_ref_objectid(leaf, ref, owner); btrfs_set_extent_data_ref_offset(leaf, ref, offset); btrfs_set_extent_data_ref_count(leaf, ref, ref_mod); } btrfs_mark_buffer_dirty(path->nodes[0]); btrfs_free_path(path); return alloc_reserved_extent(trans, ins->objectid, ins->offset); } static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_node *node, struct btrfs_delayed_extent_op *extent_op) { struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_root *extent_root; int ret; struct btrfs_extent_item *extent_item; struct btrfs_key extent_key; struct btrfs_tree_block_info *block_info; struct btrfs_extent_inline_ref *iref; struct btrfs_path *path; struct extent_buffer *leaf; struct btrfs_delayed_tree_ref *ref; u32 size = sizeof(*extent_item) + sizeof(*iref); u64 flags = extent_op->flags_to_set; bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA); ref = btrfs_delayed_node_to_tree_ref(node); extent_key.objectid = node->bytenr; if (skinny_metadata) { extent_key.offset = ref->level; extent_key.type = BTRFS_METADATA_ITEM_KEY; } else { extent_key.offset = node->num_bytes; extent_key.type = BTRFS_EXTENT_ITEM_KEY; size += sizeof(*block_info); } path = btrfs_alloc_path(); if (!path) return -ENOMEM; extent_root = btrfs_extent_root(fs_info, extent_key.objectid); ret = btrfs_insert_empty_item(trans, extent_root, path, &extent_key, size); if (ret) { btrfs_free_path(path); return ret; } leaf = path->nodes[0]; extent_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); btrfs_set_extent_refs(leaf, extent_item, 1); btrfs_set_extent_generation(leaf, extent_item, trans->transid); btrfs_set_extent_flags(leaf, extent_item, flags | BTRFS_EXTENT_FLAG_TREE_BLOCK); if (skinny_metadata) { iref = (struct btrfs_extent_inline_ref *)(extent_item + 1); } else { block_info = (struct btrfs_tree_block_info *)(extent_item + 1); btrfs_set_tree_block_key(leaf, block_info, &extent_op->key); btrfs_set_tree_block_level(leaf, block_info, ref->level); iref = (struct btrfs_extent_inline_ref *)(block_info + 1); } if (node->type == BTRFS_SHARED_BLOCK_REF_KEY) { btrfs_set_extent_inline_ref_type(leaf, iref, BTRFS_SHARED_BLOCK_REF_KEY); btrfs_set_extent_inline_ref_offset(leaf, iref, ref->parent); } else { btrfs_set_extent_inline_ref_type(leaf, iref, BTRFS_TREE_BLOCK_REF_KEY); btrfs_set_extent_inline_ref_offset(leaf, iref, ref->root); } btrfs_mark_buffer_dirty(leaf); btrfs_free_path(path); return alloc_reserved_extent(trans, node->bytenr, 
fs_info->nodesize); } int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 owner, u64 offset, u64 ram_bytes, struct btrfs_key *ins) { struct btrfs_ref generic_ref = { 0 }; BUG_ON(root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID); btrfs_init_generic_ref(&generic_ref, BTRFS_ADD_DELAYED_EXTENT, ins->objectid, ins->offset, 0); btrfs_init_data_ref(&generic_ref, root->root_key.objectid, owner, offset, 0, false); btrfs_ref_tree_mod(root->fs_info, &generic_ref); return btrfs_add_delayed_data_ref(trans, &generic_ref, ram_bytes); } /* * this is used by the tree logging recovery code. It records that * an extent has been allocated and makes sure to clear the free * space cache bits as well */ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans, u64 root_objectid, u64 owner, u64 offset, struct btrfs_key *ins) { struct btrfs_fs_info *fs_info = trans->fs_info; int ret; struct btrfs_block_group *block_group; struct btrfs_space_info *space_info; /* * Mixed block groups will exclude before processing the log so we only * need to do the exclude dance if this fs isn't mixed. */ if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS)) { ret = __exclude_logged_extent(fs_info, ins->objectid, ins->offset); if (ret) return ret; } block_group = btrfs_lookup_block_group(fs_info, ins->objectid); if (!block_group) return -EINVAL; space_info = block_group->space_info; spin_lock(&space_info->lock); spin_lock(&block_group->lock); space_info->bytes_reserved += ins->offset; block_group->reserved += ins->offset; spin_unlock(&block_group->lock); spin_unlock(&space_info->lock); ret = alloc_reserved_file_extent(trans, 0, root_objectid, 0, owner, offset, ins, 1); if (ret) btrfs_pin_extent(trans, ins->objectid, ins->offset, 1); btrfs_put_block_group(block_group); return ret; } static struct extent_buffer * btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 bytenr, int level, u64 owner, enum btrfs_lock_nesting nest) { struct btrfs_fs_info *fs_info = root->fs_info; struct extent_buffer *buf; u64 lockdep_owner = owner; buf = btrfs_find_create_tree_block(fs_info, bytenr, owner, level); if (IS_ERR(buf)) return buf; /* * Extra safety check in case the extent tree is corrupted and extent * allocator chooses to use a tree block which is already used and * locked. */ if (buf->lock_owner == current->pid) { btrfs_err_rl(fs_info, "tree block %llu owner %llu already locked by pid=%d, extent tree corruption detected", buf->start, btrfs_header_owner(buf), current->pid); free_extent_buffer(buf); return ERR_PTR(-EUCLEAN); } /* * The reloc trees are just snapshots, so we need them to appear to be * just like any other fs tree WRT lockdep. * * The exception however is in replace_path() in relocation, where we * hold the lock on the original fs root and then search for the reloc * root. At that point we need to make sure any reloc root buffers are * set to the BTRFS_TREE_RELOC_OBJECTID lockdep class in order to make * lockdep happy. */ if (lockdep_owner == BTRFS_TREE_RELOC_OBJECTID && !test_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &root->state)) lockdep_owner = BTRFS_FS_TREE_OBJECTID; /* btrfs_clear_buffer_dirty() accesses generation field. */ btrfs_set_header_generation(buf, trans->transid); /* * This needs to stay, because we could allocate a freed block from an * old tree into a new tree, so we need to make sure this new block is * set to the appropriate level and owner. 
*/ btrfs_set_buffer_lockdep_class(lockdep_owner, buf, level); __btrfs_tree_lock(buf, nest); btrfs_clear_buffer_dirty(trans, buf); clear_bit(EXTENT_BUFFER_STALE, &buf->bflags); clear_bit(EXTENT_BUFFER_NO_CHECK, &buf->bflags); set_extent_buffer_uptodate(buf); memzero_extent_buffer(buf, 0, sizeof(struct btrfs_header)); btrfs_set_header_level(buf, level); btrfs_set_header_bytenr(buf, buf->start); btrfs_set_header_generation(buf, trans->transid); btrfs_set_header_backref_rev(buf, BTRFS_MIXED_BACKREF_REV); btrfs_set_header_owner(buf, owner); write_extent_buffer_fsid(buf, fs_info->fs_devices->metadata_uuid); write_extent_buffer_chunk_tree_uuid(buf, fs_info->chunk_tree_uuid); if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) { buf->log_index = root->log_transid % 2; /* * we allow two log transactions at a time, use different * EXTENT bit to differentiate dirty pages. */ if (buf->log_index == 0) set_extent_bit(&root->dirty_log_pages, buf->start, buf->start + buf->len - 1, EXTENT_DIRTY, NULL); else set_extent_bit(&root->dirty_log_pages, buf->start, buf->start + buf->len - 1, EXTENT_NEW, NULL); } else { buf->log_index = -1; set_extent_bit(&trans->transaction->dirty_pages, buf->start, buf->start + buf->len - 1, EXTENT_DIRTY, NULL); } /* this returns a buffer locked for blocking */ return buf; } /* * finds a free extent and does all the dirty work required for allocation * returns the tree buffer or an ERR_PTR on error. */ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 parent, u64 root_objectid, const struct btrfs_disk_key *key, int level, u64 hint, u64 empty_size, enum btrfs_lock_nesting nest) { struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_key ins; struct btrfs_block_rsv *block_rsv; struct extent_buffer *buf; struct btrfs_delayed_extent_op *extent_op; struct btrfs_ref generic_ref = { 0 }; u64 flags = 0; int ret; u32 blocksize = fs_info->nodesize; bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA); #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS if (btrfs_is_testing(fs_info)) { buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr, level, root_objectid, nest); if (!IS_ERR(buf)) root->alloc_bytenr += blocksize; return buf; } #endif block_rsv = btrfs_use_block_rsv(trans, root, blocksize); if (IS_ERR(block_rsv)) return ERR_CAST(block_rsv); ret = btrfs_reserve_extent(root, blocksize, blocksize, blocksize, empty_size, hint, &ins, 0, 0); if (ret) goto out_unuse; buf = btrfs_init_new_buffer(trans, root, ins.objectid, level, root_objectid, nest); if (IS_ERR(buf)) { ret = PTR_ERR(buf); goto out_free_reserved; } if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) { if (parent == 0) parent = ins.objectid; flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF; } else BUG_ON(parent > 0); if (root_objectid != BTRFS_TREE_LOG_OBJECTID) { extent_op = btrfs_alloc_delayed_extent_op(); if (!extent_op) { ret = -ENOMEM; goto out_free_buf; } if (key) memcpy(&extent_op->key, key, sizeof(extent_op->key)); else memset(&extent_op->key, 0, sizeof(extent_op->key)); extent_op->flags_to_set = flags; extent_op->update_key = skinny_metadata ? 
false : true; extent_op->update_flags = true; extent_op->level = level; btrfs_init_generic_ref(&generic_ref, BTRFS_ADD_DELAYED_EXTENT, ins.objectid, ins.offset, parent); btrfs_init_tree_ref(&generic_ref, level, root_objectid, root->root_key.objectid, false); btrfs_ref_tree_mod(fs_info, &generic_ref); ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, extent_op); if (ret) goto out_free_delayed; } return buf; out_free_delayed: btrfs_free_delayed_extent_op(extent_op); out_free_buf: btrfs_tree_unlock(buf); free_extent_buffer(buf); out_free_reserved: btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 0); out_unuse: btrfs_unuse_block_rsv(fs_info, block_rsv, blocksize); return ERR_PTR(ret); } struct walk_control { u64 refs[BTRFS_MAX_LEVEL]; u64 flags[BTRFS_MAX_LEVEL]; struct btrfs_key update_progress; struct btrfs_key drop_progress; int drop_level; int stage; int level; int shared_level; int update_ref; int keep_locks; int reada_slot; int reada_count; int restarted; }; #define DROP_REFERENCE 1 #define UPDATE_BACKREF 2 static noinline void reada_walk_down(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct walk_control *wc, struct btrfs_path *path) { struct btrfs_fs_info *fs_info = root->fs_info; u64 bytenr; u64 generation; u64 refs; u64 flags; u32 nritems; struct btrfs_key key; struct extent_buffer *eb; int ret; int slot; int nread = 0; if (path->slots[wc->level] < wc->reada_slot) { wc->reada_count = wc->reada_count * 2 / 3; wc->reada_count = max(wc->reada_count, 2); } else { wc->reada_count = wc->reada_count * 3 / 2; wc->reada_count = min_t(int, wc->reada_count, BTRFS_NODEPTRS_PER_BLOCK(fs_info)); } eb = path->nodes[wc->level]; nritems = btrfs_header_nritems(eb); for (slot = path->slots[wc->level]; slot < nritems; slot++) { if (nread >= wc->reada_count) break; cond_resched(); bytenr = btrfs_node_blockptr(eb, slot); generation = btrfs_node_ptr_generation(eb, slot); if (slot == path->slots[wc->level]) goto reada; if (wc->stage == UPDATE_BACKREF && generation <= root->root_key.offset) continue; /* We don't lock the tree block, it's OK to be racy here */ ret = btrfs_lookup_extent_info(trans, fs_info, bytenr, wc->level - 1, 1, &refs, &flags); /* We don't care about errors in readahead. */ if (ret < 0) continue; BUG_ON(refs == 0); if (wc->stage == DROP_REFERENCE) { if (refs == 1) goto reada; if (wc->level == 1 && (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) continue; if (!wc->update_ref || generation <= root->root_key.offset) continue; btrfs_node_key_to_cpu(eb, &key, slot); ret = btrfs_comp_cpu_keys(&key, &wc->update_progress); if (ret < 0) continue; } else { if (wc->level == 1 && (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) continue; } reada: btrfs_readahead_node_child(eb, slot); nread++; } wc->reada_slot = slot; } /* * helper to process tree block while walking down the tree. * * when wc->stage == UPDATE_BACKREF, this function updates * back refs for pointers in the block. * * NOTE: return value 1 means we should stop walking down. */ static noinline int walk_down_proc(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct walk_control *wc, int lookup_info) { struct btrfs_fs_info *fs_info = root->fs_info; int level = wc->level; struct extent_buffer *eb = path->nodes[level]; u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF; int ret; if (wc->stage == UPDATE_BACKREF && btrfs_header_owner(eb) != root->root_key.objectid) return 1; /* * when reference count of tree block is 1, it won't increase * again. once full backref flag is set, we never clear it. 
*/ if (lookup_info && ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) || (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) { BUG_ON(!path->locks[level]); ret = btrfs_lookup_extent_info(trans, fs_info, eb->start, level, 1, &wc->refs[level], &wc->flags[level]); BUG_ON(ret == -ENOMEM); if (ret) return ret; BUG_ON(wc->refs[level] == 0); } if (wc->stage == DROP_REFERENCE) { if (wc->refs[level] > 1) return 1; if (path->locks[level] && !wc->keep_locks) { btrfs_tree_unlock_rw(eb, path->locks[level]); path->locks[level] = 0; } return 0; } /* wc->stage == UPDATE_BACKREF */ if (!(wc->flags[level] & flag)) { BUG_ON(!path->locks[level]); ret = btrfs_inc_ref(trans, root, eb, 1); BUG_ON(ret); /* -ENOMEM */ ret = btrfs_dec_ref(trans, root, eb, 0); BUG_ON(ret); /* -ENOMEM */ ret = btrfs_set_disk_extent_flags(trans, eb, flag); BUG_ON(ret); /* -ENOMEM */ wc->flags[level] |= flag; } /* * the block is shared by multiple trees, so it's not good to * keep the tree lock */ if (path->locks[level] && level > 0) { btrfs_tree_unlock_rw(eb, path->locks[level]); path->locks[level] = 0; } return 0; } /* * This is used to verify a ref exists for this root to deal with a bug where we * would have a drop_progress key that hadn't been updated properly. */ static int check_ref_exists(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 bytenr, u64 parent, int level) { struct btrfs_path *path; struct btrfs_extent_inline_ref *iref; int ret; path = btrfs_alloc_path(); if (!path) return -ENOMEM; ret = lookup_extent_backref(trans, path, &iref, bytenr, root->fs_info->nodesize, parent, root->root_key.objectid, level, 0); btrfs_free_path(path); if (ret == -ENOENT) return 0; if (ret < 0) return ret; return 1; } /* * helper to process tree block pointer. * * when wc->stage == DROP_REFERENCE, this function checks * reference count of the block pointed to. if the block * is shared and we need update back refs for the subtree * rooted at the block, this function changes wc->stage to * UPDATE_BACKREF. if the block is shared and there is no * need to update back, this function drops the reference * to the block. * * NOTE: return value 1 means we should stop walking down. 
*/ static noinline int do_walk_down(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct walk_control *wc, int *lookup_info) { struct btrfs_fs_info *fs_info = root->fs_info; u64 bytenr; u64 generation; u64 parent; struct btrfs_tree_parent_check check = { 0 }; struct btrfs_key key; struct btrfs_ref ref = { 0 }; struct extent_buffer *next; int level = wc->level; int reada = 0; int ret = 0; bool need_account = false; generation = btrfs_node_ptr_generation(path->nodes[level], path->slots[level]); /* * if the lower level block was created before the snapshot * was created, we know there is no need to update back refs * for the subtree */ if (wc->stage == UPDATE_BACKREF && generation <= root->root_key.offset) { *lookup_info = 1; return 1; } bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]); check.level = level - 1; check.transid = generation; check.owner_root = root->root_key.objectid; check.has_first_key = true; btrfs_node_key_to_cpu(path->nodes[level], &check.first_key, path->slots[level]); next = find_extent_buffer(fs_info, bytenr); if (!next) { next = btrfs_find_create_tree_block(fs_info, bytenr, root->root_key.objectid, level - 1); if (IS_ERR(next)) return PTR_ERR(next); reada = 1; } btrfs_tree_lock(next); ret = btrfs_lookup_extent_info(trans, fs_info, bytenr, level - 1, 1, &wc->refs[level - 1], &wc->flags[level - 1]); if (ret < 0) goto out_unlock; if (unlikely(wc->refs[level - 1] == 0)) { btrfs_err(fs_info, "Missing references."); ret = -EIO; goto out_unlock; } *lookup_info = 0; if (wc->stage == DROP_REFERENCE) { if (wc->refs[level - 1] > 1) { need_account = true; if (level == 1 && (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF)) goto skip; if (!wc->update_ref || generation <= root->root_key.offset) goto skip; btrfs_node_key_to_cpu(path->nodes[level], &key, path->slots[level]); ret = btrfs_comp_cpu_keys(&key, &wc->update_progress); if (ret < 0) goto skip; wc->stage = UPDATE_BACKREF; wc->shared_level = level - 1; } } else { if (level == 1 && (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF)) goto skip; } if (!btrfs_buffer_uptodate(next, generation, 0)) { btrfs_tree_unlock(next); free_extent_buffer(next); next = NULL; *lookup_info = 1; } if (!next) { if (reada && level == 1) reada_walk_down(trans, root, wc, path); next = read_tree_block(fs_info, bytenr, &check); if (IS_ERR(next)) { return PTR_ERR(next); } else if (!extent_buffer_uptodate(next)) { free_extent_buffer(next); return -EIO; } btrfs_tree_lock(next); } level--; ASSERT(level == btrfs_header_level(next)); if (level != btrfs_header_level(next)) { btrfs_err(root->fs_info, "mismatched level"); ret = -EIO; goto out_unlock; } path->nodes[level] = next; path->slots[level] = 0; path->locks[level] = BTRFS_WRITE_LOCK; wc->level = level; if (wc->level == 1) wc->reada_slot = 0; return 0; skip: wc->refs[level - 1] = 0; wc->flags[level - 1] = 0; if (wc->stage == DROP_REFERENCE) { if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) { parent = path->nodes[level]->start; } else { ASSERT(root->root_key.objectid == btrfs_header_owner(path->nodes[level])); if (root->root_key.objectid != btrfs_header_owner(path->nodes[level])) { btrfs_err(root->fs_info, "mismatched block owner"); ret = -EIO; goto out_unlock; } parent = 0; } /* * If we had a drop_progress we need to verify the refs are set * as expected. If we find our ref then we know that from here * on out everything should be correct, and we can clear the * ->restarted flag. 
*/ if (wc->restarted) { ret = check_ref_exists(trans, root, bytenr, parent, level - 1); if (ret < 0) goto out_unlock; if (ret == 0) goto no_delete; ret = 0; wc->restarted = 0; } /* * Reloc tree doesn't contribute to qgroup numbers, and we have * already accounted them at merge time (replace_path), * thus we could skip expensive subtree trace here. */ if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID && need_account) { ret = btrfs_qgroup_trace_subtree(trans, next, generation, level - 1); if (ret) { btrfs_err_rl(fs_info, "Error %d accounting shared subtree. Quota is out of sync, rescan required.", ret); } } /* * We need to update the next key in our walk control so we can * update the drop_progress key accordingly. We don't care if * find_next_key doesn't find a key because that means we're at * the end and are going to clean up now. */ wc->drop_level = level; find_next_key(path, level, &wc->drop_progress); btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr, fs_info->nodesize, parent); btrfs_init_tree_ref(&ref, level - 1, root->root_key.objectid, 0, false); ret = btrfs_free_extent(trans, &ref); if (ret) goto out_unlock; } no_delete: *lookup_info = 1; ret = 1; out_unlock: btrfs_tree_unlock(next); free_extent_buffer(next); return ret; } /* * helper to process tree block while walking up the tree. * * when wc->stage == DROP_REFERENCE, this function drops * reference count on the block. * * when wc->stage == UPDATE_BACKREF, this function changes * wc->stage back to DROP_REFERENCE if we changed wc->stage * to UPDATE_BACKREF previously while processing the block. * * NOTE: return value 1 means we should stop walking up. */ static noinline int walk_up_proc(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct walk_control *wc) { struct btrfs_fs_info *fs_info = root->fs_info; int ret; int level = wc->level; struct extent_buffer *eb = path->nodes[level]; u64 parent = 0; if (wc->stage == UPDATE_BACKREF) { BUG_ON(wc->shared_level < level); if (level < wc->shared_level) goto out; ret = find_next_key(path, level + 1, &wc->update_progress); if (ret > 0) wc->update_ref = 0; wc->stage = DROP_REFERENCE; wc->shared_level = -1; path->slots[level] = 0; /* * check reference count again if the block isn't locked. * we should start walking down the tree again if reference * count is one. */ if (!path->locks[level]) { BUG_ON(level == 0); btrfs_tree_lock(eb); path->locks[level] = BTRFS_WRITE_LOCK; ret = btrfs_lookup_extent_info(trans, fs_info, eb->start, level, 1, &wc->refs[level], &wc->flags[level]); if (ret < 0) { btrfs_tree_unlock_rw(eb, path->locks[level]); path->locks[level] = 0; return ret; } BUG_ON(wc->refs[level] == 0); if (wc->refs[level] == 1) { btrfs_tree_unlock_rw(eb, path->locks[level]); path->locks[level] = 0; return 1; } } } /* wc->stage == DROP_REFERENCE */ BUG_ON(wc->refs[level] > 1 && !path->locks[level]); if (wc->refs[level] == 1) { if (level == 0) { if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) ret = btrfs_dec_ref(trans, root, eb, 1); else ret = btrfs_dec_ref(trans, root, eb, 0); BUG_ON(ret); /* -ENOMEM */ if (is_fstree(root->root_key.objectid)) { ret = btrfs_qgroup_trace_leaf_items(trans, eb); if (ret) { btrfs_err_rl(fs_info, "error %d accounting leaf items, quota is out of sync, rescan required", ret); } } } /* Make block locked assertion in btrfs_clear_buffer_dirty happy. 
*/ if (!path->locks[level]) { btrfs_tree_lock(eb); path->locks[level] = BTRFS_WRITE_LOCK; } btrfs_clear_buffer_dirty(trans, eb); } if (eb == root->node) { if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) parent = eb->start; else if (root->root_key.objectid != btrfs_header_owner(eb)) goto owner_mismatch; } else { if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF) parent = path->nodes[level + 1]->start; else if (root->root_key.objectid != btrfs_header_owner(path->nodes[level + 1])) goto owner_mismatch; } btrfs_free_tree_block(trans, btrfs_root_id(root), eb, parent, wc->refs[level] == 1); out: wc->refs[level] = 0; wc->flags[level] = 0; return 0; owner_mismatch: btrfs_err_rl(fs_info, "unexpected tree owner, have %llu expect %llu", btrfs_header_owner(eb), root->root_key.objectid); return -EUCLEAN; } static noinline int walk_down_tree(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct walk_control *wc) { int level = wc->level; int lookup_info = 1; int ret = 0; while (level >= 0) { ret = walk_down_proc(trans, root, path, wc, lookup_info); if (ret) break; if (level == 0) break; if (path->slots[level] >= btrfs_header_nritems(path->nodes[level])) break; ret = do_walk_down(trans, root, path, wc, &lookup_info); if (ret > 0) { path->slots[level]++; continue; } else if (ret < 0) break; level = wc->level; } return (ret == 1) ? 0 : ret; } static noinline int walk_up_tree(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct walk_control *wc, int max_level) { int level = wc->level; int ret; path->slots[level] = btrfs_header_nritems(path->nodes[level]); while (level < max_level && path->nodes[level]) { wc->level = level; if (path->slots[level] + 1 < btrfs_header_nritems(path->nodes[level])) { path->slots[level]++; return 0; } else { ret = walk_up_proc(trans, root, path, wc); if (ret > 0) return 0; if (ret < 0) return ret; if (path->locks[level]) { btrfs_tree_unlock_rw(path->nodes[level], path->locks[level]); path->locks[level] = 0; } free_extent_buffer(path->nodes[level]); path->nodes[level] = NULL; level++; } } return 1; } /* * drop a subvolume tree. * * this function traverses the tree freeing any blocks that only * referenced by the tree. * * when a shared tree block is found. this function decreases its * reference count by one. if update_ref is true, this function * also make sure backrefs for the shared block and all lower level * blocks are properly updated. * * If called with for_reloc == 0, may exit early with -EAGAIN */ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc) { const bool is_reloc_root = (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID); struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_path *path; struct btrfs_trans_handle *trans; struct btrfs_root *tree_root = fs_info->tree_root; struct btrfs_root_item *root_item = &root->root_item; struct walk_control *wc; struct btrfs_key key; int err = 0; int ret; int level; bool root_dropped = false; bool unfinished_drop = false; btrfs_debug(fs_info, "Drop subvolume %llu", root->root_key.objectid); path = btrfs_alloc_path(); if (!path) { err = -ENOMEM; goto out; } wc = kzalloc(sizeof(*wc), GFP_NOFS); if (!wc) { btrfs_free_path(path); err = -ENOMEM; goto out; } /* * Use join to avoid potential EINTR from transaction start. See * wait_reserve_ticket and the whole reservation callchain. 
*/ if (for_reloc) trans = btrfs_join_transaction(tree_root); else trans = btrfs_start_transaction(tree_root, 0); if (IS_ERR(trans)) { err = PTR_ERR(trans); goto out_free; } err = btrfs_run_delayed_items(trans); if (err) goto out_end_trans; /* * This will help us catch people modifying the fs tree while we're * dropping it. It is unsafe to mess with the fs tree while it's being * dropped as we unlock the root node and parent nodes as we walk down * the tree, assuming nothing will change. If something does change * then we'll have stale information and drop references to blocks we've * already dropped. */ set_bit(BTRFS_ROOT_DELETING, &root->state); unfinished_drop = test_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state); if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) { level = btrfs_header_level(root->node); path->nodes[level] = btrfs_lock_root_node(root); path->slots[level] = 0; path->locks[level] = BTRFS_WRITE_LOCK; memset(&wc->update_progress, 0, sizeof(wc->update_progress)); } else { btrfs_disk_key_to_cpu(&key, &root_item->drop_progress); memcpy(&wc->update_progress, &key, sizeof(wc->update_progress)); level = btrfs_root_drop_level(root_item); BUG_ON(level == 0); path->lowest_level = level; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); path->lowest_level = 0; if (ret < 0) { err = ret; goto out_end_trans; } WARN_ON(ret > 0); /* * unlock our path, this is safe because only this * function is allowed to delete this snapshot */ btrfs_unlock_up_safe(path, 0); level = btrfs_header_level(root->node); while (1) { btrfs_tree_lock(path->nodes[level]); path->locks[level] = BTRFS_WRITE_LOCK; ret = btrfs_lookup_extent_info(trans, fs_info, path->nodes[level]->start, level, 1, &wc->refs[level], &wc->flags[level]); if (ret < 0) { err = ret; goto out_end_trans; } BUG_ON(wc->refs[level] == 0); if (level == btrfs_root_drop_level(root_item)) break; btrfs_tree_unlock(path->nodes[level]); path->locks[level] = 0; WARN_ON(wc->refs[level] != 1); level--; } } wc->restarted = test_bit(BTRFS_ROOT_DEAD_TREE, &root->state); wc->level = level; wc->shared_level = -1; wc->stage = DROP_REFERENCE; wc->update_ref = update_ref; wc->keep_locks = 0; wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info); while (1) { ret = walk_down_tree(trans, root, path, wc); if (ret < 0) { btrfs_abort_transaction(trans, ret); err = ret; break; } ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL); if (ret < 0) { btrfs_abort_transaction(trans, ret); err = ret; break; } if (ret > 0) { BUG_ON(wc->stage != DROP_REFERENCE); break; } if (wc->stage == DROP_REFERENCE) { wc->drop_level = wc->level; btrfs_node_key_to_cpu(path->nodes[wc->drop_level], &wc->drop_progress, path->slots[wc->drop_level]); } btrfs_cpu_key_to_disk(&root_item->drop_progress, &wc->drop_progress); btrfs_set_root_drop_level(root_item, wc->drop_level); BUG_ON(wc->level == 0); if (btrfs_should_end_transaction(trans) || (!for_reloc && btrfs_need_cleaner_sleep(fs_info))) { ret = btrfs_update_root(trans, tree_root, &root->root_key, root_item); if (ret) { btrfs_abort_transaction(trans, ret); err = ret; goto out_end_trans; } if (!is_reloc_root) btrfs_set_last_root_drop_gen(fs_info, trans->transid); btrfs_end_transaction_throttle(trans); if (!for_reloc && btrfs_need_cleaner_sleep(fs_info)) { btrfs_debug(fs_info, "drop snapshot early exit"); err = -EAGAIN; goto out_free; } /* * Use join to avoid potential EINTR from transaction * start. See wait_reserve_ticket and the whole * reservation callchain. 
*/ if (for_reloc) trans = btrfs_join_transaction(tree_root); else trans = btrfs_start_transaction(tree_root, 0); if (IS_ERR(trans)) { err = PTR_ERR(trans); goto out_free; } } } btrfs_release_path(path); if (err) goto out_end_trans; ret = btrfs_del_root(trans, &root->root_key); if (ret) { btrfs_abort_transaction(trans, ret); err = ret; goto out_end_trans; } if (!is_reloc_root) { ret = btrfs_find_root(tree_root, &root->root_key, path, NULL, NULL); if (ret < 0) { btrfs_abort_transaction(trans, ret); err = ret; goto out_end_trans; } else if (ret > 0) { /* if we fail to delete the orphan item this time * around, it'll get picked up the next time. * * The most common failure here is just -ENOENT. */ btrfs_del_orphan_item(trans, tree_root, root->root_key.objectid); } } /* * This subvolume is going to be completely dropped, and won't be * recorded as dirty roots, thus pertrans meta rsv will not be freed at * commit transaction time. So free it here manually. */ btrfs_qgroup_convert_reserved_meta(root, INT_MAX); btrfs_qgroup_free_meta_all_pertrans(root); if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) btrfs_add_dropped_root(trans, root); else btrfs_put_root(root); root_dropped = true; out_end_trans: if (!is_reloc_root) btrfs_set_last_root_drop_gen(fs_info, trans->transid); btrfs_end_transaction_throttle(trans); out_free: kfree(wc); btrfs_free_path(path); out: /* * We were an unfinished drop root, check to see if there are any * pending, and if not clear and wake up any waiters. */ if (!err && unfinished_drop) btrfs_maybe_wake_unfinished_drop(fs_info); /* * So if we need to stop dropping the snapshot for whatever reason we * need to make sure to add it back to the dead root list so that we * keep trying to do the work later. This also cleans up roots if we * don't have it in the radix (like when we recover after a power fail * or unmount) so we don't leak memory. */ if (!for_reloc && !root_dropped) btrfs_add_dead_root(root); return err; } /* * drop subtree rooted at tree block 'node'. 
* * NOTE: this function will unlock and release tree block 'node' * only used by relocation code */ int btrfs_drop_subtree(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *node, struct extent_buffer *parent) { struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_path *path; struct walk_control *wc; int level; int parent_level; int ret = 0; int wret; BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID); path = btrfs_alloc_path(); if (!path) return -ENOMEM; wc = kzalloc(sizeof(*wc), GFP_NOFS); if (!wc) { btrfs_free_path(path); return -ENOMEM; } btrfs_assert_tree_write_locked(parent); parent_level = btrfs_header_level(parent); atomic_inc(&parent->refs); path->nodes[parent_level] = parent; path->slots[parent_level] = btrfs_header_nritems(parent); btrfs_assert_tree_write_locked(node); level = btrfs_header_level(node); path->nodes[level] = node; path->slots[level] = 0; path->locks[level] = BTRFS_WRITE_LOCK; wc->refs[parent_level] = 1; wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF; wc->level = level; wc->shared_level = -1; wc->stage = DROP_REFERENCE; wc->update_ref = 0; wc->keep_locks = 1; wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info); while (1) { wret = walk_down_tree(trans, root, path, wc); if (wret < 0) { ret = wret; break; } wret = walk_up_tree(trans, root, path, wc, parent_level); if (wret < 0) ret = wret; if (wret != 0) break; } kfree(wc); btrfs_free_path(path); return ret; } int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info, u64 start, u64 end) { return unpin_extent_range(fs_info, start, end, false); } /* * It used to be that old block groups would be left around forever. * Iterating over them would be enough to trim unused space. Since we * now automatically remove them, we also need to iterate over unallocated * space. * * We don't want a transaction for this since the discard may take a * substantial amount of time. We don't require that a transaction be * running, but we do need to take a running transaction into account * to ensure that we're not discarding chunks that were released or * allocated in the current transaction. * * Holding the chunks lock will prevent other threads from allocating * or releasing chunks, but it won't prevent a running transaction * from committing and releasing the memory that the pending chunks * list head uses. For that, we need to take a reference to the * transaction and hold the commit root sem. We only need to hold * it while performing the free space search since we have already * held back allocations. */ static int btrfs_trim_free_extents(struct btrfs_device *device, u64 *trimmed) { u64 start = BTRFS_DEVICE_RANGE_RESERVED, len = 0, end = 0; int ret; *trimmed = 0; /* Discard not supported = nothing to do. */ if (!bdev_max_discard_sectors(device->bdev)) return 0; /* Not writable = nothing to do. */ if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) return 0; /* No free space = nothing to do. 
*/ if (device->total_bytes <= device->bytes_used) return 0; ret = 0; while (1) { struct btrfs_fs_info *fs_info = device->fs_info; u64 bytes; ret = mutex_lock_interruptible(&fs_info->chunk_mutex); if (ret) break; find_first_clear_extent_bit(&device->alloc_state, start, &start, &end, CHUNK_TRIMMED | CHUNK_ALLOCATED); /* Check if there are any CHUNK_* bits left */ if (start > device->total_bytes) { WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG)); btrfs_warn_in_rcu(fs_info, "ignoring attempt to trim beyond device size: offset %llu length %llu device %s device size %llu", start, end - start + 1, btrfs_dev_name(device), device->total_bytes); mutex_unlock(&fs_info->chunk_mutex); ret = 0; break; } /* Ensure we skip the reserved space on each device. */ start = max_t(u64, start, BTRFS_DEVICE_RANGE_RESERVED); /* * If find_first_clear_extent_bit find a range that spans the * end of the device it will set end to -1, in this case it's up * to the caller to trim the value to the size of the device. */ end = min(end, device->total_bytes - 1); len = end - start + 1; /* We didn't find any extents */ if (!len) { mutex_unlock(&fs_info->chunk_mutex); ret = 0; break; } ret = btrfs_issue_discard(device->bdev, start, len, &bytes); if (!ret) set_extent_bit(&device->alloc_state, start, start + bytes - 1, CHUNK_TRIMMED, NULL); mutex_unlock(&fs_info->chunk_mutex); if (ret) break; start += len; *trimmed += bytes; if (fatal_signal_pending(current)) { ret = -ERESTARTSYS; break; } cond_resched(); } return ret; } /* * Trim the whole filesystem by: * 1) trimming the free space in each block group * 2) trimming the unallocated space on each device * * This will also continue trimming even if a block group or device encounters * an error. The return value will be the last error, or 0 if nothing bad * happens. */ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range) { struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; struct btrfs_block_group *cache = NULL; struct btrfs_device *device; u64 group_trimmed; u64 range_end = U64_MAX; u64 start; u64 end; u64 trimmed = 0; u64 bg_failed = 0; u64 dev_failed = 0; int bg_ret = 0; int dev_ret = 0; int ret = 0; if (range->start == U64_MAX) return -EINVAL; /* * Check range overflow if range->len is set. * The default range->len is U64_MAX. 
*/ if (range->len != U64_MAX && check_add_overflow(range->start, range->len, &range_end)) return -EINVAL; cache = btrfs_lookup_first_block_group(fs_info, range->start); for (; cache; cache = btrfs_next_block_group(cache)) { if (cache->start >= range_end) { btrfs_put_block_group(cache); break; } start = max(range->start, cache->start); end = min(range_end, cache->start + cache->length); if (end - start >= range->minlen) { if (!btrfs_block_group_done(cache)) { ret = btrfs_cache_block_group(cache, true); if (ret) { bg_failed++; bg_ret = ret; continue; } } ret = btrfs_trim_block_group(cache, &group_trimmed, start, end, range->minlen); trimmed += group_trimmed; if (ret) { bg_failed++; bg_ret = ret; continue; } } } if (bg_failed) btrfs_warn(fs_info, "failed to trim %llu block group(s), last error %d", bg_failed, bg_ret); mutex_lock(&fs_devices->device_list_mutex); list_for_each_entry(device, &fs_devices->devices, dev_list) { if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) continue; ret = btrfs_trim_free_extents(device, &group_trimmed); if (ret) { dev_failed++; dev_ret = ret; break; } trimmed += group_trimmed; } mutex_unlock(&fs_devices->device_list_mutex); if (dev_failed) btrfs_warn(fs_info, "failed to trim %llu device(s), last error %d", dev_failed, dev_ret); range->len = trimmed; if (bg_ret) return bg_ret; return dev_ret; }
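/*
 * Editorial sketch (not kernel code): the comment block above
 * btrfs_reserve_extent() describes how the allocator shrinks a request that
 * fails with -ENOSPC - halve num_bytes, cap it at the largest hole the search
 * reported, round down to a sector and never go below min_alloc_size. The
 * minimal user-space program below mimics that loop under stated assumptions:
 * try_alloc(), reserve_extent_sketch(), SECTORSIZE and the 1MiB "largest hole"
 * are all invented stand-ins for find_free_extent(), fs_info->sectorsize and
 * ins->offset, chosen only to make the retry logic visible.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SECTORSIZE 4096ULL

/* Hypothetical allocator: pretends the biggest free hole is 1MiB. */
static int try_alloc(uint64_t num_bytes, uint64_t *largest_hole)
{
	*largest_hole = 1024 * 1024;
	return (num_bytes <= *largest_hole) ? 0 : -ENOSPC;
}

static int reserve_extent_sketch(uint64_t num_bytes, uint64_t min_alloc_size)
{
	bool final_tried = (num_bytes == min_alloc_size);
	uint64_t largest_hole;
	int ret;

	for (;;) {
		ret = try_alloc(num_bytes, &largest_hole);
		if (ret != -ENOSPC || final_tried || largest_hole == 0)
			return ret;
		/*
		 * Shrink the request: half of it, but no larger than the
		 * biggest hole seen, rounded down to a sector and no smaller
		 * than min_alloc_size. The last allowed size is the final try.
		 */
		num_bytes = (num_bytes / 2 < largest_hole) ?
			    num_bytes / 2 : largest_hole;
		num_bytes &= ~(SECTORSIZE - 1);
		if (num_bytes <= min_alloc_size) {
			num_bytes = min_alloc_size;
			final_tried = true;
		}
		printf("retrying with %llu bytes\n",
		       (unsigned long long)num_bytes);
	}
}

int main(void)
{
	/* Ask for 8MiB but accept anything down to 64KiB. */
	return reserve_extent_sketch(8 * 1024 * 1024, 64 * 1024) ? 1 : 0;
}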
linux-master
fs/btrfs/extent-tree.c
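/*
 * Editorial sketch (not part of the test file that follows): the free space
 * cache tests below lean on the fact that one bitmap entry covers
 * BITS_PER_BITMAP * sectorsize bytes, with BITS_PER_BITMAP defined there as
 * PAGE_SIZE * 8. This stand-alone program only prints that geometry for an
 * assumed 4KiB page and 4KiB sector, which is why next_bitmap_offset in
 * test_bitmaps() works out to 128MiB on x86_64; SKETCH_PAGE_SIZE and the
 * hard-coded sector size are illustrative assumptions, not kernel values.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_SIZE	4096ULL
#define SKETCH_BITS_PER_BITMAP	(SKETCH_PAGE_SIZE * 8ULL)

int main(void)
{
	uint64_t sectorsize = 4096;
	uint64_t bytes_per_bitmap = SKETCH_BITS_PER_BITMAP * sectorsize;

	/*
	 * The first bitmap covers [0, bytes_per_bitmap); the next one starts
	 * right after it, which is the boundary the "straddling two bitmaps"
	 * test builds on.
	 */
	printf("one bitmap covers %llu MiB\n",
	       (unsigned long long)(bytes_per_bitmap >> 20));
	printf("next bitmap starts at offset %llu\n",
	       (unsigned long long)bytes_per_bitmap);
	return 0;
}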
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2013 Fusion IO. All rights reserved. */ #include <linux/slab.h> #include "btrfs-tests.h" #include "../ctree.h" #include "../disk-io.h" #include "../free-space-cache.h" #include "../block-group.h" #define BITS_PER_BITMAP (PAGE_SIZE * 8UL) /* * This test just does basic sanity checking, making sure we can add an extent * entry and remove space from either end and the middle, and make sure we can * remove space that covers adjacent extent entries. */ static int test_extents(struct btrfs_block_group *cache) { int ret = 0; test_msg("running extent only tests"); /* First just make sure we can remove an entire entry */ ret = btrfs_add_free_space(cache, 0, SZ_4M); if (ret) { test_err("error adding initial extents %d", ret); return ret; } ret = btrfs_remove_free_space(cache, 0, SZ_4M); if (ret) { test_err("error removing extent %d", ret); return ret; } if (test_check_exists(cache, 0, SZ_4M)) { test_err("full remove left some lingering space"); return -1; } /* Ok edge and middle cases now */ ret = btrfs_add_free_space(cache, 0, SZ_4M); if (ret) { test_err("error adding half extent %d", ret); return ret; } ret = btrfs_remove_free_space(cache, 3 * SZ_1M, SZ_1M); if (ret) { test_err("error removing tail end %d", ret); return ret; } ret = btrfs_remove_free_space(cache, 0, SZ_1M); if (ret) { test_err("error removing front end %d", ret); return ret; } ret = btrfs_remove_free_space(cache, SZ_2M, 4096); if (ret) { test_err("error removing middle piece %d", ret); return ret; } if (test_check_exists(cache, 0, SZ_1M)) { test_err("still have space at the front"); return -1; } if (test_check_exists(cache, SZ_2M, 4096)) { test_err("still have space in the middle"); return -1; } if (test_check_exists(cache, 3 * SZ_1M, SZ_1M)) { test_err("still have space at the end"); return -1; } /* Cleanup */ btrfs_remove_free_space_cache(cache); return 0; } static int test_bitmaps(struct btrfs_block_group *cache, u32 sectorsize) { u64 next_bitmap_offset; int ret; test_msg("running bitmap only tests"); ret = test_add_free_space_entry(cache, 0, SZ_4M, 1); if (ret) { test_err("couldn't create a bitmap entry %d", ret); return ret; } ret = btrfs_remove_free_space(cache, 0, SZ_4M); if (ret) { test_err("error removing bitmap full range %d", ret); return ret; } if (test_check_exists(cache, 0, SZ_4M)) { test_err("left some space in bitmap"); return -1; } ret = test_add_free_space_entry(cache, 0, SZ_4M, 1); if (ret) { test_err("couldn't add to our bitmap entry %d", ret); return ret; } ret = btrfs_remove_free_space(cache, SZ_1M, SZ_2M); if (ret) { test_err("couldn't remove middle chunk %d", ret); return ret; } /* * The first bitmap we have starts at offset 0 so the next one is just * at the end of the first bitmap. 
*/ next_bitmap_offset = (u64)(BITS_PER_BITMAP * sectorsize); /* Test a bit straddling two bitmaps */ ret = test_add_free_space_entry(cache, next_bitmap_offset - SZ_2M, SZ_4M, 1); if (ret) { test_err("couldn't add space that straddles two bitmaps %d", ret); return ret; } ret = btrfs_remove_free_space(cache, next_bitmap_offset - SZ_1M, SZ_2M); if (ret) { test_err("couldn't remove overlapping space %d", ret); return ret; } if (test_check_exists(cache, next_bitmap_offset - SZ_1M, SZ_2M)) { test_err("left some space when removing overlapping"); return -1; } btrfs_remove_free_space_cache(cache); return 0; } /* This is the high grade jackassery */ static int test_bitmaps_and_extents(struct btrfs_block_group *cache, u32 sectorsize) { u64 bitmap_offset = (u64)(BITS_PER_BITMAP * sectorsize); int ret; test_msg("running bitmap and extent tests"); /* * First let's do something simple, an extent at the same offset as the * bitmap, but the free space completely in the extent and then * completely in the bitmap. */ ret = test_add_free_space_entry(cache, SZ_4M, SZ_1M, 1); if (ret) { test_err("couldn't create bitmap entry %d", ret); return ret; } ret = test_add_free_space_entry(cache, 0, SZ_1M, 0); if (ret) { test_err("couldn't add extent entry %d", ret); return ret; } ret = btrfs_remove_free_space(cache, 0, SZ_1M); if (ret) { test_err("couldn't remove extent entry %d", ret); return ret; } if (test_check_exists(cache, 0, SZ_1M)) { test_err("left remnants after our remove"); return -1; } /* Now to add back the extent entry and remove from the bitmap */ ret = test_add_free_space_entry(cache, 0, SZ_1M, 0); if (ret) { test_err("couldn't re-add extent entry %d", ret); return ret; } ret = btrfs_remove_free_space(cache, SZ_4M, SZ_1M); if (ret) { test_err("couldn't remove from bitmap %d", ret); return ret; } if (test_check_exists(cache, SZ_4M, SZ_1M)) { test_err("left remnants in the bitmap"); return -1; } /* * Ok so a little more evil, extent entry and bitmap at the same offset, * removing an overlapping chunk. 
*/ ret = test_add_free_space_entry(cache, SZ_1M, SZ_4M, 1); if (ret) { test_err("couldn't add to a bitmap %d", ret); return ret; } ret = btrfs_remove_free_space(cache, SZ_512K, 3 * SZ_1M); if (ret) { test_err("couldn't remove overlapping space %d", ret); return ret; } if (test_check_exists(cache, SZ_512K, 3 * SZ_1M)) { test_err("left over pieces after removing overlapping"); return -1; } btrfs_remove_free_space_cache(cache); /* Now with the extent entry offset into the bitmap */ ret = test_add_free_space_entry(cache, SZ_4M, SZ_4M, 1); if (ret) { test_err("couldn't add space to the bitmap %d", ret); return ret; } ret = test_add_free_space_entry(cache, SZ_2M, SZ_2M, 0); if (ret) { test_err("couldn't add extent to the cache %d", ret); return ret; } ret = btrfs_remove_free_space(cache, 3 * SZ_1M, SZ_4M); if (ret) { test_err("problem removing overlapping space %d", ret); return ret; } if (test_check_exists(cache, 3 * SZ_1M, SZ_4M)) { test_err("left something behind when removing space"); return -1; } /* * This has blown up in the past, the extent entry starts before the * bitmap entry, but we're trying to remove an offset that falls * completely within the bitmap range and is in both the extent entry * and the bitmap entry, looks like this * * [ extent ] * [ bitmap ] * [ del ] */ btrfs_remove_free_space_cache(cache); ret = test_add_free_space_entry(cache, bitmap_offset + SZ_4M, SZ_4M, 1); if (ret) { test_err("couldn't add bitmap %d", ret); return ret; } ret = test_add_free_space_entry(cache, bitmap_offset - SZ_1M, 5 * SZ_1M, 0); if (ret) { test_err("couldn't add extent entry %d", ret); return ret; } ret = btrfs_remove_free_space(cache, bitmap_offset + SZ_1M, 5 * SZ_1M); if (ret) { test_err("failed to free our space %d", ret); return ret; } if (test_check_exists(cache, bitmap_offset + SZ_1M, 5 * SZ_1M)) { test_err("left stuff over"); return -1; } btrfs_remove_free_space_cache(cache); /* * This blew up before, we have part of the free space in a bitmap and * then the entirety of the rest of the space in an extent. This used * to return -EAGAIN back from btrfs_remove_extent, make sure this * doesn't happen. */ ret = test_add_free_space_entry(cache, SZ_1M, SZ_2M, 1); if (ret) { test_err("couldn't add bitmap entry %d", ret); return ret; } ret = test_add_free_space_entry(cache, 3 * SZ_1M, SZ_1M, 0); if (ret) { test_err("couldn't add extent entry %d", ret); return ret; } ret = btrfs_remove_free_space(cache, SZ_1M, 3 * SZ_1M); if (ret) { test_err("error removing bitmap and extent overlapping %d", ret); return ret; } btrfs_remove_free_space_cache(cache); return 0; } /* Used by test_steal_space_from_bitmap_to_extent(). */ static bool test_use_bitmap(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info) { return ctl->free_extents > 0; } /* Used by test_steal_space_from_bitmap_to_extent(). */ static int check_num_extents_and_bitmaps(const struct btrfs_block_group *cache, const int num_extents, const int num_bitmaps) { if (cache->free_space_ctl->free_extents != num_extents) { test_err( "incorrect # of extent entries in the cache: %d, expected %d", cache->free_space_ctl->free_extents, num_extents); return -EINVAL; } if (cache->free_space_ctl->total_bitmaps != num_bitmaps) { test_err( "incorrect # of extent entries in the cache: %d, expected %d", cache->free_space_ctl->total_bitmaps, num_bitmaps); return -EINVAL; } return 0; } /* Used by test_steal_space_from_bitmap_to_extent(). 
*/ static int check_cache_empty(struct btrfs_block_group *cache) { u64 offset; u64 max_extent_size; /* * Now lets confirm that there's absolutely no free space left to * allocate. */ if (cache->free_space_ctl->free_space != 0) { test_err("cache free space is not 0"); return -EINVAL; } /* And any allocation request, no matter how small, should fail now. */ offset = btrfs_find_space_for_alloc(cache, 0, 4096, 0, &max_extent_size); if (offset != 0) { test_err("space allocation did not fail, returned offset: %llu", offset); return -EINVAL; } /* And no extent nor bitmap entries in the cache anymore. */ return check_num_extents_and_bitmaps(cache, 0, 0); } /* * Before we were able to steal free space from a bitmap entry to an extent * entry, we could end up with 2 entries representing a contiguous free space. * One would be an extent entry and the other a bitmap entry. Since in order * to allocate space to a caller we use only 1 entry, we couldn't return that * whole range to the caller if it was requested. This forced the caller to * either assume ENOSPC or perform several smaller space allocations, which * wasn't optimal as they could be spread all over the block group while under * concurrency (extra overhead and fragmentation). * * This stealing approach is beneficial, since we always prefer to allocate * from extent entries, both for clustered and non-clustered allocation * requests. */ static int test_steal_space_from_bitmap_to_extent(struct btrfs_block_group *cache, u32 sectorsize) { int ret; u64 offset; u64 max_extent_size; const struct btrfs_free_space_op test_free_space_ops = { .use_bitmap = test_use_bitmap, }; const struct btrfs_free_space_op *orig_free_space_ops; test_msg("running space stealing from bitmap to extent tests"); /* * For this test, we want to ensure we end up with an extent entry * immediately adjacent to a bitmap entry, where the bitmap starts * at an offset where the extent entry ends. We keep adding and * removing free space to reach into this state, but to get there * we need to reach a point where marking new free space doesn't * result in adding new extent entries or merging the new space * with existing extent entries - the space ends up being marked * in an existing bitmap that covers the new free space range. * * To get there, we need to reach the threshold defined set at * cache->free_space_ctl->extents_thresh, which currently is * 256 extents on a x86_64 system at least, and a few other * conditions (check free_space_cache.c). Instead of making the * test much longer and complicated, use a "use_bitmap" operation * that forces use of bitmaps as soon as we have at least 1 * extent entry. 
*/ orig_free_space_ops = cache->free_space_ctl->op; cache->free_space_ctl->op = &test_free_space_ops; /* * Extent entry covering free space range [128Mb - 256Kb, 128Mb - 128Kb[ */ ret = test_add_free_space_entry(cache, SZ_128M - SZ_256K, SZ_128K, 0); if (ret) { test_err("couldn't add extent entry %d", ret); return ret; } /* Bitmap entry covering free space range [128Mb + 512Kb, 256Mb[ */ ret = test_add_free_space_entry(cache, SZ_128M + SZ_512K, SZ_128M - SZ_512K, 1); if (ret) { test_err("couldn't add bitmap entry %d", ret); return ret; } ret = check_num_extents_and_bitmaps(cache, 2, 1); if (ret) return ret; /* * Now make only the first 256Kb of the bitmap marked as free, so that * we end up with only the following ranges marked as free space: * * [128Mb - 256Kb, 128Mb - 128Kb[ * [128Mb + 512Kb, 128Mb + 768Kb[ */ ret = btrfs_remove_free_space(cache, SZ_128M + 768 * SZ_1K, SZ_128M - 768 * SZ_1K); if (ret) { test_err("failed to free part of bitmap space %d", ret); return ret; } /* Confirm that only those 2 ranges are marked as free. */ if (!test_check_exists(cache, SZ_128M - SZ_256K, SZ_128K)) { test_err("free space range missing"); return -ENOENT; } if (!test_check_exists(cache, SZ_128M + SZ_512K, SZ_256K)) { test_err("free space range missing"); return -ENOENT; } /* * Confirm that the bitmap range [128Mb + 768Kb, 256Mb[ isn't marked * as free anymore. */ if (test_check_exists(cache, SZ_128M + 768 * SZ_1K, SZ_128M - 768 * SZ_1K)) { test_err("bitmap region not removed from space cache"); return -EINVAL; } /* * Confirm that the region [128Mb + 256Kb, 128Mb + 512Kb[, which is * covered by the bitmap, isn't marked as free. */ if (test_check_exists(cache, SZ_128M + SZ_256K, SZ_256K)) { test_err("invalid bitmap region marked as free"); return -EINVAL; } /* * Confirm that the region [128Mb, 128Mb + 256Kb[, which is covered * by the bitmap too, isn't marked as free either. */ if (test_check_exists(cache, SZ_128M, SZ_256K)) { test_err("invalid bitmap region marked as free"); return -EINVAL; } /* * Now lets mark the region [128Mb, 128Mb + 512Kb[ as free too. But, * lets make sure the free space cache marks it as free in the bitmap, * and doesn't insert a new extent entry to represent this region. */ ret = btrfs_add_free_space(cache, SZ_128M, SZ_512K); if (ret) { test_err("error adding free space: %d", ret); return ret; } /* Confirm the region is marked as free. */ if (!test_check_exists(cache, SZ_128M, SZ_512K)) { test_err("bitmap region not marked as free"); return -ENOENT; } /* * Confirm that no new extent entries or bitmap entries were added to * the cache after adding that free space region. */ ret = check_num_extents_and_bitmaps(cache, 2, 1); if (ret) return ret; /* * Now lets add a small free space region to the right of the previous * one, which is not contiguous with it and is part of the bitmap too. * The goal is to test that the bitmap entry space stealing doesn't * steal this space region. */ ret = btrfs_add_free_space(cache, SZ_128M + SZ_16M, sectorsize); if (ret) { test_err("error adding free space: %d", ret); return ret; } /* * Confirm that no new extent entries or bitmap entries were added to * the cache after adding that free space region. */ ret = check_num_extents_and_bitmaps(cache, 2, 1); if (ret) return ret; /* * Now mark the region [128Mb - 128Kb, 128Mb[ as free too. This will * expand the range covered by the existing extent entry that represents * the free space [128Mb - 256Kb, 128Mb - 128Kb[. 
*/ ret = btrfs_add_free_space(cache, SZ_128M - SZ_128K, SZ_128K); if (ret) { test_err("error adding free space: %d", ret); return ret; } /* Confirm the region is marked as free. */ if (!test_check_exists(cache, SZ_128M - SZ_128K, SZ_128K)) { test_err("extent region not marked as free"); return -ENOENT; } /* * Confirm that our extent entry didn't stole all free space from the * bitmap, because of the small 4Kb free space region. */ ret = check_num_extents_and_bitmaps(cache, 2, 1); if (ret) return ret; /* * So now we have the range [128Mb - 256Kb, 128Mb + 768Kb[ as free * space. Without stealing bitmap free space into extent entry space, * we would have all this free space represented by 2 entries in the * cache: * * extent entry covering range: [128Mb - 256Kb, 128Mb[ * bitmap entry covering range: [128Mb, 128Mb + 768Kb[ * * Attempting to allocate the whole free space (1Mb) would fail, because * we can't allocate from multiple entries. * With the bitmap free space stealing, we get a single extent entry * that represents the 1Mb free space, and therefore we're able to * allocate the whole free space at once. */ if (!test_check_exists(cache, SZ_128M - SZ_256K, SZ_1M)) { test_err("expected region not marked as free"); return -ENOENT; } if (cache->free_space_ctl->free_space != (SZ_1M + sectorsize)) { test_err("cache free space is not 1Mb + %u", sectorsize); return -EINVAL; } offset = btrfs_find_space_for_alloc(cache, 0, SZ_1M, 0, &max_extent_size); if (offset != (SZ_128M - SZ_256K)) { test_err( "failed to allocate 1Mb from space cache, returned offset is: %llu", offset); return -EINVAL; } /* * All that remains is a sectorsize free space region in a bitmap. * Confirm. */ ret = check_num_extents_and_bitmaps(cache, 1, 1); if (ret) return ret; if (cache->free_space_ctl->free_space != sectorsize) { test_err("cache free space is not %u", sectorsize); return -EINVAL; } offset = btrfs_find_space_for_alloc(cache, 0, sectorsize, 0, &max_extent_size); if (offset != (SZ_128M + SZ_16M)) { test_err("failed to allocate %u, returned offset : %llu", sectorsize, offset); return -EINVAL; } ret = check_cache_empty(cache); if (ret) return ret; btrfs_remove_free_space_cache(cache); /* * Now test a similar scenario, but where our extent entry is located * to the right of the bitmap entry, so that we can check that stealing * space from a bitmap to the front of an extent entry works. */ /* * Extent entry covering free space range [128Mb + 128Kb, 128Mb + 256Kb[ */ ret = test_add_free_space_entry(cache, SZ_128M + SZ_128K, SZ_128K, 0); if (ret) { test_err("couldn't add extent entry %d", ret); return ret; } /* Bitmap entry covering free space range [0, 128Mb - 512Kb[ */ ret = test_add_free_space_entry(cache, 0, SZ_128M - SZ_512K, 1); if (ret) { test_err("couldn't add bitmap entry %d", ret); return ret; } ret = check_num_extents_and_bitmaps(cache, 2, 1); if (ret) return ret; /* * Now make only the last 256Kb of the bitmap marked as free, so that * we end up with only the following ranges marked as free space: * * [128Mb + 128b, 128Mb + 256Kb[ * [128Mb - 768Kb, 128Mb - 512Kb[ */ ret = btrfs_remove_free_space(cache, 0, SZ_128M - 768 * SZ_1K); if (ret) { test_err("failed to free part of bitmap space %d", ret); return ret; } /* Confirm that only those 2 ranges are marked as free. 
*/ if (!test_check_exists(cache, SZ_128M + SZ_128K, SZ_128K)) { test_err("free space range missing"); return -ENOENT; } if (!test_check_exists(cache, SZ_128M - 768 * SZ_1K, SZ_256K)) { test_err("free space range missing"); return -ENOENT; } /* * Confirm that the bitmap range [0, 128Mb - 768Kb[ isn't marked * as free anymore. */ if (test_check_exists(cache, 0, SZ_128M - 768 * SZ_1K)) { test_err("bitmap region not removed from space cache"); return -EINVAL; } /* * Confirm that the region [128Mb - 512Kb, 128Mb[, which is * covered by the bitmap, isn't marked as free. */ if (test_check_exists(cache, SZ_128M - SZ_512K, SZ_512K)) { test_err("invalid bitmap region marked as free"); return -EINVAL; } /* * Now lets mark the region [128Mb - 512Kb, 128Mb[ as free too. But, * lets make sure the free space cache marks it as free in the bitmap, * and doesn't insert a new extent entry to represent this region. */ ret = btrfs_add_free_space(cache, SZ_128M - SZ_512K, SZ_512K); if (ret) { test_err("error adding free space: %d", ret); return ret; } /* Confirm the region is marked as free. */ if (!test_check_exists(cache, SZ_128M - SZ_512K, SZ_512K)) { test_err("bitmap region not marked as free"); return -ENOENT; } /* * Confirm that no new extent entries or bitmap entries were added to * the cache after adding that free space region. */ ret = check_num_extents_and_bitmaps(cache, 2, 1); if (ret) return ret; /* * Now lets add a small free space region to the left of the previous * one, which is not contiguous with it and is part of the bitmap too. * The goal is to test that the bitmap entry space stealing doesn't * steal this space region. */ ret = btrfs_add_free_space(cache, SZ_32M, 2 * sectorsize); if (ret) { test_err("error adding free space: %d", ret); return ret; } /* * Now mark the region [128Mb, 128Mb + 128Kb[ as free too. This will * expand the range covered by the existing extent entry that represents * the free space [128Mb + 128Kb, 128Mb + 256Kb[. */ ret = btrfs_add_free_space(cache, SZ_128M, SZ_128K); if (ret) { test_err("error adding free space: %d", ret); return ret; } /* Confirm the region is marked as free. */ if (!test_check_exists(cache, SZ_128M, SZ_128K)) { test_err("extent region not marked as free"); return -ENOENT; } /* * Confirm that our extent entry didn't stole all free space from the * bitmap, because of the small 2 * sectorsize free space region. */ ret = check_num_extents_and_bitmaps(cache, 2, 1); if (ret) return ret; /* * So now we have the range [128Mb - 768Kb, 128Mb + 256Kb[ as free * space. Without stealing bitmap free space into extent entry space, * we would have all this free space represented by 2 entries in the * cache: * * extent entry covering range: [128Mb, 128Mb + 256Kb[ * bitmap entry covering range: [128Mb - 768Kb, 128Mb[ * * Attempting to allocate the whole free space (1Mb) would fail, because * we can't allocate from multiple entries. * With the bitmap free space stealing, we get a single extent entry * that represents the 1Mb free space, and therefore we're able to * allocate the whole free space at once. 
*/ if (!test_check_exists(cache, SZ_128M - 768 * SZ_1K, SZ_1M)) { test_err("expected region not marked as free"); return -ENOENT; } if (cache->free_space_ctl->free_space != (SZ_1M + 2 * sectorsize)) { test_err("cache free space is not 1Mb + %u", 2 * sectorsize); return -EINVAL; } offset = btrfs_find_space_for_alloc(cache, 0, SZ_1M, 0, &max_extent_size); if (offset != (SZ_128M - 768 * SZ_1K)) { test_err( "failed to allocate 1Mb from space cache, returned offset is: %llu", offset); return -EINVAL; } /* * All that remains is 2 * sectorsize free space region * in a bitmap. Confirm. */ ret = check_num_extents_and_bitmaps(cache, 1, 1); if (ret) return ret; if (cache->free_space_ctl->free_space != 2 * sectorsize) { test_err("cache free space is not %u", 2 * sectorsize); return -EINVAL; } offset = btrfs_find_space_for_alloc(cache, 0, 2 * sectorsize, 0, &max_extent_size); if (offset != SZ_32M) { test_err("failed to allocate %u, offset: %llu", 2 * sectorsize, offset); return -EINVAL; } ret = check_cache_empty(cache); if (ret) return ret; cache->free_space_ctl->op = orig_free_space_ops; btrfs_remove_free_space_cache(cache); return 0; } static bool bytes_index_use_bitmap(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info) { return true; } static int test_bytes_index(struct btrfs_block_group *cache, u32 sectorsize) { const struct btrfs_free_space_op test_free_space_ops = { .use_bitmap = bytes_index_use_bitmap, }; const struct btrfs_free_space_op *orig_free_space_ops; struct btrfs_free_space_ctl *ctl = cache->free_space_ctl; struct btrfs_free_space *entry; struct rb_node *node; u64 offset, max_extent_size, bytes; int ret, i; test_msg("running bytes index tests"); /* First just validate that it does everything in order. */ offset = 0; for (i = 0; i < 10; i++) { bytes = (i + 1) * SZ_1M; ret = test_add_free_space_entry(cache, offset, bytes, 0); if (ret) { test_err("couldn't add extent entry %d\n", ret); return ret; } offset += bytes + sectorsize; } for (node = rb_first_cached(&ctl->free_space_bytes), i = 9; node; node = rb_next(node), i--) { entry = rb_entry(node, struct btrfs_free_space, bytes_index); bytes = (i + 1) * SZ_1M; if (entry->bytes != bytes) { test_err("invalid bytes index order, found %llu expected %llu", entry->bytes, bytes); return -EINVAL; } } /* Now validate bitmaps do the correct thing. */ btrfs_remove_free_space_cache(cache); for (i = 0; i < 2; i++) { offset = i * BITS_PER_BITMAP * sectorsize; bytes = (i + 1) * SZ_1M; ret = test_add_free_space_entry(cache, offset, bytes, 1); if (ret) { test_err("couldn't add bitmap entry"); return ret; } } for (node = rb_first_cached(&ctl->free_space_bytes), i = 1; node; node = rb_next(node), i--) { entry = rb_entry(node, struct btrfs_free_space, bytes_index); bytes = (i + 1) * SZ_1M; if (entry->bytes != bytes) { test_err("invalid bytes index order, found %llu expected %llu", entry->bytes, bytes); return -EINVAL; } } /* Now validate bitmaps with different ->max_extent_size. */ btrfs_remove_free_space_cache(cache); orig_free_space_ops = cache->free_space_ctl->op; cache->free_space_ctl->op = &test_free_space_ops; ret = test_add_free_space_entry(cache, 0, sectorsize, 1); if (ret) { test_err("couldn't add bitmap entry"); return ret; } offset = BITS_PER_BITMAP * sectorsize; ret = test_add_free_space_entry(cache, offset, sectorsize, 1); if (ret) { test_err("couldn't add bitmap_entry"); return ret; } /* * Now set a bunch of sectorsize extents in the first entry so it's * ->bytes is large. 
*/ for (i = 2; i < 20; i += 2) { offset = sectorsize * i; ret = btrfs_add_free_space(cache, offset, sectorsize); if (ret) { test_err("error populating sparse bitmap %d", ret); return ret; } } /* * Now set a contiguous extent in the second bitmap so its * ->max_extent_size is larger than the first bitmap's. */ offset = (BITS_PER_BITMAP * sectorsize) + sectorsize; ret = btrfs_add_free_space(cache, offset, sectorsize); if (ret) { test_err("error adding contiguous extent %d", ret); return ret; } /* * Since we don't set ->max_extent_size unless we search, everything * should be indexed on bytes. */ entry = rb_entry(rb_first_cached(&ctl->free_space_bytes), struct btrfs_free_space, bytes_index); if (entry->bytes != (10 * sectorsize)) { test_err("error, wrong entry in the first slot in bytes_index"); return -EINVAL; } max_extent_size = 0; offset = btrfs_find_space_for_alloc(cache, cache->start, sectorsize * 3, 0, &max_extent_size); if (offset != 0) { test_err("found space to alloc even though we don't have enough space"); return -EINVAL; } if (max_extent_size != (2 * sectorsize)) { test_err("got the wrong max_extent size %llu expected %llu", max_extent_size, (unsigned long long)(2 * sectorsize)); return -EINVAL; } /* * The search should have re-arranged the bytes index to use the * ->max_extent_size, validate it's now what we expect it to be. */ entry = rb_entry(rb_first_cached(&ctl->free_space_bytes), struct btrfs_free_space, bytes_index); if (entry->bytes != (2 * sectorsize)) { test_err("error, the bytes index wasn't recalculated properly"); return -EINVAL; } /* Add another sectorsize to re-arrange the tree back to ->bytes. */ offset = (BITS_PER_BITMAP * sectorsize) - sectorsize; ret = btrfs_add_free_space(cache, offset, sectorsize); if (ret) { test_err("error adding extent to the sparse entry %d", ret); return ret; } entry = rb_entry(rb_first_cached(&ctl->free_space_bytes), struct btrfs_free_space, bytes_index); if (entry->bytes != (11 * sectorsize)) { test_err("error, wrong entry in the first slot in bytes_index"); return -EINVAL; } /* * Now make sure we find our correct entry after searching that will * result in a re-arranging of the tree. */ max_extent_size = 0; offset = btrfs_find_space_for_alloc(cache, cache->start, sectorsize * 2, 0, &max_extent_size); if (offset != (BITS_PER_BITMAP * sectorsize)) { test_err("error, found %llu instead of %llu for our alloc", offset, (unsigned long long)(BITS_PER_BITMAP * sectorsize)); return -EINVAL; } cache->free_space_ctl->op = orig_free_space_ops; btrfs_remove_free_space_cache(cache); return 0; } int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize) { struct btrfs_fs_info *fs_info; struct btrfs_block_group *cache; struct btrfs_root *root = NULL; int ret = -ENOMEM; test_msg("running btrfs free space cache tests"); fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize); if (!fs_info) { test_std_err(TEST_ALLOC_FS_INFO); return -ENOMEM; } /* * For ppc64 (with 64k page size), bytes per bitmap might be * larger than 1G. To make bitmap test available in ppc64, * alloc dummy block group whose size crosses bitmaps. 
*/ cache = btrfs_alloc_dummy_block_group(fs_info, BITS_PER_BITMAP * sectorsize + PAGE_SIZE); if (!cache) { test_std_err(TEST_ALLOC_BLOCK_GROUP); btrfs_free_dummy_fs_info(fs_info); return 0; } root = btrfs_alloc_dummy_root(fs_info); if (IS_ERR(root)) { test_std_err(TEST_ALLOC_ROOT); ret = PTR_ERR(root); goto out; } root->root_key.objectid = BTRFS_EXTENT_TREE_OBJECTID; root->root_key.type = BTRFS_ROOT_ITEM_KEY; root->root_key.offset = 0; btrfs_global_root_insert(root); ret = test_extents(cache); if (ret) goto out; ret = test_bitmaps(cache, sectorsize); if (ret) goto out; ret = test_bitmaps_and_extents(cache, sectorsize); if (ret) goto out; ret = test_steal_space_from_bitmap_to_extent(cache, sectorsize); if (ret) goto out; ret = test_bytes_index(cache, sectorsize); out: btrfs_free_dummy_block_group(cache); btrfs_free_dummy_root(root); btrfs_free_dummy_fs_info(fs_info); return ret; }
linux-master
fs/btrfs/tests/free-space-tests.c
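The free space cache test above hinges on bitmap free space being merged ("stolen") into an adjacent extent entry so that one contiguous allocation can be satisfied by a single entry. Below is a minimal userspace sketch of that idea only, not kernel code: free space is modeled as a per-sector bitmap and a contiguous run is searched the way a merged entry would satisfy a 1MiB allocation. All names, the sector size, and the region size are assumptions for illustration.

/*
 * Illustrative userspace sketch only (not btrfs code): a per-sector free
 * space bitmap plus a contiguous-run search, mirroring the "steal bitmap
 * space into an extent entry" behaviour exercised above.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SIZE	4096u
#define NUM_SECTORS	1024u		/* models a 4MiB region (assumed) */

static bool free_map[NUM_SECTORS];

static void mark_free(uint32_t start_sector, uint32_t nr)
{
	for (uint32_t i = 0; i < nr; i++)
		free_map[start_sector + i] = true;
}

/* Return the first sector of a free run of at least nr sectors, or -1. */
static long find_contiguous(uint32_t nr)
{
	uint32_t run = 0;

	for (uint32_t i = 0; i < NUM_SECTORS; i++) {
		run = free_map[i] ? run + 1 : 0;
		if (run >= nr)
			return (long)(i + 1 - nr);
	}
	return -1;
}

int main(void)
{
	/*
	 * Two adjacent free regions, as if one were tracked by a bitmap
	 * entry and the other by an extent entry.
	 */
	mark_free(0, 64);	/* 256KiB */
	mark_free(64, 192);	/* 768KiB */

	/* 256 sectors * 4KiB = 1MiB, only satisfiable once the runs merge. */
	long start = find_contiguous(256);

	printf("1MiB run starts at sector %ld\n", start);
	return start == 0 ? 0 : 1;
}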
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2015 Facebook. All rights reserved. */ #include <linux/types.h> #include "btrfs-tests.h" #include "../ctree.h" #include "../disk-io.h" #include "../free-space-tree.h" #include "../transaction.h" #include "../block-group.h" #include "../accessors.h" struct free_space_extent { u64 start; u64 length; }; static int __check_free_space_extents(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, struct btrfs_block_group *cache, struct btrfs_path *path, const struct free_space_extent * const extents, unsigned int num_extents) { struct btrfs_free_space_info *info; struct btrfs_key key; int prev_bit = 0, bit; u64 extent_start = 0, offset, end; u32 flags, extent_count; unsigned int i; int ret; info = search_free_space_info(trans, cache, path, 0); if (IS_ERR(info)) { test_err("could not find free space info"); ret = PTR_ERR(info); goto out; } flags = btrfs_free_space_flags(path->nodes[0], info); extent_count = btrfs_free_space_extent_count(path->nodes[0], info); if (extent_count != num_extents) { test_err("extent count is wrong"); ret = -EINVAL; goto out; } if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) { if (path->slots[0] != 0) goto invalid; end = cache->start + cache->length; i = 0; while (++path->slots[0] < btrfs_header_nritems(path->nodes[0])) { btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); if (key.type != BTRFS_FREE_SPACE_BITMAP_KEY) goto invalid; offset = key.objectid; while (offset < key.objectid + key.offset) { bit = free_space_test_bit(cache, path, offset); if (prev_bit == 0 && bit == 1) { extent_start = offset; } else if (prev_bit == 1 && bit == 0) { if (i >= num_extents || extent_start != extents[i].start || offset - extent_start != extents[i].length) goto invalid; i++; } prev_bit = bit; offset += fs_info->sectorsize; } } if (prev_bit == 1) { if (i >= num_extents || extent_start != extents[i].start || end - extent_start != extents[i].length) goto invalid; i++; } if (i != num_extents) goto invalid; } else { if (btrfs_header_nritems(path->nodes[0]) != num_extents + 1 || path->slots[0] != 0) goto invalid; for (i = 0; i < num_extents; i++) { path->slots[0]++; btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); if (key.type != BTRFS_FREE_SPACE_EXTENT_KEY || key.objectid != extents[i].start || key.offset != extents[i].length) goto invalid; } } ret = 0; out: btrfs_release_path(path); return ret; invalid: test_err("free space tree is invalid"); ret = -EINVAL; goto out; } static int check_free_space_extents(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, struct btrfs_block_group *cache, struct btrfs_path *path, const struct free_space_extent * const extents, unsigned int num_extents) { struct btrfs_free_space_info *info; u32 flags; int ret; info = search_free_space_info(trans, cache, path, 0); if (IS_ERR(info)) { test_err("could not find free space info"); btrfs_release_path(path); return PTR_ERR(info); } flags = btrfs_free_space_flags(path->nodes[0], info); btrfs_release_path(path); ret = __check_free_space_extents(trans, fs_info, cache, path, extents, num_extents); if (ret) return ret; /* Flip it to the other format and check that for good measure. 
*/ if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) { ret = convert_free_space_to_extents(trans, cache, path); if (ret) { test_err("could not convert to extents"); return ret; } } else { ret = convert_free_space_to_bitmaps(trans, cache, path); if (ret) { test_err("could not convert to bitmaps"); return ret; } } return __check_free_space_extents(trans, fs_info, cache, path, extents, num_extents); } static int test_empty_block_group(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, struct btrfs_block_group *cache, struct btrfs_path *path, u32 alignment) { const struct free_space_extent extents[] = { {cache->start, cache->length}, }; return check_free_space_extents(trans, fs_info, cache, path, extents, ARRAY_SIZE(extents)); } static int test_remove_all(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, struct btrfs_block_group *cache, struct btrfs_path *path, u32 alignment) { const struct free_space_extent extents[] = {}; int ret; ret = __remove_from_free_space_tree(trans, cache, path, cache->start, cache->length); if (ret) { test_err("could not remove free space"); return ret; } return check_free_space_extents(trans, fs_info, cache, path, extents, ARRAY_SIZE(extents)); } static int test_remove_beginning(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, struct btrfs_block_group *cache, struct btrfs_path *path, u32 alignment) { const struct free_space_extent extents[] = { {cache->start + alignment, cache->length - alignment}, }; int ret; ret = __remove_from_free_space_tree(trans, cache, path, cache->start, alignment); if (ret) { test_err("could not remove free space"); return ret; } return check_free_space_extents(trans, fs_info, cache, path, extents, ARRAY_SIZE(extents)); } static int test_remove_end(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, struct btrfs_block_group *cache, struct btrfs_path *path, u32 alignment) { const struct free_space_extent extents[] = { {cache->start, cache->length - alignment}, }; int ret; ret = __remove_from_free_space_tree(trans, cache, path, cache->start + cache->length - alignment, alignment); if (ret) { test_err("could not remove free space"); return ret; } return check_free_space_extents(trans, fs_info, cache, path, extents, ARRAY_SIZE(extents)); } static int test_remove_middle(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, struct btrfs_block_group *cache, struct btrfs_path *path, u32 alignment) { const struct free_space_extent extents[] = { {cache->start, alignment}, {cache->start + 2 * alignment, cache->length - 2 * alignment}, }; int ret; ret = __remove_from_free_space_tree(trans, cache, path, cache->start + alignment, alignment); if (ret) { test_err("could not remove free space"); return ret; } return check_free_space_extents(trans, fs_info, cache, path, extents, ARRAY_SIZE(extents)); } static int test_merge_left(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, struct btrfs_block_group *cache, struct btrfs_path *path, u32 alignment) { const struct free_space_extent extents[] = { {cache->start, 2 * alignment}, }; int ret; ret = __remove_from_free_space_tree(trans, cache, path, cache->start, cache->length); if (ret) { test_err("could not remove free space"); return ret; } ret = __add_to_free_space_tree(trans, cache, path, cache->start, alignment); if (ret) { test_err("could not add free space"); return ret; } ret = __add_to_free_space_tree(trans, cache, path, cache->start + alignment, alignment); if (ret) { test_err("could not add free space"); return ret; } return 
check_free_space_extents(trans, fs_info, cache, path, extents, ARRAY_SIZE(extents)); } static int test_merge_right(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, struct btrfs_block_group *cache, struct btrfs_path *path, u32 alignment) { const struct free_space_extent extents[] = { {cache->start + alignment, 2 * alignment}, }; int ret; ret = __remove_from_free_space_tree(trans, cache, path, cache->start, cache->length); if (ret) { test_err("could not remove free space"); return ret; } ret = __add_to_free_space_tree(trans, cache, path, cache->start + 2 * alignment, alignment); if (ret) { test_err("could not add free space"); return ret; } ret = __add_to_free_space_tree(trans, cache, path, cache->start + alignment, alignment); if (ret) { test_err("could not add free space"); return ret; } return check_free_space_extents(trans, fs_info, cache, path, extents, ARRAY_SIZE(extents)); } static int test_merge_both(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, struct btrfs_block_group *cache, struct btrfs_path *path, u32 alignment) { const struct free_space_extent extents[] = { {cache->start, 3 * alignment}, }; int ret; ret = __remove_from_free_space_tree(trans, cache, path, cache->start, cache->length); if (ret) { test_err("could not remove free space"); return ret; } ret = __add_to_free_space_tree(trans, cache, path, cache->start, alignment); if (ret) { test_err("could not add free space"); return ret; } ret = __add_to_free_space_tree(trans, cache, path, cache->start + 2 * alignment, alignment); if (ret) { test_err("could not add free space"); return ret; } ret = __add_to_free_space_tree(trans, cache, path, cache->start + alignment, alignment); if (ret) { test_err("could not add free space"); return ret; } return check_free_space_extents(trans, fs_info, cache, path, extents, ARRAY_SIZE(extents)); } static int test_merge_none(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, struct btrfs_block_group *cache, struct btrfs_path *path, u32 alignment) { const struct free_space_extent extents[] = { {cache->start, alignment}, {cache->start + 2 * alignment, alignment}, {cache->start + 4 * alignment, alignment}, }; int ret; ret = __remove_from_free_space_tree(trans, cache, path, cache->start, cache->length); if (ret) { test_err("could not remove free space"); return ret; } ret = __add_to_free_space_tree(trans, cache, path, cache->start, alignment); if (ret) { test_err("could not add free space"); return ret; } ret = __add_to_free_space_tree(trans, cache, path, cache->start + 4 * alignment, alignment); if (ret) { test_err("could not add free space"); return ret; } ret = __add_to_free_space_tree(trans, cache, path, cache->start + 2 * alignment, alignment); if (ret) { test_err("could not add free space"); return ret; } return check_free_space_extents(trans, fs_info, cache, path, extents, ARRAY_SIZE(extents)); } typedef int (*test_func_t)(struct btrfs_trans_handle *, struct btrfs_fs_info *, struct btrfs_block_group *, struct btrfs_path *, u32 alignment); static int run_test(test_func_t test_func, int bitmaps, u32 sectorsize, u32 nodesize, u32 alignment) { struct btrfs_fs_info *fs_info; struct btrfs_root *root = NULL; struct btrfs_block_group *cache = NULL; struct btrfs_trans_handle trans; struct btrfs_path *path = NULL; int ret; fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize); if (!fs_info) { test_std_err(TEST_ALLOC_FS_INFO); ret = -ENOMEM; goto out; } root = btrfs_alloc_dummy_root(fs_info); if (IS_ERR(root)) { test_std_err(TEST_ALLOC_ROOT); ret = 
PTR_ERR(root); goto out; } btrfs_set_super_compat_ro_flags(root->fs_info->super_copy, BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE); root->root_key.objectid = BTRFS_FREE_SPACE_TREE_OBJECTID; root->root_key.type = BTRFS_ROOT_ITEM_KEY; root->root_key.offset = 0; btrfs_global_root_insert(root); root->fs_info->tree_root = root; root->node = alloc_test_extent_buffer(root->fs_info, nodesize); if (IS_ERR(root->node)) { test_std_err(TEST_ALLOC_EXTENT_BUFFER); ret = PTR_ERR(root->node); goto out; } btrfs_set_header_level(root->node, 0); btrfs_set_header_nritems(root->node, 0); root->alloc_bytenr += 2 * nodesize; cache = btrfs_alloc_dummy_block_group(fs_info, 8 * alignment); if (!cache) { test_std_err(TEST_ALLOC_BLOCK_GROUP); ret = -ENOMEM; goto out; } cache->bitmap_low_thresh = 0; cache->bitmap_high_thresh = (u32)-1; set_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &cache->runtime_flags); cache->fs_info = root->fs_info; btrfs_init_dummy_trans(&trans, root->fs_info); path = btrfs_alloc_path(); if (!path) { test_std_err(TEST_ALLOC_ROOT); ret = -ENOMEM; goto out; } ret = add_block_group_free_space(&trans, cache); if (ret) { test_err("could not add block group free space"); goto out; } if (bitmaps) { ret = convert_free_space_to_bitmaps(&trans, cache, path); if (ret) { test_err("could not convert block group to bitmaps"); goto out; } } ret = test_func(&trans, root->fs_info, cache, path, alignment); if (ret) goto out; ret = remove_block_group_free_space(&trans, cache); if (ret) { test_err("could not remove block group free space"); goto out; } if (btrfs_header_nritems(root->node) != 0) { test_err("free space tree has leftover items"); ret = -EINVAL; goto out; } ret = 0; out: btrfs_free_path(path); btrfs_free_dummy_block_group(cache); btrfs_free_dummy_root(root); btrfs_free_dummy_fs_info(fs_info); return ret; } static int run_test_both_formats(test_func_t test_func, u32 sectorsize, u32 nodesize, u32 alignment) { int test_ret = 0; int ret; ret = run_test(test_func, 0, sectorsize, nodesize, alignment); if (ret) { test_err( "%ps failed with extents, sectorsize=%u, nodesize=%u, alignment=%u", test_func, sectorsize, nodesize, alignment); test_ret = ret; } ret = run_test(test_func, 1, sectorsize, nodesize, alignment); if (ret) { test_err( "%ps failed with bitmaps, sectorsize=%u, nodesize=%u, alignment=%u", test_func, sectorsize, nodesize, alignment); test_ret = ret; } return test_ret; } int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize) { test_func_t tests[] = { test_empty_block_group, test_remove_all, test_remove_beginning, test_remove_end, test_remove_middle, test_merge_left, test_merge_right, test_merge_both, test_merge_none, }; u32 bitmap_alignment; int test_ret = 0; int i; /* * Align some operations to a page to flush out bugs in the extent * buffer bitmap handling of highmem. */ bitmap_alignment = BTRFS_FREE_SPACE_BITMAP_BITS * PAGE_SIZE; test_msg("running free space tree tests"); for (i = 0; i < ARRAY_SIZE(tests); i++) { int ret; ret = run_test_both_formats(tests[i], sectorsize, nodesize, sectorsize); if (ret) test_ret = ret; ret = run_test_both_formats(tests[i], sectorsize, nodesize, bitmap_alignment); if (ret) test_ret = ret; } return test_ret; }
linux-master
fs/btrfs/tests/free-space-tree-tests.c
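The free space tree tests above flip each layout between extent items and bitmaps (convert_free_space_to_bitmaps()/convert_free_space_to_extents()) and check that both describe the same extents. The following is a minimal userspace sketch of the bitmap-to-extents direction only, with assumed sizes and names, not the btrfs on-disk format.

/*
 * Illustrative userspace sketch only (not btrfs code): turn a per-sector
 * free space bitmap into a list of (start, len) extents.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SECTORS 64	/* assumed small region for illustration */

struct extent { uint32_t start, len; };	/* in sector units */

static int bitmap_to_extents(const bool *bitmap, struct extent *out)
{
	int n = 0;
	uint32_t i = 0;

	while (i < SECTORS) {
		if (!bitmap[i]) {
			i++;
			continue;
		}
		out[n].start = i;
		while (i < SECTORS && bitmap[i])
			i++;
		out[n].len = i - out[n].start;
		n++;
	}
	return n;
}

int main(void)
{
	bool bitmap[SECTORS] = { false };
	struct extent extents[SECTORS];

	/* Mark [4, 8) and [16, 32) free, in sector units. */
	for (int i = 4; i < 8; i++)
		bitmap[i] = true;
	for (int i = 16; i < 32; i++)
		bitmap[i] = true;

	int n = bitmap_to_extents(bitmap, extents);

	for (int i = 0; i < n; i++)
		printf("extent: start %u len %u\n",
		       extents[i].start, extents[i].len);
	return 0;
}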
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2017 Oracle. All rights reserved. */ #include <linux/types.h> #include "btrfs-tests.h" #include "../ctree.h" #include "../btrfs_inode.h" #include "../volumes.h" #include "../disk-io.h" #include "../block-group.h" static void free_extent_map_tree(struct extent_map_tree *em_tree) { struct extent_map *em; struct rb_node *node; write_lock(&em_tree->lock); while (!RB_EMPTY_ROOT(&em_tree->map.rb_root)) { node = rb_first_cached(&em_tree->map); em = rb_entry(node, struct extent_map, rb_node); remove_extent_mapping(em_tree, em); #ifdef CONFIG_BTRFS_DEBUG if (refcount_read(&em->refs) != 1) { test_err( "em leak: em (start 0x%llx len 0x%llx block_start 0x%llx block_len 0x%llx) refs %d", em->start, em->len, em->block_start, em->block_len, refcount_read(&em->refs)); refcount_set(&em->refs, 1); } #endif free_extent_map(em); } write_unlock(&em_tree->lock); } /* * Test scenario: * * Suppose that no extent map has been loaded into memory yet, there is a file * extent [0, 16K), followed by another file extent [16K, 20K), two dio reads * are entering btrfs_get_extent() concurrently, t1 is reading [8K, 16K), t2 is * reading [0, 8K) * * t1 t2 * btrfs_get_extent() btrfs_get_extent() * -> lookup_extent_mapping() ->lookup_extent_mapping() * -> add_extent_mapping(0, 16K) * -> return em * ->add_extent_mapping(0, 16K) * -> #handle -EEXIST */ static int test_case_1(struct btrfs_fs_info *fs_info, struct extent_map_tree *em_tree) { struct extent_map *em; u64 start = 0; u64 len = SZ_8K; int ret; em = alloc_extent_map(); if (!em) { test_std_err(TEST_ALLOC_EXTENT_MAP); return -ENOMEM; } /* Add [0, 16K) */ em->start = 0; em->len = SZ_16K; em->block_start = 0; em->block_len = SZ_16K; write_lock(&em_tree->lock); ret = add_extent_mapping(em_tree, em, 0); write_unlock(&em_tree->lock); if (ret < 0) { test_err("cannot add extent range [0, 16K)"); goto out; } free_extent_map(em); /* Add [16K, 20K) following [0, 16K) */ em = alloc_extent_map(); if (!em) { test_std_err(TEST_ALLOC_EXTENT_MAP); ret = -ENOMEM; goto out; } em->start = SZ_16K; em->len = SZ_4K; em->block_start = SZ_32K; /* avoid merging */ em->block_len = SZ_4K; write_lock(&em_tree->lock); ret = add_extent_mapping(em_tree, em, 0); write_unlock(&em_tree->lock); if (ret < 0) { test_err("cannot add extent range [16K, 20K)"); goto out; } free_extent_map(em); em = alloc_extent_map(); if (!em) { test_std_err(TEST_ALLOC_EXTENT_MAP); ret = -ENOMEM; goto out; } /* Add [0, 8K), should return [0, 16K) instead. */ em->start = start; em->len = len; em->block_start = start; em->block_len = len; write_lock(&em_tree->lock); ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, em->start, em->len); write_unlock(&em_tree->lock); if (ret) { test_err("case1 [%llu %llu]: ret %d", start, start + len, ret); goto out; } if (em && (em->start != 0 || extent_map_end(em) != SZ_16K || em->block_start != 0 || em->block_len != SZ_16K)) { test_err( "case1 [%llu %llu]: ret %d return a wrong em (start %llu len %llu block_start %llu block_len %llu", start, start + len, ret, em->start, em->len, em->block_start, em->block_len); ret = -EINVAL; } free_extent_map(em); out: free_extent_map_tree(em_tree); return ret; } /* * Test scenario: * * Reading the inline ending up with EEXIST, ie. read an inline * extent and discard page cache and read it again. 
*/ static int test_case_2(struct btrfs_fs_info *fs_info, struct extent_map_tree *em_tree) { struct extent_map *em; int ret; em = alloc_extent_map(); if (!em) { test_std_err(TEST_ALLOC_EXTENT_MAP); return -ENOMEM; } /* Add [0, 1K) */ em->start = 0; em->len = SZ_1K; em->block_start = EXTENT_MAP_INLINE; em->block_len = (u64)-1; write_lock(&em_tree->lock); ret = add_extent_mapping(em_tree, em, 0); write_unlock(&em_tree->lock); if (ret < 0) { test_err("cannot add extent range [0, 1K)"); goto out; } free_extent_map(em); /* Add [4K, 8K) following [0, 1K) */ em = alloc_extent_map(); if (!em) { test_std_err(TEST_ALLOC_EXTENT_MAP); ret = -ENOMEM; goto out; } em->start = SZ_4K; em->len = SZ_4K; em->block_start = SZ_4K; em->block_len = SZ_4K; write_lock(&em_tree->lock); ret = add_extent_mapping(em_tree, em, 0); write_unlock(&em_tree->lock); if (ret < 0) { test_err("cannot add extent range [4K, 8K)"); goto out; } free_extent_map(em); em = alloc_extent_map(); if (!em) { test_std_err(TEST_ALLOC_EXTENT_MAP); ret = -ENOMEM; goto out; } /* Add [0, 1K) */ em->start = 0; em->len = SZ_1K; em->block_start = EXTENT_MAP_INLINE; em->block_len = (u64)-1; write_lock(&em_tree->lock); ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, em->start, em->len); write_unlock(&em_tree->lock); if (ret) { test_err("case2 [0 1K]: ret %d", ret); goto out; } if (em && (em->start != 0 || extent_map_end(em) != SZ_1K || em->block_start != EXTENT_MAP_INLINE || em->block_len != (u64)-1)) { test_err( "case2 [0 1K]: ret %d return a wrong em (start %llu len %llu block_start %llu block_len %llu", ret, em->start, em->len, em->block_start, em->block_len); ret = -EINVAL; } free_extent_map(em); out: free_extent_map_tree(em_tree); return ret; } static int __test_case_3(struct btrfs_fs_info *fs_info, struct extent_map_tree *em_tree, u64 start) { struct extent_map *em; u64 len = SZ_4K; int ret; em = alloc_extent_map(); if (!em) { test_std_err(TEST_ALLOC_EXTENT_MAP); return -ENOMEM; } /* Add [4K, 8K) */ em->start = SZ_4K; em->len = SZ_4K; em->block_start = SZ_4K; em->block_len = SZ_4K; write_lock(&em_tree->lock); ret = add_extent_mapping(em_tree, em, 0); write_unlock(&em_tree->lock); if (ret < 0) { test_err("cannot add extent range [4K, 8K)"); goto out; } free_extent_map(em); em = alloc_extent_map(); if (!em) { test_std_err(TEST_ALLOC_EXTENT_MAP); ret = -ENOMEM; goto out; } /* Add [0, 16K) */ em->start = 0; em->len = SZ_16K; em->block_start = 0; em->block_len = SZ_16K; write_lock(&em_tree->lock); ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len); write_unlock(&em_tree->lock); if (ret) { test_err("case3 [0x%llx 0x%llx): ret %d", start, start + len, ret); goto out; } /* * Since bytes within em are contiguous, em->block_start is identical to * em->start. */ if (em && (start < em->start || start + len > extent_map_end(em) || em->start != em->block_start || em->len != em->block_len)) { test_err( "case3 [0x%llx 0x%llx): ret %d em (start 0x%llx len 0x%llx block_start 0x%llx block_len 0x%llx)", start, start + len, ret, em->start, em->len, em->block_start, em->block_len); ret = -EINVAL; } free_extent_map(em); out: free_extent_map_tree(em_tree); return ret; } /* * Test scenario: * * Suppose that no extent map has been loaded into memory yet. * There is a file extent [0, 16K), two jobs are running concurrently * against it, t1 is buffered writing to [4K, 8K) and t2 is doing dio * read from [0, 4K) or [8K, 12K) or [12K, 16K). * * t1 goes ahead of t2 and adds em [4K, 8K) into tree. 
* * t1 t2 * cow_file_range() btrfs_get_extent() * -> lookup_extent_mapping() * -> add_extent_mapping() * -> add_extent_mapping() */ static int test_case_3(struct btrfs_fs_info *fs_info, struct extent_map_tree *em_tree) { int ret; ret = __test_case_3(fs_info, em_tree, 0); if (ret) return ret; ret = __test_case_3(fs_info, em_tree, SZ_8K); if (ret) return ret; ret = __test_case_3(fs_info, em_tree, (12 * SZ_1K)); return ret; } static int __test_case_4(struct btrfs_fs_info *fs_info, struct extent_map_tree *em_tree, u64 start) { struct extent_map *em; u64 len = SZ_4K; int ret; em = alloc_extent_map(); if (!em) { test_std_err(TEST_ALLOC_EXTENT_MAP); return -ENOMEM; } /* Add [0K, 8K) */ em->start = 0; em->len = SZ_8K; em->block_start = 0; em->block_len = SZ_8K; write_lock(&em_tree->lock); ret = add_extent_mapping(em_tree, em, 0); write_unlock(&em_tree->lock); if (ret < 0) { test_err("cannot add extent range [0, 8K)"); goto out; } free_extent_map(em); em = alloc_extent_map(); if (!em) { test_std_err(TEST_ALLOC_EXTENT_MAP); ret = -ENOMEM; goto out; } /* Add [8K, 32K) */ em->start = SZ_8K; em->len = 24 * SZ_1K; em->block_start = SZ_16K; /* avoid merging */ em->block_len = 24 * SZ_1K; write_lock(&em_tree->lock); ret = add_extent_mapping(em_tree, em, 0); write_unlock(&em_tree->lock); if (ret < 0) { test_err("cannot add extent range [8K, 32K)"); goto out; } free_extent_map(em); em = alloc_extent_map(); if (!em) { test_std_err(TEST_ALLOC_EXTENT_MAP); ret = -ENOMEM; goto out; } /* Add [0K, 32K) */ em->start = 0; em->len = SZ_32K; em->block_start = 0; em->block_len = SZ_32K; write_lock(&em_tree->lock); ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len); write_unlock(&em_tree->lock); if (ret) { test_err("case4 [0x%llx 0x%llx): ret %d", start, len, ret); goto out; } if (em && (start < em->start || start + len > extent_map_end(em))) { test_err( "case4 [0x%llx 0x%llx): ret %d, added wrong em (start 0x%llx len 0x%llx block_start 0x%llx block_len 0x%llx)", start, len, ret, em->start, em->len, em->block_start, em->block_len); ret = -EINVAL; } free_extent_map(em); out: free_extent_map_tree(em_tree); return ret; } /* * Test scenario: * * Suppose that no extent map has been loaded into memory yet. * There is a file extent [0, 32K), two jobs are running concurrently * against it, t1 is doing dio write to [8K, 32K) and t2 is doing dio * read from [0, 4K) or [4K, 8K). * * t1 goes ahead of t2 and splits em [0, 32K) to em [0K, 8K) and [8K 32K). 
* * t1 t2 * btrfs_get_blocks_direct() btrfs_get_blocks_direct() * -> btrfs_get_extent() -> btrfs_get_extent() * -> lookup_extent_mapping() * -> add_extent_mapping() -> lookup_extent_mapping() * # load [0, 32K) * -> btrfs_new_extent_direct() * -> btrfs_drop_extent_cache() * # split [0, 32K) * -> add_extent_mapping() * # add [8K, 32K) * -> add_extent_mapping() * # handle -EEXIST when adding * # [0, 32K) */ static int test_case_4(struct btrfs_fs_info *fs_info, struct extent_map_tree *em_tree) { int ret; ret = __test_case_4(fs_info, em_tree, 0); if (ret) return ret; ret = __test_case_4(fs_info, em_tree, SZ_4K); return ret; } static int add_compressed_extent(struct extent_map_tree *em_tree, u64 start, u64 len, u64 block_start) { struct extent_map *em; int ret; em = alloc_extent_map(); if (!em) { test_std_err(TEST_ALLOC_EXTENT_MAP); return -ENOMEM; } em->start = start; em->len = len; em->block_start = block_start; em->block_len = SZ_4K; set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); write_lock(&em_tree->lock); ret = add_extent_mapping(em_tree, em, 0); write_unlock(&em_tree->lock); free_extent_map(em); if (ret < 0) { test_err("cannot add extent map [%llu, %llu)", start, start + len); return ret; } return 0; } struct extent_range { u64 start; u64 len; }; /* The valid states of the tree after every drop, as described below. */ struct extent_range valid_ranges[][7] = { { { .start = 0, .len = SZ_8K }, /* [0, 8K) */ { .start = SZ_4K * 3, .len = SZ_4K * 3}, /* [12k, 24k) */ { .start = SZ_4K * 6, .len = SZ_4K * 3}, /* [24k, 36k) */ { .start = SZ_32K + SZ_4K, .len = SZ_4K}, /* [36k, 40k) */ { .start = SZ_4K * 10, .len = SZ_4K * 6}, /* [40k, 64k) */ }, { { .start = 0, .len = SZ_8K }, /* [0, 8K) */ { .start = SZ_4K * 5, .len = SZ_4K}, /* [20k, 24k) */ { .start = SZ_4K * 6, .len = SZ_4K * 3}, /* [24k, 36k) */ { .start = SZ_32K + SZ_4K, .len = SZ_4K}, /* [36k, 40k) */ { .start = SZ_4K * 10, .len = SZ_4K * 6}, /* [40k, 64k) */ }, { { .start = 0, .len = SZ_8K }, /* [0, 8K) */ { .start = SZ_4K * 5, .len = SZ_4K}, /* [20k, 24k) */ { .start = SZ_4K * 6, .len = SZ_4K}, /* [24k, 28k) */ { .start = SZ_32K, .len = SZ_4K}, /* [32k, 36k) */ { .start = SZ_32K + SZ_4K, .len = SZ_4K}, /* [36k, 40k) */ { .start = SZ_4K * 10, .len = SZ_4K * 6}, /* [40k, 64k) */ }, { { .start = 0, .len = SZ_8K}, /* [0, 8K) */ { .start = SZ_4K * 5, .len = SZ_4K}, /* [20k, 24k) */ { .start = SZ_4K * 6, .len = SZ_4K}, /* [24k, 28k) */ } }; static int validate_range(struct extent_map_tree *em_tree, int index) { struct rb_node *n; int i; for (i = 0, n = rb_first_cached(&em_tree->map); valid_ranges[index][i].len && n; i++, n = rb_next(n)) { struct extent_map *entry = rb_entry(n, struct extent_map, rb_node); if (entry->start != valid_ranges[index][i].start) { test_err("mapping has start %llu expected %llu", entry->start, valid_ranges[index][i].start); return -EINVAL; } if (entry->len != valid_ranges[index][i].len) { test_err("mapping has len %llu expected %llu", entry->len, valid_ranges[index][i].len); return -EINVAL; } } /* * We exited because we don't have any more entries in the extent_map * but we still expect more valid entries. */ if (valid_ranges[index][i].len) { test_err("missing an entry"); return -EINVAL; } /* We exited the loop but still have entries in the extent map. 
*/ if (n) { test_err("we have a left over entry in the extent map we didn't expect"); return -EINVAL; } return 0; } /* * Test scenario: * * Test the various edge cases of btrfs_drop_extent_map_range, create the * following ranges * * [0, 12k)[12k, 24k)[24k, 36k)[36k, 40k)[40k, 64k) * * And then we'll drop: * * [8k, 12k) - test the single front split * [12k, 20k) - test the single back split * [28k, 32k) - test the double split * [32k, 64k) - test whole em dropping * * They'll have the EXTENT_FLAG_COMPRESSED flag set to keep the em tree from * merging the em's. */ static int test_case_5(void) { struct extent_map_tree *em_tree; struct inode *inode; u64 start, end; int ret; test_msg("Running btrfs_drop_extent_map_range tests"); inode = btrfs_new_test_inode(); if (!inode) { test_std_err(TEST_ALLOC_INODE); return -ENOMEM; } em_tree = &BTRFS_I(inode)->extent_tree; /* [0, 12k) */ ret = add_compressed_extent(em_tree, 0, SZ_4K * 3, 0); if (ret) { test_err("cannot add extent range [0, 12K)"); goto out; } /* [12k, 24k) */ ret = add_compressed_extent(em_tree, SZ_4K * 3, SZ_4K * 3, SZ_4K); if (ret) { test_err("cannot add extent range [12k, 24k)"); goto out; } /* [24k, 36k) */ ret = add_compressed_extent(em_tree, SZ_4K * 6, SZ_4K * 3, SZ_8K); if (ret) { test_err("cannot add extent range [24k, 36k)"); goto out; } /* [36k, 40k) */ ret = add_compressed_extent(em_tree, SZ_32K + SZ_4K, SZ_4K, SZ_4K * 3); if (ret) { test_err("cannot add extent range [36k, 40k)"); goto out; } /* [40k, 64k) */ ret = add_compressed_extent(em_tree, SZ_4K * 10, SZ_4K * 6, SZ_16K); if (ret) { test_err("cannot add extent range [40k, 64k)"); goto out; } /* Drop [8k, 12k) */ start = SZ_8K; end = (3 * SZ_4K) - 1; btrfs_drop_extent_map_range(BTRFS_I(inode), start, end, false); ret = validate_range(&BTRFS_I(inode)->extent_tree, 0); if (ret) goto out; /* Drop [12k, 20k) */ start = SZ_4K * 3; end = SZ_16K + SZ_4K - 1; btrfs_drop_extent_map_range(BTRFS_I(inode), start, end, false); ret = validate_range(&BTRFS_I(inode)->extent_tree, 1); if (ret) goto out; /* Drop [28k, 32k) */ start = SZ_32K - SZ_4K; end = SZ_32K - 1; btrfs_drop_extent_map_range(BTRFS_I(inode), start, end, false); ret = validate_range(&BTRFS_I(inode)->extent_tree, 2); if (ret) goto out; /* Drop [32k, 64k) */ start = SZ_32K; end = SZ_64K - 1; btrfs_drop_extent_map_range(BTRFS_I(inode), start, end, false); ret = validate_range(&BTRFS_I(inode)->extent_tree, 3); if (ret) goto out; out: iput(inode); return ret; } /* * Test the btrfs_add_extent_mapping helper which will attempt to create an em * for areas between two existing ems. Validate it doesn't do this when there * are two unmerged em's side by side. 
*/ static int test_case_6(struct btrfs_fs_info *fs_info, struct extent_map_tree *em_tree) { struct extent_map *em = NULL; int ret; ret = add_compressed_extent(em_tree, 0, SZ_4K, 0); if (ret) goto out; ret = add_compressed_extent(em_tree, SZ_4K, SZ_4K, 0); if (ret) goto out; em = alloc_extent_map(); if (!em) { test_std_err(TEST_ALLOC_EXTENT_MAP); return -ENOMEM; } em->start = SZ_4K; em->len = SZ_4K; em->block_start = SZ_16K; em->block_len = SZ_16K; write_lock(&em_tree->lock); ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, 0, SZ_8K); write_unlock(&em_tree->lock); if (ret != 0) { test_err("got an error when adding our em: %d", ret); goto out; } ret = -EINVAL; if (em->start != 0) { test_err("unexpected em->start at %llu, wanted 0", em->start); goto out; } if (em->len != SZ_4K) { test_err("unexpected em->len %llu, expected 4K", em->len); goto out; } ret = 0; out: free_extent_map(em); free_extent_map_tree(em_tree); return ret; } /* * Regression test for btrfs_drop_extent_map_range. Calling with skip_pinned == * true would mess up the start/end calculations and subsequent splits would be * incorrect. */ static int test_case_7(void) { struct extent_map_tree *em_tree; struct extent_map *em; struct inode *inode; int ret; test_msg("Running btrfs_drop_extent_cache with pinned"); inode = btrfs_new_test_inode(); if (!inode) { test_std_err(TEST_ALLOC_INODE); return -ENOMEM; } em_tree = &BTRFS_I(inode)->extent_tree; em = alloc_extent_map(); if (!em) { test_std_err(TEST_ALLOC_EXTENT_MAP); ret = -ENOMEM; goto out; } /* [0, 16K), pinned */ em->start = 0; em->len = SZ_16K; em->block_start = 0; em->block_len = SZ_4K; set_bit(EXTENT_FLAG_PINNED, &em->flags); write_lock(&em_tree->lock); ret = add_extent_mapping(em_tree, em, 0); write_unlock(&em_tree->lock); if (ret < 0) { test_err("couldn't add extent map"); goto out; } free_extent_map(em); em = alloc_extent_map(); if (!em) { test_std_err(TEST_ALLOC_EXTENT_MAP); ret = -ENOMEM; goto out; } /* [32K, 48K), not pinned */ em->start = SZ_32K; em->len = SZ_16K; em->block_start = SZ_32K; em->block_len = SZ_16K; write_lock(&em_tree->lock); ret = add_extent_mapping(em_tree, em, 0); write_unlock(&em_tree->lock); if (ret < 0) { test_err("couldn't add extent map"); goto out; } free_extent_map(em); /* * Drop [0, 36K) This should skip the [0, 4K) extent and then split the * [32K, 48K) extent. */ btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (36 * SZ_1K) - 1, true); /* Make sure our extent maps look sane. 
*/ ret = -EINVAL; em = lookup_extent_mapping(em_tree, 0, SZ_16K); if (!em) { test_err("didn't find an em at 0 as expected"); goto out; } if (em->start != 0) { test_err("em->start is %llu, expected 0", em->start); goto out; } if (em->len != SZ_16K) { test_err("em->len is %llu, expected 16K", em->len); goto out; } free_extent_map(em); read_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, SZ_16K, SZ_16K); read_unlock(&em_tree->lock); if (em) { test_err("found an em when we weren't expecting one"); goto out; } read_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, SZ_32K, SZ_16K); read_unlock(&em_tree->lock); if (!em) { test_err("didn't find an em at 32K as expected"); goto out; } if (em->start != (36 * SZ_1K)) { test_err("em->start is %llu, expected 36K", em->start); goto out; } if (em->len != (12 * SZ_1K)) { test_err("em->len is %llu, expected 12K", em->len); goto out; } free_extent_map(em); read_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, 48 * SZ_1K, (u64)-1); read_unlock(&em_tree->lock); if (em) { test_err("found an unexpected em above 48K"); goto out; } ret = 0; out: free_extent_map(em); iput(inode); return ret; } struct rmap_test_vector { u64 raid_type; u64 physical_start; u64 data_stripe_size; u64 num_data_stripes; u64 num_stripes; /* Assume we won't have more than 5 physical stripes */ u64 data_stripe_phys_start[5]; bool expected_mapped_addr; /* Physical to logical addresses */ u64 mapped_logical[5]; }; static int test_rmap_block(struct btrfs_fs_info *fs_info, struct rmap_test_vector *test) { struct extent_map *em; struct map_lookup *map = NULL; u64 *logical = NULL; int i, out_ndaddrs, out_stripe_len; int ret; em = alloc_extent_map(); if (!em) { test_std_err(TEST_ALLOC_EXTENT_MAP); return -ENOMEM; } map = kmalloc(map_lookup_size(test->num_stripes), GFP_KERNEL); if (!map) { kfree(em); test_std_err(TEST_ALLOC_EXTENT_MAP); return -ENOMEM; } set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); /* Start at 4GiB logical address */ em->start = SZ_4G; em->len = test->data_stripe_size * test->num_data_stripes; em->block_len = em->len; em->orig_block_len = test->data_stripe_size; em->map_lookup = map; map->num_stripes = test->num_stripes; map->type = test->raid_type; for (i = 0; i < map->num_stripes; i++) { struct btrfs_device *dev = btrfs_alloc_dummy_device(fs_info); if (IS_ERR(dev)) { test_err("cannot allocate device"); ret = PTR_ERR(dev); goto out; } map->stripes[i].dev = dev; map->stripes[i].physical = test->data_stripe_phys_start[i]; } write_lock(&fs_info->mapping_tree.lock); ret = add_extent_mapping(&fs_info->mapping_tree, em, 0); write_unlock(&fs_info->mapping_tree.lock); if (ret) { test_err("error adding block group mapping to mapping tree"); goto out_free; } ret = btrfs_rmap_block(fs_info, em->start, btrfs_sb_offset(1), &logical, &out_ndaddrs, &out_stripe_len); if (ret || (out_ndaddrs == 0 && test->expected_mapped_addr)) { test_err("didn't rmap anything but expected %d", test->expected_mapped_addr); goto out; } if (out_stripe_len != BTRFS_STRIPE_LEN) { test_err("calculated stripe length doesn't match"); goto out; } if (out_ndaddrs != test->expected_mapped_addr) { for (i = 0; i < out_ndaddrs; i++) test_msg("mapped %llu", logical[i]); test_err("unexpected number of mapped addresses: %d", out_ndaddrs); goto out; } for (i = 0; i < out_ndaddrs; i++) { if (logical[i] != test->mapped_logical[i]) { test_err("unexpected logical address mapped"); goto out; } } ret = 0; out: write_lock(&fs_info->mapping_tree.lock); remove_extent_mapping(&fs_info->mapping_tree, em); 
write_unlock(&fs_info->mapping_tree.lock); /* For us */ free_extent_map(em); out_free: /* For the tree */ free_extent_map(em); kfree(logical); return ret; } int btrfs_test_extent_map(void) { struct btrfs_fs_info *fs_info = NULL; struct extent_map_tree *em_tree; int ret = 0, i; struct rmap_test_vector rmap_tests[] = { { /* * Test a chunk with 2 data stripes one of which * intersects the physical address of the super block * is correctly recognised. */ .raid_type = BTRFS_BLOCK_GROUP_RAID1, .physical_start = SZ_64M - SZ_4M, .data_stripe_size = SZ_256M, .num_data_stripes = 2, .num_stripes = 2, .data_stripe_phys_start = {SZ_64M - SZ_4M, SZ_64M - SZ_4M + SZ_256M}, .expected_mapped_addr = true, .mapped_logical= {SZ_4G + SZ_4M} }, { /* * Test that out-of-range physical addresses are * ignored */ /* SINGLE chunk type */ .raid_type = 0, .physical_start = SZ_4G, .data_stripe_size = SZ_256M, .num_data_stripes = 1, .num_stripes = 1, .data_stripe_phys_start = {SZ_256M}, .expected_mapped_addr = false, .mapped_logical = {0} } }; test_msg("running extent_map tests"); /* * Note: the fs_info is not set up completely, we only need * fs_info::fsid for the tracepoint. */ fs_info = btrfs_alloc_dummy_fs_info(PAGE_SIZE, PAGE_SIZE); if (!fs_info) { test_std_err(TEST_ALLOC_FS_INFO); return -ENOMEM; } em_tree = kzalloc(sizeof(*em_tree), GFP_KERNEL); if (!em_tree) { ret = -ENOMEM; goto out; } extent_map_tree_init(em_tree); ret = test_case_1(fs_info, em_tree); if (ret) goto out; ret = test_case_2(fs_info, em_tree); if (ret) goto out; ret = test_case_3(fs_info, em_tree); if (ret) goto out; ret = test_case_4(fs_info, em_tree); if (ret) goto out; ret = test_case_5(); if (ret) goto out; ret = test_case_6(fs_info, em_tree); if (ret) goto out; ret = test_case_7(); if (ret) goto out; test_msg("running rmap tests"); for (i = 0; i < ARRAY_SIZE(rmap_tests); i++) { ret = test_rmap_block(fs_info, &rmap_tests[i]); if (ret) goto out; } out: kfree(em_tree); btrfs_free_dummy_fs_info(fs_info); return ret; }
linux-master
fs/btrfs/tests/extent-map-tests.c
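Several of the extent map test cases above boil down to two threads racing to insert overlapping [start, len) ranges, with the loser expected to fall back to the mapping that already exists (the -EEXIST handling around btrfs_add_extent_mapping()). Below is a minimal userspace sketch of that overlap check only, using an assumed flat array instead of the rbtree and names that are not btrfs APIs.

/*
 * Illustrative userspace sketch only (not btrfs code): insert non-overlapping
 * ranges; an overlapping insert is rejected so the caller can reuse the
 * range that is already present.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_RANGES 16

struct range { uint64_t start, len; };

static struct range ranges[MAX_RANGES];
static int nr_ranges;

/* Return 0 on insert, -1 if the new range overlaps an existing one (or full). */
static int add_range(uint64_t start, uint64_t len)
{
	if (nr_ranges >= MAX_RANGES)
		return -1;

	for (int i = 0; i < nr_ranges; i++) {
		uint64_t s = ranges[i].start, e = s + ranges[i].len;

		if (start < e && start + len > s)
			return -1;	/* caller should reuse ranges[i] */
	}
	ranges[nr_ranges].start = start;
	ranges[nr_ranges].len = len;
	nr_ranges++;
	return 0;
}

int main(void)
{
	add_range(0, 16 * 1024);		/* [0, 16K) */
	add_range(16 * 1024, 4 * 1024);		/* [16K, 20K) */

	/*
	 * A second inserter trying to add [0, 8K) sees the overlap and
	 * falls back to the mapping that is already there.
	 */
	if (add_range(0, 8 * 1024) == -1)
		printf("range already covered, reuse existing mapping\n");
	return 0;
}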
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2013 Fusion IO. All rights reserved. */ #include <linux/slab.h> #include "btrfs-tests.h" #include "../ctree.h" #include "../extent_io.h" #include "../disk-io.h" #include "../accessors.h" static int test_btrfs_split_item(u32 sectorsize, u32 nodesize) { struct btrfs_fs_info *fs_info; struct btrfs_path *path = NULL; struct btrfs_root *root = NULL; struct extent_buffer *eb; char *value = "mary had a little lamb"; char *split1 = "mary had a little"; char *split2 = " lamb"; char *split3 = "mary"; char *split4 = " had a little"; char buf[32]; struct btrfs_key key; u32 value_len = strlen(value); int ret = 0; test_msg("running btrfs_split_item tests"); fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize); if (!fs_info) { test_std_err(TEST_ALLOC_FS_INFO); return -ENOMEM; } root = btrfs_alloc_dummy_root(fs_info); if (IS_ERR(root)) { test_std_err(TEST_ALLOC_ROOT); ret = PTR_ERR(root); goto out; } path = btrfs_alloc_path(); if (!path) { test_std_err(TEST_ALLOC_PATH); ret = -ENOMEM; goto out; } eb = alloc_dummy_extent_buffer(fs_info, nodesize); path->nodes[0] = eb; if (!eb) { test_std_err(TEST_ALLOC_EXTENT_BUFFER); ret = -ENOMEM; goto out; } path->slots[0] = 0; key.objectid = 0; key.type = BTRFS_EXTENT_CSUM_KEY; key.offset = 0; btrfs_setup_item_for_insert(root, path, &key, value_len); write_extent_buffer(eb, value, btrfs_item_ptr_offset(eb, 0), value_len); key.offset = 3; /* * Passing NULL trans here should be safe because we have plenty of * space in this leaf to split the item without having to split the * leaf. */ ret = btrfs_split_item(NULL, root, path, &key, 17); if (ret) { test_err("split item failed %d", ret); goto out; } /* * Read the first slot, it should have the original key and contain only * 'mary had a little' */ btrfs_item_key_to_cpu(eb, &key, 0); if (key.objectid != 0 || key.type != BTRFS_EXTENT_CSUM_KEY || key.offset != 0) { test_err("invalid key at slot 0"); ret = -EINVAL; goto out; } if (btrfs_item_size(eb, 0) != strlen(split1)) { test_err("invalid len in the first split"); ret = -EINVAL; goto out; } read_extent_buffer(eb, buf, btrfs_item_ptr_offset(eb, 0), strlen(split1)); if (memcmp(buf, split1, strlen(split1))) { test_err( "data in the buffer doesn't match what it should in the first split have='%.*s' want '%s'", (int)strlen(split1), buf, split1); ret = -EINVAL; goto out; } btrfs_item_key_to_cpu(eb, &key, 1); if (key.objectid != 0 || key.type != BTRFS_EXTENT_CSUM_KEY || key.offset != 3) { test_err("invalid key at slot 1"); ret = -EINVAL; goto out; } if (btrfs_item_size(eb, 1) != strlen(split2)) { test_err("invalid len in the second split"); ret = -EINVAL; goto out; } read_extent_buffer(eb, buf, btrfs_item_ptr_offset(eb, 1), strlen(split2)); if (memcmp(buf, split2, strlen(split2))) { test_err( "data in the buffer doesn't match what it should in the second split"); ret = -EINVAL; goto out; } key.offset = 1; /* Do it again so we test memmoving the other items in the leaf */ ret = btrfs_split_item(NULL, root, path, &key, 4); if (ret) { test_err("second split item failed %d", ret); goto out; } btrfs_item_key_to_cpu(eb, &key, 0); if (key.objectid != 0 || key.type != BTRFS_EXTENT_CSUM_KEY || key.offset != 0) { test_err("invalid key at slot 0"); ret = -EINVAL; goto out; } if (btrfs_item_size(eb, 0) != strlen(split3)) { test_err("invalid len in the first split"); ret = -EINVAL; goto out; } read_extent_buffer(eb, buf, btrfs_item_ptr_offset(eb, 0), strlen(split3)); if (memcmp(buf, split3, strlen(split3))) { test_err( "data in the 
buffer doesn't match what it should in the third split"); ret = -EINVAL; goto out; } btrfs_item_key_to_cpu(eb, &key, 1); if (key.objectid != 0 || key.type != BTRFS_EXTENT_CSUM_KEY || key.offset != 1) { test_err("invalid key at slot 1"); ret = -EINVAL; goto out; } if (btrfs_item_size(eb, 1) != strlen(split4)) { test_err("invalid len in the second split"); ret = -EINVAL; goto out; } read_extent_buffer(eb, buf, btrfs_item_ptr_offset(eb, 1), strlen(split4)); if (memcmp(buf, split4, strlen(split4))) { test_err( "data in the buffer doesn't match what it should in the fourth split"); ret = -EINVAL; goto out; } btrfs_item_key_to_cpu(eb, &key, 2); if (key.objectid != 0 || key.type != BTRFS_EXTENT_CSUM_KEY || key.offset != 3) { test_err("invalid key at slot 2"); ret = -EINVAL; goto out; } if (btrfs_item_size(eb, 2) != strlen(split2)) { test_err("invalid len in the second split"); ret = -EINVAL; goto out; } read_extent_buffer(eb, buf, btrfs_item_ptr_offset(eb, 2), strlen(split2)); if (memcmp(buf, split2, strlen(split2))) { test_err( "data in the buffer doesn't match what it should in the last chunk"); ret = -EINVAL; goto out; } out: btrfs_free_path(path); btrfs_free_dummy_root(root); btrfs_free_dummy_fs_info(fs_info); return ret; } int btrfs_test_extent_buffer_operations(u32 sectorsize, u32 nodesize) { test_msg("running extent buffer operation tests"); return test_btrfs_split_item(sectorsize, nodesize); }
linux-master
fs/btrfs/tests/extent-buffer-tests.c
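test_btrfs_split_item() above verifies that splitting an item's payload at a byte offset yields two items whose contents concatenate back to the original ("mary had a little lamb" into "mary had a little" and " lamb"). Below is a minimal userspace sketch of that split on a plain array of items, with assumed types; it is not the btrfs leaf layout.

/*
 * Illustrative userspace sketch only (not btrfs code): split one stored
 * value into two items at a byte offset, shifting later items to the right.
 */
#include <stdio.h>
#include <string.h>

struct item {
	char data[32];
	size_t len;
};

static void split_item(struct item *items, int *nr, int slot, size_t split)
{
	/* Make room for the new item after 'slot'. */
	memmove(&items[slot + 2], &items[slot + 1],
		(*nr - slot - 1) * sizeof(struct item));

	/* Second half goes into the new slot, first half stays in place. */
	items[slot + 1].len = items[slot].len - split;
	memcpy(items[slot + 1].data, items[slot].data + split,
	       items[slot + 1].len);
	items[slot].len = split;
	(*nr)++;
}

int main(void)
{
	struct item items[8] = { 0 };
	int nr = 1;
	const char *value = "mary had a little lamb";

	items[0].len = strlen(value);
	memcpy(items[0].data, value, items[0].len);

	split_item(items, &nr, 0, 17);

	printf("'%.*s' | '%.*s'\n",
	       (int)items[0].len, items[0].data,
	       (int)items[1].len, items[1].data);
	return 0;
}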
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2013 Facebook. All rights reserved. */ #include <linux/types.h> #include "btrfs-tests.h" #include "../ctree.h" #include "../transaction.h" #include "../disk-io.h" #include "../qgroup.h" #include "../backref.h" #include "../fs.h" #include "../accessors.h" static int insert_normal_tree_ref(struct btrfs_root *root, u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid) { struct btrfs_trans_handle trans; struct btrfs_extent_item *item; struct btrfs_extent_inline_ref *iref; struct btrfs_tree_block_info *block_info; struct btrfs_path *path; struct extent_buffer *leaf; struct btrfs_key ins; u32 size = sizeof(*item) + sizeof(*iref) + sizeof(*block_info); int ret; btrfs_init_dummy_trans(&trans, NULL); ins.objectid = bytenr; ins.type = BTRFS_EXTENT_ITEM_KEY; ins.offset = num_bytes; path = btrfs_alloc_path(); if (!path) { test_std_err(TEST_ALLOC_ROOT); return -ENOMEM; } ret = btrfs_insert_empty_item(&trans, root, path, &ins, size); if (ret) { test_err("couldn't insert ref %d", ret); btrfs_free_path(path); return ret; } leaf = path->nodes[0]; item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); btrfs_set_extent_refs(leaf, item, 1); btrfs_set_extent_generation(leaf, item, 1); btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_TREE_BLOCK); block_info = (struct btrfs_tree_block_info *)(item + 1); btrfs_set_tree_block_level(leaf, block_info, 0); iref = (struct btrfs_extent_inline_ref *)(block_info + 1); if (parent > 0) { btrfs_set_extent_inline_ref_type(leaf, iref, BTRFS_SHARED_BLOCK_REF_KEY); btrfs_set_extent_inline_ref_offset(leaf, iref, parent); } else { btrfs_set_extent_inline_ref_type(leaf, iref, BTRFS_TREE_BLOCK_REF_KEY); btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid); } btrfs_free_path(path); return 0; } static int add_tree_ref(struct btrfs_root *root, u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid) { struct btrfs_trans_handle trans; struct btrfs_extent_item *item; struct btrfs_path *path; struct btrfs_key key; u64 refs; int ret; btrfs_init_dummy_trans(&trans, NULL); key.objectid = bytenr; key.type = BTRFS_EXTENT_ITEM_KEY; key.offset = num_bytes; path = btrfs_alloc_path(); if (!path) { test_std_err(TEST_ALLOC_ROOT); return -ENOMEM; } ret = btrfs_search_slot(&trans, root, &key, path, 0, 1); if (ret) { test_err("couldn't find extent ref"); btrfs_free_path(path); return ret; } item = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_extent_item); refs = btrfs_extent_refs(path->nodes[0], item); btrfs_set_extent_refs(path->nodes[0], item, refs + 1); btrfs_release_path(path); key.objectid = bytenr; if (parent) { key.type = BTRFS_SHARED_BLOCK_REF_KEY; key.offset = parent; } else { key.type = BTRFS_TREE_BLOCK_REF_KEY; key.offset = root_objectid; } ret = btrfs_insert_empty_item(&trans, root, path, &key, 0); if (ret) test_err("failed to insert backref"); btrfs_free_path(path); return ret; } static int remove_extent_item(struct btrfs_root *root, u64 bytenr, u64 num_bytes) { struct btrfs_trans_handle trans; struct btrfs_key key; struct btrfs_path *path; int ret; btrfs_init_dummy_trans(&trans, NULL); key.objectid = bytenr; key.type = BTRFS_EXTENT_ITEM_KEY; key.offset = num_bytes; path = btrfs_alloc_path(); if (!path) { test_std_err(TEST_ALLOC_ROOT); return -ENOMEM; } ret = btrfs_search_slot(&trans, root, &key, path, -1, 1); if (ret) { test_err("didn't find our key %d", ret); btrfs_free_path(path); return ret; } btrfs_del_item(&trans, root, path); btrfs_free_path(path); return 0; } static int 
remove_extent_ref(struct btrfs_root *root, u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid) { struct btrfs_trans_handle trans; struct btrfs_extent_item *item; struct btrfs_path *path; struct btrfs_key key; u64 refs; int ret; btrfs_init_dummy_trans(&trans, NULL); key.objectid = bytenr; key.type = BTRFS_EXTENT_ITEM_KEY; key.offset = num_bytes; path = btrfs_alloc_path(); if (!path) { test_std_err(TEST_ALLOC_ROOT); return -ENOMEM; } ret = btrfs_search_slot(&trans, root, &key, path, 0, 1); if (ret) { test_err("couldn't find extent ref"); btrfs_free_path(path); return ret; } item = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_extent_item); refs = btrfs_extent_refs(path->nodes[0], item); btrfs_set_extent_refs(path->nodes[0], item, refs - 1); btrfs_release_path(path); key.objectid = bytenr; if (parent) { key.type = BTRFS_SHARED_BLOCK_REF_KEY; key.offset = parent; } else { key.type = BTRFS_TREE_BLOCK_REF_KEY; key.offset = root_objectid; } ret = btrfs_search_slot(&trans, root, &key, path, -1, 1); if (ret) { test_err("couldn't find backref %d", ret); btrfs_free_path(path); return ret; } btrfs_del_item(&trans, root, path); btrfs_free_path(path); return ret; } static int test_no_shared_qgroup(struct btrfs_root *root, u32 sectorsize, u32 nodesize) { struct btrfs_backref_walk_ctx ctx = { 0 }; struct btrfs_trans_handle trans; struct btrfs_fs_info *fs_info = root->fs_info; struct ulist *old_roots = NULL; struct ulist *new_roots = NULL; int ret; btrfs_init_dummy_trans(&trans, fs_info); test_msg("running qgroup add/remove tests"); ret = btrfs_create_qgroup(&trans, BTRFS_FS_TREE_OBJECTID); if (ret) { test_err("couldn't create a qgroup %d", ret); return ret; } ctx.bytenr = nodesize; ctx.trans = &trans; ctx.fs_info = fs_info; /* * Since the test trans doesn't have the complicated delayed refs, * we can only call btrfs_qgroup_account_extent() directly to test * quota. */ ret = btrfs_find_all_roots(&ctx, false); if (ret) { test_err("couldn't find old roots: %d", ret); return ret; } old_roots = ctx.roots; ctx.roots = NULL; ret = insert_normal_tree_ref(root, nodesize, nodesize, 0, BTRFS_FS_TREE_OBJECTID); if (ret) { ulist_free(old_roots); return ret; } ret = btrfs_find_all_roots(&ctx, false); if (ret) { ulist_free(old_roots); test_err("couldn't find old roots: %d", ret); return ret; } new_roots = ctx.roots; ctx.roots = NULL; ret = btrfs_qgroup_account_extent(&trans, nodesize, nodesize, old_roots, new_roots); if (ret) { test_err("couldn't account space for a qgroup %d", ret); return ret; } /* btrfs_qgroup_account_extent() always frees the ulists passed to it. 
*/ old_roots = NULL; new_roots = NULL; if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID, nodesize, nodesize)) { test_err("qgroup counts didn't match expected values"); return -EINVAL; } ret = btrfs_find_all_roots(&ctx, false); if (ret) { test_err("couldn't find old roots: %d", ret); return ret; } old_roots = ctx.roots; ctx.roots = NULL; ret = remove_extent_item(root, nodesize, nodesize); if (ret) { ulist_free(old_roots); return -EINVAL; } ret = btrfs_find_all_roots(&ctx, false); if (ret) { ulist_free(old_roots); test_err("couldn't find old roots: %d", ret); return ret; } new_roots = ctx.roots; ctx.roots = NULL; ret = btrfs_qgroup_account_extent(&trans, nodesize, nodesize, old_roots, new_roots); if (ret) { test_err("couldn't account space for a qgroup %d", ret); return -EINVAL; } if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID, 0, 0)) { test_err("qgroup counts didn't match expected values"); return -EINVAL; } return 0; } /* * Add a ref for two different roots to make sure the shared value comes out * right, also remove one of the roots and make sure the exclusive count is * adjusted properly. */ static int test_multiple_refs(struct btrfs_root *root, u32 sectorsize, u32 nodesize) { struct btrfs_backref_walk_ctx ctx = { 0 }; struct btrfs_trans_handle trans; struct btrfs_fs_info *fs_info = root->fs_info; struct ulist *old_roots = NULL; struct ulist *new_roots = NULL; int ret; btrfs_init_dummy_trans(&trans, fs_info); test_msg("running qgroup multiple refs test"); /* * We have BTRFS_FS_TREE_OBJECTID created already from the * previous test. */ ret = btrfs_create_qgroup(&trans, BTRFS_FIRST_FREE_OBJECTID); if (ret) { test_err("couldn't create a qgroup %d", ret); return ret; } ctx.bytenr = nodesize; ctx.trans = &trans; ctx.fs_info = fs_info; ret = btrfs_find_all_roots(&ctx, false); if (ret) { test_err("couldn't find old roots: %d", ret); return ret; } old_roots = ctx.roots; ctx.roots = NULL; ret = insert_normal_tree_ref(root, nodesize, nodesize, 0, BTRFS_FS_TREE_OBJECTID); if (ret) { ulist_free(old_roots); return ret; } ret = btrfs_find_all_roots(&ctx, false); if (ret) { ulist_free(old_roots); test_err("couldn't find old roots: %d", ret); return ret; } new_roots = ctx.roots; ctx.roots = NULL; ret = btrfs_qgroup_account_extent(&trans, nodesize, nodesize, old_roots, new_roots); if (ret) { test_err("couldn't account space for a qgroup %d", ret); return ret; } if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID, nodesize, nodesize)) { test_err("qgroup counts didn't match expected values"); return -EINVAL; } ret = btrfs_find_all_roots(&ctx, false); if (ret) { test_err("couldn't find old roots: %d", ret); return ret; } old_roots = ctx.roots; ctx.roots = NULL; ret = add_tree_ref(root, nodesize, nodesize, 0, BTRFS_FIRST_FREE_OBJECTID); if (ret) { ulist_free(old_roots); return ret; } ret = btrfs_find_all_roots(&ctx, false); if (ret) { ulist_free(old_roots); test_err("couldn't find old roots: %d", ret); return ret; } new_roots = ctx.roots; ctx.roots = NULL; ret = btrfs_qgroup_account_extent(&trans, nodesize, nodesize, old_roots, new_roots); if (ret) { test_err("couldn't account space for a qgroup %d", ret); return ret; } if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID, nodesize, 0)) { test_err("qgroup counts didn't match expected values"); return -EINVAL; } if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FIRST_FREE_OBJECTID, nodesize, 0)) { test_err("qgroup counts didn't match expected values"); return -EINVAL; } ret = btrfs_find_all_roots(&ctx, false); if 
(ret) { test_err("couldn't find old roots: %d", ret); return ret; } old_roots = ctx.roots; ctx.roots = NULL; ret = remove_extent_ref(root, nodesize, nodesize, 0, BTRFS_FIRST_FREE_OBJECTID); if (ret) { ulist_free(old_roots); return ret; } ret = btrfs_find_all_roots(&ctx, false); if (ret) { ulist_free(old_roots); test_err("couldn't find old roots: %d", ret); return ret; } new_roots = ctx.roots; ctx.roots = NULL; ret = btrfs_qgroup_account_extent(&trans, nodesize, nodesize, old_roots, new_roots); if (ret) { test_err("couldn't account space for a qgroup %d", ret); return ret; } if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FIRST_FREE_OBJECTID, 0, 0)) { test_err("qgroup counts didn't match expected values"); return -EINVAL; } if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID, nodesize, nodesize)) { test_err("qgroup counts didn't match expected values"); return -EINVAL; } return 0; } int btrfs_test_qgroups(u32 sectorsize, u32 nodesize) { struct btrfs_fs_info *fs_info = NULL; struct btrfs_root *root; struct btrfs_root *tmp_root; int ret = 0; fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize); if (!fs_info) { test_std_err(TEST_ALLOC_FS_INFO); return -ENOMEM; } root = btrfs_alloc_dummy_root(fs_info); if (IS_ERR(root)) { test_std_err(TEST_ALLOC_ROOT); ret = PTR_ERR(root); goto out; } /* We are using this root as our extent root */ root->root_key.objectid = BTRFS_EXTENT_TREE_OBJECTID; root->root_key.type = BTRFS_ROOT_ITEM_KEY; root->root_key.offset = 0; btrfs_global_root_insert(root); /* * Some of the paths we test assume we have a filled out fs_info, so we * just need to add the root in there so we don't panic. */ root->fs_info->tree_root = root; root->fs_info->quota_root = root; set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); /* * Can't use bytenr 0, some things freak out * *cough*backref walking code*cough* */ root->node = alloc_test_extent_buffer(root->fs_info, nodesize); if (IS_ERR(root->node)) { test_err("couldn't allocate dummy buffer"); ret = PTR_ERR(root->node); goto out; } btrfs_set_header_level(root->node, 0); btrfs_set_header_nritems(root->node, 0); root->alloc_bytenr += 2 * nodesize; tmp_root = btrfs_alloc_dummy_root(fs_info); if (IS_ERR(tmp_root)) { test_std_err(TEST_ALLOC_ROOT); ret = PTR_ERR(tmp_root); goto out; } tmp_root->root_key.objectid = BTRFS_FS_TREE_OBJECTID; root->fs_info->fs_root = tmp_root; ret = btrfs_insert_fs_root(root->fs_info, tmp_root); if (ret) { test_err("couldn't insert fs root %d", ret); goto out; } btrfs_put_root(tmp_root); tmp_root = btrfs_alloc_dummy_root(fs_info); if (IS_ERR(tmp_root)) { test_std_err(TEST_ALLOC_ROOT); ret = PTR_ERR(tmp_root); goto out; } tmp_root->root_key.objectid = BTRFS_FIRST_FREE_OBJECTID; ret = btrfs_insert_fs_root(root->fs_info, tmp_root); if (ret) { test_err("couldn't insert fs root %d", ret); goto out; } btrfs_put_root(tmp_root); test_msg("running qgroup tests"); ret = test_no_shared_qgroup(root, sectorsize, nodesize); if (ret) goto out; ret = test_multiple_refs(root, sectorsize, nodesize); out: btrfs_free_dummy_root(root); btrfs_free_dummy_fs_info(fs_info); return ret; }
linux-master
fs/btrfs/tests/qgroup-tests.c
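Every case in qgroup-tests.c above repeats the same five-step accounting pattern: snapshot the set of roots referencing an extent, mutate the extent tree, snapshot the roots again, hand both snapshots to the qgroup accounting code, and finally compare the per-qgroup counters. The condensed sketch below is not part of the kernel tree; the wrapper name account_extent_and_verify() and the change() callback are hypothetical, while the callees and their argument order are the ones visible in the test file (the caller is assumed to have filled ctx->bytenr, ctx->trans and ctx->fs_info as the tests do).

/* Hypothetical helper condensing the pattern used by the qgroup selftests. */
static int account_extent_and_verify(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info,
				     struct btrfs_backref_walk_ctx *ctx,
				     u64 bytenr, u64 num_bytes,
				     int (*change)(void *arg), void *arg,
				     u64 qgroupid, u64 rfer, u64 excl)
{
	struct ulist *old_roots;
	struct ulist *new_roots;
	int ret;

	/* Roots referencing the extent before the tree change. */
	ret = btrfs_find_all_roots(ctx, false);
	if (ret)
		return ret;
	old_roots = ctx->roots;
	ctx->roots = NULL;

	/* Apply the change (insert/add/remove a ref in the tests). */
	ret = change(arg);
	if (ret) {
		ulist_free(old_roots);
		return ret;
	}

	/* Roots referencing the extent after the change. */
	ret = btrfs_find_all_roots(ctx, false);
	if (ret) {
		ulist_free(old_roots);
		return ret;
	}
	new_roots = ctx->roots;
	ctx->roots = NULL;

	/* Both ulists are consumed by the accounting call. */
	ret = btrfs_qgroup_account_extent(trans, bytenr, num_bytes,
					  old_roots, new_roots);
	if (ret)
		return ret;

	/* Compare the resulting referenced/exclusive byte counts. */
	if (btrfs_verify_qgroup_counts(fs_info, qgroupid, rfer, excl))
		return -EINVAL;
	return 0;
}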
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2013 Fusion IO. All rights reserved. */ #include <linux/types.h> #include "btrfs-tests.h" #include "../ctree.h" #include "../btrfs_inode.h" #include "../disk-io.h" #include "../extent_io.h" #include "../volumes.h" #include "../compression.h" #include "../accessors.h" static void insert_extent(struct btrfs_root *root, u64 start, u64 len, u64 ram_bytes, u64 offset, u64 disk_bytenr, u64 disk_len, u32 type, u8 compression, int slot) { struct btrfs_path path; struct btrfs_file_extent_item *fi; struct extent_buffer *leaf = root->node; struct btrfs_key key; u32 value_len = sizeof(struct btrfs_file_extent_item); if (type == BTRFS_FILE_EXTENT_INLINE) value_len += len; memset(&path, 0, sizeof(path)); path.nodes[0] = leaf; path.slots[0] = slot; key.objectid = BTRFS_FIRST_FREE_OBJECTID; key.type = BTRFS_EXTENT_DATA_KEY; key.offset = start; btrfs_setup_item_for_insert(root, &path, &key, value_len); fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); btrfs_set_file_extent_generation(leaf, fi, 1); btrfs_set_file_extent_type(leaf, fi, type); btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr); btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_len); btrfs_set_file_extent_offset(leaf, fi, offset); btrfs_set_file_extent_num_bytes(leaf, fi, len); btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes); btrfs_set_file_extent_compression(leaf, fi, compression); btrfs_set_file_extent_encryption(leaf, fi, 0); btrfs_set_file_extent_other_encoding(leaf, fi, 0); } static void insert_inode_item_key(struct btrfs_root *root) { struct btrfs_path path; struct extent_buffer *leaf = root->node; struct btrfs_key key; u32 value_len = 0; memset(&path, 0, sizeof(path)); path.nodes[0] = leaf; path.slots[0] = 0; key.objectid = BTRFS_INODE_ITEM_KEY; key.type = BTRFS_INODE_ITEM_KEY; key.offset = 0; btrfs_setup_item_for_insert(root, &path, &key, value_len); } /* * Build the most complicated map of extents the earth has ever seen. We want * this so we can test all of the corner cases of btrfs_get_extent. Here is a * diagram of how the extents will look though this may not be possible we still * want to make sure everything acts normally (the last number is not inclusive) * * [0 - 6][ 6 - 4096 ][ 4096 - 4100][4100 - 8195][8195 - 12291] * [inline][hole but no extent][ hole ][ regular ][regular1 split] * * [12291 - 16387][16387 - 24579][24579 - 28675][ 28675 - 32771][32771 - 36867 ] * [ hole ][regular1 split][ prealloc ][ prealloc1 ][prealloc1 written] * * [36867 - 45059][45059 - 53251][53251 - 57347][57347 - 61443][61443- 69635] * [ prealloc1 ][ compressed ][ compressed1 ][ regular ][ compressed1] * * [69635-73731][ 73731 - 86019 ][86019-90115] * [ regular ][ hole but no extent][ regular ] */ static void setup_file_extents(struct btrfs_root *root, u32 sectorsize) { int slot = 0; u64 disk_bytenr = SZ_1M; u64 offset = 0; /* * Tree-checker has strict limits on inline extents that they can only * exist at file offset 0, thus we can only have one inline file extent * at most. 
*/ insert_extent(root, offset, 6, 6, 0, 0, 0, BTRFS_FILE_EXTENT_INLINE, 0, slot); slot++; offset = sectorsize; /* Now another hole */ insert_extent(root, offset, 4, 4, 0, 0, 0, BTRFS_FILE_EXTENT_REG, 0, slot); slot++; offset += 4; /* Now for a regular extent */ insert_extent(root, offset, sectorsize - 1, sectorsize - 1, 0, disk_bytenr, sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot); slot++; disk_bytenr += sectorsize; offset += sectorsize - 1; /* * Now for 3 extents that were split from a hole punch so we test * offsets properly. */ insert_extent(root, offset, sectorsize, 4 * sectorsize, 0, disk_bytenr, 4 * sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot); slot++; offset += sectorsize; insert_extent(root, offset, sectorsize, sectorsize, 0, 0, 0, BTRFS_FILE_EXTENT_REG, 0, slot); slot++; offset += sectorsize; insert_extent(root, offset, 2 * sectorsize, 4 * sectorsize, 2 * sectorsize, disk_bytenr, 4 * sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot); slot++; offset += 2 * sectorsize; disk_bytenr += 4 * sectorsize; /* Now for a unwritten prealloc extent */ insert_extent(root, offset, sectorsize, sectorsize, 0, disk_bytenr, sectorsize, BTRFS_FILE_EXTENT_PREALLOC, 0, slot); slot++; offset += sectorsize; /* * We want to jack up disk_bytenr a little more so the em stuff doesn't * merge our records. */ disk_bytenr += 2 * sectorsize; /* * Now for a partially written prealloc extent, basically the same as * the hole punch example above. Ram_bytes never changes when you mark * extents written btw. */ insert_extent(root, offset, sectorsize, 4 * sectorsize, 0, disk_bytenr, 4 * sectorsize, BTRFS_FILE_EXTENT_PREALLOC, 0, slot); slot++; offset += sectorsize; insert_extent(root, offset, sectorsize, 4 * sectorsize, sectorsize, disk_bytenr, 4 * sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot); slot++; offset += sectorsize; insert_extent(root, offset, 2 * sectorsize, 4 * sectorsize, 2 * sectorsize, disk_bytenr, 4 * sectorsize, BTRFS_FILE_EXTENT_PREALLOC, 0, slot); slot++; offset += 2 * sectorsize; disk_bytenr += 4 * sectorsize; /* Now a normal compressed extent */ insert_extent(root, offset, 2 * sectorsize, 2 * sectorsize, 0, disk_bytenr, sectorsize, BTRFS_FILE_EXTENT_REG, BTRFS_COMPRESS_ZLIB, slot); slot++; offset += 2 * sectorsize; /* No merges */ disk_bytenr += 2 * sectorsize; /* Now a split compressed extent */ insert_extent(root, offset, sectorsize, 4 * sectorsize, 0, disk_bytenr, sectorsize, BTRFS_FILE_EXTENT_REG, BTRFS_COMPRESS_ZLIB, slot); slot++; offset += sectorsize; insert_extent(root, offset, sectorsize, sectorsize, 0, disk_bytenr + sectorsize, sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot); slot++; offset += sectorsize; insert_extent(root, offset, 2 * sectorsize, 4 * sectorsize, 2 * sectorsize, disk_bytenr, sectorsize, BTRFS_FILE_EXTENT_REG, BTRFS_COMPRESS_ZLIB, slot); slot++; offset += 2 * sectorsize; disk_bytenr += 2 * sectorsize; /* Now extents that have a hole but no hole extent */ insert_extent(root, offset, sectorsize, sectorsize, 0, disk_bytenr, sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot); slot++; offset += 4 * sectorsize; disk_bytenr += sectorsize; insert_extent(root, offset, sectorsize, sectorsize, 0, disk_bytenr, sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot); } static unsigned long prealloc_only = 0; static unsigned long compressed_only = 0; static unsigned long vacancy_only = 0; static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) { struct btrfs_fs_info *fs_info = NULL; struct inode *inode = NULL; struct btrfs_root *root = NULL; struct extent_map *em = NULL; u64 orig_start; u64 disk_bytenr; 
u64 offset; int ret = -ENOMEM; test_msg("running btrfs_get_extent tests"); inode = btrfs_new_test_inode(); if (!inode) { test_std_err(TEST_ALLOC_INODE); return ret; } fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize); if (!fs_info) { test_std_err(TEST_ALLOC_FS_INFO); goto out; } root = btrfs_alloc_dummy_root(fs_info); if (IS_ERR(root)) { test_std_err(TEST_ALLOC_ROOT); goto out; } root->node = alloc_dummy_extent_buffer(fs_info, nodesize); if (!root->node) { test_std_err(TEST_ALLOC_ROOT); goto out; } btrfs_set_header_nritems(root->node, 0); btrfs_set_header_level(root->node, 0); ret = -EINVAL; /* First with no extents */ BTRFS_I(inode)->root = root; em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, 0, sectorsize); if (IS_ERR(em)) { em = NULL; test_err("got an error when we shouldn't have"); goto out; } if (em->block_start != EXTENT_MAP_HOLE) { test_err("expected a hole, got %llu", em->block_start); goto out; } free_extent_map(em); btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false); /* * All of the magic numbers are based on the mapping setup in * setup_file_extents, so if you change anything there you need to * update the comment and update the expected values below. */ setup_file_extents(root, sectorsize); em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, 0, (u64)-1); if (IS_ERR(em)) { test_err("got an error when we shouldn't have"); goto out; } if (em->block_start != EXTENT_MAP_INLINE) { test_err("expected an inline, got %llu", em->block_start); goto out; } /* * For inline extent, we always round up the em to sectorsize, as * they are either: * * a) a hidden hole * The range will be zeroed at inline extent read time. * * b) a file extent with unaligned bytenr * Tree checker will reject it. */ if (em->start != 0 || em->len != sectorsize) { test_err( "unexpected extent wanted start 0 len %u, got start %llu len %llu", sectorsize, em->start, em->len); goto out; } if (em->flags != 0) { test_err("unexpected flags set, want 0 have %lu", em->flags); goto out; } /* * We don't test anything else for inline since it doesn't get set * unless we have a page for it to write into. Maybe we should change * this? 
*/ offset = em->start + em->len; free_extent_map(em); em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize); if (IS_ERR(em)) { test_err("got an error when we shouldn't have"); goto out; } if (em->block_start != EXTENT_MAP_HOLE) { test_err("expected a hole, got %llu", em->block_start); goto out; } if (em->start != offset || em->len != 4) { test_err( "unexpected extent wanted start %llu len 4, got start %llu len %llu", offset, em->start, em->len); goto out; } if (em->flags != 0) { test_err("unexpected flags set, want 0 have %lu", em->flags); goto out; } offset = em->start + em->len; free_extent_map(em); /* Regular extent */ em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize); if (IS_ERR(em)) { test_err("got an error when we shouldn't have"); goto out; } if (em->block_start >= EXTENT_MAP_LAST_BYTE) { test_err("expected a real extent, got %llu", em->block_start); goto out; } if (em->start != offset || em->len != sectorsize - 1) { test_err( "unexpected extent wanted start %llu len 4095, got start %llu len %llu", offset, em->start, em->len); goto out; } if (em->flags != 0) { test_err("unexpected flags set, want 0 have %lu", em->flags); goto out; } if (em->orig_start != em->start) { test_err("wrong orig offset, want %llu, have %llu", em->start, em->orig_start); goto out; } offset = em->start + em->len; free_extent_map(em); /* The next 3 are split extents */ em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize); if (IS_ERR(em)) { test_err("got an error when we shouldn't have"); goto out; } if (em->block_start >= EXTENT_MAP_LAST_BYTE) { test_err("expected a real extent, got %llu", em->block_start); goto out; } if (em->start != offset || em->len != sectorsize) { test_err( "unexpected extent start %llu len %u, got start %llu len %llu", offset, sectorsize, em->start, em->len); goto out; } if (em->flags != 0) { test_err("unexpected flags set, want 0 have %lu", em->flags); goto out; } if (em->orig_start != em->start) { test_err("wrong orig offset, want %llu, have %llu", em->start, em->orig_start); goto out; } disk_bytenr = em->block_start; orig_start = em->start; offset = em->start + em->len; free_extent_map(em); em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize); if (IS_ERR(em)) { test_err("got an error when we shouldn't have"); goto out; } if (em->block_start != EXTENT_MAP_HOLE) { test_err("expected a hole, got %llu", em->block_start); goto out; } if (em->start != offset || em->len != sectorsize) { test_err( "unexpected extent wanted start %llu len %u, got start %llu len %llu", offset, sectorsize, em->start, em->len); goto out; } if (em->flags != 0) { test_err("unexpected flags set, want 0 have %lu", em->flags); goto out; } offset = em->start + em->len; free_extent_map(em); em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize); if (IS_ERR(em)) { test_err("got an error when we shouldn't have"); goto out; } if (em->block_start >= EXTENT_MAP_LAST_BYTE) { test_err("expected a real extent, got %llu", em->block_start); goto out; } if (em->start != offset || em->len != 2 * sectorsize) { test_err( "unexpected extent wanted start %llu len %u, got start %llu len %llu", offset, 2 * sectorsize, em->start, em->len); goto out; } if (em->flags != 0) { test_err("unexpected flags set, want 0 have %lu", em->flags); goto out; } if (em->orig_start != orig_start) { test_err("wrong orig offset, want %llu, have %llu", orig_start, em->orig_start); goto out; } disk_bytenr += (em->start - orig_start); if (em->block_start != disk_bytenr) { test_err("wrong 
block start, want %llu, have %llu", disk_bytenr, em->block_start); goto out; } offset = em->start + em->len; free_extent_map(em); /* Prealloc extent */ em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize); if (IS_ERR(em)) { test_err("got an error when we shouldn't have"); goto out; } if (em->block_start >= EXTENT_MAP_LAST_BYTE) { test_err("expected a real extent, got %llu", em->block_start); goto out; } if (em->start != offset || em->len != sectorsize) { test_err( "unexpected extent wanted start %llu len %u, got start %llu len %llu", offset, sectorsize, em->start, em->len); goto out; } if (em->flags != prealloc_only) { test_err("unexpected flags set, want %lu have %lu", prealloc_only, em->flags); goto out; } if (em->orig_start != em->start) { test_err("wrong orig offset, want %llu, have %llu", em->start, em->orig_start); goto out; } offset = em->start + em->len; free_extent_map(em); /* The next 3 are a half written prealloc extent */ em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize); if (IS_ERR(em)) { test_err("got an error when we shouldn't have"); goto out; } if (em->block_start >= EXTENT_MAP_LAST_BYTE) { test_err("expected a real extent, got %llu", em->block_start); goto out; } if (em->start != offset || em->len != sectorsize) { test_err( "unexpected extent wanted start %llu len %u, got start %llu len %llu", offset, sectorsize, em->start, em->len); goto out; } if (em->flags != prealloc_only) { test_err("unexpected flags set, want %lu have %lu", prealloc_only, em->flags); goto out; } if (em->orig_start != em->start) { test_err("wrong orig offset, want %llu, have %llu", em->start, em->orig_start); goto out; } disk_bytenr = em->block_start; orig_start = em->start; offset = em->start + em->len; free_extent_map(em); em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize); if (IS_ERR(em)) { test_err("got an error when we shouldn't have"); goto out; } if (em->block_start >= EXTENT_MAP_HOLE) { test_err("expected a real extent, got %llu", em->block_start); goto out; } if (em->start != offset || em->len != sectorsize) { test_err( "unexpected extent wanted start %llu len %u, got start %llu len %llu", offset, sectorsize, em->start, em->len); goto out; } if (em->flags != 0) { test_err("unexpected flags set, want 0 have %lu", em->flags); goto out; } if (em->orig_start != orig_start) { test_err("unexpected orig offset, wanted %llu, have %llu", orig_start, em->orig_start); goto out; } if (em->block_start != (disk_bytenr + (em->start - em->orig_start))) { test_err("unexpected block start, wanted %llu, have %llu", disk_bytenr + (em->start - em->orig_start), em->block_start); goto out; } offset = em->start + em->len; free_extent_map(em); em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize); if (IS_ERR(em)) { test_err("got an error when we shouldn't have"); goto out; } if (em->block_start >= EXTENT_MAP_LAST_BYTE) { test_err("expected a real extent, got %llu", em->block_start); goto out; } if (em->start != offset || em->len != 2 * sectorsize) { test_err( "unexpected extent wanted start %llu len %u, got start %llu len %llu", offset, 2 * sectorsize, em->start, em->len); goto out; } if (em->flags != prealloc_only) { test_err("unexpected flags set, want %lu have %lu", prealloc_only, em->flags); goto out; } if (em->orig_start != orig_start) { test_err("wrong orig offset, want %llu, have %llu", orig_start, em->orig_start); goto out; } if (em->block_start != (disk_bytenr + (em->start - em->orig_start))) { test_err("unexpected block start, wanted %llu, have 
%llu", disk_bytenr + (em->start - em->orig_start), em->block_start); goto out; } offset = em->start + em->len; free_extent_map(em); /* Now for the compressed extent */ em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize); if (IS_ERR(em)) { test_err("got an error when we shouldn't have"); goto out; } if (em->block_start >= EXTENT_MAP_LAST_BYTE) { test_err("expected a real extent, got %llu", em->block_start); goto out; } if (em->start != offset || em->len != 2 * sectorsize) { test_err( "unexpected extent wanted start %llu len %u, got start %llu len %llu", offset, 2 * sectorsize, em->start, em->len); goto out; } if (em->flags != compressed_only) { test_err("unexpected flags set, want %lu have %lu", compressed_only, em->flags); goto out; } if (em->orig_start != em->start) { test_err("wrong orig offset, want %llu, have %llu", em->start, em->orig_start); goto out; } if (em->compress_type != BTRFS_COMPRESS_ZLIB) { test_err("unexpected compress type, wanted %d, got %d", BTRFS_COMPRESS_ZLIB, em->compress_type); goto out; } offset = em->start + em->len; free_extent_map(em); /* Split compressed extent */ em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize); if (IS_ERR(em)) { test_err("got an error when we shouldn't have"); goto out; } if (em->block_start >= EXTENT_MAP_LAST_BYTE) { test_err("expected a real extent, got %llu", em->block_start); goto out; } if (em->start != offset || em->len != sectorsize) { test_err( "unexpected extent wanted start %llu len %u, got start %llu len %llu", offset, sectorsize, em->start, em->len); goto out; } if (em->flags != compressed_only) { test_err("unexpected flags set, want %lu have %lu", compressed_only, em->flags); goto out; } if (em->orig_start != em->start) { test_err("wrong orig offset, want %llu, have %llu", em->start, em->orig_start); goto out; } if (em->compress_type != BTRFS_COMPRESS_ZLIB) { test_err("unexpected compress type, wanted %d, got %d", BTRFS_COMPRESS_ZLIB, em->compress_type); goto out; } disk_bytenr = em->block_start; orig_start = em->start; offset = em->start + em->len; free_extent_map(em); em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize); if (IS_ERR(em)) { test_err("got an error when we shouldn't have"); goto out; } if (em->block_start >= EXTENT_MAP_LAST_BYTE) { test_err("expected a real extent, got %llu", em->block_start); goto out; } if (em->start != offset || em->len != sectorsize) { test_err( "unexpected extent wanted start %llu len %u, got start %llu len %llu", offset, sectorsize, em->start, em->len); goto out; } if (em->flags != 0) { test_err("unexpected flags set, want 0 have %lu", em->flags); goto out; } if (em->orig_start != em->start) { test_err("wrong orig offset, want %llu, have %llu", em->start, em->orig_start); goto out; } offset = em->start + em->len; free_extent_map(em); em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize); if (IS_ERR(em)) { test_err("got an error when we shouldn't have"); goto out; } if (em->block_start != disk_bytenr) { test_err("block start does not match, want %llu got %llu", disk_bytenr, em->block_start); goto out; } if (em->start != offset || em->len != 2 * sectorsize) { test_err( "unexpected extent wanted start %llu len %u, got start %llu len %llu", offset, 2 * sectorsize, em->start, em->len); goto out; } if (em->flags != compressed_only) { test_err("unexpected flags set, want %lu have %lu", compressed_only, em->flags); goto out; } if (em->orig_start != orig_start) { test_err("wrong orig offset, want %llu, have %llu", em->start, orig_start); goto 
out; } if (em->compress_type != BTRFS_COMPRESS_ZLIB) { test_err("unexpected compress type, wanted %d, got %d", BTRFS_COMPRESS_ZLIB, em->compress_type); goto out; } offset = em->start + em->len; free_extent_map(em); /* A hole between regular extents but no hole extent */ em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset + 6, sectorsize); if (IS_ERR(em)) { test_err("got an error when we shouldn't have"); goto out; } if (em->block_start >= EXTENT_MAP_LAST_BYTE) { test_err("expected a real extent, got %llu", em->block_start); goto out; } if (em->start != offset || em->len != sectorsize) { test_err( "unexpected extent wanted start %llu len %u, got start %llu len %llu", offset, sectorsize, em->start, em->len); goto out; } if (em->flags != 0) { test_err("unexpected flags set, want 0 have %lu", em->flags); goto out; } if (em->orig_start != em->start) { test_err("wrong orig offset, want %llu, have %llu", em->start, em->orig_start); goto out; } offset = em->start + em->len; free_extent_map(em); em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, SZ_4M); if (IS_ERR(em)) { test_err("got an error when we shouldn't have"); goto out; } if (em->block_start != EXTENT_MAP_HOLE) { test_err("expected a hole extent, got %llu", em->block_start); goto out; } /* * Currently we just return a length that we requested rather than the * length of the actual hole, if this changes we'll have to change this * test. */ if (em->start != offset || em->len != 3 * sectorsize) { test_err( "unexpected extent wanted start %llu len %u, got start %llu len %llu", offset, 3 * sectorsize, em->start, em->len); goto out; } if (em->flags != vacancy_only) { test_err("unexpected flags set, want %lu have %lu", vacancy_only, em->flags); goto out; } if (em->orig_start != em->start) { test_err("wrong orig offset, want %llu, have %llu", em->start, em->orig_start); goto out; } offset = em->start + em->len; free_extent_map(em); em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize); if (IS_ERR(em)) { test_err("got an error when we shouldn't have"); goto out; } if (em->block_start >= EXTENT_MAP_LAST_BYTE) { test_err("expected a real extent, got %llu", em->block_start); goto out; } if (em->start != offset || em->len != sectorsize) { test_err( "unexpected extent wanted start %llu len %u, got start %llu len %llu", offset, sectorsize, em->start, em->len); goto out; } if (em->flags != 0) { test_err("unexpected flags set, want 0 have %lu", em->flags); goto out; } if (em->orig_start != em->start) { test_err("wrong orig offset, want %llu, have %llu", em->start, em->orig_start); goto out; } ret = 0; out: if (!IS_ERR(em)) free_extent_map(em); iput(inode); btrfs_free_dummy_root(root); btrfs_free_dummy_fs_info(fs_info); return ret; } static int test_hole_first(u32 sectorsize, u32 nodesize) { struct btrfs_fs_info *fs_info = NULL; struct inode *inode = NULL; struct btrfs_root *root = NULL; struct extent_map *em = NULL; int ret = -ENOMEM; test_msg("running hole first btrfs_get_extent test"); inode = btrfs_new_test_inode(); if (!inode) { test_std_err(TEST_ALLOC_INODE); return ret; } fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize); if (!fs_info) { test_std_err(TEST_ALLOC_FS_INFO); goto out; } root = btrfs_alloc_dummy_root(fs_info); if (IS_ERR(root)) { test_std_err(TEST_ALLOC_ROOT); goto out; } root->node = alloc_dummy_extent_buffer(fs_info, nodesize); if (!root->node) { test_std_err(TEST_ALLOC_ROOT); goto out; } btrfs_set_header_nritems(root->node, 0); btrfs_set_header_level(root->node, 0); BTRFS_I(inode)->root = root; ret = -EINVAL; 
/* * Need a blank inode item here just so we don't confuse * btrfs_get_extent. */ insert_inode_item_key(root); insert_extent(root, sectorsize, sectorsize, sectorsize, 0, sectorsize, sectorsize, BTRFS_FILE_EXTENT_REG, 0, 1); em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, 0, 2 * sectorsize); if (IS_ERR(em)) { test_err("got an error when we shouldn't have"); goto out; } if (em->block_start != EXTENT_MAP_HOLE) { test_err("expected a hole, got %llu", em->block_start); goto out; } if (em->start != 0 || em->len != sectorsize) { test_err( "unexpected extent wanted start 0 len %u, got start %llu len %llu", sectorsize, em->start, em->len); goto out; } if (em->flags != vacancy_only) { test_err("wrong flags, wanted %lu, have %lu", vacancy_only, em->flags); goto out; } free_extent_map(em); em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, sectorsize, 2 * sectorsize); if (IS_ERR(em)) { test_err("got an error when we shouldn't have"); goto out; } if (em->block_start != sectorsize) { test_err("expected a real extent, got %llu", em->block_start); goto out; } if (em->start != sectorsize || em->len != sectorsize) { test_err( "unexpected extent wanted start %u len %u, got start %llu len %llu", sectorsize, sectorsize, em->start, em->len); goto out; } if (em->flags != 0) { test_err("unexpected flags set, wanted 0 got %lu", em->flags); goto out; } ret = 0; out: if (!IS_ERR(em)) free_extent_map(em); iput(inode); btrfs_free_dummy_root(root); btrfs_free_dummy_fs_info(fs_info); return ret; } static int test_extent_accounting(u32 sectorsize, u32 nodesize) { struct btrfs_fs_info *fs_info = NULL; struct inode *inode = NULL; struct btrfs_root *root = NULL; int ret = -ENOMEM; test_msg("running outstanding_extents tests"); inode = btrfs_new_test_inode(); if (!inode) { test_std_err(TEST_ALLOC_INODE); return ret; } fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize); if (!fs_info) { test_std_err(TEST_ALLOC_FS_INFO); goto out; } root = btrfs_alloc_dummy_root(fs_info); if (IS_ERR(root)) { test_std_err(TEST_ALLOC_ROOT); goto out; } BTRFS_I(inode)->root = root; /* [BTRFS_MAX_EXTENT_SIZE] */ ret = btrfs_set_extent_delalloc(BTRFS_I(inode), 0, BTRFS_MAX_EXTENT_SIZE - 1, 0, NULL); if (ret) { test_err("btrfs_set_extent_delalloc returned %d", ret); goto out; } if (BTRFS_I(inode)->outstanding_extents != 1) { ret = -EINVAL; test_err("miscount, wanted 1, got %u", BTRFS_I(inode)->outstanding_extents); goto out; } /* [BTRFS_MAX_EXTENT_SIZE][sectorsize] */ ret = btrfs_set_extent_delalloc(BTRFS_I(inode), BTRFS_MAX_EXTENT_SIZE, BTRFS_MAX_EXTENT_SIZE + sectorsize - 1, 0, NULL); if (ret) { test_err("btrfs_set_extent_delalloc returned %d", ret); goto out; } if (BTRFS_I(inode)->outstanding_extents != 2) { ret = -EINVAL; test_err("miscount, wanted 2, got %u", BTRFS_I(inode)->outstanding_extents); goto out; } /* [BTRFS_MAX_EXTENT_SIZE/2][sectorsize HOLE][the rest] */ ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, BTRFS_MAX_EXTENT_SIZE >> 1, (BTRFS_MAX_EXTENT_SIZE >> 1) + sectorsize - 1, EXTENT_DELALLOC | EXTENT_DELALLOC_NEW | EXTENT_UPTODATE, NULL); if (ret) { test_err("clear_extent_bit returned %d", ret); goto out; } if (BTRFS_I(inode)->outstanding_extents != 2) { ret = -EINVAL; test_err("miscount, wanted 2, got %u", BTRFS_I(inode)->outstanding_extents); goto out; } /* [BTRFS_MAX_EXTENT_SIZE][sectorsize] */ ret = btrfs_set_extent_delalloc(BTRFS_I(inode), BTRFS_MAX_EXTENT_SIZE >> 1, (BTRFS_MAX_EXTENT_SIZE >> 1) + sectorsize - 1, 0, NULL); if (ret) { test_err("btrfs_set_extent_delalloc returned %d", ret); goto out; } if 
(BTRFS_I(inode)->outstanding_extents != 2) { ret = -EINVAL; test_err("miscount, wanted 2, got %u", BTRFS_I(inode)->outstanding_extents); goto out; } /* * [BTRFS_MAX_EXTENT_SIZE+sectorsize][sectorsize HOLE][BTRFS_MAX_EXTENT_SIZE+sectorsize] */ ret = btrfs_set_extent_delalloc(BTRFS_I(inode), BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize, (BTRFS_MAX_EXTENT_SIZE << 1) + 3 * sectorsize - 1, 0, NULL); if (ret) { test_err("btrfs_set_extent_delalloc returned %d", ret); goto out; } if (BTRFS_I(inode)->outstanding_extents != 4) { ret = -EINVAL; test_err("miscount, wanted 4, got %u", BTRFS_I(inode)->outstanding_extents); goto out; } /* * [BTRFS_MAX_EXTENT_SIZE+sectorsize][sectorsize][BTRFS_MAX_EXTENT_SIZE+sectorsize] */ ret = btrfs_set_extent_delalloc(BTRFS_I(inode), BTRFS_MAX_EXTENT_SIZE + sectorsize, BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, 0, NULL); if (ret) { test_err("btrfs_set_extent_delalloc returned %d", ret); goto out; } if (BTRFS_I(inode)->outstanding_extents != 3) { ret = -EINVAL; test_err("miscount, wanted 3, got %u", BTRFS_I(inode)->outstanding_extents); goto out; } /* [BTRFS_MAX_EXTENT_SIZE+4k][4K HOLE][BTRFS_MAX_EXTENT_SIZE+4k] */ ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, BTRFS_MAX_EXTENT_SIZE + sectorsize, BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, EXTENT_DELALLOC | EXTENT_DELALLOC_NEW | EXTENT_UPTODATE, NULL); if (ret) { test_err("clear_extent_bit returned %d", ret); goto out; } if (BTRFS_I(inode)->outstanding_extents != 4) { ret = -EINVAL; test_err("miscount, wanted 4, got %u", BTRFS_I(inode)->outstanding_extents); goto out; } /* * Refill the hole again just for good measure, because I thought it * might fail and I'd rather satisfy my paranoia at this point. */ ret = btrfs_set_extent_delalloc(BTRFS_I(inode), BTRFS_MAX_EXTENT_SIZE + sectorsize, BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, 0, NULL); if (ret) { test_err("btrfs_set_extent_delalloc returned %d", ret); goto out; } if (BTRFS_I(inode)->outstanding_extents != 3) { ret = -EINVAL; test_err("miscount, wanted 3, got %u", BTRFS_I(inode)->outstanding_extents); goto out; } /* Empty */ ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1, EXTENT_DELALLOC | EXTENT_DELALLOC_NEW | EXTENT_UPTODATE, NULL); if (ret) { test_err("clear_extent_bit returned %d", ret); goto out; } if (BTRFS_I(inode)->outstanding_extents) { ret = -EINVAL; test_err("miscount, wanted 0, got %u", BTRFS_I(inode)->outstanding_extents); goto out; } ret = 0; out: if (ret) clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1, EXTENT_DELALLOC | EXTENT_DELALLOC_NEW | EXTENT_UPTODATE, NULL); iput(inode); btrfs_free_dummy_root(root); btrfs_free_dummy_fs_info(fs_info); return ret; } int btrfs_test_inodes(u32 sectorsize, u32 nodesize) { int ret; test_msg("running inode tests"); set_bit(EXTENT_FLAG_COMPRESSED, &compressed_only); set_bit(EXTENT_FLAG_PREALLOC, &prealloc_only); ret = test_btrfs_get_extent(sectorsize, nodesize); if (ret) return ret; ret = test_hole_first(sectorsize, nodesize); if (ret) return ret; return test_extent_accounting(sectorsize, nodesize); }
linux-master
fs/btrfs/tests/inode-tests.c
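test_btrfs_get_extent() in inode-tests.c above walks the file extent layout created by setup_file_extents() one mapping at a time, repeating the same lookup-and-compare sequence for each expected extent. A minimal sketch of that repeated step follows; the helper check_one_em() is made up for illustration, and unlike the real test (which distinguishes EXTENT_MAP_HOLE, EXTENT_MAP_INLINE and values below EXTENT_MAP_LAST_BYTE rather than always matching an exact block start) it compares block_start directly.

/* Hypothetical helper mirroring one iteration of test_btrfs_get_extent(). */
static int check_one_em(struct inode *inode, u64 *offset,
			u64 want_block_start, u64 want_len,
			unsigned long want_flags)
{
	struct extent_map *em;
	int ret = -EINVAL;

	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, *offset, want_len);
	if (IS_ERR(em))
		return PTR_ERR(em);

	/* The fields the selftest cares about: mapping target, range, flags. */
	if (em->block_start != want_block_start)
		goto out;
	if (em->start != *offset || em->len != want_len)
		goto out;
	if (em->flags != want_flags)
		goto out;

	/* The next lookup starts right behind this mapping. */
	*offset = em->start + em->len;
	ret = 0;
out:
	free_extent_map(em);
	return ret;
}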
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2013 Fusion IO. All rights reserved. */ #include <linux/pagemap.h> #include <linux/pagevec.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/sizes.h> #include "btrfs-tests.h" #include "../ctree.h" #include "../extent_io.h" #include "../btrfs_inode.h" #define PROCESS_UNLOCK (1 << 0) #define PROCESS_RELEASE (1 << 1) #define PROCESS_TEST_LOCKED (1 << 2) static noinline int process_page_range(struct inode *inode, u64 start, u64 end, unsigned long flags) { int ret; struct folio_batch fbatch; unsigned long index = start >> PAGE_SHIFT; unsigned long end_index = end >> PAGE_SHIFT; int i; int count = 0; int loops = 0; folio_batch_init(&fbatch); while (index <= end_index) { ret = filemap_get_folios_contig(inode->i_mapping, &index, end_index, &fbatch); for (i = 0; i < ret; i++) { struct folio *folio = fbatch.folios[i]; if (flags & PROCESS_TEST_LOCKED && !folio_test_locked(folio)) count++; if (flags & PROCESS_UNLOCK && folio_test_locked(folio)) folio_unlock(folio); if (flags & PROCESS_RELEASE) folio_put(folio); } folio_batch_release(&fbatch); cond_resched(); loops++; if (loops > 100000) { printk(KERN_ERR "stuck in a loop, start %llu, end %llu, ret %d\n", start, end, ret); break; } } return count; } #define STATE_FLAG_STR_LEN 256 #define PRINT_ONE_FLAG(state, dest, cur, name) \ ({ \ if (state->state & EXTENT_##name) \ cur += scnprintf(dest + cur, STATE_FLAG_STR_LEN - cur, \ "%s" #name, cur == 0 ? "" : "|"); \ }) static void extent_flag_to_str(const struct extent_state *state, char *dest) { int cur = 0; dest[0] = 0; PRINT_ONE_FLAG(state, dest, cur, DIRTY); PRINT_ONE_FLAG(state, dest, cur, UPTODATE); PRINT_ONE_FLAG(state, dest, cur, LOCKED); PRINT_ONE_FLAG(state, dest, cur, NEW); PRINT_ONE_FLAG(state, dest, cur, DELALLOC); PRINT_ONE_FLAG(state, dest, cur, DEFRAG); PRINT_ONE_FLAG(state, dest, cur, BOUNDARY); PRINT_ONE_FLAG(state, dest, cur, NODATASUM); PRINT_ONE_FLAG(state, dest, cur, CLEAR_META_RESV); PRINT_ONE_FLAG(state, dest, cur, NEED_WAIT); PRINT_ONE_FLAG(state, dest, cur, NORESERVE); PRINT_ONE_FLAG(state, dest, cur, QGROUP_RESERVED); PRINT_ONE_FLAG(state, dest, cur, CLEAR_DATA_RESV); } static void dump_extent_io_tree(const struct extent_io_tree *tree) { struct rb_node *node; char flags_str[STATE_FLAG_STR_LEN]; node = rb_first(&tree->state); test_msg("io tree content:"); while (node) { struct extent_state *state; state = rb_entry(node, struct extent_state, rb_node); extent_flag_to_str(state, flags_str); test_msg(" start=%llu len=%llu flags=%s", state->start, state->end + 1 - state->start, flags_str); node = rb_next(node); } } static int test_find_delalloc(u32 sectorsize) { struct inode *inode; struct extent_io_tree *tmp; struct page *page; struct page *locked_page = NULL; unsigned long index = 0; /* In this test we need at least 2 file extents at its maximum size */ u64 max_bytes = BTRFS_MAX_EXTENT_SIZE; u64 total_dirty = 2 * max_bytes; u64 start, end, test_start; bool found; int ret = -EINVAL; test_msg("running find delalloc tests"); inode = btrfs_new_test_inode(); if (!inode) { test_std_err(TEST_ALLOC_INODE); return -ENOMEM; } tmp = &BTRFS_I(inode)->io_tree; /* * Passing NULL as we don't have fs_info but tracepoints are not used * at this point */ extent_io_tree_init(NULL, tmp, IO_TREE_SELFTEST); /* * First go through and create and mark all of our pages dirty, we pin * everything to make sure our pages don't get evicted and screw up our * test. 
*/ for (index = 0; index < (total_dirty >> PAGE_SHIFT); index++) { page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL); if (!page) { test_err("failed to allocate test page"); ret = -ENOMEM; goto out; } SetPageDirty(page); if (index) { unlock_page(page); } else { get_page(page); locked_page = page; } } /* Test this scenario * |--- delalloc ---| * |--- search ---| */ set_extent_bit(tmp, 0, sectorsize - 1, EXTENT_DELALLOC, NULL); start = 0; end = start + PAGE_SIZE - 1; found = find_lock_delalloc_range(inode, locked_page, &start, &end); if (!found) { test_err("should have found at least one delalloc"); goto out_bits; } if (start != 0 || end != (sectorsize - 1)) { test_err("expected start 0 end %u, got start %llu end %llu", sectorsize - 1, start, end); goto out_bits; } unlock_extent(tmp, start, end, NULL); unlock_page(locked_page); put_page(locked_page); /* * Test this scenario * * |--- delalloc ---| * |--- search ---| */ test_start = SZ_64M; locked_page = find_lock_page(inode->i_mapping, test_start >> PAGE_SHIFT); if (!locked_page) { test_err("couldn't find the locked page"); goto out_bits; } set_extent_bit(tmp, sectorsize, max_bytes - 1, EXTENT_DELALLOC, NULL); start = test_start; end = start + PAGE_SIZE - 1; found = find_lock_delalloc_range(inode, locked_page, &start, &end); if (!found) { test_err("couldn't find delalloc in our range"); goto out_bits; } if (start != test_start || end != max_bytes - 1) { test_err("expected start %llu end %llu, got start %llu, end %llu", test_start, max_bytes - 1, start, end); goto out_bits; } if (process_page_range(inode, start, end, PROCESS_TEST_LOCKED | PROCESS_UNLOCK)) { test_err("there were unlocked pages in the range"); goto out_bits; } unlock_extent(tmp, start, end, NULL); /* locked_page was unlocked above */ put_page(locked_page); /* * Test this scenario * |--- delalloc ---| * |--- search ---| */ test_start = max_bytes + sectorsize; locked_page = find_lock_page(inode->i_mapping, test_start >> PAGE_SHIFT); if (!locked_page) { test_err("couldn't find the locked page"); goto out_bits; } start = test_start; end = start + PAGE_SIZE - 1; found = find_lock_delalloc_range(inode, locked_page, &start, &end); if (found) { test_err("found range when we shouldn't have"); goto out_bits; } if (end != test_start + PAGE_SIZE - 1) { test_err("did not return the proper end offset"); goto out_bits; } /* * Test this scenario * [------- delalloc -------| * [max_bytes]|-- search--| * * We are re-using our test_start from above since it works out well. */ set_extent_bit(tmp, max_bytes, total_dirty - 1, EXTENT_DELALLOC, NULL); start = test_start; end = start + PAGE_SIZE - 1; found = find_lock_delalloc_range(inode, locked_page, &start, &end); if (!found) { test_err("didn't find our range"); goto out_bits; } if (start != test_start || end != total_dirty - 1) { test_err("expected start %llu end %llu, got start %llu end %llu", test_start, total_dirty - 1, start, end); goto out_bits; } if (process_page_range(inode, start, end, PROCESS_TEST_LOCKED | PROCESS_UNLOCK)) { test_err("pages in range were not all locked"); goto out_bits; } unlock_extent(tmp, start, end, NULL); /* * Now to test where we run into a page that is no longer dirty in the * range we want to find. 
*/ page = find_get_page(inode->i_mapping, (max_bytes + SZ_1M) >> PAGE_SHIFT); if (!page) { test_err("couldn't find our page"); goto out_bits; } ClearPageDirty(page); put_page(page); /* We unlocked it in the previous test */ lock_page(locked_page); start = test_start; end = start + PAGE_SIZE - 1; /* * Currently if we fail to find dirty pages in the delalloc range we * will adjust max_bytes down to PAGE_SIZE and then re-search. If * this changes at any point in the future we will need to fix this * tests expected behavior. */ found = find_lock_delalloc_range(inode, locked_page, &start, &end); if (!found) { test_err("didn't find our range"); goto out_bits; } if (start != test_start && end != test_start + PAGE_SIZE - 1) { test_err("expected start %llu end %llu, got start %llu end %llu", test_start, test_start + PAGE_SIZE - 1, start, end); goto out_bits; } if (process_page_range(inode, start, end, PROCESS_TEST_LOCKED | PROCESS_UNLOCK)) { test_err("pages in range were not all locked"); goto out_bits; } ret = 0; out_bits: if (ret) dump_extent_io_tree(tmp); clear_extent_bits(tmp, 0, total_dirty - 1, (unsigned)-1); out: if (locked_page) put_page(locked_page); process_page_range(inode, 0, total_dirty - 1, PROCESS_UNLOCK | PROCESS_RELEASE); iput(inode); return ret; } static int check_eb_bitmap(unsigned long *bitmap, struct extent_buffer *eb) { unsigned long i; for (i = 0; i < eb->len * BITS_PER_BYTE; i++) { int bit, bit1; bit = !!test_bit(i, bitmap); bit1 = !!extent_buffer_test_bit(eb, 0, i); if (bit1 != bit) { u8 has; u8 expect; read_extent_buffer(eb, &has, i / BITS_PER_BYTE, 1); expect = bitmap_get_value8(bitmap, ALIGN(i, BITS_PER_BYTE)); test_err( "bits do not match, start byte 0 bit %lu, byte %lu has 0x%02x expect 0x%02x", i, i / BITS_PER_BYTE, has, expect); return -EINVAL; } bit1 = !!extent_buffer_test_bit(eb, i / BITS_PER_BYTE, i % BITS_PER_BYTE); if (bit1 != bit) { u8 has; u8 expect; read_extent_buffer(eb, &has, i / BITS_PER_BYTE, 1); expect = bitmap_get_value8(bitmap, ALIGN(i, BITS_PER_BYTE)); test_err( "bits do not match, start byte %lu bit %lu, byte %lu has 0x%02x expect 0x%02x", i / BITS_PER_BYTE, i % BITS_PER_BYTE, i / BITS_PER_BYTE, has, expect); return -EINVAL; } } return 0; } static int test_bitmap_set(const char *name, unsigned long *bitmap, struct extent_buffer *eb, unsigned long byte_start, unsigned long bit_start, unsigned long bit_len) { int ret; bitmap_set(bitmap, byte_start * BITS_PER_BYTE + bit_start, bit_len); extent_buffer_bitmap_set(eb, byte_start, bit_start, bit_len); ret = check_eb_bitmap(bitmap, eb); if (ret < 0) test_err("%s test failed", name); return ret; } static int test_bitmap_clear(const char *name, unsigned long *bitmap, struct extent_buffer *eb, unsigned long byte_start, unsigned long bit_start, unsigned long bit_len) { int ret; bitmap_clear(bitmap, byte_start * BITS_PER_BYTE + bit_start, bit_len); extent_buffer_bitmap_clear(eb, byte_start, bit_start, bit_len); ret = check_eb_bitmap(bitmap, eb); if (ret < 0) test_err("%s test failed", name); return ret; } static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb) { unsigned long i, j; unsigned long byte_len = eb->len; u32 x; int ret; ret = test_bitmap_clear("clear all run 1", bitmap, eb, 0, 0, byte_len * BITS_PER_BYTE); if (ret < 0) return ret; ret = test_bitmap_set("set all", bitmap, eb, 0, 0, byte_len * BITS_PER_BYTE); if (ret < 0) return ret; ret = test_bitmap_clear("clear all run 2", bitmap, eb, 0, 0, byte_len * BITS_PER_BYTE); if (ret < 0) return ret; ret = test_bitmap_set("same byte set", 
bitmap, eb, 0, 2, 4); if (ret < 0) return ret; ret = test_bitmap_clear("same byte partial clear", bitmap, eb, 0, 4, 1); if (ret < 0) return ret; ret = test_bitmap_set("cross byte set", bitmap, eb, 2, 4, 8); if (ret < 0) return ret; ret = test_bitmap_set("cross multi byte set", bitmap, eb, 4, 4, 24); if (ret < 0) return ret; ret = test_bitmap_clear("cross byte clear", bitmap, eb, 2, 6, 4); if (ret < 0) return ret; ret = test_bitmap_clear("cross multi byte clear", bitmap, eb, 4, 6, 20); if (ret < 0) return ret; /* Straddling pages test */ if (byte_len > PAGE_SIZE) { ret = test_bitmap_set("cross page set", bitmap, eb, PAGE_SIZE - sizeof(long) / 2, 0, sizeof(long) * BITS_PER_BYTE); if (ret < 0) return ret; ret = test_bitmap_set("cross page set all", bitmap, eb, 0, 0, byte_len * BITS_PER_BYTE); if (ret < 0) return ret; ret = test_bitmap_clear("cross page clear", bitmap, eb, PAGE_SIZE - sizeof(long) / 2, 0, sizeof(long) * BITS_PER_BYTE); if (ret < 0) return ret; } /* * Generate a wonky pseudo-random bit pattern for the sake of not using * something repetitive that could miss some hypothetical off-by-n bug. */ x = 0; ret = test_bitmap_clear("clear all run 3", bitmap, eb, 0, 0, byte_len * BITS_PER_BYTE); if (ret < 0) return ret; for (i = 0; i < byte_len * BITS_PER_BYTE / 32; i++) { x = (0x19660dULL * (u64)x + 0x3c6ef35fULL) & 0xffffffffU; for (j = 0; j < 32; j++) { if (x & (1U << j)) { bitmap_set(bitmap, i * 32 + j, 1); extent_buffer_bitmap_set(eb, 0, i * 32 + j, 1); } } } ret = check_eb_bitmap(bitmap, eb); if (ret) { test_err("random bit pattern failed"); return ret; } return 0; } static int test_eb_bitmaps(u32 sectorsize, u32 nodesize) { struct btrfs_fs_info *fs_info; unsigned long *bitmap = NULL; struct extent_buffer *eb = NULL; int ret; test_msg("running extent buffer bitmap tests"); fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize); if (!fs_info) { test_std_err(TEST_ALLOC_FS_INFO); return -ENOMEM; } bitmap = kmalloc(nodesize, GFP_KERNEL); if (!bitmap) { test_err("couldn't allocate test bitmap"); ret = -ENOMEM; goto out; } eb = __alloc_dummy_extent_buffer(fs_info, 0, nodesize); if (!eb) { test_std_err(TEST_ALLOC_ROOT); ret = -ENOMEM; goto out; } ret = __test_eb_bitmaps(bitmap, eb); if (ret) goto out; free_extent_buffer(eb); /* * Test again for case where the tree block is sectorsize aligned but * not nodesize aligned. 
*/ eb = __alloc_dummy_extent_buffer(fs_info, sectorsize, nodesize); if (!eb) { test_std_err(TEST_ALLOC_ROOT); ret = -ENOMEM; goto out; } ret = __test_eb_bitmaps(bitmap, eb); out: free_extent_buffer(eb); kfree(bitmap); btrfs_free_dummy_fs_info(fs_info); return ret; } static int test_find_first_clear_extent_bit(void) { struct extent_io_tree tree; u64 start, end; int ret = -EINVAL; test_msg("running find_first_clear_extent_bit test"); extent_io_tree_init(NULL, &tree, IO_TREE_SELFTEST); /* Test correct handling of empty tree */ find_first_clear_extent_bit(&tree, 0, &start, &end, CHUNK_TRIMMED); if (start != 0 || end != -1) { test_err( "error getting a range from completely empty tree: start %llu end %llu", start, end); goto out; } /* * Set 1M-4M alloc/discard and 32M-64M thus leaving a hole between * 4M-32M */ set_extent_bit(&tree, SZ_1M, SZ_4M - 1, CHUNK_TRIMMED | CHUNK_ALLOCATED, NULL); find_first_clear_extent_bit(&tree, SZ_512K, &start, &end, CHUNK_TRIMMED | CHUNK_ALLOCATED); if (start != 0 || end != SZ_1M - 1) { test_err("error finding beginning range: start %llu end %llu", start, end); goto out; } /* Now add 32M-64M so that we have a hole between 4M-32M */ set_extent_bit(&tree, SZ_32M, SZ_64M - 1, CHUNK_TRIMMED | CHUNK_ALLOCATED, NULL); /* * Request first hole starting at 12M, we should get 4M-32M */ find_first_clear_extent_bit(&tree, 12 * SZ_1M, &start, &end, CHUNK_TRIMMED | CHUNK_ALLOCATED); if (start != SZ_4M || end != SZ_32M - 1) { test_err("error finding trimmed range: start %llu end %llu", start, end); goto out; } /* * Search in the middle of allocated range, should get the next one * available, which happens to be unallocated -> 4M-32M */ find_first_clear_extent_bit(&tree, SZ_2M, &start, &end, CHUNK_TRIMMED | CHUNK_ALLOCATED); if (start != SZ_4M || end != SZ_32M - 1) { test_err("error finding next unalloc range: start %llu end %llu", start, end); goto out; } /* * Set 64M-72M with CHUNK_ALLOC flag, then search for CHUNK_TRIMMED flag * being unset in this range, we should get the entry in range 64M-72M */ set_extent_bit(&tree, SZ_64M, SZ_64M + SZ_8M - 1, CHUNK_ALLOCATED, NULL); find_first_clear_extent_bit(&tree, SZ_64M + SZ_1M, &start, &end, CHUNK_TRIMMED); if (start != SZ_64M || end != SZ_64M + SZ_8M - 1) { test_err("error finding exact range: start %llu end %llu", start, end); goto out; } find_first_clear_extent_bit(&tree, SZ_64M - SZ_8M, &start, &end, CHUNK_TRIMMED); /* * Search in the middle of set range whose immediate neighbour doesn't * have the bits set so it must be returned */ if (start != SZ_64M || end != SZ_64M + SZ_8M - 1) { test_err("error finding next alloc range: start %llu end %llu", start, end); goto out; } /* * Search beyond any known range, shall return after last known range * and end should be -1 */ find_first_clear_extent_bit(&tree, -1, &start, &end, CHUNK_TRIMMED); if (start != SZ_64M + SZ_8M || end != -1) { test_err( "error handling beyond end of range search: start %llu end %llu", start, end); goto out; } ret = 0; out: if (ret) dump_extent_io_tree(&tree); clear_extent_bits(&tree, 0, (u64)-1, CHUNK_TRIMMED | CHUNK_ALLOCATED); return ret; } static void dump_eb_and_memory_contents(struct extent_buffer *eb, void *memory, const char *test_name) { for (int i = 0; i < eb->len; i++) { struct page *page = eb->pages[i >> PAGE_SHIFT]; void *addr = page_address(page) + offset_in_page(i); if (memcmp(addr, memory + i, 1) != 0) { test_err("%s failed", test_name); test_err("eb and memory diffs at byte %u, eb has 0x%02x memory has 0x%02x", i, *(u8 *)addr, *(u8 *)(memory + 
i)); return; } } } static int verify_eb_and_memory(struct extent_buffer *eb, void *memory, const char *test_name) { for (int i = 0; i < (eb->len >> PAGE_SHIFT); i++) { void *eb_addr = page_address(eb->pages[i]); if (memcmp(memory + (i << PAGE_SHIFT), eb_addr, PAGE_SIZE) != 0) { dump_eb_and_memory_contents(eb, memory, test_name); return -EUCLEAN; } } return 0; } /* * Init both memory and extent buffer contents to the same randomly generated * contents. */ static void init_eb_and_memory(struct extent_buffer *eb, void *memory) { get_random_bytes(memory, eb->len); write_extent_buffer(eb, memory, 0, eb->len); } static int test_eb_mem_ops(u32 sectorsize, u32 nodesize) { struct btrfs_fs_info *fs_info; struct extent_buffer *eb = NULL; void *memory = NULL; int ret; test_msg("running extent buffer memory operation tests"); fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize); if (!fs_info) { test_std_err(TEST_ALLOC_FS_INFO); return -ENOMEM; } memory = kvzalloc(nodesize, GFP_KERNEL); if (!memory) { test_err("failed to allocate memory"); ret = -ENOMEM; goto out; } eb = __alloc_dummy_extent_buffer(fs_info, SZ_1M, nodesize); if (!eb) { test_std_err(TEST_ALLOC_EXTENT_BUFFER); ret = -ENOMEM; goto out; } init_eb_and_memory(eb, memory); ret = verify_eb_and_memory(eb, memory, "full eb write"); if (ret < 0) goto out; memcpy(memory, memory + 16, 16); memcpy_extent_buffer(eb, 0, 16, 16); ret = verify_eb_and_memory(eb, memory, "same page non-overlapping memcpy 1"); if (ret < 0) goto out; memcpy(memory, memory + 2048, 16); memcpy_extent_buffer(eb, 0, 2048, 16); ret = verify_eb_and_memory(eb, memory, "same page non-overlapping memcpy 2"); if (ret < 0) goto out; memcpy(memory, memory + 2048, 2048); memcpy_extent_buffer(eb, 0, 2048, 2048); ret = verify_eb_and_memory(eb, memory, "same page non-overlapping memcpy 3"); if (ret < 0) goto out; memmove(memory + 512, memory + 256, 512); memmove_extent_buffer(eb, 512, 256, 512); ret = verify_eb_and_memory(eb, memory, "same page overlapping memcpy 1"); if (ret < 0) goto out; memmove(memory + 2048, memory + 512, 2048); memmove_extent_buffer(eb, 2048, 512, 2048); ret = verify_eb_and_memory(eb, memory, "same page overlapping memcpy 2"); if (ret < 0) goto out; memmove(memory + 512, memory + 2048, 2048); memmove_extent_buffer(eb, 512, 2048, 2048); ret = verify_eb_and_memory(eb, memory, "same page overlapping memcpy 3"); if (ret < 0) goto out; if (nodesize > PAGE_SIZE) { memcpy(memory, memory + 4096 - 128, 256); memcpy_extent_buffer(eb, 0, 4096 - 128, 256); ret = verify_eb_and_memory(eb, memory, "cross page non-overlapping memcpy 1"); if (ret < 0) goto out; memcpy(memory + 4096 - 128, memory + 4096 + 128, 256); memcpy_extent_buffer(eb, 4096 - 128, 4096 + 128, 256); ret = verify_eb_and_memory(eb, memory, "cross page non-overlapping memcpy 2"); if (ret < 0) goto out; memmove(memory + 4096 - 128, memory + 4096 - 64, 256); memmove_extent_buffer(eb, 4096 - 128, 4096 - 64, 256); ret = verify_eb_and_memory(eb, memory, "cross page overlapping memcpy 1"); if (ret < 0) goto out; memmove(memory + 4096 - 64, memory + 4096 - 128, 256); memmove_extent_buffer(eb, 4096 - 64, 4096 - 128, 256); ret = verify_eb_and_memory(eb, memory, "cross page overlapping memcpy 2"); if (ret < 0) goto out; } out: free_extent_buffer(eb); kvfree(memory); btrfs_free_dummy_fs_info(fs_info); return ret; } int btrfs_test_extent_io(u32 sectorsize, u32 nodesize) { int ret; test_msg("running extent I/O tests"); ret = test_find_delalloc(sectorsize); if (ret) goto out; ret = test_find_first_clear_extent_bit(); if (ret) 
goto out; ret = test_eb_bitmaps(sectorsize, nodesize); if (ret) goto out; ret = test_eb_mem_ops(sectorsize, nodesize); out: return ret; }
linux-master
fs/btrfs/tests/extent-io-tests.c
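The last stage of __test_eb_bitmaps() in extent-io-tests.c drives identical bit writes into a reference bitmap and into the extent buffer from a small 32-bit linear congruential generator, so that check_eb_bitmap() can compare the two afterwards. The sketch below isolates that fill loop; the function name fill_random_pattern() is made up, but the LCG constants, the bitmap_set() and extent_buffer_bitmap_set() calls and the iteration bounds are the ones used by the test.

/* Hypothetical standalone version of the pseudo-random fill in the test. */
static void fill_random_pattern(unsigned long *bitmap, struct extent_buffer *eb)
{
	unsigned long i, j;
	u32 x = 0;

	for (i = 0; i < eb->len * BITS_PER_BYTE / 32; i++) {
		/* Same LCG step as the selftest. */
		x = (0x19660dULL * (u64)x + 0x3c6ef35fULL) & 0xffffffffU;
		for (j = 0; j < 32; j++) {
			if (x & (1U << j)) {
				/* Mirror the bit into both representations. */
				bitmap_set(bitmap, i * 32 + j, 1);
				extent_buffer_bitmap_set(eb, 0, i * 32 + j, 1);
			}
		}
	}
}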
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2013 Fusion IO. All rights reserved. */ #include <linux/fs.h> #include <linux/mount.h> #include <linux/pseudo_fs.h> #include <linux/magic.h> #include "btrfs-tests.h" #include "../ctree.h" #include "../free-space-cache.h" #include "../free-space-tree.h" #include "../transaction.h" #include "../volumes.h" #include "../disk-io.h" #include "../qgroup.h" #include "../block-group.h" #include "../fs.h" static struct vfsmount *test_mnt = NULL; const char *test_error[] = { [TEST_ALLOC_FS_INFO] = "cannot allocate fs_info", [TEST_ALLOC_ROOT] = "cannot allocate root", [TEST_ALLOC_EXTENT_BUFFER] = "cannot extent buffer", [TEST_ALLOC_PATH] = "cannot allocate path", [TEST_ALLOC_INODE] = "cannot allocate inode", [TEST_ALLOC_BLOCK_GROUP] = "cannot allocate block group", [TEST_ALLOC_EXTENT_MAP] = "cannot allocate extent map", }; static const struct super_operations btrfs_test_super_ops = { .alloc_inode = btrfs_alloc_inode, .destroy_inode = btrfs_test_destroy_inode, }; static int btrfs_test_init_fs_context(struct fs_context *fc) { struct pseudo_fs_context *ctx = init_pseudo(fc, BTRFS_TEST_MAGIC); if (!ctx) return -ENOMEM; ctx->ops = &btrfs_test_super_ops; return 0; } static struct file_system_type test_type = { .name = "btrfs_test_fs", .init_fs_context = btrfs_test_init_fs_context, .kill_sb = kill_anon_super, }; struct inode *btrfs_new_test_inode(void) { struct inode *inode; inode = new_inode(test_mnt->mnt_sb); if (!inode) return NULL; inode->i_mode = S_IFREG; inode->i_ino = BTRFS_FIRST_FREE_OBJECTID; BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY; BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID; BTRFS_I(inode)->location.offset = 0; inode_init_owner(&nop_mnt_idmap, inode, NULL, S_IFREG); return inode; } static int btrfs_init_test_fs(void) { int ret; ret = register_filesystem(&test_type); if (ret) { printk(KERN_ERR "btrfs: cannot register test file system\n"); return ret; } test_mnt = kern_mount(&test_type); if (IS_ERR(test_mnt)) { printk(KERN_ERR "btrfs: cannot mount test file system\n"); unregister_filesystem(&test_type); return PTR_ERR(test_mnt); } return 0; } static void btrfs_destroy_test_fs(void) { kern_unmount(test_mnt); unregister_filesystem(&test_type); } struct btrfs_device *btrfs_alloc_dummy_device(struct btrfs_fs_info *fs_info) { struct btrfs_device *dev; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return ERR_PTR(-ENOMEM); extent_io_tree_init(NULL, &dev->alloc_state, 0); INIT_LIST_HEAD(&dev->dev_list); list_add(&dev->dev_list, &fs_info->fs_devices->devices); return dev; } static void btrfs_free_dummy_device(struct btrfs_device *dev) { extent_io_tree_release(&dev->alloc_state); kfree(dev); } struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(u32 nodesize, u32 sectorsize) { struct btrfs_fs_info *fs_info = kzalloc(sizeof(struct btrfs_fs_info), GFP_KERNEL); if (!fs_info) return fs_info; fs_info->fs_devices = kzalloc(sizeof(struct btrfs_fs_devices), GFP_KERNEL); if (!fs_info->fs_devices) { kfree(fs_info); return NULL; } INIT_LIST_HEAD(&fs_info->fs_devices->devices); fs_info->super_copy = kzalloc(sizeof(struct btrfs_super_block), GFP_KERNEL); if (!fs_info->super_copy) { kfree(fs_info->fs_devices); kfree(fs_info); return NULL; } btrfs_init_fs_info(fs_info); fs_info->nodesize = nodesize; fs_info->sectorsize = sectorsize; fs_info->sectorsize_bits = ilog2(sectorsize); set_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state); test_mnt->mnt_sb->s_fs_info = fs_info; return fs_info; } void btrfs_free_dummy_fs_info(struct 
btrfs_fs_info *fs_info) { struct radix_tree_iter iter; void **slot; struct btrfs_device *dev, *tmp; if (!fs_info) return; if (WARN_ON(!test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state))) return; test_mnt->mnt_sb->s_fs_info = NULL; spin_lock(&fs_info->buffer_lock); radix_tree_for_each_slot(slot, &fs_info->buffer_radix, &iter, 0) { struct extent_buffer *eb; eb = radix_tree_deref_slot_protected(slot, &fs_info->buffer_lock); if (!eb) continue; /* Shouldn't happen but that kind of thinking creates CVE's */ if (radix_tree_exception(eb)) { if (radix_tree_deref_retry(eb)) slot = radix_tree_iter_retry(&iter); continue; } slot = radix_tree_iter_resume(slot, &iter); spin_unlock(&fs_info->buffer_lock); free_extent_buffer_stale(eb); spin_lock(&fs_info->buffer_lock); } spin_unlock(&fs_info->buffer_lock); btrfs_mapping_tree_free(&fs_info->mapping_tree); list_for_each_entry_safe(dev, tmp, &fs_info->fs_devices->devices, dev_list) { btrfs_free_dummy_device(dev); } btrfs_free_qgroup_config(fs_info); btrfs_free_fs_roots(fs_info); kfree(fs_info->super_copy); btrfs_check_leaked_roots(fs_info); btrfs_extent_buffer_leak_debug_check(fs_info); kfree(fs_info->fs_devices); kfree(fs_info); } void btrfs_free_dummy_root(struct btrfs_root *root) { if (IS_ERR_OR_NULL(root)) return; /* Will be freed by btrfs_free_fs_roots */ if (WARN_ON(test_bit(BTRFS_ROOT_IN_RADIX, &root->state))) return; btrfs_global_root_delete(root); btrfs_put_root(root); } struct btrfs_block_group * btrfs_alloc_dummy_block_group(struct btrfs_fs_info *fs_info, unsigned long length) { struct btrfs_block_group *cache; cache = kzalloc(sizeof(*cache), GFP_KERNEL); if (!cache) return NULL; cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl), GFP_KERNEL); if (!cache->free_space_ctl) { kfree(cache); return NULL; } cache->start = 0; cache->length = length; cache->full_stripe_len = fs_info->sectorsize; cache->fs_info = fs_info; INIT_LIST_HEAD(&cache->list); INIT_LIST_HEAD(&cache->cluster_list); INIT_LIST_HEAD(&cache->bg_list); btrfs_init_free_space_ctl(cache, cache->free_space_ctl); mutex_init(&cache->free_space_lock); return cache; } void btrfs_free_dummy_block_group(struct btrfs_block_group *cache) { if (!cache) return; btrfs_remove_free_space_cache(cache); kfree(cache->free_space_ctl); kfree(cache); } void btrfs_init_dummy_trans(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info) { memset(trans, 0, sizeof(*trans)); trans->transid = 1; trans->type = __TRANS_DUMMY; trans->fs_info = fs_info; } int btrfs_run_sanity_tests(void) { int ret, i; u32 sectorsize, nodesize; u32 test_sectorsize[] = { PAGE_SIZE, }; ret = btrfs_init_test_fs(); if (ret) return ret; for (i = 0; i < ARRAY_SIZE(test_sectorsize); i++) { sectorsize = test_sectorsize[i]; for (nodesize = sectorsize; nodesize <= BTRFS_MAX_METADATA_BLOCKSIZE; nodesize <<= 1) { pr_info("BTRFS: selftest: sectorsize: %u nodesize: %u\n", sectorsize, nodesize); ret = btrfs_test_free_space_cache(sectorsize, nodesize); if (ret) goto out; ret = btrfs_test_extent_buffer_operations(sectorsize, nodesize); if (ret) goto out; ret = btrfs_test_extent_io(sectorsize, nodesize); if (ret) goto out; ret = btrfs_test_inodes(sectorsize, nodesize); if (ret) goto out; ret = btrfs_test_qgroups(sectorsize, nodesize); if (ret) goto out; ret = btrfs_test_free_space_tree(sectorsize, nodesize); if (ret) goto out; } } ret = btrfs_test_extent_map(); out: btrfs_destroy_test_fs(); return ret; }
linux-master
fs/btrfs/tests/btrfs-tests.c
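The selftest harness above boils down to a fixture plus a two-level parameter sweep: sectorsize comes from a small table and nodesize doubles from sectorsize up to the metadata cap, with any individual test failure aborting the whole run. Below is a minimal userspace sketch of that pattern, not kernel code; FAKE_PAGE_SIZE, FAKE_MAX_NODESIZE and run_one_test() are placeholders standing in for PAGE_SIZE, BTRFS_MAX_METADATA_BLOCKSIZE and the real btrfs selftests.

/*
 * Illustrative, userspace-only sketch of the test-matrix pattern used by
 * btrfs_run_sanity_tests(): set up a shared fixture once, then walk every
 * (sectorsize, nodesize) combination, doubling nodesize up to a cap.
 * The constants and run_one_test() below are placeholders, not kernel APIs.
 */
#include <stdio.h>

#define FAKE_PAGE_SIZE		4096u
#define FAKE_MAX_NODESIZE	65536u	/* stands in for BTRFS_MAX_METADATA_BLOCKSIZE */

/* Placeholder for one selftest, e.g. the free-space-cache or inode tests. */
static int run_one_test(unsigned int sectorsize, unsigned int nodesize)
{
	printf("selftest: sectorsize: %u nodesize: %u\n", sectorsize, nodesize);
	return 0;	/* 0 == pass, non-zero aborts the whole run */
}

int main(void)
{
	unsigned int test_sectorsize[] = { FAKE_PAGE_SIZE };
	unsigned int sectorsize, nodesize;
	size_t i;
	int ret = 0;

	/* A btrfs_init_test_fs() equivalent (fixture setup) would go here. */
	for (i = 0; i < sizeof(test_sectorsize) / sizeof(test_sectorsize[0]); i++) {
		sectorsize = test_sectorsize[i];
		/* nodesize starts at sectorsize and doubles until the cap. */
		for (nodesize = sectorsize; nodesize <= FAKE_MAX_NODESIZE; nodesize <<= 1) {
			ret = run_one_test(sectorsize, nodesize);
			if (ret)
				goto out;
		}
	}
out:
	/* A btrfs_destroy_test_fs() equivalent: tear the fixture down either way. */
	return ret;
}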
// SPDX-License-Identifier: GPL-2.0 /* * (C) 2001 Clemson University and The University of Chicago * * See COPYING in top-level directory. */ #include "protocol.h" #include "orangefs-kernel.h" #include "orangefs-bufmap.h" #include <linux/parser.h> #include <linux/hashtable.h> #include <linux/seq_file.h> /* a cache for orangefs-inode objects (i.e. orangefs inode private data) */ static struct kmem_cache *orangefs_inode_cache; /* list for storing orangefs specific superblocks in use */ LIST_HEAD(orangefs_superblocks); DEFINE_SPINLOCK(orangefs_superblocks_lock); enum { Opt_intr, Opt_acl, Opt_local_lock, Opt_err }; static const match_table_t tokens = { { Opt_acl, "acl" }, { Opt_intr, "intr" }, { Opt_local_lock, "local_lock" }, { Opt_err, NULL } }; uint64_t orangefs_features; static int orangefs_show_options(struct seq_file *m, struct dentry *root) { struct orangefs_sb_info_s *orangefs_sb = ORANGEFS_SB(root->d_sb); if (root->d_sb->s_flags & SB_POSIXACL) seq_puts(m, ",acl"); if (orangefs_sb->flags & ORANGEFS_OPT_INTR) seq_puts(m, ",intr"); if (orangefs_sb->flags & ORANGEFS_OPT_LOCAL_LOCK) seq_puts(m, ",local_lock"); return 0; } static int parse_mount_options(struct super_block *sb, char *options, int silent) { struct orangefs_sb_info_s *orangefs_sb = ORANGEFS_SB(sb); substring_t args[MAX_OPT_ARGS]; char *p; /* * Force any potential flags that might be set from the mount * to zero, ie, initialize to unset. */ sb->s_flags &= ~SB_POSIXACL; orangefs_sb->flags &= ~ORANGEFS_OPT_INTR; orangefs_sb->flags &= ~ORANGEFS_OPT_LOCAL_LOCK; while ((p = strsep(&options, ",")) != NULL) { int token; if (!*p) continue; token = match_token(p, tokens, args); switch (token) { case Opt_acl: sb->s_flags |= SB_POSIXACL; break; case Opt_intr: orangefs_sb->flags |= ORANGEFS_OPT_INTR; break; case Opt_local_lock: orangefs_sb->flags |= ORANGEFS_OPT_LOCAL_LOCK; break; default: goto fail; } } return 0; fail: if (!silent) gossip_err("Error: mount option [%s] is not supported.\n", p); return -EINVAL; } static void orangefs_inode_cache_ctor(void *req) { struct orangefs_inode_s *orangefs_inode = req; inode_init_once(&orangefs_inode->vfs_inode); init_rwsem(&orangefs_inode->xattr_sem); } static struct inode *orangefs_alloc_inode(struct super_block *sb) { struct orangefs_inode_s *orangefs_inode; orangefs_inode = alloc_inode_sb(sb, orangefs_inode_cache, GFP_KERNEL); if (!orangefs_inode) return NULL; /* * We want to clear everything except for rw_semaphore and the * vfs_inode. 
*/ memset(&orangefs_inode->refn.khandle, 0, 16); orangefs_inode->refn.fs_id = ORANGEFS_FS_ID_NULL; orangefs_inode->last_failed_block_index_read = 0; memset(orangefs_inode->link_target, 0, sizeof(orangefs_inode->link_target)); gossip_debug(GOSSIP_SUPER_DEBUG, "orangefs_alloc_inode: allocated %p\n", &orangefs_inode->vfs_inode); return &orangefs_inode->vfs_inode; } static void orangefs_free_inode(struct inode *inode) { struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); struct orangefs_cached_xattr *cx; struct hlist_node *tmp; int i; hash_for_each_safe(orangefs_inode->xattr_cache, i, tmp, cx, node) { hlist_del(&cx->node); kfree(cx); } kmem_cache_free(orangefs_inode_cache, orangefs_inode); } static void orangefs_destroy_inode(struct inode *inode) { struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); gossip_debug(GOSSIP_SUPER_DEBUG, "%s: deallocated %p destroying inode %pU\n", __func__, orangefs_inode, get_khandle_from_ino(inode)); } static int orangefs_write_inode(struct inode *inode, struct writeback_control *wbc) { gossip_debug(GOSSIP_SUPER_DEBUG, "orangefs_write_inode\n"); return orangefs_inode_setattr(inode); } /* * NOTE: information filled in here is typically reflected in the * output of the system command 'df' */ static int orangefs_statfs(struct dentry *dentry, struct kstatfs *buf) { int ret = -ENOMEM; struct orangefs_kernel_op_s *new_op = NULL; int flags = 0; struct super_block *sb = NULL; sb = dentry->d_sb; gossip_debug(GOSSIP_SUPER_DEBUG, "%s: called on sb %p (fs_id is %d)\n", __func__, sb, (int)(ORANGEFS_SB(sb)->fs_id)); new_op = op_alloc(ORANGEFS_VFS_OP_STATFS); if (!new_op) return ret; new_op->upcall.req.statfs.fs_id = ORANGEFS_SB(sb)->fs_id; if (ORANGEFS_SB(sb)->flags & ORANGEFS_OPT_INTR) flags = ORANGEFS_OP_INTERRUPTIBLE; ret = service_operation(new_op, "orangefs_statfs", flags); if (new_op->downcall.status < 0) goto out_op_release; gossip_debug(GOSSIP_SUPER_DEBUG, "%s: got %ld blocks available | " "%ld blocks total | %ld block size | " "%ld files total | %ld files avail\n", __func__, (long)new_op->downcall.resp.statfs.blocks_avail, (long)new_op->downcall.resp.statfs.blocks_total, (long)new_op->downcall.resp.statfs.block_size, (long)new_op->downcall.resp.statfs.files_total, (long)new_op->downcall.resp.statfs.files_avail); buf->f_type = sb->s_magic; memcpy(&buf->f_fsid, &ORANGEFS_SB(sb)->fs_id, sizeof(buf->f_fsid)); buf->f_bsize = new_op->downcall.resp.statfs.block_size; buf->f_namelen = ORANGEFS_NAME_MAX; buf->f_blocks = (sector_t) new_op->downcall.resp.statfs.blocks_total; buf->f_bfree = (sector_t) new_op->downcall.resp.statfs.blocks_avail; buf->f_bavail = (sector_t) new_op->downcall.resp.statfs.blocks_avail; buf->f_files = (sector_t) new_op->downcall.resp.statfs.files_total; buf->f_ffree = (sector_t) new_op->downcall.resp.statfs.files_avail; buf->f_frsize = 0; out_op_release: op_release(new_op); gossip_debug(GOSSIP_SUPER_DEBUG, "%s: returning %d\n", __func__, ret); return ret; } /* * Remount as initiated by VFS layer. We just need to reparse the mount * options, no need to signal pvfs2-client-core about it. */ static int orangefs_remount_fs(struct super_block *sb, int *flags, char *data) { gossip_debug(GOSSIP_SUPER_DEBUG, "orangefs_remount_fs: called\n"); return parse_mount_options(sb, data, 1); } /* * Remount as initiated by pvfs2-client-core on restart. This is used to * repopulate mount information left from previous pvfs2-client-core. 
* * the idea here is that given a valid superblock, we're * re-initializing the user space client with the initial mount * information specified when the super block was first initialized. * this is very different than the first initialization/creation of a * superblock. we use the special service_priority_operation to make * sure that the mount gets ahead of any other pending operation that * is waiting for servicing. this means that the pvfs2-client won't * fail to start several times for all other pending operations before * the client regains all of the mount information from us. * NOTE: this function assumes that the request_mutex is already acquired! */ int orangefs_remount(struct orangefs_sb_info_s *orangefs_sb) { struct orangefs_kernel_op_s *new_op; int ret = -EINVAL; gossip_debug(GOSSIP_SUPER_DEBUG, "orangefs_remount: called\n"); new_op = op_alloc(ORANGEFS_VFS_OP_FS_MOUNT); if (!new_op) return -ENOMEM; strncpy(new_op->upcall.req.fs_mount.orangefs_config_server, orangefs_sb->devname, ORANGEFS_MAX_SERVER_ADDR_LEN); gossip_debug(GOSSIP_SUPER_DEBUG, "Attempting ORANGEFS Remount via host %s\n", new_op->upcall.req.fs_mount.orangefs_config_server); /* * we assume that the calling function has already acquired the * request_mutex to prevent other operations from bypassing * this one */ ret = service_operation(new_op, "orangefs_remount", ORANGEFS_OP_PRIORITY | ORANGEFS_OP_NO_MUTEX); gossip_debug(GOSSIP_SUPER_DEBUG, "orangefs_remount: mount got return value of %d\n", ret); if (ret == 0) { /* * store the id assigned to this sb -- it's just a * short-lived mapping that the system interface uses * to map this superblock to a particular mount entry */ orangefs_sb->id = new_op->downcall.resp.fs_mount.id; orangefs_sb->mount_pending = 0; } op_release(new_op); if (orangefs_userspace_version >= 20906) { new_op = op_alloc(ORANGEFS_VFS_OP_FEATURES); if (!new_op) return -ENOMEM; new_op->upcall.req.features.features = 0; ret = service_operation(new_op, "orangefs_features", ORANGEFS_OP_PRIORITY | ORANGEFS_OP_NO_MUTEX); if (!ret) orangefs_features = new_op->downcall.resp.features.features; else orangefs_features = 0; op_release(new_op); } else { orangefs_features = 0; } return ret; } int fsid_key_table_initialize(void) { return 0; } void fsid_key_table_finalize(void) { } static const struct super_operations orangefs_s_ops = { .alloc_inode = orangefs_alloc_inode, .free_inode = orangefs_free_inode, .destroy_inode = orangefs_destroy_inode, .write_inode = orangefs_write_inode, .drop_inode = generic_delete_inode, .statfs = orangefs_statfs, .remount_fs = orangefs_remount_fs, .show_options = orangefs_show_options, }; static struct dentry *orangefs_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { struct orangefs_object_kref refn; if (fh_len < 5 || fh_type > 2) return NULL; ORANGEFS_khandle_from(&(refn.khandle), fid->raw, 16); refn.fs_id = (u32) fid->raw[4]; gossip_debug(GOSSIP_SUPER_DEBUG, "fh_to_dentry: handle %pU, fs_id %d\n", &refn.khandle, refn.fs_id); return d_obtain_alias(orangefs_iget(sb, &refn)); } static int orangefs_encode_fh(struct inode *inode, __u32 *fh, int *max_len, struct inode *parent) { int len = parent ? 
10 : 5; int type = 1; struct orangefs_object_kref refn; if (*max_len < len) { gossip_err("fh buffer is too small for encoding\n"); *max_len = len; type = 255; goto out; } refn = ORANGEFS_I(inode)->refn; ORANGEFS_khandle_to(&refn.khandle, fh, 16); fh[4] = refn.fs_id; gossip_debug(GOSSIP_SUPER_DEBUG, "Encoding fh: handle %pU, fsid %u\n", &refn.khandle, refn.fs_id); if (parent) { refn = ORANGEFS_I(parent)->refn; ORANGEFS_khandle_to(&refn.khandle, (char *) fh + 20, 16); fh[9] = refn.fs_id; type = 2; gossip_debug(GOSSIP_SUPER_DEBUG, "Encoding parent: handle %pU, fsid %u\n", &refn.khandle, refn.fs_id); } *max_len = len; out: return type; } static const struct export_operations orangefs_export_ops = { .encode_fh = orangefs_encode_fh, .fh_to_dentry = orangefs_fh_to_dentry, }; static int orangefs_unmount(int id, __s32 fs_id, const char *devname) { struct orangefs_kernel_op_s *op; int r; op = op_alloc(ORANGEFS_VFS_OP_FS_UMOUNT); if (!op) return -ENOMEM; op->upcall.req.fs_umount.id = id; op->upcall.req.fs_umount.fs_id = fs_id; strncpy(op->upcall.req.fs_umount.orangefs_config_server, devname, ORANGEFS_MAX_SERVER_ADDR_LEN - 1); r = service_operation(op, "orangefs_fs_umount", 0); /* Not much to do about an error here. */ if (r) gossip_err("orangefs_unmount: service_operation %d\n", r); op_release(op); return r; } static int orangefs_fill_sb(struct super_block *sb, struct orangefs_fs_mount_response *fs_mount, void *data, int silent) { int ret; struct inode *root; struct dentry *root_dentry; struct orangefs_object_kref root_object; ORANGEFS_SB(sb)->sb = sb; ORANGEFS_SB(sb)->root_khandle = fs_mount->root_khandle; ORANGEFS_SB(sb)->fs_id = fs_mount->fs_id; ORANGEFS_SB(sb)->id = fs_mount->id; if (data) { ret = parse_mount_options(sb, data, silent); if (ret) return ret; } /* Hang the xattr handlers off the superblock */ sb->s_xattr = orangefs_xattr_handlers; sb->s_magic = ORANGEFS_SUPER_MAGIC; sb->s_op = &orangefs_s_ops; sb->s_d_op = &orangefs_dentry_operations; sb->s_blocksize = PAGE_SIZE; sb->s_blocksize_bits = PAGE_SHIFT; sb->s_maxbytes = MAX_LFS_FILESIZE; ret = super_setup_bdi(sb); if (ret) return ret; root_object.khandle = ORANGEFS_SB(sb)->root_khandle; root_object.fs_id = ORANGEFS_SB(sb)->fs_id; gossip_debug(GOSSIP_SUPER_DEBUG, "get inode %pU, fsid %d\n", &root_object.khandle, root_object.fs_id); root = orangefs_iget(sb, &root_object); if (IS_ERR(root)) return PTR_ERR(root); gossip_debug(GOSSIP_SUPER_DEBUG, "Allocated root inode [%p] with mode %x\n", root, root->i_mode); /* allocates and places root dentry in dcache */ root_dentry = d_make_root(root); if (!root_dentry) return -ENOMEM; sb->s_export_op = &orangefs_export_ops; sb->s_root = root_dentry; return 0; } struct dentry *orangefs_mount(struct file_system_type *fst, int flags, const char *devname, void *data) { int ret; struct super_block *sb = ERR_PTR(-EINVAL); struct orangefs_kernel_op_s *new_op; struct dentry *d = ERR_PTR(-EINVAL); gossip_debug(GOSSIP_SUPER_DEBUG, "orangefs_mount: called with devname %s\n", devname); if (!devname) { gossip_err("ERROR: device name not specified.\n"); return ERR_PTR(-EINVAL); } new_op = op_alloc(ORANGEFS_VFS_OP_FS_MOUNT); if (!new_op) return ERR_PTR(-ENOMEM); strncpy(new_op->upcall.req.fs_mount.orangefs_config_server, devname, ORANGEFS_MAX_SERVER_ADDR_LEN - 1); gossip_debug(GOSSIP_SUPER_DEBUG, "Attempting ORANGEFS Mount via host %s\n", new_op->upcall.req.fs_mount.orangefs_config_server); ret = service_operation(new_op, "orangefs_mount", 0); gossip_debug(GOSSIP_SUPER_DEBUG, "orangefs_mount: mount got return value of 
%d\n", ret); if (ret) goto free_op; if (new_op->downcall.resp.fs_mount.fs_id == ORANGEFS_FS_ID_NULL) { gossip_err("ERROR: Retrieved null fs_id\n"); ret = -EINVAL; goto free_op; } sb = sget(fst, NULL, set_anon_super, flags, NULL); if (IS_ERR(sb)) { d = ERR_CAST(sb); orangefs_unmount(new_op->downcall.resp.fs_mount.id, new_op->downcall.resp.fs_mount.fs_id, devname); goto free_op; } /* alloc and init our private orangefs sb info */ sb->s_fs_info = kzalloc(sizeof(struct orangefs_sb_info_s), GFP_KERNEL); if (!ORANGEFS_SB(sb)) { d = ERR_PTR(-ENOMEM); goto free_sb_and_op; } ret = orangefs_fill_sb(sb, &new_op->downcall.resp.fs_mount, data, flags & SB_SILENT ? 1 : 0); if (ret) { d = ERR_PTR(ret); goto free_sb_and_op; } /* * on successful mount, store the devname and data * used */ strncpy(ORANGEFS_SB(sb)->devname, devname, ORANGEFS_MAX_SERVER_ADDR_LEN - 1); /* mount_pending must be cleared */ ORANGEFS_SB(sb)->mount_pending = 0; /* * finally, add this sb to our list of known orangefs * sb's */ gossip_debug(GOSSIP_SUPER_DEBUG, "Adding SB %p to orangefs superblocks\n", ORANGEFS_SB(sb)); spin_lock(&orangefs_superblocks_lock); list_add_tail(&ORANGEFS_SB(sb)->list, &orangefs_superblocks); spin_unlock(&orangefs_superblocks_lock); op_release(new_op); /* Must be removed from the list now. */ ORANGEFS_SB(sb)->no_list = 0; if (orangefs_userspace_version >= 20906) { new_op = op_alloc(ORANGEFS_VFS_OP_FEATURES); if (!new_op) return ERR_PTR(-ENOMEM); new_op->upcall.req.features.features = 0; ret = service_operation(new_op, "orangefs_features", 0); orangefs_features = new_op->downcall.resp.features.features; op_release(new_op); } else { orangefs_features = 0; } return dget(sb->s_root); free_sb_and_op: /* Will call orangefs_kill_sb with sb not in list. */ ORANGEFS_SB(sb)->no_list = 1; /* ORANGEFS_VFS_OP_FS_UMOUNT is done by orangefs_kill_sb. */ deactivate_locked_super(sb); free_op: gossip_err("orangefs_mount: mount request failed with %d\n", ret); if (ret == -EINVAL) { gossip_err("Ensure that all orangefs-servers have the same FS configuration files\n"); gossip_err("Look at pvfs2-client-core log file (typically /tmp/pvfs2-client.log) for more details\n"); } op_release(new_op); return d; } void orangefs_kill_sb(struct super_block *sb) { int r; gossip_debug(GOSSIP_SUPER_DEBUG, "orangefs_kill_sb: called\n"); /* provided sb cleanup */ kill_anon_super(sb); if (!ORANGEFS_SB(sb)) { mutex_lock(&orangefs_request_mutex); mutex_unlock(&orangefs_request_mutex); return; } /* * issue the unmount to userspace to tell it to remove the * dynamic mount info it has for this superblock */ r = orangefs_unmount(ORANGEFS_SB(sb)->id, ORANGEFS_SB(sb)->fs_id, ORANGEFS_SB(sb)->devname); if (!r) ORANGEFS_SB(sb)->mount_pending = 1; if (!ORANGEFS_SB(sb)->no_list) { /* remove the sb from our list of orangefs specific sb's */ spin_lock(&orangefs_superblocks_lock); /* not list_del_init */ __list_del_entry(&ORANGEFS_SB(sb)->list); ORANGEFS_SB(sb)->list.prev = NULL; spin_unlock(&orangefs_superblocks_lock); } /* * make sure that ORANGEFS_DEV_REMOUNT_ALL loop that might've seen us * gets completed before we free the dang thing. 
*/ mutex_lock(&orangefs_request_mutex); mutex_unlock(&orangefs_request_mutex); /* free the orangefs superblock private data */ kfree(ORANGEFS_SB(sb)); } int orangefs_inode_cache_initialize(void) { orangefs_inode_cache = kmem_cache_create_usercopy( "orangefs_inode_cache", sizeof(struct orangefs_inode_s), 0, ORANGEFS_CACHE_CREATE_FLAGS, offsetof(struct orangefs_inode_s, link_target), sizeof_field(struct orangefs_inode_s, link_target), orangefs_inode_cache_ctor); if (!orangefs_inode_cache) { gossip_err("Cannot create orangefs_inode_cache\n"); return -ENOMEM; } return 0; } int orangefs_inode_cache_finalize(void) { kmem_cache_destroy(orangefs_inode_cache); return 0; }
linux-master
fs/orangefs/super.c
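parse_mount_options() above is a strsep()-driven keyword scanner: each comma-separated token either sets a flag bit or fails the whole mount. The following standalone sketch mirrors that flow under invented names; OPT_ACL and friends are placeholders for the example, not the kernel's SB_POSIXACL or ORANGEFS_OPT_* constants.

/*
 * Minimal userspace sketch of the option-parsing shape used by
 * parse_mount_options(): split a comma-separated mount string and turn
 * recognized keywords into flag bits, rejecting anything unknown.
 */
#define _DEFAULT_SOURCE		/* for strsep() and strdup() on glibc */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define OPT_ACL		0x1
#define OPT_INTR	0x2
#define OPT_LOCAL_LOCK	0x4

static int parse_options(const char *options, unsigned int *flags)
{
	char *dup = strdup(options);
	char *cursor = dup;
	char *p;
	int ret = 0;

	*flags = 0;	/* start from a clean slate, as the kernel code does */
	if (!dup)
		return -1;

	while ((p = strsep(&cursor, ",")) != NULL) {
		if (!*p)
			continue;	/* tolerate empty tokens like "acl,,intr" */
		if (!strcmp(p, "acl"))
			*flags |= OPT_ACL;
		else if (!strcmp(p, "intr"))
			*flags |= OPT_INTR;
		else if (!strcmp(p, "local_lock"))
			*flags |= OPT_LOCAL_LOCK;
		else {
			fprintf(stderr, "mount option [%s] is not supported\n", p);
			ret = -1;
			break;
		}
	}
	free(dup);
	return ret;
}

int main(void)
{
	unsigned int flags;

	if (parse_options("acl,intr", &flags) == 0)
		printf("flags = 0x%x\n", flags);	/* prints 0x3 */
	return 0;
}

The kernel version additionally clears SB_POSIXACL and the orangefs-specific flags up front so a remount starts from a known state; the sketch does the same by zeroing *flags before scanning.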
// SPDX-License-Identifier: GPL-2.0 /* * What: /sys/kernel/debug/orangefs/debug-help * Date: June 2015 * Contact: Mike Marshall <[email protected]> * Description: * List of client and kernel debug keywords. * * * What: /sys/kernel/debug/orangefs/client-debug * Date: June 2015 * Contact: Mike Marshall <[email protected]> * Description: * Debug setting for "the client", the userspace * helper for the kernel module. * * * What: /sys/kernel/debug/orangefs/kernel-debug * Date: June 2015 * Contact: Mike Marshall <[email protected]> * Description: * Debug setting for the orangefs kernel module. * * Any of the keywords, or comma-separated lists * of keywords, from debug-help can be catted to * client-debug or kernel-debug. * * "none", "all" and "verbose" are special keywords * for client-debug. Setting client-debug to "all" * is kind of like trying to drink water from a * fire hose, "verbose" triggers most of the same * output except for the constant flow of output * from the main wait loop. * * "none" and "all" are similar settings for kernel-debug * no need for a "verbose". */ #include <linux/debugfs.h> #include <linux/slab.h> #include <linux/uaccess.h> #include "orangefs-debugfs.h" #include "protocol.h" #include "orangefs-kernel.h" #define DEBUG_HELP_STRING_SIZE 4096 #define HELP_STRING_UNINITIALIZED \ "Client Debug Keywords are unknown until the first time\n" \ "the client is started after boot.\n" #define ORANGEFS_KMOD_DEBUG_HELP_FILE "debug-help" #define ORANGEFS_KMOD_DEBUG_FILE "kernel-debug" #define ORANGEFS_CLIENT_DEBUG_FILE "client-debug" #define ORANGEFS_VERBOSE "verbose" #define ORANGEFS_ALL "all" /* * An array of client_debug_mask will be built to hold debug keyword/mask * values fetched from userspace. */ struct client_debug_mask { char *keyword; __u64 mask1; __u64 mask2; }; static void orangefs_kernel_debug_init(void); static int orangefs_debug_help_open(struct inode *, struct file *); static void *help_start(struct seq_file *, loff_t *); static void *help_next(struct seq_file *, void *, loff_t *); static void help_stop(struct seq_file *, void *); static int help_show(struct seq_file *, void *); static int orangefs_debug_open(struct inode *, struct file *); static ssize_t orangefs_debug_read(struct file *, char __user *, size_t, loff_t *); static ssize_t orangefs_debug_write(struct file *, const char __user *, size_t, loff_t *); static int orangefs_prepare_cdm_array(char *); static void debug_mask_to_string(void *, int); static void do_k_string(void *, int); static void do_c_string(void *, int); static int keyword_is_amalgam(char *); static int check_amalgam_keyword(void *, int); static void debug_string_to_mask(char *, void *, int); static void do_c_mask(int, char *, struct client_debug_mask **); static void do_k_mask(int, char *, __u64 **); static char kernel_debug_string[ORANGEFS_MAX_DEBUG_STRING_LEN] = "none"; static char *debug_help_string; static char client_debug_string[ORANGEFS_MAX_DEBUG_STRING_LEN]; static char client_debug_array_string[ORANGEFS_MAX_DEBUG_STRING_LEN]; static struct dentry *client_debug_dentry; static struct dentry *debug_dir; static unsigned int kernel_mask_set_mod_init; static int orangefs_debug_disabled = 1; static int help_string_initialized; static const struct seq_operations help_debug_ops = { .start = help_start, .next = help_next, .stop = help_stop, .show = help_show, }; static const struct file_operations debug_help_fops = { .owner = THIS_MODULE, .open = orangefs_debug_help_open, .read = seq_read, .release = seq_release, .llseek = seq_lseek, }; 
static const struct file_operations kernel_debug_fops = { .owner = THIS_MODULE, .open = orangefs_debug_open, .read = orangefs_debug_read, .write = orangefs_debug_write, .llseek = generic_file_llseek, }; static int client_all_index; static int client_verbose_index; static struct client_debug_mask *cdm_array; static int cdm_element_count; static struct client_debug_mask client_debug_mask; /* * Used to protect data in ORANGEFS_KMOD_DEBUG_FILE and * ORANGEFS_KMOD_DEBUG_FILE. */ static DEFINE_MUTEX(orangefs_debug_lock); /* Used to protect data in ORANGEFS_KMOD_DEBUG_HELP_FILE */ static DEFINE_MUTEX(orangefs_help_file_lock); /* * initialize kmod debug operations, create orangefs debugfs dir and * ORANGEFS_KMOD_DEBUG_HELP_FILE. */ void orangefs_debugfs_init(int debug_mask) { /* convert input debug mask to a 64-bit unsigned integer */ orangefs_gossip_debug_mask = (unsigned long long)debug_mask; /* * set the kernel's gossip debug string; invalid mask values will * be ignored. */ debug_mask_to_string(&orangefs_gossip_debug_mask, 0); /* remove any invalid values from the mask */ debug_string_to_mask(kernel_debug_string, &orangefs_gossip_debug_mask, 0); /* * if the mask has a non-zero value, then indicate that the mask * was set when the kernel module was loaded. The orangefs dev ioctl * command will look at this boolean to determine if the kernel's * debug mask should be overwritten when the client-core is started. */ if (orangefs_gossip_debug_mask != 0) kernel_mask_set_mod_init = true; pr_info("%s: called with debug mask: :%s: :%llx:\n", __func__, kernel_debug_string, (unsigned long long)orangefs_gossip_debug_mask); debug_dir = debugfs_create_dir("orangefs", NULL); debugfs_create_file(ORANGEFS_KMOD_DEBUG_HELP_FILE, 0444, debug_dir, debug_help_string, &debug_help_fops); orangefs_debug_disabled = 0; orangefs_kernel_debug_init(); } /* * initialize the kernel-debug file. */ static void orangefs_kernel_debug_init(void) { static char k_buffer[ORANGEFS_MAX_DEBUG_STRING_LEN] = { }; gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: start\n", __func__); if (strlen(kernel_debug_string) + 1 < ORANGEFS_MAX_DEBUG_STRING_LEN) { strcpy(k_buffer, kernel_debug_string); strcat(k_buffer, "\n"); } else { strcpy(k_buffer, "none\n"); pr_info("%s: overflow 1!\n", __func__); } debugfs_create_file(ORANGEFS_KMOD_DEBUG_FILE, 0444, debug_dir, k_buffer, &kernel_debug_fops); } void orangefs_debugfs_cleanup(void) { debugfs_remove_recursive(debug_dir); kfree(debug_help_string); debug_help_string = NULL; } /* open ORANGEFS_KMOD_DEBUG_HELP_FILE */ static int orangefs_debug_help_open(struct inode *inode, struct file *file) { int rc = -ENODEV; int ret; gossip_debug(GOSSIP_DEBUGFS_DEBUG, "orangefs_debug_help_open: start\n"); if (orangefs_debug_disabled) goto out; ret = seq_open(file, &help_debug_ops); if (ret) goto out; ((struct seq_file *)(file->private_data))->private = inode->i_private; rc = 0; out: gossip_debug(GOSSIP_DEBUGFS_DEBUG, "orangefs_debug_help_open: rc:%d:\n", rc); return rc; } /* * I think start always gets called again after stop. Start * needs to return NULL when it is done. The whole "payload" * in this case is a single (long) string, so by the second * time we get to start (pos = 1), we're done. 
*/ static void *help_start(struct seq_file *m, loff_t *pos) { void *payload = NULL; gossip_debug(GOSSIP_DEBUGFS_DEBUG, "help_start: start\n"); mutex_lock(&orangefs_help_file_lock); if (*pos == 0) payload = m->private; return payload; } static void *help_next(struct seq_file *m, void *v, loff_t *pos) { (*pos)++; gossip_debug(GOSSIP_DEBUGFS_DEBUG, "help_next: start\n"); return NULL; } static void help_stop(struct seq_file *m, void *p) { gossip_debug(GOSSIP_DEBUGFS_DEBUG, "help_stop: start\n"); mutex_unlock(&orangefs_help_file_lock); } static int help_show(struct seq_file *m, void *v) { gossip_debug(GOSSIP_DEBUGFS_DEBUG, "help_show: start\n"); seq_puts(m, v); return 0; } /* * initialize the client-debug file. */ static void orangefs_client_debug_init(void) { static char c_buffer[ORANGEFS_MAX_DEBUG_STRING_LEN] = { }; gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: start\n", __func__); if (strlen(client_debug_string) + 1 < ORANGEFS_MAX_DEBUG_STRING_LEN) { strcpy(c_buffer, client_debug_string); strcat(c_buffer, "\n"); } else { strcpy(c_buffer, "none\n"); pr_info("%s: overflow! 2\n", __func__); } client_debug_dentry = debugfs_create_file(ORANGEFS_CLIENT_DEBUG_FILE, 0444, debug_dir, c_buffer, &kernel_debug_fops); } /* open ORANGEFS_KMOD_DEBUG_FILE or ORANGEFS_CLIENT_DEBUG_FILE.*/ static int orangefs_debug_open(struct inode *inode, struct file *file) { int rc = -ENODEV; gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: orangefs_debug_disabled: %d\n", __func__, orangefs_debug_disabled); if (orangefs_debug_disabled) goto out; rc = 0; mutex_lock(&orangefs_debug_lock); file->private_data = inode->i_private; mutex_unlock(&orangefs_debug_lock); out: gossip_debug(GOSSIP_DEBUGFS_DEBUG, "orangefs_debug_open: rc: %d\n", rc); return rc; } static ssize_t orangefs_debug_read(struct file *file, char __user *ubuf, size_t count, loff_t *ppos) { char *buf; int sprintf_ret; ssize_t read_ret = -ENOMEM; gossip_debug(GOSSIP_DEBUGFS_DEBUG, "orangefs_debug_read: start\n"); buf = kmalloc(ORANGEFS_MAX_DEBUG_STRING_LEN, GFP_KERNEL); if (!buf) goto out; mutex_lock(&orangefs_debug_lock); sprintf_ret = sprintf(buf, "%s", (char *)file->private_data); mutex_unlock(&orangefs_debug_lock); read_ret = simple_read_from_buffer(ubuf, count, ppos, buf, sprintf_ret); kfree(buf); out: gossip_debug(GOSSIP_DEBUGFS_DEBUG, "orangefs_debug_read: ret: %zu\n", read_ret); return read_ret; } static ssize_t orangefs_debug_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { char *buf; int rc = -EFAULT; size_t silly = 0; char *debug_string; struct orangefs_kernel_op_s *new_op = NULL; struct client_debug_mask c_mask = { NULL, 0, 0 }; char *s; gossip_debug(GOSSIP_DEBUGFS_DEBUG, "orangefs_debug_write: %pD\n", file); if (count == 0) return 0; /* * Thwart users who try to jamb a ridiculous number * of bytes into the debug file... */ if (count > ORANGEFS_MAX_DEBUG_STRING_LEN + 1) { silly = count; count = ORANGEFS_MAX_DEBUG_STRING_LEN + 1; } buf = kzalloc(ORANGEFS_MAX_DEBUG_STRING_LEN, GFP_KERNEL); if (!buf) goto out; if (copy_from_user(buf, ubuf, count - 1)) { gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: copy_from_user failed!\n", __func__); goto out; } /* * Map the keyword string from userspace into a valid debug mask. * The mapping process involves mapping the human-inputted string * into a valid mask, and then rebuilding the string from the * verified valid mask. * * A service operation is required to set a new client-side * debug mask. 
*/ if (!strcmp(file->f_path.dentry->d_name.name, ORANGEFS_KMOD_DEBUG_FILE)) { debug_string_to_mask(buf, &orangefs_gossip_debug_mask, 0); debug_mask_to_string(&orangefs_gossip_debug_mask, 0); debug_string = kernel_debug_string; gossip_debug(GOSSIP_DEBUGFS_DEBUG, "New kernel debug string is %s\n", kernel_debug_string); } else { /* Can't reset client debug mask if client is not running. */ if (is_daemon_in_service()) { pr_info("%s: Client not running :%d:\n", __func__, is_daemon_in_service()); goto out; } debug_string_to_mask(buf, &c_mask, 1); debug_mask_to_string(&c_mask, 1); debug_string = client_debug_string; new_op = op_alloc(ORANGEFS_VFS_OP_PARAM); if (!new_op) { pr_info("%s: op_alloc failed!\n", __func__); goto out; } new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_TWO_MASK_VALUES; new_op->upcall.req.param.type = ORANGEFS_PARAM_REQUEST_SET; memset(new_op->upcall.req.param.s_value, 0, ORANGEFS_MAX_DEBUG_STRING_LEN); sprintf(new_op->upcall.req.param.s_value, "%llx %llx\n", c_mask.mask1, c_mask.mask2); /* service_operation returns 0 on success... */ rc = service_operation(new_op, "orangefs_param", ORANGEFS_OP_INTERRUPTIBLE); if (rc) gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: service_operation failed! rc:%d:\n", __func__, rc); op_release(new_op); } mutex_lock(&orangefs_debug_lock); s = file_inode(file)->i_private; memset(s, 0, ORANGEFS_MAX_DEBUG_STRING_LEN); sprintf(s, "%s\n", debug_string); mutex_unlock(&orangefs_debug_lock); *ppos += count; if (silly) rc = silly; else rc = count; out: gossip_debug(GOSSIP_DEBUGFS_DEBUG, "orangefs_debug_write: rc: %d\n", rc); kfree(buf); return rc; } /* * After obtaining a string representation of the client's debug * keywords and their associated masks, this function is called to build an * array of these values. */ static int orangefs_prepare_cdm_array(char *debug_array_string) { int i; int rc = -EINVAL; char *cds_head = NULL; char *cds_delimiter = NULL; int keyword_len = 0; gossip_debug(GOSSIP_UTILS_DEBUG, "%s: start\n", __func__); /* * figure out how many elements the cdm_array needs. */ for (i = 0; i < strlen(debug_array_string); i++) if (debug_array_string[i] == '\n') cdm_element_count++; if (!cdm_element_count) { pr_info("No elements in client debug array string!\n"); goto out; } cdm_array = kcalloc(cdm_element_count, sizeof(*cdm_array), GFP_KERNEL); if (!cdm_array) { rc = -ENOMEM; goto out; } cds_head = debug_array_string; for (i = 0; i < cdm_element_count; i++) { cds_delimiter = strchr(cds_head, '\n'); *cds_delimiter = '\0'; keyword_len = strcspn(cds_head, " "); cdm_array[i].keyword = kzalloc(keyword_len + 1, GFP_KERNEL); if (!cdm_array[i].keyword) { rc = -ENOMEM; goto out; } sscanf(cds_head, "%s %llx %llx", cdm_array[i].keyword, (unsigned long long *)&(cdm_array[i].mask1), (unsigned long long *)&(cdm_array[i].mask2)); if (!strcmp(cdm_array[i].keyword, ORANGEFS_VERBOSE)) client_verbose_index = i; if (!strcmp(cdm_array[i].keyword, ORANGEFS_ALL)) client_all_index = i; cds_head = cds_delimiter + 1; } rc = cdm_element_count; gossip_debug(GOSSIP_UTILS_DEBUG, "%s: rc:%d:\n", __func__, rc); out: return rc; } /* * /sys/kernel/debug/orangefs/debug-help can be catted to * see all the available kernel and client debug keywords. * * When orangefs.ko initializes, we have no idea what keywords the * client supports, nor their associated masks. * * We pass through this function once at module-load and stamp a * boilerplate "we don't know" message for the client in the * debug-help file. 
We pass through here again when the client * starts and then we can fill out the debug-help file fully. * * The client might be restarted any number of times between * module reloads, we only build the debug-help file the first time. */ int orangefs_prepare_debugfs_help_string(int at_boot) { char *client_title = "Client Debug Keywords:\n"; char *kernel_title = "Kernel Debug Keywords:\n"; size_t string_size = DEBUG_HELP_STRING_SIZE; size_t result_size; size_t i; char *new; int rc = -EINVAL; gossip_debug(GOSSIP_UTILS_DEBUG, "%s: start\n", __func__); if (at_boot) client_title = HELP_STRING_UNINITIALIZED; /* build a new debug_help_string. */ new = kzalloc(DEBUG_HELP_STRING_SIZE, GFP_KERNEL); if (!new) { rc = -ENOMEM; goto out; } /* * strlcat(dst, src, size) will append at most * "size - strlen(dst) - 1" bytes of src onto dst, * null terminating the result, and return the total * length of the string it tried to create. * * We'll just plow through here building our new debug * help string and let strlcat take care of assuring that * dst doesn't overflow. */ strlcat(new, client_title, string_size); if (!at_boot) { /* * fill the client keyword/mask array and remember * how many elements there were. */ cdm_element_count = orangefs_prepare_cdm_array(client_debug_array_string); if (cdm_element_count <= 0) { kfree(new); goto out; } for (i = 0; i < cdm_element_count; i++) { strlcat(new, "\t", string_size); strlcat(new, cdm_array[i].keyword, string_size); strlcat(new, "\n", string_size); } } strlcat(new, "\n", string_size); strlcat(new, kernel_title, string_size); for (i = 0; i < num_kmod_keyword_mask_map; i++) { strlcat(new, "\t", string_size); strlcat(new, s_kmod_keyword_mask_map[i].keyword, string_size); result_size = strlcat(new, "\n", string_size); } /* See if we tried to put too many bytes into "new"... */ if (result_size >= string_size) { kfree(new); goto out; } if (at_boot) { debug_help_string = new; } else { mutex_lock(&orangefs_help_file_lock); memset(debug_help_string, 0, DEBUG_HELP_STRING_SIZE); strlcat(debug_help_string, new, string_size); mutex_unlock(&orangefs_help_file_lock); kfree(new); } rc = 0; out: return rc; } /* * kernel = type 0 * client = type 1 */ static void debug_mask_to_string(void *mask, int type) { int i; int len = 0; char *debug_string; int element_count = 0; gossip_debug(GOSSIP_UTILS_DEBUG, "%s: start\n", __func__); if (type) { debug_string = client_debug_string; element_count = cdm_element_count; } else { debug_string = kernel_debug_string; element_count = num_kmod_keyword_mask_map; } memset(debug_string, 0, ORANGEFS_MAX_DEBUG_STRING_LEN); /* * Some keywords, like "all" or "verbose", are amalgams of * numerous other keywords. Make a special check for those * before grinding through the whole mask only to find out * later... */ if (check_amalgam_keyword(mask, type)) goto out; /* Build the debug string. 
*/ for (i = 0; i < element_count; i++) if (type) do_c_string(mask, i); else do_k_string(mask, i); len = strlen(debug_string); if ((len) && (type)) client_debug_string[len - 1] = '\0'; else if (len) kernel_debug_string[len - 1] = '\0'; else if (type) strcpy(client_debug_string, "none"); else strcpy(kernel_debug_string, "none"); out: gossip_debug(GOSSIP_UTILS_DEBUG, "%s: string:%s:\n", __func__, debug_string); return; } static void do_k_string(void *k_mask, int index) { __u64 *mask = (__u64 *) k_mask; if (keyword_is_amalgam((char *) s_kmod_keyword_mask_map[index].keyword)) goto out; if (*mask & s_kmod_keyword_mask_map[index].mask_val) { if ((strlen(kernel_debug_string) + strlen(s_kmod_keyword_mask_map[index].keyword)) < ORANGEFS_MAX_DEBUG_STRING_LEN - 1) { strcat(kernel_debug_string, s_kmod_keyword_mask_map[index].keyword); strcat(kernel_debug_string, ","); } else { gossip_err("%s: overflow!\n", __func__); strcpy(kernel_debug_string, ORANGEFS_ALL); goto out; } } out: return; } static void do_c_string(void *c_mask, int index) { struct client_debug_mask *mask = (struct client_debug_mask *) c_mask; if (keyword_is_amalgam(cdm_array[index].keyword)) goto out; if ((mask->mask1 & cdm_array[index].mask1) || (mask->mask2 & cdm_array[index].mask2)) { if ((strlen(client_debug_string) + strlen(cdm_array[index].keyword) + 1) < ORANGEFS_MAX_DEBUG_STRING_LEN - 2) { strcat(client_debug_string, cdm_array[index].keyword); strcat(client_debug_string, ","); } else { gossip_err("%s: overflow!\n", __func__); strcpy(client_debug_string, ORANGEFS_ALL); goto out; } } out: return; } static int keyword_is_amalgam(char *keyword) { int rc = 0; if ((!strcmp(keyword, ORANGEFS_ALL)) || (!strcmp(keyword, ORANGEFS_VERBOSE))) rc = 1; return rc; } /* * kernel = type 0 * client = type 1 * * return 1 if we found an amalgam. 
*/ static int check_amalgam_keyword(void *mask, int type) { __u64 *k_mask; struct client_debug_mask *c_mask; int k_all_index = num_kmod_keyword_mask_map - 1; int rc = 0; if (type) { c_mask = (struct client_debug_mask *) mask; if ((c_mask->mask1 == cdm_array[client_all_index].mask1) && (c_mask->mask2 == cdm_array[client_all_index].mask2)) { strcpy(client_debug_string, ORANGEFS_ALL); rc = 1; goto out; } if ((c_mask->mask1 == cdm_array[client_verbose_index].mask1) && (c_mask->mask2 == cdm_array[client_verbose_index].mask2)) { strcpy(client_debug_string, ORANGEFS_VERBOSE); rc = 1; goto out; } } else { k_mask = (__u64 *) mask; if (*k_mask >= s_kmod_keyword_mask_map[k_all_index].mask_val) { strcpy(kernel_debug_string, ORANGEFS_ALL); rc = 1; goto out; } } out: return rc; } /* * kernel = type 0 * client = type 1 */ static void debug_string_to_mask(char *debug_string, void *mask, int type) { char *unchecked_keyword; int i; char *strsep_fodder = kstrdup(debug_string, GFP_KERNEL); char *original_pointer; int element_count = 0; struct client_debug_mask *c_mask = NULL; __u64 *k_mask = NULL; gossip_debug(GOSSIP_UTILS_DEBUG, "%s: start\n", __func__); if (type) { c_mask = (struct client_debug_mask *)mask; element_count = cdm_element_count; } else { k_mask = (__u64 *)mask; *k_mask = 0; element_count = num_kmod_keyword_mask_map; } original_pointer = strsep_fodder; while ((unchecked_keyword = strsep(&strsep_fodder, ","))) if (strlen(unchecked_keyword)) { for (i = 0; i < element_count; i++) if (type) do_c_mask(i, unchecked_keyword, &c_mask); else do_k_mask(i, unchecked_keyword, &k_mask); } kfree(original_pointer); } static void do_c_mask(int i, char *unchecked_keyword, struct client_debug_mask **sane_mask) { if (!strcmp(cdm_array[i].keyword, unchecked_keyword)) { (**sane_mask).mask1 = (**sane_mask).mask1 | cdm_array[i].mask1; (**sane_mask).mask2 = (**sane_mask).mask2 | cdm_array[i].mask2; } } static void do_k_mask(int i, char *unchecked_keyword, __u64 **sane_mask) { if (!strcmp(s_kmod_keyword_mask_map[i].keyword, unchecked_keyword)) **sane_mask = (**sane_mask) | s_kmod_keyword_mask_map[i].mask_val; } int orangefs_debugfs_new_client_mask(void __user *arg) { struct dev_mask2_info_s mask2_info = {0}; int ret; ret = copy_from_user(&mask2_info, (void __user *)arg, sizeof(struct dev_mask2_info_s)); if (ret != 0) return -EIO; client_debug_mask.mask1 = mask2_info.mask1_value; client_debug_mask.mask2 = mask2_info.mask2_value; pr_info("%s: client debug mask has been been received " ":%llx: :%llx:\n", __func__, (unsigned long long)client_debug_mask.mask1, (unsigned long long)client_debug_mask.mask2); return ret; } int orangefs_debugfs_new_client_string(void __user *arg) { int ret; ret = copy_from_user(&client_debug_array_string, (void __user *)arg, ORANGEFS_MAX_DEBUG_STRING_LEN); if (ret != 0) { pr_info("%s: CLIENT_STRING: copy_from_user failed\n", __func__); return -EFAULT; } /* * The real client-core makes an effort to ensure * that actual strings that aren't too long to fit in * this buffer is what we get here. We're going to use * string functions on the stuff we got, so we'll make * this extra effort to try and keep from * flowing out of this buffer when we use the string * functions, even if somehow the stuff we end up * with here is garbage. */ client_debug_array_string[ORANGEFS_MAX_DEBUG_STRING_LEN - 1] = '\0'; pr_info("%s: client debug array string has been received.\n", __func__); if (!help_string_initialized) { /* Build a proper debug help string. 
*/ ret = orangefs_prepare_debugfs_help_string(0); if (ret) { gossip_err("%s: no debug help string \n", __func__); return ret; } } debug_mask_to_string(&client_debug_mask, 1); debugfs_remove(client_debug_dentry); orangefs_client_debug_init(); help_string_initialized++; return 0; } int orangefs_debugfs_new_debug(void __user *arg) { struct dev_mask_info_s mask_info = {0}; int ret; ret = copy_from_user(&mask_info, (void __user *)arg, sizeof(mask_info)); if (ret != 0) return -EIO; if (mask_info.mask_type == KERNEL_MASK) { if ((mask_info.mask_value == 0) && (kernel_mask_set_mod_init)) { /* * the kernel debug mask was set when the * kernel module was loaded; don't override * it if the client-core was started without * a value for ORANGEFS_KMODMASK. */ return 0; } debug_mask_to_string(&mask_info.mask_value, mask_info.mask_type); orangefs_gossip_debug_mask = mask_info.mask_value; pr_info("%s: kernel debug mask has been modified to " ":%s: :%llx:\n", __func__, kernel_debug_string, (unsigned long long)orangefs_gossip_debug_mask); } else if (mask_info.mask_type == CLIENT_MASK) { debug_mask_to_string(&mask_info.mask_value, mask_info.mask_type); pr_info("%s: client debug mask has been modified to" ":%s: :%llx:\n", __func__, client_debug_string, llu(mask_info.mask_value)); } else { gossip_err("Invalid mask type....\n"); return -EINVAL; } return ret; }
linux-master
fs/orangefs/orangefs-debugfs.c
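Much of orangefs-debugfs.c is bookkeeping around one idea: debug settings travel through the debugfs files as comma-separated keywords but are stored as bitmasks, so every write goes string-to-mask and every report goes mask-to-string, with amalgam keywords like "all" checked before the per-bit loop. Below is a hedged userspace sketch of that round trip; the keyword table is invented for the example and stands in for s_kmod_keyword_mask_map.

/*
 * Userspace sketch of the keyword <-> mask translation performed by
 * debug_string_to_mask() and debug_mask_to_string(). The table below is
 * illustrative only; the real map lives in the orangefs module.
 */
#define _DEFAULT_SOURCE		/* for strsep() and strdup() on glibc */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct keyword_mask {
	const char *keyword;
	uint64_t mask_val;
};

static const struct keyword_mask map[] = {
	{ "super", 0x1 },
	{ "inode", 0x2 },
	{ "xattr", 0x4 },
	{ "all",   0x7 },	/* amalgam keyword covering everything above */
};

/* "string -> mask": OR in the mask of every recognized keyword. */
static uint64_t string_to_mask(const char *s)
{
	uint64_t mask = 0;
	char *dup = strdup(s), *cursor = dup, *kw;
	size_t i;

	if (!dup)
		return 0;
	while ((kw = strsep(&cursor, ",")) != NULL)
		for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
			if (!strcmp(kw, map[i].keyword))
				mask |= map[i].mask_val;
	free(dup);
	return mask;
}

/* "mask -> string": list every keyword whose bits are set, amalgams first. */
static void mask_to_string(uint64_t mask, char *out, size_t len)
{
	size_t i;

	out[0] = '\0';
	if (mask == map[3].mask_val) {		/* check the "all" amalgam first */
		snprintf(out, len, "all");
		return;
	}
	for (i = 0; i < 3; i++)			/* skip the "all" amalgam entry */
		if (mask & map[i].mask_val) {
			strncat(out, map[i].keyword, len - strlen(out) - 1);
			strncat(out, ",", len - strlen(out) - 1);
		}
	if (out[0])
		out[strlen(out) - 1] = '\0';	/* drop the trailing comma */
	else
		snprintf(out, len, "none");
}

int main(void)
{
	char buf[64];

	mask_to_string(string_to_mask("super,xattr"), buf, sizeof(buf));
	printf("%s\n", buf);	/* prints "super,xattr" */
	return 0;
}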
// SPDX-License-Identifier: GPL-2.0 /* * (C) 2001 Clemson University and The University of Chicago * Copyright 2018 Omnibond Systems, L.L.C. * * See COPYING in top-level directory. */ /* * Linux VFS extended attribute operations. */ #include "protocol.h" #include "orangefs-kernel.h" #include "orangefs-bufmap.h" #include <linux/posix_acl_xattr.h> #include <linux/xattr.h> #include <linux/hashtable.h> #define SYSTEM_ORANGEFS_KEY "system.pvfs2." #define SYSTEM_ORANGEFS_KEY_LEN 13 /* * this function returns * 0 if the key corresponding to name is not meant to be printed as part * of a listxattr. * 1 if the key corresponding to name is meant to be returned as part of * a listxattr. * The ones that start SYSTEM_ORANGEFS_KEY are the ones to avoid printing. */ static int is_reserved_key(const char *key, size_t size) { if (size < SYSTEM_ORANGEFS_KEY_LEN) return 1; return strncmp(key, SYSTEM_ORANGEFS_KEY, SYSTEM_ORANGEFS_KEY_LEN) ? 1 : 0; } static inline int convert_to_internal_xattr_flags(int setxattr_flags) { int internal_flag = 0; if (setxattr_flags & XATTR_REPLACE) { /* Attribute must exist! */ internal_flag = ORANGEFS_XATTR_REPLACE; } else if (setxattr_flags & XATTR_CREATE) { /* Attribute must not exist */ internal_flag = ORANGEFS_XATTR_CREATE; } return internal_flag; } static unsigned int xattr_key(const char *key) { unsigned int i = 0; while (key) i += *key++; return i % 16; } static struct orangefs_cached_xattr *find_cached_xattr(struct inode *inode, const char *key) { struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); struct orangefs_cached_xattr *cx; struct hlist_head *h; struct hlist_node *tmp; h = &orangefs_inode->xattr_cache[xattr_key(key)]; if (hlist_empty(h)) return NULL; hlist_for_each_entry_safe(cx, tmp, h, node) { /* if (!time_before(jiffies, cx->timeout)) { hlist_del(&cx->node); kfree(cx); continue; }*/ if (!strcmp(cx->key, key)) return cx; } return NULL; } /* * Tries to get a specified key's attributes of a given * file into a user-specified buffer. Note that the getxattr * interface allows for the users to probe the size of an * extended attribute by passing in a value of 0 to size. * Thus our return value is always the size of the attribute * unless the key does not exist for the file and/or if * there were errors in fetching the attribute value. 
*/ ssize_t orangefs_inode_getxattr(struct inode *inode, const char *name, void *buffer, size_t size) { struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); struct orangefs_kernel_op_s *new_op = NULL; struct orangefs_cached_xattr *cx; ssize_t ret = -ENOMEM; ssize_t length = 0; int fsuid; int fsgid; gossip_debug(GOSSIP_XATTR_DEBUG, "%s: name %s, buffer_size %zd\n", __func__, name, size); if (S_ISLNK(inode->i_mode)) return -EOPNOTSUPP; if (strlen(name) >= ORANGEFS_MAX_XATTR_NAMELEN) return -EINVAL; fsuid = from_kuid(&init_user_ns, current_fsuid()); fsgid = from_kgid(&init_user_ns, current_fsgid()); gossip_debug(GOSSIP_XATTR_DEBUG, "getxattr on inode %pU, name %s " "(uid %o, gid %o)\n", get_khandle_from_ino(inode), name, fsuid, fsgid); down_read(&orangefs_inode->xattr_sem); cx = find_cached_xattr(inode, name); if (cx && time_before(jiffies, cx->timeout)) { if (cx->length == -1) { ret = -ENODATA; goto out_unlock; } else { if (size == 0) { ret = cx->length; goto out_unlock; } if (cx->length > size) { ret = -ERANGE; goto out_unlock; } memcpy(buffer, cx->val, cx->length); memset(buffer + cx->length, 0, size - cx->length); ret = cx->length; goto out_unlock; } } new_op = op_alloc(ORANGEFS_VFS_OP_GETXATTR); if (!new_op) goto out_unlock; new_op->upcall.req.getxattr.refn = orangefs_inode->refn; strcpy(new_op->upcall.req.getxattr.key, name); /* * NOTE: Although keys are meant to be NULL terminated textual * strings, I am going to explicitly pass the length just in case * we change this later on... */ new_op->upcall.req.getxattr.key_sz = strlen(name) + 1; ret = service_operation(new_op, "orangefs_inode_getxattr", get_interruptible_flag(inode)); if (ret != 0) { if (ret == -ENOENT) { ret = -ENODATA; gossip_debug(GOSSIP_XATTR_DEBUG, "orangefs_inode_getxattr: inode %pU key %s" " does not exist!\n", get_khandle_from_ino(inode), (char *)new_op->upcall.req.getxattr.key); cx = kmalloc(sizeof *cx, GFP_KERNEL); if (cx) { strcpy(cx->key, name); cx->length = -1; cx->timeout = jiffies + orangefs_getattr_timeout_msecs*HZ/1000; hash_add(orangefs_inode->xattr_cache, &cx->node, xattr_key(cx->key)); } } goto out_release_op; } /* * Length returned includes null terminator. */ length = new_op->downcall.resp.getxattr.val_sz; /* * Just return the length of the queried attribute. */ if (size == 0) { ret = length; goto out_release_op; } /* * Check to see if key length is > provided buffer size. 
*/ if (length > size) { ret = -ERANGE; goto out_release_op; } memcpy(buffer, new_op->downcall.resp.getxattr.val, length); memset(buffer + length, 0, size - length); gossip_debug(GOSSIP_XATTR_DEBUG, "orangefs_inode_getxattr: inode %pU " "key %s key_sz %d, val_len %d\n", get_khandle_from_ino(inode), (char *)new_op-> upcall.req.getxattr.key, (int)new_op-> upcall.req.getxattr.key_sz, (int)ret); ret = length; if (cx) { strcpy(cx->key, name); memcpy(cx->val, buffer, length); cx->length = length; cx->timeout = jiffies + HZ; } else { cx = kmalloc(sizeof *cx, GFP_KERNEL); if (cx) { strcpy(cx->key, name); memcpy(cx->val, buffer, length); cx->length = length; cx->timeout = jiffies + HZ; hash_add(orangefs_inode->xattr_cache, &cx->node, xattr_key(cx->key)); } } out_release_op: op_release(new_op); out_unlock: up_read(&orangefs_inode->xattr_sem); return ret; } static int orangefs_inode_removexattr(struct inode *inode, const char *name, int flags) { struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); struct orangefs_kernel_op_s *new_op = NULL; struct orangefs_cached_xattr *cx; struct hlist_head *h; struct hlist_node *tmp; int ret = -ENOMEM; if (strlen(name) >= ORANGEFS_MAX_XATTR_NAMELEN) return -EINVAL; down_write(&orangefs_inode->xattr_sem); new_op = op_alloc(ORANGEFS_VFS_OP_REMOVEXATTR); if (!new_op) goto out_unlock; new_op->upcall.req.removexattr.refn = orangefs_inode->refn; /* * NOTE: Although keys are meant to be NULL terminated * textual strings, I am going to explicitly pass the * length just in case we change this later on... */ strcpy(new_op->upcall.req.removexattr.key, name); new_op->upcall.req.removexattr.key_sz = strlen(name) + 1; gossip_debug(GOSSIP_XATTR_DEBUG, "orangefs_inode_removexattr: key %s, key_sz %d\n", (char *)new_op->upcall.req.removexattr.key, (int)new_op->upcall.req.removexattr.key_sz); ret = service_operation(new_op, "orangefs_inode_removexattr", get_interruptible_flag(inode)); if (ret == -ENOENT) { /* * Request to replace a non-existent attribute is an error. */ if (flags & XATTR_REPLACE) ret = -ENODATA; else ret = 0; } gossip_debug(GOSSIP_XATTR_DEBUG, "orangefs_inode_removexattr: returning %d\n", ret); op_release(new_op); h = &orangefs_inode->xattr_cache[xattr_key(name)]; hlist_for_each_entry_safe(cx, tmp, h, node) { if (!strcmp(cx->key, name)) { hlist_del(&cx->node); kfree(cx); break; } } out_unlock: up_write(&orangefs_inode->xattr_sem); return ret; } /* * Tries to set an attribute for a given key on a file. * * Returns a -ve number on error and 0 on success. Key is text, but value * can be binary! 
*/ int orangefs_inode_setxattr(struct inode *inode, const char *name, const void *value, size_t size, int flags) { struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); struct orangefs_kernel_op_s *new_op; int internal_flag = 0; struct orangefs_cached_xattr *cx; struct hlist_head *h; struct hlist_node *tmp; int ret = -ENOMEM; gossip_debug(GOSSIP_XATTR_DEBUG, "%s: name %s, buffer_size %zd\n", __func__, name, size); if (size > ORANGEFS_MAX_XATTR_VALUELEN) return -EINVAL; if (strlen(name) >= ORANGEFS_MAX_XATTR_NAMELEN) return -EINVAL; internal_flag = convert_to_internal_xattr_flags(flags); /* This is equivalent to a removexattr */ if (size == 0 && !value) { gossip_debug(GOSSIP_XATTR_DEBUG, "removing xattr (%s)\n", name); return orangefs_inode_removexattr(inode, name, flags); } gossip_debug(GOSSIP_XATTR_DEBUG, "setxattr on inode %pU, name %s\n", get_khandle_from_ino(inode), name); down_write(&orangefs_inode->xattr_sem); new_op = op_alloc(ORANGEFS_VFS_OP_SETXATTR); if (!new_op) goto out_unlock; new_op->upcall.req.setxattr.refn = orangefs_inode->refn; new_op->upcall.req.setxattr.flags = internal_flag; /* * NOTE: Although keys are meant to be NULL terminated textual * strings, I am going to explicitly pass the length just in * case we change this later on... */ strcpy(new_op->upcall.req.setxattr.keyval.key, name); new_op->upcall.req.setxattr.keyval.key_sz = strlen(name) + 1; memcpy(new_op->upcall.req.setxattr.keyval.val, value, size); new_op->upcall.req.setxattr.keyval.val_sz = size; gossip_debug(GOSSIP_XATTR_DEBUG, "orangefs_inode_setxattr: key %s, key_sz %d " " value size %zd\n", (char *)new_op->upcall.req.setxattr.keyval.key, (int)new_op->upcall.req.setxattr.keyval.key_sz, size); ret = service_operation(new_op, "orangefs_inode_setxattr", get_interruptible_flag(inode)); gossip_debug(GOSSIP_XATTR_DEBUG, "orangefs_inode_setxattr: returning %d\n", ret); /* when request is serviced properly, free req op struct */ op_release(new_op); h = &orangefs_inode->xattr_cache[xattr_key(name)]; hlist_for_each_entry_safe(cx, tmp, h, node) { if (!strcmp(cx->key, name)) { hlist_del(&cx->node); kfree(cx); break; } } out_unlock: up_write(&orangefs_inode->xattr_sem); return ret; } /* * Tries to get a specified object's keys into a user-specified buffer of a * given size. Note that like the previous instances of xattr routines, this * also allows you to pass in a NULL pointer and 0 size to probe the size for * subsequent memory allocations. Thus our return value is always the size of * all the keys unless there were errors in fetching the keys! */ ssize_t orangefs_listxattr(struct dentry *dentry, char *buffer, size_t size) { struct inode *inode = dentry->d_inode; struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); struct orangefs_kernel_op_s *new_op; __u64 token = ORANGEFS_ITERATE_START; ssize_t ret = -ENOMEM; ssize_t total = 0; int count_keys = 0; int key_size; int i = 0; int returned_count = 0; if (size > 0 && !buffer) { gossip_err("%s: bogus NULL pointers\n", __func__); return -EINVAL; } down_read(&orangefs_inode->xattr_sem); new_op = op_alloc(ORANGEFS_VFS_OP_LISTXATTR); if (!new_op) goto out_unlock; if (buffer && size > 0) memset(buffer, 0, size); try_again: key_size = 0; new_op->upcall.req.listxattr.refn = orangefs_inode->refn; new_op->upcall.req.listxattr.token = token; new_op->upcall.req.listxattr.requested_count = (size == 0) ? 
0 : ORANGEFS_MAX_XATTR_LISTLEN; ret = service_operation(new_op, __func__, get_interruptible_flag(inode)); if (ret != 0) goto done; if (size == 0) { /* * This is a bit of a big upper limit, but I did not want to * spend too much time getting this correct, since users end * up allocating memory rather than us... */ total = new_op->downcall.resp.listxattr.returned_count * ORANGEFS_MAX_XATTR_NAMELEN; goto done; } returned_count = new_op->downcall.resp.listxattr.returned_count; if (returned_count < 0 || returned_count > ORANGEFS_MAX_XATTR_LISTLEN) { gossip_err("%s: impossible value for returned_count:%d:\n", __func__, returned_count); ret = -EIO; goto done; } /* * Check to see how much can be fit in the buffer. Fit only whole keys. */ for (i = 0; i < returned_count; i++) { if (new_op->downcall.resp.listxattr.lengths[i] < 0 || new_op->downcall.resp.listxattr.lengths[i] > ORANGEFS_MAX_XATTR_NAMELEN) { gossip_err("%s: impossible value for lengths[%d]\n", __func__, new_op->downcall.resp.listxattr.lengths[i]); ret = -EIO; goto done; } if (total + new_op->downcall.resp.listxattr.lengths[i] > size) goto done; /* * Since many dumb programs try to setxattr() on our reserved * xattrs this is a feeble attempt at defeating those by not * listing them in the output of listxattr.. sigh */ if (is_reserved_key(new_op->downcall.resp.listxattr.key + key_size, new_op->downcall.resp. listxattr.lengths[i])) { gossip_debug(GOSSIP_XATTR_DEBUG, "Copying key %d -> %s\n", i, new_op->downcall.resp.listxattr.key + key_size); memcpy(buffer + total, new_op->downcall.resp.listxattr.key + key_size, new_op->downcall.resp.listxattr.lengths[i]); total += new_op->downcall.resp.listxattr.lengths[i]; count_keys++; } else { gossip_debug(GOSSIP_XATTR_DEBUG, "[RESERVED] key %d -> %s\n", i, new_op->downcall.resp.listxattr.key + key_size); } key_size += new_op->downcall.resp.listxattr.lengths[i]; } /* * Since the buffer was large enough, we might have to continue * fetching more keys! */ token = new_op->downcall.resp.listxattr.token; if (token != ORANGEFS_ITERATE_END) goto try_again; done: gossip_debug(GOSSIP_XATTR_DEBUG, "%s: returning %d" " [size of buffer %ld] (filled in %d keys)\n", __func__, ret ? (int)ret : (int)total, (long)size, count_keys); op_release(new_op); if (ret == 0) ret = total; out_unlock: up_read(&orangefs_inode->xattr_sem); return ret; } static int orangefs_xattr_set_default(const struct xattr_handler *handler, struct mnt_idmap *idmap, struct dentry *unused, struct inode *inode, const char *name, const void *buffer, size_t size, int flags) { return orangefs_inode_setxattr(inode, name, buffer, size, flags); } static int orangefs_xattr_get_default(const struct xattr_handler *handler, struct dentry *unused, struct inode *inode, const char *name, void *buffer, size_t size) { return orangefs_inode_getxattr(inode, name, buffer, size); } static const struct xattr_handler orangefs_xattr_default_handler = { .prefix = "", /* match any name => handlers called with full name */ .get = orangefs_xattr_get_default, .set = orangefs_xattr_set_default, }; const struct xattr_handler *orangefs_xattr_handlers[] = { &orangefs_xattr_default_handler, NULL };
linux-master
fs/orangefs/xattr.c
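The xattr code above keeps a small per-inode cache so repeated getxattr calls for the same name can be answered without an upcall: the attribute name is hashed into one of 16 buckets and hits are served from the cached value until they time out, with missing attributes cached as negative entries. Here is a userspace sketch of just the bucket-selection and lookup idea; the fixed-size cache entries are invented for the example (the kernel chains orangefs_cached_xattr entries on per-bucket hlists instead).

/*
 * Userspace sketch of the xattr cache's bucket selection: sum the bytes of
 * the attribute name and reduce modulo the bucket count (16, as in xattr.c).
 */
#include <stdio.h>
#include <string.h>

#define NBUCKETS 16

/* Additive hash over the name; the loop stops at the terminating NUL. */
static unsigned int xattr_bucket(const char *key)
{
	unsigned int i = 0;

	while (*key)
		i += (unsigned char)*key++;
	return i % NBUCKETS;
}

struct cached_xattr {
	char key[64];
	char val[64];
	int used;
};

/* One-entry-per-bucket toy cache; the real cache chains entries per bucket. */
static struct cached_xattr cache[NBUCKETS];

static void cache_store(const char *key, const char *val)
{
	struct cached_xattr *cx = &cache[xattr_bucket(key)];

	snprintf(cx->key, sizeof(cx->key), "%s", key);
	snprintf(cx->val, sizeof(cx->val), "%s", val);
	cx->used = 1;
}

static const char *cache_lookup(const char *key)
{
	struct cached_xattr *cx = &cache[xattr_bucket(key)];

	if (cx->used && !strcmp(cx->key, key))
		return cx->val;
	return NULL;	/* miss: the kernel would fall through to an upcall */
}

int main(void)
{
	const char *v;

	cache_store("user.comment", "hello");
	v = cache_lookup("user.comment");
	printf("%s\n", v ? v : "(miss)");	/* prints "hello" */
	return 0;
}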
// SPDX-License-Identifier: GPL-2.0 /* * (C) 2001 Clemson University and The University of Chicago * * Changes by Acxiom Corporation to add protocol version to kernel * communication, Copyright Acxiom Corporation, 2005. * * See COPYING in top-level directory. */ #include "protocol.h" #include "orangefs-kernel.h" #include "orangefs-dev-proto.h" #include "orangefs-bufmap.h" #include "orangefs-debugfs.h" #include <linux/debugfs.h> #include <linux/slab.h> /* this file implements the /dev/pvfs2-req device node */ uint32_t orangefs_userspace_version; static int open_access_count; static DEFINE_MUTEX(devreq_mutex); #define DUMP_DEVICE_ERROR() \ do { \ gossip_err("*****************************************************\n");\ gossip_err("ORANGEFS Device Error: You cannot open the device file "); \ gossip_err("\n/dev/%s more than once. Please make sure that\nthere " \ "are no ", ORANGEFS_REQDEVICE_NAME); \ gossip_err("instances of a program using this device\ncurrently " \ "running. (You must verify this!)\n"); \ gossip_err("For example, you can use the lsof program as follows:\n");\ gossip_err("'lsof | grep %s' (run this as root)\n", \ ORANGEFS_REQDEVICE_NAME); \ gossip_err(" open_access_count = %d\n", open_access_count); \ gossip_err("*****************************************************\n");\ } while (0) static int hash_func(__u64 tag, int table_size) { return do_div(tag, (unsigned int)table_size); } static void orangefs_devreq_add_op(struct orangefs_kernel_op_s *op) { int index = hash_func(op->tag, hash_table_size); list_add_tail(&op->list, &orangefs_htable_ops_in_progress[index]); } /* * find the op with this tag and remove it from the in progress * hash table. */ static struct orangefs_kernel_op_s *orangefs_devreq_remove_op(__u64 tag) { struct orangefs_kernel_op_s *op, *next; int index; index = hash_func(tag, hash_table_size); spin_lock(&orangefs_htable_ops_in_progress_lock); list_for_each_entry_safe(op, next, &orangefs_htable_ops_in_progress[index], list) { if (op->tag == tag && !op_state_purged(op) && !op_state_given_up(op)) { list_del_init(&op->list); spin_unlock(&orangefs_htable_ops_in_progress_lock); return op; } } spin_unlock(&orangefs_htable_ops_in_progress_lock); return NULL; } /* Returns whether any FS are still pending remounted */ static int mark_all_pending_mounts(void) { int unmounted = 1; struct orangefs_sb_info_s *orangefs_sb = NULL; spin_lock(&orangefs_superblocks_lock); list_for_each_entry(orangefs_sb, &orangefs_superblocks, list) { /* All of these file system require a remount */ orangefs_sb->mount_pending = 1; unmounted = 0; } spin_unlock(&orangefs_superblocks_lock); return unmounted; } /* * Determine if a given file system needs to be remounted or not * Returns -1 on error * 0 if already mounted * 1 if needs remount */ static int fs_mount_pending(__s32 fsid) { int mount_pending = -1; struct orangefs_sb_info_s *orangefs_sb = NULL; spin_lock(&orangefs_superblocks_lock); list_for_each_entry(orangefs_sb, &orangefs_superblocks, list) { if (orangefs_sb->fs_id == fsid) { mount_pending = orangefs_sb->mount_pending; break; } } spin_unlock(&orangefs_superblocks_lock); return mount_pending; } static int orangefs_devreq_open(struct inode *inode, struct file *file) { int ret = -EINVAL; /* in order to ensure that the filesystem driver sees correct UIDs */ if (file->f_cred->user_ns != &init_user_ns) { gossip_err("%s: device cannot be opened outside init_user_ns\n", __func__); goto out; } if (!(file->f_flags & O_NONBLOCK)) { gossip_err("%s: device cannot be opened in blocking mode\n", 
__func__); goto out; } ret = -EACCES; gossip_debug(GOSSIP_DEV_DEBUG, "client-core: opening device\n"); mutex_lock(&devreq_mutex); if (open_access_count == 0) { open_access_count = 1; ret = 0; } else { DUMP_DEVICE_ERROR(); } mutex_unlock(&devreq_mutex); out: gossip_debug(GOSSIP_DEV_DEBUG, "pvfs2-client-core: open device complete (ret = %d)\n", ret); return ret; } /* Function for read() callers into the device */ static ssize_t orangefs_devreq_read(struct file *file, char __user *buf, size_t count, loff_t *offset) { struct orangefs_kernel_op_s *op, *temp; __s32 proto_ver = ORANGEFS_KERNEL_PROTO_VERSION; static __s32 magic = ORANGEFS_DEVREQ_MAGIC; struct orangefs_kernel_op_s *cur_op; unsigned long ret; /* We do not support blocking IO. */ if (!(file->f_flags & O_NONBLOCK)) { gossip_err("%s: blocking read from client-core.\n", __func__); return -EINVAL; } /* * The client will do an ioctl to find MAX_DEV_REQ_UPSIZE, then * always read with that size buffer. */ if (count != MAX_DEV_REQ_UPSIZE) { gossip_err("orangefs: client-core tried to read wrong size\n"); return -EINVAL; } /* Check for an empty list before locking. */ if (list_empty(&orangefs_request_list)) return -EAGAIN; restart: cur_op = NULL; /* Get next op (if any) from top of list. */ spin_lock(&orangefs_request_list_lock); list_for_each_entry_safe(op, temp, &orangefs_request_list, list) { __s32 fsid; /* This lock is held past the end of the loop when we break. */ spin_lock(&op->lock); if (unlikely(op_state_purged(op) || op_state_given_up(op))) { spin_unlock(&op->lock); continue; } fsid = fsid_of_op(op); if (fsid != ORANGEFS_FS_ID_NULL) { int ret; /* Skip ops whose filesystem needs to be mounted. */ ret = fs_mount_pending(fsid); if (ret == 1) { gossip_debug(GOSSIP_DEV_DEBUG, "%s: mount pending, skipping op tag " "%llu %s\n", __func__, llu(op->tag), get_opname_string(op)); spin_unlock(&op->lock); continue; /* * Skip ops whose filesystem we don't know about unless * it is being mounted or unmounted. It is possible for * a filesystem we don't know about to be unmounted if * it fails to mount in the kernel after userspace has * been sent the mount request. */ /* XXX: is there a better way to detect this? */ } else if (ret == -1 && !(op->upcall.type == ORANGEFS_VFS_OP_FS_MOUNT || op->upcall.type == ORANGEFS_VFS_OP_GETATTR || op->upcall.type == ORANGEFS_VFS_OP_FS_UMOUNT)) { gossip_debug(GOSSIP_DEV_DEBUG, "orangefs: skipping op tag %llu %s\n", llu(op->tag), get_opname_string(op)); gossip_err( "orangefs: ERROR: fs_mount_pending %d\n", fsid); spin_unlock(&op->lock); continue; } } /* * Either this op does not pertain to a filesystem, is mounting * a filesystem, or pertains to a mounted filesystem. Let it * through. */ cur_op = op; break; } /* * At this point we either have a valid op and can continue or have not * found an op and must ask the client to try again later. */ if (!cur_op) { spin_unlock(&orangefs_request_list_lock); return -EAGAIN; } gossip_debug(GOSSIP_DEV_DEBUG, "%s: reading op tag %llu %s\n", __func__, llu(cur_op->tag), get_opname_string(cur_op)); /* * Such an op should never be on the list in the first place. If so, we * will abort. */ if (op_state_in_progress(cur_op) || op_state_serviced(cur_op)) { gossip_err("orangefs: ERROR: Current op already queued.\n"); list_del_init(&cur_op->list); spin_unlock(&cur_op->lock); spin_unlock(&orangefs_request_list_lock); return -EAGAIN; } list_del_init(&cur_op->list); spin_unlock(&orangefs_request_list_lock); spin_unlock(&cur_op->lock); /* Push the upcall out. 
*/ ret = copy_to_user(buf, &proto_ver, sizeof(__s32)); if (ret != 0) goto error; ret = copy_to_user(buf + sizeof(__s32), &magic, sizeof(__s32)); if (ret != 0) goto error; ret = copy_to_user(buf + 2 * sizeof(__s32), &cur_op->tag, sizeof(__u64)); if (ret != 0) goto error; ret = copy_to_user(buf + 2 * sizeof(__s32) + sizeof(__u64), &cur_op->upcall, sizeof(struct orangefs_upcall_s)); if (ret != 0) goto error; spin_lock(&orangefs_htable_ops_in_progress_lock); spin_lock(&cur_op->lock); if (unlikely(op_state_given_up(cur_op))) { spin_unlock(&cur_op->lock); spin_unlock(&orangefs_htable_ops_in_progress_lock); complete(&cur_op->waitq); goto restart; } /* * Set the operation to be in progress and move it between lists since * it has been sent to the client. */ set_op_state_inprogress(cur_op); gossip_debug(GOSSIP_DEV_DEBUG, "%s: 1 op:%s: op_state:%d: process:%s:\n", __func__, get_opname_string(cur_op), cur_op->op_state, current->comm); orangefs_devreq_add_op(cur_op); spin_unlock(&cur_op->lock); spin_unlock(&orangefs_htable_ops_in_progress_lock); /* The client only asks to read one size buffer. */ return MAX_DEV_REQ_UPSIZE; error: /* * We were unable to copy the op data to the client. Put the op back in * list. If client has crashed, the op will be purged later when the * device is released. */ gossip_err("orangefs: Failed to copy data to user space\n"); spin_lock(&orangefs_request_list_lock); spin_lock(&cur_op->lock); if (likely(!op_state_given_up(cur_op))) { set_op_state_waiting(cur_op); gossip_debug(GOSSIP_DEV_DEBUG, "%s: 2 op:%s: op_state:%d: process:%s:\n", __func__, get_opname_string(cur_op), cur_op->op_state, current->comm); list_add(&cur_op->list, &orangefs_request_list); spin_unlock(&cur_op->lock); } else { spin_unlock(&cur_op->lock); complete(&cur_op->waitq); } spin_unlock(&orangefs_request_list_lock); return -EFAULT; } /* * Function for writev() callers into the device. 
* * Userspace should have written: * - __u32 version * - __u32 magic * - __u64 tag * - struct orangefs_downcall_s * - trailer buffer (in the case of READDIR operations) */ static ssize_t orangefs_devreq_write_iter(struct kiocb *iocb, struct iov_iter *iter) { ssize_t ret; struct orangefs_kernel_op_s *op = NULL; struct { __u32 version; __u32 magic; __u64 tag; } head; int total = ret = iov_iter_count(iter); int downcall_size = sizeof(struct orangefs_downcall_s); int head_size = sizeof(head); gossip_debug(GOSSIP_DEV_DEBUG, "%s: total:%d: ret:%zd:\n", __func__, total, ret); if (total < MAX_DEV_REQ_DOWNSIZE) { gossip_err("%s: total:%d: must be at least:%u:\n", __func__, total, (unsigned int) MAX_DEV_REQ_DOWNSIZE); return -EFAULT; } if (!copy_from_iter_full(&head, head_size, iter)) { gossip_err("%s: failed to copy head.\n", __func__); return -EFAULT; } if (head.version < ORANGEFS_MINIMUM_USERSPACE_VERSION) { gossip_err("%s: userspace claims version" "%d, minimum version required: %d.\n", __func__, head.version, ORANGEFS_MINIMUM_USERSPACE_VERSION); return -EPROTO; } if (head.magic != ORANGEFS_DEVREQ_MAGIC) { gossip_err("Error: Device magic number does not match.\n"); return -EPROTO; } if (!orangefs_userspace_version) { orangefs_userspace_version = head.version; } else if (orangefs_userspace_version != head.version) { gossip_err("Error: userspace version changes\n"); return -EPROTO; } /* remove the op from the in progress hash table */ op = orangefs_devreq_remove_op(head.tag); if (!op) { gossip_debug(GOSSIP_DEV_DEBUG, "%s: No one's waiting for tag %llu\n", __func__, llu(head.tag)); return ret; } if (!copy_from_iter_full(&op->downcall, downcall_size, iter)) { gossip_err("%s: failed to copy downcall.\n", __func__); goto Efault; } if (op->downcall.status) goto wakeup; /* * We've successfully peeled off the head and the downcall. * Something has gone awry if total doesn't equal the * sum of head_size, downcall_size and trailer_size. */ if ((head_size + downcall_size + op->downcall.trailer_size) != total) { gossip_err("%s: funky write, head_size:%d" ": downcall_size:%d: trailer_size:%lld" ": total size:%d:\n", __func__, head_size, downcall_size, op->downcall.trailer_size, total); goto Efault; } /* Only READDIR operations should have trailers. */ if ((op->downcall.type != ORANGEFS_VFS_OP_READDIR) && (op->downcall.trailer_size != 0)) { gossip_err("%s: %x operation with trailer.", __func__, op->downcall.type); goto Efault; } /* READDIR operations should always have trailers. */ if ((op->downcall.type == ORANGEFS_VFS_OP_READDIR) && (op->downcall.trailer_size == 0)) { gossip_err("%s: %x operation with no trailer.", __func__, op->downcall.type); goto Efault; } if (op->downcall.type != ORANGEFS_VFS_OP_READDIR) goto wakeup; op->downcall.trailer_buf = vzalloc(op->downcall.trailer_size); if (!op->downcall.trailer_buf) goto Enomem; if (!copy_from_iter_full(op->downcall.trailer_buf, op->downcall.trailer_size, iter)) { gossip_err("%s: failed to copy trailer.\n", __func__); vfree(op->downcall.trailer_buf); goto Efault; } wakeup: /* * Return to vfs waitqueue, and back to service_operation * through wait_for_matching_downcall. 
*/ spin_lock(&op->lock); if (unlikely(op_is_cancel(op))) { spin_unlock(&op->lock); put_cancel(op); } else if (unlikely(op_state_given_up(op))) { spin_unlock(&op->lock); complete(&op->waitq); } else { set_op_state_serviced(op); gossip_debug(GOSSIP_DEV_DEBUG, "%s: op:%s: op_state:%d: process:%s:\n", __func__, get_opname_string(op), op->op_state, current->comm); spin_unlock(&op->lock); } return ret; Efault: op->downcall.status = -(ORANGEFS_ERROR_BIT | 9); ret = -EFAULT; goto wakeup; Enomem: op->downcall.status = -(ORANGEFS_ERROR_BIT | 8); ret = -ENOMEM; goto wakeup; } /* * NOTE: gets called when the last reference to this device is dropped. * Using the open_access_count variable, we enforce a reference count * on this file so that it can be opened by only one process at a time. * the devreq_mutex is used to make sure all i/o has completed * before we call orangefs_bufmap_finalize, and similar such tricky * situations */ static int orangefs_devreq_release(struct inode *inode, struct file *file) { int unmounted = 0; gossip_debug(GOSSIP_DEV_DEBUG, "%s:pvfs2-client-core: exiting, closing device\n", __func__); mutex_lock(&devreq_mutex); orangefs_bufmap_finalize(); open_access_count = -1; unmounted = mark_all_pending_mounts(); gossip_debug(GOSSIP_DEV_DEBUG, "ORANGEFS Device Close: Filesystem(s) %s\n", (unmounted ? "UNMOUNTED" : "MOUNTED")); purge_waiting_ops(); purge_inprogress_ops(); orangefs_bufmap_run_down(); gossip_debug(GOSSIP_DEV_DEBUG, "pvfs2-client-core: device close complete\n"); open_access_count = 0; orangefs_userspace_version = 0; mutex_unlock(&devreq_mutex); return 0; } int is_daemon_in_service(void) { int in_service; /* * What this function does is checks if client-core is alive * based on the access count we maintain on the device. */ mutex_lock(&devreq_mutex); in_service = open_access_count == 1 ? 0 : -EIO; mutex_unlock(&devreq_mutex); return in_service; } bool __is_daemon_in_service(void) { return open_access_count == 1; } static inline long check_ioctl_command(unsigned int command) { /* Check for valid ioctl codes */ if (_IOC_TYPE(command) != ORANGEFS_DEV_MAGIC) { gossip_err("device ioctl magic numbers don't match! Did you rebuild pvfs2-client-core/libpvfs2? [cmd %x, magic %x != %x]\n", command, _IOC_TYPE(command), ORANGEFS_DEV_MAGIC); return -EINVAL; } /* and valid ioctl commands */ if (_IOC_NR(command) >= ORANGEFS_DEV_MAXNR || _IOC_NR(command) <= 0) { gossip_err("Invalid ioctl command number [%d >= %d]\n", _IOC_NR(command), ORANGEFS_DEV_MAXNR); return -ENOIOCTLCMD; } return 0; } static long dispatch_ioctl_command(unsigned int command, unsigned long arg) { static __s32 magic = ORANGEFS_DEVREQ_MAGIC; static __s32 max_up_size = MAX_DEV_REQ_UPSIZE; static __s32 max_down_size = MAX_DEV_REQ_DOWNSIZE; struct ORANGEFS_dev_map_desc user_desc; int ret = 0; int upstream_kmod = 1; struct orangefs_sb_info_s *orangefs_sb; /* mtmoore: add locking here */ switch (command) { case ORANGEFS_DEV_GET_MAGIC: return ((put_user(magic, (__s32 __user *) arg) == -EFAULT) ? -EIO : 0); case ORANGEFS_DEV_GET_MAX_UPSIZE: return ((put_user(max_up_size, (__s32 __user *) arg) == -EFAULT) ? -EIO : 0); case ORANGEFS_DEV_GET_MAX_DOWNSIZE: return ((put_user(max_down_size, (__s32 __user *) arg) == -EFAULT) ? -EIO : 0); case ORANGEFS_DEV_MAP: ret = copy_from_user(&user_desc, (struct ORANGEFS_dev_map_desc __user *) arg, sizeof(struct ORANGEFS_dev_map_desc)); /* WTF -EIO and not -EFAULT? */ return ret ? 
-EIO : orangefs_bufmap_initialize(&user_desc); case ORANGEFS_DEV_REMOUNT_ALL: gossip_debug(GOSSIP_DEV_DEBUG, "%s: got ORANGEFS_DEV_REMOUNT_ALL\n", __func__); /* * remount all mounted orangefs volumes to regain the lost * dynamic mount tables (if any) -- NOTE: this is done * without keeping the superblock list locked due to the * upcall/downcall waiting. also, the request mutex is * used to ensure that no operations will be serviced until * all of the remounts are serviced (to avoid ops between * mounts to fail) */ ret = mutex_lock_interruptible(&orangefs_request_mutex); if (ret < 0) return ret; gossip_debug(GOSSIP_DEV_DEBUG, "%s: priority remount in progress\n", __func__); spin_lock(&orangefs_superblocks_lock); list_for_each_entry(orangefs_sb, &orangefs_superblocks, list) { /* * We have to drop the spinlock, so entries can be * removed. They can't be freed, though, so we just * keep the forward pointers and zero the back ones - * that way we can get to the rest of the list. */ if (!orangefs_sb->list.prev) continue; gossip_debug(GOSSIP_DEV_DEBUG, "%s: Remounting SB %p\n", __func__, orangefs_sb); spin_unlock(&orangefs_superblocks_lock); ret = orangefs_remount(orangefs_sb); spin_lock(&orangefs_superblocks_lock); if (ret) { gossip_debug(GOSSIP_DEV_DEBUG, "SB %p remount failed\n", orangefs_sb); break; } } spin_unlock(&orangefs_superblocks_lock); gossip_debug(GOSSIP_DEV_DEBUG, "%s: priority remount complete\n", __func__); mutex_unlock(&orangefs_request_mutex); return ret; case ORANGEFS_DEV_UPSTREAM: ret = copy_to_user((void __user *)arg, &upstream_kmod, sizeof(upstream_kmod)); if (ret != 0) return -EIO; else return ret; case ORANGEFS_DEV_CLIENT_MASK: return orangefs_debugfs_new_client_mask((void __user *)arg); case ORANGEFS_DEV_CLIENT_STRING: return orangefs_debugfs_new_client_string((void __user *)arg); case ORANGEFS_DEV_DEBUG: return orangefs_debugfs_new_debug((void __user *)arg); default: return -ENOIOCTLCMD; } return -ENOIOCTLCMD; } static long orangefs_devreq_ioctl(struct file *file, unsigned int command, unsigned long arg) { long ret; /* Check for properly constructed commands */ ret = check_ioctl_command(command); if (ret < 0) return (int)ret; return (int)dispatch_ioctl_command(command, arg); } #ifdef CONFIG_COMPAT /* CONFIG_COMPAT is in .config */ /* Compat structure for the ORANGEFS_DEV_MAP ioctl */ struct ORANGEFS_dev_map_desc32 { compat_uptr_t ptr; __s32 total_size; __s32 size; __s32 count; }; /* * 32 bit user-space apps' ioctl handlers when kernel modules * is compiled as a 64 bit one */ static long orangefs_devreq_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long args) { long ret; /* Check for properly constructed commands */ ret = check_ioctl_command(cmd); if (ret < 0) return ret; if (cmd == ORANGEFS_DEV_MAP) { struct ORANGEFS_dev_map_desc desc; struct ORANGEFS_dev_map_desc32 d32; if (copy_from_user(&d32, (void __user *)args, sizeof(d32))) return -EFAULT; desc.ptr = compat_ptr(d32.ptr); desc.total_size = d32.total_size; desc.size = d32.size; desc.count = d32.count; return orangefs_bufmap_initialize(&desc); } /* no other ioctl requires translation */ return dispatch_ioctl_command(cmd, args); } #endif /* CONFIG_COMPAT is in .config */ static __poll_t orangefs_devreq_poll(struct file *file, struct poll_table_struct *poll_table) { __poll_t poll_revent_mask = 0; poll_wait(file, &orangefs_request_list_waitq, poll_table); if (!list_empty(&orangefs_request_list)) poll_revent_mask |= EPOLLIN; return poll_revent_mask; } /* the assigned character device major number */ static 
int orangefs_dev_major; static const struct file_operations orangefs_devreq_file_operations = { .owner = THIS_MODULE, .read = orangefs_devreq_read, .write_iter = orangefs_devreq_write_iter, .open = orangefs_devreq_open, .release = orangefs_devreq_release, .unlocked_ioctl = orangefs_devreq_ioctl, #ifdef CONFIG_COMPAT /* CONFIG_COMPAT is in .config */ .compat_ioctl = orangefs_devreq_compat_ioctl, #endif .poll = orangefs_devreq_poll }; /* * Initialize orangefs device specific state: * Must be called at module load time only */ int orangefs_dev_init(void) { /* register orangefs-req device */ orangefs_dev_major = register_chrdev(0, ORANGEFS_REQDEVICE_NAME, &orangefs_devreq_file_operations); if (orangefs_dev_major < 0) { gossip_debug(GOSSIP_DEV_DEBUG, "Failed to register /dev/%s (error %d)\n", ORANGEFS_REQDEVICE_NAME, orangefs_dev_major); return orangefs_dev_major; } gossip_debug(GOSSIP_DEV_DEBUG, "*** /dev/%s character device registered ***\n", ORANGEFS_REQDEVICE_NAME); gossip_debug(GOSSIP_DEV_DEBUG, "'mknod /dev/%s c %d 0'.\n", ORANGEFS_REQDEVICE_NAME, orangefs_dev_major); return 0; } void orangefs_dev_cleanup(void) { unregister_chrdev(orangefs_dev_major, ORANGEFS_REQDEVICE_NAME); gossip_debug(GOSSIP_DEV_DEBUG, "*** /dev/%s character device unregistered ***\n", ORANGEFS_REQDEVICE_NAME); }
linux-master
fs/orangefs/devorangefs-req.c
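The device protocol in devorangefs-req.c is easiest to see from the client side. The four copy_to_user() calls in orangefs_devreq_read() lay each upcall out as a 4-byte protocol version, a 4-byte magic, an 8-byte tag and then the upcall struct, and orangefs_devreq_write_iter() expects the mirrored head (version, magic, tag) in front of every downcall, followed by a trailer only for READDIR. The sketch below is a hypothetical userspace-side view under those assumptions; the struct and function names (devreq_upcall_hdr, devreq_downcall_hdr, parse_upcall) are illustrative and are not part of OrangeFS or its client-core.

/* Hypothetical userspace-side view of the request-device wire format.
 * Offsets mirror the copy_to_user() calls in orangefs_devreq_read() and
 * the head struct checked in orangefs_devreq_write_iter(). */
#include <stdint.h>
#include <string.h>

struct devreq_upcall_hdr {		/* what the client reads */
	int32_t  proto_ver;		/* ORANGEFS_KERNEL_PROTO_VERSION */
	int32_t  magic;			/* ORANGEFS_DEVREQ_MAGIC */
	uint64_t tag;			/* identifies the pending op */
	/* followed by struct orangefs_upcall_s */
};

struct devreq_downcall_hdr {		/* what the client writes back */
	uint32_t version;		/* >= ORANGEFS_MINIMUM_USERSPACE_VERSION */
	uint32_t magic;			/* ORANGEFS_DEVREQ_MAGIC */
	uint64_t tag;			/* copied from the matching upcall */
	/* followed by struct orangefs_downcall_s and, for READDIR only,
	 * a trailer of exactly downcall.trailer_size bytes */
};

/* buf holds exactly MAX_DEV_REQ_UPSIZE bytes returned by read(2) */
static void parse_upcall(const char *buf, struct devreq_upcall_hdr *hdr,
			 const char **upcall_body)
{
	memcpy(hdr, buf, sizeof(*hdr));		/* fields at offsets 0, 4, 8 */
	*upcall_body = buf + sizeof(*hdr);	/* upcall struct starts at 16 */
}

The fixed head is what lets the written downcall be matched back to its op: orangefs_devreq_write_iter() uses the echoed tag to pull the op out of the in-progress hash table via orangefs_devreq_remove_op().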
// SPDX-License-Identifier: GPL-2.0 /* * (C) 2001 Clemson University and The University of Chicago * * See COPYING in top-level directory. */ #include "protocol.h" #include "orangefs-kernel.h" /* tags assigned to kernel upcall operations */ static __u64 next_tag_value; static DEFINE_SPINLOCK(next_tag_value_lock); /* the orangefs memory caches */ /* a cache for orangefs upcall/downcall operations */ static struct kmem_cache *op_cache; int op_cache_initialize(void) { op_cache = kmem_cache_create("orangefs_op_cache", sizeof(struct orangefs_kernel_op_s), 0, ORANGEFS_CACHE_CREATE_FLAGS, NULL); if (!op_cache) { gossip_err("Cannot create orangefs_op_cache\n"); return -ENOMEM; } /* initialize our atomic tag counter */ spin_lock(&next_tag_value_lock); next_tag_value = 100; spin_unlock(&next_tag_value_lock); return 0; } int op_cache_finalize(void) { kmem_cache_destroy(op_cache); return 0; } char *get_opname_string(struct orangefs_kernel_op_s *new_op) { if (new_op) { __s32 type = new_op->upcall.type; if (type == ORANGEFS_VFS_OP_FILE_IO) return "OP_FILE_IO"; else if (type == ORANGEFS_VFS_OP_LOOKUP) return "OP_LOOKUP"; else if (type == ORANGEFS_VFS_OP_CREATE) return "OP_CREATE"; else if (type == ORANGEFS_VFS_OP_GETATTR) return "OP_GETATTR"; else if (type == ORANGEFS_VFS_OP_REMOVE) return "OP_REMOVE"; else if (type == ORANGEFS_VFS_OP_MKDIR) return "OP_MKDIR"; else if (type == ORANGEFS_VFS_OP_READDIR) return "OP_READDIR"; else if (type == ORANGEFS_VFS_OP_READDIRPLUS) return "OP_READDIRPLUS"; else if (type == ORANGEFS_VFS_OP_SETATTR) return "OP_SETATTR"; else if (type == ORANGEFS_VFS_OP_SYMLINK) return "OP_SYMLINK"; else if (type == ORANGEFS_VFS_OP_RENAME) return "OP_RENAME"; else if (type == ORANGEFS_VFS_OP_STATFS) return "OP_STATFS"; else if (type == ORANGEFS_VFS_OP_TRUNCATE) return "OP_TRUNCATE"; else if (type == ORANGEFS_VFS_OP_RA_FLUSH) return "OP_RA_FLUSH"; else if (type == ORANGEFS_VFS_OP_FS_MOUNT) return "OP_FS_MOUNT"; else if (type == ORANGEFS_VFS_OP_FS_UMOUNT) return "OP_FS_UMOUNT"; else if (type == ORANGEFS_VFS_OP_GETXATTR) return "OP_GETXATTR"; else if (type == ORANGEFS_VFS_OP_SETXATTR) return "OP_SETXATTR"; else if (type == ORANGEFS_VFS_OP_LISTXATTR) return "OP_LISTXATTR"; else if (type == ORANGEFS_VFS_OP_REMOVEXATTR) return "OP_REMOVEXATTR"; else if (type == ORANGEFS_VFS_OP_PARAM) return "OP_PARAM"; else if (type == ORANGEFS_VFS_OP_PERF_COUNT) return "OP_PERF_COUNT"; else if (type == ORANGEFS_VFS_OP_CANCEL) return "OP_CANCEL"; else if (type == ORANGEFS_VFS_OP_FSYNC) return "OP_FSYNC"; else if (type == ORANGEFS_VFS_OP_FSKEY) return "OP_FSKEY"; else if (type == ORANGEFS_VFS_OP_FEATURES) return "OP_FEATURES"; } return "OP_UNKNOWN?"; } void orangefs_new_tag(struct orangefs_kernel_op_s *op) { spin_lock(&next_tag_value_lock); op->tag = next_tag_value++; if (next_tag_value == 0) next_tag_value = 100; spin_unlock(&next_tag_value_lock); } struct orangefs_kernel_op_s *op_alloc(__s32 type) { struct orangefs_kernel_op_s *new_op = NULL; new_op = kmem_cache_zalloc(op_cache, GFP_KERNEL); if (new_op) { INIT_LIST_HEAD(&new_op->list); spin_lock_init(&new_op->lock); init_completion(&new_op->waitq); new_op->upcall.type = ORANGEFS_VFS_OP_INVALID; new_op->downcall.type = ORANGEFS_VFS_OP_INVALID; new_op->downcall.status = -1; new_op->op_state = OP_VFS_STATE_UNKNOWN; /* initialize the op specific tag and upcall credentials */ orangefs_new_tag(new_op); new_op->upcall.type = type; new_op->attempts = 0; gossip_debug(GOSSIP_CACHE_DEBUG, "Alloced OP (%p: %llu %s)\n", new_op, llu(new_op->tag), get_opname_string(new_op)); 
new_op->upcall.uid = from_kuid(&init_user_ns, current_fsuid()); new_op->upcall.gid = from_kgid(&init_user_ns, current_fsgid()); } else { gossip_err("op_alloc: kmem_cache_zalloc failed!\n"); } return new_op; } void op_release(struct orangefs_kernel_op_s *orangefs_op) { if (orangefs_op) { gossip_debug(GOSSIP_CACHE_DEBUG, "Releasing OP (%p: %llu)\n", orangefs_op, llu(orangefs_op->tag)); kmem_cache_free(op_cache, orangefs_op); } else { gossip_err("NULL pointer in op_release\n"); } }
linux-master
fs/orangefs/orangefs-cache.c
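op_cache exists purely to recycle struct orangefs_kernel_op_s, and every op follows the same allocate/submit/release cycle. The sketch below is a minimal, hypothetical round trip (example_op_round_trip() is not a function in the tree); the getattr request fields mirror orangefs_inode_check_changed() in orangefs-utils.c, and service_operation() is defined in waitqueue.c.

#include "protocol.h"
#include "orangefs-kernel.h"

/* Hypothetical illustration of the op lifecycle backed by op_cache. */
static int example_op_round_trip(struct inode *inode)
{
	struct orangefs_kernel_op_s *new_op;
	int ret;

	new_op = op_alloc(ORANGEFS_VFS_OP_GETATTR);	/* tag assigned here */
	if (!new_op)
		return -ENOMEM;
	new_op->upcall.req.getattr.refn = ORANGEFS_I(inode)->refn;
	new_op->upcall.req.getattr.mask = ORANGEFS_ATTR_SYS_ALL_NOHINT;

	/* queues the op for client-core and sleeps until the downcall */
	ret = service_operation(new_op, __func__,
				get_interruptible_flag(inode));
	/* on success new_op->downcall.resp.getattr holds the reply */

	op_release(new_op);		/* returns the op to op_cache */
	return ret;
}

Note that op_alloc() is what assigns the tag, so an op that must look new to client-core on resubmission gets a fresh tag through orangefs_new_tag(), exactly as orangefs_cancel_op_in_progress() does in waitqueue.c.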
// SPDX-License-Identifier: GPL-2.0 /* * (C) 2001 Clemson University and The University of Chicago * (C) 2011 Omnibond Systems * * Changes by Acxiom Corporation to implement generic service_operation() * function, Copyright Acxiom Corporation, 2005. * * See COPYING in top-level directory. */ /* * In-kernel waitqueue operations. */ #include "protocol.h" #include "orangefs-kernel.h" #include "orangefs-bufmap.h" static int wait_for_matching_downcall(struct orangefs_kernel_op_s *op, long timeout, int flags) __acquires(op->lock); static void orangefs_clean_up_interrupted_operation(struct orangefs_kernel_op_s *op) __releases(op->lock); /* * What we do in this function is to walk the list of operations that are * present in the request queue and mark them as purged. * NOTE: This is called from the device close after client-core has * guaranteed that no new operations could appear on the list since the * client-core is anyway going to exit. */ void purge_waiting_ops(void) { struct orangefs_kernel_op_s *op, *tmp; spin_lock(&orangefs_request_list_lock); list_for_each_entry_safe(op, tmp, &orangefs_request_list, list) { gossip_debug(GOSSIP_WAIT_DEBUG, "pvfs2-client-core: purging op tag %llu %s\n", llu(op->tag), get_opname_string(op)); set_op_state_purged(op); gossip_debug(GOSSIP_DEV_DEBUG, "%s: op:%s: op_state:%d: process:%s:\n", __func__, get_opname_string(op), op->op_state, current->comm); } spin_unlock(&orangefs_request_list_lock); } /* * submits a ORANGEFS operation and waits for it to complete * * Note op->downcall.status will contain the status of the operation (in * errno format), whether provided by pvfs2-client or a result of failure to * service the operation. If the caller wishes to distinguish, then * op->state can be checked to see if it was serviced or not. * * Returns contents of op->downcall.status for convenience */ int service_operation(struct orangefs_kernel_op_s *op, const char *op_name, int flags) { long timeout = MAX_SCHEDULE_TIMEOUT; int ret = 0; DEFINE_WAIT(wait_entry); op->upcall.tgid = current->tgid; op->upcall.pid = current->pid; retry_servicing: op->downcall.status = 0; gossip_debug(GOSSIP_WAIT_DEBUG, "%s: %s op:%p: process:%s: pid:%d:\n", __func__, op_name, op, current->comm, current->pid); /* * If ORANGEFS_OP_NO_MUTEX was set in flags, we need to avoid * acquiring the request_mutex because we're servicing a * high priority remount operation and the request_mutex is * already taken. */ if (!(flags & ORANGEFS_OP_NO_MUTEX)) { if (flags & ORANGEFS_OP_INTERRUPTIBLE) ret = mutex_lock_interruptible(&orangefs_request_mutex); else ret = mutex_lock_killable(&orangefs_request_mutex); /* * check to see if we were interrupted while waiting for * mutex */ if (ret < 0) { op->downcall.status = ret; gossip_debug(GOSSIP_WAIT_DEBUG, "%s: service_operation interrupted.\n", __func__); return ret; } } /* queue up the operation */ spin_lock(&orangefs_request_list_lock); spin_lock(&op->lock); set_op_state_waiting(op); gossip_debug(GOSSIP_DEV_DEBUG, "%s: op:%s: op_state:%d: process:%s:\n", __func__, get_opname_string(op), op->op_state, current->comm); /* add high priority remount op to the front of the line. 
*/ if (flags & ORANGEFS_OP_PRIORITY) list_add(&op->list, &orangefs_request_list); else list_add_tail(&op->list, &orangefs_request_list); spin_unlock(&op->lock); wake_up_interruptible(&orangefs_request_list_waitq); if (!__is_daemon_in_service()) { gossip_debug(GOSSIP_WAIT_DEBUG, "%s:client core is NOT in service.\n", __func__); /* * Don't wait for the userspace component to return if * the filesystem is being umounted anyway. */ if (op->upcall.type == ORANGEFS_VFS_OP_FS_UMOUNT) timeout = 0; else timeout = op_timeout_secs * HZ; } spin_unlock(&orangefs_request_list_lock); if (!(flags & ORANGEFS_OP_NO_MUTEX)) mutex_unlock(&orangefs_request_mutex); ret = wait_for_matching_downcall(op, timeout, flags); gossip_debug(GOSSIP_WAIT_DEBUG, "%s: wait_for_matching_downcall returned %d for %p\n", __func__, ret, op); /* got matching downcall; make sure status is in errno format */ if (!ret) { spin_unlock(&op->lock); op->downcall.status = orangefs_normalize_to_errno(op->downcall.status); ret = op->downcall.status; goto out; } /* failed to get matching downcall */ if (ret == -ETIMEDOUT) { gossip_err("%s: %s -- wait timed out; aborting attempt.\n", __func__, op_name); } /* * remove a waiting op from the request list or * remove an in-progress op from the in-progress list. */ orangefs_clean_up_interrupted_operation(op); op->downcall.status = ret; /* retry if operation has not been serviced and if requested */ if (ret == -EAGAIN) { op->attempts++; timeout = op_timeout_secs * HZ; gossip_debug(GOSSIP_WAIT_DEBUG, "orangefs: tag %llu (%s)" " -- operation to be retried (%d attempt)\n", llu(op->tag), op_name, op->attempts); /* * io ops (ops that use the shared memory buffer) have * to be returned to their caller for a retry. Other ops * can just be recycled here. */ if (!op->uses_shared_memory) goto retry_servicing; } out: gossip_debug(GOSSIP_WAIT_DEBUG, "%s: %s returning: %d for %p.\n", __func__, op_name, ret, op); return ret; } /* This can get called on an I/O op if it had a bad service_operation. */ bool orangefs_cancel_op_in_progress(struct orangefs_kernel_op_s *op) { u64 tag = op->tag; if (!op_state_in_progress(op)) return false; op->slot_to_free = op->upcall.req.io.buf_index; memset(&op->upcall, 0, sizeof(op->upcall)); memset(&op->downcall, 0, sizeof(op->downcall)); op->upcall.type = ORANGEFS_VFS_OP_CANCEL; op->upcall.req.cancel.op_tag = tag; op->downcall.type = ORANGEFS_VFS_OP_INVALID; op->downcall.status = -1; orangefs_new_tag(op); spin_lock(&orangefs_request_list_lock); /* orangefs_request_list_lock is enough of a barrier here */ if (!__is_daemon_in_service()) { spin_unlock(&orangefs_request_list_lock); return false; } spin_lock(&op->lock); set_op_state_waiting(op); gossip_debug(GOSSIP_DEV_DEBUG, "%s: op:%s: op_state:%d: process:%s:\n", __func__, get_opname_string(op), op->op_state, current->comm); list_add(&op->list, &orangefs_request_list); spin_unlock(&op->lock); spin_unlock(&orangefs_request_list_lock); gossip_debug(GOSSIP_WAIT_DEBUG, "Attempting ORANGEFS operation cancellation of tag %llu\n", llu(tag)); return true; } /* * Change an op to the "given up" state and remove it from its list. */ static void orangefs_clean_up_interrupted_operation(struct orangefs_kernel_op_s *op) __releases(op->lock) { /* * handle interrupted cases depending on what state we were in when * the interruption is detected. * * Called with op->lock held. */ /* * List manipulation code elsewhere will ignore ops that * have been given up upon. 
*/ op->op_state |= OP_VFS_STATE_GIVEN_UP; if (list_empty(&op->list)) { /* caught copying to/from daemon */ BUG_ON(op_state_serviced(op)); spin_unlock(&op->lock); wait_for_completion(&op->waitq); } else if (op_state_waiting(op)) { /* * upcall hasn't been read; remove op from upcall request * list. */ spin_unlock(&op->lock); spin_lock(&orangefs_request_list_lock); list_del_init(&op->list); spin_unlock(&orangefs_request_list_lock); gossip_debug(GOSSIP_WAIT_DEBUG, "Interrupted: Removed op %p from request_list\n", op); } else if (op_state_in_progress(op)) { /* op must be removed from the in progress htable */ spin_unlock(&op->lock); spin_lock(&orangefs_htable_ops_in_progress_lock); list_del_init(&op->list); spin_unlock(&orangefs_htable_ops_in_progress_lock); gossip_debug(GOSSIP_WAIT_DEBUG, "Interrupted: Removed op %p" " from htable_ops_in_progress\n", op); } else { spin_unlock(&op->lock); gossip_err("interrupted operation is in a weird state 0x%x\n", op->op_state); } reinit_completion(&op->waitq); } /* * Sleeps on waitqueue waiting for matching downcall. * If client-core finishes servicing, then we are good to go. * else if client-core exits, we get woken up here, and retry with a timeout * * When this call returns to the caller, the specified op will no * longer be in either the in_progress hash table or on the request list. * * Returns 0 on success and -errno on failure * Errors are: * EAGAIN in case we want the caller to requeue and try again.. * EINTR/EIO/ETIMEDOUT indicating we are done trying to service this * operation since client-core seems to be exiting too often * or if we were interrupted. * * Returns with op->lock taken. */ static int wait_for_matching_downcall(struct orangefs_kernel_op_s *op, long timeout, int flags) __acquires(op->lock) { long n; int writeback = flags & ORANGEFS_OP_WRITEBACK, interruptible = flags & ORANGEFS_OP_INTERRUPTIBLE; /* * There's a "schedule_timeout" inside of these wait * primitives, during which the op is out of the hands of the * user process that needs something done and is being * manipulated by the client-core process. */ if (writeback) n = wait_for_completion_io_timeout(&op->waitq, timeout); else if (!writeback && interruptible) n = wait_for_completion_interruptible_timeout(&op->waitq, timeout); else /* !writeback && !interruptible but compiler complains */ n = wait_for_completion_killable_timeout(&op->waitq, timeout); spin_lock(&op->lock); if (op_state_serviced(op)) return 0; if (unlikely(n < 0)) { gossip_debug(GOSSIP_WAIT_DEBUG, "%s: operation interrupted, tag %llu, %p\n", __func__, llu(op->tag), op); return -EINTR; } if (op_state_purged(op)) { gossip_debug(GOSSIP_WAIT_DEBUG, "%s: operation purged, tag %llu, %p, %d\n", __func__, llu(op->tag), op, op->attempts); return (op->attempts < ORANGEFS_PURGE_RETRY_COUNT) ? -EAGAIN : -EIO; } /* must have timed out, then... */ gossip_debug(GOSSIP_WAIT_DEBUG, "%s: operation timed out, tag %llu, %p, %d)\n", __func__, llu(op->tag), op, op->attempts); return -ETIMEDOUT; }
linux-master
fs/orangefs/waitqueue.c
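The interesting part of service_operation() is what happens when wait_for_matching_downcall() does not report success. The helper below is not in the source; it is only the requeue decision from the tail of service_operation() pulled out for clarity, under the return-code contract documented above.

#include "protocol.h"
#include "orangefs-kernel.h"

/*
 * Hypothetical helper mirroring the tail of service_operation().
 *  -EAGAIN    : client-core restarted (op purged) and op->attempts is still
 *               below ORANGEFS_PURGE_RETRY_COUNT; non-I/O ops are requeued
 *               by service_operation() itself, while I/O ops are handed back
 *               to their caller because the shared-memory buffer slot has to
 *               be set up again first.
 *  -EINTR     : the waiting process was interrupted or killed.
 *  -ETIMEDOUT : client-core was absent and the bounded wait expired.
 *  -EIO       : purged too many times; give up.
 */
static bool should_requeue_here(struct orangefs_kernel_op_s *op, int ret)
{
	return ret == -EAGAIN && !op->uses_shared_memory;
}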
// SPDX-License-Identifier: GPL-2.0 /* * (C) 2001 Clemson University and The University of Chicago * Copyright 2018 Omnibond Systems, L.L.C. * * See COPYING in top-level directory. */ #include <linux/kernel.h> #include "protocol.h" #include "orangefs-kernel.h" #include "orangefs-dev-proto.h" #include "orangefs-bufmap.h" __s32 fsid_of_op(struct orangefs_kernel_op_s *op) { __s32 fsid = ORANGEFS_FS_ID_NULL; if (op) { switch (op->upcall.type) { case ORANGEFS_VFS_OP_FILE_IO: fsid = op->upcall.req.io.refn.fs_id; break; case ORANGEFS_VFS_OP_LOOKUP: fsid = op->upcall.req.lookup.parent_refn.fs_id; break; case ORANGEFS_VFS_OP_CREATE: fsid = op->upcall.req.create.parent_refn.fs_id; break; case ORANGEFS_VFS_OP_GETATTR: fsid = op->upcall.req.getattr.refn.fs_id; break; case ORANGEFS_VFS_OP_REMOVE: fsid = op->upcall.req.remove.parent_refn.fs_id; break; case ORANGEFS_VFS_OP_MKDIR: fsid = op->upcall.req.mkdir.parent_refn.fs_id; break; case ORANGEFS_VFS_OP_READDIR: fsid = op->upcall.req.readdir.refn.fs_id; break; case ORANGEFS_VFS_OP_SETATTR: fsid = op->upcall.req.setattr.refn.fs_id; break; case ORANGEFS_VFS_OP_SYMLINK: fsid = op->upcall.req.sym.parent_refn.fs_id; break; case ORANGEFS_VFS_OP_RENAME: fsid = op->upcall.req.rename.old_parent_refn.fs_id; break; case ORANGEFS_VFS_OP_STATFS: fsid = op->upcall.req.statfs.fs_id; break; case ORANGEFS_VFS_OP_TRUNCATE: fsid = op->upcall.req.truncate.refn.fs_id; break; case ORANGEFS_VFS_OP_RA_FLUSH: fsid = op->upcall.req.ra_cache_flush.refn.fs_id; break; case ORANGEFS_VFS_OP_FS_UMOUNT: fsid = op->upcall.req.fs_umount.fs_id; break; case ORANGEFS_VFS_OP_GETXATTR: fsid = op->upcall.req.getxattr.refn.fs_id; break; case ORANGEFS_VFS_OP_SETXATTR: fsid = op->upcall.req.setxattr.refn.fs_id; break; case ORANGEFS_VFS_OP_LISTXATTR: fsid = op->upcall.req.listxattr.refn.fs_id; break; case ORANGEFS_VFS_OP_REMOVEXATTR: fsid = op->upcall.req.removexattr.refn.fs_id; break; case ORANGEFS_VFS_OP_FSYNC: fsid = op->upcall.req.fsync.refn.fs_id; break; default: break; } } return fsid; } static int orangefs_inode_flags(struct ORANGEFS_sys_attr_s *attrs) { int flags = 0; if (attrs->flags & ORANGEFS_IMMUTABLE_FL) flags |= S_IMMUTABLE; else flags &= ~S_IMMUTABLE; if (attrs->flags & ORANGEFS_APPEND_FL) flags |= S_APPEND; else flags &= ~S_APPEND; if (attrs->flags & ORANGEFS_NOATIME_FL) flags |= S_NOATIME; else flags &= ~S_NOATIME; return flags; } static int orangefs_inode_perms(struct ORANGEFS_sys_attr_s *attrs) { int perm_mode = 0; if (attrs->perms & ORANGEFS_O_EXECUTE) perm_mode |= S_IXOTH; if (attrs->perms & ORANGEFS_O_WRITE) perm_mode |= S_IWOTH; if (attrs->perms & ORANGEFS_O_READ) perm_mode |= S_IROTH; if (attrs->perms & ORANGEFS_G_EXECUTE) perm_mode |= S_IXGRP; if (attrs->perms & ORANGEFS_G_WRITE) perm_mode |= S_IWGRP; if (attrs->perms & ORANGEFS_G_READ) perm_mode |= S_IRGRP; if (attrs->perms & ORANGEFS_U_EXECUTE) perm_mode |= S_IXUSR; if (attrs->perms & ORANGEFS_U_WRITE) perm_mode |= S_IWUSR; if (attrs->perms & ORANGEFS_U_READ) perm_mode |= S_IRUSR; if (attrs->perms & ORANGEFS_G_SGID) perm_mode |= S_ISGID; if (attrs->perms & ORANGEFS_U_SUID) perm_mode |= S_ISUID; return perm_mode; } /* * NOTE: in kernel land, we never use the sys_attr->link_target for * anything, so don't bother copying it into the sys_attr object here. 
*/ static inline void copy_attributes_from_inode(struct inode *inode, struct ORANGEFS_sys_attr_s *attrs) { struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); attrs->mask = 0; if (orangefs_inode->attr_valid & ATTR_UID) { attrs->owner = from_kuid(&init_user_ns, inode->i_uid); attrs->mask |= ORANGEFS_ATTR_SYS_UID; gossip_debug(GOSSIP_UTILS_DEBUG, "(UID) %d\n", attrs->owner); } if (orangefs_inode->attr_valid & ATTR_GID) { attrs->group = from_kgid(&init_user_ns, inode->i_gid); attrs->mask |= ORANGEFS_ATTR_SYS_GID; gossip_debug(GOSSIP_UTILS_DEBUG, "(GID) %d\n", attrs->group); } if (orangefs_inode->attr_valid & ATTR_ATIME) { attrs->mask |= ORANGEFS_ATTR_SYS_ATIME; if (orangefs_inode->attr_valid & ATTR_ATIME_SET) { attrs->atime = (time64_t)inode->i_atime.tv_sec; attrs->mask |= ORANGEFS_ATTR_SYS_ATIME_SET; } } if (orangefs_inode->attr_valid & ATTR_MTIME) { attrs->mask |= ORANGEFS_ATTR_SYS_MTIME; if (orangefs_inode->attr_valid & ATTR_MTIME_SET) { attrs->mtime = (time64_t)inode->i_mtime.tv_sec; attrs->mask |= ORANGEFS_ATTR_SYS_MTIME_SET; } } if (orangefs_inode->attr_valid & ATTR_CTIME) attrs->mask |= ORANGEFS_ATTR_SYS_CTIME; /* * ORANGEFS cannot set size with a setattr operation. Probably not * likely to be requested through the VFS, but just in case, don't * worry about ATTR_SIZE */ if (orangefs_inode->attr_valid & ATTR_MODE) { attrs->perms = ORANGEFS_util_translate_mode(inode->i_mode); attrs->mask |= ORANGEFS_ATTR_SYS_PERM; } } static int orangefs_inode_type(enum orangefs_ds_type objtype) { if (objtype == ORANGEFS_TYPE_METAFILE) return S_IFREG; else if (objtype == ORANGEFS_TYPE_DIRECTORY) return S_IFDIR; else if (objtype == ORANGEFS_TYPE_SYMLINK) return S_IFLNK; else return -1; } static void orangefs_make_bad_inode(struct inode *inode) { if (is_root_handle(inode)) { /* * if this occurs, the pvfs2-client-core was killed but we * can't afford to lose the inode operations and such * associated with the root handle in any case. */ gossip_debug(GOSSIP_UTILS_DEBUG, "*** NOT making bad root inode %pU\n", get_khandle_from_ino(inode)); } else { gossip_debug(GOSSIP_UTILS_DEBUG, "*** making bad inode %pU\n", get_khandle_from_ino(inode)); make_bad_inode(inode); } } static int orangefs_inode_is_stale(struct inode *inode, struct ORANGEFS_sys_attr_s *attrs, char *link_target) { struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); int type = orangefs_inode_type(attrs->objtype); /* * If the inode type or symlink target have changed then this * inode is stale. */ if (type == -1 || inode_wrong_type(inode, type)) { orangefs_make_bad_inode(inode); return 1; } if (type == S_IFLNK && strncmp(orangefs_inode->link_target, link_target, ORANGEFS_NAME_MAX)) { orangefs_make_bad_inode(inode); return 1; } return 0; } int orangefs_inode_getattr(struct inode *inode, int flags) { struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); struct orangefs_kernel_op_s *new_op; loff_t inode_size; int ret, type; gossip_debug(GOSSIP_UTILS_DEBUG, "%s: called on inode %pU flags %d\n", __func__, get_khandle_from_ino(inode), flags); again: spin_lock(&inode->i_lock); /* Must have all the attributes in the mask and be within cache time. 
*/ if ((!flags && time_before(jiffies, orangefs_inode->getattr_time)) || orangefs_inode->attr_valid || inode->i_state & I_DIRTY_PAGES) { if (orangefs_inode->attr_valid) { spin_unlock(&inode->i_lock); write_inode_now(inode, 1); goto again; } spin_unlock(&inode->i_lock); return 0; } spin_unlock(&inode->i_lock); new_op = op_alloc(ORANGEFS_VFS_OP_GETATTR); if (!new_op) return -ENOMEM; new_op->upcall.req.getattr.refn = orangefs_inode->refn; /* * Size is the hardest attribute to get. The incremental cost of any * other attribute is essentially zero. */ if (flags) new_op->upcall.req.getattr.mask = ORANGEFS_ATTR_SYS_ALL_NOHINT; else new_op->upcall.req.getattr.mask = ORANGEFS_ATTR_SYS_ALL_NOHINT & ~ORANGEFS_ATTR_SYS_SIZE; ret = service_operation(new_op, __func__, get_interruptible_flag(inode)); if (ret != 0) goto out; again2: spin_lock(&inode->i_lock); /* Must have all the attributes in the mask and be within cache time. */ if ((!flags && time_before(jiffies, orangefs_inode->getattr_time)) || orangefs_inode->attr_valid || inode->i_state & I_DIRTY_PAGES) { if (orangefs_inode->attr_valid) { spin_unlock(&inode->i_lock); write_inode_now(inode, 1); goto again2; } if (inode->i_state & I_DIRTY_PAGES) { ret = 0; goto out_unlock; } gossip_debug(GOSSIP_UTILS_DEBUG, "%s: in cache or dirty\n", __func__); ret = 0; goto out_unlock; } if (!(flags & ORANGEFS_GETATTR_NEW)) { ret = orangefs_inode_is_stale(inode, &new_op->downcall.resp.getattr.attributes, new_op->downcall.resp.getattr.link_target); if (ret) { ret = -ESTALE; goto out_unlock; } } type = orangefs_inode_type(new_op-> downcall.resp.getattr.attributes.objtype); switch (type) { case S_IFREG: inode->i_flags = orangefs_inode_flags(&new_op-> downcall.resp.getattr.attributes); if (flags) { inode_size = (loff_t)new_op-> downcall.resp.getattr.attributes.size; inode->i_size = inode_size; inode->i_blkbits = ffs(new_op->downcall.resp.getattr. attributes.blksize); inode->i_bytes = inode_size; inode->i_blocks = (inode_size + 512 - inode_size % 512)/512; } break; case S_IFDIR: if (flags) { inode->i_size = PAGE_SIZE; inode_set_bytes(inode, inode->i_size); } set_nlink(inode, 1); break; case S_IFLNK: if (flags & ORANGEFS_GETATTR_NEW) { inode->i_size = (loff_t)strlen(new_op-> downcall.resp.getattr.link_target); ret = strscpy(orangefs_inode->link_target, new_op->downcall.resp.getattr.link_target, ORANGEFS_NAME_MAX); if (ret == -E2BIG) { ret = -EIO; goto out_unlock; } inode->i_link = orangefs_inode->link_target; } break; /* i.e. -1 */ default: /* XXX: ESTALE? This is what is done if it is not new. */ orangefs_make_bad_inode(inode); ret = -ESTALE; goto out_unlock; } inode->i_uid = make_kuid(&init_user_ns, new_op-> downcall.resp.getattr.attributes.owner); inode->i_gid = make_kgid(&init_user_ns, new_op-> downcall.resp.getattr.attributes.group); inode->i_atime.tv_sec = (time64_t)new_op-> downcall.resp.getattr.attributes.atime; inode->i_mtime.tv_sec = (time64_t)new_op-> downcall.resp.getattr.attributes.mtime; inode_set_ctime(inode, (time64_t)new_op->downcall.resp.getattr.attributes.ctime, 0); inode->i_atime.tv_nsec = 0; inode->i_mtime.tv_nsec = 0; /* special case: mark the root inode as sticky */ inode->i_mode = type | (is_root_handle(inode) ? 
S_ISVTX : 0) | orangefs_inode_perms(&new_op->downcall.resp.getattr.attributes); orangefs_inode->getattr_time = jiffies + orangefs_getattr_timeout_msecs*HZ/1000; ret = 0; out_unlock: spin_unlock(&inode->i_lock); out: op_release(new_op); return ret; } int orangefs_inode_check_changed(struct inode *inode) { struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); struct orangefs_kernel_op_s *new_op; int ret; gossip_debug(GOSSIP_UTILS_DEBUG, "%s: called on inode %pU\n", __func__, get_khandle_from_ino(inode)); new_op = op_alloc(ORANGEFS_VFS_OP_GETATTR); if (!new_op) return -ENOMEM; new_op->upcall.req.getattr.refn = orangefs_inode->refn; new_op->upcall.req.getattr.mask = ORANGEFS_ATTR_SYS_TYPE | ORANGEFS_ATTR_SYS_LNK_TARGET; ret = service_operation(new_op, __func__, get_interruptible_flag(inode)); if (ret != 0) goto out; ret = orangefs_inode_is_stale(inode, &new_op->downcall.resp.getattr.attributes, new_op->downcall.resp.getattr.link_target); out: op_release(new_op); return ret; } /* * issues a orangefs setattr request to make sure the new attribute values * take effect if successful. returns 0 on success; -errno otherwise */ int orangefs_inode_setattr(struct inode *inode) { struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); struct orangefs_kernel_op_s *new_op; int ret; new_op = op_alloc(ORANGEFS_VFS_OP_SETATTR); if (!new_op) return -ENOMEM; spin_lock(&inode->i_lock); new_op->upcall.uid = from_kuid(&init_user_ns, orangefs_inode->attr_uid); new_op->upcall.gid = from_kgid(&init_user_ns, orangefs_inode->attr_gid); new_op->upcall.req.setattr.refn = orangefs_inode->refn; copy_attributes_from_inode(inode, &new_op->upcall.req.setattr.attributes); orangefs_inode->attr_valid = 0; if (!new_op->upcall.req.setattr.attributes.mask) { spin_unlock(&inode->i_lock); op_release(new_op); return 0; } spin_unlock(&inode->i_lock); ret = service_operation(new_op, __func__, get_interruptible_flag(inode) | ORANGEFS_OP_WRITEBACK); gossip_debug(GOSSIP_UTILS_DEBUG, "orangefs_inode_setattr: returning %d\n", ret); if (ret) orangefs_make_bad_inode(inode); op_release(new_op); if (ret == 0) orangefs_inode->getattr_time = jiffies - 1; return ret; } /* * The following is a very dirty hack that is now a permanent part of the * ORANGEFS protocol. See protocol.h for more error definitions. */ /* The order matches include/orangefs-types.h in the OrangeFS source. */ static int PINT_errno_mapping[] = { 0, EPERM, ENOENT, EINTR, EIO, ENXIO, EBADF, EAGAIN, ENOMEM, EFAULT, EBUSY, EEXIST, ENODEV, ENOTDIR, EISDIR, EINVAL, EMFILE, EFBIG, ENOSPC, EROFS, EMLINK, EPIPE, EDEADLK, ENAMETOOLONG, ENOLCK, ENOSYS, ENOTEMPTY, ELOOP, EWOULDBLOCK, ENOMSG, EUNATCH, EBADR, EDEADLOCK, ENODATA, ETIME, ENONET, EREMOTE, ECOMM, EPROTO, EBADMSG, EOVERFLOW, ERESTART, EMSGSIZE, EPROTOTYPE, ENOPROTOOPT, EPROTONOSUPPORT, EOPNOTSUPP, EADDRINUSE, EADDRNOTAVAIL, ENETDOWN, ENETUNREACH, ENETRESET, ENOBUFS, ETIMEDOUT, ECONNREFUSED, EHOSTDOWN, EHOSTUNREACH, EALREADY, EACCES, ECONNRESET, ERANGE }; int orangefs_normalize_to_errno(__s32 error_code) { __u32 i; /* Success */ if (error_code == 0) { return 0; /* * This shouldn't ever happen. If it does it should be fixed on the * server. */ } else if (error_code > 0) { gossip_err("orangefs: error status received.\n"); gossip_err("orangefs: assuming error code is inverted.\n"); error_code = -error_code; } /* * XXX: This is very bad since error codes from ORANGEFS may not be * suitable for return into userspace. */ /* * Convert ORANGEFS error values into errno values suitable for return * from the kernel. 
*/ if ((-error_code) & ORANGEFS_NON_ERRNO_ERROR_BIT) { if (((-error_code) & (ORANGEFS_ERROR_NUMBER_BITS|ORANGEFS_NON_ERRNO_ERROR_BIT| ORANGEFS_ERROR_BIT)) == ORANGEFS_ECANCEL) { /* * cancellation error codes generally correspond to * a timeout from the client's perspective */ error_code = -ETIMEDOUT; } else { /* assume a default error code */ gossip_err("%s: bad error code :%d:.\n", __func__, error_code); error_code = -EINVAL; } /* Convert ORANGEFS encoded errno values into regular errno values. */ } else if ((-error_code) & ORANGEFS_ERROR_BIT) { i = (-error_code) & ~(ORANGEFS_ERROR_BIT|ORANGEFS_ERROR_CLASS_BITS); if (i < ARRAY_SIZE(PINT_errno_mapping)) error_code = -PINT_errno_mapping[i]; else error_code = -EINVAL; /* * Only ORANGEFS protocol error codes should ever come here. Otherwise * there is a bug somewhere. */ } else { gossip_err("%s: unknown error code.\n", __func__); error_code = -EINVAL; } return error_code; } #define NUM_MODES 11 __s32 ORANGEFS_util_translate_mode(int mode) { int ret = 0; int i = 0; static int modes[NUM_MODES] = { S_IXOTH, S_IWOTH, S_IROTH, S_IXGRP, S_IWGRP, S_IRGRP, S_IXUSR, S_IWUSR, S_IRUSR, S_ISGID, S_ISUID }; static int orangefs_modes[NUM_MODES] = { ORANGEFS_O_EXECUTE, ORANGEFS_O_WRITE, ORANGEFS_O_READ, ORANGEFS_G_EXECUTE, ORANGEFS_G_WRITE, ORANGEFS_G_READ, ORANGEFS_U_EXECUTE, ORANGEFS_U_WRITE, ORANGEFS_U_READ, ORANGEFS_G_SGID, ORANGEFS_U_SUID }; for (i = 0; i < NUM_MODES; i++) if (mode & modes[i]) ret |= orangefs_modes[i]; return ret; } #undef NUM_MODES
linux-master
fs/orangefs/orangefs-utils.c
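The error encoding ties orangefs-utils.c back to the device write path: when orangefs_devreq_write_iter() fails to copy a downcall it stores -(ORANGEFS_ERROR_BIT | 9) or -(ORANGEFS_ERROR_BIT | 8) in op->downcall.status, and orangefs_normalize_to_errno() later turns those into -EFAULT and -ENOMEM through PINT_errno_mapping (indices 9 and 8). The snippet below is a hypothetical worked example of that decode, assuming the protocol.h definitions keep ORANGEFS_ERROR_BIT, ORANGEFS_NON_ERRNO_ERROR_BIT and the class bits out of the low index bits; errno_decode_example() is not in the tree.

#include "protocol.h"
#include "orangefs-kernel.h"

/* Hypothetical worked example of orangefs_normalize_to_errno(). */
static void errno_decode_example(void)
{
	/* (-code) has ORANGEFS_ERROR_BIT set and the non-errno bit clear, so
	 * i = (-code) & ~(ORANGEFS_ERROR_BIT | ORANGEFS_ERROR_CLASS_BITS)
	 * selects PINT_errno_mapping[9] == EFAULT (and [8] == ENOMEM). */
	WARN_ON(orangefs_normalize_to_errno(-(ORANGEFS_ERROR_BIT | 9)) != -EFAULT);
	WARN_ON(orangefs_normalize_to_errno(-(ORANGEFS_ERROR_BIT | 8)) != -ENOMEM);
}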
// SPDX-License-Identifier: GPL-2.0 /* * Copyright 2017 Omnibond Systems, L.L.C. */ #include "protocol.h" #include "orangefs-kernel.h" #include "orangefs-bufmap.h" struct orangefs_dir_part { struct orangefs_dir_part *next; size_t len; }; struct orangefs_dir { __u64 token; struct orangefs_dir_part *part; loff_t end; int error; }; #define PART_SHIFT (24) #define PART_SIZE (1<<24) #define PART_MASK (~(PART_SIZE - 1)) /* * There can be up to 512 directory entries. Each entry is encoded as * follows: * 4 bytes: string size (n) * n bytes: string * 1 byte: trailing zero * padding to 8 bytes * 16 bytes: khandle * padding to 8 bytes * * The trailer_buf starts with a struct orangefs_readdir_response_s * which must be skipped to get to the directory data. * * The data which is received from the userspace daemon is termed a * part and is stored in a linked list in case more than one part is * needed for a large directory. * * The position pointer (ctx->pos) encodes the part and offset on which * to begin reading at. Bits above PART_SHIFT encode the part and bits * below PART_SHIFT encode the offset. Parts are stored in a linked * list which grows as data is received from the server. The overhead * associated with managing the list is presumed to be small compared to * the overhead of communicating with the server. * * As data is received from the server, it is placed at the end of the * part list. Data is parsed from the current position as it is needed. * When data is determined to be corrupt, it is either because the * userspace component has sent back corrupt data or because the file * pointer has been moved to an invalid location. Since the two cannot * be differentiated, return EIO. * * Part zero is synthesized to contains `.' and `..'. Part one is the * first part of the part list. */ static int do_readdir(struct orangefs_inode_s *oi, struct orangefs_dir *od, struct dentry *dentry, struct orangefs_kernel_op_s *op) { struct orangefs_readdir_response_s *resp; int bufi, r; /* * Despite the badly named field, readdir does not use shared * memory. However, there are a limited number of readdir * slots, which must be allocated here. This flag simply tells * the op scheduler to return the op here for retry. */ op->uses_shared_memory = 1; op->upcall.req.readdir.refn = oi->refn; op->upcall.req.readdir.token = od->token; op->upcall.req.readdir.max_dirent_count = ORANGEFS_MAX_DIRENT_COUNT_READDIR; again: bufi = orangefs_readdir_index_get(); if (bufi < 0) { od->error = bufi; return bufi; } op->upcall.req.readdir.buf_index = bufi; r = service_operation(op, "orangefs_readdir", get_interruptible_flag(dentry->d_inode)); orangefs_readdir_index_put(bufi); if (op_state_purged(op)) { if (r == -EAGAIN) { vfree(op->downcall.trailer_buf); goto again; } else if (r == -EIO) { vfree(op->downcall.trailer_buf); od->error = r; return r; } } if (r < 0) { vfree(op->downcall.trailer_buf); od->error = r; return r; } else if (op->downcall.status) { vfree(op->downcall.trailer_buf); od->error = op->downcall.status; return op->downcall.status; } /* * The maximum size is size per entry times the 512 entries plus * the header. This is well under the limit. 
*/ if (op->downcall.trailer_size > PART_SIZE) { vfree(op->downcall.trailer_buf); od->error = -EIO; return -EIO; } resp = (struct orangefs_readdir_response_s *) op->downcall.trailer_buf; od->token = resp->token; return 0; } static int parse_readdir(struct orangefs_dir *od, struct orangefs_kernel_op_s *op) { struct orangefs_dir_part *part, *new; size_t count; count = 1; part = od->part; while (part) { count++; if (part->next) part = part->next; else break; } new = (void *)op->downcall.trailer_buf; new->next = NULL; new->len = op->downcall.trailer_size - sizeof(struct orangefs_readdir_response_s); if (!od->part) od->part = new; else part->next = new; count++; od->end = count << PART_SHIFT; return 0; } static int orangefs_dir_more(struct orangefs_inode_s *oi, struct orangefs_dir *od, struct dentry *dentry) { struct orangefs_kernel_op_s *op; int r; op = op_alloc(ORANGEFS_VFS_OP_READDIR); if (!op) { od->error = -ENOMEM; return -ENOMEM; } r = do_readdir(oi, od, dentry, op); if (r) { od->error = r; goto out; } r = parse_readdir(od, op); if (r) { od->error = r; goto out; } od->error = 0; out: op_release(op); return od->error; } static int fill_from_part(struct orangefs_dir_part *part, struct dir_context *ctx) { const int offset = sizeof(struct orangefs_readdir_response_s); struct orangefs_khandle *khandle; __u32 *len, padlen; loff_t i; char *s; i = ctx->pos & ~PART_MASK; /* The file offset from userspace is too large. */ if (i > part->len) return 1; /* * If the seek pointer is positioned just before an entry it * should find the next entry. */ if (i % 8) i = i + (8 - i%8)%8; while (i < part->len) { if (part->len < i + sizeof *len) break; len = (void *)part + offset + i; /* * len is the size of the string itself. padlen is the * total size of the encoded string. */ padlen = (sizeof *len + *len + 1) + (8 - (sizeof *len + *len + 1)%8)%8; if (part->len < i + padlen + sizeof *khandle) goto next; s = (void *)part + offset + i + sizeof *len; if (s[*len] != 0) goto next; khandle = (void *)part + offset + i + padlen; if (!dir_emit(ctx, s, *len, orangefs_khandle_to_ino(khandle), DT_UNKNOWN)) return 0; i += padlen + sizeof *khandle; i = i + (8 - i%8)%8; BUG_ON(i > part->len); ctx->pos = (ctx->pos & PART_MASK) | i; continue; next: i += 8; } return 1; } static int orangefs_dir_fill(struct orangefs_inode_s *oi, struct orangefs_dir *od, struct dentry *dentry, struct dir_context *ctx) { struct orangefs_dir_part *part; size_t count; count = ((ctx->pos & PART_MASK) >> PART_SHIFT) - 1; part = od->part; while (part->next && count) { count--; part = part->next; } /* This means the userspace file offset is invalid. */ if (count) { od->error = -EIO; return -EIO; } while (part && part->len) { int r; r = fill_from_part(part, ctx); if (r < 0) { od->error = r; return r; } else if (r == 0) { /* Userspace buffer is full. */ break; } else { /* * The part ran out of data. Move to the next * part. */ ctx->pos = (ctx->pos & PART_MASK) + (1 << PART_SHIFT); part = part->next; } } return 0; } static loff_t orangefs_dir_llseek(struct file *file, loff_t offset, int whence) { struct orangefs_dir *od = file->private_data; /* * Delete the stored data so userspace sees new directory * entries. 
*/ if (!whence && offset < od->end) { struct orangefs_dir_part *part = od->part; while (part) { struct orangefs_dir_part *next = part->next; vfree(part); part = next; } od->token = ORANGEFS_ITERATE_START; od->part = NULL; od->end = 1 << PART_SHIFT; } return default_llseek(file, offset, whence); } static int orangefs_dir_iterate(struct file *file, struct dir_context *ctx) { struct orangefs_inode_s *oi; struct orangefs_dir *od; struct dentry *dentry; int r; dentry = file->f_path.dentry; oi = ORANGEFS_I(dentry->d_inode); od = file->private_data; if (od->error) return od->error; if (ctx->pos == 0) { if (!dir_emit_dot(file, ctx)) return 0; ctx->pos++; } if (ctx->pos == 1) { if (!dir_emit_dotdot(file, ctx)) return 0; ctx->pos = 1 << PART_SHIFT; } /* * The seek position is in the first synthesized part but is not * valid. */ if ((ctx->pos & PART_MASK) == 0) return -EIO; r = 0; /* * Must read more if the user has sought past what has been read * so far. Stop a user who has sought past the end. */ while (od->token != ORANGEFS_ITERATE_END && ctx->pos > od->end) { r = orangefs_dir_more(oi, od, dentry); if (r) return r; } if (od->token == ORANGEFS_ITERATE_END && ctx->pos > od->end) return -EIO; /* Then try to fill if there's any left in the buffer. */ if (ctx->pos < od->end) { r = orangefs_dir_fill(oi, od, dentry, ctx); if (r) return r; } /* Finally get some more and try to fill. */ if (od->token != ORANGEFS_ITERATE_END) { r = orangefs_dir_more(oi, od, dentry); if (r) return r; r = orangefs_dir_fill(oi, od, dentry, ctx); } return r; } static int orangefs_dir_open(struct inode *inode, struct file *file) { struct orangefs_dir *od; file->private_data = kmalloc(sizeof(struct orangefs_dir), GFP_KERNEL); if (!file->private_data) return -ENOMEM; od = file->private_data; od->token = ORANGEFS_ITERATE_START; od->part = NULL; od->end = 1 << PART_SHIFT; od->error = 0; return 0; } static int orangefs_dir_release(struct inode *inode, struct file *file) { struct orangefs_dir *od = file->private_data; struct orangefs_dir_part *part = od->part; while (part) { struct orangefs_dir_part *next = part->next; vfree(part); part = next; } kfree(od); return 0; } const struct file_operations orangefs_dir_operations = { .llseek = orangefs_dir_llseek, .read = generic_read_dir, .iterate_shared = orangefs_dir_iterate, .open = orangefs_dir_open, .release = orangefs_dir_release };
linux-master
fs/orangefs/dir.c
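The per-entry layout described at the top of dir.c determines how fill_from_part() advances its cursor: a 4-byte length, the name, its trailing NUL, padding to 8 bytes, then the 16-byte khandle, with the cursor rounded up to 8 again afterwards. For a 5-byte name such as "hello" that is round_up(4 + 5 + 1, 8) = 16 bytes of string data plus 16 bytes of khandle, so the next entry starts 32 bytes later, and ctx->pos packs the part number above PART_SHIFT with that offset below it. The helper below is only an illustration of this arithmetic; example_entry_size() does not exist in the tree.

#include <linux/kernel.h>
#include "protocol.h"
#include "orangefs-kernel.h"

/* Hypothetical helper mirroring the cursor arithmetic in fill_from_part(). */
static inline size_t example_entry_size(size_t name_len)
{
	/* 4-byte length + string + NUL, rounded up to 8 bytes ... */
	size_t padlen = round_up(sizeof(__u32) + name_len + 1, 8);

	/* ... then the 16-byte khandle, rounded up to 8 again. */
	return round_up(padlen + sizeof(struct orangefs_khandle), 8);
}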
// SPDX-License-Identifier: GPL-2.0 /* * (C) 2001 Clemson University and The University of Chicago * Copyright 2018 Omnibond Systems, L.L.C. * * See COPYING in top-level directory. */ /* * Linux VFS inode operations. */ #include <linux/blkdev.h> #include <linux/fileattr.h> #include "protocol.h" #include "orangefs-kernel.h" #include "orangefs-bufmap.h" static int orangefs_writepage_locked(struct page *page, struct writeback_control *wbc) { struct inode *inode = page->mapping->host; struct orangefs_write_range *wr = NULL; struct iov_iter iter; struct bio_vec bv; size_t len, wlen; ssize_t ret; loff_t off; set_page_writeback(page); len = i_size_read(inode); if (PagePrivate(page)) { wr = (struct orangefs_write_range *)page_private(page); WARN_ON(wr->pos >= len); off = wr->pos; if (off + wr->len > len) wlen = len - off; else wlen = wr->len; } else { WARN_ON(1); off = page_offset(page); if (off + PAGE_SIZE > len) wlen = len - off; else wlen = PAGE_SIZE; } /* Should've been handled in orangefs_invalidate_folio. */ WARN_ON(off == len || off + wlen > len); WARN_ON(wlen == 0); bvec_set_page(&bv, page, wlen, off % PAGE_SIZE); iov_iter_bvec(&iter, ITER_SOURCE, &bv, 1, wlen); ret = wait_for_direct_io(ORANGEFS_IO_WRITE, inode, &off, &iter, wlen, len, wr, NULL, NULL); if (ret < 0) { SetPageError(page); mapping_set_error(page->mapping, ret); } else { ret = 0; } kfree(detach_page_private(page)); return ret; } static int orangefs_writepage(struct page *page, struct writeback_control *wbc) { int ret; ret = orangefs_writepage_locked(page, wbc); unlock_page(page); end_page_writeback(page); return ret; } struct orangefs_writepages { loff_t off; size_t len; kuid_t uid; kgid_t gid; int maxpages; int npages; struct page **pages; struct bio_vec *bv; }; static int orangefs_writepages_work(struct orangefs_writepages *ow, struct writeback_control *wbc) { struct inode *inode = ow->pages[0]->mapping->host; struct orangefs_write_range *wrp, wr; struct iov_iter iter; ssize_t ret; size_t len; loff_t off; int i; len = i_size_read(inode); for (i = 0; i < ow->npages; i++) { set_page_writeback(ow->pages[i]); bvec_set_page(&ow->bv[i], ow->pages[i], min(page_offset(ow->pages[i]) + PAGE_SIZE, ow->off + ow->len) - max(ow->off, page_offset(ow->pages[i])), i == 0 ? ow->off - page_offset(ow->pages[i]) : 0); } iov_iter_bvec(&iter, ITER_SOURCE, ow->bv, ow->npages, ow->len); WARN_ON(ow->off >= len); if (ow->off + ow->len > len) ow->len = len - ow->off; off = ow->off; wr.uid = ow->uid; wr.gid = ow->gid; ret = wait_for_direct_io(ORANGEFS_IO_WRITE, inode, &off, &iter, ow->len, 0, &wr, NULL, NULL); if (ret < 0) { for (i = 0; i < ow->npages; i++) { SetPageError(ow->pages[i]); mapping_set_error(ow->pages[i]->mapping, ret); if (PagePrivate(ow->pages[i])) { wrp = (struct orangefs_write_range *) page_private(ow->pages[i]); ClearPagePrivate(ow->pages[i]); put_page(ow->pages[i]); kfree(wrp); } end_page_writeback(ow->pages[i]); unlock_page(ow->pages[i]); } } else { ret = 0; for (i = 0; i < ow->npages; i++) { if (PagePrivate(ow->pages[i])) { wrp = (struct orangefs_write_range *) page_private(ow->pages[i]); ClearPagePrivate(ow->pages[i]); put_page(ow->pages[i]); kfree(wrp); } end_page_writeback(ow->pages[i]); unlock_page(ow->pages[i]); } } return ret; } static int orangefs_writepages_callback(struct folio *folio, struct writeback_control *wbc, void *data) { struct orangefs_writepages *ow = data; struct orangefs_write_range *wr = folio->private; int ret; if (!wr) { folio_unlock(folio); /* It's not private so there's nothing to write, right? 
*/ printk("writepages_callback not private!\n"); BUG(); return 0; } ret = -1; if (ow->npages == 0) { ow->off = wr->pos; ow->len = wr->len; ow->uid = wr->uid; ow->gid = wr->gid; ow->pages[ow->npages++] = &folio->page; ret = 0; goto done; } if (!uid_eq(ow->uid, wr->uid) || !gid_eq(ow->gid, wr->gid)) { orangefs_writepages_work(ow, wbc); ow->npages = 0; ret = -1; goto done; } if (ow->off + ow->len == wr->pos) { ow->len += wr->len; ow->pages[ow->npages++] = &folio->page; ret = 0; goto done; } done: if (ret == -1) { if (ow->npages) { orangefs_writepages_work(ow, wbc); ow->npages = 0; } ret = orangefs_writepage_locked(&folio->page, wbc); mapping_set_error(folio->mapping, ret); folio_unlock(folio); folio_end_writeback(folio); } else { if (ow->npages == ow->maxpages) { orangefs_writepages_work(ow, wbc); ow->npages = 0; } } return ret; } static int orangefs_writepages(struct address_space *mapping, struct writeback_control *wbc) { struct orangefs_writepages *ow; struct blk_plug plug; int ret; ow = kzalloc(sizeof(struct orangefs_writepages), GFP_KERNEL); if (!ow) return -ENOMEM; ow->maxpages = orangefs_bufmap_size_query()/PAGE_SIZE; ow->pages = kcalloc(ow->maxpages, sizeof(struct page *), GFP_KERNEL); if (!ow->pages) { kfree(ow); return -ENOMEM; } ow->bv = kcalloc(ow->maxpages, sizeof(struct bio_vec), GFP_KERNEL); if (!ow->bv) { kfree(ow->pages); kfree(ow); return -ENOMEM; } blk_start_plug(&plug); ret = write_cache_pages(mapping, wbc, orangefs_writepages_callback, ow); if (ow->npages) ret = orangefs_writepages_work(ow, wbc); blk_finish_plug(&plug); kfree(ow->pages); kfree(ow->bv); kfree(ow); return ret; } static int orangefs_launder_folio(struct folio *); static void orangefs_readahead(struct readahead_control *rac) { loff_t offset; struct iov_iter iter; struct inode *inode = rac->mapping->host; struct xarray *i_pages; struct folio *folio; loff_t new_start = readahead_pos(rac); int ret; size_t new_len = 0; loff_t bytes_remaining = inode->i_size - readahead_pos(rac); loff_t pages_remaining = bytes_remaining / PAGE_SIZE; if (pages_remaining >= 1024) new_len = 4194304; else if (pages_remaining > readahead_count(rac)) new_len = bytes_remaining; if (new_len) readahead_expand(rac, new_start, new_len); offset = readahead_pos(rac); i_pages = &rac->mapping->i_pages; iov_iter_xarray(&iter, ITER_DEST, i_pages, offset, readahead_length(rac)); /* read in the pages. */ if ((ret = wait_for_direct_io(ORANGEFS_IO_READ, inode, &offset, &iter, readahead_length(rac), inode->i_size, NULL, NULL, rac->file)) < 0) gossip_debug(GOSSIP_FILE_DEBUG, "%s: wait_for_direct_io failed. \n", __func__); else ret = 0; /* clean up. 
*/ while ((folio = readahead_folio(rac))) { if (!ret) folio_mark_uptodate(folio); folio_unlock(folio); } } static int orangefs_read_folio(struct file *file, struct folio *folio) { struct inode *inode = folio->mapping->host; struct iov_iter iter; struct bio_vec bv; ssize_t ret; loff_t off; /* offset of this folio in the file */ if (folio_test_dirty(folio)) orangefs_launder_folio(folio); off = folio_pos(folio); bvec_set_folio(&bv, folio, folio_size(folio), 0); iov_iter_bvec(&iter, ITER_DEST, &bv, 1, folio_size(folio)); ret = wait_for_direct_io(ORANGEFS_IO_READ, inode, &off, &iter, folio_size(folio), inode->i_size, NULL, NULL, file); /* this will only zero remaining unread portions of the folio data */ iov_iter_zero(~0U, &iter); /* takes care of potential aliasing */ flush_dcache_folio(folio); if (ret < 0) { folio_set_error(folio); } else { folio_mark_uptodate(folio); ret = 0; } /* unlock the folio after the ->read_folio() routine completes */ folio_unlock(folio); return ret; } static int orangefs_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, struct page **pagep, void **fsdata) { struct orangefs_write_range *wr; struct folio *folio; struct page *page; pgoff_t index; int ret; index = pos >> PAGE_SHIFT; page = grab_cache_page_write_begin(mapping, index); if (!page) return -ENOMEM; *pagep = page; folio = page_folio(page); if (folio_test_dirty(folio) && !folio_test_private(folio)) { /* * Should be impossible. If it happens, launder the page * since we don't know what's dirty. This will WARN in * orangefs_writepage_locked. */ ret = orangefs_launder_folio(folio); if (ret) return ret; } if (folio_test_private(folio)) { struct orangefs_write_range *wr; wr = folio_get_private(folio); if (wr->pos + wr->len == pos && uid_eq(wr->uid, current_fsuid()) && gid_eq(wr->gid, current_fsgid())) { wr->len += len; goto okay; } else { ret = orangefs_launder_folio(folio); if (ret) return ret; } } wr = kmalloc(sizeof *wr, GFP_KERNEL); if (!wr) return -ENOMEM; wr->pos = pos; wr->len = len; wr->uid = current_fsuid(); wr->gid = current_fsgid(); folio_attach_private(folio, wr); okay: return 0; } static int orangefs_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { struct inode *inode = page->mapping->host; loff_t last_pos = pos + copied; /* * No need to use i_size_read() here, the i_size * cannot change under us because we hold the i_mutex. */ if (last_pos > inode->i_size) i_size_write(inode, last_pos); /* zero the stale part of the page if we did a short copy */ if (!PageUptodate(page)) { unsigned from = pos & (PAGE_SIZE - 1); if (copied < len) { zero_user(page, from + copied, len - copied); } /* Set fully written pages uptodate. */ if (pos == page_offset(page) && (len == PAGE_SIZE || pos + len == inode->i_size)) { zero_user_segment(page, from + copied, PAGE_SIZE); SetPageUptodate(page); } } set_page_dirty(page); unlock_page(page); put_page(page); mark_inode_dirty_sync(file_inode(file)); return copied; } static void orangefs_invalidate_folio(struct folio *folio, size_t offset, size_t length) { struct orangefs_write_range *wr = folio_get_private(folio); if (offset == 0 && length == PAGE_SIZE) { kfree(folio_detach_private(folio)); return; /* write range entirely within invalidate range (or equal) */ } else if (folio_pos(folio) + offset <= wr->pos && wr->pos + wr->len <= folio_pos(folio) + offset + length) { kfree(folio_detach_private(folio)); /* XXX is this right? 
only caller in fs */ folio_cancel_dirty(folio); return; /* invalidate range chops off end of write range */ } else if (wr->pos < folio_pos(folio) + offset && wr->pos + wr->len <= folio_pos(folio) + offset + length && folio_pos(folio) + offset < wr->pos + wr->len) { size_t x; x = wr->pos + wr->len - (folio_pos(folio) + offset); WARN_ON(x > wr->len); wr->len -= x; wr->uid = current_fsuid(); wr->gid = current_fsgid(); /* invalidate range chops off beginning of write range */ } else if (folio_pos(folio) + offset <= wr->pos && folio_pos(folio) + offset + length < wr->pos + wr->len && wr->pos < folio_pos(folio) + offset + length) { size_t x; x = folio_pos(folio) + offset + length - wr->pos; WARN_ON(x > wr->len); wr->pos += x; wr->len -= x; wr->uid = current_fsuid(); wr->gid = current_fsgid(); /* invalidate range entirely within write range (punch hole) */ } else if (wr->pos < folio_pos(folio) + offset && folio_pos(folio) + offset + length < wr->pos + wr->len) { /* XXX what do we do here... should not WARN_ON */ WARN_ON(1); /* punch hole */ /* * should we just ignore this and write it out anyway? * it hardly makes sense */ return; /* non-overlapping ranges */ } else { /* WARN if they do overlap */ if (!((folio_pos(folio) + offset + length <= wr->pos) ^ (wr->pos + wr->len <= folio_pos(folio) + offset))) { WARN_ON(1); printk("invalidate range offset %llu length %zu\n", folio_pos(folio) + offset, length); printk("write range offset %llu length %zu\n", wr->pos, wr->len); } return; } /* * Above there are returns where wr is freed or where we WARN. * Thus the following runs if wr was modified above. */ orangefs_launder_folio(folio); } static bool orangefs_release_folio(struct folio *folio, gfp_t foo) { return !folio_test_private(folio); } static void orangefs_free_folio(struct folio *folio) { kfree(folio_detach_private(folio)); } static int orangefs_launder_folio(struct folio *folio) { int r = 0; struct writeback_control wbc = { .sync_mode = WB_SYNC_ALL, .nr_to_write = 0, }; folio_wait_writeback(folio); if (folio_clear_dirty_for_io(folio)) { r = orangefs_writepage_locked(&folio->page, &wbc); folio_end_writeback(folio); } return r; } static ssize_t orangefs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) { /* * Comment from original do_readv_writev: * Common entry point for read/write/readv/writev * This function will dispatch it to either the direct I/O * or buffered I/O path depending on the mount options and/or * augmented/extended metadata attached to the file. * Note: File extended attributes override any mount options. */ struct file *file = iocb->ki_filp; loff_t pos = iocb->ki_pos; enum ORANGEFS_io_type type = iov_iter_rw(iter) == WRITE ? 
ORANGEFS_IO_WRITE : ORANGEFS_IO_READ; loff_t *offset = &pos; struct inode *inode = file->f_mapping->host; struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); struct orangefs_khandle *handle = &orangefs_inode->refn.khandle; size_t count = iov_iter_count(iter); ssize_t total_count = 0; ssize_t ret = -EINVAL; gossip_debug(GOSSIP_FILE_DEBUG, "%s-BEGIN(%pU): count(%d) after estimate_max_iovecs.\n", __func__, handle, (int)count); if (type == ORANGEFS_IO_WRITE) { gossip_debug(GOSSIP_FILE_DEBUG, "%s(%pU): proceeding with offset : %llu, " "size %d\n", __func__, handle, llu(*offset), (int)count); } if (count == 0) { ret = 0; goto out; } while (iov_iter_count(iter)) { size_t each_count = iov_iter_count(iter); size_t amt_complete; /* how much to transfer in this loop iteration */ if (each_count > orangefs_bufmap_size_query()) each_count = orangefs_bufmap_size_query(); gossip_debug(GOSSIP_FILE_DEBUG, "%s(%pU): size of each_count(%d)\n", __func__, handle, (int)each_count); gossip_debug(GOSSIP_FILE_DEBUG, "%s(%pU): BEFORE wait_for_io: offset is %d\n", __func__, handle, (int)*offset); ret = wait_for_direct_io(type, inode, offset, iter, each_count, 0, NULL, NULL, file); gossip_debug(GOSSIP_FILE_DEBUG, "%s(%pU): return from wait_for_io:%d\n", __func__, handle, (int)ret); if (ret < 0) goto out; *offset += ret; total_count += ret; amt_complete = ret; gossip_debug(GOSSIP_FILE_DEBUG, "%s(%pU): AFTER wait_for_io: offset is %d\n", __func__, handle, (int)*offset); /* * if we got a short I/O operations, * fall out and return what we got so far */ if (amt_complete < each_count) break; } /*end while */ out: if (total_count > 0) ret = total_count; if (ret > 0) { if (type == ORANGEFS_IO_READ) { file_accessed(file); } else { file_update_time(file); if (*offset > i_size_read(inode)) i_size_write(inode, *offset); } } gossip_debug(GOSSIP_FILE_DEBUG, "%s(%pU): Value(%d) returned.\n", __func__, handle, (int)ret); return ret; } /** ORANGEFS2 implementation of address space operations */ static const struct address_space_operations orangefs_address_operations = { .writepage = orangefs_writepage, .readahead = orangefs_readahead, .read_folio = orangefs_read_folio, .writepages = orangefs_writepages, .dirty_folio = filemap_dirty_folio, .write_begin = orangefs_write_begin, .write_end = orangefs_write_end, .invalidate_folio = orangefs_invalidate_folio, .release_folio = orangefs_release_folio, .free_folio = orangefs_free_folio, .launder_folio = orangefs_launder_folio, .direct_IO = orangefs_direct_IO, }; vm_fault_t orangefs_page_mkwrite(struct vm_fault *vmf) { struct folio *folio = page_folio(vmf->page); struct inode *inode = file_inode(vmf->vma->vm_file); struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); unsigned long *bitlock = &orangefs_inode->bitlock; vm_fault_t ret; struct orangefs_write_range *wr; sb_start_pagefault(inode->i_sb); if (wait_on_bit(bitlock, 1, TASK_KILLABLE)) { ret = VM_FAULT_RETRY; goto out; } folio_lock(folio); if (folio_test_dirty(folio) && !folio_test_private(folio)) { /* * Should be impossible. If it happens, launder the folio * since we don't know what's dirty. This will WARN in * orangefs_writepage_locked. 
*/ if (orangefs_launder_folio(folio)) { ret = VM_FAULT_LOCKED|VM_FAULT_RETRY; goto out; } } if (folio_test_private(folio)) { wr = folio_get_private(folio); if (uid_eq(wr->uid, current_fsuid()) && gid_eq(wr->gid, current_fsgid())) { wr->pos = page_offset(vmf->page); wr->len = PAGE_SIZE; goto okay; } else { if (orangefs_launder_folio(folio)) { ret = VM_FAULT_LOCKED|VM_FAULT_RETRY; goto out; } } } wr = kmalloc(sizeof *wr, GFP_KERNEL); if (!wr) { ret = VM_FAULT_LOCKED|VM_FAULT_RETRY; goto out; } wr->pos = page_offset(vmf->page); wr->len = PAGE_SIZE; wr->uid = current_fsuid(); wr->gid = current_fsgid(); folio_attach_private(folio, wr); okay: file_update_time(vmf->vma->vm_file); if (folio->mapping != inode->i_mapping) { folio_unlock(folio); ret = VM_FAULT_LOCKED|VM_FAULT_NOPAGE; goto out; } /* * We mark the folio dirty already here so that when freeze is in * progress, we are guaranteed that writeback during freezing will * see the dirty folio and writeprotect it again. */ folio_mark_dirty(folio); folio_wait_stable(folio); ret = VM_FAULT_LOCKED; out: sb_end_pagefault(inode->i_sb); return ret; } static int orangefs_setattr_size(struct inode *inode, struct iattr *iattr) { struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); struct orangefs_kernel_op_s *new_op; loff_t orig_size; int ret = -EINVAL; gossip_debug(GOSSIP_INODE_DEBUG, "%s: %pU: Handle is %pU | fs_id %d | size is %llu\n", __func__, get_khandle_from_ino(inode), &orangefs_inode->refn.khandle, orangefs_inode->refn.fs_id, iattr->ia_size); /* Ensure that we have a up to date size, so we know if it changed. */ ret = orangefs_inode_getattr(inode, ORANGEFS_GETATTR_SIZE); if (ret == -ESTALE) ret = -EIO; if (ret) { gossip_err("%s: orangefs_inode_getattr failed, ret:%d:.\n", __func__, ret); return ret; } orig_size = i_size_read(inode); /* This is truncate_setsize in a different order. 
*/ truncate_pagecache(inode, iattr->ia_size); i_size_write(inode, iattr->ia_size); if (iattr->ia_size > orig_size) pagecache_isize_extended(inode, orig_size, iattr->ia_size); new_op = op_alloc(ORANGEFS_VFS_OP_TRUNCATE); if (!new_op) return -ENOMEM; new_op->upcall.req.truncate.refn = orangefs_inode->refn; new_op->upcall.req.truncate.size = (__s64) iattr->ia_size; ret = service_operation(new_op, __func__, get_interruptible_flag(inode)); /* * the truncate has no downcall members to retrieve, but * the status value tells us if it went through ok or not */ gossip_debug(GOSSIP_INODE_DEBUG, "%s: ret:%d:\n", __func__, ret); op_release(new_op); if (ret != 0) return ret; if (orig_size != i_size_read(inode)) iattr->ia_valid |= ATTR_CTIME | ATTR_MTIME; return ret; } int __orangefs_setattr(struct inode *inode, struct iattr *iattr) { int ret; if (iattr->ia_valid & ATTR_MODE) { if (iattr->ia_mode & (S_ISVTX)) { if (is_root_handle(inode)) { /* * allow sticky bit to be set on root (since * it shows up that way by default anyhow), * but don't show it to the server */ iattr->ia_mode -= S_ISVTX; } else { gossip_debug(GOSSIP_UTILS_DEBUG, "User attempted to set sticky bit on non-root directory; returning EINVAL.\n"); ret = -EINVAL; goto out; } } if (iattr->ia_mode & (S_ISUID)) { gossip_debug(GOSSIP_UTILS_DEBUG, "Attempting to set setuid bit (not supported); returning EINVAL.\n"); ret = -EINVAL; goto out; } } if (iattr->ia_valid & ATTR_SIZE) { ret = orangefs_setattr_size(inode, iattr); if (ret) goto out; } again: spin_lock(&inode->i_lock); if (ORANGEFS_I(inode)->attr_valid) { if (uid_eq(ORANGEFS_I(inode)->attr_uid, current_fsuid()) && gid_eq(ORANGEFS_I(inode)->attr_gid, current_fsgid())) { ORANGEFS_I(inode)->attr_valid = iattr->ia_valid; } else { spin_unlock(&inode->i_lock); write_inode_now(inode, 1); goto again; } } else { ORANGEFS_I(inode)->attr_valid = iattr->ia_valid; ORANGEFS_I(inode)->attr_uid = current_fsuid(); ORANGEFS_I(inode)->attr_gid = current_fsgid(); } setattr_copy(&nop_mnt_idmap, inode, iattr); spin_unlock(&inode->i_lock); mark_inode_dirty(inode); ret = 0; out: return ret; } int __orangefs_setattr_mode(struct dentry *dentry, struct iattr *iattr) { int ret; struct inode *inode = d_inode(dentry); ret = __orangefs_setattr(inode, iattr); /* change mode on a file that has ACLs */ if (!ret && (iattr->ia_valid & ATTR_MODE)) ret = posix_acl_chmod(&nop_mnt_idmap, dentry, inode->i_mode); return ret; } /* * Change attributes of an object referenced by dentry. */ int orangefs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *iattr) { int ret; gossip_debug(GOSSIP_INODE_DEBUG, "__orangefs_setattr: called on %pd\n", dentry); ret = setattr_prepare(&nop_mnt_idmap, dentry, iattr); if (ret) goto out; ret = __orangefs_setattr_mode(dentry, iattr); sync_inode_metadata(d_inode(dentry), 1); out: gossip_debug(GOSSIP_INODE_DEBUG, "orangefs_setattr: returning %d\n", ret); return ret; } /* * Obtain attributes of an object given a dentry */ int orangefs_getattr(struct mnt_idmap *idmap, const struct path *path, struct kstat *stat, u32 request_mask, unsigned int flags) { int ret; struct inode *inode = path->dentry->d_inode; gossip_debug(GOSSIP_INODE_DEBUG, "orangefs_getattr: called on %pd mask %u\n", path->dentry, request_mask); ret = orangefs_inode_getattr(inode, request_mask & STATX_SIZE ? 
ORANGEFS_GETATTR_SIZE : 0); if (ret == 0) { generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat); /* override block size reported to stat */ if (!(request_mask & STATX_SIZE)) stat->result_mask &= ~STATX_SIZE; generic_fill_statx_attr(inode, stat); } return ret; } int orangefs_permission(struct mnt_idmap *idmap, struct inode *inode, int mask) { int ret; if (mask & MAY_NOT_BLOCK) return -ECHILD; gossip_debug(GOSSIP_INODE_DEBUG, "%s: refreshing\n", __func__); /* Make sure the permission (and other common attrs) are up to date. */ ret = orangefs_inode_getattr(inode, 0); if (ret < 0) return ret; return generic_permission(&nop_mnt_idmap, inode, mask); } int orangefs_update_time(struct inode *inode, int flags) { struct iattr iattr; gossip_debug(GOSSIP_INODE_DEBUG, "orangefs_update_time: %pU\n", get_khandle_from_ino(inode)); flags = generic_update_time(inode, flags); memset(&iattr, 0, sizeof iattr); if (flags & S_ATIME) iattr.ia_valid |= ATTR_ATIME; if (flags & S_CTIME) iattr.ia_valid |= ATTR_CTIME; if (flags & S_MTIME) iattr.ia_valid |= ATTR_MTIME; return __orangefs_setattr(inode, &iattr); } static int orangefs_fileattr_get(struct dentry *dentry, struct fileattr *fa) { u64 val = 0; int ret; gossip_debug(GOSSIP_FILE_DEBUG, "%s: called on %pd\n", __func__, dentry); ret = orangefs_inode_getxattr(d_inode(dentry), "user.pvfs2.meta_hint", &val, sizeof(val)); if (ret < 0 && ret != -ENODATA) return ret; gossip_debug(GOSSIP_FILE_DEBUG, "%s: flags=%u\n", __func__, (u32) val); fileattr_fill_flags(fa, val); return 0; } static int orangefs_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry, struct fileattr *fa) { u64 val = 0; gossip_debug(GOSSIP_FILE_DEBUG, "%s: called on %pd\n", __func__, dentry); /* * ORANGEFS_MIRROR_FL is set internally when the mirroring mode is * turned on for a file. The user is not allowed to turn on this bit, * but the bit is present if the user first gets the flags and then * updates the flags with some new settings. So, we ignore it in the * following edit. bligon. 
*/ if (fileattr_has_fsx(fa) || (fa->flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NOATIME_FL | ORANGEFS_MIRROR_FL))) { gossip_err("%s: only supports setting one of FS_IMMUTABLE_FL|FS_APPEND_FL|FS_NOATIME_FL\n", __func__); return -EOPNOTSUPP; } val = fa->flags; gossip_debug(GOSSIP_FILE_DEBUG, "%s: flags=%u\n", __func__, (u32) val); return orangefs_inode_setxattr(d_inode(dentry), "user.pvfs2.meta_hint", &val, sizeof(val), 0); } /* ORANGEFS2 implementation of VFS inode operations for files */ static const struct inode_operations orangefs_file_inode_operations = { .get_inode_acl = orangefs_get_acl, .set_acl = orangefs_set_acl, .setattr = orangefs_setattr, .getattr = orangefs_getattr, .listxattr = orangefs_listxattr, .permission = orangefs_permission, .update_time = orangefs_update_time, .fileattr_get = orangefs_fileattr_get, .fileattr_set = orangefs_fileattr_set, }; static int orangefs_init_iops(struct inode *inode) { inode->i_mapping->a_ops = &orangefs_address_operations; switch (inode->i_mode & S_IFMT) { case S_IFREG: inode->i_op = &orangefs_file_inode_operations; inode->i_fop = &orangefs_file_operations; break; case S_IFLNK: inode->i_op = &orangefs_symlink_inode_operations; break; case S_IFDIR: inode->i_op = &orangefs_dir_inode_operations; inode->i_fop = &orangefs_dir_operations; break; default: gossip_debug(GOSSIP_INODE_DEBUG, "%s: unsupported mode\n", __func__); return -EINVAL; } return 0; } /* * Given an ORANGEFS object identifier (fsid, handle), convert it into * a ino_t type that will be used as a hash-index from where the handle will * be searched for in the VFS hash table of inodes. */ static inline ino_t orangefs_handle_hash(struct orangefs_object_kref *ref) { if (!ref) return 0; return orangefs_khandle_to_ino(&(ref->khandle)); } /* * Called to set up an inode from iget5_locked. */ static int orangefs_set_inode(struct inode *inode, void *data) { struct orangefs_object_kref *ref = (struct orangefs_object_kref *) data; ORANGEFS_I(inode)->refn.fs_id = ref->fs_id; ORANGEFS_I(inode)->refn.khandle = ref->khandle; ORANGEFS_I(inode)->attr_valid = 0; hash_init(ORANGEFS_I(inode)->xattr_cache); ORANGEFS_I(inode)->mapping_time = jiffies - 1; ORANGEFS_I(inode)->bitlock = 0; return 0; } /* * Called to determine if handles match. */ static int orangefs_test_inode(struct inode *inode, void *data) { struct orangefs_object_kref *ref = (struct orangefs_object_kref *) data; struct orangefs_inode_s *orangefs_inode = NULL; orangefs_inode = ORANGEFS_I(inode); /* test handles and fs_ids... */ return (!ORANGEFS_khandle_cmp(&(orangefs_inode->refn.khandle), &(ref->khandle)) && orangefs_inode->refn.fs_id == ref->fs_id); } /* * Front-end to lookup the inode-cache maintained by the VFS using the ORANGEFS * file handle. * * @sb: the file system super block instance. * @ref: The ORANGEFS object for which we are trying to locate an inode. 
*/ struct inode *orangefs_iget(struct super_block *sb, struct orangefs_object_kref *ref) { struct inode *inode = NULL; unsigned long hash; int error; hash = orangefs_handle_hash(ref); inode = iget5_locked(sb, hash, orangefs_test_inode, orangefs_set_inode, ref); if (!inode) return ERR_PTR(-ENOMEM); if (!(inode->i_state & I_NEW)) return inode; error = orangefs_inode_getattr(inode, ORANGEFS_GETATTR_NEW); if (error) { iget_failed(inode); return ERR_PTR(error); } inode->i_ino = hash; /* needed for stat etc */ orangefs_init_iops(inode); unlock_new_inode(inode); gossip_debug(GOSSIP_INODE_DEBUG, "iget handle %pU, fsid %d hash %ld i_ino %lu\n", &ref->khandle, ref->fs_id, hash, inode->i_ino); return inode; } /* * Allocate an inode for a newly created file and insert it into the inode hash. */ struct inode *orangefs_new_inode(struct super_block *sb, struct inode *dir, umode_t mode, dev_t dev, struct orangefs_object_kref *ref) { struct posix_acl *acl = NULL, *default_acl = NULL; unsigned long hash = orangefs_handle_hash(ref); struct inode *inode; int error; gossip_debug(GOSSIP_INODE_DEBUG, "%s:(sb is %p | MAJOR(dev)=%u | MINOR(dev)=%u mode=%o)\n", __func__, sb, MAJOR(dev), MINOR(dev), mode); inode = new_inode(sb); if (!inode) return ERR_PTR(-ENOMEM); error = posix_acl_create(dir, &mode, &default_acl, &acl); if (error) goto out_iput; orangefs_set_inode(inode, ref); inode->i_ino = hash; /* needed for stat etc */ error = orangefs_inode_getattr(inode, ORANGEFS_GETATTR_NEW); if (error) goto out_iput; orangefs_init_iops(inode); inode->i_rdev = dev; if (default_acl) { error = __orangefs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT); if (error) goto out_iput; } if (acl) { error = __orangefs_set_acl(inode, acl, ACL_TYPE_ACCESS); if (error) goto out_iput; } error = insert_inode_locked4(inode, hash, orangefs_test_inode, ref); if (error < 0) goto out_iput; gossip_debug(GOSSIP_INODE_DEBUG, "Initializing ACL's for inode %pU\n", get_khandle_from_ino(inode)); if (mode != inode->i_mode) { struct iattr iattr = { .ia_mode = mode, .ia_valid = ATTR_MODE, }; inode->i_mode = mode; __orangefs_setattr(inode, &iattr); __posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode); } posix_acl_release(acl); posix_acl_release(default_acl); return inode; out_iput: iput(inode); posix_acl_release(acl); posix_acl_release(default_acl); return ERR_PTR(error); }
linux-master
fs/orangefs/inode.c
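The largest branch in the inode.c record above is orangefs_invalidate_folio(), which compares the invalidated byte range against the folio's pending write range and either drops or trims it. The same case analysis is easier to read as plain interval arithmetic; the userspace sketch below is only a model (hypothetical struct wr and trim_write_range(), absolute byte offsets in place of folio_pos(folio) + offset), not the kernel implementation.

#include <stdio.h>
#include <stdint.h>

struct wr { uint64_t pos; uint64_t len; };

/* Returns 1 if the pending write range is dropped, 0 if it survives (possibly trimmed). */
static int trim_write_range(struct wr *w, uint64_t ioff, uint64_t ilen)
{
	uint64_t iend = ioff + ilen, wend = w->pos + w->len;

	if (ioff <= w->pos && wend <= iend) {
		/* invalidation covers the whole write range */
		return 1;
	} else if (w->pos < ioff && wend <= iend && ioff < wend) {
		/* invalidation chops off the tail of the write range */
		w->len -= wend - ioff;
	} else if (ioff <= w->pos && iend < wend && w->pos < iend) {
		/* invalidation chops off the head of the write range */
		w->len -= iend - w->pos;
		w->pos = iend;
	} else if (w->pos < ioff && iend < wend) {
		/* hole punched inside the write range; the kernel code WARNs here */
	}
	/* remaining case: the ranges do not overlap, nothing to do */
	return 0;
}

int main(void)
{
	struct wr w = { .pos = 100, .len = 200 };

	/* Invalidate bytes [250, 350): chops 50 bytes off the tail. */
	trim_write_range(&w, 250, 100);
	printf("pos=%llu len=%llu\n",
	       (unsigned long long)w.pos, (unsigned long long)w.len);
	return 0;
}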
// SPDX-License-Identifier: GPL-2.0 /* * (C) 2001 Clemson University and The University of Chicago * * See COPYING in top-level directory. */ /* * Linux VFS namei operations. */ #include "protocol.h" #include "orangefs-kernel.h" /* * Get a newly allocated inode to go with a negative dentry. */ static int orangefs_create(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode, bool exclusive) { struct orangefs_inode_s *parent = ORANGEFS_I(dir); struct orangefs_kernel_op_s *new_op; struct orangefs_object_kref ref; struct inode *inode; struct iattr iattr; int ret; gossip_debug(GOSSIP_NAME_DEBUG, "%s: %pd\n", __func__, dentry); new_op = op_alloc(ORANGEFS_VFS_OP_CREATE); if (!new_op) return -ENOMEM; new_op->upcall.req.create.parent_refn = parent->refn; fill_default_sys_attrs(new_op->upcall.req.create.attributes, ORANGEFS_TYPE_METAFILE, mode); strncpy(new_op->upcall.req.create.d_name, dentry->d_name.name, ORANGEFS_NAME_MAX - 1); ret = service_operation(new_op, __func__, get_interruptible_flag(dir)); gossip_debug(GOSSIP_NAME_DEBUG, "%s: %pd: handle:%pU: fsid:%d: new_op:%p: ret:%d:\n", __func__, dentry, &new_op->downcall.resp.create.refn.khandle, new_op->downcall.resp.create.refn.fs_id, new_op, ret); if (ret < 0) goto out; ref = new_op->downcall.resp.create.refn; inode = orangefs_new_inode(dir->i_sb, dir, S_IFREG | mode, 0, &ref); if (IS_ERR(inode)) { gossip_err("%s: Failed to allocate inode for file :%pd:\n", __func__, dentry); ret = PTR_ERR(inode); goto out; } gossip_debug(GOSSIP_NAME_DEBUG, "%s: Assigned inode :%pU: for file :%pd:\n", __func__, get_khandle_from_ino(inode), dentry); d_instantiate_new(dentry, inode); orangefs_set_timeout(dentry); gossip_debug(GOSSIP_NAME_DEBUG, "%s: dentry instantiated for %pd\n", __func__, dentry); memset(&iattr, 0, sizeof iattr); iattr.ia_valid |= ATTR_MTIME | ATTR_CTIME; iattr.ia_mtime = iattr.ia_ctime = current_time(dir); __orangefs_setattr(dir, &iattr); ret = 0; out: op_release(new_op); gossip_debug(GOSSIP_NAME_DEBUG, "%s: %pd: returning %d\n", __func__, dentry, ret); return ret; } /* * Attempt to resolve an object name (dentry->d_name), parent handle, and * fsid into a handle for the object. */ static struct dentry *orangefs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { struct orangefs_inode_s *parent = ORANGEFS_I(dir); struct orangefs_kernel_op_s *new_op; struct inode *inode; int ret = -EINVAL; /* * in theory we could skip a lookup here (if the intent is to * create) in order to avoid a potentially failed lookup, but * leaving it in can skip a valid lookup and try to create a file * that already exists (e.g. 
the vfs already handles checking for * -EEXIST on O_EXCL opens, which is broken if we skip this lookup * in the create path) */ gossip_debug(GOSSIP_NAME_DEBUG, "%s called on %pd\n", __func__, dentry); if (dentry->d_name.len > (ORANGEFS_NAME_MAX - 1)) return ERR_PTR(-ENAMETOOLONG); new_op = op_alloc(ORANGEFS_VFS_OP_LOOKUP); if (!new_op) return ERR_PTR(-ENOMEM); new_op->upcall.req.lookup.sym_follow = ORANGEFS_LOOKUP_LINK_NO_FOLLOW; gossip_debug(GOSSIP_NAME_DEBUG, "%s:%s:%d using parent %pU\n", __FILE__, __func__, __LINE__, &parent->refn.khandle); new_op->upcall.req.lookup.parent_refn = parent->refn; strncpy(new_op->upcall.req.lookup.d_name, dentry->d_name.name, ORANGEFS_NAME_MAX - 1); gossip_debug(GOSSIP_NAME_DEBUG, "%s: doing lookup on %s under %pU,%d\n", __func__, new_op->upcall.req.lookup.d_name, &new_op->upcall.req.lookup.parent_refn.khandle, new_op->upcall.req.lookup.parent_refn.fs_id); ret = service_operation(new_op, __func__, get_interruptible_flag(dir)); gossip_debug(GOSSIP_NAME_DEBUG, "Lookup Got %pU, fsid %d (ret=%d)\n", &new_op->downcall.resp.lookup.refn.khandle, new_op->downcall.resp.lookup.refn.fs_id, ret); if (ret == 0) { orangefs_set_timeout(dentry); inode = orangefs_iget(dir->i_sb, &new_op->downcall.resp.lookup.refn); } else if (ret == -ENOENT) { inode = NULL; } else { /* must be a non-recoverable error */ inode = ERR_PTR(ret); } op_release(new_op); return d_splice_alias(inode, dentry); } /* return 0 on success; non-zero otherwise */ static int orangefs_unlink(struct inode *dir, struct dentry *dentry) { struct inode *inode = dentry->d_inode; struct orangefs_inode_s *parent = ORANGEFS_I(dir); struct orangefs_kernel_op_s *new_op; struct iattr iattr; int ret; gossip_debug(GOSSIP_NAME_DEBUG, "%s: called on %pd\n" " (inode %pU): Parent is %pU | fs_id %d\n", __func__, dentry, get_khandle_from_ino(inode), &parent->refn.khandle, parent->refn.fs_id); new_op = op_alloc(ORANGEFS_VFS_OP_REMOVE); if (!new_op) return -ENOMEM; new_op->upcall.req.remove.parent_refn = parent->refn; strncpy(new_op->upcall.req.remove.d_name, dentry->d_name.name, ORANGEFS_NAME_MAX - 1); ret = service_operation(new_op, "orangefs_unlink", get_interruptible_flag(inode)); gossip_debug(GOSSIP_NAME_DEBUG, "%s: service_operation returned:%d:\n", __func__, ret); op_release(new_op); if (!ret) { drop_nlink(inode); memset(&iattr, 0, sizeof iattr); iattr.ia_valid |= ATTR_MTIME | ATTR_CTIME; iattr.ia_mtime = iattr.ia_ctime = current_time(dir); __orangefs_setattr(dir, &iattr); } return ret; } static int orangefs_symlink(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, const char *symname) { struct orangefs_inode_s *parent = ORANGEFS_I(dir); struct orangefs_kernel_op_s *new_op; struct orangefs_object_kref ref; struct inode *inode; struct iattr iattr; int mode = 0755; int ret; gossip_debug(GOSSIP_NAME_DEBUG, "%s: called\n", __func__); if (!symname) return -EINVAL; if (strlen(symname)+1 > ORANGEFS_NAME_MAX) return -ENAMETOOLONG; new_op = op_alloc(ORANGEFS_VFS_OP_SYMLINK); if (!new_op) return -ENOMEM; new_op->upcall.req.sym.parent_refn = parent->refn; fill_default_sys_attrs(new_op->upcall.req.sym.attributes, ORANGEFS_TYPE_SYMLINK, mode); strncpy(new_op->upcall.req.sym.entry_name, dentry->d_name.name, ORANGEFS_NAME_MAX - 1); strncpy(new_op->upcall.req.sym.target, symname, ORANGEFS_NAME_MAX - 1); ret = service_operation(new_op, __func__, get_interruptible_flag(dir)); gossip_debug(GOSSIP_NAME_DEBUG, "Symlink Got ORANGEFS handle %pU on fsid %d (ret=%d)\n", &new_op->downcall.resp.sym.refn.khandle, 
new_op->downcall.resp.sym.refn.fs_id, ret); if (ret < 0) { gossip_debug(GOSSIP_NAME_DEBUG, "%s: failed with error code %d\n", __func__, ret); goto out; } ref = new_op->downcall.resp.sym.refn; inode = orangefs_new_inode(dir->i_sb, dir, S_IFLNK | mode, 0, &ref); if (IS_ERR(inode)) { gossip_err ("*** Failed to allocate orangefs symlink inode\n"); ret = PTR_ERR(inode); goto out; } /* * This is necessary because orangefs_inode_getattr will not * re-read symlink size as it is impossible for it to change. * Invalidating the cache does not help. orangefs_new_inode * does not set the correct size (it does not know symname). */ inode->i_size = strlen(symname); gossip_debug(GOSSIP_NAME_DEBUG, "Assigned symlink inode new number of %pU\n", get_khandle_from_ino(inode)); d_instantiate_new(dentry, inode); orangefs_set_timeout(dentry); gossip_debug(GOSSIP_NAME_DEBUG, "Inode (Symlink) %pU -> %pd\n", get_khandle_from_ino(inode), dentry); memset(&iattr, 0, sizeof iattr); iattr.ia_valid |= ATTR_MTIME | ATTR_CTIME; iattr.ia_mtime = iattr.ia_ctime = current_time(dir); __orangefs_setattr(dir, &iattr); ret = 0; out: op_release(new_op); return ret; } static int orangefs_mkdir(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode) { struct orangefs_inode_s *parent = ORANGEFS_I(dir); struct orangefs_kernel_op_s *new_op; struct orangefs_object_kref ref; struct inode *inode; struct iattr iattr; int ret; new_op = op_alloc(ORANGEFS_VFS_OP_MKDIR); if (!new_op) return -ENOMEM; new_op->upcall.req.mkdir.parent_refn = parent->refn; fill_default_sys_attrs(new_op->upcall.req.mkdir.attributes, ORANGEFS_TYPE_DIRECTORY, mode); strncpy(new_op->upcall.req.mkdir.d_name, dentry->d_name.name, ORANGEFS_NAME_MAX - 1); ret = service_operation(new_op, __func__, get_interruptible_flag(dir)); gossip_debug(GOSSIP_NAME_DEBUG, "Mkdir Got ORANGEFS handle %pU on fsid %d\n", &new_op->downcall.resp.mkdir.refn.khandle, new_op->downcall.resp.mkdir.refn.fs_id); if (ret < 0) { gossip_debug(GOSSIP_NAME_DEBUG, "%s: failed with error code %d\n", __func__, ret); goto out; } ref = new_op->downcall.resp.mkdir.refn; inode = orangefs_new_inode(dir->i_sb, dir, S_IFDIR | mode, 0, &ref); if (IS_ERR(inode)) { gossip_err("*** Failed to allocate orangefs dir inode\n"); ret = PTR_ERR(inode); goto out; } gossip_debug(GOSSIP_NAME_DEBUG, "Assigned dir inode new number of %pU\n", get_khandle_from_ino(inode)); d_instantiate_new(dentry, inode); orangefs_set_timeout(dentry); gossip_debug(GOSSIP_NAME_DEBUG, "Inode (Directory) %pU -> %pd\n", get_khandle_from_ino(inode), dentry); /* * NOTE: we have no good way to keep nlink consistent for directories * across clients; keep constant at 1. 
*/ memset(&iattr, 0, sizeof iattr); iattr.ia_valid |= ATTR_MTIME | ATTR_CTIME; iattr.ia_mtime = iattr.ia_ctime = current_time(dir); __orangefs_setattr(dir, &iattr); out: op_release(new_op); return ret; } static int orangefs_rename(struct mnt_idmap *idmap, struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) { struct orangefs_kernel_op_s *new_op; struct iattr iattr; int ret; if (flags) return -EINVAL; gossip_debug(GOSSIP_NAME_DEBUG, "orangefs_rename: called (%pd2 => %pd2) ct=%d\n", old_dentry, new_dentry, d_count(new_dentry)); memset(&iattr, 0, sizeof iattr); iattr.ia_valid |= ATTR_MTIME | ATTR_CTIME; iattr.ia_mtime = iattr.ia_ctime = current_time(new_dir); __orangefs_setattr(new_dir, &iattr); new_op = op_alloc(ORANGEFS_VFS_OP_RENAME); if (!new_op) return -EINVAL; new_op->upcall.req.rename.old_parent_refn = ORANGEFS_I(old_dir)->refn; new_op->upcall.req.rename.new_parent_refn = ORANGEFS_I(new_dir)->refn; strncpy(new_op->upcall.req.rename.d_old_name, old_dentry->d_name.name, ORANGEFS_NAME_MAX - 1); strncpy(new_op->upcall.req.rename.d_new_name, new_dentry->d_name.name, ORANGEFS_NAME_MAX - 1); ret = service_operation(new_op, "orangefs_rename", get_interruptible_flag(old_dentry->d_inode)); gossip_debug(GOSSIP_NAME_DEBUG, "orangefs_rename: got downcall status %d\n", ret); if (new_dentry->d_inode) inode_set_ctime_current(d_inode(new_dentry)); op_release(new_op); return ret; } /* ORANGEFS implementation of VFS inode operations for directories */ const struct inode_operations orangefs_dir_inode_operations = { .lookup = orangefs_lookup, .get_inode_acl = orangefs_get_acl, .set_acl = orangefs_set_acl, .create = orangefs_create, .unlink = orangefs_unlink, .symlink = orangefs_symlink, .mkdir = orangefs_mkdir, .rmdir = orangefs_unlink, .rename = orangefs_rename, .setattr = orangefs_setattr, .getattr = orangefs_getattr, .listxattr = orangefs_listxattr, .permission = orangefs_permission, .update_time = orangefs_update_time, };
linux-master
fs/orangefs/namei.c
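Every operation in the namei.c record above follows the same upcall/downcall shape: allocate an op, length-check and copy the name, fill in the parent reference, call service_operation(), then read the new object handle out of the downcall before building an inode. A minimal userspace sketch of that shape follows; struct demo_op, demo_service() and the constants are invented for illustration and only model the flow.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DEMO_NAME_MAX 256	/* stand-in for ORANGEFS_NAME_MAX */

struct demo_op {
	struct { int parent_fsid; char d_name[DEMO_NAME_MAX]; } upcall;
	struct { unsigned long long khandle; int fs_id; } downcall;
};

/* Stand-in for service_operation(): pretend the client-core answered. */
static int demo_service(struct demo_op *op)
{
	op->downcall.khandle = 0x1234abcdULL;
	op->downcall.fs_id = op->upcall.parent_fsid;
	return 0;
}

static int demo_create(int parent_fsid, const char *name)
{
	struct demo_op *op;
	int ret;

	if (strlen(name) > DEMO_NAME_MAX - 1)
		return -ENAMETOOLONG;

	op = calloc(1, sizeof(*op));	/* zeroed, so the bounded copy stays NUL-terminated */
	if (!op)
		return -ENOMEM;

	op->upcall.parent_fsid = parent_fsid;
	strncpy(op->upcall.d_name, name, DEMO_NAME_MAX - 1);

	ret = demo_service(op);
	if (!ret)
		printf("created \"%s\" -> handle %llx on fsid %d\n",
		       name, op->downcall.khandle, op->downcall.fs_id);

	free(op);	/* op_release() in the real code */
	return ret;
}

int main(void)
{
	return demo_create(7, "example.txt") < 0;
}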
// SPDX-License-Identifier: GPL-2.0-only /* * (C) 2001 Clemson University and The University of Chicago * * Changes by Acxiom Corporation to add proc file handler for pvfs2 client * parameters, Copyright Acxiom Corporation, 2005. * * See COPYING in top-level directory. */ #include "protocol.h" #include "orangefs-kernel.h" #include "orangefs-debugfs.h" #include "orangefs-sysfs.h" /* ORANGEFS_VERSION is a ./configure define */ #ifndef ORANGEFS_VERSION #define ORANGEFS_VERSION "upstream" #endif /* * global variables declared here */ struct orangefs_stats orangefs_stats; /* the size of the hash tables for ops in progress */ int hash_table_size = 509; static ulong module_parm_debug_mask; __u64 orangefs_gossip_debug_mask; int op_timeout_secs = ORANGEFS_DEFAULT_OP_TIMEOUT_SECS; int slot_timeout_secs = ORANGEFS_DEFAULT_SLOT_TIMEOUT_SECS; int orangefs_cache_timeout_msecs = 500; int orangefs_dcache_timeout_msecs = 50; int orangefs_getattr_timeout_msecs = 50; MODULE_LICENSE("GPL"); MODULE_AUTHOR("ORANGEFS Development Team"); MODULE_DESCRIPTION("The Linux Kernel VFS interface to ORANGEFS"); MODULE_PARM_DESC(module_parm_debug_mask, "debugging level (see orangefs-debug.h for values)"); MODULE_PARM_DESC(op_timeout_secs, "Operation timeout in seconds"); MODULE_PARM_DESC(slot_timeout_secs, "Slot timeout in seconds"); MODULE_PARM_DESC(hash_table_size, "size of hash table for operations in progress"); static struct file_system_type orangefs_fs_type = { .name = "pvfs2", .mount = orangefs_mount, .kill_sb = orangefs_kill_sb, .owner = THIS_MODULE, }; module_param(hash_table_size, int, 0); module_param(module_parm_debug_mask, ulong, 0644); module_param(op_timeout_secs, int, 0); module_param(slot_timeout_secs, int, 0); /* * Blocks non-priority requests from being queued for servicing. This * could be used for protecting the request list data structure, but * for now it's only being used to stall the op addition to the request * list */ DEFINE_MUTEX(orangefs_request_mutex); /* hash table for storing operations waiting for matching downcall */ struct list_head *orangefs_htable_ops_in_progress; DEFINE_SPINLOCK(orangefs_htable_ops_in_progress_lock); /* list for queueing upcall operations */ LIST_HEAD(orangefs_request_list); /* used to protect the above orangefs_request_list */ DEFINE_SPINLOCK(orangefs_request_list_lock); /* used for incoming request notification */ DECLARE_WAIT_QUEUE_HEAD(orangefs_request_list_waitq); static int __init orangefs_init(void) { int ret; __u32 i = 0; if (op_timeout_secs < 0) op_timeout_secs = 0; if (slot_timeout_secs < 0) slot_timeout_secs = 0; /* initialize global book keeping data structures */ ret = op_cache_initialize(); if (ret < 0) goto out; ret = orangefs_inode_cache_initialize(); if (ret < 0) goto cleanup_op; orangefs_htable_ops_in_progress = kcalloc(hash_table_size, sizeof(struct list_head), GFP_KERNEL); if (!orangefs_htable_ops_in_progress) { ret = -ENOMEM; goto cleanup_inode; } /* initialize a doubly linked at each hash table index */ for (i = 0; i < hash_table_size; i++) INIT_LIST_HEAD(&orangefs_htable_ops_in_progress[i]); ret = fsid_key_table_initialize(); if (ret < 0) goto cleanup_progress_table; /* * Build the contents of /sys/kernel/debug/orangefs/debug-help * from the keywords in the kernel keyword/mask array. * * The keywords in the client keyword/mask array are * unknown at boot time. * * orangefs_prepare_debugfs_help_string will be used again * later to rebuild the debug-help-string after the client starts * and passes along the needed info. 
The argument signifies * which time orangefs_prepare_debugfs_help_string is being * called. */ ret = orangefs_prepare_debugfs_help_string(1); if (ret) goto cleanup_key_table; orangefs_debugfs_init(module_parm_debug_mask); ret = orangefs_sysfs_init(); if (ret) goto sysfs_init_failed; /* Initialize the orangefsdev subsystem. */ ret = orangefs_dev_init(); if (ret < 0) { gossip_err("%s: could not initialize device subsystem %d!\n", __func__, ret); goto cleanup_sysfs; } ret = register_filesystem(&orangefs_fs_type); if (ret == 0) { pr_info("%s: module version %s loaded\n", __func__, ORANGEFS_VERSION); goto out; } orangefs_dev_cleanup(); cleanup_sysfs: orangefs_sysfs_exit(); sysfs_init_failed: orangefs_debugfs_cleanup(); cleanup_key_table: fsid_key_table_finalize(); cleanup_progress_table: kfree(orangefs_htable_ops_in_progress); cleanup_inode: orangefs_inode_cache_finalize(); cleanup_op: op_cache_finalize(); out: return ret; } static void __exit orangefs_exit(void) { int i = 0; gossip_debug(GOSSIP_INIT_DEBUG, "orangefs: orangefs_exit called\n"); unregister_filesystem(&orangefs_fs_type); orangefs_debugfs_cleanup(); orangefs_sysfs_exit(); fsid_key_table_finalize(); orangefs_dev_cleanup(); BUG_ON(!list_empty(&orangefs_request_list)); for (i = 0; i < hash_table_size; i++) BUG_ON(!list_empty(&orangefs_htable_ops_in_progress[i])); orangefs_inode_cache_finalize(); op_cache_finalize(); kfree(orangefs_htable_ops_in_progress); pr_info("orangefs: module version %s unloaded\n", ORANGEFS_VERSION); } /* * What we do in this function is to walk the list of operations * that are in progress in the hash table and mark them as purged as well. */ void purge_inprogress_ops(void) { int i; for (i = 0; i < hash_table_size; i++) { struct orangefs_kernel_op_s *op; struct orangefs_kernel_op_s *next; spin_lock(&orangefs_htable_ops_in_progress_lock); list_for_each_entry_safe(op, next, &orangefs_htable_ops_in_progress[i], list) { set_op_state_purged(op); gossip_debug(GOSSIP_DEV_DEBUG, "%s: op:%s: op_state:%d: process:%s:\n", __func__, get_opname_string(op), op->op_state, current->comm); } spin_unlock(&orangefs_htable_ops_in_progress_lock); } } module_init(orangefs_init); module_exit(orangefs_exit);
linux-master
fs/orangefs/orangefs-mod.c
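orangefs_init() in the record above brings up several subsystems in sequence and unwinds them in reverse order through a ladder of goto labels when a later step fails. The sketch below shows that pattern in isolation with three invented stand-in steps; it is a model of the control flow only, not of the actual orangefs subsystems.

#include <stdio.h>

/* Each init step either succeeds or forces teardown of the earlier ones. */
static int init_caches(void)   { puts("caches up");    return 0; }
static int init_sysfs(void)    { puts("sysfs up");     return 0; }
static int init_device(void)   { puts("device fails"); return -1; }	/* simulated failure */
static void exit_sysfs(void)   { puts("sysfs down");  }
static void exit_caches(void)  { puts("caches down"); }

static int demo_init(void)
{
	int ret;

	ret = init_caches();
	if (ret)
		goto out;
	ret = init_sysfs();
	if (ret)
		goto undo_caches;
	ret = init_device();
	if (ret)
		goto undo_sysfs;
	return 0;	/* fully registered */

undo_sysfs:
	exit_sysfs();
undo_caches:
	exit_caches();
out:
	return ret;
}

int main(void)
{
	return demo_init() ? 1 : 0;
}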
// SPDX-License-Identifier: GPL-2.0 /* * (C) 2001 Clemson University and The University of Chicago * * See COPYING in top-level directory. */ #include "protocol.h" #include "orangefs-kernel.h" #include "orangefs-bufmap.h" #include <linux/posix_acl_xattr.h> struct posix_acl *orangefs_get_acl(struct inode *inode, int type, bool rcu) { struct posix_acl *acl; int ret; char *key = NULL, *value = NULL; if (rcu) return ERR_PTR(-ECHILD); switch (type) { case ACL_TYPE_ACCESS: key = XATTR_NAME_POSIX_ACL_ACCESS; break; case ACL_TYPE_DEFAULT: key = XATTR_NAME_POSIX_ACL_DEFAULT; break; default: gossip_err("orangefs_get_acl: bogus value of type %d\n", type); return ERR_PTR(-EINVAL); } /* * Rather than incurring a network call just to determine the exact * length of the attribute, I just allocate a max length to save on * the network call. Conceivably, we could pass NULL to * orangefs_inode_getxattr() to probe the length of the value, but * I don't do that for now. */ value = kmalloc(ORANGEFS_MAX_XATTR_VALUELEN, GFP_KERNEL); if (!value) return ERR_PTR(-ENOMEM); gossip_debug(GOSSIP_ACL_DEBUG, "inode %pU, key %s, type %d\n", get_khandle_from_ino(inode), key, type); ret = orangefs_inode_getxattr(inode, key, value, ORANGEFS_MAX_XATTR_VALUELEN); /* if the key exists, convert it to an in-memory rep */ if (ret > 0) { acl = posix_acl_from_xattr(&init_user_ns, value, ret); } else if (ret == -ENODATA || ret == -ENOSYS) { acl = NULL; } else { gossip_err("inode %pU retrieving acl's failed with error %d\n", get_khandle_from_ino(inode), ret); acl = ERR_PTR(ret); } /* kfree(NULL) is safe, so don't worry if value ever got used */ kfree(value); return acl; } int __orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type) { int error = 0; void *value = NULL; size_t size = 0; const char *name = NULL; switch (type) { case ACL_TYPE_ACCESS: name = XATTR_NAME_POSIX_ACL_ACCESS; break; case ACL_TYPE_DEFAULT: name = XATTR_NAME_POSIX_ACL_DEFAULT; break; default: gossip_err("%s: invalid type %d!\n", __func__, type); return -EINVAL; } gossip_debug(GOSSIP_ACL_DEBUG, "%s: inode %pU, key %s type %d\n", __func__, get_khandle_from_ino(inode), name, type); if (acl) { size = posix_acl_xattr_size(acl->a_count); value = kmalloc(size, GFP_KERNEL); if (!value) return -ENOMEM; error = posix_acl_to_xattr(&init_user_ns, acl, value, size); if (error < 0) goto out; } gossip_debug(GOSSIP_ACL_DEBUG, "%s: name %s, value %p, size %zd, acl %p\n", __func__, name, value, size, acl); /* * Go ahead and set the extended attribute now. NOTE: Suppose acl * was NULL, then value will be NULL and size will be 0 and that * will xlate to a removexattr. However, we don't want removexattr * complain if attributes does not exist. */ error = orangefs_inode_setxattr(inode, name, value, size, 0); out: kfree(value); if (!error) set_cached_acl(inode, type, acl); return error; } int orangefs_set_acl(struct mnt_idmap *idmap, struct dentry *dentry, struct posix_acl *acl, int type) { int error; struct iattr iattr; int rc; struct inode *inode = d_inode(dentry); memset(&iattr, 0, sizeof iattr); if (type == ACL_TYPE_ACCESS && acl) { /* * posix_acl_update_mode checks to see if the permissions * described by the ACL can be encoded into the * object's mode. If so, it sets "acl" to NULL * and "mode" to the new desired value. It is up to * us to propagate the new mode back to the server... 
*/ error = posix_acl_update_mode(&nop_mnt_idmap, inode, &iattr.ia_mode, &acl); if (error) { gossip_err("%s: posix_acl_update_mode err: %d\n", __func__, error); return error; } if (inode->i_mode != iattr.ia_mode) iattr.ia_valid = ATTR_MODE; } rc = __orangefs_set_acl(inode, acl, type); if (!rc && (iattr.ia_valid == ATTR_MODE)) rc = __orangefs_setattr_mode(dentry, &iattr); return rc; }
linux-master
fs/orangefs/acl.c
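orangefs_get_acl() above deliberately allocates a maximum-length buffer instead of probing the attribute size with an extra network round trip, and treats ENODATA as "no ACL". The userspace analogue below does the same thing with the Linux getxattr(2) syscall against the standard system.posix_acl_access name; the buffer size and path are arbitrary and the program only illustrates that pattern.

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <sys/xattr.h>

#define MAX_ACL_XATTR_LEN 8192	/* assumed upper bound, like ORANGEFS_MAX_XATTR_VALUELEN */

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : ".";
	char *value = malloc(MAX_ACL_XATTR_LEN);
	ssize_t ret;

	if (!value)
		return 1;

	/* One call with a max-sized buffer instead of a probe-then-fetch pair. */
	ret = getxattr(path, "system.posix_acl_access", value, MAX_ACL_XATTR_LEN);
	if (ret > 0)
		printf("%s: ACL xattr is %zd bytes\n", path, ret);
	else if (ret < 0 && errno == ENODATA)
		printf("%s: no access ACL stored\n", path);
	else if (ret < 0)
		perror("getxattr");

	free(value);
	return 0;
}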
// SPDX-License-Identifier: GPL-2.0
/*
 * (C) 2001 Clemson University and The University of Chicago
 *
 * See COPYING in top-level directory.
 */

#include "protocol.h"
#include "orangefs-kernel.h"
#include "orangefs-bufmap.h"

const struct inode_operations orangefs_symlink_inode_operations = {
	.get_link = simple_get_link,
	.setattr = orangefs_setattr,
	.getattr = orangefs_getattr,
	.listxattr = orangefs_listxattr,
	.permission = orangefs_permission,
	.update_time = orangefs_update_time,
};
linux-master
fs/orangefs/symlink.c
// SPDX-License-Identifier: GPL-2.0 /* * (C) 2001 Clemson University and The University of Chicago * Copyright 2018 Omnibond Systems, L.L.C. * * See COPYING in top-level directory. */ /* * Linux VFS file operations. */ #include "protocol.h" #include "orangefs-kernel.h" #include "orangefs-bufmap.h" #include <linux/fs.h> #include <linux/filelock.h> #include <linux/pagemap.h> static int flush_racache(struct inode *inode) { struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); struct orangefs_kernel_op_s *new_op; int ret; gossip_debug(GOSSIP_UTILS_DEBUG, "%s: %pU: Handle is %pU | fs_id %d\n", __func__, get_khandle_from_ino(inode), &orangefs_inode->refn.khandle, orangefs_inode->refn.fs_id); new_op = op_alloc(ORANGEFS_VFS_OP_RA_FLUSH); if (!new_op) return -ENOMEM; new_op->upcall.req.ra_cache_flush.refn = orangefs_inode->refn; ret = service_operation(new_op, "orangefs_flush_racache", get_interruptible_flag(inode)); gossip_debug(GOSSIP_UTILS_DEBUG, "%s: got return value of %d\n", __func__, ret); op_release(new_op); return ret; } /* * Post and wait for the I/O upcall to finish */ ssize_t wait_for_direct_io(enum ORANGEFS_io_type type, struct inode *inode, loff_t *offset, struct iov_iter *iter, size_t total_size, loff_t readahead_size, struct orangefs_write_range *wr, int *index_return, struct file *file) { struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); struct orangefs_khandle *handle = &orangefs_inode->refn.khandle; struct orangefs_kernel_op_s *new_op = NULL; int buffer_index; ssize_t ret; size_t copy_amount; int open_for_read; int open_for_write; new_op = op_alloc(ORANGEFS_VFS_OP_FILE_IO); if (!new_op) return -ENOMEM; /* synchronous I/O */ new_op->upcall.req.io.readahead_size = readahead_size; new_op->upcall.req.io.io_type = type; new_op->upcall.req.io.refn = orangefs_inode->refn; populate_shared_memory: /* get a shared buffer index */ buffer_index = orangefs_bufmap_get(); if (buffer_index < 0) { ret = buffer_index; gossip_debug(GOSSIP_FILE_DEBUG, "%s: orangefs_bufmap_get failure (%zd)\n", __func__, ret); goto out; } gossip_debug(GOSSIP_FILE_DEBUG, "%s(%pU): GET op %p -> buffer_index %d\n", __func__, handle, new_op, buffer_index); new_op->uses_shared_memory = 1; new_op->upcall.req.io.buf_index = buffer_index; new_op->upcall.req.io.count = total_size; new_op->upcall.req.io.offset = *offset; if (type == ORANGEFS_IO_WRITE && wr) { new_op->upcall.uid = from_kuid(&init_user_ns, wr->uid); new_op->upcall.gid = from_kgid(&init_user_ns, wr->gid); } /* * Orangefs has no open, and orangefs checks file permissions * on each file access. Posix requires that file permissions * be checked on open and nowhere else. Orangefs-through-the-kernel * needs to seem posix compliant. * * The VFS opens files, even if the filesystem provides no * method. We can see if a file was successfully opened for * read and or for write by looking at file->f_mode. * * When writes are flowing from the page cache, file is no * longer available. We can trust the VFS to have checked * file->f_mode before writing to the page cache. * * The mode of a file might change between when it is opened * and IO commences, or it might be created with an arbitrary mode. * * We'll make sure we don't hit EACCES during the IO stage by * using UID 0. Some of the time we have access without changing * to UID 0 - how to check? */ if (file) { open_for_write = file->f_mode & FMODE_WRITE; open_for_read = file->f_mode & FMODE_READ; } else { open_for_write = 1; open_for_read = 0; /* not relevant? 
*/ } if ((type == ORANGEFS_IO_WRITE) && open_for_write) new_op->upcall.uid = 0; if ((type == ORANGEFS_IO_READ) && open_for_read) new_op->upcall.uid = 0; gossip_debug(GOSSIP_FILE_DEBUG, "%s(%pU): offset: %llu total_size: %zd\n", __func__, handle, llu(*offset), total_size); /* * Stage 1: copy the buffers into client-core's address space */ if (type == ORANGEFS_IO_WRITE && total_size) { ret = orangefs_bufmap_copy_from_iovec(iter, buffer_index, total_size); if (ret < 0) { gossip_err("%s: Failed to copy-in buffers. Please make sure that the pvfs2-client is running. %ld\n", __func__, (long)ret); goto out; } } gossip_debug(GOSSIP_FILE_DEBUG, "%s(%pU): Calling post_io_request with tag (%llu)\n", __func__, handle, llu(new_op->tag)); /* Stage 2: Service the I/O operation */ ret = service_operation(new_op, type == ORANGEFS_IO_WRITE ? "file_write" : "file_read", get_interruptible_flag(inode)); /* * If service_operation() returns -EAGAIN #and# the operation was * purged from orangefs_request_list or htable_ops_in_progress, then * we know that the client was restarted, causing the shared memory * area to be wiped clean. To restart a write operation in this * case, we must re-copy the data from the user's iovec to a NEW * shared memory location. To restart a read operation, we must get * a new shared memory location. */ if (ret == -EAGAIN && op_state_purged(new_op)) { orangefs_bufmap_put(buffer_index); if (type == ORANGEFS_IO_WRITE) iov_iter_revert(iter, total_size); gossip_debug(GOSSIP_FILE_DEBUG, "%s:going to repopulate_shared_memory.\n", __func__); goto populate_shared_memory; } if (ret < 0) { if (ret == -EINTR) { /* * We can't return EINTR if any data was written, * it's not POSIX. It is minimally acceptable * to give a partial write, the way NFS does. * * It would be optimal to return all or nothing, * but if a userspace write is bigger than * an IO buffer, and the interrupt occurs * between buffer writes, that would not be * possible. */ switch (new_op->op_state - OP_VFS_STATE_GIVEN_UP) { /* * If the op was waiting when the interrupt * occurred, then the client-core did not * trigger the write. */ case OP_VFS_STATE_WAITING: if (*offset == 0) ret = -EINTR; else ret = 0; break; /* * If the op was in progress when the interrupt * occurred, then the client-core was able to * trigger the write. */ case OP_VFS_STATE_INPROGR: if (type == ORANGEFS_IO_READ) ret = -EINTR; else ret = total_size; break; default: gossip_err("%s: unexpected op state :%d:.\n", __func__, new_op->op_state); ret = 0; break; } gossip_debug(GOSSIP_FILE_DEBUG, "%s: got EINTR, state:%d: %p\n", __func__, new_op->op_state, new_op); } else { gossip_err("%s: error in %s handle %pU, returning %zd\n", __func__, type == ORANGEFS_IO_READ ? "read from" : "write to", handle, ret); } if (orangefs_cancel_op_in_progress(new_op)) return ret; goto out; } /* * Stage 3: Post copy buffers from client-core's address space */ if (type == ORANGEFS_IO_READ && new_op->downcall.resp.io.amt_complete) { /* * NOTE: the iovector can either contain addresses which * can futher be kernel-space or user-space addresses. * or it can pointers to struct page's */ copy_amount = new_op->downcall.resp.io.amt_complete; ret = orangefs_bufmap_copy_to_iovec(iter, buffer_index, copy_amount); if (ret < 0) { gossip_err("%s: Failed to copy-out buffers. Please make sure that the pvfs2-client is running (%ld)\n", __func__, (long)ret); goto out; } } gossip_debug(GOSSIP_FILE_DEBUG, "%s(%pU): Amount %s, returned by the sys-io call:%d\n", __func__, handle, type == ORANGEFS_IO_READ ? 
"read" : "written", (int)new_op->downcall.resp.io.amt_complete); ret = new_op->downcall.resp.io.amt_complete; out: if (buffer_index >= 0) { orangefs_bufmap_put(buffer_index); gossip_debug(GOSSIP_FILE_DEBUG, "%s(%pU): PUT buffer_index %d\n", __func__, handle, buffer_index); } op_release(new_op); return ret; } int orangefs_revalidate_mapping(struct inode *inode) { struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); struct address_space *mapping = inode->i_mapping; unsigned long *bitlock = &orangefs_inode->bitlock; int ret; while (1) { ret = wait_on_bit(bitlock, 1, TASK_KILLABLE); if (ret) return ret; spin_lock(&inode->i_lock); if (test_bit(1, bitlock)) { spin_unlock(&inode->i_lock); continue; } if (!time_before(jiffies, orangefs_inode->mapping_time)) break; spin_unlock(&inode->i_lock); return 0; } set_bit(1, bitlock); smp_wmb(); spin_unlock(&inode->i_lock); unmap_mapping_range(mapping, 0, 0, 0); ret = filemap_write_and_wait(mapping); if (!ret) ret = invalidate_inode_pages2(mapping); orangefs_inode->mapping_time = jiffies + orangefs_cache_timeout_msecs*HZ/1000; clear_bit(1, bitlock); smp_mb__after_atomic(); wake_up_bit(bitlock, 1); return ret; } static ssize_t orangefs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter) { int ret; orangefs_stats.reads++; down_read(&file_inode(iocb->ki_filp)->i_rwsem); ret = orangefs_revalidate_mapping(file_inode(iocb->ki_filp)); if (ret) goto out; ret = generic_file_read_iter(iocb, iter); out: up_read(&file_inode(iocb->ki_filp)->i_rwsem); return ret; } static ssize_t orangefs_file_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { struct inode *inode = file_inode(in); ssize_t ret; orangefs_stats.reads++; down_read(&inode->i_rwsem); ret = orangefs_revalidate_mapping(inode); if (ret) goto out; ret = filemap_splice_read(in, ppos, pipe, len, flags); out: up_read(&inode->i_rwsem); return ret; } static ssize_t orangefs_file_write_iter(struct kiocb *iocb, struct iov_iter *iter) { int ret; orangefs_stats.writes++; if (iocb->ki_pos > i_size_read(file_inode(iocb->ki_filp))) { ret = orangefs_revalidate_mapping(file_inode(iocb->ki_filp)); if (ret) return ret; } ret = generic_file_write_iter(iocb, iter); return ret; } static vm_fault_t orangefs_fault(struct vm_fault *vmf) { struct file *file = vmf->vma->vm_file; int ret; ret = orangefs_inode_getattr(file->f_mapping->host, ORANGEFS_GETATTR_SIZE); if (ret == -ESTALE) ret = -EIO; if (ret) { gossip_err("%s: orangefs_inode_getattr failed, " "ret:%d:.\n", __func__, ret); return VM_FAULT_SIGBUS; } return filemap_fault(vmf); } static const struct vm_operations_struct orangefs_file_vm_ops = { .fault = orangefs_fault, .map_pages = filemap_map_pages, .page_mkwrite = orangefs_page_mkwrite, }; /* * Memory map a region of a file. */ static int orangefs_file_mmap(struct file *file, struct vm_area_struct *vma) { int ret; ret = orangefs_revalidate_mapping(file_inode(file)); if (ret) return ret; gossip_debug(GOSSIP_FILE_DEBUG, "orangefs_file_mmap: called on %pD\n", file); /* set the sequential readahead hint */ vm_flags_mod(vma, VM_SEQ_READ, VM_RAND_READ); file_accessed(file); vma->vm_ops = &orangefs_file_vm_ops; return 0; } #define mapping_nrpages(idata) ((idata)->nrpages) /* * Called to notify the module that there are no more references to * this file (i.e. no processes have it open). * * \note Not called when each file is closed. 
*/ static int orangefs_file_release(struct inode *inode, struct file *file) { gossip_debug(GOSSIP_FILE_DEBUG, "orangefs_file_release: called on %pD\n", file); /* * remove all associated inode pages from the page cache and * readahead cache (if any); this forces an expensive refresh of * data for the next caller of mmap (or 'get_block' accesses) */ if (mapping_nrpages(file->f_mapping)) { if (orangefs_features & ORANGEFS_FEATURE_READAHEAD) { gossip_debug(GOSSIP_INODE_DEBUG, "calling flush_racache on %pU\n", get_khandle_from_ino(inode)); flush_racache(inode); gossip_debug(GOSSIP_INODE_DEBUG, "flush_racache finished\n"); } } return 0; } /* * Push all data for a specific file onto permanent storage. */ static int orangefs_fsync(struct file *file, loff_t start, loff_t end, int datasync) { int ret; struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(file_inode(file)); struct orangefs_kernel_op_s *new_op = NULL; ret = filemap_write_and_wait_range(file_inode(file)->i_mapping, start, end); if (ret < 0) return ret; new_op = op_alloc(ORANGEFS_VFS_OP_FSYNC); if (!new_op) return -ENOMEM; new_op->upcall.req.fsync.refn = orangefs_inode->refn; ret = service_operation(new_op, "orangefs_fsync", get_interruptible_flag(file_inode(file))); gossip_debug(GOSSIP_FILE_DEBUG, "orangefs_fsync got return value of %d\n", ret); op_release(new_op); return ret; } /* * Change the file pointer position for an instance of an open file. * * \note If .llseek is overriden, we must acquire lock as described in * Documentation/filesystems/locking.rst. * * Future upgrade could support SEEK_DATA and SEEK_HOLE but would * require much changes to the FS */ static loff_t orangefs_file_llseek(struct file *file, loff_t offset, int origin) { int ret = -EINVAL; struct inode *inode = file_inode(file); if (origin == SEEK_END) { /* * revalidate the inode's file size. * NOTE: We are only interested in file size here, * so we set mask accordingly. */ ret = orangefs_inode_getattr(file->f_mapping->host, ORANGEFS_GETATTR_SIZE); if (ret == -ESTALE) ret = -EIO; if (ret) { gossip_debug(GOSSIP_FILE_DEBUG, "%s:%s:%d calling make bad inode\n", __FILE__, __func__, __LINE__); return ret; } } gossip_debug(GOSSIP_FILE_DEBUG, "orangefs_file_llseek: offset is %ld | origin is %d" " | inode size is %lu\n", (long)offset, origin, (unsigned long)i_size_read(inode)); return generic_file_llseek(file, offset, origin); } /* * Support local locks (locks that only this kernel knows about) * if Orangefs was mounted -o local_lock. */ static int orangefs_lock(struct file *filp, int cmd, struct file_lock *fl) { int rc = -EINVAL; if (ORANGEFS_SB(file_inode(filp)->i_sb)->flags & ORANGEFS_OPT_LOCAL_LOCK) { if (cmd == F_GETLK) { rc = 0; posix_test_lock(filp, fl); } else { rc = posix_lock_file(filp, fl, NULL); } } return rc; } static int orangefs_flush(struct file *file, fl_owner_t id) { /* * This is vfs_fsync_range(file, 0, LLONG_MAX, 0) without the * service_operation in orangefs_fsync. * * Do not send fsync to OrangeFS server on a close. Do send fsync * on an explicit fsync call. This duplicates historical OrangeFS * behavior. 
*/ int r; r = filemap_write_and_wait_range(file->f_mapping, 0, LLONG_MAX); if (r > 0) return 0; else return r; } /** ORANGEFS implementation of VFS file operations */ const struct file_operations orangefs_file_operations = { .llseek = orangefs_file_llseek, .read_iter = orangefs_file_read_iter, .write_iter = orangefs_file_write_iter, .lock = orangefs_lock, .mmap = orangefs_file_mmap, .open = generic_file_open, .splice_read = orangefs_file_splice_read, .splice_write = iter_file_splice_write, .flush = orangefs_flush, .release = orangefs_file_release, .fsync = orangefs_fsync, };
linux-master
fs/orangefs/file.c
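The revalidation gate in orangefs_revalidate_mapping() above combines a bit lock with a jiffies deadline so that only one task refreshes the page cache while later callers either wait for it or ride the still-valid window. Below is a minimal userspace sketch of that pattern, using pthreads in place of wait_on_bit()/wake_up_bit() and CLOCK_MONOTONIC in place of jiffies; the names cache_gate, CACHE_TIMEOUT_MS and revalidate() are illustrative only and are not OrangeFS code.

/*
 * Userspace sketch (assumption: not OrangeFS code) of the "one
 * refresher at a time, result valid for N ms" gate that
 * orangefs_revalidate_mapping() builds from a bit lock and jiffies.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define CACHE_TIMEOUT_MS 50			/* stands in for orangefs_cache_timeout_msecs */

struct cache_gate {
	pthread_mutex_t lock;
	pthread_cond_t done;			/* analogue of wake_up_bit()        */
	bool refreshing;			/* analogue of test_bit(1, bitlock) */
	struct timespec valid_until;		/* analogue of mapping_time         */
};

static bool still_valid(const struct timespec *deadline)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	if (now.tv_sec != deadline->tv_sec)
		return now.tv_sec < deadline->tv_sec;
	return now.tv_nsec < deadline->tv_nsec;
}

static void revalidate(struct cache_gate *g)
{
	pthread_mutex_lock(&g->lock);
	while (g->refreshing)			/* someone else is refreshing: wait */
		pthread_cond_wait(&g->done, &g->lock);
	if (still_valid(&g->valid_until)) {	/* window still open: nothing to do */
		pthread_mutex_unlock(&g->lock);
		return;
	}
	g->refreshing = true;			/* we won the right to refresh */
	pthread_mutex_unlock(&g->lock);

	/* ... the expensive unmap/writeback/invalidate work would go here ... */

	pthread_mutex_lock(&g->lock);
	clock_gettime(CLOCK_MONOTONIC, &g->valid_until);
	g->valid_until.tv_nsec += (long)CACHE_TIMEOUT_MS * 1000000L;
	g->valid_until.tv_sec += g->valid_until.tv_nsec / 1000000000L;
	g->valid_until.tv_nsec %= 1000000000L;
	g->refreshing = false;
	pthread_cond_broadcast(&g->done);	/* let the waiters re-check */
	pthread_mutex_unlock(&g->lock);
}

int main(void)
{
	struct cache_gate g = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.done = PTHREAD_COND_INITIALIZER,
	};

	revalidate(&g);		/* refreshes: the deadline starts in the past */
	revalidate(&g);		/* lands inside the validity window, returns at once */
	puts("both revalidations completed");
	return 0;
}

Build with "cc -pthread". The second call returning immediately is the same effect orangefs_cache_timeout_msecs has on back-to-back cached reads in the driver.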
// SPDX-License-Identifier: GPL-2.0 /* * Documentation/ABI/stable/sysfs-fs-orangefs: * * What: /sys/fs/orangefs/perf_counter_reset * Date: June 2015 * Contact: Mike Marshall <[email protected]> * Description: * echo a 0 or a 1 into perf_counter_reset to * reset all the counters in * /sys/fs/orangefs/perf_counters * except ones with PINT_PERF_PRESERVE set. * * * What: /sys/fs/orangefs/perf_counters/... * Date: Jun 2015 * Contact: Mike Marshall <[email protected]> * Description: * Counters and settings for various caches. * Read only. * * * What: /sys/fs/orangefs/perf_time_interval_secs * Date: Jun 2015 * Contact: Mike Marshall <[email protected]> * Description: * Length of perf counter intervals in * seconds. * * * What: /sys/fs/orangefs/perf_history_size * Date: Jun 2015 * Contact: Mike Marshall <[email protected]> * Description: * The perf_counters cache statistics have N, or * perf_history_size, samples. The default is * one. * * Every perf_time_interval_secs the (first) * samples are reset. * * If N is greater than one, the "current" set * of samples is reset, and the samples from the * other N-1 intervals remain available. * * * What: /sys/fs/orangefs/op_timeout_secs * Date: Jun 2015 * Contact: Mike Marshall <[email protected]> * Description: * Service operation timeout in seconds. * * * What: /sys/fs/orangefs/slot_timeout_secs * Date: Jun 2015 * Contact: Mike Marshall <[email protected]> * Description: * "Slot" timeout in seconds. A "slot" * is an indexed buffer in the shared * memory segment used for communication * between the kernel module and userspace. * Slots are requested and waited for, * the wait times out after slot_timeout_secs. * * What: /sys/fs/orangefs/cache_timeout_msecs * Date: Mar 2018 * Contact: Martin Brandenburg <[email protected]> * Description: * Time in milliseconds between which * orangefs_revalidate_mapping will invalidate the page * cache. * * What: /sys/fs/orangefs/dcache_timeout_msecs * Date: Jul 2016 * Contact: Martin Brandenburg <[email protected]> * Description: * Time lookup is valid in milliseconds. * * What: /sys/fs/orangefs/getattr_timeout_msecs * Date: Jul 2016 * Contact: Martin Brandenburg <[email protected]> * Description: * Time getattr is valid in milliseconds. * * What: /sys/fs/orangefs/readahead_count * Date: Aug 2016 * Contact: Martin Brandenburg <[email protected]> * Description: * Readahead cache buffer count. * * What: /sys/fs/orangefs/readahead_size * Date: Aug 2016 * Contact: Martin Brandenburg <[email protected]> * Description: * Readahead cache buffer size. * * What: /sys/fs/orangefs/readahead_count_size * Date: Aug 2016 * Contact: Martin Brandenburg <[email protected]> * Description: * Readahead cache buffer count and size. * * What: /sys/fs/orangefs/readahead_readcnt * Date: Jan 2017 * Contact: Martin Brandenburg <[email protected]> * Description: * Number of buffers (in multiples of readahead_size) * which can be read ahead for a single file at once. * * What: /sys/fs/orangefs/acache/... * Date: Jun 2015 * Contact: Martin Brandenburg <[email protected]> * Description: * Attribute cache configurable settings. * * * What: /sys/fs/orangefs/ncache/... * Date: Jun 2015 * Contact: Mike Marshall <[email protected]> * Description: * Name cache configurable settings. * * * What: /sys/fs/orangefs/capcache/... * Date: Jun 2015 * Contact: Mike Marshall <[email protected]> * Description: * Capability cache configurable settings. * * * What: /sys/fs/orangefs/ccache/... 
* Date: Jun 2015 * Contact: Mike Marshall <[email protected]> * Description: * Credential cache configurable settings. * */ #include <linux/fs.h> #include <linux/kobject.h> #include <linux/string.h> #include <linux/sysfs.h> #include <linux/module.h> #include <linux/init.h> #include "protocol.h" #include "orangefs-kernel.h" #include "orangefs-sysfs.h" #define ORANGEFS_KOBJ_ID "orangefs" #define ACACHE_KOBJ_ID "acache" #define CAPCACHE_KOBJ_ID "capcache" #define CCACHE_KOBJ_ID "ccache" #define NCACHE_KOBJ_ID "ncache" #define PC_KOBJ_ID "pc" #define STATS_KOBJ_ID "stats" /* * Every item calls orangefs_attr_show and orangefs_attr_store through * orangefs_sysfs_ops. They look at the orangefs_attributes further below to * call one of sysfs_int_show, sysfs_int_store, sysfs_service_op_show, or * sysfs_service_op_store. */ struct orangefs_attribute { struct attribute attr; ssize_t (*show)(struct kobject *kobj, struct orangefs_attribute *attr, char *buf); ssize_t (*store)(struct kobject *kobj, struct orangefs_attribute *attr, const char *buf, size_t count); }; static ssize_t orangefs_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct orangefs_attribute *attribute; attribute = container_of(attr, struct orangefs_attribute, attr); if (!attribute->show) return -EIO; return attribute->show(kobj, attribute, buf); } static ssize_t orangefs_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t len) { struct orangefs_attribute *attribute; if (!strcmp(kobj->name, PC_KOBJ_ID) || !strcmp(kobj->name, STATS_KOBJ_ID)) return -EPERM; attribute = container_of(attr, struct orangefs_attribute, attr); if (!attribute->store) return -EIO; return attribute->store(kobj, attribute, buf, len); } static const struct sysfs_ops orangefs_sysfs_ops = { .show = orangefs_attr_show, .store = orangefs_attr_store, }; static ssize_t sysfs_int_show(struct kobject *kobj, struct orangefs_attribute *attr, char *buf) { int rc = -EIO; gossip_debug(GOSSIP_SYSFS_DEBUG, "sysfs_int_show: id:%s:\n", kobj->name); if (!strcmp(kobj->name, ORANGEFS_KOBJ_ID)) { if (!strcmp(attr->attr.name, "op_timeout_secs")) { rc = scnprintf(buf, PAGE_SIZE, "%d\n", op_timeout_secs); goto out; } else if (!strcmp(attr->attr.name, "slot_timeout_secs")) { rc = scnprintf(buf, PAGE_SIZE, "%d\n", slot_timeout_secs); goto out; } else if (!strcmp(attr->attr.name, "cache_timeout_msecs")) { rc = scnprintf(buf, PAGE_SIZE, "%d\n", orangefs_cache_timeout_msecs); goto out; } else if (!strcmp(attr->attr.name, "dcache_timeout_msecs")) { rc = scnprintf(buf, PAGE_SIZE, "%d\n", orangefs_dcache_timeout_msecs); goto out; } else if (!strcmp(attr->attr.name, "getattr_timeout_msecs")) { rc = scnprintf(buf, PAGE_SIZE, "%d\n", orangefs_getattr_timeout_msecs); goto out; } else { goto out; } } else if (!strcmp(kobj->name, STATS_KOBJ_ID)) { if (!strcmp(attr->attr.name, "reads")) { rc = scnprintf(buf, PAGE_SIZE, "%lu\n", orangefs_stats.reads); goto out; } else if (!strcmp(attr->attr.name, "writes")) { rc = scnprintf(buf, PAGE_SIZE, "%lu\n", orangefs_stats.writes); goto out; } else { goto out; } } out: return rc; } static ssize_t sysfs_int_store(struct kobject *kobj, struct orangefs_attribute *attr, const char *buf, size_t count) { int rc = 0; gossip_debug(GOSSIP_SYSFS_DEBUG, "sysfs_int_store: start attr->attr.name:%s: buf:%s:\n", attr->attr.name, buf); if (!strcmp(attr->attr.name, "op_timeout_secs")) { rc = kstrtoint(buf, 0, &op_timeout_secs); goto out; } else if (!strcmp(attr->attr.name, "slot_timeout_secs")) { rc = kstrtoint(buf, 0, 
&slot_timeout_secs); goto out; } else if (!strcmp(attr->attr.name, "cache_timeout_msecs")) { rc = kstrtoint(buf, 0, &orangefs_cache_timeout_msecs); goto out; } else if (!strcmp(attr->attr.name, "dcache_timeout_msecs")) { rc = kstrtoint(buf, 0, &orangefs_dcache_timeout_msecs); goto out; } else if (!strcmp(attr->attr.name, "getattr_timeout_msecs")) { rc = kstrtoint(buf, 0, &orangefs_getattr_timeout_msecs); goto out; } else { goto out; } out: if (rc) rc = -EINVAL; else rc = count; return rc; } /* * obtain attribute values from userspace with a service operation. */ static ssize_t sysfs_service_op_show(struct kobject *kobj, struct orangefs_attribute *attr, char *buf) { struct orangefs_kernel_op_s *new_op = NULL; int rc = 0; char *ser_op_type = NULL; __u32 op_alloc_type; gossip_debug(GOSSIP_SYSFS_DEBUG, "sysfs_service_op_show: id:%s:\n", kobj->name); if (strcmp(kobj->name, PC_KOBJ_ID)) op_alloc_type = ORANGEFS_VFS_OP_PARAM; else op_alloc_type = ORANGEFS_VFS_OP_PERF_COUNT; new_op = op_alloc(op_alloc_type); if (!new_op) return -ENOMEM; /* Can't do a service_operation if the client is not running... */ rc = is_daemon_in_service(); if (rc) { pr_info_ratelimited("%s: Client not running :%d:\n", __func__, is_daemon_in_service()); goto out; } if (strcmp(kobj->name, PC_KOBJ_ID)) new_op->upcall.req.param.type = ORANGEFS_PARAM_REQUEST_GET; if (!strcmp(kobj->name, ORANGEFS_KOBJ_ID)) { /* Drop unsupported requests first. */ if (!(orangefs_features & ORANGEFS_FEATURE_READAHEAD) && (!strcmp(attr->attr.name, "readahead_count") || !strcmp(attr->attr.name, "readahead_size") || !strcmp(attr->attr.name, "readahead_count_size") || !strcmp(attr->attr.name, "readahead_readcnt"))) { rc = -EINVAL; goto out; } if (!strcmp(attr->attr.name, "perf_history_size")) new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_PERF_HISTORY_SIZE; else if (!strcmp(attr->attr.name, "perf_time_interval_secs")) new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_PERF_TIME_INTERVAL_SECS; else if (!strcmp(attr->attr.name, "perf_counter_reset")) new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_PERF_RESET; else if (!strcmp(attr->attr.name, "readahead_count")) new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_READAHEAD_COUNT; else if (!strcmp(attr->attr.name, "readahead_size")) new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_READAHEAD_SIZE; else if (!strcmp(attr->attr.name, "readahead_count_size")) new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_READAHEAD_COUNT_SIZE; else if (!strcmp(attr->attr.name, "readahead_readcnt")) new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_READAHEAD_READCNT; } else if (!strcmp(kobj->name, ACACHE_KOBJ_ID)) { if (!strcmp(attr->attr.name, "timeout_msecs")) new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_ACACHE_TIMEOUT_MSECS; if (!strcmp(attr->attr.name, "hard_limit")) new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_ACACHE_HARD_LIMIT; if (!strcmp(attr->attr.name, "soft_limit")) new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_ACACHE_SOFT_LIMIT; if (!strcmp(attr->attr.name, "reclaim_percentage")) new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_ACACHE_RECLAIM_PERCENTAGE; } else if (!strcmp(kobj->name, CAPCACHE_KOBJ_ID)) { if (!strcmp(attr->attr.name, "timeout_secs")) new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_TIMEOUT_SECS; if (!strcmp(attr->attr.name, "hard_limit")) new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_HARD_LIMIT; if (!strcmp(attr->attr.name, "soft_limit")) new_op->upcall.req.param.op = 
ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_SOFT_LIMIT; if (!strcmp(attr->attr.name, "reclaim_percentage")) new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_RECLAIM_PERCENTAGE; } else if (!strcmp(kobj->name, CCACHE_KOBJ_ID)) { if (!strcmp(attr->attr.name, "timeout_secs")) new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_CCACHE_TIMEOUT_SECS; if (!strcmp(attr->attr.name, "hard_limit")) new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_CCACHE_HARD_LIMIT; if (!strcmp(attr->attr.name, "soft_limit")) new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_CCACHE_SOFT_LIMIT; if (!strcmp(attr->attr.name, "reclaim_percentage")) new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_CCACHE_RECLAIM_PERCENTAGE; } else if (!strcmp(kobj->name, NCACHE_KOBJ_ID)) { if (!strcmp(attr->attr.name, "timeout_msecs")) new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_NCACHE_TIMEOUT_MSECS; if (!strcmp(attr->attr.name, "hard_limit")) new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_NCACHE_HARD_LIMIT; if (!strcmp(attr->attr.name, "soft_limit")) new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_NCACHE_SOFT_LIMIT; if (!strcmp(attr->attr.name, "reclaim_percentage")) new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_NCACHE_RECLAIM_PERCENTAGE; } else if (!strcmp(kobj->name, PC_KOBJ_ID)) { if (!strcmp(attr->attr.name, ACACHE_KOBJ_ID)) new_op->upcall.req.perf_count.type = ORANGEFS_PERF_COUNT_REQUEST_ACACHE; if (!strcmp(attr->attr.name, CAPCACHE_KOBJ_ID)) new_op->upcall.req.perf_count.type = ORANGEFS_PERF_COUNT_REQUEST_CAPCACHE; if (!strcmp(attr->attr.name, NCACHE_KOBJ_ID)) new_op->upcall.req.perf_count.type = ORANGEFS_PERF_COUNT_REQUEST_NCACHE; } else { gossip_err("sysfs_service_op_show: unknown kobj_id:%s:\n", kobj->name); rc = -EINVAL; goto out; } if (strcmp(kobj->name, PC_KOBJ_ID)) ser_op_type = "orangefs_param"; else ser_op_type = "orangefs_perf_count"; /* * The service_operation will return an errno return code on * error, and zero on success. */ rc = service_operation(new_op, ser_op_type, ORANGEFS_OP_INTERRUPTIBLE); out: if (!rc) { if (strcmp(kobj->name, PC_KOBJ_ID)) { if (new_op->upcall.req.param.op == ORANGEFS_PARAM_REQUEST_OP_READAHEAD_COUNT_SIZE) { rc = scnprintf(buf, PAGE_SIZE, "%d %d\n", (int)new_op->downcall.resp.param.u. value32[0], (int)new_op->downcall.resp.param.u. value32[1]); } else { rc = scnprintf(buf, PAGE_SIZE, "%d\n", (int)new_op->downcall.resp.param.u.value64); } } else { rc = scnprintf( buf, PAGE_SIZE, "%s", new_op->downcall.resp.perf_count.buffer); } } op_release(new_op); return rc; } /* * pass attribute values back to userspace with a service operation. * * We have to do a memory allocation, an sscanf and a service operation. * And we have to evaluate what the user entered, to make sure the * value is within the range supported by the attribute. So, there's * a lot of return code checking and mapping going on here. * * We want to return 1 if we think everything went OK, and * EINVAL if not. */ static ssize_t sysfs_service_op_store(struct kobject *kobj, struct orangefs_attribute *attr, const char *buf, size_t count) { struct orangefs_kernel_op_s *new_op = NULL; int val = 0; int rc = 0; gossip_debug(GOSSIP_SYSFS_DEBUG, "sysfs_service_op_store: id:%s:\n", kobj->name); new_op = op_alloc(ORANGEFS_VFS_OP_PARAM); if (!new_op) return -EINVAL; /* sic */ /* Can't do a service_operation if the client is not running... 
*/ rc = is_daemon_in_service(); if (rc) { pr_info("%s: Client not running :%d:\n", __func__, is_daemon_in_service()); goto out; } /* * The value we want to send back to userspace is in buf, unless this * there are two parameters, which is specially handled below. */ if (strcmp(kobj->name, ORANGEFS_KOBJ_ID) || strcmp(attr->attr.name, "readahead_count_size")) { rc = kstrtoint(buf, 0, &val); if (rc) goto out; } new_op->upcall.req.param.type = ORANGEFS_PARAM_REQUEST_SET; if (!strcmp(kobj->name, ORANGEFS_KOBJ_ID)) { /* Drop unsupported requests first. */ if (!(orangefs_features & ORANGEFS_FEATURE_READAHEAD) && (!strcmp(attr->attr.name, "readahead_count") || !strcmp(attr->attr.name, "readahead_size") || !strcmp(attr->attr.name, "readahead_count_size") || !strcmp(attr->attr.name, "readahead_readcnt"))) { rc = -EINVAL; goto out; } if (!strcmp(attr->attr.name, "perf_history_size")) { if (val > 0) { new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_PERF_HISTORY_SIZE; } else { rc = 0; goto out; } } else if (!strcmp(attr->attr.name, "perf_time_interval_secs")) { if (val > 0) { new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_PERF_TIME_INTERVAL_SECS; } else { rc = 0; goto out; } } else if (!strcmp(attr->attr.name, "perf_counter_reset")) { if ((val == 0) || (val == 1)) { new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_PERF_RESET; } else { rc = 0; goto out; } } else if (!strcmp(attr->attr.name, "readahead_count")) { if ((val >= 0)) { new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_READAHEAD_COUNT; } else { rc = 0; goto out; } } else if (!strcmp(attr->attr.name, "readahead_size")) { if ((val >= 0)) { new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_READAHEAD_SIZE; } else { rc = 0; goto out; } } else if (!strcmp(attr->attr.name, "readahead_count_size")) { int val1, val2; rc = sscanf(buf, "%d %d", &val1, &val2); if (rc < 2) { rc = 0; goto out; } if ((val1 >= 0) && (val2 >= 0)) { new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_READAHEAD_COUNT_SIZE; } else { rc = 0; goto out; } new_op->upcall.req.param.u.value32[0] = val1; new_op->upcall.req.param.u.value32[1] = val2; goto value_set; } else if (!strcmp(attr->attr.name, "readahead_readcnt")) { if ((val >= 0)) { new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_READAHEAD_READCNT; } else { rc = 0; goto out; } } } else if (!strcmp(kobj->name, ACACHE_KOBJ_ID)) { if (!strcmp(attr->attr.name, "hard_limit")) { if (val > -1) { new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_ACACHE_HARD_LIMIT; } else { rc = 0; goto out; } } else if (!strcmp(attr->attr.name, "soft_limit")) { if (val > -1) { new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_ACACHE_SOFT_LIMIT; } else { rc = 0; goto out; } } else if (!strcmp(attr->attr.name, "reclaim_percentage")) { if ((val > -1) && (val < 101)) { new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_ACACHE_RECLAIM_PERCENTAGE; } else { rc = 0; goto out; } } else if (!strcmp(attr->attr.name, "timeout_msecs")) { if (val > -1) { new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_ACACHE_TIMEOUT_MSECS; } else { rc = 0; goto out; } } } else if (!strcmp(kobj->name, CAPCACHE_KOBJ_ID)) { if (!strcmp(attr->attr.name, "hard_limit")) { if (val > -1) { new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_HARD_LIMIT; } else { rc = 0; goto out; } } else if (!strcmp(attr->attr.name, "soft_limit")) { if (val > -1) { new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_SOFT_LIMIT; } else { rc = 0; goto out; } } else if (!strcmp(attr->attr.name, 
"reclaim_percentage")) { if ((val > -1) && (val < 101)) { new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_RECLAIM_PERCENTAGE; } else { rc = 0; goto out; } } else if (!strcmp(attr->attr.name, "timeout_secs")) { if (val > -1) { new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_TIMEOUT_SECS; } else { rc = 0; goto out; } } } else if (!strcmp(kobj->name, CCACHE_KOBJ_ID)) { if (!strcmp(attr->attr.name, "hard_limit")) { if (val > -1) { new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_CCACHE_HARD_LIMIT; } else { rc = 0; goto out; } } else if (!strcmp(attr->attr.name, "soft_limit")) { if (val > -1) { new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_CCACHE_SOFT_LIMIT; } else { rc = 0; goto out; } } else if (!strcmp(attr->attr.name, "reclaim_percentage")) { if ((val > -1) && (val < 101)) { new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_CCACHE_RECLAIM_PERCENTAGE; } else { rc = 0; goto out; } } else if (!strcmp(attr->attr.name, "timeout_secs")) { if (val > -1) { new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_CCACHE_TIMEOUT_SECS; } else { rc = 0; goto out; } } } else if (!strcmp(kobj->name, NCACHE_KOBJ_ID)) { if (!strcmp(attr->attr.name, "hard_limit")) { if (val > -1) { new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_NCACHE_HARD_LIMIT; } else { rc = 0; goto out; } } else if (!strcmp(attr->attr.name, "soft_limit")) { if (val > -1) { new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_NCACHE_SOFT_LIMIT; } else { rc = 0; goto out; } } else if (!strcmp(attr->attr.name, "reclaim_percentage")) { if ((val > -1) && (val < 101)) { new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_NCACHE_RECLAIM_PERCENTAGE; } else { rc = 0; goto out; } } else if (!strcmp(attr->attr.name, "timeout_msecs")) { if (val > -1) { new_op->upcall.req.param.op = ORANGEFS_PARAM_REQUEST_OP_NCACHE_TIMEOUT_MSECS; } else { rc = 0; goto out; } } } else { gossip_err("sysfs_service_op_store: unknown kobj_id:%s:\n", kobj->name); rc = -EINVAL; goto out; } new_op->upcall.req.param.u.value64 = val; value_set: /* * The service_operation will return a errno return code on * error, and zero on success. 
*/ rc = service_operation(new_op, "orangefs_param", ORANGEFS_OP_INTERRUPTIBLE); if (rc < 0) { gossip_err("sysfs_service_op_store: service op returned:%d:\n", rc); rc = 0; } else { rc = count; } out: op_release(new_op); if (rc == -ENOMEM || rc == 0) rc = -EINVAL; return rc; } static struct orangefs_attribute op_timeout_secs_attribute = __ATTR(op_timeout_secs, 0664, sysfs_int_show, sysfs_int_store); static struct orangefs_attribute slot_timeout_secs_attribute = __ATTR(slot_timeout_secs, 0664, sysfs_int_show, sysfs_int_store); static struct orangefs_attribute cache_timeout_msecs_attribute = __ATTR(cache_timeout_msecs, 0664, sysfs_int_show, sysfs_int_store); static struct orangefs_attribute dcache_timeout_msecs_attribute = __ATTR(dcache_timeout_msecs, 0664, sysfs_int_show, sysfs_int_store); static struct orangefs_attribute getattr_timeout_msecs_attribute = __ATTR(getattr_timeout_msecs, 0664, sysfs_int_show, sysfs_int_store); static struct orangefs_attribute readahead_count_attribute = __ATTR(readahead_count, 0664, sysfs_service_op_show, sysfs_service_op_store); static struct orangefs_attribute readahead_size_attribute = __ATTR(readahead_size, 0664, sysfs_service_op_show, sysfs_service_op_store); static struct orangefs_attribute readahead_count_size_attribute = __ATTR(readahead_count_size, 0664, sysfs_service_op_show, sysfs_service_op_store); static struct orangefs_attribute readahead_readcnt_attribute = __ATTR(readahead_readcnt, 0664, sysfs_service_op_show, sysfs_service_op_store); static struct orangefs_attribute perf_counter_reset_attribute = __ATTR(perf_counter_reset, 0664, sysfs_service_op_show, sysfs_service_op_store); static struct orangefs_attribute perf_history_size_attribute = __ATTR(perf_history_size, 0664, sysfs_service_op_show, sysfs_service_op_store); static struct orangefs_attribute perf_time_interval_secs_attribute = __ATTR(perf_time_interval_secs, 0664, sysfs_service_op_show, sysfs_service_op_store); static struct attribute *orangefs_default_attrs[] = { &op_timeout_secs_attribute.attr, &slot_timeout_secs_attribute.attr, &cache_timeout_msecs_attribute.attr, &dcache_timeout_msecs_attribute.attr, &getattr_timeout_msecs_attribute.attr, &readahead_count_attribute.attr, &readahead_size_attribute.attr, &readahead_count_size_attribute.attr, &readahead_readcnt_attribute.attr, &perf_counter_reset_attribute.attr, &perf_history_size_attribute.attr, &perf_time_interval_secs_attribute.attr, NULL, }; ATTRIBUTE_GROUPS(orangefs_default); static struct kobject *orangefs_obj; static void orangefs_obj_release(struct kobject *kobj) { kfree(orangefs_obj); orangefs_obj = NULL; } static struct kobj_type orangefs_ktype = { .sysfs_ops = &orangefs_sysfs_ops, .default_groups = orangefs_default_groups, .release = orangefs_obj_release, }; static struct orangefs_attribute acache_hard_limit_attribute = __ATTR(hard_limit, 0664, sysfs_service_op_show, sysfs_service_op_store); static struct orangefs_attribute acache_reclaim_percent_attribute = __ATTR(reclaim_percentage, 0664, sysfs_service_op_show, sysfs_service_op_store); static struct orangefs_attribute acache_soft_limit_attribute = __ATTR(soft_limit, 0664, sysfs_service_op_show, sysfs_service_op_store); static struct orangefs_attribute acache_timeout_msecs_attribute = __ATTR(timeout_msecs, 0664, sysfs_service_op_show, sysfs_service_op_store); static struct attribute *acache_orangefs_default_attrs[] = { &acache_hard_limit_attribute.attr, &acache_reclaim_percent_attribute.attr, &acache_soft_limit_attribute.attr, &acache_timeout_msecs_attribute.attr, NULL, }; 
ATTRIBUTE_GROUPS(acache_orangefs_default); static struct kobject *acache_orangefs_obj; static void acache_orangefs_obj_release(struct kobject *kobj) { kfree(acache_orangefs_obj); acache_orangefs_obj = NULL; } static struct kobj_type acache_orangefs_ktype = { .sysfs_ops = &orangefs_sysfs_ops, .default_groups = acache_orangefs_default_groups, .release = acache_orangefs_obj_release, }; static struct orangefs_attribute capcache_hard_limit_attribute = __ATTR(hard_limit, 0664, sysfs_service_op_show, sysfs_service_op_store); static struct orangefs_attribute capcache_reclaim_percent_attribute = __ATTR(reclaim_percentage, 0664, sysfs_service_op_show, sysfs_service_op_store); static struct orangefs_attribute capcache_soft_limit_attribute = __ATTR(soft_limit, 0664, sysfs_service_op_show, sysfs_service_op_store); static struct orangefs_attribute capcache_timeout_secs_attribute = __ATTR(timeout_secs, 0664, sysfs_service_op_show, sysfs_service_op_store); static struct attribute *capcache_orangefs_default_attrs[] = { &capcache_hard_limit_attribute.attr, &capcache_reclaim_percent_attribute.attr, &capcache_soft_limit_attribute.attr, &capcache_timeout_secs_attribute.attr, NULL, }; ATTRIBUTE_GROUPS(capcache_orangefs_default); static struct kobject *capcache_orangefs_obj; static void capcache_orangefs_obj_release(struct kobject *kobj) { kfree(capcache_orangefs_obj); capcache_orangefs_obj = NULL; } static struct kobj_type capcache_orangefs_ktype = { .sysfs_ops = &orangefs_sysfs_ops, .default_groups = capcache_orangefs_default_groups, .release = capcache_orangefs_obj_release, }; static struct orangefs_attribute ccache_hard_limit_attribute = __ATTR(hard_limit, 0664, sysfs_service_op_show, sysfs_service_op_store); static struct orangefs_attribute ccache_reclaim_percent_attribute = __ATTR(reclaim_percentage, 0664, sysfs_service_op_show, sysfs_service_op_store); static struct orangefs_attribute ccache_soft_limit_attribute = __ATTR(soft_limit, 0664, sysfs_service_op_show, sysfs_service_op_store); static struct orangefs_attribute ccache_timeout_secs_attribute = __ATTR(timeout_secs, 0664, sysfs_service_op_show, sysfs_service_op_store); static struct attribute *ccache_orangefs_default_attrs[] = { &ccache_hard_limit_attribute.attr, &ccache_reclaim_percent_attribute.attr, &ccache_soft_limit_attribute.attr, &ccache_timeout_secs_attribute.attr, NULL, }; ATTRIBUTE_GROUPS(ccache_orangefs_default); static struct kobject *ccache_orangefs_obj; static void ccache_orangefs_obj_release(struct kobject *kobj) { kfree(ccache_orangefs_obj); ccache_orangefs_obj = NULL; } static struct kobj_type ccache_orangefs_ktype = { .sysfs_ops = &orangefs_sysfs_ops, .default_groups = ccache_orangefs_default_groups, .release = ccache_orangefs_obj_release, }; static struct orangefs_attribute ncache_hard_limit_attribute = __ATTR(hard_limit, 0664, sysfs_service_op_show, sysfs_service_op_store); static struct orangefs_attribute ncache_reclaim_percent_attribute = __ATTR(reclaim_percentage, 0664, sysfs_service_op_show, sysfs_service_op_store); static struct orangefs_attribute ncache_soft_limit_attribute = __ATTR(soft_limit, 0664, sysfs_service_op_show, sysfs_service_op_store); static struct orangefs_attribute ncache_timeout_msecs_attribute = __ATTR(timeout_msecs, 0664, sysfs_service_op_show, sysfs_service_op_store); static struct attribute *ncache_orangefs_default_attrs[] = { &ncache_hard_limit_attribute.attr, &ncache_reclaim_percent_attribute.attr, &ncache_soft_limit_attribute.attr, &ncache_timeout_msecs_attribute.attr, NULL, }; 
ATTRIBUTE_GROUPS(ncache_orangefs_default); static struct kobject *ncache_orangefs_obj; static void ncache_orangefs_obj_release(struct kobject *kobj) { kfree(ncache_orangefs_obj); ncache_orangefs_obj = NULL; } static struct kobj_type ncache_orangefs_ktype = { .sysfs_ops = &orangefs_sysfs_ops, .default_groups = ncache_orangefs_default_groups, .release = ncache_orangefs_obj_release, }; static struct orangefs_attribute pc_acache_attribute = __ATTR(acache, 0664, sysfs_service_op_show, NULL); static struct orangefs_attribute pc_capcache_attribute = __ATTR(capcache, 0664, sysfs_service_op_show, NULL); static struct orangefs_attribute pc_ncache_attribute = __ATTR(ncache, 0664, sysfs_service_op_show, NULL); static struct attribute *pc_orangefs_default_attrs[] = { &pc_acache_attribute.attr, &pc_capcache_attribute.attr, &pc_ncache_attribute.attr, NULL, }; ATTRIBUTE_GROUPS(pc_orangefs_default); static struct kobject *pc_orangefs_obj; static void pc_orangefs_obj_release(struct kobject *kobj) { kfree(pc_orangefs_obj); pc_orangefs_obj = NULL; } static struct kobj_type pc_orangefs_ktype = { .sysfs_ops = &orangefs_sysfs_ops, .default_groups = pc_orangefs_default_groups, .release = pc_orangefs_obj_release, }; static struct orangefs_attribute stats_reads_attribute = __ATTR(reads, 0664, sysfs_int_show, NULL); static struct orangefs_attribute stats_writes_attribute = __ATTR(writes, 0664, sysfs_int_show, NULL); static struct attribute *stats_orangefs_default_attrs[] = { &stats_reads_attribute.attr, &stats_writes_attribute.attr, NULL, }; ATTRIBUTE_GROUPS(stats_orangefs_default); static struct kobject *stats_orangefs_obj; static void stats_orangefs_obj_release(struct kobject *kobj) { kfree(stats_orangefs_obj); stats_orangefs_obj = NULL; } static struct kobj_type stats_orangefs_ktype = { .sysfs_ops = &orangefs_sysfs_ops, .default_groups = stats_orangefs_default_groups, .release = stats_orangefs_obj_release, }; int orangefs_sysfs_init(void) { int rc = -EINVAL; gossip_debug(GOSSIP_SYSFS_DEBUG, "orangefs_sysfs_init: start\n"); /* create /sys/fs/orangefs. */ orangefs_obj = kzalloc(sizeof(*orangefs_obj), GFP_KERNEL); if (!orangefs_obj) goto out; rc = kobject_init_and_add(orangefs_obj, &orangefs_ktype, fs_kobj, ORANGEFS_KOBJ_ID); if (rc) goto ofs_obj_bail; kobject_uevent(orangefs_obj, KOBJ_ADD); /* create /sys/fs/orangefs/acache. */ acache_orangefs_obj = kzalloc(sizeof(*acache_orangefs_obj), GFP_KERNEL); if (!acache_orangefs_obj) { rc = -EINVAL; goto ofs_obj_bail; } rc = kobject_init_and_add(acache_orangefs_obj, &acache_orangefs_ktype, orangefs_obj, ACACHE_KOBJ_ID); if (rc) goto acache_obj_bail; kobject_uevent(acache_orangefs_obj, KOBJ_ADD); /* create /sys/fs/orangefs/capcache. */ capcache_orangefs_obj = kzalloc(sizeof(*capcache_orangefs_obj), GFP_KERNEL); if (!capcache_orangefs_obj) { rc = -EINVAL; goto acache_obj_bail; } rc = kobject_init_and_add(capcache_orangefs_obj, &capcache_orangefs_ktype, orangefs_obj, CAPCACHE_KOBJ_ID); if (rc) goto capcache_obj_bail; kobject_uevent(capcache_orangefs_obj, KOBJ_ADD); /* create /sys/fs/orangefs/ccache. */ ccache_orangefs_obj = kzalloc(sizeof(*ccache_orangefs_obj), GFP_KERNEL); if (!ccache_orangefs_obj) { rc = -EINVAL; goto capcache_obj_bail; } rc = kobject_init_and_add(ccache_orangefs_obj, &ccache_orangefs_ktype, orangefs_obj, CCACHE_KOBJ_ID); if (rc) goto ccache_obj_bail; kobject_uevent(ccache_orangefs_obj, KOBJ_ADD); /* create /sys/fs/orangefs/ncache. 
*/ ncache_orangefs_obj = kzalloc(sizeof(*ncache_orangefs_obj), GFP_KERNEL); if (!ncache_orangefs_obj) { rc = -EINVAL; goto ccache_obj_bail; } rc = kobject_init_and_add(ncache_orangefs_obj, &ncache_orangefs_ktype, orangefs_obj, NCACHE_KOBJ_ID); if (rc) goto ncache_obj_bail; kobject_uevent(ncache_orangefs_obj, KOBJ_ADD); /* create /sys/fs/orangefs/perf_counters. */ pc_orangefs_obj = kzalloc(sizeof(*pc_orangefs_obj), GFP_KERNEL); if (!pc_orangefs_obj) { rc = -EINVAL; goto ncache_obj_bail; } rc = kobject_init_and_add(pc_orangefs_obj, &pc_orangefs_ktype, orangefs_obj, "perf_counters"); if (rc) goto pc_obj_bail; kobject_uevent(pc_orangefs_obj, KOBJ_ADD); /* create /sys/fs/orangefs/stats. */ stats_orangefs_obj = kzalloc(sizeof(*stats_orangefs_obj), GFP_KERNEL); if (!stats_orangefs_obj) { rc = -EINVAL; goto pc_obj_bail; } rc = kobject_init_and_add(stats_orangefs_obj, &stats_orangefs_ktype, orangefs_obj, STATS_KOBJ_ID); if (rc) goto stats_obj_bail; kobject_uevent(stats_orangefs_obj, KOBJ_ADD); goto out; stats_obj_bail: kobject_put(stats_orangefs_obj); pc_obj_bail: kobject_put(pc_orangefs_obj); ncache_obj_bail: kobject_put(ncache_orangefs_obj); ccache_obj_bail: kobject_put(ccache_orangefs_obj); capcache_obj_bail: kobject_put(capcache_orangefs_obj); acache_obj_bail: kobject_put(acache_orangefs_obj); ofs_obj_bail: kobject_put(orangefs_obj); out: return rc; } void orangefs_sysfs_exit(void) { gossip_debug(GOSSIP_SYSFS_DEBUG, "orangefs_sysfs_exit: start\n"); kobject_put(acache_orangefs_obj); kobject_put(capcache_orangefs_obj); kobject_put(ccache_orangefs_obj); kobject_put(ncache_orangefs_obj); kobject_put(pc_orangefs_obj); kobject_put(stats_orangefs_obj); kobject_put(orangefs_obj); }
linux-master
fs/orangefs/orangefs-sysfs.c
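Since the block comment at the top of this file doubles as the sysfs ABI description, a tiny userspace probe is the easiest way to see the show/store paths in action. The sketch below is an illustrative helper, not something shipped with OrangeFS: it reads a few of the documented integer attributes (served by sysfs_int_show) and writes a 1 into perf_counter_reset (which goes through sysfs_service_op_store and therefore only succeeds while the userspace client daemon is running).

/*
 * Userspace probe (illustrative, not part of the module) for the sysfs
 * attributes documented in Documentation/ABI/stable/sysfs-fs-orangefs.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static void show(const char *path)
{
	char buf[64];
	ssize_t n;
	int fd = open(path, O_RDONLY);

	if (fd < 0) {
		perror(path);
		return;
	}
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("%s = %s", path, buf);	/* sysfs values already end in '\n' */
	}
	close(fd);
}

int main(void)
{
	int fd;

	show("/sys/fs/orangefs/op_timeout_secs");
	show("/sys/fs/orangefs/slot_timeout_secs");
	show("/sys/fs/orangefs/cache_timeout_msecs");

	/* equivalent of: echo 1 > /sys/fs/orangefs/perf_counter_reset */
	fd = open("/sys/fs/orangefs/perf_counter_reset", O_WRONLY);
	if (fd < 0) {
		perror("perf_counter_reset");
		return 1;
	}
	if (write(fd, "1\n", 2) != 2)
		perror("write");
	close(fd);
	return 0;
}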
// SPDX-License-Identifier: GPL-2.0 /* * (C) 2001 Clemson University and The University of Chicago * * See COPYING in top-level directory. */ #include "protocol.h" #include "orangefs-kernel.h" #include "orangefs-bufmap.h" struct slot_map { int c; wait_queue_head_t q; int count; unsigned long *map; }; static struct slot_map rw_map = { .c = -1, .q = __WAIT_QUEUE_HEAD_INITIALIZER(rw_map.q) }; static struct slot_map readdir_map = { .c = -1, .q = __WAIT_QUEUE_HEAD_INITIALIZER(readdir_map.q) }; static void install(struct slot_map *m, int count, unsigned long *map) { spin_lock(&m->q.lock); m->c = m->count = count; m->map = map; wake_up_all_locked(&m->q); spin_unlock(&m->q.lock); } static void mark_killed(struct slot_map *m) { spin_lock(&m->q.lock); m->c -= m->count + 1; spin_unlock(&m->q.lock); } static void run_down(struct slot_map *m) { DEFINE_WAIT(wait); spin_lock(&m->q.lock); if (m->c != -1) { for (;;) { if (likely(list_empty(&wait.entry))) __add_wait_queue_entry_tail(&m->q, &wait); set_current_state(TASK_UNINTERRUPTIBLE); if (m->c == -1) break; spin_unlock(&m->q.lock); schedule(); spin_lock(&m->q.lock); } __remove_wait_queue(&m->q, &wait); __set_current_state(TASK_RUNNING); } m->map = NULL; spin_unlock(&m->q.lock); } static void put(struct slot_map *m, int slot) { int v; spin_lock(&m->q.lock); __clear_bit(slot, m->map); v = ++m->c; if (v > 0) wake_up_locked(&m->q); if (unlikely(v == -1)) /* finished dying */ wake_up_all_locked(&m->q); spin_unlock(&m->q.lock); } static int wait_for_free(struct slot_map *m) { long left = slot_timeout_secs * HZ; DEFINE_WAIT(wait); do { long n = left, t; if (likely(list_empty(&wait.entry))) __add_wait_queue_entry_tail_exclusive(&m->q, &wait); set_current_state(TASK_INTERRUPTIBLE); if (m->c > 0) break; if (m->c < 0) { /* we are waiting for map to be installed */ /* it would better be there soon, or we go away */ if (n > ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS * HZ) n = ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS * HZ; } spin_unlock(&m->q.lock); t = schedule_timeout(n); spin_lock(&m->q.lock); if (unlikely(!t) && n != left && m->c < 0) left = t; else left = t + (left - n); if (signal_pending(current)) left = -EINTR; } while (left > 0); if (!list_empty(&wait.entry)) list_del(&wait.entry); else if (left <= 0 && waitqueue_active(&m->q)) __wake_up_locked_key(&m->q, TASK_INTERRUPTIBLE, NULL); __set_current_state(TASK_RUNNING); if (likely(left > 0)) return 0; return left < 0 ? 
-EINTR : -ETIMEDOUT; } static int get(struct slot_map *m) { int res = 0; spin_lock(&m->q.lock); if (unlikely(m->c <= 0)) res = wait_for_free(m); if (likely(!res)) { m->c--; res = find_first_zero_bit(m->map, m->count); __set_bit(res, m->map); } spin_unlock(&m->q.lock); return res; } /* used to describe mapped buffers */ struct orangefs_bufmap_desc { void __user *uaddr; /* user space address pointer */ struct page **page_array; /* array of mapped pages */ int array_count; /* size of above arrays */ struct list_head list_link; }; static struct orangefs_bufmap { int desc_size; int desc_shift; int desc_count; int total_size; int page_count; struct page **page_array; struct orangefs_bufmap_desc *desc_array; /* array to track usage of buffer descriptors */ unsigned long *buffer_index_array; /* array to track usage of buffer descriptors for readdir */ #define N DIV_ROUND_UP(ORANGEFS_READDIR_DEFAULT_DESC_COUNT, BITS_PER_LONG) unsigned long readdir_index_array[N]; #undef N } *__orangefs_bufmap; static DEFINE_SPINLOCK(orangefs_bufmap_lock); static void orangefs_bufmap_unmap(struct orangefs_bufmap *bufmap) { unpin_user_pages(bufmap->page_array, bufmap->page_count); } static void orangefs_bufmap_free(struct orangefs_bufmap *bufmap) { kfree(bufmap->page_array); kfree(bufmap->desc_array); bitmap_free(bufmap->buffer_index_array); kfree(bufmap); } /* * XXX: Can the size and shift change while the caller gives up the * XXX: lock between calling this and doing something useful? */ int orangefs_bufmap_size_query(void) { struct orangefs_bufmap *bufmap; int size = 0; spin_lock(&orangefs_bufmap_lock); bufmap = __orangefs_bufmap; if (bufmap) size = bufmap->desc_size; spin_unlock(&orangefs_bufmap_lock); return size; } int orangefs_bufmap_shift_query(void) { struct orangefs_bufmap *bufmap; int shift = 0; spin_lock(&orangefs_bufmap_lock); bufmap = __orangefs_bufmap; if (bufmap) shift = bufmap->desc_shift; spin_unlock(&orangefs_bufmap_lock); return shift; } static DECLARE_WAIT_QUEUE_HEAD(bufmap_waitq); static DECLARE_WAIT_QUEUE_HEAD(readdir_waitq); static struct orangefs_bufmap * orangefs_bufmap_alloc(struct ORANGEFS_dev_map_desc *user_desc) { struct orangefs_bufmap *bufmap; bufmap = kzalloc(sizeof(*bufmap), GFP_KERNEL); if (!bufmap) goto out; bufmap->total_size = user_desc->total_size; bufmap->desc_count = user_desc->count; bufmap->desc_size = user_desc->size; bufmap->desc_shift = ilog2(bufmap->desc_size); bufmap->buffer_index_array = bitmap_zalloc(bufmap->desc_count, GFP_KERNEL); if (!bufmap->buffer_index_array) goto out_free_bufmap; bufmap->desc_array = kcalloc(bufmap->desc_count, sizeof(struct orangefs_bufmap_desc), GFP_KERNEL); if (!bufmap->desc_array) goto out_free_index_array; bufmap->page_count = bufmap->total_size / PAGE_SIZE; /* allocate storage to track our page mappings */ bufmap->page_array = kcalloc(bufmap->page_count, sizeof(struct page *), GFP_KERNEL); if (!bufmap->page_array) goto out_free_desc_array; return bufmap; out_free_desc_array: kfree(bufmap->desc_array); out_free_index_array: bitmap_free(bufmap->buffer_index_array); out_free_bufmap: kfree(bufmap); out: return NULL; } static int orangefs_bufmap_map(struct orangefs_bufmap *bufmap, struct ORANGEFS_dev_map_desc *user_desc) { int pages_per_desc = bufmap->desc_size / PAGE_SIZE; int offset = 0, ret, i; /* map the pages */ ret = pin_user_pages_fast((unsigned long)user_desc->ptr, bufmap->page_count, FOLL_WRITE, bufmap->page_array); if (ret < 0) return ret; if (ret != bufmap->page_count) { gossip_err("orangefs error: asked for %d pages, only got 
%d.\n", bufmap->page_count, ret); for (i = 0; i < ret; i++) { SetPageError(bufmap->page_array[i]); unpin_user_page(bufmap->page_array[i]); } return -ENOMEM; } /* * ideally we want to get kernel space pointers for each page, but * we can't kmap that many pages at once if highmem is being used. * so instead, we just kmap/kunmap the page address each time the * kaddr is needed. */ for (i = 0; i < bufmap->page_count; i++) flush_dcache_page(bufmap->page_array[i]); /* build a list of available descriptors */ for (offset = 0, i = 0; i < bufmap->desc_count; i++) { bufmap->desc_array[i].page_array = &bufmap->page_array[offset]; bufmap->desc_array[i].array_count = pages_per_desc; bufmap->desc_array[i].uaddr = (user_desc->ptr + (i * pages_per_desc * PAGE_SIZE)); offset += pages_per_desc; } return 0; } /* * orangefs_bufmap_initialize() * * initializes the mapped buffer interface * * returns 0 on success, -errno on failure */ int orangefs_bufmap_initialize(struct ORANGEFS_dev_map_desc *user_desc) { struct orangefs_bufmap *bufmap; int ret = -EINVAL; gossip_debug(GOSSIP_BUFMAP_DEBUG, "orangefs_bufmap_initialize: called (ptr (" "%p) sz (%d) cnt(%d).\n", user_desc->ptr, user_desc->size, user_desc->count); if (user_desc->total_size < 0 || user_desc->size < 0 || user_desc->count < 0) goto out; /* * sanity check alignment and size of buffer that caller wants to * work with */ if (PAGE_ALIGN((unsigned long)user_desc->ptr) != (unsigned long)user_desc->ptr) { gossip_err("orangefs error: memory alignment (front). %p\n", user_desc->ptr); goto out; } if (PAGE_ALIGN(((unsigned long)user_desc->ptr + user_desc->total_size)) != (unsigned long)(user_desc->ptr + user_desc->total_size)) { gossip_err("orangefs error: memory alignment (back).(%p + %d)\n", user_desc->ptr, user_desc->total_size); goto out; } if (user_desc->total_size != (user_desc->size * user_desc->count)) { gossip_err("orangefs error: user provided an oddly sized buffer: (%d, %d, %d)\n", user_desc->total_size, user_desc->size, user_desc->count); goto out; } if ((user_desc->size % PAGE_SIZE) != 0) { gossip_err("orangefs error: bufmap size not page size divisible (%d).\n", user_desc->size); goto out; } ret = -ENOMEM; bufmap = orangefs_bufmap_alloc(user_desc); if (!bufmap) goto out; ret = orangefs_bufmap_map(bufmap, user_desc); if (ret) goto out_free_bufmap; spin_lock(&orangefs_bufmap_lock); if (__orangefs_bufmap) { spin_unlock(&orangefs_bufmap_lock); gossip_err("orangefs: error: bufmap already initialized.\n"); ret = -EINVAL; goto out_unmap_bufmap; } __orangefs_bufmap = bufmap; install(&rw_map, bufmap->desc_count, bufmap->buffer_index_array); install(&readdir_map, ORANGEFS_READDIR_DEFAULT_DESC_COUNT, bufmap->readdir_index_array); spin_unlock(&orangefs_bufmap_lock); gossip_debug(GOSSIP_BUFMAP_DEBUG, "orangefs_bufmap_initialize: exiting normally\n"); return 0; out_unmap_bufmap: orangefs_bufmap_unmap(bufmap); out_free_bufmap: orangefs_bufmap_free(bufmap); out: return ret; } /* * orangefs_bufmap_finalize() * * shuts down the mapped buffer interface and releases any resources * associated with it * * no return value */ void orangefs_bufmap_finalize(void) { struct orangefs_bufmap *bufmap = __orangefs_bufmap; if (!bufmap) return; gossip_debug(GOSSIP_BUFMAP_DEBUG, "orangefs_bufmap_finalize: called\n"); mark_killed(&rw_map); mark_killed(&readdir_map); gossip_debug(GOSSIP_BUFMAP_DEBUG, "orangefs_bufmap_finalize: exiting normally\n"); } void orangefs_bufmap_run_down(void) { struct orangefs_bufmap *bufmap = __orangefs_bufmap; if (!bufmap) return; run_down(&rw_map); 
run_down(&readdir_map); spin_lock(&orangefs_bufmap_lock); __orangefs_bufmap = NULL; spin_unlock(&orangefs_bufmap_lock); orangefs_bufmap_unmap(bufmap); orangefs_bufmap_free(bufmap); } /* * orangefs_bufmap_get() * * gets a free mapped buffer descriptor, will sleep until one becomes * available if necessary * * returns slot on success, -errno on failure */ int orangefs_bufmap_get(void) { return get(&rw_map); } /* * orangefs_bufmap_put() * * returns a mapped buffer descriptor to the collection * * no return value */ void orangefs_bufmap_put(int buffer_index) { put(&rw_map, buffer_index); } /* * orangefs_readdir_index_get() * * gets a free descriptor, will sleep until one becomes * available if necessary. * Although the readdir buffers are not mapped into kernel space * we could do that at a later point of time. Regardless, these * indices are used by the client-core. * * returns slot on success, -errno on failure */ int orangefs_readdir_index_get(void) { return get(&readdir_map); } void orangefs_readdir_index_put(int buffer_index) { put(&readdir_map, buffer_index); } /* * we've been handed an iovec, we need to copy it to * the shared memory descriptor at "buffer_index". */ int orangefs_bufmap_copy_from_iovec(struct iov_iter *iter, int buffer_index, size_t size) { struct orangefs_bufmap_desc *to; int i; gossip_debug(GOSSIP_BUFMAP_DEBUG, "%s: buffer_index:%d: size:%zu:\n", __func__, buffer_index, size); to = &__orangefs_bufmap->desc_array[buffer_index]; for (i = 0; size; i++) { struct page *page = to->page_array[i]; size_t n = size; if (n > PAGE_SIZE) n = PAGE_SIZE; if (copy_page_from_iter(page, 0, n, iter) != n) return -EFAULT; size -= n; } return 0; } /* * we've been handed an iovec, we need to fill it from * the shared memory descriptor at "buffer_index". */ int orangefs_bufmap_copy_to_iovec(struct iov_iter *iter, int buffer_index, size_t size) { struct orangefs_bufmap_desc *from; int i; from = &__orangefs_bufmap->desc_array[buffer_index]; gossip_debug(GOSSIP_BUFMAP_DEBUG, "%s: buffer_index:%d: size:%zu:\n", __func__, buffer_index, size); for (i = 0; size; i++) { struct page *page = from->page_array[i]; size_t n = size; if (n > PAGE_SIZE) n = PAGE_SIZE; n = copy_page_to_iter(page, 0, n, iter); if (!n) return -EFAULT; size -= n; } return 0; } void orangefs_bufmap_page_fill(void *page_to, int buffer_index, int slot_index) { struct orangefs_bufmap_desc *from; void *page_from; from = &__orangefs_bufmap->desc_array[buffer_index]; page_from = kmap_atomic(from->page_array[slot_index]); memcpy(page_to, page_from, PAGE_SIZE); kunmap_atomic(page_from); }
linux-master
fs/orangefs/orangefs-bufmap.c
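orangefs_bufmap_initialize() rejects descriptors whose start is not page aligned, whose per-slot size is not a whole number of pages, or whose total_size does not equal size * count. The userspace sketch below builds a buffer layout that would satisfy those checks; struct dev_map_desc and the four-pages-per-slot, sixteen-slot geometry are illustrative stand-ins, and the device call that actually hands the descriptor to the kernel module is deliberately left out.

/*
 * Userspace sketch (not the real client-core) of a shared buffer layout
 * that would pass the sanity checks in orangefs_bufmap_initialize().
 */
#define _POSIX_C_SOURCE 200112L
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct dev_map_desc {		/* illustrative stand-in for struct ORANGEFS_dev_map_desc */
	void *ptr;
	int total_size;
	int size;		/* bytes per shared buffer ("slot") */
	int count;		/* number of slots */
};

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	struct dev_map_desc desc = {
		.size = 4 * (int)page,	/* each slot spans four whole pages */
		.count = 16,
	};

	desc.total_size = desc.size * desc.count;

	/* page alignment is what the PAGE_ALIGN(ptr) == ptr check enforces */
	if (posix_memalign(&desc.ptr, (size_t)page, (size_t)desc.total_size)) {
		perror("posix_memalign");
		return 1;
	}

	/* mirror the kernel-side sanity checks before handing the region over */
	if (desc.size % page || desc.total_size != desc.size * desc.count) {
		fprintf(stderr, "descriptor layout would be rejected\n");
		return 1;
	}

	printf("shared region: %d slots of %d bytes, %d bytes total\n",
	       desc.count, desc.size, desc.total_size);
	free(desc.ptr);
	return 0;
}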
// SPDX-License-Identifier: GPL-2.0 /* * (C) 2001 Clemson University and The University of Chicago * * See COPYING in top-level directory. */ /* * Implementation of dentry (directory cache) functions. */ #include "protocol.h" #include "orangefs-kernel.h" /* Returns 1 if dentry can still be trusted, else 0. */ static int orangefs_revalidate_lookup(struct dentry *dentry) { struct dentry *parent_dentry = dget_parent(dentry); struct inode *parent_inode = parent_dentry->d_inode; struct orangefs_inode_s *parent = ORANGEFS_I(parent_inode); struct inode *inode = dentry->d_inode; struct orangefs_kernel_op_s *new_op; int ret = 0; int err = 0; gossip_debug(GOSSIP_DCACHE_DEBUG, "%s: attempting lookup.\n", __func__); new_op = op_alloc(ORANGEFS_VFS_OP_LOOKUP); if (!new_op) { ret = -ENOMEM; goto out_put_parent; } new_op->upcall.req.lookup.sym_follow = ORANGEFS_LOOKUP_LINK_NO_FOLLOW; new_op->upcall.req.lookup.parent_refn = parent->refn; strncpy(new_op->upcall.req.lookup.d_name, dentry->d_name.name, ORANGEFS_NAME_MAX - 1); gossip_debug(GOSSIP_DCACHE_DEBUG, "%s:%s:%d interrupt flag [%d]\n", __FILE__, __func__, __LINE__, get_interruptible_flag(parent_inode)); err = service_operation(new_op, "orangefs_lookup", get_interruptible_flag(parent_inode)); /* Positive dentry: reject if error or not the same inode. */ if (inode) { if (err) { gossip_debug(GOSSIP_DCACHE_DEBUG, "%s:%s:%d lookup failure.\n", __FILE__, __func__, __LINE__); goto out_drop; } if (!match_handle(new_op->downcall.resp.lookup.refn.khandle, inode)) { gossip_debug(GOSSIP_DCACHE_DEBUG, "%s:%s:%d no match.\n", __FILE__, __func__, __LINE__); goto out_drop; } /* Negative dentry: reject if success or error other than ENOENT. */ } else { gossip_debug(GOSSIP_DCACHE_DEBUG, "%s: negative dentry.\n", __func__); if (!err || err != -ENOENT) { if (new_op->downcall.status != 0) gossip_debug(GOSSIP_DCACHE_DEBUG, "%s:%s:%d lookup failure.\n", __FILE__, __func__, __LINE__); goto out_drop; } } orangefs_set_timeout(dentry); ret = 1; out_release_op: op_release(new_op); out_put_parent: dput(parent_dentry); return ret; out_drop: gossip_debug(GOSSIP_DCACHE_DEBUG, "%s:%s:%d revalidate failed\n", __FILE__, __func__, __LINE__); goto out_release_op; } /* * Verify that dentry is valid. * * Should return 1 if dentry can still be trusted, else 0. */ static int orangefs_d_revalidate(struct dentry *dentry, unsigned int flags) { int ret; unsigned long time = (unsigned long) dentry->d_fsdata; if (time_before(jiffies, time)) return 1; if (flags & LOOKUP_RCU) return -ECHILD; gossip_debug(GOSSIP_DCACHE_DEBUG, "%s: called on dentry %p.\n", __func__, dentry); /* skip root handle lookups. */ if (dentry->d_inode && is_root_handle(dentry->d_inode)) return 1; /* * If this passes, the positive dentry still exists or the negative * dentry still does not exist. */ if (!orangefs_revalidate_lookup(dentry)) return 0; /* We do not need to continue with negative dentries. */ if (!dentry->d_inode) { gossip_debug(GOSSIP_DCACHE_DEBUG, "%s: negative dentry or positive dentry and inode valid.\n", __func__); return 1; } /* Now we must perform a getattr to validate the inode contents. */ ret = orangefs_inode_check_changed(dentry->d_inode); if (ret < 0) { gossip_debug(GOSSIP_DCACHE_DEBUG, "%s:%s:%d getattr failure.\n", __FILE__, __func__, __LINE__); return 0; } return !ret; } const struct dentry_operations orangefs_dentry_operations = { .d_revalidate = orangefs_d_revalidate, };
linux-master
fs/orangefs/dcache.c
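The branch structure of orangefs_revalidate_lookup() reduces to a small decision table: a positive dentry is kept only when the server lookup succeeds and returns the same handle, and a negative dentry is kept only when the lookup fails with ENOENT. The snippet below restates that table as a standalone C function with a few spot checks; dentry_still_trusted() is purely illustrative and does not exist in the driver.

/*
 * Illustrative restatement of the trust decision made by
 * orangefs_revalidate_lookup() above.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool dentry_still_trusted(bool positive, int lookup_err, bool same_handle)
{
	if (positive)
		return lookup_err == 0 && same_handle;	/* must still resolve to the same handle */
	return lookup_err == -ENOENT;			/* the name must still not exist */
}

int main(void)
{
	printf("positive, lookup ok, same handle  -> %d\n",
	       dentry_still_trusted(true, 0, true));
	printf("positive, lookup ok, new handle   -> %d\n",
	       dentry_still_trusted(true, 0, false));
	printf("positive, lookup failed           -> %d\n",
	       dentry_still_trusted(true, -EIO, false));
	printf("negative, lookup says ENOENT      -> %d\n",
	       dentry_still_trusted(false, -ENOENT, false));
	printf("negative, name now exists         -> %d\n",
	       dentry_still_trusted(false, 0, false));
	return 0;
}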
// SPDX-License-Identifier: GPL-2.0-only /* * This file contians vfs address (mmap) ops for 9P2000. * * Copyright (C) 2005 by Eric Van Hensbergen <[email protected]> * Copyright (C) 2002 by Ron Minnich <[email protected]> */ #include <linux/module.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/stat.h> #include <linux/string.h> #include <linux/pagemap.h> #include <linux/sched.h> #include <linux/swap.h> #include <linux/uio.h> #include <linux/netfs.h> #include <net/9p/9p.h> #include <net/9p/client.h> #include "v9fs.h" #include "v9fs_vfs.h" #include "cache.h" #include "fid.h" /** * v9fs_issue_read - Issue a read from 9P * @subreq: The read to make */ static void v9fs_issue_read(struct netfs_io_subrequest *subreq) { struct netfs_io_request *rreq = subreq->rreq; struct p9_fid *fid = rreq->netfs_priv; struct iov_iter to; loff_t pos = subreq->start + subreq->transferred; size_t len = subreq->len - subreq->transferred; int total, err; iov_iter_xarray(&to, ITER_DEST, &rreq->mapping->i_pages, pos, len); total = p9_client_read(fid, pos, &to, &err); /* if we just extended the file size, any portion not in * cache won't be on server and is zeroes */ __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags); netfs_subreq_terminated(subreq, err ?: total, false); } /** * v9fs_init_request - Initialise a read request * @rreq: The read request * @file: The file being read from */ static int v9fs_init_request(struct netfs_io_request *rreq, struct file *file) { struct p9_fid *fid = file->private_data; BUG_ON(!fid); /* we might need to read from a fid that was opened write-only * for read-modify-write of page cache, use the writeback fid * for that */ WARN_ON(rreq->origin == NETFS_READ_FOR_WRITE && !(fid->mode & P9_ORDWR)); p9_fid_get(fid); rreq->netfs_priv = fid; return 0; } /** * v9fs_free_request - Cleanup request initialized by v9fs_init_rreq * @rreq: The I/O request to clean up */ static void v9fs_free_request(struct netfs_io_request *rreq) { struct p9_fid *fid = rreq->netfs_priv; p9_fid_put(fid); } /** * v9fs_begin_cache_operation - Begin a cache operation for a read * @rreq: The read request */ static int v9fs_begin_cache_operation(struct netfs_io_request *rreq) { #ifdef CONFIG_9P_FSCACHE struct fscache_cookie *cookie = v9fs_inode_cookie(V9FS_I(rreq->inode)); return fscache_begin_read_operation(&rreq->cache_resources, cookie); #else return -ENOBUFS; #endif } const struct netfs_request_ops v9fs_req_ops = { .init_request = v9fs_init_request, .free_request = v9fs_free_request, .begin_cache_operation = v9fs_begin_cache_operation, .issue_read = v9fs_issue_read, }; /** * v9fs_release_folio - release the private state associated with a folio * @folio: The folio to be released * @gfp: The caller's allocation restrictions * * Returns true if the page can be released, false otherwise. 
*/ static bool v9fs_release_folio(struct folio *folio, gfp_t gfp) { if (folio_test_private(folio)) return false; #ifdef CONFIG_9P_FSCACHE if (folio_test_fscache(folio)) { if (current_is_kswapd() || !(gfp & __GFP_FS)) return false; folio_wait_fscache(folio); } fscache_note_page_release(v9fs_inode_cookie(V9FS_I(folio_inode(folio)))); #endif return true; } static void v9fs_invalidate_folio(struct folio *folio, size_t offset, size_t length) { folio_wait_fscache(folio); } #ifdef CONFIG_9P_FSCACHE static void v9fs_write_to_cache_done(void *priv, ssize_t transferred_or_error, bool was_async) { struct v9fs_inode *v9inode = priv; __le32 version; if (IS_ERR_VALUE(transferred_or_error) && transferred_or_error != -ENOBUFS) { version = cpu_to_le32(v9inode->qid.version); fscache_invalidate(v9fs_inode_cookie(v9inode), &version, i_size_read(&v9inode->netfs.inode), 0); } } #endif static int v9fs_vfs_write_folio_locked(struct folio *folio) { struct inode *inode = folio_inode(folio); loff_t start = folio_pos(folio); loff_t i_size = i_size_read(inode); struct iov_iter from; size_t len = folio_size(folio); struct p9_fid *writeback_fid; int err; struct v9fs_inode __maybe_unused *v9inode = V9FS_I(inode); struct fscache_cookie __maybe_unused *cookie = v9fs_inode_cookie(v9inode); if (start >= i_size) return 0; /* Simultaneous truncation occurred */ len = min_t(loff_t, i_size - start, len); iov_iter_xarray(&from, ITER_SOURCE, &folio_mapping(folio)->i_pages, start, len); writeback_fid = v9fs_fid_find_inode(inode, true, INVALID_UID, true); if (!writeback_fid) { WARN_ONCE(1, "folio expected an open fid inode->i_private=%p\n", inode->i_private); return -EINVAL; } folio_wait_fscache(folio); folio_start_writeback(folio); p9_client_write(writeback_fid, start, &from, &err); #ifdef CONFIG_9P_FSCACHE if (err == 0 && fscache_cookie_enabled(cookie) && test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags)) { folio_start_fscache(folio); fscache_write_to_cache(v9fs_inode_cookie(v9inode), folio_mapping(folio), start, len, i_size, v9fs_write_to_cache_done, v9inode, true); } #endif folio_end_writeback(folio); p9_fid_put(writeback_fid); return err; } static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc) { struct folio *folio = page_folio(page); int retval; p9_debug(P9_DEBUG_VFS, "folio %p\n", folio); retval = v9fs_vfs_write_folio_locked(folio); if (retval < 0) { if (retval == -EAGAIN) { folio_redirty_for_writepage(wbc, folio); retval = 0; } else { mapping_set_error(folio_mapping(folio), retval); } } else retval = 0; folio_unlock(folio); return retval; } static int v9fs_launder_folio(struct folio *folio) { int retval; if (folio_clear_dirty_for_io(folio)) { retval = v9fs_vfs_write_folio_locked(folio); if (retval) return retval; } folio_wait_fscache(folio); return 0; } /** * v9fs_direct_IO - 9P address space operation for direct I/O * @iocb: target I/O control block * @iter: The data/buffer to use * * The presence of v9fs_direct_IO() in the address space ops vector * allowes open() O_DIRECT flags which would have failed otherwise. * * In the non-cached mode, we shunt off direct read and write requests before * the VFS gets them, so this method should never be called. * * Direct IO is not 'yet' supported in the cached mode. Hence when * this routine is called through generic_file_aio_read(), the read/write fails * with an error. 
* */ static ssize_t v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) { struct file *file = iocb->ki_filp; loff_t pos = iocb->ki_pos; ssize_t n; int err = 0; if (iov_iter_rw(iter) == WRITE) { n = p9_client_write(file->private_data, pos, iter, &err); if (n) { struct inode *inode = file_inode(file); loff_t i_size = i_size_read(inode); if (pos + n > i_size) inode_add_bytes(inode, pos + n - i_size); } } else { n = p9_client_read(file->private_data, pos, iter, &err); } return n ? n : err; } static int v9fs_write_begin(struct file *filp, struct address_space *mapping, loff_t pos, unsigned int len, struct page **subpagep, void **fsdata) { int retval; struct folio *folio; struct v9fs_inode *v9inode = V9FS_I(mapping->host); p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping); /* Prefetch area to be written into the cache if we're caching this * file. We need to do this before we get a lock on the page in case * there's more than one writer competing for the same cache block. */ retval = netfs_write_begin(&v9inode->netfs, filp, mapping, pos, len, &folio, fsdata); if (retval < 0) return retval; *subpagep = &folio->page; return retval; } static int v9fs_write_end(struct file *filp, struct address_space *mapping, loff_t pos, unsigned int len, unsigned int copied, struct page *subpage, void *fsdata) { loff_t last_pos = pos + copied; struct folio *folio = page_folio(subpage); struct inode *inode = mapping->host; p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping); if (!folio_test_uptodate(folio)) { if (unlikely(copied < len)) { copied = 0; goto out; } folio_mark_uptodate(folio); } /* * No need to use i_size_read() here, the i_size * cannot change under us because we hold the i_mutex. */ if (last_pos > inode->i_size) { inode_add_bytes(inode, last_pos - inode->i_size); i_size_write(inode, last_pos); #ifdef CONFIG_9P_FSCACHE fscache_update_cookie(v9fs_inode_cookie(V9FS_I(inode)), NULL, &last_pos); #endif } folio_mark_dirty(folio); out: folio_unlock(folio); folio_put(folio); return copied; } #ifdef CONFIG_9P_FSCACHE /* * Mark a page as having been made dirty and thus needing writeback. We also * need to pin the cache object to write back to. */ static bool v9fs_dirty_folio(struct address_space *mapping, struct folio *folio) { struct v9fs_inode *v9inode = V9FS_I(mapping->host); return fscache_dirty_folio(mapping, folio, v9fs_inode_cookie(v9inode)); } #else #define v9fs_dirty_folio filemap_dirty_folio #endif const struct address_space_operations v9fs_addr_operations = { .read_folio = netfs_read_folio, .readahead = netfs_readahead, .dirty_folio = v9fs_dirty_folio, .writepage = v9fs_vfs_writepage, .write_begin = v9fs_write_begin, .write_end = v9fs_write_end, .release_folio = v9fs_release_folio, .invalidate_folio = v9fs_invalidate_folio, .launder_folio = v9fs_launder_folio, .direct_IO = v9fs_direct_IO, };
linux-master
fs/9p/vfs_addr.c
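The kerneldoc for v9fs_direct_IO() above points out that merely having the method in v9fs_addr_operations lets open() accept O_DIRECT on 9p files. A minimal userspace sketch of what that enables, assuming a hypothetical file under a 9p mount and 4096-byte alignment (this snippet is illustrative and not part of the kernel sources):

/* Hypothetical userspace sketch: open a file on a 9p mount with O_DIRECT,
 * which the presence of v9fs_direct_IO() permits, and issue one aligned read.
 * The path and the 4096-byte alignment are assumptions. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	void *buf;
	ssize_t n;
	int fd = open("/mnt/9p/data.bin", O_RDONLY | O_DIRECT);

	if (fd < 0) {
		perror("open(O_DIRECT)");
		return 1;
	}
	/* O_DIRECT typically requires aligned buffer, offset and length. */
	if (posix_memalign(&buf, 4096, 4096)) {
		close(fd);
		return 1;
	}
	n = pread(fd, buf, 4096, 0);
	if (n < 0)
		perror("pread");
	else
		printf("read %zd bytes bypassing the page cache\n", n);
	free(buf);
	close(fd);
	return 0;
}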
// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file contains vfs dentry ops for the 9P2000 protocol.
 *
 * Copyright (C) 2004 by Eric Van Hensbergen <[email protected]>
 * Copyright (C) 2002 by Ron Minnich <[email protected]>
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/namei.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>

#include "v9fs.h"
#include "v9fs_vfs.h"
#include "fid.h"

/**
 * v9fs_cached_dentry_delete - called when dentry refcount equals 0
 * @dentry: dentry in question
 *
 */
static int v9fs_cached_dentry_delete(const struct dentry *dentry)
{
	p9_debug(P9_DEBUG_VFS, " dentry: %pd (%p)\n", dentry, dentry);

	/* Don't cache negative dentries */
	if (d_really_is_negative(dentry))
		return 1;
	return 0;
}

/**
 * v9fs_dentry_release - called when dentry is going to be freed
 * @dentry: dentry that is being released
 *
 */
static void v9fs_dentry_release(struct dentry *dentry)
{
	struct hlist_node *p, *n;

	p9_debug(P9_DEBUG_VFS, " dentry: %pd (%p)\n", dentry, dentry);

	hlist_for_each_safe(p, n, (struct hlist_head *)&dentry->d_fsdata)
		p9_fid_put(hlist_entry(p, struct p9_fid, dlist));

	dentry->d_fsdata = NULL;
}

static int v9fs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct p9_fid *fid;
	struct inode *inode;
	struct v9fs_inode *v9inode;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	inode = d_inode(dentry);
	if (!inode)
		goto out_valid;

	v9inode = V9FS_I(inode);
	if (v9inode->cache_validity & V9FS_INO_INVALID_ATTR) {
		int retval;
		struct v9fs_session_info *v9ses;

		fid = v9fs_fid_lookup(dentry);
		if (IS_ERR(fid))
			return PTR_ERR(fid);

		v9ses = v9fs_inode2v9ses(inode);
		if (v9fs_proto_dotl(v9ses))
			retval = v9fs_refresh_inode_dotl(fid, inode);
		else
			retval = v9fs_refresh_inode(fid, inode);
		p9_fid_put(fid);

		if (retval == -ENOENT)
			return 0;
		if (retval < 0)
			return retval;
	}
out_valid:
	return 1;
}

const struct dentry_operations v9fs_cached_dentry_operations = {
	.d_revalidate = v9fs_lookup_revalidate,
	.d_weak_revalidate = v9fs_lookup_revalidate,
	.d_delete = v9fs_cached_dentry_delete,
	.d_release = v9fs_dentry_release,
};

const struct dentry_operations v9fs_dentry_operations = {
	.d_delete = always_delete_dentry,
	.d_release = v9fs_dentry_release,
};
linux-master
fs/9p/vfs_dentry.c
// SPDX-License-Identifier: GPL-2.0-only /* * * Copyright (C) 2004 by Eric Van Hensbergen <[email protected]> * Copyright (C) 2002 by Ron Minnich <[email protected]> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/stat.h> #include <linux/string.h> #include <linux/pagemap.h> #include <linux/mount.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/statfs.h> #include <linux/magic.h> #include <linux/fscache.h> #include <net/9p/9p.h> #include <net/9p/client.h> #include "v9fs.h" #include "v9fs_vfs.h" #include "fid.h" #include "xattr.h" #include "acl.h" static const struct super_operations v9fs_super_ops, v9fs_super_ops_dotl; /** * v9fs_set_super - set the superblock * @s: super block * @data: file system specific data * */ static int v9fs_set_super(struct super_block *s, void *data) { s->s_fs_info = data; return set_anon_super(s, data); } /** * v9fs_fill_super - populate superblock with info * @sb: superblock * @v9ses: session information * @flags: flags propagated from v9fs_mount() * */ static int v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses, int flags) { int ret; sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_blocksize_bits = fls(v9ses->maxdata - 1); sb->s_blocksize = 1 << sb->s_blocksize_bits; sb->s_magic = V9FS_MAGIC; if (v9fs_proto_dotl(v9ses)) { sb->s_op = &v9fs_super_ops_dotl; if (!(v9ses->flags & V9FS_NO_XATTR)) sb->s_xattr = v9fs_xattr_handlers; } else { sb->s_op = &v9fs_super_ops; sb->s_time_max = U32_MAX; } sb->s_time_min = 0; ret = super_setup_bdi(sb); if (ret) return ret; if (!v9ses->cache) { sb->s_bdi->ra_pages = 0; sb->s_bdi->io_pages = 0; } else { sb->s_bdi->ra_pages = v9ses->maxdata >> PAGE_SHIFT; sb->s_bdi->io_pages = v9ses->maxdata >> PAGE_SHIFT; } sb->s_flags |= SB_ACTIVE; #ifdef CONFIG_9P_FS_POSIX_ACL if ((v9ses->flags & V9FS_ACL_MASK) == V9FS_POSIX_ACL) sb->s_flags |= SB_POSIXACL; #endif return 0; } /** * v9fs_mount - mount a superblock * @fs_type: file system type * @flags: mount flags * @dev_name: device name that was mounted * @data: mount options * */ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { struct super_block *sb = NULL; struct inode *inode = NULL; struct dentry *root = NULL; struct v9fs_session_info *v9ses = NULL; umode_t mode = 0777 | S_ISVTX; struct p9_fid *fid; int retval = 0; p9_debug(P9_DEBUG_VFS, "\n"); v9ses = kzalloc(sizeof(struct v9fs_session_info), GFP_KERNEL); if (!v9ses) return ERR_PTR(-ENOMEM); fid = v9fs_session_init(v9ses, dev_name, data); if (IS_ERR(fid)) { retval = PTR_ERR(fid); goto free_session; } sb = sget(fs_type, NULL, v9fs_set_super, flags, v9ses); if (IS_ERR(sb)) { retval = PTR_ERR(sb); goto clunk_fid; } retval = v9fs_fill_super(sb, v9ses, flags); if (retval) goto release_sb; if (v9ses->cache & (CACHE_META|CACHE_LOOSE)) sb->s_d_op = &v9fs_cached_dentry_operations; else sb->s_d_op = &v9fs_dentry_operations; inode = v9fs_get_inode(sb, S_IFDIR | mode, 0); if (IS_ERR(inode)) { retval = PTR_ERR(inode); goto release_sb; } root = d_make_root(inode); if (!root) { retval = -ENOMEM; goto release_sb; } sb->s_root = root; if (v9fs_proto_dotl(v9ses)) { struct p9_stat_dotl *st = NULL; st = p9_client_getattr_dotl(fid, P9_STATS_BASIC); if (IS_ERR(st)) { retval = PTR_ERR(st); goto release_sb; } d_inode(root)->i_ino = v9fs_qid2ino(&st->qid); v9fs_stat2inode_dotl(st, d_inode(root), 0); kfree(st); } else { struct p9_wstat *st = NULL; st = p9_client_stat(fid); if 
(IS_ERR(st)) { retval = PTR_ERR(st); goto release_sb; } d_inode(root)->i_ino = v9fs_qid2ino(&st->qid); v9fs_stat2inode(st, d_inode(root), sb, 0); p9stat_free(st); kfree(st); } retval = v9fs_get_acl(inode, fid); if (retval) goto release_sb; v9fs_fid_add(root, &fid); p9_debug(P9_DEBUG_VFS, " simple set mount, return 0\n"); return dget(sb->s_root); clunk_fid: p9_fid_put(fid); v9fs_session_close(v9ses); free_session: kfree(v9ses); return ERR_PTR(retval); release_sb: /* * we will do the session_close and root dentry release * in the below call. But we need to clunk fid, because we haven't * attached the fid to dentry so it won't get clunked * automatically. */ p9_fid_put(fid); deactivate_locked_super(sb); return ERR_PTR(retval); } /** * v9fs_kill_super - Kill Superblock * @s: superblock * */ static void v9fs_kill_super(struct super_block *s) { struct v9fs_session_info *v9ses = s->s_fs_info; p9_debug(P9_DEBUG_VFS, " %p\n", s); kill_anon_super(s); v9fs_session_cancel(v9ses); v9fs_session_close(v9ses); kfree(v9ses); s->s_fs_info = NULL; p9_debug(P9_DEBUG_VFS, "exiting kill_super\n"); } static void v9fs_umount_begin(struct super_block *sb) { struct v9fs_session_info *v9ses; v9ses = sb->s_fs_info; v9fs_session_begin_cancel(v9ses); } static int v9fs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct v9fs_session_info *v9ses; struct p9_fid *fid; struct p9_rstatfs rs; int res; fid = v9fs_fid_lookup(dentry); if (IS_ERR(fid)) { res = PTR_ERR(fid); goto done; } v9ses = v9fs_dentry2v9ses(dentry); if (v9fs_proto_dotl(v9ses)) { res = p9_client_statfs(fid, &rs); if (res == 0) { buf->f_type = rs.type; buf->f_bsize = rs.bsize; buf->f_blocks = rs.blocks; buf->f_bfree = rs.bfree; buf->f_bavail = rs.bavail; buf->f_files = rs.files; buf->f_ffree = rs.ffree; buf->f_fsid = u64_to_fsid(rs.fsid); buf->f_namelen = rs.namelen; } if (res != -ENOSYS) goto done; } res = simple_statfs(dentry, buf); done: p9_fid_put(fid); return res; } static int v9fs_drop_inode(struct inode *inode) { struct v9fs_session_info *v9ses; v9ses = v9fs_inode2v9ses(inode); if (v9ses->cache & (CACHE_META|CACHE_LOOSE)) return generic_drop_inode(inode); /* * in case of non cached mode always drop the * inode because we want the inode attribute * to always match that on the server. */ return 1; } static int v9fs_write_inode(struct inode *inode, struct writeback_control *wbc) { struct v9fs_inode *v9inode; /* * send an fsync request to server irrespective of * wbc->sync_mode. 
*/ p9_debug(P9_DEBUG_VFS, "%s: inode %p\n", __func__, inode); v9inode = V9FS_I(inode); fscache_unpin_writeback(wbc, v9fs_inode_cookie(v9inode)); return 0; } static int v9fs_write_inode_dotl(struct inode *inode, struct writeback_control *wbc) { struct v9fs_inode *v9inode; v9inode = V9FS_I(inode); p9_debug(P9_DEBUG_VFS, "%s: inode %p\n", __func__, inode); fscache_unpin_writeback(wbc, v9fs_inode_cookie(v9inode)); return 0; } static const struct super_operations v9fs_super_ops = { .alloc_inode = v9fs_alloc_inode, .free_inode = v9fs_free_inode, .statfs = simple_statfs, .evict_inode = v9fs_evict_inode, .show_options = v9fs_show_options, .umount_begin = v9fs_umount_begin, .write_inode = v9fs_write_inode, }; static const struct super_operations v9fs_super_ops_dotl = { .alloc_inode = v9fs_alloc_inode, .free_inode = v9fs_free_inode, .statfs = v9fs_statfs, .drop_inode = v9fs_drop_inode, .evict_inode = v9fs_evict_inode, .show_options = v9fs_show_options, .umount_begin = v9fs_umount_begin, .write_inode = v9fs_write_inode_dotl, }; struct file_system_type v9fs_fs_type = { .name = "9p", .mount = v9fs_mount, .kill_sb = v9fs_kill_super, .owner = THIS_MODULE, .fs_flags = FS_RENAME_DOES_D_MOVE, }; MODULE_ALIAS_FS("9p");
linux-master
fs/9p/vfs_super.c
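v9fs_mount() above builds the session from the device name and the raw option string handed to mount(2). A hedged userspace sketch of driving it: the transport tag, mountpoint, and the trans=/version= options are assumptions handled by the 9p client, while cache= and access= are parsed by v9fs_parse_options() shown later in v9fs.c.

/* Hypothetical userspace sketch of mounting a 9p export. Needs CAP_SYS_ADMIN;
 * the tag "hostshare", the target directory and the transport options are
 * assumptions, not taken from this file. */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* cache= and access= are consumed by v9fs_parse_options();
	 * trans= and version= by the 9p client. */
	const char *opts = "trans=virtio,version=9p2000.L,cache=loose,access=user";

	if (mount("hostshare", "/mnt/9p", "9p", 0, opts)) {
		perror("mount");
		return 1;
	}
	printf("mounted 9p share at /mnt/9p\n");
	return 0;
}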
// SPDX-License-Identifier: LGPL-2.1 /* * Copyright IBM Corporation, 2010 * Author Aneesh Kumar K.V <[email protected]> */ #include <linux/module.h> #include <linux/fs.h> #include <linux/sched.h> #include <linux/uio.h> #include <linux/posix_acl_xattr.h> #include <net/9p/9p.h> #include <net/9p/client.h> #include "fid.h" #include "xattr.h" ssize_t v9fs_fid_xattr_get(struct p9_fid *fid, const char *name, void *buffer, size_t buffer_size) { ssize_t retval; u64 attr_size; struct p9_fid *attr_fid; struct kvec kvec = {.iov_base = buffer, .iov_len = buffer_size}; struct iov_iter to; int err; iov_iter_kvec(&to, ITER_DEST, &kvec, 1, buffer_size); attr_fid = p9_client_xattrwalk(fid, name, &attr_size); if (IS_ERR(attr_fid)) { retval = PTR_ERR(attr_fid); p9_debug(P9_DEBUG_VFS, "p9_client_attrwalk failed %zd\n", retval); return retval; } if (attr_size > buffer_size) { if (buffer_size) retval = -ERANGE; else if (attr_size > SSIZE_MAX) retval = -EOVERFLOW; else /* request to get the attr_size */ retval = attr_size; } else { iov_iter_truncate(&to, attr_size); retval = p9_client_read(attr_fid, 0, &to, &err); if (err) retval = err; } p9_fid_put(attr_fid); return retval; } /* * v9fs_xattr_get() * * Copy an extended attribute into the buffer * provided, or compute the buffer size required. * Buffer is NULL to compute the size of the buffer required. * * Returns a negative error number on failure, or the number of bytes * used / required on success. */ ssize_t v9fs_xattr_get(struct dentry *dentry, const char *name, void *buffer, size_t buffer_size) { struct p9_fid *fid; int ret; p9_debug(P9_DEBUG_VFS, "name = %s value_len = %zu\n", name, buffer_size); fid = v9fs_fid_lookup(dentry); if (IS_ERR(fid)) return PTR_ERR(fid); ret = v9fs_fid_xattr_get(fid, name, buffer, buffer_size); p9_fid_put(fid); return ret; } /* * v9fs_xattr_set() * * Create, replace or remove an extended attribute for this inode. Buffer * is NULL to remove an existing extended attribute, and non-NULL to * either replace an existing extended attribute, or create a new extended * attribute. The flags XATTR_REPLACE and XATTR_CREATE * specify that an extended attribute must exist and must not exist * previous to the call, respectively. * * Returns 0, or a negative error number on failure. 
*/ int v9fs_xattr_set(struct dentry *dentry, const char *name, const void *value, size_t value_len, int flags) { int ret; struct p9_fid *fid; fid = v9fs_fid_lookup(dentry); if (IS_ERR(fid)) return PTR_ERR(fid); ret = v9fs_fid_xattr_set(fid, name, value, value_len, flags); p9_fid_put(fid); return ret; } int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name, const void *value, size_t value_len, int flags) { struct kvec kvec = {.iov_base = (void *)value, .iov_len = value_len}; struct iov_iter from; int retval, err; iov_iter_kvec(&from, ITER_SOURCE, &kvec, 1, value_len); p9_debug(P9_DEBUG_VFS, "name = %s value_len = %zu flags = %d\n", name, value_len, flags); /* Clone it */ fid = clone_fid(fid); if (IS_ERR(fid)) return PTR_ERR(fid); /* * On success fid points to xattr */ retval = p9_client_xattrcreate(fid, name, value_len, flags); if (retval < 0) p9_debug(P9_DEBUG_VFS, "p9_client_xattrcreate failed %d\n", retval); else p9_client_write(fid, 0, &from, &retval); err = p9_fid_put(fid); if (!retval && err) retval = err; return retval; } ssize_t v9fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size) { return v9fs_xattr_get(dentry, NULL, buffer, buffer_size); } static int v9fs_xattr_handler_get(const struct xattr_handler *handler, struct dentry *dentry, struct inode *inode, const char *name, void *buffer, size_t size) { const char *full_name = xattr_full_name(handler, name); return v9fs_xattr_get(dentry, full_name, buffer, size); } static int v9fs_xattr_handler_set(const struct xattr_handler *handler, struct mnt_idmap *idmap, struct dentry *dentry, struct inode *inode, const char *name, const void *value, size_t size, int flags) { const char *full_name = xattr_full_name(handler, name); return v9fs_xattr_set(dentry, full_name, value, size, flags); } static struct xattr_handler v9fs_xattr_user_handler = { .prefix = XATTR_USER_PREFIX, .get = v9fs_xattr_handler_get, .set = v9fs_xattr_handler_set, }; static struct xattr_handler v9fs_xattr_trusted_handler = { .prefix = XATTR_TRUSTED_PREFIX, .get = v9fs_xattr_handler_get, .set = v9fs_xattr_handler_set, }; #ifdef CONFIG_9P_FS_SECURITY static struct xattr_handler v9fs_xattr_security_handler = { .prefix = XATTR_SECURITY_PREFIX, .get = v9fs_xattr_handler_get, .set = v9fs_xattr_handler_set, }; #endif const struct xattr_handler *v9fs_xattr_handlers[] = { &v9fs_xattr_user_handler, &v9fs_xattr_trusted_handler, #ifdef CONFIG_9P_FS_SECURITY &v9fs_xattr_security_handler, #endif NULL };
linux-master
fs/9p/xattr.c
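The v9fs_xattr_get() comment above describes the two-pass contract: with no buffer the call returns the size required, with a buffer it copies the value. A hypothetical userspace sketch of that pattern against a file on a 9p mount (the path and attribute name are assumptions):

/* Hypothetical userspace sketch mirroring the v9fs_xattr_get() contract:
 * first a size query with a zero-sized buffer, then the actual fetch. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/xattr.h>

int main(void)
{
	const char *path = "/mnt/9p/file";
	const char *name = "user.comment";
	ssize_t len = getxattr(path, name, NULL, 0);	/* size query */
	char *buf;

	if (len < 0) {
		perror("getxattr(size)");
		return 1;
	}
	buf = malloc(len + 1);
	if (!buf)
		return 1;
	len = getxattr(path, name, buf, len);		/* actual fetch */
	if (len < 0) {
		perror("getxattr");
		free(buf);
		return 1;
	}
	buf[len] = '\0';
	printf("%s = %s\n", name, buf);
	free(buf);
	return 0;
}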
// SPDX-License-Identifier: GPL-2.0-only /* * This file contains functions assisting in mapping VFS to 9P2000 * * Copyright (C) 2004-2008 by Eric Van Hensbergen <[email protected]> * Copyright (C) 2002 by Ron Minnich <[email protected]> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/sched.h> #include <linux/cred.h> #include <linux/parser.h> #include <linux/slab.h> #include <linux/seq_file.h> #include <net/9p/9p.h> #include <net/9p/client.h> #include <net/9p/transport.h> #include "v9fs.h" #include "v9fs_vfs.h" #include "cache.h" static DEFINE_SPINLOCK(v9fs_sessionlist_lock); static LIST_HEAD(v9fs_sessionlist); struct kmem_cache *v9fs_inode_cache; /* * Option Parsing (code inspired by NFS code) * NOTE: each transport will parse its own options */ enum { /* Options that take integer arguments */ Opt_debug, Opt_dfltuid, Opt_dfltgid, Opt_afid, /* String options */ Opt_uname, Opt_remotename, Opt_cache, Opt_cachetag, /* Options that take no arguments */ Opt_nodevmap, Opt_noxattr, Opt_directio, Opt_ignoreqv, /* Access options */ Opt_access, Opt_posixacl, /* Lock timeout option */ Opt_locktimeout, /* Error token */ Opt_err }; static const match_table_t tokens = { {Opt_debug, "debug=%x"}, {Opt_dfltuid, "dfltuid=%u"}, {Opt_dfltgid, "dfltgid=%u"}, {Opt_afid, "afid=%u"}, {Opt_uname, "uname=%s"}, {Opt_remotename, "aname=%s"}, {Opt_nodevmap, "nodevmap"}, {Opt_noxattr, "noxattr"}, {Opt_directio, "directio"}, {Opt_ignoreqv, "ignoreqv"}, {Opt_cache, "cache=%s"}, {Opt_cachetag, "cachetag=%s"}, {Opt_access, "access=%s"}, {Opt_posixacl, "posixacl"}, {Opt_locktimeout, "locktimeout=%u"}, {Opt_err, NULL} }; /* Interpret mount options for cache mode */ static int get_cache_mode(char *s) { int version = -EINVAL; if (!strcmp(s, "loose")) { version = CACHE_SC_LOOSE; p9_debug(P9_DEBUG_9P, "Cache mode: loose\n"); } else if (!strcmp(s, "fscache")) { version = CACHE_SC_FSCACHE; p9_debug(P9_DEBUG_9P, "Cache mode: fscache\n"); } else if (!strcmp(s, "mmap")) { version = CACHE_SC_MMAP; p9_debug(P9_DEBUG_9P, "Cache mode: mmap\n"); } else if (!strcmp(s, "readahead")) { version = CACHE_SC_READAHEAD; p9_debug(P9_DEBUG_9P, "Cache mode: readahead\n"); } else if (!strcmp(s, "none")) { version = CACHE_SC_NONE; p9_debug(P9_DEBUG_9P, "Cache mode: none\n"); } else if (kstrtoint(s, 0, &version) != 0) { version = -EINVAL; pr_info("Unknown Cache mode or invalid value %s\n", s); } return version; } /* * Display the mount options in /proc/mounts. 
*/ int v9fs_show_options(struct seq_file *m, struct dentry *root) { struct v9fs_session_info *v9ses = root->d_sb->s_fs_info; if (v9ses->debug) seq_printf(m, ",debug=%x", v9ses->debug); if (!uid_eq(v9ses->dfltuid, V9FS_DEFUID)) seq_printf(m, ",dfltuid=%u", from_kuid_munged(&init_user_ns, v9ses->dfltuid)); if (!gid_eq(v9ses->dfltgid, V9FS_DEFGID)) seq_printf(m, ",dfltgid=%u", from_kgid_munged(&init_user_ns, v9ses->dfltgid)); if (v9ses->afid != ~0) seq_printf(m, ",afid=%u", v9ses->afid); if (strcmp(v9ses->uname, V9FS_DEFUSER) != 0) seq_printf(m, ",uname=%s", v9ses->uname); if (strcmp(v9ses->aname, V9FS_DEFANAME) != 0) seq_printf(m, ",aname=%s", v9ses->aname); if (v9ses->nodev) seq_puts(m, ",nodevmap"); if (v9ses->cache) seq_printf(m, ",cache=%x", v9ses->cache); #ifdef CONFIG_9P_FSCACHE if (v9ses->cachetag && (v9ses->cache & CACHE_FSCACHE)) seq_printf(m, ",cachetag=%s", v9ses->cachetag); #endif switch (v9ses->flags & V9FS_ACCESS_MASK) { case V9FS_ACCESS_USER: seq_puts(m, ",access=user"); break; case V9FS_ACCESS_ANY: seq_puts(m, ",access=any"); break; case V9FS_ACCESS_CLIENT: seq_puts(m, ",access=client"); break; case V9FS_ACCESS_SINGLE: seq_printf(m, ",access=%u", from_kuid_munged(&init_user_ns, v9ses->uid)); break; } if (v9ses->flags & V9FS_IGNORE_QV) seq_puts(m, ",ignoreqv"); if (v9ses->flags & V9FS_DIRECT_IO) seq_puts(m, ",directio"); if (v9ses->flags & V9FS_POSIX_ACL) seq_puts(m, ",posixacl"); if (v9ses->flags & V9FS_NO_XATTR) seq_puts(m, ",noxattr"); return p9_show_client_options(m, v9ses->clnt); } /** * v9fs_parse_options - parse mount options into session structure * @v9ses: existing v9fs session information * @opts: The mount option string * * Return 0 upon success, -ERRNO upon failure. */ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts) { char *options, *tmp_options; substring_t args[MAX_OPT_ARGS]; char *p; int option = 0; char *s; int ret = 0; /* setup defaults */ v9ses->afid = ~0; v9ses->debug = 0; v9ses->cache = CACHE_NONE; #ifdef CONFIG_9P_FSCACHE v9ses->cachetag = NULL; #endif v9ses->session_lock_timeout = P9_LOCK_TIMEOUT; if (!opts) return 0; tmp_options = kstrdup(opts, GFP_KERNEL); if (!tmp_options) { ret = -ENOMEM; goto fail_option_alloc; } options = tmp_options; while ((p = strsep(&options, ",")) != NULL) { int token, r; if (!*p) continue; token = match_token(p, tokens, args); switch (token) { case Opt_debug: r = match_int(&args[0], &option); if (r < 0) { p9_debug(P9_DEBUG_ERROR, "integer field, but no integer?\n"); ret = r; } else { v9ses->debug = option; #ifdef CONFIG_NET_9P_DEBUG p9_debug_level = option; #endif } break; case Opt_dfltuid: r = match_int(&args[0], &option); if (r < 0) { p9_debug(P9_DEBUG_ERROR, "integer field, but no integer?\n"); ret = r; continue; } v9ses->dfltuid = make_kuid(current_user_ns(), option); if (!uid_valid(v9ses->dfltuid)) { p9_debug(P9_DEBUG_ERROR, "uid field, but not a uid?\n"); ret = -EINVAL; } break; case Opt_dfltgid: r = match_int(&args[0], &option); if (r < 0) { p9_debug(P9_DEBUG_ERROR, "integer field, but no integer?\n"); ret = r; continue; } v9ses->dfltgid = make_kgid(current_user_ns(), option); if (!gid_valid(v9ses->dfltgid)) { p9_debug(P9_DEBUG_ERROR, "gid field, but not a gid?\n"); ret = -EINVAL; } break; case Opt_afid: r = match_int(&args[0], &option); if (r < 0) { p9_debug(P9_DEBUG_ERROR, "integer field, but no integer?\n"); ret = r; } else { v9ses->afid = option; } break; case Opt_uname: kfree(v9ses->uname); v9ses->uname = match_strdup(&args[0]); if (!v9ses->uname) { ret = -ENOMEM; goto free_and_return; } 
break; case Opt_remotename: kfree(v9ses->aname); v9ses->aname = match_strdup(&args[0]); if (!v9ses->aname) { ret = -ENOMEM; goto free_and_return; } break; case Opt_nodevmap: v9ses->nodev = 1; break; case Opt_noxattr: v9ses->flags |= V9FS_NO_XATTR; break; case Opt_directio: v9ses->flags |= V9FS_DIRECT_IO; break; case Opt_ignoreqv: v9ses->flags |= V9FS_IGNORE_QV; break; case Opt_cachetag: #ifdef CONFIG_9P_FSCACHE kfree(v9ses->cachetag); v9ses->cachetag = match_strdup(&args[0]); if (!v9ses->cachetag) { ret = -ENOMEM; goto free_and_return; } #endif break; case Opt_cache: s = match_strdup(&args[0]); if (!s) { ret = -ENOMEM; p9_debug(P9_DEBUG_ERROR, "problem allocating copy of cache arg\n"); goto free_and_return; } r = get_cache_mode(s); if (r < 0) ret = r; else v9ses->cache = r; kfree(s); break; case Opt_access: s = match_strdup(&args[0]); if (!s) { ret = -ENOMEM; p9_debug(P9_DEBUG_ERROR, "problem allocating copy of access arg\n"); goto free_and_return; } v9ses->flags &= ~V9FS_ACCESS_MASK; if (strcmp(s, "user") == 0) v9ses->flags |= V9FS_ACCESS_USER; else if (strcmp(s, "any") == 0) v9ses->flags |= V9FS_ACCESS_ANY; else if (strcmp(s, "client") == 0) { v9ses->flags |= V9FS_ACCESS_CLIENT; } else { uid_t uid; v9ses->flags |= V9FS_ACCESS_SINGLE; r = kstrtouint(s, 10, &uid); if (r) { ret = r; pr_info("Unknown access argument %s: %d\n", s, r); kfree(s); continue; } v9ses->uid = make_kuid(current_user_ns(), uid); if (!uid_valid(v9ses->uid)) { ret = -EINVAL; pr_info("Unknown uid %s\n", s); } } kfree(s); break; case Opt_posixacl: #ifdef CONFIG_9P_FS_POSIX_ACL v9ses->flags |= V9FS_POSIX_ACL; #else p9_debug(P9_DEBUG_ERROR, "Not defined CONFIG_9P_FS_POSIX_ACL. Ignoring posixacl option\n"); #endif break; case Opt_locktimeout: r = match_int(&args[0], &option); if (r < 0) { p9_debug(P9_DEBUG_ERROR, "integer field, but no integer?\n"); ret = r; continue; } if (option < 1) { p9_debug(P9_DEBUG_ERROR, "locktimeout must be a greater than zero integer.\n"); ret = -EINVAL; continue; } v9ses->session_lock_timeout = (long)option * HZ; break; default: continue; } } free_and_return: kfree(tmp_options); fail_option_alloc: return ret; } /** * v9fs_session_init - initialize session * @v9ses: session information structure * @dev_name: device being mounted * @data: options * */ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses, const char *dev_name, char *data) { struct p9_fid *fid; int rc = -ENOMEM; v9ses->uname = kstrdup(V9FS_DEFUSER, GFP_KERNEL); if (!v9ses->uname) goto err_names; v9ses->aname = kstrdup(V9FS_DEFANAME, GFP_KERNEL); if (!v9ses->aname) goto err_names; init_rwsem(&v9ses->rename_sem); v9ses->uid = INVALID_UID; v9ses->dfltuid = V9FS_DEFUID; v9ses->dfltgid = V9FS_DEFGID; v9ses->clnt = p9_client_create(dev_name, data); if (IS_ERR(v9ses->clnt)) { rc = PTR_ERR(v9ses->clnt); p9_debug(P9_DEBUG_ERROR, "problem initializing 9p client\n"); goto err_names; } v9ses->flags = V9FS_ACCESS_USER; if (p9_is_proto_dotl(v9ses->clnt)) { v9ses->flags = V9FS_ACCESS_CLIENT; v9ses->flags |= V9FS_PROTO_2000L; } else if (p9_is_proto_dotu(v9ses->clnt)) { v9ses->flags |= V9FS_PROTO_2000U; } rc = v9fs_parse_options(v9ses, data); if (rc < 0) goto err_clnt; v9ses->maxdata = v9ses->clnt->msize - P9_IOHDRSZ; if (!v9fs_proto_dotl(v9ses) && ((v9ses->flags & V9FS_ACCESS_MASK) == V9FS_ACCESS_CLIENT)) { /* * We support ACCESS_CLIENT only for dotl. * Fall back to ACCESS_USER */ v9ses->flags &= ~V9FS_ACCESS_MASK; v9ses->flags |= V9FS_ACCESS_USER; } /*FIXME !! 
*/ /* for legacy mode, fall back to V9FS_ACCESS_ANY */ if (!(v9fs_proto_dotu(v9ses) || v9fs_proto_dotl(v9ses)) && ((v9ses->flags&V9FS_ACCESS_MASK) == V9FS_ACCESS_USER)) { v9ses->flags &= ~V9FS_ACCESS_MASK; v9ses->flags |= V9FS_ACCESS_ANY; v9ses->uid = INVALID_UID; } if (!v9fs_proto_dotl(v9ses) || !((v9ses->flags & V9FS_ACCESS_MASK) == V9FS_ACCESS_CLIENT)) { /* * We support ACL checks on clinet only if the protocol is * 9P2000.L and access is V9FS_ACCESS_CLIENT. */ v9ses->flags &= ~V9FS_ACL_MASK; } fid = p9_client_attach(v9ses->clnt, NULL, v9ses->uname, INVALID_UID, v9ses->aname); if (IS_ERR(fid)) { rc = PTR_ERR(fid); p9_debug(P9_DEBUG_ERROR, "cannot attach\n"); goto err_clnt; } if ((v9ses->flags & V9FS_ACCESS_MASK) == V9FS_ACCESS_SINGLE) fid->uid = v9ses->uid; else fid->uid = INVALID_UID; #ifdef CONFIG_9P_FSCACHE /* register the session for caching */ if (v9ses->cache & CACHE_FSCACHE) { rc = v9fs_cache_session_get_cookie(v9ses, dev_name); if (rc < 0) goto err_clnt; } #endif spin_lock(&v9fs_sessionlist_lock); list_add(&v9ses->slist, &v9fs_sessionlist); spin_unlock(&v9fs_sessionlist_lock); return fid; err_clnt: #ifdef CONFIG_9P_FSCACHE kfree(v9ses->cachetag); #endif p9_client_destroy(v9ses->clnt); err_names: kfree(v9ses->uname); kfree(v9ses->aname); return ERR_PTR(rc); } /** * v9fs_session_close - shutdown a session * @v9ses: session information structure * */ void v9fs_session_close(struct v9fs_session_info *v9ses) { if (v9ses->clnt) { p9_client_destroy(v9ses->clnt); v9ses->clnt = NULL; } #ifdef CONFIG_9P_FSCACHE fscache_relinquish_volume(v9fs_session_cache(v9ses), NULL, false); kfree(v9ses->cachetag); #endif kfree(v9ses->uname); kfree(v9ses->aname); spin_lock(&v9fs_sessionlist_lock); list_del(&v9ses->slist); spin_unlock(&v9fs_sessionlist_lock); } /** * v9fs_session_cancel - terminate a session * @v9ses: session to terminate * * mark transport as disconnected and cancel all pending requests. */ void v9fs_session_cancel(struct v9fs_session_info *v9ses) { p9_debug(P9_DEBUG_ERROR, "cancel session %p\n", v9ses); p9_client_disconnect(v9ses->clnt); } /** * v9fs_session_begin_cancel - Begin terminate of a session * @v9ses: session to terminate * * After this call we don't allow any request other than clunk. 
*/ void v9fs_session_begin_cancel(struct v9fs_session_info *v9ses) { p9_debug(P9_DEBUG_ERROR, "begin cancel session %p\n", v9ses); p9_client_begin_disconnect(v9ses->clnt); } static struct kobject *v9fs_kobj; #ifdef CONFIG_9P_FSCACHE /* * List caches associated with a session */ static ssize_t caches_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { ssize_t n = 0, count = 0, limit = PAGE_SIZE; struct v9fs_session_info *v9ses; spin_lock(&v9fs_sessionlist_lock); list_for_each_entry(v9ses, &v9fs_sessionlist, slist) { if (v9ses->cachetag) { n = snprintf(buf, limit, "%s\n", v9ses->cachetag); if (n < 0) { count = n; break; } count += n; limit -= n; } } spin_unlock(&v9fs_sessionlist_lock); return count; } static struct kobj_attribute v9fs_attr_cache = __ATTR_RO(caches); #endif /* CONFIG_9P_FSCACHE */ static struct attribute *v9fs_attrs[] = { #ifdef CONFIG_9P_FSCACHE &v9fs_attr_cache.attr, #endif NULL, }; static const struct attribute_group v9fs_attr_group = { .attrs = v9fs_attrs, }; /** * v9fs_sysfs_init - Initialize the v9fs sysfs interface * */ static int __init v9fs_sysfs_init(void) { v9fs_kobj = kobject_create_and_add("9p", fs_kobj); if (!v9fs_kobj) return -ENOMEM; if (sysfs_create_group(v9fs_kobj, &v9fs_attr_group)) { kobject_put(v9fs_kobj); return -ENOMEM; } return 0; } /** * v9fs_sysfs_cleanup - Unregister the v9fs sysfs interface * */ static void v9fs_sysfs_cleanup(void) { sysfs_remove_group(v9fs_kobj, &v9fs_attr_group); kobject_put(v9fs_kobj); } static void v9fs_inode_init_once(void *foo) { struct v9fs_inode *v9inode = (struct v9fs_inode *)foo; memset(&v9inode->qid, 0, sizeof(v9inode->qid)); inode_init_once(&v9inode->netfs.inode); } /** * v9fs_init_inode_cache - initialize a cache for 9P * Returns 0 on success. */ static int v9fs_init_inode_cache(void) { v9fs_inode_cache = kmem_cache_create("v9fs_inode_cache", sizeof(struct v9fs_inode), 0, (SLAB_RECLAIM_ACCOUNT| SLAB_MEM_SPREAD|SLAB_ACCOUNT), v9fs_inode_init_once); if (!v9fs_inode_cache) return -ENOMEM; return 0; } /** * v9fs_destroy_inode_cache - destroy the cache of 9P inode * */ static void v9fs_destroy_inode_cache(void) { /* * Make sure all delayed rcu free inodes are flushed before we * destroy cache. */ rcu_barrier(); kmem_cache_destroy(v9fs_inode_cache); } static int v9fs_cache_register(void) { int ret; ret = v9fs_init_inode_cache(); if (ret < 0) return ret; return ret; } static void v9fs_cache_unregister(void) { v9fs_destroy_inode_cache(); } /** * init_v9fs - Initialize module * */ static int __init init_v9fs(void) { int err; pr_info("Installing v9fs 9p2000 file system support\n"); /* TODO: Setup list of registered trasnport modules */ err = v9fs_cache_register(); if (err < 0) { pr_err("Failed to register v9fs for caching\n"); return err; } err = v9fs_sysfs_init(); if (err < 0) { pr_err("Failed to register with sysfs\n"); goto out_cache; } err = register_filesystem(&v9fs_fs_type); if (err < 0) { pr_err("Failed to register filesystem\n"); goto out_sysfs_cleanup; } return 0; out_sysfs_cleanup: v9fs_sysfs_cleanup(); out_cache: v9fs_cache_unregister(); return err; } /** * exit_v9fs - shutdown module * */ static void __exit exit_v9fs(void) { v9fs_sysfs_cleanup(); v9fs_cache_unregister(); unregister_filesystem(&v9fs_fs_type); } module_init(init_v9fs) module_exit(exit_v9fs) MODULE_AUTHOR("Latchesar Ionkov <[email protected]>"); MODULE_AUTHOR("Eric Van Hensbergen <[email protected]>"); MODULE_AUTHOR("Ron Minnich <[email protected]>"); MODULE_LICENSE("GPL");
linux-master
fs/9p/v9fs.c
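v9fs_show_options() above renders the active session flags back into the option string visible in /proc/mounts (debug=, cache=, access=, and so on). A small userspace sketch that reads those strings back for every mounted 9p filesystem:

/* Userspace sketch: list every 9p mount and the option string that
 * v9fs_show_options() (plus the 9p client) emitted for it. */
#include <mntent.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct mntent *m;
	FILE *f = setmntent("/proc/mounts", "r");

	if (!f) {
		perror("setmntent");
		return 1;
	}
	while ((m = getmntent(f)) != NULL) {
		if (strcmp(m->mnt_type, "9p") == 0)
			printf("%s on %s: %s\n",
			       m->mnt_fsname, m->mnt_dir, m->mnt_opts);
	}
	endmntent(f);
	return 0;
}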
// SPDX-License-Identifier: GPL-2.0-only /* * This file contains vfs inode ops for the 9P2000.L protocol. * * Copyright (C) 2004 by Eric Van Hensbergen <[email protected]> * Copyright (C) 2002 by Ron Minnich <[email protected]> */ #include <linux/module.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/pagemap.h> #include <linux/stat.h> #include <linux/string.h> #include <linux/namei.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/xattr.h> #include <linux/posix_acl.h> #include <net/9p/9p.h> #include <net/9p/client.h> #include "v9fs.h" #include "v9fs_vfs.h" #include "fid.h" #include "cache.h" #include "xattr.h" #include "acl.h" static int v9fs_vfs_mknod_dotl(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t omode, dev_t rdev); /** * v9fs_get_fsgid_for_create - Helper function to get the gid for a new object * @dir_inode: The directory inode * * Helper function to get the gid for creating a * new file system object. This checks the S_ISGID to determine the owning * group of the new file system object. */ static kgid_t v9fs_get_fsgid_for_create(struct inode *dir_inode) { BUG_ON(dir_inode == NULL); if (dir_inode->i_mode & S_ISGID) { /* set_gid bit is set.*/ return dir_inode->i_gid; } return current_fsgid(); } static int v9fs_test_inode_dotl(struct inode *inode, void *data) { struct v9fs_inode *v9inode = V9FS_I(inode); struct p9_stat_dotl *st = (struct p9_stat_dotl *)data; /* don't match inode of different type */ if (inode_wrong_type(inode, st->st_mode)) return 0; if (inode->i_generation != st->st_gen) return 0; /* compare qid details */ if (memcmp(&v9inode->qid.version, &st->qid.version, sizeof(v9inode->qid.version))) return 0; if (v9inode->qid.type != st->qid.type) return 0; if (v9inode->qid.path != st->qid.path) return 0; return 1; } /* Always get a new inode */ static int v9fs_test_new_inode_dotl(struct inode *inode, void *data) { return 0; } static int v9fs_set_inode_dotl(struct inode *inode, void *data) { struct v9fs_inode *v9inode = V9FS_I(inode); struct p9_stat_dotl *st = (struct p9_stat_dotl *)data; memcpy(&v9inode->qid, &st->qid, sizeof(st->qid)); inode->i_generation = st->st_gen; return 0; } static struct inode *v9fs_qid_iget_dotl(struct super_block *sb, struct p9_qid *qid, struct p9_fid *fid, struct p9_stat_dotl *st, int new) { int retval; unsigned long i_ino; struct inode *inode; struct v9fs_session_info *v9ses = sb->s_fs_info; int (*test)(struct inode *inode, void *data); if (new) test = v9fs_test_new_inode_dotl; else test = v9fs_test_inode_dotl; i_ino = v9fs_qid2ino(qid); inode = iget5_locked(sb, i_ino, test, v9fs_set_inode_dotl, st); if (!inode) return ERR_PTR(-ENOMEM); if (!(inode->i_state & I_NEW)) return inode; /* * initialize the inode with the stat info * FIXME!! we may need support for stale inodes * later. 
*/ inode->i_ino = i_ino; retval = v9fs_init_inode(v9ses, inode, st->st_mode, new_decode_dev(st->st_rdev)); if (retval) goto error; v9fs_stat2inode_dotl(st, inode, 0); v9fs_cache_inode_get_cookie(inode); retval = v9fs_get_acl(inode, fid); if (retval) goto error; unlock_new_inode(inode); return inode; error: iget_failed(inode); return ERR_PTR(retval); } struct inode * v9fs_inode_from_fid_dotl(struct v9fs_session_info *v9ses, struct p9_fid *fid, struct super_block *sb, int new) { struct p9_stat_dotl *st; struct inode *inode = NULL; st = p9_client_getattr_dotl(fid, P9_STATS_BASIC | P9_STATS_GEN); if (IS_ERR(st)) return ERR_CAST(st); inode = v9fs_qid_iget_dotl(sb, &st->qid, fid, st, new); kfree(st); return inode; } struct dotl_openflag_map { int open_flag; int dotl_flag; }; static int v9fs_mapped_dotl_flags(int flags) { int i; int rflags = 0; struct dotl_openflag_map dotl_oflag_map[] = { { O_CREAT, P9_DOTL_CREATE }, { O_EXCL, P9_DOTL_EXCL }, { O_NOCTTY, P9_DOTL_NOCTTY }, { O_APPEND, P9_DOTL_APPEND }, { O_NONBLOCK, P9_DOTL_NONBLOCK }, { O_DSYNC, P9_DOTL_DSYNC }, { FASYNC, P9_DOTL_FASYNC }, { O_DIRECT, P9_DOTL_DIRECT }, { O_LARGEFILE, P9_DOTL_LARGEFILE }, { O_DIRECTORY, P9_DOTL_DIRECTORY }, { O_NOFOLLOW, P9_DOTL_NOFOLLOW }, { O_NOATIME, P9_DOTL_NOATIME }, { O_CLOEXEC, P9_DOTL_CLOEXEC }, { O_SYNC, P9_DOTL_SYNC}, }; for (i = 0; i < ARRAY_SIZE(dotl_oflag_map); i++) { if (flags & dotl_oflag_map[i].open_flag) rflags |= dotl_oflag_map[i].dotl_flag; } return rflags; } /** * v9fs_open_to_dotl_flags- convert Linux specific open flags to * plan 9 open flag. * @flags: flags to convert */ int v9fs_open_to_dotl_flags(int flags) { int rflags = 0; /* * We have same bits for P9_DOTL_READONLY, P9_DOTL_WRONLY * and P9_DOTL_NOACCESS */ rflags |= flags & O_ACCMODE; rflags |= v9fs_mapped_dotl_flags(flags); return rflags; } /** * v9fs_vfs_create_dotl - VFS hook to create files for 9P2000.L protocol. 
* @idmap: The user namespace of the mount * @dir: directory inode that is being created * @dentry: dentry that is being deleted * @omode: create permissions * @excl: True if the file must not yet exist * */ static int v9fs_vfs_create_dotl(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t omode, bool excl) { return v9fs_vfs_mknod_dotl(idmap, dir, dentry, omode, 0); } static int v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry, struct file *file, unsigned int flags, umode_t omode) { int err = 0; kgid_t gid; umode_t mode; int p9_omode = v9fs_open_to_dotl_flags(flags); const unsigned char *name = NULL; struct p9_qid qid; struct inode *inode; struct p9_fid *fid = NULL; struct p9_fid *dfid = NULL, *ofid = NULL; struct v9fs_session_info *v9ses; struct posix_acl *pacl = NULL, *dacl = NULL; struct dentry *res = NULL; if (d_in_lookup(dentry)) { res = v9fs_vfs_lookup(dir, dentry, 0); if (IS_ERR(res)) return PTR_ERR(res); if (res) dentry = res; } /* Only creates */ if (!(flags & O_CREAT) || d_really_is_positive(dentry)) return finish_no_open(file, res); v9ses = v9fs_inode2v9ses(dir); name = dentry->d_name.name; p9_debug(P9_DEBUG_VFS, "name:%s flags:0x%x mode:0x%x\n", name, flags, omode); dfid = v9fs_parent_fid(dentry); if (IS_ERR(dfid)) { err = PTR_ERR(dfid); p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", err); goto out; } /* clone a fid to use for creation */ ofid = clone_fid(dfid); if (IS_ERR(ofid)) { err = PTR_ERR(ofid); p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err); goto out; } gid = v9fs_get_fsgid_for_create(dir); mode = omode; /* Update mode based on ACL value */ err = v9fs_acl_mode(dir, &mode, &dacl, &pacl); if (err) { p9_debug(P9_DEBUG_VFS, "Failed to get acl values in create %d\n", err); goto out; } if ((v9ses->cache & CACHE_WRITEBACK) && (p9_omode & P9_OWRITE)) { p9_omode = (p9_omode & ~P9_OWRITE) | P9_ORDWR; p9_debug(P9_DEBUG_CACHE, "write-only file with writeback enabled, creating w/ O_RDWR\n"); } err = p9_client_create_dotl(ofid, name, p9_omode, mode, gid, &qid); if (err < 0) { p9_debug(P9_DEBUG_VFS, "p9_client_open_dotl failed in create %d\n", err); goto out; } v9fs_invalidate_inode_attr(dir); /* instantiate inode and assign the unopened fid to the dentry */ fid = p9_client_walk(dfid, 1, &name, 1); if (IS_ERR(fid)) { err = PTR_ERR(fid); p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err); goto out; } inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb); if (IS_ERR(inode)) { err = PTR_ERR(inode); p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n", err); goto out; } /* Now set the ACL based on the default value */ v9fs_set_create_acl(inode, fid, dacl, pacl); v9fs_fid_add(dentry, &fid); d_instantiate(dentry, inode); /* Since we are opening a file, assign the open fid to the file */ err = finish_open(file, dentry, generic_file_open); if (err) goto out; file->private_data = ofid; #ifdef CONFIG_9P_FSCACHE if (v9ses->cache & CACHE_FSCACHE) { struct v9fs_inode *v9inode = V9FS_I(inode); fscache_use_cookie(v9fs_inode_cookie(v9inode), file->f_mode & FMODE_WRITE); } #endif v9fs_fid_add_modes(ofid, v9ses->flags, v9ses->cache, flags); v9fs_open_fid_add(inode, &ofid); file->f_mode |= FMODE_CREATED; out: p9_fid_put(dfid); p9_fid_put(ofid); p9_fid_put(fid); v9fs_put_acl(dacl, pacl); dput(res); return err; } /** * v9fs_vfs_mkdir_dotl - VFS mkdir hook to create a directory * @idmap: The idmap of the mount * @dir: inode that is being unlinked * @dentry: dentry that is being unlinked * @omode: mode for new directory * */ static int 
v9fs_vfs_mkdir_dotl(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t omode) { int err; struct v9fs_session_info *v9ses; struct p9_fid *fid = NULL, *dfid = NULL; kgid_t gid; const unsigned char *name; umode_t mode; struct inode *inode; struct p9_qid qid; struct posix_acl *dacl = NULL, *pacl = NULL; p9_debug(P9_DEBUG_VFS, "name %pd\n", dentry); v9ses = v9fs_inode2v9ses(dir); omode |= S_IFDIR; if (dir->i_mode & S_ISGID) omode |= S_ISGID; dfid = v9fs_parent_fid(dentry); if (IS_ERR(dfid)) { err = PTR_ERR(dfid); p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", err); goto error; } gid = v9fs_get_fsgid_for_create(dir); mode = omode; /* Update mode based on ACL value */ err = v9fs_acl_mode(dir, &mode, &dacl, &pacl); if (err) { p9_debug(P9_DEBUG_VFS, "Failed to get acl values in mkdir %d\n", err); goto error; } name = dentry->d_name.name; err = p9_client_mkdir_dotl(dfid, name, mode, gid, &qid); if (err < 0) goto error; fid = p9_client_walk(dfid, 1, &name, 1); if (IS_ERR(fid)) { err = PTR_ERR(fid); p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err); goto error; } /* instantiate inode and assign the unopened fid to the dentry */ if (v9ses->cache & (CACHE_META|CACHE_LOOSE)) { inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb); if (IS_ERR(inode)) { err = PTR_ERR(inode); p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n", err); goto error; } v9fs_fid_add(dentry, &fid); v9fs_set_create_acl(inode, fid, dacl, pacl); d_instantiate(dentry, inode); err = 0; } else { /* * Not in cached mode. No need to populate * inode with stat. We need to get an inode * so that we can set the acl with dentry */ inode = v9fs_get_inode(dir->i_sb, mode, 0); if (IS_ERR(inode)) { err = PTR_ERR(inode); goto error; } v9fs_set_create_acl(inode, fid, dacl, pacl); d_instantiate(dentry, inode); } inc_nlink(dir); v9fs_invalidate_inode_attr(dir); error: p9_fid_put(fid); v9fs_put_acl(dacl, pacl); p9_fid_put(dfid); return err; } static int v9fs_vfs_getattr_dotl(struct mnt_idmap *idmap, const struct path *path, struct kstat *stat, u32 request_mask, unsigned int flags) { struct dentry *dentry = path->dentry; struct v9fs_session_info *v9ses; struct p9_fid *fid; struct inode *inode = d_inode(dentry); struct p9_stat_dotl *st; p9_debug(P9_DEBUG_VFS, "dentry: %p\n", dentry); v9ses = v9fs_dentry2v9ses(dentry); if (v9ses->cache & (CACHE_META|CACHE_LOOSE)) { generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat); return 0; } else if (v9ses->cache) { if (S_ISREG(inode->i_mode)) { int retval = filemap_fdatawrite(inode->i_mapping); if (retval) p9_debug(P9_DEBUG_ERROR, "flushing writeback during getattr returned %d\n", retval); } } fid = v9fs_fid_lookup(dentry); if (IS_ERR(fid)) return PTR_ERR(fid); /* Ask for all the fields in stat structure. Server will return * whatever it supports */ st = p9_client_getattr_dotl(fid, P9_STATS_ALL); p9_fid_put(fid); if (IS_ERR(st)) return PTR_ERR(st); v9fs_stat2inode_dotl(st, d_inode(dentry), 0); generic_fillattr(&nop_mnt_idmap, request_mask, d_inode(dentry), stat); /* Change block size to what the server returned */ stat->blksize = st->st_blksize; kfree(st); return 0; } /* * Attribute flags. 
*/ #define P9_ATTR_MODE (1 << 0) #define P9_ATTR_UID (1 << 1) #define P9_ATTR_GID (1 << 2) #define P9_ATTR_SIZE (1 << 3) #define P9_ATTR_ATIME (1 << 4) #define P9_ATTR_MTIME (1 << 5) #define P9_ATTR_CTIME (1 << 6) #define P9_ATTR_ATIME_SET (1 << 7) #define P9_ATTR_MTIME_SET (1 << 8) struct dotl_iattr_map { int iattr_valid; int p9_iattr_valid; }; static int v9fs_mapped_iattr_valid(int iattr_valid) { int i; int p9_iattr_valid = 0; struct dotl_iattr_map dotl_iattr_map[] = { { ATTR_MODE, P9_ATTR_MODE }, { ATTR_UID, P9_ATTR_UID }, { ATTR_GID, P9_ATTR_GID }, { ATTR_SIZE, P9_ATTR_SIZE }, { ATTR_ATIME, P9_ATTR_ATIME }, { ATTR_MTIME, P9_ATTR_MTIME }, { ATTR_CTIME, P9_ATTR_CTIME }, { ATTR_ATIME_SET, P9_ATTR_ATIME_SET }, { ATTR_MTIME_SET, P9_ATTR_MTIME_SET }, }; for (i = 0; i < ARRAY_SIZE(dotl_iattr_map); i++) { if (iattr_valid & dotl_iattr_map[i].iattr_valid) p9_iattr_valid |= dotl_iattr_map[i].p9_iattr_valid; } return p9_iattr_valid; } /** * v9fs_vfs_setattr_dotl - set file metadata * @idmap: idmap of the mount * @dentry: file whose metadata to set * @iattr: metadata assignment structure * */ int v9fs_vfs_setattr_dotl(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *iattr) { int retval, use_dentry = 0; struct inode *inode = d_inode(dentry); struct v9fs_session_info __maybe_unused *v9ses; struct p9_fid *fid = NULL; struct p9_iattr_dotl p9attr = { .uid = INVALID_UID, .gid = INVALID_GID, }; p9_debug(P9_DEBUG_VFS, "\n"); retval = setattr_prepare(&nop_mnt_idmap, dentry, iattr); if (retval) return retval; v9ses = v9fs_dentry2v9ses(dentry); p9attr.valid = v9fs_mapped_iattr_valid(iattr->ia_valid); if (iattr->ia_valid & ATTR_MODE) p9attr.mode = iattr->ia_mode; if (iattr->ia_valid & ATTR_UID) p9attr.uid = iattr->ia_uid; if (iattr->ia_valid & ATTR_GID) p9attr.gid = iattr->ia_gid; if (iattr->ia_valid & ATTR_SIZE) p9attr.size = iattr->ia_size; if (iattr->ia_valid & ATTR_ATIME_SET) { p9attr.atime_sec = iattr->ia_atime.tv_sec; p9attr.atime_nsec = iattr->ia_atime.tv_nsec; } if (iattr->ia_valid & ATTR_MTIME_SET) { p9attr.mtime_sec = iattr->ia_mtime.tv_sec; p9attr.mtime_nsec = iattr->ia_mtime.tv_nsec; } if (iattr->ia_valid & ATTR_FILE) { fid = iattr->ia_file->private_data; WARN_ON(!fid); } if (!fid) { fid = v9fs_fid_lookup(dentry); use_dentry = 1; } if (IS_ERR(fid)) return PTR_ERR(fid); /* Write all dirty data */ if (S_ISREG(inode->i_mode)) { retval = filemap_fdatawrite(inode->i_mapping); if (retval < 0) p9_debug(P9_DEBUG_ERROR, "Flushing file prior to setattr failed: %d\n", retval); } retval = p9_client_setattr(fid, &p9attr); if (retval < 0) { if (use_dentry) p9_fid_put(fid); return retval; } if ((iattr->ia_valid & ATTR_SIZE) && iattr->ia_size != i_size_read(inode)) { truncate_setsize(inode, iattr->ia_size); truncate_pagecache(inode, iattr->ia_size); #ifdef CONFIG_9P_FSCACHE if (v9ses->cache & CACHE_FSCACHE) fscache_resize_cookie(v9fs_inode_cookie(V9FS_I(inode)), iattr->ia_size); #endif } v9fs_invalidate_inode_attr(inode); setattr_copy(&nop_mnt_idmap, inode, iattr); mark_inode_dirty(inode); if (iattr->ia_valid & ATTR_MODE) { /* We also want to update ACL when we update mode bits */ retval = v9fs_acl_chmod(inode, fid); if (retval < 0) { if (use_dentry) p9_fid_put(fid); return retval; } } if (use_dentry) p9_fid_put(fid); return 0; } /** * v9fs_stat2inode_dotl - populate an inode structure with stat info * @stat: stat structure * @inode: inode to populate * @flags: ctrl flags (e.g. 
V9FS_STAT2INODE_KEEP_ISIZE) * */ void v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode, unsigned int flags) { umode_t mode; struct v9fs_inode *v9inode = V9FS_I(inode); if ((stat->st_result_mask & P9_STATS_BASIC) == P9_STATS_BASIC) { inode->i_atime.tv_sec = stat->st_atime_sec; inode->i_atime.tv_nsec = stat->st_atime_nsec; inode->i_mtime.tv_sec = stat->st_mtime_sec; inode->i_mtime.tv_nsec = stat->st_mtime_nsec; inode_set_ctime(inode, stat->st_ctime_sec, stat->st_ctime_nsec); inode->i_uid = stat->st_uid; inode->i_gid = stat->st_gid; set_nlink(inode, stat->st_nlink); mode = stat->st_mode & S_IALLUGO; mode |= inode->i_mode & ~S_IALLUGO; inode->i_mode = mode; if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE)) v9fs_i_size_write(inode, stat->st_size); inode->i_blocks = stat->st_blocks; } else { if (stat->st_result_mask & P9_STATS_ATIME) { inode->i_atime.tv_sec = stat->st_atime_sec; inode->i_atime.tv_nsec = stat->st_atime_nsec; } if (stat->st_result_mask & P9_STATS_MTIME) { inode->i_mtime.tv_sec = stat->st_mtime_sec; inode->i_mtime.tv_nsec = stat->st_mtime_nsec; } if (stat->st_result_mask & P9_STATS_CTIME) { inode_set_ctime(inode, stat->st_ctime_sec, stat->st_ctime_nsec); } if (stat->st_result_mask & P9_STATS_UID) inode->i_uid = stat->st_uid; if (stat->st_result_mask & P9_STATS_GID) inode->i_gid = stat->st_gid; if (stat->st_result_mask & P9_STATS_NLINK) set_nlink(inode, stat->st_nlink); if (stat->st_result_mask & P9_STATS_MODE) { mode = stat->st_mode & S_IALLUGO; mode |= inode->i_mode & ~S_IALLUGO; inode->i_mode = mode; } if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE) && stat->st_result_mask & P9_STATS_SIZE) v9fs_i_size_write(inode, stat->st_size); if (stat->st_result_mask & P9_STATS_BLOCKS) inode->i_blocks = stat->st_blocks; } if (stat->st_result_mask & P9_STATS_GEN) inode->i_generation = stat->st_gen; /* Currently we don't support P9_STATS_BTIME and P9_STATS_DATA_VERSION * because the inode structure does not have fields for them. */ v9inode->cache_validity &= ~V9FS_INO_INVALID_ATTR; } static int v9fs_vfs_symlink_dotl(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, const char *symname) { int err; kgid_t gid; const unsigned char *name; struct p9_qid qid; struct inode *inode; struct p9_fid *dfid; struct p9_fid *fid = NULL; struct v9fs_session_info *v9ses; name = dentry->d_name.name; p9_debug(P9_DEBUG_VFS, "%lu,%s,%s\n", dir->i_ino, name, symname); v9ses = v9fs_inode2v9ses(dir); dfid = v9fs_parent_fid(dentry); if (IS_ERR(dfid)) { err = PTR_ERR(dfid); p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", err); return err; } gid = v9fs_get_fsgid_for_create(dir); /* Server doesn't alter fid on TSYMLINK. Hence no need to clone it. */ err = p9_client_symlink(dfid, name, symname, gid, &qid); if (err < 0) { p9_debug(P9_DEBUG_VFS, "p9_client_symlink failed %d\n", err); goto error; } v9fs_invalidate_inode_attr(dir); if (v9ses->cache & (CACHE_META|CACHE_LOOSE)) { /* Now walk from the parent so we can get an unopened fid. */ fid = p9_client_walk(dfid, 1, &name, 1); if (IS_ERR(fid)) { err = PTR_ERR(fid); p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err); goto error; } /* instantiate inode and assign the unopened fid to dentry */ inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb); if (IS_ERR(inode)) { err = PTR_ERR(inode); p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n", err); goto error; } v9fs_fid_add(dentry, &fid); d_instantiate(dentry, inode); err = 0; } else { /* Not in cached mode. 
No need to populate inode with stat */ inode = v9fs_get_inode(dir->i_sb, S_IFLNK, 0); if (IS_ERR(inode)) { err = PTR_ERR(inode); goto error; } d_instantiate(dentry, inode); } error: p9_fid_put(fid); p9_fid_put(dfid); return err; } /** * v9fs_vfs_link_dotl - create a hardlink for dotl * @old_dentry: dentry for file to link to * @dir: inode destination for new link * @dentry: dentry for link * */ static int v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { int err; struct p9_fid *dfid, *oldfid; struct v9fs_session_info *v9ses; p9_debug(P9_DEBUG_VFS, "dir ino: %lu, old_name: %pd, new_name: %pd\n", dir->i_ino, old_dentry, dentry); v9ses = v9fs_inode2v9ses(dir); dfid = v9fs_parent_fid(dentry); if (IS_ERR(dfid)) return PTR_ERR(dfid); oldfid = v9fs_fid_lookup(old_dentry); if (IS_ERR(oldfid)) { p9_fid_put(dfid); return PTR_ERR(oldfid); } err = p9_client_link(dfid, oldfid, dentry->d_name.name); p9_fid_put(dfid); p9_fid_put(oldfid); if (err < 0) { p9_debug(P9_DEBUG_VFS, "p9_client_link failed %d\n", err); return err; } v9fs_invalidate_inode_attr(dir); if (v9ses->cache & (CACHE_META|CACHE_LOOSE)) { /* Get the latest stat info from server. */ struct p9_fid *fid; fid = v9fs_fid_lookup(old_dentry); if (IS_ERR(fid)) return PTR_ERR(fid); v9fs_refresh_inode_dotl(fid, d_inode(old_dentry)); p9_fid_put(fid); } ihold(d_inode(old_dentry)); d_instantiate(dentry, d_inode(old_dentry)); return err; } /** * v9fs_vfs_mknod_dotl - create a special file * @idmap: The idmap of the mount * @dir: inode destination for new link * @dentry: dentry for file * @omode: mode for creation * @rdev: device associated with special file * */ static int v9fs_vfs_mknod_dotl(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t omode, dev_t rdev) { int err; kgid_t gid; const unsigned char *name; umode_t mode; struct v9fs_session_info *v9ses; struct p9_fid *fid = NULL, *dfid = NULL; struct inode *inode; struct p9_qid qid; struct posix_acl *dacl = NULL, *pacl = NULL; p9_debug(P9_DEBUG_VFS, " %lu,%pd mode: %x MAJOR: %u MINOR: %u\n", dir->i_ino, dentry, omode, MAJOR(rdev), MINOR(rdev)); v9ses = v9fs_inode2v9ses(dir); dfid = v9fs_parent_fid(dentry); if (IS_ERR(dfid)) { err = PTR_ERR(dfid); p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", err); goto error; } gid = v9fs_get_fsgid_for_create(dir); mode = omode; /* Update mode based on ACL value */ err = v9fs_acl_mode(dir, &mode, &dacl, &pacl); if (err) { p9_debug(P9_DEBUG_VFS, "Failed to get acl values in mknod %d\n", err); goto error; } name = dentry->d_name.name; err = p9_client_mknod_dotl(dfid, name, mode, rdev, gid, &qid); if (err < 0) goto error; v9fs_invalidate_inode_attr(dir); fid = p9_client_walk(dfid, 1, &name, 1); if (IS_ERR(fid)) { err = PTR_ERR(fid); p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err); goto error; } /* instantiate inode and assign the unopened fid to the dentry */ if (v9ses->cache & (CACHE_META|CACHE_LOOSE)) { inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb); if (IS_ERR(inode)) { err = PTR_ERR(inode); p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n", err); goto error; } v9fs_set_create_acl(inode, fid, dacl, pacl); v9fs_fid_add(dentry, &fid); d_instantiate(dentry, inode); err = 0; } else { /* * Not in cached mode. No need to populate inode with stat. 
* socket syscall returns a fd, so we need instantiate */ inode = v9fs_get_inode(dir->i_sb, mode, rdev); if (IS_ERR(inode)) { err = PTR_ERR(inode); goto error; } v9fs_set_create_acl(inode, fid, dacl, pacl); d_instantiate(dentry, inode); } error: p9_fid_put(fid); v9fs_put_acl(dacl, pacl); p9_fid_put(dfid); return err; } /** * v9fs_vfs_get_link_dotl - follow a symlink path * @dentry: dentry for symlink * @inode: inode for symlink * @done: destructor for return value */ static const char * v9fs_vfs_get_link_dotl(struct dentry *dentry, struct inode *inode, struct delayed_call *done) { struct p9_fid *fid; char *target; int retval; if (!dentry) return ERR_PTR(-ECHILD); p9_debug(P9_DEBUG_VFS, "%pd\n", dentry); fid = v9fs_fid_lookup(dentry); if (IS_ERR(fid)) return ERR_CAST(fid); retval = p9_client_readlink(fid, &target); p9_fid_put(fid); if (retval) return ERR_PTR(retval); set_delayed_call(done, kfree_link, target); return target; } int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode) { struct p9_stat_dotl *st; struct v9fs_session_info *v9ses; unsigned int flags; v9ses = v9fs_inode2v9ses(inode); st = p9_client_getattr_dotl(fid, P9_STATS_ALL); if (IS_ERR(st)) return PTR_ERR(st); /* * Don't update inode if the file type is different */ if (inode_wrong_type(inode, st->st_mode)) goto out; /* * We don't want to refresh inode->i_size, * because we may have cached data */ flags = (v9ses->cache & CACHE_LOOSE) ? V9FS_STAT2INODE_KEEP_ISIZE : 0; v9fs_stat2inode_dotl(st, inode, flags); out: kfree(st); return 0; } const struct inode_operations v9fs_dir_inode_operations_dotl = { .create = v9fs_vfs_create_dotl, .atomic_open = v9fs_vfs_atomic_open_dotl, .lookup = v9fs_vfs_lookup, .link = v9fs_vfs_link_dotl, .symlink = v9fs_vfs_symlink_dotl, .unlink = v9fs_vfs_unlink, .mkdir = v9fs_vfs_mkdir_dotl, .rmdir = v9fs_vfs_rmdir, .mknod = v9fs_vfs_mknod_dotl, .rename = v9fs_vfs_rename, .getattr = v9fs_vfs_getattr_dotl, .setattr = v9fs_vfs_setattr_dotl, .listxattr = v9fs_listxattr, .get_inode_acl = v9fs_iop_get_inode_acl, .get_acl = v9fs_iop_get_acl, .set_acl = v9fs_iop_set_acl, }; const struct inode_operations v9fs_file_inode_operations_dotl = { .getattr = v9fs_vfs_getattr_dotl, .setattr = v9fs_vfs_setattr_dotl, .listxattr = v9fs_listxattr, .get_inode_acl = v9fs_iop_get_inode_acl, .get_acl = v9fs_iop_get_acl, .set_acl = v9fs_iop_set_acl, }; const struct inode_operations v9fs_symlink_inode_operations_dotl = { .get_link = v9fs_vfs_get_link_dotl, .getattr = v9fs_vfs_getattr_dotl, .setattr = v9fs_vfs_setattr_dotl, .listxattr = v9fs_listxattr, };
linux-master
fs/9p/vfs_inode_dotl.c
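v9fs_vfs_symlink_dotl() and v9fs_vfs_get_link_dotl() above service the symlink(2) and readlink(2) paths via p9_client_symlink() and p9_client_readlink(). A hypothetical userspace sketch exercising both on a 9p mount (the paths are assumptions):

/* Hypothetical userspace sketch: create a symlink on a 9p mount and read it
 * back, exercising the dotl symlink and get_link inode operations above. */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char target[256];
	ssize_t n;

	if (symlink("data.bin", "/mnt/9p/link")) {	/* ends up in p9_client_symlink() */
		perror("symlink");
		return 1;
	}
	n = readlink("/mnt/9p/link", target, sizeof(target) - 1);	/* serviced via p9_client_readlink() */
	if (n < 0) {
		perror("readlink");
		return 1;
	}
	target[n] = '\0';
	printf("/mnt/9p/link -> %s\n", target);
	return 0;
}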
// SPDX-License-Identifier: GPL-2.0-only /* * V9FS FID Management * * Copyright (C) 2007 by Latchesar Ionkov <[email protected]> * Copyright (C) 2005, 2006 by Eric Van Hensbergen <[email protected]> */ #include <linux/module.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/sched.h> #include <net/9p/9p.h> #include <net/9p/client.h> #include "v9fs.h" #include "v9fs_vfs.h" #include "fid.h" static inline void __add_fid(struct dentry *dentry, struct p9_fid *fid) { hlist_add_head(&fid->dlist, (struct hlist_head *)&dentry->d_fsdata); } /** * v9fs_fid_add - add a fid to a dentry * @dentry: dentry that the fid is being added to * @pfid: fid to add, NULLed out * */ void v9fs_fid_add(struct dentry *dentry, struct p9_fid **pfid) { struct p9_fid *fid = *pfid; spin_lock(&dentry->d_lock); __add_fid(dentry, fid); spin_unlock(&dentry->d_lock); *pfid = NULL; } static bool v9fs_is_writeable(int mode) { if (mode & (P9_OWRITE|P9_ORDWR)) return true; else return false; } /** * v9fs_fid_find_inode - search for an open fid off of the inode list * @inode: return a fid pointing to a specific inode * @want_writeable: only consider fids which are writeable * @uid: return a fid belonging to the specified user * @any: ignore uid as a selection criteria * */ struct p9_fid *v9fs_fid_find_inode(struct inode *inode, bool want_writeable, kuid_t uid, bool any) { struct hlist_head *h; struct p9_fid *fid, *ret = NULL; p9_debug(P9_DEBUG_VFS, " inode: %p\n", inode); spin_lock(&inode->i_lock); h = (struct hlist_head *)&inode->i_private; hlist_for_each_entry(fid, h, ilist) { if (any || uid_eq(fid->uid, uid)) { if (want_writeable && !v9fs_is_writeable(fid->mode)) { p9_debug(P9_DEBUG_VFS, " mode: %x not writeable?\n", fid->mode); continue; } p9_fid_get(fid); ret = fid; break; } } spin_unlock(&inode->i_lock); return ret; } /** * v9fs_open_fid_add - add an open fid to an inode * @inode: inode that the fid is being added to * @pfid: fid to add, NULLed out * */ void v9fs_open_fid_add(struct inode *inode, struct p9_fid **pfid) { struct p9_fid *fid = *pfid; spin_lock(&inode->i_lock); hlist_add_head(&fid->ilist, (struct hlist_head *)&inode->i_private); spin_unlock(&inode->i_lock); *pfid = NULL; } /** * v9fs_fid_find - retrieve a fid that belongs to the specified uid * @dentry: dentry to look for fid in * @uid: return fid that belongs to the specified user * @any: if non-zero, return any fid associated with the dentry * */ static struct p9_fid *v9fs_fid_find(struct dentry *dentry, kuid_t uid, int any) { struct p9_fid *fid, *ret; p9_debug(P9_DEBUG_VFS, " dentry: %pd (%p) uid %d any %d\n", dentry, dentry, from_kuid(&init_user_ns, uid), any); ret = NULL; /* we'll recheck under lock if there's anything to look in */ if (dentry->d_fsdata) { struct hlist_head *h = (struct hlist_head *)&dentry->d_fsdata; spin_lock(&dentry->d_lock); hlist_for_each_entry(fid, h, dlist) { if (any || uid_eq(fid->uid, uid)) { ret = fid; p9_fid_get(ret); break; } } spin_unlock(&dentry->d_lock); } else { if (dentry->d_inode) ret = v9fs_fid_find_inode(dentry->d_inode, false, uid, any); } return ret; } /* * We need to hold v9ses->rename_sem as long as we hold references * to returned path array. Array element contain pointers to * dentry names. 
*/ static int build_path_from_dentry(struct v9fs_session_info *v9ses, struct dentry *dentry, const unsigned char ***names) { int n = 0, i; const unsigned char **wnames; struct dentry *ds; for (ds = dentry; !IS_ROOT(ds); ds = ds->d_parent) n++; wnames = kmalloc_array(n, sizeof(char *), GFP_KERNEL); if (!wnames) goto err_out; for (ds = dentry, i = (n-1); i >= 0; i--, ds = ds->d_parent) wnames[i] = ds->d_name.name; *names = wnames; return n; err_out: return -ENOMEM; } static struct p9_fid *v9fs_fid_lookup_with_uid(struct dentry *dentry, kuid_t uid, int any) { struct dentry *ds; const unsigned char **wnames, *uname; int i, n, l, access; struct v9fs_session_info *v9ses; struct p9_fid *fid, *root_fid, *old_fid; v9ses = v9fs_dentry2v9ses(dentry); access = v9ses->flags & V9FS_ACCESS_MASK; fid = v9fs_fid_find(dentry, uid, any); if (fid) return fid; /* * we don't have a matching fid. To do a TWALK we need * parent fid. We need to prevent rename when we want to * look at the parent. */ down_read(&v9ses->rename_sem); ds = dentry->d_parent; fid = v9fs_fid_find(ds, uid, any); if (fid) { /* Found the parent fid do a lookup with that */ old_fid = fid; fid = p9_client_walk(old_fid, 1, &dentry->d_name.name, 1); p9_fid_put(old_fid); goto fid_out; } up_read(&v9ses->rename_sem); /* start from the root and try to do a lookup */ root_fid = v9fs_fid_find(dentry->d_sb->s_root, uid, any); if (!root_fid) { /* the user is not attached to the fs yet */ if (access == V9FS_ACCESS_SINGLE) return ERR_PTR(-EPERM); if (v9fs_proto_dotu(v9ses) || v9fs_proto_dotl(v9ses)) uname = NULL; else uname = v9ses->uname; fid = p9_client_attach(v9ses->clnt, NULL, uname, uid, v9ses->aname); if (IS_ERR(fid)) return fid; root_fid = p9_fid_get(fid); v9fs_fid_add(dentry->d_sb->s_root, &fid); } /* If we are root ourself just return that */ if (dentry->d_sb->s_root == dentry) return root_fid; /* * Do a multipath walk with attached root. * When walking parent we need to make sure we * don't have a parallel rename happening */ down_read(&v9ses->rename_sem); n = build_path_from_dentry(v9ses, dentry, &wnames); if (n < 0) { fid = ERR_PTR(n); goto err_out; } fid = root_fid; old_fid = root_fid; i = 0; while (i < n) { l = min(n - i, P9_MAXWELEM); /* * We need to hold rename lock when doing a multipath * walk to ensure none of the path components change */ fid = p9_client_walk(old_fid, l, &wnames[i], old_fid == root_fid /* clone */); /* non-cloning walk will return the same fid */ if (fid != old_fid) { p9_fid_put(old_fid); old_fid = fid; } if (IS_ERR(fid)) { kfree(wnames); goto err_out; } i += l; } kfree(wnames); fid_out: if (!IS_ERR(fid)) { spin_lock(&dentry->d_lock); if (d_unhashed(dentry)) { spin_unlock(&dentry->d_lock); p9_fid_put(fid); fid = ERR_PTR(-ENOENT); } else { __add_fid(dentry, fid); p9_fid_get(fid); spin_unlock(&dentry->d_lock); } } err_out: up_read(&v9ses->rename_sem); return fid; } /** * v9fs_fid_lookup - lookup for a fid, try to walk if not found * @dentry: dentry to look for fid in * * Look for a fid in the specified dentry for the current user. * If no fid is found, try to create one walking from a fid from the parent * dentry (if it has one), or the root dentry. If the user haven't accessed * the fs yet, attach now and walk from the root. 
*/ struct p9_fid *v9fs_fid_lookup(struct dentry *dentry) { kuid_t uid; int any, access; struct v9fs_session_info *v9ses; v9ses = v9fs_dentry2v9ses(dentry); access = v9ses->flags & V9FS_ACCESS_MASK; switch (access) { case V9FS_ACCESS_SINGLE: case V9FS_ACCESS_USER: case V9FS_ACCESS_CLIENT: uid = current_fsuid(); any = 0; break; case V9FS_ACCESS_ANY: uid = v9ses->uid; any = 1; break; default: uid = INVALID_UID; any = 0; break; } return v9fs_fid_lookup_with_uid(dentry, uid, any); }
linux-master
fs/9p/fid.c
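The two-pass walk in build_path_from_dentry() above is easy to model in plain C: first count the path components from the leaf up to the root, then fill the name array back-to-front so index 0 is the component closest to the root, which is the order a 9P walk wants. A hedged sketch follows; struct node_sim with a self-parented root is an illustrative stand-in for a dentry and IS_ROOT(), not a kernel type.

#include <stdio.h>
#include <stdlib.h>

struct node_sim {
	const char *name;
	struct node_sim *parent;	/* the root points to itself */
};

static int build_path_sim(struct node_sim *leaf, const char ***names)
{
	struct node_sim *n;
	const char **wnames;
	int count = 0, i;

	for (n = leaf; n->parent != n; n = n->parent)
		count++;			/* pass 1: count components */
	wnames = malloc(count * sizeof(*wnames));
	if (!wnames)
		return -1;
	for (n = leaf, i = count - 1; i >= 0; i--, n = n->parent)
		wnames[i] = n->name;		/* pass 2: fill back-to-front */
	*names = wnames;
	return count;
}

int main(void)
{
	struct node_sim root = { "/", &root };
	struct node_sim a = { "a", &root }, b = { "b", &a }, c = { "c", &b };
	const char **wnames;
	int i, n = build_path_sim(&c, &wnames);

	if (n < 0)
		return 1;
	for (i = 0; i < n; i++)
		printf("%s ", wnames[i]);	/* prints: a b c */
	printf("\n");
	free(wnames);
	return 0;
}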
// SPDX-License-Identifier: GPL-2.0-only /* * This file contains vfs inode ops for the 9P2000 protocol. * * Copyright (C) 2004 by Eric Van Hensbergen <[email protected]> * Copyright (C) 2002 by Ron Minnich <[email protected]> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/pagemap.h> #include <linux/stat.h> #include <linux/string.h> #include <linux/namei.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/xattr.h> #include <linux/posix_acl.h> #include <net/9p/9p.h> #include <net/9p/client.h> #include "v9fs.h" #include "v9fs_vfs.h" #include "fid.h" #include "cache.h" #include "xattr.h" #include "acl.h" static const struct inode_operations v9fs_dir_inode_operations; static const struct inode_operations v9fs_dir_inode_operations_dotu; static const struct inode_operations v9fs_file_inode_operations; static const struct inode_operations v9fs_symlink_inode_operations; /** * unixmode2p9mode - convert unix mode bits to plan 9 * @v9ses: v9fs session information * @mode: mode to convert * */ static u32 unixmode2p9mode(struct v9fs_session_info *v9ses, umode_t mode) { int res; res = mode & 0777; if (S_ISDIR(mode)) res |= P9_DMDIR; if (v9fs_proto_dotu(v9ses)) { if (v9ses->nodev == 0) { if (S_ISSOCK(mode)) res |= P9_DMSOCKET; if (S_ISFIFO(mode)) res |= P9_DMNAMEDPIPE; if (S_ISBLK(mode)) res |= P9_DMDEVICE; if (S_ISCHR(mode)) res |= P9_DMDEVICE; } if ((mode & S_ISUID) == S_ISUID) res |= P9_DMSETUID; if ((mode & S_ISGID) == S_ISGID) res |= P9_DMSETGID; if ((mode & S_ISVTX) == S_ISVTX) res |= P9_DMSETVTX; } return res; } /** * p9mode2perm- convert plan9 mode bits to unix permission bits * @v9ses: v9fs session information * @stat: p9_wstat from which mode need to be derived * */ static int p9mode2perm(struct v9fs_session_info *v9ses, struct p9_wstat *stat) { int res; int mode = stat->mode; res = mode & S_IALLUGO; if (v9fs_proto_dotu(v9ses)) { if ((mode & P9_DMSETUID) == P9_DMSETUID) res |= S_ISUID; if ((mode & P9_DMSETGID) == P9_DMSETGID) res |= S_ISGID; if ((mode & P9_DMSETVTX) == P9_DMSETVTX) res |= S_ISVTX; } return res; } /** * p9mode2unixmode- convert plan9 mode bits to unix mode bits * @v9ses: v9fs session information * @stat: p9_wstat from which mode need to be derived * @rdev: major number, minor number in case of device files. 
* */ static umode_t p9mode2unixmode(struct v9fs_session_info *v9ses, struct p9_wstat *stat, dev_t *rdev) { int res, r; u32 mode = stat->mode; *rdev = 0; res = p9mode2perm(v9ses, stat); if ((mode & P9_DMDIR) == P9_DMDIR) res |= S_IFDIR; else if ((mode & P9_DMSYMLINK) && (v9fs_proto_dotu(v9ses))) res |= S_IFLNK; else if ((mode & P9_DMSOCKET) && (v9fs_proto_dotu(v9ses)) && (v9ses->nodev == 0)) res |= S_IFSOCK; else if ((mode & P9_DMNAMEDPIPE) && (v9fs_proto_dotu(v9ses)) && (v9ses->nodev == 0)) res |= S_IFIFO; else if ((mode & P9_DMDEVICE) && (v9fs_proto_dotu(v9ses)) && (v9ses->nodev == 0)) { char type = 0; int major = -1, minor = -1; r = sscanf(stat->extension, "%c %i %i", &type, &major, &minor); if (r != 3) { p9_debug(P9_DEBUG_ERROR, "invalid device string, umode will be bogus: %s\n", stat->extension); return res; } switch (type) { case 'c': res |= S_IFCHR; break; case 'b': res |= S_IFBLK; break; default: p9_debug(P9_DEBUG_ERROR, "Unknown special type %c %s\n", type, stat->extension); } *rdev = MKDEV(major, minor); } else res |= S_IFREG; return res; } /** * v9fs_uflags2omode- convert posix open flags to plan 9 mode bits * @uflags: flags to convert * @extended: if .u extensions are active */ int v9fs_uflags2omode(int uflags, int extended) { int ret; switch (uflags&3) { default: case O_RDONLY: ret = P9_OREAD; break; case O_WRONLY: ret = P9_OWRITE; break; case O_RDWR: ret = P9_ORDWR; break; } if (extended) { if (uflags & O_EXCL) ret |= P9_OEXCL; if (uflags & O_APPEND) ret |= P9_OAPPEND; } return ret; } /** * v9fs_blank_wstat - helper function to setup a 9P stat structure * @wstat: structure to initialize * */ void v9fs_blank_wstat(struct p9_wstat *wstat) { wstat->type = ~0; wstat->dev = ~0; wstat->qid.type = ~0; wstat->qid.version = ~0; *((long long *)&wstat->qid.path) = ~0; wstat->mode = ~0; wstat->atime = ~0; wstat->mtime = ~0; wstat->length = ~0; wstat->name = NULL; wstat->uid = NULL; wstat->gid = NULL; wstat->muid = NULL; wstat->n_uid = INVALID_UID; wstat->n_gid = INVALID_GID; wstat->n_muid = INVALID_UID; wstat->extension = NULL; } /** * v9fs_alloc_inode - helper function to allocate an inode * @sb: The superblock to allocate the inode from */ struct inode *v9fs_alloc_inode(struct super_block *sb) { struct v9fs_inode *v9inode; v9inode = alloc_inode_sb(sb, v9fs_inode_cache, GFP_KERNEL); if (!v9inode) return NULL; v9inode->cache_validity = 0; mutex_init(&v9inode->v_mutex); return &v9inode->netfs.inode; } /** * v9fs_free_inode - destroy an inode * @inode: The inode to be freed */ void v9fs_free_inode(struct inode *inode) { kmem_cache_free(v9fs_inode_cache, V9FS_I(inode)); } /* * Set parameters for the netfs library */ static void v9fs_set_netfs_context(struct inode *inode) { struct v9fs_inode *v9inode = V9FS_I(inode); netfs_inode_init(&v9inode->netfs, &v9fs_req_ops); } int v9fs_init_inode(struct v9fs_session_info *v9ses, struct inode *inode, umode_t mode, dev_t rdev) { int err = 0; inode_init_owner(&nop_mnt_idmap, inode, NULL, mode); inode->i_blocks = 0; inode->i_rdev = rdev; inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode); inode->i_mapping->a_ops = &v9fs_addr_operations; inode->i_private = NULL; switch (mode & S_IFMT) { case S_IFIFO: case S_IFBLK: case S_IFCHR: case S_IFSOCK: if (v9fs_proto_dotl(v9ses)) { inode->i_op = &v9fs_file_inode_operations_dotl; } else if (v9fs_proto_dotu(v9ses)) { inode->i_op = &v9fs_file_inode_operations; } else { p9_debug(P9_DEBUG_ERROR, "special files without extended mode\n"); err = -EINVAL; goto error; } init_special_inode(inode, inode->i_mode, 
inode->i_rdev); break; case S_IFREG: if (v9fs_proto_dotl(v9ses)) { inode->i_op = &v9fs_file_inode_operations_dotl; inode->i_fop = &v9fs_file_operations_dotl; } else { inode->i_op = &v9fs_file_inode_operations; inode->i_fop = &v9fs_file_operations; } break; case S_IFLNK: if (!v9fs_proto_dotu(v9ses) && !v9fs_proto_dotl(v9ses)) { p9_debug(P9_DEBUG_ERROR, "extended modes used with legacy protocol\n"); err = -EINVAL; goto error; } if (v9fs_proto_dotl(v9ses)) inode->i_op = &v9fs_symlink_inode_operations_dotl; else inode->i_op = &v9fs_symlink_inode_operations; break; case S_IFDIR: inc_nlink(inode); if (v9fs_proto_dotl(v9ses)) inode->i_op = &v9fs_dir_inode_operations_dotl; else if (v9fs_proto_dotu(v9ses)) inode->i_op = &v9fs_dir_inode_operations_dotu; else inode->i_op = &v9fs_dir_inode_operations; if (v9fs_proto_dotl(v9ses)) inode->i_fop = &v9fs_dir_operations_dotl; else inode->i_fop = &v9fs_dir_operations; break; default: p9_debug(P9_DEBUG_ERROR, "BAD mode 0x%hx S_IFMT 0x%x\n", mode, mode & S_IFMT); err = -EINVAL; goto error; } v9fs_set_netfs_context(inode); error: return err; } /** * v9fs_get_inode - helper function to setup an inode * @sb: superblock * @mode: mode to setup inode with * @rdev: The device numbers to set */ struct inode *v9fs_get_inode(struct super_block *sb, umode_t mode, dev_t rdev) { int err; struct inode *inode; struct v9fs_session_info *v9ses = sb->s_fs_info; p9_debug(P9_DEBUG_VFS, "super block: %p mode: %ho\n", sb, mode); inode = new_inode(sb); if (!inode) { pr_warn("%s (%d): Problem allocating inode\n", __func__, task_pid_nr(current)); return ERR_PTR(-ENOMEM); } err = v9fs_init_inode(v9ses, inode, mode, rdev); if (err) { iput(inode); return ERR_PTR(err); } return inode; } /** * v9fs_evict_inode - Remove an inode from the inode cache * @inode: inode to release * */ void v9fs_evict_inode(struct inode *inode) { struct v9fs_inode __maybe_unused *v9inode = V9FS_I(inode); __le32 __maybe_unused version; truncate_inode_pages_final(&inode->i_data); #ifdef CONFIG_9P_FSCACHE version = cpu_to_le32(v9inode->qid.version); fscache_clear_inode_writeback(v9fs_inode_cookie(v9inode), inode, &version); #endif clear_inode(inode); filemap_fdatawrite(&inode->i_data); #ifdef CONFIG_9P_FSCACHE fscache_relinquish_cookie(v9fs_inode_cookie(v9inode), false); #endif } static int v9fs_test_inode(struct inode *inode, void *data) { int umode; dev_t rdev; struct v9fs_inode *v9inode = V9FS_I(inode); struct p9_wstat *st = (struct p9_wstat *)data; struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode); umode = p9mode2unixmode(v9ses, st, &rdev); /* don't match inode of different type */ if (inode_wrong_type(inode, umode)) return 0; /* compare qid details */ if (memcmp(&v9inode->qid.version, &st->qid.version, sizeof(v9inode->qid.version))) return 0; if (v9inode->qid.type != st->qid.type) return 0; if (v9inode->qid.path != st->qid.path) return 0; return 1; } static int v9fs_test_new_inode(struct inode *inode, void *data) { return 0; } static int v9fs_set_inode(struct inode *inode, void *data) { struct v9fs_inode *v9inode = V9FS_I(inode); struct p9_wstat *st = (struct p9_wstat *)data; memcpy(&v9inode->qid, &st->qid, sizeof(st->qid)); return 0; } static struct inode *v9fs_qid_iget(struct super_block *sb, struct p9_qid *qid, struct p9_wstat *st, int new) { dev_t rdev; int retval; umode_t umode; unsigned long i_ino; struct inode *inode; struct v9fs_session_info *v9ses = sb->s_fs_info; int (*test)(struct inode *inode, void *data); if (new) test = v9fs_test_new_inode; else test = v9fs_test_inode; i_ino = 
v9fs_qid2ino(qid); inode = iget5_locked(sb, i_ino, test, v9fs_set_inode, st); if (!inode) return ERR_PTR(-ENOMEM); if (!(inode->i_state & I_NEW)) return inode; /* * initialize the inode with the stat info * FIXME!! we may need support for stale inodes * later. */ inode->i_ino = i_ino; umode = p9mode2unixmode(v9ses, st, &rdev); retval = v9fs_init_inode(v9ses, inode, umode, rdev); if (retval) goto error; v9fs_stat2inode(st, inode, sb, 0); v9fs_cache_inode_get_cookie(inode); unlock_new_inode(inode); return inode; error: iget_failed(inode); return ERR_PTR(retval); } struct inode * v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid, struct super_block *sb, int new) { struct p9_wstat *st; struct inode *inode = NULL; st = p9_client_stat(fid); if (IS_ERR(st)) return ERR_CAST(st); inode = v9fs_qid_iget(sb, &st->qid, st, new); p9stat_free(st); kfree(st); return inode; } /** * v9fs_at_to_dotl_flags- convert Linux specific AT flags to * plan 9 AT flag. * @flags: flags to convert */ static int v9fs_at_to_dotl_flags(int flags) { int rflags = 0; if (flags & AT_REMOVEDIR) rflags |= P9_DOTL_AT_REMOVEDIR; return rflags; } /** * v9fs_dec_count - helper functon to drop i_nlink. * * If a directory had nlink <= 2 (including . and ..), then we should not drop * the link count, which indicates the underlying exported fs doesn't maintain * nlink accurately. e.g. * - overlayfs sets nlink to 1 for merged dir * - ext4 (with dir_nlink feature enabled) sets nlink to 1 if a dir has more * than EXT4_LINK_MAX (65000) links. * * @inode: inode whose nlink is being dropped */ static void v9fs_dec_count(struct inode *inode) { if (!S_ISDIR(inode->i_mode) || inode->i_nlink > 2) drop_nlink(inode); } /** * v9fs_remove - helper function to remove files and directories * @dir: directory inode that is being deleted * @dentry: dentry that is being deleted * @flags: removing a directory * */ static int v9fs_remove(struct inode *dir, struct dentry *dentry, int flags) { struct inode *inode; int retval = -EOPNOTSUPP; struct p9_fid *v9fid, *dfid; struct v9fs_session_info *v9ses; p9_debug(P9_DEBUG_VFS, "inode: %p dentry: %p rmdir: %x\n", dir, dentry, flags); v9ses = v9fs_inode2v9ses(dir); inode = d_inode(dentry); dfid = v9fs_parent_fid(dentry); if (IS_ERR(dfid)) { retval = PTR_ERR(dfid); p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", retval); return retval; } if (v9fs_proto_dotl(v9ses)) retval = p9_client_unlinkat(dfid, dentry->d_name.name, v9fs_at_to_dotl_flags(flags)); p9_fid_put(dfid); if (retval == -EOPNOTSUPP) { /* Try the one based on path */ v9fid = v9fs_fid_clone(dentry); if (IS_ERR(v9fid)) return PTR_ERR(v9fid); retval = p9_client_remove(v9fid); } if (!retval) { /* * directories on unlink should have zero * link count */ if (flags & AT_REMOVEDIR) { clear_nlink(inode); v9fs_dec_count(dir); } else v9fs_dec_count(inode); v9fs_invalidate_inode_attr(inode); v9fs_invalidate_inode_attr(dir); /* invalidate all fids associated with dentry */ /* NOTE: This will not include open fids */ dentry->d_op->d_release(dentry); } return retval; } /** * v9fs_create - Create a file * @v9ses: session information * @dir: directory that dentry is being created in * @dentry: dentry that is being created * @extension: 9p2000.u extension string to support devices, etc. 
* @perm: create permissions * @mode: open mode * */ static struct p9_fid * v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir, struct dentry *dentry, char *extension, u32 perm, u8 mode) { int err; const unsigned char *name; struct p9_fid *dfid, *ofid = NULL, *fid = NULL; struct inode *inode; p9_debug(P9_DEBUG_VFS, "name %pd\n", dentry); name = dentry->d_name.name; dfid = v9fs_parent_fid(dentry); if (IS_ERR(dfid)) { err = PTR_ERR(dfid); p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", err); return ERR_PTR(err); } /* clone a fid to use for creation */ ofid = clone_fid(dfid); if (IS_ERR(ofid)) { err = PTR_ERR(ofid); p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err); goto error; } err = p9_client_fcreate(ofid, name, perm, mode, extension); if (err < 0) { p9_debug(P9_DEBUG_VFS, "p9_client_fcreate failed %d\n", err); goto error; } if (!(perm & P9_DMLINK)) { /* now walk from the parent so we can get unopened fid */ fid = p9_client_walk(dfid, 1, &name, 1); if (IS_ERR(fid)) { err = PTR_ERR(fid); p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err); goto error; } /* * instantiate inode and assign the unopened fid to the dentry */ inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb); if (IS_ERR(inode)) { err = PTR_ERR(inode); p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n", err); goto error; } v9fs_fid_add(dentry, &fid); d_instantiate(dentry, inode); } p9_fid_put(dfid); return ofid; error: p9_fid_put(dfid); p9_fid_put(ofid); p9_fid_put(fid); return ERR_PTR(err); } /** * v9fs_vfs_create - VFS hook to create a regular file * @idmap: idmap of the mount * @dir: The parent directory * @dentry: The name of file to be created * @mode: The UNIX file mode to set * @excl: True if the file must not yet exist * * open(.., O_CREAT) is handled in v9fs_vfs_atomic_open(). This is only called * for mknod(2). * */ static int v9fs_vfs_create(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) { struct v9fs_session_info *v9ses = v9fs_inode2v9ses(dir); u32 perm = unixmode2p9mode(v9ses, mode); struct p9_fid *fid; /* P9_OEXCL? */ fid = v9fs_create(v9ses, dir, dentry, NULL, perm, P9_ORDWR); if (IS_ERR(fid)) return PTR_ERR(fid); v9fs_invalidate_inode_attr(dir); p9_fid_put(fid); return 0; } /** * v9fs_vfs_mkdir - VFS mkdir hook to create a directory * @idmap: idmap of the mount * @dir: inode that is being unlinked * @dentry: dentry that is being unlinked * @mode: mode for new directory * */ static int v9fs_vfs_mkdir(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode) { int err; u32 perm; struct p9_fid *fid; struct v9fs_session_info *v9ses; p9_debug(P9_DEBUG_VFS, "name %pd\n", dentry); err = 0; v9ses = v9fs_inode2v9ses(dir); perm = unixmode2p9mode(v9ses, mode | S_IFDIR); fid = v9fs_create(v9ses, dir, dentry, NULL, perm, P9_OREAD); if (IS_ERR(fid)) { err = PTR_ERR(fid); fid = NULL; } else { inc_nlink(dir); v9fs_invalidate_inode_attr(dir); } if (fid) p9_fid_put(fid); return err; } /** * v9fs_vfs_lookup - VFS lookup hook to "walk" to a new inode * @dir: inode that is being walked from * @dentry: dentry that is being walked to? 
* @flags: lookup flags (unused) * */ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { struct dentry *res; struct v9fs_session_info *v9ses; struct p9_fid *dfid, *fid; struct inode *inode; const unsigned char *name; p9_debug(P9_DEBUG_VFS, "dir: %p dentry: (%pd) %p flags: %x\n", dir, dentry, dentry, flags); if (dentry->d_name.len > NAME_MAX) return ERR_PTR(-ENAMETOOLONG); v9ses = v9fs_inode2v9ses(dir); /* We can walk d_parent because we hold the dir->i_mutex */ dfid = v9fs_parent_fid(dentry); if (IS_ERR(dfid)) return ERR_CAST(dfid); /* * Make sure we don't use a wrong inode due to parallel * unlink. For cached mode create calls request for new * inode. But with cache disabled, lookup should do this. */ name = dentry->d_name.name; fid = p9_client_walk(dfid, 1, &name, 1); p9_fid_put(dfid); if (fid == ERR_PTR(-ENOENT)) inode = NULL; else if (IS_ERR(fid)) inode = ERR_CAST(fid); else if (v9ses->cache & (CACHE_META|CACHE_LOOSE)) inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb); else inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb); /* * If we had a rename on the server and a parallel lookup * for the new name, then make sure we instantiate with * the new name. ie look up for a/b, while on server somebody * moved b under k and client parallely did a lookup for * k/b. */ res = d_splice_alias(inode, dentry); if (!IS_ERR(fid)) { if (!res) v9fs_fid_add(dentry, &fid); else if (!IS_ERR(res)) v9fs_fid_add(res, &fid); else p9_fid_put(fid); } return res; } static int v9fs_vfs_atomic_open(struct inode *dir, struct dentry *dentry, struct file *file, unsigned int flags, umode_t mode) { int err; u32 perm; struct v9fs_inode __maybe_unused *v9inode; struct v9fs_session_info *v9ses; struct p9_fid *fid; struct dentry *res = NULL; struct inode *inode; int p9_omode; if (d_in_lookup(dentry)) { res = v9fs_vfs_lookup(dir, dentry, 0); if (IS_ERR(res)) return PTR_ERR(res); if (res) dentry = res; } /* Only creates */ if (!(flags & O_CREAT) || d_really_is_positive(dentry)) return finish_no_open(file, res); v9ses = v9fs_inode2v9ses(dir); perm = unixmode2p9mode(v9ses, mode); p9_omode = v9fs_uflags2omode(flags, v9fs_proto_dotu(v9ses)); if ((v9ses->cache & CACHE_WRITEBACK) && (p9_omode & P9_OWRITE)) { p9_omode = (p9_omode & ~P9_OWRITE) | P9_ORDWR; p9_debug(P9_DEBUG_CACHE, "write-only file with writeback enabled, creating w/ O_RDWR\n"); } fid = v9fs_create(v9ses, dir, dentry, NULL, perm, p9_omode); if (IS_ERR(fid)) { err = PTR_ERR(fid); goto error; } v9fs_invalidate_inode_attr(dir); inode = d_inode(dentry); v9inode = V9FS_I(inode); err = finish_open(file, dentry, generic_file_open); if (err) goto error; file->private_data = fid; #ifdef CONFIG_9P_FSCACHE if (v9ses->cache & CACHE_FSCACHE) fscache_use_cookie(v9fs_inode_cookie(v9inode), file->f_mode & FMODE_WRITE); #endif v9fs_fid_add_modes(fid, v9ses->flags, v9ses->cache, file->f_flags); v9fs_open_fid_add(inode, &fid); file->f_mode |= FMODE_CREATED; out: dput(res); return err; error: p9_fid_put(fid); goto out; } /** * v9fs_vfs_unlink - VFS unlink hook to delete an inode * @i: inode that is being unlinked * @d: dentry that is being unlinked * */ int v9fs_vfs_unlink(struct inode *i, struct dentry *d) { return v9fs_remove(i, d, 0); } /** * v9fs_vfs_rmdir - VFS unlink hook to delete a directory * @i: inode that is being unlinked * @d: dentry that is being unlinked * */ int v9fs_vfs_rmdir(struct inode *i, struct dentry *d) { return v9fs_remove(i, d, AT_REMOVEDIR); } /** * v9fs_vfs_rename - VFS hook to rename an inode * @idmap: 
The idmap of the mount * @old_dir: old dir inode * @old_dentry: old dentry * @new_dir: new dir inode * @new_dentry: new dentry * @flags: RENAME_* flags * */ int v9fs_vfs_rename(struct mnt_idmap *idmap, struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) { int retval; struct inode *old_inode; struct inode *new_inode; struct v9fs_session_info *v9ses; struct p9_fid *oldfid = NULL, *dfid = NULL; struct p9_fid *olddirfid = NULL; struct p9_fid *newdirfid = NULL; struct p9_wstat wstat; if (flags) return -EINVAL; p9_debug(P9_DEBUG_VFS, "\n"); old_inode = d_inode(old_dentry); new_inode = d_inode(new_dentry); v9ses = v9fs_inode2v9ses(old_inode); oldfid = v9fs_fid_lookup(old_dentry); if (IS_ERR(oldfid)) return PTR_ERR(oldfid); dfid = v9fs_parent_fid(old_dentry); olddirfid = clone_fid(dfid); p9_fid_put(dfid); dfid = NULL; if (IS_ERR(olddirfid)) { retval = PTR_ERR(olddirfid); goto error; } dfid = v9fs_parent_fid(new_dentry); newdirfid = clone_fid(dfid); p9_fid_put(dfid); dfid = NULL; if (IS_ERR(newdirfid)) { retval = PTR_ERR(newdirfid); goto error; } down_write(&v9ses->rename_sem); if (v9fs_proto_dotl(v9ses)) { retval = p9_client_renameat(olddirfid, old_dentry->d_name.name, newdirfid, new_dentry->d_name.name); if (retval == -EOPNOTSUPP) retval = p9_client_rename(oldfid, newdirfid, new_dentry->d_name.name); if (retval != -EOPNOTSUPP) goto error_locked; } if (old_dentry->d_parent != new_dentry->d_parent) { /* * 9P .u can only handle file rename in the same directory */ p9_debug(P9_DEBUG_ERROR, "old dir and new dir are different\n"); retval = -EXDEV; goto error_locked; } v9fs_blank_wstat(&wstat); wstat.muid = v9ses->uname; wstat.name = new_dentry->d_name.name; retval = p9_client_wstat(oldfid, &wstat); error_locked: if (!retval) { if (new_inode) { if (S_ISDIR(new_inode->i_mode)) clear_nlink(new_inode); else v9fs_dec_count(new_inode); } if (S_ISDIR(old_inode->i_mode)) { if (!new_inode) inc_nlink(new_dir); v9fs_dec_count(old_dir); } v9fs_invalidate_inode_attr(old_inode); v9fs_invalidate_inode_attr(old_dir); v9fs_invalidate_inode_attr(new_dir); /* successful rename */ d_move(old_dentry, new_dentry); } up_write(&v9ses->rename_sem); error: p9_fid_put(newdirfid); p9_fid_put(olddirfid); p9_fid_put(oldfid); return retval; } /** * v9fs_vfs_getattr - retrieve file metadata * @idmap: idmap of the mount * @path: Object to query * @stat: metadata structure to populate * @request_mask: Mask of STATX_xxx flags indicating the caller's interests * @flags: AT_STATX_xxx setting * */ static int v9fs_vfs_getattr(struct mnt_idmap *idmap, const struct path *path, struct kstat *stat, u32 request_mask, unsigned int flags) { struct dentry *dentry = path->dentry; struct inode *inode = d_inode(dentry); struct v9fs_session_info *v9ses; struct p9_fid *fid; struct p9_wstat *st; p9_debug(P9_DEBUG_VFS, "dentry: %p\n", dentry); v9ses = v9fs_dentry2v9ses(dentry); if (v9ses->cache & (CACHE_META|CACHE_LOOSE)) { generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat); return 0; } else if (v9ses->cache & CACHE_WRITEBACK) { if (S_ISREG(inode->i_mode)) { int retval = filemap_fdatawrite(inode->i_mapping); if (retval) p9_debug(P9_DEBUG_ERROR, "flushing writeback during getattr returned %d\n", retval); } } fid = v9fs_fid_lookup(dentry); if (IS_ERR(fid)) return PTR_ERR(fid); st = p9_client_stat(fid); p9_fid_put(fid); if (IS_ERR(st)) return PTR_ERR(st); v9fs_stat2inode(st, d_inode(dentry), dentry->d_sb, 0); generic_fillattr(&nop_mnt_idmap, request_mask, d_inode(dentry), stat); 
p9stat_free(st); kfree(st); return 0; } /** * v9fs_vfs_setattr - set file metadata * @idmap: idmap of the mount * @dentry: file whose metadata to set * @iattr: metadata assignment structure * */ static int v9fs_vfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *iattr) { int retval, use_dentry = 0; struct inode *inode = d_inode(dentry); struct v9fs_session_info *v9ses; struct p9_fid *fid = NULL; struct p9_wstat wstat; p9_debug(P9_DEBUG_VFS, "\n"); retval = setattr_prepare(&nop_mnt_idmap, dentry, iattr); if (retval) return retval; v9ses = v9fs_dentry2v9ses(dentry); if (iattr->ia_valid & ATTR_FILE) { fid = iattr->ia_file->private_data; WARN_ON(!fid); } if (!fid) { fid = v9fs_fid_lookup(dentry); use_dentry = 1; } if (IS_ERR(fid)) return PTR_ERR(fid); v9fs_blank_wstat(&wstat); if (iattr->ia_valid & ATTR_MODE) wstat.mode = unixmode2p9mode(v9ses, iattr->ia_mode); if (iattr->ia_valid & ATTR_MTIME) wstat.mtime = iattr->ia_mtime.tv_sec; if (iattr->ia_valid & ATTR_ATIME) wstat.atime = iattr->ia_atime.tv_sec; if (iattr->ia_valid & ATTR_SIZE) wstat.length = iattr->ia_size; if (v9fs_proto_dotu(v9ses)) { if (iattr->ia_valid & ATTR_UID) wstat.n_uid = iattr->ia_uid; if (iattr->ia_valid & ATTR_GID) wstat.n_gid = iattr->ia_gid; } /* Write all dirty data */ if (d_is_reg(dentry)) { retval = filemap_fdatawrite(inode->i_mapping); if (retval) p9_debug(P9_DEBUG_ERROR, "flushing writeback during setattr returned %d\n", retval); } retval = p9_client_wstat(fid, &wstat); if (use_dentry) p9_fid_put(fid); if (retval < 0) return retval; if ((iattr->ia_valid & ATTR_SIZE) && iattr->ia_size != i_size_read(inode)) { truncate_setsize(inode, iattr->ia_size); truncate_pagecache(inode, iattr->ia_size); #ifdef CONFIG_9P_FSCACHE if (v9ses->cache & CACHE_FSCACHE) { struct v9fs_inode *v9inode = V9FS_I(inode); fscache_resize_cookie(v9fs_inode_cookie(v9inode), iattr->ia_size); } #endif } v9fs_invalidate_inode_attr(inode); setattr_copy(&nop_mnt_idmap, inode, iattr); mark_inode_dirty(inode); return 0; } /** * v9fs_stat2inode - populate an inode structure with mistat info * @stat: Plan 9 metadata (mistat) structure * @inode: inode to populate * @sb: superblock of filesystem * @flags: control flags (e.g. V9FS_STAT2INODE_KEEP_ISIZE) * */ void v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode, struct super_block *sb, unsigned int flags) { umode_t mode; struct v9fs_session_info *v9ses = sb->s_fs_info; struct v9fs_inode *v9inode = V9FS_I(inode); set_nlink(inode, 1); inode->i_atime.tv_sec = stat->atime; inode->i_mtime.tv_sec = stat->mtime; inode_set_ctime(inode, stat->mtime, 0); inode->i_uid = v9ses->dfltuid; inode->i_gid = v9ses->dfltgid; if (v9fs_proto_dotu(v9ses)) { inode->i_uid = stat->n_uid; inode->i_gid = stat->n_gid; } if ((S_ISREG(inode->i_mode)) || (S_ISDIR(inode->i_mode))) { if (v9fs_proto_dotu(v9ses)) { unsigned int i_nlink; /* * Hadlink support got added later to the .u extension. * So there can be a server out there that doesn't * support this even with .u extension. That would * just leave us with stat->extension being an empty * string, though. */ /* HARDLINKCOUNT %u */ if (sscanf(stat->extension, " HARDLINKCOUNT %u", &i_nlink) == 1) set_nlink(inode, i_nlink); } } mode = p9mode2perm(v9ses, stat); mode |= inode->i_mode & ~S_IALLUGO; inode->i_mode = mode; if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE)) v9fs_i_size_write(inode, stat->length); /* not real number of blocks, but 512 byte ones ... 
*/ inode->i_blocks = (stat->length + 512 - 1) >> 9; v9inode->cache_validity &= ~V9FS_INO_INVALID_ATTR; } /** * v9fs_qid2ino - convert qid into inode number * @qid: qid to hash * * BUG: potential for inode number collisions? */ ino_t v9fs_qid2ino(struct p9_qid *qid) { u64 path = qid->path + 2; ino_t i = 0; if (sizeof(ino_t) == sizeof(path)) memcpy(&i, &path, sizeof(ino_t)); else i = (ino_t) (path ^ (path >> 32)); return i; } /** * v9fs_vfs_get_link - follow a symlink path * @dentry: dentry for symlink * @inode: inode for symlink * @done: delayed call for when we are done with the return value */ static const char *v9fs_vfs_get_link(struct dentry *dentry, struct inode *inode, struct delayed_call *done) { struct v9fs_session_info *v9ses; struct p9_fid *fid; struct p9_wstat *st; char *res; if (!dentry) return ERR_PTR(-ECHILD); v9ses = v9fs_dentry2v9ses(dentry); if (!v9fs_proto_dotu(v9ses)) return ERR_PTR(-EBADF); p9_debug(P9_DEBUG_VFS, "%pd\n", dentry); fid = v9fs_fid_lookup(dentry); if (IS_ERR(fid)) return ERR_CAST(fid); st = p9_client_stat(fid); p9_fid_put(fid); if (IS_ERR(st)) return ERR_CAST(st); if (!(st->mode & P9_DMSYMLINK)) { p9stat_free(st); kfree(st); return ERR_PTR(-EINVAL); } res = st->extension; st->extension = NULL; if (strlen(res) >= PATH_MAX) res[PATH_MAX - 1] = '\0'; p9stat_free(st); kfree(st); set_delayed_call(done, kfree_link, res); return res; } /** * v9fs_vfs_mkspecial - create a special file * @dir: inode to create special file in * @dentry: dentry to create * @perm: mode to create special file * @extension: 9p2000.u format extension string representing special file * */ static int v9fs_vfs_mkspecial(struct inode *dir, struct dentry *dentry, u32 perm, const char *extension) { struct p9_fid *fid; struct v9fs_session_info *v9ses; v9ses = v9fs_inode2v9ses(dir); if (!v9fs_proto_dotu(v9ses)) { p9_debug(P9_DEBUG_ERROR, "not extended\n"); return -EPERM; } fid = v9fs_create(v9ses, dir, dentry, (char *) extension, perm, P9_OREAD); if (IS_ERR(fid)) return PTR_ERR(fid); v9fs_invalidate_inode_attr(dir); p9_fid_put(fid); return 0; } /** * v9fs_vfs_symlink - helper function to create symlinks * @idmap: idmap of the mount * @dir: directory inode containing symlink * @dentry: dentry for symlink * @symname: symlink data * * See Also: 9P2000.u RFC for more information * */ static int v9fs_vfs_symlink(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, const char *symname) { p9_debug(P9_DEBUG_VFS, " %lu,%pd,%s\n", dir->i_ino, dentry, symname); return v9fs_vfs_mkspecial(dir, dentry, P9_DMSYMLINK, symname); } #define U32_MAX_DIGITS 10 /** * v9fs_vfs_link - create a hardlink * @old_dentry: dentry for file to link to * @dir: inode destination for new link * @dentry: dentry for link * */ static int v9fs_vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { int retval; char name[1 + U32_MAX_DIGITS + 2]; /* sign + number + \n + \0 */ struct p9_fid *oldfid; p9_debug(P9_DEBUG_VFS, " %lu,%pd,%pd\n", dir->i_ino, dentry, old_dentry); oldfid = v9fs_fid_clone(old_dentry); if (IS_ERR(oldfid)) return PTR_ERR(oldfid); sprintf(name, "%d\n", oldfid->fid); retval = v9fs_vfs_mkspecial(dir, dentry, P9_DMLINK, name); if (!retval) { v9fs_refresh_inode(oldfid, d_inode(old_dentry)); v9fs_invalidate_inode_attr(dir); } p9_fid_put(oldfid); return retval; } /** * v9fs_vfs_mknod - create a special file * @idmap: idmap of the mount * @dir: inode destination for new link * @dentry: dentry for file * @mode: mode for creation * @rdev: device associated with special file * */ static 
int v9fs_vfs_mknod(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev) { struct v9fs_session_info *v9ses = v9fs_inode2v9ses(dir); int retval; char name[2 + U32_MAX_DIGITS + 1 + U32_MAX_DIGITS + 1]; u32 perm; p9_debug(P9_DEBUG_VFS, " %lu,%pd mode: %x MAJOR: %u MINOR: %u\n", dir->i_ino, dentry, mode, MAJOR(rdev), MINOR(rdev)); /* build extension */ if (S_ISBLK(mode)) sprintf(name, "b %u %u", MAJOR(rdev), MINOR(rdev)); else if (S_ISCHR(mode)) sprintf(name, "c %u %u", MAJOR(rdev), MINOR(rdev)); else *name = 0; perm = unixmode2p9mode(v9ses, mode); retval = v9fs_vfs_mkspecial(dir, dentry, perm, name); return retval; } int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode) { int umode; dev_t rdev; struct p9_wstat *st; struct v9fs_session_info *v9ses; unsigned int flags; v9ses = v9fs_inode2v9ses(inode); st = p9_client_stat(fid); if (IS_ERR(st)) return PTR_ERR(st); /* * Don't update inode if the file type is different */ umode = p9mode2unixmode(v9ses, st, &rdev); if (inode_wrong_type(inode, umode)) goto out; /* * We don't want to refresh inode->i_size, * because we may have cached data */ flags = (v9ses->cache & CACHE_LOOSE) ? V9FS_STAT2INODE_KEEP_ISIZE : 0; v9fs_stat2inode(st, inode, inode->i_sb, flags); out: p9stat_free(st); kfree(st); return 0; } static const struct inode_operations v9fs_dir_inode_operations_dotu = { .create = v9fs_vfs_create, .lookup = v9fs_vfs_lookup, .atomic_open = v9fs_vfs_atomic_open, .symlink = v9fs_vfs_symlink, .link = v9fs_vfs_link, .unlink = v9fs_vfs_unlink, .mkdir = v9fs_vfs_mkdir, .rmdir = v9fs_vfs_rmdir, .mknod = v9fs_vfs_mknod, .rename = v9fs_vfs_rename, .getattr = v9fs_vfs_getattr, .setattr = v9fs_vfs_setattr, }; static const struct inode_operations v9fs_dir_inode_operations = { .create = v9fs_vfs_create, .lookup = v9fs_vfs_lookup, .atomic_open = v9fs_vfs_atomic_open, .unlink = v9fs_vfs_unlink, .mkdir = v9fs_vfs_mkdir, .rmdir = v9fs_vfs_rmdir, .mknod = v9fs_vfs_mknod, .rename = v9fs_vfs_rename, .getattr = v9fs_vfs_getattr, .setattr = v9fs_vfs_setattr, }; static const struct inode_operations v9fs_file_inode_operations = { .getattr = v9fs_vfs_getattr, .setattr = v9fs_vfs_setattr, }; static const struct inode_operations v9fs_symlink_inode_operations = { .get_link = v9fs_vfs_get_link, .getattr = v9fs_vfs_getattr, .setattr = v9fs_vfs_setattr, };
linux-master
fs/9p/vfs_inode.c
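One self-contained detail worth pulling out of the file above: 9P2000.u encodes special files as plain text. v9fs_vfs_mknod() formats the extension as "b MAJOR MINOR" or "c MAJOR MINOR", and p9mode2unixmode() recovers the values with sscanf("%c %i %i"), treating the file as regular when the string does not parse. A hedged roundtrip sketch with illustrative helper names and no kernel calls:

#include <stdio.h>

/* encode like v9fs_vfs_mknod(): type letter plus decimal major/minor */
static void encode_dev(char *buf, size_t len, char type,
		       unsigned int major, unsigned int minor)
{
	snprintf(buf, len, "%c %u %u", type, major, minor);
}

/* decode like p9mode2unixmode(): all three fields must match */
static int decode_dev(const char *ext, char *type, int *major, int *minor)
{
	if (sscanf(ext, "%c %i %i", type, major, minor) != 3)
		return -1;	/* bogus extension string */
	return 0;
}

int main(void)
{
	char ext[32], type;
	int major, minor;

	encode_dev(ext, sizeof(ext), 'c', 5, 1);
	if (decode_dev(ext, &type, &major, &minor) == 0)
		printf("%s -> type=%c major=%d minor=%d\n",
		       ext, type, major, minor);
	return 0;
}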
// SPDX-License-Identifier: GPL-2.0-only /* * This file contains vfs directory ops for the 9P2000 protocol. * * Copyright (C) 2004 by Eric Van Hensbergen <[email protected]> * Copyright (C) 2002 by Ron Minnich <[email protected]> */ #include <linux/module.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/stat.h> #include <linux/string.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/uio.h> #include <linux/fscache.h> #include <net/9p/9p.h> #include <net/9p/client.h> #include "v9fs.h" #include "v9fs_vfs.h" #include "fid.h" /** * struct p9_rdir - readdir accounting * @head: start offset of current dirread buffer * @tail: end offset of current dirread buffer * @buf: dirread buffer * * private structure for keeping track of readdir * allocated on demand */ struct p9_rdir { int head; int tail; uint8_t buf[]; }; /** * dt_type - return file type * @mistat: mistat structure * */ static inline int dt_type(struct p9_wstat *mistat) { unsigned long perm = mistat->mode; int rettype = DT_REG; if (perm & P9_DMDIR) rettype = DT_DIR; if (perm & P9_DMSYMLINK) rettype = DT_LNK; return rettype; } /** * v9fs_alloc_rdir_buf - Allocate buffer used for read and readdir * @filp: opened file structure * @buflen: Length in bytes of buffer to allocate * */ static struct p9_rdir *v9fs_alloc_rdir_buf(struct file *filp, int buflen) { struct p9_fid *fid = filp->private_data; if (!fid->rdir) fid->rdir = kzalloc(sizeof(struct p9_rdir) + buflen, GFP_KERNEL); return fid->rdir; } /** * v9fs_dir_readdir - iterate through a directory * @file: opened file structure * @ctx: actor we feed the entries to * */ static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx) { bool over; struct p9_wstat st; int err = 0; struct p9_fid *fid; int buflen; struct p9_rdir *rdir; struct kvec kvec; p9_debug(P9_DEBUG_VFS, "name %pD\n", file); fid = file->private_data; buflen = fid->clnt->msize - P9_IOHDRSZ; rdir = v9fs_alloc_rdir_buf(file, buflen); if (!rdir) return -ENOMEM; kvec.iov_base = rdir->buf; kvec.iov_len = buflen; while (1) { if (rdir->tail == rdir->head) { struct iov_iter to; int n; iov_iter_kvec(&to, ITER_DEST, &kvec, 1, buflen); n = p9_client_read(file->private_data, ctx->pos, &to, &err); if (err) return err; if (n == 0) return 0; rdir->head = 0; rdir->tail = n; } while (rdir->head < rdir->tail) { err = p9stat_read(fid->clnt, rdir->buf + rdir->head, rdir->tail - rdir->head, &st); if (err <= 0) { p9_debug(P9_DEBUG_VFS, "returned %d\n", err); return -EIO; } over = !dir_emit(ctx, st.name, strlen(st.name), v9fs_qid2ino(&st.qid), dt_type(&st)); p9stat_free(&st); if (over) return 0; rdir->head += err; ctx->pos += err; } } } /** * v9fs_dir_readdir_dotl - iterate through a directory * @file: opened file structure * @ctx: actor we feed the entries to * */ static int v9fs_dir_readdir_dotl(struct file *file, struct dir_context *ctx) { int err = 0; struct p9_fid *fid; int buflen; struct p9_rdir *rdir; struct p9_dirent curdirent; p9_debug(P9_DEBUG_VFS, "name %pD\n", file); fid = file->private_data; buflen = fid->clnt->msize - P9_READDIRHDRSZ; rdir = v9fs_alloc_rdir_buf(file, buflen); if (!rdir) return -ENOMEM; while (1) { if (rdir->tail == rdir->head) { err = p9_client_readdir(fid, rdir->buf, buflen, ctx->pos); if (err <= 0) return err; rdir->head = 0; rdir->tail = err; } while (rdir->head < rdir->tail) { err = p9dirent_read(fid->clnt, rdir->buf + rdir->head, rdir->tail - rdir->head, &curdirent); if (err < 0) { p9_debug(P9_DEBUG_VFS, "returned %d\n", err); return -EIO; } if 
(!dir_emit(ctx, curdirent.d_name, strlen(curdirent.d_name), v9fs_qid2ino(&curdirent.qid), curdirent.d_type)) return 0; ctx->pos = curdirent.d_off; rdir->head += err; } } } /** * v9fs_dir_release - close a directory or a file * @inode: inode of the directory or file * @filp: file pointer to a directory or file * */ int v9fs_dir_release(struct inode *inode, struct file *filp) { struct v9fs_inode *v9inode = V9FS_I(inode); struct p9_fid *fid; __le32 version; loff_t i_size; int retval = 0, put_err; fid = filp->private_data; p9_debug(P9_DEBUG_VFS, "inode: %p filp: %p fid: %d\n", inode, filp, fid ? fid->fid : -1); if (fid) { if ((S_ISREG(inode->i_mode)) && (filp->f_mode & FMODE_WRITE)) retval = filemap_fdatawrite(inode->i_mapping); spin_lock(&inode->i_lock); hlist_del(&fid->ilist); spin_unlock(&inode->i_lock); put_err = p9_fid_put(fid); retval = retval < 0 ? retval : put_err; } if ((filp->f_mode & FMODE_WRITE)) { version = cpu_to_le32(v9inode->qid.version); i_size = i_size_read(inode); fscache_unuse_cookie(v9fs_inode_cookie(v9inode), &version, &i_size); } else { fscache_unuse_cookie(v9fs_inode_cookie(v9inode), NULL, NULL); } return retval; } const struct file_operations v9fs_dir_operations = { .read = generic_read_dir, .llseek = generic_file_llseek, .iterate_shared = v9fs_dir_readdir, .open = v9fs_file_open, .release = v9fs_dir_release, }; const struct file_operations v9fs_dir_operations_dotl = { .read = generic_read_dir, .llseek = generic_file_llseek, .iterate_shared = v9fs_dir_readdir_dotl, .open = v9fs_file_open, .release = v9fs_dir_release, .fsync = v9fs_file_fsync_dotl, };
linux-master
fs/9p/vfs_dir.c
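v9fs_dir_readdir() above drains its struct p9_rdir buffer with plain head/tail accounting: the buffer is refilled from the server only once head has caught up with tail, and each decoded entry then advances head by the number of bytes it consumed. A hedged userspace model of that loop follows; fill_sim() and the fixed 4-byte "entries" stand in for p9_client_read() and p9stat_read().

#include <stdio.h>
#include <string.h>

#define BUFLEN 16

/* stand-in for p9_client_read(): returns up to BUFLEN bytes, then 0 at EOF */
static int fill_sim(char *buf, int buflen, int *remaining)
{
	int n = *remaining < buflen ? *remaining : buflen;

	memset(buf, 'x', n);	/* fake directory entry bytes */
	*remaining -= n;
	return n;
}

int main(void)
{
	char buf[BUFLEN];
	int head = 0, tail = 0, remaining = 40, consumed = 0;

	for (;;) {
		if (head == tail) {	/* buffer drained: refill */
			int n = fill_sim(buf, BUFLEN, &remaining);

			if (n == 0)
				break;	/* end of directory */
			head = 0;
			tail = n;
		}
		while (head < tail) {	/* "decode" one fixed-size entry */
			int entry_len = (tail - head) < 4 ? (tail - head) : 4;

			consumed += entry_len;
			head += entry_len;
		}
	}
	printf("consumed %d bytes of directory data\n", consumed);	/* 40 */
	return 0;
}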
// SPDX-License-Identifier: LGPL-2.1 /* * Copyright IBM Corporation, 2010 * Author Aneesh Kumar K.V <[email protected]> */ #include <linux/module.h> #include <linux/fs.h> #include <net/9p/9p.h> #include <net/9p/client.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/posix_acl_xattr.h> #include "xattr.h" #include "acl.h" #include "v9fs.h" #include "v9fs_vfs.h" #include "fid.h" static struct posix_acl *v9fs_fid_get_acl(struct p9_fid *fid, const char *name) { ssize_t size; void *value = NULL; struct posix_acl *acl = NULL; size = v9fs_fid_xattr_get(fid, name, NULL, 0); if (size < 0) return ERR_PTR(size); if (size == 0) return ERR_PTR(-ENODATA); value = kzalloc(size, GFP_NOFS); if (!value) return ERR_PTR(-ENOMEM); size = v9fs_fid_xattr_get(fid, name, value, size); if (size < 0) acl = ERR_PTR(size); else if (size == 0) acl = ERR_PTR(-ENODATA); else acl = posix_acl_from_xattr(&init_user_ns, value, size); kfree(value); return acl; } static struct posix_acl *v9fs_acl_get(struct dentry *dentry, const char *name) { struct p9_fid *fid; struct posix_acl *acl = NULL; fid = v9fs_fid_lookup(dentry); if (IS_ERR(fid)) return ERR_CAST(fid); acl = v9fs_fid_get_acl(fid, name); p9_fid_put(fid); return acl; } static struct posix_acl *__v9fs_get_acl(struct p9_fid *fid, const char *name) { int retval; struct posix_acl *acl = NULL; acl = v9fs_fid_get_acl(fid, name); if (!IS_ERR(acl)) return acl; retval = PTR_ERR(acl); if (retval == -ENODATA || retval == -ENOSYS || retval == -EOPNOTSUPP) return NULL; /* map everything else to -EIO */ return ERR_PTR(-EIO); } int v9fs_get_acl(struct inode *inode, struct p9_fid *fid) { int retval = 0; struct posix_acl *pacl, *dacl; struct v9fs_session_info *v9ses; v9ses = v9fs_inode2v9ses(inode); if (((v9ses->flags & V9FS_ACCESS_MASK) != V9FS_ACCESS_CLIENT) || ((v9ses->flags & V9FS_ACL_MASK) != V9FS_POSIX_ACL)) { set_cached_acl(inode, ACL_TYPE_DEFAULT, NULL); set_cached_acl(inode, ACL_TYPE_ACCESS, NULL); return 0; } /* get the default/access acl values and cache them */ dacl = __v9fs_get_acl(fid, XATTR_NAME_POSIX_ACL_DEFAULT); pacl = __v9fs_get_acl(fid, XATTR_NAME_POSIX_ACL_ACCESS); if (!IS_ERR(dacl) && !IS_ERR(pacl)) { set_cached_acl(inode, ACL_TYPE_DEFAULT, dacl); set_cached_acl(inode, ACL_TYPE_ACCESS, pacl); } else retval = -EIO; if (!IS_ERR(dacl)) posix_acl_release(dacl); if (!IS_ERR(pacl)) posix_acl_release(pacl); return retval; } static struct posix_acl *v9fs_get_cached_acl(struct inode *inode, int type) { struct posix_acl *acl; /* * 9p Always cache the acl value when * instantiating the inode (v9fs_inode_from_fid) */ acl = get_cached_acl(inode, type); BUG_ON(is_uncached_acl(acl)); return acl; } struct posix_acl *v9fs_iop_get_inode_acl(struct inode *inode, int type, bool rcu) { struct v9fs_session_info *v9ses; if (rcu) return ERR_PTR(-ECHILD); v9ses = v9fs_inode2v9ses(inode); if (((v9ses->flags & V9FS_ACCESS_MASK) != V9FS_ACCESS_CLIENT) || ((v9ses->flags & V9FS_ACL_MASK) != V9FS_POSIX_ACL)) { /* * On access = client and acl = on mode get the acl * values from the server */ return NULL; } return v9fs_get_cached_acl(inode, type); } struct posix_acl *v9fs_iop_get_acl(struct mnt_idmap *idmap, struct dentry *dentry, int type) { struct v9fs_session_info *v9ses; v9ses = v9fs_dentry2v9ses(dentry); /* We allow set/get/list of acl when access=client is not specified. 
*/ if ((v9ses->flags & V9FS_ACCESS_MASK) != V9FS_ACCESS_CLIENT) return v9fs_acl_get(dentry, posix_acl_xattr_name(type)); return v9fs_get_cached_acl(d_inode(dentry), type); } int v9fs_iop_set_acl(struct mnt_idmap *idmap, struct dentry *dentry, struct posix_acl *acl, int type) { int retval; size_t size = 0; void *value = NULL; const char *acl_name; struct v9fs_session_info *v9ses; struct inode *inode = d_inode(dentry); if (acl) { retval = posix_acl_valid(inode->i_sb->s_user_ns, acl); if (retval) goto err_out; size = posix_acl_xattr_size(acl->a_count); value = kzalloc(size, GFP_NOFS); if (!value) { retval = -ENOMEM; goto err_out; } retval = posix_acl_to_xattr(&init_user_ns, acl, value, size); if (retval < 0) goto err_out; } /* * set the attribute on the remote. Without even looking at the * xattr value. We leave it to the server to validate */ acl_name = posix_acl_xattr_name(type); v9ses = v9fs_dentry2v9ses(dentry); if ((v9ses->flags & V9FS_ACCESS_MASK) != V9FS_ACCESS_CLIENT) { retval = v9fs_xattr_set(dentry, acl_name, value, size, 0); goto err_out; } if (S_ISLNK(inode->i_mode)) { retval = -EOPNOTSUPP; goto err_out; } if (!inode_owner_or_capable(&nop_mnt_idmap, inode)) { retval = -EPERM; goto err_out; } switch (type) { case ACL_TYPE_ACCESS: if (acl) { struct iattr iattr = {}; struct posix_acl *acl_mode = acl; retval = posix_acl_update_mode(&nop_mnt_idmap, inode, &iattr.ia_mode, &acl_mode); if (retval) goto err_out; if (!acl_mode) { /* * ACL can be represented by the mode bits. * So don't update ACL below. */ kfree(value); value = NULL; size = 0; } iattr.ia_valid = ATTR_MODE; /* * FIXME should we update ctime ? * What is the following setxattr update the mode ? */ v9fs_vfs_setattr_dotl(&nop_mnt_idmap, dentry, &iattr); } break; case ACL_TYPE_DEFAULT: if (!S_ISDIR(inode->i_mode)) { retval = acl ? 
-EINVAL : 0; goto err_out; } break; } retval = v9fs_xattr_set(dentry, acl_name, value, size, 0); if (!retval) set_cached_acl(inode, type, acl); err_out: kfree(value); return retval; } static int v9fs_set_acl(struct p9_fid *fid, int type, struct posix_acl *acl) { int retval; char *name; size_t size; void *buffer; if (!acl) return 0; /* Set a setxattr request to server */ size = posix_acl_xattr_size(acl->a_count); buffer = kmalloc(size, GFP_KERNEL); if (!buffer) return -ENOMEM; retval = posix_acl_to_xattr(&init_user_ns, acl, buffer, size); if (retval < 0) goto err_free_out; switch (type) { case ACL_TYPE_ACCESS: name = XATTR_NAME_POSIX_ACL_ACCESS; break; case ACL_TYPE_DEFAULT: name = XATTR_NAME_POSIX_ACL_DEFAULT; break; default: BUG(); } retval = v9fs_fid_xattr_set(fid, name, buffer, size, 0); err_free_out: kfree(buffer); return retval; } int v9fs_acl_chmod(struct inode *inode, struct p9_fid *fid) { int retval = 0; struct posix_acl *acl; if (S_ISLNK(inode->i_mode)) return -EOPNOTSUPP; acl = v9fs_get_cached_acl(inode, ACL_TYPE_ACCESS); if (acl) { retval = __posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode); if (retval) return retval; set_cached_acl(inode, ACL_TYPE_ACCESS, acl); retval = v9fs_set_acl(fid, ACL_TYPE_ACCESS, acl); posix_acl_release(acl); } return retval; } int v9fs_set_create_acl(struct inode *inode, struct p9_fid *fid, struct posix_acl *dacl, struct posix_acl *acl) { set_cached_acl(inode, ACL_TYPE_DEFAULT, dacl); set_cached_acl(inode, ACL_TYPE_ACCESS, acl); v9fs_set_acl(fid, ACL_TYPE_DEFAULT, dacl); v9fs_set_acl(fid, ACL_TYPE_ACCESS, acl); return 0; } void v9fs_put_acl(struct posix_acl *dacl, struct posix_acl *acl) { posix_acl_release(dacl); posix_acl_release(acl); } int v9fs_acl_mode(struct inode *dir, umode_t *modep, struct posix_acl **dpacl, struct posix_acl **pacl) { int retval = 0; umode_t mode = *modep; struct posix_acl *acl = NULL; if (!S_ISLNK(mode)) { acl = v9fs_get_cached_acl(dir, ACL_TYPE_DEFAULT); if (IS_ERR(acl)) return PTR_ERR(acl); if (!acl) mode &= ~current_umask(); } if (acl) { if (S_ISDIR(mode)) *dpacl = posix_acl_dup(acl); retval = __posix_acl_create(&acl, GFP_NOFS, &mode); if (retval < 0) return retval; if (retval > 0) *pacl = acl; else posix_acl_release(acl); } *modep = mode; return 0; }
linux-master
fs/9p/acl.c
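v9fs_fid_get_acl() above reads an ACL xattr with the usual two-step dance: a first call with a NULL buffer only reports the value size (0 meaning no data), and a second call fetches into a buffer of exactly that size. A minimal sketch of the pattern, with xattr_get_sim() as an illustrative stand-in for v9fs_fid_xattr_get():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>

/* stand-in for v9fs_fid_xattr_get(): NULL buffer means "report the size" */
static ssize_t xattr_get_sim(const char *name, void *value, size_t size)
{
	static const char payload[] = "fake-acl-blob";

	(void)name;	/* a real client would send the name to the server */
	if (!value)
		return sizeof(payload);		/* size query */
	if (size < sizeof(payload))
		return -1;			/* buffer too small */
	memcpy(value, payload, sizeof(payload));
	return sizeof(payload);
}

int main(void)
{
	const char *name = "system.posix_acl_access";
	ssize_t size = xattr_get_sim(name, NULL, 0);
	char *value;

	if (size <= 0)
		return 1;			/* error, or no ACL stored */
	value = calloc(1, size);
	if (!value)
		return 1;
	size = xattr_get_sim(name, value, size);
	if (size > 0)
		printf("got %zd bytes: %s\n", size, value);
	free(value);
	return 0;
}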
// SPDX-License-Identifier: GPL-2.0-only /* * V9FS cache definitions. * * Copyright (C) 2009 by Abhishek Kulkarni <[email protected]> */ #include <linux/jiffies.h> #include <linux/file.h> #include <linux/slab.h> #include <linux/stat.h> #include <linux/sched.h> #include <linux/fs.h> #include <net/9p/9p.h> #include "v9fs.h" #include "cache.h" int v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses, const char *dev_name) { struct fscache_volume *vcookie; char *name, *p; name = kasprintf(GFP_KERNEL, "9p,%s,%s", dev_name, v9ses->cachetag ?: v9ses->aname); if (!name) return -ENOMEM; for (p = name; *p; p++) if (*p == '/') *p = ';'; vcookie = fscache_acquire_volume(name, NULL, NULL, 0); p9_debug(P9_DEBUG_FSC, "session %p get volume %p (%s)\n", v9ses, vcookie, name); if (IS_ERR(vcookie)) { if (vcookie != ERR_PTR(-EBUSY)) { kfree(name); return PTR_ERR(vcookie); } pr_err("Cache volume key already in use (%s)\n", name); vcookie = NULL; } v9ses->fscache = vcookie; kfree(name); return 0; } void v9fs_cache_inode_get_cookie(struct inode *inode) { struct v9fs_inode *v9inode = V9FS_I(inode); struct v9fs_session_info *v9ses; __le32 version; __le64 path; if (!S_ISREG(inode->i_mode)) return; if (WARN_ON(v9fs_inode_cookie(v9inode))) return; version = cpu_to_le32(v9inode->qid.version); path = cpu_to_le64(v9inode->qid.path); v9ses = v9fs_inode2v9ses(inode); v9inode->netfs.cache = fscache_acquire_cookie(v9fs_session_cache(v9ses), 0, &path, sizeof(path), &version, sizeof(version), i_size_read(&v9inode->netfs.inode)); if (v9inode->netfs.cache) mapping_set_release_always(inode->i_mapping); p9_debug(P9_DEBUG_FSC, "inode %p get cookie %p\n", inode, v9fs_inode_cookie(v9inode)); }
linux-master
fs/9p/cache.c
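The only string handling in the file above is the fscache volume key: v9fs_cache_session_get_cookie() builds "9p,<dev_name>,<cachetag or aname>" and rewrites every '/' to ';' before calling fscache_acquire_volume(). A standalone sketch of just that key construction; build_volume_key() is illustrative and no fscache calls are made.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *build_volume_key(const char *dev_name, const char *cachetag,
			      const char *aname)
{
	const char *tag = cachetag ? cachetag : aname;	/* cachetag ?: aname */
	size_t len = strlen("9p,,") + strlen(dev_name) + strlen(tag) + 1;
	char *name, *p;

	name = malloc(len);
	if (!name)
		return NULL;
	snprintf(name, len, "9p,%s,%s", dev_name, tag);
	for (p = name; *p; p++)
		if (*p == '/')
			*p = ';';	/* keep the key free of '/' */
	return name;
}

int main(void)
{
	char *key = build_volume_key("10.0.0.2", NULL, "/export/home");

	if (!key)
		return 1;
	printf("%s\n", key);	/* 9p,10.0.0.2,;export;home */
	free(key);
	return 0;
}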
// SPDX-License-Identifier: GPL-2.0-only /* * This file contians vfs file ops for 9P2000. * * Copyright (C) 2004 by Eric Van Hensbergen <[email protected]> * Copyright (C) 2002 by Ron Minnich <[email protected]> */ #include <linux/module.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/filelock.h> #include <linux/sched.h> #include <linux/file.h> #include <linux/stat.h> #include <linux/string.h> #include <linux/list.h> #include <linux/pagemap.h> #include <linux/utsname.h> #include <linux/uaccess.h> #include <linux/uio.h> #include <linux/slab.h> #include <net/9p/9p.h> #include <net/9p/client.h> #include "v9fs.h" #include "v9fs_vfs.h" #include "fid.h" #include "cache.h" static const struct vm_operations_struct v9fs_mmap_file_vm_ops; /** * v9fs_file_open - open a file (or directory) * @inode: inode to be opened * @file: file being opened * */ int v9fs_file_open(struct inode *inode, struct file *file) { int err; struct v9fs_session_info *v9ses; struct p9_fid *fid; int omode; p9_debug(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, file); v9ses = v9fs_inode2v9ses(inode); if (v9fs_proto_dotl(v9ses)) omode = v9fs_open_to_dotl_flags(file->f_flags); else omode = v9fs_uflags2omode(file->f_flags, v9fs_proto_dotu(v9ses)); fid = file->private_data; if (!fid) { fid = v9fs_fid_clone(file_dentry(file)); if (IS_ERR(fid)) return PTR_ERR(fid); if ((v9ses->cache & CACHE_WRITEBACK) && (omode & P9_OWRITE)) { int writeback_omode = (omode & ~P9_OWRITE) | P9_ORDWR; p9_debug(P9_DEBUG_CACHE, "write-only file with writeback enabled, try opening O_RDWR\n"); err = p9_client_open(fid, writeback_omode); if (err < 0) { p9_debug(P9_DEBUG_CACHE, "could not open O_RDWR, disabling caches\n"); err = p9_client_open(fid, omode); fid->mode |= P9L_DIRECT; } } else { err = p9_client_open(fid, omode); } if (err < 0) { p9_fid_put(fid); return err; } if ((file->f_flags & O_APPEND) && (!v9fs_proto_dotu(v9ses) && !v9fs_proto_dotl(v9ses))) generic_file_llseek(file, 0, SEEK_END); file->private_data = fid; } #ifdef CONFIG_9P_FSCACHE if (v9ses->cache & CACHE_FSCACHE) fscache_use_cookie(v9fs_inode_cookie(V9FS_I(inode)), file->f_mode & FMODE_WRITE); #endif v9fs_fid_add_modes(fid, v9ses->flags, v9ses->cache, file->f_flags); v9fs_open_fid_add(inode, &fid); return 0; } /** * v9fs_file_lock - lock a file (or directory) * @filp: file to be locked * @cmd: lock command * @fl: file lock structure * * Bugs: this looks like a local only lock, we should extend into 9P * by using open exclusive */ static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl) { struct inode *inode = file_inode(filp); p9_debug(P9_DEBUG_VFS, "filp: %p lock: %p\n", filp, fl); if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) { filemap_write_and_wait(inode->i_mapping); invalidate_mapping_pages(&inode->i_data, 0, -1); } return 0; } static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl) { struct p9_flock flock; struct p9_fid *fid; uint8_t status = P9_LOCK_ERROR; int res = 0; unsigned char fl_type; struct v9fs_session_info *v9ses; fid = filp->private_data; BUG_ON(fid == NULL); BUG_ON((fl->fl_flags & FL_POSIX) != FL_POSIX); res = locks_lock_file_wait(filp, fl); if (res < 0) goto out; /* convert posix lock to p9 tlock args */ memset(&flock, 0, sizeof(flock)); /* map the lock type */ switch (fl->fl_type) { case F_RDLCK: flock.type = P9_LOCK_TYPE_RDLCK; break; case F_WRLCK: flock.type = P9_LOCK_TYPE_WRLCK; break; case F_UNLCK: flock.type = P9_LOCK_TYPE_UNLCK; break; } flock.start = fl->fl_start; if (fl->fl_end == 
OFFSET_MAX) flock.length = 0; else flock.length = fl->fl_end - fl->fl_start + 1; flock.proc_id = fl->fl_pid; flock.client_id = fid->clnt->name; if (IS_SETLKW(cmd)) flock.flags = P9_LOCK_FLAGS_BLOCK; v9ses = v9fs_inode2v9ses(file_inode(filp)); /* * if its a blocked request and we get P9_LOCK_BLOCKED as the status * for lock request, keep on trying */ for (;;) { res = p9_client_lock_dotl(fid, &flock, &status); if (res < 0) goto out_unlock; if (status != P9_LOCK_BLOCKED) break; if (status == P9_LOCK_BLOCKED && !IS_SETLKW(cmd)) break; if (schedule_timeout_interruptible(v9ses->session_lock_timeout) != 0) break; /* * p9_client_lock_dotl overwrites flock.client_id with the * server message, free and reuse the client name */ if (flock.client_id != fid->clnt->name) { kfree(flock.client_id); flock.client_id = fid->clnt->name; } } /* map 9p status to VFS status */ switch (status) { case P9_LOCK_SUCCESS: res = 0; break; case P9_LOCK_BLOCKED: res = -EAGAIN; break; default: WARN_ONCE(1, "unknown lock status code: %d\n", status); fallthrough; case P9_LOCK_ERROR: case P9_LOCK_GRACE: res = -ENOLCK; break; } out_unlock: /* * incase server returned error for lock request, revert * it locally */ if (res < 0 && fl->fl_type != F_UNLCK) { fl_type = fl->fl_type; fl->fl_type = F_UNLCK; /* Even if this fails we want to return the remote error */ locks_lock_file_wait(filp, fl); fl->fl_type = fl_type; } if (flock.client_id != fid->clnt->name) kfree(flock.client_id); out: return res; } static int v9fs_file_getlock(struct file *filp, struct file_lock *fl) { struct p9_getlock glock; struct p9_fid *fid; int res = 0; fid = filp->private_data; BUG_ON(fid == NULL); posix_test_lock(filp, fl); /* * if we have a conflicting lock locally, no need to validate * with server */ if (fl->fl_type != F_UNLCK) return res; /* convert posix lock to p9 tgetlock args */ memset(&glock, 0, sizeof(glock)); glock.type = P9_LOCK_TYPE_UNLCK; glock.start = fl->fl_start; if (fl->fl_end == OFFSET_MAX) glock.length = 0; else glock.length = fl->fl_end - fl->fl_start + 1; glock.proc_id = fl->fl_pid; glock.client_id = fid->clnt->name; res = p9_client_getlock_dotl(fid, &glock); if (res < 0) goto out; /* map 9p lock type to os lock type */ switch (glock.type) { case P9_LOCK_TYPE_RDLCK: fl->fl_type = F_RDLCK; break; case P9_LOCK_TYPE_WRLCK: fl->fl_type = F_WRLCK; break; case P9_LOCK_TYPE_UNLCK: fl->fl_type = F_UNLCK; break; } if (glock.type != P9_LOCK_TYPE_UNLCK) { fl->fl_start = glock.start; if (glock.length == 0) fl->fl_end = OFFSET_MAX; else fl->fl_end = glock.start + glock.length - 1; fl->fl_pid = -glock.proc_id; } out: if (glock.client_id != fid->clnt->name) kfree(glock.client_id); return res; } /** * v9fs_file_lock_dotl - lock a file (or directory) * @filp: file to be locked * @cmd: lock command * @fl: file lock structure * */ static int v9fs_file_lock_dotl(struct file *filp, int cmd, struct file_lock *fl) { struct inode *inode = file_inode(filp); int ret = -ENOLCK; p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n", filp, cmd, fl, filp); if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) { filemap_write_and_wait(inode->i_mapping); invalidate_mapping_pages(&inode->i_data, 0, -1); } if (IS_SETLK(cmd) || IS_SETLKW(cmd)) ret = v9fs_file_do_lock(filp, cmd, fl); else if (IS_GETLK(cmd)) ret = v9fs_file_getlock(filp, fl); else ret = -EINVAL; return ret; } /** * v9fs_file_flock_dotl - lock a file * @filp: file to be locked * @cmd: lock command * @fl: file lock structure * */ static int v9fs_file_flock_dotl(struct file *filp, int 
cmd, struct file_lock *fl) { struct inode *inode = file_inode(filp); int ret = -ENOLCK; p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n", filp, cmd, fl, filp); if (!(fl->fl_flags & FL_FLOCK)) goto out_err; if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) { filemap_write_and_wait(inode->i_mapping); invalidate_mapping_pages(&inode->i_data, 0, -1); } /* Convert flock to posix lock */ fl->fl_flags |= FL_POSIX; fl->fl_flags ^= FL_FLOCK; if (IS_SETLK(cmd) | IS_SETLKW(cmd)) ret = v9fs_file_do_lock(filp, cmd, fl); else ret = -EINVAL; out_err: return ret; } /** * v9fs_file_read_iter - read from a file * @iocb: The operation parameters * @to: The buffer to read into * */ static ssize_t v9fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to) { struct p9_fid *fid = iocb->ki_filp->private_data; int ret, err = 0; p9_debug(P9_DEBUG_VFS, "fid %d count %zu offset %lld\n", fid->fid, iov_iter_count(to), iocb->ki_pos); if (!(fid->mode & P9L_DIRECT)) { p9_debug(P9_DEBUG_VFS, "(cached)\n"); return generic_file_read_iter(iocb, to); } if (iocb->ki_filp->f_flags & O_NONBLOCK) ret = p9_client_read_once(fid, iocb->ki_pos, to, &err); else ret = p9_client_read(fid, iocb->ki_pos, to, &err); if (!ret) return err; iocb->ki_pos += ret; return ret; } /* * v9fs_file_splice_read - splice-read from a file * @in: The 9p file to read from * @ppos: Where to find/update the file position * @pipe: The pipe to splice into * @len: The maximum amount of data to splice * @flags: SPLICE_F_* flags */ static ssize_t v9fs_file_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { struct p9_fid *fid = in->private_data; p9_debug(P9_DEBUG_VFS, "fid %d count %zu offset %lld\n", fid->fid, len, *ppos); if (fid->mode & P9L_DIRECT) return copy_splice_read(in, ppos, pipe, len, flags); return filemap_splice_read(in, ppos, pipe, len, flags); } /** * v9fs_file_write_iter - write to a file * @iocb: The operation parameters * @from: The data to write * */ static ssize_t v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; struct p9_fid *fid = file->private_data; ssize_t retval; loff_t origin; int err = 0; p9_debug(P9_DEBUG_VFS, "fid %d\n", fid->fid); if (!(fid->mode & (P9L_DIRECT | P9L_NOWRITECACHE))) { p9_debug(P9_DEBUG_CACHE, "(cached)\n"); return generic_file_write_iter(iocb, from); } retval = generic_write_checks(iocb, from); if (retval <= 0) return retval; origin = iocb->ki_pos; retval = p9_client_write(file->private_data, iocb->ki_pos, from, &err); if (retval > 0) { struct inode *inode = file_inode(file); loff_t i_size; unsigned long pg_start, pg_end; pg_start = origin >> PAGE_SHIFT; pg_end = (origin + retval - 1) >> PAGE_SHIFT; if (inode->i_mapping && inode->i_mapping->nrpages) invalidate_inode_pages2_range(inode->i_mapping, pg_start, pg_end); iocb->ki_pos += retval; i_size = i_size_read(inode); if (iocb->ki_pos > i_size) { inode_add_bytes(inode, iocb->ki_pos - i_size); /* * Need to serialize against i_size_write() in * v9fs_stat2inode() */ v9fs_i_size_write(inode, iocb->ki_pos); } return retval; } return err; } static int v9fs_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync) { struct p9_fid *fid; struct inode *inode = filp->f_mapping->host; struct p9_wstat wstat; int retval; retval = file_write_and_wait_range(filp, start, end); if (retval) return retval; inode_lock(inode); p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync); fid = filp->private_data; v9fs_blank_wstat(&wstat); 
retval = p9_client_wstat(fid, &wstat); inode_unlock(inode); return retval; } int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end, int datasync) { struct p9_fid *fid; struct inode *inode = filp->f_mapping->host; int retval; retval = file_write_and_wait_range(filp, start, end); if (retval) return retval; inode_lock(inode); p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync); fid = filp->private_data; retval = p9_client_fsync(fid, datasync); inode_unlock(inode); return retval; } static int v9fs_file_mmap(struct file *filp, struct vm_area_struct *vma) { int retval; struct inode *inode = file_inode(filp); struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode); p9_debug(P9_DEBUG_MMAP, "filp :%p\n", filp); if (!(v9ses->cache & CACHE_WRITEBACK)) { p9_debug(P9_DEBUG_CACHE, "(read-only mmap mode)"); return generic_file_readonly_mmap(filp, vma); } retval = generic_file_mmap(filp, vma); if (!retval) vma->vm_ops = &v9fs_mmap_file_vm_ops; return retval; } static vm_fault_t v9fs_vm_page_mkwrite(struct vm_fault *vmf) { struct folio *folio = page_folio(vmf->page); struct file *filp = vmf->vma->vm_file; struct inode *inode = file_inode(filp); p9_debug(P9_DEBUG_VFS, "folio %p fid %lx\n", folio, (unsigned long)filp->private_data); /* Wait for the page to be written to the cache before we allow it to * be modified. We then assume the entire page will need writing back. */ #ifdef CONFIG_9P_FSCACHE if (folio_test_fscache(folio) && folio_wait_fscache_killable(folio) < 0) return VM_FAULT_NOPAGE; #endif /* Update file times before taking page lock */ file_update_time(filp); if (folio_lock_killable(folio) < 0) return VM_FAULT_RETRY; if (folio_mapping(folio) != inode->i_mapping) goto out_unlock; folio_wait_stable(folio); return VM_FAULT_LOCKED; out_unlock: folio_unlock(folio); return VM_FAULT_NOPAGE; } static void v9fs_mmap_vm_close(struct vm_area_struct *vma) { struct inode *inode; struct writeback_control wbc = { .nr_to_write = LONG_MAX, .sync_mode = WB_SYNC_ALL, .range_start = (loff_t)vma->vm_pgoff * PAGE_SIZE, /* absolute end, byte at end included */ .range_end = (loff_t)vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start - 1), }; if (!(vma->vm_flags & VM_SHARED)) return; p9_debug(P9_DEBUG_VFS, "9p VMA close, %p, flushing", vma); inode = file_inode(vma->vm_file); filemap_fdatawrite_wbc(inode->i_mapping, &wbc); } static const struct vm_operations_struct v9fs_mmap_file_vm_ops = { .close = v9fs_mmap_vm_close, .fault = filemap_fault, .map_pages = filemap_map_pages, .page_mkwrite = v9fs_vm_page_mkwrite, }; const struct file_operations v9fs_file_operations = { .llseek = generic_file_llseek, .read_iter = v9fs_file_read_iter, .write_iter = v9fs_file_write_iter, .open = v9fs_file_open, .release = v9fs_dir_release, .lock = v9fs_file_lock, .mmap = generic_file_readonly_mmap, .splice_read = v9fs_file_splice_read, .splice_write = iter_file_splice_write, .fsync = v9fs_file_fsync, }; const struct file_operations v9fs_file_operations_dotl = { .llseek = generic_file_llseek, .read_iter = v9fs_file_read_iter, .write_iter = v9fs_file_write_iter, .open = v9fs_file_open, .release = v9fs_dir_release, .lock = v9fs_file_lock_dotl, .flock = v9fs_file_flock_dotl, .mmap = v9fs_file_mmap, .splice_read = v9fs_file_splice_read, .splice_write = iter_file_splice_write, .fsync = v9fs_file_fsync_dotl, };
linux-master
fs/9p/vfs_file.c
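/*
 * Example (not part of the kernel sources): a minimal user-space sketch of the
 * locking path above.  A blocking whole-file POSIX write lock taken with
 * fcntl(F_SETLKW) on a 9P2000.L mount is what v9fs_file_do_lock() translates
 * into a TLOCK request (F_WRLCK -> P9_LOCK_TYPE_WRLCK, "until EOF" -> a 9p
 * length of 0, F_SETLKW -> P9_LOCK_FLAGS_BLOCK).  The mount path is a
 * placeholder.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/mnt/9p/lockfile", O_RDWR | O_CREAT, 0644);
        struct flock fl = {
                .l_type   = F_WRLCK,    /* maps to P9_LOCK_TYPE_WRLCK */
                .l_whence = SEEK_SET,
                .l_start  = 0,
                .l_len    = 0,          /* 0 means "lock to end of file" */
        };

        if (fd < 0) {
                perror("open");
                return EXIT_FAILURE;
        }

        /* F_SETLKW blocks: the client sets P9_LOCK_FLAGS_BLOCK and retries
         * while the server keeps answering P9_LOCK_BLOCKED. */
        if (fcntl(fd, F_SETLKW, &fl) < 0) {
                perror("F_SETLKW");
                return EXIT_FAILURE;
        }
        printf("lock acquired by pid %ld\n", (long)getpid());

        fl.l_type = F_UNLCK;            /* maps to P9_LOCK_TYPE_UNLCK */
        if (fcntl(fd, F_SETLK, &fl) < 0)
                perror("F_UNLCK");

        close(fd);
        return 0;
}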
// SPDX-License-Identifier: GPL-2.0 /* * fs-verity module initialization and logging * * Copyright 2019 Google LLC */ #include "fsverity_private.h" #include <linux/ratelimit.h> #ifdef CONFIG_SYSCTL static struct ctl_table_header *fsverity_sysctl_header; static struct ctl_table fsverity_sysctl_table[] = { #ifdef CONFIG_FS_VERITY_BUILTIN_SIGNATURES { .procname = "require_signatures", .data = &fsverity_require_signatures, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE, }, #endif { } }; static void __init fsverity_init_sysctl(void) { fsverity_sysctl_header = register_sysctl("fs/verity", fsverity_sysctl_table); if (!fsverity_sysctl_header) panic("fsverity sysctl registration failed"); } #else /* CONFIG_SYSCTL */ static inline void fsverity_init_sysctl(void) { } #endif /* !CONFIG_SYSCTL */ void fsverity_msg(const struct inode *inode, const char *level, const char *fmt, ...) { static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST); struct va_format vaf; va_list args; if (!__ratelimit(&rs)) return; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; if (inode) printk("%sfs-verity (%s, inode %lu): %pV\n", level, inode->i_sb->s_id, inode->i_ino, &vaf); else printk("%sfs-verity: %pV\n", level, &vaf); va_end(args); } static int __init fsverity_init(void) { fsverity_check_hash_algs(); fsverity_init_info_cache(); fsverity_init_workqueue(); fsverity_init_sysctl(); fsverity_init_signature(); return 0; } late_initcall(fsverity_init)
linux-master
fs/verity/init.c
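/*
 * Example (not part of the kernel sources): a small user-space sketch that
 * reads the sysctl registered above.  It assumes a kernel built with
 * CONFIG_FS_VERITY_BUILTIN_SIGNATURES, so that
 * /proc/sys/fs/verity/require_signatures exists.
 */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/sys/fs/verity/require_signatures", "r");
        int val;

        if (!f) {
                perror("fopen");
                return 1;
        }
        if (fscanf(f, "%d", &val) == 1)
                printf("require_signatures = %d\n", val);
        fclose(f);
        return 0;
}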
// SPDX-License-Identifier: GPL-2.0 /* * Data verification functions, i.e. hooks for ->readahead() * * Copyright 2019 Google LLC */ #include "fsverity_private.h" #include <crypto/hash.h> #include <linux/bio.h> static struct workqueue_struct *fsverity_read_workqueue; /* * Returns true if the hash block with index @hblock_idx in the tree, located in * @hpage, has already been verified. */ static bool is_hash_block_verified(struct fsverity_info *vi, struct page *hpage, unsigned long hblock_idx) { bool verified; unsigned int blocks_per_page; unsigned int i; /* * When the Merkle tree block size and page size are the same, then the * ->hash_block_verified bitmap isn't allocated, and we use PG_checked * to directly indicate whether the page's block has been verified. * * Using PG_checked also guarantees that we re-verify hash pages that * get evicted and re-instantiated from the backing storage, as new * pages always start out with PG_checked cleared. */ if (!vi->hash_block_verified) return PageChecked(hpage); /* * When the Merkle tree block size and page size differ, we use a bitmap * to indicate whether each hash block has been verified. * * However, we still need to ensure that hash pages that get evicted and * re-instantiated from the backing storage are re-verified. To do * this, we use PG_checked again, but now it doesn't really mean * "checked". Instead, now it just serves as an indicator for whether * the hash page is newly instantiated or not. * * The first thread that sees PG_checked=0 must clear the corresponding * bitmap bits, then set PG_checked=1. This requires a spinlock. To * avoid having to take this spinlock in the common case of * PG_checked=1, we start with an opportunistic lockless read. */ if (PageChecked(hpage)) { /* * A read memory barrier is needed here to give ACQUIRE * semantics to the above PageChecked() test. */ smp_rmb(); return test_bit(hblock_idx, vi->hash_block_verified); } spin_lock(&vi->hash_page_init_lock); if (PageChecked(hpage)) { verified = test_bit(hblock_idx, vi->hash_block_verified); } else { blocks_per_page = vi->tree_params.blocks_per_page; hblock_idx = round_down(hblock_idx, blocks_per_page); for (i = 0; i < blocks_per_page; i++) clear_bit(hblock_idx + i, vi->hash_block_verified); /* * A write memory barrier is needed here to give RELEASE * semantics to the below SetPageChecked() operation. */ smp_wmb(); SetPageChecked(hpage); verified = false; } spin_unlock(&vi->hash_page_init_lock); return verified; } /* * Verify a single data block against the file's Merkle tree. * * In principle, we need to verify the entire path to the root node. However, * for efficiency the filesystem may cache the hash blocks. Therefore we need * only ascend the tree until an already-verified hash block is seen, and then * verify the path to that block. * * Return: %true if the data block is valid, else %false. 
*/ static bool verify_data_block(struct inode *inode, struct fsverity_info *vi, const void *data, u64 data_pos, unsigned long max_ra_pages) { const struct merkle_tree_params *params = &vi->tree_params; const unsigned int hsize = params->digest_size; int level; u8 _want_hash[FS_VERITY_MAX_DIGEST_SIZE]; const u8 *want_hash; u8 real_hash[FS_VERITY_MAX_DIGEST_SIZE]; /* The hash blocks that are traversed, indexed by level */ struct { /* Page containing the hash block */ struct page *page; /* Mapped address of the hash block (will be within @page) */ const void *addr; /* Index of the hash block in the tree overall */ unsigned long index; /* Byte offset of the wanted hash relative to @addr */ unsigned int hoffset; } hblocks[FS_VERITY_MAX_LEVELS]; /* * The index of the previous level's block within that level; also the * index of that block's hash within the current level. */ u64 hidx = data_pos >> params->log_blocksize; /* Up to 1 + FS_VERITY_MAX_LEVELS pages may be mapped at once */ BUILD_BUG_ON(1 + FS_VERITY_MAX_LEVELS > KM_MAX_IDX); if (unlikely(data_pos >= inode->i_size)) { /* * This can happen in the data page spanning EOF when the Merkle * tree block size is less than the page size. The Merkle tree * doesn't cover data blocks fully past EOF. But the entire * page spanning EOF can be visible to userspace via a mmap, and * any part past EOF should be all zeroes. Therefore, we need * to verify that any data blocks fully past EOF are all zeroes. */ if (memchr_inv(data, 0, params->block_size)) { fsverity_err(inode, "FILE CORRUPTED! Data past EOF is not zeroed"); return false; } return true; } /* * Starting at the leaf level, ascend the tree saving hash blocks along * the way until we find a hash block that has already been verified, or * until we reach the root. */ for (level = 0; level < params->num_levels; level++) { unsigned long next_hidx; unsigned long hblock_idx; pgoff_t hpage_idx; unsigned int hblock_offset_in_page; unsigned int hoffset; struct page *hpage; const void *haddr; /* * The index of the block in the current level; also the index * of that block's hash within the next level. */ next_hidx = hidx >> params->log_arity; /* Index of the hash block in the tree overall */ hblock_idx = params->level_start[level] + next_hidx; /* Index of the hash page in the tree overall */ hpage_idx = hblock_idx >> params->log_blocks_per_page; /* Byte offset of the hash block within the page */ hblock_offset_in_page = (hblock_idx << params->log_blocksize) & ~PAGE_MASK; /* Byte offset of the hash within the block */ hoffset = (hidx << params->log_digestsize) & (params->block_size - 1); hpage = inode->i_sb->s_vop->read_merkle_tree_page(inode, hpage_idx, level == 0 ? min(max_ra_pages, params->tree_pages - hpage_idx) : 0); if (IS_ERR(hpage)) { fsverity_err(inode, "Error %ld reading Merkle tree page %lu", PTR_ERR(hpage), hpage_idx); goto error; } haddr = kmap_local_page(hpage) + hblock_offset_in_page; if (is_hash_block_verified(vi, hpage, hblock_idx)) { memcpy(_want_hash, haddr + hoffset, hsize); want_hash = _want_hash; kunmap_local(haddr); put_page(hpage); goto descend; } hblocks[level].page = hpage; hblocks[level].addr = haddr; hblocks[level].index = hblock_idx; hblocks[level].hoffset = hoffset; hidx = next_hidx; } want_hash = vi->root_hash; descend: /* Descend the tree verifying hash blocks. 
*/ for (; level > 0; level--) { struct page *hpage = hblocks[level - 1].page; const void *haddr = hblocks[level - 1].addr; unsigned long hblock_idx = hblocks[level - 1].index; unsigned int hoffset = hblocks[level - 1].hoffset; if (fsverity_hash_block(params, inode, haddr, real_hash) != 0) goto error; if (memcmp(want_hash, real_hash, hsize) != 0) goto corrupted; /* * Mark the hash block as verified. This must be atomic and * idempotent, as the same hash block might be verified by * multiple threads concurrently. */ if (vi->hash_block_verified) set_bit(hblock_idx, vi->hash_block_verified); else SetPageChecked(hpage); memcpy(_want_hash, haddr + hoffset, hsize); want_hash = _want_hash; kunmap_local(haddr); put_page(hpage); } /* Finally, verify the data block. */ if (fsverity_hash_block(params, inode, data, real_hash) != 0) goto error; if (memcmp(want_hash, real_hash, hsize) != 0) goto corrupted; return true; corrupted: fsverity_err(inode, "FILE CORRUPTED! pos=%llu, level=%d, want_hash=%s:%*phN, real_hash=%s:%*phN", data_pos, level - 1, params->hash_alg->name, hsize, want_hash, params->hash_alg->name, hsize, real_hash); error: for (; level > 0; level--) { kunmap_local(hblocks[level - 1].addr); put_page(hblocks[level - 1].page); } return false; } static bool verify_data_blocks(struct folio *data_folio, size_t len, size_t offset, unsigned long max_ra_pages) { struct inode *inode = data_folio->mapping->host; struct fsverity_info *vi = inode->i_verity_info; const unsigned int block_size = vi->tree_params.block_size; u64 pos = (u64)data_folio->index << PAGE_SHIFT; if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offset, block_size))) return false; if (WARN_ON_ONCE(!folio_test_locked(data_folio) || folio_test_uptodate(data_folio))) return false; do { void *data; bool valid; data = kmap_local_folio(data_folio, offset); valid = verify_data_block(inode, vi, data, pos + offset, max_ra_pages); kunmap_local(data); if (!valid) return false; offset += block_size; len -= block_size; } while (len); return true; } /** * fsverity_verify_blocks() - verify data in a folio * @folio: the folio containing the data to verify * @len: the length of the data to verify in the folio * @offset: the offset of the data to verify in the folio * * Verify data that has just been read from a verity file. The data must be * located in a pagecache folio that is still locked and not yet uptodate. The * length and offset of the data must be Merkle tree block size aligned. * * Return: %true if the data is valid, else %false. */ bool fsverity_verify_blocks(struct folio *folio, size_t len, size_t offset) { return verify_data_blocks(folio, len, offset, 0); } EXPORT_SYMBOL_GPL(fsverity_verify_blocks); #ifdef CONFIG_BLOCK /** * fsverity_verify_bio() - verify a 'read' bio that has just completed * @bio: the bio to verify * * Verify the bio's data against the file's Merkle tree. All bio data segments * must be aligned to the file's Merkle tree block size. If any data fails * verification, then bio->bi_status is set to an error status. * * This is a helper function for use by the ->readahead() method of filesystems * that issue bios to read data directly into the page cache. Filesystems that * populate the page cache without issuing bios (e.g. non block-based * filesystems) must instead call fsverity_verify_page() directly on each page. * All filesystems must also call fsverity_verify_page() on holes. 
*/ void fsverity_verify_bio(struct bio *bio) { struct folio_iter fi; unsigned long max_ra_pages = 0; if (bio->bi_opf & REQ_RAHEAD) { /* * If this bio is for data readahead, then we also do readahead * of the first (largest) level of the Merkle tree. Namely, * when a Merkle tree page is read, we also try to piggy-back on * some additional pages -- up to 1/4 the number of data pages. * * This improves sequential read performance, as it greatly * reduces the number of I/O requests made to the Merkle tree. */ max_ra_pages = bio->bi_iter.bi_size >> (PAGE_SHIFT + 2); } bio_for_each_folio_all(fi, bio) { if (!verify_data_blocks(fi.folio, fi.length, fi.offset, max_ra_pages)) { bio->bi_status = BLK_STS_IOERR; break; } } } EXPORT_SYMBOL_GPL(fsverity_verify_bio); #endif /* CONFIG_BLOCK */ /** * fsverity_enqueue_verify_work() - enqueue work on the fs-verity workqueue * @work: the work to enqueue * * Enqueue verification work for asynchronous processing. */ void fsverity_enqueue_verify_work(struct work_struct *work) { queue_work(fsverity_read_workqueue, work); } EXPORT_SYMBOL_GPL(fsverity_enqueue_verify_work); void __init fsverity_init_workqueue(void) { /* * Use a high-priority workqueue to prioritize verification work, which * blocks reads from completing, over regular application tasks. * * For performance reasons, don't use an unbound workqueue. Using an * unbound workqueue for crypto operations causes excessive scheduler * latency on ARM64. */ fsverity_read_workqueue = alloc_workqueue("fsverity_read_queue", WQ_HIGHPRI, num_online_cpus()); if (!fsverity_read_workqueue) panic("failed to allocate fsverity_read_queue"); }
linux-master
fs/verity/verify.c
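/*
 * Example (not part of the kernel sources): a stand-alone sketch of the index
 * arithmetic used by verify_data_block() above to locate, for each tree level,
 * the hash block and the byte offset of the wanted hash inside that block.
 * The parameter values are illustrative (SHA-256, 4K blocks, a 2-level tree
 * covering a file of up to 64 MiB); level_start[] would normally come from
 * fsverity_init_merkle_tree_params().
 */
#include <stdio.h>

struct params {
        unsigned int log_blocksize;     /* e.g. 12 for 4096-byte blocks */
        unsigned int log_digestsize;    /* e.g. 5 for 32-byte SHA-256 digests */
        unsigned int log_arity;         /* log_blocksize - log_digestsize */
        unsigned int num_levels;
        unsigned long level_start[8];   /* first block index of each level */
};

static void locate_hashes(const struct params *p, unsigned long long data_pos)
{
        unsigned long long hidx = data_pos >> p->log_blocksize;
        unsigned int level;

        for (level = 0; level < p->num_levels; level++) {
                unsigned long long next_hidx = hidx >> p->log_arity;
                unsigned long long hblock_idx =
                        p->level_start[level] + next_hidx;
                unsigned int hoffset = (unsigned int)
                        ((hidx << p->log_digestsize) &
                         ((1u << p->log_blocksize) - 1));

                printf("level %u: hash block %llu, hash offset %u\n",
                       level, hblock_idx, hoffset);
                hidx = next_hidx;
        }
}

int main(void)
{
        /* Hypothetical 2-level tree: root level stored first (block 0),
         * level 0 starts at block 1. */
        struct params p = {
                .log_blocksize  = 12,
                .log_digestsize = 5,
                .log_arity      = 7,
                .num_levels     = 2,
                .level_start    = { 1, 0 },
        };

        locate_hashes(&p, 12345678ULL);
        return 0;
}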
// SPDX-License-Identifier: GPL-2.0-only /* * Ioctl to read verity metadata * * Copyright 2021 Google LLC */ #include "fsverity_private.h" #include <linux/backing-dev.h> #include <linux/highmem.h> #include <linux/sched/signal.h> #include <linux/uaccess.h> static int fsverity_read_merkle_tree(struct inode *inode, const struct fsverity_info *vi, void __user *buf, u64 offset, int length) { const struct fsverity_operations *vops = inode->i_sb->s_vop; u64 end_offset; unsigned int offs_in_page; pgoff_t index, last_index; int retval = 0; int err = 0; end_offset = min(offset + length, vi->tree_params.tree_size); if (offset >= end_offset) return 0; offs_in_page = offset_in_page(offset); last_index = (end_offset - 1) >> PAGE_SHIFT; /* * Iterate through each Merkle tree page in the requested range and copy * the requested portion to userspace. Note that the Merkle tree block * size isn't important here, as we are returning a byte stream; i.e., * we can just work with pages even if the tree block size != PAGE_SIZE. */ for (index = offset >> PAGE_SHIFT; index <= last_index; index++) { unsigned long num_ra_pages = min_t(unsigned long, last_index - index + 1, inode->i_sb->s_bdi->io_pages); unsigned int bytes_to_copy = min_t(u64, end_offset - offset, PAGE_SIZE - offs_in_page); struct page *page; const void *virt; page = vops->read_merkle_tree_page(inode, index, num_ra_pages); if (IS_ERR(page)) { err = PTR_ERR(page); fsverity_err(inode, "Error %d reading Merkle tree page %lu", err, index); break; } virt = kmap_local_page(page); if (copy_to_user(buf, virt + offs_in_page, bytes_to_copy)) { kunmap_local(virt); put_page(page); err = -EFAULT; break; } kunmap_local(virt); put_page(page); retval += bytes_to_copy; buf += bytes_to_copy; offset += bytes_to_copy; if (fatal_signal_pending(current)) { err = -EINTR; break; } cond_resched(); offs_in_page = 0; } return retval ? retval : err; } /* Copy the requested portion of the buffer to userspace. */ static int fsverity_read_buffer(void __user *dst, u64 offset, int length, const void *src, size_t src_length) { if (offset >= src_length) return 0; src += offset; src_length -= offset; length = min_t(size_t, length, src_length); if (copy_to_user(dst, src, length)) return -EFAULT; return length; } static int fsverity_read_descriptor(struct inode *inode, void __user *buf, u64 offset, int length) { struct fsverity_descriptor *desc; size_t desc_size; int res; res = fsverity_get_descriptor(inode, &desc); if (res) return res; /* don't include the builtin signature */ desc_size = offsetof(struct fsverity_descriptor, signature); desc->sig_size = 0; res = fsverity_read_buffer(buf, offset, length, desc, desc_size); kfree(desc); return res; } static int fsverity_read_signature(struct inode *inode, void __user *buf, u64 offset, int length) { struct fsverity_descriptor *desc; int res; res = fsverity_get_descriptor(inode, &desc); if (res) return res; if (desc->sig_size == 0) { res = -ENODATA; goto out; } /* * Include only the builtin signature. fsverity_get_descriptor() * already verified that sig_size is in-bounds. 
*/ res = fsverity_read_buffer(buf, offset, length, desc->signature, le32_to_cpu(desc->sig_size)); out: kfree(desc); return res; } /** * fsverity_ioctl_read_metadata() - read verity metadata from a file * @filp: file to read the metadata from * @uarg: user pointer to fsverity_read_metadata_arg * * Return: length read on success, 0 on EOF, -errno on failure */ int fsverity_ioctl_read_metadata(struct file *filp, const void __user *uarg) { struct inode *inode = file_inode(filp); const struct fsverity_info *vi; struct fsverity_read_metadata_arg arg; int length; void __user *buf; vi = fsverity_get_info(inode); if (!vi) return -ENODATA; /* not a verity file */ /* * Note that we don't have to explicitly check that the file is open for * reading, since verity files can only be opened for reading. */ if (copy_from_user(&arg, uarg, sizeof(arg))) return -EFAULT; if (arg.__reserved) return -EINVAL; /* offset + length must not overflow. */ if (arg.offset + arg.length < arg.offset) return -EINVAL; /* Ensure that the return value will fit in INT_MAX. */ length = min_t(u64, arg.length, INT_MAX); buf = u64_to_user_ptr(arg.buf_ptr); switch (arg.metadata_type) { case FS_VERITY_METADATA_TYPE_MERKLE_TREE: return fsverity_read_merkle_tree(inode, vi, buf, arg.offset, length); case FS_VERITY_METADATA_TYPE_DESCRIPTOR: return fsverity_read_descriptor(inode, buf, arg.offset, length); case FS_VERITY_METADATA_TYPE_SIGNATURE: return fsverity_read_signature(inode, buf, arg.offset, length); default: return -EINVAL; } } EXPORT_SYMBOL_GPL(fsverity_ioctl_read_metadata);
linux-master
fs/verity/read_metadata.c
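/*
 * Example (not part of the kernel sources): a user-space sketch of calling
 * FS_IOC_READ_VERITY_METADATA, the ioctl serviced by
 * fsverity_ioctl_read_metadata() above.  It assumes <linux/fsverity.h> from a
 * kernel that provides the ioctl; the file path is a placeholder.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fsverity.h>

int main(void)
{
        int fd = open("/mnt/data/verity_file", O_RDONLY);
        static char buf[4096];
        struct fsverity_read_metadata_arg arg;
        int ret;

        if (fd < 0) {
                perror("open");
                return EXIT_FAILURE;
        }

        memset(&arg, 0, sizeof(arg));   /* __reserved must be zero */
        arg.metadata_type = FS_VERITY_METADATA_TYPE_MERKLE_TREE;
        arg.offset = 0;                 /* byte offset into the metadata stream */
        arg.length = sizeof(buf);
        arg.buf_ptr = (uintptr_t)buf;

        ret = ioctl(fd, FS_IOC_READ_VERITY_METADATA, &arg);
        if (ret < 0)
                perror("FS_IOC_READ_VERITY_METADATA");
        else
                printf("read %d bytes of Merkle tree data\n", ret);

        close(fd);
        return 0;
}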
// SPDX-License-Identifier: GPL-2.0 /* * Verification of builtin signatures * * Copyright 2019 Google LLC */ /* * This file implements verification of fs-verity builtin signatures. Please * take great care before using this feature. It is not the only way to do * signatures with fs-verity, and the alternatives (such as userspace signature * verification, and IMA appraisal) can be much better. For details about the * limitations of this feature, see Documentation/filesystems/fsverity.rst. */ #include "fsverity_private.h" #include <linux/cred.h> #include <linux/key.h> #include <linux/slab.h> #include <linux/verification.h> /* * /proc/sys/fs/verity/require_signatures * If 1, all verity files must have a valid builtin signature. */ int fsverity_require_signatures; /* * Keyring that contains the trusted X.509 certificates. * * Only root (kuid=0) can modify this. Also, root may use * keyctl_restrict_keyring() to prevent any more additions. */ static struct key *fsverity_keyring; /** * fsverity_verify_signature() - check a verity file's signature * @vi: the file's fsverity_info * @signature: the file's built-in signature * @sig_size: size of signature in bytes, or 0 if no signature * * If the file includes a signature of its fs-verity file digest, verify it * against the certificates in the fs-verity keyring. * * Return: 0 on success (signature valid or not required); -errno on failure */ int fsverity_verify_signature(const struct fsverity_info *vi, const u8 *signature, size_t sig_size) { const struct inode *inode = vi->inode; const struct fsverity_hash_alg *hash_alg = vi->tree_params.hash_alg; struct fsverity_formatted_digest *d; int err; if (sig_size == 0) { if (fsverity_require_signatures) { fsverity_err(inode, "require_signatures=1, rejecting unsigned file!"); return -EPERM; } return 0; } if (fsverity_keyring->keys.nr_leaves_on_tree == 0) { /* * The ".fs-verity" keyring is empty, due to builtin signatures * being supported by the kernel but not actually being used. * In this case, verify_pkcs7_signature() would always return an * error, usually ENOKEY. It could also be EBADMSG if the * PKCS#7 is malformed, but that isn't very important to * distinguish. So, just skip to ENOKEY to avoid the attack * surface of the PKCS#7 parser, which would otherwise be * reachable by any task able to execute FS_IOC_ENABLE_VERITY. 
*/ fsverity_err(inode, "fs-verity keyring is empty, rejecting signed file!"); return -ENOKEY; } d = kzalloc(sizeof(*d) + hash_alg->digest_size, GFP_KERNEL); if (!d) return -ENOMEM; memcpy(d->magic, "FSVerity", 8); d->digest_algorithm = cpu_to_le16(hash_alg - fsverity_hash_algs); d->digest_size = cpu_to_le16(hash_alg->digest_size); memcpy(d->digest, vi->file_digest, hash_alg->digest_size); err = verify_pkcs7_signature(d, sizeof(*d) + hash_alg->digest_size, signature, sig_size, fsverity_keyring, VERIFYING_UNSPECIFIED_SIGNATURE, NULL, NULL); kfree(d); if (err) { if (err == -ENOKEY) fsverity_err(inode, "File's signing cert isn't in the fs-verity keyring"); else if (err == -EKEYREJECTED) fsverity_err(inode, "Incorrect file signature"); else if (err == -EBADMSG) fsverity_err(inode, "Malformed file signature"); else fsverity_err(inode, "Error %d verifying file signature", err); return err; } return 0; } void __init fsverity_init_signature(void) { fsverity_keyring = keyring_alloc(".fs-verity", KUIDT_INIT(0), KGIDT_INIT(0), current_cred(), KEY_POS_SEARCH | KEY_USR_VIEW | KEY_USR_READ | KEY_USR_WRITE | KEY_USR_SEARCH | KEY_USR_SETATTR, KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL); if (IS_ERR(fsverity_keyring)) panic("failed to allocate \".fs-verity\" keyring"); }
linux-master
fs/verity/signature.c
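/*
 * Example (not part of the kernel sources): a user-space sketch that builds
 * the "FSVerity" formatted digest which fsverity_verify_signature() above
 * feeds into PKCS#7 verification.  The local struct mirrors the kernel's
 * fsverity_formatted_digest layout; the digest bytes are placeholders (they
 * would normally come from FS_IOC_MEASURE_VERITY) and a little-endian host is
 * assumed for the 16-bit fields.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct formatted_digest {
        char magic[8];                  /* "FSVerity" */
        uint16_t digest_algorithm;      /* little-endian FS_VERITY_HASH_ALG_* */
        uint16_t digest_size;           /* little-endian digest length */
        uint8_t digest[];
} __attribute__((packed));

int main(void)
{
        const uint8_t fake_digest[32] = { 0 };  /* placeholder SHA-256 digest */
        size_t len = sizeof(struct formatted_digest) + sizeof(fake_digest);
        struct formatted_digest *d = calloc(1, len);

        if (!d)
                return EXIT_FAILURE;
        memcpy(d->magic, "FSVerity", 8);
        d->digest_algorithm = 1;        /* FS_VERITY_HASH_ALG_SHA256 */
        d->digest_size = sizeof(fake_digest);
        memcpy(d->digest, fake_digest, sizeof(fake_digest));

        /* This buffer is what gets signed; the resulting PKCS#7 blob is what
         * the kernel later checks against the ".fs-verity" keyring. */
        fwrite(d, len, 1, stdout);
        free(d);
        return EXIT_SUCCESS;
}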
// SPDX-License-Identifier: GPL-2.0 /* * fs-verity hash algorithms * * Copyright 2019 Google LLC */ #include "fsverity_private.h" #include <crypto/hash.h> /* The hash algorithms supported by fs-verity */ struct fsverity_hash_alg fsverity_hash_algs[] = { [FS_VERITY_HASH_ALG_SHA256] = { .name = "sha256", .digest_size = SHA256_DIGEST_SIZE, .block_size = SHA256_BLOCK_SIZE, .algo_id = HASH_ALGO_SHA256, }, [FS_VERITY_HASH_ALG_SHA512] = { .name = "sha512", .digest_size = SHA512_DIGEST_SIZE, .block_size = SHA512_BLOCK_SIZE, .algo_id = HASH_ALGO_SHA512, }, }; static DEFINE_MUTEX(fsverity_hash_alg_init_mutex); /** * fsverity_get_hash_alg() - validate and prepare a hash algorithm * @inode: optional inode for logging purposes * @num: the hash algorithm number * * Get the struct fsverity_hash_alg for the given hash algorithm number, and * ensure it has a hash transform ready to go. The hash transforms are * allocated on-demand so that we don't waste resources unnecessarily, and * because the crypto modules may be initialized later than fs/verity/. * * Return: pointer to the hash alg on success, else an ERR_PTR() */ const struct fsverity_hash_alg *fsverity_get_hash_alg(const struct inode *inode, unsigned int num) { struct fsverity_hash_alg *alg; struct crypto_shash *tfm; int err; if (num >= ARRAY_SIZE(fsverity_hash_algs) || !fsverity_hash_algs[num].name) { fsverity_warn(inode, "Unknown hash algorithm number: %u", num); return ERR_PTR(-EINVAL); } alg = &fsverity_hash_algs[num]; /* pairs with smp_store_release() below */ if (likely(smp_load_acquire(&alg->tfm) != NULL)) return alg; mutex_lock(&fsverity_hash_alg_init_mutex); if (alg->tfm != NULL) goto out_unlock; tfm = crypto_alloc_shash(alg->name, 0, 0); if (IS_ERR(tfm)) { if (PTR_ERR(tfm) == -ENOENT) { fsverity_warn(inode, "Missing crypto API support for hash algorithm \"%s\"", alg->name); alg = ERR_PTR(-ENOPKG); goto out_unlock; } fsverity_err(inode, "Error allocating hash algorithm \"%s\": %ld", alg->name, PTR_ERR(tfm)); alg = ERR_CAST(tfm); goto out_unlock; } err = -EINVAL; if (WARN_ON_ONCE(alg->digest_size != crypto_shash_digestsize(tfm))) goto err_free_tfm; if (WARN_ON_ONCE(alg->block_size != crypto_shash_blocksize(tfm))) goto err_free_tfm; pr_info("%s using implementation \"%s\"\n", alg->name, crypto_shash_driver_name(tfm)); /* pairs with smp_load_acquire() above */ smp_store_release(&alg->tfm, tfm); goto out_unlock; err_free_tfm: crypto_free_shash(tfm); alg = ERR_PTR(err); out_unlock: mutex_unlock(&fsverity_hash_alg_init_mutex); return alg; } /** * fsverity_prepare_hash_state() - precompute the initial hash state * @alg: hash algorithm * @salt: a salt which is to be prepended to all data to be hashed * @salt_size: salt size in bytes, possibly 0 * * Return: NULL if the salt is empty, otherwise the kmalloc()'ed precomputed * initial hash state on success or an ERR_PTR() on failure. */ const u8 *fsverity_prepare_hash_state(const struct fsverity_hash_alg *alg, const u8 *salt, size_t salt_size) { u8 *hashstate = NULL; SHASH_DESC_ON_STACK(desc, alg->tfm); u8 *padded_salt = NULL; size_t padded_salt_size; int err; desc->tfm = alg->tfm; if (salt_size == 0) return NULL; hashstate = kmalloc(crypto_shash_statesize(alg->tfm), GFP_KERNEL); if (!hashstate) return ERR_PTR(-ENOMEM); /* * Zero-pad the salt to the next multiple of the input size of the hash * algorithm's compression function, e.g. 64 bytes for SHA-256 or 128 * bytes for SHA-512. 
This ensures that the hash algorithm won't have * any bytes buffered internally after processing the salt, thus making * salted hashing just as fast as unsalted hashing. */ padded_salt_size = round_up(salt_size, alg->block_size); padded_salt = kzalloc(padded_salt_size, GFP_KERNEL); if (!padded_salt) { err = -ENOMEM; goto err_free; } memcpy(padded_salt, salt, salt_size); err = crypto_shash_init(desc); if (err) goto err_free; err = crypto_shash_update(desc, padded_salt, padded_salt_size); if (err) goto err_free; err = crypto_shash_export(desc, hashstate); if (err) goto err_free; out: kfree(padded_salt); return hashstate; err_free: kfree(hashstate); hashstate = ERR_PTR(err); goto out; } /** * fsverity_hash_block() - hash a single data or hash block * @params: the Merkle tree's parameters * @inode: inode for which the hashing is being done * @data: virtual address of a buffer containing the block to hash * @out: output digest, size 'params->digest_size' bytes * * Hash a single data or hash block. The hash is salted if a salt is specified * in the Merkle tree parameters. * * Return: 0 on success, -errno on failure */ int fsverity_hash_block(const struct merkle_tree_params *params, const struct inode *inode, const void *data, u8 *out) { SHASH_DESC_ON_STACK(desc, params->hash_alg->tfm); int err; desc->tfm = params->hash_alg->tfm; if (params->hashstate) { err = crypto_shash_import(desc, params->hashstate); if (err) { fsverity_err(inode, "Error %d importing hash state", err); return err; } err = crypto_shash_finup(desc, data, params->block_size, out); } else { err = crypto_shash_digest(desc, data, params->block_size, out); } if (err) fsverity_err(inode, "Error %d computing block hash", err); return err; } /** * fsverity_hash_buffer() - hash some data * @alg: the hash algorithm to use * @data: the data to hash * @size: size of data to hash, in bytes * @out: output digest, size 'alg->digest_size' bytes * * Return: 0 on success, -errno on failure */ int fsverity_hash_buffer(const struct fsverity_hash_alg *alg, const void *data, size_t size, u8 *out) { return crypto_shash_tfm_digest(alg->tfm, data, size, out); } void __init fsverity_check_hash_algs(void) { size_t i; /* * Sanity check the hash algorithms (could be a build-time check, but * they're in an array) */ for (i = 0; i < ARRAY_SIZE(fsverity_hash_algs); i++) { const struct fsverity_hash_alg *alg = &fsverity_hash_algs[i]; if (!alg->name) continue; /* * 0 must never be allocated as an FS_VERITY_HASH_ALG_* value, * as it is reserved for users that use 0 to mean unspecified or * a default value. fs/verity/ itself doesn't care and doesn't * have a default algorithm, but some users make use of this. */ BUG_ON(i == 0); BUG_ON(alg->digest_size > FS_VERITY_MAX_DIGEST_SIZE); /* * For efficiency, the implementation currently assumes the * digest and block sizes are powers of 2. This limitation can * be lifted if the code is updated to handle other values. */ BUG_ON(!is_power_of_2(alg->digest_size)); BUG_ON(!is_power_of_2(alg->block_size)); /* Verify that there is a valid mapping to HASH_ALGO_*. */ BUG_ON(alg->algo_id == 0); BUG_ON(alg->digest_size != hash_digest_size[alg->algo_id]); } }
linux-master
fs/verity/hash_algs.c
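/*
 * Example (not part of the kernel sources): a user-space sketch of the salt
 * padding done by fsverity_prepare_hash_state() above.  The salt is zero-padded
 * to the hash algorithm's input block size (64 bytes for SHA-256) so that no
 * partial block stays buffered after the salt is processed, keeping salted
 * hashing as fast as unsalted hashing.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SHA256_INPUT_BLOCK_SIZE 64

static unsigned char *pad_salt(const unsigned char *salt, size_t salt_size,
                               size_t *padded_size)
{
        size_t block = SHA256_INPUT_BLOCK_SIZE;
        unsigned char *padded;

        /* Round up to the next multiple of the compression-function block. */
        *padded_size = (salt_size + block - 1) / block * block;
        padded = calloc(1, *padded_size);
        if (padded)
                memcpy(padded, salt, salt_size);
        return padded;
}

int main(void)
{
        const unsigned char salt[] = { 0xde, 0xad, 0xbe, 0xef };
        size_t padded_size;
        unsigned char *padded = pad_salt(salt, sizeof(salt), &padded_size);

        if (!padded)
                return EXIT_FAILURE;
        printf("salt of %zu bytes padded to %zu bytes\n",
               sizeof(salt), padded_size);
        free(padded);
        return EXIT_SUCCESS;
}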
// SPDX-License-Identifier: GPL-2.0 /* * Ioctl to get a verity file's digest * * Copyright 2019 Google LLC */ #include "fsverity_private.h" #include <linux/uaccess.h> /** * fsverity_ioctl_measure() - get a verity file's digest * @filp: file to get digest of * @_uarg: user pointer to fsverity_digest * * Retrieve the file digest that the kernel is enforcing for reads from a verity * file. See the "FS_IOC_MEASURE_VERITY" section of * Documentation/filesystems/fsverity.rst for the documentation. * * Return: 0 on success, -errno on failure */ int fsverity_ioctl_measure(struct file *filp, void __user *_uarg) { const struct inode *inode = file_inode(filp); struct fsverity_digest __user *uarg = _uarg; const struct fsverity_info *vi; const struct fsverity_hash_alg *hash_alg; struct fsverity_digest arg; vi = fsverity_get_info(inode); if (!vi) return -ENODATA; /* not a verity file */ hash_alg = vi->tree_params.hash_alg; /* * The user specifies the digest_size their buffer has space for; we can * return the digest if it fits in the available space. We write back * the actual size, which may be shorter than the user-specified size. */ if (get_user(arg.digest_size, &uarg->digest_size)) return -EFAULT; if (arg.digest_size < hash_alg->digest_size) return -EOVERFLOW; memset(&arg, 0, sizeof(arg)); arg.digest_algorithm = hash_alg - fsverity_hash_algs; arg.digest_size = hash_alg->digest_size; if (copy_to_user(uarg, &arg, sizeof(arg))) return -EFAULT; if (copy_to_user(uarg->digest, vi->file_digest, hash_alg->digest_size)) return -EFAULT; return 0; } EXPORT_SYMBOL_GPL(fsverity_ioctl_measure); /** * fsverity_get_digest() - get a verity file's digest * @inode: inode to get digest of * @raw_digest: (out) the raw file digest * @alg: (out) the digest's algorithm, as a FS_VERITY_HASH_ALG_* value * @halg: (out) the digest's algorithm, as a HASH_ALGO_* value * * Retrieves the fsverity digest of the given file. The file must have been * opened at least once since the inode was last loaded into the inode cache; * otherwise this function will not recognize when fsverity is enabled. * * The file's fsverity digest consists of @raw_digest in combination with either * @alg or @halg. (The caller can choose which one of @alg or @halg to use.) * * IMPORTANT: Callers *must* make use of one of the two algorithm IDs, since * @raw_digest is meaningless without knowing which algorithm it uses! fsverity * provides no security guarantee for users who ignore the algorithm ID, even if * they use the digest size (since algorithms can share the same digest size). * * Return: The size of the raw digest in bytes, or 0 if the file doesn't have * fsverity enabled. */ int fsverity_get_digest(struct inode *inode, u8 raw_digest[FS_VERITY_MAX_DIGEST_SIZE], u8 *alg, enum hash_algo *halg) { const struct fsverity_info *vi; const struct fsverity_hash_alg *hash_alg; vi = fsverity_get_info(inode); if (!vi) return 0; /* not a verity file */ hash_alg = vi->tree_params.hash_alg; memcpy(raw_digest, vi->file_digest, hash_alg->digest_size); if (alg) *alg = hash_alg - fsverity_hash_algs; if (halg) *halg = hash_alg->algo_id; return hash_alg->digest_size; } EXPORT_SYMBOL_GPL(fsverity_get_digest);
linux-master
fs/verity/measure.c
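/*
 * Example (not part of the kernel sources): a user-space sketch of
 * FS_IOC_MEASURE_VERITY, implemented by fsverity_ioctl_measure() above.  It
 * assumes <linux/fsverity.h>; the file path is a placeholder and 64 bytes is
 * used as a local "big enough for SHA-512" buffer size.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fsverity.h>

#define MAX_DIGEST_SIZE 64      /* local constant, large enough for SHA-512 */

int main(void)
{
        int fd = open("/mnt/data/verity_file", O_RDONLY);
        struct fsverity_digest *d;
        unsigned int i;

        if (fd < 0) {
                perror("open");
                return EXIT_FAILURE;
        }

        d = calloc(1, sizeof(*d) + MAX_DIGEST_SIZE);
        if (!d)
                return EXIT_FAILURE;
        d->digest_size = MAX_DIGEST_SIZE;       /* space available in 'digest' */

        if (ioctl(fd, FS_IOC_MEASURE_VERITY, d) < 0) {
                perror("FS_IOC_MEASURE_VERITY");
                return EXIT_FAILURE;
        }

        printf("algorithm %u, digest ", (unsigned int)d->digest_algorithm);
        for (i = 0; i < d->digest_size; i++)
                printf("%02x", d->digest[i]);
        printf("\n");

        free(d);
        close(fd);
        return 0;
}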
// SPDX-License-Identifier: GPL-2.0 /* * Ioctl to enable verity on a file * * Copyright 2019 Google LLC */ #include "fsverity_private.h" #include <crypto/hash.h> #include <linux/mount.h> #include <linux/sched/signal.h> #include <linux/uaccess.h> struct block_buffer { u32 filled; bool is_root_hash; u8 *data; }; /* Hash a block, writing the result to the next level's pending block buffer. */ static int hash_one_block(struct inode *inode, const struct merkle_tree_params *params, struct block_buffer *cur) { struct block_buffer *next = cur + 1; int err; /* * Safety check to prevent a buffer overflow in case of a filesystem bug * that allows the file size to change despite deny_write_access(), or a * bug in the Merkle tree logic itself */ if (WARN_ON_ONCE(next->is_root_hash && next->filled != 0)) return -EINVAL; /* Zero-pad the block if it's shorter than the block size. */ memset(&cur->data[cur->filled], 0, params->block_size - cur->filled); err = fsverity_hash_block(params, inode, cur->data, &next->data[next->filled]); if (err) return err; next->filled += params->digest_size; cur->filled = 0; return 0; } static int write_merkle_tree_block(struct inode *inode, const u8 *buf, unsigned long index, const struct merkle_tree_params *params) { u64 pos = (u64)index << params->log_blocksize; int err; err = inode->i_sb->s_vop->write_merkle_tree_block(inode, buf, pos, params->block_size); if (err) fsverity_err(inode, "Error %d writing Merkle tree block %lu", err, index); return err; } /* * Build the Merkle tree for the given file using the given parameters, and * return the root hash in @root_hash. * * The tree is written to a filesystem-specific location as determined by the * ->write_merkle_tree_block() method. However, the blocks that comprise the * tree are the same for all filesystems. */ static int build_merkle_tree(struct file *filp, const struct merkle_tree_params *params, u8 *root_hash) { struct inode *inode = file_inode(filp); const u64 data_size = inode->i_size; const int num_levels = params->num_levels; struct block_buffer _buffers[1 + FS_VERITY_MAX_LEVELS + 1] = {}; struct block_buffer *buffers = &_buffers[1]; unsigned long level_offset[FS_VERITY_MAX_LEVELS]; int level; u64 offset; int err; if (data_size == 0) { /* Empty file is a special case; root hash is all 0's */ memset(root_hash, 0, params->digest_size); return 0; } /* * Allocate the block buffers. Buffer "-1" is for data blocks. * Buffers 0 <= level < num_levels are for the actual tree levels. * Buffer 'num_levels' is for the root hash. 
*/ for (level = -1; level < num_levels; level++) { buffers[level].data = kzalloc(params->block_size, GFP_KERNEL); if (!buffers[level].data) { err = -ENOMEM; goto out; } } buffers[num_levels].data = root_hash; buffers[num_levels].is_root_hash = true; BUILD_BUG_ON(sizeof(level_offset) != sizeof(params->level_start)); memcpy(level_offset, params->level_start, sizeof(level_offset)); /* Hash each data block, also hashing the tree blocks as they fill up */ for (offset = 0; offset < data_size; offset += params->block_size) { ssize_t bytes_read; loff_t pos = offset; buffers[-1].filled = min_t(u64, params->block_size, data_size - offset); bytes_read = __kernel_read(filp, buffers[-1].data, buffers[-1].filled, &pos); if (bytes_read < 0) { err = bytes_read; fsverity_err(inode, "Error %d reading file data", err); goto out; } if (bytes_read != buffers[-1].filled) { err = -EINVAL; fsverity_err(inode, "Short read of file data"); goto out; } err = hash_one_block(inode, params, &buffers[-1]); if (err) goto out; for (level = 0; level < num_levels; level++) { if (buffers[level].filled + params->digest_size <= params->block_size) { /* Next block at @level isn't full yet */ break; } /* Next block at @level is full */ err = hash_one_block(inode, params, &buffers[level]); if (err) goto out; err = write_merkle_tree_block(inode, buffers[level].data, level_offset[level], params); if (err) goto out; level_offset[level]++; } if (fatal_signal_pending(current)) { err = -EINTR; goto out; } cond_resched(); } /* Finish all nonempty pending tree blocks. */ for (level = 0; level < num_levels; level++) { if (buffers[level].filled != 0) { err = hash_one_block(inode, params, &buffers[level]); if (err) goto out; err = write_merkle_tree_block(inode, buffers[level].data, level_offset[level], params); if (err) goto out; } } /* The root hash was filled by the last call to hash_one_block(). */ if (WARN_ON_ONCE(buffers[num_levels].filled != params->digest_size)) { err = -EINVAL; goto out; } err = 0; out: for (level = -1; level < num_levels; level++) kfree(buffers[level].data); return err; } static int enable_verity(struct file *filp, const struct fsverity_enable_arg *arg) { struct inode *inode = file_inode(filp); const struct fsverity_operations *vops = inode->i_sb->s_vop; struct merkle_tree_params params = { }; struct fsverity_descriptor *desc; size_t desc_size = struct_size(desc, signature, arg->sig_size); struct fsverity_info *vi; int err; /* Start initializing the fsverity_descriptor */ desc = kzalloc(desc_size, GFP_KERNEL); if (!desc) return -ENOMEM; desc->version = 1; desc->hash_algorithm = arg->hash_algorithm; desc->log_blocksize = ilog2(arg->block_size); /* Get the salt if the user provided one */ if (arg->salt_size && copy_from_user(desc->salt, u64_to_user_ptr(arg->salt_ptr), arg->salt_size)) { err = -EFAULT; goto out; } desc->salt_size = arg->salt_size; /* Get the builtin signature if the user provided one */ if (arg->sig_size && copy_from_user(desc->signature, u64_to_user_ptr(arg->sig_ptr), arg->sig_size)) { err = -EFAULT; goto out; } desc->sig_size = cpu_to_le32(arg->sig_size); desc->data_size = cpu_to_le64(inode->i_size); /* Prepare the Merkle tree parameters */ err = fsverity_init_merkle_tree_params(&params, inode, arg->hash_algorithm, desc->log_blocksize, desc->salt, desc->salt_size); if (err) goto out; /* * Start enabling verity on this file, serialized by the inode lock. * Fail if verity is already enabled or is already being enabled. 
*/ inode_lock(inode); if (IS_VERITY(inode)) err = -EEXIST; else err = vops->begin_enable_verity(filp); inode_unlock(inode); if (err) goto out; /* * Build the Merkle tree. Don't hold the inode lock during this, since * on huge files this may take a very long time and we don't want to * force unrelated syscalls like chown() to block forever. We don't * need the inode lock here because deny_write_access() already prevents * the file from being written to or truncated, and we still serialize * ->begin_enable_verity() and ->end_enable_verity() using the inode * lock and only allow one process to be here at a time on a given file. */ BUILD_BUG_ON(sizeof(desc->root_hash) < FS_VERITY_MAX_DIGEST_SIZE); err = build_merkle_tree(filp, &params, desc->root_hash); if (err) { fsverity_err(inode, "Error %d building Merkle tree", err); goto rollback; } /* * Create the fsverity_info. Don't bother trying to save work by * reusing the merkle_tree_params from above. Instead, just create the * fsverity_info from the fsverity_descriptor as if it were just loaded * from disk. This is simpler, and it serves as an extra check that the * metadata we're writing is valid before actually enabling verity. */ vi = fsverity_create_info(inode, desc); if (IS_ERR(vi)) { err = PTR_ERR(vi); goto rollback; } /* * Tell the filesystem to finish enabling verity on the file. * Serialized with ->begin_enable_verity() by the inode lock. */ inode_lock(inode); err = vops->end_enable_verity(filp, desc, desc_size, params.tree_size); inode_unlock(inode); if (err) { fsverity_err(inode, "%ps() failed with err %d", vops->end_enable_verity, err); fsverity_free_info(vi); } else if (WARN_ON_ONCE(!IS_VERITY(inode))) { err = -EINVAL; fsverity_free_info(vi); } else { /* Successfully enabled verity */ /* * Readers can start using ->i_verity_info immediately, so it * can't be rolled back once set. So don't set it until just * after the filesystem has successfully enabled verity. */ fsverity_set_info(inode, vi); } out: kfree(params.hashstate); kfree(desc); return err; rollback: inode_lock(inode); (void)vops->end_enable_verity(filp, NULL, 0, params.tree_size); inode_unlock(inode); goto out; } /** * fsverity_ioctl_enable() - enable verity on a file * @filp: file to enable verity on * @uarg: user pointer to fsverity_enable_arg * * Enable fs-verity on a file. See the "FS_IOC_ENABLE_VERITY" section of * Documentation/filesystems/fsverity.rst for the documentation. * * Return: 0 on success, -errno on failure */ int fsverity_ioctl_enable(struct file *filp, const void __user *uarg) { struct inode *inode = file_inode(filp); struct fsverity_enable_arg arg; int err; if (copy_from_user(&arg, uarg, sizeof(arg))) return -EFAULT; if (arg.version != 1) return -EINVAL; if (arg.__reserved1 || memchr_inv(arg.__reserved2, 0, sizeof(arg.__reserved2))) return -EINVAL; if (!is_power_of_2(arg.block_size)) return -EINVAL; if (arg.salt_size > sizeof_field(struct fsverity_descriptor, salt)) return -EMSGSIZE; if (arg.sig_size > FS_VERITY_MAX_SIGNATURE_SIZE) return -EMSGSIZE; /* * Require a regular file with write access. But the actual fd must * still be readonly so that we can lock out all writers. This is * needed to guarantee that no writable fds exist to the file once it * has verity enabled, and to stabilize the data being hashed. */ err = file_permission(filp, MAY_WRITE); if (err) return err; /* * __kernel_read() is used while building the Merkle tree. 
So, we can't * allow file descriptors that were opened for ioctl access only, using * the special nonstandard access mode 3. O_RDONLY only, please! */ if (!(filp->f_mode & FMODE_READ)) return -EBADF; if (IS_APPEND(inode)) return -EPERM; if (S_ISDIR(inode->i_mode)) return -EISDIR; if (!S_ISREG(inode->i_mode)) return -EINVAL; err = mnt_want_write_file(filp); if (err) /* -EROFS */ return err; err = deny_write_access(filp); if (err) /* -ETXTBSY */ goto out_drop_write; err = enable_verity(filp, &arg); /* * We no longer drop the inode's pagecache after enabling verity. This * used to be done to try to avoid a race condition where pages could be * evicted after being used in the Merkle tree construction, then * re-instantiated by a concurrent read. Such pages are unverified, and * the backing storage could have filled them with different content, so * they shouldn't be used to fulfill reads once verity is enabled. * * But, dropping the pagecache has a big performance impact, and it * doesn't fully solve the race condition anyway. So for those reasons, * and also because this race condition isn't very important relatively * speaking (especially for small-ish files, where the chance of a page * being used, evicted, *and* re-instantiated all while enabling verity * is quite small), we no longer drop the inode's pagecache. */ /* * allow_write_access() is needed to pair with deny_write_access(). * Regardless, the filesystem won't allow writing to verity files. */ allow_write_access(filp); out_drop_write: mnt_drop_write_file(filp); return err; } EXPORT_SYMBOL_GPL(fsverity_ioctl_enable);
linux-master
fs/verity/enable.c
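/*
 * Example (not part of the kernel sources): a user-space sketch of enabling
 * fs-verity via FS_IOC_ENABLE_VERITY, the path handled by
 * fsverity_ioctl_enable() above.  It assumes <linux/fsverity.h> and a
 * filesystem with verity support; the file path is a placeholder.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fsverity.h>

int main(void)
{
        /* The fd must be read-only: the kernel denies write access while the
         * Merkle tree is built, and the file stays read-only afterwards. */
        int fd = open("/mnt/data/file_to_seal", O_RDONLY);
        struct fsverity_enable_arg arg;

        if (fd < 0) {
                perror("open");
                return EXIT_FAILURE;
        }

        memset(&arg, 0, sizeof(arg));   /* reserved fields must be zero */
        arg.version = 1;
        arg.hash_algorithm = FS_VERITY_HASH_ALG_SHA256;
        arg.block_size = 4096;          /* must be a power of 2 */
        /* No salt and no builtin signature in this sketch. */

        if (ioctl(fd, FS_IOC_ENABLE_VERITY, &arg) < 0) {
                perror("FS_IOC_ENABLE_VERITY");
                return EXIT_FAILURE;
        }

        printf("verity enabled\n");
        close(fd);
        return 0;
}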
// SPDX-License-Identifier: GPL-2.0 /* * Opening fs-verity files * * Copyright 2019 Google LLC */ #include "fsverity_private.h" #include <linux/mm.h> #include <linux/slab.h> static struct kmem_cache *fsverity_info_cachep; /** * fsverity_init_merkle_tree_params() - initialize Merkle tree parameters * @params: the parameters struct to initialize * @inode: the inode for which the Merkle tree is being built * @hash_algorithm: number of hash algorithm to use * @log_blocksize: log base 2 of block size to use * @salt: pointer to salt (optional) * @salt_size: size of salt, possibly 0 * * Validate the hash algorithm and block size, then compute the tree topology * (num levels, num blocks in each level, etc.) and initialize @params. * * Return: 0 on success, -errno on failure */ int fsverity_init_merkle_tree_params(struct merkle_tree_params *params, const struct inode *inode, unsigned int hash_algorithm, unsigned int log_blocksize, const u8 *salt, size_t salt_size) { const struct fsverity_hash_alg *hash_alg; int err; u64 blocks; u64 blocks_in_level[FS_VERITY_MAX_LEVELS]; u64 offset; int level; memset(params, 0, sizeof(*params)); hash_alg = fsverity_get_hash_alg(inode, hash_algorithm); if (IS_ERR(hash_alg)) return PTR_ERR(hash_alg); params->hash_alg = hash_alg; params->digest_size = hash_alg->digest_size; params->hashstate = fsverity_prepare_hash_state(hash_alg, salt, salt_size); if (IS_ERR(params->hashstate)) { err = PTR_ERR(params->hashstate); params->hashstate = NULL; fsverity_err(inode, "Error %d preparing hash state", err); goto out_err; } /* * fs/verity/ directly assumes that the Merkle tree block size is a * power of 2 less than or equal to PAGE_SIZE. Another restriction * arises from the interaction between fs/verity/ and the filesystems * themselves: filesystems expect to be able to verify a single * filesystem block of data at a time. Therefore, the Merkle tree block * size must also be less than or equal to the filesystem block size. * * The above are the only hard limitations, so in theory the Merkle tree * block size could be as small as twice the digest size. However, * that's not useful, and it would result in some unusually deep and * large Merkle trees. So we currently require that the Merkle tree * block size be at least 1024 bytes. That's small enough to test the * sub-page block case on systems with 4K pages, but not too small. */ if (log_blocksize < 10 || log_blocksize > PAGE_SHIFT || log_blocksize > inode->i_blkbits) { fsverity_warn(inode, "Unsupported log_blocksize: %u", log_blocksize); err = -EINVAL; goto out_err; } params->log_blocksize = log_blocksize; params->block_size = 1 << log_blocksize; params->log_blocks_per_page = PAGE_SHIFT - log_blocksize; params->blocks_per_page = 1 << params->log_blocks_per_page; if (WARN_ON_ONCE(!is_power_of_2(params->digest_size))) { err = -EINVAL; goto out_err; } if (params->block_size < 2 * params->digest_size) { fsverity_warn(inode, "Merkle tree block size (%u) too small for hash algorithm \"%s\"", params->block_size, hash_alg->name); err = -EINVAL; goto out_err; } params->log_digestsize = ilog2(params->digest_size); params->log_arity = log_blocksize - params->log_digestsize; params->hashes_per_block = 1 << params->log_arity; /* * Compute the number of levels in the Merkle tree and create a map from * level to the starting block of that level. Level 'num_levels - 1' is * the root and is stored first. Level 0 is the level directly "above" * the data blocks and is stored last. 
*/ /* Compute number of levels and the number of blocks in each level */ blocks = ((u64)inode->i_size + params->block_size - 1) >> log_blocksize; while (blocks > 1) { if (params->num_levels >= FS_VERITY_MAX_LEVELS) { fsverity_err(inode, "Too many levels in Merkle tree"); err = -EFBIG; goto out_err; } blocks = (blocks + params->hashes_per_block - 1) >> params->log_arity; blocks_in_level[params->num_levels++] = blocks; } /* Compute the starting block of each level */ offset = 0; for (level = (int)params->num_levels - 1; level >= 0; level--) { params->level_start[level] = offset; offset += blocks_in_level[level]; } /* * With block_size != PAGE_SIZE, an in-memory bitmap will need to be * allocated to track the "verified" status of hash blocks. Don't allow * this bitmap to get too large. For now, limit it to 1 MiB, which * limits the file size to about 4.4 TB with SHA-256 and 4K blocks. * * Together with the fact that the data, and thus also the Merkle tree, * cannot have more than ULONG_MAX pages, this implies that hash block * indices can always fit in an 'unsigned long'. But to be safe, we * explicitly check for that too. Note, this is only for hash block * indices; data block indices might not fit in an 'unsigned long'. */ if ((params->block_size != PAGE_SIZE && offset > 1 << 23) || offset > ULONG_MAX) { fsverity_err(inode, "Too many blocks in Merkle tree"); err = -EFBIG; goto out_err; } params->tree_size = offset << log_blocksize; params->tree_pages = PAGE_ALIGN(params->tree_size) >> PAGE_SHIFT; return 0; out_err: kfree(params->hashstate); memset(params, 0, sizeof(*params)); return err; } /* * Compute the file digest by hashing the fsverity_descriptor excluding the * builtin signature and with the sig_size field set to 0. */ static int compute_file_digest(const struct fsverity_hash_alg *hash_alg, struct fsverity_descriptor *desc, u8 *file_digest) { __le32 sig_size = desc->sig_size; int err; desc->sig_size = 0; err = fsverity_hash_buffer(hash_alg, desc, sizeof(*desc), file_digest); desc->sig_size = sig_size; return err; } /* * Create a new fsverity_info from the given fsverity_descriptor (with optional * appended builtin signature), and check the signature if present. The * fsverity_descriptor must have already undergone basic validation. */ struct fsverity_info *fsverity_create_info(const struct inode *inode, struct fsverity_descriptor *desc) { struct fsverity_info *vi; int err; vi = kmem_cache_zalloc(fsverity_info_cachep, GFP_KERNEL); if (!vi) return ERR_PTR(-ENOMEM); vi->inode = inode; err = fsverity_init_merkle_tree_params(&vi->tree_params, inode, desc->hash_algorithm, desc->log_blocksize, desc->salt, desc->salt_size); if (err) { fsverity_err(inode, "Error %d initializing Merkle tree parameters", err); goto fail; } memcpy(vi->root_hash, desc->root_hash, vi->tree_params.digest_size); err = compute_file_digest(vi->tree_params.hash_alg, desc, vi->file_digest); if (err) { fsverity_err(inode, "Error %d computing file digest", err); goto fail; } err = fsverity_verify_signature(vi, desc->signature, le32_to_cpu(desc->sig_size)); if (err) goto fail; if (vi->tree_params.block_size != PAGE_SIZE) { /* * When the Merkle tree block size and page size differ, we use * a bitmap to keep track of which hash blocks have been * verified. This bitmap must contain one bit per hash block, * including alignment to a page boundary at the end. * * Eventually, to support extremely large files in an efficient * way, it might be necessary to make pages of this bitmap * reclaimable. 
But for now, simply allocating the whole bitmap * is a simple solution that works well on the files on which * fsverity is realistically used. E.g., with SHA-256 and 4K * blocks, a 100MB file only needs a 24-byte bitmap, and the * bitmap for any file under 17GB fits in a 4K page. */ unsigned long num_bits = vi->tree_params.tree_pages << vi->tree_params.log_blocks_per_page; vi->hash_block_verified = kvcalloc(BITS_TO_LONGS(num_bits), sizeof(unsigned long), GFP_KERNEL); if (!vi->hash_block_verified) { err = -ENOMEM; goto fail; } spin_lock_init(&vi->hash_page_init_lock); } return vi; fail: fsverity_free_info(vi); return ERR_PTR(err); } void fsverity_set_info(struct inode *inode, struct fsverity_info *vi) { /* * Multiple tasks may race to set ->i_verity_info, so use * cmpxchg_release(). This pairs with the smp_load_acquire() in * fsverity_get_info(). I.e., here we publish ->i_verity_info with a * RELEASE barrier so that other tasks can ACQUIRE it. */ if (cmpxchg_release(&inode->i_verity_info, NULL, vi) != NULL) { /* Lost the race, so free the fsverity_info we allocated. */ fsverity_free_info(vi); /* * Afterwards, the caller may access ->i_verity_info directly, * so make sure to ACQUIRE the winning fsverity_info. */ (void)fsverity_get_info(inode); } } void fsverity_free_info(struct fsverity_info *vi) { if (!vi) return; kfree(vi->tree_params.hashstate); kvfree(vi->hash_block_verified); kmem_cache_free(fsverity_info_cachep, vi); } static bool validate_fsverity_descriptor(struct inode *inode, const struct fsverity_descriptor *desc, size_t desc_size) { if (desc_size < sizeof(*desc)) { fsverity_err(inode, "Unrecognized descriptor size: %zu bytes", desc_size); return false; } if (desc->version != 1) { fsverity_err(inode, "Unrecognized descriptor version: %u", desc->version); return false; } if (memchr_inv(desc->__reserved, 0, sizeof(desc->__reserved))) { fsverity_err(inode, "Reserved bits set in descriptor"); return false; } if (desc->salt_size > sizeof(desc->salt)) { fsverity_err(inode, "Invalid salt_size: %u", desc->salt_size); return false; } if (le64_to_cpu(desc->data_size) != inode->i_size) { fsverity_err(inode, "Wrong data_size: %llu (desc) != %lld (inode)", le64_to_cpu(desc->data_size), inode->i_size); return false; } if (le32_to_cpu(desc->sig_size) > desc_size - sizeof(*desc)) { fsverity_err(inode, "Signature overflows verity descriptor"); return false; } return true; } /* * Read the inode's fsverity_descriptor (with optional appended builtin * signature) from the filesystem, and do basic validation of it. 
*/ int fsverity_get_descriptor(struct inode *inode, struct fsverity_descriptor **desc_ret) { int res; struct fsverity_descriptor *desc; res = inode->i_sb->s_vop->get_verity_descriptor(inode, NULL, 0); if (res < 0) { fsverity_err(inode, "Error %d getting verity descriptor size", res); return res; } if (res > FS_VERITY_MAX_DESCRIPTOR_SIZE) { fsverity_err(inode, "Verity descriptor is too large (%d bytes)", res); return -EMSGSIZE; } desc = kmalloc(res, GFP_KERNEL); if (!desc) return -ENOMEM; res = inode->i_sb->s_vop->get_verity_descriptor(inode, desc, res); if (res < 0) { fsverity_err(inode, "Error %d reading verity descriptor", res); kfree(desc); return res; } if (!validate_fsverity_descriptor(inode, desc, res)) { kfree(desc); return -EINVAL; } *desc_ret = desc; return 0; } /* Ensure the inode has an ->i_verity_info */ static int ensure_verity_info(struct inode *inode) { struct fsverity_info *vi = fsverity_get_info(inode); struct fsverity_descriptor *desc; int err; if (vi) return 0; err = fsverity_get_descriptor(inode, &desc); if (err) return err; vi = fsverity_create_info(inode, desc); if (IS_ERR(vi)) { err = PTR_ERR(vi); goto out_free_desc; } fsverity_set_info(inode, vi); err = 0; out_free_desc: kfree(desc); return err; } int __fsverity_file_open(struct inode *inode, struct file *filp) { if (filp->f_mode & FMODE_WRITE) return -EPERM; return ensure_verity_info(inode); } EXPORT_SYMBOL_GPL(__fsverity_file_open); int __fsverity_prepare_setattr(struct dentry *dentry, struct iattr *attr) { if (attr->ia_valid & ATTR_SIZE) return -EPERM; return 0; } EXPORT_SYMBOL_GPL(__fsverity_prepare_setattr); void __fsverity_cleanup_inode(struct inode *inode) { fsverity_free_info(inode->i_verity_info); inode->i_verity_info = NULL; } EXPORT_SYMBOL_GPL(__fsverity_cleanup_inode); void __init fsverity_init_info_cache(void) { fsverity_info_cachep = KMEM_CACHE_USERCOPY( fsverity_info, SLAB_RECLAIM_ACCOUNT | SLAB_PANIC, file_digest); }
linux-master
fs/verity/open.c
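The level-sizing loop in fs/verity/open.c above shrinks the block count by a factor of hashes_per_block per level until a single root block remains, then lays the levels out root-first. A minimal userspace sketch of that arithmetic follows; it is not kernel code, and the 100 MiB file size, 4 KiB block size, and 128-ary tree (SHA-256 digests) are assumptions chosen for the demo.

/* Userspace sketch of the Merkle tree sizing above (not kernel code).
 * Assumptions for the demo: 100 MiB file, 4 KiB blocks, SHA-256 digests,
 * so each hash block covers 4096 / 32 = 128 = 2^7 blocks below it. */
#include <stdio.h>
#include <stdint.h>

#define DEMO_MAX_LEVELS 8	/* stands in for FS_VERITY_MAX_LEVELS */

int main(void)
{
	const uint64_t file_size = 100ULL * 1024 * 1024;
	const unsigned log_blocksize = 12;	/* 4096-byte blocks */
	const unsigned log_arity = 7;		/* 128 hashes per hash block */
	uint64_t blocks_in_level[DEMO_MAX_LEVELS];
	uint64_t blocks, offset = 0;
	int num_levels = 0, level;

	/* Data blocks, then hash blocks per level until a single root block. */
	blocks = (file_size + (1ULL << log_blocksize) - 1) >> log_blocksize;
	while (blocks > 1 && num_levels < DEMO_MAX_LEVELS) {
		blocks = (blocks + (1ULL << log_arity) - 1) >> log_arity;
		blocks_in_level[num_levels++] = blocks;
	}

	/* Levels are stored root-first, so compute starting blocks top-down. */
	for (level = num_levels - 1; level >= 0; level--) {
		printf("level %d: starts at block %llu, %llu blocks\n", level,
		       (unsigned long long)offset,
		       (unsigned long long)blocks_in_level[level]);
		offset += blocks_in_level[level];
	}
	printf("tree size: %llu bytes\n",
	       (unsigned long long)(offset << log_blocksize));
	return 0;
}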
// SPDX-License-Identifier: GPL-2.0 #include <linux/kernel.h> #include <linux/fs.h> #include <linux/buffer_head.h> #include <asm/div64.h> #include "omfs.h" unsigned long omfs_count_free(struct super_block *sb) { unsigned int i; unsigned long sum = 0; struct omfs_sb_info *sbi = OMFS_SB(sb); int nbits = sb->s_blocksize * 8; for (i = 0; i < sbi->s_imap_size; i++) sum += nbits - bitmap_weight(sbi->s_imap[i], nbits); return sum; } /* * Counts the run of zero bits starting at bit up to max. * It handles the case where a run might spill over a buffer. * Called with bitmap lock. */ static int count_run(unsigned long **addr, int nbits, int addrlen, int bit, int max) { int count = 0; int x; for (; addrlen > 0; addrlen--, addr++) { x = find_next_bit(*addr, nbits, bit); count += x - bit; if (x < nbits || count > max) return min(count, max); bit = 0; } return min(count, max); } /* * Sets or clears the run of count bits starting with bit. * Called with bitmap lock. */ static int set_run(struct super_block *sb, int map, int nbits, int bit, int count, int set) { int i; int err; struct buffer_head *bh; struct omfs_sb_info *sbi = OMFS_SB(sb); err = -ENOMEM; bh = sb_bread(sb, clus_to_blk(sbi, sbi->s_bitmap_ino) + map); if (!bh) goto out; for (i = 0; i < count; i++, bit++) { if (bit >= nbits) { bit = 0; map++; mark_buffer_dirty(bh); brelse(bh); bh = sb_bread(sb, clus_to_blk(sbi, sbi->s_bitmap_ino) + map); if (!bh) goto out; } if (set) { set_bit(bit, sbi->s_imap[map]); set_bit(bit, (unsigned long *)bh->b_data); } else { clear_bit(bit, sbi->s_imap[map]); clear_bit(bit, (unsigned long *)bh->b_data); } } mark_buffer_dirty(bh); brelse(bh); err = 0; out: return err; } /* * Tries to allocate exactly one block. Returns true if successful. */ int omfs_allocate_block(struct super_block *sb, u64 block) { struct buffer_head *bh; struct omfs_sb_info *sbi = OMFS_SB(sb); int bits_per_entry = 8 * sb->s_blocksize; unsigned int map, bit; int ret = 0; u64 tmp; tmp = block; bit = do_div(tmp, bits_per_entry); map = tmp; mutex_lock(&sbi->s_bitmap_lock); if (map >= sbi->s_imap_size || test_and_set_bit(bit, sbi->s_imap[map])) goto out; if (sbi->s_bitmap_ino > 0) { bh = sb_bread(sb, clus_to_blk(sbi, sbi->s_bitmap_ino) + map); if (!bh) goto out; set_bit(bit, (unsigned long *)bh->b_data); mark_buffer_dirty(bh); brelse(bh); } ret = 1; out: mutex_unlock(&sbi->s_bitmap_lock); return ret; } /* * Tries to allocate a set of blocks. The request size depends on the * type: for inodes, we must allocate sbi->s_mirrors blocks, and for file * blocks, we try to allocate sbi->s_clustersize, but can always get away * with just one block. */ int omfs_allocate_range(struct super_block *sb, int min_request, int max_request, u64 *return_block, int *return_size) { struct omfs_sb_info *sbi = OMFS_SB(sb); int bits_per_entry = 8 * sb->s_blocksize; int ret = 0; int i, run, bit; mutex_lock(&sbi->s_bitmap_lock); for (i = 0; i < sbi->s_imap_size; i++) { bit = 0; while (bit < bits_per_entry) { bit = find_next_zero_bit(sbi->s_imap[i], bits_per_entry, bit); if (bit == bits_per_entry) break; run = count_run(&sbi->s_imap[i], bits_per_entry, sbi->s_imap_size-i, bit, max_request); if (run >= min_request) goto found; bit += run; } } ret = -ENOSPC; goto out; found: *return_block = (u64) i * bits_per_entry + bit; *return_size = run; ret = set_run(sb, i, bits_per_entry, bit, run, 1); out: mutex_unlock(&sbi->s_bitmap_lock); return ret; } /* * Clears count bits starting at a given block. 
 */
int omfs_clear_range(struct super_block *sb, u64 block, int count)
{
	struct omfs_sb_info *sbi = OMFS_SB(sb);
	int bits_per_entry = 8 * sb->s_blocksize;
	u64 tmp;
	unsigned int map, bit;
	int ret;

	tmp = block;
	bit = do_div(tmp, bits_per_entry);
	map = tmp;

	if (map >= sbi->s_imap_size)
		return 0;

	mutex_lock(&sbi->s_bitmap_lock);
	ret = set_run(sb, map, bits_per_entry, bit, count, 0);
	mutex_unlock(&sbi->s_bitmap_lock);
	return ret;
}
linux-master
fs/omfs/bitmap.c
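The bitmap helpers in fs/omfs/bitmap.c above treat the free-space bitmap as an array of per-block buffers, so a run of free bits counted by count_run() may spill from the end of one buffer into the start of the next. A small userspace sketch of that run counting follows; it is not kernel code, and the 16-bit demo "buffers" and their contents are assumptions.

/* Userspace sketch of the zero-run counting done by count_run() above
 * (not kernel code).  Assumption: tiny 16-bit "bitmap buffers" so the
 * spill from one buffer into the next is easy to see. */
#include <stdio.h>

#define DEMO_NBITS 16	/* bits tracked per demo buffer */

static int demo_test_bit(unsigned short map, int bit)
{
	return (map >> bit) & 1;
}

static int count_zero_run(const unsigned short *maps, int nbufs, int bit, int max)
{
	int count = 0;
	int i;

	for (i = 0; i < nbufs; i++) {
		for (; bit < DEMO_NBITS; bit++) {
			if (demo_test_bit(maps[i], bit))
				return count < max ? count : max;
			if (++count >= max)
				return max;
		}
		bit = 0;	/* the run continues at bit 0 of the next buffer */
	}
	return count < max ? count : max;
}

int main(void)
{
	/* buffer 0: bits 0-11 allocated, 12-15 free; buffer 1: only bit 5 allocated */
	unsigned short maps[2] = { 0x0fff, 0x0020 };

	/* expect 9: bits 12-15 of buffer 0 plus bits 0-4 of buffer 1 */
	printf("free run starting at bit 12: %d bits\n",
	       count_zero_run(maps, 2, 12, 64));
	return 0;
}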
// SPDX-License-Identifier: GPL-2.0-only /* * OMFS (as used by RIO Karma) directory operations. * Copyright (C) 2005 Bob Copeland <[email protected]> */ #include <linux/fs.h> #include <linux/ctype.h> #include <linux/buffer_head.h> #include "omfs.h" static int omfs_hash(const char *name, int namelen, int mod) { int i, hash = 0; for (i = 0; i < namelen; i++) hash ^= tolower(name[i]) << (i % 24); return hash % mod; } /* * Finds the bucket for a given name and reads the containing block; * *ofs is set to the offset of the first list entry. */ static struct buffer_head *omfs_get_bucket(struct inode *dir, const char *name, int namelen, int *ofs) { int nbuckets = (dir->i_size - OMFS_DIR_START)/8; int bucket = omfs_hash(name, namelen, nbuckets); *ofs = OMFS_DIR_START + bucket * 8; return omfs_bread(dir->i_sb, dir->i_ino); } static struct buffer_head *omfs_scan_list(struct inode *dir, u64 block, const char *name, int namelen, u64 *prev_block) { struct buffer_head *bh; struct omfs_inode *oi; int err = -ENOENT; *prev_block = ~0; while (block != ~0) { bh = omfs_bread(dir->i_sb, block); if (!bh) { err = -EIO; goto err; } oi = (struct omfs_inode *) bh->b_data; if (omfs_is_bad(OMFS_SB(dir->i_sb), &oi->i_head, block)) { brelse(bh); goto err; } if (strncmp(oi->i_name, name, namelen) == 0) return bh; *prev_block = block; block = be64_to_cpu(oi->i_sibling); brelse(bh); } err: return ERR_PTR(err); } static struct buffer_head *omfs_find_entry(struct inode *dir, const char *name, int namelen) { struct buffer_head *bh; int ofs; u64 block, dummy; bh = omfs_get_bucket(dir, name, namelen, &ofs); if (!bh) return ERR_PTR(-EIO); block = be64_to_cpu(*((__be64 *) &bh->b_data[ofs])); brelse(bh); return omfs_scan_list(dir, block, name, namelen, &dummy); } int omfs_make_empty(struct inode *inode, struct super_block *sb) { struct omfs_sb_info *sbi = OMFS_SB(sb); struct buffer_head *bh; struct omfs_inode *oi; bh = omfs_bread(sb, inode->i_ino); if (!bh) return -ENOMEM; memset(bh->b_data, 0, sizeof(struct omfs_inode)); if (S_ISDIR(inode->i_mode)) { memset(&bh->b_data[OMFS_DIR_START], 0xff, sbi->s_sys_blocksize - OMFS_DIR_START); } else omfs_make_empty_table(bh, OMFS_EXTENT_START); oi = (struct omfs_inode *) bh->b_data; oi->i_head.h_self = cpu_to_be64(inode->i_ino); oi->i_sibling = ~cpu_to_be64(0ULL); mark_buffer_dirty(bh); brelse(bh); return 0; } static int omfs_add_link(struct dentry *dentry, struct inode *inode) { struct inode *dir = d_inode(dentry->d_parent); const char *name = dentry->d_name.name; int namelen = dentry->d_name.len; struct omfs_inode *oi; struct buffer_head *bh; u64 block; __be64 *entry; int ofs; /* just prepend to head of queue in proper bucket */ bh = omfs_get_bucket(dir, name, namelen, &ofs); if (!bh) goto out; entry = (__be64 *) &bh->b_data[ofs]; block = be64_to_cpu(*entry); *entry = cpu_to_be64(inode->i_ino); mark_buffer_dirty(bh); brelse(bh); /* now set the sibling and parent pointers on the new inode */ bh = omfs_bread(dir->i_sb, inode->i_ino); if (!bh) goto out; oi = (struct omfs_inode *) bh->b_data; memcpy(oi->i_name, name, namelen); memset(oi->i_name + namelen, 0, OMFS_NAMELEN - namelen); oi->i_sibling = cpu_to_be64(block); oi->i_parent = cpu_to_be64(dir->i_ino); mark_buffer_dirty(bh); brelse(bh); inode_set_ctime_current(dir); /* mark affected inodes dirty to rebuild checksums */ mark_inode_dirty(dir); mark_inode_dirty(inode); return 0; out: return -ENOMEM; } static int omfs_delete_entry(struct dentry *dentry) { struct inode *dir = d_inode(dentry->d_parent); struct inode *dirty; const char *name = 
dentry->d_name.name; int namelen = dentry->d_name.len; struct omfs_inode *oi; struct buffer_head *bh, *bh2; __be64 *entry, next; u64 block, prev; int ofs; int err = -ENOMEM; /* delete the proper node in the bucket's linked list */ bh = omfs_get_bucket(dir, name, namelen, &ofs); if (!bh) goto out; entry = (__be64 *) &bh->b_data[ofs]; block = be64_to_cpu(*entry); bh2 = omfs_scan_list(dir, block, name, namelen, &prev); if (IS_ERR(bh2)) { err = PTR_ERR(bh2); goto out_free_bh; } oi = (struct omfs_inode *) bh2->b_data; next = oi->i_sibling; brelse(bh2); if (prev != ~0) { /* found in middle of list, get list ptr */ brelse(bh); bh = omfs_bread(dir->i_sb, prev); if (!bh) goto out; oi = (struct omfs_inode *) bh->b_data; entry = &oi->i_sibling; } *entry = next; mark_buffer_dirty(bh); if (prev != ~0) { dirty = omfs_iget(dir->i_sb, prev); if (!IS_ERR(dirty)) { mark_inode_dirty(dirty); iput(dirty); } } err = 0; out_free_bh: brelse(bh); out: return err; } static int omfs_dir_is_empty(struct inode *inode) { int nbuckets = (inode->i_size - OMFS_DIR_START) / 8; struct buffer_head *bh; u64 *ptr; int i; bh = omfs_bread(inode->i_sb, inode->i_ino); if (!bh) return 0; ptr = (u64 *) &bh->b_data[OMFS_DIR_START]; for (i = 0; i < nbuckets; i++, ptr++) if (*ptr != ~0) break; brelse(bh); return *ptr != ~0; } static int omfs_remove(struct inode *dir, struct dentry *dentry) { struct inode *inode = d_inode(dentry); int ret; if (S_ISDIR(inode->i_mode) && !omfs_dir_is_empty(inode)) return -ENOTEMPTY; ret = omfs_delete_entry(dentry); if (ret) return ret; clear_nlink(inode); mark_inode_dirty(inode); mark_inode_dirty(dir); return 0; } static int omfs_add_node(struct inode *dir, struct dentry *dentry, umode_t mode) { int err; struct inode *inode = omfs_new_inode(dir, mode); if (IS_ERR(inode)) return PTR_ERR(inode); err = omfs_make_empty(inode, dir->i_sb); if (err) goto out_free_inode; err = omfs_add_link(dentry, inode); if (err) goto out_free_inode; d_instantiate(dentry, inode); return 0; out_free_inode: iput(inode); return err; } static int omfs_mkdir(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode) { return omfs_add_node(dir, dentry, mode | S_IFDIR); } static int omfs_create(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) { return omfs_add_node(dir, dentry, mode | S_IFREG); } static struct dentry *omfs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { struct buffer_head *bh; struct inode *inode = NULL; if (dentry->d_name.len > OMFS_NAMELEN) return ERR_PTR(-ENAMETOOLONG); bh = omfs_find_entry(dir, dentry->d_name.name, dentry->d_name.len); if (!IS_ERR(bh)) { struct omfs_inode *oi = (struct omfs_inode *)bh->b_data; ino_t ino = be64_to_cpu(oi->i_head.h_self); brelse(bh); inode = omfs_iget(dir->i_sb, ino); } else if (bh != ERR_PTR(-ENOENT)) { inode = ERR_CAST(bh); } return d_splice_alias(inode, dentry); } /* sanity check block's self pointer */ int omfs_is_bad(struct omfs_sb_info *sbi, struct omfs_header *header, u64 fsblock) { int is_bad; u64 ino = be64_to_cpu(header->h_self); is_bad = ((ino != fsblock) || (ino < sbi->s_root_ino) || (ino > sbi->s_num_blocks)); if (is_bad) printk(KERN_WARNING "omfs: bad hash chain detected\n"); return is_bad; } static bool omfs_fill_chain(struct inode *dir, struct dir_context *ctx, u64 fsblock, int hindex) { /* follow chain in this bucket */ while (fsblock != ~0) { struct buffer_head *bh = omfs_bread(dir->i_sb, fsblock); struct omfs_inode *oi; u64 self; unsigned char d_type; if (!bh) return true; oi = 
(struct omfs_inode *) bh->b_data; if (omfs_is_bad(OMFS_SB(dir->i_sb), &oi->i_head, fsblock)) { brelse(bh); return true; } self = fsblock; fsblock = be64_to_cpu(oi->i_sibling); /* skip visited nodes */ if (hindex) { hindex--; brelse(bh); continue; } d_type = (oi->i_type == OMFS_DIR) ? DT_DIR : DT_REG; if (!dir_emit(ctx, oi->i_name, strnlen(oi->i_name, OMFS_NAMELEN), self, d_type)) { brelse(bh); return false; } brelse(bh); ctx->pos++; } return true; } static int omfs_rename(struct mnt_idmap *idmap, struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) { struct inode *new_inode = d_inode(new_dentry); struct inode *old_inode = d_inode(old_dentry); int err; if (flags & ~RENAME_NOREPLACE) return -EINVAL; if (new_inode) { /* overwriting existing file/dir */ err = omfs_remove(new_dir, new_dentry); if (err) goto out; } /* since omfs locates files by name, we need to unlink _before_ * adding the new link or we won't find the old one */ err = omfs_delete_entry(old_dentry); if (err) goto out; mark_inode_dirty(old_dir); err = omfs_add_link(new_dentry, old_inode); if (err) goto out; inode_set_ctime_current(old_inode); mark_inode_dirty(old_inode); out: return err; } static int omfs_readdir(struct file *file, struct dir_context *ctx) { struct inode *dir = file_inode(file); struct buffer_head *bh; __be64 *p; unsigned int hchain, hindex; int nbuckets; if (ctx->pos >> 32) return -EINVAL; if (ctx->pos < 1 << 20) { if (!dir_emit_dots(file, ctx)) return 0; ctx->pos = 1 << 20; } nbuckets = (dir->i_size - OMFS_DIR_START) / 8; /* high 12 bits store bucket + 1 and low 20 bits store hash index */ hchain = (ctx->pos >> 20) - 1; hindex = ctx->pos & 0xfffff; bh = omfs_bread(dir->i_sb, dir->i_ino); if (!bh) return -EINVAL; p = (__be64 *)(bh->b_data + OMFS_DIR_START) + hchain; for (; hchain < nbuckets; hchain++) { __u64 fsblock = be64_to_cpu(*p++); if (!omfs_fill_chain(dir, ctx, fsblock, hindex)) break; hindex = 0; ctx->pos = (hchain+2) << 20; } brelse(bh); return 0; } const struct inode_operations omfs_dir_inops = { .lookup = omfs_lookup, .mkdir = omfs_mkdir, .rename = omfs_rename, .create = omfs_create, .unlink = omfs_remove, .rmdir = omfs_remove, }; const struct file_operations omfs_dir_operations = { .read = generic_read_dir, .iterate_shared = omfs_readdir, .llseek = generic_file_llseek, };
linux-master
fs/omfs/dir.c
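Directory lookups in fs/omfs/dir.c above hash the lowercased name into one of the buckets stored at the head of the directory block, and omfs_readdir() packs its resume position with (bucket + 1) in the bits above 20 and the within-chain index in the low 20 bits. The userspace sketch below mirrors both; it is not kernel code, and the file name, bucket count, and chain index are assumptions for the demo.

/* Userspace sketch of the name hashing and readdir position encoding above
 * (not kernel code).  Assumptions: the file name, a 61-bucket directory, and
 * a pretend chain index of 3. */
#include <stdio.h>
#include <string.h>
#include <ctype.h>

static int demo_hash(const char *name, int namelen, int mod)
{
	int i, hash = 0;

	for (i = 0; i < namelen; i++)
		hash ^= tolower((unsigned char)name[i]) << (i % 24);
	return hash % mod;
}

int main(void)
{
	const char *name = "movie.mpg";
	int nbuckets = 61;	/* (dir->i_size - OMFS_DIR_START) / 8 in the kernel */
	int bucket = demo_hash(name, (int)strlen(name), nbuckets);
	unsigned long long hindex = 3;	/* entries already emitted in this chain */
	unsigned long long pos = ((unsigned long long)(bucket + 1) << 20) | hindex;

	printf("\"%s\" hashes to bucket %d\n", name, bucket);
	printf("pos 0x%llx decodes to bucket %llu, chain index %llu\n",
	       pos, (pos >> 20) - 1, pos & 0xfffff);
	return 0;
}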
// SPDX-License-Identifier: GPL-2.0-only /* * Optimized MPEG FS - inode and super operations. * Copyright (C) 2006 Bob Copeland <[email protected]> */ #include <linux/module.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/vfs.h> #include <linux/cred.h> #include <linux/parser.h> #include <linux/buffer_head.h> #include <linux/vmalloc.h> #include <linux/writeback.h> #include <linux/seq_file.h> #include <linux/crc-itu-t.h> #include "omfs.h" MODULE_AUTHOR("Bob Copeland <[email protected]>"); MODULE_DESCRIPTION("OMFS (ReplayTV/Karma) Filesystem for Linux"); MODULE_LICENSE("GPL"); struct buffer_head *omfs_bread(struct super_block *sb, sector_t block) { struct omfs_sb_info *sbi = OMFS_SB(sb); if (block >= sbi->s_num_blocks) return NULL; return sb_bread(sb, clus_to_blk(sbi, block)); } struct inode *omfs_new_inode(struct inode *dir, umode_t mode) { struct inode *inode; u64 new_block; int err; int len; struct omfs_sb_info *sbi = OMFS_SB(dir->i_sb); inode = new_inode(dir->i_sb); if (!inode) return ERR_PTR(-ENOMEM); err = omfs_allocate_range(dir->i_sb, sbi->s_mirrors, sbi->s_mirrors, &new_block, &len); if (err) goto fail; inode->i_ino = new_block; inode_init_owner(&nop_mnt_idmap, inode, NULL, mode); inode->i_mapping->a_ops = &omfs_aops; inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode); switch (mode & S_IFMT) { case S_IFDIR: inode->i_op = &omfs_dir_inops; inode->i_fop = &omfs_dir_operations; inode->i_size = sbi->s_sys_blocksize; inc_nlink(inode); break; case S_IFREG: inode->i_op = &omfs_file_inops; inode->i_fop = &omfs_file_operations; inode->i_size = 0; break; } insert_inode_hash(inode); mark_inode_dirty(inode); return inode; fail: make_bad_inode(inode); iput(inode); return ERR_PTR(err); } /* * Update the header checksums for a dirty inode based on its contents. * Caller is expected to hold the buffer head underlying oi and mark it * dirty. */ static void omfs_update_checksums(struct omfs_inode *oi) { int xor, i, ofs = 0, count; u16 crc = 0; unsigned char *ptr = (unsigned char *) oi; count = be32_to_cpu(oi->i_head.h_body_size); ofs = sizeof(struct omfs_header); crc = crc_itu_t(crc, ptr + ofs, count); oi->i_head.h_crc = cpu_to_be16(crc); xor = ptr[0]; for (i = 1; i < OMFS_XOR_COUNT; i++) xor ^= ptr[i]; oi->i_head.h_check_xor = xor; } static int __omfs_write_inode(struct inode *inode, int wait) { struct omfs_inode *oi; struct omfs_sb_info *sbi = OMFS_SB(inode->i_sb); struct buffer_head *bh, *bh2; u64 ctime; int i; int ret = -EIO; int sync_failed = 0; /* get current inode since we may have written sibling ptrs etc. 
*/ bh = omfs_bread(inode->i_sb, inode->i_ino); if (!bh) goto out; oi = (struct omfs_inode *) bh->b_data; oi->i_head.h_self = cpu_to_be64(inode->i_ino); if (S_ISDIR(inode->i_mode)) oi->i_type = OMFS_DIR; else if (S_ISREG(inode->i_mode)) oi->i_type = OMFS_FILE; else { printk(KERN_WARNING "omfs: unknown file type: %d\n", inode->i_mode); goto out_brelse; } oi->i_head.h_body_size = cpu_to_be32(sbi->s_sys_blocksize - sizeof(struct omfs_header)); oi->i_head.h_version = 1; oi->i_head.h_type = OMFS_INODE_NORMAL; oi->i_head.h_magic = OMFS_IMAGIC; oi->i_size = cpu_to_be64(inode->i_size); ctime = inode_get_ctime(inode).tv_sec * 1000LL + ((inode_get_ctime(inode).tv_nsec + 999)/1000); oi->i_ctime = cpu_to_be64(ctime); omfs_update_checksums(oi); mark_buffer_dirty(bh); if (wait) { sync_dirty_buffer(bh); if (buffer_req(bh) && !buffer_uptodate(bh)) sync_failed = 1; } /* if mirroring writes, copy to next fsblock */ for (i = 1; i < sbi->s_mirrors; i++) { bh2 = omfs_bread(inode->i_sb, inode->i_ino + i); if (!bh2) goto out_brelse; memcpy(bh2->b_data, bh->b_data, bh->b_size); mark_buffer_dirty(bh2); if (wait) { sync_dirty_buffer(bh2); if (buffer_req(bh2) && !buffer_uptodate(bh2)) sync_failed = 1; } brelse(bh2); } ret = (sync_failed) ? -EIO : 0; out_brelse: brelse(bh); out: return ret; } static int omfs_write_inode(struct inode *inode, struct writeback_control *wbc) { return __omfs_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL); } int omfs_sync_inode(struct inode *inode) { return __omfs_write_inode(inode, 1); } /* * called when an entry is deleted, need to clear the bits in the * bitmaps. */ static void omfs_evict_inode(struct inode *inode) { truncate_inode_pages_final(&inode->i_data); clear_inode(inode); if (inode->i_nlink) return; if (S_ISREG(inode->i_mode)) { inode->i_size = 0; omfs_shrink_inode(inode); } omfs_clear_range(inode->i_sb, inode->i_ino, 2); } struct inode *omfs_iget(struct super_block *sb, ino_t ino) { struct omfs_sb_info *sbi = OMFS_SB(sb); struct omfs_inode *oi; struct buffer_head *bh; u64 ctime; unsigned long nsecs; struct inode *inode; inode = iget_locked(sb, ino); if (!inode) return ERR_PTR(-ENOMEM); if (!(inode->i_state & I_NEW)) return inode; bh = omfs_bread(inode->i_sb, ino); if (!bh) goto iget_failed; oi = (struct omfs_inode *)bh->b_data; /* check self */ if (ino != be64_to_cpu(oi->i_head.h_self)) goto fail_bh; inode->i_uid = sbi->s_uid; inode->i_gid = sbi->s_gid; ctime = be64_to_cpu(oi->i_ctime); nsecs = do_div(ctime, 1000) * 1000L; inode->i_atime.tv_sec = ctime; inode->i_mtime.tv_sec = ctime; inode_set_ctime(inode, ctime, nsecs); inode->i_atime.tv_nsec = nsecs; inode->i_mtime.tv_nsec = nsecs; inode->i_mapping->a_ops = &omfs_aops; switch (oi->i_type) { case OMFS_DIR: inode->i_mode = S_IFDIR | (S_IRWXUGO & ~sbi->s_dmask); inode->i_op = &omfs_dir_inops; inode->i_fop = &omfs_dir_operations; inode->i_size = sbi->s_sys_blocksize; inc_nlink(inode); break; case OMFS_FILE: inode->i_mode = S_IFREG | (S_IRWXUGO & ~sbi->s_fmask); inode->i_fop = &omfs_file_operations; inode->i_size = be64_to_cpu(oi->i_size); break; } brelse(bh); unlock_new_inode(inode); return inode; fail_bh: brelse(bh); iget_failed: iget_failed(inode); return ERR_PTR(-EIO); } static void omfs_put_super(struct super_block *sb) { struct omfs_sb_info *sbi = OMFS_SB(sb); kfree(sbi->s_imap); kfree(sbi); sb->s_fs_info = NULL; } static int omfs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *s = dentry->d_sb; struct omfs_sb_info *sbi = OMFS_SB(s); u64 id = huge_encode_dev(s->s_bdev->bd_dev); buf->f_type = 
OMFS_MAGIC; buf->f_bsize = sbi->s_blocksize; buf->f_blocks = sbi->s_num_blocks; buf->f_files = sbi->s_num_blocks; buf->f_namelen = OMFS_NAMELEN; buf->f_fsid = u64_to_fsid(id); buf->f_bfree = buf->f_bavail = buf->f_ffree = omfs_count_free(s); return 0; } /* * Display the mount options in /proc/mounts. */ static int omfs_show_options(struct seq_file *m, struct dentry *root) { struct omfs_sb_info *sbi = OMFS_SB(root->d_sb); umode_t cur_umask = current_umask(); if (!uid_eq(sbi->s_uid, current_uid())) seq_printf(m, ",uid=%u", from_kuid_munged(&init_user_ns, sbi->s_uid)); if (!gid_eq(sbi->s_gid, current_gid())) seq_printf(m, ",gid=%u", from_kgid_munged(&init_user_ns, sbi->s_gid)); if (sbi->s_dmask == sbi->s_fmask) { if (sbi->s_fmask != cur_umask) seq_printf(m, ",umask=%o", sbi->s_fmask); } else { if (sbi->s_dmask != cur_umask) seq_printf(m, ",dmask=%o", sbi->s_dmask); if (sbi->s_fmask != cur_umask) seq_printf(m, ",fmask=%o", sbi->s_fmask); } return 0; } static const struct super_operations omfs_sops = { .write_inode = omfs_write_inode, .evict_inode = omfs_evict_inode, .put_super = omfs_put_super, .statfs = omfs_statfs, .show_options = omfs_show_options, }; /* * For Rio Karma, there is an on-disk free bitmap whose location is * stored in the root block. For ReplayTV, there is no such free bitmap * so we have to walk the tree. Both inodes and file data are allocated * from the same map. This array can be big (300k) so we allocate * in units of the blocksize. */ static int omfs_get_imap(struct super_block *sb) { unsigned int bitmap_size, array_size; int count; struct omfs_sb_info *sbi = OMFS_SB(sb); struct buffer_head *bh; unsigned long **ptr; sector_t block; bitmap_size = DIV_ROUND_UP(sbi->s_num_blocks, 8); array_size = DIV_ROUND_UP(bitmap_size, sb->s_blocksize); if (sbi->s_bitmap_ino == ~0ULL) goto out; sbi->s_imap_size = array_size; sbi->s_imap = kcalloc(array_size, sizeof(unsigned long *), GFP_KERNEL); if (!sbi->s_imap) goto nomem; block = clus_to_blk(sbi, sbi->s_bitmap_ino); if (block >= sbi->s_num_blocks) goto nomem; ptr = sbi->s_imap; for (count = bitmap_size; count > 0; count -= sb->s_blocksize) { bh = sb_bread(sb, block++); if (!bh) goto nomem_free; *ptr = kmemdup(bh->b_data, sb->s_blocksize, GFP_KERNEL); if (!*ptr) { brelse(bh); goto nomem_free; } if (count < sb->s_blocksize) memset((void *)*ptr + count, 0xff, sb->s_blocksize - count); brelse(bh); ptr++; } out: return 0; nomem_free: for (count = 0; count < array_size; count++) kfree(sbi->s_imap[count]); kfree(sbi->s_imap); nomem: sbi->s_imap = NULL; sbi->s_imap_size = 0; return -ENOMEM; } enum { Opt_uid, Opt_gid, Opt_umask, Opt_dmask, Opt_fmask, Opt_err }; static const match_table_t tokens = { {Opt_uid, "uid=%u"}, {Opt_gid, "gid=%u"}, {Opt_umask, "umask=%o"}, {Opt_dmask, "dmask=%o"}, {Opt_fmask, "fmask=%o"}, {Opt_err, NULL}, }; static int parse_options(char *options, struct omfs_sb_info *sbi) { char *p; substring_t args[MAX_OPT_ARGS]; int option; if (!options) return 1; while ((p = strsep(&options, ",")) != NULL) { int token; if (!*p) continue; token = match_token(p, tokens, args); switch (token) { case Opt_uid: if (match_int(&args[0], &option)) return 0; sbi->s_uid = make_kuid(current_user_ns(), option); if (!uid_valid(sbi->s_uid)) return 0; break; case Opt_gid: if (match_int(&args[0], &option)) return 0; sbi->s_gid = make_kgid(current_user_ns(), option); if (!gid_valid(sbi->s_gid)) return 0; break; case Opt_umask: if (match_octal(&args[0], &option)) return 0; sbi->s_fmask = sbi->s_dmask = option; break; case Opt_dmask: if 
(match_octal(&args[0], &option)) return 0; sbi->s_dmask = option; break; case Opt_fmask: if (match_octal(&args[0], &option)) return 0; sbi->s_fmask = option; break; default: return 0; } } return 1; } static int omfs_fill_super(struct super_block *sb, void *data, int silent) { struct buffer_head *bh, *bh2; struct omfs_super_block *omfs_sb; struct omfs_root_block *omfs_rb; struct omfs_sb_info *sbi; struct inode *root; int ret = -EINVAL; sbi = kzalloc(sizeof(struct omfs_sb_info), GFP_KERNEL); if (!sbi) return -ENOMEM; sb->s_fs_info = sbi; sbi->s_uid = current_uid(); sbi->s_gid = current_gid(); sbi->s_dmask = sbi->s_fmask = current_umask(); if (!parse_options((char *) data, sbi)) goto end; sb->s_maxbytes = 0xffffffff; sb->s_time_gran = NSEC_PER_MSEC; sb->s_time_min = 0; sb->s_time_max = U64_MAX / MSEC_PER_SEC; sb_set_blocksize(sb, 0x200); bh = sb_bread(sb, 0); if (!bh) goto end; omfs_sb = (struct omfs_super_block *)bh->b_data; if (omfs_sb->s_magic != cpu_to_be32(OMFS_MAGIC)) { if (!silent) printk(KERN_ERR "omfs: Invalid superblock (%x)\n", omfs_sb->s_magic); goto out_brelse_bh; } sb->s_magic = OMFS_MAGIC; sbi->s_num_blocks = be64_to_cpu(omfs_sb->s_num_blocks); sbi->s_blocksize = be32_to_cpu(omfs_sb->s_blocksize); sbi->s_mirrors = be32_to_cpu(omfs_sb->s_mirrors); sbi->s_root_ino = be64_to_cpu(omfs_sb->s_root_block); sbi->s_sys_blocksize = be32_to_cpu(omfs_sb->s_sys_blocksize); mutex_init(&sbi->s_bitmap_lock); if (sbi->s_num_blocks > OMFS_MAX_BLOCKS) { printk(KERN_ERR "omfs: sysblock number (%llx) is out of range\n", (unsigned long long)sbi->s_num_blocks); goto out_brelse_bh; } if (sbi->s_sys_blocksize > PAGE_SIZE) { printk(KERN_ERR "omfs: sysblock size (%d) is out of range\n", sbi->s_sys_blocksize); goto out_brelse_bh; } if (sbi->s_blocksize < sbi->s_sys_blocksize || sbi->s_blocksize > OMFS_MAX_BLOCK_SIZE) { printk(KERN_ERR "omfs: block size (%d) is out of range\n", sbi->s_blocksize); goto out_brelse_bh; } /* * Use sys_blocksize as the fs block since it is smaller than a * page while the fs blocksize can be larger. */ sb_set_blocksize(sb, sbi->s_sys_blocksize); /* * ...and the difference goes into a shift. sys_blocksize is always * a power of two factor of blocksize. 
*/ sbi->s_block_shift = get_bitmask_order(sbi->s_blocksize) - get_bitmask_order(sbi->s_sys_blocksize); bh2 = omfs_bread(sb, be64_to_cpu(omfs_sb->s_root_block)); if (!bh2) goto out_brelse_bh; omfs_rb = (struct omfs_root_block *)bh2->b_data; sbi->s_bitmap_ino = be64_to_cpu(omfs_rb->r_bitmap); sbi->s_clustersize = be32_to_cpu(omfs_rb->r_clustersize); if (sbi->s_num_blocks != be64_to_cpu(omfs_rb->r_num_blocks)) { printk(KERN_ERR "omfs: block count discrepancy between " "super and root blocks (%llx, %llx)\n", (unsigned long long)sbi->s_num_blocks, (unsigned long long)be64_to_cpu(omfs_rb->r_num_blocks)); goto out_brelse_bh2; } if (sbi->s_bitmap_ino != ~0ULL && sbi->s_bitmap_ino > sbi->s_num_blocks) { printk(KERN_ERR "omfs: free space bitmap location is corrupt " "(%llx, total blocks %llx)\n", (unsigned long long) sbi->s_bitmap_ino, (unsigned long long) sbi->s_num_blocks); goto out_brelse_bh2; } if (sbi->s_clustersize < 1 || sbi->s_clustersize > OMFS_MAX_CLUSTER_SIZE) { printk(KERN_ERR "omfs: cluster size out of range (%d)", sbi->s_clustersize); goto out_brelse_bh2; } ret = omfs_get_imap(sb); if (ret) goto out_brelse_bh2; sb->s_op = &omfs_sops; root = omfs_iget(sb, be64_to_cpu(omfs_rb->r_root_dir)); if (IS_ERR(root)) { ret = PTR_ERR(root); goto out_brelse_bh2; } sb->s_root = d_make_root(root); if (!sb->s_root) { ret = -ENOMEM; goto out_brelse_bh2; } printk(KERN_DEBUG "omfs: Mounted volume %s\n", omfs_rb->r_name); ret = 0; out_brelse_bh2: brelse(bh2); out_brelse_bh: brelse(bh); end: if (ret) kfree(sbi); return ret; } static struct dentry *omfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_bdev(fs_type, flags, dev_name, data, omfs_fill_super); } static struct file_system_type omfs_fs_type = { .owner = THIS_MODULE, .name = "omfs", .mount = omfs_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; MODULE_ALIAS_FS("omfs"); static int __init init_omfs_fs(void) { return register_filesystem(&omfs_fs_type); } static void __exit exit_omfs_fs(void) { unregister_filesystem(&omfs_fs_type); } module_init(init_omfs_fs); module_exit(exit_omfs_fs);
linux-master
fs/omfs/inode.c
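omfs_update_checksums() in fs/omfs/inode.c above stamps each on-disk header with two checks: a CRC-ITU-T (polynomial 0x1021, MSB first, initial value 0) over h_body_size bytes following the header, and a byte-wise XOR over the first OMFS_XOR_COUNT bytes. The sketch below uses a bitwise loop equivalent to the kernel's table-driven crc_itu_t(); it is not kernel code, and the fake block contents, header size, and XOR count are assumptions for the demo.

/* Userspace sketch of the two header checks computed above (not kernel code).
 * The bitwise loop is equivalent to the kernel's table-driven crc_itu_t()
 * (polynomial 0x1021, MSB first).  Assumptions: the 64-byte fake block, the
 * 24-byte header size, and the 16-byte XOR count are demo values only. */
#include <stdio.h>
#include <string.h>
#include <stddef.h>

#define DEMO_XOR_COUNT 16	/* stands in for OMFS_XOR_COUNT */
#define DEMO_HEADER_SIZE 24	/* stands in for sizeof(struct omfs_header) */

static unsigned short demo_crc_itu_t(unsigned short crc,
				     const unsigned char *buf, size_t len)
{
	int i;

	while (len--) {
		crc ^= (unsigned short)(*buf++ << 8);
		for (i = 0; i < 8; i++)
			crc = (crc & 0x8000) ? (crc << 1) ^ 0x1021 : crc << 1;
	}
	return crc;
}

int main(void)
{
	unsigned char block[64];
	unsigned short crc;
	unsigned char xorsum = 0;
	int i;

	memset(block, 0xab, sizeof(block));	/* fake on-disk inode contents */

	/* CRC covers h_body_size bytes starting right after the header. */
	crc = demo_crc_itu_t(0, block + DEMO_HEADER_SIZE,
			     sizeof(block) - DEMO_HEADER_SIZE);

	/* The XOR check covers the first OMFS_XOR_COUNT bytes of the block. */
	for (i = 0; i < DEMO_XOR_COUNT; i++)
		xorsum ^= block[i];

	printf("crc=0x%04x xor=0x%02x\n", crc, xorsum);
	return 0;
}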
// SPDX-License-Identifier: GPL-2.0-only /* * OMFS (as used by RIO Karma) file operations. * Copyright (C) 2005 Bob Copeland <[email protected]> */ #include <linux/module.h> #include <linux/fs.h> #include <linux/buffer_head.h> #include <linux/mpage.h> #include "omfs.h" static u32 omfs_max_extents(struct omfs_sb_info *sbi, int offset) { return (sbi->s_sys_blocksize - offset - sizeof(struct omfs_extent)) / sizeof(struct omfs_extent_entry); } void omfs_make_empty_table(struct buffer_head *bh, int offset) { struct omfs_extent *oe = (struct omfs_extent *) &bh->b_data[offset]; oe->e_next = ~cpu_to_be64(0ULL); oe->e_extent_count = cpu_to_be32(1), oe->e_fill = cpu_to_be32(0x22), oe->e_entry[0].e_cluster = ~cpu_to_be64(0ULL); oe->e_entry[0].e_blocks = ~cpu_to_be64(0ULL); } int omfs_shrink_inode(struct inode *inode) { struct omfs_sb_info *sbi = OMFS_SB(inode->i_sb); struct omfs_extent *oe; struct omfs_extent_entry *entry; struct buffer_head *bh; u64 next, last; u32 extent_count; u32 max_extents; int ret; /* traverse extent table, freeing each entry that is greater * than inode->i_size; */ next = inode->i_ino; /* only support truncate -> 0 for now */ ret = -EIO; if (inode->i_size != 0) goto out; bh = omfs_bread(inode->i_sb, next); if (!bh) goto out; oe = (struct omfs_extent *)(&bh->b_data[OMFS_EXTENT_START]); max_extents = omfs_max_extents(sbi, OMFS_EXTENT_START); for (;;) { if (omfs_is_bad(sbi, (struct omfs_header *) bh->b_data, next)) goto out_brelse; extent_count = be32_to_cpu(oe->e_extent_count); if (extent_count > max_extents) goto out_brelse; last = next; next = be64_to_cpu(oe->e_next); entry = oe->e_entry; /* ignore last entry as it is the terminator */ for (; extent_count > 1; extent_count--) { u64 start, count; start = be64_to_cpu(entry->e_cluster); count = be64_to_cpu(entry->e_blocks); omfs_clear_range(inode->i_sb, start, (int) count); entry++; } omfs_make_empty_table(bh, (char *) oe - bh->b_data); mark_buffer_dirty(bh); brelse(bh); if (last != inode->i_ino) omfs_clear_range(inode->i_sb, last, sbi->s_mirrors); if (next == ~0) break; bh = omfs_bread(inode->i_sb, next); if (!bh) goto out; oe = (struct omfs_extent *) (&bh->b_data[OMFS_EXTENT_CONT]); max_extents = omfs_max_extents(sbi, OMFS_EXTENT_CONT); } ret = 0; out: return ret; out_brelse: brelse(bh); return ret; } static void omfs_truncate(struct inode *inode) { omfs_shrink_inode(inode); mark_inode_dirty(inode); } /* * Add new blocks to the current extent, or create new entries/continuations * as necessary. */ static int omfs_grow_extent(struct inode *inode, struct omfs_extent *oe, u64 *ret_block) { struct omfs_extent_entry *terminator; struct omfs_extent_entry *entry = oe->e_entry; struct omfs_sb_info *sbi = OMFS_SB(inode->i_sb); u32 extent_count = be32_to_cpu(oe->e_extent_count); u64 new_block = 0; u32 max_count; int new_count; int ret = 0; /* reached the end of the extent table with no blocks mapped. * there are three possibilities for adding: grow last extent, * add a new extent to the current extent table, and add a * continuation inode. 
in last two cases need an allocator for * sbi->s_cluster_size */ /* TODO: handle holes */ /* should always have a terminator */ if (extent_count < 1) return -EIO; /* trivially grow current extent, if next block is not taken */ terminator = entry + extent_count - 1; if (extent_count > 1) { entry = terminator-1; new_block = be64_to_cpu(entry->e_cluster) + be64_to_cpu(entry->e_blocks); if (omfs_allocate_block(inode->i_sb, new_block)) { be64_add_cpu(&entry->e_blocks, 1); terminator->e_blocks = ~(cpu_to_be64( be64_to_cpu(~terminator->e_blocks) + 1)); goto out; } } max_count = omfs_max_extents(sbi, OMFS_EXTENT_START); /* TODO: add a continuation block here */ if (be32_to_cpu(oe->e_extent_count) > max_count-1) return -EIO; /* try to allocate a new cluster */ ret = omfs_allocate_range(inode->i_sb, 1, sbi->s_clustersize, &new_block, &new_count); if (ret) goto out_fail; /* copy terminator down an entry */ entry = terminator; terminator++; memcpy(terminator, entry, sizeof(struct omfs_extent_entry)); entry->e_cluster = cpu_to_be64(new_block); entry->e_blocks = cpu_to_be64((u64) new_count); terminator->e_blocks = ~(cpu_to_be64( be64_to_cpu(~terminator->e_blocks) + (u64) new_count)); /* write in new entry */ be32_add_cpu(&oe->e_extent_count, 1); out: *ret_block = new_block; out_fail: return ret; } /* * Scans across the directory table for a given file block number. * If block not found, return 0. */ static sector_t find_block(struct inode *inode, struct omfs_extent_entry *ent, sector_t block, int count, int *left) { /* count > 1 because of terminator */ sector_t searched = 0; for (; count > 1; count--) { int numblocks = clus_to_blk(OMFS_SB(inode->i_sb), be64_to_cpu(ent->e_blocks)); if (block >= searched && block < searched + numblocks) { /* * found it at cluster + (block - searched) * numblocks - (block - searched) is remainder */ *left = numblocks - (block - searched); return clus_to_blk(OMFS_SB(inode->i_sb), be64_to_cpu(ent->e_cluster)) + block - searched; } searched += numblocks; ent++; } return 0; } static int omfs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create) { struct buffer_head *bh; sector_t next, offset; int ret; u64 new_block; u32 max_extents; int extent_count; struct omfs_extent *oe; struct omfs_extent_entry *entry; struct omfs_sb_info *sbi = OMFS_SB(inode->i_sb); int max_blocks = bh_result->b_size >> inode->i_blkbits; int remain; ret = -EIO; bh = omfs_bread(inode->i_sb, inode->i_ino); if (!bh) goto out; oe = (struct omfs_extent *)(&bh->b_data[OMFS_EXTENT_START]); max_extents = omfs_max_extents(sbi, OMFS_EXTENT_START); next = inode->i_ino; for (;;) { if (omfs_is_bad(sbi, (struct omfs_header *) bh->b_data, next)) goto out_brelse; extent_count = be32_to_cpu(oe->e_extent_count); next = be64_to_cpu(oe->e_next); entry = oe->e_entry; if (extent_count > max_extents) goto out_brelse; offset = find_block(inode, entry, block, extent_count, &remain); if (offset > 0) { ret = 0; map_bh(bh_result, inode->i_sb, offset); if (remain > max_blocks) remain = max_blocks; bh_result->b_size = (remain << inode->i_blkbits); goto out_brelse; } if (next == ~0) break; brelse(bh); bh = omfs_bread(inode->i_sb, next); if (!bh) goto out; oe = (struct omfs_extent *) (&bh->b_data[OMFS_EXTENT_CONT]); max_extents = omfs_max_extents(sbi, OMFS_EXTENT_CONT); } if (create) { ret = omfs_grow_extent(inode, oe, &new_block); if (ret == 0) { mark_buffer_dirty(bh); mark_inode_dirty(inode); map_bh(bh_result, inode->i_sb, clus_to_blk(sbi, new_block)); } } out_brelse: brelse(bh); out: return ret; } 
static int omfs_read_folio(struct file *file, struct folio *folio) { return block_read_full_folio(folio, omfs_get_block); } static void omfs_readahead(struct readahead_control *rac) { mpage_readahead(rac, omfs_get_block); } static int omfs_writepages(struct address_space *mapping, struct writeback_control *wbc) { return mpage_writepages(mapping, wbc, omfs_get_block); } static void omfs_write_failed(struct address_space *mapping, loff_t to) { struct inode *inode = mapping->host; if (to > inode->i_size) { truncate_pagecache(inode, inode->i_size); omfs_truncate(inode); } } static int omfs_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, struct page **pagep, void **fsdata) { int ret; ret = block_write_begin(mapping, pos, len, pagep, omfs_get_block); if (unlikely(ret)) omfs_write_failed(mapping, pos + len); return ret; } static sector_t omfs_bmap(struct address_space *mapping, sector_t block) { return generic_block_bmap(mapping, block, omfs_get_block); } const struct file_operations omfs_file_operations = { .llseek = generic_file_llseek, .read_iter = generic_file_read_iter, .write_iter = generic_file_write_iter, .mmap = generic_file_mmap, .fsync = generic_file_fsync, .splice_read = filemap_splice_read, }; static int omfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *attr) { struct inode *inode = d_inode(dentry); int error; error = setattr_prepare(&nop_mnt_idmap, dentry, attr); if (error) return error; if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size != i_size_read(inode)) { error = inode_newsize_ok(inode, attr->ia_size); if (error) return error; truncate_setsize(inode, attr->ia_size); omfs_truncate(inode); } setattr_copy(&nop_mnt_idmap, inode, attr); mark_inode_dirty(inode); return 0; } const struct inode_operations omfs_file_inops = { .setattr = omfs_setattr, }; const struct address_space_operations omfs_aops = { .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .read_folio = omfs_read_folio, .readahead = omfs_readahead, .writepages = omfs_writepages, .write_begin = omfs_write_begin, .write_end = generic_write_end, .bmap = omfs_bmap, .migrate_folio = buffer_migrate_folio, };
linux-master
fs/omfs/file.c
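find_block() in fs/omfs/file.c above maps a file-relative block by walking (cluster, block count) extent entries, returning the matching device block plus how many blocks remain in that extent so omfs_get_block() can map a multi-block range at once. A minimal userspace sketch follows; it is not kernel code, the extent list is made up, clus_to_blk() is treated as an identity mapping, and the terminator entry is omitted.

/* Userspace sketch of the extent walk done by find_block() above (not kernel
 * code).  Assumptions: a made-up two-extent list, clus_to_blk() treated as an
 * identity mapping, and the terminator entry omitted. */
#include <stdio.h>
#include <stdint.h>

struct demo_extent {
	uint64_t cluster;	/* physical start block of the extent */
	uint64_t blocks;	/* length of the extent in blocks */
};

static uint64_t demo_find_block(const struct demo_extent *ent, int count,
				uint64_t block, int *left)
{
	uint64_t searched = 0;

	for (; count > 0; count--, ent++) {
		if (block >= searched && block < searched + ent->blocks) {
			*left = (int)(ent->blocks - (block - searched));
			return ent->cluster + (block - searched);
		}
		searched += ent->blocks;
	}
	return 0;	/* block not mapped */
}

int main(void)
{
	const struct demo_extent extents[] = {
		{ .cluster = 1000, .blocks = 8 },	/* logical 0-7  */
		{ .cluster = 5000, .blocks = 4 },	/* logical 8-11 */
	};
	int left;
	uint64_t phys = demo_find_block(extents, 2, 10, &left);

	/* expect physical 5002 with 2 blocks left in the extent */
	printf("logical 10 -> physical %llu, %d blocks left\n",
	       (unsigned long long)phys, left);
	return 0;
}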
// SPDX-License-Identifier: GPL-2.0-only /* * linux/fs/hpfs/super.c * * Mikulas Patocka ([email protected]), 1998-1999 * * mounting, unmounting, error handling */ #include "hpfs_fn.h" #include <linux/module.h> #include <linux/parser.h> #include <linux/init.h> #include <linux/statfs.h> #include <linux/magic.h> #include <linux/sched.h> #include <linux/bitmap.h> #include <linux/slab.h> #include <linux/seq_file.h> /* Mark the filesystem dirty, so that chkdsk checks it when os/2 booted */ static void mark_dirty(struct super_block *s, int remount) { if (hpfs_sb(s)->sb_chkdsk && (remount || !sb_rdonly(s))) { struct buffer_head *bh; struct hpfs_spare_block *sb; if ((sb = hpfs_map_sector(s, 17, &bh, 0))) { sb->dirty = 1; sb->old_wrote = 0; mark_buffer_dirty(bh); sync_dirty_buffer(bh); brelse(bh); } } } /* Mark the filesystem clean (mark it dirty for chkdsk if chkdsk==2 or if there were errors) */ static void unmark_dirty(struct super_block *s) { struct buffer_head *bh; struct hpfs_spare_block *sb; if (sb_rdonly(s)) return; sync_blockdev(s->s_bdev); if ((sb = hpfs_map_sector(s, 17, &bh, 0))) { sb->dirty = hpfs_sb(s)->sb_chkdsk > 1 - hpfs_sb(s)->sb_was_error; sb->old_wrote = hpfs_sb(s)->sb_chkdsk >= 2 && !hpfs_sb(s)->sb_was_error; mark_buffer_dirty(bh); sync_dirty_buffer(bh); brelse(bh); } } /* Filesystem error... */ void hpfs_error(struct super_block *s, const char *fmt, ...) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; pr_err("filesystem error: %pV", &vaf); va_end(args); if (!hpfs_sb(s)->sb_was_error) { if (hpfs_sb(s)->sb_err == 2) { pr_cont("; crashing the system because you wanted it\n"); mark_dirty(s, 0); panic("HPFS panic"); } else if (hpfs_sb(s)->sb_err == 1) { if (sb_rdonly(s)) pr_cont("; already mounted read-only\n"); else { pr_cont("; remounting read-only\n"); mark_dirty(s, 0); s->s_flags |= SB_RDONLY; } } else if (sb_rdonly(s)) pr_cont("; going on - but anything won't be destroyed because it's read-only\n"); else pr_cont("; corrupted filesystem mounted read/write - your computer will explode within 20 seconds ... but you wanted it so!\n"); } else pr_cont("\n"); hpfs_sb(s)->sb_was_error = 1; } /* * A little trick to detect cycles in many hpfs structures and don't let the * kernel crash on corrupted filesystem. When first called, set c2 to 0. * * BTW. chkdsk doesn't detect cycles correctly. When I had 2 lost directories * nested each in other, chkdsk locked up happilly. 
*/ int hpfs_stop_cycles(struct super_block *s, int key, int *c1, int *c2, char *msg) { if (*c2 && *c1 == key) { hpfs_error(s, "cycle detected on key %08x in %s", key, msg); return 1; } (*c2)++; if (!((*c2 - 1) & *c2)) *c1 = key; return 0; } static void free_sbi(struct hpfs_sb_info *sbi) { kfree(sbi->sb_cp_table); kfree(sbi->sb_bmp_dir); kfree(sbi); } static void lazy_free_sbi(struct rcu_head *rcu) { free_sbi(container_of(rcu, struct hpfs_sb_info, rcu)); } static void hpfs_put_super(struct super_block *s) { hpfs_lock(s); unmark_dirty(s); hpfs_unlock(s); call_rcu(&hpfs_sb(s)->rcu, lazy_free_sbi); } static unsigned hpfs_count_one_bitmap(struct super_block *s, secno secno) { struct quad_buffer_head qbh; unsigned long *bits; unsigned count; bits = hpfs_map_4sectors(s, secno, &qbh, 0); if (!bits) return (unsigned)-1; count = bitmap_weight(bits, 2048 * BITS_PER_BYTE); hpfs_brelse4(&qbh); return count; } static unsigned count_bitmaps(struct super_block *s) { unsigned n, count, n_bands; n_bands = (hpfs_sb(s)->sb_fs_size + 0x3fff) >> 14; count = 0; for (n = 0; n < COUNT_RD_AHEAD; n++) { hpfs_prefetch_bitmap(s, n); } for (n = 0; n < n_bands; n++) { unsigned c; hpfs_prefetch_bitmap(s, n + COUNT_RD_AHEAD); c = hpfs_count_one_bitmap(s, le32_to_cpu(hpfs_sb(s)->sb_bmp_dir[n])); if (c != (unsigned)-1) count += c; } return count; } unsigned hpfs_get_free_dnodes(struct super_block *s) { struct hpfs_sb_info *sbi = hpfs_sb(s); if (sbi->sb_n_free_dnodes == (unsigned)-1) { unsigned c = hpfs_count_one_bitmap(s, sbi->sb_dmap); if (c == (unsigned)-1) return 0; sbi->sb_n_free_dnodes = c; } return sbi->sb_n_free_dnodes; } static int hpfs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *s = dentry->d_sb; struct hpfs_sb_info *sbi = hpfs_sb(s); u64 id = huge_encode_dev(s->s_bdev->bd_dev); hpfs_lock(s); if (sbi->sb_n_free == (unsigned)-1) sbi->sb_n_free = count_bitmaps(s); buf->f_type = s->s_magic; buf->f_bsize = 512; buf->f_blocks = sbi->sb_fs_size; buf->f_bfree = sbi->sb_n_free; buf->f_bavail = sbi->sb_n_free; buf->f_files = sbi->sb_dirband_size / 4; buf->f_ffree = hpfs_get_free_dnodes(s); buf->f_fsid = u64_to_fsid(id); buf->f_namelen = 254; hpfs_unlock(s); return 0; } long hpfs_ioctl(struct file *file, unsigned cmd, unsigned long arg) { switch (cmd) { case FITRIM: { struct fstrim_range range; secno n_trimmed; int r; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (copy_from_user(&range, (struct fstrim_range __user *)arg, sizeof(range))) return -EFAULT; r = hpfs_trim_fs(file_inode(file)->i_sb, range.start >> 9, (range.start + range.len) >> 9, (range.minlen + 511) >> 9, &n_trimmed); if (r) return r; range.len = (u64)n_trimmed << 9; if (copy_to_user((struct fstrim_range __user *)arg, &range, sizeof(range))) return -EFAULT; return 0; } default: { return -ENOIOCTLCMD; } } } static struct kmem_cache * hpfs_inode_cachep; static struct inode *hpfs_alloc_inode(struct super_block *sb) { struct hpfs_inode_info *ei; ei = alloc_inode_sb(sb, hpfs_inode_cachep, GFP_NOFS); if (!ei) return NULL; return &ei->vfs_inode; } static void hpfs_free_inode(struct inode *inode) { kmem_cache_free(hpfs_inode_cachep, hpfs_i(inode)); } static void init_once(void *foo) { struct hpfs_inode_info *ei = (struct hpfs_inode_info *) foo; inode_init_once(&ei->vfs_inode); } static int init_inodecache(void) { hpfs_inode_cachep = kmem_cache_create("hpfs_inode_cache", sizeof(struct hpfs_inode_info), 0, (SLAB_RECLAIM_ACCOUNT| SLAB_MEM_SPREAD|SLAB_ACCOUNT), init_once); if (hpfs_inode_cachep == NULL) return -ENOMEM; return 0; } static void 
destroy_inodecache(void) { /* * Make sure all delayed rcu free inodes are flushed before we * destroy cache. */ rcu_barrier(); kmem_cache_destroy(hpfs_inode_cachep); } /* * A tiny parser for option strings, stolen from dosfs. * Stolen again from read-only hpfs. * And updated for table-driven option parsing. */ enum { Opt_help, Opt_uid, Opt_gid, Opt_umask, Opt_case_lower, Opt_case_asis, Opt_check_none, Opt_check_normal, Opt_check_strict, Opt_err_cont, Opt_err_ro, Opt_err_panic, Opt_eas_no, Opt_eas_ro, Opt_eas_rw, Opt_chkdsk_no, Opt_chkdsk_errors, Opt_chkdsk_always, Opt_timeshift, Opt_err, }; static const match_table_t tokens = { {Opt_help, "help"}, {Opt_uid, "uid=%u"}, {Opt_gid, "gid=%u"}, {Opt_umask, "umask=%o"}, {Opt_case_lower, "case=lower"}, {Opt_case_asis, "case=asis"}, {Opt_check_none, "check=none"}, {Opt_check_normal, "check=normal"}, {Opt_check_strict, "check=strict"}, {Opt_err_cont, "errors=continue"}, {Opt_err_ro, "errors=remount-ro"}, {Opt_err_panic, "errors=panic"}, {Opt_eas_no, "eas=no"}, {Opt_eas_ro, "eas=ro"}, {Opt_eas_rw, "eas=rw"}, {Opt_chkdsk_no, "chkdsk=no"}, {Opt_chkdsk_errors, "chkdsk=errors"}, {Opt_chkdsk_always, "chkdsk=always"}, {Opt_timeshift, "timeshift=%d"}, {Opt_err, NULL}, }; static int parse_opts(char *opts, kuid_t *uid, kgid_t *gid, umode_t *umask, int *lowercase, int *eas, int *chk, int *errs, int *chkdsk, int *timeshift) { char *p; int option; if (!opts) return 1; /*pr_info("Parsing opts: '%s'\n",opts);*/ while ((p = strsep(&opts, ",")) != NULL) { substring_t args[MAX_OPT_ARGS]; int token; if (!*p) continue; token = match_token(p, tokens, args); switch (token) { case Opt_help: return 2; case Opt_uid: if (match_int(args, &option)) return 0; *uid = make_kuid(current_user_ns(), option); if (!uid_valid(*uid)) return 0; break; case Opt_gid: if (match_int(args, &option)) return 0; *gid = make_kgid(current_user_ns(), option); if (!gid_valid(*gid)) return 0; break; case Opt_umask: if (match_octal(args, &option)) return 0; *umask = option; break; case Opt_case_lower: *lowercase = 1; break; case Opt_case_asis: *lowercase = 0; break; case Opt_check_none: *chk = 0; break; case Opt_check_normal: *chk = 1; break; case Opt_check_strict: *chk = 2; break; case Opt_err_cont: *errs = 0; break; case Opt_err_ro: *errs = 1; break; case Opt_err_panic: *errs = 2; break; case Opt_eas_no: *eas = 0; break; case Opt_eas_ro: *eas = 1; break; case Opt_eas_rw: *eas = 2; break; case Opt_chkdsk_no: *chkdsk = 0; break; case Opt_chkdsk_errors: *chkdsk = 1; break; case Opt_chkdsk_always: *chkdsk = 2; break; case Opt_timeshift: { int m = 1; char *rhs = args[0].from; if (!rhs || !*rhs) return 0; if (*rhs == '-') m = -1; if (*rhs == '+' || *rhs == '-') rhs++; *timeshift = simple_strtoul(rhs, &rhs, 0) * m; if (*rhs) return 0; break; } default: return 0; } } return 1; } static inline void hpfs_help(void) { pr_info("\n\ HPFS filesystem options:\n\ help do not mount and display this text\n\ uid=xxx set uid of files that don't have uid specified in eas\n\ gid=xxx set gid of files that don't have gid specified in eas\n\ umask=xxx set mode of files that don't have mode specified in eas\n\ case=lower lowercase all files\n\ case=asis do not lowercase files (default)\n\ check=none no fs checks - kernel may crash on corrupted filesystem\n\ check=normal do some checks - it should not crash (default)\n\ check=strict do extra time-consuming checks, used for debugging\n\ errors=continue continue on errors\n\ errors=remount-ro remount read-only if errors found (default)\n\ errors=panic panic on errors\n\ 
chkdsk=no do not mark fs for chkdsking even if there were errors\n\ chkdsk=errors mark fs dirty if errors found (default)\n\ chkdsk=always always mark fs dirty - used for debugging\n\ eas=no ignore extended attributes\n\ eas=ro read but do not write extended attributes\n\ eas=rw r/w eas => enables chmod, chown, mknod, ln -s (default)\n\ timeshift=nnn add nnn seconds to file times\n\ \n"); } static int hpfs_remount_fs(struct super_block *s, int *flags, char *data) { kuid_t uid; kgid_t gid; umode_t umask; int lowercase, eas, chk, errs, chkdsk, timeshift; int o; struct hpfs_sb_info *sbi = hpfs_sb(s); sync_filesystem(s); *flags |= SB_NOATIME; hpfs_lock(s); uid = sbi->sb_uid; gid = sbi->sb_gid; umask = 0777 & ~sbi->sb_mode; lowercase = sbi->sb_lowercase; eas = sbi->sb_eas; chk = sbi->sb_chk; chkdsk = sbi->sb_chkdsk; errs = sbi->sb_err; timeshift = sbi->sb_timeshift; if (!(o = parse_opts(data, &uid, &gid, &umask, &lowercase, &eas, &chk, &errs, &chkdsk, &timeshift))) { pr_err("bad mount options.\n"); goto out_err; } if (o == 2) { hpfs_help(); goto out_err; } if (timeshift != sbi->sb_timeshift) { pr_err("timeshift can't be changed using remount.\n"); goto out_err; } unmark_dirty(s); sbi->sb_uid = uid; sbi->sb_gid = gid; sbi->sb_mode = 0777 & ~umask; sbi->sb_lowercase = lowercase; sbi->sb_eas = eas; sbi->sb_chk = chk; sbi->sb_chkdsk = chkdsk; sbi->sb_err = errs; sbi->sb_timeshift = timeshift; if (!(*flags & SB_RDONLY)) mark_dirty(s, 1); hpfs_unlock(s); return 0; out_err: hpfs_unlock(s); return -EINVAL; } static int hpfs_show_options(struct seq_file *seq, struct dentry *root) { struct hpfs_sb_info *sbi = hpfs_sb(root->d_sb); seq_printf(seq, ",uid=%u", from_kuid_munged(&init_user_ns, sbi->sb_uid)); seq_printf(seq, ",gid=%u", from_kgid_munged(&init_user_ns, sbi->sb_gid)); seq_printf(seq, ",umask=%03o", (~sbi->sb_mode & 0777)); if (sbi->sb_lowercase) seq_printf(seq, ",case=lower"); if (!sbi->sb_chk) seq_printf(seq, ",check=none"); if (sbi->sb_chk == 2) seq_printf(seq, ",check=strict"); if (!sbi->sb_err) seq_printf(seq, ",errors=continue"); if (sbi->sb_err == 2) seq_printf(seq, ",errors=panic"); if (!sbi->sb_chkdsk) seq_printf(seq, ",chkdsk=no"); if (sbi->sb_chkdsk == 2) seq_printf(seq, ",chkdsk=always"); if (!sbi->sb_eas) seq_printf(seq, ",eas=no"); if (sbi->sb_eas == 1) seq_printf(seq, ",eas=ro"); if (sbi->sb_timeshift) seq_printf(seq, ",timeshift=%d", sbi->sb_timeshift); return 0; } /* Super operations */ static const struct super_operations hpfs_sops = { .alloc_inode = hpfs_alloc_inode, .free_inode = hpfs_free_inode, .evict_inode = hpfs_evict_inode, .put_super = hpfs_put_super, .statfs = hpfs_statfs, .remount_fs = hpfs_remount_fs, .show_options = hpfs_show_options, }; static int hpfs_fill_super(struct super_block *s, void *options, int silent) { struct buffer_head *bh0, *bh1, *bh2; struct hpfs_boot_block *bootblock; struct hpfs_super_block *superblock; struct hpfs_spare_block *spareblock; struct hpfs_sb_info *sbi; struct inode *root; kuid_t uid; kgid_t gid; umode_t umask; int lowercase, eas, chk, errs, chkdsk, timeshift; dnode_secno root_dno; struct hpfs_dirent *de = NULL; struct quad_buffer_head qbh; int o; sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); if (!sbi) { return -ENOMEM; } s->s_fs_info = sbi; mutex_init(&sbi->hpfs_mutex); hpfs_lock(s); uid = current_uid(); gid = current_gid(); umask = current_umask(); lowercase = 0; eas = 2; chk = 1; errs = 1; chkdsk = 1; timeshift = 0; if (!(o = parse_opts(options, &uid, &gid, &umask, &lowercase, &eas, &chk, &errs, &chkdsk, &timeshift))) { pr_err("bad mount 
options.\n"); goto bail0; } if (o==2) { hpfs_help(); goto bail0; } /*sbi->sb_mounting = 1;*/ sb_set_blocksize(s, 512); sbi->sb_fs_size = -1; if (!(bootblock = hpfs_map_sector(s, 0, &bh0, 0))) goto bail1; if (!(superblock = hpfs_map_sector(s, 16, &bh1, 1))) goto bail2; if (!(spareblock = hpfs_map_sector(s, 17, &bh2, 0))) goto bail3; /* Check magics */ if (/*le16_to_cpu(bootblock->magic) != BB_MAGIC ||*/ le32_to_cpu(superblock->magic) != SB_MAGIC || le32_to_cpu(spareblock->magic) != SP_MAGIC) { if (!silent) pr_err("Bad magic ... probably not HPFS\n"); goto bail4; } /* Check version */ if (!sb_rdonly(s) && superblock->funcversion != 2 && superblock->funcversion != 3) { pr_err("Bad version %d,%d. Mount readonly to go around\n", (int)superblock->version, (int)superblock->funcversion); pr_err("please try recent version of HPFS driver at http://artax.karlin.mff.cuni.cz/~mikulas/vyplody/hpfs/index-e.cgi and if it still can't understand this format, contact author - [email protected]\n"); goto bail4; } s->s_flags |= SB_NOATIME; /* Fill superblock stuff */ s->s_magic = HPFS_SUPER_MAGIC; s->s_op = &hpfs_sops; s->s_d_op = &hpfs_dentry_operations; s->s_time_min = local_to_gmt(s, 0); s->s_time_max = local_to_gmt(s, U32_MAX); sbi->sb_root = le32_to_cpu(superblock->root); sbi->sb_fs_size = le32_to_cpu(superblock->n_sectors); sbi->sb_bitmaps = le32_to_cpu(superblock->bitmaps); sbi->sb_dirband_start = le32_to_cpu(superblock->dir_band_start); sbi->sb_dirband_size = le32_to_cpu(superblock->n_dir_band); sbi->sb_dmap = le32_to_cpu(superblock->dir_band_bitmap); sbi->sb_uid = uid; sbi->sb_gid = gid; sbi->sb_mode = 0777 & ~umask; sbi->sb_n_free = -1; sbi->sb_n_free_dnodes = -1; sbi->sb_lowercase = lowercase; sbi->sb_eas = eas; sbi->sb_chk = chk; sbi->sb_chkdsk = chkdsk; sbi->sb_err = errs; sbi->sb_timeshift = timeshift; sbi->sb_was_error = 0; sbi->sb_cp_table = NULL; sbi->sb_c_bitmap = -1; sbi->sb_max_fwd_alloc = 0xffffff; if (sbi->sb_fs_size >= 0x80000000) { hpfs_error(s, "invalid size in superblock: %08x", (unsigned)sbi->sb_fs_size); goto bail4; } if (spareblock->n_spares_used) hpfs_load_hotfix_map(s, spareblock); /* Load bitmap directory */ if (!(sbi->sb_bmp_dir = hpfs_load_bitmap_directory(s, le32_to_cpu(superblock->bitmaps)))) goto bail4; /* Check for general fs errors*/ if (spareblock->dirty && !spareblock->old_wrote) { if (errs == 2) { pr_err("Improperly stopped, not mounted\n"); goto bail4; } hpfs_error(s, "improperly stopped"); } if (!sb_rdonly(s)) { spareblock->dirty = 1; spareblock->old_wrote = 0; mark_buffer_dirty(bh2); } if (le32_to_cpu(spareblock->n_dnode_spares) != le32_to_cpu(spareblock->n_dnode_spares_free)) { if (errs >= 2) { pr_err("Spare dnodes used, try chkdsk\n"); mark_dirty(s, 0); goto bail4; } hpfs_error(s, "warning: spare dnodes used, try chkdsk"); if (errs == 0) pr_err("Proceeding, but your filesystem could be corrupted if you delete files or directories\n"); } if (chk) { unsigned a; if (le32_to_cpu(superblock->dir_band_end) - le32_to_cpu(superblock->dir_band_start) + 1 != le32_to_cpu(superblock->n_dir_band) || le32_to_cpu(superblock->dir_band_end) < le32_to_cpu(superblock->dir_band_start) || le32_to_cpu(superblock->n_dir_band) > 0x4000) { hpfs_error(s, "dir band size mismatch: dir_band_start==%08x, dir_band_end==%08x, n_dir_band==%08x", le32_to_cpu(superblock->dir_band_start), le32_to_cpu(superblock->dir_band_end), le32_to_cpu(superblock->n_dir_band)); goto bail4; } a = sbi->sb_dirband_size; sbi->sb_dirband_size = 0; if (hpfs_chk_sectors(s, le32_to_cpu(superblock->dir_band_start), 
le32_to_cpu(superblock->n_dir_band), "dir_band") || hpfs_chk_sectors(s, le32_to_cpu(superblock->dir_band_bitmap), 4, "dir_band_bitmap") || hpfs_chk_sectors(s, le32_to_cpu(superblock->bitmaps), 4, "bitmaps")) { mark_dirty(s, 0); goto bail4; } sbi->sb_dirband_size = a; } else pr_err("You really don't want any checks? You are crazy...\n"); /* Load code page table */ if (le32_to_cpu(spareblock->n_code_pages)) if (!(sbi->sb_cp_table = hpfs_load_code_page(s, le32_to_cpu(spareblock->code_page_dir)))) pr_err("code page support is disabled\n"); brelse(bh2); brelse(bh1); brelse(bh0); root = iget_locked(s, sbi->sb_root); if (!root) goto bail0; hpfs_init_inode(root); hpfs_read_inode(root); unlock_new_inode(root); s->s_root = d_make_root(root); if (!s->s_root) goto bail0; /* * find the root directory's . pointer & finish filling in the inode */ root_dno = hpfs_fnode_dno(s, sbi->sb_root); if (root_dno) de = map_dirent(root, root_dno, "\001\001", 2, NULL, &qbh); if (!de) hpfs_error(s, "unable to find root dir"); else { root->i_atime.tv_sec = local_to_gmt(s, le32_to_cpu(de->read_date)); root->i_atime.tv_nsec = 0; root->i_mtime.tv_sec = local_to_gmt(s, le32_to_cpu(de->write_date)); root->i_mtime.tv_nsec = 0; inode_set_ctime(root, local_to_gmt(s, le32_to_cpu(de->creation_date)), 0); hpfs_i(root)->i_ea_size = le32_to_cpu(de->ea_size); hpfs_i(root)->i_parent_dir = root->i_ino; if (root->i_size == -1) root->i_size = 2048; if (root->i_blocks == -1) root->i_blocks = 5; hpfs_brelse4(&qbh); } hpfs_unlock(s); return 0; bail4: brelse(bh2); bail3: brelse(bh1); bail2: brelse(bh0); bail1: bail0: hpfs_unlock(s); free_sbi(sbi); return -EINVAL; } static struct dentry *hpfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_bdev(fs_type, flags, dev_name, data, hpfs_fill_super); } static struct file_system_type hpfs_fs_type = { .owner = THIS_MODULE, .name = "hpfs", .mount = hpfs_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; MODULE_ALIAS_FS("hpfs"); static int __init init_hpfs_fs(void) { int err = init_inodecache(); if (err) goto out1; err = register_filesystem(&hpfs_fs_type); if (err) goto out; return 0; out: destroy_inodecache(); out1: return err; } static void __exit exit_hpfs_fs(void) { unregister_filesystem(&hpfs_fs_type); destroy_inodecache(); } module_init(init_hpfs_fs) module_exit(exit_hpfs_fs) MODULE_LICENSE("GPL");
linux-master
fs/hpfs/super.c
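The fill_super path above, when mounted with the "chk" option, validates the directory band advertised by the superblock before trusting it. Below is a minimal stand-alone sketch of just that arithmetic; the struct is a hypothetical host-order mirror of the three on-disk fields, not the kernel's superblock layout, and the rules are copied from the checks in the code above.

/*
 * Sketch of the directory-band sanity check done at mount time with "chk":
 * end - start + 1 must equal n_dir_band, the range must not be inverted,
 * and the band may not exceed the 0x4000-sector limit enforced above.
 * Hypothetical user-space code, not part of the driver.
 */
#include <stdint.h>
#include <stdio.h>

struct dirband_info {            /* hypothetical host-order view */
	uint32_t dir_band_start; /* first sector of the directory band */
	uint32_t dir_band_end;   /* last sector of the directory band */
	uint32_t n_dir_band;     /* advertised band size in sectors */
};

static int dirband_is_sane(const struct dirband_info *d)
{
	if (d->dir_band_end < d->dir_band_start)
		return 0;                       /* inverted range */
	if (d->dir_band_end - d->dir_band_start + 1 != d->n_dir_band)
		return 0;                       /* size mismatch */
	if (d->n_dir_band > 0x4000)
		return 0;                       /* exceeds the limit checked above */
	return 1;
}

int main(void)
{
	struct dirband_info ok  = { 1000, 1000 + 0x4000 - 1, 0x4000 };
	struct dirband_info bad = { 1000, 900, 0x4000 };

	printf("ok:  %d\n", dirband_is_sane(&ok));   /* prints 1 */
	printf("bad: %d\n", dirband_is_sane(&bad));  /* prints 0 */
	return 0;
}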
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/hpfs/buffer.c * * Mikulas Patocka ([email protected]), 1998-1999 * * general buffer i/o */ #include <linux/sched.h> #include <linux/slab.h> #include <linux/blkdev.h> #include "hpfs_fn.h" secno hpfs_search_hotfix_map(struct super_block *s, secno sec) { unsigned i; struct hpfs_sb_info *sbi = hpfs_sb(s); for (i = 0; unlikely(i < sbi->n_hotfixes); i++) { if (sbi->hotfix_from[i] == sec) { return sbi->hotfix_to[i]; } } return sec; } unsigned hpfs_search_hotfix_map_for_range(struct super_block *s, secno sec, unsigned n) { unsigned i; struct hpfs_sb_info *sbi = hpfs_sb(s); for (i = 0; unlikely(i < sbi->n_hotfixes); i++) { if (sbi->hotfix_from[i] >= sec && sbi->hotfix_from[i] < sec + n) { n = sbi->hotfix_from[i] - sec; } } return n; } void hpfs_prefetch_sectors(struct super_block *s, unsigned secno, int n) { struct buffer_head *bh; struct blk_plug plug; if (n <= 0 || unlikely(secno >= hpfs_sb(s)->sb_fs_size)) return; if (unlikely(hpfs_search_hotfix_map_for_range(s, secno, n) != n)) return; bh = sb_find_get_block(s, secno); if (bh) { if (buffer_uptodate(bh)) { brelse(bh); return; } brelse(bh); } blk_start_plug(&plug); while (n > 0) { if (unlikely(secno >= hpfs_sb(s)->sb_fs_size)) break; sb_breadahead(s, secno); secno++; n--; } blk_finish_plug(&plug); } /* Map a sector into a buffer and return pointers to it and to the buffer. */ void *hpfs_map_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp, int ahead) { struct buffer_head *bh; hpfs_lock_assert(s); hpfs_prefetch_sectors(s, secno, ahead); cond_resched(); *bhp = bh = sb_bread(s, hpfs_search_hotfix_map(s, secno)); if (bh != NULL) return bh->b_data; else { pr_err("%s(): read error\n", __func__); return NULL; } } /* Like hpfs_map_sector but don't read anything */ void *hpfs_get_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp) { struct buffer_head *bh; /*return hpfs_map_sector(s, secno, bhp, 0);*/ hpfs_lock_assert(s); cond_resched(); if ((*bhp = bh = sb_getblk(s, hpfs_search_hotfix_map(s, secno))) != NULL) { if (!buffer_uptodate(bh)) wait_on_buffer(bh); set_buffer_uptodate(bh); return bh->b_data; } else { pr_err("%s(): getblk failed\n", __func__); return NULL; } } /* Map 4 sectors into a 4buffer and return pointers to it and to the buffer. 
*/ void *hpfs_map_4sectors(struct super_block *s, unsigned secno, struct quad_buffer_head *qbh, int ahead) { char *data; hpfs_lock_assert(s); cond_resched(); if (secno & 3) { pr_err("%s(): unaligned read\n", __func__); return NULL; } hpfs_prefetch_sectors(s, secno, 4 + ahead); if (!hpfs_map_sector(s, secno + 0, &qbh->bh[0], 0)) goto bail0; if (!hpfs_map_sector(s, secno + 1, &qbh->bh[1], 0)) goto bail1; if (!hpfs_map_sector(s, secno + 2, &qbh->bh[2], 0)) goto bail2; if (!hpfs_map_sector(s, secno + 3, &qbh->bh[3], 0)) goto bail3; if (likely(qbh->bh[1]->b_data == qbh->bh[0]->b_data + 1 * 512) && likely(qbh->bh[2]->b_data == qbh->bh[0]->b_data + 2 * 512) && likely(qbh->bh[3]->b_data == qbh->bh[0]->b_data + 3 * 512)) { return qbh->data = qbh->bh[0]->b_data; } qbh->data = data = kmalloc(2048, GFP_NOFS); if (!data) { pr_err("%s(): out of memory\n", __func__); goto bail4; } memcpy(data + 0 * 512, qbh->bh[0]->b_data, 512); memcpy(data + 1 * 512, qbh->bh[1]->b_data, 512); memcpy(data + 2 * 512, qbh->bh[2]->b_data, 512); memcpy(data + 3 * 512, qbh->bh[3]->b_data, 512); return data; bail4: brelse(qbh->bh[3]); bail3: brelse(qbh->bh[2]); bail2: brelse(qbh->bh[1]); bail1: brelse(qbh->bh[0]); bail0: return NULL; } /* Don't read sectors */ void *hpfs_get_4sectors(struct super_block *s, unsigned secno, struct quad_buffer_head *qbh) { cond_resched(); hpfs_lock_assert(s); if (secno & 3) { pr_err("%s(): unaligned read\n", __func__); return NULL; } if (!hpfs_get_sector(s, secno + 0, &qbh->bh[0])) goto bail0; if (!hpfs_get_sector(s, secno + 1, &qbh->bh[1])) goto bail1; if (!hpfs_get_sector(s, secno + 2, &qbh->bh[2])) goto bail2; if (!hpfs_get_sector(s, secno + 3, &qbh->bh[3])) goto bail3; if (likely(qbh->bh[1]->b_data == qbh->bh[0]->b_data + 1 * 512) && likely(qbh->bh[2]->b_data == qbh->bh[0]->b_data + 2 * 512) && likely(qbh->bh[3]->b_data == qbh->bh[0]->b_data + 3 * 512)) { return qbh->data = qbh->bh[0]->b_data; } if (!(qbh->data = kmalloc(2048, GFP_NOFS))) { pr_err("%s(): out of memory\n", __func__); goto bail4; } return qbh->data; bail4: brelse(qbh->bh[3]); bail3: brelse(qbh->bh[2]); bail2: brelse(qbh->bh[1]); bail1: brelse(qbh->bh[0]); bail0: return NULL; } void hpfs_brelse4(struct quad_buffer_head *qbh) { if (unlikely(qbh->data != qbh->bh[0]->b_data)) kfree(qbh->data); brelse(qbh->bh[0]); brelse(qbh->bh[1]); brelse(qbh->bh[2]); brelse(qbh->bh[3]); } void hpfs_mark_4buffers_dirty(struct quad_buffer_head *qbh) { if (unlikely(qbh->data != qbh->bh[0]->b_data)) { memcpy(qbh->bh[0]->b_data, qbh->data + 0 * 512, 512); memcpy(qbh->bh[1]->b_data, qbh->data + 1 * 512, 512); memcpy(qbh->bh[2]->b_data, qbh->data + 2 * 512, 512); memcpy(qbh->bh[3]->b_data, qbh->data + 3 * 512, 512); } mark_buffer_dirty(qbh->bh[0]); mark_buffer_dirty(qbh->bh[1]); mark_buffer_dirty(qbh->bh[2]); mark_buffer_dirty(qbh->bh[3]); }
linux-master
fs/hpfs/buffer.c
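The quad-buffer helpers above either hand out the first buffer's data directly (when the four 512-byte buffers happen to be contiguous in memory) or copy them into a 2048-byte bounce buffer, which hpfs_mark_4buffers_dirty() copies back before dirtying the buffers. The sketch below reproduces that pattern in plain user-space C; the qb4 struct and helper names are invented for illustration and are not the kernel API.

/*
 * Minimal sketch of the quad-buffer bounce pattern, assuming four
 * independently allocated 512-byte buffers.  Error handling and cleanup
 * are intentionally minimal.
 */
#include <stdlib.h>
#include <string.h>

struct qb4 {
	char *part[4];   /* four 512-byte buffers */
	char *data;      /* part[0] if contiguous, else a 2048-byte bounce buffer */
};

static char *qb4_map(struct qb4 *q)
{
	int contiguous =
		q->part[1] == q->part[0] + 512 &&
		q->part[2] == q->part[0] + 1024 &&
		q->part[3] == q->part[0] + 1536;

	if (contiguous)
		return q->data = q->part[0];

	q->data = malloc(2048);
	if (!q->data)
		return NULL;
	for (int i = 0; i < 4; i++)
		memcpy(q->data + i * 512, q->part[i], 512);
	return q->data;
}

static void qb4_writeback(struct qb4 *q)
{
	/* Only needed when a bounce buffer was used. */
	if (q->data != q->part[0])
		for (int i = 0; i < 4; i++)
			memcpy(q->part[i], q->data + i * 512, 512);
}

int main(void)
{
	struct qb4 q;

	for (int i = 0; i < 4; i++)
		q.part[i] = malloc(512);       /* four independent buffers */
	if (!qb4_map(&q))
		return 1;
	q.data[0] = 'X';                       /* write through the mapping */
	qb4_writeback(&q);                     /* lands in part[0] either way */
	return q.part[0][0] != 'X';            /* exits 0 on success */
}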
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/hpfs/dnode.c * * Mikulas Patocka ([email protected]), 1998-1999 * * handling directory dnode tree - adding, deleteing & searching for dirents */ #include "hpfs_fn.h" static loff_t get_pos(struct dnode *d, struct hpfs_dirent *fde) { struct hpfs_dirent *de; struct hpfs_dirent *de_end = dnode_end_de(d); int i = 1; for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) { if (de == fde) return ((loff_t) le32_to_cpu(d->self) << 4) | (loff_t)i; i++; } pr_info("%s(): not_found\n", __func__); return ((loff_t)le32_to_cpu(d->self) << 4) | (loff_t)1; } int hpfs_add_pos(struct inode *inode, loff_t *pos) { struct hpfs_inode_info *hpfs_inode = hpfs_i(inode); int i = 0; loff_t **ppos; if (hpfs_inode->i_rddir_off) for (; hpfs_inode->i_rddir_off[i]; i++) if (hpfs_inode->i_rddir_off[i] == pos) return 0; if (!(i&0x0f)) { ppos = kmalloc_array(i + 0x11, sizeof(loff_t *), GFP_NOFS); if (!ppos) { pr_err("out of memory for position list\n"); return -ENOMEM; } if (hpfs_inode->i_rddir_off) { memcpy(ppos, hpfs_inode->i_rddir_off, i * sizeof(loff_t)); kfree(hpfs_inode->i_rddir_off); } hpfs_inode->i_rddir_off = ppos; } hpfs_inode->i_rddir_off[i] = pos; hpfs_inode->i_rddir_off[i + 1] = NULL; return 0; } void hpfs_del_pos(struct inode *inode, loff_t *pos) { struct hpfs_inode_info *hpfs_inode = hpfs_i(inode); loff_t **i, **j; if (!hpfs_inode->i_rddir_off) goto not_f; for (i = hpfs_inode->i_rddir_off; *i; i++) if (*i == pos) goto fnd; goto not_f; fnd: for (j = i + 1; *j; j++) ; *i = *(j - 1); *(j - 1) = NULL; if (j - 1 == hpfs_inode->i_rddir_off) { kfree(hpfs_inode->i_rddir_off); hpfs_inode->i_rddir_off = NULL; } return; not_f: /*pr_warn("position pointer %p->%08x not found\n", pos, (int)*pos);*/ return; } static void for_all_poss(struct inode *inode, void (*f)(loff_t *, loff_t, loff_t), loff_t p1, loff_t p2) { struct hpfs_inode_info *hpfs_inode = hpfs_i(inode); loff_t **i; if (!hpfs_inode->i_rddir_off) return; for (i = hpfs_inode->i_rddir_off; *i; i++) (*f)(*i, p1, p2); return; } static void hpfs_pos_subst(loff_t *p, loff_t f, loff_t t) { if (*p == f) *p = t; } /*void hpfs_hpfs_pos_substd(loff_t *p, loff_t f, loff_t t) { if ((*p & ~0x3f) == (f & ~0x3f)) *p = (t & ~0x3f) | (*p & 0x3f); }*/ static void hpfs_pos_ins(loff_t *p, loff_t d, loff_t c) { if ((*p & ~0x3f) == (d & ~0x3f) && (*p & 0x3f) >= (d & 0x3f)) { int n = (*p & 0x3f) + c; if (n > 0x3f) pr_err("%s(): %08x + %d\n", __func__, (int)*p, (int)c >> 8); else *p = (*p & ~0x3f) | n; } } static void hpfs_pos_del(loff_t *p, loff_t d, loff_t c) { if ((*p & ~0x3f) == (d & ~0x3f) && (*p & 0x3f) >= (d & 0x3f)) { int n = (*p & 0x3f) - c; if (n < 1) pr_err("%s(): %08x - %d\n", __func__, (int)*p, (int)c >> 8); else *p = (*p & ~0x3f) | n; } } static struct hpfs_dirent *dnode_pre_last_de(struct dnode *d) { struct hpfs_dirent *de, *de_end, *dee = NULL, *deee = NULL; de_end = dnode_end_de(d); for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) { deee = dee; dee = de; } return deee; } static struct hpfs_dirent *dnode_last_de(struct dnode *d) { struct hpfs_dirent *de, *de_end, *dee = NULL; de_end = dnode_end_de(d); for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) { dee = de; } return dee; } static void set_last_pointer(struct super_block *s, struct dnode *d, dnode_secno ptr) { struct hpfs_dirent *de; if (!(de = dnode_last_de(d))) { hpfs_error(s, "set_last_pointer: empty dnode %08x", le32_to_cpu(d->self)); return; } if (hpfs_sb(s)->sb_chk) { if (de->down) { hpfs_error(s, "set_last_pointer: dnode %08x has 
already last pointer %08x", le32_to_cpu(d->self), de_down_pointer(de)); return; } if (le16_to_cpu(de->length) != 32) { hpfs_error(s, "set_last_pointer: bad last dirent in dnode %08x", le32_to_cpu(d->self)); return; } } if (ptr) { le32_add_cpu(&d->first_free, 4); if (le32_to_cpu(d->first_free) > 2048) { hpfs_error(s, "set_last_pointer: too long dnode %08x", le32_to_cpu(d->self)); le32_add_cpu(&d->first_free, -4); return; } de->length = cpu_to_le16(36); de->down = 1; *(__le32 *)((char *)de + 32) = cpu_to_le32(ptr); } } /* Add an entry to dnode and don't care if it grows over 2048 bytes */ struct hpfs_dirent *hpfs_add_de(struct super_block *s, struct dnode *d, const unsigned char *name, unsigned namelen, secno down_ptr) { struct hpfs_dirent *de; struct hpfs_dirent *de_end = dnode_end_de(d); unsigned d_size = de_size(namelen, down_ptr); for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) { int c = hpfs_compare_names(s, name, namelen, de->name, de->namelen, de->last); if (!c) { hpfs_error(s, "name (%c,%d) already exists in dnode %08x", *name, namelen, le32_to_cpu(d->self)); return NULL; } if (c < 0) break; } memmove((char *)de + d_size, de, (char *)de_end - (char *)de); memset(de, 0, d_size); if (down_ptr) { *(__le32 *)((char *)de + d_size - 4) = cpu_to_le32(down_ptr); de->down = 1; } de->length = cpu_to_le16(d_size); de->not_8x3 = hpfs_is_name_long(name, namelen); de->namelen = namelen; memcpy(de->name, name, namelen); le32_add_cpu(&d->first_free, d_size); return de; } /* Delete dirent and don't care about its subtree */ static void hpfs_delete_de(struct super_block *s, struct dnode *d, struct hpfs_dirent *de) { if (de->last) { hpfs_error(s, "attempt to delete last dirent in dnode %08x", le32_to_cpu(d->self)); return; } d->first_free = cpu_to_le32(le32_to_cpu(d->first_free) - le16_to_cpu(de->length)); memmove(de, de_next_de(de), le32_to_cpu(d->first_free) + (char *)d - (char *)de); } static void fix_up_ptrs(struct super_block *s, struct dnode *d) { struct hpfs_dirent *de; struct hpfs_dirent *de_end = dnode_end_de(d); dnode_secno dno = le32_to_cpu(d->self); for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) if (de->down) { struct quad_buffer_head qbh; struct dnode *dd; if ((dd = hpfs_map_dnode(s, de_down_pointer(de), &qbh))) { if (le32_to_cpu(dd->up) != dno || dd->root_dnode) { dd->up = cpu_to_le32(dno); dd->root_dnode = 0; hpfs_mark_4buffers_dirty(&qbh); } hpfs_brelse4(&qbh); } } } /* Add an entry to dnode and do dnode splitting if required */ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno, const unsigned char *name, unsigned namelen, struct hpfs_dirent *new_de, dnode_secno down_ptr) { struct quad_buffer_head qbh, qbh1, qbh2; struct dnode *d, *ad, *rd, *nd = NULL; dnode_secno adno, rdno; struct hpfs_dirent *de; struct hpfs_dirent nde; unsigned char *nname; int h; int pos; struct buffer_head *bh; struct fnode *fnode; int c1, c2 = 0; if (!(nname = kmalloc(256, GFP_NOFS))) { pr_err("out of memory, can't add to dnode\n"); return 1; } go_up: if (namelen >= 256) { hpfs_error(i->i_sb, "%s(): namelen == %d", __func__, namelen); kfree(nd); kfree(nname); return 1; } if (!(d = hpfs_map_dnode(i->i_sb, dno, &qbh))) { kfree(nd); kfree(nname); return 1; } go_up_a: if (hpfs_sb(i->i_sb)->sb_chk) if (hpfs_stop_cycles(i->i_sb, dno, &c1, &c2, "hpfs_add_to_dnode")) { hpfs_brelse4(&qbh); kfree(nd); kfree(nname); return 1; } if (le32_to_cpu(d->first_free) + de_size(namelen, down_ptr) <= 2048) { loff_t t; copy_de(de=hpfs_add_de(i->i_sb, d, name, namelen, down_ptr), new_de); t = 
get_pos(d, de); for_all_poss(i, hpfs_pos_ins, t, 1); for_all_poss(i, hpfs_pos_subst, 4, t); for_all_poss(i, hpfs_pos_subst, 5, t + 1); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); kfree(nd); kfree(nname); return 0; } if (!nd) if (!(nd = kmalloc(0x924, GFP_NOFS))) { /* 0x924 is a max size of dnode after adding a dirent with max name length. We alloc this only once. There must not be any error while splitting dnodes, otherwise the whole directory, not only file we're adding, would be lost. */ pr_err("out of memory for dnode splitting\n"); hpfs_brelse4(&qbh); kfree(nname); return 1; } memcpy(nd, d, le32_to_cpu(d->first_free)); copy_de(de = hpfs_add_de(i->i_sb, nd, name, namelen, down_ptr), new_de); for_all_poss(i, hpfs_pos_ins, get_pos(nd, de), 1); h = ((char *)dnode_last_de(nd) - (char *)nd) / 2 + 10; if (!(ad = hpfs_alloc_dnode(i->i_sb, le32_to_cpu(d->up), &adno, &qbh1))) { hpfs_error(i->i_sb, "unable to alloc dnode - dnode tree will be corrupted"); hpfs_brelse4(&qbh); kfree(nd); kfree(nname); return 1; } i->i_size += 2048; i->i_blocks += 4; pos = 1; for (de = dnode_first_de(nd); (char *)de_next_de(de) - (char *)nd < h; de = de_next_de(de)) { copy_de(hpfs_add_de(i->i_sb, ad, de->name, de->namelen, de->down ? de_down_pointer(de) : 0), de); for_all_poss(i, hpfs_pos_subst, ((loff_t)dno << 4) | pos, ((loff_t)adno << 4) | pos); pos++; } copy_de(new_de = &nde, de); memcpy(nname, de->name, de->namelen); name = nname; namelen = de->namelen; for_all_poss(i, hpfs_pos_subst, ((loff_t)dno << 4) | pos, 4); down_ptr = adno; set_last_pointer(i->i_sb, ad, de->down ? de_down_pointer(de) : 0); de = de_next_de(de); memmove((char *)nd + 20, de, le32_to_cpu(nd->first_free) + (char *)nd - (char *)de); le32_add_cpu(&nd->first_free, -((char *)de - (char *)nd - 20)); memcpy(d, nd, le32_to_cpu(nd->first_free)); for_all_poss(i, hpfs_pos_del, (loff_t)dno << 4, pos); fix_up_ptrs(i->i_sb, ad); if (!d->root_dnode) { ad->up = d->up; dno = le32_to_cpu(ad->up); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); goto go_up; } if (!(rd = hpfs_alloc_dnode(i->i_sb, le32_to_cpu(d->up), &rdno, &qbh2))) { hpfs_error(i->i_sb, "unable to alloc dnode - dnode tree will be corrupted"); hpfs_brelse4(&qbh); hpfs_brelse4(&qbh1); kfree(nd); kfree(nname); return 1; } i->i_size += 2048; i->i_blocks += 4; rd->root_dnode = 1; rd->up = d->up; if (!(fnode = hpfs_map_fnode(i->i_sb, le32_to_cpu(d->up), &bh))) { hpfs_free_dnode(i->i_sb, rdno); hpfs_brelse4(&qbh); hpfs_brelse4(&qbh1); hpfs_brelse4(&qbh2); kfree(nd); kfree(nname); return 1; } fnode->u.external[0].disk_secno = cpu_to_le32(rdno); mark_buffer_dirty(bh); brelse(bh); hpfs_i(i)->i_dno = rdno; d->up = ad->up = cpu_to_le32(rdno); d->root_dnode = ad->root_dnode = 0; hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); qbh = qbh2; set_last_pointer(i->i_sb, rd, dno); dno = rdno; d = rd; goto go_up_a; } /* * Add an entry to directory btree. * I hate such crazy directory structure. * It's easy to read but terrible to write. * I wrote this directory code 4 times. * I hope, now it's finally bug-free. 
*/ int hpfs_add_dirent(struct inode *i, const unsigned char *name, unsigned namelen, struct hpfs_dirent *new_de) { struct hpfs_inode_info *hpfs_inode = hpfs_i(i); struct dnode *d; struct hpfs_dirent *de, *de_end; struct quad_buffer_head qbh; dnode_secno dno; int c; int c1, c2 = 0; dno = hpfs_inode->i_dno; down: if (hpfs_sb(i->i_sb)->sb_chk) if (hpfs_stop_cycles(i->i_sb, dno, &c1, &c2, "hpfs_add_dirent")) return 1; if (!(d = hpfs_map_dnode(i->i_sb, dno, &qbh))) return 1; de_end = dnode_end_de(d); for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) { if (!(c = hpfs_compare_names(i->i_sb, name, namelen, de->name, de->namelen, de->last))) { hpfs_brelse4(&qbh); return -1; } if (c < 0) { if (de->down) { dno = de_down_pointer(de); hpfs_brelse4(&qbh); goto down; } break; } } hpfs_brelse4(&qbh); if (hpfs_check_free_dnodes(i->i_sb, FREE_DNODES_ADD)) { c = 1; goto ret; } c = hpfs_add_to_dnode(i, dno, name, namelen, new_de, 0); ret: return c; } /* * Find dirent with higher name in 'from' subtree and move it to 'to' dnode. * Return the dnode we moved from (to be checked later if it's empty) */ static secno move_to_top(struct inode *i, dnode_secno from, dnode_secno to) { dnode_secno dno, ddno; dnode_secno chk_up = to; struct dnode *dnode; struct quad_buffer_head qbh; struct hpfs_dirent *de, *nde; int a; loff_t t; int c1, c2 = 0; dno = from; while (1) { if (hpfs_sb(i->i_sb)->sb_chk) if (hpfs_stop_cycles(i->i_sb, dno, &c1, &c2, "move_to_top")) return 0; if (!(dnode = hpfs_map_dnode(i->i_sb, dno, &qbh))) return 0; if (hpfs_sb(i->i_sb)->sb_chk) { if (le32_to_cpu(dnode->up) != chk_up) { hpfs_error(i->i_sb, "move_to_top: up pointer from %08x should be %08x, is %08x", dno, chk_up, le32_to_cpu(dnode->up)); hpfs_brelse4(&qbh); return 0; } chk_up = dno; } if (!(de = dnode_last_de(dnode))) { hpfs_error(i->i_sb, "move_to_top: dnode %08x has no last de", dno); hpfs_brelse4(&qbh); return 0; } if (!de->down) break; dno = de_down_pointer(de); hpfs_brelse4(&qbh); } while (!(de = dnode_pre_last_de(dnode))) { dnode_secno up = le32_to_cpu(dnode->up); hpfs_brelse4(&qbh); hpfs_free_dnode(i->i_sb, dno); i->i_size -= 2048; i->i_blocks -= 4; for_all_poss(i, hpfs_pos_subst, ((loff_t)dno << 4) | 1, 5); if (up == to) return to; if (!(dnode = hpfs_map_dnode(i->i_sb, up, &qbh))) return 0; if (dnode->root_dnode) { hpfs_error(i->i_sb, "move_to_top: got to root_dnode while moving from %08x to %08x", from, to); hpfs_brelse4(&qbh); return 0; } de = dnode_last_de(dnode); if (!de || !de->down) { hpfs_error(i->i_sb, "move_to_top: dnode %08x doesn't point down to %08x", up, dno); hpfs_brelse4(&qbh); return 0; } le32_add_cpu(&dnode->first_free, -4); le16_add_cpu(&de->length, -4); de->down = 0; hpfs_mark_4buffers_dirty(&qbh); dno = up; } t = get_pos(dnode, de); for_all_poss(i, hpfs_pos_subst, t, 4); for_all_poss(i, hpfs_pos_subst, t + 1, 5); if (!(nde = kmalloc(le16_to_cpu(de->length), GFP_NOFS))) { hpfs_error(i->i_sb, "out of memory for dirent - directory will be corrupted"); hpfs_brelse4(&qbh); return 0; } memcpy(nde, de, le16_to_cpu(de->length)); ddno = de->down ? 
de_down_pointer(de) : 0; hpfs_delete_de(i->i_sb, dnode, de); set_last_pointer(i->i_sb, dnode, ddno); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); a = hpfs_add_to_dnode(i, to, nde->name, nde->namelen, nde, from); kfree(nde); if (a) return 0; return dno; } /* * Check if a dnode is empty and delete it from the tree * (chkdsk doesn't like empty dnodes) */ static void delete_empty_dnode(struct inode *i, dnode_secno dno) { struct hpfs_inode_info *hpfs_inode = hpfs_i(i); struct quad_buffer_head qbh; struct dnode *dnode; dnode_secno down, up, ndown; int p; struct hpfs_dirent *de; int c1, c2 = 0; try_it_again: if (hpfs_stop_cycles(i->i_sb, dno, &c1, &c2, "delete_empty_dnode")) return; if (!(dnode = hpfs_map_dnode(i->i_sb, dno, &qbh))) return; if (le32_to_cpu(dnode->first_free) > 56) goto end; if (le32_to_cpu(dnode->first_free) == 52 || le32_to_cpu(dnode->first_free) == 56) { struct hpfs_dirent *de_end; int root = dnode->root_dnode; up = le32_to_cpu(dnode->up); de = dnode_first_de(dnode); down = de->down ? de_down_pointer(de) : 0; if (hpfs_sb(i->i_sb)->sb_chk) if (root && !down) { hpfs_error(i->i_sb, "delete_empty_dnode: root dnode %08x is empty", dno); goto end; } hpfs_brelse4(&qbh); hpfs_free_dnode(i->i_sb, dno); i->i_size -= 2048; i->i_blocks -= 4; if (root) { struct fnode *fnode; struct buffer_head *bh; struct dnode *d1; struct quad_buffer_head qbh1; if (hpfs_sb(i->i_sb)->sb_chk) if (up != i->i_ino) { hpfs_error(i->i_sb, "bad pointer to fnode, dnode %08x, pointing to %08x, should be %08lx", dno, up, (unsigned long)i->i_ino); return; } if ((d1 = hpfs_map_dnode(i->i_sb, down, &qbh1))) { d1->up = cpu_to_le32(up); d1->root_dnode = 1; hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); } if ((fnode = hpfs_map_fnode(i->i_sb, up, &bh))) { fnode->u.external[0].disk_secno = cpu_to_le32(down); mark_buffer_dirty(bh); brelse(bh); } hpfs_inode->i_dno = down; for_all_poss(i, hpfs_pos_subst, ((loff_t)dno << 4) | 1, (loff_t) 12); return; } if (!(dnode = hpfs_map_dnode(i->i_sb, up, &qbh))) return; p = 1; de_end = dnode_end_de(dnode); for (de = dnode_first_de(dnode); de < de_end; de = de_next_de(de), p++) if (de->down) if (de_down_pointer(de) == dno) goto fnd; hpfs_error(i->i_sb, "delete_empty_dnode: pointer to dnode %08x not found in dnode %08x", dno, up); goto end; fnd: for_all_poss(i, hpfs_pos_subst, ((loff_t)dno << 4) | 1, ((loff_t)up << 4) | p); if (!down) { de->down = 0; le16_add_cpu(&de->length, -4); le32_add_cpu(&dnode->first_free, -4); memmove(de_next_de(de), (char *)de_next_de(de) + 4, (char *)dnode + le32_to_cpu(dnode->first_free) - (char *)de_next_de(de)); } else { struct dnode *d1; struct quad_buffer_head qbh1; *(dnode_secno *) ((void *) de + le16_to_cpu(de->length) - 4) = down; if ((d1 = hpfs_map_dnode(i->i_sb, down, &qbh1))) { d1->up = cpu_to_le32(up); hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); } } } else { hpfs_error(i->i_sb, "delete_empty_dnode: dnode %08x, first_free == %03x", dno, le32_to_cpu(dnode->first_free)); goto end; } if (!de->last) { struct hpfs_dirent *de_next = de_next_de(de); struct hpfs_dirent *de_cp; struct dnode *d1; struct quad_buffer_head qbh1; if (!de_next->down) goto endm; ndown = de_down_pointer(de_next); if (!(de_cp = kmalloc(le16_to_cpu(de->length), GFP_NOFS))) { pr_err("out of memory for dtree balancing\n"); goto endm; } memcpy(de_cp, de, le16_to_cpu(de->length)); hpfs_delete_de(i->i_sb, dnode, de); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); for_all_poss(i, hpfs_pos_subst, ((loff_t)up << 4) | p, 4); for_all_poss(i, hpfs_pos_del, ((loff_t)up << 
4) | p, 1); if (de_cp->down) if ((d1 = hpfs_map_dnode(i->i_sb, de_down_pointer(de_cp), &qbh1))) { d1->up = cpu_to_le32(ndown); hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); } hpfs_add_to_dnode(i, ndown, de_cp->name, de_cp->namelen, de_cp, de_cp->down ? de_down_pointer(de_cp) : 0); /*pr_info("UP-TO-DNODE: %08x (ndown = %08x, down = %08x, dno = %08x)\n", up, ndown, down, dno);*/ dno = up; kfree(de_cp); goto try_it_again; } else { struct hpfs_dirent *de_prev = dnode_pre_last_de(dnode); struct hpfs_dirent *de_cp; struct dnode *d1; struct quad_buffer_head qbh1; dnode_secno dlp; if (!de_prev) { hpfs_error(i->i_sb, "delete_empty_dnode: empty dnode %08x", up); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); dno = up; goto try_it_again; } if (!de_prev->down) goto endm; ndown = de_down_pointer(de_prev); if ((d1 = hpfs_map_dnode(i->i_sb, ndown, &qbh1))) { struct hpfs_dirent *del = dnode_last_de(d1); dlp = del->down ? de_down_pointer(del) : 0; if (!dlp && down) { if (le32_to_cpu(d1->first_free) > 2044) { if (hpfs_sb(i->i_sb)->sb_chk >= 2) { pr_err("unbalanced dnode tree, see hpfs.txt 4 more info\n"); pr_err("terminating balancing operation\n"); } hpfs_brelse4(&qbh1); goto endm; } if (hpfs_sb(i->i_sb)->sb_chk >= 2) { pr_err("unbalanced dnode tree, see hpfs.txt 4 more info\n"); pr_err("goin'on\n"); } le16_add_cpu(&del->length, 4); del->down = 1; le32_add_cpu(&d1->first_free, 4); } if (dlp && !down) { le16_add_cpu(&del->length, -4); del->down = 0; le32_add_cpu(&d1->first_free, -4); } else if (down) *(__le32 *) ((void *) del + le16_to_cpu(del->length) - 4) = cpu_to_le32(down); } else goto endm; if (!(de_cp = kmalloc(le16_to_cpu(de_prev->length), GFP_NOFS))) { pr_err("out of memory for dtree balancing\n"); hpfs_brelse4(&qbh1); goto endm; } hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); memcpy(de_cp, de_prev, le16_to_cpu(de_prev->length)); hpfs_delete_de(i->i_sb, dnode, de_prev); if (!de_prev->down) { le16_add_cpu(&de_prev->length, 4); de_prev->down = 1; le32_add_cpu(&dnode->first_free, 4); } *(__le32 *) ((void *) de_prev + le16_to_cpu(de_prev->length) - 4) = cpu_to_le32(ndown); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); for_all_poss(i, hpfs_pos_subst, ((loff_t)up << 4) | (p - 1), 4); for_all_poss(i, hpfs_pos_subst, ((loff_t)up << 4) | p, ((loff_t)up << 4) | (p - 1)); if (down) if ((d1 = hpfs_map_dnode(i->i_sb, de_down_pointer(de), &qbh1))) { d1->up = cpu_to_le32(ndown); hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); } hpfs_add_to_dnode(i, ndown, de_cp->name, de_cp->namelen, de_cp, dlp); dno = up; kfree(de_cp); goto try_it_again; } endm: hpfs_mark_4buffers_dirty(&qbh); end: hpfs_brelse4(&qbh); } /* Delete dirent from directory */ int hpfs_remove_dirent(struct inode *i, dnode_secno dno, struct hpfs_dirent *de, struct quad_buffer_head *qbh, int depth) { struct dnode *dnode = qbh->data; dnode_secno down = 0; loff_t t; if (de->first || de->last) { hpfs_error(i->i_sb, "hpfs_remove_dirent: attempt to delete first or last dirent in dnode %08x", dno); hpfs_brelse4(qbh); return 1; } if (de->down) down = de_down_pointer(de); if (depth && (de->down || (de == dnode_first_de(dnode) && de_next_de(de)->last))) { if (hpfs_check_free_dnodes(i->i_sb, FREE_DNODES_DEL)) { hpfs_brelse4(qbh); return 2; } } for_all_poss(i, hpfs_pos_del, (t = get_pos(dnode, de)) + 1, 1); hpfs_delete_de(i->i_sb, dnode, de); hpfs_mark_4buffers_dirty(qbh); hpfs_brelse4(qbh); if (down) { dnode_secno a = move_to_top(i, down, dno); for_all_poss(i, hpfs_pos_subst, 5, t); if (a) delete_empty_dnode(i, a); return !a; } 
delete_empty_dnode(i, dno); return 0; } void hpfs_count_dnodes(struct super_block *s, dnode_secno dno, int *n_dnodes, int *n_subdirs, int *n_items) { struct dnode *dnode; struct quad_buffer_head qbh; struct hpfs_dirent *de; dnode_secno ptr, odno = 0; int c1, c2 = 0; int d1, d2 = 0; go_down: if (n_dnodes) (*n_dnodes)++; if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, dno, &c1, &c2, "hpfs_count_dnodes #1")) return; ptr = 0; go_up: if (!(dnode = hpfs_map_dnode(s, dno, &qbh))) return; if (hpfs_sb(s)->sb_chk) if (odno && odno != -1 && le32_to_cpu(dnode->up) != odno) hpfs_error(s, "hpfs_count_dnodes: bad up pointer; dnode %08x, down %08x points to %08x", odno, dno, le32_to_cpu(dnode->up)); de = dnode_first_de(dnode); if (ptr) while(1) { if (de->down) if (de_down_pointer(de) == ptr) goto process_de; if (de->last) { hpfs_brelse4(&qbh); hpfs_error(s, "hpfs_count_dnodes: pointer to dnode %08x not found in dnode %08x, got here from %08x", ptr, dno, odno); return; } de = de_next_de(de); } next_de: if (de->down) { odno = dno; dno = de_down_pointer(de); hpfs_brelse4(&qbh); goto go_down; } process_de: if (!de->first && !de->last && de->directory && n_subdirs) (*n_subdirs)++; if (!de->first && !de->last && n_items) (*n_items)++; if ((de = de_next_de(de)) < dnode_end_de(dnode)) goto next_de; ptr = dno; dno = le32_to_cpu(dnode->up); if (dnode->root_dnode) { hpfs_brelse4(&qbh); return; } hpfs_brelse4(&qbh); if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, ptr, &d1, &d2, "hpfs_count_dnodes #2")) return; odno = -1; goto go_up; } static struct hpfs_dirent *map_nth_dirent(struct super_block *s, dnode_secno dno, int n, struct quad_buffer_head *qbh, struct dnode **dn) { int i; struct hpfs_dirent *de, *de_end; struct dnode *dnode; dnode = hpfs_map_dnode(s, dno, qbh); if (!dnode) return NULL; if (dn) *dn=dnode; de = dnode_first_de(dnode); de_end = dnode_end_de(dnode); for (i = 1; de < de_end; i++, de = de_next_de(de)) { if (i == n) { return de; } if (de->last) break; } hpfs_brelse4(qbh); hpfs_error(s, "map_nth_dirent: n too high; dnode = %08x, requested %08x", dno, n); return NULL; } dnode_secno hpfs_de_as_down_as_possible(struct super_block *s, dnode_secno dno) { struct quad_buffer_head qbh; dnode_secno d = dno; dnode_secno up = 0; struct hpfs_dirent *de; int c1, c2 = 0; again: if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, d, &c1, &c2, "hpfs_de_as_down_as_possible")) return d; if (!(de = map_nth_dirent(s, d, 1, &qbh, NULL))) return dno; if (hpfs_sb(s)->sb_chk) if (up && le32_to_cpu(((struct dnode *)qbh.data)->up) != up) hpfs_error(s, "hpfs_de_as_down_as_possible: bad up pointer; dnode %08x, down %08x points to %08x", up, d, le32_to_cpu(((struct dnode *)qbh.data)->up)); if (!de->down) { hpfs_brelse4(&qbh); return d; } up = d; d = de_down_pointer(de); hpfs_brelse4(&qbh); goto again; } struct hpfs_dirent *map_pos_dirent(struct inode *inode, loff_t *posp, struct quad_buffer_head *qbh) { loff_t pos; unsigned c; dnode_secno dno; struct hpfs_dirent *de, *d; struct hpfs_dirent *up_de; struct hpfs_dirent *end_up_de; struct dnode *dnode; struct dnode *up_dnode; struct quad_buffer_head qbh0; pos = *posp; dno = pos >> 6 << 2; pos &= 077; if (!(de = map_nth_dirent(inode->i_sb, dno, pos, qbh, &dnode))) goto bail; /* Going to the next dirent */ if ((d = de_next_de(de)) < dnode_end_de(dnode)) { if (!(++*posp & 077)) { hpfs_error(inode->i_sb, "map_pos_dirent: pos crossed dnode boundary; pos = %08llx", (unsigned long long)*posp); goto bail; } /* We're going down the tree */ if (d->down) { *posp = ((loff_t) 
hpfs_de_as_down_as_possible(inode->i_sb, de_down_pointer(d)) << 4) + 1; } return de; } /* Going up */ if (dnode->root_dnode) goto bail; if (!(up_dnode = hpfs_map_dnode(inode->i_sb, le32_to_cpu(dnode->up), &qbh0))) goto bail; end_up_de = dnode_end_de(up_dnode); c = 0; for (up_de = dnode_first_de(up_dnode); up_de < end_up_de; up_de = de_next_de(up_de)) { if (!(++c & 077)) hpfs_error(inode->i_sb, "map_pos_dirent: pos crossed dnode boundary; dnode = %08x", le32_to_cpu(dnode->up)); if (up_de->down && de_down_pointer(up_de) == dno) { *posp = ((loff_t) le32_to_cpu(dnode->up) << 4) + c; hpfs_brelse4(&qbh0); return de; } } hpfs_error(inode->i_sb, "map_pos_dirent: pointer to dnode %08x not found in parent dnode %08x", dno, le32_to_cpu(dnode->up)); hpfs_brelse4(&qbh0); bail: *posp = 12; return de; } /* Find a dirent in tree */ struct hpfs_dirent *map_dirent(struct inode *inode, dnode_secno dno, const unsigned char *name, unsigned len, dnode_secno *dd, struct quad_buffer_head *qbh) { struct dnode *dnode; struct hpfs_dirent *de; struct hpfs_dirent *de_end; int c1, c2 = 0; if (!S_ISDIR(inode->i_mode)) hpfs_error(inode->i_sb, "map_dirent: not a directory\n"); again: if (hpfs_sb(inode->i_sb)->sb_chk) if (hpfs_stop_cycles(inode->i_sb, dno, &c1, &c2, "map_dirent")) return NULL; if (!(dnode = hpfs_map_dnode(inode->i_sb, dno, qbh))) return NULL; de_end = dnode_end_de(dnode); for (de = dnode_first_de(dnode); de < de_end; de = de_next_de(de)) { int t = hpfs_compare_names(inode->i_sb, name, len, de->name, de->namelen, de->last); if (!t) { if (dd) *dd = dno; return de; } if (t < 0) { if (de->down) { dno = de_down_pointer(de); hpfs_brelse4(qbh); goto again; } break; } } hpfs_brelse4(qbh); return NULL; } /* * Remove empty directory. In normal cases it is only one dnode with two * entries, but we must handle also such obscure cases when it's a tree * of empty dnodes. */ void hpfs_remove_dtree(struct super_block *s, dnode_secno dno) { struct quad_buffer_head qbh; struct dnode *dnode; struct hpfs_dirent *de; dnode_secno d1, d2, rdno = dno; while (1) { if (!(dnode = hpfs_map_dnode(s, dno, &qbh))) return; de = dnode_first_de(dnode); if (de->last) { if (de->down) d1 = de_down_pointer(de); else goto error; hpfs_brelse4(&qbh); hpfs_free_dnode(s, dno); dno = d1; } else break; } if (!de->first) goto error; d1 = de->down ? de_down_pointer(de) : 0; de = de_next_de(de); if (!de->last) goto error; d2 = de->down ? de_down_pointer(de) : 0; hpfs_brelse4(&qbh); hpfs_free_dnode(s, dno); do { while (d1) { if (!(dnode = hpfs_map_dnode(s, dno = d1, &qbh))) return; de = dnode_first_de(dnode); if (!de->last) goto error; d1 = de->down ? de_down_pointer(de) : 0; hpfs_brelse4(&qbh); hpfs_free_dnode(s, dno); } d1 = d2; d2 = 0; } while (d1); return; error: hpfs_brelse4(&qbh); hpfs_free_dnode(s, dno); hpfs_error(s, "directory %08x is corrupted or not empty", rdno); } /* * Find dirent for specified fnode. Use truncated 15-char name in fnode as * a help for searching. 
*/ struct hpfs_dirent *map_fnode_dirent(struct super_block *s, fnode_secno fno, struct fnode *f, struct quad_buffer_head *qbh) { unsigned char *name1; unsigned char *name2; int name1len, name2len; struct dnode *d; dnode_secno dno, downd; struct fnode *upf; struct buffer_head *bh; struct hpfs_dirent *de, *de_end; int c; int c1, c2 = 0; int d1, d2 = 0; name1 = f->name; if (!(name2 = kmalloc(256, GFP_NOFS))) { pr_err("out of memory, can't map dirent\n"); return NULL; } if (f->len <= 15) memcpy(name2, name1, name1len = name2len = f->len); else { memcpy(name2, name1, 15); memset(name2 + 15, 0xff, 256 - 15); /*name2[15] = 0xff;*/ name1len = 15; name2len = 256; } if (!(upf = hpfs_map_fnode(s, le32_to_cpu(f->up), &bh))) { kfree(name2); return NULL; } if (!fnode_is_dir(upf)) { brelse(bh); hpfs_error(s, "fnode %08x has non-directory parent %08x", fno, le32_to_cpu(f->up)); kfree(name2); return NULL; } dno = le32_to_cpu(upf->u.external[0].disk_secno); brelse(bh); go_down: downd = 0; go_up: if (!(d = hpfs_map_dnode(s, dno, qbh))) { kfree(name2); return NULL; } de_end = dnode_end_de(d); de = dnode_first_de(d); if (downd) { while (de < de_end) { if (de->down) if (de_down_pointer(de) == downd) goto f; de = de_next_de(de); } hpfs_error(s, "pointer to dnode %08x not found in dnode %08x", downd, dno); hpfs_brelse4(qbh); kfree(name2); return NULL; } next_de: if (le32_to_cpu(de->fnode) == fno) { kfree(name2); return de; } c = hpfs_compare_names(s, name1, name1len, de->name, de->namelen, de->last); if (c < 0 && de->down) { dno = de_down_pointer(de); hpfs_brelse4(qbh); if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, dno, &c1, &c2, "map_fnode_dirent #1")) { kfree(name2); return NULL; } goto go_down; } f: if (le32_to_cpu(de->fnode) == fno) { kfree(name2); return de; } c = hpfs_compare_names(s, name2, name2len, de->name, de->namelen, de->last); if (c < 0 && !de->last) goto not_found; if ((de = de_next_de(de)) < de_end) goto next_de; if (d->root_dnode) goto not_found; downd = dno; dno = le32_to_cpu(d->up); hpfs_brelse4(qbh); if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, downd, &d1, &d2, "map_fnode_dirent #2")) { kfree(name2); return NULL; } goto go_up; not_found: hpfs_brelse4(qbh); hpfs_error(s, "dirent for fnode %08x not found", fno); kfree(name2); return NULL; }
linux-master
fs/hpfs/dnode.c
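Many of the dnode helpers above juggle readdir positions that pack a dnode sector number and a 1-based entry index as (dno << 4) | index; since dnodes are 4-sector aligned, the low six bits of dno << 4 are clear, so map_pos_dirent() can recover the dnode with "dno = pos >> 6 << 2; pos &= 077". The following stand-alone sketch just demonstrates that encoding round-trips; the helper names are invented.

/* Sketch of the readdir position encoding used throughout dnode.c. */
#include <assert.h>
#include <stdint.h>

typedef uint32_t dnode_secno;

static uint64_t pos_encode(dnode_secno dno, unsigned index)
{
	/* dno is always a multiple of 4; index fits in bits 0..5 */
	return ((uint64_t)dno << 4) | index;
}

static dnode_secno pos_dnode(uint64_t pos)
{
	return (dnode_secno)(pos >> 6 << 2);
}

static unsigned pos_index(uint64_t pos)
{
	return (unsigned)(pos & 077);
}

int main(void)
{
	uint64_t pos = pos_encode(0x1234 * 4, 7);

	assert(pos_dnode(pos) == 0x1234 * 4);
	assert(pos_index(pos) == 7);
	return 0;
}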
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/hpfs/anode.c * * Mikulas Patocka ([email protected]), 1998-1999 * * handling HPFS anode tree that contains file allocation info */ #include "hpfs_fn.h" /* Find a sector in allocation tree */ secno hpfs_bplus_lookup(struct super_block *s, struct inode *inode, struct bplus_header *btree, unsigned sec, struct buffer_head *bh) { anode_secno a = -1; struct anode *anode; int i; int c1, c2 = 0; go_down: if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_bplus_lookup")) return -1; if (bp_internal(btree)) { for (i = 0; i < btree->n_used_nodes; i++) if (le32_to_cpu(btree->u.internal[i].file_secno) > sec) { a = le32_to_cpu(btree->u.internal[i].down); brelse(bh); if (!(anode = hpfs_map_anode(s, a, &bh))) return -1; btree = &anode->btree; goto go_down; } hpfs_error(s, "sector %08x not found in internal anode %08x", sec, a); brelse(bh); return -1; } for (i = 0; i < btree->n_used_nodes; i++) if (le32_to_cpu(btree->u.external[i].file_secno) <= sec && le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) > sec) { a = le32_to_cpu(btree->u.external[i].disk_secno) + sec - le32_to_cpu(btree->u.external[i].file_secno); if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, a, 1, "data")) { brelse(bh); return -1; } if (inode) { struct hpfs_inode_info *hpfs_inode = hpfs_i(inode); hpfs_inode->i_file_sec = le32_to_cpu(btree->u.external[i].file_secno); hpfs_inode->i_disk_sec = le32_to_cpu(btree->u.external[i].disk_secno); hpfs_inode->i_n_secs = le32_to_cpu(btree->u.external[i].length); } brelse(bh); return a; } hpfs_error(s, "sector %08x not found in external anode %08x", sec, a); brelse(bh); return -1; } /* Add a sector to tree */ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsigned fsecno) { struct bplus_header *btree; struct anode *anode = NULL, *ranode = NULL; struct fnode *fnode; anode_secno a, na = -1, ra, up = -1; secno se; struct buffer_head *bh, *bh1, *bh2; int n; unsigned fs; int c1, c2 = 0; if (fnod) { if (!(fnode = hpfs_map_fnode(s, node, &bh))) return -1; btree = &fnode->btree; } else { if (!(anode = hpfs_map_anode(s, node, &bh))) return -1; btree = &anode->btree; } a = node; go_down: if ((n = btree->n_used_nodes - 1) < -!!fnod) { hpfs_error(s, "anode %08x has no entries", a); brelse(bh); return -1; } if (bp_internal(btree)) { a = le32_to_cpu(btree->u.internal[n].down); btree->u.internal[n].file_secno = cpu_to_le32(-1); mark_buffer_dirty(bh); brelse(bh); if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_add_sector_to_btree #1")) return -1; if (!(anode = hpfs_map_anode(s, a, &bh))) return -1; btree = &anode->btree; goto go_down; } if (n >= 0) { if (le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length) != fsecno) { hpfs_error(s, "allocated size %08x, trying to add sector %08x, %cnode %08x", le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length), fsecno, fnod?'f':'a', node); brelse(bh); return -1; } if (hpfs_alloc_if_possible(s, se = le32_to_cpu(btree->u.external[n].disk_secno) + le32_to_cpu(btree->u.external[n].length))) { le32_add_cpu(&btree->u.external[n].length, 1); mark_buffer_dirty(bh); brelse(bh); return se; } } else { if (fsecno) { hpfs_error(s, "empty file %08x, trying to add sector %08x", node, fsecno); brelse(bh); return -1; } se = !fnod ? node : (node + 16384) & ~16383; } if (!(se = hpfs_alloc_sector(s, se, 1, fsecno*ALLOC_M>ALLOC_FWD_MAX ? ALLOC_FWD_MAX : fsecno*ALLOC_M<ALLOC_FWD_MIN ? 
ALLOC_FWD_MIN : fsecno*ALLOC_M))) { brelse(bh); return -1; } fs = n < 0 ? 0 : le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length); if (!btree->n_free_nodes) { up = a != node ? le32_to_cpu(anode->up) : -1; if (!(anode = hpfs_alloc_anode(s, a, &na, &bh1))) { brelse(bh); hpfs_free_sectors(s, se, 1); return -1; } if (a == node && fnod) { anode->up = cpu_to_le32(node); anode->btree.flags |= BP_fnode_parent; anode->btree.n_used_nodes = btree->n_used_nodes; anode->btree.first_free = btree->first_free; anode->btree.n_free_nodes = 40 - anode->btree.n_used_nodes; memcpy(&anode->u, &btree->u, btree->n_used_nodes * 12); btree->flags |= BP_internal; btree->n_free_nodes = 11; btree->n_used_nodes = 1; btree->first_free = cpu_to_le16((char *)&(btree->u.internal[1]) - (char *)btree); btree->u.internal[0].file_secno = cpu_to_le32(-1); btree->u.internal[0].down = cpu_to_le32(na); mark_buffer_dirty(bh); } else if (!(ranode = hpfs_alloc_anode(s, /*a*/0, &ra, &bh2))) { brelse(bh); brelse(bh1); hpfs_free_sectors(s, se, 1); hpfs_free_sectors(s, na, 1); return -1; } brelse(bh); bh = bh1; btree = &anode->btree; } btree->n_free_nodes--; n = btree->n_used_nodes++; le16_add_cpu(&btree->first_free, 12); btree->u.external[n].disk_secno = cpu_to_le32(se); btree->u.external[n].file_secno = cpu_to_le32(fs); btree->u.external[n].length = cpu_to_le32(1); mark_buffer_dirty(bh); brelse(bh); if ((a == node && fnod) || na == -1) return se; c2 = 0; while (up != (anode_secno)-1) { struct anode *new_anode; if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, up, &c1, &c2, "hpfs_add_sector_to_btree #2")) return -1; if (up != node || !fnod) { if (!(anode = hpfs_map_anode(s, up, &bh))) return -1; btree = &anode->btree; } else { if (!(fnode = hpfs_map_fnode(s, up, &bh))) return -1; btree = &fnode->btree; } if (btree->n_free_nodes) { btree->n_free_nodes--; n = btree->n_used_nodes++; le16_add_cpu(&btree->first_free, 8); btree->u.internal[n].file_secno = cpu_to_le32(-1); btree->u.internal[n].down = cpu_to_le32(na); btree->u.internal[n-1].file_secno = cpu_to_le32(fs); mark_buffer_dirty(bh); brelse(bh); brelse(bh2); hpfs_free_sectors(s, ra, 1); if ((anode = hpfs_map_anode(s, na, &bh))) { anode->up = cpu_to_le32(up); if (up == node && fnod) anode->btree.flags |= BP_fnode_parent; else anode->btree.flags &= ~BP_fnode_parent; mark_buffer_dirty(bh); brelse(bh); } return se; } up = up != node ? le32_to_cpu(anode->up) : -1; btree->u.internal[btree->n_used_nodes - 1].file_secno = cpu_to_le32(/*fs*/-1); mark_buffer_dirty(bh); brelse(bh); a = na; if ((new_anode = hpfs_alloc_anode(s, a, &na, &bh))) { anode = new_anode; /*anode->up = cpu_to_le32(up != -1 ? 
up : ra);*/ anode->btree.flags |= BP_internal; anode->btree.n_used_nodes = 1; anode->btree.n_free_nodes = 59; anode->btree.first_free = cpu_to_le16(16); anode->btree.u.internal[0].down = cpu_to_le32(a); anode->btree.u.internal[0].file_secno = cpu_to_le32(-1); mark_buffer_dirty(bh); brelse(bh); if ((anode = hpfs_map_anode(s, a, &bh))) { anode->up = cpu_to_le32(na); mark_buffer_dirty(bh); brelse(bh); } } else na = a; } if ((anode = hpfs_map_anode(s, na, &bh))) { anode->up = cpu_to_le32(node); if (fnod) anode->btree.flags |= BP_fnode_parent; mark_buffer_dirty(bh); brelse(bh); } if (!fnod) { if (!(anode = hpfs_map_anode(s, node, &bh))) { brelse(bh2); return -1; } btree = &anode->btree; } else { if (!(fnode = hpfs_map_fnode(s, node, &bh))) { brelse(bh2); return -1; } btree = &fnode->btree; } ranode->up = cpu_to_le32(node); memcpy(&ranode->btree, btree, le16_to_cpu(btree->first_free)); if (fnod) ranode->btree.flags |= BP_fnode_parent; ranode->btree.n_free_nodes = (bp_internal(&ranode->btree) ? 60 : 40) - ranode->btree.n_used_nodes; if (bp_internal(&ranode->btree)) for (n = 0; n < ranode->btree.n_used_nodes; n++) { struct anode *unode; if ((unode = hpfs_map_anode(s, le32_to_cpu(ranode->u.internal[n].down), &bh1))) { unode->up = cpu_to_le32(ra); unode->btree.flags &= ~BP_fnode_parent; mark_buffer_dirty(bh1); brelse(bh1); } } btree->flags |= BP_internal; btree->n_free_nodes = fnod ? 10 : 58; btree->n_used_nodes = 2; btree->first_free = cpu_to_le16((char *)&btree->u.internal[2] - (char *)btree); btree->u.internal[0].file_secno = cpu_to_le32(fs); btree->u.internal[0].down = cpu_to_le32(ra); btree->u.internal[1].file_secno = cpu_to_le32(-1); btree->u.internal[1].down = cpu_to_le32(na); mark_buffer_dirty(bh); brelse(bh); mark_buffer_dirty(bh2); brelse(bh2); return se; } /* * Remove allocation tree. Recursion would look much nicer but * I want to avoid it because it can cause stack overflow. */ void hpfs_remove_btree(struct super_block *s, struct bplus_header *btree) { struct bplus_header *btree1 = btree; struct anode *anode = NULL; anode_secno ano = 0, oano; struct buffer_head *bh; int level = 0; int pos = 0; int i; int c1, c2 = 0; int d1, d2; go_down: d2 = 0; while (bp_internal(btree1)) { ano = le32_to_cpu(btree1->u.internal[pos].down); if (level) brelse(bh); if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, ano, &d1, &d2, "hpfs_remove_btree #1")) return; if (!(anode = hpfs_map_anode(s, ano, &bh))) return; btree1 = &anode->btree; level++; pos = 0; } for (i = 0; i < btree1->n_used_nodes; i++) hpfs_free_sectors(s, le32_to_cpu(btree1->u.external[i].disk_secno), le32_to_cpu(btree1->u.external[i].length)); go_up: if (!level) return; brelse(bh); if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, ano, &c1, &c2, "hpfs_remove_btree #2")) return; hpfs_free_sectors(s, ano, 1); oano = ano; ano = le32_to_cpu(anode->up); if (--level) { if (!(anode = hpfs_map_anode(s, ano, &bh))) return; btree1 = &anode->btree; } else btree1 = btree; for (i = 0; i < btree1->n_used_nodes; i++) { if (le32_to_cpu(btree1->u.internal[i].down) == oano) { if ((pos = i + 1) < btree1->n_used_nodes) goto go_down; else goto go_up; } } hpfs_error(s, "reference to anode %08x not found in anode %08x " "(probably bad up pointer)", oano, level ? ano : -1); if (level) brelse(bh); } /* Just a wrapper around hpfs_bplus_lookup .. 
used for reading eas */ static secno anode_lookup(struct super_block *s, anode_secno a, unsigned sec) { struct anode *anode; struct buffer_head *bh; if (!(anode = hpfs_map_anode(s, a, &bh))) return -1; return hpfs_bplus_lookup(s, NULL, &anode->btree, sec, bh); } int hpfs_ea_read(struct super_block *s, secno a, int ano, unsigned pos, unsigned len, char *buf) { struct buffer_head *bh; char *data; secno sec; unsigned l; while (len) { if (ano) { if ((sec = anode_lookup(s, a, pos >> 9)) == -1) return -1; } else sec = a + (pos >> 9); if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, sec, 1, "ea #1")) return -1; if (!(data = hpfs_map_sector(s, sec, &bh, (len - 1) >> 9))) return -1; l = 0x200 - (pos & 0x1ff); if (l > len) l = len; memcpy(buf, data + (pos & 0x1ff), l); brelse(bh); buf += l; pos += l; len -= l; } return 0; } int hpfs_ea_write(struct super_block *s, secno a, int ano, unsigned pos, unsigned len, const char *buf) { struct buffer_head *bh; char *data; secno sec; unsigned l; while (len) { if (ano) { if ((sec = anode_lookup(s, a, pos >> 9)) == -1) return -1; } else sec = a + (pos >> 9); if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, sec, 1, "ea #2")) return -1; if (!(data = hpfs_map_sector(s, sec, &bh, (len - 1) >> 9))) return -1; l = 0x200 - (pos & 0x1ff); if (l > len) l = len; memcpy(data + (pos & 0x1ff), buf, l); mark_buffer_dirty(bh); brelse(bh); buf += l; pos += l; len -= l; } return 0; } void hpfs_ea_remove(struct super_block *s, secno a, int ano, unsigned len) { struct anode *anode; struct buffer_head *bh; if (ano) { if (!(anode = hpfs_map_anode(s, a, &bh))) return; hpfs_remove_btree(s, &anode->btree); brelse(bh); hpfs_free_sectors(s, a, 1); } else hpfs_free_sectors(s, a, (len + 511) >> 9); } /* Truncate allocation tree. Doesn't join anodes - I hope it doesn't matter */ void hpfs_truncate_btree(struct super_block *s, secno f, int fno, unsigned secs) { struct fnode *fnode; struct anode *anode; struct buffer_head *bh; struct bplus_header *btree; anode_secno node = f; int i, j, nodes; int c1, c2 = 0; if (fno) { if (!(fnode = hpfs_map_fnode(s, f, &bh))) return; btree = &fnode->btree; } else { if (!(anode = hpfs_map_anode(s, f, &bh))) return; btree = &anode->btree; } if (!secs) { hpfs_remove_btree(s, btree); if (fno) { btree->n_free_nodes = 8; btree->n_used_nodes = 0; btree->first_free = cpu_to_le16(8); btree->flags &= ~BP_internal; mark_buffer_dirty(bh); } else hpfs_free_sectors(s, f, 1); brelse(bh); return; } while (bp_internal(btree)) { nodes = btree->n_used_nodes + btree->n_free_nodes; for (i = 0; i < btree->n_used_nodes; i++) if (le32_to_cpu(btree->u.internal[i].file_secno) >= secs) goto f; brelse(bh); hpfs_error(s, "internal btree %08x doesn't end with -1", node); return; f: for (j = i + 1; j < btree->n_used_nodes; j++) hpfs_ea_remove(s, le32_to_cpu(btree->u.internal[j].down), 1, 0); btree->n_used_nodes = i + 1; btree->n_free_nodes = nodes - btree->n_used_nodes; btree->first_free = cpu_to_le16(8 + 8 * btree->n_used_nodes); mark_buffer_dirty(bh); if (btree->u.internal[i].file_secno == cpu_to_le32(secs)) { brelse(bh); return; } node = le32_to_cpu(btree->u.internal[i].down); brelse(bh); if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, node, &c1, &c2, "hpfs_truncate_btree")) return; if (!(anode = hpfs_map_anode(s, node, &bh))) return; btree = &anode->btree; } nodes = btree->n_used_nodes + btree->n_free_nodes; for (i = 0; i < btree->n_used_nodes; i++) if (le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) >= secs) goto ff; brelse(bh); return; ff: if 
(secs <= le32_to_cpu(btree->u.external[i].file_secno)) { hpfs_error(s, "there is an allocation error in file %08x, sector %08x", f, secs); if (i) i--; } else if (le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) > secs) { hpfs_free_sectors(s, le32_to_cpu(btree->u.external[i].disk_secno) + secs - le32_to_cpu(btree->u.external[i].file_secno), le32_to_cpu(btree->u.external[i].length) - secs + le32_to_cpu(btree->u.external[i].file_secno)); /* I hope gcc optimizes this :-) */ btree->u.external[i].length = cpu_to_le32(secs - le32_to_cpu(btree->u.external[i].file_secno)); } for (j = i + 1; j < btree->n_used_nodes; j++) hpfs_free_sectors(s, le32_to_cpu(btree->u.external[j].disk_secno), le32_to_cpu(btree->u.external[j].length)); btree->n_used_nodes = i + 1; btree->n_free_nodes = nodes - btree->n_used_nodes; btree->first_free = cpu_to_le16(8 + 12 * btree->n_used_nodes); mark_buffer_dirty(bh); brelse(bh); } /* Remove file or directory and it's eas - note that directory must be empty when this is called. */ void hpfs_remove_fnode(struct super_block *s, fnode_secno fno) { struct buffer_head *bh; struct fnode *fnode; struct extended_attribute *ea; struct extended_attribute *ea_end; if (!(fnode = hpfs_map_fnode(s, fno, &bh))) return; if (!fnode_is_dir(fnode)) hpfs_remove_btree(s, &fnode->btree); else hpfs_remove_dtree(s, le32_to_cpu(fnode->u.external[0].disk_secno)); ea_end = fnode_end_ea(fnode); for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea)) if (ea_indirect(ea)) hpfs_ea_remove(s, ea_sec(ea), ea_in_anode(ea), ea_len(ea)); hpfs_ea_ext_remove(s, le32_to_cpu(fnode->ea_secno), fnode_in_anode(fnode), le32_to_cpu(fnode->ea_size_l)); brelse(bh); hpfs_free_sectors(s, fno, 1); }
linux-master
fs/hpfs/anode.c
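On an external (leaf) node, hpfs_bplus_lookup() above scans runs of the form (file_secno, disk_secno, length) and, on a hit, returns disk_secno + (sec - file_secno). The sketch below shows only that extent-mapping step with a hypothetical host-order struct; it returns 0 as a miss sentinel rather than the driver's error handling.

/* Sketch of the external-node extent lookup performed by the anode code. */
#include <stdint.h>
#include <stdio.h>

struct run {                   /* hypothetical host-order extent */
	uint32_t file_secno;   /* first logical sector covered */
	uint32_t disk_secno;   /* corresponding first disk sector */
	uint32_t length;       /* run length in sectors */
};

/* Returns the disk sector for logical sector 'sec', or 0 on a miss. */
static uint32_t run_lookup(const struct run *runs, unsigned n, uint32_t sec)
{
	for (unsigned i = 0; i < n; i++)
		if (runs[i].file_secno <= sec &&
		    sec < runs[i].file_secno + runs[i].length)
			return runs[i].disk_secno + (sec - runs[i].file_secno);
	return 0;
}

int main(void)
{
	struct run runs[] = {
		{ 0, 1000, 8 },    /* file sectors 0..7  -> disk 1000..1007 */
		{ 8, 5000, 4 },    /* file sectors 8..11 -> disk 5000..5003 */
	};

	printf("%u\n", run_lookup(runs, 2, 10));   /* prints 5002 */
	return 0;
}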
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/hpfs/alloc.c * * Mikulas Patocka ([email protected]), 1998-1999 * * HPFS bitmap operations */ #include "hpfs_fn.h" static void hpfs_claim_alloc(struct super_block *s, secno sec) { struct hpfs_sb_info *sbi = hpfs_sb(s); if (sbi->sb_n_free != (unsigned)-1) { if (unlikely(!sbi->sb_n_free)) { hpfs_error(s, "free count underflow, allocating sector %08x", sec); sbi->sb_n_free = -1; return; } sbi->sb_n_free--; } } static void hpfs_claim_free(struct super_block *s, secno sec) { struct hpfs_sb_info *sbi = hpfs_sb(s); if (sbi->sb_n_free != (unsigned)-1) { if (unlikely(sbi->sb_n_free >= sbi->sb_fs_size)) { hpfs_error(s, "free count overflow, freeing sector %08x", sec); sbi->sb_n_free = -1; return; } sbi->sb_n_free++; } } static void hpfs_claim_dirband_alloc(struct super_block *s, secno sec) { struct hpfs_sb_info *sbi = hpfs_sb(s); if (sbi->sb_n_free_dnodes != (unsigned)-1) { if (unlikely(!sbi->sb_n_free_dnodes)) { hpfs_error(s, "dirband free count underflow, allocating sector %08x", sec); sbi->sb_n_free_dnodes = -1; return; } sbi->sb_n_free_dnodes--; } } static void hpfs_claim_dirband_free(struct super_block *s, secno sec) { struct hpfs_sb_info *sbi = hpfs_sb(s); if (sbi->sb_n_free_dnodes != (unsigned)-1) { if (unlikely(sbi->sb_n_free_dnodes >= sbi->sb_dirband_size / 4)) { hpfs_error(s, "dirband free count overflow, freeing sector %08x", sec); sbi->sb_n_free_dnodes = -1; return; } sbi->sb_n_free_dnodes++; } } /* * Check if a sector is allocated in bitmap * This is really slow. Turned on only if chk==2 */ static int chk_if_allocated(struct super_block *s, secno sec, char *msg) { struct quad_buffer_head qbh; __le32 *bmp; if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "chk"))) goto fail; if ((le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) >> (sec & 0x1f)) & 1) { hpfs_error(s, "sector '%s' - %08x not allocated in bitmap", msg, sec); goto fail1; } hpfs_brelse4(&qbh); if (sec >= hpfs_sb(s)->sb_dirband_start && sec < hpfs_sb(s)->sb_dirband_start + hpfs_sb(s)->sb_dirband_size) { unsigned ssec = (sec - hpfs_sb(s)->sb_dirband_start) / 4; if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) goto fail; if ((le32_to_cpu(bmp[ssec >> 5]) >> (ssec & 0x1f)) & 1) { hpfs_error(s, "sector '%s' - %08x not allocated in directory bitmap", msg, sec); goto fail1; } hpfs_brelse4(&qbh); } return 0; fail1: hpfs_brelse4(&qbh); fail: return 1; } /* * Check if sector(s) have proper number and additionally check if they're * allocated in bitmap. 
*/ int hpfs_chk_sectors(struct super_block *s, secno start, int len, char *msg) { if (start + len < start || start < 0x12 || start + len > hpfs_sb(s)->sb_fs_size) { hpfs_error(s, "sector(s) '%s' badly placed at %08x", msg, start); return 1; } if (hpfs_sb(s)->sb_chk>=2) { int i; for (i = 0; i < len; i++) if (chk_if_allocated(s, start + i, msg)) return 1; } return 0; } static secno alloc_in_bmp(struct super_block *s, secno near, unsigned n, unsigned forward) { struct quad_buffer_head qbh; __le32 *bmp; unsigned bs = near & ~0x3fff; unsigned nr = (near & 0x3fff) & ~(n - 1); /*unsigned mnr;*/ unsigned i, q; int a, b; secno ret = 0; if (n != 1 && n != 4) { hpfs_error(s, "Bad allocation size: %d", n); return 0; } if (bs != ~0x3fff) { if (!(bmp = hpfs_map_bitmap(s, near >> 14, &qbh, "aib"))) goto uls; } else { if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) goto uls; } if (!tstbits(bmp, nr, n + forward)) { ret = bs + nr; goto rt; } q = nr + n; b = 0; while ((a = tstbits(bmp, q, n + forward)) != 0) { q += a; if (n != 1) q = ((q-1)&~(n-1))+n; if (!b) { if (q>>5 != nr>>5) { b = 1; q = nr & 0x1f; } } else if (q > nr) break; } if (!a) { ret = bs + q; goto rt; } nr >>= 5; /*for (i = nr + 1; i != nr; i++, i &= 0x1ff) */ i = nr; do { if (!le32_to_cpu(bmp[i])) goto cont; if (n + forward >= 0x3f && le32_to_cpu(bmp[i]) != 0xffffffff) goto cont; q = i<<5; if (i > 0) { unsigned k = le32_to_cpu(bmp[i-1]); while (k & 0x80000000) { q--; k <<= 1; } } if (n != 1) q = ((q-1)&~(n-1))+n; while ((a = tstbits(bmp, q, n + forward)) != 0) { q += a; if (n != 1) q = ((q-1)&~(n-1))+n; if (q>>5 > i) break; } if (!a) { ret = bs + q; goto rt; } cont: i++, i &= 0x1ff; } while (i != nr); rt: if (ret) { if (hpfs_sb(s)->sb_chk && ((ret >> 14) != (bs >> 14) || (le32_to_cpu(bmp[(ret & 0x3fff) >> 5]) | ~(((1 << n) - 1) << (ret & 0x1f))) != 0xffffffff)) { hpfs_error(s, "Allocation doesn't work! Wanted %d, allocated at %08x", n, ret); ret = 0; goto b; } bmp[(ret & 0x3fff) >> 5] &= cpu_to_le32(~(((1 << n) - 1) << (ret & 0x1f))); hpfs_mark_4buffers_dirty(&qbh); } b: hpfs_brelse4(&qbh); uls: return ret; } /* * Allocation strategy: 1) search place near the sector specified * 2) search bitmap where free sectors last found * 3) search all bitmaps * 4) search all bitmaps ignoring number of pre-allocated * sectors */ secno hpfs_alloc_sector(struct super_block *s, secno near, unsigned n, int forward) { secno sec; int i; unsigned n_bmps; struct hpfs_sb_info *sbi = hpfs_sb(s); int f_p = 0; int near_bmp; if (forward < 0) { forward = -forward; f_p = 1; } n_bmps = (sbi->sb_fs_size + 0x4000 - 1) >> 14; if (near && near < sbi->sb_fs_size) { if ((sec = alloc_in_bmp(s, near, n, f_p ? forward : forward/4))) goto ret; near_bmp = near >> 14; } else near_bmp = n_bmps / 2; /* if (b != -1) { if ((sec = alloc_in_bmp(s, b<<14, n, f_p ? forward : forward/2))) { b &= 0x0fffffff; goto ret; } if (b > 0x10000000) if ((sec = alloc_in_bmp(s, (b&0xfffffff)<<14, n, f_p ? 
forward : 0))) goto ret; */ if (!f_p) if (forward > sbi->sb_max_fwd_alloc) forward = sbi->sb_max_fwd_alloc; less_fwd: for (i = 0; i < n_bmps; i++) { if (near_bmp+i < n_bmps && ((sec = alloc_in_bmp(s, (near_bmp+i) << 14, n, forward)))) { sbi->sb_c_bitmap = near_bmp+i; goto ret; } if (!forward) { if (near_bmp-i-1 >= 0 && ((sec = alloc_in_bmp(s, (near_bmp-i-1) << 14, n, forward)))) { sbi->sb_c_bitmap = near_bmp-i-1; goto ret; } } else { if (near_bmp+i >= n_bmps && ((sec = alloc_in_bmp(s, (near_bmp+i-n_bmps) << 14, n, forward)))) { sbi->sb_c_bitmap = near_bmp+i-n_bmps; goto ret; } } if (i == 1 && sbi->sb_c_bitmap != -1 && ((sec = alloc_in_bmp(s, (sbi->sb_c_bitmap) << 14, n, forward)))) { goto ret; } } if (!f_p) { if (forward) { sbi->sb_max_fwd_alloc = forward * 3 / 4; forward /= 2; goto less_fwd; } } sec = 0; ret: if (sec) { i = 0; do hpfs_claim_alloc(s, sec + i); while (unlikely(++i < n)); } if (sec && f_p) { for (i = 0; i < forward; i++) { if (!hpfs_alloc_if_possible(s, sec + n + i)) { hpfs_error(s, "Prealloc doesn't work! Wanted %d, allocated at %08x, can't allocate %d", forward, sec, i); sec = 0; break; } } } return sec; } static secno alloc_in_dirband(struct super_block *s, secno near) { unsigned nr = near; secno sec; struct hpfs_sb_info *sbi = hpfs_sb(s); if (nr < sbi->sb_dirband_start) nr = sbi->sb_dirband_start; if (nr >= sbi->sb_dirband_start + sbi->sb_dirband_size) nr = sbi->sb_dirband_start + sbi->sb_dirband_size - 4; nr -= sbi->sb_dirband_start; nr >>= 2; sec = alloc_in_bmp(s, (~0x3fff) | nr, 1, 0); if (!sec) return 0; hpfs_claim_dirband_alloc(s, sec); return ((sec & 0x3fff) << 2) + sbi->sb_dirband_start; } /* Alloc sector if it's free */ int hpfs_alloc_if_possible(struct super_block *s, secno sec) { struct quad_buffer_head qbh; __le32 *bmp; if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "aip"))) goto end; if (le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) & (1 << (sec & 0x1f))) { bmp[(sec & 0x3fff) >> 5] &= cpu_to_le32(~(1 << (sec & 0x1f))); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); hpfs_claim_alloc(s, sec); return 1; } hpfs_brelse4(&qbh); end: return 0; } /* Free sectors in bitmaps */ void hpfs_free_sectors(struct super_block *s, secno sec, unsigned n) { struct quad_buffer_head qbh; __le32 *bmp; struct hpfs_sb_info *sbi = hpfs_sb(s); /*pr_info("2 - ");*/ if (!n) return; if (sec < 0x12) { hpfs_error(s, "Trying to free reserved sector %08x", sec); return; } sbi->sb_max_fwd_alloc += n > 0xffff ? 0xffff : n; if (sbi->sb_max_fwd_alloc > 0xffffff) sbi->sb_max_fwd_alloc = 0xffffff; new_map: if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "free"))) { return; } new_tst: if ((le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) >> (sec & 0x1f) & 1)) { hpfs_error(s, "sector %08x not allocated", sec); hpfs_brelse4(&qbh); return; } bmp[(sec & 0x3fff) >> 5] |= cpu_to_le32(1 << (sec & 0x1f)); hpfs_claim_free(s, sec); if (!--n) { hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); return; } if (!(++sec & 0x3fff)) { hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); goto new_map; } goto new_tst; } /* * Check if there are at least n free dnodes on the filesystem. * Called before adding to dnode. If we run out of space while * splitting dnodes, it would corrupt dnode tree. 
*/ int hpfs_check_free_dnodes(struct super_block *s, int n) { int n_bmps = (hpfs_sb(s)->sb_fs_size + 0x4000 - 1) >> 14; int b = hpfs_sb(s)->sb_c_bitmap & 0x0fffffff; int i, j; __le32 *bmp; struct quad_buffer_head qbh; if ((bmp = hpfs_map_dnode_bitmap(s, &qbh))) { for (j = 0; j < 512; j++) { unsigned k; if (!le32_to_cpu(bmp[j])) continue; for (k = le32_to_cpu(bmp[j]); k; k >>= 1) if (k & 1) if (!--n) { hpfs_brelse4(&qbh); return 0; } } } hpfs_brelse4(&qbh); i = 0; if (hpfs_sb(s)->sb_c_bitmap != -1) { bmp = hpfs_map_bitmap(s, b, &qbh, "chkdn1"); goto chk_bmp; } chk_next: if (i == b) i++; if (i >= n_bmps) return 1; bmp = hpfs_map_bitmap(s, i, &qbh, "chkdn2"); chk_bmp: if (bmp) { for (j = 0; j < 512; j++) { u32 k; if (!le32_to_cpu(bmp[j])) continue; for (k = 0xf; k; k <<= 4) if ((le32_to_cpu(bmp[j]) & k) == k) { if (!--n) { hpfs_brelse4(&qbh); return 0; } } } hpfs_brelse4(&qbh); } i++; goto chk_next; } void hpfs_free_dnode(struct super_block *s, dnode_secno dno) { if (hpfs_sb(s)->sb_chk) if (dno & 3) { hpfs_error(s, "hpfs_free_dnode: dnode %08x not aligned", dno); return; } if (dno < hpfs_sb(s)->sb_dirband_start || dno >= hpfs_sb(s)->sb_dirband_start + hpfs_sb(s)->sb_dirband_size) { hpfs_free_sectors(s, dno, 4); } else { struct quad_buffer_head qbh; __le32 *bmp; unsigned ssec = (dno - hpfs_sb(s)->sb_dirband_start) / 4; if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) { return; } bmp[ssec >> 5] |= cpu_to_le32(1 << (ssec & 0x1f)); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); hpfs_claim_dirband_free(s, dno); } } struct dnode *hpfs_alloc_dnode(struct super_block *s, secno near, dnode_secno *dno, struct quad_buffer_head *qbh) { struct dnode *d; if (hpfs_get_free_dnodes(s) > FREE_DNODES_ADD) { if (!(*dno = alloc_in_dirband(s, near))) if (!(*dno = hpfs_alloc_sector(s, near, 4, 0))) return NULL; } else { if (!(*dno = hpfs_alloc_sector(s, near, 4, 0))) if (!(*dno = alloc_in_dirband(s, near))) return NULL; } if (!(d = hpfs_get_4sectors(s, *dno, qbh))) { hpfs_free_dnode(s, *dno); return NULL; } memset(d, 0, 2048); d->magic = cpu_to_le32(DNODE_MAGIC); d->first_free = cpu_to_le32(52); d->dirent[0] = 32; d->dirent[2] = 8; d->dirent[30] = 1; d->dirent[31] = 255; d->self = cpu_to_le32(*dno); return d; } struct fnode *hpfs_alloc_fnode(struct super_block *s, secno near, fnode_secno *fno, struct buffer_head **bh) { struct fnode *f; if (!(*fno = hpfs_alloc_sector(s, near, 1, FNODE_ALLOC_FWD))) return NULL; if (!(f = hpfs_get_sector(s, *fno, bh))) { hpfs_free_sectors(s, *fno, 1); return NULL; } memset(f, 0, 512); f->magic = cpu_to_le32(FNODE_MAGIC); f->ea_offs = cpu_to_le16(0xc4); f->btree.n_free_nodes = 8; f->btree.first_free = cpu_to_le16(8); return f; } struct anode *hpfs_alloc_anode(struct super_block *s, secno near, anode_secno *ano, struct buffer_head **bh) { struct anode *a; if (!(*ano = hpfs_alloc_sector(s, near, 1, ANODE_ALLOC_FWD))) return NULL; if (!(a = hpfs_get_sector(s, *ano, bh))) { hpfs_free_sectors(s, *ano, 1); return NULL; } memset(a, 0, 512); a->magic = cpu_to_le32(ANODE_MAGIC); a->self = cpu_to_le32(*ano); a->btree.n_free_nodes = 40; a->btree.n_used_nodes = 0; a->btree.first_free = cpu_to_le16(8); return a; } static unsigned find_run(__le32 *bmp, unsigned *idx) { unsigned len; while (tstbits(bmp, *idx, 1)) { (*idx)++; if (unlikely(*idx >= 0x4000)) return 0; } len = 1; while (!tstbits(bmp, *idx + len, 1)) len++; return len; } static int do_trim(struct super_block *s, secno start, unsigned len, secno limit_start, secno limit_end, unsigned minlen, unsigned *result) { int err; secno end; if 
(fatal_signal_pending(current)) return -EINTR; end = start + len; if (start < limit_start) start = limit_start; if (end > limit_end) end = limit_end; if (start >= end) return 0; if (end - start < minlen) return 0; err = sb_issue_discard(s, start, end - start, GFP_NOFS, 0); if (err) return err; *result += end - start; return 0; } int hpfs_trim_fs(struct super_block *s, u64 start, u64 end, u64 minlen, unsigned *result) { int err = 0; struct hpfs_sb_info *sbi = hpfs_sb(s); unsigned idx, len, start_bmp, end_bmp; __le32 *bmp; struct quad_buffer_head qbh; *result = 0; if (!end || end > sbi->sb_fs_size) end = sbi->sb_fs_size; if (start >= sbi->sb_fs_size) return 0; if (minlen > 0x4000) return 0; if (start < sbi->sb_dirband_start + sbi->sb_dirband_size && end > sbi->sb_dirband_start) { hpfs_lock(s); if (sb_rdonly(s)) { err = -EROFS; goto unlock_1; } if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) { err = -EIO; goto unlock_1; } idx = 0; while ((len = find_run(bmp, &idx)) && !err) { err = do_trim(s, sbi->sb_dirband_start + idx * 4, len * 4, start, end, minlen, result); idx += len; } hpfs_brelse4(&qbh); unlock_1: hpfs_unlock(s); } start_bmp = start >> 14; end_bmp = (end + 0x3fff) >> 14; while (start_bmp < end_bmp && !err) { hpfs_lock(s); if (sb_rdonly(s)) { err = -EROFS; goto unlock_2; } if (!(bmp = hpfs_map_bitmap(s, start_bmp, &qbh, "trim"))) { err = -EIO; goto unlock_2; } idx = 0; while ((len = find_run(bmp, &idx)) && !err) { err = do_trim(s, (start_bmp << 14) + idx, len, start, end, minlen, result); idx += len; } hpfs_brelse4(&qbh); unlock_2: hpfs_unlock(s); start_bmp++; } return err; }
linux-master
fs/hpfs/alloc.c
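The allocator above (alloc_in_bmp() and its callers) treats a set bit in the 0x4000-bit band bitmap as a free sector and keeps candidate offsets aligned to the allocation size with the ((q - 1) & ~(n - 1)) + n step. The following minimal standalone sketch, not the driver's code, mirrors that scan in plain userspace C; tst_free_run() and align_up() are hypothetical stand-ins for tstbits() and the in-line rounding.

#include <stdint.h>
#include <stdio.h>

#define BITMAP_BITS 0x4000u		/* one HPFS bitmap covers 0x4000 sectors */

/* Return 0 if n bits starting at b are all set (free), otherwise the
 * 1-based index of the first clear (allocated) bit -- the same return
 * convention tstbits() uses in alloc.c. */
static unsigned tst_free_run(const uint32_t *bmp, unsigned b, unsigned n)
{
	unsigned i;

	if (b + n > BITMAP_BITS)
		return n;
	for (i = 0; i < n; i++)
		if (!((bmp[(b + i) >> 5] >> ((b + i) & 0x1f)) & 1))
			return i + 1;
	return 0;
}

/* Round q up to the next multiple of the power-of-two allocation size n,
 * the same ((q - 1) & ~(n - 1)) + n step alloc_in_bmp() takes. */
static unsigned align_up(unsigned q, unsigned n)
{
	return ((q - 1) & ~(n - 1)) + n;
}

int main(void)
{
	uint32_t bmp[BITMAP_BITS / 32];
	unsigned q = 0, a, n = 4;
	unsigned i;

	for (i = 0; i < BITMAP_BITS / 32; i++)
		bmp[i] = 0xffffffff;		/* everything free */
	bmp[0] = 0xfffffff0;			/* sectors 0..3 already allocated */

	while (q + n <= BITMAP_BITS && (a = tst_free_run(bmp, q, n)) != 0)
		q = align_up(q + a, n);		/* skip past the clash, stay n-aligned */
	printf("first aligned free run of %u sectors starts at %u\n", n, q);
	return 0;
}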
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/hpfs/dir.c * * Mikulas Patocka ([email protected]), 1998-1999 * * directory VFS functions */ #include <linux/slab.h> #include "hpfs_fn.h" static int hpfs_dir_release(struct inode *inode, struct file *filp) { hpfs_lock(inode->i_sb); hpfs_del_pos(inode, &filp->f_pos); /*hpfs_write_if_changed(inode);*/ hpfs_unlock(inode->i_sb); return 0; } /* This is slow, but it's not used often */ static loff_t hpfs_dir_lseek(struct file *filp, loff_t off, int whence) { loff_t new_off = off + (whence == 1 ? filp->f_pos : 0); loff_t pos; struct quad_buffer_head qbh; struct inode *i = file_inode(filp); struct hpfs_inode_info *hpfs_inode = hpfs_i(i); struct super_block *s = i->i_sb; /* Somebody else will have to figure out what to do here */ if (whence == SEEK_DATA || whence == SEEK_HOLE) return -EINVAL; inode_lock(i); hpfs_lock(s); /*pr_info("dir lseek\n");*/ if (new_off == 0 || new_off == 1 || new_off == 11 || new_off == 12 || new_off == 13) goto ok; pos = ((loff_t) hpfs_de_as_down_as_possible(s, hpfs_inode->i_dno) << 4) + 1; while (pos != new_off) { if (map_pos_dirent(i, &pos, &qbh)) hpfs_brelse4(&qbh); else goto fail; if (pos == 12) goto fail; } if (unlikely(hpfs_add_pos(i, &filp->f_pos) < 0)) { hpfs_unlock(s); inode_unlock(i); return -ENOMEM; } ok: filp->f_pos = new_off; hpfs_unlock(s); inode_unlock(i); return new_off; fail: /*pr_warn("illegal lseek: %016llx\n", new_off);*/ hpfs_unlock(s); inode_unlock(i); return -ESPIPE; } static int hpfs_readdir(struct file *file, struct dir_context *ctx) { struct inode *inode = file_inode(file); struct hpfs_inode_info *hpfs_inode = hpfs_i(inode); struct quad_buffer_head qbh; struct hpfs_dirent *de; int lc; loff_t next_pos; unsigned char *tempname; int c1, c2 = 0; int ret = 0; hpfs_lock(inode->i_sb); if (hpfs_sb(inode->i_sb)->sb_chk) { if (hpfs_chk_sectors(inode->i_sb, inode->i_ino, 1, "dir_fnode")) { ret = -EFSERROR; goto out; } if (hpfs_chk_sectors(inode->i_sb, hpfs_inode->i_dno, 4, "dir_dnode")) { ret = -EFSERROR; goto out; } } if (hpfs_sb(inode->i_sb)->sb_chk >= 2) { struct buffer_head *bh; struct fnode *fno; int e = 0; if (!(fno = hpfs_map_fnode(inode->i_sb, inode->i_ino, &bh))) { ret = -EIOERROR; goto out; } if (!fnode_is_dir(fno)) { e = 1; hpfs_error(inode->i_sb, "not a directory, fnode %08lx", (unsigned long)inode->i_ino); } if (hpfs_inode->i_dno != le32_to_cpu(fno->u.external[0].disk_secno)) { e = 1; hpfs_error(inode->i_sb, "corrupted inode: i_dno == %08x, fnode -> dnode == %08x", hpfs_inode->i_dno, le32_to_cpu(fno->u.external[0].disk_secno)); } brelse(bh); if (e) { ret = -EFSERROR; goto out; } } lc = hpfs_sb(inode->i_sb)->sb_lowercase; if (ctx->pos == 12) { /* diff -r requires this (note, that diff -r */ ctx->pos = 13; /* also fails on msdos filesystem in 2.0) */ goto out; } if (ctx->pos == 13) { ret = -ENOENT; goto out; } while (1) { again: /* This won't work when cycle is longer than number of dirents accepted by filldir, but what can I do? 
maybe killall -9 ls helps */ if (hpfs_sb(inode->i_sb)->sb_chk) if (hpfs_stop_cycles(inode->i_sb, ctx->pos, &c1, &c2, "hpfs_readdir")) { ret = -EFSERROR; goto out; } if (ctx->pos == 12) goto out; if (ctx->pos == 3 || ctx->pos == 4 || ctx->pos == 5) { pr_err("pos==%d\n", (int)ctx->pos); goto out; } if (ctx->pos == 0) { if (!dir_emit_dot(file, ctx)) goto out; ctx->pos = 11; } if (ctx->pos == 11) { if (!dir_emit(ctx, "..", 2, hpfs_inode->i_parent_dir, DT_DIR)) goto out; ctx->pos = 1; } if (ctx->pos == 1) { ret = hpfs_add_pos(inode, &file->f_pos); if (unlikely(ret < 0)) goto out; ctx->pos = ((loff_t) hpfs_de_as_down_as_possible(inode->i_sb, hpfs_inode->i_dno) << 4) + 1; } next_pos = ctx->pos; if (!(de = map_pos_dirent(inode, &next_pos, &qbh))) { ctx->pos = next_pos; ret = -EIOERROR; goto out; } if (de->first || de->last) { if (hpfs_sb(inode->i_sb)->sb_chk) { if (de->first && !de->last && (de->namelen != 2 || de ->name[0] != 1 || de->name[1] != 1)) hpfs_error(inode->i_sb, "hpfs_readdir: bad ^A^A entry; pos = %08lx", (unsigned long)ctx->pos); if (de->last && (de->namelen != 1 || de ->name[0] != 255)) hpfs_error(inode->i_sb, "hpfs_readdir: bad \\377 entry; pos = %08lx", (unsigned long)ctx->pos); } hpfs_brelse4(&qbh); ctx->pos = next_pos; goto again; } tempname = hpfs_translate_name(inode->i_sb, de->name, de->namelen, lc, de->not_8x3); if (!dir_emit(ctx, tempname, de->namelen, le32_to_cpu(de->fnode), DT_UNKNOWN)) { if (tempname != de->name) kfree(tempname); hpfs_brelse4(&qbh); goto out; } ctx->pos = next_pos; if (tempname != de->name) kfree(tempname); hpfs_brelse4(&qbh); } out: hpfs_unlock(inode->i_sb); return ret; } /* * lookup. Search the specified directory for the specified name, set * *result to the corresponding inode. * * lookup uses the inode number to tell read_inode whether it is reading * the inode of a directory or a file -- file ino's are odd, directory * ino's are even. read_inode avoids i/o for file inodes; everything * needed is up here in the directory. (And file fnodes are out in * the boondocks.) * * - M.P.: this is over, sometimes we've got to read file's fnode for eas * inode numbers are just fnode sector numbers; iget lock is used * to tell read_inode to read fnode or not. */ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { const unsigned char *name = dentry->d_name.name; unsigned len = dentry->d_name.len; struct quad_buffer_head qbh; struct hpfs_dirent *de; ino_t ino; int err; struct inode *result = NULL; struct hpfs_inode_info *hpfs_result; hpfs_lock(dir->i_sb); if ((err = hpfs_chk_name(name, &len))) { if (err == -ENAMETOOLONG) { hpfs_unlock(dir->i_sb); return ERR_PTR(-ENAMETOOLONG); } goto end_add; } /* * '.' and '..' will never be passed here. */ de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, NULL, &qbh); /* * This is not really a bailout, just means file not found. */ if (!de) goto end; /* * Get inode number, what we're after. */ ino = le32_to_cpu(de->fnode); /* * Go find or make an inode. 
*/ result = iget_locked(dir->i_sb, ino); if (!result) { hpfs_error(dir->i_sb, "hpfs_lookup: can't get inode"); result = ERR_PTR(-ENOMEM); goto bail1; } if (result->i_state & I_NEW) { hpfs_init_inode(result); if (de->directory) hpfs_read_inode(result); else if (le32_to_cpu(de->ea_size) && hpfs_sb(dir->i_sb)->sb_eas) hpfs_read_inode(result); else { result->i_mode |= S_IFREG; result->i_mode &= ~0111; result->i_op = &hpfs_file_iops; result->i_fop = &hpfs_file_ops; set_nlink(result, 1); } unlock_new_inode(result); } hpfs_result = hpfs_i(result); if (!de->directory) hpfs_result->i_parent_dir = dir->i_ino; if (de->has_acl || de->has_xtd_perm) if (!sb_rdonly(dir->i_sb)) { hpfs_error(result->i_sb, "ACLs or XPERM found. This is probably HPFS386. This driver doesn't support it now. Send me some info on these structures"); iput(result); result = ERR_PTR(-EINVAL); goto bail1; } /* * Fill in the info from the directory if this is a newly created * inode. */ if (!inode_get_ctime(result).tv_sec) { time64_t csec = local_to_gmt(dir->i_sb, le32_to_cpu(de->creation_date)); inode_set_ctime(result, csec ? csec : 1, 0); result->i_mtime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(de->write_date)); result->i_mtime.tv_nsec = 0; result->i_atime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(de->read_date)); result->i_atime.tv_nsec = 0; hpfs_result->i_ea_size = le32_to_cpu(de->ea_size); if (!hpfs_result->i_ea_mode && de->read_only) result->i_mode &= ~0222; if (!de->directory) { if (result->i_size == -1) { result->i_size = le32_to_cpu(de->file_size); result->i_data.a_ops = &hpfs_aops; hpfs_i(result)->mmu_private = result->i_size; /* * i_blocks should count the fnode and any anodes. * We count 1 for the fnode and don't bother about * anodes -- the disk heads are on the directory band * and we want them to stay there. */ result->i_blocks = 1 + ((result->i_size + 511) >> 9); } } } bail1: hpfs_brelse4(&qbh); /* * Made it. */ end: end_add: hpfs_unlock(dir->i_sb); return d_splice_alias(result, dentry); } const struct file_operations hpfs_dir_ops = { .llseek = hpfs_dir_lseek, .read = generic_read_dir, .iterate_shared = hpfs_readdir, .release = hpfs_dir_release, .fsync = hpfs_file_fsync, .unlocked_ioctl = hpfs_ioctl, .compat_ioctl = compat_ptr_ioctl, };
linux-master
fs/hpfs/dir.c
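hpfs_readdir() above drives iteration with sentinel f_pos values: 0 emits ".", 11 emits "..", 1 switches to the first real dirent position, 12 marks end-of-directory and 13 means the end was already reported. A simplified standalone sketch of that state machine follows; the entry names, the 0x100 base position and next_dirent() are made up for illustration and do not reflect the real dnode position encoding.

#include <stdio.h>

#define POS_DOT		0
#define POS_DOTDOT	11
#define POS_FIRST	1
#define POS_EOF		12
#define POS_PAST_EOF	13

static const char *fake_entries[] = { "autoexec.bat", "os2", "readme.txt" };
#define N_ENTRIES (long)(sizeof(fake_entries) / sizeof(fake_entries[0]))

/* Real code maps pos to a dirent inside a dnode; here pos - 0x100
 * simply indexes a static table. */
static long next_dirent(long pos, const char **name)
{
	long idx = pos - 0x100;

	if (idx < 0 || idx >= N_ENTRIES)
		return POS_EOF;
	*name = fake_entries[idx];
	return pos + 1;
}

int main(void)
{
	long pos = POS_DOT;
	const char *name = NULL;

	while (pos != POS_PAST_EOF) {
		if (pos == POS_DOT) {
			puts(".");
			pos = POS_DOTDOT;
		} else if (pos == POS_DOTDOT) {
			puts("..");
			pos = POS_FIRST;
		} else if (pos == POS_FIRST) {
			pos = 0x100;		/* first real dirent position */
		} else if (pos == POS_EOF) {
			pos = POS_PAST_EOF;	/* extra step that diff -r relies on */
		} else {
			pos = next_dirent(pos, &name);
			if (pos != POS_EOF)
				puts(name);
		}
	}
	return 0;
}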
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/hpfs/inode.c * * Mikulas Patocka ([email protected]), 1998-1999 * * inode VFS functions */ #include <linux/slab.h> #include <linux/user_namespace.h> #include "hpfs_fn.h" void hpfs_init_inode(struct inode *i) { struct super_block *sb = i->i_sb; struct hpfs_inode_info *hpfs_inode = hpfs_i(i); i->i_uid = hpfs_sb(sb)->sb_uid; i->i_gid = hpfs_sb(sb)->sb_gid; i->i_mode = hpfs_sb(sb)->sb_mode; i->i_size = -1; i->i_blocks = -1; hpfs_inode->i_dno = 0; hpfs_inode->i_n_secs = 0; hpfs_inode->i_file_sec = 0; hpfs_inode->i_disk_sec = 0; hpfs_inode->i_dpos = 0; hpfs_inode->i_dsubdno = 0; hpfs_inode->i_ea_mode = 0; hpfs_inode->i_ea_uid = 0; hpfs_inode->i_ea_gid = 0; hpfs_inode->i_ea_size = 0; hpfs_inode->i_rddir_off = NULL; hpfs_inode->i_dirty = 0; inode_set_ctime(i, 0, 0); i->i_mtime.tv_sec = i->i_mtime.tv_nsec = 0; i->i_atime.tv_sec = i->i_atime.tv_nsec = 0; } void hpfs_read_inode(struct inode *i) { struct buffer_head *bh; struct fnode *fnode; struct super_block *sb = i->i_sb; struct hpfs_inode_info *hpfs_inode = hpfs_i(i); void *ea; int ea_size; if (!(fnode = hpfs_map_fnode(sb, i->i_ino, &bh))) { /*i->i_mode |= S_IFREG; i->i_mode &= ~0111; i->i_op = &hpfs_file_iops; i->i_fop = &hpfs_file_ops; clear_nlink(i);*/ make_bad_inode(i); return; } if (hpfs_sb(i->i_sb)->sb_eas) { if ((ea = hpfs_get_ea(i->i_sb, fnode, "UID", &ea_size))) { if (ea_size == 2) { i_uid_write(i, le16_to_cpu(*(__le16*)ea)); hpfs_inode->i_ea_uid = 1; } kfree(ea); } if ((ea = hpfs_get_ea(i->i_sb, fnode, "GID", &ea_size))) { if (ea_size == 2) { i_gid_write(i, le16_to_cpu(*(__le16*)ea)); hpfs_inode->i_ea_gid = 1; } kfree(ea); } if ((ea = hpfs_get_ea(i->i_sb, fnode, "SYMLINK", &ea_size))) { kfree(ea); i->i_mode = S_IFLNK | 0777; i->i_op = &page_symlink_inode_operations; inode_nohighmem(i); i->i_data.a_ops = &hpfs_symlink_aops; set_nlink(i, 1); i->i_size = ea_size; i->i_blocks = 1; brelse(bh); return; } if ((ea = hpfs_get_ea(i->i_sb, fnode, "MODE", &ea_size))) { int rdev = 0; umode_t mode = hpfs_sb(sb)->sb_mode; if (ea_size == 2) { mode = le16_to_cpu(*(__le16*)ea); hpfs_inode->i_ea_mode = 1; } kfree(ea); i->i_mode = mode; if (S_ISBLK(mode) || S_ISCHR(mode)) { if ((ea = hpfs_get_ea(i->i_sb, fnode, "DEV", &ea_size))) { if (ea_size == 4) rdev = le32_to_cpu(*(__le32*)ea); kfree(ea); } } if (S_ISBLK(mode) || S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) { brelse(bh); set_nlink(i, 1); i->i_size = 0; i->i_blocks = 1; init_special_inode(i, mode, new_decode_dev(rdev)); return; } } } if (fnode_is_dir(fnode)) { int n_dnodes, n_subdirs; i->i_mode |= S_IFDIR; i->i_op = &hpfs_dir_iops; i->i_fop = &hpfs_dir_ops; hpfs_inode->i_parent_dir = le32_to_cpu(fnode->up); hpfs_inode->i_dno = le32_to_cpu(fnode->u.external[0].disk_secno); if (hpfs_sb(sb)->sb_chk >= 2) { struct buffer_head *bh0; if (hpfs_map_fnode(sb, hpfs_inode->i_parent_dir, &bh0)) brelse(bh0); } n_dnodes = 0; n_subdirs = 0; hpfs_count_dnodes(i->i_sb, hpfs_inode->i_dno, &n_dnodes, &n_subdirs, NULL); i->i_blocks = 4 * n_dnodes; i->i_size = 2048 * n_dnodes; set_nlink(i, 2 + n_subdirs); } else { i->i_mode |= S_IFREG; if (!hpfs_inode->i_ea_mode) i->i_mode &= ~0111; i->i_op = &hpfs_file_iops; i->i_fop = &hpfs_file_ops; set_nlink(i, 1); i->i_size = le32_to_cpu(fnode->file_size); i->i_blocks = ((i->i_size + 511) >> 9) + 1; i->i_data.a_ops = &hpfs_aops; hpfs_i(i)->mmu_private = i->i_size; } brelse(bh); } static void hpfs_write_inode_ea(struct inode *i, struct fnode *fnode) { struct hpfs_inode_info *hpfs_inode = hpfs_i(i); /*if 
(le32_to_cpu(fnode->acl_size_l) || le16_to_cpu(fnode->acl_size_s)) { Some unknown structures like ACL may be in fnode, we'd better not overwrite them hpfs_error(i->i_sb, "fnode %08x has some unknown HPFS386 structures", i->i_ino); } else*/ if (hpfs_sb(i->i_sb)->sb_eas >= 2) { __le32 ea; if (!uid_eq(i->i_uid, hpfs_sb(i->i_sb)->sb_uid) || hpfs_inode->i_ea_uid) { ea = cpu_to_le32(i_uid_read(i)); hpfs_set_ea(i, fnode, "UID", (char*)&ea, 2); hpfs_inode->i_ea_uid = 1; } if (!gid_eq(i->i_gid, hpfs_sb(i->i_sb)->sb_gid) || hpfs_inode->i_ea_gid) { ea = cpu_to_le32(i_gid_read(i)); hpfs_set_ea(i, fnode, "GID", (char *)&ea, 2); hpfs_inode->i_ea_gid = 1; } if (!S_ISLNK(i->i_mode)) if ((i->i_mode != ((hpfs_sb(i->i_sb)->sb_mode & ~(S_ISDIR(i->i_mode) ? 0 : 0111)) | (S_ISDIR(i->i_mode) ? S_IFDIR : S_IFREG)) && i->i_mode != ((hpfs_sb(i->i_sb)->sb_mode & ~(S_ISDIR(i->i_mode) ? 0222 : 0333)) | (S_ISDIR(i->i_mode) ? S_IFDIR : S_IFREG))) || hpfs_inode->i_ea_mode) { ea = cpu_to_le32(i->i_mode); /* sick, but legal */ hpfs_set_ea(i, fnode, "MODE", (char *)&ea, 2); hpfs_inode->i_ea_mode = 1; } if (S_ISBLK(i->i_mode) || S_ISCHR(i->i_mode)) { ea = cpu_to_le32(new_encode_dev(i->i_rdev)); hpfs_set_ea(i, fnode, "DEV", (char *)&ea, 4); } } } void hpfs_write_inode(struct inode *i) { struct hpfs_inode_info *hpfs_inode = hpfs_i(i); struct inode *parent; if (i->i_ino == hpfs_sb(i->i_sb)->sb_root) return; if (hpfs_inode->i_rddir_off && !atomic_read(&i->i_count)) { if (*hpfs_inode->i_rddir_off) pr_err("write_inode: some position still there\n"); kfree(hpfs_inode->i_rddir_off); hpfs_inode->i_rddir_off = NULL; } if (!i->i_nlink) { return; } parent = iget_locked(i->i_sb, hpfs_inode->i_parent_dir); if (parent) { hpfs_inode->i_dirty = 0; if (parent->i_state & I_NEW) { hpfs_init_inode(parent); hpfs_read_inode(parent); unlock_new_inode(parent); } hpfs_write_inode_nolock(i); iput(parent); } } void hpfs_write_inode_nolock(struct inode *i) { struct hpfs_inode_info *hpfs_inode = hpfs_i(i); struct buffer_head *bh; struct fnode *fnode; struct quad_buffer_head qbh; struct hpfs_dirent *de; if (i->i_ino == hpfs_sb(i->i_sb)->sb_root) return; if (!(fnode = hpfs_map_fnode(i->i_sb, i->i_ino, &bh))) return; if (i->i_ino != hpfs_sb(i->i_sb)->sb_root && i->i_nlink) { if (!(de = map_fnode_dirent(i->i_sb, i->i_ino, fnode, &qbh))) { brelse(bh); return; } } else de = NULL; if (S_ISREG(i->i_mode)) { fnode->file_size = cpu_to_le32(i->i_size); if (de) de->file_size = cpu_to_le32(i->i_size); } else if (S_ISDIR(i->i_mode)) { fnode->file_size = cpu_to_le32(0); if (de) de->file_size = cpu_to_le32(0); } hpfs_write_inode_ea(i, fnode); if (de) { de->write_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_mtime.tv_sec)); de->read_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_atime.tv_sec)); de->creation_date = cpu_to_le32(gmt_to_local(i->i_sb, inode_get_ctime(i).tv_sec)); de->read_only = !(i->i_mode & 0222); de->ea_size = cpu_to_le32(hpfs_inode->i_ea_size); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); } if (S_ISDIR(i->i_mode)) { if ((de = map_dirent(i, hpfs_inode->i_dno, "\001\001", 2, NULL, &qbh))) { de->write_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_mtime.tv_sec)); de->read_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_atime.tv_sec)); de->creation_date = cpu_to_le32(gmt_to_local(i->i_sb, inode_get_ctime(i).tv_sec)); de->read_only = !(i->i_mode & 0222); de->ea_size = cpu_to_le32(/*hpfs_inode->i_ea_size*/0); de->file_size = cpu_to_le32(0); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); } else hpfs_error(i->i_sb, "directory %08lx doesn't have '.' 
entry", (unsigned long)i->i_ino); } mark_buffer_dirty(bh); brelse(bh); } int hpfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *attr) { struct inode *inode = d_inode(dentry); int error = -EINVAL; hpfs_lock(inode->i_sb); if (inode->i_ino == hpfs_sb(inode->i_sb)->sb_root) goto out_unlock; if ((attr->ia_valid & ATTR_UID) && from_kuid(&init_user_ns, attr->ia_uid) >= 0x10000) goto out_unlock; if ((attr->ia_valid & ATTR_GID) && from_kgid(&init_user_ns, attr->ia_gid) >= 0x10000) goto out_unlock; if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size) goto out_unlock; error = setattr_prepare(&nop_mnt_idmap, dentry, attr); if (error) goto out_unlock; if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size != i_size_read(inode)) { error = inode_newsize_ok(inode, attr->ia_size); if (error) goto out_unlock; truncate_setsize(inode, attr->ia_size); hpfs_truncate(inode); } setattr_copy(&nop_mnt_idmap, inode, attr); hpfs_write_inode(inode); out_unlock: hpfs_unlock(inode->i_sb); return error; } void hpfs_write_if_changed(struct inode *inode) { struct hpfs_inode_info *hpfs_inode = hpfs_i(inode); if (hpfs_inode->i_dirty) hpfs_write_inode(inode); } void hpfs_evict_inode(struct inode *inode) { truncate_inode_pages_final(&inode->i_data); clear_inode(inode); if (!inode->i_nlink) { hpfs_lock(inode->i_sb); hpfs_remove_fnode(inode->i_sb, inode->i_ino); hpfs_unlock(inode->i_sb); } }
linux-master
fs/hpfs/inode.c
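hpfs_read_inode() above recovers Unix ownership and mode from 2-byte little-endian "UID", "GID" and "MODE" extended attributes and the device number from a 4-byte "DEV" one. Below is a small standalone sketch of that decoding, assuming raw EA payloads already read into buffers; read_le16() and read_le32() are hypothetical stand-ins for le16_to_cpu()/le32_to_cpu() on unaligned data.

#include <stdint.h>
#include <stdio.h>

static uint16_t read_le16(const unsigned char *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

static uint32_t read_le32(const unsigned char *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	/* EA payloads as they might come back from hpfs_get_ea(). */
	const unsigned char uid_ea[2]  = { 0xe8, 0x03 };		/* uid 1000 */
	const unsigned char mode_ea[2] = { 0xa4, 0x81 };		/* S_IFREG | 0644 */
	const unsigned char dev_ea[4]  = { 0x03, 0x01, 0x00, 0x00 };	/* compact dev encoding */

	printf("uid  %u\n", (unsigned)read_le16(uid_ea));
	printf("mode %o\n", (unsigned)read_le16(mode_ea));
	printf("dev  %#lx\n", (unsigned long)read_le32(dev_ea));
	return 0;
}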
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/hpfs/namei.c * * Mikulas Patocka ([email protected]), 1998-1999 * * adding & removing files & directories */ #include <linux/sched.h> #include "hpfs_fn.h" static void hpfs_update_directory_times(struct inode *dir) { time64_t t = local_to_gmt(dir->i_sb, local_get_seconds(dir->i_sb)); if (t == dir->i_mtime.tv_sec && t == inode_get_ctime(dir).tv_sec) return; dir->i_mtime = inode_set_ctime(dir, t, 0); hpfs_write_inode_nolock(dir); } static int hpfs_mkdir(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode) { const unsigned char *name = dentry->d_name.name; unsigned len = dentry->d_name.len; struct quad_buffer_head qbh0; struct buffer_head *bh; struct hpfs_dirent *de; struct fnode *fnode; struct dnode *dnode; struct inode *result; fnode_secno fno; dnode_secno dno; int r; struct hpfs_dirent dee; int err; if ((err = hpfs_chk_name(name, &len))) return err==-ENOENT ? -EINVAL : err; hpfs_lock(dir->i_sb); err = -ENOSPC; fnode = hpfs_alloc_fnode(dir->i_sb, hpfs_i(dir)->i_dno, &fno, &bh); if (!fnode) goto bail; dnode = hpfs_alloc_dnode(dir->i_sb, fno, &dno, &qbh0); if (!dnode) goto bail1; memset(&dee, 0, sizeof dee); dee.directory = 1; if (!(mode & 0222)) dee.read_only = 1; /*dee.archive = 0;*/ dee.hidden = name[0] == '.'; dee.fnode = cpu_to_le32(fno); dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(local_get_seconds(dir->i_sb)); result = new_inode(dir->i_sb); if (!result) goto bail2; hpfs_init_inode(result); result->i_ino = fno; hpfs_i(result)->i_parent_dir = dir->i_ino; hpfs_i(result)->i_dno = dno; result->i_mtime = result->i_atime = inode_set_ctime(result, local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date)), 0); hpfs_i(result)->i_ea_size = 0; result->i_mode |= S_IFDIR; result->i_op = &hpfs_dir_iops; result->i_fop = &hpfs_dir_ops; result->i_blocks = 4; result->i_size = 2048; set_nlink(result, 2); if (dee.read_only) result->i_mode &= ~0222; r = hpfs_add_dirent(dir, name, len, &dee); if (r == 1) goto bail3; if (r == -1) { err = -EEXIST; goto bail3; } fnode->len = len; memcpy(fnode->name, name, len > 15 ? 
15 : len); fnode->up = cpu_to_le32(dir->i_ino); fnode->flags |= FNODE_dir; fnode->btree.n_free_nodes = 7; fnode->btree.n_used_nodes = 1; fnode->btree.first_free = cpu_to_le16(0x14); fnode->u.external[0].disk_secno = cpu_to_le32(dno); fnode->u.external[0].file_secno = cpu_to_le32(-1); dnode->root_dnode = 1; dnode->up = cpu_to_le32(fno); de = hpfs_add_de(dir->i_sb, dnode, "\001\001", 2, 0); de->creation_date = de->write_date = de->read_date = cpu_to_le32(local_get_seconds(dir->i_sb)); if (!(mode & 0222)) de->read_only = 1; de->first = de->directory = 1; /*de->hidden = de->system = 0;*/ de->fnode = cpu_to_le32(fno); mark_buffer_dirty(bh); brelse(bh); hpfs_mark_4buffers_dirty(&qbh0); hpfs_brelse4(&qbh0); inc_nlink(dir); insert_inode_hash(result); if (!uid_eq(result->i_uid, current_fsuid()) || !gid_eq(result->i_gid, current_fsgid()) || result->i_mode != (mode | S_IFDIR)) { result->i_uid = current_fsuid(); result->i_gid = current_fsgid(); result->i_mode = mode | S_IFDIR; hpfs_write_inode_nolock(result); } hpfs_update_directory_times(dir); d_instantiate(dentry, result); hpfs_unlock(dir->i_sb); return 0; bail3: iput(result); bail2: hpfs_brelse4(&qbh0); hpfs_free_dnode(dir->i_sb, dno); bail1: brelse(bh); hpfs_free_sectors(dir->i_sb, fno, 1); bail: hpfs_unlock(dir->i_sb); return err; } static int hpfs_create(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) { const unsigned char *name = dentry->d_name.name; unsigned len = dentry->d_name.len; struct inode *result = NULL; struct buffer_head *bh; struct fnode *fnode; fnode_secno fno; int r; struct hpfs_dirent dee; int err; if ((err = hpfs_chk_name(name, &len))) return err==-ENOENT ? -EINVAL : err; hpfs_lock(dir->i_sb); err = -ENOSPC; fnode = hpfs_alloc_fnode(dir->i_sb, hpfs_i(dir)->i_dno, &fno, &bh); if (!fnode) goto bail; memset(&dee, 0, sizeof dee); if (!(mode & 0222)) dee.read_only = 1; dee.archive = 1; dee.hidden = name[0] == '.'; dee.fnode = cpu_to_le32(fno); dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(local_get_seconds(dir->i_sb)); result = new_inode(dir->i_sb); if (!result) goto bail1; hpfs_init_inode(result); result->i_ino = fno; result->i_mode |= S_IFREG; result->i_mode &= ~0111; result->i_op = &hpfs_file_iops; result->i_fop = &hpfs_file_ops; set_nlink(result, 1); hpfs_i(result)->i_parent_dir = dir->i_ino; result->i_mtime = result->i_atime = inode_set_ctime(result, local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date)), 0); hpfs_i(result)->i_ea_size = 0; if (dee.read_only) result->i_mode &= ~0222; result->i_blocks = 1; result->i_size = 0; result->i_data.a_ops = &hpfs_aops; hpfs_i(result)->mmu_private = 0; r = hpfs_add_dirent(dir, name, len, &dee); if (r == 1) goto bail2; if (r == -1) { err = -EEXIST; goto bail2; } fnode->len = len; memcpy(fnode->name, name, len > 15 ? 
15 : len); fnode->up = cpu_to_le32(dir->i_ino); mark_buffer_dirty(bh); brelse(bh); insert_inode_hash(result); if (!uid_eq(result->i_uid, current_fsuid()) || !gid_eq(result->i_gid, current_fsgid()) || result->i_mode != (mode | S_IFREG)) { result->i_uid = current_fsuid(); result->i_gid = current_fsgid(); result->i_mode = mode | S_IFREG; hpfs_write_inode_nolock(result); } hpfs_update_directory_times(dir); d_instantiate(dentry, result); hpfs_unlock(dir->i_sb); return 0; bail2: iput(result); bail1: brelse(bh); hpfs_free_sectors(dir->i_sb, fno, 1); bail: hpfs_unlock(dir->i_sb); return err; } static int hpfs_mknod(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev) { const unsigned char *name = dentry->d_name.name; unsigned len = dentry->d_name.len; struct buffer_head *bh; struct fnode *fnode; fnode_secno fno; int r; struct hpfs_dirent dee; struct inode *result = NULL; int err; if ((err = hpfs_chk_name(name, &len))) return err==-ENOENT ? -EINVAL : err; if (hpfs_sb(dir->i_sb)->sb_eas < 2) return -EPERM; hpfs_lock(dir->i_sb); err = -ENOSPC; fnode = hpfs_alloc_fnode(dir->i_sb, hpfs_i(dir)->i_dno, &fno, &bh); if (!fnode) goto bail; memset(&dee, 0, sizeof dee); if (!(mode & 0222)) dee.read_only = 1; dee.archive = 1; dee.hidden = name[0] == '.'; dee.fnode = cpu_to_le32(fno); dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(local_get_seconds(dir->i_sb)); result = new_inode(dir->i_sb); if (!result) goto bail1; hpfs_init_inode(result); result->i_ino = fno; hpfs_i(result)->i_parent_dir = dir->i_ino; result->i_mtime = result->i_atime = inode_set_ctime(result, local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date)), 0); hpfs_i(result)->i_ea_size = 0; result->i_uid = current_fsuid(); result->i_gid = current_fsgid(); set_nlink(result, 1); result->i_size = 0; result->i_blocks = 1; init_special_inode(result, mode, rdev); r = hpfs_add_dirent(dir, name, len, &dee); if (r == 1) goto bail2; if (r == -1) { err = -EEXIST; goto bail2; } fnode->len = len; memcpy(fnode->name, name, len > 15 ? 15 : len); fnode->up = cpu_to_le32(dir->i_ino); mark_buffer_dirty(bh); insert_inode_hash(result); hpfs_write_inode_nolock(result); hpfs_update_directory_times(dir); d_instantiate(dentry, result); brelse(bh); hpfs_unlock(dir->i_sb); return 0; bail2: iput(result); bail1: brelse(bh); hpfs_free_sectors(dir->i_sb, fno, 1); bail: hpfs_unlock(dir->i_sb); return err; } static int hpfs_symlink(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, const char *symlink) { const unsigned char *name = dentry->d_name.name; unsigned len = dentry->d_name.len; struct buffer_head *bh; struct fnode *fnode; fnode_secno fno; int r; struct hpfs_dirent dee; struct inode *result; int err; if ((err = hpfs_chk_name(name, &len))) return err==-ENOENT ? 
-EINVAL : err; hpfs_lock(dir->i_sb); if (hpfs_sb(dir->i_sb)->sb_eas < 2) { hpfs_unlock(dir->i_sb); return -EPERM; } err = -ENOSPC; fnode = hpfs_alloc_fnode(dir->i_sb, hpfs_i(dir)->i_dno, &fno, &bh); if (!fnode) goto bail; memset(&dee, 0, sizeof dee); dee.archive = 1; dee.hidden = name[0] == '.'; dee.fnode = cpu_to_le32(fno); dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(local_get_seconds(dir->i_sb)); result = new_inode(dir->i_sb); if (!result) goto bail1; result->i_ino = fno; hpfs_init_inode(result); hpfs_i(result)->i_parent_dir = dir->i_ino; result->i_mtime = result->i_atime = inode_set_ctime(result, local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date)), 0); hpfs_i(result)->i_ea_size = 0; result->i_mode = S_IFLNK | 0777; result->i_uid = current_fsuid(); result->i_gid = current_fsgid(); result->i_blocks = 1; set_nlink(result, 1); result->i_size = strlen(symlink); inode_nohighmem(result); result->i_op = &page_symlink_inode_operations; result->i_data.a_ops = &hpfs_symlink_aops; r = hpfs_add_dirent(dir, name, len, &dee); if (r == 1) goto bail2; if (r == -1) { err = -EEXIST; goto bail2; } fnode->len = len; memcpy(fnode->name, name, len > 15 ? 15 : len); fnode->up = cpu_to_le32(dir->i_ino); hpfs_set_ea(result, fnode, "SYMLINK", symlink, strlen(symlink)); mark_buffer_dirty(bh); brelse(bh); insert_inode_hash(result); hpfs_write_inode_nolock(result); hpfs_update_directory_times(dir); d_instantiate(dentry, result); hpfs_unlock(dir->i_sb); return 0; bail2: iput(result); bail1: brelse(bh); hpfs_free_sectors(dir->i_sb, fno, 1); bail: hpfs_unlock(dir->i_sb); return err; } static int hpfs_unlink(struct inode *dir, struct dentry *dentry) { const unsigned char *name = dentry->d_name.name; unsigned len = dentry->d_name.len; struct quad_buffer_head qbh; struct hpfs_dirent *de; struct inode *inode = d_inode(dentry); dnode_secno dno; int r; int err; hpfs_lock(dir->i_sb); hpfs_adjust_length(name, &len); err = -ENOENT; de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, &dno, &qbh); if (!de) goto out; err = -EPERM; if (de->first) goto out1; err = -EISDIR; if (de->directory) goto out1; r = hpfs_remove_dirent(dir, dno, de, &qbh, 1); switch (r) { case 1: hpfs_error(dir->i_sb, "there was error when removing dirent"); err = -EFSERROR; break; case 2: /* no space for deleting */ err = -ENOSPC; break; default: drop_nlink(inode); err = 0; } goto out; out1: hpfs_brelse4(&qbh); out: if (!err) hpfs_update_directory_times(dir); hpfs_unlock(dir->i_sb); return err; } static int hpfs_rmdir(struct inode *dir, struct dentry *dentry) { const unsigned char *name = dentry->d_name.name; unsigned len = dentry->d_name.len; struct quad_buffer_head qbh; struct hpfs_dirent *de; struct inode *inode = d_inode(dentry); dnode_secno dno; int n_items = 0; int err; int r; hpfs_adjust_length(name, &len); hpfs_lock(dir->i_sb); err = -ENOENT; de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, &dno, &qbh); if (!de) goto out; err = -EPERM; if (de->first) goto out1; err = -ENOTDIR; if (!de->directory) goto out1; hpfs_count_dnodes(dir->i_sb, hpfs_i(inode)->i_dno, NULL, NULL, &n_items); err = -ENOTEMPTY; if (n_items) goto out1; r = hpfs_remove_dirent(dir, dno, de, &qbh, 1); switch (r) { case 1: hpfs_error(dir->i_sb, "there was error when removing dirent"); err = -EFSERROR; break; case 2: err = -ENOSPC; break; default: drop_nlink(dir); clear_nlink(inode); err = 0; } goto out; out1: hpfs_brelse4(&qbh); out: if (!err) hpfs_update_directory_times(dir); hpfs_unlock(dir->i_sb); return err; } static int hpfs_symlink_read_folio(struct 
file *file, struct folio *folio) { struct page *page = &folio->page; char *link = page_address(page); struct inode *i = page->mapping->host; struct fnode *fnode; struct buffer_head *bh; int err; err = -EIO; hpfs_lock(i->i_sb); if (!(fnode = hpfs_map_fnode(i->i_sb, i->i_ino, &bh))) goto fail; err = hpfs_read_ea(i->i_sb, fnode, "SYMLINK", link, PAGE_SIZE); brelse(bh); if (err) goto fail; hpfs_unlock(i->i_sb); SetPageUptodate(page); unlock_page(page); return 0; fail: hpfs_unlock(i->i_sb); SetPageError(page); unlock_page(page); return err; } const struct address_space_operations hpfs_symlink_aops = { .read_folio = hpfs_symlink_read_folio }; static int hpfs_rename(struct mnt_idmap *idmap, struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) { const unsigned char *old_name = old_dentry->d_name.name; unsigned old_len = old_dentry->d_name.len; const unsigned char *new_name = new_dentry->d_name.name; unsigned new_len = new_dentry->d_name.len; struct inode *i = d_inode(old_dentry); struct inode *new_inode = d_inode(new_dentry); struct quad_buffer_head qbh, qbh1; struct hpfs_dirent *dep, *nde; struct hpfs_dirent de; dnode_secno dno; int r; struct buffer_head *bh; struct fnode *fnode; int err; if (flags & ~RENAME_NOREPLACE) return -EINVAL; if ((err = hpfs_chk_name(new_name, &new_len))) return err; err = 0; hpfs_adjust_length(old_name, &old_len); hpfs_lock(i->i_sb); /* order doesn't matter, due to VFS exclusion */ /* Erm? Moving over the empty non-busy directory is perfectly legal */ if (new_inode && S_ISDIR(new_inode->i_mode)) { err = -EINVAL; goto end1; } if (!(dep = map_dirent(old_dir, hpfs_i(old_dir)->i_dno, old_name, old_len, &dno, &qbh))) { hpfs_error(i->i_sb, "lookup succeeded but map dirent failed"); err = -ENOENT; goto end1; } copy_de(&de, dep); de.hidden = new_name[0] == '.'; if (new_inode) { int r; if ((r = hpfs_remove_dirent(old_dir, dno, dep, &qbh, 1)) != 2) { if ((nde = map_dirent(new_dir, hpfs_i(new_dir)->i_dno, new_name, new_len, NULL, &qbh1))) { clear_nlink(new_inode); copy_de(nde, &de); memcpy(nde->name, new_name, new_len); hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); goto end; } hpfs_error(new_dir->i_sb, "hpfs_rename: could not find dirent"); err = -EFSERROR; goto end1; } err = -ENOSPC; goto end1; } if (new_dir == old_dir) hpfs_brelse4(&qbh); if ((r = hpfs_add_dirent(new_dir, new_name, new_len, &de))) { if (r == -1) hpfs_error(new_dir->i_sb, "hpfs_rename: dirent already exists!"); err = r == 1 ? -ENOSPC : -EFSERROR; if (new_dir != old_dir) hpfs_brelse4(&qbh); goto end1; } if (new_dir == old_dir) if (!(dep = map_dirent(old_dir, hpfs_i(old_dir)->i_dno, old_name, old_len, &dno, &qbh))) { hpfs_error(i->i_sb, "lookup succeeded but map dirent failed at #2"); err = -ENOENT; goto end1; } if ((r = hpfs_remove_dirent(old_dir, dno, dep, &qbh, 0))) { hpfs_error(i->i_sb, "hpfs_rename: could not remove dirent"); err = r == 2 ? 
-ENOSPC : -EFSERROR; goto end1; } end: hpfs_i(i)->i_parent_dir = new_dir->i_ino; if (S_ISDIR(i->i_mode)) { inc_nlink(new_dir); drop_nlink(old_dir); } if ((fnode = hpfs_map_fnode(i->i_sb, i->i_ino, &bh))) { fnode->up = cpu_to_le32(new_dir->i_ino); fnode->len = new_len; memcpy(fnode->name, new_name, new_len>15?15:new_len); if (new_len < 15) memset(&fnode->name[new_len], 0, 15 - new_len); mark_buffer_dirty(bh); brelse(bh); } end1: if (!err) { hpfs_update_directory_times(old_dir); hpfs_update_directory_times(new_dir); } hpfs_unlock(i->i_sb); return err; } const struct inode_operations hpfs_dir_iops = { .create = hpfs_create, .lookup = hpfs_lookup, .unlink = hpfs_unlink, .symlink = hpfs_symlink, .mkdir = hpfs_mkdir, .rmdir = hpfs_rmdir, .mknod = hpfs_mknod, .rename = hpfs_rename, .setattr = hpfs_setattr, };
linux-master
fs/hpfs/namei.c
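mkdir, create, mknod, symlink and rename above all store at most the first 15 bytes of the new name in the fnode (memcpy(fnode->name, name, len > 15 ? 15 : len)) alongside the full length. A minimal standalone sketch of that rule follows; struct fake_fnode and store_fnode_name() are hypothetical and only illustrate the truncation plus the zero padding hpfs_rename() performs on shorter names.

#include <stdio.h>
#include <string.h>

struct fake_fnode {
	unsigned char len;	/* real length of the name */
	char name[15];		/* truncated copy, zero padded */
};

static void store_fnode_name(struct fake_fnode *f, const char *name, size_t len)
{
	size_t copy = len > 15 ? 15 : len;

	f->len = (unsigned char)len;
	memset(f->name, 0, sizeof(f->name));
	memcpy(f->name, name, copy);
}

int main(void)
{
	struct fake_fnode f;

	store_fnode_name(&f, "a_rather_long_directory_name", 28);
	printf("stored %u byte name, first 15: %.15s\n", (unsigned)f.len, f.name);
	return 0;
}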
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/hpfs/ea.c * * Mikulas Patocka ([email protected]), 1998-1999 * * handling extended attributes */ #include "hpfs_fn.h" /* Remove external extended attributes. ano specifies whether a is a direct sector where eas starts or an anode */ void hpfs_ea_ext_remove(struct super_block *s, secno a, int ano, unsigned len) { unsigned pos = 0; while (pos < len) { char ex[4 + 255 + 1 + 8]; struct extended_attribute *ea = (struct extended_attribute *)ex; if (pos + 4 > len) { hpfs_error(s, "EAs don't end correctly, %s %08x, len %08x", ano ? "anode" : "sectors", a, len); return; } if (hpfs_ea_read(s, a, ano, pos, 4, ex)) return; if (ea_indirect(ea)) { if (ea_valuelen(ea) != 8) { hpfs_error(s, "ea_indirect(ea) set while ea->valuelen!=8, %s %08x, pos %08x", ano ? "anode" : "sectors", a, pos); return; } if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 9, ex+4)) return; hpfs_ea_remove(s, ea_sec(ea), ea_in_anode(ea), ea_len(ea)); } pos += ea->namelen + ea_valuelen(ea) + 5; } if (!ano) hpfs_free_sectors(s, a, (len+511) >> 9); else { struct buffer_head *bh; struct anode *anode; if ((anode = hpfs_map_anode(s, a, &bh))) { hpfs_remove_btree(s, &anode->btree); brelse(bh); hpfs_free_sectors(s, a, 1); } } } static char *get_indirect_ea(struct super_block *s, int ano, secno a, int size) { char *ret; if (!(ret = kmalloc(size + 1, GFP_NOFS))) { pr_err("out of memory for EA\n"); return NULL; } if (hpfs_ea_read(s, a, ano, 0, size, ret)) { kfree(ret); return NULL; } ret[size] = 0; return ret; } static void set_indirect_ea(struct super_block *s, int ano, secno a, const char *data, int size) { hpfs_ea_write(s, a, ano, 0, size, data); } /* Read an extended attribute named 'key' into the provided buffer */ int hpfs_read_ea(struct super_block *s, struct fnode *fnode, char *key, char *buf, int size) { unsigned pos; int ano, len; secno a; char ex[4 + 255 + 1 + 8]; struct extended_attribute *ea; struct extended_attribute *ea_end = fnode_end_ea(fnode); for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea)) if (!strcmp(ea->name, key)) { if (ea_indirect(ea)) goto indirect; if (ea_valuelen(ea) >= size) return -EINVAL; memcpy(buf, ea_data(ea), ea_valuelen(ea)); buf[ea_valuelen(ea)] = 0; return 0; } a = le32_to_cpu(fnode->ea_secno); len = le32_to_cpu(fnode->ea_size_l); ano = fnode_in_anode(fnode); pos = 0; while (pos < len) { ea = (struct extended_attribute *)ex; if (pos + 4 > len) { hpfs_error(s, "EAs don't end correctly, %s %08x, len %08x", ano ? "anode" : "sectors", a, len); return -EIO; } if (hpfs_ea_read(s, a, ano, pos, 4, ex)) return -EIO; if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 1 + (ea_indirect(ea) ? 
8 : 0), ex + 4)) return -EIO; if (!strcmp(ea->name, key)) { if (ea_indirect(ea)) goto indirect; if (ea_valuelen(ea) >= size) return -EINVAL; if (hpfs_ea_read(s, a, ano, pos + 4 + ea->namelen + 1, ea_valuelen(ea), buf)) return -EIO; buf[ea_valuelen(ea)] = 0; return 0; } pos += ea->namelen + ea_valuelen(ea) + 5; } return -ENOENT; indirect: if (ea_len(ea) >= size) return -EINVAL; if (hpfs_ea_read(s, ea_sec(ea), ea_in_anode(ea), 0, ea_len(ea), buf)) return -EIO; buf[ea_len(ea)] = 0; return 0; } /* Read an extended attribute named 'key' */ char *hpfs_get_ea(struct super_block *s, struct fnode *fnode, char *key, int *size) { char *ret; unsigned pos; int ano, len; secno a; struct extended_attribute *ea; struct extended_attribute *ea_end = fnode_end_ea(fnode); for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea)) if (!strcmp(ea->name, key)) { if (ea_indirect(ea)) return get_indirect_ea(s, ea_in_anode(ea), ea_sec(ea), *size = ea_len(ea)); if (!(ret = kmalloc((*size = ea_valuelen(ea)) + 1, GFP_NOFS))) { pr_err("out of memory for EA\n"); return NULL; } memcpy(ret, ea_data(ea), ea_valuelen(ea)); ret[ea_valuelen(ea)] = 0; return ret; } a = le32_to_cpu(fnode->ea_secno); len = le32_to_cpu(fnode->ea_size_l); ano = fnode_in_anode(fnode); pos = 0; while (pos < len) { char ex[4 + 255 + 1 + 8]; ea = (struct extended_attribute *)ex; if (pos + 4 > len) { hpfs_error(s, "EAs don't end correctly, %s %08x, len %08x", ano ? "anode" : "sectors", a, len); return NULL; } if (hpfs_ea_read(s, a, ano, pos, 4, ex)) return NULL; if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 1 + (ea_indirect(ea) ? 8 : 0), ex + 4)) return NULL; if (!strcmp(ea->name, key)) { if (ea_indirect(ea)) return get_indirect_ea(s, ea_in_anode(ea), ea_sec(ea), *size = ea_len(ea)); if (!(ret = kmalloc((*size = ea_valuelen(ea)) + 1, GFP_NOFS))) { pr_err("out of memory for EA\n"); return NULL; } if (hpfs_ea_read(s, a, ano, pos + 4 + ea->namelen + 1, ea_valuelen(ea), ret)) { kfree(ret); return NULL; } ret[ea_valuelen(ea)] = 0; return ret; } pos += ea->namelen + ea_valuelen(ea) + 5; } return NULL; } /* * Update or create extended attribute 'key' with value 'data'. Note that * when this ea exists, it MUST have the same size as size of data. * This driver can't change sizes of eas ('cause I just don't need it). */ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key, const char *data, int size) { fnode_secno fno = inode->i_ino; struct super_block *s = inode->i_sb; unsigned pos; int ano, len; secno a; unsigned char h[4]; struct extended_attribute *ea; struct extended_attribute *ea_end = fnode_end_ea(fnode); for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea)) if (!strcmp(ea->name, key)) { if (ea_indirect(ea)) { if (ea_len(ea) == size) set_indirect_ea(s, ea_in_anode(ea), ea_sec(ea), data, size); } else if (ea_valuelen(ea) == size) { memcpy(ea_data(ea), data, size); } return; } a = le32_to_cpu(fnode->ea_secno); len = le32_to_cpu(fnode->ea_size_l); ano = fnode_in_anode(fnode); pos = 0; while (pos < len) { char ex[4 + 255 + 1 + 8]; ea = (struct extended_attribute *)ex; if (pos + 4 > len) { hpfs_error(s, "EAs don't end correctly, %s %08x, len %08x", ano ? "anode" : "sectors", a, len); return; } if (hpfs_ea_read(s, a, ano, pos, 4, ex)) return; if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 1 + (ea_indirect(ea) ? 
8 : 0), ex + 4)) return; if (!strcmp(ea->name, key)) { if (ea_indirect(ea)) { if (ea_len(ea) == size) set_indirect_ea(s, ea_in_anode(ea), ea_sec(ea), data, size); } else { if (ea_valuelen(ea) == size) hpfs_ea_write(s, a, ano, pos + 4 + ea->namelen + 1, size, data); } return; } pos += ea->namelen + ea_valuelen(ea) + 5; } if (!le16_to_cpu(fnode->ea_offs)) { /*if (le16_to_cpu(fnode->ea_size_s)) { hpfs_error(s, "fnode %08x: ea_size_s == %03x, ea_offs == 0", inode->i_ino, le16_to_cpu(fnode->ea_size_s)); return; }*/ fnode->ea_offs = cpu_to_le16(0xc4); } if (le16_to_cpu(fnode->ea_offs) < 0xc4 || le16_to_cpu(fnode->ea_offs) + le16_to_cpu(fnode->acl_size_s) + le16_to_cpu(fnode->ea_size_s) > 0x200) { hpfs_error(s, "fnode %08lx: ea_offs == %03x, ea_size_s == %03x", (unsigned long)inode->i_ino, le16_to_cpu(fnode->ea_offs), le16_to_cpu(fnode->ea_size_s)); return; } if ((le16_to_cpu(fnode->ea_size_s) || !le32_to_cpu(fnode->ea_size_l)) && le16_to_cpu(fnode->ea_offs) + le16_to_cpu(fnode->acl_size_s) + le16_to_cpu(fnode->ea_size_s) + strlen(key) + size + 5 <= 0x200) { ea = fnode_end_ea(fnode); *(char *)ea = 0; ea->namelen = strlen(key); ea->valuelen_lo = size; ea->valuelen_hi = size >> 8; strcpy(ea->name, key); memcpy(ea_data(ea), data, size); fnode->ea_size_s = cpu_to_le16(le16_to_cpu(fnode->ea_size_s) + strlen(key) + size + 5); goto ret; } /* Most the code here is 99.9993422% unused. I hope there are no bugs. But what .. HPFS.IFS has also bugs in ea management. */ if (le16_to_cpu(fnode->ea_size_s) && !le32_to_cpu(fnode->ea_size_l)) { secno n; struct buffer_head *bh; char *data; if (!(n = hpfs_alloc_sector(s, fno, 1, 0))) return; if (!(data = hpfs_get_sector(s, n, &bh))) { hpfs_free_sectors(s, n, 1); return; } memcpy(data, fnode_ea(fnode), le16_to_cpu(fnode->ea_size_s)); fnode->ea_size_l = cpu_to_le32(le16_to_cpu(fnode->ea_size_s)); fnode->ea_size_s = cpu_to_le16(0); fnode->ea_secno = cpu_to_le32(n); fnode->flags &= ~FNODE_anode; mark_buffer_dirty(bh); brelse(bh); } pos = le32_to_cpu(fnode->ea_size_l) + 5 + strlen(key) + size; len = (le32_to_cpu(fnode->ea_size_l) + 511) >> 9; if (pos >= 30000) goto bail; while (((pos + 511) >> 9) > len) { if (!len) { secno q = hpfs_alloc_sector(s, fno, 1, 0); if (!q) goto bail; fnode->ea_secno = cpu_to_le32(q); fnode->flags &= ~FNODE_anode; len++; } else if (!fnode_in_anode(fnode)) { if (hpfs_alloc_if_possible(s, le32_to_cpu(fnode->ea_secno) + len)) { len++; } else { /* Aargh... 
don't know how to create ea anodes :-( */ /*struct buffer_head *bh; struct anode *anode; anode_secno a_s; if (!(anode = hpfs_alloc_anode(s, fno, &a_s, &bh))) goto bail; anode->up = cpu_to_le32(fno); anode->btree.fnode_parent = 1; anode->btree.n_free_nodes--; anode->btree.n_used_nodes++; anode->btree.first_free = cpu_to_le16(le16_to_cpu(anode->btree.first_free) + 12); anode->u.external[0].disk_secno = cpu_to_le32(le32_to_cpu(fnode->ea_secno)); anode->u.external[0].file_secno = cpu_to_le32(0); anode->u.external[0].length = cpu_to_le32(len); mark_buffer_dirty(bh); brelse(bh); fnode->flags |= FNODE_anode; fnode->ea_secno = cpu_to_le32(a_s);*/ secno new_sec; int i; if (!(new_sec = hpfs_alloc_sector(s, fno, 1, 1 - ((pos + 511) >> 9)))) goto bail; for (i = 0; i < len; i++) { struct buffer_head *bh1, *bh2; void *b1, *b2; if (!(b1 = hpfs_map_sector(s, le32_to_cpu(fnode->ea_secno) + i, &bh1, len - i - 1))) { hpfs_free_sectors(s, new_sec, (pos + 511) >> 9); goto bail; } if (!(b2 = hpfs_get_sector(s, new_sec + i, &bh2))) { brelse(bh1); hpfs_free_sectors(s, new_sec, (pos + 511) >> 9); goto bail; } memcpy(b2, b1, 512); brelse(bh1); mark_buffer_dirty(bh2); brelse(bh2); } hpfs_free_sectors(s, le32_to_cpu(fnode->ea_secno), len); fnode->ea_secno = cpu_to_le32(new_sec); len = (pos + 511) >> 9; } } if (fnode_in_anode(fnode)) { if (hpfs_add_sector_to_btree(s, le32_to_cpu(fnode->ea_secno), 0, len) != -1) { len++; } else { goto bail; } } } h[0] = 0; h[1] = strlen(key); h[2] = size & 0xff; h[3] = size >> 8; if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode_in_anode(fnode), le32_to_cpu(fnode->ea_size_l), 4, h)) goto bail; if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode_in_anode(fnode), le32_to_cpu(fnode->ea_size_l) + 4, h[1] + 1, key)) goto bail; if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode_in_anode(fnode), le32_to_cpu(fnode->ea_size_l) + 5 + h[1], size, data)) goto bail; fnode->ea_size_l = cpu_to_le32(pos); ret: hpfs_i(inode)->i_ea_size += 5 + strlen(key) + size; return; bail: if (le32_to_cpu(fnode->ea_secno)) if (fnode_in_anode(fnode)) hpfs_truncate_btree(s, le32_to_cpu(fnode->ea_secno), 1, (le32_to_cpu(fnode->ea_size_l) + 511) >> 9); else hpfs_free_sectors(s, le32_to_cpu(fnode->ea_secno) + ((le32_to_cpu(fnode->ea_size_l) + 511) >> 9), len - ((le32_to_cpu(fnode->ea_size_l) + 511) >> 9)); else fnode->ea_secno = fnode->ea_size_l = cpu_to_le32(0); }
linux-master
fs/hpfs/ea.c
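hpfs_set_ea() above appends external EA records as a 4-byte header (flags, namelen, valuelen low/high byte), the NUL-terminated name and then the value, which is why ea_size_l grows by 5 + strlen(key) + size per record. A standalone sketch of that record layout; pack_ea() is a hypothetical helper, and the value bytes are supplied already little-endian, as the driver would produce with cpu_to_le16().

#include <stdio.h>
#include <string.h>

/* Pack one EA record into out[] and return the number of bytes used:
 * flags(1) namelen(1) valuelen_lo(1) valuelen_hi(1) name\0 value */
static size_t pack_ea(unsigned char *out, const char *name, const void *value, size_t size)
{
	size_t namelen = strlen(name);

	out[0] = 0;				/* flags: not indirect */
	out[1] = (unsigned char)namelen;
	out[2] = size & 0xff;			/* value length, little endian */
	out[3] = (size >> 8) & 0xff;
	memcpy(out + 4, name, namelen + 1);	/* name including its NUL */
	memcpy(out + 5 + namelen, value, size);
	return 5 + namelen + size;
}

int main(void)
{
	unsigned char buf[64];
	const unsigned char uid_le[2] = { 0xe8, 0x03 };	/* uid 1000, little endian */
	size_t used = pack_ea(buf, "UID", uid_le, sizeof(uid_le));

	printf("record takes %zu bytes\n", used);	/* 5 + 3 + 2 = 10 */
	return 0;
}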
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/hpfs/map.c * * Mikulas Patocka ([email protected]), 1998-1999 * * mapping structures to memory with some minimal checks */ #include "hpfs_fn.h" __le32 *hpfs_map_dnode_bitmap(struct super_block *s, struct quad_buffer_head *qbh) { return hpfs_map_4sectors(s, hpfs_sb(s)->sb_dmap, qbh, 0); } __le32 *hpfs_map_bitmap(struct super_block *s, unsigned bmp_block, struct quad_buffer_head *qbh, char *id) { secno sec; __le32 *ret; unsigned n_bands = (hpfs_sb(s)->sb_fs_size + 0x3fff) >> 14; if (hpfs_sb(s)->sb_chk) if (bmp_block >= n_bands) { hpfs_error(s, "hpfs_map_bitmap called with bad parameter: %08x at %s", bmp_block, id); return NULL; } sec = le32_to_cpu(hpfs_sb(s)->sb_bmp_dir[bmp_block]); if (!sec || sec > hpfs_sb(s)->sb_fs_size-4) { hpfs_error(s, "invalid bitmap block pointer %08x -> %08x at %s", bmp_block, sec, id); return NULL; } ret = hpfs_map_4sectors(s, sec, qbh, 4); if (ret) hpfs_prefetch_bitmap(s, bmp_block + 1); return ret; } void hpfs_prefetch_bitmap(struct super_block *s, unsigned bmp_block) { unsigned to_prefetch, next_prefetch; unsigned n_bands = (hpfs_sb(s)->sb_fs_size + 0x3fff) >> 14; if (unlikely(bmp_block >= n_bands)) return; to_prefetch = le32_to_cpu(hpfs_sb(s)->sb_bmp_dir[bmp_block]); if (unlikely(bmp_block + 1 >= n_bands)) next_prefetch = 0; else next_prefetch = le32_to_cpu(hpfs_sb(s)->sb_bmp_dir[bmp_block + 1]); hpfs_prefetch_sectors(s, to_prefetch, 4 + 4 * (to_prefetch + 4 == next_prefetch)); } /* * Load first code page into kernel memory, return pointer to 256-byte array, * first 128 bytes are uppercasing table for chars 128-255, next 128 bytes are * lowercasing table */ unsigned char *hpfs_load_code_page(struct super_block *s, secno cps) { struct buffer_head *bh; secno cpds; unsigned cpi; unsigned char *ptr; unsigned char *cp_table; int i; struct code_page_data *cpd; struct code_page_directory *cp = hpfs_map_sector(s, cps, &bh, 0); if (!cp) return NULL; if (le32_to_cpu(cp->magic) != CP_DIR_MAGIC) { pr_err("Code page directory magic doesn't match (magic = %08x)\n", le32_to_cpu(cp->magic)); brelse(bh); return NULL; } if (!le32_to_cpu(cp->n_code_pages)) { pr_err("n_code_pages == 0\n"); brelse(bh); return NULL; } cpds = le32_to_cpu(cp->array[0].code_page_data); cpi = le16_to_cpu(cp->array[0].index); brelse(bh); if (cpi >= 3) { pr_err("Code page index out of array\n"); return NULL; } if (!(cpd = hpfs_map_sector(s, cpds, &bh, 0))) return NULL; if (le16_to_cpu(cpd->offs[cpi]) > 0x178) { pr_err("Code page index out of sector\n"); brelse(bh); return NULL; } ptr = (unsigned char *)cpd + le16_to_cpu(cpd->offs[cpi]) + 6; if (!(cp_table = kmalloc(256, GFP_KERNEL))) { pr_err("out of memory for code page table\n"); brelse(bh); return NULL; } memcpy(cp_table, ptr, 128); brelse(bh); /* Try to build lowercasing table from uppercasing one */ for (i=128; i<256; i++) cp_table[i]=i; for (i=128; i<256; i++) if (cp_table[i-128]!=i && cp_table[i-128]>=128) cp_table[cp_table[i-128]] = i; return cp_table; } __le32 *hpfs_load_bitmap_directory(struct super_block *s, secno bmp) { struct buffer_head *bh; int n = (hpfs_sb(s)->sb_fs_size + 0x200000 - 1) >> 21; int i; __le32 *b; if (!(b = kmalloc_array(n, 512, GFP_KERNEL))) { pr_err("can't allocate memory for bitmap directory\n"); return NULL; } for (i=0;i<n;i++) { __le32 *d = hpfs_map_sector(s, bmp+i, &bh, n - i - 1); if (!d) { kfree(b); return NULL; } memcpy((char *)b + 512 * i, d, 512); brelse(bh); } return b; } void hpfs_load_hotfix_map(struct super_block *s, struct hpfs_spare_block *spareblock) { struct 
quad_buffer_head qbh; __le32 *directory; u32 n_hotfixes, n_used_hotfixes; unsigned i; n_hotfixes = le32_to_cpu(spareblock->n_spares); n_used_hotfixes = le32_to_cpu(spareblock->n_spares_used); if (n_hotfixes > 256 || n_used_hotfixes > n_hotfixes) { hpfs_error(s, "invalid number of hotfixes: %u, used: %u", n_hotfixes, n_used_hotfixes); return; } if (!(directory = hpfs_map_4sectors(s, le32_to_cpu(spareblock->hotfix_map), &qbh, 0))) { hpfs_error(s, "can't load hotfix map"); return; } for (i = 0; i < n_used_hotfixes; i++) { hpfs_sb(s)->hotfix_from[i] = le32_to_cpu(directory[i]); hpfs_sb(s)->hotfix_to[i] = le32_to_cpu(directory[n_hotfixes + i]); } hpfs_sb(s)->n_hotfixes = n_used_hotfixes; hpfs_brelse4(&qbh); } /* * Load fnode to memory */ struct fnode *hpfs_map_fnode(struct super_block *s, ino_t ino, struct buffer_head **bhp) { struct fnode *fnode; if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, ino, 1, "fnode")) { return NULL; } if ((fnode = hpfs_map_sector(s, ino, bhp, FNODE_RD_AHEAD))) { if (hpfs_sb(s)->sb_chk) { struct extended_attribute *ea; struct extended_attribute *ea_end; if (le32_to_cpu(fnode->magic) != FNODE_MAGIC) { hpfs_error(s, "bad magic on fnode %08lx", (unsigned long)ino); goto bail; } if (!fnode_is_dir(fnode)) { if ((unsigned)fnode->btree.n_used_nodes + (unsigned)fnode->btree.n_free_nodes != (bp_internal(&fnode->btree) ? 12 : 8)) { hpfs_error(s, "bad number of nodes in fnode %08lx", (unsigned long)ino); goto bail; } if (le16_to_cpu(fnode->btree.first_free) != 8 + fnode->btree.n_used_nodes * (bp_internal(&fnode->btree) ? 8 : 12)) { hpfs_error(s, "bad first_free pointer in fnode %08lx", (unsigned long)ino); goto bail; } } if (le16_to_cpu(fnode->ea_size_s) && (le16_to_cpu(fnode->ea_offs) < 0xc4 || le16_to_cpu(fnode->ea_offs) + le16_to_cpu(fnode->acl_size_s) + le16_to_cpu(fnode->ea_size_s) > 0x200)) { hpfs_error(s, "bad EA info in fnode %08lx: ea_offs == %04x ea_size_s == %04x", (unsigned long)ino, le16_to_cpu(fnode->ea_offs), le16_to_cpu(fnode->ea_size_s)); goto bail; } ea = fnode_ea(fnode); ea_end = fnode_end_ea(fnode); while (ea != ea_end) { if (ea > ea_end) { hpfs_error(s, "bad EA in fnode %08lx", (unsigned long)ino); goto bail; } ea = next_ea(ea); } } } return fnode; bail: brelse(*bhp); return NULL; } struct anode *hpfs_map_anode(struct super_block *s, anode_secno ano, struct buffer_head **bhp) { struct anode *anode; if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, ano, 1, "anode")) return NULL; if ((anode = hpfs_map_sector(s, ano, bhp, ANODE_RD_AHEAD))) if (hpfs_sb(s)->sb_chk) { if (le32_to_cpu(anode->magic) != ANODE_MAGIC) { hpfs_error(s, "bad magic on anode %08x", ano); goto bail; } if (le32_to_cpu(anode->self) != ano) { hpfs_error(s, "self pointer invalid on anode %08x", ano); goto bail; } if ((unsigned)anode->btree.n_used_nodes + (unsigned)anode->btree.n_free_nodes != (bp_internal(&anode->btree) ? 60 : 40)) { hpfs_error(s, "bad number of nodes in anode %08x", ano); goto bail; } if (le16_to_cpu(anode->btree.first_free) != 8 + anode->btree.n_used_nodes * (bp_internal(&anode->btree) ? 
8 : 12)) { hpfs_error(s, "bad first_free pointer in anode %08x", ano); goto bail; } } return anode; bail: brelse(*bhp); return NULL; } /* * Load dnode to memory and do some checks */ struct dnode *hpfs_map_dnode(struct super_block *s, unsigned secno, struct quad_buffer_head *qbh) { struct dnode *dnode; if (hpfs_sb(s)->sb_chk) { if (hpfs_chk_sectors(s, secno, 4, "dnode")) return NULL; if (secno & 3) { hpfs_error(s, "dnode %08x not byte-aligned", secno); return NULL; } } if ((dnode = hpfs_map_4sectors(s, secno, qbh, DNODE_RD_AHEAD))) if (hpfs_sb(s)->sb_chk) { unsigned p, pp = 0; unsigned char *d = (unsigned char *)dnode; int b = 0; if (le32_to_cpu(dnode->magic) != DNODE_MAGIC) { hpfs_error(s, "bad magic on dnode %08x", secno); goto bail; } if (le32_to_cpu(dnode->self) != secno) hpfs_error(s, "bad self pointer on dnode %08x self = %08x", secno, le32_to_cpu(dnode->self)); /* Check dirents - bad dirents would cause infinite loops or shooting to memory */ if (le32_to_cpu(dnode->first_free) > 2048) { hpfs_error(s, "dnode %08x has first_free == %08x", secno, le32_to_cpu(dnode->first_free)); goto bail; } for (p = 20; p < le32_to_cpu(dnode->first_free); p += d[p] + (d[p+1] << 8)) { struct hpfs_dirent *de = (struct hpfs_dirent *)((char *)dnode + p); if (le16_to_cpu(de->length) > 292 || (le16_to_cpu(de->length) < 32) || (le16_to_cpu(de->length) & 3) || p + le16_to_cpu(de->length) > 2048) { hpfs_error(s, "bad dirent size in dnode %08x, dirent %03x, last %03x", secno, p, pp); goto bail; } if (((31 + de->namelen + de->down*4 + 3) & ~3) != le16_to_cpu(de->length)) { if (((31 + de->namelen + de->down*4 + 3) & ~3) < le16_to_cpu(de->length) && s->s_flags & SB_RDONLY) goto ok; hpfs_error(s, "namelen does not match dirent size in dnode %08x, dirent %03x, last %03x", secno, p, pp); goto bail; } ok: if (hpfs_sb(s)->sb_chk >= 2) b |= 1 << de->down; if (de->down) if (de_down_pointer(de) < 0x10) { hpfs_error(s, "bad down pointer in dnode %08x, dirent %03x, last %03x", secno, p, pp); goto bail; } pp = p; } if (p != le32_to_cpu(dnode->first_free)) { hpfs_error(s, "size on last dirent does not match first_free; dnode %08x", secno); goto bail; } if (d[pp + 30] != 1 || d[pp + 31] != 255) { hpfs_error(s, "dnode %08x does not end with \\377 entry", secno); goto bail; } if (b == 3) pr_err("unbalanced dnode tree, dnode %08x; see hpfs.txt 4 more info\n", secno); } return dnode; bail: hpfs_brelse4(qbh); return NULL; } dnode_secno hpfs_fnode_dno(struct super_block *s, ino_t ino) { struct buffer_head *bh; struct fnode *fnode; dnode_secno dno; fnode = hpfs_map_fnode(s, ino, &bh); if (!fnode) return 0; dno = le32_to_cpu(fnode->u.external[0].disk_secno); brelse(bh); return dno; }
linux-master
fs/hpfs/map.c
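The dnode consistency walk in hpfs_map_dnode() above is easiest to see in isolation. The following standalone userspace sketch (not kernel code; it drops the namelen and down-pointer checks and fabricates its own buffer) applies the same dirent-length and end-marker rules to a 2048-byte dnode image.

/* Illustrative userspace sketch: walk a 2048-byte dnode image with the same
 * dirent sanity checks hpfs_map_dnode() performs (simplified). */
#include <stdint.h>
#include <stdio.h>

static int check_dnode_dirents(const unsigned char *d, uint32_t first_free)
{
    unsigned p, pp = 0;

    if (first_free > 2048)
        return -1;                          /* first_free out of range */
    /* dirents start at offset 20; each begins with a 16-bit length */
    for (p = 20; p < first_free; p += d[p] + (d[p + 1] << 8)) {
        unsigned len = d[p] + (d[p + 1] << 8);

        if (len > 292 || len < 32 || (len & 3) || p + len > 2048)
            return -1;                      /* bad dirent size */
        pp = p;
    }
    if (p != first_free)
        return -1;          /* last dirent does not end at first_free */
    if (d[pp + 30] != 1 || d[pp + 31] != 255)
        return -1;          /* terminator bytes the kernel check expects */
    return 0;
}

int main(void)
{
    unsigned char dnode[2048] = { 0 };

    /* Fabricate one 32-byte dirent at offset 20 plus the terminator bytes. */
    dnode[20] = 32;                         /* dirent length, little endian */
    dnode[20 + 30] = 1;
    dnode[20 + 31] = 255;
    printf("dirent walk: %s\n",
           check_dnode_dirents(dnode, 20 + 32) == 0 ? "ok" : "corrupt");
    return 0;
}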
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/hpfs/name.c * * Mikulas Patocka ([email protected]), 1998-1999 * * operations with filenames */ #include "hpfs_fn.h" static inline int not_allowed_char(unsigned char c) { return c<' ' || c=='"' || c=='*' || c=='/' || c==':' || c=='<' || c=='>' || c=='?' || c=='\\' || c=='|'; } static inline int no_dos_char(unsigned char c) { /* Characters that are allowed in HPFS but not in DOS */ return c=='+' || c==',' || c==';' || c=='=' || c=='[' || c==']'; } static inline unsigned char upcase(unsigned char *dir, unsigned char a) { if (a<128 || a==255) return a>='a' && a<='z' ? a - 0x20 : a; if (!dir) return a; return dir[a-128]; } unsigned char hpfs_upcase(unsigned char *dir, unsigned char a) { return upcase(dir, a); } static inline unsigned char locase(unsigned char *dir, unsigned char a) { if (a<128 || a==255) return a>='A' && a<='Z' ? a + 0x20 : a; if (!dir) return a; return dir[a]; } int hpfs_chk_name(const unsigned char *name, unsigned *len) { int i; if (*len > 254) return -ENAMETOOLONG; hpfs_adjust_length(name, len); if (!*len) return -EINVAL; for (i = 0; i < *len; i++) if (not_allowed_char(name[i])) return -EINVAL; if (*len == 1) if (name[0] == '.') return -EINVAL; if (*len == 2) if (name[0] == '.' && name[1] == '.') return -EINVAL; return 0; } unsigned char *hpfs_translate_name(struct super_block *s, unsigned char *from, unsigned len, int lc, int lng) { unsigned char *to; int i; if (hpfs_sb(s)->sb_chk >= 2) if (hpfs_is_name_long(from, len) != lng) { pr_err("Long name flag mismatch - name "); for (i = 0; i < len; i++) pr_cont("%c", from[i]); pr_cont(" misidentified as %s.\n", lng ? "short" : "long"); pr_err("It's nothing serious. It could happen because of bug in OS/2.\nSet checks=normal to disable this message.\n"); } if (!lc) return from; if (!(to = kmalloc(len, GFP_KERNEL))) { pr_err("can't allocate memory for name conversion buffer\n"); return from; } for (i = 0; i < len; i++) to[i] = locase(hpfs_sb(s)->sb_cp_table,from[i]); return to; } int hpfs_compare_names(struct super_block *s, const unsigned char *n1, unsigned l1, const unsigned char *n2, unsigned l2, int last) { unsigned l = l1 < l2 ? l1 : l2; unsigned i; if (last) return -1; for (i = 0; i < l; i++) { unsigned char c1 = upcase(hpfs_sb(s)->sb_cp_table,n1[i]); unsigned char c2 = upcase(hpfs_sb(s)->sb_cp_table,n2[i]); if (c1 < c2) return -1; if (c1 > c2) return 1; } if (l1 < l2) return -1; if (l1 > l2) return 1; return 0; } int hpfs_is_name_long(const unsigned char *name, unsigned len) { int i,j; for (i = 0; i < len && name[i] != '.'; i++) if (no_dos_char(name[i])) return 1; if (!i || i > 8) return 1; if (i == len) return 0; for (j = i + 1; j < len; j++) if (name[j] == '.' || no_dos_char(name[i])) return 1; return j - i > 4; } /* OS/2 clears dots and spaces at the end of file name, so we have to */ void hpfs_adjust_length(const unsigned char *name, unsigned *len) { if (!*len) return; if (*len == 1 && name[0] == '.') return; if (*len == 2 && name[0] == '.' && name[1] == '.') return; while (*len && (name[*len - 1] == '.' || name[*len - 1] == ' ')) (*len)--; }
linux-master
fs/hpfs/name.c
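hpfs_compare_names() above defines the on-disk ordering: compare byte-by-byte after upcasing, and let the shorter name win when the common prefix ties. A minimal userspace sketch of that rule, assuming plain ASCII and no codepage table (the sb_cp_table == NULL case):

/* Illustrative sketch of the case-insensitive ordering rule. */
#include <stdio.h>

static unsigned char upcase_ascii(unsigned char a)
{
    return (a >= 'a' && a <= 'z') ? a - 0x20 : a;
}

static int compare_names(const unsigned char *n1, unsigned l1,
                         const unsigned char *n2, unsigned l2)
{
    unsigned l = l1 < l2 ? l1 : l2;
    unsigned i;

    for (i = 0; i < l; i++) {
        unsigned char c1 = upcase_ascii(n1[i]);
        unsigned char c2 = upcase_ascii(n2[i]);

        if (c1 < c2) return -1;
        if (c1 > c2) return 1;
    }
    if (l1 < l2) return -1;     /* shorter name sorts first on a tied prefix */
    if (l1 > l2) return 1;
    return 0;
}

int main(void)
{
    printf("%d\n", compare_names((const unsigned char *)"readme", 6,
                                 (const unsigned char *)"README.TXT", 10));
    return 0;
}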
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/hpfs/file.c * * Mikulas Patocka ([email protected]), 1998-1999 * * file VFS functions */ #include "hpfs_fn.h" #include <linux/mpage.h> #include <linux/iomap.h> #include <linux/fiemap.h> #define BLOCKS(size) (((size) + 511) >> 9) static int hpfs_file_release(struct inode *inode, struct file *file) { hpfs_lock(inode->i_sb); hpfs_write_if_changed(inode); hpfs_unlock(inode->i_sb); return 0; } int hpfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync) { struct inode *inode = file->f_mapping->host; int ret; ret = file_write_and_wait_range(file, start, end); if (ret) return ret; return sync_blockdev(inode->i_sb->s_bdev); } /* * generic_file_read often calls bmap with non-existing sector, * so we must ignore such errors. */ static secno hpfs_bmap(struct inode *inode, unsigned file_secno, unsigned *n_secs) { struct hpfs_inode_info *hpfs_inode = hpfs_i(inode); unsigned n, disk_secno; struct fnode *fnode; struct buffer_head *bh; if (BLOCKS(hpfs_i(inode)->mmu_private) <= file_secno) return 0; n = file_secno - hpfs_inode->i_file_sec; if (n < hpfs_inode->i_n_secs) { *n_secs = hpfs_inode->i_n_secs - n; return hpfs_inode->i_disk_sec + n; } if (!(fnode = hpfs_map_fnode(inode->i_sb, inode->i_ino, &bh))) return 0; disk_secno = hpfs_bplus_lookup(inode->i_sb, inode, &fnode->btree, file_secno, bh); if (disk_secno == -1) return 0; if (hpfs_chk_sectors(inode->i_sb, disk_secno, 1, "bmap")) return 0; n = file_secno - hpfs_inode->i_file_sec; if (n < hpfs_inode->i_n_secs) { *n_secs = hpfs_inode->i_n_secs - n; return hpfs_inode->i_disk_sec + n; } *n_secs = 1; return disk_secno; } void hpfs_truncate(struct inode *i) { if (IS_IMMUTABLE(i)) return /*-EPERM*/; hpfs_lock_assert(i->i_sb); hpfs_i(i)->i_n_secs = 0; i->i_blocks = 1 + ((i->i_size + 511) >> 9); hpfs_i(i)->mmu_private = i->i_size; hpfs_truncate_btree(i->i_sb, i->i_ino, 1, ((i->i_size + 511) >> 9)); hpfs_write_inode(i); hpfs_i(i)->i_n_secs = 0; } static int hpfs_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) { int r; secno s; unsigned n_secs; hpfs_lock(inode->i_sb); s = hpfs_bmap(inode, iblock, &n_secs); if (s) { if (bh_result->b_size >> 9 < n_secs) n_secs = bh_result->b_size >> 9; n_secs = hpfs_search_hotfix_map_for_range(inode->i_sb, s, n_secs); if (unlikely(!n_secs)) { s = hpfs_search_hotfix_map(inode->i_sb, s); n_secs = 1; } map_bh(bh_result, inode->i_sb, s); bh_result->b_size = n_secs << 9; goto ret_0; } if (!create) goto ret_0; if (iblock<<9 != hpfs_i(inode)->mmu_private) { BUG(); r = -EIO; goto ret_r; } if ((s = hpfs_add_sector_to_btree(inode->i_sb, inode->i_ino, 1, inode->i_blocks - 1)) == -1) { hpfs_truncate_btree(inode->i_sb, inode->i_ino, 1, inode->i_blocks - 1); r = -ENOSPC; goto ret_r; } inode->i_blocks++; hpfs_i(inode)->mmu_private += 512; set_buffer_new(bh_result); map_bh(bh_result, inode->i_sb, hpfs_search_hotfix_map(inode->i_sb, s)); ret_0: r = 0; ret_r: hpfs_unlock(inode->i_sb); return r; } static int hpfs_iomap_begin(struct inode *inode, loff_t offset, loff_t length, unsigned flags, struct iomap *iomap, struct iomap *srcmap) { struct super_block *sb = inode->i_sb; unsigned int blkbits = inode->i_blkbits; unsigned int n_secs; secno s; if (WARN_ON_ONCE(flags & (IOMAP_WRITE | IOMAP_ZERO))) return -EINVAL; iomap->bdev = inode->i_sb->s_bdev; iomap->offset = offset; hpfs_lock(sb); s = hpfs_bmap(inode, offset >> blkbits, &n_secs); if (s) { n_secs = hpfs_search_hotfix_map_for_range(sb, s, min_t(loff_t, n_secs, length)); if 
(unlikely(!n_secs)) { s = hpfs_search_hotfix_map(sb, s); n_secs = 1; } iomap->type = IOMAP_MAPPED; iomap->flags = IOMAP_F_MERGED; iomap->addr = (u64)s << blkbits; iomap->length = (u64)n_secs << blkbits; } else { iomap->type = IOMAP_HOLE; iomap->addr = IOMAP_NULL_ADDR; iomap->length = 1 << blkbits; } hpfs_unlock(sb); return 0; } static const struct iomap_ops hpfs_iomap_ops = { .iomap_begin = hpfs_iomap_begin, }; static int hpfs_read_folio(struct file *file, struct folio *folio) { return mpage_read_folio(folio, hpfs_get_block); } static void hpfs_readahead(struct readahead_control *rac) { mpage_readahead(rac, hpfs_get_block); } static int hpfs_writepages(struct address_space *mapping, struct writeback_control *wbc) { return mpage_writepages(mapping, wbc, hpfs_get_block); } static void hpfs_write_failed(struct address_space *mapping, loff_t to) { struct inode *inode = mapping->host; hpfs_lock(inode->i_sb); if (to > inode->i_size) { truncate_pagecache(inode, inode->i_size); hpfs_truncate(inode); } hpfs_unlock(inode->i_sb); } static int hpfs_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, struct page **pagep, void **fsdata) { int ret; *pagep = NULL; ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata, hpfs_get_block, &hpfs_i(mapping->host)->mmu_private); if (unlikely(ret)) hpfs_write_failed(mapping, pos + len); return ret; } static int hpfs_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *pagep, void *fsdata) { struct inode *inode = mapping->host; int err; err = generic_write_end(file, mapping, pos, len, copied, pagep, fsdata); if (err < len) hpfs_write_failed(mapping, pos + len); if (!(err < 0)) { /* make sure we write it on close, if not earlier */ hpfs_lock(inode->i_sb); hpfs_i(inode)->i_dirty = 1; hpfs_unlock(inode->i_sb); } return err; } static sector_t _hpfs_bmap(struct address_space *mapping, sector_t block) { return generic_block_bmap(mapping, block, hpfs_get_block); } static int hpfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 len) { int ret; inode_lock(inode); len = min_t(u64, len, i_size_read(inode)); ret = iomap_fiemap(inode, fieinfo, start, len, &hpfs_iomap_ops); inode_unlock(inode); return ret; } const struct address_space_operations hpfs_aops = { .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .read_folio = hpfs_read_folio, .readahead = hpfs_readahead, .writepages = hpfs_writepages, .write_begin = hpfs_write_begin, .write_end = hpfs_write_end, .bmap = _hpfs_bmap, .migrate_folio = buffer_migrate_folio, }; const struct file_operations hpfs_file_ops = { .llseek = generic_file_llseek, .read_iter = generic_file_read_iter, .write_iter = generic_file_write_iter, .mmap = generic_file_mmap, .release = hpfs_file_release, .fsync = hpfs_file_fsync, .splice_read = filemap_splice_read, .unlocked_ioctl = hpfs_ioctl, .compat_ioctl = compat_ptr_ioctl, }; const struct inode_operations hpfs_file_iops = { .setattr = hpfs_setattr, .fiemap = hpfs_fiemap, };
linux-master
fs/hpfs/file.c
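hpfs_bmap() above answers most lookups from a single cached run (i_file_sec / i_disk_sec / i_n_secs) before falling back to the fnode b+tree. A simplified sketch of that shortcut, using invented structure and function names rather than the kernel's:

/* Illustrative sketch: cached-extent lookup, the fast path of hpfs_bmap(). */
#include <stdio.h>

struct cached_run {
    unsigned file_sec;   /* first file-relative 512-byte sector of the run */
    unsigned disk_sec;   /* corresponding on-disk sector */
    unsigned n_secs;     /* run length in sectors */
};

/* Returns the disk sector for file_secno and how many sectors are contiguous
 * from there, or 0 if the cached run does not cover it (b+tree needed). */
static unsigned run_lookup(const struct cached_run *run,
                           unsigned file_secno, unsigned *n_secs)
{
    unsigned n = file_secno - run->file_sec;

    if (file_secno < run->file_sec || n >= run->n_secs)
        return 0;
    *n_secs = run->n_secs - n;
    return run->disk_sec + n;
}

int main(void)
{
    struct cached_run run = { .file_sec = 8, .disk_sec = 5000, .n_secs = 16 };
    unsigned n;
    unsigned s = run_lookup(&run, 10, &n);

    printf("disk sector %u, %u contiguous\n", s, n);    /* 5002, 14 */
    return 0;
}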
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/hpfs/dentry.c * * Mikulas Patocka ([email protected]), 1998-1999 * * dcache operations */ #include "hpfs_fn.h" /* * Note: the dentry argument is the parent dentry. */ static int hpfs_hash_dentry(const struct dentry *dentry, struct qstr *qstr) { unsigned long hash; int i; unsigned l = qstr->len; if (l == 1) if (qstr->name[0]=='.') goto x; if (l == 2) if (qstr->name[0]=='.' || qstr->name[1]=='.') goto x; hpfs_adjust_length(qstr->name, &l); /*if (hpfs_chk_name(qstr->name,&l))*/ /*return -ENAMETOOLONG;*/ /*return -ENOENT;*/ x: hash = init_name_hash(dentry); for (i = 0; i < l; i++) hash = partial_name_hash(hpfs_upcase(hpfs_sb(dentry->d_sb)->sb_cp_table,qstr->name[i]), hash); qstr->hash = end_name_hash(hash); return 0; } static int hpfs_compare_dentry(const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name) { unsigned al = len; unsigned bl = name->len; hpfs_adjust_length(str, &al); /*hpfs_adjust_length(b->name, &bl);*/ /* * 'str' is the name of an already existing dentry, so the name * must be valid. 'name' must be validated first. */ if (hpfs_chk_name(name->name, &bl)) return 1; if (hpfs_compare_names(dentry->d_sb, str, al, name->name, bl, 0)) return 1; return 0; } const struct dentry_operations hpfs_dentry_operations = { .d_hash = hpfs_hash_dentry, .d_compare = hpfs_compare_dentry, };
linux-master
fs/hpfs/dentry.c
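hpfs_hash_dentry() upcases every byte before feeding it to the name hash so that differently-cased spellings of the same name land in the same dcache hash chain. A toy illustration of why the fold has to happen before hashing; the hash function here is a stand-in, not the kernel's partial_name_hash():

/* Illustrative sketch: case folding before hashing. */
#include <stdio.h>

static unsigned long ci_hash(const char *name, unsigned len)
{
    unsigned long hash = 0;
    unsigned i;

    for (i = 0; i < len; i++) {
        unsigned char c = name[i];

        if (c >= 'a' && c <= 'z')
            c -= 0x20;              /* fold case first */
        hash = hash * 31 + c;       /* stand-in hash */
    }
    return hash;
}

int main(void)
{
    /* both spellings must hash identically for lookups to hit */
    printf("%lu %lu\n", ci_hash("readme.txt", 10), ci_hash("README.TXT", 10));
    return 0;
}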
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2008 Red Hat, Inc., Eric Paris <[email protected]> */ #include <linux/list.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/srcu.h> #include <linux/rculist.h> #include <linux/wait.h> #include <linux/memcontrol.h> #include <linux/fsnotify_backend.h> #include "fsnotify.h" #include <linux/atomic.h> /* * Final freeing of a group */ static void fsnotify_final_destroy_group(struct fsnotify_group *group) { if (group->ops->free_group_priv) group->ops->free_group_priv(group); mem_cgroup_put(group->memcg); mutex_destroy(&group->mark_mutex); kfree(group); } /* * Stop queueing new events for this group. Once this function returns * fsnotify_add_event() will not add any new events to the group's queue. */ void fsnotify_group_stop_queueing(struct fsnotify_group *group) { spin_lock(&group->notification_lock); group->shutdown = true; spin_unlock(&group->notification_lock); } /* * Trying to get rid of a group. Remove all marks, flush all events and release * the group reference. * Note that another thread calling fsnotify_clear_marks_by_group() may still * hold a ref to the group. */ void fsnotify_destroy_group(struct fsnotify_group *group) { /* * Stop queueing new events. The code below is careful enough to not * require this but fanotify needs to stop queuing events even before * fsnotify_destroy_group() is called and this makes the other callers * of fsnotify_destroy_group() to see the same behavior. */ fsnotify_group_stop_queueing(group); /* Clear all marks for this group and queue them for destruction */ fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_ANY); /* * Some marks can still be pinned when waiting for response from * userspace. Wait for those now. fsnotify_prepare_user_wait() will * not succeed now so this wait is race-free. */ wait_event(group->notification_waitq, !atomic_read(&group->user_waits)); /* * Wait until all marks get really destroyed. We could actually destroy * them ourselves instead of waiting for worker to do it, however that * would be racy as worker can already be processing some marks before * we even entered fsnotify_destroy_group(). */ fsnotify_wait_marks_destroyed(); /* * Since we have waited for fsnotify_mark_srcu in * fsnotify_mark_destroy_list() there can be no outstanding event * notification against this group. So clearing the notification queue * of all events is reliable now. */ fsnotify_flush_notify(group); /* * Destroy overflow event (we cannot use fsnotify_destroy_event() as * that deliberately ignores overflow events. */ if (group->overflow_event) group->ops->free_event(group, group->overflow_event); fsnotify_put_group(group); } /* * Get reference to a group. */ void fsnotify_get_group(struct fsnotify_group *group) { refcount_inc(&group->refcnt); } /* * Drop a reference to a group. Free it if it's through. 
*/ void fsnotify_put_group(struct fsnotify_group *group) { if (refcount_dec_and_test(&group->refcnt)) fsnotify_final_destroy_group(group); } EXPORT_SYMBOL_GPL(fsnotify_put_group); static struct fsnotify_group *__fsnotify_alloc_group( const struct fsnotify_ops *ops, int flags, gfp_t gfp) { static struct lock_class_key nofs_marks_lock; struct fsnotify_group *group; group = kzalloc(sizeof(struct fsnotify_group), gfp); if (!group) return ERR_PTR(-ENOMEM); /* set to 0 when there a no external references to this group */ refcount_set(&group->refcnt, 1); atomic_set(&group->user_waits, 0); spin_lock_init(&group->notification_lock); INIT_LIST_HEAD(&group->notification_list); init_waitqueue_head(&group->notification_waitq); group->max_events = UINT_MAX; mutex_init(&group->mark_mutex); INIT_LIST_HEAD(&group->marks_list); group->ops = ops; group->flags = flags; /* * For most backends, eviction of inode with a mark is not expected, * because marks hold a refcount on the inode against eviction. * * Use a different lockdep class for groups that support evictable * inode marks, because with evictable marks, mark_mutex is NOT * fs-reclaim safe - the mutex is taken when evicting inodes. */ if (flags & FSNOTIFY_GROUP_NOFS) lockdep_set_class(&group->mark_mutex, &nofs_marks_lock); return group; } /* * Create a new fsnotify_group and hold a reference for the group returned. */ struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops, int flags) { gfp_t gfp = (flags & FSNOTIFY_GROUP_USER) ? GFP_KERNEL_ACCOUNT : GFP_KERNEL; return __fsnotify_alloc_group(ops, flags, gfp); } EXPORT_SYMBOL_GPL(fsnotify_alloc_group); int fsnotify_fasync(int fd, struct file *file, int on) { struct fsnotify_group *group = file->private_data; return fasync_helper(fd, file, on, &group->fsn_fa) >= 0 ? 0 : -EIO; }
linux-master
fs/notify/group.c
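The group code above is built around one reference-counting rule: a group is born with one reference and fsnotify_final_destroy_group() runs only when the last fsnotify_put_group() drops it. A plain-C model of that rule, with invented names and no locking, just to make the pairing visible:

/* Illustrative sketch of the group lifetime rule (not the kernel types). */
#include <stdio.h>
#include <stdlib.h>

struct group {
    int refcnt;
};

static struct group *group_alloc(void)
{
    struct group *g = calloc(1, sizeof(*g));

    if (g)
        g->refcnt = 1;              /* reference held by the creator */
    return g;
}

static void group_get(struct group *g) { g->refcnt++; }

static void group_put(struct group *g)
{
    if (--g->refcnt == 0) {
        printf("final reference dropped, freeing group\n");
        free(g);
    }
}

int main(void)
{
    struct group *g = group_alloc();

    group_get(g);       /* e.g. a mark takes a reference on the group */
    group_put(g);       /* the mark drops it */
    group_put(g);       /* the creator drops the last one -> freed */
    return 0;
}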
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2008 Red Hat, Inc., Eric Paris <[email protected]> */ #include <linux/dcache.h> #include <linux/fs.h> #include <linux/gfp.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mount.h> #include <linux/srcu.h> #include <linux/fsnotify_backend.h> #include "fsnotify.h" /* * Clear all of the marks on an inode when it is being evicted from core */ void __fsnotify_inode_delete(struct inode *inode) { fsnotify_clear_marks_by_inode(inode); } EXPORT_SYMBOL_GPL(__fsnotify_inode_delete); void __fsnotify_vfsmount_delete(struct vfsmount *mnt) { fsnotify_clear_marks_by_mount(mnt); } /** * fsnotify_unmount_inodes - an sb is unmounting. handle any watched inodes. * @sb: superblock being unmounted. * * Called during unmount with no locks held, so needs to be safe against * concurrent modifiers. We temporarily drop sb->s_inode_list_lock and CAN block. */ static void fsnotify_unmount_inodes(struct super_block *sb) { struct inode *inode, *iput_inode = NULL; spin_lock(&sb->s_inode_list_lock); list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { /* * We cannot __iget() an inode in state I_FREEING, * I_WILL_FREE, or I_NEW which is fine because by that point * the inode cannot have any associated watches. */ spin_lock(&inode->i_lock); if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) { spin_unlock(&inode->i_lock); continue; } /* * If i_count is zero, the inode cannot have any watches and * doing an __iget/iput with SB_ACTIVE clear would actually * evict all inodes with zero i_count from icache which is * unnecessarily violent and may in fact be illegal to do. * However, we should have been called /after/ evict_inodes * removed all zero refcount inodes, in any case. Test to * be sure. */ if (!atomic_read(&inode->i_count)) { spin_unlock(&inode->i_lock); continue; } __iget(inode); spin_unlock(&inode->i_lock); spin_unlock(&sb->s_inode_list_lock); iput(iput_inode); /* for each watch, send FS_UNMOUNT and then remove it */ fsnotify_inode(inode, FS_UNMOUNT); fsnotify_inode_delete(inode); iput_inode = inode; cond_resched(); spin_lock(&sb->s_inode_list_lock); } spin_unlock(&sb->s_inode_list_lock); iput(iput_inode); } void fsnotify_sb_delete(struct super_block *sb) { fsnotify_unmount_inodes(sb); fsnotify_clear_marks_by_sb(sb); /* Wait for outstanding object references from connectors */ wait_var_event(&sb->s_fsnotify_connectors, !atomic_long_read(&sb->s_fsnotify_connectors)); } /* * Given an inode, first check if we care what happens to our children. Inotify * and dnotify both tell their parents about events. If we care about any event * on a child we run all of our children and set a dentry flag saying that the * parent cares. Thus when an event happens on a child it can quickly tell * if there is a need to find a parent and send the event to the parent. */ void __fsnotify_update_child_dentry_flags(struct inode *inode) { struct dentry *alias; int watched; if (!S_ISDIR(inode->i_mode)) return; /* determine if the children should tell inode about their events */ watched = fsnotify_inode_watches_children(inode); spin_lock(&inode->i_lock); /* run all of the dentries associated with this inode. 
Since this is a * directory, there damn well better only be one item on this list */ hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) { struct dentry *child; /* run all of the children of the original inode and fix their * d_flags to indicate parental interest (their parent is the * original inode) */ spin_lock(&alias->d_lock); list_for_each_entry(child, &alias->d_subdirs, d_child) { if (!child->d_inode) continue; spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED); if (watched) child->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED; else child->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED; spin_unlock(&child->d_lock); } spin_unlock(&alias->d_lock); } spin_unlock(&inode->i_lock); } /* Are inode/sb/mount interested in parent and name info with this event? */ static bool fsnotify_event_needs_parent(struct inode *inode, struct mount *mnt, __u32 mask) { __u32 marks_mask = 0; /* We only send parent/name to inode/sb/mount for events on non-dir */ if (mask & FS_ISDIR) return false; /* * All events that are possible on child can also may be reported with * parent/name info to inode/sb/mount. Otherwise, a watching parent * could result in events reported with unexpected name info to sb/mount. */ BUILD_BUG_ON(FS_EVENTS_POSS_ON_CHILD & ~FS_EVENTS_POSS_TO_PARENT); /* Did either inode/sb/mount subscribe for events with parent/name? */ marks_mask |= fsnotify_parent_needed_mask(inode->i_fsnotify_mask); marks_mask |= fsnotify_parent_needed_mask(inode->i_sb->s_fsnotify_mask); if (mnt) marks_mask |= fsnotify_parent_needed_mask(mnt->mnt_fsnotify_mask); /* Did they subscribe for this event with parent/name info? */ return mask & marks_mask; } /* * Notify this dentry's parent about a child's events with child name info * if parent is watching or if inode/sb/mount are interested in events with * parent and name info. * * Notify only the child without name info if parent is not watching and * inode/sb/mount are not interested in events with parent and name info. */ int __fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data, int data_type) { const struct path *path = fsnotify_data_path(data, data_type); struct mount *mnt = path ? real_mount(path->mnt) : NULL; struct inode *inode = d_inode(dentry); struct dentry *parent; bool parent_watched = dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED; bool parent_needed, parent_interested; __u32 p_mask; struct inode *p_inode = NULL; struct name_snapshot name; struct qstr *file_name = NULL; int ret = 0; /* * Do inode/sb/mount care about parent and name info on non-dir? * Do they care about any event at all? */ if (!inode->i_fsnotify_marks && !inode->i_sb->s_fsnotify_marks && (!mnt || !mnt->mnt_fsnotify_marks) && !parent_watched) return 0; parent = NULL; parent_needed = fsnotify_event_needs_parent(inode, mnt, mask); if (!parent_watched && !parent_needed) goto notify; /* Does parent inode care about events on children? */ parent = dget_parent(dentry); p_inode = parent->d_inode; p_mask = fsnotify_inode_watches_children(p_inode); if (unlikely(parent_watched && !p_mask)) __fsnotify_update_child_dentry_flags(p_inode); /* * Include parent/name in notification either if some notification * groups require parent info or the parent is interested in this event. 
*/ parent_interested = mask & p_mask & ALL_FSNOTIFY_EVENTS; if (parent_needed || parent_interested) { /* When notifying parent, child should be passed as data */ WARN_ON_ONCE(inode != fsnotify_data_inode(data, data_type)); /* Notify both parent and child with child name info */ take_dentry_name_snapshot(&name, dentry); file_name = &name.name; if (parent_interested) mask |= FS_EVENT_ON_CHILD; } notify: ret = fsnotify(mask, data, data_type, p_inode, file_name, inode, 0); if (file_name) release_dentry_name_snapshot(&name); dput(parent); return ret; } EXPORT_SYMBOL_GPL(__fsnotify_parent); static int fsnotify_handle_inode_event(struct fsnotify_group *group, struct fsnotify_mark *inode_mark, u32 mask, const void *data, int data_type, struct inode *dir, const struct qstr *name, u32 cookie) { const struct path *path = fsnotify_data_path(data, data_type); struct inode *inode = fsnotify_data_inode(data, data_type); const struct fsnotify_ops *ops = group->ops; if (WARN_ON_ONCE(!ops->handle_inode_event)) return 0; if (WARN_ON_ONCE(!inode && !dir)) return 0; if ((inode_mark->flags & FSNOTIFY_MARK_FLAG_EXCL_UNLINK) && path && d_unlinked(path->dentry)) return 0; /* Check interest of this mark in case event was sent with two marks */ if (!(mask & inode_mark->mask & ALL_FSNOTIFY_EVENTS)) return 0; return ops->handle_inode_event(inode_mark, mask, inode, dir, name, cookie); } static int fsnotify_handle_event(struct fsnotify_group *group, __u32 mask, const void *data, int data_type, struct inode *dir, const struct qstr *name, u32 cookie, struct fsnotify_iter_info *iter_info) { struct fsnotify_mark *inode_mark = fsnotify_iter_inode_mark(iter_info); struct fsnotify_mark *parent_mark = fsnotify_iter_parent_mark(iter_info); int ret; if (WARN_ON_ONCE(fsnotify_iter_sb_mark(iter_info)) || WARN_ON_ONCE(fsnotify_iter_vfsmount_mark(iter_info))) return 0; /* * For FS_RENAME, 'dir' is old dir and 'data' is new dentry. * The only ->handle_inode_event() backend that supports FS_RENAME is * dnotify, where it means file was renamed within same parent. */ if (mask & FS_RENAME) { struct dentry *moved = fsnotify_data_dentry(data, data_type); if (dir != moved->d_parent->d_inode) return 0; } if (parent_mark) { ret = fsnotify_handle_inode_event(group, parent_mark, mask, data, data_type, dir, name, 0); if (ret) return ret; } if (!inode_mark) return 0; if (mask & FS_EVENT_ON_CHILD) { /* * Some events can be sent on both parent dir and child marks * (e.g. FS_ATTRIB). If both parent dir and child are * watching, report the event once to parent dir with name (if * interested) and once to child without name (if interested). * The child watcher is expecting an event without a file name * and without the FS_EVENT_ON_CHILD flag. 
*/ mask &= ~FS_EVENT_ON_CHILD; dir = NULL; name = NULL; } return fsnotify_handle_inode_event(group, inode_mark, mask, data, data_type, dir, name, cookie); } static int send_to_group(__u32 mask, const void *data, int data_type, struct inode *dir, const struct qstr *file_name, u32 cookie, struct fsnotify_iter_info *iter_info) { struct fsnotify_group *group = NULL; __u32 test_mask = (mask & ALL_FSNOTIFY_EVENTS); __u32 marks_mask = 0; __u32 marks_ignore_mask = 0; bool is_dir = mask & FS_ISDIR; struct fsnotify_mark *mark; int type; if (!iter_info->report_mask) return 0; /* clear ignored on inode modification */ if (mask & FS_MODIFY) { fsnotify_foreach_iter_mark_type(iter_info, mark, type) { if (!(mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY)) mark->ignore_mask = 0; } } /* Are any of the group marks interested in this event? */ fsnotify_foreach_iter_mark_type(iter_info, mark, type) { group = mark->group; marks_mask |= mark->mask; marks_ignore_mask |= fsnotify_effective_ignore_mask(mark, is_dir, type); } pr_debug("%s: group=%p mask=%x marks_mask=%x marks_ignore_mask=%x data=%p data_type=%d dir=%p cookie=%d\n", __func__, group, mask, marks_mask, marks_ignore_mask, data, data_type, dir, cookie); if (!(test_mask & marks_mask & ~marks_ignore_mask)) return 0; if (group->ops->handle_event) { return group->ops->handle_event(group, mask, data, data_type, dir, file_name, cookie, iter_info); } return fsnotify_handle_event(group, mask, data, data_type, dir, file_name, cookie, iter_info); } static struct fsnotify_mark *fsnotify_first_mark(struct fsnotify_mark_connector **connp) { struct fsnotify_mark_connector *conn; struct hlist_node *node = NULL; conn = srcu_dereference(*connp, &fsnotify_mark_srcu); if (conn) node = srcu_dereference(conn->list.first, &fsnotify_mark_srcu); return hlist_entry_safe(node, struct fsnotify_mark, obj_list); } static struct fsnotify_mark *fsnotify_next_mark(struct fsnotify_mark *mark) { struct hlist_node *node = NULL; if (mark) node = srcu_dereference(mark->obj_list.next, &fsnotify_mark_srcu); return hlist_entry_safe(node, struct fsnotify_mark, obj_list); } /* * iter_info is a multi head priority queue of marks. * Pick a subset of marks from queue heads, all with the same group * and set the report_mask to a subset of the selected marks. * Returns false if there are no more groups to iterate. */ static bool fsnotify_iter_select_report_types( struct fsnotify_iter_info *iter_info) { struct fsnotify_group *max_prio_group = NULL; struct fsnotify_mark *mark; int type; /* Choose max prio group among groups of all queue heads */ fsnotify_foreach_iter_type(type) { mark = iter_info->marks[type]; if (mark && fsnotify_compare_groups(max_prio_group, mark->group) > 0) max_prio_group = mark->group; } if (!max_prio_group) return false; /* Set the report mask for marks from same group as max prio group */ iter_info->current_group = max_prio_group; iter_info->report_mask = 0; fsnotify_foreach_iter_type(type) { mark = iter_info->marks[type]; if (mark && mark->group == iter_info->current_group) { /* * FSNOTIFY_ITER_TYPE_PARENT indicates that this inode * is watching children and interested in this event, * which is an event possible on child. * But is *this mark* watching children? */ if (type == FSNOTIFY_ITER_TYPE_PARENT && !(mark->mask & FS_EVENT_ON_CHILD) && !(fsnotify_ignore_mask(mark) & FS_EVENT_ON_CHILD)) continue; fsnotify_iter_set_report_type(iter_info, type); } } return true; } /* * Pop from iter_info multi head queue, the marks that belong to the group of * current iteration step. 
*/ static void fsnotify_iter_next(struct fsnotify_iter_info *iter_info) { struct fsnotify_mark *mark; int type; /* * We cannot use fsnotify_foreach_iter_mark_type() here because we * may need to advance a mark of type X that belongs to current_group * but was not selected for reporting. */ fsnotify_foreach_iter_type(type) { mark = iter_info->marks[type]; if (mark && mark->group == iter_info->current_group) iter_info->marks[type] = fsnotify_next_mark(iter_info->marks[type]); } } /* * fsnotify - This is the main call to fsnotify. * * The VFS calls into hook specific functions in linux/fsnotify.h. * Those functions then in turn call here. Here will call out to all of the * registered fsnotify_group. Those groups can then use the notification event * in whatever means they feel necessary. * * @mask: event type and flags * @data: object that event happened on * @data_type: type of object for fanotify_data_XXX() accessors * @dir: optional directory associated with event - * if @file_name is not NULL, this is the directory that * @file_name is relative to * @file_name: optional file name associated with event * @inode: optional inode associated with event - * If @dir and @inode are both non-NULL, event may be * reported to both. * @cookie: inotify rename cookie */ int fsnotify(__u32 mask, const void *data, int data_type, struct inode *dir, const struct qstr *file_name, struct inode *inode, u32 cookie) { const struct path *path = fsnotify_data_path(data, data_type); struct super_block *sb = fsnotify_data_sb(data, data_type); struct fsnotify_iter_info iter_info = {}; struct mount *mnt = NULL; struct inode *inode2 = NULL; struct dentry *moved; int inode2_type; int ret = 0; __u32 test_mask, marks_mask; if (path) mnt = real_mount(path->mnt); if (!inode) { /* Dirent event - report on TYPE_INODE to dir */ inode = dir; /* For FS_RENAME, inode is old_dir and inode2 is new_dir */ if (mask & FS_RENAME) { moved = fsnotify_data_dentry(data, data_type); inode2 = moved->d_parent->d_inode; inode2_type = FSNOTIFY_ITER_TYPE_INODE2; } } else if (mask & FS_EVENT_ON_CHILD) { /* * Event on child - report on TYPE_PARENT to dir if it is * watching children and on TYPE_INODE to child. */ inode2 = dir; inode2_type = FSNOTIFY_ITER_TYPE_PARENT; } /* * Optimization: srcu_read_lock() has a memory barrier which can * be expensive. It protects walking the *_fsnotify_marks lists. * However, if we do not walk the lists, we do not have to do * SRCU because we have no references to any objects and do not * need SRCU to keep them "alive". */ if (!sb->s_fsnotify_marks && (!mnt || !mnt->mnt_fsnotify_marks) && (!inode || !inode->i_fsnotify_marks) && (!inode2 || !inode2->i_fsnotify_marks)) return 0; marks_mask = sb->s_fsnotify_mask; if (mnt) marks_mask |= mnt->mnt_fsnotify_mask; if (inode) marks_mask |= inode->i_fsnotify_mask; if (inode2) marks_mask |= inode2->i_fsnotify_mask; /* * If this is a modify event we may need to clear some ignore masks. * In that case, the object with ignore masks will have the FS_MODIFY * event in its mask. * Otherwise, return if none of the marks care about this type of event. 
*/ test_mask = (mask & ALL_FSNOTIFY_EVENTS); if (!(test_mask & marks_mask)) return 0; iter_info.srcu_idx = srcu_read_lock(&fsnotify_mark_srcu); iter_info.marks[FSNOTIFY_ITER_TYPE_SB] = fsnotify_first_mark(&sb->s_fsnotify_marks); if (mnt) { iter_info.marks[FSNOTIFY_ITER_TYPE_VFSMOUNT] = fsnotify_first_mark(&mnt->mnt_fsnotify_marks); } if (inode) { iter_info.marks[FSNOTIFY_ITER_TYPE_INODE] = fsnotify_first_mark(&inode->i_fsnotify_marks); } if (inode2) { iter_info.marks[inode2_type] = fsnotify_first_mark(&inode2->i_fsnotify_marks); } /* * We need to merge inode/vfsmount/sb mark lists so that e.g. inode mark * ignore masks are properly reflected for mount/sb mark notifications. * That's why this traversal is so complicated... */ while (fsnotify_iter_select_report_types(&iter_info)) { ret = send_to_group(mask, data, data_type, dir, file_name, cookie, &iter_info); if (ret && (mask & ALL_FSNOTIFY_PERM_EVENTS)) goto out; fsnotify_iter_next(&iter_info); } ret = 0; out: srcu_read_unlock(&fsnotify_mark_srcu, iter_info.srcu_idx); return ret; } EXPORT_SYMBOL_GPL(fsnotify); static __init int fsnotify_init(void) { int ret; BUILD_BUG_ON(HWEIGHT32(ALL_FSNOTIFY_BITS) != 23); ret = init_srcu_struct(&fsnotify_mark_srcu); if (ret) panic("initializing fsnotify_mark_srcu"); fsnotify_mark_connector_cachep = KMEM_CACHE(fsnotify_mark_connector, SLAB_PANIC); return 0; } core_initcall(fsnotify_init);
linux-master
fs/notify/fsnotify.c
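Before fsnotify() above walks any mark lists it ORs together the masks of the sb, the mount and the inode(s) and returns early if the event type is in none of them. A self-contained sketch of that pre-filter; the EV_* values are arbitrary stand-ins, not the kernel's FS_* constants:

/* Illustrative sketch of the cheap "is anyone interested" pre-filter. */
#include <stdio.h>
#include <stdint.h>

#define EV_ACCESS  0x0001
#define EV_MODIFY  0x0002
#define EV_OPEN    0x0020

static int anyone_interested(uint32_t event_mask, uint32_t sb_mask,
                             uint32_t mnt_mask, uint32_t inode_mask)
{
    uint32_t marks_mask = sb_mask | mnt_mask | inode_mask;

    return (event_mask & marks_mask) != 0;
}

int main(void)
{
    /* only the inode is watched, and only for modifications */
    printf("OPEN delivered: %d\n",
           anyone_interested(EV_OPEN, 0, 0, EV_MODIFY));
    printf("MODIFY delivered: %d\n",
           anyone_interested(EV_MODIFY, 0, 0, EV_MODIFY));
    return 0;
}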
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2008 Red Hat, Inc., Eric Paris <[email protected]> */ /* * Basic idea behind the notification queue: An fsnotify group (like inotify) * sends the userspace notification about events asynchronously some time after * the event happened. When inotify gets an event it will need to add that * event to the group notify queue. Since a single event might need to be on * multiple group's notification queues we can't add the event directly to each * queue and instead add a small "event_holder" to each queue. This event_holder * has a pointer back to the original event. Since the majority of events are * going to end up on one, and only one, notification queue we embed one * event_holder into each event. This means we have a single allocation instead * of always needing two. If the embedded event_holder is already in use by * another group a new event_holder (from fsnotify_event_holder_cachep) will be * allocated and used. */ #include <linux/fs.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/module.h> #include <linux/mount.h> #include <linux/mutex.h> #include <linux/namei.h> #include <linux/path.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/atomic.h> #include <linux/fsnotify_backend.h> #include "fsnotify.h" static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0); /** * fsnotify_get_cookie - return a unique cookie for use in synchronizing events. * Called from fsnotify_move, which is inlined into filesystem modules. */ u32 fsnotify_get_cookie(void) { return atomic_inc_return(&fsnotify_sync_cookie); } EXPORT_SYMBOL_GPL(fsnotify_get_cookie); void fsnotify_destroy_event(struct fsnotify_group *group, struct fsnotify_event *event) { /* Overflow events are per-group and we don't want to free them */ if (!event || event == group->overflow_event) return; /* * If the event is still queued, we have a problem... Do an unreliable * lockless check first to avoid locking in the common case. The * locking may be necessary for permission events which got removed * from the list by a different CPU than the one freeing the event. */ if (!list_empty(&event->list)) { spin_lock(&group->notification_lock); WARN_ON(!list_empty(&event->list)); spin_unlock(&group->notification_lock); } group->ops->free_event(group, event); } /* * Try to add an event to the notification queue. * The group can later pull this event off the queue to deal with. * The group can use the @merge hook to merge the event with a queued event. * The group can use the @insert hook to insert the event into hash table. * The function returns: * 0 if the event was added to a queue * 1 if the event was merged with some other queued event * 2 if the event was not queued - either the queue of events has overflown * or the group is shutting down. 
*/ int fsnotify_insert_event(struct fsnotify_group *group, struct fsnotify_event *event, int (*merge)(struct fsnotify_group *, struct fsnotify_event *), void (*insert)(struct fsnotify_group *, struct fsnotify_event *)) { int ret = 0; struct list_head *list = &group->notification_list; pr_debug("%s: group=%p event=%p\n", __func__, group, event); spin_lock(&group->notification_lock); if (group->shutdown) { spin_unlock(&group->notification_lock); return 2; } if (event == group->overflow_event || group->q_len >= group->max_events) { ret = 2; /* Queue overflow event only if it isn't already queued */ if (!list_empty(&group->overflow_event->list)) { spin_unlock(&group->notification_lock); return ret; } event = group->overflow_event; goto queue; } if (!list_empty(list) && merge) { ret = merge(group, event); if (ret) { spin_unlock(&group->notification_lock); return ret; } } queue: group->q_len++; list_add_tail(&event->list, list); if (insert) insert(group, event); spin_unlock(&group->notification_lock); wake_up(&group->notification_waitq); kill_fasync(&group->fsn_fa, SIGIO, POLL_IN); return ret; } void fsnotify_remove_queued_event(struct fsnotify_group *group, struct fsnotify_event *event) { assert_spin_locked(&group->notification_lock); /* * We need to init list head for the case of overflow event so that * check in fsnotify_add_event() works */ list_del_init(&event->list); group->q_len--; } /* * Return the first event on the notification list without removing it. * Returns NULL if the list is empty. */ struct fsnotify_event *fsnotify_peek_first_event(struct fsnotify_group *group) { assert_spin_locked(&group->notification_lock); if (fsnotify_notify_queue_is_empty(group)) return NULL; return list_first_entry(&group->notification_list, struct fsnotify_event, list); } /* * Remove and return the first event from the notification list. It is the * responsibility of the caller to destroy the obtained event */ struct fsnotify_event *fsnotify_remove_first_event(struct fsnotify_group *group) { struct fsnotify_event *event = fsnotify_peek_first_event(group); if (!event) return NULL; pr_debug("%s: group=%p event=%p\n", __func__, group, event); fsnotify_remove_queued_event(group, event); return event; } /* * Called when a group is being torn down to clean up any outstanding * event notifications. */ void fsnotify_flush_notify(struct fsnotify_group *group) { struct fsnotify_event *event; spin_lock(&group->notification_lock); while (!fsnotify_notify_queue_is_empty(group)) { event = fsnotify_remove_first_event(group); spin_unlock(&group->notification_lock); fsnotify_destroy_event(group, event); spin_lock(&group->notification_lock); } spin_unlock(&group->notification_lock); }
linux-master
fs/notify/notification.c
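fsnotify_insert_event() above enforces a bounded queue: once q_len reaches max_events the real event is dropped and the group's single overflow event is queued at most once. A simplified userspace model of that policy (fixed array, no locking, no merge/insert hooks):

/* Illustrative sketch of the bounded notification queue with overflow. */
#include <stdio.h>

#define MAX_EVENTS 3
#define EV_OVERFLOW (-1)

struct queue {
    int events[MAX_EVENTS + 1];     /* +1 slot so the overflow marker fits */
    int len;
    int overflow_queued;
};

/* Returns 0 if queued, 2 if dropped (overflow marker queued at most once). */
static int insert_event(struct queue *q, int ev)
{
    if (q->len >= MAX_EVENTS) {
        if (!q->overflow_queued) {
            q->events[q->len++] = EV_OVERFLOW;
            q->overflow_queued = 1;
        }
        return 2;
    }
    q->events[q->len++] = ev;
    return 0;
}

int main(void)
{
    struct queue q = { .len = 0 };
    int i;

    for (i = 1; i <= 6; i++)
        printf("event %d -> %d\n", i, insert_event(&q, i));
    printf("queued entries: %d (last one is the overflow marker)\n", q.len);
    return 0;
}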
// SPDX-License-Identifier: GPL-2.0 #include <linux/file.h> #include <linux/fs.h> #include <linux/fsnotify_backend.h> #include <linux/idr.h> #include <linux/init.h> #include <linux/inotify.h> #include <linux/fanotify.h> #include <linux/kernel.h> #include <linux/namei.h> #include <linux/sched.h> #include <linux/types.h> #include <linux/seq_file.h> #include <linux/exportfs.h> #include "inotify/inotify.h" #include "fanotify/fanotify.h" #include "fdinfo.h" #include "fsnotify.h" #if defined(CONFIG_PROC_FS) #if defined(CONFIG_INOTIFY_USER) || defined(CONFIG_FANOTIFY) static void show_fdinfo(struct seq_file *m, struct file *f, void (*show)(struct seq_file *m, struct fsnotify_mark *mark)) { struct fsnotify_group *group = f->private_data; struct fsnotify_mark *mark; fsnotify_group_lock(group); list_for_each_entry(mark, &group->marks_list, g_list) { show(m, mark); if (seq_has_overflowed(m)) break; } fsnotify_group_unlock(group); } #if defined(CONFIG_EXPORTFS) static void show_mark_fhandle(struct seq_file *m, struct inode *inode) { struct { struct file_handle handle; u8 pad[MAX_HANDLE_SZ]; } f; int size, ret, i; f.handle.handle_bytes = sizeof(f.pad); size = f.handle.handle_bytes >> 2; ret = exportfs_encode_fid(inode, (struct fid *)f.handle.f_handle, &size); if ((ret == FILEID_INVALID) || (ret < 0)) { WARN_ONCE(1, "Can't encode file handler for inotify: %d\n", ret); return; } f.handle.handle_type = ret; f.handle.handle_bytes = size * sizeof(u32); seq_printf(m, "fhandle-bytes:%x fhandle-type:%x f_handle:", f.handle.handle_bytes, f.handle.handle_type); for (i = 0; i < f.handle.handle_bytes; i++) seq_printf(m, "%02x", (int)f.handle.f_handle[i]); } #else static void show_mark_fhandle(struct seq_file *m, struct inode *inode) { } #endif #ifdef CONFIG_INOTIFY_USER static void inotify_fdinfo(struct seq_file *m, struct fsnotify_mark *mark) { struct inotify_inode_mark *inode_mark; struct inode *inode; if (mark->connector->type != FSNOTIFY_OBJ_TYPE_INODE) return; inode_mark = container_of(mark, struct inotify_inode_mark, fsn_mark); inode = igrab(fsnotify_conn_inode(mark->connector)); if (inode) { seq_printf(m, "inotify wd:%x ino:%lx sdev:%x mask:%x ignored_mask:0 ", inode_mark->wd, inode->i_ino, inode->i_sb->s_dev, inotify_mark_user_mask(mark)); show_mark_fhandle(m, inode); seq_putc(m, '\n'); iput(inode); } } void inotify_show_fdinfo(struct seq_file *m, struct file *f) { show_fdinfo(m, f, inotify_fdinfo); } #endif /* CONFIG_INOTIFY_USER */ #ifdef CONFIG_FANOTIFY static void fanotify_fdinfo(struct seq_file *m, struct fsnotify_mark *mark) { unsigned int mflags = fanotify_mark_user_flags(mark); struct inode *inode; if (mark->connector->type == FSNOTIFY_OBJ_TYPE_INODE) { inode = igrab(fsnotify_conn_inode(mark->connector)); if (!inode) return; seq_printf(m, "fanotify ino:%lx sdev:%x mflags:%x mask:%x ignored_mask:%x ", inode->i_ino, inode->i_sb->s_dev, mflags, mark->mask, mark->ignore_mask); show_mark_fhandle(m, inode); seq_putc(m, '\n'); iput(inode); } else if (mark->connector->type == FSNOTIFY_OBJ_TYPE_VFSMOUNT) { struct mount *mnt = fsnotify_conn_mount(mark->connector); seq_printf(m, "fanotify mnt_id:%x mflags:%x mask:%x ignored_mask:%x\n", mnt->mnt_id, mflags, mark->mask, mark->ignore_mask); } else if (mark->connector->type == FSNOTIFY_OBJ_TYPE_SB) { struct super_block *sb = fsnotify_conn_sb(mark->connector); seq_printf(m, "fanotify sdev:%x mflags:%x mask:%x ignored_mask:%x\n", sb->s_dev, mflags, mark->mask, mark->ignore_mask); } } void fanotify_show_fdinfo(struct seq_file *m, struct file *f) { struct 
fsnotify_group *group = f->private_data; seq_printf(m, "fanotify flags:%x event-flags:%x\n", group->fanotify_data.flags & FANOTIFY_INIT_FLAGS, group->fanotify_data.f_flags); show_fdinfo(m, f, fanotify_fdinfo); } #endif /* CONFIG_FANOTIFY */ #endif /* CONFIG_INOTIFY_USER || CONFIG_FANOTIFY */ #endif /* CONFIG_PROC_FS */

linux-master
fs/notify/fdinfo.c
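The fdinfo handlers above are what /proc/&lt;pid&gt;/fdinfo/&lt;fd&gt; prints for inotify and fanotify descriptors. A small Linux-only program that exercises the inotify path and dumps the resulting text; it assumes /tmp exists and watches it for modifications, with minimal error handling:

/* Illustrative consumer of the fdinfo output generated by inotify_fdinfo(). */
#include <stdio.h>
#include <sys/inotify.h>

int main(void)
{
    char path[64], line[256];
    FILE *f;
    int fd = inotify_init1(0);

    if (fd < 0 || inotify_add_watch(fd, "/tmp", IN_MODIFY) < 0) {
        perror("inotify");
        return 1;
    }
    snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd);
    f = fopen(path, "r");
    if (!f) {
        perror("fdinfo");
        return 1;
    }
    while (fgets(line, sizeof(line), f))    /* expect an "inotify wd:..." line */
        fputs(line, stdout);
    fclose(f);
    return 0;
}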
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2008 Red Hat, Inc., Eric Paris <[email protected]> */ /* * fsnotify inode mark locking/lifetime/and refcnting * * REFCNT: * The group->recnt and mark->refcnt tell how many "things" in the kernel * currently are referencing the objects. Both kind of objects typically will * live inside the kernel with a refcnt of 2, one for its creation and one for * the reference a group and a mark hold to each other. * If you are holding the appropriate locks, you can take a reference and the * object itself is guaranteed to survive until the reference is dropped. * * LOCKING: * There are 3 locks involved with fsnotify inode marks and they MUST be taken * in order as follows: * * group->mark_mutex * mark->lock * mark->connector->lock * * group->mark_mutex protects the marks_list anchored inside a given group and * each mark is hooked via the g_list. It also protects the groups private * data (i.e group limits). * mark->lock protects the marks attributes like its masks and flags. * Furthermore it protects the access to a reference of the group that the mark * is assigned to as well as the access to a reference of the inode/vfsmount * that is being watched by the mark. * * mark->connector->lock protects the list of marks anchored inside an * inode / vfsmount and each mark is hooked via the i_list. * * A list of notification marks relating to inode / mnt is contained in * fsnotify_mark_connector. That structure is alive as long as there are any * marks in the list and is also protected by fsnotify_mark_srcu. A mark gets * detached from fsnotify_mark_connector when last reference to the mark is * dropped. Thus having mark reference is enough to protect mark->connector * pointer and to make sure fsnotify_mark_connector cannot disappear. Also * because we remove mark from g_list before dropping mark reference associated * with that, any mark found through g_list is guaranteed to have * mark->connector set until we drop group->mark_mutex. * * LIFETIME: * Inode marks survive between when they are added to an inode and when their * refcnt==0. Marks are also protected by fsnotify_mark_srcu. * * The inode mark can be cleared for a number of different reasons including: * - The inode is unlinked for the last time. (fsnotify_inode_remove) * - The inode is being evicted from cache. (fsnotify_inode_delete) * - The fs the inode is on is unmounted. (fsnotify_inode_delete/fsnotify_unmount_inodes) * - Something explicitly requests that it be removed. (fsnotify_destroy_mark) * - The fsnotify_group associated with the mark is going away and all such marks * need to be cleaned up. (fsnotify_clear_marks_by_group) * * This has the very interesting property of being able to run concurrently with * any (or all) other directions. 
*/ #include <linux/fs.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/kthread.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/srcu.h> #include <linux/ratelimit.h> #include <linux/atomic.h> #include <linux/fsnotify_backend.h> #include "fsnotify.h" #define FSNOTIFY_REAPER_DELAY (1) /* 1 jiffy */ struct srcu_struct fsnotify_mark_srcu; struct kmem_cache *fsnotify_mark_connector_cachep; static DEFINE_SPINLOCK(destroy_lock); static LIST_HEAD(destroy_list); static struct fsnotify_mark_connector *connector_destroy_list; static void fsnotify_mark_destroy_workfn(struct work_struct *work); static DECLARE_DELAYED_WORK(reaper_work, fsnotify_mark_destroy_workfn); static void fsnotify_connector_destroy_workfn(struct work_struct *work); static DECLARE_WORK(connector_reaper_work, fsnotify_connector_destroy_workfn); void fsnotify_get_mark(struct fsnotify_mark *mark) { WARN_ON_ONCE(!refcount_read(&mark->refcnt)); refcount_inc(&mark->refcnt); } static __u32 *fsnotify_conn_mask_p(struct fsnotify_mark_connector *conn) { if (conn->type == FSNOTIFY_OBJ_TYPE_INODE) return &fsnotify_conn_inode(conn)->i_fsnotify_mask; else if (conn->type == FSNOTIFY_OBJ_TYPE_VFSMOUNT) return &fsnotify_conn_mount(conn)->mnt_fsnotify_mask; else if (conn->type == FSNOTIFY_OBJ_TYPE_SB) return &fsnotify_conn_sb(conn)->s_fsnotify_mask; return NULL; } __u32 fsnotify_conn_mask(struct fsnotify_mark_connector *conn) { if (WARN_ON(!fsnotify_valid_obj_type(conn->type))) return 0; return *fsnotify_conn_mask_p(conn); } static void fsnotify_get_inode_ref(struct inode *inode) { ihold(inode); atomic_long_inc(&inode->i_sb->s_fsnotify_connectors); } /* * Grab or drop inode reference for the connector if needed. * * When it's time to drop the reference, we only clear the HAS_IREF flag and * return the inode object. fsnotify_drop_object() will be resonsible for doing * iput() outside of spinlocks. This happens when last mark that wanted iref is * detached. */ static struct inode *fsnotify_update_iref(struct fsnotify_mark_connector *conn, bool want_iref) { bool has_iref = conn->flags & FSNOTIFY_CONN_FLAG_HAS_IREF; struct inode *inode = NULL; if (conn->type != FSNOTIFY_OBJ_TYPE_INODE || want_iref == has_iref) return NULL; if (want_iref) { /* Pin inode if any mark wants inode refcount held */ fsnotify_get_inode_ref(fsnotify_conn_inode(conn)); conn->flags |= FSNOTIFY_CONN_FLAG_HAS_IREF; } else { /* Unpin inode after detach of last mark that wanted iref */ inode = fsnotify_conn_inode(conn); conn->flags &= ~FSNOTIFY_CONN_FLAG_HAS_IREF; } return inode; } static void *__fsnotify_recalc_mask(struct fsnotify_mark_connector *conn) { u32 new_mask = 0; bool want_iref = false; struct fsnotify_mark *mark; assert_spin_locked(&conn->lock); /* We can get detached connector here when inode is getting unlinked. */ if (!fsnotify_valid_obj_type(conn->type)) return NULL; hlist_for_each_entry(mark, &conn->list, obj_list) { if (!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) continue; new_mask |= fsnotify_calc_mask(mark); if (conn->type == FSNOTIFY_OBJ_TYPE_INODE && !(mark->flags & FSNOTIFY_MARK_FLAG_NO_IREF)) want_iref = true; } *fsnotify_conn_mask_p(conn) = new_mask; return fsnotify_update_iref(conn, want_iref); } /* * Calculate mask of events for a list of marks. The caller must make sure * connector and connector->obj cannot disappear under us. Callers achieve * this by holding a mark->lock or mark->group->mark_mutex for a mark on this * list. 
*/ void fsnotify_recalc_mask(struct fsnotify_mark_connector *conn) { if (!conn) return; spin_lock(&conn->lock); __fsnotify_recalc_mask(conn); spin_unlock(&conn->lock); if (conn->type == FSNOTIFY_OBJ_TYPE_INODE) __fsnotify_update_child_dentry_flags( fsnotify_conn_inode(conn)); } /* Free all connectors queued for freeing once SRCU period ends */ static void fsnotify_connector_destroy_workfn(struct work_struct *work) { struct fsnotify_mark_connector *conn, *free; spin_lock(&destroy_lock); conn = connector_destroy_list; connector_destroy_list = NULL; spin_unlock(&destroy_lock); synchronize_srcu(&fsnotify_mark_srcu); while (conn) { free = conn; conn = conn->destroy_next; kmem_cache_free(fsnotify_mark_connector_cachep, free); } } static void fsnotify_put_inode_ref(struct inode *inode) { struct super_block *sb = inode->i_sb; iput(inode); if (atomic_long_dec_and_test(&sb->s_fsnotify_connectors)) wake_up_var(&sb->s_fsnotify_connectors); } static void fsnotify_get_sb_connectors(struct fsnotify_mark_connector *conn) { struct super_block *sb = fsnotify_connector_sb(conn); if (sb) atomic_long_inc(&sb->s_fsnotify_connectors); } static void fsnotify_put_sb_connectors(struct fsnotify_mark_connector *conn) { struct super_block *sb = fsnotify_connector_sb(conn); if (sb && atomic_long_dec_and_test(&sb->s_fsnotify_connectors)) wake_up_var(&sb->s_fsnotify_connectors); } static void *fsnotify_detach_connector_from_object( struct fsnotify_mark_connector *conn, unsigned int *type) { struct inode *inode = NULL; *type = conn->type; if (conn->type == FSNOTIFY_OBJ_TYPE_DETACHED) return NULL; if (conn->type == FSNOTIFY_OBJ_TYPE_INODE) { inode = fsnotify_conn_inode(conn); inode->i_fsnotify_mask = 0; /* Unpin inode when detaching from connector */ if (!(conn->flags & FSNOTIFY_CONN_FLAG_HAS_IREF)) inode = NULL; } else if (conn->type == FSNOTIFY_OBJ_TYPE_VFSMOUNT) { fsnotify_conn_mount(conn)->mnt_fsnotify_mask = 0; } else if (conn->type == FSNOTIFY_OBJ_TYPE_SB) { fsnotify_conn_sb(conn)->s_fsnotify_mask = 0; } fsnotify_put_sb_connectors(conn); rcu_assign_pointer(*(conn->obj), NULL); conn->obj = NULL; conn->type = FSNOTIFY_OBJ_TYPE_DETACHED; return inode; } static void fsnotify_final_mark_destroy(struct fsnotify_mark *mark) { struct fsnotify_group *group = mark->group; if (WARN_ON_ONCE(!group)) return; group->ops->free_mark(mark); fsnotify_put_group(group); } /* Drop object reference originally held by a connector */ static void fsnotify_drop_object(unsigned int type, void *objp) { if (!objp) return; /* Currently only inode references are passed to be dropped */ if (WARN_ON_ONCE(type != FSNOTIFY_OBJ_TYPE_INODE)) return; fsnotify_put_inode_ref(objp); } void fsnotify_put_mark(struct fsnotify_mark *mark) { struct fsnotify_mark_connector *conn = READ_ONCE(mark->connector); void *objp = NULL; unsigned int type = FSNOTIFY_OBJ_TYPE_DETACHED; bool free_conn = false; /* Catch marks that were actually never attached to object */ if (!conn) { if (refcount_dec_and_test(&mark->refcnt)) fsnotify_final_mark_destroy(mark); return; } /* * We have to be careful so that traversals of obj_list under lock can * safely grab mark reference. 
*/ if (!refcount_dec_and_lock(&mark->refcnt, &conn->lock)) return; hlist_del_init_rcu(&mark->obj_list); if (hlist_empty(&conn->list)) { objp = fsnotify_detach_connector_from_object(conn, &type); free_conn = true; } else { objp = __fsnotify_recalc_mask(conn); type = conn->type; } WRITE_ONCE(mark->connector, NULL); spin_unlock(&conn->lock); fsnotify_drop_object(type, objp); if (free_conn) { spin_lock(&destroy_lock); conn->destroy_next = connector_destroy_list; connector_destroy_list = conn; spin_unlock(&destroy_lock); queue_work(system_unbound_wq, &connector_reaper_work); } /* * Note that we didn't update flags telling whether inode cares about * what's happening with children. We update these flags from * __fsnotify_parent() lazily when next event happens on one of our * children. */ spin_lock(&destroy_lock); list_add(&mark->g_list, &destroy_list); spin_unlock(&destroy_lock); queue_delayed_work(system_unbound_wq, &reaper_work, FSNOTIFY_REAPER_DELAY); } EXPORT_SYMBOL_GPL(fsnotify_put_mark); /* * Get mark reference when we found the mark via lockless traversal of object * list. Mark can be already removed from the list by now and on its way to be * destroyed once SRCU period ends. * * Also pin the group so it doesn't disappear under us. */ static bool fsnotify_get_mark_safe(struct fsnotify_mark *mark) { if (!mark) return true; if (refcount_inc_not_zero(&mark->refcnt)) { spin_lock(&mark->lock); if (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) { /* mark is attached, group is still alive then */ atomic_inc(&mark->group->user_waits); spin_unlock(&mark->lock); return true; } spin_unlock(&mark->lock); fsnotify_put_mark(mark); } return false; } /* * Puts marks and wakes up group destruction if necessary. * * Pairs with fsnotify_get_mark_safe() */ static void fsnotify_put_mark_wake(struct fsnotify_mark *mark) { if (mark) { struct fsnotify_group *group = mark->group; fsnotify_put_mark(mark); /* * We abuse notification_waitq on group shutdown for waiting for * all marks pinned when waiting for userspace. */ if (atomic_dec_and_test(&group->user_waits) && group->shutdown) wake_up(&group->notification_waitq); } } bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info) __releases(&fsnotify_mark_srcu) { int type; fsnotify_foreach_iter_type(type) { /* This can fail if mark is being removed */ if (!fsnotify_get_mark_safe(iter_info->marks[type])) { __release(&fsnotify_mark_srcu); goto fail; } } /* * Now that both marks are pinned by refcount in the inode / vfsmount * lists, we can drop SRCU lock, and safely resume the list iteration * once userspace returns. */ srcu_read_unlock(&fsnotify_mark_srcu, iter_info->srcu_idx); return true; fail: for (type--; type >= 0; type--) fsnotify_put_mark_wake(iter_info->marks[type]); return false; } void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info) __acquires(&fsnotify_mark_srcu) { int type; iter_info->srcu_idx = srcu_read_lock(&fsnotify_mark_srcu); fsnotify_foreach_iter_type(type) fsnotify_put_mark_wake(iter_info->marks[type]); } /* * Mark mark as detached, remove it from group list. Mark still stays in object * list until its last reference is dropped. Note that we rely on mark being * removed from group list before corresponding reference to it is dropped. In * particular we rely on mark->connector being valid while we hold * group->mark_mutex if we found the mark through g_list. * * Must be called with group->mark_mutex held. The caller must either hold * reference to the mark or be protected by fsnotify_mark_srcu. 
*/ void fsnotify_detach_mark(struct fsnotify_mark *mark) { fsnotify_group_assert_locked(mark->group); WARN_ON_ONCE(!srcu_read_lock_held(&fsnotify_mark_srcu) && refcount_read(&mark->refcnt) < 1 + !!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED)); spin_lock(&mark->lock); /* something else already called this function on this mark */ if (!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) { spin_unlock(&mark->lock); return; } mark->flags &= ~FSNOTIFY_MARK_FLAG_ATTACHED; list_del_init(&mark->g_list); spin_unlock(&mark->lock); /* Drop mark reference acquired in fsnotify_add_mark_locked() */ fsnotify_put_mark(mark); } /* * Free fsnotify mark. The mark is actually only marked as being freed. The * freeing is actually happening only once last reference to the mark is * dropped from a workqueue which first waits for srcu period end. * * Caller must have a reference to the mark or be protected by * fsnotify_mark_srcu. */ void fsnotify_free_mark(struct fsnotify_mark *mark) { struct fsnotify_group *group = mark->group; spin_lock(&mark->lock); /* something else already called this function on this mark */ if (!(mark->flags & FSNOTIFY_MARK_FLAG_ALIVE)) { spin_unlock(&mark->lock); return; } mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE; spin_unlock(&mark->lock); /* * Some groups like to know that marks are being freed. This is a * callback to the group function to let it know that this mark * is being freed. */ if (group->ops->freeing_mark) group->ops->freeing_mark(mark, group); } void fsnotify_destroy_mark(struct fsnotify_mark *mark, struct fsnotify_group *group) { fsnotify_group_lock(group); fsnotify_detach_mark(mark); fsnotify_group_unlock(group); fsnotify_free_mark(mark); } EXPORT_SYMBOL_GPL(fsnotify_destroy_mark); /* * Sorting function for lists of fsnotify marks. * * Fanotify supports different notification classes (reflected as priority of * notification group). Events shall be passed to notification groups in * decreasing priority order. To achieve this marks in notification lists for * inodes and vfsmounts are sorted so that priorities of corresponding groups * are descending. * * Furthermore correct handling of the ignore mask requires processing inode * and vfsmount marks of each group together. Using the group address as * further sort criterion provides a unique sorting order and thus we can * merge inode and vfsmount lists of marks in linear time and find groups * present in both lists. * * A return value of 1 signifies that b has priority over a. * A return value of 0 signifies that the two marks have to be handled together. * A return value of -1 signifies that a has priority over b. 
*/ int fsnotify_compare_groups(struct fsnotify_group *a, struct fsnotify_group *b) { if (a == b) return 0; if (!a) return 1; if (!b) return -1; if (a->priority < b->priority) return 1; if (a->priority > b->priority) return -1; if (a < b) return 1; return -1; } static int fsnotify_attach_connector_to_object(fsnotify_connp_t *connp, unsigned int obj_type, __kernel_fsid_t *fsid) { struct fsnotify_mark_connector *conn; conn = kmem_cache_alloc(fsnotify_mark_connector_cachep, GFP_KERNEL); if (!conn) return -ENOMEM; spin_lock_init(&conn->lock); INIT_HLIST_HEAD(&conn->list); conn->flags = 0; conn->type = obj_type; conn->obj = connp; /* Cache fsid of filesystem containing the object */ if (fsid) { conn->fsid = *fsid; conn->flags = FSNOTIFY_CONN_FLAG_HAS_FSID; } else { conn->fsid.val[0] = conn->fsid.val[1] = 0; conn->flags = 0; } fsnotify_get_sb_connectors(conn); /* * cmpxchg() provides the barrier so that readers of *connp can see * only initialized structure */ if (cmpxchg(connp, NULL, conn)) { /* Someone else created list structure for us */ fsnotify_put_sb_connectors(conn); kmem_cache_free(fsnotify_mark_connector_cachep, conn); } return 0; } /* * Get mark connector, make sure it is alive and return with its lock held. * This is for users that get connector pointer from inode or mount. Users that * hold reference to a mark on the list may directly lock connector->lock as * they are sure list cannot go away under them. */ static struct fsnotify_mark_connector *fsnotify_grab_connector( fsnotify_connp_t *connp) { struct fsnotify_mark_connector *conn; int idx; idx = srcu_read_lock(&fsnotify_mark_srcu); conn = srcu_dereference(*connp, &fsnotify_mark_srcu); if (!conn) goto out; spin_lock(&conn->lock); if (conn->type == FSNOTIFY_OBJ_TYPE_DETACHED) { spin_unlock(&conn->lock); srcu_read_unlock(&fsnotify_mark_srcu, idx); return NULL; } out: srcu_read_unlock(&fsnotify_mark_srcu, idx); return conn; } /* * Add mark into proper place in given list of marks. These marks may be used * for the fsnotify backend to determine which event types should be delivered * to which group and for which inodes. These marks are ordered according to * priority, highest number first, and then by the group's location in memory. */ static int fsnotify_add_mark_list(struct fsnotify_mark *mark, fsnotify_connp_t *connp, unsigned int obj_type, int add_flags, __kernel_fsid_t *fsid) { struct fsnotify_mark *lmark, *last = NULL; struct fsnotify_mark_connector *conn; int cmp; int err = 0; if (WARN_ON(!fsnotify_valid_obj_type(obj_type))) return -EINVAL; /* Backend is expected to check for zero fsid (e.g. tmpfs) */ if (fsid && WARN_ON_ONCE(!fsid->val[0] && !fsid->val[1])) return -ENODEV; restart: spin_lock(&mark->lock); conn = fsnotify_grab_connector(connp); if (!conn) { spin_unlock(&mark->lock); err = fsnotify_attach_connector_to_object(connp, obj_type, fsid); if (err) return err; goto restart; } else if (fsid && !(conn->flags & FSNOTIFY_CONN_FLAG_HAS_FSID)) { conn->fsid = *fsid; /* Pairs with smp_rmb() in fanotify_get_fsid() */ smp_wmb(); conn->flags |= FSNOTIFY_CONN_FLAG_HAS_FSID; } else if (fsid && (conn->flags & FSNOTIFY_CONN_FLAG_HAS_FSID) && (fsid->val[0] != conn->fsid.val[0] || fsid->val[1] != conn->fsid.val[1])) { /* * Backend is expected to check for non uniform fsid * (e.g. btrfs), but maybe we missed something? * Only allow setting conn->fsid once to non zero fsid. * inotify and non-fid fanotify groups do not set nor test * conn->fsid. 
*/ pr_warn_ratelimited("%s: fsid mismatch on object of type %u: " "%x.%x != %x.%x\n", __func__, conn->type, fsid->val[0], fsid->val[1], conn->fsid.val[0], conn->fsid.val[1]); err = -EXDEV; goto out_err; } /* is mark the first mark? */ if (hlist_empty(&conn->list)) { hlist_add_head_rcu(&mark->obj_list, &conn->list); goto added; } /* should mark be in the middle of the current list? */ hlist_for_each_entry(lmark, &conn->list, obj_list) { last = lmark; if ((lmark->group == mark->group) && (lmark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) && !(mark->group->flags & FSNOTIFY_GROUP_DUPS)) { err = -EEXIST; goto out_err; } cmp = fsnotify_compare_groups(lmark->group, mark->group); if (cmp >= 0) { hlist_add_before_rcu(&mark->obj_list, &lmark->obj_list); goto added; } } BUG_ON(last == NULL); /* mark should be the last entry. last is the current last entry */ hlist_add_behind_rcu(&mark->obj_list, &last->obj_list); added: /* * Since connector is attached to object using cmpxchg() we are * guaranteed that connector initialization is fully visible by anyone * seeing mark->connector set. */ WRITE_ONCE(mark->connector, conn); out_err: spin_unlock(&conn->lock); spin_unlock(&mark->lock); return err; } /* * Attach an initialized mark to a given group and fs object. * These marks may be used for the fsnotify backend to determine which * event types should be delivered to which group. */ int fsnotify_add_mark_locked(struct fsnotify_mark *mark, fsnotify_connp_t *connp, unsigned int obj_type, int add_flags, __kernel_fsid_t *fsid) { struct fsnotify_group *group = mark->group; int ret = 0; fsnotify_group_assert_locked(group); /* * LOCKING ORDER!!!! * group->mark_mutex * mark->lock * mark->connector->lock */ spin_lock(&mark->lock); mark->flags |= FSNOTIFY_MARK_FLAG_ALIVE | FSNOTIFY_MARK_FLAG_ATTACHED; list_add(&mark->g_list, &group->marks_list); fsnotify_get_mark(mark); /* for g_list */ spin_unlock(&mark->lock); ret = fsnotify_add_mark_list(mark, connp, obj_type, add_flags, fsid); if (ret) goto err; fsnotify_recalc_mask(mark->connector); return ret; err: spin_lock(&mark->lock); mark->flags &= ~(FSNOTIFY_MARK_FLAG_ALIVE | FSNOTIFY_MARK_FLAG_ATTACHED); list_del_init(&mark->g_list); spin_unlock(&mark->lock); fsnotify_put_mark(mark); return ret; } int fsnotify_add_mark(struct fsnotify_mark *mark, fsnotify_connp_t *connp, unsigned int obj_type, int add_flags, __kernel_fsid_t *fsid) { int ret; struct fsnotify_group *group = mark->group; fsnotify_group_lock(group); ret = fsnotify_add_mark_locked(mark, connp, obj_type, add_flags, fsid); fsnotify_group_unlock(group); return ret; } EXPORT_SYMBOL_GPL(fsnotify_add_mark); /* * Given a list of marks, find the mark associated with given group. If found * take a reference to that mark and return it, else return NULL. 
*/ struct fsnotify_mark *fsnotify_find_mark(fsnotify_connp_t *connp, struct fsnotify_group *group) { struct fsnotify_mark_connector *conn; struct fsnotify_mark *mark; conn = fsnotify_grab_connector(connp); if (!conn) return NULL; hlist_for_each_entry(mark, &conn->list, obj_list) { if (mark->group == group && (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) { fsnotify_get_mark(mark); spin_unlock(&conn->lock); return mark; } } spin_unlock(&conn->lock); return NULL; } EXPORT_SYMBOL_GPL(fsnotify_find_mark); /* Clear any marks in a group with given type mask */ void fsnotify_clear_marks_by_group(struct fsnotify_group *group, unsigned int obj_type) { struct fsnotify_mark *lmark, *mark; LIST_HEAD(to_free); struct list_head *head = &to_free; /* Skip selection step if we want to clear all marks. */ if (obj_type == FSNOTIFY_OBJ_TYPE_ANY) { head = &group->marks_list; goto clear; } /* * We have to be really careful here. Anytime we drop mark_mutex, e.g. * fsnotify_clear_marks_by_inode() can come and free marks. Even in our * to_free list so we have to use mark_mutex even when accessing that * list. And freeing mark requires us to drop mark_mutex. So we can * reliably free only the first mark in the list. That's why we first * move marks to free to to_free list in one go and then free marks in * to_free list one by one. */ fsnotify_group_lock(group); list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) { if (mark->connector->type == obj_type) list_move(&mark->g_list, &to_free); } fsnotify_group_unlock(group); clear: while (1) { fsnotify_group_lock(group); if (list_empty(head)) { fsnotify_group_unlock(group); break; } mark = list_first_entry(head, struct fsnotify_mark, g_list); fsnotify_get_mark(mark); fsnotify_detach_mark(mark); fsnotify_group_unlock(group); fsnotify_free_mark(mark); fsnotify_put_mark(mark); } } /* Destroy all marks attached to an object via connector */ void fsnotify_destroy_marks(fsnotify_connp_t *connp) { struct fsnotify_mark_connector *conn; struct fsnotify_mark *mark, *old_mark = NULL; void *objp; unsigned int type; conn = fsnotify_grab_connector(connp); if (!conn) return; /* * We have to be careful since we can race with e.g. * fsnotify_clear_marks_by_group() and once we drop the conn->lock, the * list can get modified. However we are holding mark reference and * thus our mark cannot be removed from obj_list so we can continue * iteration after regaining conn->lock. */ hlist_for_each_entry(mark, &conn->list, obj_list) { fsnotify_get_mark(mark); spin_unlock(&conn->lock); if (old_mark) fsnotify_put_mark(old_mark); old_mark = mark; fsnotify_destroy_mark(mark, mark->group); spin_lock(&conn->lock); } /* * Detach list from object now so that we don't pin inode until all * mark references get dropped. It would lead to strange results such * as delaying inode deletion or blocking unmount. */ objp = fsnotify_detach_connector_from_object(conn, &type); spin_unlock(&conn->lock); if (old_mark) fsnotify_put_mark(old_mark); fsnotify_drop_object(type, objp); } /* * Nothing fancy, just initialize lists and locks and counters. */ void fsnotify_init_mark(struct fsnotify_mark *mark, struct fsnotify_group *group) { memset(mark, 0, sizeof(*mark)); spin_lock_init(&mark->lock); refcount_set(&mark->refcnt, 1); fsnotify_get_group(group); mark->group = group; WRITE_ONCE(mark->connector, NULL); } EXPORT_SYMBOL_GPL(fsnotify_init_mark); /* * Destroy all marks in destroy_list, waits for SRCU period to finish before * actually freeing marks. 
*/ static void fsnotify_mark_destroy_workfn(struct work_struct *work) { struct fsnotify_mark *mark, *next; struct list_head private_destroy_list; spin_lock(&destroy_lock); /* exchange the list head */ list_replace_init(&destroy_list, &private_destroy_list); spin_unlock(&destroy_lock); synchronize_srcu(&fsnotify_mark_srcu); list_for_each_entry_safe(mark, next, &private_destroy_list, g_list) { list_del_init(&mark->g_list); fsnotify_final_mark_destroy(mark); } } /* Wait for all marks queued for destruction to be actually destroyed */ void fsnotify_wait_marks_destroyed(void) { flush_delayed_work(&reaper_work); } EXPORT_SYMBOL_GPL(fsnotify_wait_marks_destroyed);
linux-master
fs/notify/mark.c
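The three-way convention of fsnotify_compare_groups() above (1: b wins, 0: handle the marks together, -1: a wins) doubles as an ordinary sort comparator, which is what keeps object mark lists in descending group-priority order with the group address as a stable tiebreaker. The following standalone userspace sketch mirrors that rule outside the kernel; struct mock_group, compare_groups() and the qsort() driver are illustrative stand-ins of my own, not kernel types or APIs.

/* Standalone illustration of the mark-list ordering rule; not kernel code. */
#include <stdio.h>
#include <stdlib.h>

struct mock_group {             /* illustrative stand-in for struct fsnotify_group */
        int priority;           /* higher number => events delivered earlier */
};

/* Mirrors the kernel comparator: 1 means b wins, -1 means a wins, 0 means equal. */
static int compare_groups(const struct mock_group *a, const struct mock_group *b)
{
        if (a == b)
                return 0;
        if (!a)
                return 1;
        if (!b)
                return -1;
        if (a->priority < b->priority)
                return 1;
        if (a->priority > b->priority)
                return -1;
        /* Equal priority: the pointer value is an arbitrary but stable tiebreaker. */
        return (a < b) ? 1 : -1;
}

/* The return convention matches qsort(): positive means "a sorts after b". */
static int cmp_groups_qsort(const void *pa, const void *pb)
{
        const struct mock_group *a = *(const struct mock_group *const *)pa;
        const struct mock_group *b = *(const struct mock_group *const *)pb;

        return compare_groups(a, b);
}

int main(void)
{
        struct mock_group g[3] = { { .priority = 0 }, { .priority = 2 }, { .priority = 1 } };
        struct mock_group *list[] = { &g[0], &g[1], &g[2] };

        qsort(list, 3, sizeof(list[0]), cmp_groups_qsort);

        /* Prints "2 1 0": the highest-priority group would be notified first. */
        for (int i = 0; i < 3; i++)
                printf("%d ", list[i]->priority);
        printf("\n");
        return 0;
}

Because the comparator returns 0 only for the very same group, inode and vfsmount mark lists can be merged in linear time while still spotting groups present in both, as the comment preceding the kernel function notes.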
// SPDX-License-Identifier: GPL-2.0-or-later /* * fs/inotify_user.c - inotify support for userspace * * Authors: * John McCutchan <[email protected]> * Robert Love <[email protected]> * * Copyright (C) 2005 John McCutchan * Copyright 2006 Hewlett-Packard Development Company, L.P. * * Copyright (C) 2009 Eric Paris <Red Hat Inc> * inotify was largely rewriten to make use of the fsnotify infrastructure */ #include <linux/dcache.h> /* d_unlinked */ #include <linux/fs.h> /* struct inode */ #include <linux/fsnotify_backend.h> #include <linux/inotify.h> #include <linux/path.h> /* struct path */ #include <linux/slab.h> /* kmem_* */ #include <linux/types.h> #include <linux/sched.h> #include <linux/sched/user.h> #include <linux/sched/mm.h> #include "inotify.h" /* * Check if 2 events contain the same information. */ static bool event_compare(struct fsnotify_event *old_fsn, struct fsnotify_event *new_fsn) { struct inotify_event_info *old, *new; old = INOTIFY_E(old_fsn); new = INOTIFY_E(new_fsn); if (old->mask & FS_IN_IGNORED) return false; if ((old->mask == new->mask) && (old->wd == new->wd) && (old->name_len == new->name_len) && (!old->name_len || !strcmp(old->name, new->name))) return true; return false; } static int inotify_merge(struct fsnotify_group *group, struct fsnotify_event *event) { struct list_head *list = &group->notification_list; struct fsnotify_event *last_event; last_event = list_entry(list->prev, struct fsnotify_event, list); return event_compare(last_event, event); } int inotify_handle_inode_event(struct fsnotify_mark *inode_mark, u32 mask, struct inode *inode, struct inode *dir, const struct qstr *name, u32 cookie) { struct inotify_inode_mark *i_mark; struct inotify_event_info *event; struct fsnotify_event *fsn_event; struct fsnotify_group *group = inode_mark->group; int ret; int len = 0, wd; int alloc_len = sizeof(struct inotify_event_info); struct mem_cgroup *old_memcg; if (name) { len = name->len; alloc_len += len + 1; } pr_debug("%s: group=%p mark=%p mask=%x\n", __func__, group, inode_mark, mask); i_mark = container_of(inode_mark, struct inotify_inode_mark, fsn_mark); /* * We can be racing with mark being detached. Don't report event with * invalid wd. */ wd = READ_ONCE(i_mark->wd); if (wd == -1) return 0; /* * Whoever is interested in the event, pays for the allocation. Do not * trigger OOM killer in the target monitoring memcg as it may have * security repercussion. */ old_memcg = set_active_memcg(group->memcg); event = kmalloc(alloc_len, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL); set_active_memcg(old_memcg); if (unlikely(!event)) { /* * Treat lost event due to ENOMEM the same way as queue * overflow to let userspace know event was lost. */ fsnotify_queue_overflow(group); return -ENOMEM; } /* * We now report FS_ISDIR flag with MOVE_SELF and DELETE_SELF events * for fanotify. inotify never reported IN_ISDIR with those events. * It looks like an oversight, but to avoid the risk of breaking * existing inotify programs, mask the flag out from those events. */ if (mask & (IN_MOVE_SELF | IN_DELETE_SELF)) mask &= ~IN_ISDIR; fsn_event = &event->fse; fsnotify_init_event(fsn_event); event->mask = mask; event->wd = wd; event->sync_cookie = cookie; event->name_len = len; if (len) strcpy(event->name, name->name); ret = fsnotify_add_event(group, fsn_event, inotify_merge); if (ret) { /* Our event wasn't used in the end. Free it. 
*/ fsnotify_destroy_event(group, fsn_event); } if (inode_mark->flags & FSNOTIFY_MARK_FLAG_IN_ONESHOT) fsnotify_destroy_mark(inode_mark, group); return 0; } static void inotify_freeing_mark(struct fsnotify_mark *fsn_mark, struct fsnotify_group *group) { inotify_ignored_and_remove_idr(fsn_mark, group); } /* * This is NEVER supposed to be called. Inotify marks should either have been * removed from the idr when the watch was removed or in the * fsnotify_destroy_mark_by_group() call when the inotify instance was being * torn down. This is only called if the idr is about to be freed but there * are still marks in it. */ static int idr_callback(int id, void *p, void *data) { struct fsnotify_mark *fsn_mark; struct inotify_inode_mark *i_mark; static bool warned = false; if (warned) return 0; warned = true; fsn_mark = p; i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark); WARN(1, "inotify closing but id=%d for fsn_mark=%p in group=%p still in " "idr. Probably leaking memory\n", id, p, data); /* * I'm taking the liberty of assuming that the mark in question is a * valid address and I'm dereferencing it. This might help to figure * out why we got here and the panic is no worse than the original * BUG() that was here. */ if (fsn_mark) printk(KERN_WARNING "fsn_mark->group=%p wd=%d\n", fsn_mark->group, i_mark->wd); return 0; } static void inotify_free_group_priv(struct fsnotify_group *group) { /* ideally the idr is empty and we won't hit the BUG in the callback */ idr_for_each(&group->inotify_data.idr, idr_callback, group); idr_destroy(&group->inotify_data.idr); if (group->inotify_data.ucounts) dec_inotify_instances(group->inotify_data.ucounts); } static void inotify_free_event(struct fsnotify_group *group, struct fsnotify_event *fsn_event) { kfree(INOTIFY_E(fsn_event)); } /* ding dong the mark is dead */ static void inotify_free_mark(struct fsnotify_mark *fsn_mark) { struct inotify_inode_mark *i_mark; i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark); kmem_cache_free(inotify_inode_mark_cachep, i_mark); } const struct fsnotify_ops inotify_fsnotify_ops = { .handle_inode_event = inotify_handle_inode_event, .free_group_priv = inotify_free_group_priv, .free_event = inotify_free_event, .freeing_mark = inotify_freeing_mark, .free_mark = inotify_free_mark, };
linux-master
fs/notify/inotify/inotify_fsnotify.c
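The merge path above (inotify_merge() comparing a new event against the tail of the notification queue via event_compare()) means that identical back-to-back events collapse into one record as long as userspace has not drained the queue in between. A small hypothetical userspace program can make that observable; the temporary file location and the IN_MODIFY mask below are arbitrary choices for illustration, and error handling is trimmed to keep the sketch short.

/* Rough userspace sketch of inotify event coalescing; illustration only. */
#define _GNU_SOURCE
#include <sys/inotify.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
        char path[] = "/tmp/inotify-merge-XXXXXX";
        int tfd = mkstemp(path);        /* file to watch (placeholder location) */
        int ifd = inotify_init1(IN_CLOEXEC);
        char buf[4096] __attribute__((aligned(__alignof__(struct inotify_event))));
        ssize_t len;
        int events = 0;

        if (tfd < 0 || ifd < 0)
                return 1;
        if (inotify_add_watch(ifd, path, IN_MODIFY) < 0)
                return 1;

        /* Two identical IN_MODIFY events are generated before we read anything,
         * so the second one can be merged with the first in the kernel queue. */
        write(tfd, "a", 1);
        write(tfd, "b", 1);

        len = read(ifd, buf, sizeof(buf));
        if (len <= 0)
                return 1;
        for (char *p = buf; p < buf + len; ) {
                const struct inotify_event *ev = (const struct inotify_event *)p;

                if (ev->mask & IN_MODIFY)
                        events++;
                p += sizeof(*ev) + ev->len;
        }

        /* Typically prints 1, not 2, because of inotify_merge(). */
        printf("IN_MODIFY events read: %d\n", events);

        close(ifd);
        close(tfd);
        unlink(path);
        return 0;
}

On an otherwise idle file this typically reports a single IN_MODIFY event for the two writes, since the second event matched the queued one on mask, wd and (empty) name.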
// SPDX-License-Identifier: GPL-2.0-or-later /* * fs/inotify_user.c - inotify support for userspace * * Authors: * John McCutchan <[email protected]> * Robert Love <[email protected]> * * Copyright (C) 2005 John McCutchan * Copyright 2006 Hewlett-Packard Development Company, L.P. * * Copyright (C) 2009 Eric Paris <Red Hat Inc> * inotify was largely rewriten to make use of the fsnotify infrastructure */ #include <linux/file.h> #include <linux/fs.h> /* struct inode */ #include <linux/fsnotify_backend.h> #include <linux/idr.h> #include <linux/init.h> /* fs_initcall */ #include <linux/inotify.h> #include <linux/kernel.h> /* roundup() */ #include <linux/namei.h> /* LOOKUP_FOLLOW */ #include <linux/sched/signal.h> #include <linux/slab.h> /* struct kmem_cache */ #include <linux/syscalls.h> #include <linux/types.h> #include <linux/anon_inodes.h> #include <linux/uaccess.h> #include <linux/poll.h> #include <linux/wait.h> #include <linux/memcontrol.h> #include <linux/security.h> #include "inotify.h" #include "../fdinfo.h" #include <asm/ioctls.h> /* * An inotify watch requires allocating an inotify_inode_mark structure as * well as pinning the watched inode. Doubling the size of a VFS inode * should be more than enough to cover the additional filesystem inode * size increase. */ #define INOTIFY_WATCH_COST (sizeof(struct inotify_inode_mark) + \ 2 * sizeof(struct inode)) /* configurable via /proc/sys/fs/inotify/ */ static int inotify_max_queued_events __read_mostly; struct kmem_cache *inotify_inode_mark_cachep __read_mostly; #ifdef CONFIG_SYSCTL #include <linux/sysctl.h> static long it_zero = 0; static long it_int_max = INT_MAX; static struct ctl_table inotify_table[] = { { .procname = "max_user_instances", .data = &init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES], .maxlen = sizeof(long), .mode = 0644, .proc_handler = proc_doulongvec_minmax, .extra1 = &it_zero, .extra2 = &it_int_max, }, { .procname = "max_user_watches", .data = &init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES], .maxlen = sizeof(long), .mode = 0644, .proc_handler = proc_doulongvec_minmax, .extra1 = &it_zero, .extra2 = &it_int_max, }, { .procname = "max_queued_events", .data = &inotify_max_queued_events, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = SYSCTL_ZERO }, { } }; static void __init inotify_sysctls_init(void) { register_sysctl("fs/inotify", inotify_table); } #else #define inotify_sysctls_init() do { } while (0) #endif /* CONFIG_SYSCTL */ static inline __u32 inotify_arg_to_mask(struct inode *inode, u32 arg) { __u32 mask; /* * Everything should receive events when the inode is unmounted. * All directories care about children. 
*/ mask = (FS_UNMOUNT); if (S_ISDIR(inode->i_mode)) mask |= FS_EVENT_ON_CHILD; /* mask off the flags used to open the fd */ mask |= (arg & INOTIFY_USER_MASK); return mask; } #define INOTIFY_MARK_FLAGS \ (FSNOTIFY_MARK_FLAG_EXCL_UNLINK | FSNOTIFY_MARK_FLAG_IN_ONESHOT) static inline unsigned int inotify_arg_to_flags(u32 arg) { unsigned int flags = 0; if (arg & IN_EXCL_UNLINK) flags |= FSNOTIFY_MARK_FLAG_EXCL_UNLINK; if (arg & IN_ONESHOT) flags |= FSNOTIFY_MARK_FLAG_IN_ONESHOT; return flags; } static inline u32 inotify_mask_to_arg(__u32 mask) { return mask & (IN_ALL_EVENTS | IN_ISDIR | IN_UNMOUNT | IN_IGNORED | IN_Q_OVERFLOW); } /* inotify userspace file descriptor functions */ static __poll_t inotify_poll(struct file *file, poll_table *wait) { struct fsnotify_group *group = file->private_data; __poll_t ret = 0; poll_wait(file, &group->notification_waitq, wait); spin_lock(&group->notification_lock); if (!fsnotify_notify_queue_is_empty(group)) ret = EPOLLIN | EPOLLRDNORM; spin_unlock(&group->notification_lock); return ret; } static int round_event_name_len(struct fsnotify_event *fsn_event) { struct inotify_event_info *event; event = INOTIFY_E(fsn_event); if (!event->name_len) return 0; return roundup(event->name_len + 1, sizeof(struct inotify_event)); } /* * Get an inotify_kernel_event if one exists and is small * enough to fit in "count". Return an error pointer if * not large enough. * * Called with the group->notification_lock held. */ static struct fsnotify_event *get_one_event(struct fsnotify_group *group, size_t count) { size_t event_size = sizeof(struct inotify_event); struct fsnotify_event *event; event = fsnotify_peek_first_event(group); if (!event) return NULL; pr_debug("%s: group=%p event=%p\n", __func__, group, event); event_size += round_event_name_len(event); if (event_size > count) return ERR_PTR(-EINVAL); /* held the notification_lock the whole time, so this is the * same event we peeked above */ fsnotify_remove_first_event(group); return event; } /* * Copy an event to user space, returning how much we copied. * * We already checked that the event size is smaller than the * buffer we had in "get_one_event()" above. */ static ssize_t copy_event_to_user(struct fsnotify_group *group, struct fsnotify_event *fsn_event, char __user *buf) { struct inotify_event inotify_event; struct inotify_event_info *event; size_t event_size = sizeof(struct inotify_event); size_t name_len; size_t pad_name_len; pr_debug("%s: group=%p event=%p\n", __func__, group, fsn_event); event = INOTIFY_E(fsn_event); name_len = event->name_len; /* * round up name length so it is a multiple of event_size * plus an extra byte for the terminating '\0'. */ pad_name_len = round_event_name_len(fsn_event); inotify_event.len = pad_name_len; inotify_event.mask = inotify_mask_to_arg(event->mask); inotify_event.wd = event->wd; inotify_event.cookie = event->sync_cookie; /* send the main event */ if (copy_to_user(buf, &inotify_event, event_size)) return -EFAULT; buf += event_size; /* * fsnotify only stores the pathname, so here we have to send the pathname * and then pad that pathname out to a multiple of sizeof(inotify_event) * with zeros. 
*/ if (pad_name_len) { /* copy the path name */ if (copy_to_user(buf, event->name, name_len)) return -EFAULT; buf += name_len; /* fill userspace with 0's */ if (clear_user(buf, pad_name_len - name_len)) return -EFAULT; event_size += pad_name_len; } return event_size; } static ssize_t inotify_read(struct file *file, char __user *buf, size_t count, loff_t *pos) { struct fsnotify_group *group; struct fsnotify_event *kevent; char __user *start; int ret; DEFINE_WAIT_FUNC(wait, woken_wake_function); start = buf; group = file->private_data; add_wait_queue(&group->notification_waitq, &wait); while (1) { spin_lock(&group->notification_lock); kevent = get_one_event(group, count); spin_unlock(&group->notification_lock); pr_debug("%s: group=%p kevent=%p\n", __func__, group, kevent); if (kevent) { ret = PTR_ERR(kevent); if (IS_ERR(kevent)) break; ret = copy_event_to_user(group, kevent, buf); fsnotify_destroy_event(group, kevent); if (ret < 0) break; buf += ret; count -= ret; continue; } ret = -EAGAIN; if (file->f_flags & O_NONBLOCK) break; ret = -ERESTARTSYS; if (signal_pending(current)) break; if (start != buf) break; wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); } remove_wait_queue(&group->notification_waitq, &wait); if (start != buf && ret != -EFAULT) ret = buf - start; return ret; } static int inotify_release(struct inode *ignored, struct file *file) { struct fsnotify_group *group = file->private_data; pr_debug("%s: group=%p\n", __func__, group); /* free this group, matching get was inotify_init->fsnotify_obtain_group */ fsnotify_destroy_group(group); return 0; } static long inotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct fsnotify_group *group; struct fsnotify_event *fsn_event; void __user *p; int ret = -ENOTTY; size_t send_len = 0; group = file->private_data; p = (void __user *) arg; pr_debug("%s: group=%p cmd=%u\n", __func__, group, cmd); switch (cmd) { case FIONREAD: spin_lock(&group->notification_lock); list_for_each_entry(fsn_event, &group->notification_list, list) { send_len += sizeof(struct inotify_event); send_len += round_event_name_len(fsn_event); } spin_unlock(&group->notification_lock); ret = put_user(send_len, (int __user *) p); break; #ifdef CONFIG_CHECKPOINT_RESTORE case INOTIFY_IOC_SETNEXTWD: ret = -EINVAL; if (arg >= 1 && arg <= INT_MAX) { struct inotify_group_private_data *data; data = &group->inotify_data; spin_lock(&data->idr_lock); idr_set_cursor(&data->idr, (unsigned int)arg); spin_unlock(&data->idr_lock); ret = 0; } break; #endif /* CONFIG_CHECKPOINT_RESTORE */ } return ret; } static const struct file_operations inotify_fops = { .show_fdinfo = inotify_show_fdinfo, .poll = inotify_poll, .read = inotify_read, .fasync = fsnotify_fasync, .release = inotify_release, .unlocked_ioctl = inotify_ioctl, .compat_ioctl = inotify_ioctl, .llseek = noop_llseek, }; /* * find_inode - resolve a user-given path to a specific inode */ static int inotify_find_inode(const char __user *dirname, struct path *path, unsigned int flags, __u64 mask) { int error; error = user_path_at(AT_FDCWD, dirname, flags, path); if (error) return error; /* you can only watch an inode if you have read permissions on it */ error = path_permission(path, MAY_READ); if (error) { path_put(path); return error; } error = security_path_notify(path, mask, FSNOTIFY_OBJ_TYPE_INODE); if (error) path_put(path); return error; } static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock, struct inotify_inode_mark *i_mark) { int ret; idr_preload(GFP_KERNEL); spin_lock(idr_lock); 
ret = idr_alloc_cyclic(idr, i_mark, 1, 0, GFP_NOWAIT); if (ret >= 0) { /* we added the mark to the idr, take a reference */ i_mark->wd = ret; fsnotify_get_mark(&i_mark->fsn_mark); } spin_unlock(idr_lock); idr_preload_end(); return ret < 0 ? ret : 0; } static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group *group, int wd) { struct idr *idr = &group->inotify_data.idr; spinlock_t *idr_lock = &group->inotify_data.idr_lock; struct inotify_inode_mark *i_mark; assert_spin_locked(idr_lock); i_mark = idr_find(idr, wd); if (i_mark) { struct fsnotify_mark *fsn_mark = &i_mark->fsn_mark; fsnotify_get_mark(fsn_mark); /* One ref for being in the idr, one ref we just took */ BUG_ON(refcount_read(&fsn_mark->refcnt) < 2); } return i_mark; } static struct inotify_inode_mark *inotify_idr_find(struct fsnotify_group *group, int wd) { struct inotify_inode_mark *i_mark; spinlock_t *idr_lock = &group->inotify_data.idr_lock; spin_lock(idr_lock); i_mark = inotify_idr_find_locked(group, wd); spin_unlock(idr_lock); return i_mark; } /* * Remove the mark from the idr (if present) and drop the reference * on the mark because it was in the idr. */ static void inotify_remove_from_idr(struct fsnotify_group *group, struct inotify_inode_mark *i_mark) { struct idr *idr = &group->inotify_data.idr; spinlock_t *idr_lock = &group->inotify_data.idr_lock; struct inotify_inode_mark *found_i_mark = NULL; int wd; spin_lock(idr_lock); wd = i_mark->wd; /* * does this i_mark think it is in the idr? we shouldn't get called * if it wasn't.... */ if (wd == -1) { WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n", __func__, i_mark, i_mark->wd, i_mark->fsn_mark.group); goto out; } /* Lets look in the idr to see if we find it */ found_i_mark = inotify_idr_find_locked(group, wd); if (unlikely(!found_i_mark)) { WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n", __func__, i_mark, i_mark->wd, i_mark->fsn_mark.group); goto out; } /* * We found an mark in the idr at the right wd, but it's * not the mark we were told to remove. eparis seriously * fucked up somewhere. */ if (unlikely(found_i_mark != i_mark)) { WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p " "found_i_mark=%p found_i_mark->wd=%d " "found_i_mark->group=%p\n", __func__, i_mark, i_mark->wd, i_mark->fsn_mark.group, found_i_mark, found_i_mark->wd, found_i_mark->fsn_mark.group); goto out; } /* * One ref for being in the idr * one ref grabbed by inotify_idr_find */ if (unlikely(refcount_read(&i_mark->fsn_mark.refcnt) < 2)) { printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n", __func__, i_mark, i_mark->wd, i_mark->fsn_mark.group); /* we can't really recover with bad ref cnting.. */ BUG(); } idr_remove(idr, wd); /* Removed from the idr, drop that ref. */ fsnotify_put_mark(&i_mark->fsn_mark); out: i_mark->wd = -1; spin_unlock(idr_lock); /* match the ref taken by inotify_idr_find_locked() */ if (found_i_mark) fsnotify_put_mark(&found_i_mark->fsn_mark); } /* * Send IN_IGNORED for this wd, remove this wd from the idr. 
*/ void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark, struct fsnotify_group *group) { struct inotify_inode_mark *i_mark; /* Queue ignore event for the watch */ inotify_handle_inode_event(fsn_mark, FS_IN_IGNORED, NULL, NULL, NULL, 0); i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark); /* remove this mark from the idr */ inotify_remove_from_idr(group, i_mark); dec_inotify_watches(group->inotify_data.ucounts); } static int inotify_update_existing_watch(struct fsnotify_group *group, struct inode *inode, u32 arg) { struct fsnotify_mark *fsn_mark; struct inotify_inode_mark *i_mark; __u32 old_mask, new_mask; int replace = !(arg & IN_MASK_ADD); int create = (arg & IN_MASK_CREATE); int ret; fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group); if (!fsn_mark) return -ENOENT; else if (create) { ret = -EEXIST; goto out; } i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark); spin_lock(&fsn_mark->lock); old_mask = fsn_mark->mask; if (replace) { fsn_mark->mask = 0; fsn_mark->flags &= ~INOTIFY_MARK_FLAGS; } fsn_mark->mask |= inotify_arg_to_mask(inode, arg); fsn_mark->flags |= inotify_arg_to_flags(arg); new_mask = fsn_mark->mask; spin_unlock(&fsn_mark->lock); if (old_mask != new_mask) { /* more bits in old than in new? */ int dropped = (old_mask & ~new_mask); /* more bits in this fsn_mark than the inode's mask? */ int do_inode = (new_mask & ~inode->i_fsnotify_mask); /* update the inode with this new fsn_mark */ if (dropped || do_inode) fsnotify_recalc_mask(inode->i_fsnotify_marks); } /* return the wd */ ret = i_mark->wd; out: /* match the get from fsnotify_find_mark() */ fsnotify_put_mark(fsn_mark); return ret; } static int inotify_new_watch(struct fsnotify_group *group, struct inode *inode, u32 arg) { struct inotify_inode_mark *tmp_i_mark; int ret; struct idr *idr = &group->inotify_data.idr; spinlock_t *idr_lock = &group->inotify_data.idr_lock; tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL); if (unlikely(!tmp_i_mark)) return -ENOMEM; fsnotify_init_mark(&tmp_i_mark->fsn_mark, group); tmp_i_mark->fsn_mark.mask = inotify_arg_to_mask(inode, arg); tmp_i_mark->fsn_mark.flags = inotify_arg_to_flags(arg); tmp_i_mark->wd = -1; ret = inotify_add_to_idr(idr, idr_lock, tmp_i_mark); if (ret) goto out_err; /* increment the number of watches the user has */ if (!inc_inotify_watches(group->inotify_data.ucounts)) { inotify_remove_from_idr(group, tmp_i_mark); ret = -ENOSPC; goto out_err; } /* we are on the idr, now get on the inode */ ret = fsnotify_add_inode_mark_locked(&tmp_i_mark->fsn_mark, inode, 0); if (ret) { /* we failed to get on the inode, get off the idr */ inotify_remove_from_idr(group, tmp_i_mark); goto out_err; } /* return the watch descriptor for this new mark */ ret = tmp_i_mark->wd; out_err: /* match the ref from fsnotify_init_mark() */ fsnotify_put_mark(&tmp_i_mark->fsn_mark); return ret; } static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg) { int ret = 0; fsnotify_group_lock(group); /* try to update and existing watch with the new arg */ ret = inotify_update_existing_watch(group, inode, arg); /* no mark present, try to add a new one */ if (ret == -ENOENT) ret = inotify_new_watch(group, inode, arg); fsnotify_group_unlock(group); return ret; } static struct fsnotify_group *inotify_new_group(unsigned int max_events) { struct fsnotify_group *group; struct inotify_event_info *oevent; group = fsnotify_alloc_group(&inotify_fsnotify_ops, FSNOTIFY_GROUP_USER); if (IS_ERR(group)) return 
group; oevent = kmalloc(sizeof(struct inotify_event_info), GFP_KERNEL_ACCOUNT); if (unlikely(!oevent)) { fsnotify_destroy_group(group); return ERR_PTR(-ENOMEM); } group->overflow_event = &oevent->fse; fsnotify_init_event(group->overflow_event); oevent->mask = FS_Q_OVERFLOW; oevent->wd = -1; oevent->sync_cookie = 0; oevent->name_len = 0; group->max_events = max_events; group->memcg = get_mem_cgroup_from_mm(current->mm); spin_lock_init(&group->inotify_data.idr_lock); idr_init(&group->inotify_data.idr); group->inotify_data.ucounts = inc_ucount(current_user_ns(), current_euid(), UCOUNT_INOTIFY_INSTANCES); if (!group->inotify_data.ucounts) { fsnotify_destroy_group(group); return ERR_PTR(-EMFILE); } return group; } /* inotify syscalls */ static int do_inotify_init(int flags) { struct fsnotify_group *group; int ret; /* Check the IN_* constants for consistency. */ BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC); BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK); if (flags & ~(IN_CLOEXEC | IN_NONBLOCK)) return -EINVAL; /* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */ group = inotify_new_group(inotify_max_queued_events); if (IS_ERR(group)) return PTR_ERR(group); ret = anon_inode_getfd("inotify", &inotify_fops, group, O_RDONLY | flags); if (ret < 0) fsnotify_destroy_group(group); return ret; } SYSCALL_DEFINE1(inotify_init1, int, flags) { return do_inotify_init(flags); } SYSCALL_DEFINE0(inotify_init) { return do_inotify_init(0); } SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname, u32, mask) { struct fsnotify_group *group; struct inode *inode; struct path path; struct fd f; int ret; unsigned flags = 0; /* * We share a lot of code with fs/dnotify. We also share * the bit layout between inotify's IN_* and the fsnotify * FS_*. This check ensures that only the inotify IN_* * bits get passed in and set in watches/events. */ if (unlikely(mask & ~ALL_INOTIFY_BITS)) return -EINVAL; /* * Require at least one valid bit set in the mask. * Without _something_ set, we would have no events to * watch for. 
*/ if (unlikely(!(mask & ALL_INOTIFY_BITS))) return -EINVAL; f = fdget(fd); if (unlikely(!f.file)) return -EBADF; /* IN_MASK_ADD and IN_MASK_CREATE don't make sense together */ if (unlikely((mask & IN_MASK_ADD) && (mask & IN_MASK_CREATE))) { ret = -EINVAL; goto fput_and_out; } /* verify that this is indeed an inotify instance */ if (unlikely(f.file->f_op != &inotify_fops)) { ret = -EINVAL; goto fput_and_out; } if (!(mask & IN_DONT_FOLLOW)) flags |= LOOKUP_FOLLOW; if (mask & IN_ONLYDIR) flags |= LOOKUP_DIRECTORY; ret = inotify_find_inode(pathname, &path, flags, (mask & IN_ALL_EVENTS)); if (ret) goto fput_and_out; /* inode held in place by reference to path; group by fget on fd */ inode = path.dentry->d_inode; group = f.file->private_data; /* create/update an inode mark */ ret = inotify_update_watch(group, inode, mask); path_put(&path); fput_and_out: fdput(f); return ret; } SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd) { struct fsnotify_group *group; struct inotify_inode_mark *i_mark; struct fd f; int ret = -EINVAL; f = fdget(fd); if (unlikely(!f.file)) return -EBADF; /* verify that this is indeed an inotify instance */ if (unlikely(f.file->f_op != &inotify_fops)) goto out; group = f.file->private_data; i_mark = inotify_idr_find(group, wd); if (unlikely(!i_mark)) goto out; ret = 0; fsnotify_destroy_mark(&i_mark->fsn_mark, group); /* match ref taken by inotify_idr_find */ fsnotify_put_mark(&i_mark->fsn_mark); out: fdput(f); return ret; } /* * inotify_user_setup - Our initialization function. Note that we cannot return * error because we have compiled-in VFS hooks. So an (unlikely) failure here * must result in panic(). */ static int __init inotify_user_setup(void) { unsigned long watches_max; struct sysinfo si; si_meminfo(&si); /* * Allow up to 1% of addressable memory to be allocated for inotify * watches (per user) limited to the range [8192, 1048576]. */ watches_max = (((si.totalram - si.totalhigh) / 100) << PAGE_SHIFT) / INOTIFY_WATCH_COST; watches_max = clamp(watches_max, 8192UL, 1048576UL); BUILD_BUG_ON(IN_ACCESS != FS_ACCESS); BUILD_BUG_ON(IN_MODIFY != FS_MODIFY); BUILD_BUG_ON(IN_ATTRIB != FS_ATTRIB); BUILD_BUG_ON(IN_CLOSE_WRITE != FS_CLOSE_WRITE); BUILD_BUG_ON(IN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE); BUILD_BUG_ON(IN_OPEN != FS_OPEN); BUILD_BUG_ON(IN_MOVED_FROM != FS_MOVED_FROM); BUILD_BUG_ON(IN_MOVED_TO != FS_MOVED_TO); BUILD_BUG_ON(IN_CREATE != FS_CREATE); BUILD_BUG_ON(IN_DELETE != FS_DELETE); BUILD_BUG_ON(IN_DELETE_SELF != FS_DELETE_SELF); BUILD_BUG_ON(IN_MOVE_SELF != FS_MOVE_SELF); BUILD_BUG_ON(IN_UNMOUNT != FS_UNMOUNT); BUILD_BUG_ON(IN_Q_OVERFLOW != FS_Q_OVERFLOW); BUILD_BUG_ON(IN_IGNORED != FS_IN_IGNORED); BUILD_BUG_ON(IN_ISDIR != FS_ISDIR); BUILD_BUG_ON(HWEIGHT32(ALL_INOTIFY_BITS) != 22); inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark, SLAB_PANIC|SLAB_ACCOUNT); inotify_max_queued_events = 16384; init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES] = 128; init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES] = watches_max; inotify_sysctls_init(); return 0; } fs_initcall(inotify_user_setup);
linux-master
fs/notify/inotify/inotify_user.c
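copy_event_to_user() above writes one struct inotify_event header per event and sets its len field to the name length rounded up to a multiple of sizeof(struct inotify_event), zero-padding the tail, so any name a consumer sees is NUL-terminated and the records are variable-sized. The canonical way to read them is therefore a record-walking loop like the sketch below; the watched directory "/tmp" and the IN_CREATE | IN_DELETE mask are placeholders for illustration, not anything prescribed by the file above.

/* Minimal inotify consumer matching the record layout produced above. */
#include <sys/inotify.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[4096] __attribute__((aligned(__alignof__(struct inotify_event))));
        int fd = inotify_init1(IN_CLOEXEC);
        int wd;
        ssize_t len;

        if (fd < 0)
                return 1;

        /* Watch an arbitrary directory for entry creation and deletion. */
        wd = inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE);
        if (wd < 0)
                return 1;

        len = read(fd, buf, sizeof(buf));       /* blocks until at least one event */
        if (len <= 0)
                return 1;
        for (char *p = buf; p < buf + len; ) {
                const struct inotify_event *ev = (const struct inotify_event *)p;

                /* ev->len is the padded name length (0 for events on the watch
                 * itself); when present, the name is NUL-terminated by the
                 * kernel's zero padding. */
                printf("wd=%d mask=0x%x name=%s\n", ev->wd, (unsigned)ev->mask,
                       ev->len ? ev->name : "");

                p += sizeof(*ev) + ev->len;
        }

        close(fd);
        return 0;
}

The FIONREAD ioctl handled in inotify_ioctl() reports exactly the byte count such a loop would consume: the sum of sizeof(struct inotify_event) plus the padded name length over all queued events.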
// SPDX-License-Identifier: GPL-2.0 #include <linux/fanotify.h> #include <linux/fcntl.h> #include <linux/fdtable.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/anon_inodes.h> #include <linux/fsnotify_backend.h> #include <linux/init.h> #include <linux/mount.h> #include <linux/namei.h> #include <linux/poll.h> #include <linux/security.h> #include <linux/syscalls.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/uaccess.h> #include <linux/compat.h> #include <linux/sched/signal.h> #include <linux/memcontrol.h> #include <linux/statfs.h> #include <linux/exportfs.h> #include <asm/ioctls.h> #include "../../mount.h" #include "../fdinfo.h" #include "fanotify.h" #define FANOTIFY_DEFAULT_MAX_EVENTS 16384 #define FANOTIFY_OLD_DEFAULT_MAX_MARKS 8192 #define FANOTIFY_DEFAULT_MAX_GROUPS 128 #define FANOTIFY_DEFAULT_FEE_POOL_SIZE 32 /* * Legacy fanotify marks limits (8192) is per group and we introduced a tunable * limit of marks per user, similar to inotify. Effectively, the legacy limit * of fanotify marks per user is <max marks per group> * <max groups per user>. * This default limit (1M) also happens to match the increased limit of inotify * max_user_watches since v5.10. */ #define FANOTIFY_DEFAULT_MAX_USER_MARKS \ (FANOTIFY_OLD_DEFAULT_MAX_MARKS * FANOTIFY_DEFAULT_MAX_GROUPS) /* * Most of the memory cost of adding an inode mark is pinning the marked inode. * The size of the filesystem inode struct is not uniform across filesystems, * so double the size of a VFS inode is used as a conservative approximation. */ #define INODE_MARK_COST (2 * sizeof(struct inode)) /* configurable via /proc/sys/fs/fanotify/ */ static int fanotify_max_queued_events __read_mostly; #ifdef CONFIG_SYSCTL #include <linux/sysctl.h> static long ft_zero = 0; static long ft_int_max = INT_MAX; static struct ctl_table fanotify_table[] = { { .procname = "max_user_groups", .data = &init_user_ns.ucount_max[UCOUNT_FANOTIFY_GROUPS], .maxlen = sizeof(long), .mode = 0644, .proc_handler = proc_doulongvec_minmax, .extra1 = &ft_zero, .extra2 = &ft_int_max, }, { .procname = "max_user_marks", .data = &init_user_ns.ucount_max[UCOUNT_FANOTIFY_MARKS], .maxlen = sizeof(long), .mode = 0644, .proc_handler = proc_doulongvec_minmax, .extra1 = &ft_zero, .extra2 = &ft_int_max, }, { .procname = "max_queued_events", .data = &fanotify_max_queued_events, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = SYSCTL_ZERO }, { } }; static void __init fanotify_sysctls_init(void) { register_sysctl("fs/fanotify", fanotify_table); } #else #define fanotify_sysctls_init() do { } while (0) #endif /* CONFIG_SYSCTL */ /* * All flags that may be specified in parameter event_f_flags of fanotify_init. * * Internal and external open flags are stored together in field f_flags of * struct file. Only external open flags shall be allowed in event_f_flags. * Internal flags like FMODE_NONOTIFY, FMODE_EXEC, FMODE_NOCMTIME shall be * excluded. 
*/ #define FANOTIFY_INIT_ALL_EVENT_F_BITS ( \ O_ACCMODE | O_APPEND | O_NONBLOCK | \ __O_SYNC | O_DSYNC | O_CLOEXEC | \ O_LARGEFILE | O_NOATIME ) extern const struct fsnotify_ops fanotify_fsnotify_ops; struct kmem_cache *fanotify_mark_cache __read_mostly; struct kmem_cache *fanotify_fid_event_cachep __read_mostly; struct kmem_cache *fanotify_path_event_cachep __read_mostly; struct kmem_cache *fanotify_perm_event_cachep __read_mostly; #define FANOTIFY_EVENT_ALIGN 4 #define FANOTIFY_FID_INFO_HDR_LEN \ (sizeof(struct fanotify_event_info_fid) + sizeof(struct file_handle)) #define FANOTIFY_PIDFD_INFO_HDR_LEN \ sizeof(struct fanotify_event_info_pidfd) #define FANOTIFY_ERROR_INFO_LEN \ (sizeof(struct fanotify_event_info_error)) static int fanotify_fid_info_len(int fh_len, int name_len) { int info_len = fh_len; if (name_len) info_len += name_len + 1; return roundup(FANOTIFY_FID_INFO_HDR_LEN + info_len, FANOTIFY_EVENT_ALIGN); } /* FAN_RENAME may have one or two dir+name info records */ static int fanotify_dir_name_info_len(struct fanotify_event *event) { struct fanotify_info *info = fanotify_event_info(event); int dir_fh_len = fanotify_event_dir_fh_len(event); int dir2_fh_len = fanotify_event_dir2_fh_len(event); int info_len = 0; if (dir_fh_len) info_len += fanotify_fid_info_len(dir_fh_len, info->name_len); if (dir2_fh_len) info_len += fanotify_fid_info_len(dir2_fh_len, info->name2_len); return info_len; } static size_t fanotify_event_len(unsigned int info_mode, struct fanotify_event *event) { size_t event_len = FAN_EVENT_METADATA_LEN; int fh_len; int dot_len = 0; if (!info_mode) return event_len; if (fanotify_is_error_event(event->mask)) event_len += FANOTIFY_ERROR_INFO_LEN; if (fanotify_event_has_any_dir_fh(event)) { event_len += fanotify_dir_name_info_len(event); } else if ((info_mode & FAN_REPORT_NAME) && (event->mask & FAN_ONDIR)) { /* * With group flag FAN_REPORT_NAME, if name was not recorded in * event on a directory, we will report the name ".". */ dot_len = 1; } if (info_mode & FAN_REPORT_PIDFD) event_len += FANOTIFY_PIDFD_INFO_HDR_LEN; if (fanotify_event_has_object_fh(event)) { fh_len = fanotify_event_object_fh_len(event); event_len += fanotify_fid_info_len(fh_len, dot_len); } return event_len; } /* * Remove an hashed event from merge hash table. */ static void fanotify_unhash_event(struct fsnotify_group *group, struct fanotify_event *event) { assert_spin_locked(&group->notification_lock); pr_debug("%s: group=%p event=%p bucket=%u\n", __func__, group, event, fanotify_event_hash_bucket(group, event)); if (WARN_ON_ONCE(hlist_unhashed(&event->merge_list))) return; hlist_del_init(&event->merge_list); } /* * Get an fanotify notification event if one exists and is small * enough to fit in "count". Return an error pointer if the count * is not large enough. When permission event is dequeued, its state is * updated accordingly. */ static struct fanotify_event *get_one_event(struct fsnotify_group *group, size_t count) { size_t event_size; struct fanotify_event *event = NULL; struct fsnotify_event *fsn_event; unsigned int info_mode = FAN_GROUP_FLAG(group, FANOTIFY_INFO_MODES); pr_debug("%s: group=%p count=%zd\n", __func__, group, count); spin_lock(&group->notification_lock); fsn_event = fsnotify_peek_first_event(group); if (!fsn_event) goto out; event = FANOTIFY_E(fsn_event); event_size = fanotify_event_len(info_mode, event); if (event_size > count) { event = ERR_PTR(-EINVAL); goto out; } /* * Held the notification_lock the whole time, so this is the * same event we peeked above. 
*/ fsnotify_remove_first_event(group); if (fanotify_is_perm_event(event->mask)) FANOTIFY_PERM(event)->state = FAN_EVENT_REPORTED; if (fanotify_is_hashed_event(event->mask)) fanotify_unhash_event(group, event); out: spin_unlock(&group->notification_lock); return event; } static int create_fd(struct fsnotify_group *group, const struct path *path, struct file **file) { int client_fd; struct file *new_file; client_fd = get_unused_fd_flags(group->fanotify_data.f_flags); if (client_fd < 0) return client_fd; /* * we need a new file handle for the userspace program so it can read even if it was * originally opened O_WRONLY. */ new_file = dentry_open(path, group->fanotify_data.f_flags | __FMODE_NONOTIFY, current_cred()); if (IS_ERR(new_file)) { /* * we still send an event even if we can't open the file. this * can happen when say tasks are gone and we try to open their * /proc files or we try to open a WRONLY file like in sysfs * we just send the errno to userspace since there isn't much * else we can do. */ put_unused_fd(client_fd); client_fd = PTR_ERR(new_file); } else { *file = new_file; } return client_fd; } static int process_access_response_info(const char __user *info, size_t info_len, struct fanotify_response_info_audit_rule *friar) { if (info_len != sizeof(*friar)) return -EINVAL; if (copy_from_user(friar, info, sizeof(*friar))) return -EFAULT; if (friar->hdr.type != FAN_RESPONSE_INFO_AUDIT_RULE) return -EINVAL; if (friar->hdr.pad != 0) return -EINVAL; if (friar->hdr.len != sizeof(*friar)) return -EINVAL; return info_len; } /* * Finish processing of permission event by setting it to ANSWERED state and * drop group->notification_lock. */ static void finish_permission_event(struct fsnotify_group *group, struct fanotify_perm_event *event, u32 response, struct fanotify_response_info_audit_rule *friar) __releases(&group->notification_lock) { bool destroy = false; assert_spin_locked(&group->notification_lock); event->response = response & ~FAN_INFO; if (response & FAN_INFO) memcpy(&event->audit_rule, friar, sizeof(*friar)); if (event->state == FAN_EVENT_CANCELED) destroy = true; else event->state = FAN_EVENT_ANSWERED; spin_unlock(&group->notification_lock); if (destroy) fsnotify_destroy_event(group, &event->fae.fse); } static int process_access_response(struct fsnotify_group *group, struct fanotify_response *response_struct, const char __user *info, size_t info_len) { struct fanotify_perm_event *event; int fd = response_struct->fd; u32 response = response_struct->response; int ret = info_len; struct fanotify_response_info_audit_rule friar; pr_debug("%s: group=%p fd=%d response=%u buf=%p size=%zu\n", __func__, group, fd, response, info, info_len); /* * make sure the response is valid, if invalid we do nothing and either * userspace can send a valid response or we will clean it up after the * timeout */ if (response & ~FANOTIFY_RESPONSE_VALID_MASK) return -EINVAL; switch (response & FANOTIFY_RESPONSE_ACCESS) { case FAN_ALLOW: case FAN_DENY: break; default: return -EINVAL; } if ((response & FAN_AUDIT) && !FAN_GROUP_FLAG(group, FAN_ENABLE_AUDIT)) return -EINVAL; if (response & FAN_INFO) { ret = process_access_response_info(info, info_len, &friar); if (ret < 0) return ret; if (fd == FAN_NOFD) return ret; } else { ret = 0; } if (fd < 0) return -EINVAL; spin_lock(&group->notification_lock); list_for_each_entry(event, &group->fanotify_data.access_list, fae.fse.list) { if (event->fd != fd) continue; list_del_init(&event->fae.fse.list); finish_permission_event(group, event, response, &friar); 
wake_up(&group->fanotify_data.access_waitq); return ret; } spin_unlock(&group->notification_lock); return -ENOENT; } static size_t copy_error_info_to_user(struct fanotify_event *event, char __user *buf, int count) { struct fanotify_event_info_error info = { }; struct fanotify_error_event *fee = FANOTIFY_EE(event); info.hdr.info_type = FAN_EVENT_INFO_TYPE_ERROR; info.hdr.len = FANOTIFY_ERROR_INFO_LEN; if (WARN_ON(count < info.hdr.len)) return -EFAULT; info.error = fee->error; info.error_count = fee->err_count; if (copy_to_user(buf, &info, sizeof(info))) return -EFAULT; return info.hdr.len; } static int copy_fid_info_to_user(__kernel_fsid_t *fsid, struct fanotify_fh *fh, int info_type, const char *name, size_t name_len, char __user *buf, size_t count) { struct fanotify_event_info_fid info = { }; struct file_handle handle = { }; unsigned char bounce[FANOTIFY_INLINE_FH_LEN], *fh_buf; size_t fh_len = fh ? fh->len : 0; size_t info_len = fanotify_fid_info_len(fh_len, name_len); size_t len = info_len; pr_debug("%s: fh_len=%zu name_len=%zu, info_len=%zu, count=%zu\n", __func__, fh_len, name_len, info_len, count); if (WARN_ON_ONCE(len < sizeof(info) || len > count)) return -EFAULT; /* * Copy event info fid header followed by variable sized file handle * and optionally followed by variable sized filename. */ switch (info_type) { case FAN_EVENT_INFO_TYPE_FID: case FAN_EVENT_INFO_TYPE_DFID: if (WARN_ON_ONCE(name_len)) return -EFAULT; break; case FAN_EVENT_INFO_TYPE_DFID_NAME: case FAN_EVENT_INFO_TYPE_OLD_DFID_NAME: case FAN_EVENT_INFO_TYPE_NEW_DFID_NAME: if (WARN_ON_ONCE(!name || !name_len)) return -EFAULT; break; default: return -EFAULT; } info.hdr.info_type = info_type; info.hdr.len = len; info.fsid = *fsid; if (copy_to_user(buf, &info, sizeof(info))) return -EFAULT; buf += sizeof(info); len -= sizeof(info); if (WARN_ON_ONCE(len < sizeof(handle))) return -EFAULT; handle.handle_type = fh->type; handle.handle_bytes = fh_len; /* Mangle handle_type for bad file_handle */ if (!fh_len) handle.handle_type = FILEID_INVALID; if (copy_to_user(buf, &handle, sizeof(handle))) return -EFAULT; buf += sizeof(handle); len -= sizeof(handle); if (WARN_ON_ONCE(len < fh_len)) return -EFAULT; /* * For an inline fh and inline file name, copy through stack to exclude * the copy from usercopy hardening protections. 
*/ fh_buf = fanotify_fh_buf(fh); if (fh_len <= FANOTIFY_INLINE_FH_LEN) { memcpy(bounce, fh_buf, fh_len); fh_buf = bounce; } if (copy_to_user(buf, fh_buf, fh_len)) return -EFAULT; buf += fh_len; len -= fh_len; if (name_len) { /* Copy the filename with terminating null */ name_len++; if (WARN_ON_ONCE(len < name_len)) return -EFAULT; if (copy_to_user(buf, name, name_len)) return -EFAULT; buf += name_len; len -= name_len; } /* Pad with 0's */ WARN_ON_ONCE(len < 0 || len >= FANOTIFY_EVENT_ALIGN); if (len > 0 && clear_user(buf, len)) return -EFAULT; return info_len; } static int copy_pidfd_info_to_user(int pidfd, char __user *buf, size_t count) { struct fanotify_event_info_pidfd info = { }; size_t info_len = FANOTIFY_PIDFD_INFO_HDR_LEN; if (WARN_ON_ONCE(info_len > count)) return -EFAULT; info.hdr.info_type = FAN_EVENT_INFO_TYPE_PIDFD; info.hdr.len = info_len; info.pidfd = pidfd; if (copy_to_user(buf, &info, info_len)) return -EFAULT; return info_len; } static int copy_info_records_to_user(struct fanotify_event *event, struct fanotify_info *info, unsigned int info_mode, int pidfd, char __user *buf, size_t count) { int ret, total_bytes = 0, info_type = 0; unsigned int fid_mode = info_mode & FANOTIFY_FID_BITS; unsigned int pidfd_mode = info_mode & FAN_REPORT_PIDFD; /* * Event info records order is as follows: * 1. dir fid + name * 2. (optional) new dir fid + new name * 3. (optional) child fid */ if (fanotify_event_has_dir_fh(event)) { info_type = info->name_len ? FAN_EVENT_INFO_TYPE_DFID_NAME : FAN_EVENT_INFO_TYPE_DFID; /* FAN_RENAME uses special info types */ if (event->mask & FAN_RENAME) info_type = FAN_EVENT_INFO_TYPE_OLD_DFID_NAME; ret = copy_fid_info_to_user(fanotify_event_fsid(event), fanotify_info_dir_fh(info), info_type, fanotify_info_name(info), info->name_len, buf, count); if (ret < 0) return ret; buf += ret; count -= ret; total_bytes += ret; } /* New dir fid+name may be reported in addition to old dir fid+name */ if (fanotify_event_has_dir2_fh(event)) { info_type = FAN_EVENT_INFO_TYPE_NEW_DFID_NAME; ret = copy_fid_info_to_user(fanotify_event_fsid(event), fanotify_info_dir2_fh(info), info_type, fanotify_info_name2(info), info->name2_len, buf, count); if (ret < 0) return ret; buf += ret; count -= ret; total_bytes += ret; } if (fanotify_event_has_object_fh(event)) { const char *dot = NULL; int dot_len = 0; if (fid_mode == FAN_REPORT_FID || info_type) { /* * With only group flag FAN_REPORT_FID only type FID is * reported. Second info record type is always FID. */ info_type = FAN_EVENT_INFO_TYPE_FID; } else if ((fid_mode & FAN_REPORT_NAME) && (event->mask & FAN_ONDIR)) { /* * With group flag FAN_REPORT_NAME, if name was not * recorded in an event on a directory, report the name * "." with info type DFID_NAME. */ info_type = FAN_EVENT_INFO_TYPE_DFID_NAME; dot = "."; dot_len = 1; } else if ((event->mask & ALL_FSNOTIFY_DIRENT_EVENTS) || (event->mask & FAN_ONDIR)) { /* * With group flag FAN_REPORT_DIR_FID, a single info * record has type DFID for directory entry modification * event and for event on a directory. */ info_type = FAN_EVENT_INFO_TYPE_DFID; } else { /* * With group flags FAN_REPORT_DIR_FID|FAN_REPORT_FID, * a single info record has type FID for event on a * non-directory, when there is no directory to report. * For example, on FAN_DELETE_SELF event. 
*/ info_type = FAN_EVENT_INFO_TYPE_FID; } ret = copy_fid_info_to_user(fanotify_event_fsid(event), fanotify_event_object_fh(event), info_type, dot, dot_len, buf, count); if (ret < 0) return ret; buf += ret; count -= ret; total_bytes += ret; } if (pidfd_mode) { ret = copy_pidfd_info_to_user(pidfd, buf, count); if (ret < 0) return ret; buf += ret; count -= ret; total_bytes += ret; } if (fanotify_is_error_event(event->mask)) { ret = copy_error_info_to_user(event, buf, count); if (ret < 0) return ret; buf += ret; count -= ret; total_bytes += ret; } return total_bytes; } static ssize_t copy_event_to_user(struct fsnotify_group *group, struct fanotify_event *event, char __user *buf, size_t count) { struct fanotify_event_metadata metadata; const struct path *path = fanotify_event_path(event); struct fanotify_info *info = fanotify_event_info(event); unsigned int info_mode = FAN_GROUP_FLAG(group, FANOTIFY_INFO_MODES); unsigned int pidfd_mode = info_mode & FAN_REPORT_PIDFD; struct file *f = NULL, *pidfd_file = NULL; int ret, pidfd = FAN_NOPIDFD, fd = FAN_NOFD; pr_debug("%s: group=%p event=%p\n", __func__, group, event); metadata.event_len = fanotify_event_len(info_mode, event); metadata.metadata_len = FAN_EVENT_METADATA_LEN; metadata.vers = FANOTIFY_METADATA_VERSION; metadata.reserved = 0; metadata.mask = event->mask & FANOTIFY_OUTGOING_EVENTS; metadata.pid = pid_vnr(event->pid); /* * For an unprivileged listener, event->pid can be used to identify the * events generated by the listener process itself, without disclosing * the pids of other processes. */ if (FAN_GROUP_FLAG(group, FANOTIFY_UNPRIV) && task_tgid(current) != event->pid) metadata.pid = 0; /* * For now, fid mode is required for an unprivileged listener and * fid mode does not report fd in events. Keep this check anyway * for safety in case fid mode requirement is relaxed in the future * to allow unprivileged listener to get events with no fd and no fid. */ if (!FAN_GROUP_FLAG(group, FANOTIFY_UNPRIV) && path && path->mnt && path->dentry) { fd = create_fd(group, path, &f); if (fd < 0) return fd; } metadata.fd = fd; if (pidfd_mode) { /* * Complain if the FAN_REPORT_PIDFD and FAN_REPORT_TID mutual * exclusion is ever lifted. At the time of incoporating pidfd * support within fanotify, the pidfd API only supported the * creation of pidfds for thread-group leaders. */ WARN_ON_ONCE(FAN_GROUP_FLAG(group, FAN_REPORT_TID)); /* * The PIDTYPE_TGID check for an event->pid is performed * preemptively in an attempt to catch out cases where the event * listener reads events after the event generating process has * already terminated. Report FAN_NOPIDFD to the event listener * in those cases, with all other pidfd creation errors being * reported as FAN_EPIDFD. */ if (metadata.pid == 0 || !pid_has_task(event->pid, PIDTYPE_TGID)) { pidfd = FAN_NOPIDFD; } else { pidfd = pidfd_prepare(event->pid, 0, &pidfd_file); if (pidfd < 0) pidfd = FAN_EPIDFD; } } ret = -EFAULT; /* * Sanity check copy size in case get_one_event() and * event_len sizes ever get out of sync. 
*/ if (WARN_ON_ONCE(metadata.event_len > count)) goto out_close_fd; if (copy_to_user(buf, &metadata, FAN_EVENT_METADATA_LEN)) goto out_close_fd; buf += FAN_EVENT_METADATA_LEN; count -= FAN_EVENT_METADATA_LEN; if (fanotify_is_perm_event(event->mask)) FANOTIFY_PERM(event)->fd = fd; if (info_mode) { ret = copy_info_records_to_user(event, info, info_mode, pidfd, buf, count); if (ret < 0) goto out_close_fd; } if (f) fd_install(fd, f); if (pidfd_file) fd_install(pidfd, pidfd_file); return metadata.event_len; out_close_fd: if (fd != FAN_NOFD) { put_unused_fd(fd); fput(f); } if (pidfd >= 0) { put_unused_fd(pidfd); fput(pidfd_file); } return ret; } /* intofiy userspace file descriptor functions */ static __poll_t fanotify_poll(struct file *file, poll_table *wait) { struct fsnotify_group *group = file->private_data; __poll_t ret = 0; poll_wait(file, &group->notification_waitq, wait); spin_lock(&group->notification_lock); if (!fsnotify_notify_queue_is_empty(group)) ret = EPOLLIN | EPOLLRDNORM; spin_unlock(&group->notification_lock); return ret; } static ssize_t fanotify_read(struct file *file, char __user *buf, size_t count, loff_t *pos) { struct fsnotify_group *group; struct fanotify_event *event; char __user *start; int ret; DEFINE_WAIT_FUNC(wait, woken_wake_function); start = buf; group = file->private_data; pr_debug("%s: group=%p\n", __func__, group); add_wait_queue(&group->notification_waitq, &wait); while (1) { /* * User can supply arbitrarily large buffer. Avoid softlockups * in case there are lots of available events. */ cond_resched(); event = get_one_event(group, count); if (IS_ERR(event)) { ret = PTR_ERR(event); break; } if (!event) { ret = -EAGAIN; if (file->f_flags & O_NONBLOCK) break; ret = -ERESTARTSYS; if (signal_pending(current)) break; if (start != buf) break; wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); continue; } ret = copy_event_to_user(group, event, buf, count); if (unlikely(ret == -EOPENSTALE)) { /* * We cannot report events with stale fd so drop it. * Setting ret to 0 will continue the event loop and * do the right thing if there are no more events to * read (i.e. return bytes read, -EAGAIN or wait). */ ret = 0; } /* * Permission events get queued to wait for response. Other * events can be destroyed now. 
*/ if (!fanotify_is_perm_event(event->mask)) { fsnotify_destroy_event(group, &event->fse); } else { if (ret <= 0) { spin_lock(&group->notification_lock); finish_permission_event(group, FANOTIFY_PERM(event), FAN_DENY, NULL); wake_up(&group->fanotify_data.access_waitq); } else { spin_lock(&group->notification_lock); list_add_tail(&event->fse.list, &group->fanotify_data.access_list); spin_unlock(&group->notification_lock); } } if (ret < 0) break; buf += ret; count -= ret; } remove_wait_queue(&group->notification_waitq, &wait); if (start != buf && ret != -EFAULT) ret = buf - start; return ret; } static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos) { struct fanotify_response response; struct fsnotify_group *group; int ret; const char __user *info_buf = buf + sizeof(struct fanotify_response); size_t info_len; if (!IS_ENABLED(CONFIG_FANOTIFY_ACCESS_PERMISSIONS)) return -EINVAL; group = file->private_data; pr_debug("%s: group=%p count=%zu\n", __func__, group, count); if (count < sizeof(response)) return -EINVAL; if (copy_from_user(&response, buf, sizeof(response))) return -EFAULT; info_len = count - sizeof(response); ret = process_access_response(group, &response, info_buf, info_len); if (ret < 0) count = ret; else count = sizeof(response) + ret; return count; } static int fanotify_release(struct inode *ignored, struct file *file) { struct fsnotify_group *group = file->private_data; struct fsnotify_event *fsn_event; /* * Stop new events from arriving in the notification queue. since * userspace cannot use fanotify fd anymore, no event can enter or * leave access_list by now either. */ fsnotify_group_stop_queueing(group); /* * Process all permission events on access_list and notification queue * and simulate reply from userspace. */ spin_lock(&group->notification_lock); while (!list_empty(&group->fanotify_data.access_list)) { struct fanotify_perm_event *event; event = list_first_entry(&group->fanotify_data.access_list, struct fanotify_perm_event, fae.fse.list); list_del_init(&event->fae.fse.list); finish_permission_event(group, event, FAN_ALLOW, NULL); spin_lock(&group->notification_lock); } /* * Destroy all non-permission events. For permission events just * dequeue them and set the response. They will be freed once the * response is consumed and fanotify_get_response() returns. 
*/ while ((fsn_event = fsnotify_remove_first_event(group))) { struct fanotify_event *event = FANOTIFY_E(fsn_event); if (!(event->mask & FANOTIFY_PERM_EVENTS)) { spin_unlock(&group->notification_lock); fsnotify_destroy_event(group, fsn_event); } else { finish_permission_event(group, FANOTIFY_PERM(event), FAN_ALLOW, NULL); } spin_lock(&group->notification_lock); } spin_unlock(&group->notification_lock); /* Response for all permission events it set, wakeup waiters */ wake_up(&group->fanotify_data.access_waitq); /* matches the fanotify_init->fsnotify_alloc_group */ fsnotify_destroy_group(group); return 0; } static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct fsnotify_group *group; struct fsnotify_event *fsn_event; void __user *p; int ret = -ENOTTY; size_t send_len = 0; group = file->private_data; p = (void __user *) arg; switch (cmd) { case FIONREAD: spin_lock(&group->notification_lock); list_for_each_entry(fsn_event, &group->notification_list, list) send_len += FAN_EVENT_METADATA_LEN; spin_unlock(&group->notification_lock); ret = put_user(send_len, (int __user *) p); break; } return ret; } static const struct file_operations fanotify_fops = { .show_fdinfo = fanotify_show_fdinfo, .poll = fanotify_poll, .read = fanotify_read, .write = fanotify_write, .fasync = NULL, .release = fanotify_release, .unlocked_ioctl = fanotify_ioctl, .compat_ioctl = compat_ptr_ioctl, .llseek = noop_llseek, }; static int fanotify_find_path(int dfd, const char __user *filename, struct path *path, unsigned int flags, __u64 mask, unsigned int obj_type) { int ret; pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__, dfd, filename, flags); if (filename == NULL) { struct fd f = fdget(dfd); ret = -EBADF; if (!f.file) goto out; ret = -ENOTDIR; if ((flags & FAN_MARK_ONLYDIR) && !(S_ISDIR(file_inode(f.file)->i_mode))) { fdput(f); goto out; } *path = f.file->f_path; path_get(path); fdput(f); } else { unsigned int lookup_flags = 0; if (!(flags & FAN_MARK_DONT_FOLLOW)) lookup_flags |= LOOKUP_FOLLOW; if (flags & FAN_MARK_ONLYDIR) lookup_flags |= LOOKUP_DIRECTORY; ret = user_path_at(dfd, filename, lookup_flags, path); if (ret) goto out; } /* you can only watch an inode if you have read permissions on it */ ret = path_permission(path, MAY_READ); if (ret) { path_put(path); goto out; } ret = security_path_notify(path, mask, obj_type); if (ret) path_put(path); out: return ret; } static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark, __u32 mask, unsigned int flags, __u32 umask, int *destroy) { __u32 oldmask, newmask; /* umask bits cannot be removed by user */ mask &= ~umask; spin_lock(&fsn_mark->lock); oldmask = fsnotify_calc_mask(fsn_mark); if (!(flags & FANOTIFY_MARK_IGNORE_BITS)) { fsn_mark->mask &= ~mask; } else { fsn_mark->ignore_mask &= ~mask; } newmask = fsnotify_calc_mask(fsn_mark); /* * We need to keep the mark around even if remaining mask cannot * result in any events (e.g. mask == FAN_ONDIR) to support incremenal * changes to the mask. * Destroy mark when only umask bits remain. 
*/ *destroy = !((fsn_mark->mask | fsn_mark->ignore_mask) & ~umask); spin_unlock(&fsn_mark->lock); return oldmask & ~newmask; } static int fanotify_remove_mark(struct fsnotify_group *group, fsnotify_connp_t *connp, __u32 mask, unsigned int flags, __u32 umask) { struct fsnotify_mark *fsn_mark = NULL; __u32 removed; int destroy_mark; fsnotify_group_lock(group); fsn_mark = fsnotify_find_mark(connp, group); if (!fsn_mark) { fsnotify_group_unlock(group); return -ENOENT; } removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags, umask, &destroy_mark); if (removed & fsnotify_conn_mask(fsn_mark->connector)) fsnotify_recalc_mask(fsn_mark->connector); if (destroy_mark) fsnotify_detach_mark(fsn_mark); fsnotify_group_unlock(group); if (destroy_mark) fsnotify_free_mark(fsn_mark); /* matches the fsnotify_find_mark() */ fsnotify_put_mark(fsn_mark); return 0; } static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group, struct vfsmount *mnt, __u32 mask, unsigned int flags, __u32 umask) { return fanotify_remove_mark(group, &real_mount(mnt)->mnt_fsnotify_marks, mask, flags, umask); } static int fanotify_remove_sb_mark(struct fsnotify_group *group, struct super_block *sb, __u32 mask, unsigned int flags, __u32 umask) { return fanotify_remove_mark(group, &sb->s_fsnotify_marks, mask, flags, umask); } static int fanotify_remove_inode_mark(struct fsnotify_group *group, struct inode *inode, __u32 mask, unsigned int flags, __u32 umask) { return fanotify_remove_mark(group, &inode->i_fsnotify_marks, mask, flags, umask); } static bool fanotify_mark_update_flags(struct fsnotify_mark *fsn_mark, unsigned int fan_flags) { bool want_iref = !(fan_flags & FAN_MARK_EVICTABLE); unsigned int ignore = fan_flags & FANOTIFY_MARK_IGNORE_BITS; bool recalc = false; /* * When using FAN_MARK_IGNORE for the first time, mark starts using * independent event flags in ignore mask. After that, trying to * update the ignore mask with the old FAN_MARK_IGNORED_MASK API * will result in EEXIST error. */ if (ignore == FAN_MARK_IGNORE) fsn_mark->flags |= FSNOTIFY_MARK_FLAG_HAS_IGNORE_FLAGS; /* * Setting FAN_MARK_IGNORED_SURV_MODIFY for the first time may lead to * the removal of the FS_MODIFY bit in calculated mask if it was set * because of an ignore mask that is now going to survive FS_MODIFY. */ if (ignore && (fan_flags & FAN_MARK_IGNORED_SURV_MODIFY) && !(fsn_mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY)) { fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY; if (!(fsn_mark->mask & FS_MODIFY)) recalc = true; } if (fsn_mark->connector->type != FSNOTIFY_OBJ_TYPE_INODE || want_iref == !(fsn_mark->flags & FSNOTIFY_MARK_FLAG_NO_IREF)) return recalc; /* * NO_IREF may be removed from a mark, but not added. * When removed, fsnotify_recalc_mask() will take the inode ref. 
*/ WARN_ON_ONCE(!want_iref); fsn_mark->flags &= ~FSNOTIFY_MARK_FLAG_NO_IREF; return true; } static bool fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark, __u32 mask, unsigned int fan_flags) { bool recalc; spin_lock(&fsn_mark->lock); if (!(fan_flags & FANOTIFY_MARK_IGNORE_BITS)) fsn_mark->mask |= mask; else fsn_mark->ignore_mask |= mask; recalc = fsnotify_calc_mask(fsn_mark) & ~fsnotify_conn_mask(fsn_mark->connector); recalc |= fanotify_mark_update_flags(fsn_mark, fan_flags); spin_unlock(&fsn_mark->lock); return recalc; } static struct fsnotify_mark *fanotify_add_new_mark(struct fsnotify_group *group, fsnotify_connp_t *connp, unsigned int obj_type, unsigned int fan_flags, __kernel_fsid_t *fsid) { struct ucounts *ucounts = group->fanotify_data.ucounts; struct fsnotify_mark *mark; int ret; /* * Enforce per user marks limits per user in all containing user ns. * A group with FAN_UNLIMITED_MARKS does not contribute to mark count * in the limited groups account. */ if (!FAN_GROUP_FLAG(group, FAN_UNLIMITED_MARKS) && !inc_ucount(ucounts->ns, ucounts->uid, UCOUNT_FANOTIFY_MARKS)) return ERR_PTR(-ENOSPC); mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL); if (!mark) { ret = -ENOMEM; goto out_dec_ucounts; } fsnotify_init_mark(mark, group); if (fan_flags & FAN_MARK_EVICTABLE) mark->flags |= FSNOTIFY_MARK_FLAG_NO_IREF; ret = fsnotify_add_mark_locked(mark, connp, obj_type, 0, fsid); if (ret) { fsnotify_put_mark(mark); goto out_dec_ucounts; } return mark; out_dec_ucounts: if (!FAN_GROUP_FLAG(group, FAN_UNLIMITED_MARKS)) dec_ucount(ucounts, UCOUNT_FANOTIFY_MARKS); return ERR_PTR(ret); } static int fanotify_group_init_error_pool(struct fsnotify_group *group) { if (mempool_initialized(&group->fanotify_data.error_events_pool)) return 0; return mempool_init_kmalloc_pool(&group->fanotify_data.error_events_pool, FANOTIFY_DEFAULT_FEE_POOL_SIZE, sizeof(struct fanotify_error_event)); } static int fanotify_may_update_existing_mark(struct fsnotify_mark *fsn_mark, unsigned int fan_flags) { /* * Non evictable mark cannot be downgraded to evictable mark. */ if (fan_flags & FAN_MARK_EVICTABLE && !(fsn_mark->flags & FSNOTIFY_MARK_FLAG_NO_IREF)) return -EEXIST; /* * New ignore mask semantics cannot be downgraded to old semantics. */ if (fan_flags & FAN_MARK_IGNORED_MASK && fsn_mark->flags & FSNOTIFY_MARK_FLAG_HAS_IGNORE_FLAGS) return -EEXIST; /* * An ignore mask that survives modify could never be downgraded to not * survive modify. With new FAN_MARK_IGNORE semantics we make that rule * explicit and return an error when trying to update the ignore mask * without the original FAN_MARK_IGNORED_SURV_MODIFY value. */ if (fan_flags & FAN_MARK_IGNORE && !(fan_flags & FAN_MARK_IGNORED_SURV_MODIFY) && fsn_mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY) return -EEXIST; return 0; } static int fanotify_add_mark(struct fsnotify_group *group, fsnotify_connp_t *connp, unsigned int obj_type, __u32 mask, unsigned int fan_flags, __kernel_fsid_t *fsid) { struct fsnotify_mark *fsn_mark; bool recalc; int ret = 0; fsnotify_group_lock(group); fsn_mark = fsnotify_find_mark(connp, group); if (!fsn_mark) { fsn_mark = fanotify_add_new_mark(group, connp, obj_type, fan_flags, fsid); if (IS_ERR(fsn_mark)) { fsnotify_group_unlock(group); return PTR_ERR(fsn_mark); } } /* * Check if requested mark flags conflict with an existing mark flags. */ ret = fanotify_may_update_existing_mark(fsn_mark, fan_flags); if (ret) goto out; /* * Error events are pre-allocated per group, only if strictly * needed (i.e. FAN_FS_ERROR was requested). 
*/ if (!(fan_flags & FANOTIFY_MARK_IGNORE_BITS) && (mask & FAN_FS_ERROR)) { ret = fanotify_group_init_error_pool(group); if (ret) goto out; } recalc = fanotify_mark_add_to_mask(fsn_mark, mask, fan_flags); if (recalc) fsnotify_recalc_mask(fsn_mark->connector); out: fsnotify_group_unlock(group); fsnotify_put_mark(fsn_mark); return ret; } static int fanotify_add_vfsmount_mark(struct fsnotify_group *group, struct vfsmount *mnt, __u32 mask, unsigned int flags, __kernel_fsid_t *fsid) { return fanotify_add_mark(group, &real_mount(mnt)->mnt_fsnotify_marks, FSNOTIFY_OBJ_TYPE_VFSMOUNT, mask, flags, fsid); } static int fanotify_add_sb_mark(struct fsnotify_group *group, struct super_block *sb, __u32 mask, unsigned int flags, __kernel_fsid_t *fsid) { return fanotify_add_mark(group, &sb->s_fsnotify_marks, FSNOTIFY_OBJ_TYPE_SB, mask, flags, fsid); } static int fanotify_add_inode_mark(struct fsnotify_group *group, struct inode *inode, __u32 mask, unsigned int flags, __kernel_fsid_t *fsid) { pr_debug("%s: group=%p inode=%p\n", __func__, group, inode); /* * If some other task has this inode open for write we should not add * an ignore mask, unless that ignore mask is supposed to survive * modification changes anyway. */ if ((flags & FANOTIFY_MARK_IGNORE_BITS) && !(flags & FAN_MARK_IGNORED_SURV_MODIFY) && inode_is_open_for_write(inode)) return 0; return fanotify_add_mark(group, &inode->i_fsnotify_marks, FSNOTIFY_OBJ_TYPE_INODE, mask, flags, fsid); } static struct fsnotify_event *fanotify_alloc_overflow_event(void) { struct fanotify_event *oevent; oevent = kmalloc(sizeof(*oevent), GFP_KERNEL_ACCOUNT); if (!oevent) return NULL; fanotify_init_event(oevent, 0, FS_Q_OVERFLOW); oevent->type = FANOTIFY_EVENT_TYPE_OVERFLOW; return &oevent->fse; } static struct hlist_head *fanotify_alloc_merge_hash(void) { struct hlist_head *hash; hash = kmalloc(sizeof(struct hlist_head) << FANOTIFY_HTABLE_BITS, GFP_KERNEL_ACCOUNT); if (!hash) return NULL; __hash_init(hash, FANOTIFY_HTABLE_SIZE); return hash; } /* fanotify syscalls */ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags) { struct fsnotify_group *group; int f_flags, fd; unsigned int fid_mode = flags & FANOTIFY_FID_BITS; unsigned int class = flags & FANOTIFY_CLASS_BITS; unsigned int internal_flags = 0; pr_debug("%s: flags=%x event_f_flags=%x\n", __func__, flags, event_f_flags); if (!capable(CAP_SYS_ADMIN)) { /* * An unprivileged user can setup an fanotify group with * limited functionality - an unprivileged group is limited to * notification events with file handles and it cannot use * unlimited queue/marks. */ if ((flags & FANOTIFY_ADMIN_INIT_FLAGS) || !fid_mode) return -EPERM; /* * Setting the internal flag FANOTIFY_UNPRIV on the group * prevents setting mount/filesystem marks on this group and * prevents reporting pid and open fd in events. */ internal_flags |= FANOTIFY_UNPRIV; } #ifdef CONFIG_AUDITSYSCALL if (flags & ~(FANOTIFY_INIT_FLAGS | FAN_ENABLE_AUDIT)) #else if (flags & ~FANOTIFY_INIT_FLAGS) #endif return -EINVAL; /* * A pidfd can only be returned for a thread-group leader; thus * FAN_REPORT_PIDFD and FAN_REPORT_TID need to remain mutually * exclusive. */ if ((flags & FAN_REPORT_PIDFD) && (flags & FAN_REPORT_TID)) return -EINVAL; if (event_f_flags & ~FANOTIFY_INIT_ALL_EVENT_F_BITS) return -EINVAL; switch (event_f_flags & O_ACCMODE) { case O_RDONLY: case O_RDWR: case O_WRONLY: break; default: return -EINVAL; } if (fid_mode && class != FAN_CLASS_NOTIF) return -EINVAL; /* * Child name is reported with parent fid so requires dir fid. 
* We can report both child fid and dir fid with or without name. */ if ((fid_mode & FAN_REPORT_NAME) && !(fid_mode & FAN_REPORT_DIR_FID)) return -EINVAL; /* * FAN_REPORT_TARGET_FID requires FAN_REPORT_NAME and FAN_REPORT_FID * and is used as an indication to report both dir and child fid on all * dirent events. */ if ((fid_mode & FAN_REPORT_TARGET_FID) && (!(fid_mode & FAN_REPORT_NAME) || !(fid_mode & FAN_REPORT_FID))) return -EINVAL; f_flags = O_RDWR | __FMODE_NONOTIFY; if (flags & FAN_CLOEXEC) f_flags |= O_CLOEXEC; if (flags & FAN_NONBLOCK) f_flags |= O_NONBLOCK; /* fsnotify_alloc_group takes a ref. Dropped in fanotify_release */ group = fsnotify_alloc_group(&fanotify_fsnotify_ops, FSNOTIFY_GROUP_USER | FSNOTIFY_GROUP_NOFS); if (IS_ERR(group)) { return PTR_ERR(group); } /* Enforce groups limits per user in all containing user ns */ group->fanotify_data.ucounts = inc_ucount(current_user_ns(), current_euid(), UCOUNT_FANOTIFY_GROUPS); if (!group->fanotify_data.ucounts) { fd = -EMFILE; goto out_destroy_group; } group->fanotify_data.flags = flags | internal_flags; group->memcg = get_mem_cgroup_from_mm(current->mm); group->fanotify_data.merge_hash = fanotify_alloc_merge_hash(); if (!group->fanotify_data.merge_hash) { fd = -ENOMEM; goto out_destroy_group; } group->overflow_event = fanotify_alloc_overflow_event(); if (unlikely(!group->overflow_event)) { fd = -ENOMEM; goto out_destroy_group; } if (force_o_largefile()) event_f_flags |= O_LARGEFILE; group->fanotify_data.f_flags = event_f_flags; init_waitqueue_head(&group->fanotify_data.access_waitq); INIT_LIST_HEAD(&group->fanotify_data.access_list); switch (class) { case FAN_CLASS_NOTIF: group->priority = FS_PRIO_0; break; case FAN_CLASS_CONTENT: group->priority = FS_PRIO_1; break; case FAN_CLASS_PRE_CONTENT: group->priority = FS_PRIO_2; break; default: fd = -EINVAL; goto out_destroy_group; } if (flags & FAN_UNLIMITED_QUEUE) { fd = -EPERM; if (!capable(CAP_SYS_ADMIN)) goto out_destroy_group; group->max_events = UINT_MAX; } else { group->max_events = fanotify_max_queued_events; } if (flags & FAN_UNLIMITED_MARKS) { fd = -EPERM; if (!capable(CAP_SYS_ADMIN)) goto out_destroy_group; } if (flags & FAN_ENABLE_AUDIT) { fd = -EPERM; if (!capable(CAP_AUDIT_WRITE)) goto out_destroy_group; } fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags); if (fd < 0) goto out_destroy_group; return fd; out_destroy_group: fsnotify_destroy_group(group); return fd; } static int fanotify_test_fsid(struct dentry *dentry, __kernel_fsid_t *fsid) { __kernel_fsid_t root_fsid; int err; /* * Make sure dentry is not of a filesystem with zero fsid (e.g. fuse). */ err = vfs_get_fsid(dentry, fsid); if (err) return err; if (!fsid->val[0] && !fsid->val[1]) return -ENODEV; /* * Make sure dentry is not of a filesystem subvolume (e.g. btrfs) * which uses a different fsid than sb root. */ err = vfs_get_fsid(dentry->d_sb->s_root, &root_fsid); if (err) return err; if (root_fsid.val[0] != fsid->val[0] || root_fsid.val[1] != fsid->val[1]) return -EXDEV; return 0; } /* Check if filesystem can encode a unique fid */ static int fanotify_test_fid(struct dentry *dentry) { /* * We need to make sure that the file system supports at least * encoding a file handle so user can use name_to_handle_at() to * compare fid returned with event to the file handle of watched * objects. However, even the relaxed AT_HANDLE_FID flag requires * at least empty export_operations for ecoding unique file ids. 
*/ if (!dentry->d_sb->s_export_op) return -EOPNOTSUPP; return 0; } static int fanotify_events_supported(struct fsnotify_group *group, const struct path *path, __u64 mask, unsigned int flags) { unsigned int mark_type = flags & FANOTIFY_MARK_TYPE_BITS; /* Strict validation of events in non-dir inode mask with v5.17+ APIs */ bool strict_dir_events = FAN_GROUP_FLAG(group, FAN_REPORT_TARGET_FID) || (mask & FAN_RENAME) || (flags & FAN_MARK_IGNORE); /* * Some filesystems such as 'proc' acquire unusual locks when opening * files. For them fanotify permission events have high chances of * deadlocking the system - open done when reporting fanotify event * blocks on this "unusual" lock while another process holding the lock * waits for fanotify permission event to be answered. Just disallow * permission events for such filesystems. */ if (mask & FANOTIFY_PERM_EVENTS && path->mnt->mnt_sb->s_type->fs_flags & FS_DISALLOW_NOTIFY_PERM) return -EINVAL; /* * mount and sb marks are not allowed on kernel internal pseudo fs, * like pipe_mnt, because that would subscribe to events on all the * anonynous pipes in the system. * * SB_NOUSER covers all of the internal pseudo fs whose objects are not * exposed to user's mount namespace, but there are other SB_KERNMOUNT * fs, like nsfs, debugfs, for which the value of allowing sb and mount * mark is questionable. For now we leave them alone. */ if (mark_type != FAN_MARK_INODE && path->mnt->mnt_sb->s_flags & SB_NOUSER) return -EINVAL; /* * We shouldn't have allowed setting dirent events and the directory * flags FAN_ONDIR and FAN_EVENT_ON_CHILD in mask of non-dir inode, * but because we always allowed it, error only when using new APIs. */ if (strict_dir_events && mark_type == FAN_MARK_INODE && !d_is_dir(path->dentry) && (mask & FANOTIFY_DIRONLY_EVENT_BITS)) return -ENOTDIR; return 0; } static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask, int dfd, const char __user *pathname) { struct inode *inode = NULL; struct vfsmount *mnt = NULL; struct fsnotify_group *group; struct fd f; struct path path; __kernel_fsid_t __fsid, *fsid = NULL; u32 valid_mask = FANOTIFY_EVENTS | FANOTIFY_EVENT_FLAGS; unsigned int mark_type = flags & FANOTIFY_MARK_TYPE_BITS; unsigned int mark_cmd = flags & FANOTIFY_MARK_CMD_BITS; unsigned int ignore = flags & FANOTIFY_MARK_IGNORE_BITS; unsigned int obj_type, fid_mode; u32 umask = 0; int ret; pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n", __func__, fanotify_fd, flags, dfd, pathname, mask); /* we only use the lower 32 bits as of right now. */ if (upper_32_bits(mask)) return -EINVAL; if (flags & ~FANOTIFY_MARK_FLAGS) return -EINVAL; switch (mark_type) { case FAN_MARK_INODE: obj_type = FSNOTIFY_OBJ_TYPE_INODE; break; case FAN_MARK_MOUNT: obj_type = FSNOTIFY_OBJ_TYPE_VFSMOUNT; break; case FAN_MARK_FILESYSTEM: obj_type = FSNOTIFY_OBJ_TYPE_SB; break; default: return -EINVAL; } switch (mark_cmd) { case FAN_MARK_ADD: case FAN_MARK_REMOVE: if (!mask) return -EINVAL; break; case FAN_MARK_FLUSH: if (flags & ~(FANOTIFY_MARK_TYPE_BITS | FAN_MARK_FLUSH)) return -EINVAL; break; default: return -EINVAL; } if (IS_ENABLED(CONFIG_FANOTIFY_ACCESS_PERMISSIONS)) valid_mask |= FANOTIFY_PERM_EVENTS; if (mask & ~valid_mask) return -EINVAL; /* We don't allow FAN_MARK_IGNORE & FAN_MARK_IGNORED_MASK together */ if (ignore == (FAN_MARK_IGNORE | FAN_MARK_IGNORED_MASK)) return -EINVAL; /* * Event flags (FAN_ONDIR, FAN_EVENT_ON_CHILD) have no effect with * FAN_MARK_IGNORED_MASK. 
*/ if (ignore == FAN_MARK_IGNORED_MASK) { mask &= ~FANOTIFY_EVENT_FLAGS; umask = FANOTIFY_EVENT_FLAGS; } f = fdget(fanotify_fd); if (unlikely(!f.file)) return -EBADF; /* verify that this is indeed an fanotify instance */ ret = -EINVAL; if (unlikely(f.file->f_op != &fanotify_fops)) goto fput_and_out; group = f.file->private_data; /* * An unprivileged user is not allowed to setup mount nor filesystem * marks. This also includes setting up such marks by a group that * was initialized by an unprivileged user. */ ret = -EPERM; if ((!capable(CAP_SYS_ADMIN) || FAN_GROUP_FLAG(group, FANOTIFY_UNPRIV)) && mark_type != FAN_MARK_INODE) goto fput_and_out; /* * group->priority == FS_PRIO_0 == FAN_CLASS_NOTIF. These are not * allowed to set permissions events. */ ret = -EINVAL; if (mask & FANOTIFY_PERM_EVENTS && group->priority == FS_PRIO_0) goto fput_and_out; if (mask & FAN_FS_ERROR && mark_type != FAN_MARK_FILESYSTEM) goto fput_and_out; /* * Evictable is only relevant for inode marks, because only inode object * can be evicted on memory pressure. */ if (flags & FAN_MARK_EVICTABLE && mark_type != FAN_MARK_INODE) goto fput_and_out; /* * Events that do not carry enough information to report * event->fd require a group that supports reporting fid. Those * events are not supported on a mount mark, because they do not * carry enough information (i.e. path) to be filtered by mount * point. */ fid_mode = FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS); if (mask & ~(FANOTIFY_FD_EVENTS|FANOTIFY_EVENT_FLAGS) && (!fid_mode || mark_type == FAN_MARK_MOUNT)) goto fput_and_out; /* * FAN_RENAME uses special info type records to report the old and * new parent+name. Reporting only old and new parent id is less * useful and was not implemented. */ if (mask & FAN_RENAME && !(fid_mode & FAN_REPORT_NAME)) goto fput_and_out; if (mark_cmd == FAN_MARK_FLUSH) { ret = 0; if (mark_type == FAN_MARK_MOUNT) fsnotify_clear_vfsmount_marks_by_group(group); else if (mark_type == FAN_MARK_FILESYSTEM) fsnotify_clear_sb_marks_by_group(group); else fsnotify_clear_inode_marks_by_group(group); goto fput_and_out; } ret = fanotify_find_path(dfd, pathname, &path, flags, (mask & ALL_FSNOTIFY_EVENTS), obj_type); if (ret) goto fput_and_out; if (mark_cmd == FAN_MARK_ADD) { ret = fanotify_events_supported(group, &path, mask, flags); if (ret) goto path_put_and_out; } if (fid_mode) { ret = fanotify_test_fsid(path.dentry, &__fsid); if (ret) goto path_put_and_out; ret = fanotify_test_fid(path.dentry); if (ret) goto path_put_and_out; fsid = &__fsid; } /* inode held in place by reference to path; group by fget on fd */ if (mark_type == FAN_MARK_INODE) inode = path.dentry->d_inode; else mnt = path.mnt; ret = mnt ? -EINVAL : -EISDIR; /* FAN_MARK_IGNORE requires SURV_MODIFY for sb/mount/dir marks */ if (mark_cmd == FAN_MARK_ADD && ignore == FAN_MARK_IGNORE && (mnt || S_ISDIR(inode->i_mode)) && !(flags & FAN_MARK_IGNORED_SURV_MODIFY)) goto path_put_and_out; /* Mask out FAN_EVENT_ON_CHILD flag for sb/mount/non-dir marks */ if (mnt || !S_ISDIR(inode->i_mode)) { mask &= ~FAN_EVENT_ON_CHILD; umask = FAN_EVENT_ON_CHILD; /* * If group needs to report parent fid, register for getting * events with parent/name info for non-directory. 
*/ if ((fid_mode & FAN_REPORT_DIR_FID) && (flags & FAN_MARK_ADD) && !ignore) mask |= FAN_EVENT_ON_CHILD; } /* create/update an inode mark */ switch (mark_cmd) { case FAN_MARK_ADD: if (mark_type == FAN_MARK_MOUNT) ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags, fsid); else if (mark_type == FAN_MARK_FILESYSTEM) ret = fanotify_add_sb_mark(group, mnt->mnt_sb, mask, flags, fsid); else ret = fanotify_add_inode_mark(group, inode, mask, flags, fsid); break; case FAN_MARK_REMOVE: if (mark_type == FAN_MARK_MOUNT) ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags, umask); else if (mark_type == FAN_MARK_FILESYSTEM) ret = fanotify_remove_sb_mark(group, mnt->mnt_sb, mask, flags, umask); else ret = fanotify_remove_inode_mark(group, inode, mask, flags, umask); break; default: ret = -EINVAL; } path_put_and_out: path_put(&path); fput_and_out: fdput(f); return ret; } #ifndef CONFIG_ARCH_SPLIT_ARG64 SYSCALL_DEFINE5(fanotify_mark, int, fanotify_fd, unsigned int, flags, __u64, mask, int, dfd, const char __user *, pathname) { return do_fanotify_mark(fanotify_fd, flags, mask, dfd, pathname); } #endif #if defined(CONFIG_ARCH_SPLIT_ARG64) || defined(CONFIG_COMPAT) SYSCALL32_DEFINE6(fanotify_mark, int, fanotify_fd, unsigned int, flags, SC_ARG64(mask), int, dfd, const char __user *, pathname) { return do_fanotify_mark(fanotify_fd, flags, SC_VAL64(__u64, mask), dfd, pathname); } #endif /* * fanotify_user_setup - Our initialization function. Note that we cannot return * error because we have compiled-in VFS hooks. So an (unlikely) failure here * must result in panic(). */ static int __init fanotify_user_setup(void) { struct sysinfo si; int max_marks; si_meminfo(&si); /* * Allow up to 1% of addressable memory to be accounted for per user * marks limited to the range [8192, 1048576]. mount and sb marks are * a lot cheaper than inode marks, but there is no reason for a user * to have many of those, so calculate by the cost of inode marks. */ max_marks = (((si.totalram - si.totalhigh) / 100) << PAGE_SHIFT) / INODE_MARK_COST; max_marks = clamp(max_marks, FANOTIFY_OLD_DEFAULT_MAX_MARKS, FANOTIFY_DEFAULT_MAX_USER_MARKS); BUILD_BUG_ON(FANOTIFY_INIT_FLAGS & FANOTIFY_INTERNAL_GROUP_FLAGS); BUILD_BUG_ON(HWEIGHT32(FANOTIFY_INIT_FLAGS) != 12); BUILD_BUG_ON(HWEIGHT32(FANOTIFY_MARK_FLAGS) != 11); fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC|SLAB_ACCOUNT); fanotify_fid_event_cachep = KMEM_CACHE(fanotify_fid_event, SLAB_PANIC); fanotify_path_event_cachep = KMEM_CACHE(fanotify_path_event, SLAB_PANIC); if (IS_ENABLED(CONFIG_FANOTIFY_ACCESS_PERMISSIONS)) { fanotify_perm_event_cachep = KMEM_CACHE(fanotify_perm_event, SLAB_PANIC); } fanotify_max_queued_events = FANOTIFY_DEFAULT_MAX_EVENTS; init_user_ns.ucount_max[UCOUNT_FANOTIFY_GROUPS] = FANOTIFY_DEFAULT_MAX_GROUPS; init_user_ns.ucount_max[UCOUNT_FANOTIFY_MARKS] = max_marks; fanotify_sysctls_init(); return 0; } device_initcall(fanotify_user_setup);
linux-master
fs/notify/fanotify/fanotify_user.c
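The record above is the kernel-side implementation of the fanotify syscall interface (fanotify_init, fanotify_mark, and the read/write paths). For reference, below is a minimal userspace consumer of that interface. It is an illustrative sketch added here, not part of the kernel file: the watched path (/tmp) and the event mask are arbitrary examples, and running it requires CAP_SYS_ADMIN (an unprivileged listener would need a fid-mode group instead, per the FANOTIFY_UNPRIV checks above).

/*
 * Minimal fanotify consumer sketch (illustrative; path and mask are
 * arbitrary examples, CAP_SYS_ADMIN is required for this non-fid mode).
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/fanotify.h>

int main(void)
{
	int fan_fd;
	char buf[4096];
	ssize_t len;

	/* FAN_CLASS_NOTIF group: notification only, no permission events. */
	fan_fd = fanotify_init(FAN_CLOEXEC | FAN_CLASS_NOTIF,
			       O_RDONLY | O_LARGEFILE);
	if (fan_fd < 0) {
		perror("fanotify_init");
		return 1;
	}

	/* Watch open and close-after-write on children of /tmp. */
	if (fanotify_mark(fan_fd, FAN_MARK_ADD,
			  FAN_OPEN | FAN_CLOSE_WRITE | FAN_EVENT_ON_CHILD,
			  AT_FDCWD, "/tmp") < 0) {
		perror("fanotify_mark");
		return 1;
	}

	while ((len = read(fan_fd, buf, sizeof(buf))) > 0) {
		struct fanotify_event_metadata *md =
			(struct fanotify_event_metadata *)buf;

		while (FAN_EVENT_OK(md, len)) {
			if (md->vers != FANOTIFY_METADATA_VERSION) {
				fprintf(stderr, "metadata version mismatch\n");
				exit(1);
			}
			printf("mask=0x%llx pid=%d fd=%d\n",
			       (unsigned long long)md->mask,
			       (int)md->pid, md->fd);
			/* The kernel installed an open fd for us; close it. */
			if (md->fd >= 0)
				close(md->fd);
			md = FAN_EVENT_NEXT(md, len);
		}
	}
	return 0;
}

Each metadata record consumed here corresponds to one copy_event_to_user() call in the kernel code above, which is why the example closes metadata->fd after use.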
// SPDX-License-Identifier: GPL-2.0 #include <linux/fanotify.h> #include <linux/fdtable.h> #include <linux/fsnotify_backend.h> #include <linux/init.h> #include <linux/jiffies.h> #include <linux/kernel.h> /* UINT_MAX */ #include <linux/mount.h> #include <linux/sched.h> #include <linux/sched/user.h> #include <linux/sched/signal.h> #include <linux/types.h> #include <linux/wait.h> #include <linux/audit.h> #include <linux/sched/mm.h> #include <linux/statfs.h> #include <linux/stringhash.h> #include "fanotify.h" static bool fanotify_path_equal(const struct path *p1, const struct path *p2) { return p1->mnt == p2->mnt && p1->dentry == p2->dentry; } static unsigned int fanotify_hash_path(const struct path *path) { return hash_ptr(path->dentry, FANOTIFY_EVENT_HASH_BITS) ^ hash_ptr(path->mnt, FANOTIFY_EVENT_HASH_BITS); } static inline bool fanotify_fsid_equal(__kernel_fsid_t *fsid1, __kernel_fsid_t *fsid2) { return fsid1->val[0] == fsid2->val[0] && fsid1->val[1] == fsid2->val[1]; } static unsigned int fanotify_hash_fsid(__kernel_fsid_t *fsid) { return hash_32(fsid->val[0], FANOTIFY_EVENT_HASH_BITS) ^ hash_32(fsid->val[1], FANOTIFY_EVENT_HASH_BITS); } static bool fanotify_fh_equal(struct fanotify_fh *fh1, struct fanotify_fh *fh2) { if (fh1->type != fh2->type || fh1->len != fh2->len) return false; return !fh1->len || !memcmp(fanotify_fh_buf(fh1), fanotify_fh_buf(fh2), fh1->len); } static unsigned int fanotify_hash_fh(struct fanotify_fh *fh) { long salt = (long)fh->type | (long)fh->len << 8; /* * full_name_hash() works long by long, so it handles fh buf optimally. */ return full_name_hash((void *)salt, fanotify_fh_buf(fh), fh->len); } static bool fanotify_fid_event_equal(struct fanotify_fid_event *ffe1, struct fanotify_fid_event *ffe2) { /* Do not merge fid events without object fh */ if (!ffe1->object_fh.len) return false; return fanotify_fsid_equal(&ffe1->fsid, &ffe2->fsid) && fanotify_fh_equal(&ffe1->object_fh, &ffe2->object_fh); } static bool fanotify_info_equal(struct fanotify_info *info1, struct fanotify_info *info2) { if (info1->dir_fh_totlen != info2->dir_fh_totlen || info1->dir2_fh_totlen != info2->dir2_fh_totlen || info1->file_fh_totlen != info2->file_fh_totlen || info1->name_len != info2->name_len || info1->name2_len != info2->name2_len) return false; if (info1->dir_fh_totlen && !fanotify_fh_equal(fanotify_info_dir_fh(info1), fanotify_info_dir_fh(info2))) return false; if (info1->dir2_fh_totlen && !fanotify_fh_equal(fanotify_info_dir2_fh(info1), fanotify_info_dir2_fh(info2))) return false; if (info1->file_fh_totlen && !fanotify_fh_equal(fanotify_info_file_fh(info1), fanotify_info_file_fh(info2))) return false; if (info1->name_len && memcmp(fanotify_info_name(info1), fanotify_info_name(info2), info1->name_len)) return false; return !info1->name2_len || !memcmp(fanotify_info_name2(info1), fanotify_info_name2(info2), info1->name2_len); } static bool fanotify_name_event_equal(struct fanotify_name_event *fne1, struct fanotify_name_event *fne2) { struct fanotify_info *info1 = &fne1->info; struct fanotify_info *info2 = &fne2->info; /* Do not merge name events without dir fh */ if (!info1->dir_fh_totlen) return false; if (!fanotify_fsid_equal(&fne1->fsid, &fne2->fsid)) return false; return fanotify_info_equal(info1, info2); } static bool fanotify_error_event_equal(struct fanotify_error_event *fee1, struct fanotify_error_event *fee2) { /* Error events against the same file system are always merged. 
*/ if (!fanotify_fsid_equal(&fee1->fsid, &fee2->fsid)) return false; return true; } static bool fanotify_should_merge(struct fanotify_event *old, struct fanotify_event *new) { pr_debug("%s: old=%p new=%p\n", __func__, old, new); if (old->hash != new->hash || old->type != new->type || old->pid != new->pid) return false; /* * We want to merge many dirent events in the same dir (i.e. * creates/unlinks/renames), but we do not want to merge dirent * events referring to subdirs with dirent events referring to * non subdirs, otherwise, user won't be able to tell from a * mask FAN_CREATE|FAN_DELETE|FAN_ONDIR if it describes mkdir+ * unlink pair or rmdir+create pair of events. */ if ((old->mask & FS_ISDIR) != (new->mask & FS_ISDIR)) return false; /* * FAN_RENAME event is reported with special info record types, * so we cannot merge it with other events. */ if ((old->mask & FAN_RENAME) != (new->mask & FAN_RENAME)) return false; switch (old->type) { case FANOTIFY_EVENT_TYPE_PATH: return fanotify_path_equal(fanotify_event_path(old), fanotify_event_path(new)); case FANOTIFY_EVENT_TYPE_FID: return fanotify_fid_event_equal(FANOTIFY_FE(old), FANOTIFY_FE(new)); case FANOTIFY_EVENT_TYPE_FID_NAME: return fanotify_name_event_equal(FANOTIFY_NE(old), FANOTIFY_NE(new)); case FANOTIFY_EVENT_TYPE_FS_ERROR: return fanotify_error_event_equal(FANOTIFY_EE(old), FANOTIFY_EE(new)); default: WARN_ON_ONCE(1); } return false; } /* Limit event merges to limit CPU overhead per event */ #define FANOTIFY_MAX_MERGE_EVENTS 128 /* and the list better be locked by something too! */ static int fanotify_merge(struct fsnotify_group *group, struct fsnotify_event *event) { struct fanotify_event *old, *new = FANOTIFY_E(event); unsigned int bucket = fanotify_event_hash_bucket(group, new); struct hlist_head *hlist = &group->fanotify_data.merge_hash[bucket]; int i = 0; pr_debug("%s: group=%p event=%p bucket=%u\n", __func__, group, event, bucket); /* * Don't merge a permission event with any other event so that we know * the event structure we have created in fanotify_handle_event() is the * one we should check for permission response. */ if (fanotify_is_perm_event(new->mask)) return 0; hlist_for_each_entry(old, hlist, merge_list) { if (++i > FANOTIFY_MAX_MERGE_EVENTS) break; if (fanotify_should_merge(old, new)) { old->mask |= new->mask; if (fanotify_is_error_event(old->mask)) FANOTIFY_EE(old)->err_count++; return 1; } } return 0; } /* * Wait for response to permission event. The function also takes care of * freeing the permission event (or offloads that in case the wait is canceled * by a signal). The function returns 0 in case access got allowed by userspace, * -EPERM in case userspace disallowed the access, and -ERESTARTSYS in case * the wait got interrupted by a signal. */ static int fanotify_get_response(struct fsnotify_group *group, struct fanotify_perm_event *event, struct fsnotify_iter_info *iter_info) { int ret; pr_debug("%s: group=%p event=%p\n", __func__, group, event); ret = wait_event_killable(group->fanotify_data.access_waitq, event->state == FAN_EVENT_ANSWERED); /* Signal pending? */ if (ret < 0) { spin_lock(&group->notification_lock); /* Event reported to userspace and no answer yet? */ if (event->state == FAN_EVENT_REPORTED) { /* Event will get freed once userspace answers to it */ event->state = FAN_EVENT_CANCELED; spin_unlock(&group->notification_lock); return ret; } /* Event not yet reported? Just remove it. 
*/ if (event->state == FAN_EVENT_INIT) { fsnotify_remove_queued_event(group, &event->fae.fse); /* Permission events are not supposed to be hashed */ WARN_ON_ONCE(!hlist_unhashed(&event->fae.merge_list)); } /* * Event may be also answered in case signal delivery raced * with wakeup. In that case we have nothing to do besides * freeing the event and reporting error. */ spin_unlock(&group->notification_lock); goto out; } /* userspace responded, convert to something usable */ switch (event->response & FANOTIFY_RESPONSE_ACCESS) { case FAN_ALLOW: ret = 0; break; case FAN_DENY: default: ret = -EPERM; } /* Check if the response should be audited */ if (event->response & FAN_AUDIT) audit_fanotify(event->response & ~FAN_AUDIT, &event->audit_rule); pr_debug("%s: group=%p event=%p about to return ret=%d\n", __func__, group, event, ret); out: fsnotify_destroy_event(group, &event->fae.fse); return ret; } /* * This function returns a mask for an event that only contains the flags * that have been specifically requested by the user. Flags that may have * been included within the event mask, but have not been explicitly * requested by the user, will not be present in the returned mask. */ static u32 fanotify_group_event_mask(struct fsnotify_group *group, struct fsnotify_iter_info *iter_info, u32 *match_mask, u32 event_mask, const void *data, int data_type, struct inode *dir) { __u32 marks_mask = 0, marks_ignore_mask = 0; __u32 test_mask, user_mask = FANOTIFY_OUTGOING_EVENTS | FANOTIFY_EVENT_FLAGS; const struct path *path = fsnotify_data_path(data, data_type); unsigned int fid_mode = FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS); struct fsnotify_mark *mark; bool ondir = event_mask & FAN_ONDIR; int type; pr_debug("%s: report_mask=%x mask=%x data=%p data_type=%d\n", __func__, iter_info->report_mask, event_mask, data, data_type); if (!fid_mode) { /* Do we have path to open a file descriptor? */ if (!path) return 0; /* Path type events are only relevant for files and dirs */ if (!d_is_reg(path->dentry) && !d_can_lookup(path->dentry)) return 0; } else if (!(fid_mode & FAN_REPORT_FID)) { /* Do we have a directory inode to report? */ if (!dir && !ondir) return 0; } fsnotify_foreach_iter_mark_type(iter_info, mark, type) { /* * Apply ignore mask depending on event flags in ignore mask. */ marks_ignore_mask |= fsnotify_effective_ignore_mask(mark, ondir, type); /* * Send the event depending on event flags in mark mask. */ if (!fsnotify_mask_applicable(mark->mask, ondir, type)) continue; marks_mask |= mark->mask; /* Record the mark types of this group that matched the event */ *match_mask |= 1U << type; } test_mask = event_mask & marks_mask & ~marks_ignore_mask; /* * For dirent modification events (create/delete/move) that do not carry * the child entry name information, we report FAN_ONDIR for mkdir/rmdir * so user can differentiate them from creat/unlink. * * For backward compatibility and consistency, do not report FAN_ONDIR * to user in legacy fanotify mode (reporting fd) and report FAN_ONDIR * to user in fid mode for all event types. * * We never report FAN_EVENT_ON_CHILD to user, but we do pass it in to * fanotify_alloc_event() when group is reporting fid as indication * that event happened on child. */ if (fid_mode) { /* Do not report event flags without any event */ if (!(test_mask & ~FANOTIFY_EVENT_FLAGS)) return 0; } else { user_mask &= ~FANOTIFY_EVENT_FLAGS; } return test_mask & user_mask; } /* * Check size needed to encode fanotify_fh. * * Return size of encoded fh without fanotify_fh header. 
* Return 0 on failure to encode. */ static int fanotify_encode_fh_len(struct inode *inode) { int dwords = 0; int fh_len; if (!inode) return 0; exportfs_encode_fid(inode, NULL, &dwords); fh_len = dwords << 2; /* * struct fanotify_error_event might be preallocated and is * limited to MAX_HANDLE_SZ. This should never happen, but * safeguard by forcing an invalid file handle. */ if (WARN_ON_ONCE(fh_len > MAX_HANDLE_SZ)) return 0; return fh_len; } /* * Encode fanotify_fh. * * Return total size of encoded fh including fanotify_fh header. * Return 0 on failure to encode. */ static int fanotify_encode_fh(struct fanotify_fh *fh, struct inode *inode, unsigned int fh_len, unsigned int *hash, gfp_t gfp) { int dwords, type = 0; char *ext_buf = NULL; void *buf = fh->buf; int err; fh->type = FILEID_ROOT; fh->len = 0; fh->flags = 0; /* * Invalid FHs are used by FAN_FS_ERROR for errors not * linked to any inode. The f_handle won't be reported * back to userspace. */ if (!inode) goto out; /* * !gpf means preallocated variable size fh, but fh_len could * be zero in that case if encoding fh len failed. */ err = -ENOENT; if (fh_len < 4 || WARN_ON_ONCE(fh_len % 4) || fh_len > MAX_HANDLE_SZ) goto out_err; /* No external buffer in a variable size allocated fh */ if (gfp && fh_len > FANOTIFY_INLINE_FH_LEN) { /* Treat failure to allocate fh as failure to encode fh */ err = -ENOMEM; ext_buf = kmalloc(fh_len, gfp); if (!ext_buf) goto out_err; *fanotify_fh_ext_buf_ptr(fh) = ext_buf; buf = ext_buf; fh->flags |= FANOTIFY_FH_FLAG_EXT_BUF; } dwords = fh_len >> 2; type = exportfs_encode_fid(inode, buf, &dwords); err = -EINVAL; if (type <= 0 || type == FILEID_INVALID || fh_len != dwords << 2) goto out_err; fh->type = type; fh->len = fh_len; out: /* * Mix fh into event merge key. Hash might be NULL in case of * unhashed FID events (i.e. FAN_FS_ERROR). */ if (hash) *hash ^= fanotify_hash_fh(fh); return FANOTIFY_FH_HDR_LEN + fh_len; out_err: pr_warn_ratelimited("fanotify: failed to encode fid (type=%d, len=%d, err=%i)\n", type, fh_len, err); kfree(ext_buf); *fanotify_fh_ext_buf_ptr(fh) = NULL; /* Report the event without a file identifier on encode error */ fh->type = FILEID_INVALID; fh->len = 0; return 0; } /* * FAN_REPORT_FID is ambiguous in that it reports the fid of the child for * some events and the fid of the parent for create/delete/move events. * * With the FAN_REPORT_TARGET_FID flag, the fid of the child is reported * also in create/delete/move events in addition to the fid of the parent * and the name of the child. */ static inline bool fanotify_report_child_fid(unsigned int fid_mode, u32 mask) { if (mask & ALL_FSNOTIFY_DIRENT_EVENTS) return (fid_mode & FAN_REPORT_TARGET_FID); return (fid_mode & FAN_REPORT_FID) && !(mask & FAN_ONDIR); } /* * The inode to use as identifier when reporting fid depends on the event * and the group flags. * * With the group flag FAN_REPORT_TARGET_FID, always report the child fid. * * Without the group flag FAN_REPORT_TARGET_FID, report the modified directory * fid on dirent events and the child fid otherwise. * * For example: * FS_ATTRIB reports the child fid even if reported on a watched parent. * FS_CREATE reports the modified dir fid without FAN_REPORT_TARGET_FID. * and reports the created child fid with FAN_REPORT_TARGET_FID. 
*/ static struct inode *fanotify_fid_inode(u32 event_mask, const void *data, int data_type, struct inode *dir, unsigned int fid_mode) { if ((event_mask & ALL_FSNOTIFY_DIRENT_EVENTS) && !(fid_mode & FAN_REPORT_TARGET_FID)) return dir; return fsnotify_data_inode(data, data_type); } /* * The inode to use as identifier when reporting dir fid depends on the event. * Report the modified directory inode on dirent modification events. * Report the "victim" inode if "victim" is a directory. * Report the parent inode if "victim" is not a directory and event is * reported to parent. * Otherwise, do not report dir fid. */ static struct inode *fanotify_dfid_inode(u32 event_mask, const void *data, int data_type, struct inode *dir) { struct inode *inode = fsnotify_data_inode(data, data_type); if (event_mask & ALL_FSNOTIFY_DIRENT_EVENTS) return dir; if (inode && S_ISDIR(inode->i_mode)) return inode; return dir; } static struct fanotify_event *fanotify_alloc_path_event(const struct path *path, unsigned int *hash, gfp_t gfp) { struct fanotify_path_event *pevent; pevent = kmem_cache_alloc(fanotify_path_event_cachep, gfp); if (!pevent) return NULL; pevent->fae.type = FANOTIFY_EVENT_TYPE_PATH; pevent->path = *path; *hash ^= fanotify_hash_path(path); path_get(path); return &pevent->fae; } static struct fanotify_event *fanotify_alloc_perm_event(const struct path *path, gfp_t gfp) { struct fanotify_perm_event *pevent; pevent = kmem_cache_alloc(fanotify_perm_event_cachep, gfp); if (!pevent) return NULL; pevent->fae.type = FANOTIFY_EVENT_TYPE_PATH_PERM; pevent->response = 0; pevent->hdr.type = FAN_RESPONSE_INFO_NONE; pevent->hdr.pad = 0; pevent->hdr.len = 0; pevent->state = FAN_EVENT_INIT; pevent->path = *path; path_get(path); return &pevent->fae; } static struct fanotify_event *fanotify_alloc_fid_event(struct inode *id, __kernel_fsid_t *fsid, unsigned int *hash, gfp_t gfp) { struct fanotify_fid_event *ffe; ffe = kmem_cache_alloc(fanotify_fid_event_cachep, gfp); if (!ffe) return NULL; ffe->fae.type = FANOTIFY_EVENT_TYPE_FID; ffe->fsid = *fsid; *hash ^= fanotify_hash_fsid(fsid); fanotify_encode_fh(&ffe->object_fh, id, fanotify_encode_fh_len(id), hash, gfp); return &ffe->fae; } static struct fanotify_event *fanotify_alloc_name_event(struct inode *dir, __kernel_fsid_t *fsid, const struct qstr *name, struct inode *child, struct dentry *moved, unsigned int *hash, gfp_t gfp) { struct fanotify_name_event *fne; struct fanotify_info *info; struct fanotify_fh *dfh, *ffh; struct inode *dir2 = moved ? d_inode(moved->d_parent) : NULL; const struct qstr *name2 = moved ? &moved->d_name : NULL; unsigned int dir_fh_len = fanotify_encode_fh_len(dir); unsigned int dir2_fh_len = fanotify_encode_fh_len(dir2); unsigned int child_fh_len = fanotify_encode_fh_len(child); unsigned long name_len = name ? name->len : 0; unsigned long name2_len = name2 ? 
name2->len : 0; unsigned int len, size; /* Reserve terminating null byte even for empty name */ size = sizeof(*fne) + name_len + name2_len + 2; if (dir_fh_len) size += FANOTIFY_FH_HDR_LEN + dir_fh_len; if (dir2_fh_len) size += FANOTIFY_FH_HDR_LEN + dir2_fh_len; if (child_fh_len) size += FANOTIFY_FH_HDR_LEN + child_fh_len; fne = kmalloc(size, gfp); if (!fne) return NULL; fne->fae.type = FANOTIFY_EVENT_TYPE_FID_NAME; fne->fsid = *fsid; *hash ^= fanotify_hash_fsid(fsid); info = &fne->info; fanotify_info_init(info); if (dir_fh_len) { dfh = fanotify_info_dir_fh(info); len = fanotify_encode_fh(dfh, dir, dir_fh_len, hash, 0); fanotify_info_set_dir_fh(info, len); } if (dir2_fh_len) { dfh = fanotify_info_dir2_fh(info); len = fanotify_encode_fh(dfh, dir2, dir2_fh_len, hash, 0); fanotify_info_set_dir2_fh(info, len); } if (child_fh_len) { ffh = fanotify_info_file_fh(info); len = fanotify_encode_fh(ffh, child, child_fh_len, hash, 0); fanotify_info_set_file_fh(info, len); } if (name_len) { fanotify_info_copy_name(info, name); *hash ^= full_name_hash((void *)name_len, name->name, name_len); } if (name2_len) { fanotify_info_copy_name2(info, name2); *hash ^= full_name_hash((void *)name2_len, name2->name, name2_len); } pr_debug("%s: size=%u dir_fh_len=%u child_fh_len=%u name_len=%u name='%.*s'\n", __func__, size, dir_fh_len, child_fh_len, info->name_len, info->name_len, fanotify_info_name(info)); if (dir2_fh_len) { pr_debug("%s: dir2_fh_len=%u name2_len=%u name2='%.*s'\n", __func__, dir2_fh_len, info->name2_len, info->name2_len, fanotify_info_name2(info)); } return &fne->fae; } static struct fanotify_event *fanotify_alloc_error_event( struct fsnotify_group *group, __kernel_fsid_t *fsid, const void *data, int data_type, unsigned int *hash) { struct fs_error_report *report = fsnotify_data_error_report(data, data_type); struct inode *inode; struct fanotify_error_event *fee; int fh_len; if (WARN_ON_ONCE(!report)) return NULL; fee = mempool_alloc(&group->fanotify_data.error_events_pool, GFP_NOFS); if (!fee) return NULL; fee->fae.type = FANOTIFY_EVENT_TYPE_FS_ERROR; fee->error = report->error; fee->err_count = 1; fee->fsid = *fsid; inode = report->inode; fh_len = fanotify_encode_fh_len(inode); /* Bad fh_len. Fallback to using an invalid fh. Should never happen. */ if (!fh_len && inode) inode = NULL; fanotify_encode_fh(&fee->object_fh, inode, fh_len, NULL, 0); *hash ^= fanotify_hash_fsid(fsid); return &fee->fae; } static struct fanotify_event *fanotify_alloc_event( struct fsnotify_group *group, u32 mask, const void *data, int data_type, struct inode *dir, const struct qstr *file_name, __kernel_fsid_t *fsid, u32 match_mask) { struct fanotify_event *event = NULL; gfp_t gfp = GFP_KERNEL_ACCOUNT; unsigned int fid_mode = FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS); struct inode *id = fanotify_fid_inode(mask, data, data_type, dir, fid_mode); struct inode *dirid = fanotify_dfid_inode(mask, data, data_type, dir); const struct path *path = fsnotify_data_path(data, data_type); struct mem_cgroup *old_memcg; struct dentry *moved = NULL; struct inode *child = NULL; bool name_event = false; unsigned int hash = 0; bool ondir = mask & FAN_ONDIR; struct pid *pid; if ((fid_mode & FAN_REPORT_DIR_FID) && dirid) { /* * For certain events and group flags, report the child fid * in addition to reporting the parent fid and maybe child name. */ if (fanotify_report_child_fid(fid_mode, mask) && id != dirid) child = id; id = dirid; /* * We record file name only in a group with FAN_REPORT_NAME * and when we have a directory inode to report. 
* * For directory entry modification event, we record the fid of * the directory and the name of the modified entry. * * For event on non-directory that is reported to parent, we * record the fid of the parent and the name of the child. * * Even if not reporting name, we need a variable length * fanotify_name_event if reporting both parent and child fids. */ if (!(fid_mode & FAN_REPORT_NAME)) { name_event = !!child; file_name = NULL; } else if ((mask & ALL_FSNOTIFY_DIRENT_EVENTS) || !ondir) { name_event = true; } /* * In the special case of FAN_RENAME event, use the match_mask * to determine if we need to report only the old parent+name, * only the new parent+name or both. * 'dirid' and 'file_name' are the old parent+name and * 'moved' has the new parent+name. */ if (mask & FAN_RENAME) { bool report_old, report_new; if (WARN_ON_ONCE(!match_mask)) return NULL; /* Report both old and new parent+name if sb watching */ report_old = report_new = match_mask & (1U << FSNOTIFY_ITER_TYPE_SB); report_old |= match_mask & (1U << FSNOTIFY_ITER_TYPE_INODE); report_new |= match_mask & (1U << FSNOTIFY_ITER_TYPE_INODE2); if (!report_old) { /* Do not report old parent+name */ dirid = NULL; file_name = NULL; } if (report_new) { /* Report new parent+name */ moved = fsnotify_data_dentry(data, data_type); } } } /* * For queues with unlimited length lost events are not expected and * can possibly have security implications. Avoid losing events when * memory is short. For the limited size queues, avoid OOM killer in the * target monitoring memcg as it may have security repercussion. */ if (group->max_events == UINT_MAX) gfp |= __GFP_NOFAIL; else gfp |= __GFP_RETRY_MAYFAIL; /* Whoever is interested in the event, pays for the allocation. */ old_memcg = set_active_memcg(group->memcg); if (fanotify_is_perm_event(mask)) { event = fanotify_alloc_perm_event(path, gfp); } else if (fanotify_is_error_event(mask)) { event = fanotify_alloc_error_event(group, fsid, data, data_type, &hash); } else if (name_event && (file_name || moved || child)) { event = fanotify_alloc_name_event(dirid, fsid, file_name, child, moved, &hash, gfp); } else if (fid_mode) { event = fanotify_alloc_fid_event(id, fsid, &hash, gfp); } else { event = fanotify_alloc_path_event(path, &hash, gfp); } if (!event) goto out; if (FAN_GROUP_FLAG(group, FAN_REPORT_TID)) pid = get_pid(task_pid(current)); else pid = get_pid(task_tgid(current)); /* Mix event info, FAN_ONDIR flag and pid into event merge key */ hash ^= hash_long((unsigned long)pid | ondir, FANOTIFY_EVENT_HASH_BITS); fanotify_init_event(event, hash, mask); event->pid = pid; out: set_active_memcg(old_memcg); return event; } /* * Get cached fsid of the filesystem containing the object from any connector. * All connectors are supposed to have the same fsid, but we do not verify that * here. */ static __kernel_fsid_t fanotify_get_fsid(struct fsnotify_iter_info *iter_info) { struct fsnotify_mark *mark; int type; __kernel_fsid_t fsid = {}; fsnotify_foreach_iter_mark_type(iter_info, mark, type) { struct fsnotify_mark_connector *conn; conn = READ_ONCE(mark->connector); /* Mark is just getting destroyed or created? */ if (!conn) continue; if (!(conn->flags & FSNOTIFY_CONN_FLAG_HAS_FSID)) continue; /* Pairs with smp_wmb() in fsnotify_add_mark_list() */ smp_rmb(); fsid = conn->fsid; if (WARN_ON_ONCE(!fsid.val[0] && !fsid.val[1])) continue; return fsid; } return fsid; } /* * Add an event to hash table for faster merge. 
*/ static void fanotify_insert_event(struct fsnotify_group *group, struct fsnotify_event *fsn_event) { struct fanotify_event *event = FANOTIFY_E(fsn_event); unsigned int bucket = fanotify_event_hash_bucket(group, event); struct hlist_head *hlist = &group->fanotify_data.merge_hash[bucket]; assert_spin_locked(&group->notification_lock); if (!fanotify_is_hashed_event(event->mask)) return; pr_debug("%s: group=%p event=%p bucket=%u\n", __func__, group, event, bucket); hlist_add_head(&event->merge_list, hlist); } static int fanotify_handle_event(struct fsnotify_group *group, u32 mask, const void *data, int data_type, struct inode *dir, const struct qstr *file_name, u32 cookie, struct fsnotify_iter_info *iter_info) { int ret = 0; struct fanotify_event *event; struct fsnotify_event *fsn_event; __kernel_fsid_t fsid = {}; u32 match_mask = 0; BUILD_BUG_ON(FAN_ACCESS != FS_ACCESS); BUILD_BUG_ON(FAN_MODIFY != FS_MODIFY); BUILD_BUG_ON(FAN_ATTRIB != FS_ATTRIB); BUILD_BUG_ON(FAN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE); BUILD_BUG_ON(FAN_CLOSE_WRITE != FS_CLOSE_WRITE); BUILD_BUG_ON(FAN_OPEN != FS_OPEN); BUILD_BUG_ON(FAN_MOVED_TO != FS_MOVED_TO); BUILD_BUG_ON(FAN_MOVED_FROM != FS_MOVED_FROM); BUILD_BUG_ON(FAN_CREATE != FS_CREATE); BUILD_BUG_ON(FAN_DELETE != FS_DELETE); BUILD_BUG_ON(FAN_DELETE_SELF != FS_DELETE_SELF); BUILD_BUG_ON(FAN_MOVE_SELF != FS_MOVE_SELF); BUILD_BUG_ON(FAN_EVENT_ON_CHILD != FS_EVENT_ON_CHILD); BUILD_BUG_ON(FAN_Q_OVERFLOW != FS_Q_OVERFLOW); BUILD_BUG_ON(FAN_OPEN_PERM != FS_OPEN_PERM); BUILD_BUG_ON(FAN_ACCESS_PERM != FS_ACCESS_PERM); BUILD_BUG_ON(FAN_ONDIR != FS_ISDIR); BUILD_BUG_ON(FAN_OPEN_EXEC != FS_OPEN_EXEC); BUILD_BUG_ON(FAN_OPEN_EXEC_PERM != FS_OPEN_EXEC_PERM); BUILD_BUG_ON(FAN_FS_ERROR != FS_ERROR); BUILD_BUG_ON(FAN_RENAME != FS_RENAME); BUILD_BUG_ON(HWEIGHT32(ALL_FANOTIFY_EVENT_BITS) != 21); mask = fanotify_group_event_mask(group, iter_info, &match_mask, mask, data, data_type, dir); if (!mask) return 0; pr_debug("%s: group=%p mask=%x report_mask=%x\n", __func__, group, mask, match_mask); if (fanotify_is_perm_event(mask)) { /* * fsnotify_prepare_user_wait() fails if we race with mark * deletion. Just let the operation pass in that case. */ if (!fsnotify_prepare_user_wait(iter_info)) return 0; } if (FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS)) { fsid = fanotify_get_fsid(iter_info); /* Racing with mark destruction or creation? */ if (!fsid.val[0] && !fsid.val[1]) return 0; } event = fanotify_alloc_event(group, mask, data, data_type, dir, file_name, &fsid, match_mask); ret = -ENOMEM; if (unlikely(!event)) { /* * We don't queue overflow events for permission events as * there the access is denied and so no event is in fact lost. */ if (!fanotify_is_perm_event(mask)) fsnotify_queue_overflow(group); goto finish; } fsn_event = &event->fse; ret = fsnotify_insert_event(group, fsn_event, fanotify_merge, fanotify_insert_event); if (ret) { /* Permission events shouldn't be merged */ BUG_ON(ret == 1 && mask & FANOTIFY_PERM_EVENTS); /* Our event wasn't used in the end. Free it. 
*/ fsnotify_destroy_event(group, fsn_event); ret = 0; } else if (fanotify_is_perm_event(mask)) { ret = fanotify_get_response(group, FANOTIFY_PERM(event), iter_info); } finish: if (fanotify_is_perm_event(mask)) fsnotify_finish_user_wait(iter_info); return ret; } static void fanotify_free_group_priv(struct fsnotify_group *group) { kfree(group->fanotify_data.merge_hash); if (group->fanotify_data.ucounts) dec_ucount(group->fanotify_data.ucounts, UCOUNT_FANOTIFY_GROUPS); if (mempool_initialized(&group->fanotify_data.error_events_pool)) mempool_exit(&group->fanotify_data.error_events_pool); } static void fanotify_free_path_event(struct fanotify_event *event) { path_put(fanotify_event_path(event)); kmem_cache_free(fanotify_path_event_cachep, FANOTIFY_PE(event)); } static void fanotify_free_perm_event(struct fanotify_event *event) { path_put(fanotify_event_path(event)); kmem_cache_free(fanotify_perm_event_cachep, FANOTIFY_PERM(event)); } static void fanotify_free_fid_event(struct fanotify_event *event) { struct fanotify_fid_event *ffe = FANOTIFY_FE(event); if (fanotify_fh_has_ext_buf(&ffe->object_fh)) kfree(fanotify_fh_ext_buf(&ffe->object_fh)); kmem_cache_free(fanotify_fid_event_cachep, ffe); } static void fanotify_free_name_event(struct fanotify_event *event) { kfree(FANOTIFY_NE(event)); } static void fanotify_free_error_event(struct fsnotify_group *group, struct fanotify_event *event) { struct fanotify_error_event *fee = FANOTIFY_EE(event); mempool_free(fee, &group->fanotify_data.error_events_pool); } static void fanotify_free_event(struct fsnotify_group *group, struct fsnotify_event *fsn_event) { struct fanotify_event *event; event = FANOTIFY_E(fsn_event); put_pid(event->pid); switch (event->type) { case FANOTIFY_EVENT_TYPE_PATH: fanotify_free_path_event(event); break; case FANOTIFY_EVENT_TYPE_PATH_PERM: fanotify_free_perm_event(event); break; case FANOTIFY_EVENT_TYPE_FID: fanotify_free_fid_event(event); break; case FANOTIFY_EVENT_TYPE_FID_NAME: fanotify_free_name_event(event); break; case FANOTIFY_EVENT_TYPE_OVERFLOW: kfree(event); break; case FANOTIFY_EVENT_TYPE_FS_ERROR: fanotify_free_error_event(group, event); break; default: WARN_ON_ONCE(1); } } static void fanotify_freeing_mark(struct fsnotify_mark *mark, struct fsnotify_group *group) { if (!FAN_GROUP_FLAG(group, FAN_UNLIMITED_MARKS)) dec_ucount(group->fanotify_data.ucounts, UCOUNT_FANOTIFY_MARKS); } static void fanotify_free_mark(struct fsnotify_mark *fsn_mark) { kmem_cache_free(fanotify_mark_cache, fsn_mark); } const struct fsnotify_ops fanotify_fsnotify_ops = { .handle_event = fanotify_handle_event, .free_group_priv = fanotify_free_group_priv, .free_event = fanotify_free_event, .freeing_mark = fanotify_freeing_mark, .free_mark = fanotify_free_mark, };
linux-master
fs/notify/fanotify/fanotify.c
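The file above implements the kernel side of fanotify: event allocation, hash-bucket merging and queueing in fanotify_handle_event(). A minimal userspace counterpart is sketched below, assuming the glibc <sys/fanotify.h> wrappers and CAP_SYS_ADMIN; the watched path "/tmp" and the buffer size are arbitrary placeholders and error handling is abbreviated. This is an illustrative sketch, not code from the kernel tree.
/*
 * Hedged userspace sketch: listen for plain (non-permission) fanotify
 * notifications, the path served by fanotify_handle_event() above.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/fanotify.h>

int main(void)
{
	/* Notification-only class: no blocking permission events. */
	int fan_fd = fanotify_init(FAN_CLASS_NOTIF, O_RDONLY);
	if (fan_fd < 0) {
		perror("fanotify_init");
		return 1;
	}

	/* Watch opens and close-after-write for children of "/tmp". */
	if (fanotify_mark(fan_fd, FAN_MARK_ADD,
			  FAN_OPEN | FAN_CLOSE_WRITE | FAN_EVENT_ON_CHILD,
			  AT_FDCWD, "/tmp") < 0) {
		perror("fanotify_mark");
		return 1;
	}

	for (;;) {
		/* Buffer must be aligned for struct fanotify_event_metadata. */
		char buf[4096] __attribute__((aligned(__alignof__(struct fanotify_event_metadata))));
		ssize_t len = read(fan_fd, buf, sizeof(buf));
		struct fanotify_event_metadata *md;

		if (len <= 0)
			break;
		for (md = (struct fanotify_event_metadata *)buf;
		     FAN_EVENT_OK(md, len); md = FAN_EVENT_NEXT(md, len)) {
			printf("mask=0x%llx pid=%d\n",
			       (unsigned long long)md->mask, md->pid);
			if (md->fd >= 0)
				close(md->fd);	/* each event carries an open fd */
		}
	}
	return 0;
}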
// SPDX-License-Identifier: GPL-2.0-or-later /* * Directory notifications for Linux. * * Copyright (C) 2000,2001,2002 Stephen Rothwell * * Copyright (C) 2009 Eric Paris <Red Hat Inc> * dnotify was largly rewritten to use the new fsnotify infrastructure */ #include <linux/fs.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/sched/signal.h> #include <linux/dnotify.h> #include <linux/init.h> #include <linux/security.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/fdtable.h> #include <linux/fsnotify_backend.h> static int dir_notify_enable __read_mostly = 1; #ifdef CONFIG_SYSCTL static struct ctl_table dnotify_sysctls[] = { { .procname = "dir-notify-enable", .data = &dir_notify_enable, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, {} }; static void __init dnotify_sysctl_init(void) { register_sysctl_init("fs", dnotify_sysctls); } #else #define dnotify_sysctl_init() do { } while (0) #endif static struct kmem_cache *dnotify_struct_cache __read_mostly; static struct kmem_cache *dnotify_mark_cache __read_mostly; static struct fsnotify_group *dnotify_group __read_mostly; /* * dnotify will attach one of these to each inode (i_fsnotify_marks) which * is being watched by dnotify. If multiple userspace applications are watching * the same directory with dnotify their information is chained in dn */ struct dnotify_mark { struct fsnotify_mark fsn_mark; struct dnotify_struct *dn; }; /* * When a process starts or stops watching an inode the set of events which * dnotify cares about for that inode may change. This function runs the * list of everything receiving dnotify events about this directory and calculates * the set of all those events. After it updates what dnotify is interested in * it calls the fsnotify function so it can update the set of all events relevant * to this inode. */ static void dnotify_recalc_inode_mask(struct fsnotify_mark *fsn_mark) { __u32 new_mask = 0; struct dnotify_struct *dn; struct dnotify_mark *dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark); assert_spin_locked(&fsn_mark->lock); for (dn = dn_mark->dn; dn != NULL; dn = dn->dn_next) new_mask |= (dn->dn_mask & ~FS_DN_MULTISHOT); if (fsn_mark->mask == new_mask) return; fsn_mark->mask = new_mask; fsnotify_recalc_mask(fsn_mark->connector); } /* * Mains fsnotify call where events are delivered to dnotify. * Find the dnotify mark on the relevant inode, run the list of dnotify structs * on that mark and determine which of them has expressed interest in receiving * events of this type. When found send the correct process and signal and * destroy the dnotify struct if it was not registered to receive multiple * events. 
*/ static int dnotify_handle_event(struct fsnotify_mark *inode_mark, u32 mask, struct inode *inode, struct inode *dir, const struct qstr *name, u32 cookie) { struct dnotify_mark *dn_mark; struct dnotify_struct *dn; struct dnotify_struct **prev; struct fown_struct *fown; __u32 test_mask = mask & ~FS_EVENT_ON_CHILD; /* not a dir, dnotify doesn't care */ if (!dir && !(mask & FS_ISDIR)) return 0; dn_mark = container_of(inode_mark, struct dnotify_mark, fsn_mark); spin_lock(&inode_mark->lock); prev = &dn_mark->dn; while ((dn = *prev) != NULL) { if ((dn->dn_mask & test_mask) == 0) { prev = &dn->dn_next; continue; } fown = &dn->dn_filp->f_owner; send_sigio(fown, dn->dn_fd, POLL_MSG); if (dn->dn_mask & FS_DN_MULTISHOT) prev = &dn->dn_next; else { *prev = dn->dn_next; kmem_cache_free(dnotify_struct_cache, dn); dnotify_recalc_inode_mask(inode_mark); } } spin_unlock(&inode_mark->lock); return 0; } static void dnotify_free_mark(struct fsnotify_mark *fsn_mark) { struct dnotify_mark *dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark); BUG_ON(dn_mark->dn); kmem_cache_free(dnotify_mark_cache, dn_mark); } static const struct fsnotify_ops dnotify_fsnotify_ops = { .handle_inode_event = dnotify_handle_event, .free_mark = dnotify_free_mark, }; /* * Called every time a file is closed. Looks first for a dnotify mark on the * inode. If one is found run all of the ->dn structures attached to that * mark for one relevant to this process closing the file and remove that * dnotify_struct. If that was the last dnotify_struct also remove the * fsnotify_mark. */ void dnotify_flush(struct file *filp, fl_owner_t id) { struct fsnotify_mark *fsn_mark; struct dnotify_mark *dn_mark; struct dnotify_struct *dn; struct dnotify_struct **prev; struct inode *inode; bool free = false; inode = file_inode(filp); if (!S_ISDIR(inode->i_mode)) return; fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, dnotify_group); if (!fsn_mark) return; dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark); fsnotify_group_lock(dnotify_group); spin_lock(&fsn_mark->lock); prev = &dn_mark->dn; while ((dn = *prev) != NULL) { if ((dn->dn_owner == id) && (dn->dn_filp == filp)) { *prev = dn->dn_next; kmem_cache_free(dnotify_struct_cache, dn); dnotify_recalc_inode_mask(fsn_mark); break; } prev = &dn->dn_next; } spin_unlock(&fsn_mark->lock); /* nothing else could have found us thanks to the dnotify_groups mark_mutex */ if (dn_mark->dn == NULL) { fsnotify_detach_mark(fsn_mark); free = true; } fsnotify_group_unlock(dnotify_group); if (free) fsnotify_free_mark(fsn_mark); fsnotify_put_mark(fsn_mark); } /* this conversion is done only at watch creation */ static __u32 convert_arg(unsigned int arg) { __u32 new_mask = FS_EVENT_ON_CHILD; if (arg & DN_MULTISHOT) new_mask |= FS_DN_MULTISHOT; if (arg & DN_DELETE) new_mask |= (FS_DELETE | FS_MOVED_FROM); if (arg & DN_MODIFY) new_mask |= FS_MODIFY; if (arg & DN_ACCESS) new_mask |= FS_ACCESS; if (arg & DN_ATTRIB) new_mask |= FS_ATTRIB; if (arg & DN_RENAME) new_mask |= FS_RENAME; if (arg & DN_CREATE) new_mask |= (FS_CREATE | FS_MOVED_TO); return new_mask; } /* * If multiple processes watch the same inode with dnotify there is only one * dnotify mark in inode->i_fsnotify_marks but we chain a dnotify_struct * onto that mark. This function either attaches the new dnotify_struct onto * that list, or it |= the mask onto an existing dnofiy_struct. 
*/ static int attach_dn(struct dnotify_struct *dn, struct dnotify_mark *dn_mark, fl_owner_t id, int fd, struct file *filp, __u32 mask) { struct dnotify_struct *odn; odn = dn_mark->dn; while (odn != NULL) { /* adding more events to existing dnofiy_struct? */ if ((odn->dn_owner == id) && (odn->dn_filp == filp)) { odn->dn_fd = fd; odn->dn_mask |= mask; return -EEXIST; } odn = odn->dn_next; } dn->dn_mask = mask; dn->dn_fd = fd; dn->dn_filp = filp; dn->dn_owner = id; dn->dn_next = dn_mark->dn; dn_mark->dn = dn; return 0; } /* * When a process calls fcntl to attach a dnotify watch to a directory it ends * up here. Allocate both a mark for fsnotify to add and a dnotify_struct to be * attached to the fsnotify_mark. */ int fcntl_dirnotify(int fd, struct file *filp, unsigned int arg) { struct dnotify_mark *new_dn_mark, *dn_mark; struct fsnotify_mark *new_fsn_mark, *fsn_mark; struct dnotify_struct *dn; struct inode *inode; fl_owner_t id = current->files; struct file *f; int destroy = 0, error = 0; __u32 mask; /* we use these to tell if we need to kfree */ new_fsn_mark = NULL; dn = NULL; if (!dir_notify_enable) { error = -EINVAL; goto out_err; } /* a 0 mask means we are explicitly removing the watch */ if ((arg & ~DN_MULTISHOT) == 0) { dnotify_flush(filp, id); error = 0; goto out_err; } /* dnotify only works on directories */ inode = file_inode(filp); if (!S_ISDIR(inode->i_mode)) { error = -ENOTDIR; goto out_err; } /* * convert the userspace DN_* "arg" to the internal FS_* * defined in fsnotify */ mask = convert_arg(arg); error = security_path_notify(&filp->f_path, mask, FSNOTIFY_OBJ_TYPE_INODE); if (error) goto out_err; /* expect most fcntl to add new rather than augment old */ dn = kmem_cache_alloc(dnotify_struct_cache, GFP_KERNEL); if (!dn) { error = -ENOMEM; goto out_err; } /* new fsnotify mark, we expect most fcntl calls to add a new mark */ new_dn_mark = kmem_cache_alloc(dnotify_mark_cache, GFP_KERNEL); if (!new_dn_mark) { error = -ENOMEM; goto out_err; } /* set up the new_fsn_mark and new_dn_mark */ new_fsn_mark = &new_dn_mark->fsn_mark; fsnotify_init_mark(new_fsn_mark, dnotify_group); new_fsn_mark->mask = mask; new_dn_mark->dn = NULL; /* this is needed to prevent the fcntl/close race described below */ fsnotify_group_lock(dnotify_group); /* add the new_fsn_mark or find an old one. */ fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, dnotify_group); if (fsn_mark) { dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark); spin_lock(&fsn_mark->lock); } else { error = fsnotify_add_inode_mark_locked(new_fsn_mark, inode, 0); if (error) { fsnotify_group_unlock(dnotify_group); goto out_err; } spin_lock(&new_fsn_mark->lock); fsn_mark = new_fsn_mark; dn_mark = new_dn_mark; /* we used new_fsn_mark, so don't free it */ new_fsn_mark = NULL; } rcu_read_lock(); f = lookup_fd_rcu(fd); rcu_read_unlock(); /* if (f != filp) means that we lost a race and another task/thread * actually closed the fd we are still playing with before we grabbed * the dnotify_groups mark_mutex and fsn_mark->lock. Since closing the * fd is the only time we clean up the marks we need to get our mark * off the list. */ if (f != filp) { /* if we added ourselves, shoot ourselves, it's possible that * the flush actually did shoot this fsn_mark. That's fine too * since multiple calls to destroy_mark is perfectly safe, if * we found a dn_mark already attached to the inode, just sod * off silently as the flush at close time dealt with it. 
*/ if (dn_mark == new_dn_mark) destroy = 1; error = 0; goto out; } __f_setown(filp, task_pid(current), PIDTYPE_TGID, 0); error = attach_dn(dn, dn_mark, id, fd, filp, mask); /* !error means that we attached the dn to the dn_mark, so don't free it */ if (!error) dn = NULL; /* -EEXIST means that we didn't add this new dn and used an old one. * that isn't an error (and the unused dn should be freed) */ else if (error == -EEXIST) error = 0; dnotify_recalc_inode_mask(fsn_mark); out: spin_unlock(&fsn_mark->lock); if (destroy) fsnotify_detach_mark(fsn_mark); fsnotify_group_unlock(dnotify_group); if (destroy) fsnotify_free_mark(fsn_mark); fsnotify_put_mark(fsn_mark); out_err: if (new_fsn_mark) fsnotify_put_mark(new_fsn_mark); if (dn) kmem_cache_free(dnotify_struct_cache, dn); return error; } static int __init dnotify_init(void) { dnotify_struct_cache = KMEM_CACHE(dnotify_struct, SLAB_PANIC|SLAB_ACCOUNT); dnotify_mark_cache = KMEM_CACHE(dnotify_mark, SLAB_PANIC|SLAB_ACCOUNT); dnotify_group = fsnotify_alloc_group(&dnotify_fsnotify_ops, FSNOTIFY_GROUP_NOFS); if (IS_ERR(dnotify_group)) panic("unable to allocate fsnotify group for dnotify\n"); dnotify_sysctl_init(); return 0; } module_init(dnotify_init)
linux-master
fs/notify/dnotify/dnotify.c
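For contrast with the kernel path above, here is a hedged userspace sketch of the legacy dnotify API that dnotify_handle_event() serves: a directory fd is armed with fcntl(F_NOTIFY) and changes arrive as a real-time signal whose siginfo identifies the fd. The watched directory and the signal number are arbitrary choices, and error checking is omitted for brevity; this is not code from the kernel tree.
/*
 * Hedged userspace sketch of the dnotify interface (fcntl F_NOTIFY).
 */
#define _GNU_SOURCE		/* F_NOTIFY, F_SETSIG, DN_* */
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t notified_fd = -1;

static void on_dnotify(int sig, siginfo_t *si, void *ctx)
{
	notified_fd = si->si_fd;	/* which watched directory fired */
}

int main(void)
{
	struct sigaction sa = { .sa_sigaction = on_dnotify,
				.sa_flags = SA_SIGINFO };
	int dir_fd = open("/tmp", O_RDONLY | O_DIRECTORY);

	sigaction(SIGRTMIN, &sa, NULL);
	fcntl(dir_fd, F_SETSIG, SIGRTMIN);	/* RT signal instead of SIGIO */
	/* DN_MULTISHOT keeps the watch armed after the first event. */
	fcntl(dir_fd, F_NOTIFY,
	      DN_CREATE | DN_DELETE | DN_RENAME | DN_MULTISHOT);

	for (;;) {
		pause();
		if (notified_fd != -1) {
			printf("change under directory fd %d\n", (int)notified_fd);
			notified_fd = -1;
		}
	}
}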
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 1997-1998 Transmeta Corporation -- All Rights Reserved * Copyright 2001-2006 Ian Kent <[email protected]> */ #include <linux/sched/signal.h> #include "autofs_i.h" /* We make this a static variable rather than a part of the superblock; it * is better if we don't reassign numbers easily even across filesystems */ static autofs_wqt_t autofs_next_wait_queue = 1; void autofs_catatonic_mode(struct autofs_sb_info *sbi) { struct autofs_wait_queue *wq, *nwq; mutex_lock(&sbi->wq_mutex); if (sbi->flags & AUTOFS_SBI_CATATONIC) { mutex_unlock(&sbi->wq_mutex); return; } pr_debug("entering catatonic mode\n"); sbi->flags |= AUTOFS_SBI_CATATONIC; wq = sbi->queues; sbi->queues = NULL; /* Erase all wait queues */ while (wq) { nwq = wq->next; wq->status = -ENOENT; /* Magic is gone - report failure */ kfree(wq->name.name - wq->offset); wq->name.name = NULL; wake_up(&wq->queue); if (!--wq->wait_ctr) kfree(wq); wq = nwq; } fput(sbi->pipe); /* Close the pipe */ sbi->pipe = NULL; sbi->pipefd = -1; mutex_unlock(&sbi->wq_mutex); } static int autofs_write(struct autofs_sb_info *sbi, struct file *file, const void *addr, int bytes) { unsigned long sigpipe, flags; const char *data = (const char *)addr; ssize_t wr = 0; sigpipe = sigismember(&current->pending.signal, SIGPIPE); mutex_lock(&sbi->pipe_mutex); while (bytes) { wr = __kernel_write(file, data, bytes, NULL); if (wr <= 0) break; data += wr; bytes -= wr; } mutex_unlock(&sbi->pipe_mutex); /* Keep the currently executing process from receiving a * SIGPIPE unless it was already supposed to get one */ if (wr == -EPIPE && !sigpipe) { spin_lock_irqsave(&current->sighand->siglock, flags); sigdelset(&current->pending.signal, SIGPIPE); recalc_sigpending(); spin_unlock_irqrestore(&current->sighand->siglock, flags); } /* if 'wr' returned 0 (impossible) we assume -EIO (safe) */ return bytes == 0 ? 0 : wr < 0 ? 
wr : -EIO; } static void autofs_notify_daemon(struct autofs_sb_info *sbi, struct autofs_wait_queue *wq, int type) { union { struct autofs_packet_hdr hdr; union autofs_packet_union v4_pkt; union autofs_v5_packet_union v5_pkt; } pkt; struct file *pipe = NULL; size_t pktsz; int ret; pr_debug("wait id = 0x%08lx, name = %.*s, type=%d\n", (unsigned long) wq->wait_queue_token, wq->name.len, wq->name.name, type); memset(&pkt, 0, sizeof(pkt)); /* For security reasons */ pkt.hdr.proto_version = sbi->version; pkt.hdr.type = type; switch (type) { /* Kernel protocol v4 missing and expire packets */ case autofs_ptype_missing: { struct autofs_packet_missing *mp = &pkt.v4_pkt.missing; pktsz = sizeof(*mp); mp->wait_queue_token = wq->wait_queue_token; mp->len = wq->name.len; memcpy(mp->name, wq->name.name, wq->name.len); mp->name[wq->name.len] = '\0'; break; } case autofs_ptype_expire_multi: { struct autofs_packet_expire_multi *ep = &pkt.v4_pkt.expire_multi; pktsz = sizeof(*ep); ep->wait_queue_token = wq->wait_queue_token; ep->len = wq->name.len; memcpy(ep->name, wq->name.name, wq->name.len); ep->name[wq->name.len] = '\0'; break; } /* * Kernel protocol v5 packet for handling indirect and direct * mount missing and expire requests */ case autofs_ptype_missing_indirect: case autofs_ptype_expire_indirect: case autofs_ptype_missing_direct: case autofs_ptype_expire_direct: { struct autofs_v5_packet *packet = &pkt.v5_pkt.v5_packet; struct user_namespace *user_ns = sbi->pipe->f_cred->user_ns; pktsz = sizeof(*packet); packet->wait_queue_token = wq->wait_queue_token; packet->len = wq->name.len; memcpy(packet->name, wq->name.name, wq->name.len); packet->name[wq->name.len] = '\0'; packet->dev = wq->dev; packet->ino = wq->ino; packet->uid = from_kuid_munged(user_ns, wq->uid); packet->gid = from_kgid_munged(user_ns, wq->gid); packet->pid = wq->pid; packet->tgid = wq->tgid; break; } default: pr_warn("bad type %d!\n", type); mutex_unlock(&sbi->wq_mutex); return; } pipe = get_file(sbi->pipe); mutex_unlock(&sbi->wq_mutex); switch (ret = autofs_write(sbi, pipe, &pkt, pktsz)) { case 0: break; case -ENOMEM: case -ERESTARTSYS: /* Just fail this one */ autofs_wait_release(sbi, wq->wait_queue_token, ret); break; default: autofs_catatonic_mode(sbi); break; } fput(pipe); } static struct autofs_wait_queue * autofs_find_wait(struct autofs_sb_info *sbi, const struct qstr *qstr) { struct autofs_wait_queue *wq; for (wq = sbi->queues; wq; wq = wq->next) { if (wq->name.hash == qstr->hash && wq->name.len == qstr->len && wq->name.name && !memcmp(wq->name.name, qstr->name, qstr->len)) break; } return wq; } /* * Check if we have a valid request. * Returns * 1 if the request should continue. * In this case we can return an autofs_wait_queue entry if one is * found or NULL to idicate a new wait needs to be created. * 0 or a negative errno if the request shouldn't continue. */ static int validate_request(struct autofs_wait_queue **wait, struct autofs_sb_info *sbi, const struct qstr *qstr, const struct path *path, enum autofs_notify notify) { struct dentry *dentry = path->dentry; struct autofs_wait_queue *wq; struct autofs_info *ino; if (sbi->flags & AUTOFS_SBI_CATATONIC) return -ENOENT; /* Wait in progress, continue; */ wq = autofs_find_wait(sbi, qstr); if (wq) { *wait = wq; return 1; } *wait = NULL; /* If we don't yet have any info this is a new request */ ino = autofs_dentry_ino(dentry); if (!ino) return 1; /* * If we've been asked to wait on an existing expire (NFY_NONE) * but there is no wait in the queue ... 
*/ if (notify == NFY_NONE) { /* * Either we've betean the pending expire to post it's * wait or it finished while we waited on the mutex. * So we need to wait till either, the wait appears * or the expire finishes. */ while (ino->flags & AUTOFS_INF_EXPIRING) { mutex_unlock(&sbi->wq_mutex); schedule_timeout_interruptible(HZ/10); if (mutex_lock_interruptible(&sbi->wq_mutex)) return -EINTR; if (sbi->flags & AUTOFS_SBI_CATATONIC) return -ENOENT; wq = autofs_find_wait(sbi, qstr); if (wq) { *wait = wq; return 1; } } /* * Not ideal but the status has already gone. Of the two * cases where we wait on NFY_NONE neither depend on the * return status of the wait. */ return 0; } /* * If we've been asked to trigger a mount and the request * completed while we waited on the mutex ... */ if (notify == NFY_MOUNT) { struct dentry *new = NULL; struct path this; int valid = 1; /* * If the dentry was successfully mounted while we slept * on the wait queue mutex we can return success. If it * isn't mounted (doesn't have submounts for the case of * a multi-mount with no mount at it's base) we can * continue on and create a new request. */ if (!IS_ROOT(dentry)) { if (d_unhashed(dentry) && d_really_is_positive(dentry)) { struct dentry *parent = dentry->d_parent; new = d_lookup(parent, &dentry->d_name); if (new) dentry = new; } } this.mnt = path->mnt; this.dentry = dentry; if (path_has_submounts(&this)) valid = 0; if (new) dput(new); return valid; } return 1; } int autofs_wait(struct autofs_sb_info *sbi, const struct path *path, enum autofs_notify notify) { struct dentry *dentry = path->dentry; struct autofs_wait_queue *wq; struct qstr qstr; char *name; int status, ret, type; unsigned int offset = 0; pid_t pid; pid_t tgid; /* In catatonic mode, we don't wait for nobody */ if (sbi->flags & AUTOFS_SBI_CATATONIC) return -ENOENT; /* * Try translating pids to the namespace of the daemon. * * Zero means failure: we are in an unrelated pid namespace. */ pid = task_pid_nr_ns(current, ns_of_pid(sbi->oz_pgrp)); tgid = task_tgid_nr_ns(current, ns_of_pid(sbi->oz_pgrp)); if (pid == 0 || tgid == 0) return -ENOENT; if (d_really_is_negative(dentry)) { /* * A wait for a negative dentry is invalid for certain * cases. A direct or offset mount "always" has its mount * point directory created and so the request dentry must * be positive or the map key doesn't exist. The situation * is very similar for indirect mounts except only dentrys * in the root of the autofs file system may be negative. 
*/ if (autofs_type_trigger(sbi->type)) return -ENOENT; else if (!IS_ROOT(dentry->d_parent)) return -ENOENT; } name = kmalloc(NAME_MAX + 1, GFP_KERNEL); if (!name) return -ENOMEM; /* If this is a direct mount request create a dummy name */ if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type)) { qstr.name = name; qstr.len = sprintf(name, "%p", dentry); } else { char *p = dentry_path_raw(dentry, name, NAME_MAX); if (IS_ERR(p)) { kfree(name); return -ENOENT; } qstr.name = ++p; // skip the leading slash qstr.len = strlen(p); offset = p - name; } qstr.hash = full_name_hash(dentry, qstr.name, qstr.len); if (mutex_lock_interruptible(&sbi->wq_mutex)) { kfree(name); return -EINTR; } ret = validate_request(&wq, sbi, &qstr, path, notify); if (ret <= 0) { if (ret != -EINTR) mutex_unlock(&sbi->wq_mutex); kfree(name); return ret; } if (!wq) { /* Create a new wait queue */ wq = kmalloc(sizeof(struct autofs_wait_queue), GFP_KERNEL); if (!wq) { kfree(name); mutex_unlock(&sbi->wq_mutex); return -ENOMEM; } wq->wait_queue_token = autofs_next_wait_queue; if (++autofs_next_wait_queue == 0) autofs_next_wait_queue = 1; wq->next = sbi->queues; sbi->queues = wq; init_waitqueue_head(&wq->queue); memcpy(&wq->name, &qstr, sizeof(struct qstr)); wq->offset = offset; wq->dev = autofs_get_dev(sbi); wq->ino = autofs_get_ino(sbi); wq->uid = current_uid(); wq->gid = current_gid(); wq->pid = pid; wq->tgid = tgid; wq->status = -EINTR; /* Status return if interrupted */ wq->wait_ctr = 2; if (sbi->version < 5) { if (notify == NFY_MOUNT) type = autofs_ptype_missing; else type = autofs_ptype_expire_multi; } else { if (notify == NFY_MOUNT) type = autofs_type_trigger(sbi->type) ? autofs_ptype_missing_direct : autofs_ptype_missing_indirect; else type = autofs_type_trigger(sbi->type) ? autofs_ptype_expire_direct : autofs_ptype_expire_indirect; } pr_debug("new wait id = 0x%08lx, name = %.*s, nfy=%d\n", (unsigned long) wq->wait_queue_token, wq->name.len, wq->name.name, notify); /* * autofs_notify_daemon() may block; it will unlock ->wq_mutex */ autofs_notify_daemon(sbi, wq, type); } else { wq->wait_ctr++; pr_debug("existing wait id = 0x%08lx, name = %.*s, nfy=%d\n", (unsigned long) wq->wait_queue_token, wq->name.len, wq->name.name, notify); mutex_unlock(&sbi->wq_mutex); kfree(name); } /* * wq->name.name is NULL iff the lock is already released * or the mount has been made catatonic. */ wait_event_killable(wq->queue, wq->name.name == NULL); status = wq->status; /* * For direct and offset mounts we need to track the requester's * uid and gid in the dentry info struct. This is so it can be * supplied, on request, by the misc device ioctl interface. * This is needed during daemon resatart when reconnecting * to existing, active, autofs mounts. The uid and gid (and * related string values) may be used for macro substitution * in autofs mount maps. */ if (!status) { struct autofs_info *ino; struct dentry *de = NULL; /* direct mount or browsable map */ ino = autofs_dentry_ino(dentry); if (!ino) { /* If not lookup actual dentry used */ de = d_lookup(dentry->d_parent, &dentry->d_name); if (de) ino = autofs_dentry_ino(de); } /* Set mount requester */ if (ino) { spin_lock(&sbi->fs_lock); ino->uid = wq->uid; ino->gid = wq->gid; spin_unlock(&sbi->fs_lock); } if (de) dput(de); } /* Are we the last process to need status? 
*/ mutex_lock(&sbi->wq_mutex); if (!--wq->wait_ctr) kfree(wq); mutex_unlock(&sbi->wq_mutex); return status; } int autofs_wait_release(struct autofs_sb_info *sbi, autofs_wqt_t wait_queue_token, int status) { struct autofs_wait_queue *wq, **wql; mutex_lock(&sbi->wq_mutex); for (wql = &sbi->queues; (wq = *wql) != NULL; wql = &wq->next) { if (wq->wait_queue_token == wait_queue_token) break; } if (!wq) { mutex_unlock(&sbi->wq_mutex); return -EINVAL; } *wql = wq->next; /* Unlink from chain */ kfree(wq->name.name - wq->offset); wq->name.name = NULL; /* Do not wait on this queue */ wq->status = status; wake_up(&wq->queue); if (!--wq->wait_ctr) kfree(wq); mutex_unlock(&sbi->wq_mutex); return 0; }
linux-master
fs/autofs/waitq.c
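autofs_wait() and autofs_notify_daemon() above form the kernel half of the request protocol: a packet is written to the daemon's pipe and the triggering process sleeps until the wait-queue token is released. A minimal sketch of the daemon half follows, assuming the v5 packet layout from <linux/auto_fs4.h>; pipe_fd (the read end of the pipe whose write end was passed via the "fd=" mount option) and root_fd (an fd open on the autofs mount, used for the acknowledging ioctl) are placeholders supplied by the caller, and per-architecture packet-size padding quirks are ignored. This is an illustrative sketch, not the actual automount daemon.
/*
 * Hedged daemon-side sketch: read one v5 request packet and release the
 * waiter with AUTOFS_IOC_READY (handled by autofs_wait_release() above).
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/auto_fs4.h>

static int handle_one_request(int pipe_fd, int root_fd)
{
	union autofs_v5_packet_union pkt;
	ssize_t n = read(pipe_fd, &pkt, sizeof(pkt));

	if (n < (ssize_t)sizeof(pkt.v5_packet))
		return -1;

	printf("token=%#lx type=%d name=%.*s\n",
	       (unsigned long)pkt.v5_packet.wait_queue_token,
	       pkt.hdr.type,
	       (int)pkt.v5_packet.len, pkt.v5_packet.name);

	/* A real daemon would perform the mount (or expire) here. */

	/* Wake the process blocked in autofs_wait() with status 0. */
	return ioctl(root_fd, AUTOFS_IOC_READY,
		     pkt.v5_packet.wait_queue_token);
}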
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 1997-1998 Transmeta Corporation -- All Rights Reserved */ #include <linux/module.h> #include <linux/init.h> #include "autofs_i.h" static struct dentry *autofs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_nodev(fs_type, flags, data, autofs_fill_super); } struct file_system_type autofs_fs_type = { .owner = THIS_MODULE, .name = "autofs", .mount = autofs_mount, .kill_sb = autofs_kill_sb, }; MODULE_ALIAS_FS("autofs"); MODULE_ALIAS("autofs"); static int __init init_autofs_fs(void) { int err; autofs_dev_ioctl_init(); err = register_filesystem(&autofs_fs_type); if (err) autofs_dev_ioctl_exit(); return err; } static void __exit exit_autofs_fs(void) { autofs_dev_ioctl_exit(); unregister_filesystem(&autofs_fs_type); } module_init(init_autofs_fs) module_exit(exit_autofs_fs) MODULE_LICENSE("GPL");
linux-master
fs/autofs/init.c
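init_autofs_fs() above only registers the "autofs" filesystem type; the daemon is expected to create the kernel pipe and perform the mount itself. A hedged sketch of that mount call follows; the mount point, protocol versions and process group are placeholder values, and the option string mirrors what parse_options() in fs/autofs/inode.c (later in this dump) accepts. This is a userspace illustration, not kernel code.
/*
 * Hypothetical helper: mount an indirect autofs filesystem at "where" and
 * return the read end of the kernel pipe, or -1 on error.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/mount.h>

static int mount_autofs_indirect(const char *where)
{
	char opts[128];
	int kpipe[2];

	if (pipe(kpipe) < 0)
		return -1;

	/*
	 * "fd=" gets the write end: the kernel writes request packets to it
	 * (autofs_notify_daemon()); the daemon reads them from kpipe[0].
	 */
	snprintf(opts, sizeof(opts),
		 "fd=%d,pgrp=%d,minproto=5,maxproto=5,indirect",
		 kpipe[1], (int)getpgrp());

	if (mount("automount", where, "autofs", 0, opts) < 0) {
		perror("mount(autofs)");
		close(kpipe[0]);
		close(kpipe[1]);
		return -1;
	}
	return kpipe[0];
}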
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 1997-1998 Transmeta Corporation -- All Rights Reserved * Copyright 1999-2000 Jeremy Fitzhardinge <[email protected]> * Copyright 2001-2006 Ian Kent <[email protected]> */ #include <linux/capability.h> #include <linux/compat.h> #include "autofs_i.h" static int autofs_dir_permission(struct mnt_idmap *, struct inode *, int); static int autofs_dir_symlink(struct mnt_idmap *, struct inode *, struct dentry *, const char *); static int autofs_dir_unlink(struct inode *, struct dentry *); static int autofs_dir_rmdir(struct inode *, struct dentry *); static int autofs_dir_mkdir(struct mnt_idmap *, struct inode *, struct dentry *, umode_t); static long autofs_root_ioctl(struct file *, unsigned int, unsigned long); #ifdef CONFIG_COMPAT static long autofs_root_compat_ioctl(struct file *, unsigned int, unsigned long); #endif static int autofs_dir_open(struct inode *inode, struct file *file); static struct dentry *autofs_lookup(struct inode *, struct dentry *, unsigned int); static struct vfsmount *autofs_d_automount(struct path *); static int autofs_d_manage(const struct path *, bool); static void autofs_dentry_release(struct dentry *); const struct file_operations autofs_root_operations = { .open = dcache_dir_open, .release = dcache_dir_close, .read = generic_read_dir, .iterate_shared = dcache_readdir, .llseek = dcache_dir_lseek, .unlocked_ioctl = autofs_root_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = autofs_root_compat_ioctl, #endif }; const struct file_operations autofs_dir_operations = { .open = autofs_dir_open, .release = dcache_dir_close, .read = generic_read_dir, .iterate_shared = dcache_readdir, .llseek = dcache_dir_lseek, }; const struct inode_operations autofs_dir_inode_operations = { .lookup = autofs_lookup, .permission = autofs_dir_permission, .unlink = autofs_dir_unlink, .symlink = autofs_dir_symlink, .mkdir = autofs_dir_mkdir, .rmdir = autofs_dir_rmdir, }; const struct dentry_operations autofs_dentry_operations = { .d_automount = autofs_d_automount, .d_manage = autofs_d_manage, .d_release = autofs_dentry_release, }; static void autofs_del_active(struct dentry *dentry) { struct autofs_sb_info *sbi = autofs_sbi(dentry->d_sb); struct autofs_info *ino; ino = autofs_dentry_ino(dentry); spin_lock(&sbi->lookup_lock); list_del_init(&ino->active); spin_unlock(&sbi->lookup_lock); } static int autofs_dir_open(struct inode *inode, struct file *file) { struct dentry *dentry = file->f_path.dentry; struct autofs_sb_info *sbi = autofs_sbi(dentry->d_sb); struct autofs_info *ino = autofs_dentry_ino(dentry); pr_debug("file=%p dentry=%p %pd\n", file, dentry, dentry); if (autofs_oz_mode(sbi)) goto out; /* * An empty directory in an autofs file system is always a * mount point. The daemon must have failed to mount this * during lookup so it doesn't exist. This can happen, for * example, if user space returns an incorrect status for a * mount request. Otherwise we're doing a readdir on the * autofs file system so just let the libfs routines handle * it. 
*/ spin_lock(&sbi->lookup_lock); if (!path_is_mountpoint(&file->f_path) && autofs_empty(ino)) { spin_unlock(&sbi->lookup_lock); return -ENOENT; } spin_unlock(&sbi->lookup_lock); out: return dcache_dir_open(inode, file); } static void autofs_dentry_release(struct dentry *de) { struct autofs_info *ino = autofs_dentry_ino(de); struct autofs_sb_info *sbi = autofs_sbi(de->d_sb); pr_debug("releasing %p\n", de); if (!ino) return; if (sbi) { spin_lock(&sbi->lookup_lock); if (!list_empty(&ino->active)) list_del(&ino->active); if (!list_empty(&ino->expiring)) list_del(&ino->expiring); spin_unlock(&sbi->lookup_lock); } autofs_free_ino(ino); } static struct dentry *autofs_lookup_active(struct dentry *dentry) { struct autofs_sb_info *sbi = autofs_sbi(dentry->d_sb); struct dentry *parent = dentry->d_parent; const struct qstr *name = &dentry->d_name; unsigned int len = name->len; unsigned int hash = name->hash; const unsigned char *str = name->name; struct list_head *p, *head; head = &sbi->active_list; if (list_empty(head)) return NULL; spin_lock(&sbi->lookup_lock); list_for_each(p, head) { struct autofs_info *ino; struct dentry *active; const struct qstr *qstr; ino = list_entry(p, struct autofs_info, active); active = ino->dentry; spin_lock(&active->d_lock); /* Already gone? */ if ((int) d_count(active) <= 0) goto next; qstr = &active->d_name; if (active->d_name.hash != hash) goto next; if (active->d_parent != parent) goto next; if (qstr->len != len) goto next; if (memcmp(qstr->name, str, len)) goto next; if (d_unhashed(active)) { dget_dlock(active); spin_unlock(&active->d_lock); spin_unlock(&sbi->lookup_lock); return active; } next: spin_unlock(&active->d_lock); } spin_unlock(&sbi->lookup_lock); return NULL; } static struct dentry *autofs_lookup_expiring(struct dentry *dentry, bool rcu_walk) { struct autofs_sb_info *sbi = autofs_sbi(dentry->d_sb); struct dentry *parent = dentry->d_parent; const struct qstr *name = &dentry->d_name; unsigned int len = name->len; unsigned int hash = name->hash; const unsigned char *str = name->name; struct list_head *p, *head; head = &sbi->expiring_list; if (list_empty(head)) return NULL; spin_lock(&sbi->lookup_lock); list_for_each(p, head) { struct autofs_info *ino; struct dentry *expiring; const struct qstr *qstr; if (rcu_walk) { spin_unlock(&sbi->lookup_lock); return ERR_PTR(-ECHILD); } ino = list_entry(p, struct autofs_info, expiring); expiring = ino->dentry; spin_lock(&expiring->d_lock); /* We've already been dentry_iput or unlinked */ if (d_really_is_negative(expiring)) goto next; qstr = &expiring->d_name; if (expiring->d_name.hash != hash) goto next; if (expiring->d_parent != parent) goto next; if (qstr->len != len) goto next; if (memcmp(qstr->name, str, len)) goto next; if (d_unhashed(expiring)) { dget_dlock(expiring); spin_unlock(&expiring->d_lock); spin_unlock(&sbi->lookup_lock); return expiring; } next: spin_unlock(&expiring->d_lock); } spin_unlock(&sbi->lookup_lock); return NULL; } static int autofs_mount_wait(const struct path *path, bool rcu_walk) { struct autofs_sb_info *sbi = autofs_sbi(path->dentry->d_sb); struct autofs_info *ino = autofs_dentry_ino(path->dentry); int status = 0; if (ino->flags & AUTOFS_INF_PENDING) { if (rcu_walk) return -ECHILD; pr_debug("waiting for mount name=%pd\n", path->dentry); status = autofs_wait(sbi, path, NFY_MOUNT); pr_debug("mount wait done status=%d\n", status); ino->last_used = jiffies; return status; } if (!(sbi->flags & AUTOFS_SBI_STRICTEXPIRE)) ino->last_used = jiffies; return status; } static int do_expire_wait(const 
struct path *path, bool rcu_walk) { struct dentry *dentry = path->dentry; struct dentry *expiring; expiring = autofs_lookup_expiring(dentry, rcu_walk); if (IS_ERR(expiring)) return PTR_ERR(expiring); if (!expiring) return autofs_expire_wait(path, rcu_walk); else { const struct path this = { .mnt = path->mnt, .dentry = expiring }; /* * If we are racing with expire the request might not * be quite complete, but the directory has been removed * so it must have been successful, just wait for it. */ autofs_expire_wait(&this, 0); autofs_del_expiring(expiring); dput(expiring); } return 0; } static struct dentry *autofs_mountpoint_changed(struct path *path) { struct dentry *dentry = path->dentry; struct autofs_sb_info *sbi = autofs_sbi(dentry->d_sb); /* If this is an indirect mount the dentry could have gone away * and a new one created. * * This is unusual and I can't remember the case for which it * was originally added now. But an example of how this can * happen is an autofs indirect mount that has the "browse" * option set and also has the "symlink" option in the autofs * map entry. In this case the daemon will remove the browse * directory and create a symlink as the mount leaving the * struct path stale. * * Another not so obvious case is when a mount in an autofs * indirect mount that uses the "nobrowse" option is being * expired at the same time as a path walk. If the mount has * been umounted but the mount point directory seen before * becoming unhashed (during a lockless path walk) when a stat * family system call is made the mount won't be re-mounted as * it should. In this case the mount point that's been removed * (by the daemon) will be stale and the a new mount point * dentry created. */ if (autofs_type_indirect(sbi->type) && d_unhashed(dentry)) { struct dentry *parent = dentry->d_parent; struct autofs_info *ino; struct dentry *new; new = d_lookup(parent, &dentry->d_name); if (!new) return NULL; ino = autofs_dentry_ino(new); ino->last_used = jiffies; dput(path->dentry); path->dentry = new; } return path->dentry; } static struct vfsmount *autofs_d_automount(struct path *path) { struct dentry *dentry = path->dentry; struct autofs_sb_info *sbi = autofs_sbi(dentry->d_sb); struct autofs_info *ino = autofs_dentry_ino(dentry); int status; pr_debug("dentry=%p %pd\n", dentry, dentry); /* The daemon never triggers a mount. */ if (autofs_oz_mode(sbi)) return NULL; /* * If an expire request is pending everyone must wait. * If the expire fails we're still mounted so continue * the follow and return. A return of -EAGAIN (which only * happens with indirect mounts) means the expire completed * and the directory was removed, so just go ahead and try * the mount. */ status = do_expire_wait(path, 0); if (status && status != -EAGAIN) return NULL; /* Callback to the daemon to perform the mount or wait */ spin_lock(&sbi->fs_lock); if (ino->flags & AUTOFS_INF_PENDING) { spin_unlock(&sbi->fs_lock); status = autofs_mount_wait(path, 0); if (status) return ERR_PTR(status); goto done; } /* * If the dentry is a symlink it's equivalent to a directory * having path_is_mountpoint() true, so there's no need to call * back to the daemon. */ if (d_really_is_positive(dentry) && d_is_symlink(dentry)) { spin_unlock(&sbi->fs_lock); goto done; } if (!path_is_mountpoint(path)) { /* * It's possible that user space hasn't removed directories * after umounting a rootless multi-mount, although it * should. 
For v5 path_has_submounts() is sufficient to * handle this because the leaves of the directory tree under * the mount never trigger mounts themselves (they have an * autofs trigger mount mounted on them). But v4 pseudo direct * mounts do need the leaves to trigger mounts. In this case * we have no choice but to use the autofs_empty() check and * require user space behave. */ if (sbi->version > 4) { if (path_has_submounts(path)) { spin_unlock(&sbi->fs_lock); goto done; } } else { if (!autofs_empty(ino)) { spin_unlock(&sbi->fs_lock); goto done; } } ino->flags |= AUTOFS_INF_PENDING; spin_unlock(&sbi->fs_lock); status = autofs_mount_wait(path, 0); spin_lock(&sbi->fs_lock); ino->flags &= ~AUTOFS_INF_PENDING; if (status) { spin_unlock(&sbi->fs_lock); return ERR_PTR(status); } } spin_unlock(&sbi->fs_lock); done: /* Mount succeeded, check if we ended up with a new dentry */ dentry = autofs_mountpoint_changed(path); if (!dentry) return ERR_PTR(-ENOENT); return NULL; } static int autofs_d_manage(const struct path *path, bool rcu_walk) { struct dentry *dentry = path->dentry; struct autofs_sb_info *sbi = autofs_sbi(dentry->d_sb); struct autofs_info *ino = autofs_dentry_ino(dentry); int status; pr_debug("dentry=%p %pd\n", dentry, dentry); /* The daemon never waits. */ if (autofs_oz_mode(sbi)) { if (!path_is_mountpoint(path)) return -EISDIR; return 0; } /* Wait for pending expires */ if (do_expire_wait(path, rcu_walk) == -ECHILD) return -ECHILD; /* * This dentry may be under construction so wait on mount * completion. */ status = autofs_mount_wait(path, rcu_walk); if (status) return status; if (rcu_walk) { /* We don't need fs_lock in rcu_walk mode, * just testing 'AUTOFS_INF_WANT_EXPIRE' is enough. * * We only return -EISDIR when certain this isn't * a mount-trap. */ struct inode *inode; if (ino->flags & AUTOFS_INF_WANT_EXPIRE) return 0; if (path_is_mountpoint(path)) return 0; inode = d_inode_rcu(dentry); if (inode && S_ISLNK(inode->i_mode)) return -EISDIR; if (!autofs_empty(ino)) return -EISDIR; return 0; } spin_lock(&sbi->fs_lock); /* * If the dentry has been selected for expire while we slept * on the lock then it might go away. We'll deal with that in * ->d_automount() and wait on a new mount if the expire * succeeds or return here if it doesn't (since there's no * mount to follow with a rootless multi-mount). */ if (!(ino->flags & AUTOFS_INF_EXPIRING)) { /* * Any needed mounting has been completed and the path * updated so check if this is a rootless multi-mount so * we can avoid needless calls ->d_automount() and avoid * an incorrect ELOOP error return. */ if ((!path_is_mountpoint(path) && !autofs_empty(ino)) || (d_really_is_positive(dentry) && d_is_symlink(dentry))) status = -EISDIR; } spin_unlock(&sbi->fs_lock); return status; } /* Lookups in the root directory */ static struct dentry *autofs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { struct autofs_sb_info *sbi; struct autofs_info *ino; struct dentry *active; pr_debug("name = %pd\n", dentry); /* File name too long to exist */ if (dentry->d_name.len > NAME_MAX) return ERR_PTR(-ENAMETOOLONG); sbi = autofs_sbi(dir->i_sb); pr_debug("pid = %u, pgrp = %u, catatonic = %d, oz_mode = %d\n", current->pid, task_pgrp_nr(current), sbi->flags & AUTOFS_SBI_CATATONIC, autofs_oz_mode(sbi)); active = autofs_lookup_active(dentry); if (active) return active; else { /* * A dentry that is not within the root can never trigger a * mount operation, unless the directory already exists, so we * can return fail immediately. 
The daemon however does need * to create directories within the file system. */ if (!autofs_oz_mode(sbi) && !IS_ROOT(dentry->d_parent)) return ERR_PTR(-ENOENT); ino = autofs_new_ino(sbi); if (!ino) return ERR_PTR(-ENOMEM); spin_lock(&sbi->lookup_lock); spin_lock(&dentry->d_lock); /* Mark entries in the root as mount triggers */ if (IS_ROOT(dentry->d_parent) && autofs_type_indirect(sbi->type)) __managed_dentry_set_managed(dentry); dentry->d_fsdata = ino; ino->dentry = dentry; list_add(&ino->active, &sbi->active_list); spin_unlock(&sbi->lookup_lock); spin_unlock(&dentry->d_lock); } return NULL; } static int autofs_dir_permission(struct mnt_idmap *idmap, struct inode *inode, int mask) { if (mask & MAY_WRITE) { struct autofs_sb_info *sbi = autofs_sbi(inode->i_sb); if (!autofs_oz_mode(sbi)) return -EACCES; /* autofs_oz_mode() needs to allow path walks when the * autofs mount is catatonic but the state of an autofs * file system needs to be preserved over restarts. */ if (sbi->flags & AUTOFS_SBI_CATATONIC) return -EACCES; } return generic_permission(idmap, inode, mask); } static int autofs_dir_symlink(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, const char *symname) { struct autofs_info *ino = autofs_dentry_ino(dentry); struct autofs_info *p_ino; struct inode *inode; size_t size = strlen(symname); char *cp; pr_debug("%s <- %pd\n", symname, dentry); BUG_ON(!ino); autofs_clean_ino(ino); autofs_del_active(dentry); cp = kmalloc(size + 1, GFP_KERNEL); if (!cp) return -ENOMEM; strcpy(cp, symname); inode = autofs_get_inode(dir->i_sb, S_IFLNK | 0555); if (!inode) { kfree(cp); return -ENOMEM; } inode->i_private = cp; inode->i_size = size; d_add(dentry, inode); dget(dentry); p_ino = autofs_dentry_ino(dentry->d_parent); p_ino->count++; dir->i_mtime = inode_set_ctime_current(dir); return 0; } /* * NOTE! * * Normal filesystems would do a "d_delete()" to tell the VFS dcache * that the file no longer exists. However, doing that means that the * VFS layer can turn the dentry into a negative dentry. We don't want * this, because the unlink is probably the result of an expire. * We simply d_drop it and add it to a expiring list in the super block, * which allows the dentry lookup to check for an incomplete expire. * * If a process is blocked on the dentry waiting for the expire to finish, * it will invalidate the dentry and try to mount with a new one. * * Also see autofs_dir_rmdir().. */ static int autofs_dir_unlink(struct inode *dir, struct dentry *dentry) { struct autofs_sb_info *sbi = autofs_sbi(dir->i_sb); struct autofs_info *ino = autofs_dentry_ino(dentry); struct autofs_info *p_ino; p_ino = autofs_dentry_ino(dentry->d_parent); p_ino->count--; dput(ino->dentry); d_inode(dentry)->i_size = 0; clear_nlink(d_inode(dentry)); dir->i_mtime = inode_set_ctime_current(dir); spin_lock(&sbi->lookup_lock); __autofs_add_expiring(dentry); d_drop(dentry); spin_unlock(&sbi->lookup_lock); return 0; } /* * Version 4 of autofs provides a pseudo direct mount implementation * that relies on directories at the leaves of a directory tree under * an indirect mount to trigger mounts. To allow for this we need to * set the DMANAGED_AUTOMOUNT and DMANAGED_TRANSIT flags on the leaves * of the directory tree. There is no need to clear the automount flag * following a mount or restore it after an expire because these mounts * are always covered. However, it is necessary to ensure that these * flags are clear on non-empty directories to avoid unnecessary calls * during path walks. 
*/ static void autofs_set_leaf_automount_flags(struct dentry *dentry) { struct dentry *parent; /* root and dentrys in the root are already handled */ if (IS_ROOT(dentry->d_parent)) return; managed_dentry_set_managed(dentry); parent = dentry->d_parent; /* only consider parents below dentrys in the root */ if (IS_ROOT(parent->d_parent)) return; managed_dentry_clear_managed(parent); } static void autofs_clear_leaf_automount_flags(struct dentry *dentry) { struct dentry *parent; /* flags for dentrys in the root are handled elsewhere */ if (IS_ROOT(dentry->d_parent)) return; managed_dentry_clear_managed(dentry); parent = dentry->d_parent; /* only consider parents below dentrys in the root */ if (IS_ROOT(parent->d_parent)) return; if (autofs_dentry_ino(parent)->count == 2) managed_dentry_set_managed(parent); } static int autofs_dir_rmdir(struct inode *dir, struct dentry *dentry) { struct autofs_sb_info *sbi = autofs_sbi(dir->i_sb); struct autofs_info *ino = autofs_dentry_ino(dentry); struct autofs_info *p_ino; pr_debug("dentry %p, removing %pd\n", dentry, dentry); if (ino->count != 1) return -ENOTEMPTY; spin_lock(&sbi->lookup_lock); __autofs_add_expiring(dentry); d_drop(dentry); spin_unlock(&sbi->lookup_lock); if (sbi->version < 5) autofs_clear_leaf_automount_flags(dentry); p_ino = autofs_dentry_ino(dentry->d_parent); p_ino->count--; dput(ino->dentry); d_inode(dentry)->i_size = 0; clear_nlink(d_inode(dentry)); if (dir->i_nlink) drop_nlink(dir); return 0; } static int autofs_dir_mkdir(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode) { struct autofs_sb_info *sbi = autofs_sbi(dir->i_sb); struct autofs_info *ino = autofs_dentry_ino(dentry); struct autofs_info *p_ino; struct inode *inode; pr_debug("dentry %p, creating %pd\n", dentry, dentry); BUG_ON(!ino); autofs_clean_ino(ino); autofs_del_active(dentry); inode = autofs_get_inode(dir->i_sb, S_IFDIR | mode); if (!inode) return -ENOMEM; d_add(dentry, inode); if (sbi->version < 5) autofs_set_leaf_automount_flags(dentry); dget(dentry); p_ino = autofs_dentry_ino(dentry->d_parent); p_ino->count++; inc_nlink(dir); dir->i_mtime = inode_set_ctime_current(dir); return 0; } /* Get/set timeout ioctl() operation */ #ifdef CONFIG_COMPAT static inline int autofs_compat_get_set_timeout(struct autofs_sb_info *sbi, compat_ulong_t __user *p) { unsigned long ntimeout; int rv; rv = get_user(ntimeout, p); if (rv) goto error; rv = put_user(sbi->exp_timeout/HZ, p); if (rv) goto error; if (ntimeout > UINT_MAX/HZ) sbi->exp_timeout = 0; else sbi->exp_timeout = ntimeout * HZ; return 0; error: return rv; } #endif static inline int autofs_get_set_timeout(struct autofs_sb_info *sbi, unsigned long __user *p) { unsigned long ntimeout; int rv; rv = get_user(ntimeout, p); if (rv) goto error; rv = put_user(sbi->exp_timeout/HZ, p); if (rv) goto error; if (ntimeout > ULONG_MAX/HZ) sbi->exp_timeout = 0; else sbi->exp_timeout = ntimeout * HZ; return 0; error: return rv; } /* Return protocol version */ static inline int autofs_get_protover(struct autofs_sb_info *sbi, int __user *p) { return put_user(sbi->version, p); } /* Return protocol sub version */ static inline int autofs_get_protosubver(struct autofs_sb_info *sbi, int __user *p) { return put_user(sbi->sub_version, p); } /* * Tells the daemon whether it can umount the autofs mount. 
*/ static inline int autofs_ask_umount(struct vfsmount *mnt, int __user *p) { int status = 0; if (may_umount(mnt)) status = 1; pr_debug("may umount %d\n", status); status = put_user(status, p); return status; } /* Identify autofs_dentries - this is so we can tell if there's * an extra dentry refcount or not. We only hold a refcount on the * dentry if its non-negative (ie, d_inode != NULL) */ int is_autofs_dentry(struct dentry *dentry) { return dentry && d_really_is_positive(dentry) && dentry->d_op == &autofs_dentry_operations && dentry->d_fsdata != NULL; } /* * ioctl()'s on the root directory is the chief method for the daemon to * generate kernel reactions */ static int autofs_root_ioctl_unlocked(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { struct autofs_sb_info *sbi = autofs_sbi(inode->i_sb); void __user *p = (void __user *)arg; pr_debug("cmd = 0x%08x, arg = 0x%08lx, sbi = %p, pgrp = %u\n", cmd, arg, sbi, task_pgrp_nr(current)); if (_IOC_TYPE(cmd) != _IOC_TYPE(AUTOFS_IOC_FIRST) || _IOC_NR(cmd) - _IOC_NR(AUTOFS_IOC_FIRST) >= AUTOFS_IOC_COUNT) return -ENOTTY; if (!autofs_oz_mode(sbi) && !capable(CAP_SYS_ADMIN)) return -EPERM; switch (cmd) { case AUTOFS_IOC_READY: /* Wait queue: go ahead and retry */ return autofs_wait_release(sbi, (autofs_wqt_t) arg, 0); case AUTOFS_IOC_FAIL: /* Wait queue: fail with ENOENT */ return autofs_wait_release(sbi, (autofs_wqt_t) arg, -ENOENT); case AUTOFS_IOC_CATATONIC: /* Enter catatonic mode (daemon shutdown) */ autofs_catatonic_mode(sbi); return 0; case AUTOFS_IOC_PROTOVER: /* Get protocol version */ return autofs_get_protover(sbi, p); case AUTOFS_IOC_PROTOSUBVER: /* Get protocol sub version */ return autofs_get_protosubver(sbi, p); case AUTOFS_IOC_SETTIMEOUT: return autofs_get_set_timeout(sbi, p); #ifdef CONFIG_COMPAT case AUTOFS_IOC_SETTIMEOUT32: return autofs_compat_get_set_timeout(sbi, p); #endif case AUTOFS_IOC_ASKUMOUNT: return autofs_ask_umount(filp->f_path.mnt, p); /* return a single thing to expire */ case AUTOFS_IOC_EXPIRE: return autofs_expire_run(inode->i_sb, filp->f_path.mnt, sbi, p); /* same as above, but can send multiple expires through pipe */ case AUTOFS_IOC_EXPIRE_MULTI: return autofs_expire_multi(inode->i_sb, filp->f_path.mnt, sbi, p); default: return -EINVAL; } } static long autofs_root_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct inode *inode = file_inode(filp); return autofs_root_ioctl_unlocked(inode, filp, cmd, arg); } #ifdef CONFIG_COMPAT static long autofs_root_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct inode *inode = file_inode(filp); int ret; if (cmd == AUTOFS_IOC_READY || cmd == AUTOFS_IOC_FAIL) ret = autofs_root_ioctl_unlocked(inode, filp, cmd, arg); else ret = autofs_root_ioctl_unlocked(inode, filp, cmd, (unsigned long) compat_ptr(arg)); return ret; } #endif
linux-master
fs/autofs/root.c
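autofs_root_ioctl_unlocked() above is the classic control path: the daemon issues ioctls directly on the autofs root directory. A hedged sketch of the daemon side is shown below; root_fd is assumed to be an fd open on the mount root, the timeout value is arbitrary, and the argument conventions follow the handlers in the file above (int pointer for PROTOVER/ASKUMOUNT, unsigned long pointer for SETTIMEOUT).
/*
 * Hedged sketch: query the protocol version, set the expire timeout and ask
 * whether the mount could be unmounted, via the root-directory ioctls above.
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/auto_fs.h>

static void root_ioctl_demo(int root_fd)
{
	int proto = 0, may_umount = 0;
	unsigned long timeout = 300;	/* seconds; arbitrary example value */

	if (ioctl(root_fd, AUTOFS_IOC_PROTOVER, &proto) == 0)
		printf("kernel protocol version %d\n", proto);

	/* Writes the previous timeout back in place, then installs the new one. */
	if (ioctl(root_fd, AUTOFS_IOC_SETTIMEOUT, &timeout) == 0)
		printf("previous timeout was %lus\n", timeout);

	if (ioctl(root_fd, AUTOFS_IOC_ASKUMOUNT, &may_umount) == 0)
		printf("%s\n", may_umount ? "unused: may umount"
					  : "busy: keep mounted");
}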
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 1997-1998 Transmeta Corporation -- All Rights Reserved * Copyright 2005-2006 Ian Kent <[email protected]> */ #include <linux/seq_file.h> #include <linux/pagemap.h> #include <linux/parser.h> #include "autofs_i.h" struct autofs_info *autofs_new_ino(struct autofs_sb_info *sbi) { struct autofs_info *ino; ino = kzalloc(sizeof(*ino), GFP_KERNEL); if (ino) { INIT_LIST_HEAD(&ino->active); INIT_LIST_HEAD(&ino->expiring); ino->last_used = jiffies; ino->sbi = sbi; ino->count = 1; } return ino; } void autofs_clean_ino(struct autofs_info *ino) { ino->uid = GLOBAL_ROOT_UID; ino->gid = GLOBAL_ROOT_GID; ino->last_used = jiffies; } void autofs_free_ino(struct autofs_info *ino) { kfree_rcu(ino, rcu); } void autofs_kill_sb(struct super_block *sb) { struct autofs_sb_info *sbi = autofs_sbi(sb); /* * In the event of a failure in get_sb_nodev the superblock * info is not present so nothing else has been setup, so * just call kill_anon_super when we are called from * deactivate_super. */ if (sbi) { /* Free wait queues, close pipe */ autofs_catatonic_mode(sbi); put_pid(sbi->oz_pgrp); } pr_debug("shutting down\n"); kill_litter_super(sb); if (sbi) kfree_rcu(sbi, rcu); } static int autofs_show_options(struct seq_file *m, struct dentry *root) { struct autofs_sb_info *sbi = autofs_sbi(root->d_sb); struct inode *root_inode = d_inode(root->d_sb->s_root); if (!sbi) return 0; seq_printf(m, ",fd=%d", sbi->pipefd); if (!uid_eq(root_inode->i_uid, GLOBAL_ROOT_UID)) seq_printf(m, ",uid=%u", from_kuid_munged(&init_user_ns, root_inode->i_uid)); if (!gid_eq(root_inode->i_gid, GLOBAL_ROOT_GID)) seq_printf(m, ",gid=%u", from_kgid_munged(&init_user_ns, root_inode->i_gid)); seq_printf(m, ",pgrp=%d", pid_vnr(sbi->oz_pgrp)); seq_printf(m, ",timeout=%lu", sbi->exp_timeout/HZ); seq_printf(m, ",minproto=%d", sbi->min_proto); seq_printf(m, ",maxproto=%d", sbi->max_proto); if (autofs_type_offset(sbi->type)) seq_puts(m, ",offset"); else if (autofs_type_direct(sbi->type)) seq_puts(m, ",direct"); else seq_puts(m, ",indirect"); if (sbi->flags & AUTOFS_SBI_STRICTEXPIRE) seq_puts(m, ",strictexpire"); if (sbi->flags & AUTOFS_SBI_IGNORE) seq_puts(m, ",ignore"); #ifdef CONFIG_CHECKPOINT_RESTORE if (sbi->pipe) seq_printf(m, ",pipe_ino=%ld", file_inode(sbi->pipe)->i_ino); else seq_puts(m, ",pipe_ino=-1"); #endif return 0; } static void autofs_evict_inode(struct inode *inode) { clear_inode(inode); kfree(inode->i_private); } static const struct super_operations autofs_sops = { .statfs = simple_statfs, .show_options = autofs_show_options, .evict_inode = autofs_evict_inode, }; enum {Opt_err, Opt_fd, Opt_uid, Opt_gid, Opt_pgrp, Opt_minproto, Opt_maxproto, Opt_indirect, Opt_direct, Opt_offset, Opt_strictexpire, Opt_ignore}; static const match_table_t tokens = { {Opt_fd, "fd=%u"}, {Opt_uid, "uid=%u"}, {Opt_gid, "gid=%u"}, {Opt_pgrp, "pgrp=%u"}, {Opt_minproto, "minproto=%u"}, {Opt_maxproto, "maxproto=%u"}, {Opt_indirect, "indirect"}, {Opt_direct, "direct"}, {Opt_offset, "offset"}, {Opt_strictexpire, "strictexpire"}, {Opt_ignore, "ignore"}, {Opt_err, NULL} }; static int parse_options(char *options, struct inode *root, int *pgrp, bool *pgrp_set, struct autofs_sb_info *sbi) { char *p; substring_t args[MAX_OPT_ARGS]; int option; int pipefd = -1; kuid_t uid; kgid_t gid; root->i_uid = current_uid(); root->i_gid = current_gid(); sbi->min_proto = AUTOFS_MIN_PROTO_VERSION; sbi->max_proto = AUTOFS_MAX_PROTO_VERSION; sbi->pipefd = -1; if (!options) return 1; while ((p = strsep(&options, ",")) != NULL) { int 
token; if (!*p) continue; token = match_token(p, tokens, args); switch (token) { case Opt_fd: if (match_int(args, &pipefd)) return 1; sbi->pipefd = pipefd; break; case Opt_uid: if (match_int(args, &option)) return 1; uid = make_kuid(current_user_ns(), option); if (!uid_valid(uid)) return 1; root->i_uid = uid; break; case Opt_gid: if (match_int(args, &option)) return 1; gid = make_kgid(current_user_ns(), option); if (!gid_valid(gid)) return 1; root->i_gid = gid; break; case Opt_pgrp: if (match_int(args, &option)) return 1; *pgrp = option; *pgrp_set = true; break; case Opt_minproto: if (match_int(args, &option)) return 1; sbi->min_proto = option; break; case Opt_maxproto: if (match_int(args, &option)) return 1; sbi->max_proto = option; break; case Opt_indirect: set_autofs_type_indirect(&sbi->type); break; case Opt_direct: set_autofs_type_direct(&sbi->type); break; case Opt_offset: set_autofs_type_offset(&sbi->type); break; case Opt_strictexpire: sbi->flags |= AUTOFS_SBI_STRICTEXPIRE; break; case Opt_ignore: sbi->flags |= AUTOFS_SBI_IGNORE; break; default: return 1; } } return (sbi->pipefd < 0); } int autofs_fill_super(struct super_block *s, void *data, int silent) { struct inode *root_inode; struct dentry *root; struct file *pipe; struct autofs_sb_info *sbi; struct autofs_info *ino; int pgrp = 0; bool pgrp_set = false; int ret = -EINVAL; sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); if (!sbi) return -ENOMEM; pr_debug("starting up, sbi = %p\n", sbi); s->s_fs_info = sbi; sbi->magic = AUTOFS_SBI_MAGIC; sbi->pipefd = -1; sbi->pipe = NULL; sbi->exp_timeout = 0; sbi->oz_pgrp = NULL; sbi->sb = s; sbi->version = 0; sbi->sub_version = 0; sbi->flags = AUTOFS_SBI_CATATONIC; set_autofs_type_indirect(&sbi->type); sbi->min_proto = 0; sbi->max_proto = 0; mutex_init(&sbi->wq_mutex); mutex_init(&sbi->pipe_mutex); spin_lock_init(&sbi->fs_lock); sbi->queues = NULL; spin_lock_init(&sbi->lookup_lock); INIT_LIST_HEAD(&sbi->active_list); INIT_LIST_HEAD(&sbi->expiring_list); s->s_blocksize = 1024; s->s_blocksize_bits = 10; s->s_magic = AUTOFS_SUPER_MAGIC; s->s_op = &autofs_sops; s->s_d_op = &autofs_dentry_operations; s->s_time_gran = 1; /* * Get the root inode and dentry, but defer checking for errors. */ ino = autofs_new_ino(sbi); if (!ino) { ret = -ENOMEM; goto fail_free; } root_inode = autofs_get_inode(s, S_IFDIR | 0755); root = d_make_root(root_inode); if (!root) { ret = -ENOMEM; goto fail_ino; } pipe = NULL; root->d_fsdata = ino; /* Can this call block? 
*/ if (parse_options(data, root_inode, &pgrp, &pgrp_set, sbi)) { pr_err("called with bogus options\n"); goto fail_dput; } /* Test versions first */ if (sbi->max_proto < AUTOFS_MIN_PROTO_VERSION || sbi->min_proto > AUTOFS_MAX_PROTO_VERSION) { pr_err("kernel does not match daemon version " "daemon (%d, %d) kernel (%d, %d)\n", sbi->min_proto, sbi->max_proto, AUTOFS_MIN_PROTO_VERSION, AUTOFS_MAX_PROTO_VERSION); goto fail_dput; } /* Establish highest kernel protocol version */ if (sbi->max_proto > AUTOFS_MAX_PROTO_VERSION) sbi->version = AUTOFS_MAX_PROTO_VERSION; else sbi->version = sbi->max_proto; sbi->sub_version = AUTOFS_PROTO_SUBVERSION; if (pgrp_set) { sbi->oz_pgrp = find_get_pid(pgrp); if (!sbi->oz_pgrp) { pr_err("could not find process group %d\n", pgrp); goto fail_dput; } } else { sbi->oz_pgrp = get_task_pid(current, PIDTYPE_PGID); } if (autofs_type_trigger(sbi->type)) __managed_dentry_set_managed(root); root_inode->i_fop = &autofs_root_operations; root_inode->i_op = &autofs_dir_inode_operations; pr_debug("pipe fd = %d, pgrp = %u\n", sbi->pipefd, pid_nr(sbi->oz_pgrp)); pipe = fget(sbi->pipefd); if (!pipe) { pr_err("could not open pipe file descriptor\n"); goto fail_put_pid; } ret = autofs_prepare_pipe(pipe); if (ret < 0) goto fail_fput; sbi->pipe = pipe; sbi->flags &= ~AUTOFS_SBI_CATATONIC; /* * Success! Install the root dentry now to indicate completion. */ s->s_root = root; return 0; /* * Failure ... clean up. */ fail_fput: pr_err("pipe file descriptor does not contain proper ops\n"); fput(pipe); fail_put_pid: put_pid(sbi->oz_pgrp); fail_dput: dput(root); goto fail_free; fail_ino: autofs_free_ino(ino); fail_free: kfree(sbi); s->s_fs_info = NULL; return ret; } struct inode *autofs_get_inode(struct super_block *sb, umode_t mode) { struct inode *inode = new_inode(sb); if (inode == NULL) return NULL; inode->i_mode = mode; if (sb->s_root) { inode->i_uid = d_inode(sb->s_root)->i_uid; inode->i_gid = d_inode(sb->s_root)->i_gid; } inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode); inode->i_ino = get_next_ino(); if (S_ISDIR(mode)) { set_nlink(inode, 2); inode->i_op = &autofs_dir_inode_operations; inode->i_fop = &autofs_dir_operations; } else if (S_ISLNK(mode)) { inode->i_op = &autofs_symlink_inode_operations; } else WARN_ON(1); return inode; }
linux-master
fs/autofs/inode.c
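autofs_show_options() above is what surfaces the mount parameters (fd, pgrp, timeout, proto versions, map type, pipe_ino) in /proc/self/mounts, which is how a restarting daemon reconnects to mounts created by a previous instance. A small sketch of reading those options back with the standard getmntent(3) interface follows; no special privileges are assumed and the output format is only as described by the show_options code above.
/*
 * Hedged sketch: list autofs mounts and the option string emitted by
 * autofs_show_options(), e.g. "fd=7,pgrp=1234,timeout=300,...,indirect".
 */
#include <mntent.h>
#include <stdio.h>
#include <string.h>

static void list_autofs_mounts(void)
{
	FILE *mtab = setmntent("/proc/self/mounts", "r");
	struct mntent *m;

	if (!mtab)
		return;
	while ((m = getmntent(mtab)) != NULL) {
		if (strcmp(m->mnt_type, "autofs") != 0)
			continue;
		printf("%s: %s\n", m->mnt_dir, m->mnt_opts);
		if (hasmntopt(m, "indirect"))
			printf("  indirect map\n");
	}
	endmntent(mtab);
}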
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 1997-1998 Transmeta Corporation -- All Rights Reserved */ #include "autofs_i.h" static const char *autofs_get_link(struct dentry *dentry, struct inode *inode, struct delayed_call *done) { struct autofs_sb_info *sbi; struct autofs_info *ino; if (!dentry) return ERR_PTR(-ECHILD); sbi = autofs_sbi(dentry->d_sb); ino = autofs_dentry_ino(dentry); if (ino && !autofs_oz_mode(sbi)) ino->last_used = jiffies; return d_inode(dentry)->i_private; } const struct inode_operations autofs_symlink_inode_operations = { .get_link = autofs_get_link };
linux-master
fs/autofs/symlink.c
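autofs_get_link() above simply returns the target string that autofs_dir_symlink() (in fs/autofs/root.c earlier) stored in inode->i_private, refreshing last_used for non-daemon callers. From userspace such a map entry behaves like an ordinary symlink; a trivial hedged sketch follows, with a placeholder path standing in for an entry created by the daemon.
/*
 * Hedged sketch: read the target of an autofs "symlink" map entry.
 */
#include <stdio.h>
#include <unistd.h>
#include <limits.h>

static void show_autofs_symlink(const char *path)
{
	char target[PATH_MAX + 1];
	ssize_t n = readlink(path, target, PATH_MAX);

	if (n < 0) {
		perror("readlink");
		return;
	}
	target[n] = '\0';	/* readlink() does not NUL-terminate */
	printf("%s -> %s\n", path, target);
}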
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 2008 Red Hat, Inc. All rights reserved. * Copyright 2008 Ian Kent <[email protected]> */ #include <linux/module.h> #include <linux/miscdevice.h> #include <linux/compat.h> #include <linux/fdtable.h> #include <linux/magic.h> #include <linux/nospec.h> #include "autofs_i.h" /* * This module implements an interface for routing autofs ioctl control * commands via a miscellaneous device file. * * The alternate interface is needed because we need to be able open * an ioctl file descriptor on an autofs mount that may be covered by * another mount. This situation arises when starting automount(8) * or other user space daemon which uses direct mounts or offset * mounts (used for autofs lazy mount/umount of nested mount trees), * which have been left busy at service shutdown. */ typedef int (*ioctl_fn)(struct file *, struct autofs_sb_info *, struct autofs_dev_ioctl *); static int check_name(const char *name) { if (!strchr(name, '/')) return -EINVAL; return 0; } /* * Check a string doesn't overrun the chunk of * memory we copied from user land. */ static int invalid_str(char *str, size_t size) { if (memchr(str, 0, size)) return 0; return -EINVAL; } /* * Check that the user compiled against correct version of autofs * misc device code. * * As well as checking the version compatibility this always copies * the kernel interface version out. */ static int check_dev_ioctl_version(int cmd, struct autofs_dev_ioctl *param) { int err = 0; if ((param->ver_major != AUTOFS_DEV_IOCTL_VERSION_MAJOR) || (param->ver_minor > AUTOFS_DEV_IOCTL_VERSION_MINOR)) { pr_warn("ioctl control interface version mismatch: " "kernel(%u.%u), user(%u.%u), cmd(0x%08x)\n", AUTOFS_DEV_IOCTL_VERSION_MAJOR, AUTOFS_DEV_IOCTL_VERSION_MINOR, param->ver_major, param->ver_minor, cmd); err = -EINVAL; } /* Fill in the kernel version. */ param->ver_major = AUTOFS_DEV_IOCTL_VERSION_MAJOR; param->ver_minor = AUTOFS_DEV_IOCTL_VERSION_MINOR; return err; } /* * Copy parameter control struct, including a possible path allocated * at the end of the struct. */ static struct autofs_dev_ioctl * copy_dev_ioctl(struct autofs_dev_ioctl __user *in) { struct autofs_dev_ioctl tmp, *res; if (copy_from_user(&tmp, in, AUTOFS_DEV_IOCTL_SIZE)) return ERR_PTR(-EFAULT); if (tmp.size < AUTOFS_DEV_IOCTL_SIZE) return ERR_PTR(-EINVAL); if (tmp.size > AUTOFS_DEV_IOCTL_SIZE + PATH_MAX) return ERR_PTR(-ENAMETOOLONG); res = memdup_user(in, tmp.size); if (!IS_ERR(res)) res->size = tmp.size; return res; } static inline void free_dev_ioctl(struct autofs_dev_ioctl *param) { kfree(param); } /* * Check sanity of parameter control fields and if a path is present * check that it is terminated and contains at least one "/". 
*/ static int validate_dev_ioctl(int cmd, struct autofs_dev_ioctl *param) { int err; err = check_dev_ioctl_version(cmd, param); if (err) { pr_warn("invalid device control module version " "supplied for cmd(0x%08x)\n", cmd); goto out; } if (param->size > AUTOFS_DEV_IOCTL_SIZE) { err = invalid_str(param->path, param->size - AUTOFS_DEV_IOCTL_SIZE); if (err) { pr_warn( "path string terminator missing for cmd(0x%08x)\n", cmd); goto out; } err = check_name(param->path); if (err) { pr_warn("invalid path supplied for cmd(0x%08x)\n", cmd); goto out; } } else { unsigned int inr = _IOC_NR(cmd); if (inr == AUTOFS_DEV_IOCTL_OPENMOUNT_CMD || inr == AUTOFS_DEV_IOCTL_REQUESTER_CMD || inr == AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD) { err = -EINVAL; goto out; } } err = 0; out: return err; } /* Return autofs dev ioctl version */ static int autofs_dev_ioctl_version(struct file *fp, struct autofs_sb_info *sbi, struct autofs_dev_ioctl *param) { /* This should have already been set. */ param->ver_major = AUTOFS_DEV_IOCTL_VERSION_MAJOR; param->ver_minor = AUTOFS_DEV_IOCTL_VERSION_MINOR; return 0; } /* Return autofs module protocol version */ static int autofs_dev_ioctl_protover(struct file *fp, struct autofs_sb_info *sbi, struct autofs_dev_ioctl *param) { param->protover.version = sbi->version; return 0; } /* Return autofs module protocol sub version */ static int autofs_dev_ioctl_protosubver(struct file *fp, struct autofs_sb_info *sbi, struct autofs_dev_ioctl *param) { param->protosubver.sub_version = sbi->sub_version; return 0; } /* Find the topmost mount satisfying test() */ static int find_autofs_mount(const char *pathname, struct path *res, int test(const struct path *path, void *data), void *data) { struct path path; int err; err = kern_path(pathname, LOOKUP_MOUNTPOINT, &path); if (err) return err; err = -ENOENT; while (path.dentry == path.mnt->mnt_root) { if (path.dentry->d_sb->s_magic == AUTOFS_SUPER_MAGIC) { if (test(&path, data)) { path_get(&path); *res = path; err = 0; break; } } if (!follow_up(&path)) break; } path_put(&path); return err; } static int test_by_dev(const struct path *path, void *p) { return path->dentry->d_sb->s_dev == *(dev_t *)p; } static int test_by_type(const struct path *path, void *p) { struct autofs_info *ino = autofs_dentry_ino(path->dentry); return ino && ino->sbi->type & *(unsigned *)p; } /* * Open a file descriptor on the autofs mount point corresponding * to the given path and device number (aka. new_encode_dev(sb->s_dev)). 
*/ static int autofs_dev_ioctl_open_mountpoint(const char *name, dev_t devid) { int err, fd; fd = get_unused_fd_flags(O_CLOEXEC); if (likely(fd >= 0)) { struct file *filp; struct path path; err = find_autofs_mount(name, &path, test_by_dev, &devid); if (err) goto out; filp = dentry_open(&path, O_RDONLY, current_cred()); path_put(&path); if (IS_ERR(filp)) { err = PTR_ERR(filp); goto out; } fd_install(fd, filp); } return fd; out: put_unused_fd(fd); return err; } /* Open a file descriptor on an autofs mount point */ static int autofs_dev_ioctl_openmount(struct file *fp, struct autofs_sb_info *sbi, struct autofs_dev_ioctl *param) { const char *path; dev_t devid; int err, fd; /* param->path has been checked in validate_dev_ioctl() */ if (!param->openmount.devid) return -EINVAL; param->ioctlfd = -1; path = param->path; devid = new_decode_dev(param->openmount.devid); err = 0; fd = autofs_dev_ioctl_open_mountpoint(path, devid); if (unlikely(fd < 0)) { err = fd; goto out; } param->ioctlfd = fd; out: return err; } /* Close file descriptor allocated above (user can also use close(2)). */ static int autofs_dev_ioctl_closemount(struct file *fp, struct autofs_sb_info *sbi, struct autofs_dev_ioctl *param) { return close_fd(param->ioctlfd); } /* * Send "ready" status for an existing wait (either a mount or an expire * request). */ static int autofs_dev_ioctl_ready(struct file *fp, struct autofs_sb_info *sbi, struct autofs_dev_ioctl *param) { autofs_wqt_t token; token = (autofs_wqt_t) param->ready.token; return autofs_wait_release(sbi, token, 0); } /* * Send "fail" status for an existing wait (either a mount or an expire * request). */ static int autofs_dev_ioctl_fail(struct file *fp, struct autofs_sb_info *sbi, struct autofs_dev_ioctl *param) { autofs_wqt_t token; int status; token = (autofs_wqt_t) param->fail.token; status = param->fail.status < 0 ? param->fail.status : -ENOENT; return autofs_wait_release(sbi, token, status); } /* * Set the pipe fd for kernel communication to the daemon. * * Normally this is set at mount using an option but if we * are reconnecting to a busy mount then we need to use this * to tell the autofs mount about the new kernel pipe fd. In * order to protect mounts against incorrectly setting the * pipefd we also require that the autofs mount be catatonic. * * This also sets the process group id used to identify the * controlling process (eg. the owning automount(8) daemon). */ static int autofs_dev_ioctl_setpipefd(struct file *fp, struct autofs_sb_info *sbi, struct autofs_dev_ioctl *param) { int pipefd; int err = 0; struct pid *new_pid = NULL; if (param->setpipefd.pipefd == -1) return -EINVAL; pipefd = param->setpipefd.pipefd; mutex_lock(&sbi->wq_mutex); if (!(sbi->flags & AUTOFS_SBI_CATATONIC)) { mutex_unlock(&sbi->wq_mutex); return -EBUSY; } else { struct file *pipe; new_pid = get_task_pid(current, PIDTYPE_PGID); if (ns_of_pid(new_pid) != ns_of_pid(sbi->oz_pgrp)) { pr_warn("not allowed to change PID namespace\n"); err = -EINVAL; goto out; } pipe = fget(pipefd); if (!pipe) { err = -EBADF; goto out; } if (autofs_prepare_pipe(pipe) < 0) { err = -EPIPE; fput(pipe); goto out; } swap(sbi->oz_pgrp, new_pid); sbi->pipefd = pipefd; sbi->pipe = pipe; sbi->flags &= ~AUTOFS_SBI_CATATONIC; } out: put_pid(new_pid); mutex_unlock(&sbi->wq_mutex); return err; } /* * Make the autofs mount point catatonic, no longer responsive to * mount requests. Also closes the kernel pipe file descriptor. 
*/ static int autofs_dev_ioctl_catatonic(struct file *fp, struct autofs_sb_info *sbi, struct autofs_dev_ioctl *param) { autofs_catatonic_mode(sbi); return 0; } /* Set the autofs mount timeout */ static int autofs_dev_ioctl_timeout(struct file *fp, struct autofs_sb_info *sbi, struct autofs_dev_ioctl *param) { unsigned long timeout; timeout = param->timeout.timeout; param->timeout.timeout = sbi->exp_timeout / HZ; sbi->exp_timeout = timeout * HZ; return 0; } /* * Return the uid and gid of the last request for the mount * * When reconstructing an autofs mount tree with active mounts * we need to re-connect to mounts that may have used the original * process uid and gid (or string variations of them) for mount * lookups within the map entry. */ static int autofs_dev_ioctl_requester(struct file *fp, struct autofs_sb_info *sbi, struct autofs_dev_ioctl *param) { struct autofs_info *ino; struct path path; dev_t devid; int err = -ENOENT; /* param->path has been checked in validate_dev_ioctl() */ devid = sbi->sb->s_dev; param->requester.uid = param->requester.gid = -1; err = find_autofs_mount(param->path, &path, test_by_dev, &devid); if (err) goto out; ino = autofs_dentry_ino(path.dentry); if (ino) { err = 0; autofs_expire_wait(&path, 0); spin_lock(&sbi->fs_lock); param->requester.uid = from_kuid_munged(current_user_ns(), ino->uid); param->requester.gid = from_kgid_munged(current_user_ns(), ino->gid); spin_unlock(&sbi->fs_lock); } path_put(&path); out: return err; } /* * Call repeatedly until it returns -EAGAIN, meaning there's nothing * more that can be done. */ static int autofs_dev_ioctl_expire(struct file *fp, struct autofs_sb_info *sbi, struct autofs_dev_ioctl *param) { struct vfsmount *mnt; int how; how = param->expire.how; mnt = fp->f_path.mnt; return autofs_do_expire_multi(sbi->sb, mnt, sbi, how); } /* Check if autofs mount point is in use */ static int autofs_dev_ioctl_askumount(struct file *fp, struct autofs_sb_info *sbi, struct autofs_dev_ioctl *param) { param->askumount.may_umount = 0; if (may_umount(fp->f_path.mnt)) param->askumount.may_umount = 1; return 0; } /* * Check if the given path is a mountpoint. * * If we are supplied with the file descriptor of an autofs * mount we're looking for a specific mount. In this case * the path is considered a mountpoint if it is itself a * mountpoint or contains a mount, such as a multi-mount * without a root mount. In this case we return 1 if the * path is a mount point and the super magic of the covering * mount if there is one or 0 if it isn't a mountpoint. * * If we aren't supplied with a file descriptor then we * lookup the path and check if it is the root of a mount. * If a type is given we are looking for a particular autofs * mount and if we don't find a match we return fail. If the * located path is the root of a mount we return 1 along with * the super magic of the mount or 0 otherwise. * * In both cases the device number (as returned by * new_encode_dev()) is also returned. 
*/ static int autofs_dev_ioctl_ismountpoint(struct file *fp, struct autofs_sb_info *sbi, struct autofs_dev_ioctl *param) { struct path path; const char *name; unsigned int type; unsigned int devid, magic; int err = -ENOENT; /* param->path has been checked in validate_dev_ioctl() */ name = param->path; type = param->ismountpoint.in.type; param->ismountpoint.out.devid = devid = 0; param->ismountpoint.out.magic = magic = 0; if (!fp || param->ioctlfd == -1) { if (autofs_type_any(type)) err = kern_path(name, LOOKUP_FOLLOW | LOOKUP_MOUNTPOINT, &path); else err = find_autofs_mount(name, &path, test_by_type, &type); if (err) goto out; devid = new_encode_dev(path.dentry->d_sb->s_dev); err = 0; if (path.mnt->mnt_root == path.dentry) { err = 1; magic = path.dentry->d_sb->s_magic; } } else { dev_t dev = sbi->sb->s_dev; err = find_autofs_mount(name, &path, test_by_dev, &dev); if (err) goto out; devid = new_encode_dev(dev); err = path_has_submounts(&path); if (follow_down_one(&path)) magic = path.dentry->d_sb->s_magic; } param->ismountpoint.out.devid = devid; param->ismountpoint.out.magic = magic; path_put(&path); out: return err; } /* * Our range of ioctl numbers isn't 0 based so we need to shift * the array index by _IOC_NR(AUTOFS_CTL_IOC_FIRST) for the table * lookup. */ #define cmd_idx(cmd) (cmd - _IOC_NR(AUTOFS_DEV_IOCTL_IOC_FIRST)) static ioctl_fn lookup_dev_ioctl(unsigned int cmd) { static const ioctl_fn _ioctls[] = { autofs_dev_ioctl_version, autofs_dev_ioctl_protover, autofs_dev_ioctl_protosubver, autofs_dev_ioctl_openmount, autofs_dev_ioctl_closemount, autofs_dev_ioctl_ready, autofs_dev_ioctl_fail, autofs_dev_ioctl_setpipefd, autofs_dev_ioctl_catatonic, autofs_dev_ioctl_timeout, autofs_dev_ioctl_requester, autofs_dev_ioctl_expire, autofs_dev_ioctl_askumount, autofs_dev_ioctl_ismountpoint, }; unsigned int idx = cmd_idx(cmd); if (idx >= ARRAY_SIZE(_ioctls)) return NULL; idx = array_index_nospec(idx, ARRAY_SIZE(_ioctls)); return _ioctls[idx]; } /* ioctl dispatcher */ static int _autofs_dev_ioctl(unsigned int command, struct autofs_dev_ioctl __user *user) { struct autofs_dev_ioctl *param; struct file *fp; struct autofs_sb_info *sbi; unsigned int cmd_first, cmd; ioctl_fn fn = NULL; int err = 0; cmd_first = _IOC_NR(AUTOFS_DEV_IOCTL_IOC_FIRST); cmd = _IOC_NR(command); if (_IOC_TYPE(command) != _IOC_TYPE(AUTOFS_DEV_IOCTL_IOC_FIRST) || cmd - cmd_first > AUTOFS_DEV_IOCTL_IOC_COUNT) { return -ENOTTY; } /* Only root can use ioctls other than AUTOFS_DEV_IOCTL_VERSION_CMD * and AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD */ if (cmd != AUTOFS_DEV_IOCTL_VERSION_CMD && cmd != AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD && !capable(CAP_SYS_ADMIN)) return -EPERM; /* Copy the parameters into kernel space. */ param = copy_dev_ioctl(user); if (IS_ERR(param)) return PTR_ERR(param); err = validate_dev_ioctl(command, param); if (err) goto out; fn = lookup_dev_ioctl(cmd); if (!fn) { pr_warn("unknown command 0x%08x\n", command); err = -ENOTTY; goto out; } fp = NULL; sbi = NULL; /* * For obvious reasons the openmount can't have a file * descriptor yet. We don't take a reference to the * file during close to allow for immediate release, * and the same for retrieving ioctl version. 
*/ if (cmd != AUTOFS_DEV_IOCTL_VERSION_CMD && cmd != AUTOFS_DEV_IOCTL_OPENMOUNT_CMD && cmd != AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD) { struct super_block *sb; fp = fget(param->ioctlfd); if (!fp) { if (cmd == AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD) goto cont; err = -EBADF; goto out; } sb = file_inode(fp)->i_sb; if (sb->s_type != &autofs_fs_type) { err = -EINVAL; fput(fp); goto out; } sbi = autofs_sbi(sb); /* * Admin needs to be able to set the mount catatonic in * order to be able to perform the re-open. */ if (!autofs_oz_mode(sbi) && cmd != AUTOFS_DEV_IOCTL_CATATONIC_CMD) { err = -EACCES; fput(fp); goto out; } } cont: err = fn(fp, sbi, param); if (fp) fput(fp); if (err >= 0 && copy_to_user(user, param, AUTOFS_DEV_IOCTL_SIZE)) err = -EFAULT; out: free_dev_ioctl(param); return err; } static long autofs_dev_ioctl(struct file *file, unsigned int command, unsigned long u) { int err; err = _autofs_dev_ioctl(command, (struct autofs_dev_ioctl __user *) u); return (long) err; } #ifdef CONFIG_COMPAT static long autofs_dev_ioctl_compat(struct file *file, unsigned int command, unsigned long u) { return autofs_dev_ioctl(file, command, (unsigned long) compat_ptr(u)); } #else #define autofs_dev_ioctl_compat NULL #endif static const struct file_operations _dev_ioctl_fops = { .unlocked_ioctl = autofs_dev_ioctl, .compat_ioctl = autofs_dev_ioctl_compat, .owner = THIS_MODULE, .llseek = noop_llseek, }; static struct miscdevice _autofs_dev_ioctl_misc = { .minor = AUTOFS_MINOR, .name = AUTOFS_DEVICE_NAME, .fops = &_dev_ioctl_fops, .mode = 0644, }; MODULE_ALIAS_MISCDEV(AUTOFS_MINOR); MODULE_ALIAS("devname:autofs"); /* Register/deregister misc character device */ int __init autofs_dev_ioctl_init(void) { int r; r = misc_register(&_autofs_dev_ioctl_misc); if (r) { pr_err("misc_register failed for control device\n"); return r; } return 0; } void autofs_dev_ioctl_exit(void) { misc_deregister(&_autofs_dev_ioctl_misc); }
linux-master
fs/autofs/dev-ioctl.c
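The miscdevice interface above exists so a daemon can regain an ioctl file descriptor on an autofs mount that is covered by another mount. Below is a hedged daemon-side sketch of the openmount call. The struct fields (ver_major, ver_minor, size, ioctlfd, openmount.devid, path) mirror the kernel code above; the header name <linux/auto_dev-ioctl.h>, the AUTOFS_DEV_IOCTL_OPENMOUNT macro and the /dev/autofs node name are recalled from the uapi interface and should be checked against the installed headers. The path and device id arguments are placeholders supplied by the caller (for example from /proc/self/mountinfo).

/*
 * Hedged sketch: open an ioctl fd on a (possibly covered) autofs mount
 * via the /dev/autofs control device.  Requires CAP_SYS_ADMIN, as the
 * dispatcher above enforces for this command.
 */
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/auto_dev-ioctl.h>

static int autofs_open_ioctlfd(const char *path, unsigned int devid)
{
	struct autofs_dev_ioctl *param;
	size_t len = sizeof(*param) + strlen(path) + 1;
	int ctl, ret;

	param = calloc(1, len);
	if (!param)
		return -1;

	/* Version and size checks are performed by validate_dev_ioctl(). */
	param->ver_major = AUTOFS_DEV_IOCTL_VERSION_MAJOR;
	param->ver_minor = AUTOFS_DEV_IOCTL_VERSION_MINOR;
	param->size = len;
	param->ioctlfd = -1;
	param->openmount.devid = devid;
	strcpy(param->path, path);

	ctl = open("/dev/autofs", O_RDONLY);
	if (ctl == -1) {
		free(param);
		return -1;
	}

	ret = ioctl(ctl, AUTOFS_DEV_IOCTL_OPENMOUNT, param);
	close(ctl);

	/* On success the kernel copies the new fd back in param->ioctlfd. */
	ret = ret ? -1 : param->ioctlfd;
	free(param);
	return ret;
}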
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 1997-1998 Transmeta Corporation -- All Rights Reserved * Copyright 1999-2000 Jeremy Fitzhardinge <[email protected]> * Copyright 2001-2006 Ian Kent <[email protected]> */ #include "autofs_i.h" /* Check if a dentry can be expired */ static inline int autofs_can_expire(struct dentry *dentry, unsigned long timeout, unsigned int how) { struct autofs_info *ino = autofs_dentry_ino(dentry); /* dentry in the process of being deleted */ if (ino == NULL) return 0; if (!(how & AUTOFS_EXP_IMMEDIATE)) { /* Too young to die */ if (!timeout || time_after(ino->last_used + timeout, jiffies)) return 0; } return 1; } /* Check a mount point for busyness */ static int autofs_mount_busy(struct vfsmount *mnt, struct dentry *dentry, unsigned int how) { struct dentry *top = dentry; struct path path = {.mnt = mnt, .dentry = dentry}; int status = 1; pr_debug("dentry %p %pd\n", dentry, dentry); path_get(&path); if (!follow_down_one(&path)) goto done; if (is_autofs_dentry(path.dentry)) { struct autofs_sb_info *sbi = autofs_sbi(path.dentry->d_sb); /* This is an autofs submount, we can't expire it */ if (autofs_type_indirect(sbi->type)) goto done; } /* Not a submount, has a forced expire been requested */ if (how & AUTOFS_EXP_FORCED) { status = 0; goto done; } /* Update the expiry counter if fs is busy */ if (!may_umount_tree(path.mnt)) { struct autofs_info *ino; ino = autofs_dentry_ino(top); ino->last_used = jiffies; goto done; } status = 0; done: pr_debug("returning = %d\n", status); path_put(&path); return status; } /* p->d_lock held */ static struct dentry *positive_after(struct dentry *p, struct dentry *child) { if (child) child = list_next_entry(child, d_child); else child = list_first_entry(&p->d_subdirs, struct dentry, d_child); list_for_each_entry_from(child, &p->d_subdirs, d_child) { spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED); if (simple_positive(child)) { dget_dlock(child); spin_unlock(&child->d_lock); return child; } spin_unlock(&child->d_lock); } return NULL; } /* * Calculate and dget next entry in the subdirs list under root. */ static struct dentry *get_next_positive_subdir(struct dentry *prev, struct dentry *root) { struct autofs_sb_info *sbi = autofs_sbi(root->d_sb); struct dentry *q; spin_lock(&sbi->lookup_lock); spin_lock(&root->d_lock); q = positive_after(root, prev); spin_unlock(&root->d_lock); spin_unlock(&sbi->lookup_lock); dput(prev); return q; } /* * Calculate and dget next entry in top down tree traversal. */ static struct dentry *get_next_positive_dentry(struct dentry *prev, struct dentry *root) { struct autofs_sb_info *sbi = autofs_sbi(root->d_sb); struct dentry *p = prev, *ret = NULL, *d = NULL; if (prev == NULL) return dget(root); spin_lock(&sbi->lookup_lock); spin_lock(&p->d_lock); while (1) { struct dentry *parent; ret = positive_after(p, d); if (ret || p == root) break; parent = p->d_parent; spin_unlock(&p->d_lock); spin_lock(&parent->d_lock); d = p; p = parent; } spin_unlock(&p->d_lock); spin_unlock(&sbi->lookup_lock); dput(prev); return ret; } /* * Check a direct mount point for busyness. * Direct mounts have similar expiry semantics to tree mounts. * The tree is not busy iff no mountpoints are busy and there are no * autofs submounts. 
*/ static int autofs_direct_busy(struct vfsmount *mnt, struct dentry *top, unsigned long timeout, unsigned int how) { pr_debug("top %p %pd\n", top, top); /* Forced expire, user space handles busy mounts */ if (how & AUTOFS_EXP_FORCED) return 0; /* If it's busy update the expiry counters */ if (!may_umount_tree(mnt)) { struct autofs_info *ino; ino = autofs_dentry_ino(top); if (ino) ino->last_used = jiffies; return 1; } /* Timeout of a direct mount is determined by its top dentry */ if (!autofs_can_expire(top, timeout, how)) return 1; return 0; } /* * Check a directory tree of mount points for busyness * The tree is not busy iff no mountpoints are busy */ static int autofs_tree_busy(struct vfsmount *mnt, struct dentry *top, unsigned long timeout, unsigned int how) { struct autofs_info *top_ino = autofs_dentry_ino(top); struct dentry *p; pr_debug("top %p %pd\n", top, top); /* Negative dentry - give up */ if (!simple_positive(top)) return 1; p = NULL; while ((p = get_next_positive_dentry(p, top))) { pr_debug("dentry %p %pd\n", p, p); /* * Is someone visiting anywhere in the subtree ? * If there's no mount we need to check the usage * count for the autofs dentry. * If the fs is busy update the expiry counter. */ if (d_mountpoint(p)) { if (autofs_mount_busy(mnt, p, how)) { top_ino->last_used = jiffies; dput(p); return 1; } } else { struct autofs_info *ino = autofs_dentry_ino(p); unsigned int ino_count = READ_ONCE(ino->count); /* allow for dget above and top is already dgot */ if (p == top) ino_count += 2; else ino_count++; if (d_count(p) > ino_count) { top_ino->last_used = jiffies; dput(p); return 1; } } } /* Forced expire, user space handles busy mounts */ if (how & AUTOFS_EXP_FORCED) return 0; /* Timeout of a tree mount is ultimately determined by its top dentry */ if (!autofs_can_expire(top, timeout, how)) return 1; return 0; } static struct dentry *autofs_check_leaves(struct vfsmount *mnt, struct dentry *parent, unsigned long timeout, unsigned int how) { struct dentry *p; pr_debug("parent %p %pd\n", parent, parent); p = NULL; while ((p = get_next_positive_dentry(p, parent))) { pr_debug("dentry %p %pd\n", p, p); if (d_mountpoint(p)) { /* Can we umount this guy */ if (autofs_mount_busy(mnt, p, how)) continue; /* This isn't a submount so if a forced expire * has been requested, user space handles busy * mounts */ if (how & AUTOFS_EXP_FORCED) return p; /* Can we expire this guy */ if (autofs_can_expire(p, timeout, how)) return p; } } return NULL; } /* Check if we can expire a direct mount (possibly a tree) */ static struct dentry *autofs_expire_direct(struct super_block *sb, struct vfsmount *mnt, struct autofs_sb_info *sbi, unsigned int how) { struct dentry *root = dget(sb->s_root); struct autofs_info *ino; unsigned long timeout; if (!root) return NULL; timeout = sbi->exp_timeout; if (!autofs_direct_busy(mnt, root, timeout, how)) { spin_lock(&sbi->fs_lock); ino = autofs_dentry_ino(root); /* No point expiring a pending mount */ if (ino->flags & AUTOFS_INF_PENDING) { spin_unlock(&sbi->fs_lock); goto out; } ino->flags |= AUTOFS_INF_WANT_EXPIRE; spin_unlock(&sbi->fs_lock); synchronize_rcu(); if (!autofs_direct_busy(mnt, root, timeout, how)) { spin_lock(&sbi->fs_lock); ino->flags |= AUTOFS_INF_EXPIRING; init_completion(&ino->expire_complete); spin_unlock(&sbi->fs_lock); return root; } spin_lock(&sbi->fs_lock); ino->flags &= ~AUTOFS_INF_WANT_EXPIRE; spin_unlock(&sbi->fs_lock); } out: dput(root); return NULL; } /* Check if 'dentry' should expire, or return a nearby * dentry that is suitable. 
* If returned dentry is different from arg dentry, * then a dget() reference was taken, else not. */ static struct dentry *should_expire(struct dentry *dentry, struct vfsmount *mnt, unsigned long timeout, unsigned int how) { struct autofs_info *ino = autofs_dentry_ino(dentry); unsigned int ino_count; /* No point expiring a pending mount */ if (ino->flags & AUTOFS_INF_PENDING) return NULL; /* * Case 1: (i) indirect mount or top level pseudo direct mount * (autofs-4.1). * (ii) indirect mount with offset mount, check the "/" * offset (autofs-5.0+). */ if (d_mountpoint(dentry)) { pr_debug("checking mountpoint %p %pd\n", dentry, dentry); /* Can we umount this guy */ if (autofs_mount_busy(mnt, dentry, how)) return NULL; /* This isn't a submount so if a forced expire * has been requested, user space handles busy * mounts */ if (how & AUTOFS_EXP_FORCED) return dentry; /* Can we expire this guy */ if (autofs_can_expire(dentry, timeout, how)) return dentry; return NULL; } if (d_is_symlink(dentry)) { pr_debug("checking symlink %p %pd\n", dentry, dentry); /* Forced expire, user space handles busy mounts */ if (how & AUTOFS_EXP_FORCED) return dentry; /* * A symlink can't be "busy" in the usual sense so * just check last used for expire timeout. */ if (autofs_can_expire(dentry, timeout, how)) return dentry; return NULL; } if (autofs_empty(ino)) return NULL; /* Case 2: tree mount, expire iff entire tree is not busy */ if (!(how & AUTOFS_EXP_LEAVES)) { /* Not a forced expire? */ if (!(how & AUTOFS_EXP_FORCED)) { /* ref-walk currently on this dentry? */ ino_count = READ_ONCE(ino->count) + 1; if (d_count(dentry) > ino_count) return NULL; } if (!autofs_tree_busy(mnt, dentry, timeout, how)) return dentry; /* * Case 3: pseudo direct mount, expire individual leaves * (autofs-4.1). */ } else { struct dentry *expired; /* Not a forced expire? */ if (!(how & AUTOFS_EXP_FORCED)) { /* ref-walk currently on this dentry? */ ino_count = READ_ONCE(ino->count) + 1; if (d_count(dentry) > ino_count) return NULL; } expired = autofs_check_leaves(mnt, dentry, timeout, how); if (expired) { if (expired == dentry) dput(dentry); return expired; } } return NULL; } /* * Find an eligible tree to time-out * A tree is eligible if :- * - it is unused by any user process * - it has been unused for exp_timeout time */ static struct dentry *autofs_expire_indirect(struct super_block *sb, struct vfsmount *mnt, struct autofs_sb_info *sbi, unsigned int how) { unsigned long timeout; struct dentry *root = sb->s_root; struct dentry *dentry; struct dentry *expired; struct dentry *found; struct autofs_info *ino; if (!root) return NULL; timeout = sbi->exp_timeout; dentry = NULL; while ((dentry = get_next_positive_subdir(dentry, root))) { spin_lock(&sbi->fs_lock); ino = autofs_dentry_ino(dentry); if (ino->flags & AUTOFS_INF_WANT_EXPIRE) { spin_unlock(&sbi->fs_lock); continue; } spin_unlock(&sbi->fs_lock); expired = should_expire(dentry, mnt, timeout, how); if (!expired) continue; spin_lock(&sbi->fs_lock); ino = autofs_dentry_ino(expired); ino->flags |= AUTOFS_INF_WANT_EXPIRE; spin_unlock(&sbi->fs_lock); synchronize_rcu(); /* Make sure a reference is not taken on found if * things have changed. 
*/ how &= ~AUTOFS_EXP_LEAVES; found = should_expire(expired, mnt, timeout, how); if (found != expired) { // something has changed, continue dput(found); goto next; } if (expired != dentry) dput(dentry); spin_lock(&sbi->fs_lock); goto found; next: spin_lock(&sbi->fs_lock); ino->flags &= ~AUTOFS_INF_WANT_EXPIRE; spin_unlock(&sbi->fs_lock); if (expired != dentry) dput(expired); } return NULL; found: pr_debug("returning %p %pd\n", expired, expired); ino->flags |= AUTOFS_INF_EXPIRING; init_completion(&ino->expire_complete); spin_unlock(&sbi->fs_lock); return expired; } int autofs_expire_wait(const struct path *path, int rcu_walk) { struct dentry *dentry = path->dentry; struct autofs_sb_info *sbi = autofs_sbi(dentry->d_sb); struct autofs_info *ino = autofs_dentry_ino(dentry); int status; int state; /* Block on any pending expire */ if (!(ino->flags & AUTOFS_INF_WANT_EXPIRE)) return 0; if (rcu_walk) return -ECHILD; retry: spin_lock(&sbi->fs_lock); state = ino->flags & (AUTOFS_INF_WANT_EXPIRE | AUTOFS_INF_EXPIRING); if (state == AUTOFS_INF_WANT_EXPIRE) { spin_unlock(&sbi->fs_lock); /* * Possibly being selected for expire, wait until * it's selected or not. */ schedule_timeout_uninterruptible(HZ/10); goto retry; } if (state & AUTOFS_INF_EXPIRING) { spin_unlock(&sbi->fs_lock); pr_debug("waiting for expire %p name=%pd\n", dentry, dentry); status = autofs_wait(sbi, path, NFY_NONE); wait_for_completion(&ino->expire_complete); pr_debug("expire done status=%d\n", status); if (d_unhashed(dentry)) return -EAGAIN; return status; } spin_unlock(&sbi->fs_lock); return 0; } /* Perform an expiry operation */ int autofs_expire_run(struct super_block *sb, struct vfsmount *mnt, struct autofs_sb_info *sbi, struct autofs_packet_expire __user *pkt_p) { struct autofs_packet_expire pkt; struct autofs_info *ino; struct dentry *dentry; int ret = 0; memset(&pkt, 0, sizeof(pkt)); pkt.hdr.proto_version = sbi->version; pkt.hdr.type = autofs_ptype_expire; dentry = autofs_expire_indirect(sb, mnt, sbi, 0); if (!dentry) return -EAGAIN; pkt.len = dentry->d_name.len; memcpy(pkt.name, dentry->d_name.name, pkt.len); pkt.name[pkt.len] = '\0'; if (copy_to_user(pkt_p, &pkt, sizeof(struct autofs_packet_expire))) ret = -EFAULT; spin_lock(&sbi->fs_lock); ino = autofs_dentry_ino(dentry); /* avoid rapid-fire expire attempts if expiry fails */ ino->last_used = jiffies; ino->flags &= ~(AUTOFS_INF_EXPIRING|AUTOFS_INF_WANT_EXPIRE); complete_all(&ino->expire_complete); spin_unlock(&sbi->fs_lock); dput(dentry); return ret; } int autofs_do_expire_multi(struct super_block *sb, struct vfsmount *mnt, struct autofs_sb_info *sbi, unsigned int how) { struct dentry *dentry; int ret = -EAGAIN; if (autofs_type_trigger(sbi->type)) dentry = autofs_expire_direct(sb, mnt, sbi, how); else dentry = autofs_expire_indirect(sb, mnt, sbi, how); if (dentry) { struct autofs_info *ino = autofs_dentry_ino(dentry); const struct path path = { .mnt = mnt, .dentry = dentry }; /* This is synchronous because it makes the daemon a * little easier */ ret = autofs_wait(sbi, &path, NFY_EXPIRE); spin_lock(&sbi->fs_lock); /* avoid rapid-fire expire attempts if expiry fails */ ino->last_used = jiffies; ino->flags &= ~(AUTOFS_INF_EXPIRING|AUTOFS_INF_WANT_EXPIRE); complete_all(&ino->expire_complete); spin_unlock(&sbi->fs_lock); dput(dentry); } return ret; } /* * Call repeatedly until it returns -EAGAIN, meaning there's nothing * more to be done. 
 */
int autofs_expire_multi(struct super_block *sb, struct vfsmount *mnt,
			struct autofs_sb_info *sbi, int __user *arg)
{
	unsigned int how = 0;

	if (arg && get_user(how, arg))
		return -EFAULT;

	return autofs_do_expire_multi(sb, mnt, sbi, how);
}
linux-master
fs/autofs/expire.c
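autofs_do_expire_multi() is documented above as something to call repeatedly until it returns -EAGAIN, each successful call expiring at most one tree or leaf. A hedged daemon-side sketch of that loop follows. The AUTOFS_EXP_IMMEDIATE and AUTOFS_EXP_LEAVES flags match the "how" bits tested in the expire code above; the AUTOFS_IOC_EXPIRE_MULTI ioctl name and the <linux/auto_fs4.h> header come from the uapi interface as recalled, and the particular flag combination is illustrative, not prescriptive.

/*
 * Hedged sketch: drain expirable mounts from the daemon side.  "ioctlfd"
 * is an fd on the autofs mount (e.g. from the dev-ioctl openmount call).
 */
#include <errno.h>
#include <sys/ioctl.h>
#include <linux/auto_fs4.h>

static int expire_everything(int ioctlfd)
{
	int how = AUTOFS_EXP_IMMEDIATE | AUTOFS_EXP_LEAVES;

	/* Each successful call expires (at most) one tree or leaf entry. */
	while (ioctl(ioctlfd, AUTOFS_IOC_EXPIRE_MULTI, &how) == 0)
		;

	/* -EAGAIN from the kernel means there is nothing left to expire. */
	return errno == EAGAIN ? 0 : -1;
}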
// SPDX-License-Identifier: GPL-2.0-or-later /* -*- linux-c -*- --------------------------------------------------------- * * * linux/fs/devpts/inode.c * * Copyright 1998-2004 H. Peter Anvin -- All Rights Reserved * * ------------------------------------------------------------------------- */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/sched.h> #include <linux/namei.h> #include <linux/slab.h> #include <linux/mount.h> #include <linux/tty.h> #include <linux/mutex.h> #include <linux/magic.h> #include <linux/idr.h> #include <linux/devpts_fs.h> #include <linux/parser.h> #include <linux/fsnotify.h> #include <linux/seq_file.h> #define DEVPTS_DEFAULT_MODE 0600 /* * ptmx is a new node in /dev/pts and will be unused in legacy (single- * instance) mode. To prevent surprises in user space, set permissions of * ptmx to 0. Use 'chmod' or remount with '-o ptmxmode' to set meaningful * permissions. */ #define DEVPTS_DEFAULT_PTMX_MODE 0000 #define PTMX_MINOR 2 /* * sysctl support for setting limits on the number of Unix98 ptys allocated. * Otherwise one can eat up all kernel memory by opening /dev/ptmx repeatedly. */ static int pty_limit = NR_UNIX98_PTY_DEFAULT; static int pty_reserve = NR_UNIX98_PTY_RESERVE; static int pty_limit_min; static int pty_limit_max = INT_MAX; static atomic_t pty_count = ATOMIC_INIT(0); static struct ctl_table pty_table[] = { { .procname = "max", .maxlen = sizeof(int), .mode = 0644, .data = &pty_limit, .proc_handler = proc_dointvec_minmax, .extra1 = &pty_limit_min, .extra2 = &pty_limit_max, }, { .procname = "reserve", .maxlen = sizeof(int), .mode = 0644, .data = &pty_reserve, .proc_handler = proc_dointvec_minmax, .extra1 = &pty_limit_min, .extra2 = &pty_limit_max, }, { .procname = "nr", .maxlen = sizeof(int), .mode = 0444, .data = &pty_count, .proc_handler = proc_dointvec, }, {} }; struct pts_mount_opts { int setuid; int setgid; kuid_t uid; kgid_t gid; umode_t mode; umode_t ptmxmode; int reserve; int max; }; enum { Opt_uid, Opt_gid, Opt_mode, Opt_ptmxmode, Opt_newinstance, Opt_max, Opt_err }; static const match_table_t tokens = { {Opt_uid, "uid=%u"}, {Opt_gid, "gid=%u"}, {Opt_mode, "mode=%o"}, {Opt_ptmxmode, "ptmxmode=%o"}, {Opt_newinstance, "newinstance"}, {Opt_max, "max=%d"}, {Opt_err, NULL} }; struct pts_fs_info { struct ida allocated_ptys; struct pts_mount_opts mount_opts; struct super_block *sb; struct dentry *ptmx_dentry; }; static inline struct pts_fs_info *DEVPTS_SB(struct super_block *sb) { return sb->s_fs_info; } static int devpts_ptmx_path(struct path *path) { struct super_block *sb; int err; /* Is a devpts filesystem at "pts" in the same directory? */ err = path_pts(path); if (err) return err; /* Is the path the root of a devpts filesystem? */ sb = path->mnt->mnt_sb; if ((sb->s_magic != DEVPTS_SUPER_MAGIC) || (path->mnt->mnt_root != sb->s_root)) return -ENODEV; return 0; } /* * Try to find a suitable devpts filesystem. We support the following * scenarios: * - The ptmx device node is located in the same directory as the devpts * mount where the pts device nodes are located. * This is e.g. the case when calling open on the /dev/pts/ptmx device * node when the devpts filesystem is mounted at /dev/pts. * - The ptmx device node is located outside the devpts filesystem mount * where the pts device nodes are located. For example, the ptmx device * is a symlink, separate device node, or bind-mount. 
* A supported scenario is bind-mounting /dev/pts/ptmx to /dev/ptmx and * then calling open on /dev/ptmx. In this case a suitable pts * subdirectory can be found in the common parent directory /dev of the * devpts mount and the ptmx bind-mount, after resolving the /dev/ptmx * bind-mount. * If no suitable pts subdirectory can be found this function will fail. * This is e.g. the case when bind-mounting /dev/pts/ptmx to /ptmx. */ struct vfsmount *devpts_mntget(struct file *filp, struct pts_fs_info *fsi) { struct path path; int err = 0; path = filp->f_path; path_get(&path); /* Walk upward while the start point is a bind mount of * a single file. */ while (path.mnt->mnt_root == path.dentry) if (follow_up(&path) == 0) break; /* devpts_ptmx_path() finds a devpts fs or returns an error. */ if ((path.mnt->mnt_sb->s_magic != DEVPTS_SUPER_MAGIC) || (DEVPTS_SB(path.mnt->mnt_sb) != fsi)) err = devpts_ptmx_path(&path); dput(path.dentry); if (!err) { if (DEVPTS_SB(path.mnt->mnt_sb) == fsi) return path.mnt; err = -ENODEV; } mntput(path.mnt); return ERR_PTR(err); } struct pts_fs_info *devpts_acquire(struct file *filp) { struct pts_fs_info *result; struct path path; struct super_block *sb; path = filp->f_path; path_get(&path); /* Has the devpts filesystem already been found? */ if (path.mnt->mnt_sb->s_magic != DEVPTS_SUPER_MAGIC) { int err; err = devpts_ptmx_path(&path); if (err) { result = ERR_PTR(err); goto out; } } /* * pty code needs to hold extra references in case of last /dev/tty close */ sb = path.mnt->mnt_sb; atomic_inc(&sb->s_active); result = DEVPTS_SB(sb); out: path_put(&path); return result; } void devpts_release(struct pts_fs_info *fsi) { deactivate_super(fsi->sb); } #define PARSE_MOUNT 0 #define PARSE_REMOUNT 1 /* * parse_mount_options(): * Set @opts to mount options specified in @data. If an option is not * specified in @data, set it to its default value. * * Note: @data may be NULL (in which case all options are set to default). */ static int parse_mount_options(char *data, int op, struct pts_mount_opts *opts) { char *p; kuid_t uid; kgid_t gid; opts->setuid = 0; opts->setgid = 0; opts->uid = GLOBAL_ROOT_UID; opts->gid = GLOBAL_ROOT_GID; opts->mode = DEVPTS_DEFAULT_MODE; opts->ptmxmode = DEVPTS_DEFAULT_PTMX_MODE; opts->max = NR_UNIX98_PTY_MAX; /* Only allow instances mounted from the initial mount * namespace to tap the reserve pool of ptys. 
*/ if (op == PARSE_MOUNT) opts->reserve = (current->nsproxy->mnt_ns == init_task.nsproxy->mnt_ns); while ((p = strsep(&data, ",")) != NULL) { substring_t args[MAX_OPT_ARGS]; int token; int option; if (!*p) continue; token = match_token(p, tokens, args); switch (token) { case Opt_uid: if (match_int(&args[0], &option)) return -EINVAL; uid = make_kuid(current_user_ns(), option); if (!uid_valid(uid)) return -EINVAL; opts->uid = uid; opts->setuid = 1; break; case Opt_gid: if (match_int(&args[0], &option)) return -EINVAL; gid = make_kgid(current_user_ns(), option); if (!gid_valid(gid)) return -EINVAL; opts->gid = gid; opts->setgid = 1; break; case Opt_mode: if (match_octal(&args[0], &option)) return -EINVAL; opts->mode = option & S_IALLUGO; break; case Opt_ptmxmode: if (match_octal(&args[0], &option)) return -EINVAL; opts->ptmxmode = option & S_IALLUGO; break; case Opt_newinstance: break; case Opt_max: if (match_int(&args[0], &option) || option < 0 || option > NR_UNIX98_PTY_MAX) return -EINVAL; opts->max = option; break; default: pr_err("called with bogus options\n"); return -EINVAL; } } return 0; } static int mknod_ptmx(struct super_block *sb) { int mode; int rc = -ENOMEM; struct dentry *dentry; struct inode *inode; struct dentry *root = sb->s_root; struct pts_fs_info *fsi = DEVPTS_SB(sb); struct pts_mount_opts *opts = &fsi->mount_opts; kuid_t ptmx_uid = current_fsuid(); kgid_t ptmx_gid = current_fsgid(); inode_lock(d_inode(root)); /* If we have already created ptmx node, return */ if (fsi->ptmx_dentry) { rc = 0; goto out; } dentry = d_alloc_name(root, "ptmx"); if (!dentry) { pr_err("Unable to alloc dentry for ptmx node\n"); goto out; } /* * Create a new 'ptmx' node in this mount of devpts. */ inode = new_inode(sb); if (!inode) { pr_err("Unable to alloc inode for ptmx node\n"); dput(dentry); goto out; } inode->i_ino = 2; inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode); mode = S_IFCHR|opts->ptmxmode; init_special_inode(inode, mode, MKDEV(TTYAUX_MAJOR, 2)); inode->i_uid = ptmx_uid; inode->i_gid = ptmx_gid; d_add(dentry, inode); fsi->ptmx_dentry = dentry; rc = 0; out: inode_unlock(d_inode(root)); return rc; } static void update_ptmx_mode(struct pts_fs_info *fsi) { struct inode *inode; if (fsi->ptmx_dentry) { inode = d_inode(fsi->ptmx_dentry); inode->i_mode = S_IFCHR|fsi->mount_opts.ptmxmode; } } static int devpts_remount(struct super_block *sb, int *flags, char *data) { int err; struct pts_fs_info *fsi = DEVPTS_SB(sb); struct pts_mount_opts *opts = &fsi->mount_opts; err = parse_mount_options(data, PARSE_REMOUNT, opts); /* * parse_mount_options() restores options to default values * before parsing and may have changed ptmxmode. So, update the * mode in the inode too. Bogus options don't fail the remount, * so do this even on error return. 
*/ update_ptmx_mode(fsi); return err; } static int devpts_show_options(struct seq_file *seq, struct dentry *root) { struct pts_fs_info *fsi = DEVPTS_SB(root->d_sb); struct pts_mount_opts *opts = &fsi->mount_opts; if (opts->setuid) seq_printf(seq, ",uid=%u", from_kuid_munged(&init_user_ns, opts->uid)); if (opts->setgid) seq_printf(seq, ",gid=%u", from_kgid_munged(&init_user_ns, opts->gid)); seq_printf(seq, ",mode=%03o", opts->mode); seq_printf(seq, ",ptmxmode=%03o", opts->ptmxmode); if (opts->max < NR_UNIX98_PTY_MAX) seq_printf(seq, ",max=%d", opts->max); return 0; } static const struct super_operations devpts_sops = { .statfs = simple_statfs, .remount_fs = devpts_remount, .show_options = devpts_show_options, }; static void *new_pts_fs_info(struct super_block *sb) { struct pts_fs_info *fsi; fsi = kzalloc(sizeof(struct pts_fs_info), GFP_KERNEL); if (!fsi) return NULL; ida_init(&fsi->allocated_ptys); fsi->mount_opts.mode = DEVPTS_DEFAULT_MODE; fsi->mount_opts.ptmxmode = DEVPTS_DEFAULT_PTMX_MODE; fsi->sb = sb; return fsi; } static int devpts_fill_super(struct super_block *s, void *data, int silent) { struct inode *inode; int error; s->s_iflags &= ~SB_I_NODEV; s->s_blocksize = 1024; s->s_blocksize_bits = 10; s->s_magic = DEVPTS_SUPER_MAGIC; s->s_op = &devpts_sops; s->s_d_op = &simple_dentry_operations; s->s_time_gran = 1; error = -ENOMEM; s->s_fs_info = new_pts_fs_info(s); if (!s->s_fs_info) goto fail; error = parse_mount_options(data, PARSE_MOUNT, &DEVPTS_SB(s)->mount_opts); if (error) goto fail; error = -ENOMEM; inode = new_inode(s); if (!inode) goto fail; inode->i_ino = 1; inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode); inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR; inode->i_op = &simple_dir_inode_operations; inode->i_fop = &simple_dir_operations; set_nlink(inode, 2); s->s_root = d_make_root(inode); if (!s->s_root) { pr_err("get root dentry failed\n"); goto fail; } error = mknod_ptmx(s); if (error) goto fail_dput; return 0; fail_dput: dput(s->s_root); s->s_root = NULL; fail: return error; } /* * devpts_mount() * * Mount a new (private) instance of devpts. PTYs created in this * instance are independent of the PTYs in other devpts instances. */ static struct dentry *devpts_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_nodev(fs_type, flags, data, devpts_fill_super); } static void devpts_kill_sb(struct super_block *sb) { struct pts_fs_info *fsi = DEVPTS_SB(sb); if (fsi) ida_destroy(&fsi->allocated_ptys); kfree(fsi); kill_litter_super(sb); } static struct file_system_type devpts_fs_type = { .name = "devpts", .mount = devpts_mount, .kill_sb = devpts_kill_sb, .fs_flags = FS_USERNS_MOUNT, }; /* * The normal naming convention is simply /dev/pts/<number>; this conforms * to the System V naming convention */ int devpts_new_index(struct pts_fs_info *fsi) { int index = -ENOSPC; if (atomic_inc_return(&pty_count) >= (pty_limit - (fsi->mount_opts.reserve ? 0 : pty_reserve))) goto out; index = ida_alloc_max(&fsi->allocated_ptys, fsi->mount_opts.max - 1, GFP_KERNEL); out: if (index < 0) atomic_dec(&pty_count); return index; } void devpts_kill_index(struct pts_fs_info *fsi, int idx) { ida_free(&fsi->allocated_ptys, idx); atomic_dec(&pty_count); } /** * devpts_pty_new -- create a new inode in /dev/pts/ * @fsi: Filesystem info for this instance. * @index: used as a name of the node * @priv: what's given back by devpts_get_priv * * The dentry for the created inode is returned. * Remove it from /dev/pts/ with devpts_pty_kill(). 
*/ struct dentry *devpts_pty_new(struct pts_fs_info *fsi, int index, void *priv) { struct dentry *dentry; struct super_block *sb = fsi->sb; struct inode *inode; struct dentry *root; struct pts_mount_opts *opts; char s[12]; root = sb->s_root; opts = &fsi->mount_opts; inode = new_inode(sb); if (!inode) return ERR_PTR(-ENOMEM); inode->i_ino = index + 3; inode->i_uid = opts->setuid ? opts->uid : current_fsuid(); inode->i_gid = opts->setgid ? opts->gid : current_fsgid(); inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode); init_special_inode(inode, S_IFCHR|opts->mode, MKDEV(UNIX98_PTY_SLAVE_MAJOR, index)); sprintf(s, "%d", index); dentry = d_alloc_name(root, s); if (dentry) { dentry->d_fsdata = priv; d_add(dentry, inode); fsnotify_create(d_inode(root), dentry); } else { iput(inode); dentry = ERR_PTR(-ENOMEM); } return dentry; } /** * devpts_get_priv -- get private data for a slave * @dentry: dentry of the slave * * Returns whatever was passed as priv in devpts_pty_new for a given inode. */ void *devpts_get_priv(struct dentry *dentry) { if (dentry->d_sb->s_magic != DEVPTS_SUPER_MAGIC) return NULL; return dentry->d_fsdata; } /** * devpts_pty_kill -- remove inode form /dev/pts/ * @dentry: dentry of the slave to be removed * * This is an inverse operation of devpts_pty_new. */ void devpts_pty_kill(struct dentry *dentry) { WARN_ON_ONCE(dentry->d_sb->s_magic != DEVPTS_SUPER_MAGIC); dentry->d_fsdata = NULL; drop_nlink(dentry->d_inode); d_drop(dentry); fsnotify_unlink(d_inode(dentry->d_parent), dentry); dput(dentry); /* d_alloc_name() in devpts_pty_new() */ } static int __init init_devpts_fs(void) { int err = register_filesystem(&devpts_fs_type); if (!err) { register_sysctl("kernel/pty", pty_table); } return err; } module_init(init_devpts_fs)
linux-master
fs/devpts/inode.c
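parse_mount_options() above accepts uid, gid, mode, ptmxmode, max and newinstance, and mknod_ptmx() gives each instance its own ptmx node. The sketch below shows a private devpts mount using those options; the target directory is a placeholder, and the specific gid/mode values are conventional choices rather than anything mandated by the code.

/*
 * Hedged sketch: mount a private devpts instance with its own ptmx node.
 * A container runtime would typically also bind <target>/ptmx over
 * /dev/ptmx or rely on ptmxmode, as the comments above describe.
 */
#include <sys/mount.h>

static int mount_private_devpts(const char *target)
{
	return mount("devpts", target, "devpts", MS_NOSUID | MS_NOEXEC,
		     "newinstance,ptmxmode=0666,mode=0620,gid=5,max=1024");
}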
// SPDX-License-Identifier: GPL-2.0-only /* * linux/fs/adfs/super.c * * Copyright (C) 1997-1999 Russell King */ #include <linux/module.h> #include <linux/init.h> #include <linux/parser.h> #include <linux/mount.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/statfs.h> #include <linux/user_namespace.h> #include <linux/blkdev.h> #include "adfs.h" #include "dir_f.h" #include "dir_fplus.h" #define ADFS_SB_FLAGS SB_NOATIME #define ADFS_DEFAULT_OWNER_MASK S_IRWXU #define ADFS_DEFAULT_OTHER_MASK (S_IRWXG | S_IRWXO) void __adfs_error(struct super_block *sb, const char *function, const char *fmt, ...) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk(KERN_CRIT "ADFS-fs error (device %s)%s%s: %pV\n", sb->s_id, function ? ": " : "", function ? function : "", &vaf); va_end(args); } void adfs_msg(struct super_block *sb, const char *pfx, const char *fmt, ...) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk("%sADFS-fs (%s): %pV\n", pfx, sb->s_id, &vaf); va_end(args); } static int adfs_checkdiscrecord(struct adfs_discrecord *dr) { unsigned int max_idlen; int i; /* sector size must be 256, 512 or 1024 bytes */ if (dr->log2secsize != 8 && dr->log2secsize != 9 && dr->log2secsize != 10) return 1; /* idlen must be at least log2secsize + 3 */ if (dr->idlen < dr->log2secsize + 3) return 1; /* we cannot have such a large disc that we * are unable to represent sector offsets in * 32 bits. This works out at 2.0 TB. */ if (le32_to_cpu(dr->disc_size_high) >> dr->log2secsize) return 1; /* * Maximum idlen is limited to 16 bits for new directories by * the three-byte storage of an indirect disc address. For * big directories, idlen must be no greater than 19 v2 [1.0] */ max_idlen = dr->format_version ? 
19 : 16; if (dr->idlen > max_idlen) return 1; /* reserved bytes should be zero */ for (i = 0; i < sizeof(dr->unused52); i++) if (dr->unused52[i] != 0) return 1; return 0; } static void adfs_put_super(struct super_block *sb) { struct adfs_sb_info *asb = ADFS_SB(sb); adfs_free_map(sb); kfree_rcu(asb, rcu); } static int adfs_show_options(struct seq_file *seq, struct dentry *root) { struct adfs_sb_info *asb = ADFS_SB(root->d_sb); if (!uid_eq(asb->s_uid, GLOBAL_ROOT_UID)) seq_printf(seq, ",uid=%u", from_kuid_munged(&init_user_ns, asb->s_uid)); if (!gid_eq(asb->s_gid, GLOBAL_ROOT_GID)) seq_printf(seq, ",gid=%u", from_kgid_munged(&init_user_ns, asb->s_gid)); if (asb->s_owner_mask != ADFS_DEFAULT_OWNER_MASK) seq_printf(seq, ",ownmask=%o", asb->s_owner_mask); if (asb->s_other_mask != ADFS_DEFAULT_OTHER_MASK) seq_printf(seq, ",othmask=%o", asb->s_other_mask); if (asb->s_ftsuffix != 0) seq_printf(seq, ",ftsuffix=%u", asb->s_ftsuffix); return 0; } enum {Opt_uid, Opt_gid, Opt_ownmask, Opt_othmask, Opt_ftsuffix, Opt_err}; static const match_table_t tokens = { {Opt_uid, "uid=%u"}, {Opt_gid, "gid=%u"}, {Opt_ownmask, "ownmask=%o"}, {Opt_othmask, "othmask=%o"}, {Opt_ftsuffix, "ftsuffix=%u"}, {Opt_err, NULL} }; static int parse_options(struct super_block *sb, struct adfs_sb_info *asb, char *options) { char *p; int option; if (!options) return 0; while ((p = strsep(&options, ",")) != NULL) { substring_t args[MAX_OPT_ARGS]; int token; if (!*p) continue; token = match_token(p, tokens, args); switch (token) { case Opt_uid: if (match_int(args, &option)) return -EINVAL; asb->s_uid = make_kuid(current_user_ns(), option); if (!uid_valid(asb->s_uid)) return -EINVAL; break; case Opt_gid: if (match_int(args, &option)) return -EINVAL; asb->s_gid = make_kgid(current_user_ns(), option); if (!gid_valid(asb->s_gid)) return -EINVAL; break; case Opt_ownmask: if (match_octal(args, &option)) return -EINVAL; asb->s_owner_mask = option; break; case Opt_othmask: if (match_octal(args, &option)) return -EINVAL; asb->s_other_mask = option; break; case Opt_ftsuffix: if (match_int(args, &option)) return -EINVAL; asb->s_ftsuffix = option; break; default: adfs_msg(sb, KERN_ERR, "unrecognised mount option \"%s\" or missing value", p); return -EINVAL; } } return 0; } static int adfs_remount(struct super_block *sb, int *flags, char *data) { struct adfs_sb_info temp_asb; int ret; sync_filesystem(sb); *flags |= ADFS_SB_FLAGS; temp_asb = *ADFS_SB(sb); ret = parse_options(sb, &temp_asb, data); if (ret == 0) *ADFS_SB(sb) = temp_asb; return ret; } static int adfs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; struct adfs_sb_info *sbi = ADFS_SB(sb); u64 id = huge_encode_dev(sb->s_bdev->bd_dev); adfs_map_statfs(sb, buf); buf->f_type = ADFS_SUPER_MAGIC; buf->f_namelen = sbi->s_namelen; buf->f_bsize = sb->s_blocksize; buf->f_ffree = (long)(buf->f_bfree * buf->f_files) / (long)buf->f_blocks; buf->f_fsid = u64_to_fsid(id); return 0; } static struct kmem_cache *adfs_inode_cachep; static struct inode *adfs_alloc_inode(struct super_block *sb) { struct adfs_inode_info *ei; ei = alloc_inode_sb(sb, adfs_inode_cachep, GFP_KERNEL); if (!ei) return NULL; return &ei->vfs_inode; } static void adfs_free_inode(struct inode *inode) { kmem_cache_free(adfs_inode_cachep, ADFS_I(inode)); } static int adfs_drop_inode(struct inode *inode) { /* always drop inodes if we are read-only */ return !IS_ENABLED(CONFIG_ADFS_FS_RW) || IS_RDONLY(inode); } static void init_once(void *foo) { struct adfs_inode_info *ei = (struct 
adfs_inode_info *) foo; inode_init_once(&ei->vfs_inode); } static int __init init_inodecache(void) { adfs_inode_cachep = kmem_cache_create("adfs_inode_cache", sizeof(struct adfs_inode_info), 0, (SLAB_RECLAIM_ACCOUNT| SLAB_MEM_SPREAD|SLAB_ACCOUNT), init_once); if (adfs_inode_cachep == NULL) return -ENOMEM; return 0; } static void destroy_inodecache(void) { /* * Make sure all delayed rcu free inodes are flushed before we * destroy cache. */ rcu_barrier(); kmem_cache_destroy(adfs_inode_cachep); } static const struct super_operations adfs_sops = { .alloc_inode = adfs_alloc_inode, .free_inode = adfs_free_inode, .drop_inode = adfs_drop_inode, .write_inode = adfs_write_inode, .put_super = adfs_put_super, .statfs = adfs_statfs, .remount_fs = adfs_remount, .show_options = adfs_show_options, }; static int adfs_probe(struct super_block *sb, unsigned int offset, int silent, int (*validate)(struct super_block *sb, struct buffer_head *bh, struct adfs_discrecord **bhp)) { struct adfs_sb_info *asb = ADFS_SB(sb); struct adfs_discrecord *dr; struct buffer_head *bh; unsigned int blocksize = BLOCK_SIZE; int ret, try; for (try = 0; try < 2; try++) { /* try to set the requested block size */ if (sb->s_blocksize != blocksize && !sb_set_blocksize(sb, blocksize)) { if (!silent) adfs_msg(sb, KERN_ERR, "error: unsupported blocksize"); return -EINVAL; } /* read the buffer */ bh = sb_bread(sb, offset >> sb->s_blocksize_bits); if (!bh) { adfs_msg(sb, KERN_ERR, "error: unable to read block %u, try %d", offset >> sb->s_blocksize_bits, try); return -EIO; } /* validate it */ ret = validate(sb, bh, &dr); if (ret) { brelse(bh); return ret; } /* does the block size match the filesystem block size? */ blocksize = 1 << dr->log2secsize; if (sb->s_blocksize == blocksize) { asb->s_map = adfs_read_map(sb, dr); brelse(bh); return PTR_ERR_OR_ZERO(asb->s_map); } brelse(bh); } return -EIO; } static int adfs_validate_bblk(struct super_block *sb, struct buffer_head *bh, struct adfs_discrecord **drp) { struct adfs_discrecord *dr; unsigned char *b_data; b_data = bh->b_data + (ADFS_DISCRECORD % sb->s_blocksize); if (adfs_checkbblk(b_data)) return -EILSEQ; /* Do some sanity checks on the ADFS disc record */ dr = (struct adfs_discrecord *)(b_data + ADFS_DR_OFFSET); if (adfs_checkdiscrecord(dr)) return -EILSEQ; *drp = dr; return 0; } static int adfs_validate_dr0(struct super_block *sb, struct buffer_head *bh, struct adfs_discrecord **drp) { struct adfs_discrecord *dr; /* Do some sanity checks on the ADFS disc record */ dr = (struct adfs_discrecord *)(bh->b_data + 4); if (adfs_checkdiscrecord(dr) || dr->nzones_high || dr->nzones != 1) return -EILSEQ; *drp = dr; return 0; } static int adfs_fill_super(struct super_block *sb, void *data, int silent) { struct adfs_discrecord *dr; struct object_info root_obj; struct adfs_sb_info *asb; struct inode *root; int ret = -EINVAL; sb->s_flags |= ADFS_SB_FLAGS; asb = kzalloc(sizeof(*asb), GFP_KERNEL); if (!asb) return -ENOMEM; sb->s_fs_info = asb; sb->s_magic = ADFS_SUPER_MAGIC; sb->s_time_gran = 10000000; /* set default options */ asb->s_uid = GLOBAL_ROOT_UID; asb->s_gid = GLOBAL_ROOT_GID; asb->s_owner_mask = ADFS_DEFAULT_OWNER_MASK; asb->s_other_mask = ADFS_DEFAULT_OTHER_MASK; asb->s_ftsuffix = 0; if (parse_options(sb, asb, data)) goto error; /* Try to probe the filesystem boot block */ ret = adfs_probe(sb, ADFS_DISCRECORD, 1, adfs_validate_bblk); if (ret == -EILSEQ) ret = adfs_probe(sb, 0, silent, adfs_validate_dr0); if (ret == -EILSEQ) { if (!silent) adfs_msg(sb, KERN_ERR, "error: can't find an ADFS 
filesystem on dev %s.", sb->s_id); ret = -EINVAL; } if (ret) goto error; /* set up enough so that we can read an inode */ sb->s_op = &adfs_sops; dr = adfs_map_discrecord(asb->s_map); root_obj.parent_id = root_obj.indaddr = le32_to_cpu(dr->root); root_obj.name_len = 0; /* Set root object date as 01 Jan 1987 00:00:00 */ root_obj.loadaddr = 0xfff0003f; root_obj.execaddr = 0xec22c000; root_obj.size = ADFS_NEWDIR_SIZE; root_obj.attr = ADFS_NDA_DIRECTORY | ADFS_NDA_OWNER_READ | ADFS_NDA_OWNER_WRITE | ADFS_NDA_PUBLIC_READ; /* * If this is a F+ disk with variable length directories, * get the root_size from the disc record. */ if (dr->format_version) { root_obj.size = le32_to_cpu(dr->root_size); asb->s_dir = &adfs_fplus_dir_ops; asb->s_namelen = ADFS_FPLUS_NAME_LEN; } else { asb->s_dir = &adfs_f_dir_ops; asb->s_namelen = ADFS_F_NAME_LEN; } /* * ,xyz hex filetype suffix may be added by driver * to files that have valid RISC OS filetype */ if (asb->s_ftsuffix) asb->s_namelen += 4; sb->s_d_op = &adfs_dentry_operations; root = adfs_iget(sb, &root_obj); sb->s_root = d_make_root(root); if (!sb->s_root) { adfs_free_map(sb); adfs_error(sb, "get root inode failed\n"); ret = -EIO; goto error; } return 0; error: sb->s_fs_info = NULL; kfree(asb); return ret; } static struct dentry *adfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_bdev(fs_type, flags, dev_name, data, adfs_fill_super); } static struct file_system_type adfs_fs_type = { .owner = THIS_MODULE, .name = "adfs", .mount = adfs_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; MODULE_ALIAS_FS("adfs"); static int __init init_adfs_fs(void) { int err = init_inodecache(); if (err) goto out1; err = register_filesystem(&adfs_fs_type); if (err) goto out; return 0; out: destroy_inodecache(); out1: return err; } static void __exit exit_adfs_fs(void) { unregister_filesystem(&adfs_fs_type); destroy_inodecache(); } module_init(init_adfs_fs) module_exit(exit_adfs_fs) MODULE_LICENSE("GPL");
linux-master
fs/adfs/super.c
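The ADFS parse_options() above recognises uid, gid, ownmask, othmask and ftsuffix. A hedged mount sketch using those options follows; the block device path and the uid/gid/mask values are placeholders chosen for illustration.

/*
 * Hedged sketch: mount an ADFS volume with the options handled by
 * parse_options() above.  ftsuffix=1 exposes ",xyz" RISC OS filetype
 * suffixes, as the fill_super code adjusts s_namelen for.
 */
#include <sys/mount.h>

static int mount_adfs(const char *dev, const char *target)
{
	/* Map RISC OS owner permissions onto uid/gid 1000. */
	return mount(dev, target, "adfs", MS_RDONLY,
		     "uid=1000,gid=1000,ownmask=0700,othmask=0077,ftsuffix=1");
}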
// SPDX-License-Identifier: GPL-2.0-only /* * linux/fs/adfs/dir_f.c * * Copyright (C) 1997-1999 Russell King * * E and F format directory handling */ #include "adfs.h" #include "dir_f.h" /* * Read an (unaligned) value of length 1..4 bytes */ static inline unsigned int adfs_readval(unsigned char *p, int len) { unsigned int val = 0; switch (len) { case 4: val |= p[3] << 24; fallthrough; case 3: val |= p[2] << 16; fallthrough; case 2: val |= p[1] << 8; fallthrough; default: val |= p[0]; } return val; } static inline void adfs_writeval(unsigned char *p, int len, unsigned int val) { switch (len) { case 4: p[3] = val >> 24; fallthrough; case 3: p[2] = val >> 16; fallthrough; case 2: p[1] = val >> 8; fallthrough; default: p[0] = val; } } #define ror13(v) ((v >> 13) | (v << 19)) #define dir_u8(idx) \ ({ int _buf = idx >> blocksize_bits; \ int _off = idx - (_buf << blocksize_bits);\ *(u8 *)(bh[_buf]->b_data + _off); \ }) #define dir_u32(idx) \ ({ int _buf = idx >> blocksize_bits; \ int _off = idx - (_buf << blocksize_bits);\ *(__le32 *)(bh[_buf]->b_data + _off); \ }) #define bufoff(_bh,_idx) \ ({ int _buf = _idx >> blocksize_bits; \ int _off = _idx - (_buf << blocksize_bits);\ (void *)(_bh[_buf]->b_data + _off); \ }) /* * There are some algorithms that are nice in * assembler, but a bitch in C... This is one * of them. */ static u8 adfs_dir_checkbyte(const struct adfs_dir *dir) { struct buffer_head * const *bh = dir->bh; const int blocksize_bits = dir->sb->s_blocksize_bits; union { __le32 *ptr32; u8 *ptr8; } ptr, end; u32 dircheck = 0; int last = 5 - 26; int i = 0; /* * Accumulate each word up to the last whole * word of the last directory entry. This * can spread across several buffer heads. */ do { last += 26; do { dircheck = le32_to_cpu(dir_u32(i)) ^ ror13(dircheck); i += sizeof(u32); } while (i < (last & ~3)); } while (dir_u8(last) != 0); /* * Accumulate the last few bytes. These * bytes will be within the same bh. */ if (i != last) { ptr.ptr8 = bufoff(bh, i); end.ptr8 = ptr.ptr8 + last - i; do { dircheck = *ptr.ptr8++ ^ ror13(dircheck); } while (ptr.ptr8 < end.ptr8); } /* * The directory tail is in the final bh * Note that contary to the RISC OS PRMs, * the first few bytes are NOT included * in the check. All bytes are in the * same bh. 
*/ ptr.ptr8 = bufoff(bh, 2008); end.ptr8 = ptr.ptr8 + 36; do { __le32 v = *ptr.ptr32++; dircheck = le32_to_cpu(v) ^ ror13(dircheck); } while (ptr.ptr32 < end.ptr32); return (dircheck ^ (dircheck >> 8) ^ (dircheck >> 16) ^ (dircheck >> 24)) & 0xff; } static int adfs_f_validate(struct adfs_dir *dir) { struct adfs_dirheader *head = dir->dirhead; struct adfs_newdirtail *tail = dir->newtail; if (head->startmasseq != tail->endmasseq || tail->dirlastmask || tail->reserved[0] || tail->reserved[1] || (memcmp(&head->startname, "Nick", 4) && memcmp(&head->startname, "Hugo", 4)) || memcmp(&head->startname, &tail->endname, 4) || adfs_dir_checkbyte(dir) != tail->dircheckbyte) return -EIO; return 0; } /* Read and check that a directory is valid */ static int adfs_f_read(struct super_block *sb, u32 indaddr, unsigned int size, struct adfs_dir *dir) { const unsigned int blocksize_bits = sb->s_blocksize_bits; int ret; if (size && size != ADFS_NEWDIR_SIZE) return -EIO; ret = adfs_dir_read_buffers(sb, indaddr, ADFS_NEWDIR_SIZE, dir); if (ret) return ret; dir->dirhead = bufoff(dir->bh, 0); dir->newtail = bufoff(dir->bh, 2007); if (adfs_f_validate(dir)) goto bad_dir; dir->parent_id = adfs_readval(dir->newtail->dirparent, 3); return 0; bad_dir: adfs_error(sb, "dir %06x is corrupted", indaddr); adfs_dir_relse(dir); return -EIO; } /* * convert a disk-based directory entry to a Linux ADFS directory entry */ static inline void adfs_dir2obj(struct adfs_dir *dir, struct object_info *obj, struct adfs_direntry *de) { unsigned int name_len; for (name_len = 0; name_len < ADFS_F_NAME_LEN; name_len++) { if (de->dirobname[name_len] < ' ') break; obj->name[name_len] = de->dirobname[name_len]; } obj->name_len = name_len; obj->indaddr = adfs_readval(de->dirinddiscadd, 3); obj->loadaddr = adfs_readval(de->dirload, 4); obj->execaddr = adfs_readval(de->direxec, 4); obj->size = adfs_readval(de->dirlen, 4); obj->attr = de->newdiratts; adfs_object_fixup(dir, obj); } /* * convert a Linux ADFS directory entry to a disk-based directory entry */ static inline void adfs_obj2dir(struct adfs_direntry *de, struct object_info *obj) { adfs_writeval(de->dirinddiscadd, 3, obj->indaddr); adfs_writeval(de->dirload, 4, obj->loadaddr); adfs_writeval(de->direxec, 4, obj->execaddr); adfs_writeval(de->dirlen, 4, obj->size); de->newdiratts = obj->attr; } /* * get a directory entry. Note that the caller is responsible * for holding the relevant locks. 
*/ static int __adfs_dir_get(struct adfs_dir *dir, int pos, struct object_info *obj) { struct adfs_direntry de; int ret; ret = adfs_dir_copyfrom(&de, dir, pos, 26); if (ret) return ret; if (!de.dirobname[0]) return -ENOENT; adfs_dir2obj(dir, obj, &de); return 0; } static int adfs_f_setpos(struct adfs_dir *dir, unsigned int fpos) { if (fpos >= ADFS_NUM_DIR_ENTRIES) return -ENOENT; dir->pos = 5 + fpos * 26; return 0; } static int adfs_f_getnext(struct adfs_dir *dir, struct object_info *obj) { unsigned int ret; ret = __adfs_dir_get(dir, dir->pos, obj); if (ret == 0) dir->pos += 26; return ret; } static int adfs_f_iterate(struct adfs_dir *dir, struct dir_context *ctx) { struct object_info obj; int pos = 5 + (ctx->pos - 2) * 26; while (ctx->pos < 2 + ADFS_NUM_DIR_ENTRIES) { if (__adfs_dir_get(dir, pos, &obj)) break; if (!dir_emit(ctx, obj.name, obj.name_len, obj.indaddr, DT_UNKNOWN)) break; pos += 26; ctx->pos++; } return 0; } static int adfs_f_update(struct adfs_dir *dir, struct object_info *obj) { struct adfs_direntry de; int offset, ret; offset = 5 - (int)sizeof(de); do { offset += sizeof(de); ret = adfs_dir_copyfrom(&de, dir, offset, sizeof(de)); if (ret) { adfs_error(dir->sb, "error reading directory entry"); return -ENOENT; } if (!de.dirobname[0]) { adfs_error(dir->sb, "unable to locate entry to update"); return -ENOENT; } } while (adfs_readval(de.dirinddiscadd, 3) != obj->indaddr); /* Update the directory entry with the new object state */ adfs_obj2dir(&de, obj); /* Write the directory entry back to the directory */ return adfs_dir_copyto(dir, offset, &de, 26); } static int adfs_f_commit(struct adfs_dir *dir) { int ret; /* Increment directory sequence number */ dir->dirhead->startmasseq += 1; dir->newtail->endmasseq += 1; /* Update directory check byte */ dir->newtail->dircheckbyte = adfs_dir_checkbyte(dir); /* Make sure the directory still validates correctly */ ret = adfs_f_validate(dir); if (ret) adfs_msg(dir->sb, KERN_ERR, "error: update broke directory"); return ret; } const struct adfs_dir_ops adfs_f_dir_ops = { .read = adfs_f_read, .iterate = adfs_f_iterate, .setpos = adfs_f_setpos, .getnext = adfs_f_getnext, .update = adfs_f_update, .commit = adfs_f_commit, };
linux-master
fs/adfs/dir_f.c
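A minimal userspace sketch of the rotate-and-XOR accumulation that adfs_dir_checkbyte() performs above, assuming a flat in-memory directory image instead of buffer heads; the helper name demo_checkbyte and the sample buffer are invented for illustration, but the whole-word loop, byte tail and final fold to one byte mirror the kernel steps.

/*
 * Hedged illustration only: accumulate a check value over a flat buffer
 * the way adfs_dir_checkbyte() does, assuming a little-endian host and a
 * made-up layout (the real code walks buffer heads and entry boundaries).
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t ror13(uint32_t v)
{
	return (v >> 13) | (v << 19);
}

static uint8_t demo_checkbyte(const uint8_t *buf, size_t whole_words,
			      size_t tail_bytes)
{
	uint32_t check = 0;
	size_t i;

	/* whole 32-bit little-endian words first */
	for (i = 0; i < whole_words; i++) {
		uint32_t w;

		memcpy(&w, buf + 4 * i, sizeof(w));	/* assumes LE host */
		check = w ^ ror13(check);
	}

	/* remaining bytes one at a time, as done for the last entry */
	for (i = 0; i < tail_bytes; i++)
		check = buf[4 * whole_words + i] ^ ror13(check);

	/* fold the 32-bit accumulator down to the single stored check byte */
	return (check ^ (check >> 8) ^ (check >> 16) ^ (check >> 24)) & 0xff;
}

int main(void)
{
	uint8_t dir[16] = { 'N', 'i', 'c', 'k', 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };

	printf("check byte: 0x%02x\n", demo_checkbyte(dir, 3, 2));
	return 0;
}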
// SPDX-License-Identifier: GPL-2.0-only /* * linux/fs/adfs/dir_fplus.c * * Copyright (C) 1997-1999 Russell King */ #include "adfs.h" #include "dir_fplus.h" /* Return the byte offset to directory entry pos */ static unsigned int adfs_fplus_offset(const struct adfs_bigdirheader *h, unsigned int pos) { return offsetof(struct adfs_bigdirheader, bigdirname) + ALIGN(le32_to_cpu(h->bigdirnamelen), 4) + pos * sizeof(struct adfs_bigdirentry); } static int adfs_fplus_validate_header(const struct adfs_bigdirheader *h) { unsigned int size = le32_to_cpu(h->bigdirsize); unsigned int len; if (h->bigdirversion[0] != 0 || h->bigdirversion[1] != 0 || h->bigdirversion[2] != 0 || h->bigdirstartname != cpu_to_le32(BIGDIRSTARTNAME) || !size || size & 2047 || size > SZ_4M) return -EIO; size -= sizeof(struct adfs_bigdirtail) + offsetof(struct adfs_bigdirheader, bigdirname); /* Check that bigdirnamelen fits within the directory */ len = ALIGN(le32_to_cpu(h->bigdirnamelen), 4); if (len > size) return -EIO; size -= len; /* Check that bigdirnamesize fits within the directory */ len = le32_to_cpu(h->bigdirnamesize); if (len > size) return -EIO; size -= len; /* * Avoid division, we know that absolute maximum number of entries * can not be so large to cause overflow of the multiplication below. */ len = le32_to_cpu(h->bigdirentries); if (len > SZ_4M / sizeof(struct adfs_bigdirentry) || len * sizeof(struct adfs_bigdirentry) > size) return -EIO; return 0; } static int adfs_fplus_validate_tail(const struct adfs_bigdirheader *h, const struct adfs_bigdirtail *t) { if (t->bigdirendname != cpu_to_le32(BIGDIRENDNAME) || t->bigdirendmasseq != h->startmasseq || t->reserved[0] != 0 || t->reserved[1] != 0) return -EIO; return 0; } static u8 adfs_fplus_checkbyte(struct adfs_dir *dir) { struct adfs_bigdirheader *h = dir->bighead; struct adfs_bigdirtail *t = dir->bigtail; unsigned int end, bs, bi, i; __le32 *bp; u32 dircheck; end = adfs_fplus_offset(h, le32_to_cpu(h->bigdirentries)) + le32_to_cpu(h->bigdirnamesize); /* Accumulate the contents of the header, entries and names */ for (dircheck = 0, bi = 0; end; bi++) { bp = (void *)dir->bhs[bi]->b_data; bs = dir->bhs[bi]->b_size; if (bs > end) bs = end; for (i = 0; i < bs; i += sizeof(u32)) dircheck = ror32(dircheck, 13) ^ le32_to_cpup(bp++); end -= bs; } /* Accumulate the contents of the tail except for the check byte */ dircheck = ror32(dircheck, 13) ^ le32_to_cpu(t->bigdirendname); dircheck = ror32(dircheck, 13) ^ t->bigdirendmasseq; dircheck = ror32(dircheck, 13) ^ t->reserved[0]; dircheck = ror32(dircheck, 13) ^ t->reserved[1]; return dircheck ^ dircheck >> 8 ^ dircheck >> 16 ^ dircheck >> 24; } static int adfs_fplus_read(struct super_block *sb, u32 indaddr, unsigned int size, struct adfs_dir *dir) { struct adfs_bigdirheader *h; struct adfs_bigdirtail *t; unsigned int dirsize; int ret; /* Read first buffer */ ret = adfs_dir_read_buffers(sb, indaddr, sb->s_blocksize, dir); if (ret) return ret; dir->bighead = h = (void *)dir->bhs[0]->b_data; ret = adfs_fplus_validate_header(h); if (ret) { adfs_error(sb, "dir %06x has malformed header", indaddr); goto out; } dirsize = le32_to_cpu(h->bigdirsize); if (size && dirsize != size) { adfs_msg(sb, KERN_WARNING, "dir %06x header size %X does not match directory size %X", indaddr, dirsize, size); } /* Read remaining buffers */ ret = adfs_dir_read_buffers(sb, indaddr, dirsize, dir); if (ret) return ret; dir->bigtail = t = (struct adfs_bigdirtail *) (dir->bhs[dir->nr_buffers - 1]->b_data + (sb->s_blocksize - 8)); ret = adfs_fplus_validate_tail(h, 
t); if (ret) { adfs_error(sb, "dir %06x has malformed tail", indaddr); goto out; } if (adfs_fplus_checkbyte(dir) != t->bigdircheckbyte) { adfs_error(sb, "dir %06x checkbyte mismatch\n", indaddr); goto out; } dir->parent_id = le32_to_cpu(h->bigdirparent); return 0; out: adfs_dir_relse(dir); return ret; } static int adfs_fplus_setpos(struct adfs_dir *dir, unsigned int fpos) { int ret = -ENOENT; if (fpos <= le32_to_cpu(dir->bighead->bigdirentries)) { dir->pos = fpos; ret = 0; } return ret; } static int adfs_fplus_getnext(struct adfs_dir *dir, struct object_info *obj) { struct adfs_bigdirheader *h = dir->bighead; struct adfs_bigdirentry bde; unsigned int offset; int ret; if (dir->pos >= le32_to_cpu(h->bigdirentries)) return -ENOENT; offset = adfs_fplus_offset(h, dir->pos); ret = adfs_dir_copyfrom(&bde, dir, offset, sizeof(struct adfs_bigdirentry)); if (ret) return ret; obj->loadaddr = le32_to_cpu(bde.bigdirload); obj->execaddr = le32_to_cpu(bde.bigdirexec); obj->size = le32_to_cpu(bde.bigdirlen); obj->indaddr = le32_to_cpu(bde.bigdirindaddr); obj->attr = le32_to_cpu(bde.bigdirattr); obj->name_len = le32_to_cpu(bde.bigdirobnamelen); offset = adfs_fplus_offset(h, le32_to_cpu(h->bigdirentries)); offset += le32_to_cpu(bde.bigdirobnameptr); ret = adfs_dir_copyfrom(obj->name, dir, offset, obj->name_len); if (ret) return ret; adfs_object_fixup(dir, obj); dir->pos += 1; return 0; } static int adfs_fplus_iterate(struct adfs_dir *dir, struct dir_context *ctx) { struct object_info obj; if ((ctx->pos - 2) >> 32) return 0; if (adfs_fplus_setpos(dir, ctx->pos - 2)) return 0; while (!adfs_fplus_getnext(dir, &obj)) { if (!dir_emit(ctx, obj.name, obj.name_len, obj.indaddr, DT_UNKNOWN)) break; ctx->pos++; } return 0; } static int adfs_fplus_update(struct adfs_dir *dir, struct object_info *obj) { struct adfs_bigdirheader *h = dir->bighead; struct adfs_bigdirentry bde; int offset, end, ret; offset = adfs_fplus_offset(h, 0) - sizeof(bde); end = adfs_fplus_offset(h, le32_to_cpu(h->bigdirentries)); do { offset += sizeof(bde); if (offset >= end) { adfs_error(dir->sb, "unable to locate entry to update"); return -ENOENT; } ret = adfs_dir_copyfrom(&bde, dir, offset, sizeof(bde)); if (ret) { adfs_error(dir->sb, "error reading directory entry"); return -ENOENT; } } while (le32_to_cpu(bde.bigdirindaddr) != obj->indaddr); bde.bigdirload = cpu_to_le32(obj->loadaddr); bde.bigdirexec = cpu_to_le32(obj->execaddr); bde.bigdirlen = cpu_to_le32(obj->size); bde.bigdirindaddr = cpu_to_le32(obj->indaddr); bde.bigdirattr = cpu_to_le32(obj->attr); return adfs_dir_copyto(dir, offset, &bde, sizeof(bde)); } static int adfs_fplus_commit(struct adfs_dir *dir) { int ret; /* Increment directory sequence number */ dir->bighead->startmasseq += 1; dir->bigtail->bigdirendmasseq += 1; /* Update directory check byte */ dir->bigtail->bigdircheckbyte = adfs_fplus_checkbyte(dir); /* Make sure the directory still validates correctly */ ret = adfs_fplus_validate_header(dir->bighead); if (ret == 0) ret = adfs_fplus_validate_tail(dir->bighead, dir->bigtail); return ret; } const struct adfs_dir_ops adfs_fplus_dir_ops = { .read = adfs_fplus_read, .iterate = adfs_fplus_iterate, .setpos = adfs_fplus_setpos, .getnext = adfs_fplus_getnext, .update = adfs_fplus_update, .commit = adfs_fplus_commit, };
linux-master
fs/adfs/dir_fplus.c
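A hedged sketch of the big-directory layout arithmetic behind adfs_fplus_offset(): the byte offset of entry pos is the fixed header size, plus the inline directory name rounded up to a 4-byte boundary, plus pos fixed-size entries. The DEMO_* sizes below are placeholders, not the on-disk adfs_bigdirheader/adfs_bigdirentry layout.

/*
 * Hedged sketch of locating entry 'pos' in an F+ big directory.
 * The sizes are assumptions for the example only.
 */
#include <stdio.h>

#define DEMO_ALIGN4(x)		(((x) + 3u) & ~3u)
#define DEMO_HEADER_FIXED	28u	/* assumed bytes before the inline name */
#define DEMO_ENTRY_SIZE		28u	/* assumed size of one directory entry */

static unsigned int demo_entry_offset(unsigned int dirnamelen, unsigned int pos)
{
	return DEMO_HEADER_FIXED + DEMO_ALIGN4(dirnamelen) + pos * DEMO_ENTRY_SIZE;
}

int main(void)
{
	unsigned int namelen = 5;	/* e.g. a short directory name */
	unsigned int i;

	for (i = 0; i < 3; i++)
		printf("entry %u starts at byte %u\n", i,
		       demo_entry_offset(namelen, i));
	return 0;
}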
// SPDX-License-Identifier: GPL-2.0-only /* * linux/fs/adfs/dir.c * * Copyright (C) 1999-2000 Russell King * * Common directory handling for ADFS */ #include <linux/slab.h> #include "adfs.h" /* * For future. This should probably be per-directory. */ static DECLARE_RWSEM(adfs_dir_rwsem); int adfs_dir_copyfrom(void *dst, struct adfs_dir *dir, unsigned int offset, size_t len) { struct super_block *sb = dir->sb; unsigned int index, remain; index = offset >> sb->s_blocksize_bits; offset &= sb->s_blocksize - 1; remain = sb->s_blocksize - offset; if (index + (remain < len) >= dir->nr_buffers) return -EINVAL; if (remain < len) { memcpy(dst, dir->bhs[index]->b_data + offset, remain); dst += remain; len -= remain; index += 1; offset = 0; } memcpy(dst, dir->bhs[index]->b_data + offset, len); return 0; } int adfs_dir_copyto(struct adfs_dir *dir, unsigned int offset, const void *src, size_t len) { struct super_block *sb = dir->sb; unsigned int index, remain; index = offset >> sb->s_blocksize_bits; offset &= sb->s_blocksize - 1; remain = sb->s_blocksize - offset; if (index + (remain < len) >= dir->nr_buffers) return -EINVAL; if (remain < len) { memcpy(dir->bhs[index]->b_data + offset, src, remain); src += remain; len -= remain; index += 1; offset = 0; } memcpy(dir->bhs[index]->b_data + offset, src, len); return 0; } static void __adfs_dir_cleanup(struct adfs_dir *dir) { dir->nr_buffers = 0; if (dir->bhs != dir->bh) kfree(dir->bhs); dir->bhs = NULL; dir->sb = NULL; } void adfs_dir_relse(struct adfs_dir *dir) { unsigned int i; for (i = 0; i < dir->nr_buffers; i++) brelse(dir->bhs[i]); __adfs_dir_cleanup(dir); } static void adfs_dir_forget(struct adfs_dir *dir) { unsigned int i; for (i = 0; i < dir->nr_buffers; i++) bforget(dir->bhs[i]); __adfs_dir_cleanup(dir); } int adfs_dir_read_buffers(struct super_block *sb, u32 indaddr, unsigned int size, struct adfs_dir *dir) { struct buffer_head **bhs; unsigned int i, num; int block; num = ALIGN(size, sb->s_blocksize) >> sb->s_blocksize_bits; if (num > ARRAY_SIZE(dir->bh)) { /* We only allow one extension */ if (dir->bhs != dir->bh) return -EINVAL; bhs = kcalloc(num, sizeof(*bhs), GFP_KERNEL); if (!bhs) return -ENOMEM; if (dir->nr_buffers) memcpy(bhs, dir->bhs, dir->nr_buffers * sizeof(*bhs)); dir->bhs = bhs; } for (i = dir->nr_buffers; i < num; i++) { block = __adfs_block_map(sb, indaddr, i); if (!block) { adfs_error(sb, "dir %06x has a hole at offset %u", indaddr, i); goto error; } dir->bhs[i] = sb_bread(sb, block); if (!dir->bhs[i]) { adfs_error(sb, "dir %06x failed read at offset %u, mapped block 0x%08x", indaddr, i, block); goto error; } dir->nr_buffers++; } return 0; error: adfs_dir_relse(dir); return -EIO; } static int adfs_dir_read(struct super_block *sb, u32 indaddr, unsigned int size, struct adfs_dir *dir) { dir->sb = sb; dir->bhs = dir->bh; dir->nr_buffers = 0; return ADFS_SB(sb)->s_dir->read(sb, indaddr, size, dir); } static int adfs_dir_read_inode(struct super_block *sb, struct inode *inode, struct adfs_dir *dir) { int ret; ret = adfs_dir_read(sb, ADFS_I(inode)->indaddr, inode->i_size, dir); if (ret) return ret; if (ADFS_I(inode)->parent_id != dir->parent_id) { adfs_error(sb, "parent directory id changed under me! 
(%06x but got %06x)\n", ADFS_I(inode)->parent_id, dir->parent_id); adfs_dir_relse(dir); ret = -EIO; } return ret; } static void adfs_dir_mark_dirty(struct adfs_dir *dir) { unsigned int i; /* Mark the buffers dirty */ for (i = 0; i < dir->nr_buffers; i++) mark_buffer_dirty(dir->bhs[i]); } static int adfs_dir_sync(struct adfs_dir *dir) { int err = 0; int i; for (i = dir->nr_buffers - 1; i >= 0; i--) { struct buffer_head *bh = dir->bhs[i]; sync_dirty_buffer(bh); if (buffer_req(bh) && !buffer_uptodate(bh)) err = -EIO; } return err; } void adfs_object_fixup(struct adfs_dir *dir, struct object_info *obj) { unsigned int dots, i; /* * RISC OS allows the use of '/' in directory entry names, so we need * to fix these up. '/' is typically used for FAT compatibility to * represent '.', so do the same conversion here. In any case, '.' * will never be in a RISC OS name since it is used as the pathname * separator. Handle the case where we may generate a '.' or '..' * name, replacing the first character with '^' (the RISC OS "parent * directory" character.) */ for (i = dots = 0; i < obj->name_len; i++) if (obj->name[i] == '/') { obj->name[i] = '.'; dots++; } if (obj->name_len <= 2 && dots == obj->name_len) obj->name[0] = '^'; /* * If the object is a file, and the user requested the ,xyz hex * filetype suffix to the name, check the filetype and append. */ if (!(obj->attr & ADFS_NDA_DIRECTORY) && ADFS_SB(dir->sb)->s_ftsuffix) { u16 filetype = adfs_filetype(obj->loadaddr); if (filetype != ADFS_FILETYPE_NONE) { obj->name[obj->name_len++] = ','; obj->name[obj->name_len++] = hex_asc_lo(filetype >> 8); obj->name[obj->name_len++] = hex_asc_lo(filetype >> 4); obj->name[obj->name_len++] = hex_asc_lo(filetype >> 0); } } } static int adfs_iterate(struct file *file, struct dir_context *ctx) { struct inode *inode = file_inode(file); struct super_block *sb = inode->i_sb; const struct adfs_dir_ops *ops = ADFS_SB(sb)->s_dir; struct adfs_dir dir; int ret; down_read(&adfs_dir_rwsem); ret = adfs_dir_read_inode(sb, inode, &dir); if (ret) goto unlock; if (ctx->pos == 0) { if (!dir_emit_dot(file, ctx)) goto unlock_relse; ctx->pos = 1; } if (ctx->pos == 1) { if (!dir_emit(ctx, "..", 2, dir.parent_id, DT_DIR)) goto unlock_relse; ctx->pos = 2; } ret = ops->iterate(&dir, ctx); unlock_relse: up_read(&adfs_dir_rwsem); adfs_dir_relse(&dir); return ret; unlock: up_read(&adfs_dir_rwsem); return ret; } int adfs_dir_update(struct super_block *sb, struct object_info *obj, int wait) { const struct adfs_dir_ops *ops = ADFS_SB(sb)->s_dir; struct adfs_dir dir; int ret; if (!IS_ENABLED(CONFIG_ADFS_FS_RW)) return -EINVAL; if (!ops->update) return -EINVAL; down_write(&adfs_dir_rwsem); ret = adfs_dir_read(sb, obj->parent_id, 0, &dir); if (ret) goto unlock; ret = ops->update(&dir, obj); if (ret) goto forget; ret = ops->commit(&dir); if (ret) goto forget; up_write(&adfs_dir_rwsem); adfs_dir_mark_dirty(&dir); if (wait) ret = adfs_dir_sync(&dir); adfs_dir_relse(&dir); return ret; /* * If the updated failed because the entry wasn't found, we can * just release the buffers. If it was any other error, forget * the dirtied buffers so they aren't written back to the media. 
*/ forget: if (ret == -ENOENT) adfs_dir_relse(&dir); else adfs_dir_forget(&dir); unlock: up_write(&adfs_dir_rwsem); return ret; } static unsigned char adfs_tolower(unsigned char c) { if (c >= 'A' && c <= 'Z') c += 'a' - 'A'; return c; } static int __adfs_compare(const unsigned char *qstr, u32 qlen, const char *str, u32 len) { u32 i; if (qlen != len) return 1; for (i = 0; i < qlen; i++) if (adfs_tolower(qstr[i]) != adfs_tolower(str[i])) return 1; return 0; } static int adfs_dir_lookup_byname(struct inode *inode, const struct qstr *qstr, struct object_info *obj) { struct super_block *sb = inode->i_sb; const struct adfs_dir_ops *ops = ADFS_SB(sb)->s_dir; const unsigned char *name; struct adfs_dir dir; u32 name_len; int ret; down_read(&adfs_dir_rwsem); ret = adfs_dir_read_inode(sb, inode, &dir); if (ret) goto unlock; ret = ops->setpos(&dir, 0); if (ret) goto unlock_relse; ret = -ENOENT; name = qstr->name; name_len = qstr->len; while (ops->getnext(&dir, obj) == 0) { if (!__adfs_compare(name, name_len, obj->name, obj->name_len)) { ret = 0; break; } } obj->parent_id = ADFS_I(inode)->indaddr; unlock_relse: up_read(&adfs_dir_rwsem); adfs_dir_relse(&dir); return ret; unlock: up_read(&adfs_dir_rwsem); return ret; } const struct file_operations adfs_dir_operations = { .read = generic_read_dir, .llseek = generic_file_llseek, .iterate_shared = adfs_iterate, .fsync = generic_file_fsync, }; static int adfs_hash(const struct dentry *parent, struct qstr *qstr) { const unsigned char *name; unsigned long hash; u32 len; if (qstr->len > ADFS_SB(parent->d_sb)->s_namelen) return -ENAMETOOLONG; len = qstr->len; name = qstr->name; hash = init_name_hash(parent); while (len--) hash = partial_name_hash(adfs_tolower(*name++), hash); qstr->hash = end_name_hash(hash); return 0; } /* * Compare two names, taking note of the name length * requirements of the underlying filesystem. */ static int adfs_compare(const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *qstr) { return __adfs_compare(qstr->name, qstr->len, str, len); } const struct dentry_operations adfs_dentry_operations = { .d_hash = adfs_hash, .d_compare = adfs_compare, }; static struct dentry * adfs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { struct inode *inode = NULL; struct object_info obj; int error; error = adfs_dir_lookup_byname(dir, &dentry->d_name, &obj); if (error == 0) { /* * This only returns NULL if get_empty_inode * fails. */ inode = adfs_iget(dir->i_sb, &obj); if (!inode) inode = ERR_PTR(-EACCES); } else if (error != -ENOENT) { inode = ERR_PTR(error); } return d_splice_alias(inode, dentry); } /* * directories can handle most operations... */ const struct inode_operations adfs_dir_inode_operations = { .lookup = adfs_lookup, .setattr = adfs_notify_change, };
linux-master
fs/adfs/dir.c
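A hedged userspace sketch of the name fixup that adfs_object_fixup() applies: '/' in a RISC OS entry name is translated to '.', a result that would read "." or ".." gets its first character replaced with '^', and for files a ",xyz" hex filetype suffix may be appended. The function name, buffer size and sample filetype are made up for the example.

/*
 * Hedged illustration of the RISC OS name fixup; not the kernel function.
 */
#include <stdio.h>
#include <string.h>

static const char hex_lo[] = "0123456789abcdef";

static void demo_fixup_name(char *name, size_t *len, int is_dir,
			    int ftsuffix, unsigned int filetype)
{
	size_t i, dots = 0;

	for (i = 0; i < *len; i++) {
		if (name[i] == '/') {	/* '/' stands in for '.' on disk */
			name[i] = '.';
			dots++;
		}
	}

	/* avoid generating the special names "." and ".." */
	if (*len <= 2 && dots == *len)
		name[0] = '^';

	/* optional ",xyz" filetype suffix for files */
	if (!is_dir && ftsuffix) {
		name[(*len)++] = ',';
		name[(*len)++] = hex_lo[(filetype >> 8) & 0xf];
		name[(*len)++] = hex_lo[(filetype >> 4) & 0xf];
		name[(*len)++] = hex_lo[filetype & 0xf];
	}
	name[*len] = '\0';
}

int main(void)
{
	char name[32] = "c/config";	/* RISC OS style "c.config" */
	size_t len = strlen(name);

	demo_fixup_name(name, &len, 0, 1, 0xfff);	/* 0xfff = Text */
	printf("%s\n", name);	/* prints "c.config,fff" */
	return 0;
}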
// SPDX-License-Identifier: GPL-2.0-only /* * linux/fs/adfs/inode.c * * Copyright (C) 1997-1999 Russell King */ #include <linux/buffer_head.h> #include <linux/writeback.h> #include "adfs.h" /* * Lookup/Create a block at offset 'block' into 'inode'. We currently do * not support creation of new blocks, so we return -EIO for this case. */ static int adfs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh, int create) { if (!create) { if (block >= inode->i_blocks) goto abort_toobig; block = __adfs_block_map(inode->i_sb, ADFS_I(inode)->indaddr, block); if (block) map_bh(bh, inode->i_sb, block); return 0; } /* don't support allocation of blocks yet */ return -EIO; abort_toobig: return 0; } static int adfs_writepage(struct page *page, struct writeback_control *wbc) { return block_write_full_page(page, adfs_get_block, wbc); } static int adfs_read_folio(struct file *file, struct folio *folio) { return block_read_full_folio(folio, adfs_get_block); } static void adfs_write_failed(struct address_space *mapping, loff_t to) { struct inode *inode = mapping->host; if (to > inode->i_size) truncate_pagecache(inode, inode->i_size); } static int adfs_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, struct page **pagep, void **fsdata) { int ret; *pagep = NULL; ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata, adfs_get_block, &ADFS_I(mapping->host)->mmu_private); if (unlikely(ret)) adfs_write_failed(mapping, pos + len); return ret; } static sector_t _adfs_bmap(struct address_space *mapping, sector_t block) { return generic_block_bmap(mapping, block, adfs_get_block); } static const struct address_space_operations adfs_aops = { .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .read_folio = adfs_read_folio, .writepage = adfs_writepage, .write_begin = adfs_write_begin, .write_end = generic_write_end, .bmap = _adfs_bmap }; /* * Convert ADFS attributes and filetype to Linux permission. */ static umode_t adfs_atts2mode(struct super_block *sb, struct inode *inode) { unsigned int attr = ADFS_I(inode)->attr; umode_t mode, rmask; struct adfs_sb_info *asb = ADFS_SB(sb); if (attr & ADFS_NDA_DIRECTORY) { mode = S_IRUGO & asb->s_owner_mask; return S_IFDIR | S_IXUGO | mode; } switch (adfs_filetype(ADFS_I(inode)->loadaddr)) { case 0xfc0: /* LinkFS */ return S_IFLNK|S_IRWXUGO; case 0xfe6: /* UnixExec */ rmask = S_IRUGO | S_IXUGO; break; default: rmask = S_IRUGO; } mode = S_IFREG; if (attr & ADFS_NDA_OWNER_READ) mode |= rmask & asb->s_owner_mask; if (attr & ADFS_NDA_OWNER_WRITE) mode |= S_IWUGO & asb->s_owner_mask; if (attr & ADFS_NDA_PUBLIC_READ) mode |= rmask & asb->s_other_mask; if (attr & ADFS_NDA_PUBLIC_WRITE) mode |= S_IWUGO & asb->s_other_mask; return mode; } /* * Convert Linux permission to ADFS attribute. We try to do the reverse * of atts2mode, but there is not a 1:1 translation. */ static int adfs_mode2atts(struct super_block *sb, struct inode *inode, umode_t ia_mode) { struct adfs_sb_info *asb = ADFS_SB(sb); umode_t mode; int attr; /* FIXME: should we be able to alter a link? 
*/ if (S_ISLNK(inode->i_mode)) return ADFS_I(inode)->attr; /* Directories do not have read/write permissions on the media */ if (S_ISDIR(inode->i_mode)) return ADFS_NDA_DIRECTORY; attr = 0; mode = ia_mode & asb->s_owner_mask; if (mode & S_IRUGO) attr |= ADFS_NDA_OWNER_READ; if (mode & S_IWUGO) attr |= ADFS_NDA_OWNER_WRITE; mode = ia_mode & asb->s_other_mask; mode &= ~asb->s_owner_mask; if (mode & S_IRUGO) attr |= ADFS_NDA_PUBLIC_READ; if (mode & S_IWUGO) attr |= ADFS_NDA_PUBLIC_WRITE; return attr; } static const s64 nsec_unix_epoch_diff_risc_os_epoch = 2208988800000000000LL; /* * Convert an ADFS time to Unix time. ADFS has a 40-bit centi-second time * referenced to 1 Jan 1900 (til 2248) so we need to discard 2208988800 seconds * of time to convert from RISC OS epoch to Unix epoch. */ static void adfs_adfs2unix_time(struct timespec64 *tv, struct inode *inode) { unsigned int high, low; /* 01 Jan 1970 00:00:00 (Unix epoch) as nanoseconds since * 01 Jan 1900 00:00:00 (RISC OS epoch) */ s64 nsec; if (!adfs_inode_is_stamped(inode)) goto cur_time; high = ADFS_I(inode)->loadaddr & 0xFF; /* top 8 bits of timestamp */ low = ADFS_I(inode)->execaddr; /* bottom 32 bits of timestamp */ /* convert 40-bit centi-seconds to 32-bit seconds * going via nanoseconds to retain precision */ nsec = (((s64) high << 32) | (s64) low) * 10000000; /* cs to ns */ /* Files dated pre 01 Jan 1970 00:00:00. */ if (nsec < nsec_unix_epoch_diff_risc_os_epoch) goto too_early; /* convert from RISC OS to Unix epoch */ nsec -= nsec_unix_epoch_diff_risc_os_epoch; *tv = ns_to_timespec64(nsec); return; cur_time: *tv = current_time(inode); return; too_early: tv->tv_sec = tv->tv_nsec = 0; return; } /* Convert an Unix time to ADFS time for an entry that is already stamped. */ static void adfs_unix2adfs_time(struct inode *inode, const struct timespec64 *ts) { s64 cs, nsec = timespec64_to_ns(ts); /* convert from Unix to RISC OS epoch */ nsec += nsec_unix_epoch_diff_risc_os_epoch; /* convert from nanoseconds to centiseconds */ cs = div_s64(nsec, 10000000); cs = clamp_t(s64, cs, 0, 0xffffffffff); ADFS_I(inode)->loadaddr &= ~0xff; ADFS_I(inode)->loadaddr |= (cs >> 32) & 0xff; ADFS_I(inode)->execaddr = cs; } /* * Fill in the inode information from the object information. * * Note that this is an inode-less filesystem, so we can't use the inode * number to reference the metadata on the media. Instead, we use the * inode number to hold the object ID, which in turn will tell us where * the data is held. We also save the parent object ID, and with these * two, we can locate the metadata. * * This does mean that we rely on an objects parent remaining the same at * all times - we cannot cope with a cross-directory rename (yet). */ struct inode * adfs_iget(struct super_block *sb, struct object_info *obj) { struct inode *inode; inode = new_inode(sb); if (!inode) goto out; inode->i_uid = ADFS_SB(sb)->s_uid; inode->i_gid = ADFS_SB(sb)->s_gid; inode->i_ino = obj->indaddr; inode->i_size = obj->size; set_nlink(inode, 2); inode->i_blocks = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits; /* * we need to save the parent directory ID so that * write_inode can update the directory information * for this file. This will need special handling * for cross-directory renames. 
*/ ADFS_I(inode)->parent_id = obj->parent_id; ADFS_I(inode)->indaddr = obj->indaddr; ADFS_I(inode)->loadaddr = obj->loadaddr; ADFS_I(inode)->execaddr = obj->execaddr; ADFS_I(inode)->attr = obj->attr; inode->i_mode = adfs_atts2mode(sb, inode); adfs_adfs2unix_time(&inode->i_mtime, inode); inode->i_atime = inode->i_mtime; inode_set_ctime_to_ts(inode, inode->i_mtime); if (S_ISDIR(inode->i_mode)) { inode->i_op = &adfs_dir_inode_operations; inode->i_fop = &adfs_dir_operations; } else if (S_ISREG(inode->i_mode)) { inode->i_op = &adfs_file_inode_operations; inode->i_fop = &adfs_file_operations; inode->i_mapping->a_ops = &adfs_aops; ADFS_I(inode)->mmu_private = inode->i_size; } inode_fake_hash(inode); out: return inode; } /* * Validate and convert a changed access mode/time to their ADFS equivalents. * adfs_write_inode will actually write the information back to the directory * later. */ int adfs_notify_change(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *attr) { struct inode *inode = d_inode(dentry); struct super_block *sb = inode->i_sb; unsigned int ia_valid = attr->ia_valid; int error; error = setattr_prepare(&nop_mnt_idmap, dentry, attr); /* * we can't change the UID or GID of any file - * we have a global UID/GID in the superblock */ if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, ADFS_SB(sb)->s_uid)) || (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, ADFS_SB(sb)->s_gid))) error = -EPERM; if (error) goto out; /* XXX: this is missing some actual on-disk truncation.. */ if (ia_valid & ATTR_SIZE) truncate_setsize(inode, attr->ia_size); if (ia_valid & ATTR_MTIME && adfs_inode_is_stamped(inode)) { adfs_unix2adfs_time(inode, &attr->ia_mtime); adfs_adfs2unix_time(&inode->i_mtime, inode); } /* * FIXME: should we make these == to i_mtime since we don't * have the ability to represent them in our filesystem? */ if (ia_valid & ATTR_ATIME) inode->i_atime = attr->ia_atime; if (ia_valid & ATTR_CTIME) inode_set_ctime_to_ts(inode, attr->ia_ctime); if (ia_valid & ATTR_MODE) { ADFS_I(inode)->attr = adfs_mode2atts(sb, inode, attr->ia_mode); inode->i_mode = adfs_atts2mode(sb, inode); } /* * FIXME: should we be marking this inode dirty even if * we don't have any metadata to write back? */ if (ia_valid & (ATTR_SIZE | ATTR_MTIME | ATTR_MODE)) mark_inode_dirty(inode); out: return error; } /* * write an existing inode back to the directory, and therefore the disk. * The adfs-specific inode data has already been updated by * adfs_notify_change() */ int adfs_write_inode(struct inode *inode, struct writeback_control *wbc) { struct super_block *sb = inode->i_sb; struct object_info obj; obj.indaddr = ADFS_I(inode)->indaddr; obj.name_len = 0; obj.parent_id = ADFS_I(inode)->parent_id; obj.loadaddr = ADFS_I(inode)->loadaddr; obj.execaddr = ADFS_I(inode)->execaddr; obj.attr = ADFS_I(inode)->attr; obj.size = inode->i_size; return adfs_dir_update(sb, &obj, wbc->sync_mode == WB_SYNC_ALL); }
linux-master
fs/adfs/inode.c
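A hedged sketch of the timestamp conversion done in adfs_adfs2unix_time(): the low byte of the load address supplies the top 8 bits of a 40-bit centisecond count since 1 Jan 1900, the exec address supplies the low 32 bits, and subtracting the 2208988800-second epoch difference yields Unix time. The helper name and the sample load/exec values are invented for the example.

/*
 * Hedged userspace illustration of the 40-bit centisecond conversion.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define RISC_OS_EPOCH_DIFF_NSEC	2208988800000000000LL

static int demo_adfs_to_unix(uint32_t loadaddr, uint32_t execaddr,
			     int64_t *sec, int64_t *nsec)
{
	/* low 8 bits of loadaddr are the top 8 bits of the timestamp */
	int64_t cs = ((int64_t)(loadaddr & 0xff) << 32) | execaddr;
	int64_t ns = cs * 10000000LL;	/* centiseconds to nanoseconds */

	if (ns < RISC_OS_EPOCH_DIFF_NSEC)
		return -1;		/* dated before the Unix epoch */

	ns -= RISC_OS_EPOCH_DIFF_NSEC;
	*sec = ns / 1000000000LL;
	*nsec = ns % 1000000000LL;
	return 0;
}

int main(void)
{
	int64_t sec, nsec;

	/* arbitrary made-up stamped load/exec pair */
	if (demo_adfs_to_unix(0xfff00036, 0x46f4c4e4, &sec, &nsec) == 0)
		printf("unix time: %" PRId64 ".%09" PRId64 "\n", sec, nsec);
	return 0;
}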
// SPDX-License-Identifier: GPL-2.0-only /* * linux/fs/adfs/map.c * * Copyright (C) 1997-2002 Russell King */ #include <linux/slab.h> #include <linux/statfs.h> #include <asm/unaligned.h> #include "adfs.h" /* * The ADFS map is basically a set of sectors. Each sector is called a * zone which contains a bitstream made up of variable sized fragments. * Each bit refers to a set of bytes in the filesystem, defined by * log2bpmb. This may be larger or smaller than the sector size, but * the overall size it describes will always be a round number of * sectors. A fragment id is always idlen bits long. * * < idlen > < n > <1> * +---------+-------//---------+---+ * | frag id | 0000....000000 | 1 | * +---------+-------//---------+---+ * * The physical disk space used by a fragment is taken from the start of * the fragment id up to and including the '1' bit - ie, idlen + n + 1 * bits. * * A fragment id can be repeated multiple times in the whole map for * large or fragmented files. The first map zone a fragment starts in * is given by fragment id / ids_per_zone - this allows objects to start * from any zone on the disk. * * Free space is described by a linked list of fragments. Each free * fragment describes free space in the same way as the other fragments, * however, the frag id specifies an offset (in map bits) from the end * of this fragment to the start of the next free fragment. * * Objects stored on the disk are allocated object ids (we use these as * our inode numbers.) Object ids contain a fragment id and an optional * offset. This allows a directory fragment to contain small files * associated with that directory. */ /* * For the future... */ static DEFINE_RWLOCK(adfs_map_lock); /* * This is fun. We need to load up to 19 bits from the map at an * arbitrary bit alignment. (We're limited to 19 bits by F+ version 2). */ #define GET_FRAG_ID(_map,_start,_idmask) \ ({ \ unsigned char *_m = _map + (_start >> 3); \ u32 _frag = get_unaligned_le32(_m); \ _frag >>= (_start & 7); \ _frag & _idmask; \ }) /* * return the map bit offset of the fragment frag_id in the zone dm. * Note that the loop is optimised for best asm code - look at the * output of: * gcc -D__KERNEL__ -O2 -I../../include -o - -S map.c */ static int lookup_zone(const struct adfs_discmap *dm, const unsigned int idlen, const u32 frag_id, unsigned int *offset) { const unsigned int endbit = dm->dm_endbit; const u32 idmask = (1 << idlen) - 1; unsigned char *map = dm->dm_bh->b_data; unsigned int start = dm->dm_startbit; unsigned int freelink, fragend; u32 frag; frag = GET_FRAG_ID(map, 8, idmask & 0x7fff); freelink = frag ? 8 + frag : 0; do { frag = GET_FRAG_ID(map, start, idmask); fragend = find_next_bit_le(map, endbit, start + idlen); if (fragend >= endbit) goto error; if (start == freelink) { freelink += frag & 0x7fff; } else if (frag == frag_id) { unsigned int length = fragend + 1 - start; if (*offset < length) return start + *offset; *offset -= length; } start = fragend + 1; } while (start < endbit); return -1; error: printk(KERN_ERR "adfs: oversized fragment 0x%x at 0x%x-0x%x\n", frag, start, fragend); return -1; } /* * Scan the free space map, for this zone, calculating the total * number of map bits in each free space fragment. * * Note: idmask is limited to 15 bits [3.2] */ static unsigned int scan_free_map(struct adfs_sb_info *asb, struct adfs_discmap *dm) { const unsigned int endbit = dm->dm_endbit; const unsigned int idlen = asb->s_idlen; const unsigned int frag_idlen = idlen <= 15 ? 
idlen : 15; const u32 idmask = (1 << frag_idlen) - 1; unsigned char *map = dm->dm_bh->b_data; unsigned int start = 8, fragend; u32 frag; unsigned long total = 0; /* * get fragment id */ frag = GET_FRAG_ID(map, start, idmask); /* * If the freelink is null, then no free fragments * exist in this zone. */ if (frag == 0) return 0; do { start += frag; frag = GET_FRAG_ID(map, start, idmask); fragend = find_next_bit_le(map, endbit, start + idlen); if (fragend >= endbit) goto error; total += fragend + 1 - start; } while (frag >= idlen + 1); if (frag != 0) printk(KERN_ERR "adfs: undersized free fragment\n"); return total; error: printk(KERN_ERR "adfs: oversized free fragment\n"); return 0; } static int scan_map(struct adfs_sb_info *asb, unsigned int zone, const u32 frag_id, unsigned int mapoff) { const unsigned int idlen = asb->s_idlen; struct adfs_discmap *dm, *dm_end; int result; dm = asb->s_map + zone; zone = asb->s_map_size; dm_end = asb->s_map + zone; do { result = lookup_zone(dm, idlen, frag_id, &mapoff); if (result != -1) goto found; dm ++; if (dm == dm_end) dm = asb->s_map; } while (--zone > 0); return -1; found: result -= dm->dm_startbit; result += dm->dm_startblk; return result; } /* * calculate the amount of free blocks in the map. * * n=1 * total_free = E(free_in_zone_n) * nzones */ void adfs_map_statfs(struct super_block *sb, struct kstatfs *buf) { struct adfs_sb_info *asb = ADFS_SB(sb); struct adfs_discrecord *dr = adfs_map_discrecord(asb->s_map); struct adfs_discmap *dm; unsigned int total = 0; unsigned int zone; dm = asb->s_map; zone = asb->s_map_size; do { total += scan_free_map(asb, dm++); } while (--zone > 0); buf->f_blocks = adfs_disc_size(dr) >> sb->s_blocksize_bits; buf->f_files = asb->s_ids_per_zone * asb->s_map_size; buf->f_bavail = buf->f_bfree = signed_asl(total, asb->s_map2blk); } int adfs_map_lookup(struct super_block *sb, u32 frag_id, unsigned int offset) { struct adfs_sb_info *asb = ADFS_SB(sb); unsigned int zone, mapoff; int result; /* * map & root fragment is special - it starts in the center of the * disk. 
The other fragments start at zone (frag / ids_per_zone) */ if (frag_id == ADFS_ROOT_FRAG) zone = asb->s_map_size >> 1; else zone = frag_id / asb->s_ids_per_zone; if (zone >= asb->s_map_size) goto bad_fragment; /* Convert sector offset to map offset */ mapoff = signed_asl(offset, -asb->s_map2blk); read_lock(&adfs_map_lock); result = scan_map(asb, zone, frag_id, mapoff); read_unlock(&adfs_map_lock); if (result > 0) { unsigned int secoff; /* Calculate sector offset into map block */ secoff = offset - signed_asl(mapoff, asb->s_map2blk); return secoff + signed_asl(result, asb->s_map2blk); } adfs_error(sb, "fragment 0x%04x at offset %d not found in map", frag_id, offset); return 0; bad_fragment: adfs_error(sb, "invalid fragment 0x%04x (zone = %d, max = %d)", frag_id, zone, asb->s_map_size); return 0; } static unsigned char adfs_calczonecheck(struct super_block *sb, unsigned char *map) { unsigned int v0, v1, v2, v3; int i; v0 = v1 = v2 = v3 = 0; for (i = sb->s_blocksize - 4; i; i -= 4) { v0 += map[i] + (v3 >> 8); v3 &= 0xff; v1 += map[i + 1] + (v0 >> 8); v0 &= 0xff; v2 += map[i + 2] + (v1 >> 8); v1 &= 0xff; v3 += map[i + 3] + (v2 >> 8); v2 &= 0xff; } v0 += v3 >> 8; v1 += map[1] + (v0 >> 8); v2 += map[2] + (v1 >> 8); v3 += map[3] + (v2 >> 8); return v0 ^ v1 ^ v2 ^ v3; } static int adfs_checkmap(struct super_block *sb, struct adfs_discmap *dm) { unsigned char crosscheck = 0, zonecheck = 1; int i; for (i = 0; i < ADFS_SB(sb)->s_map_size; i++) { unsigned char *map; map = dm[i].dm_bh->b_data; if (adfs_calczonecheck(sb, map) != map[0]) { adfs_error(sb, "zone %d fails zonecheck", i); zonecheck = 0; } crosscheck ^= map[3]; } if (crosscheck != 0xff) adfs_error(sb, "crosscheck != 0xff"); return crosscheck == 0xff && zonecheck; } /* * Layout the map - the first zone contains a copy of the disc record, * and the last zone must be limited to the size of the filesystem. */ static void adfs_map_layout(struct adfs_discmap *dm, unsigned int nzones, struct adfs_discrecord *dr) { unsigned int zone, zone_size; u64 size; zone_size = (8 << dr->log2secsize) - le16_to_cpu(dr->zone_spare); dm[0].dm_bh = NULL; dm[0].dm_startblk = 0; dm[0].dm_startbit = 32 + ADFS_DR_SIZE_BITS; dm[0].dm_endbit = 32 + zone_size; for (zone = 1; zone < nzones; zone++) { dm[zone].dm_bh = NULL; dm[zone].dm_startblk = zone * zone_size - ADFS_DR_SIZE_BITS; dm[zone].dm_startbit = 32; dm[zone].dm_endbit = 32 + zone_size; } size = adfs_disc_size(dr) >> dr->log2bpmb; size -= (nzones - 1) * zone_size - ADFS_DR_SIZE_BITS; dm[nzones - 1].dm_endbit = 32 + size; } static int adfs_map_read(struct adfs_discmap *dm, struct super_block *sb, unsigned int map_addr, unsigned int nzones) { unsigned int zone; for (zone = 0; zone < nzones; zone++) { dm[zone].dm_bh = sb_bread(sb, map_addr + zone); if (!dm[zone].dm_bh) return -EIO; } return 0; } static void adfs_map_relse(struct adfs_discmap *dm, unsigned int nzones) { unsigned int zone; for (zone = 0; zone < nzones; zone++) brelse(dm[zone].dm_bh); } struct adfs_discmap *adfs_read_map(struct super_block *sb, struct adfs_discrecord *dr) { struct adfs_sb_info *asb = ADFS_SB(sb); struct adfs_discmap *dm; unsigned int map_addr, zone_size, nzones; int ret; nzones = dr->nzones | dr->nzones_high << 8; zone_size = (8 << dr->log2secsize) - le16_to_cpu(dr->zone_spare); asb->s_idlen = dr->idlen; asb->s_map_size = nzones; asb->s_map2blk = dr->log2bpmb - dr->log2secsize; asb->s_log2sharesize = dr->log2sharesize; asb->s_ids_per_zone = zone_size / (asb->s_idlen + 1); map_addr = (nzones >> 1) * zone_size - ((nzones > 1) ? 
ADFS_DR_SIZE_BITS : 0); map_addr = signed_asl(map_addr, asb->s_map2blk); dm = kmalloc_array(nzones, sizeof(*dm), GFP_KERNEL); if (dm == NULL) { adfs_error(sb, "not enough memory"); return ERR_PTR(-ENOMEM); } adfs_map_layout(dm, nzones, dr); ret = adfs_map_read(dm, sb, map_addr, nzones); if (ret) { adfs_error(sb, "unable to read map"); goto error_free; } if (adfs_checkmap(sb, dm)) return dm; adfs_error(sb, "map corrupted"); error_free: adfs_map_relse(dm, nzones); kfree(dm); return ERR_PTR(-EIO); } void adfs_free_map(struct super_block *sb) { struct adfs_sb_info *asb = ADFS_SB(sb); adfs_map_relse(asb->s_map, asb->s_map_size); kfree(asb->s_map); }
linux-master
fs/adfs/map.c
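A hedged sketch of the fragment-id extraction performed by the GET_FRAG_ID macro: load 32 little-endian bits starting at the byte that contains the wanted bit offset, shift right by the bit remainder, and mask to idlen bits (idlen is at most 19 for F+ format, so the value always fits in the 32 bits read). The toy map contents and helper name are made up.

/*
 * Hedged illustration of reading an idlen-bit fragment id at an
 * arbitrary bit alignment from a map buffer.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t demo_get_frag_id(const uint8_t *map, unsigned int startbit,
				 unsigned int idlen)
{
	const uint8_t *p = map + (startbit >> 3);
	uint32_t v = (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
		     ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);

	return (v >> (startbit & 7)) & ((1u << idlen) - 1);
}

int main(void)
{
	/* toy map: a 9-bit fragment id 0x155 stored starting at bit 12 */
	uint8_t map[8] = { 0x00, 0x50, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00 };

	printf("frag id: 0x%x\n", demo_get_frag_id(map, 12, 9));
	return 0;
}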
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/adfs/file.c * * Copyright (C) 1997-1999 Russell King * from: * * linux/fs/ext2/file.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card ([email protected]) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/fs/minix/file.c * * Copyright (C) 1991, 1992 Linus Torvalds * * adfs regular file handling primitives */ #include "adfs.h" const struct file_operations adfs_file_operations = { .llseek = generic_file_llseek, .read_iter = generic_file_read_iter, .mmap = generic_file_mmap, .fsync = generic_file_fsync, .write_iter = generic_file_write_iter, .splice_read = filemap_splice_read, }; const struct inode_operations adfs_file_inode_operations = { .setattr = adfs_notify_change, };
linux-master
fs/adfs/file.c
// SPDX-License-Identifier: GPL-2.0 /* * Key setup for v1 encryption policies * * Copyright 2015, 2019 Google LLC */ /* * This file implements compatibility functions for the original encryption * policy version ("v1"), including: * * - Deriving per-file encryption keys using the AES-128-ECB based KDF * (rather than the new method of using HKDF-SHA512) * * - Retrieving fscrypt master keys from process-subscribed keyrings * (rather than the new method of using a filesystem-level keyring) * * - Handling policies with the DIRECT_KEY flag set using a master key table * (rather than the new method of implementing DIRECT_KEY with per-mode keys * managed alongside the master keys in the filesystem-level keyring) */ #include <crypto/algapi.h> #include <crypto/skcipher.h> #include <keys/user-type.h> #include <linux/hashtable.h> #include <linux/scatterlist.h> #include "fscrypt_private.h" /* Table of keys referenced by DIRECT_KEY policies */ static DEFINE_HASHTABLE(fscrypt_direct_keys, 6); /* 6 bits = 64 buckets */ static DEFINE_SPINLOCK(fscrypt_direct_keys_lock); /* * v1 key derivation function. This generates the derived key by encrypting the * master key with AES-128-ECB using the nonce as the AES key. This provides a * unique derived key with sufficient entropy for each inode. However, it's * nonstandard, non-extensible, doesn't evenly distribute the entropy from the * master key, and is trivially reversible: an attacker who compromises a * derived key can "decrypt" it to get back to the master key, then derive any * other key. For all new code, use HKDF instead. * * The master key must be at least as long as the derived key. If the master * key is longer, then only the first 'derived_keysize' bytes are used. */ static int derive_key_aes(const u8 *master_key, const u8 nonce[FSCRYPT_FILE_NONCE_SIZE], u8 *derived_key, unsigned int derived_keysize) { int res = 0; struct skcipher_request *req = NULL; DECLARE_CRYPTO_WAIT(wait); struct scatterlist src_sg, dst_sg; struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0); if (IS_ERR(tfm)) { res = PTR_ERR(tfm); tfm = NULL; goto out; } crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS); req = skcipher_request_alloc(tfm, GFP_KERNEL); if (!req) { res = -ENOMEM; goto out; } skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, crypto_req_done, &wait); res = crypto_skcipher_setkey(tfm, nonce, FSCRYPT_FILE_NONCE_SIZE); if (res < 0) goto out; sg_init_one(&src_sg, master_key, derived_keysize); sg_init_one(&dst_sg, derived_key, derived_keysize); skcipher_request_set_crypt(req, &src_sg, &dst_sg, derived_keysize, NULL); res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait); out: skcipher_request_free(req); crypto_free_skcipher(tfm); return res; } /* * Search the current task's subscribed keyrings for a "logon" key with * description prefix:descriptor, and if found acquire a read lock on it and * return a pointer to its validated payload in *payload_ret. 
*/ static struct key * find_and_lock_process_key(const char *prefix, const u8 descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE], unsigned int min_keysize, const struct fscrypt_key **payload_ret) { char *description; struct key *key; const struct user_key_payload *ukp; const struct fscrypt_key *payload; description = kasprintf(GFP_KERNEL, "%s%*phN", prefix, FSCRYPT_KEY_DESCRIPTOR_SIZE, descriptor); if (!description) return ERR_PTR(-ENOMEM); key = request_key(&key_type_logon, description, NULL); kfree(description); if (IS_ERR(key)) return key; down_read(&key->sem); ukp = user_key_payload_locked(key); if (!ukp) /* was the key revoked before we acquired its semaphore? */ goto invalid; payload = (const struct fscrypt_key *)ukp->data; if (ukp->datalen != sizeof(struct fscrypt_key) || payload->size < 1 || payload->size > FSCRYPT_MAX_KEY_SIZE) { fscrypt_warn(NULL, "key with description '%s' has invalid payload", key->description); goto invalid; } if (payload->size < min_keysize) { fscrypt_warn(NULL, "key with description '%s' is too short (got %u bytes, need %u+ bytes)", key->description, payload->size, min_keysize); goto invalid; } *payload_ret = payload; return key; invalid: up_read(&key->sem); key_put(key); return ERR_PTR(-ENOKEY); } /* Master key referenced by DIRECT_KEY policy */ struct fscrypt_direct_key { struct super_block *dk_sb; struct hlist_node dk_node; refcount_t dk_refcount; const struct fscrypt_mode *dk_mode; struct fscrypt_prepared_key dk_key; u8 dk_descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE]; u8 dk_raw[FSCRYPT_MAX_KEY_SIZE]; }; static void free_direct_key(struct fscrypt_direct_key *dk) { if (dk) { fscrypt_destroy_prepared_key(dk->dk_sb, &dk->dk_key); kfree_sensitive(dk); } } void fscrypt_put_direct_key(struct fscrypt_direct_key *dk) { if (!refcount_dec_and_lock(&dk->dk_refcount, &fscrypt_direct_keys_lock)) return; hash_del(&dk->dk_node); spin_unlock(&fscrypt_direct_keys_lock); free_direct_key(dk); } /* * Find/insert the given key into the fscrypt_direct_keys table. If found, it * is returned with elevated refcount, and 'to_insert' is freed if non-NULL. If * not found, 'to_insert' is inserted and returned if it's non-NULL; otherwise * NULL is returned. */ static struct fscrypt_direct_key * find_or_insert_direct_key(struct fscrypt_direct_key *to_insert, const u8 *raw_key, const struct fscrypt_info *ci) { unsigned long hash_key; struct fscrypt_direct_key *dk; /* * Careful: to avoid potentially leaking secret key bytes via timing * information, we must key the hash table by descriptor rather than by * raw key, and use crypto_memneq() when comparing raw keys. 
*/ BUILD_BUG_ON(sizeof(hash_key) > FSCRYPT_KEY_DESCRIPTOR_SIZE); memcpy(&hash_key, ci->ci_policy.v1.master_key_descriptor, sizeof(hash_key)); spin_lock(&fscrypt_direct_keys_lock); hash_for_each_possible(fscrypt_direct_keys, dk, dk_node, hash_key) { if (memcmp(ci->ci_policy.v1.master_key_descriptor, dk->dk_descriptor, FSCRYPT_KEY_DESCRIPTOR_SIZE) != 0) continue; if (ci->ci_mode != dk->dk_mode) continue; if (!fscrypt_is_key_prepared(&dk->dk_key, ci)) continue; if (crypto_memneq(raw_key, dk->dk_raw, ci->ci_mode->keysize)) continue; /* using existing tfm with same (descriptor, mode, raw_key) */ refcount_inc(&dk->dk_refcount); spin_unlock(&fscrypt_direct_keys_lock); free_direct_key(to_insert); return dk; } if (to_insert) hash_add(fscrypt_direct_keys, &to_insert->dk_node, hash_key); spin_unlock(&fscrypt_direct_keys_lock); return to_insert; } /* Prepare to encrypt directly using the master key in the given mode */ static struct fscrypt_direct_key * fscrypt_get_direct_key(const struct fscrypt_info *ci, const u8 *raw_key) { struct fscrypt_direct_key *dk; int err; /* Is there already a tfm for this key? */ dk = find_or_insert_direct_key(NULL, raw_key, ci); if (dk) return dk; /* Nope, allocate one. */ dk = kzalloc(sizeof(*dk), GFP_KERNEL); if (!dk) return ERR_PTR(-ENOMEM); dk->dk_sb = ci->ci_inode->i_sb; refcount_set(&dk->dk_refcount, 1); dk->dk_mode = ci->ci_mode; err = fscrypt_prepare_key(&dk->dk_key, raw_key, ci); if (err) goto err_free_dk; memcpy(dk->dk_descriptor, ci->ci_policy.v1.master_key_descriptor, FSCRYPT_KEY_DESCRIPTOR_SIZE); memcpy(dk->dk_raw, raw_key, ci->ci_mode->keysize); return find_or_insert_direct_key(dk, raw_key, ci); err_free_dk: free_direct_key(dk); return ERR_PTR(err); } /* v1 policy, DIRECT_KEY: use the master key directly */ static int setup_v1_file_key_direct(struct fscrypt_info *ci, const u8 *raw_master_key) { struct fscrypt_direct_key *dk; dk = fscrypt_get_direct_key(ci, raw_master_key); if (IS_ERR(dk)) return PTR_ERR(dk); ci->ci_direct_key = dk; ci->ci_enc_key = dk->dk_key; return 0; } /* v1 policy, !DIRECT_KEY: derive the file's encryption key */ static int setup_v1_file_key_derived(struct fscrypt_info *ci, const u8 *raw_master_key) { u8 *derived_key; int err; /* * This cannot be a stack buffer because it will be passed to the * scatterlist crypto API during derive_key_aes(). */ derived_key = kmalloc(ci->ci_mode->keysize, GFP_KERNEL); if (!derived_key) return -ENOMEM; err = derive_key_aes(raw_master_key, ci->ci_nonce, derived_key, ci->ci_mode->keysize); if (err) goto out; err = fscrypt_set_per_file_enc_key(ci, derived_key); out: kfree_sensitive(derived_key); return err; } int fscrypt_setup_v1_file_key(struct fscrypt_info *ci, const u8 *raw_master_key) { if (ci->ci_policy.v1.flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) return setup_v1_file_key_direct(ci, raw_master_key); else return setup_v1_file_key_derived(ci, raw_master_key); } int fscrypt_setup_v1_file_key_via_subscribed_keyrings(struct fscrypt_info *ci) { struct key *key; const struct fscrypt_key *payload; int err; key = find_and_lock_process_key(FSCRYPT_KEY_DESC_PREFIX, ci->ci_policy.v1.master_key_descriptor, ci->ci_mode->keysize, &payload); if (key == ERR_PTR(-ENOKEY) && ci->ci_inode->i_sb->s_cop->key_prefix) { key = find_and_lock_process_key(ci->ci_inode->i_sb->s_cop->key_prefix, ci->ci_policy.v1.master_key_descriptor, ci->ci_mode->keysize, &payload); } if (IS_ERR(key)) return PTR_ERR(key); err = fscrypt_setup_v1_file_key(ci, payload->raw); up_read(&key->sem); key_put(key); return err; }
linux-master
fs/crypto/keysetup_v1.c
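A hedged illustration of the comparison rationale noted in find_or_insert_direct_key(): raw keys are matched with a constant-time, memneq-style compare so that timing does not reveal where two keys first differ, unlike memcmp() which exits early. This is a userspace stand-in with an invented name, not the kernel's crypto_memneq().

/*
 * Hedged sketch: accumulate XOR differences over every byte and only
 * report equal/not-equal, never the position of the first mismatch.
 */
#include <stddef.h>
#include <stdio.h>

static int demo_memneq(const void *a, const void *b, size_t len)
{
	const volatile unsigned char *pa = a, *pb = b;
	unsigned char diff = 0;
	size_t i;

	for (i = 0; i < len; i++)
		diff |= pa[i] ^ pb[i];	/* accumulate, never branch on data */

	return diff != 0;	/* 0 = equal, non-zero = different */
}

int main(void)
{
	unsigned char k1[16] = { 1, 2, 3, 4 };
	unsigned char k2[16] = { 1, 2, 3, 5 };

	printf("k1 vs k1: %d\n", demo_memneq(k1, k1, sizeof(k1)));	/* 0 */
	printf("k1 vs k2: %d\n", demo_memneq(k1, k2, sizeof(k2)));	/* 1 */
	return 0;
}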
// SPDX-License-Identifier: GPL-2.0 /* * Filesystem-level keyring for fscrypt * * Copyright 2019 Google LLC */ /* * This file implements management of fscrypt master keys in the * filesystem-level keyring, including the ioctls: * * - FS_IOC_ADD_ENCRYPTION_KEY * - FS_IOC_REMOVE_ENCRYPTION_KEY * - FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS * - FS_IOC_GET_ENCRYPTION_KEY_STATUS * * See the "User API" section of Documentation/filesystems/fscrypt.rst for more * information about these ioctls. */ #include <asm/unaligned.h> #include <crypto/skcipher.h> #include <linux/key-type.h> #include <linux/random.h> #include <linux/seq_file.h> #include "fscrypt_private.h" /* The master encryption keys for a filesystem (->s_master_keys) */ struct fscrypt_keyring { /* * Lock that protects ->key_hashtable. It does *not* protect the * fscrypt_master_key structs themselves. */ spinlock_t lock; /* Hash table that maps fscrypt_key_specifier to fscrypt_master_key */ struct hlist_head key_hashtable[128]; }; static void wipe_master_key_secret(struct fscrypt_master_key_secret *secret) { fscrypt_destroy_hkdf(&secret->hkdf); memzero_explicit(secret, sizeof(*secret)); } static void move_master_key_secret(struct fscrypt_master_key_secret *dst, struct fscrypt_master_key_secret *src) { memcpy(dst, src, sizeof(*dst)); memzero_explicit(src, sizeof(*src)); } static void fscrypt_free_master_key(struct rcu_head *head) { struct fscrypt_master_key *mk = container_of(head, struct fscrypt_master_key, mk_rcu_head); /* * The master key secret and any embedded subkeys should have already * been wiped when the last active reference to the fscrypt_master_key * struct was dropped; doing it here would be unnecessarily late. * Nevertheless, use kfree_sensitive() in case anything was missed. */ kfree_sensitive(mk); } void fscrypt_put_master_key(struct fscrypt_master_key *mk) { if (!refcount_dec_and_test(&mk->mk_struct_refs)) return; /* * No structural references left, so free ->mk_users, and also free the * fscrypt_master_key struct itself after an RCU grace period ensures * that concurrent keyring lookups can no longer find it. */ WARN_ON_ONCE(refcount_read(&mk->mk_active_refs) != 0); key_put(mk->mk_users); mk->mk_users = NULL; call_rcu(&mk->mk_rcu_head, fscrypt_free_master_key); } void fscrypt_put_master_key_activeref(struct super_block *sb, struct fscrypt_master_key *mk) { size_t i; if (!refcount_dec_and_test(&mk->mk_active_refs)) return; /* * No active references left, so complete the full removal of this * fscrypt_master_key struct by removing it from the keyring and * destroying any subkeys embedded in it. */ if (WARN_ON_ONCE(!sb->s_master_keys)) return; spin_lock(&sb->s_master_keys->lock); hlist_del_rcu(&mk->mk_node); spin_unlock(&sb->s_master_keys->lock); /* * ->mk_active_refs == 0 implies that ->mk_secret is not present and * that ->mk_decrypted_inodes is empty. */ WARN_ON_ONCE(is_master_key_secret_present(&mk->mk_secret)); WARN_ON_ONCE(!list_empty(&mk->mk_decrypted_inodes)); for (i = 0; i <= FSCRYPT_MODE_MAX; i++) { fscrypt_destroy_prepared_key( sb, &mk->mk_direct_keys[i]); fscrypt_destroy_prepared_key( sb, &mk->mk_iv_ino_lblk_64_keys[i]); fscrypt_destroy_prepared_key( sb, &mk->mk_iv_ino_lblk_32_keys[i]); } memzero_explicit(&mk->mk_ino_hash_key, sizeof(mk->mk_ino_hash_key)); mk->mk_ino_hash_key_initialized = false; /* Drop the structural ref associated with the active refs. 
*/ fscrypt_put_master_key(mk); } static inline bool valid_key_spec(const struct fscrypt_key_specifier *spec) { if (spec->__reserved) return false; return master_key_spec_len(spec) != 0; } static int fscrypt_user_key_instantiate(struct key *key, struct key_preparsed_payload *prep) { /* * We just charge FSCRYPT_MAX_KEY_SIZE bytes to the user's key quota for * each key, regardless of the exact key size. The amount of memory * actually used is greater than the size of the raw key anyway. */ return key_payload_reserve(key, FSCRYPT_MAX_KEY_SIZE); } static void fscrypt_user_key_describe(const struct key *key, struct seq_file *m) { seq_puts(m, key->description); } /* * Type of key in ->mk_users. Each key of this type represents a particular * user who has added a particular master key. * * Note that the name of this key type really should be something like * ".fscrypt-user" instead of simply ".fscrypt". But the shorter name is chosen * mainly for simplicity of presentation in /proc/keys when read by a non-root * user. And it is expected to be rare that a key is actually added by multiple * users, since users should keep their encryption keys confidential. */ static struct key_type key_type_fscrypt_user = { .name = ".fscrypt", .instantiate = fscrypt_user_key_instantiate, .describe = fscrypt_user_key_describe, }; #define FSCRYPT_MK_USERS_DESCRIPTION_SIZE \ (CONST_STRLEN("fscrypt-") + 2 * FSCRYPT_KEY_IDENTIFIER_SIZE + \ CONST_STRLEN("-users") + 1) #define FSCRYPT_MK_USER_DESCRIPTION_SIZE \ (2 * FSCRYPT_KEY_IDENTIFIER_SIZE + CONST_STRLEN(".uid.") + 10 + 1) static void format_mk_users_keyring_description( char description[FSCRYPT_MK_USERS_DESCRIPTION_SIZE], const u8 mk_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE]) { sprintf(description, "fscrypt-%*phN-users", FSCRYPT_KEY_IDENTIFIER_SIZE, mk_identifier); } static void format_mk_user_description( char description[FSCRYPT_MK_USER_DESCRIPTION_SIZE], const u8 mk_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE]) { sprintf(description, "%*phN.uid.%u", FSCRYPT_KEY_IDENTIFIER_SIZE, mk_identifier, __kuid_val(current_fsuid())); } /* Create ->s_master_keys if needed. Synchronized by fscrypt_add_key_mutex. */ static int allocate_filesystem_keyring(struct super_block *sb) { struct fscrypt_keyring *keyring; if (sb->s_master_keys) return 0; keyring = kzalloc(sizeof(*keyring), GFP_KERNEL); if (!keyring) return -ENOMEM; spin_lock_init(&keyring->lock); /* * Pairs with the smp_load_acquire() in fscrypt_find_master_key(). * I.e., here we publish ->s_master_keys with a RELEASE barrier so that * concurrent tasks can ACQUIRE it. */ smp_store_release(&sb->s_master_keys, keyring); return 0; } /* * Release all encryption keys that have been added to the filesystem, along * with the keyring that contains them. * * This is called at unmount time, after all potentially-encrypted inodes have * been evicted. The filesystem's underlying block device(s) are still * available at this time; this is important because after user file accesses * have been allowed, this function may need to evict keys from the keyslots of * an inline crypto engine, which requires the block device(s). 
*/ void fscrypt_destroy_keyring(struct super_block *sb) { struct fscrypt_keyring *keyring = sb->s_master_keys; size_t i; if (!keyring) return; for (i = 0; i < ARRAY_SIZE(keyring->key_hashtable); i++) { struct hlist_head *bucket = &keyring->key_hashtable[i]; struct fscrypt_master_key *mk; struct hlist_node *tmp; hlist_for_each_entry_safe(mk, tmp, bucket, mk_node) { /* * Since all potentially-encrypted inodes were already * evicted, every key remaining in the keyring should * have an empty inode list, and should only still be in * the keyring due to the single active ref associated * with ->mk_secret. There should be no structural refs * beyond the one associated with the active ref. */ WARN_ON_ONCE(refcount_read(&mk->mk_active_refs) != 1); WARN_ON_ONCE(refcount_read(&mk->mk_struct_refs) != 1); WARN_ON_ONCE(!is_master_key_secret_present(&mk->mk_secret)); wipe_master_key_secret(&mk->mk_secret); fscrypt_put_master_key_activeref(sb, mk); } } kfree_sensitive(keyring); sb->s_master_keys = NULL; } static struct hlist_head * fscrypt_mk_hash_bucket(struct fscrypt_keyring *keyring, const struct fscrypt_key_specifier *mk_spec) { /* * Since key specifiers should be "random" values, it is sufficient to * use a trivial hash function that just takes the first several bits of * the key specifier. */ unsigned long i = get_unaligned((unsigned long *)&mk_spec->u); return &keyring->key_hashtable[i % ARRAY_SIZE(keyring->key_hashtable)]; } /* * Find the specified master key struct in ->s_master_keys and take a structural * ref to it. The structural ref guarantees that the key struct continues to * exist, but it does *not* guarantee that ->s_master_keys continues to contain * the key struct. The structural ref needs to be dropped by * fscrypt_put_master_key(). Returns NULL if the key struct is not found. */ struct fscrypt_master_key * fscrypt_find_master_key(struct super_block *sb, const struct fscrypt_key_specifier *mk_spec) { struct fscrypt_keyring *keyring; struct hlist_head *bucket; struct fscrypt_master_key *mk; /* * Pairs with the smp_store_release() in allocate_filesystem_keyring(). * I.e., another task can publish ->s_master_keys concurrently, * executing a RELEASE barrier. We need to use smp_load_acquire() here * to safely ACQUIRE the memory the other task published. */ keyring = smp_load_acquire(&sb->s_master_keys); if (keyring == NULL) return NULL; /* No keyring yet, so no keys yet. 
*/ bucket = fscrypt_mk_hash_bucket(keyring, mk_spec); rcu_read_lock(); switch (mk_spec->type) { case FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR: hlist_for_each_entry_rcu(mk, bucket, mk_node) { if (mk->mk_spec.type == FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR && memcmp(mk->mk_spec.u.descriptor, mk_spec->u.descriptor, FSCRYPT_KEY_DESCRIPTOR_SIZE) == 0 && refcount_inc_not_zero(&mk->mk_struct_refs)) goto out; } break; case FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER: hlist_for_each_entry_rcu(mk, bucket, mk_node) { if (mk->mk_spec.type == FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER && memcmp(mk->mk_spec.u.identifier, mk_spec->u.identifier, FSCRYPT_KEY_IDENTIFIER_SIZE) == 0 && refcount_inc_not_zero(&mk->mk_struct_refs)) goto out; } break; } mk = NULL; out: rcu_read_unlock(); return mk; } static int allocate_master_key_users_keyring(struct fscrypt_master_key *mk) { char description[FSCRYPT_MK_USERS_DESCRIPTION_SIZE]; struct key *keyring; format_mk_users_keyring_description(description, mk->mk_spec.u.identifier); keyring = keyring_alloc(description, GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, current_cred(), KEY_POS_SEARCH | KEY_USR_SEARCH | KEY_USR_READ | KEY_USR_VIEW, KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL); if (IS_ERR(keyring)) return PTR_ERR(keyring); mk->mk_users = keyring; return 0; } /* * Find the current user's "key" in the master key's ->mk_users. * Returns ERR_PTR(-ENOKEY) if not found. */ static struct key *find_master_key_user(struct fscrypt_master_key *mk) { char description[FSCRYPT_MK_USER_DESCRIPTION_SIZE]; key_ref_t keyref; format_mk_user_description(description, mk->mk_spec.u.identifier); /* * We need to mark the keyring reference as "possessed" so that we * acquire permission to search it, via the KEY_POS_SEARCH permission. */ keyref = keyring_search(make_key_ref(mk->mk_users, true /*possessed*/), &key_type_fscrypt_user, description, false); if (IS_ERR(keyref)) { if (PTR_ERR(keyref) == -EAGAIN || /* not found */ PTR_ERR(keyref) == -EKEYREVOKED) /* recently invalidated */ keyref = ERR_PTR(-ENOKEY); return ERR_CAST(keyref); } return key_ref_to_ptr(keyref); } /* * Give the current user a "key" in ->mk_users. This charges the user's quota * and marks the master key as added by the current user, so that it cannot be * removed by another user with the key. Either ->mk_sem must be held for * write, or the master key must be still undergoing initialization. */ static int add_master_key_user(struct fscrypt_master_key *mk) { char description[FSCRYPT_MK_USER_DESCRIPTION_SIZE]; struct key *mk_user; int err; format_mk_user_description(description, mk->mk_spec.u.identifier); mk_user = key_alloc(&key_type_fscrypt_user, description, current_fsuid(), current_gid(), current_cred(), KEY_POS_SEARCH | KEY_USR_VIEW, 0, NULL); if (IS_ERR(mk_user)) return PTR_ERR(mk_user); err = key_instantiate_and_link(mk_user, NULL, 0, mk->mk_users, NULL); key_put(mk_user); return err; } /* * Remove the current user's "key" from ->mk_users. * ->mk_sem must be held for write. * * Returns 0 if removed, -ENOKEY if not found, or another -errno code. */ static int remove_master_key_user(struct fscrypt_master_key *mk) { struct key *mk_user; int err; mk_user = find_master_key_user(mk); if (IS_ERR(mk_user)) return PTR_ERR(mk_user); err = key_unlink(mk->mk_users, mk_user); key_put(mk_user); return err; } /* * Allocate a new fscrypt_master_key, transfer the given secret over to it, and * insert it into sb->s_master_keys. 
*/ static int add_new_master_key(struct super_block *sb, struct fscrypt_master_key_secret *secret, const struct fscrypt_key_specifier *mk_spec) { struct fscrypt_keyring *keyring = sb->s_master_keys; struct fscrypt_master_key *mk; int err; mk = kzalloc(sizeof(*mk), GFP_KERNEL); if (!mk) return -ENOMEM; init_rwsem(&mk->mk_sem); refcount_set(&mk->mk_struct_refs, 1); mk->mk_spec = *mk_spec; INIT_LIST_HEAD(&mk->mk_decrypted_inodes); spin_lock_init(&mk->mk_decrypted_inodes_lock); if (mk_spec->type == FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER) { err = allocate_master_key_users_keyring(mk); if (err) goto out_put; err = add_master_key_user(mk); if (err) goto out_put; } move_master_key_secret(&mk->mk_secret, secret); refcount_set(&mk->mk_active_refs, 1); /* ->mk_secret is present */ spin_lock(&keyring->lock); hlist_add_head_rcu(&mk->mk_node, fscrypt_mk_hash_bucket(keyring, mk_spec)); spin_unlock(&keyring->lock); return 0; out_put: fscrypt_put_master_key(mk); return err; } #define KEY_DEAD 1 static int add_existing_master_key(struct fscrypt_master_key *mk, struct fscrypt_master_key_secret *secret) { int err; /* * If the current user is already in ->mk_users, then there's nothing to * do. Otherwise, we need to add the user to ->mk_users. (Neither is * applicable for v1 policy keys, which have NULL ->mk_users.) */ if (mk->mk_users) { struct key *mk_user = find_master_key_user(mk); if (mk_user != ERR_PTR(-ENOKEY)) { if (IS_ERR(mk_user)) return PTR_ERR(mk_user); key_put(mk_user); return 0; } err = add_master_key_user(mk); if (err) return err; } /* Re-add the secret if needed. */ if (!is_master_key_secret_present(&mk->mk_secret)) { if (!refcount_inc_not_zero(&mk->mk_active_refs)) return KEY_DEAD; move_master_key_secret(&mk->mk_secret, secret); } return 0; } static int do_add_master_key(struct super_block *sb, struct fscrypt_master_key_secret *secret, const struct fscrypt_key_specifier *mk_spec) { static DEFINE_MUTEX(fscrypt_add_key_mutex); struct fscrypt_master_key *mk; int err; mutex_lock(&fscrypt_add_key_mutex); /* serialize find + link */ mk = fscrypt_find_master_key(sb, mk_spec); if (!mk) { /* Didn't find the key in ->s_master_keys. Add it. */ err = allocate_filesystem_keyring(sb); if (!err) err = add_new_master_key(sb, secret, mk_spec); } else { /* * Found the key in ->s_master_keys. Re-add the secret if * needed, and add the user to ->mk_users if needed. */ down_write(&mk->mk_sem); err = add_existing_master_key(mk, secret); up_write(&mk->mk_sem); if (err == KEY_DEAD) { /* * We found a key struct, but it's already been fully * removed. Ignore the old struct and add a new one. * fscrypt_add_key_mutex means we don't need to worry * about concurrent adds. */ err = add_new_master_key(sb, secret, mk_spec); } fscrypt_put_master_key(mk); } mutex_unlock(&fscrypt_add_key_mutex); return err; } static int add_master_key(struct super_block *sb, struct fscrypt_master_key_secret *secret, struct fscrypt_key_specifier *key_spec) { int err; if (key_spec->type == FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER) { err = fscrypt_init_hkdf(&secret->hkdf, secret->raw, secret->size); if (err) return err; /* * Now that the HKDF context is initialized, the raw key is no * longer needed. 
*/ memzero_explicit(secret->raw, secret->size); /* Calculate the key identifier */ err = fscrypt_hkdf_expand(&secret->hkdf, HKDF_CONTEXT_KEY_IDENTIFIER, NULL, 0, key_spec->u.identifier, FSCRYPT_KEY_IDENTIFIER_SIZE); if (err) return err; } return do_add_master_key(sb, secret, key_spec); } static int fscrypt_provisioning_key_preparse(struct key_preparsed_payload *prep) { const struct fscrypt_provisioning_key_payload *payload = prep->data; if (prep->datalen < sizeof(*payload) + FSCRYPT_MIN_KEY_SIZE || prep->datalen > sizeof(*payload) + FSCRYPT_MAX_KEY_SIZE) return -EINVAL; if (payload->type != FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR && payload->type != FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER) return -EINVAL; if (payload->__reserved) return -EINVAL; prep->payload.data[0] = kmemdup(payload, prep->datalen, GFP_KERNEL); if (!prep->payload.data[0]) return -ENOMEM; prep->quotalen = prep->datalen; return 0; } static void fscrypt_provisioning_key_free_preparse( struct key_preparsed_payload *prep) { kfree_sensitive(prep->payload.data[0]); } static void fscrypt_provisioning_key_describe(const struct key *key, struct seq_file *m) { seq_puts(m, key->description); if (key_is_positive(key)) { const struct fscrypt_provisioning_key_payload *payload = key->payload.data[0]; seq_printf(m, ": %u [%u]", key->datalen, payload->type); } } static void fscrypt_provisioning_key_destroy(struct key *key) { kfree_sensitive(key->payload.data[0]); } static struct key_type key_type_fscrypt_provisioning = { .name = "fscrypt-provisioning", .preparse = fscrypt_provisioning_key_preparse, .free_preparse = fscrypt_provisioning_key_free_preparse, .instantiate = generic_key_instantiate, .describe = fscrypt_provisioning_key_describe, .destroy = fscrypt_provisioning_key_destroy, }; /* * Retrieve the raw key from the Linux keyring key specified by 'key_id', and * store it into 'secret'. * * The key must be of type "fscrypt-provisioning" and must have the field * fscrypt_provisioning_key_payload::type set to 'type', indicating that it's * only usable with fscrypt with the particular KDF version identified by * 'type'. We don't use the "logon" key type because there's no way to * completely restrict the use of such keys; they can be used by any kernel API * that accepts "logon" keys and doesn't require a specific service prefix. * * The ability to specify the key via Linux keyring key is intended for cases * where userspace needs to re-add keys after the filesystem is unmounted and * re-mounted. Most users should just provide the raw key directly instead. */ static int get_keyring_key(u32 key_id, u32 type, struct fscrypt_master_key_secret *secret) { key_ref_t ref; struct key *key; const struct fscrypt_provisioning_key_payload *payload; int err; ref = lookup_user_key(key_id, 0, KEY_NEED_SEARCH); if (IS_ERR(ref)) return PTR_ERR(ref); key = key_ref_to_ptr(ref); if (key->type != &key_type_fscrypt_provisioning) goto bad_key; payload = key->payload.data[0]; /* Don't allow fscrypt v1 keys to be used as v2 keys and vice versa. */ if (payload->type != type) goto bad_key; secret->size = key->datalen - sizeof(*payload); memcpy(secret->raw, payload->raw, secret->size); err = 0; goto out_put; bad_key: err = -EKEYREJECTED; out_put: key_ref_put(ref); return err; } /* * Add a master encryption key to the filesystem, causing all files which were * encrypted with it to appear "unlocked" (decrypted) when accessed. * * When adding a key for use by v1 encryption policies, this ioctl is * privileged, and userspace must provide the 'key_descriptor'. 
* * When adding a key for use by v2+ encryption policies, this ioctl is * unprivileged. This is needed, in general, to allow non-root users to use * encryption without encountering the visibility problems of process-subscribed * keyrings and the inability to properly remove keys. This works by having * each key identified by its cryptographically secure hash --- the * 'key_identifier'. The cryptographic hash ensures that a malicious user * cannot add the wrong key for a given identifier. Furthermore, each added key * is charged to the appropriate user's quota for the keyrings service, which * prevents a malicious user from adding too many keys. Finally, we forbid a * user from removing a key while other users have added it too, which prevents * a user who knows another user's key from causing a denial-of-service by * removing it at an inopportune time. (We tolerate that a user who knows a key * can prevent other users from removing it.) * * For more details, see the "FS_IOC_ADD_ENCRYPTION_KEY" section of * Documentation/filesystems/fscrypt.rst. */ int fscrypt_ioctl_add_key(struct file *filp, void __user *_uarg) { struct super_block *sb = file_inode(filp)->i_sb; struct fscrypt_add_key_arg __user *uarg = _uarg; struct fscrypt_add_key_arg arg; struct fscrypt_master_key_secret secret; int err; if (copy_from_user(&arg, uarg, sizeof(arg))) return -EFAULT; if (!valid_key_spec(&arg.key_spec)) return -EINVAL; if (memchr_inv(arg.__reserved, 0, sizeof(arg.__reserved))) return -EINVAL; /* * Only root can add keys that are identified by an arbitrary descriptor * rather than by a cryptographic hash --- since otherwise a malicious * user could add the wrong key. */ if (arg.key_spec.type == FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR && !capable(CAP_SYS_ADMIN)) return -EACCES; memset(&secret, 0, sizeof(secret)); if (arg.key_id) { if (arg.raw_size != 0) return -EINVAL; err = get_keyring_key(arg.key_id, arg.key_spec.type, &secret); if (err) goto out_wipe_secret; } else { if (arg.raw_size < FSCRYPT_MIN_KEY_SIZE || arg.raw_size > FSCRYPT_MAX_KEY_SIZE) return -EINVAL; secret.size = arg.raw_size; err = -EFAULT; if (copy_from_user(secret.raw, uarg->raw, secret.size)) goto out_wipe_secret; } err = add_master_key(sb, &secret, &arg.key_spec); if (err) goto out_wipe_secret; /* Return the key identifier to userspace, if applicable */ err = -EFAULT; if (arg.key_spec.type == FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER && copy_to_user(uarg->key_spec.u.identifier, arg.key_spec.u.identifier, FSCRYPT_KEY_IDENTIFIER_SIZE)) goto out_wipe_secret; err = 0; out_wipe_secret: wipe_master_key_secret(&secret); return err; } EXPORT_SYMBOL_GPL(fscrypt_ioctl_add_key); static void fscrypt_get_test_dummy_secret(struct fscrypt_master_key_secret *secret) { static u8 test_key[FSCRYPT_MAX_KEY_SIZE]; get_random_once(test_key, FSCRYPT_MAX_KEY_SIZE); memset(secret, 0, sizeof(*secret)); secret->size = FSCRYPT_MAX_KEY_SIZE; memcpy(secret->raw, test_key, FSCRYPT_MAX_KEY_SIZE); } int fscrypt_get_test_dummy_key_identifier( u8 key_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE]) { struct fscrypt_master_key_secret secret; int err; fscrypt_get_test_dummy_secret(&secret); err = fscrypt_init_hkdf(&secret.hkdf, secret.raw, secret.size); if (err) goto out; err = fscrypt_hkdf_expand(&secret.hkdf, HKDF_CONTEXT_KEY_IDENTIFIER, NULL, 0, key_identifier, FSCRYPT_KEY_IDENTIFIER_SIZE); out: wipe_master_key_secret(&secret); return err; } /** * fscrypt_add_test_dummy_key() - add the test dummy encryption key * @sb: the filesystem instance to add the key to * @key_spec: the key specifier of the 
test dummy encryption key * * Add the key for the test_dummy_encryption mount option to the filesystem. To * prevent misuse of this mount option, a per-boot random key is used instead of * a hardcoded one. This makes it so that any encrypted files created using * this option won't be accessible after a reboot. * * Return: 0 on success, -errno on failure */ int fscrypt_add_test_dummy_key(struct super_block *sb, struct fscrypt_key_specifier *key_spec) { struct fscrypt_master_key_secret secret; int err; fscrypt_get_test_dummy_secret(&secret); err = add_master_key(sb, &secret, key_spec); wipe_master_key_secret(&secret); return err; } /* * Verify that the current user has added a master key with the given identifier * (returns -ENOKEY if not). This is needed to prevent a user from encrypting * their files using some other user's key which they don't actually know. * Cryptographically this isn't much of a problem, but the semantics of this * would be a bit weird, so it's best to just forbid it. * * The system administrator (CAP_FOWNER) can override this, which should be * enough for any use cases where encryption policies are being set using keys * that were chosen ahead of time but aren't available at the moment. * * Note that the key may have already removed by the time this returns, but * that's okay; we just care whether the key was there at some point. * * Return: 0 if the key is added, -ENOKEY if it isn't, or another -errno code */ int fscrypt_verify_key_added(struct super_block *sb, const u8 identifier[FSCRYPT_KEY_IDENTIFIER_SIZE]) { struct fscrypt_key_specifier mk_spec; struct fscrypt_master_key *mk; struct key *mk_user; int err; mk_spec.type = FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER; memcpy(mk_spec.u.identifier, identifier, FSCRYPT_KEY_IDENTIFIER_SIZE); mk = fscrypt_find_master_key(sb, &mk_spec); if (!mk) { err = -ENOKEY; goto out; } down_read(&mk->mk_sem); mk_user = find_master_key_user(mk); if (IS_ERR(mk_user)) { err = PTR_ERR(mk_user); } else { key_put(mk_user); err = 0; } up_read(&mk->mk_sem); fscrypt_put_master_key(mk); out: if (err == -ENOKEY && capable(CAP_FOWNER)) err = 0; return err; } /* * Try to evict the inode's dentries from the dentry cache. If the inode is a * directory, then it can have at most one dentry; however, that dentry may be * pinned by child dentries, so first try to evict the children too. 
*/ static void shrink_dcache_inode(struct inode *inode) { struct dentry *dentry; if (S_ISDIR(inode->i_mode)) { dentry = d_find_any_alias(inode); if (dentry) { shrink_dcache_parent(dentry); dput(dentry); } } d_prune_aliases(inode); } static void evict_dentries_for_decrypted_inodes(struct fscrypt_master_key *mk) { struct fscrypt_info *ci; struct inode *inode; struct inode *toput_inode = NULL; spin_lock(&mk->mk_decrypted_inodes_lock); list_for_each_entry(ci, &mk->mk_decrypted_inodes, ci_master_key_link) { inode = ci->ci_inode; spin_lock(&inode->i_lock); if (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW)) { spin_unlock(&inode->i_lock); continue; } __iget(inode); spin_unlock(&inode->i_lock); spin_unlock(&mk->mk_decrypted_inodes_lock); shrink_dcache_inode(inode); iput(toput_inode); toput_inode = inode; spin_lock(&mk->mk_decrypted_inodes_lock); } spin_unlock(&mk->mk_decrypted_inodes_lock); iput(toput_inode); } static int check_for_busy_inodes(struct super_block *sb, struct fscrypt_master_key *mk) { struct list_head *pos; size_t busy_count = 0; unsigned long ino; char ino_str[50] = ""; spin_lock(&mk->mk_decrypted_inodes_lock); list_for_each(pos, &mk->mk_decrypted_inodes) busy_count++; if (busy_count == 0) { spin_unlock(&mk->mk_decrypted_inodes_lock); return 0; } { /* select an example file to show for debugging purposes */ struct inode *inode = list_first_entry(&mk->mk_decrypted_inodes, struct fscrypt_info, ci_master_key_link)->ci_inode; ino = inode->i_ino; } spin_unlock(&mk->mk_decrypted_inodes_lock); /* If the inode is currently being created, ino may still be 0. */ if (ino) snprintf(ino_str, sizeof(ino_str), ", including ino %lu", ino); fscrypt_warn(NULL, "%s: %zu inode(s) still busy after removing key with %s %*phN%s", sb->s_id, busy_count, master_key_spec_type(&mk->mk_spec), master_key_spec_len(&mk->mk_spec), (u8 *)&mk->mk_spec.u, ino_str); return -EBUSY; } static int try_to_lock_encrypted_files(struct super_block *sb, struct fscrypt_master_key *mk) { int err1; int err2; /* * An inode can't be evicted while it is dirty or has dirty pages. * Thus, we first have to clean the inodes in ->mk_decrypted_inodes. * * Just do it the easy way: call sync_filesystem(). It's overkill, but * it works, and it's more important to minimize the amount of caches we * drop than the amount of data we sync. Also, unprivileged users can * already call sync_filesystem() via sys_syncfs() or sys_sync(). */ down_read(&sb->s_umount); err1 = sync_filesystem(sb); up_read(&sb->s_umount); /* If a sync error occurs, still try to evict as much as possible. */ /* * Inodes are pinned by their dentries, so we have to evict their * dentries. shrink_dcache_sb() would suffice, but would be overkill * and inappropriate for use by unprivileged users. So instead go * through the inodes' alias lists and try to evict each dentry. */ evict_dentries_for_decrypted_inodes(mk); /* * evict_dentries_for_decrypted_inodes() already iput() each inode in * the list; any inodes for which that dropped the last reference will * have been evicted due to fscrypt_drop_inode() detecting the key * removal and telling the VFS to evict the inode. So to finish, we * just need to check whether any inodes couldn't be evicted. */ err2 = check_for_busy_inodes(sb, mk); return err1 ?: err2; } /* * Try to remove an fscrypt master encryption key. * * FS_IOC_REMOVE_ENCRYPTION_KEY (all_users=false) removes the current user's * claim to the key, then removes the key itself if no other users have claims. 
* FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS (all_users=true) always removes the * key itself. * * To "remove the key itself", first we wipe the actual master key secret, so * that no more inodes can be unlocked with it. Then we try to evict all cached * inodes that had been unlocked with the key. * * If all inodes were evicted, then we unlink the fscrypt_master_key from the * keyring. Otherwise it remains in the keyring in the "incompletely removed" * state (without the actual secret key) where it tracks the list of remaining * inodes. Userspace can execute the ioctl again later to retry eviction, or * alternatively can re-add the secret key again. * * For more details, see the "Removing keys" section of * Documentation/filesystems/fscrypt.rst. */ static int do_remove_key(struct file *filp, void __user *_uarg, bool all_users) { struct super_block *sb = file_inode(filp)->i_sb; struct fscrypt_remove_key_arg __user *uarg = _uarg; struct fscrypt_remove_key_arg arg; struct fscrypt_master_key *mk; u32 status_flags = 0; int err; bool inodes_remain; if (copy_from_user(&arg, uarg, sizeof(arg))) return -EFAULT; if (!valid_key_spec(&arg.key_spec)) return -EINVAL; if (memchr_inv(arg.__reserved, 0, sizeof(arg.__reserved))) return -EINVAL; /* * Only root can add and remove keys that are identified by an arbitrary * descriptor rather than by a cryptographic hash. */ if (arg.key_spec.type == FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR && !capable(CAP_SYS_ADMIN)) return -EACCES; /* Find the key being removed. */ mk = fscrypt_find_master_key(sb, &arg.key_spec); if (!mk) return -ENOKEY; down_write(&mk->mk_sem); /* If relevant, remove current user's (or all users) claim to the key */ if (mk->mk_users && mk->mk_users->keys.nr_leaves_on_tree != 0) { if (all_users) err = keyring_clear(mk->mk_users); else err = remove_master_key_user(mk); if (err) { up_write(&mk->mk_sem); goto out_put_key; } if (mk->mk_users->keys.nr_leaves_on_tree != 0) { /* * Other users have still added the key too. We removed * the current user's claim to the key, but we still * can't remove the key itself. */ status_flags |= FSCRYPT_KEY_REMOVAL_STATUS_FLAG_OTHER_USERS; err = 0; up_write(&mk->mk_sem); goto out_put_key; } } /* No user claims remaining. Go ahead and wipe the secret. */ err = -ENOKEY; if (is_master_key_secret_present(&mk->mk_secret)) { wipe_master_key_secret(&mk->mk_secret); fscrypt_put_master_key_activeref(sb, mk); err = 0; } inodes_remain = refcount_read(&mk->mk_active_refs) > 0; up_write(&mk->mk_sem); if (inodes_remain) { /* Some inodes still reference this key; try to evict them. */ err = try_to_lock_encrypted_files(sb, mk); if (err == -EBUSY) { status_flags |= FSCRYPT_KEY_REMOVAL_STATUS_FLAG_FILES_BUSY; err = 0; } } /* * We return 0 if we successfully did something: removed a claim to the * key, wiped the secret, or tried locking the files again. Users need * to check the informational status flags if they care whether the key * has been fully removed including all files locked. 
*/ out_put_key: fscrypt_put_master_key(mk); if (err == 0) err = put_user(status_flags, &uarg->removal_status_flags); return err; } int fscrypt_ioctl_remove_key(struct file *filp, void __user *uarg) { return do_remove_key(filp, uarg, false); } EXPORT_SYMBOL_GPL(fscrypt_ioctl_remove_key); int fscrypt_ioctl_remove_key_all_users(struct file *filp, void __user *uarg) { if (!capable(CAP_SYS_ADMIN)) return -EACCES; return do_remove_key(filp, uarg, true); } EXPORT_SYMBOL_GPL(fscrypt_ioctl_remove_key_all_users); /* * Retrieve the status of an fscrypt master encryption key. * * We set ->status to indicate whether the key is absent, present, or * incompletely removed. "Incompletely removed" means that the master key * secret has been removed, but some files which had been unlocked with it are * still in use. This field allows applications to easily determine the state * of an encrypted directory without using a hack such as trying to open a * regular file in it (which can confuse the "incompletely removed" state with * absent or present). * * In addition, for v2 policy keys we allow applications to determine, via * ->status_flags and ->user_count, whether the key has been added by the * current user, by other users, or by both. Most applications should not need * this, since ordinarily only one user should know a given key. However, if a * secret key is shared by multiple users, applications may wish to add an * already-present key to prevent other users from removing it. This ioctl can * be used to check whether that really is the case before the work is done to * add the key --- which might e.g. require prompting the user for a passphrase. * * For more details, see the "FS_IOC_GET_ENCRYPTION_KEY_STATUS" section of * Documentation/filesystems/fscrypt.rst. */ int fscrypt_ioctl_get_key_status(struct file *filp, void __user *uarg) { struct super_block *sb = file_inode(filp)->i_sb; struct fscrypt_get_key_status_arg arg; struct fscrypt_master_key *mk; int err; if (copy_from_user(&arg, uarg, sizeof(arg))) return -EFAULT; if (!valid_key_spec(&arg.key_spec)) return -EINVAL; if (memchr_inv(arg.__reserved, 0, sizeof(arg.__reserved))) return -EINVAL; arg.status_flags = 0; arg.user_count = 0; memset(arg.__out_reserved, 0, sizeof(arg.__out_reserved)); mk = fscrypt_find_master_key(sb, &arg.key_spec); if (!mk) { arg.status = FSCRYPT_KEY_STATUS_ABSENT; err = 0; goto out; } down_read(&mk->mk_sem); if (!is_master_key_secret_present(&mk->mk_secret)) { arg.status = refcount_read(&mk->mk_active_refs) > 0 ? FSCRYPT_KEY_STATUS_INCOMPLETELY_REMOVED : FSCRYPT_KEY_STATUS_ABSENT /* raced with full removal */; err = 0; goto out_release_key; } arg.status = FSCRYPT_KEY_STATUS_PRESENT; if (mk->mk_users) { struct key *mk_user; arg.user_count = mk->mk_users->keys.nr_leaves_on_tree; mk_user = find_master_key_user(mk); if (!IS_ERR(mk_user)) { arg.status_flags |= FSCRYPT_KEY_STATUS_FLAG_ADDED_BY_SELF; key_put(mk_user); } else if (mk_user != ERR_PTR(-ENOKEY)) { err = PTR_ERR(mk_user); goto out_release_key; } } err = 0; out_release_key: up_read(&mk->mk_sem); fscrypt_put_master_key(mk); out: if (!err && copy_to_user(uarg, &arg, sizeof(arg))) err = -EFAULT; return err; } EXPORT_SYMBOL_GPL(fscrypt_ioctl_get_key_status); int __init fscrypt_init_keyring(void) { int err; err = register_key_type(&key_type_fscrypt_user); if (err) return err; err = register_key_type(&key_type_fscrypt_provisioning); if (err) goto err_unregister_fscrypt_user; return 0; err_unregister_fscrypt_user: unregister_key_type(&key_type_fscrypt_user); return err; }
linux-master
fs/crypto/keyring.c
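The keyring code above is driven from userspace through the FS_IOC_ADD_ENCRYPTION_KEY, FS_IOC_REMOVE_ENCRYPTION_KEY and FS_IOC_GET_ENCRYPTION_KEY_STATUS ioctls it implements. The following is a minimal userspace sketch, not part of the kernel sources, showing one way to add a raw 64-byte key for v2 policies; it assumes the UAPI definitions from <linux/fscrypt.h>, omits most error handling, and the helper name add_v2_key() and the mnt_fd/raw parameters are illustrative only.

#include <linux/fscrypt.h>
#include <sys/ioctl.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative only: add a raw v2-policy key and return the key identifier
 * that the kernel derives via HKDF (see add_master_key() above). */
static int add_v2_key(int mnt_fd, const unsigned char raw[FSCRYPT_MAX_KEY_SIZE],
		      unsigned char identifier[FSCRYPT_KEY_IDENTIFIER_SIZE])
{
	struct fscrypt_add_key_arg *arg;
	int ret;

	/* The raw key bytes follow the fixed-size header in the flexible
	 * array; calloc() also zeroes the __reserved fields, which the
	 * kernel requires. */
	arg = calloc(1, sizeof(*arg) + FSCRYPT_MAX_KEY_SIZE);
	if (!arg)
		return -1;
	arg->key_spec.type = FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER;
	arg->raw_size = FSCRYPT_MAX_KEY_SIZE;
	memcpy(arg->raw, raw, FSCRYPT_MAX_KEY_SIZE);

	/* mnt_fd may refer to any file or directory on the target filesystem. */
	ret = ioctl(mnt_fd, FS_IOC_ADD_ENCRYPTION_KEY, arg);
	if (ret == 0)
		/* On success the kernel filled in the computed key identifier. */
		memcpy(identifier, arg->key_spec.u.identifier,
		       FSCRYPT_KEY_IDENTIFIER_SIZE);

	/* Best-effort scrub of the key material (a real tool should prefer
	 * explicit_bzero()). */
	memset(arg->raw, 0, FSCRYPT_MAX_KEY_SIZE);
	free(arg);
	return ret;
}

Removing the key later follows the same pattern with struct fscrypt_remove_key_arg and FS_IOC_REMOVE_ENCRYPTION_KEY, after which the removal_status_flags report whether other users still hold the key or files are still busy.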
// SPDX-License-Identifier: GPL-2.0 /* * This contains functions for filename crypto management * * Copyright (C) 2015, Google, Inc. * Copyright (C) 2015, Motorola Mobility * * Written by Uday Savagaonkar, 2014. * Modified by Jaegeuk Kim, 2015. * * This has not yet undergone a rigorous security audit. */ #include <linux/namei.h> #include <linux/scatterlist.h> #include <crypto/hash.h> #include <crypto/sha2.h> #include <crypto/skcipher.h> #include "fscrypt_private.h" /* * The minimum message length (input and output length), in bytes, for all * filenames encryption modes. Filenames shorter than this will be zero-padded * before being encrypted. */ #define FSCRYPT_FNAME_MIN_MSG_LEN 16 /* * struct fscrypt_nokey_name - identifier for directory entry when key is absent * * When userspace lists an encrypted directory without access to the key, the * filesystem must present a unique "no-key name" for each filename that allows * it to find the directory entry again if requested. Naively, that would just * mean using the ciphertext filenames. However, since the ciphertext filenames * can contain illegal characters ('\0' and '/'), they must be encoded in some * way. We use base64url. But that can cause names to exceed NAME_MAX (255 * bytes), so we also need to use a strong hash to abbreviate long names. * * The filesystem may also need another kind of hash, the "dirhash", to quickly * find the directory entry. Since filesystems normally compute the dirhash * over the on-disk filename (i.e. the ciphertext), it's not computable from * no-key names that abbreviate the ciphertext using the strong hash to fit in * NAME_MAX. It's also not computable if it's a keyed hash taken over the * plaintext (but it may still be available in the on-disk directory entry); * casefolded directories use this type of dirhash. At least in these cases, * each no-key name must include the name's dirhash too. * * To meet all these requirements, we base64url-encode the following * variable-length structure. It contains the dirhash, or 0's if the filesystem * didn't provide one; up to 149 bytes of the ciphertext name; and for * ciphertexts longer than 149 bytes, also the SHA-256 of the remaining bytes. * * This ensures that each no-key name contains everything needed to find the * directory entry again, contains only legal characters, doesn't exceed * NAME_MAX, is unambiguous unless there's a SHA-256 collision, and that we only * take the performance hit of SHA-256 on very long filenames (which are rare). */ struct fscrypt_nokey_name { u32 dirhash[2]; u8 bytes[149]; u8 sha256[SHA256_DIGEST_SIZE]; }; /* 189 bytes => 252 bytes base64url-encoded, which is <= NAME_MAX (255) */ /* * Decoded size of max-size no-key name, i.e. a name that was abbreviated using * the strong hash and thus includes the 'sha256' field. This isn't simply * sizeof(struct fscrypt_nokey_name), as the padding at the end isn't included. */ #define FSCRYPT_NOKEY_NAME_MAX offsetofend(struct fscrypt_nokey_name, sha256) /* Encoded size of max-size no-key name */ #define FSCRYPT_NOKEY_NAME_MAX_ENCODED \ FSCRYPT_BASE64URL_CHARS(FSCRYPT_NOKEY_NAME_MAX) static inline bool fscrypt_is_dot_dotdot(const struct qstr *str) { if (str->len == 1 && str->name[0] == '.') return true; if (str->len == 2 && str->name[0] == '.' && str->name[1] == '.') return true; return false; } /** * fscrypt_fname_encrypt() - encrypt a filename * @inode: inode of the parent directory (for regular filenames) * or of the symlink (for symlink targets). Key must already be * set up. 
* @iname: the filename to encrypt * @out: (output) the encrypted filename * @olen: size of the encrypted filename. It must be at least @iname->len. * Any extra space is filled with NUL padding before encryption. * * Return: 0 on success, -errno on failure */ int fscrypt_fname_encrypt(const struct inode *inode, const struct qstr *iname, u8 *out, unsigned int olen) { struct skcipher_request *req = NULL; DECLARE_CRYPTO_WAIT(wait); const struct fscrypt_info *ci = inode->i_crypt_info; struct crypto_skcipher *tfm = ci->ci_enc_key.tfm; union fscrypt_iv iv; struct scatterlist sg; int res; /* * Copy the filename to the output buffer for encrypting in-place and * pad it with the needed number of NUL bytes. */ if (WARN_ON_ONCE(olen < iname->len)) return -ENOBUFS; memcpy(out, iname->name, iname->len); memset(out + iname->len, 0, olen - iname->len); /* Initialize the IV */ fscrypt_generate_iv(&iv, 0, ci); /* Set up the encryption request */ req = skcipher_request_alloc(tfm, GFP_NOFS); if (!req) return -ENOMEM; skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, crypto_req_done, &wait); sg_init_one(&sg, out, olen); skcipher_request_set_crypt(req, &sg, &sg, olen, &iv); /* Do the encryption */ res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait); skcipher_request_free(req); if (res < 0) { fscrypt_err(inode, "Filename encryption failed: %d", res); return res; } return 0; } EXPORT_SYMBOL_GPL(fscrypt_fname_encrypt); /** * fname_decrypt() - decrypt a filename * @inode: inode of the parent directory (for regular filenames) * or of the symlink (for symlink targets) * @iname: the encrypted filename to decrypt * @oname: (output) the decrypted filename. The caller must have allocated * enough space for this, e.g. using fscrypt_fname_alloc_buffer(). * * Return: 0 on success, -errno on failure */ static int fname_decrypt(const struct inode *inode, const struct fscrypt_str *iname, struct fscrypt_str *oname) { struct skcipher_request *req = NULL; DECLARE_CRYPTO_WAIT(wait); struct scatterlist src_sg, dst_sg; const struct fscrypt_info *ci = inode->i_crypt_info; struct crypto_skcipher *tfm = ci->ci_enc_key.tfm; union fscrypt_iv iv; int res; /* Allocate request */ req = skcipher_request_alloc(tfm, GFP_NOFS); if (!req) return -ENOMEM; skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, crypto_req_done, &wait); /* Initialize IV */ fscrypt_generate_iv(&iv, 0, ci); /* Create decryption request */ sg_init_one(&src_sg, iname->name, iname->len); sg_init_one(&dst_sg, oname->name, oname->len); skcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, &iv); res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait); skcipher_request_free(req); if (res < 0) { fscrypt_err(inode, "Filename decryption failed: %d", res); return res; } oname->len = strnlen(oname->name, iname->len); return 0; } static const char base64url_table[65] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_"; #define FSCRYPT_BASE64URL_CHARS(nbytes) DIV_ROUND_UP((nbytes) * 4, 3) /** * fscrypt_base64url_encode() - base64url-encode some binary data * @src: the binary data to encode * @srclen: the length of @src in bytes * @dst: (output) the base64url-encoded string. Not NUL-terminated. * * Encodes data using base64url encoding, i.e. the "Base 64 Encoding with URL * and Filename Safe Alphabet" specified by RFC 4648. '='-padding isn't used, * as it's unneeded and not required by the RFC. 
base64url is used instead of * base64 to avoid the '/' character, which isn't allowed in filenames. * * Return: the length of the resulting base64url-encoded string in bytes. * This will be equal to FSCRYPT_BASE64URL_CHARS(srclen). */ static int fscrypt_base64url_encode(const u8 *src, int srclen, char *dst) { u32 ac = 0; int bits = 0; int i; char *cp = dst; for (i = 0; i < srclen; i++) { ac = (ac << 8) | src[i]; bits += 8; do { bits -= 6; *cp++ = base64url_table[(ac >> bits) & 0x3f]; } while (bits >= 6); } if (bits) *cp++ = base64url_table[(ac << (6 - bits)) & 0x3f]; return cp - dst; } /** * fscrypt_base64url_decode() - base64url-decode a string * @src: the string to decode. Doesn't need to be NUL-terminated. * @srclen: the length of @src in bytes * @dst: (output) the decoded binary data * * Decodes a string using base64url encoding, i.e. the "Base 64 Encoding with * URL and Filename Safe Alphabet" specified by RFC 4648. '='-padding isn't * accepted, nor are non-encoding characters such as whitespace. * * This implementation hasn't been optimized for performance. * * Return: the length of the resulting decoded binary data in bytes, * or -1 if the string isn't a valid base64url string. */ static int fscrypt_base64url_decode(const char *src, int srclen, u8 *dst) { u32 ac = 0; int bits = 0; int i; u8 *bp = dst; for (i = 0; i < srclen; i++) { const char *p = strchr(base64url_table, src[i]); if (p == NULL || src[i] == 0) return -1; ac = (ac << 6) | (p - base64url_table); bits += 6; if (bits >= 8) { bits -= 8; *bp++ = (u8)(ac >> bits); } } if (ac & ((1 << bits) - 1)) return -1; return bp - dst; } bool __fscrypt_fname_encrypted_size(const union fscrypt_policy *policy, u32 orig_len, u32 max_len, u32 *encrypted_len_ret) { int padding = 4 << (fscrypt_policy_flags(policy) & FSCRYPT_POLICY_FLAGS_PAD_MASK); u32 encrypted_len; if (orig_len > max_len) return false; encrypted_len = max_t(u32, orig_len, FSCRYPT_FNAME_MIN_MSG_LEN); encrypted_len = round_up(encrypted_len, padding); *encrypted_len_ret = min(encrypted_len, max_len); return true; } /** * fscrypt_fname_encrypted_size() - calculate length of encrypted filename * @inode: parent inode of dentry name being encrypted. Key must * already be set up. * @orig_len: length of the original filename * @max_len: maximum length to return * @encrypted_len_ret: where calculated length should be returned (on success) * * Filenames that are shorter than the maximum length may have their lengths * increased slightly by encryption, due to padding that is applied. * * Return: false if the orig_len is greater than max_len. Otherwise, true and * fill out encrypted_len_ret with the length (up to max_len). */ bool fscrypt_fname_encrypted_size(const struct inode *inode, u32 orig_len, u32 max_len, u32 *encrypted_len_ret) { return __fscrypt_fname_encrypted_size(&inode->i_crypt_info->ci_policy, orig_len, max_len, encrypted_len_ret); } EXPORT_SYMBOL_GPL(fscrypt_fname_encrypted_size); /** * fscrypt_fname_alloc_buffer() - allocate a buffer for presented filenames * @max_encrypted_len: maximum length of encrypted filenames the buffer will be * used to present * @crypto_str: (output) buffer to allocate * * Allocate a buffer that is large enough to hold any decrypted or encoded * filename (null-terminated), for the given maximum encrypted filename length. 
* * Return: 0 on success, -errno on failure */ int fscrypt_fname_alloc_buffer(u32 max_encrypted_len, struct fscrypt_str *crypto_str) { u32 max_presented_len = max_t(u32, FSCRYPT_NOKEY_NAME_MAX_ENCODED, max_encrypted_len); crypto_str->name = kmalloc(max_presented_len + 1, GFP_NOFS); if (!crypto_str->name) return -ENOMEM; crypto_str->len = max_presented_len; return 0; } EXPORT_SYMBOL(fscrypt_fname_alloc_buffer); /** * fscrypt_fname_free_buffer() - free a buffer for presented filenames * @crypto_str: the buffer to free * * Free a buffer that was allocated by fscrypt_fname_alloc_buffer(). */ void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str) { if (!crypto_str) return; kfree(crypto_str->name); crypto_str->name = NULL; } EXPORT_SYMBOL(fscrypt_fname_free_buffer); /** * fscrypt_fname_disk_to_usr() - convert an encrypted filename to * user-presentable form * @inode: inode of the parent directory (for regular filenames) * or of the symlink (for symlink targets) * @hash: first part of the name's dirhash, if applicable. This only needs to * be provided if the filename is located in an indexed directory whose * encryption key may be unavailable. Not needed for symlink targets. * @minor_hash: second part of the name's dirhash, if applicable * @iname: encrypted filename to convert. May also be "." or "..", which * aren't actually encrypted. * @oname: output buffer for the user-presentable filename. The caller must * have allocated enough space for this, e.g. using * fscrypt_fname_alloc_buffer(). * * If the key is available, we'll decrypt the disk name. Otherwise, we'll * encode it for presentation in fscrypt_nokey_name format. * See struct fscrypt_nokey_name for details. * * Return: 0 on success, -errno on failure */ int fscrypt_fname_disk_to_usr(const struct inode *inode, u32 hash, u32 minor_hash, const struct fscrypt_str *iname, struct fscrypt_str *oname) { const struct qstr qname = FSTR_TO_QSTR(iname); struct fscrypt_nokey_name nokey_name; u32 size; /* size of the unencoded no-key name */ if (fscrypt_is_dot_dotdot(&qname)) { oname->name[0] = '.'; oname->name[iname->len - 1] = '.'; oname->len = iname->len; return 0; } if (iname->len < FSCRYPT_FNAME_MIN_MSG_LEN) return -EUCLEAN; if (fscrypt_has_encryption_key(inode)) return fname_decrypt(inode, iname, oname); /* * Sanity check that struct fscrypt_nokey_name doesn't have padding * between fields and that its encoded size never exceeds NAME_MAX. */ BUILD_BUG_ON(offsetofend(struct fscrypt_nokey_name, dirhash) != offsetof(struct fscrypt_nokey_name, bytes)); BUILD_BUG_ON(offsetofend(struct fscrypt_nokey_name, bytes) != offsetof(struct fscrypt_nokey_name, sha256)); BUILD_BUG_ON(FSCRYPT_NOKEY_NAME_MAX_ENCODED > NAME_MAX); nokey_name.dirhash[0] = hash; nokey_name.dirhash[1] = minor_hash; if (iname->len <= sizeof(nokey_name.bytes)) { memcpy(nokey_name.bytes, iname->name, iname->len); size = offsetof(struct fscrypt_nokey_name, bytes[iname->len]); } else { memcpy(nokey_name.bytes, iname->name, sizeof(nokey_name.bytes)); /* Compute strong hash of remaining part of name. 
*/ sha256(&iname->name[sizeof(nokey_name.bytes)], iname->len - sizeof(nokey_name.bytes), nokey_name.sha256); size = FSCRYPT_NOKEY_NAME_MAX; } oname->len = fscrypt_base64url_encode((const u8 *)&nokey_name, size, oname->name); return 0; } EXPORT_SYMBOL(fscrypt_fname_disk_to_usr); /** * fscrypt_setup_filename() - prepare to search a possibly encrypted directory * @dir: the directory that will be searched * @iname: the user-provided filename being searched for * @lookup: 1 if we're allowed to proceed without the key because it's * ->lookup() or we're finding the dir_entry for deletion; 0 if we cannot * proceed without the key because we're going to create the dir_entry. * @fname: the filename information to be filled in * * Given a user-provided filename @iname, this function sets @fname->disk_name * to the name that would be stored in the on-disk directory entry, if possible. * If the directory is unencrypted this is simply @iname. Else, if we have the * directory's encryption key, then @iname is the plaintext, so we encrypt it to * get the disk_name. * * Else, for keyless @lookup operations, @iname should be a no-key name, so we * decode it to get the struct fscrypt_nokey_name. Non-@lookup operations will * be impossible in this case, so we fail them with ENOKEY. * * If successful, fscrypt_free_filename() must be called later to clean up. * * Return: 0 on success, -errno on failure */ int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname, int lookup, struct fscrypt_name *fname) { struct fscrypt_nokey_name *nokey_name; int ret; memset(fname, 0, sizeof(struct fscrypt_name)); fname->usr_fname = iname; if (!IS_ENCRYPTED(dir) || fscrypt_is_dot_dotdot(iname)) { fname->disk_name.name = (unsigned char *)iname->name; fname->disk_name.len = iname->len; return 0; } ret = fscrypt_get_encryption_info(dir, lookup); if (ret) return ret; if (fscrypt_has_encryption_key(dir)) { if (!fscrypt_fname_encrypted_size(dir, iname->len, NAME_MAX, &fname->crypto_buf.len)) return -ENAMETOOLONG; fname->crypto_buf.name = kmalloc(fname->crypto_buf.len, GFP_NOFS); if (!fname->crypto_buf.name) return -ENOMEM; ret = fscrypt_fname_encrypt(dir, iname, fname->crypto_buf.name, fname->crypto_buf.len); if (ret) goto errout; fname->disk_name.name = fname->crypto_buf.name; fname->disk_name.len = fname->crypto_buf.len; return 0; } if (!lookup) return -ENOKEY; fname->is_nokey_name = true; /* * We don't have the key and we are doing a lookup; decode the * user-supplied name */ if (iname->len > FSCRYPT_NOKEY_NAME_MAX_ENCODED) return -ENOENT; fname->crypto_buf.name = kmalloc(FSCRYPT_NOKEY_NAME_MAX, GFP_KERNEL); if (fname->crypto_buf.name == NULL) return -ENOMEM; ret = fscrypt_base64url_decode(iname->name, iname->len, fname->crypto_buf.name); if (ret < (int)offsetof(struct fscrypt_nokey_name, bytes[1]) || (ret > offsetof(struct fscrypt_nokey_name, sha256) && ret != FSCRYPT_NOKEY_NAME_MAX)) { ret = -ENOENT; goto errout; } fname->crypto_buf.len = ret; nokey_name = (void *)fname->crypto_buf.name; fname->hash = nokey_name->dirhash[0]; fname->minor_hash = nokey_name->dirhash[1]; if (ret != FSCRYPT_NOKEY_NAME_MAX) { /* The full ciphertext filename is available. 
*/ fname->disk_name.name = nokey_name->bytes; fname->disk_name.len = ret - offsetof(struct fscrypt_nokey_name, bytes); } return 0; errout: kfree(fname->crypto_buf.name); return ret; } EXPORT_SYMBOL(fscrypt_setup_filename); /** * fscrypt_match_name() - test whether the given name matches a directory entry * @fname: the name being searched for * @de_name: the name from the directory entry * @de_name_len: the length of @de_name in bytes * * Normally @fname->disk_name will be set, and in that case we simply compare * that to the name stored in the directory entry. The only exception is that * if we don't have the key for an encrypted directory and the name we're * looking for is very long, then we won't have the full disk_name and instead * we'll need to match against a fscrypt_nokey_name that includes a strong hash. * * Return: %true if the name matches, otherwise %false. */ bool fscrypt_match_name(const struct fscrypt_name *fname, const u8 *de_name, u32 de_name_len) { const struct fscrypt_nokey_name *nokey_name = (const void *)fname->crypto_buf.name; u8 digest[SHA256_DIGEST_SIZE]; if (likely(fname->disk_name.name)) { if (de_name_len != fname->disk_name.len) return false; return !memcmp(de_name, fname->disk_name.name, de_name_len); } if (de_name_len <= sizeof(nokey_name->bytes)) return false; if (memcmp(de_name, nokey_name->bytes, sizeof(nokey_name->bytes))) return false; sha256(&de_name[sizeof(nokey_name->bytes)], de_name_len - sizeof(nokey_name->bytes), digest); return !memcmp(digest, nokey_name->sha256, sizeof(digest)); } EXPORT_SYMBOL_GPL(fscrypt_match_name); /** * fscrypt_fname_siphash() - calculate the SipHash of a filename * @dir: the parent directory * @name: the filename to calculate the SipHash of * * Given a plaintext filename @name and a directory @dir which uses SipHash as * its dirhash method and has had its fscrypt key set up, this function * calculates the SipHash of that name using the directory's secret dirhash key. * * Return: the SipHash of @name using the hash key of @dir */ u64 fscrypt_fname_siphash(const struct inode *dir, const struct qstr *name) { const struct fscrypt_info *ci = dir->i_crypt_info; WARN_ON_ONCE(!ci->ci_dirhash_key_initialized); return siphash(name->name, name->len, &ci->ci_dirhash_key); } EXPORT_SYMBOL_GPL(fscrypt_fname_siphash); /* * Validate dentries in encrypted directories to make sure we aren't potentially * caching stale dentries after a key has been added. */ int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags) { struct dentry *dir; int err; int valid; /* * Plaintext names are always valid, since fscrypt doesn't support * reverting to no-key names without evicting the directory's inode * -- which implies eviction of the dentries in the directory. */ if (!(dentry->d_flags & DCACHE_NOKEY_NAME)) return 1; /* * No-key name; valid if the directory's key is still unavailable. * * Although fscrypt forbids rename() on no-key names, we still must use * dget_parent() here rather than use ->d_parent directly. That's * because a corrupted fs image may contain directory hard links, which * the VFS handles by moving the directory's dentry tree in the dcache * each time ->lookup() finds the directory and it already has a dentry * elsewhere. Thus ->d_parent can be changing, and we must safely grab * a reference to some ->d_parent to prevent it from being freed. */ if (flags & LOOKUP_RCU) return -ECHILD; dir = dget_parent(dentry); /* * Pass allow_unsupported=true, so that files with an unsupported * encryption policy can be deleted. 
*/ err = fscrypt_get_encryption_info(d_inode(dir), true); valid = !fscrypt_has_encryption_key(d_inode(dir)); dput(dir); if (err < 0) return err; return valid; } EXPORT_SYMBOL_GPL(fscrypt_d_revalidate);
linux-master
fs/crypto/fname.c
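The length calculations in fname.c above are easy to check with ordinary arithmetic. The sketch below is illustrative standalone code rather than kernel code: it mirrors the padding logic of __fscrypt_fname_encrypted_size() and the FSCRYPT_BASE64URL_CHARS() sizing used for no-key names, and its function names are hypothetical.

#include <stdint.h>

/* Mirrors __fscrypt_fname_encrypted_size(): names are padded up to at least
 * FSCRYPT_FNAME_MIN_MSG_LEN (16) bytes, then rounded up to the policy's
 * padding amount (4, 8, 16 or 32 bytes), capped at max_len. */
static uint32_t example_encrypted_name_len(uint32_t orig_len, uint32_t padding,
					   uint32_t max_len)
{
	uint32_t len = orig_len < 16 ? 16 : orig_len;

	len = (len + padding - 1) / padding * padding;	/* round_up(len, padding) */
	return len < max_len ? len : max_len;
}

/* Mirrors FSCRYPT_BASE64URL_CHARS(): every 3 input bytes become 4 output
 * characters, with no '=' padding appended. */
static uint32_t example_base64url_chars(uint32_t nbytes)
{
	return (nbytes * 4 + 2) / 3;
}

For example, a 5-character name with 32-byte padding encrypts to 32 bytes, and the 189-byte maximum no-key name encodes to example_base64url_chars(189) == 252 characters, matching the "189 bytes => 252 bytes base64url-encoded" comment on struct fscrypt_nokey_name.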
// SPDX-License-Identifier: GPL-2.0 /* * Inline encryption support for fscrypt * * Copyright 2019 Google LLC */ /* * With "inline encryption", the block layer handles the decryption/encryption * as part of the bio, instead of the filesystem doing the crypto itself via * crypto API. See Documentation/block/inline-encryption.rst. fscrypt still * provides the key and IV to use. */ #include <linux/blk-crypto.h> #include <linux/blkdev.h> #include <linux/buffer_head.h> #include <linux/sched/mm.h> #include <linux/slab.h> #include <linux/uio.h> #include "fscrypt_private.h" static struct block_device **fscrypt_get_devices(struct super_block *sb, unsigned int *num_devs) { struct block_device **devs; if (sb->s_cop->get_devices) { devs = sb->s_cop->get_devices(sb, num_devs); if (devs) return devs; } devs = kmalloc(sizeof(*devs), GFP_KERNEL); if (!devs) return ERR_PTR(-ENOMEM); devs[0] = sb->s_bdev; *num_devs = 1; return devs; } static unsigned int fscrypt_get_dun_bytes(const struct fscrypt_info *ci) { struct super_block *sb = ci->ci_inode->i_sb; unsigned int flags = fscrypt_policy_flags(&ci->ci_policy); int ino_bits = 64, lblk_bits = 64; if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) return offsetofend(union fscrypt_iv, nonce); if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) return sizeof(__le64); if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) return sizeof(__le32); /* Default case: IVs are just the file logical block number */ if (sb->s_cop->get_ino_and_lblk_bits) sb->s_cop->get_ino_and_lblk_bits(sb, &ino_bits, &lblk_bits); return DIV_ROUND_UP(lblk_bits, 8); } /* * Log a message when starting to use blk-crypto (native) or blk-crypto-fallback * for an encryption mode for the first time. This is the blk-crypto * counterpart to the message logged when starting to use the crypto API for the * first time. A limitation is that these messages don't convey which specific * filesystems or files are using each implementation. However, *usually* * systems use just one implementation per mode, which makes these messages * helpful for debugging problems where the "wrong" implementation is used. */ static void fscrypt_log_blk_crypto_impl(struct fscrypt_mode *mode, struct block_device **devs, unsigned int num_devs, const struct blk_crypto_config *cfg) { unsigned int i; for (i = 0; i < num_devs; i++) { if (!IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) || blk_crypto_config_supported_natively(devs[i], cfg)) { if (!xchg(&mode->logged_blk_crypto_native, 1)) pr_info("fscrypt: %s using blk-crypto (native)\n", mode->friendly_name); } else if (!xchg(&mode->logged_blk_crypto_fallback, 1)) { pr_info("fscrypt: %s using blk-crypto-fallback\n", mode->friendly_name); } } } /* Enable inline encryption for this file if supported. */ int fscrypt_select_encryption_impl(struct fscrypt_info *ci) { const struct inode *inode = ci->ci_inode; struct super_block *sb = inode->i_sb; struct blk_crypto_config crypto_cfg; struct block_device **devs; unsigned int num_devs; unsigned int i; /* The file must need contents encryption, not filenames encryption */ if (!S_ISREG(inode->i_mode)) return 0; /* The crypto mode must have a blk-crypto counterpart */ if (ci->ci_mode->blk_crypto_mode == BLK_ENCRYPTION_MODE_INVALID) return 0; /* The filesystem must be mounted with -o inlinecrypt */ if (!(sb->s_flags & SB_INLINECRYPT)) return 0; /* * When a page contains multiple logically contiguous filesystem blocks, * some filesystem code only calls fscrypt_mergeable_bio() for the first * block in the page. 
This is fine for most of fscrypt's IV generation * strategies, where contiguous blocks imply contiguous IVs. But it * doesn't work with IV_INO_LBLK_32. For now, simply exclude * IV_INO_LBLK_32 with blocksize != PAGE_SIZE from inline encryption. */ if ((fscrypt_policy_flags(&ci->ci_policy) & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) && sb->s_blocksize != PAGE_SIZE) return 0; /* * On all the filesystem's block devices, blk-crypto must support the * crypto configuration that the file would use. */ crypto_cfg.crypto_mode = ci->ci_mode->blk_crypto_mode; crypto_cfg.data_unit_size = sb->s_blocksize; crypto_cfg.dun_bytes = fscrypt_get_dun_bytes(ci); devs = fscrypt_get_devices(sb, &num_devs); if (IS_ERR(devs)) return PTR_ERR(devs); for (i = 0; i < num_devs; i++) { if (!blk_crypto_config_supported(devs[i], &crypto_cfg)) goto out_free_devs; } fscrypt_log_blk_crypto_impl(ci->ci_mode, devs, num_devs, &crypto_cfg); ci->ci_inlinecrypt = true; out_free_devs: kfree(devs); return 0; } int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key, const u8 *raw_key, const struct fscrypt_info *ci) { const struct inode *inode = ci->ci_inode; struct super_block *sb = inode->i_sb; enum blk_crypto_mode_num crypto_mode = ci->ci_mode->blk_crypto_mode; struct blk_crypto_key *blk_key; struct block_device **devs; unsigned int num_devs; unsigned int i; int err; blk_key = kmalloc(sizeof(*blk_key), GFP_KERNEL); if (!blk_key) return -ENOMEM; err = blk_crypto_init_key(blk_key, raw_key, crypto_mode, fscrypt_get_dun_bytes(ci), sb->s_blocksize); if (err) { fscrypt_err(inode, "error %d initializing blk-crypto key", err); goto fail; } /* Start using blk-crypto on all the filesystem's block devices. */ devs = fscrypt_get_devices(sb, &num_devs); if (IS_ERR(devs)) { err = PTR_ERR(devs); goto fail; } for (i = 0; i < num_devs; i++) { err = blk_crypto_start_using_key(devs[i], blk_key); if (err) break; } kfree(devs); if (err) { fscrypt_err(inode, "error %d starting to use blk-crypto", err); goto fail; } /* * Pairs with the smp_load_acquire() in fscrypt_is_key_prepared(). * I.e., here we publish ->blk_key with a RELEASE barrier so that * concurrent tasks can ACQUIRE it. Note that this concurrency is only * possible for per-mode keys, not for per-file keys. */ smp_store_release(&prep_key->blk_key, blk_key); return 0; fail: kfree_sensitive(blk_key); return err; } void fscrypt_destroy_inline_crypt_key(struct super_block *sb, struct fscrypt_prepared_key *prep_key) { struct blk_crypto_key *blk_key = prep_key->blk_key; struct block_device **devs; unsigned int num_devs; unsigned int i; if (!blk_key) return; /* Evict the key from all the filesystem's block devices. 
*/ devs = fscrypt_get_devices(sb, &num_devs); if (!IS_ERR(devs)) { for (i = 0; i < num_devs; i++) blk_crypto_evict_key(devs[i], blk_key); kfree(devs); } kfree_sensitive(blk_key); } bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode) { return inode->i_crypt_info->ci_inlinecrypt; } EXPORT_SYMBOL_GPL(__fscrypt_inode_uses_inline_crypto); static void fscrypt_generate_dun(const struct fscrypt_info *ci, u64 lblk_num, u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE]) { union fscrypt_iv iv; int i; fscrypt_generate_iv(&iv, lblk_num, ci); BUILD_BUG_ON(FSCRYPT_MAX_IV_SIZE > BLK_CRYPTO_MAX_IV_SIZE); memset(dun, 0, BLK_CRYPTO_MAX_IV_SIZE); for (i = 0; i < ci->ci_mode->ivsize/sizeof(dun[0]); i++) dun[i] = le64_to_cpu(iv.dun[i]); } /** * fscrypt_set_bio_crypt_ctx() - prepare a file contents bio for inline crypto * @bio: a bio which will eventually be submitted to the file * @inode: the file's inode * @first_lblk: the first file logical block number in the I/O * @gfp_mask: memory allocation flags - these must be a waiting mask so that * bio_crypt_set_ctx can't fail. * * If the contents of the file should be encrypted (or decrypted) with inline * encryption, then assign the appropriate encryption context to the bio. * * Normally the bio should be newly allocated (i.e. no pages added yet), as * otherwise fscrypt_mergeable_bio() won't work as intended. * * The encryption context will be freed automatically when the bio is freed. */ void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode, u64 first_lblk, gfp_t gfp_mask) { const struct fscrypt_info *ci; u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE]; if (!fscrypt_inode_uses_inline_crypto(inode)) return; ci = inode->i_crypt_info; fscrypt_generate_dun(ci, first_lblk, dun); bio_crypt_set_ctx(bio, ci->ci_enc_key.blk_key, dun, gfp_mask); } EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx); /* Extract the inode and logical block number from a buffer_head. */ static bool bh_get_inode_and_lblk_num(const struct buffer_head *bh, const struct inode **inode_ret, u64 *lblk_num_ret) { struct page *page = bh->b_page; const struct address_space *mapping; const struct inode *inode; /* * The ext4 journal (jbd2) can submit a buffer_head it directly created * for a non-pagecache page. fscrypt doesn't care about these. */ mapping = page_mapping(page); if (!mapping) return false; inode = mapping->host; *inode_ret = inode; *lblk_num_ret = ((u64)page->index << (PAGE_SHIFT - inode->i_blkbits)) + (bh_offset(bh) >> inode->i_blkbits); return true; } /** * fscrypt_set_bio_crypt_ctx_bh() - prepare a file contents bio for inline * crypto * @bio: a bio which will eventually be submitted to the file * @first_bh: the first buffer_head for which I/O will be submitted * @gfp_mask: memory allocation flags * * Same as fscrypt_set_bio_crypt_ctx(), except this takes a buffer_head instead * of an inode and block number directly. 
*/ void fscrypt_set_bio_crypt_ctx_bh(struct bio *bio, const struct buffer_head *first_bh, gfp_t gfp_mask) { const struct inode *inode; u64 first_lblk; if (bh_get_inode_and_lblk_num(first_bh, &inode, &first_lblk)) fscrypt_set_bio_crypt_ctx(bio, inode, first_lblk, gfp_mask); } EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx_bh); /** * fscrypt_mergeable_bio() - test whether data can be added to a bio * @bio: the bio being built up * @inode: the inode for the next part of the I/O * @next_lblk: the next file logical block number in the I/O * * When building a bio which may contain data which should undergo inline * encryption (or decryption) via fscrypt, filesystems should call this function * to ensure that the resulting bio contains only contiguous data unit numbers. * This will return false if the next part of the I/O cannot be merged with the * bio because either the encryption key would be different or the encryption * data unit numbers would be discontiguous. * * fscrypt_set_bio_crypt_ctx() must have already been called on the bio. * * This function isn't required in cases where crypto-mergeability is ensured in * another way, such as I/O targeting only a single file (and thus a single key) * combined with fscrypt_limit_io_blocks() to ensure DUN contiguity. * * Return: true iff the I/O is mergeable */ bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode, u64 next_lblk) { const struct bio_crypt_ctx *bc = bio->bi_crypt_context; u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE]; if (!!bc != fscrypt_inode_uses_inline_crypto(inode)) return false; if (!bc) return true; /* * Comparing the key pointers is good enough, as all I/O for each key * uses the same pointer. I.e., there's currently no need to support * merging requests where the keys are the same but the pointers differ. */ if (bc->bc_key != inode->i_crypt_info->ci_enc_key.blk_key) return false; fscrypt_generate_dun(inode->i_crypt_info, next_lblk, next_dun); return bio_crypt_dun_is_contiguous(bc, bio->bi_iter.bi_size, next_dun); } EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio); /** * fscrypt_mergeable_bio_bh() - test whether data can be added to a bio * @bio: the bio being built up * @next_bh: the next buffer_head for which I/O will be submitted * * Same as fscrypt_mergeable_bio(), except this takes a buffer_head instead of * an inode and block number directly. * * Return: true iff the I/O is mergeable */ bool fscrypt_mergeable_bio_bh(struct bio *bio, const struct buffer_head *next_bh) { const struct inode *inode; u64 next_lblk; if (!bh_get_inode_and_lblk_num(next_bh, &inode, &next_lblk)) return !bio->bi_crypt_context; return fscrypt_mergeable_bio(bio, inode, next_lblk); } EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio_bh); /** * fscrypt_dio_supported() - check whether DIO (direct I/O) is supported on an * inode, as far as encryption is concerned * @inode: the inode in question * * Return: %true if there are no encryption constraints that prevent DIO from * being supported; %false if DIO is unsupported. (Note that in the * %true case, the filesystem might have other, non-encryption-related * constraints that prevent DIO from actually being supported. Also, on * encrypted files the filesystem is still responsible for only allowing * DIO when requests are filesystem-block-aligned.) */ bool fscrypt_dio_supported(struct inode *inode) { int err; /* If the file is unencrypted, no veto from us. */ if (!fscrypt_needs_contents_encryption(inode)) return true; /* * We only support DIO with inline crypto, not fs-layer crypto. 
* * To determine whether the inode is using inline crypto, we have to set * up the key if it wasn't already done. This is because in the current * design of fscrypt, the decision of whether to use inline crypto or * not isn't made until the inode's encryption key is being set up. In * the DIO read/write case, the key will always be set up already, since * the file will be open. But in the case of statx(), the key might not * be set up yet, as the file might not have been opened yet. */ err = fscrypt_require_key(inode); if (err) { /* * Key unavailable or couldn't be set up. This edge case isn't * worth worrying about; just report that DIO is unsupported. */ return false; } return fscrypt_inode_uses_inline_crypto(inode); } EXPORT_SYMBOL_GPL(fscrypt_dio_supported); /** * fscrypt_limit_io_blocks() - limit I/O blocks to avoid discontiguous DUNs * @inode: the file on which I/O is being done * @lblk: the block at which the I/O is being started from * @nr_blocks: the number of blocks we want to submit starting at @lblk * * Determine the limit to the number of blocks that can be submitted in a bio * targeting @lblk without causing a data unit number (DUN) discontiguity. * * This is normally just @nr_blocks, as normally the DUNs just increment along * with the logical blocks. (Or the file is not encrypted.) * * In rare cases, fscrypt can be using an IV generation method that allows the * DUN to wrap around within logically contiguous blocks, and that wraparound * will occur. If this happens, a value less than @nr_blocks will be returned * so that the wraparound doesn't occur in the middle of a bio, which would * cause encryption/decryption to produce wrong results. * * Return: the actual number of blocks that can be submitted */ u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk, u64 nr_blocks) { const struct fscrypt_info *ci; u32 dun; if (!fscrypt_inode_uses_inline_crypto(inode)) return nr_blocks; if (nr_blocks <= 1) return nr_blocks; ci = inode->i_crypt_info; if (!(fscrypt_policy_flags(&ci->ci_policy) & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32)) return nr_blocks; /* With IV_INO_LBLK_32, the DUN can wrap around from U32_MAX to 0. */ dun = ci->ci_hashed_ino + lblk; return min_t(u64, nr_blocks, (u64)U32_MAX + 1 - dun); } EXPORT_SYMBOL_GPL(fscrypt_limit_io_blocks);
linux-master
fs/crypto/inline_crypt.c
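The only non-obvious arithmetic in inline_crypt.c above is the IV_INO_LBLK_32 wraparound handling in fscrypt_limit_io_blocks(). Below is a small illustrative sketch, not kernel code, that reproduces that calculation so the limit is easy to reason about; the function name and parameters are hypothetical stand-ins for ci->ci_hashed_ino, the starting logical block, and the requested block count.

#include <stdint.h>

/* With IV_INO_LBLK_32, the data unit number is the low 32 bits of
 * (hashed inode number + logical block number), so it can wrap from
 * UINT32_MAX back to 0 within one logically contiguous extent.  A bio
 * must not span that wraparound point. */
static uint64_t example_limit_io_blocks(uint32_t hashed_ino, uint64_t lblk,
					uint64_t nr_blocks)
{
	uint32_t dun = hashed_ino + (uint32_t)lblk;
	uint64_t until_wrap = (uint64_t)UINT32_MAX + 1 - dun;

	/* Submit at most as many blocks as fit before the DUN wraps. */
	return nr_blocks < until_wrap ? nr_blocks : until_wrap;
}

If dun happens to already be UINT32_MAX, only a single block can go into the current bio; the next bio then starts over at DUN 0, which keeps each bio's data unit numbers contiguous.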