Dataset columns:
  python_code : string (length 0 to 1.8M)
  repo_name   : string (7 distinct values)
  file_path   : string (length 5 to 99)
// SPDX-License-Identifier: GPL-2.0 /* * Implements pstore backend driver that write to block (or non-block) storage * devices, using the pstore/zone API. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/blkdev.h> #include <linux/string.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/platform_device.h> #include <linux/pstore_blk.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/init_syscalls.h> #include <linux/mount.h> static long kmsg_size = CONFIG_PSTORE_BLK_KMSG_SIZE; module_param(kmsg_size, long, 0400); MODULE_PARM_DESC(kmsg_size, "kmsg dump record size in kbytes"); static int max_reason = CONFIG_PSTORE_BLK_MAX_REASON; module_param(max_reason, int, 0400); MODULE_PARM_DESC(max_reason, "maximum reason for kmsg dump (default 2: Oops and Panic)"); #if IS_ENABLED(CONFIG_PSTORE_PMSG) static long pmsg_size = CONFIG_PSTORE_BLK_PMSG_SIZE; #else static long pmsg_size = -1; #endif module_param(pmsg_size, long, 0400); MODULE_PARM_DESC(pmsg_size, "pmsg size in kbytes"); #if IS_ENABLED(CONFIG_PSTORE_CONSOLE) static long console_size = CONFIG_PSTORE_BLK_CONSOLE_SIZE; #else static long console_size = -1; #endif module_param(console_size, long, 0400); MODULE_PARM_DESC(console_size, "console size in kbytes"); #if IS_ENABLED(CONFIG_PSTORE_FTRACE) static long ftrace_size = CONFIG_PSTORE_BLK_FTRACE_SIZE; #else static long ftrace_size = -1; #endif module_param(ftrace_size, long, 0400); MODULE_PARM_DESC(ftrace_size, "ftrace size in kbytes"); static bool best_effort; module_param(best_effort, bool, 0400); MODULE_PARM_DESC(best_effort, "use best effort to write (i.e. do not require storage driver pstore support, default: off)"); /* * blkdev - the block device to use for pstore storage * See Documentation/admin-guide/pstore-blk.rst for details. */ static char blkdev[80] = CONFIG_PSTORE_BLK_BLKDEV; module_param_string(blkdev, blkdev, 80, 0400); MODULE_PARM_DESC(blkdev, "block device for pstore storage"); /* * All globals must only be accessed under the pstore_blk_lock * during the register/unregister functions. */ static DEFINE_MUTEX(pstore_blk_lock); static struct file *psblk_file; static struct pstore_device_info *pstore_device_info; #define check_size(name, alignsize) ({ \ long _##name_ = (name); \ _##name_ = _##name_ <= 0 ? 0 : (_##name_ * 1024); \ if (_##name_ & ((alignsize) - 1)) { \ pr_info(#name " must align to %d\n", \ (alignsize)); \ _##name_ = ALIGN(name, (alignsize)); \ } \ _##name_; \ }) #define verify_size(name, alignsize, enabled) { \ long _##name_; \ if (enabled) \ _##name_ = check_size(name, alignsize); \ else \ _##name_ = 0; \ /* Synchronize module parameters with resuls. */ \ name = _##name_ / 1024; \ dev->zone.name = _##name_; \ } static int __register_pstore_device(struct pstore_device_info *dev) { int ret; lockdep_assert_held(&pstore_blk_lock); if (!dev) { pr_err("NULL device info\n"); return -EINVAL; } if (!dev->zone.total_size) { pr_err("zero sized device\n"); return -EINVAL; } if (!dev->zone.read) { pr_err("no read handler for device\n"); return -EINVAL; } if (!dev->zone.write) { pr_err("no write handler for device\n"); return -EINVAL; } /* someone already registered before */ if (pstore_device_info) return -EBUSY; /* zero means not limit on which backends to attempt to store. */ if (!dev->flags) dev->flags = UINT_MAX; /* Copy in module parameters. 
*/ verify_size(kmsg_size, 4096, dev->flags & PSTORE_FLAGS_DMESG); verify_size(pmsg_size, 4096, dev->flags & PSTORE_FLAGS_PMSG); verify_size(console_size, 4096, dev->flags & PSTORE_FLAGS_CONSOLE); verify_size(ftrace_size, 4096, dev->flags & PSTORE_FLAGS_FTRACE); dev->zone.max_reason = max_reason; /* Initialize required zone ownership details. */ dev->zone.name = KBUILD_MODNAME; dev->zone.owner = THIS_MODULE; ret = register_pstore_zone(&dev->zone); if (ret == 0) pstore_device_info = dev; return ret; } /** * register_pstore_device() - register non-block device to pstore/blk * * @dev: non-block device information * * Return: * * 0 - OK * * Others - something error. */ int register_pstore_device(struct pstore_device_info *dev) { int ret; mutex_lock(&pstore_blk_lock); ret = __register_pstore_device(dev); mutex_unlock(&pstore_blk_lock); return ret; } EXPORT_SYMBOL_GPL(register_pstore_device); static void __unregister_pstore_device(struct pstore_device_info *dev) { lockdep_assert_held(&pstore_blk_lock); if (pstore_device_info && pstore_device_info == dev) { unregister_pstore_zone(&dev->zone); pstore_device_info = NULL; } } /** * unregister_pstore_device() - unregister non-block device from pstore/blk * * @dev: non-block device information */ void unregister_pstore_device(struct pstore_device_info *dev) { mutex_lock(&pstore_blk_lock); __unregister_pstore_device(dev); mutex_unlock(&pstore_blk_lock); } EXPORT_SYMBOL_GPL(unregister_pstore_device); static ssize_t psblk_generic_blk_read(char *buf, size_t bytes, loff_t pos) { return kernel_read(psblk_file, buf, bytes, &pos); } static ssize_t psblk_generic_blk_write(const char *buf, size_t bytes, loff_t pos) { /* Console/Ftrace backend may handle buffer until flush dirty zones */ if (in_interrupt() || irqs_disabled()) return -EBUSY; return kernel_write(psblk_file, buf, bytes, &pos); } /* * This takes its configuration only from the module parameters now. */ static int __register_pstore_blk(struct pstore_device_info *dev, const char *devpath) { int ret = -ENODEV; lockdep_assert_held(&pstore_blk_lock); psblk_file = filp_open(devpath, O_RDWR | O_DSYNC | O_NOATIME | O_EXCL, 0); if (IS_ERR(psblk_file)) { ret = PTR_ERR(psblk_file); pr_err("failed to open '%s': %d!\n", devpath, ret); goto err; } if (!S_ISBLK(file_inode(psblk_file)->i_mode)) { pr_err("'%s' is not block device!\n", devpath); goto err_fput; } dev->zone.total_size = bdev_nr_bytes(I_BDEV(psblk_file->f_mapping->host)); ret = __register_pstore_device(dev); if (ret) goto err_fput; return 0; err_fput: fput(psblk_file); err: psblk_file = NULL; return ret; } /* get information of pstore/blk */ int pstore_blk_get_config(struct pstore_blk_config *info) { strncpy(info->device, blkdev, 80); info->max_reason = max_reason; info->kmsg_size = check_size(kmsg_size, 4096); info->pmsg_size = check_size(pmsg_size, 4096); info->ftrace_size = check_size(ftrace_size, 4096); info->console_size = check_size(console_size, 4096); return 0; } EXPORT_SYMBOL_GPL(pstore_blk_get_config); #ifndef MODULE static const char devname[] = "/dev/pstore-blk"; static __init const char *early_boot_devpath(const char *initial_devname) { /* * During early boot the real root file system hasn't been * mounted yet, and no device nodes are present yet. Use the * same scheme to find the device that we use for mounting * the root file system. 
*/ dev_t dev; if (early_lookup_bdev(initial_devname, &dev)) { pr_err("failed to resolve '%s'!\n", initial_devname); return initial_devname; } init_unlink(devname); init_mknod(devname, S_IFBLK | 0600, new_encode_dev(dev)); return devname; } #else static inline const char *early_boot_devpath(const char *initial_devname) { return initial_devname; } #endif static int __init __best_effort_init(void) { struct pstore_device_info *best_effort_dev; int ret; /* No best-effort mode requested. */ if (!best_effort) return 0; /* Reject an empty blkdev. */ if (!blkdev[0]) { pr_err("blkdev empty with best_effort=Y\n"); return -EINVAL; } best_effort_dev = kzalloc(sizeof(*best_effort_dev), GFP_KERNEL); if (!best_effort_dev) return -ENOMEM; best_effort_dev->zone.read = psblk_generic_blk_read; best_effort_dev->zone.write = psblk_generic_blk_write; ret = __register_pstore_blk(best_effort_dev, early_boot_devpath(blkdev)); if (ret) kfree(best_effort_dev); else pr_info("attached %s (%lu) (no dedicated panic_write!)\n", blkdev, best_effort_dev->zone.total_size); return ret; } static void __exit __best_effort_exit(void) { /* * Currently, the only user of psblk_file is best_effort, so * we can assume that pstore_device_info is associated with it. * Once there are "real" blk devices, there will need to be a * dedicated pstore_blk_info, etc. */ if (psblk_file) { struct pstore_device_info *dev = pstore_device_info; __unregister_pstore_device(dev); kfree(dev); fput(psblk_file); psblk_file = NULL; } } static int __init pstore_blk_init(void) { int ret; mutex_lock(&pstore_blk_lock); ret = __best_effort_init(); mutex_unlock(&pstore_blk_lock); return ret; } late_initcall(pstore_blk_init); static void __exit pstore_blk_exit(void) { mutex_lock(&pstore_blk_lock); __best_effort_exit(); /* If we've been asked to unload, unregister any remaining device. */ __unregister_pstore_device(pstore_device_info); mutex_unlock(&pstore_blk_lock); } module_exit(pstore_blk_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("WeiXiong Liao <[email protected]>"); MODULE_AUTHOR("Kees Cook <[email protected]>"); MODULE_DESCRIPTION("pstore backend for block devices");
linux-master
fs/pstore/blk.c
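For context, here is a minimal, hedged sketch of how a non-block storage driver might attach to the API implemented above: it fills in a struct pstore_device_info whose zone read/write callbacks use the same signatures as psblk_generic_blk_read()/psblk_generic_blk_write(), then calls register_pstore_device(). The RAM-backed store, the my_psz_* names, and the choice of PSTORE_FLAGS_DMESG are illustrative assumptions, not taken from the file.

#include <linux/module.h>
#include <linux/pstore_blk.h>
#include <linux/string.h>

/* Hypothetical persistent area standing in for real device storage. */
static char my_store[1024 * 1024];

static ssize_t my_psz_read(char *buf, size_t bytes, loff_t pos)
{
	if (pos < 0 || pos + bytes > sizeof(my_store))
		return -EINVAL;
	memcpy(buf, my_store + pos, bytes);
	return bytes;
}

static ssize_t my_psz_write(const char *buf, size_t bytes, loff_t pos)
{
	if (pos < 0 || pos + bytes > sizeof(my_store))
		return -EINVAL;
	memcpy(my_store + pos, buf, bytes);
	return bytes;
}

static struct pstore_device_info my_psz_dev = {
	.flags = PSTORE_FLAGS_DMESG,	/* only collect kmsg dumps */
	.zone = {
		.read = my_psz_read,
		.write = my_psz_write,
		.total_size = sizeof(my_store),
	},
};

static int __init my_psz_init(void)
{
	/* pstore/blk sizes the kmsg/pmsg/console/ftrace zones itself. */
	return register_pstore_device(&my_psz_dev);
}
module_init(my_psz_init);

static void __exit my_psz_exit(void)
{
	unregister_pstore_device(&my_psz_dev);
}
module_exit(my_psz_exit);

MODULE_LICENSE("GPL");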
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2012 Google, Inc. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/device.h> #include <linux/err.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/memblock.h> #include <linux/rslib.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/vmalloc.h> #include <linux/mm.h> #include <asm/page.h> #include "ram_internal.h" /** * struct persistent_ram_buffer - persistent circular RAM buffer * * @sig: Signature to indicate header (PERSISTENT_RAM_SIG xor PRZ-type value) * @start: First valid byte in the buffer. * @size: Number of valid bytes in the buffer. * @data: The contents of the buffer. */ struct persistent_ram_buffer { uint32_t sig; atomic_t start; atomic_t size; uint8_t data[]; }; #define PERSISTENT_RAM_SIG (0x43474244) /* DBGC */ static inline size_t buffer_size(struct persistent_ram_zone *prz) { return atomic_read(&prz->buffer->size); } static inline size_t buffer_start(struct persistent_ram_zone *prz) { return atomic_read(&prz->buffer->start); } /* increase and wrap the start pointer, returning the old value */ static size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a) { int old; int new; unsigned long flags = 0; if (!(prz->flags & PRZ_FLAG_NO_LOCK)) raw_spin_lock_irqsave(&prz->buffer_lock, flags); old = atomic_read(&prz->buffer->start); new = old + a; while (unlikely(new >= prz->buffer_size)) new -= prz->buffer_size; atomic_set(&prz->buffer->start, new); if (!(prz->flags & PRZ_FLAG_NO_LOCK)) raw_spin_unlock_irqrestore(&prz->buffer_lock, flags); return old; } /* increase the size counter until it hits the max size */ static void buffer_size_add(struct persistent_ram_zone *prz, size_t a) { size_t old; size_t new; unsigned long flags = 0; if (!(prz->flags & PRZ_FLAG_NO_LOCK)) raw_spin_lock_irqsave(&prz->buffer_lock, flags); old = atomic_read(&prz->buffer->size); if (old == prz->buffer_size) goto exit; new = old + a; if (new > prz->buffer_size) new = prz->buffer_size; atomic_set(&prz->buffer->size, new); exit: if (!(prz->flags & PRZ_FLAG_NO_LOCK)) raw_spin_unlock_irqrestore(&prz->buffer_lock, flags); } static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz, uint8_t *data, size_t len, uint8_t *ecc) { int i; /* Initialize the parity buffer */ memset(prz->ecc_info.par, 0, prz->ecc_info.ecc_size * sizeof(prz->ecc_info.par[0])); encode_rs8(prz->rs_decoder, data, len, prz->ecc_info.par, 0); for (i = 0; i < prz->ecc_info.ecc_size; i++) ecc[i] = prz->ecc_info.par[i]; } static int persistent_ram_decode_rs8(struct persistent_ram_zone *prz, void *data, size_t len, uint8_t *ecc) { int i; for (i = 0; i < prz->ecc_info.ecc_size; i++) prz->ecc_info.par[i] = ecc[i]; return decode_rs8(prz->rs_decoder, data, prz->ecc_info.par, len, NULL, 0, NULL, 0, NULL); } static void notrace persistent_ram_update_ecc(struct persistent_ram_zone *prz, unsigned int start, unsigned int count) { struct persistent_ram_buffer *buffer = prz->buffer; uint8_t *buffer_end = buffer->data + prz->buffer_size; uint8_t *block; uint8_t *par; int ecc_block_size = prz->ecc_info.block_size; int ecc_size = prz->ecc_info.ecc_size; int size = ecc_block_size; if (!ecc_size) return; block = buffer->data + (start & ~(ecc_block_size - 1)); par = prz->par_buffer + (start / ecc_block_size) * ecc_size; do { if (block + ecc_block_size > buffer_end) size = buffer_end - block; persistent_ram_encode_rs8(prz, block, size, par); block += 
ecc_block_size; par += ecc_size; } while (block < buffer->data + start + count); } static void persistent_ram_update_header_ecc(struct persistent_ram_zone *prz) { struct persistent_ram_buffer *buffer = prz->buffer; if (!prz->ecc_info.ecc_size) return; persistent_ram_encode_rs8(prz, (uint8_t *)buffer, sizeof(*buffer), prz->par_header); } static void persistent_ram_ecc_old(struct persistent_ram_zone *prz) { struct persistent_ram_buffer *buffer = prz->buffer; uint8_t *block; uint8_t *par; if (!prz->ecc_info.ecc_size) return; block = buffer->data; par = prz->par_buffer; while (block < buffer->data + buffer_size(prz)) { int numerr; int size = prz->ecc_info.block_size; if (block + size > buffer->data + prz->buffer_size) size = buffer->data + prz->buffer_size - block; numerr = persistent_ram_decode_rs8(prz, block, size, par); if (numerr > 0) { pr_devel("error in block %p, %d\n", block, numerr); prz->corrected_bytes += numerr; } else if (numerr < 0) { pr_devel("uncorrectable error in block %p\n", block); prz->bad_blocks++; } block += prz->ecc_info.block_size; par += prz->ecc_info.ecc_size; } } static int persistent_ram_init_ecc(struct persistent_ram_zone *prz, struct persistent_ram_ecc_info *ecc_info) { int numerr; struct persistent_ram_buffer *buffer = prz->buffer; int ecc_blocks; size_t ecc_total; if (!ecc_info || !ecc_info->ecc_size) return 0; prz->ecc_info.block_size = ecc_info->block_size ?: 128; prz->ecc_info.ecc_size = ecc_info->ecc_size ?: 16; prz->ecc_info.symsize = ecc_info->symsize ?: 8; prz->ecc_info.poly = ecc_info->poly ?: 0x11d; ecc_blocks = DIV_ROUND_UP(prz->buffer_size - prz->ecc_info.ecc_size, prz->ecc_info.block_size + prz->ecc_info.ecc_size); ecc_total = (ecc_blocks + 1) * prz->ecc_info.ecc_size; if (ecc_total >= prz->buffer_size) { pr_err("%s: invalid ecc_size %u (total %zu, buffer size %zu)\n", __func__, prz->ecc_info.ecc_size, ecc_total, prz->buffer_size); return -EINVAL; } prz->buffer_size -= ecc_total; prz->par_buffer = buffer->data + prz->buffer_size; prz->par_header = prz->par_buffer + ecc_blocks * prz->ecc_info.ecc_size; /* * first consecutive root is 0 * primitive element to generate roots = 1 */ prz->rs_decoder = init_rs(prz->ecc_info.symsize, prz->ecc_info.poly, 0, 1, prz->ecc_info.ecc_size); if (prz->rs_decoder == NULL) { pr_info("init_rs failed\n"); return -EINVAL; } /* allocate workspace instead of using stack VLA */ prz->ecc_info.par = kmalloc_array(prz->ecc_info.ecc_size, sizeof(*prz->ecc_info.par), GFP_KERNEL); if (!prz->ecc_info.par) { pr_err("cannot allocate ECC parity workspace\n"); return -ENOMEM; } prz->corrected_bytes = 0; prz->bad_blocks = 0; numerr = persistent_ram_decode_rs8(prz, buffer, sizeof(*buffer), prz->par_header); if (numerr > 0) { pr_info("error in header, %d\n", numerr); prz->corrected_bytes += numerr; } else if (numerr < 0) { pr_info_ratelimited("uncorrectable error in header\n"); prz->bad_blocks++; } return 0; } ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz, char *str, size_t len) { ssize_t ret; if (!prz->ecc_info.ecc_size) return 0; if (prz->corrected_bytes || prz->bad_blocks) ret = snprintf(str, len, "" "\nECC: %d Corrected bytes, %d unrecoverable blocks\n", prz->corrected_bytes, prz->bad_blocks); else ret = snprintf(str, len, "\nECC: No errors detected\n"); return ret; } static void notrace persistent_ram_update(struct persistent_ram_zone *prz, const void *s, unsigned int start, unsigned int count) { struct persistent_ram_buffer *buffer = prz->buffer; memcpy_toio(buffer->data + start, s, count); 
persistent_ram_update_ecc(prz, start, count); } static int notrace persistent_ram_update_user(struct persistent_ram_zone *prz, const void __user *s, unsigned int start, unsigned int count) { struct persistent_ram_buffer *buffer = prz->buffer; int ret = unlikely(copy_from_user(buffer->data + start, s, count)) ? -EFAULT : 0; persistent_ram_update_ecc(prz, start, count); return ret; } void persistent_ram_save_old(struct persistent_ram_zone *prz) { struct persistent_ram_buffer *buffer = prz->buffer; size_t size = buffer_size(prz); size_t start = buffer_start(prz); if (!size) return; if (!prz->old_log) { persistent_ram_ecc_old(prz); prz->old_log = kvzalloc(size, GFP_KERNEL); } if (!prz->old_log) { pr_err("failed to allocate buffer\n"); return; } prz->old_log_size = size; memcpy_fromio(prz->old_log, &buffer->data[start], size - start); memcpy_fromio(prz->old_log + size - start, &buffer->data[0], start); } int notrace persistent_ram_write(struct persistent_ram_zone *prz, const void *s, unsigned int count) { int rem; int c = count; size_t start; if (unlikely(c > prz->buffer_size)) { s += c - prz->buffer_size; c = prz->buffer_size; } buffer_size_add(prz, c); start = buffer_start_add(prz, c); rem = prz->buffer_size - start; if (unlikely(rem < c)) { persistent_ram_update(prz, s, start, rem); s += rem; c -= rem; start = 0; } persistent_ram_update(prz, s, start, c); persistent_ram_update_header_ecc(prz); return count; } int notrace persistent_ram_write_user(struct persistent_ram_zone *prz, const void __user *s, unsigned int count) { int rem, ret = 0, c = count; size_t start; if (unlikely(c > prz->buffer_size)) { s += c - prz->buffer_size; c = prz->buffer_size; } buffer_size_add(prz, c); start = buffer_start_add(prz, c); rem = prz->buffer_size - start; if (unlikely(rem < c)) { ret = persistent_ram_update_user(prz, s, start, rem); s += rem; c -= rem; start = 0; } if (likely(!ret)) ret = persistent_ram_update_user(prz, s, start, c); persistent_ram_update_header_ecc(prz); return unlikely(ret) ? 
ret : count; } size_t persistent_ram_old_size(struct persistent_ram_zone *prz) { return prz->old_log_size; } void *persistent_ram_old(struct persistent_ram_zone *prz) { return prz->old_log; } void persistent_ram_free_old(struct persistent_ram_zone *prz) { kvfree(prz->old_log); prz->old_log = NULL; prz->old_log_size = 0; } void persistent_ram_zap(struct persistent_ram_zone *prz) { atomic_set(&prz->buffer->start, 0); atomic_set(&prz->buffer->size, 0); persistent_ram_update_header_ecc(prz); } #define MEM_TYPE_WCOMBINE 0 #define MEM_TYPE_NONCACHED 1 #define MEM_TYPE_NORMAL 2 static void *persistent_ram_vmap(phys_addr_t start, size_t size, unsigned int memtype) { struct page **pages; phys_addr_t page_start; unsigned int page_count; pgprot_t prot; unsigned int i; void *vaddr; page_start = start - offset_in_page(start); page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE); switch (memtype) { case MEM_TYPE_NORMAL: prot = PAGE_KERNEL; break; case MEM_TYPE_NONCACHED: prot = pgprot_noncached(PAGE_KERNEL); break; case MEM_TYPE_WCOMBINE: prot = pgprot_writecombine(PAGE_KERNEL); break; default: pr_err("invalid mem_type=%d\n", memtype); return NULL; } pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL); if (!pages) { pr_err("%s: Failed to allocate array for %u pages\n", __func__, page_count); return NULL; } for (i = 0; i < page_count; i++) { phys_addr_t addr = page_start + i * PAGE_SIZE; pages[i] = pfn_to_page(addr >> PAGE_SHIFT); } /* * VM_IOREMAP used here to bypass this region during vread() * and kmap_atomic() (i.e. kcore) to avoid __va() failures. */ vaddr = vmap(pages, page_count, VM_MAP | VM_IOREMAP, prot); kfree(pages); /* * Since vmap() uses page granularity, we must add the offset * into the page here, to get the byte granularity address * into the mapping to represent the actual "start" location. */ return vaddr + offset_in_page(start); } static void *persistent_ram_iomap(phys_addr_t start, size_t size, unsigned int memtype, char *label) { void *va; if (!request_mem_region(start, size, label ?: "ramoops")) { pr_err("request mem region (%s 0x%llx@0x%llx) failed\n", label ?: "ramoops", (unsigned long long)size, (unsigned long long)start); return NULL; } if (memtype) va = ioremap(start, size); else va = ioremap_wc(start, size); /* * Since request_mem_region() and ioremap() are byte-granularity * there is no need handle anything special like we do when the * vmap() case in persistent_ram_vmap() above. 
*/ return va; } static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size, struct persistent_ram_zone *prz, int memtype) { prz->paddr = start; prz->size = size; if (pfn_valid(start >> PAGE_SHIFT)) prz->vaddr = persistent_ram_vmap(start, size, memtype); else prz->vaddr = persistent_ram_iomap(start, size, memtype, prz->label); if (!prz->vaddr) { pr_err("%s: Failed to map 0x%llx pages at 0x%llx\n", __func__, (unsigned long long)size, (unsigned long long)start); return -ENOMEM; } prz->buffer = prz->vaddr; prz->buffer_size = size - sizeof(struct persistent_ram_buffer); return 0; } static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig, struct persistent_ram_ecc_info *ecc_info) { int ret; bool zap = !!(prz->flags & PRZ_FLAG_ZAP_OLD); ret = persistent_ram_init_ecc(prz, ecc_info); if (ret) { pr_warn("ECC failed %s\n", prz->label); return ret; } sig ^= PERSISTENT_RAM_SIG; if (prz->buffer->sig == sig) { if (buffer_size(prz) == 0 && buffer_start(prz) == 0) { pr_debug("found existing empty buffer\n"); return 0; } if (buffer_size(prz) > prz->buffer_size || buffer_start(prz) > buffer_size(prz)) { pr_info("found existing invalid buffer, size %zu, start %zu\n", buffer_size(prz), buffer_start(prz)); zap = true; } else { pr_debug("found existing buffer, size %zu, start %zu\n", buffer_size(prz), buffer_start(prz)); persistent_ram_save_old(prz); } } else { pr_debug("no valid data in buffer (sig = 0x%08x)\n", prz->buffer->sig); prz->buffer->sig = sig; zap = true; } /* Reset missing, invalid, or single-use memory area. */ if (zap) persistent_ram_zap(prz); return 0; } void persistent_ram_free(struct persistent_ram_zone **_prz) { struct persistent_ram_zone *prz; if (!_prz) return; prz = *_prz; if (!prz) return; if (prz->vaddr) { if (pfn_valid(prz->paddr >> PAGE_SHIFT)) { /* We must vunmap() at page-granularity. */ vunmap(prz->vaddr - offset_in_page(prz->paddr)); } else { iounmap(prz->vaddr); release_mem_region(prz->paddr, prz->size); } prz->vaddr = NULL; } if (prz->rs_decoder) { free_rs(prz->rs_decoder); prz->rs_decoder = NULL; } kfree(prz->ecc_info.par); prz->ecc_info.par = NULL; persistent_ram_free_old(prz); kfree(prz->label); kfree(prz); *_prz = NULL; } struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size, u32 sig, struct persistent_ram_ecc_info *ecc_info, unsigned int memtype, u32 flags, char *label) { struct persistent_ram_zone *prz; int ret = -ENOMEM; prz = kzalloc(sizeof(struct persistent_ram_zone), GFP_KERNEL); if (!prz) { pr_err("failed to allocate persistent ram zone\n"); goto err; } /* Initialize general buffer state. */ raw_spin_lock_init(&prz->buffer_lock); prz->flags = flags; prz->label = kstrdup(label, GFP_KERNEL); if (!prz->label) goto err; ret = persistent_ram_buffer_map(start, size, prz, memtype); if (ret) goto err; ret = persistent_ram_post_init(prz, sig, ecc_info); if (ret) goto err; pr_debug("attached %s 0x%zx@0x%llx: %zu header, %zu data, %zu ecc (%d/%d)\n", prz->label, prz->size, (unsigned long long)prz->paddr, sizeof(*prz->buffer), prz->buffer_size, prz->size - sizeof(*prz->buffer) - prz->buffer_size, prz->ecc_info.ecc_size, prz->ecc_info.block_size); return prz; err: persistent_ram_free(&prz); return ERR_PTR(ret); }
linux-master
fs/pstore/ram_core.c
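A hedged usage sketch of the zone API defined above, loosely mirroring how the ramoops front end (fs/pstore/ram.c) drives it: map a reserved region, pick up whatever survived the previous boot, then append new records. The physical address, size, signature, label, and the example_attach_zone() wrapper are invented for illustration; the prototypes and the MEM_TYPE_* meaning of the memtype argument come from this file, and "ram_internal.h" is private to fs/pstore.

#include <linux/err.h>
#include <linux/printk.h>
#include "ram_internal.h"	/* internal to fs/pstore */

static int example_attach_zone(void)
{
	/* 16 bytes of Reed-Solomon ECC per 128-byte block (the defaults above). */
	struct persistent_ram_ecc_info ecc = { .ecc_size = 16 };
	struct persistent_ram_zone *prz;

	/* Hypothetical reserved region; sig is XORed with PERSISTENT_RAM_SIG. */
	prz = persistent_ram_new(0x8f000000, 0x20000, 0, &ecc,
				 0 /* MEM_TYPE_WCOMBINE */, 0, "example");
	if (IS_ERR(prz))
		return PTR_ERR(prz);

	/* Contents surviving the previous boot were copied aside during init. */
	if (persistent_ram_old_size(prz))
		pr_info("recovered %zu bytes from the previous boot\n",
			persistent_ram_old_size(prz));
	persistent_ram_free_old(prz);

	/* New writes append to the circular buffer and wrap when it is full. */
	persistent_ram_write(prz, "hello from this boot", 20);

	persistent_ram_free(&prz);
	return 0;
}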
// SPDX-License-Identifier: GPL-2.0+ /* * linux/fs/jbd2/recovery.c * * Written by Stephen C. Tweedie <[email protected]>, 1999 * * Copyright 1999-2000 Red Hat Software --- All Rights Reserved * * Journal recovery routines for the generic filesystem journaling code; * part of the ext2fs journaling system. */ #ifndef __KERNEL__ #include "jfs_user.h" #else #include <linux/time.h> #include <linux/fs.h> #include <linux/jbd2.h> #include <linux/errno.h> #include <linux/crc32.h> #include <linux/blkdev.h> #endif /* * Maintain information about the progress of the recovery job, so that * the different passes can carry information between them. */ struct recovery_info { tid_t start_transaction; tid_t end_transaction; unsigned long head_block; int nr_replays; int nr_revokes; int nr_revoke_hits; }; static int do_one_pass(journal_t *journal, struct recovery_info *info, enum passtype pass); static int scan_revoke_records(journal_t *, struct buffer_head *, tid_t, struct recovery_info *); #ifdef __KERNEL__ /* Release readahead buffers after use */ static void journal_brelse_array(struct buffer_head *b[], int n) { while (--n >= 0) brelse (b[n]); } /* * When reading from the journal, we are going through the block device * layer directly and so there is no readahead being done for us. We * need to implement any readahead ourselves if we want it to happen at * all. Recovery is basically one long sequential read, so make sure we * do the IO in reasonably large chunks. * * This is not so critical that we need to be enormously clever about * the readahead size, though. 128K is a purely arbitrary, good-enough * fixed value. */ #define MAXBUF 8 static int do_readahead(journal_t *journal, unsigned int start) { int err; unsigned int max, nbufs, next; unsigned long long blocknr; struct buffer_head *bh; struct buffer_head * bufs[MAXBUF]; /* Do up to 128K of readahead */ max = start + (128 * 1024 / journal->j_blocksize); if (max > journal->j_total_len) max = journal->j_total_len; /* Do the readahead itself. We'll submit MAXBUF buffer_heads at * a time to the block device IO layer. */ nbufs = 0; for (next = start; next < max; next++) { err = jbd2_journal_bmap(journal, next, &blocknr); if (err) { printk(KERN_ERR "JBD2: bad block at offset %u\n", next); goto failed; } bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize); if (!bh) { err = -ENOMEM; goto failed; } if (!buffer_uptodate(bh) && !buffer_locked(bh)) { bufs[nbufs++] = bh; if (nbufs == MAXBUF) { bh_readahead_batch(nbufs, bufs, 0); journal_brelse_array(bufs, nbufs); nbufs = 0; } } else brelse(bh); } if (nbufs) bh_readahead_batch(nbufs, bufs, 0); err = 0; failed: if (nbufs) journal_brelse_array(bufs, nbufs); return err; } #endif /* __KERNEL__ */ /* * Read a block from the journal */ static int jread(struct buffer_head **bhp, journal_t *journal, unsigned int offset) { int err; unsigned long long blocknr; struct buffer_head *bh; *bhp = NULL; if (offset >= journal->j_total_len) { printk(KERN_ERR "JBD2: corrupted journal superblock\n"); return -EFSCORRUPTED; } err = jbd2_journal_bmap(journal, offset, &blocknr); if (err) { printk(KERN_ERR "JBD2: bad block at offset %u\n", offset); return err; } bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize); if (!bh) return -ENOMEM; if (!buffer_uptodate(bh)) { /* * If this is a brand new buffer, start readahead. * Otherwise, we assume we are already reading it. 
*/ bool need_readahead = !buffer_req(bh); bh_read_nowait(bh, 0); if (need_readahead) do_readahead(journal, offset); wait_on_buffer(bh); } if (!buffer_uptodate(bh)) { printk(KERN_ERR "JBD2: Failed to read block at offset %u\n", offset); brelse(bh); return -EIO; } *bhp = bh; return 0; } static int jbd2_descriptor_block_csum_verify(journal_t *j, void *buf) { struct jbd2_journal_block_tail *tail; __be32 provided; __u32 calculated; if (!jbd2_journal_has_csum_v2or3(j)) return 1; tail = (struct jbd2_journal_block_tail *)((char *)buf + j->j_blocksize - sizeof(struct jbd2_journal_block_tail)); provided = tail->t_checksum; tail->t_checksum = 0; calculated = jbd2_chksum(j, j->j_csum_seed, buf, j->j_blocksize); tail->t_checksum = provided; return provided == cpu_to_be32(calculated); } /* * Count the number of in-use tags in a journal descriptor block. */ static int count_tags(journal_t *journal, struct buffer_head *bh) { char * tagp; journal_block_tag_t tag; int nr = 0, size = journal->j_blocksize; int tag_bytes = journal_tag_bytes(journal); if (jbd2_journal_has_csum_v2or3(journal)) size -= sizeof(struct jbd2_journal_block_tail); tagp = &bh->b_data[sizeof(journal_header_t)]; while ((tagp - bh->b_data + tag_bytes) <= size) { memcpy(&tag, tagp, sizeof(tag)); nr++; tagp += tag_bytes; if (!(tag.t_flags & cpu_to_be16(JBD2_FLAG_SAME_UUID))) tagp += 16; if (tag.t_flags & cpu_to_be16(JBD2_FLAG_LAST_TAG)) break; } return nr; } /* Make sure we wrap around the log correctly! */ #define wrap(journal, var) \ do { \ if (var >= (journal)->j_last) \ var -= ((journal)->j_last - (journal)->j_first); \ } while (0) static int fc_do_one_pass(journal_t *journal, struct recovery_info *info, enum passtype pass) { unsigned int expected_commit_id = info->end_transaction; unsigned long next_fc_block; struct buffer_head *bh; int err = 0; next_fc_block = journal->j_fc_first; if (!journal->j_fc_replay_callback) return 0; while (next_fc_block <= journal->j_fc_last) { jbd2_debug(3, "Fast commit replay: next block %ld\n", next_fc_block); err = jread(&bh, journal, next_fc_block); if (err) { jbd2_debug(3, "Fast commit replay: read error\n"); break; } err = journal->j_fc_replay_callback(journal, bh, pass, next_fc_block - journal->j_fc_first, expected_commit_id); brelse(bh); next_fc_block++; if (err < 0 || err == JBD2_FC_REPLAY_STOP) break; err = 0; } if (err) jbd2_debug(3, "Fast commit replay failed, err = %d\n", err); return err; } /** * jbd2_journal_recover - recovers a on-disk journal * @journal: the journal to recover * * The primary function for recovering the log contents when mounting a * journaled device. * * Recovery is done in three passes. In the first pass, we look for the * end of the log. In the second, we assemble the list of revoke * blocks. In the third and final pass, we replay any un-revoked blocks * in the log. */ int jbd2_journal_recover(journal_t *journal) { int err, err2; journal_superblock_t * sb; struct recovery_info info; memset(&info, 0, sizeof(info)); sb = journal->j_superblock; /* * The journal superblock's s_start field (the current log head) * is always zero if, and only if, the journal was cleanly * unmounted. 
*/ if (!sb->s_start) { jbd2_debug(1, "No recovery required, last transaction %d, head block %u\n", be32_to_cpu(sb->s_sequence), be32_to_cpu(sb->s_head)); journal->j_transaction_sequence = be32_to_cpu(sb->s_sequence) + 1; journal->j_head = be32_to_cpu(sb->s_head); return 0; } err = do_one_pass(journal, &info, PASS_SCAN); if (!err) err = do_one_pass(journal, &info, PASS_REVOKE); if (!err) err = do_one_pass(journal, &info, PASS_REPLAY); jbd2_debug(1, "JBD2: recovery, exit status %d, " "recovered transactions %u to %u\n", err, info.start_transaction, info.end_transaction); jbd2_debug(1, "JBD2: Replayed %d and revoked %d/%d blocks\n", info.nr_replays, info.nr_revoke_hits, info.nr_revokes); /* Restart the log at the next transaction ID, thus invalidating * any existing commit records in the log. */ journal->j_transaction_sequence = ++info.end_transaction; journal->j_head = info.head_block; jbd2_debug(1, "JBD2: last transaction %d, head block %lu\n", journal->j_transaction_sequence, journal->j_head); jbd2_journal_clear_revoke(journal); err2 = sync_blockdev(journal->j_fs_dev); if (!err) err = err2; /* Make sure all replayed data is on permanent storage */ if (journal->j_flags & JBD2_BARRIER) { err2 = blkdev_issue_flush(journal->j_fs_dev); if (!err) err = err2; } return err; } /** * jbd2_journal_skip_recovery - Start journal and wipe exiting records * @journal: journal to startup * * Locate any valid recovery information from the journal and set up the * journal structures in memory to ignore it (presumably because the * caller has evidence that it is out of date). * This function doesn't appear to be exported.. * * We perform one pass over the journal to allow us to tell the user how * much recovery information is being erased, and to let us initialise * the journal transaction sequence numbers to the next unused ID. */ int jbd2_journal_skip_recovery(journal_t *journal) { int err; struct recovery_info info; memset (&info, 0, sizeof(info)); err = do_one_pass(journal, &info, PASS_SCAN); if (err) { printk(KERN_ERR "JBD2: error %d scanning journal\n", err); ++journal->j_transaction_sequence; journal->j_head = journal->j_first; } else { #ifdef CONFIG_JBD2_DEBUG int dropped = info.end_transaction - be32_to_cpu(journal->j_superblock->s_sequence); jbd2_debug(1, "JBD2: ignoring %d transaction%s from the journal.\n", dropped, (dropped == 1) ? "" : "s"); #endif journal->j_transaction_sequence = ++info.end_transaction; journal->j_head = info.head_block; } journal->j_tail = 0; return err; } static inline unsigned long long read_tag_block(journal_t *journal, journal_block_tag_t *tag) { unsigned long long block = be32_to_cpu(tag->t_blocknr); if (jbd2_has_feature_64bit(journal)) block |= (u64)be32_to_cpu(tag->t_blocknr_high) << 32; return block; } /* * calc_chksums calculates the checksums for the blocks described in the * descriptor block. */ static int calc_chksums(journal_t *journal, struct buffer_head *bh, unsigned long *next_log_block, __u32 *crc32_sum) { int i, num_blks, err; unsigned long io_block; struct buffer_head *obh; num_blks = count_tags(journal, bh); /* Calculate checksum of the descriptor block. 
*/ *crc32_sum = crc32_be(*crc32_sum, (void *)bh->b_data, bh->b_size); for (i = 0; i < num_blks; i++) { io_block = (*next_log_block)++; wrap(journal, *next_log_block); err = jread(&obh, journal, io_block); if (err) { printk(KERN_ERR "JBD2: IO error %d recovering block " "%lu in log\n", err, io_block); return 1; } else { *crc32_sum = crc32_be(*crc32_sum, (void *)obh->b_data, obh->b_size); } put_bh(obh); } return 0; } static int jbd2_commit_block_csum_verify(journal_t *j, void *buf) { struct commit_header *h; __be32 provided; __u32 calculated; if (!jbd2_journal_has_csum_v2or3(j)) return 1; h = buf; provided = h->h_chksum[0]; h->h_chksum[0] = 0; calculated = jbd2_chksum(j, j->j_csum_seed, buf, j->j_blocksize); h->h_chksum[0] = provided; return provided == cpu_to_be32(calculated); } static int jbd2_block_tag_csum_verify(journal_t *j, journal_block_tag_t *tag, journal_block_tag3_t *tag3, void *buf, __u32 sequence) { __u32 csum32; __be32 seq; if (!jbd2_journal_has_csum_v2or3(j)) return 1; seq = cpu_to_be32(sequence); csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&seq, sizeof(seq)); csum32 = jbd2_chksum(j, csum32, buf, j->j_blocksize); if (jbd2_has_feature_csum3(j)) return tag3->t_checksum == cpu_to_be32(csum32); else return tag->t_checksum == cpu_to_be16(csum32); } static int do_one_pass(journal_t *journal, struct recovery_info *info, enum passtype pass) { unsigned int first_commit_ID, next_commit_ID; unsigned long next_log_block, head_block; int err, success = 0; journal_superblock_t * sb; journal_header_t * tmp; struct buffer_head * bh; unsigned int sequence; int blocktype; int tag_bytes = journal_tag_bytes(journal); __u32 crc32_sum = ~0; /* Transactional Checksums */ int descr_csum_size = 0; int block_error = 0; bool need_check_commit_time = false; __u64 last_trans_commit_time = 0, commit_time; /* * First thing is to establish what we expect to find in the log * (in terms of transaction IDs), and where (in terms of log * block offsets): query the superblock. */ sb = journal->j_superblock; next_commit_ID = be32_to_cpu(sb->s_sequence); next_log_block = be32_to_cpu(sb->s_start); head_block = next_log_block; first_commit_ID = next_commit_ID; if (pass == PASS_SCAN) info->start_transaction = first_commit_ID; jbd2_debug(1, "Starting recovery pass %d\n", pass); /* * Now we walk through the log, transaction by transaction, * making sure that each transaction has a commit block in the * expected place. Each complete transaction gets replayed back * into the main filesystem. */ while (1) { int flags; char * tagp; journal_block_tag_t tag; struct buffer_head * obh; struct buffer_head * nbh; cond_resched(); /* If we already know where to stop the log traversal, * check right now that we haven't gone past the end of * the log. */ if (pass != PASS_SCAN) if (tid_geq(next_commit_ID, info->end_transaction)) break; jbd2_debug(2, "Scanning for sequence ID %u at %lu/%lu\n", next_commit_ID, next_log_block, journal->j_last); /* Skip over each chunk of the transaction looking * either the next descriptor block or the final commit * record. */ jbd2_debug(3, "JBD2: checking block %ld\n", next_log_block); err = jread(&bh, journal, next_log_block); if (err) goto failed; next_log_block++; wrap(journal, next_log_block); /* What kind of buffer is it? * * If it is a descriptor block, check that it has the * expected sequence number. Otherwise, we're all done * here. 
*/ tmp = (journal_header_t *)bh->b_data; if (tmp->h_magic != cpu_to_be32(JBD2_MAGIC_NUMBER)) { brelse(bh); break; } blocktype = be32_to_cpu(tmp->h_blocktype); sequence = be32_to_cpu(tmp->h_sequence); jbd2_debug(3, "Found magic %d, sequence %d\n", blocktype, sequence); if (sequence != next_commit_ID) { brelse(bh); break; } /* OK, we have a valid descriptor block which matches * all of the sequence number checks. What are we going * to do with it? That depends on the pass... */ switch(blocktype) { case JBD2_DESCRIPTOR_BLOCK: /* Verify checksum first */ if (jbd2_journal_has_csum_v2or3(journal)) descr_csum_size = sizeof(struct jbd2_journal_block_tail); if (descr_csum_size > 0 && !jbd2_descriptor_block_csum_verify(journal, bh->b_data)) { /* * PASS_SCAN can see stale blocks due to lazy * journal init. Don't error out on those yet. */ if (pass != PASS_SCAN) { pr_err("JBD2: Invalid checksum recovering block %lu in log\n", next_log_block); err = -EFSBADCRC; brelse(bh); goto failed; } need_check_commit_time = true; jbd2_debug(1, "invalid descriptor block found in %lu\n", next_log_block); } /* If it is a valid descriptor block, replay it * in pass REPLAY; if journal_checksums enabled, then * calculate checksums in PASS_SCAN, otherwise, * just skip over the blocks it describes. */ if (pass != PASS_REPLAY) { if (pass == PASS_SCAN && jbd2_has_feature_checksum(journal) && !need_check_commit_time && !info->end_transaction) { if (calc_chksums(journal, bh, &next_log_block, &crc32_sum)) { put_bh(bh); break; } put_bh(bh); continue; } next_log_block += count_tags(journal, bh); wrap(journal, next_log_block); put_bh(bh); continue; } /* A descriptor block: we can now write all of * the data blocks. Yay, useful work is finally * getting done here! */ tagp = &bh->b_data[sizeof(journal_header_t)]; while ((tagp - bh->b_data + tag_bytes) <= journal->j_blocksize - descr_csum_size) { unsigned long io_block; memcpy(&tag, tagp, sizeof(tag)); flags = be16_to_cpu(tag.t_flags); io_block = next_log_block++; wrap(journal, next_log_block); err = jread(&obh, journal, io_block); if (err) { /* Recover what we can, but * report failure at the end. */ success = err; printk(KERN_ERR "JBD2: IO error %d recovering " "block %ld in log\n", err, io_block); } else { unsigned long long blocknr; J_ASSERT(obh != NULL); blocknr = read_tag_block(journal, &tag); /* If the block has been * revoked, then we're all done * here. 
*/ if (jbd2_journal_test_revoke (journal, blocknr, next_commit_ID)) { brelse(obh); ++info->nr_revoke_hits; goto skip_write; } /* Look for block corruption */ if (!jbd2_block_tag_csum_verify( journal, &tag, (journal_block_tag3_t *)tagp, obh->b_data, be32_to_cpu(tmp->h_sequence))) { brelse(obh); success = -EFSBADCRC; printk(KERN_ERR "JBD2: Invalid " "checksum recovering " "data block %llu in " "log\n", blocknr); block_error = 1; goto skip_write; } /* Find a buffer for the new * data being restored */ nbh = __getblk(journal->j_fs_dev, blocknr, journal->j_blocksize); if (nbh == NULL) { printk(KERN_ERR "JBD2: Out of memory " "during recovery.\n"); err = -ENOMEM; brelse(bh); brelse(obh); goto failed; } lock_buffer(nbh); memcpy(nbh->b_data, obh->b_data, journal->j_blocksize); if (flags & JBD2_FLAG_ESCAPE) { *((__be32 *)nbh->b_data) = cpu_to_be32(JBD2_MAGIC_NUMBER); } BUFFER_TRACE(nbh, "marking dirty"); set_buffer_uptodate(nbh); mark_buffer_dirty(nbh); BUFFER_TRACE(nbh, "marking uptodate"); ++info->nr_replays; unlock_buffer(nbh); brelse(obh); brelse(nbh); } skip_write: tagp += tag_bytes; if (!(flags & JBD2_FLAG_SAME_UUID)) tagp += 16; if (flags & JBD2_FLAG_LAST_TAG) break; } brelse(bh); continue; case JBD2_COMMIT_BLOCK: /* How to differentiate between interrupted commit * and journal corruption ? * * {nth transaction} * Checksum Verification Failed * | * ____________________ * | | * async_commit sync_commit * | | * | GO TO NEXT "Journal Corruption" * | TRANSACTION * | * {(n+1)th transanction} * | * _______|______________ * | | * Commit block found Commit block not found * | | * "Journal Corruption" | * _____________|_________ * | | * nth trans corrupt OR nth trans * and (n+1)th interrupted interrupted * before commit block * could reach the disk. * (Cannot find the difference in above * mentioned conditions. Hence assume * "Interrupted Commit".) */ commit_time = be64_to_cpu( ((struct commit_header *)bh->b_data)->h_commit_sec); /* * If need_check_commit_time is set, it means we are in * PASS_SCAN and csum verify failed before. If * commit_time is increasing, it's the same journal, * otherwise it is stale journal block, just end this * recovery. */ if (need_check_commit_time) { if (commit_time >= last_trans_commit_time) { pr_err("JBD2: Invalid checksum found in transaction %u\n", next_commit_ID); err = -EFSBADCRC; brelse(bh); goto failed; } ignore_crc_mismatch: /* * It likely does not belong to same journal, * just end this recovery with success. */ jbd2_debug(1, "JBD2: Invalid checksum ignored in transaction %u, likely stale data\n", next_commit_ID); brelse(bh); goto done; } /* * Found an expected commit block: if checksums * are present, verify them in PASS_SCAN; else not * much to do other than move on to the next sequence * number. */ if (pass == PASS_SCAN && jbd2_has_feature_checksum(journal)) { struct commit_header *cbh = (struct commit_header *)bh->b_data; unsigned found_chksum = be32_to_cpu(cbh->h_chksum[0]); if (info->end_transaction) { journal->j_failed_commit = info->end_transaction; brelse(bh); break; } /* Neither checksum match nor unused? 
*/ if (!((crc32_sum == found_chksum && cbh->h_chksum_type == JBD2_CRC32_CHKSUM && cbh->h_chksum_size == JBD2_CRC32_CHKSUM_SIZE) || (cbh->h_chksum_type == 0 && cbh->h_chksum_size == 0 && found_chksum == 0))) goto chksum_error; crc32_sum = ~0; } if (pass == PASS_SCAN && !jbd2_commit_block_csum_verify(journal, bh->b_data)) { chksum_error: if (commit_time < last_trans_commit_time) goto ignore_crc_mismatch; info->end_transaction = next_commit_ID; info->head_block = head_block; if (!jbd2_has_feature_async_commit(journal)) { journal->j_failed_commit = next_commit_ID; brelse(bh); break; } } if (pass == PASS_SCAN) { last_trans_commit_time = commit_time; head_block = next_log_block; } brelse(bh); next_commit_ID++; continue; case JBD2_REVOKE_BLOCK: /* * Check revoke block crc in pass_scan, if csum verify * failed, check commit block time later. */ if (pass == PASS_SCAN && !jbd2_descriptor_block_csum_verify(journal, bh->b_data)) { jbd2_debug(1, "JBD2: invalid revoke block found in %lu\n", next_log_block); need_check_commit_time = true; } /* If we aren't in the REVOKE pass, then we can * just skip over this block. */ if (pass != PASS_REVOKE) { brelse(bh); continue; } err = scan_revoke_records(journal, bh, next_commit_ID, info); brelse(bh); if (err) goto failed; continue; default: jbd2_debug(3, "Unrecognised magic %d, end of scan.\n", blocktype); brelse(bh); goto done; } } done: /* * We broke out of the log scan loop: either we came to the * known end of the log or we found an unexpected block in the * log. If the latter happened, then we know that the "current" * transaction marks the end of the valid log. */ if (pass == PASS_SCAN) { if (!info->end_transaction) info->end_transaction = next_commit_ID; if (!info->head_block) info->head_block = head_block; } else { /* It's really bad news if different passes end up at * different places (but possible due to IO errors). */ if (info->end_transaction != next_commit_ID) { printk(KERN_ERR "JBD2: recovery pass %d ended at " "transaction %u, expected %u\n", pass, next_commit_ID, info->end_transaction); if (!success) success = -EIO; } } if (jbd2_has_feature_fast_commit(journal) && pass != PASS_REVOKE) { err = fc_do_one_pass(journal, info, pass); if (err) success = err; } if (block_error && success == 0) success = -EIO; return success; failed: return err; } /* Scan a revoke record, marking all blocks mentioned as revoked. */ static int scan_revoke_records(journal_t *journal, struct buffer_head *bh, tid_t sequence, struct recovery_info *info) { jbd2_journal_revoke_header_t *header; int offset, max; unsigned csum_size = 0; __u32 rcount; int record_len = 4; header = (jbd2_journal_revoke_header_t *) bh->b_data; offset = sizeof(jbd2_journal_revoke_header_t); rcount = be32_to_cpu(header->r_count); if (jbd2_journal_has_csum_v2or3(journal)) csum_size = sizeof(struct jbd2_journal_block_tail); if (rcount > journal->j_blocksize - csum_size) return -EINVAL; max = rcount; if (jbd2_has_feature_64bit(journal)) record_len = 8; while (offset + record_len <= max) { unsigned long long blocknr; int err; if (record_len == 4) blocknr = be32_to_cpu(* ((__be32 *) (bh->b_data+offset))); else blocknr = be64_to_cpu(* ((__be64 *) (bh->b_data+offset))); offset += record_len; err = jbd2_journal_set_revoke(journal, blocknr, sequence); if (err) return err; ++info->nr_revokes; } return 0; }
linux-master
fs/jbd2/recovery.c
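One subtlety worth calling out: the journal is treated as a ring, so next_log_block runs from j_first up to j_last and the wrap() macro folds any overflow back to the start of that window, letting a transaction that straddles the end of the journal area be read correctly. Below is a standalone, userspace-style illustration of that arithmetic; the window bounds and block numbers are invented.

#include <stdio.h>

/* Same arithmetic as the wrap() macro above, with the window passed in. */
#define wrap(first, last, var)				\
	do {						\
		if ((var) >= (last))			\
			(var) -= ((last) - (first));	\
	} while (0)

int main(void)
{
	unsigned long j_first = 1, j_last = 1024;	/* hypothetical journal window */
	unsigned long next_log_block = 1022;
	int i;

	/* Prints 1022, 1023, 1, 2, 3: block 0 (the superblock) is never revisited. */
	for (i = 0; i < 5; i++) {
		printf("reading log block %lu\n", next_log_block);
		next_log_block++;
		wrap(j_first, j_last, next_log_block);
	}
	return 0;
}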
// SPDX-License-Identifier: GPL-2.0+ /* * linux/fs/jbd2/revoke.c * * Written by Stephen C. Tweedie <[email protected]>, 2000 * * Copyright 2000 Red Hat corp --- All Rights Reserved * * Journal revoke routines for the generic filesystem journaling code; * part of the ext2fs journaling system. * * Revoke is the mechanism used to prevent old log records for deleted * metadata from being replayed on top of newer data using the same * blocks. The revoke mechanism is used in two separate places: * * + Commit: during commit we write the entire list of the current * transaction's revoked blocks to the journal * * + Recovery: during recovery we record the transaction ID of all * revoked blocks. If there are multiple revoke records in the log * for a single block, only the last one counts, and if there is a log * entry for a block beyond the last revoke, then that log entry still * gets replayed. * * We can get interactions between revokes and new log data within a * single transaction: * * Block is revoked and then journaled: * The desired end result is the journaling of the new block, so we * cancel the revoke before the transaction commits. * * Block is journaled and then revoked: * The revoke must take precedence over the write of the block, so we * need either to cancel the journal entry or to write the revoke * later in the log than the log block. In this case, we choose the * latter: journaling a block cancels any revoke record for that block * in the current transaction, so any revoke for that block in the * transaction must have happened after the block was journaled and so * the revoke must take precedence. * * Block is revoked and then written as data: * The data write is allowed to succeed, but the revoke is _not_ * cancelled. We still need to prevent old log records from * overwriting the new data. We don't even need to clear the revoke * bit here. * * We cache revoke status of a buffer in the current transaction in b_states * bits. As the name says, revokevalid flag indicates that the cached revoke * status of a buffer is valid and we can rely on the cached status. * * Revoke information on buffers is a tri-state value: * * RevokeValid clear: no cached revoke status, need to look it up * RevokeValid set, Revoked clear: * buffer has not been revoked, and cancel_revoke * need do nothing. * RevokeValid set, Revoked set: * buffer has been revoked. * * Locking rules: * We keep two hash tables of revoke records. One hashtable belongs to the * running transaction (is pointed to by journal->j_revoke), the other one * belongs to the committing transaction. Accesses to the second hash table * happen only from the kjournald and no other thread touches this table. Also * journal_switch_revoke_table() which switches which hashtable belongs to the * running and which to the committing transaction is called only from * kjournald. Therefore we need no locks when accessing the hashtable belonging * to the committing transaction. * * All users operating on the hash table belonging to the running transaction * have a handle to the transaction. Therefore they are safe from kjournald * switching hash tables under them. For operations on the lists of entries in * the hash table j_revoke_lock is used. * * Finally, also replay code uses the hash tables but at this moment no one else * can touch them (filesystem isn't mounted yet) and hence no locking is * needed. 
*/ #ifndef __KERNEL__ #include "jfs_user.h" #else #include <linux/time.h> #include <linux/fs.h> #include <linux/jbd2.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/init.h> #include <linux/bio.h> #include <linux/log2.h> #include <linux/hash.h> #endif static struct kmem_cache *jbd2_revoke_record_cache; static struct kmem_cache *jbd2_revoke_table_cache; /* Each revoke record represents one single revoked block. During journal replay, this involves recording the transaction ID of the last transaction to revoke this block. */ struct jbd2_revoke_record_s { struct list_head hash; tid_t sequence; /* Used for recovery only */ unsigned long long blocknr; }; /* The revoke table is just a simple hash table of revoke records. */ struct jbd2_revoke_table_s { /* It is conceivable that we might want a larger hash table * for recovery. Must be a power of two. */ int hash_size; int hash_shift; struct list_head *hash_table; }; #ifdef __KERNEL__ static void write_one_revoke_record(transaction_t *, struct list_head *, struct buffer_head **, int *, struct jbd2_revoke_record_s *); static void flush_descriptor(journal_t *, struct buffer_head *, int); #endif /* Utility functions to maintain the revoke table */ static inline int hash(journal_t *journal, unsigned long long block) { return hash_64(block, journal->j_revoke->hash_shift); } static int insert_revoke_hash(journal_t *journal, unsigned long long blocknr, tid_t seq) { struct list_head *hash_list; struct jbd2_revoke_record_s *record; gfp_t gfp_mask = GFP_NOFS; if (journal_oom_retry) gfp_mask |= __GFP_NOFAIL; record = kmem_cache_alloc(jbd2_revoke_record_cache, gfp_mask); if (!record) return -ENOMEM; record->sequence = seq; record->blocknr = blocknr; hash_list = &journal->j_revoke->hash_table[hash(journal, blocknr)]; spin_lock(&journal->j_revoke_lock); list_add(&record->hash, hash_list); spin_unlock(&journal->j_revoke_lock); return 0; } /* Find a revoke record in the journal's hash table. 
*/ static struct jbd2_revoke_record_s *find_revoke_record(journal_t *journal, unsigned long long blocknr) { struct list_head *hash_list; struct jbd2_revoke_record_s *record; hash_list = &journal->j_revoke->hash_table[hash(journal, blocknr)]; spin_lock(&journal->j_revoke_lock); record = (struct jbd2_revoke_record_s *) hash_list->next; while (&(record->hash) != hash_list) { if (record->blocknr == blocknr) { spin_unlock(&journal->j_revoke_lock); return record; } record = (struct jbd2_revoke_record_s *) record->hash.next; } spin_unlock(&journal->j_revoke_lock); return NULL; } void jbd2_journal_destroy_revoke_record_cache(void) { kmem_cache_destroy(jbd2_revoke_record_cache); jbd2_revoke_record_cache = NULL; } void jbd2_journal_destroy_revoke_table_cache(void) { kmem_cache_destroy(jbd2_revoke_table_cache); jbd2_revoke_table_cache = NULL; } int __init jbd2_journal_init_revoke_record_cache(void) { J_ASSERT(!jbd2_revoke_record_cache); jbd2_revoke_record_cache = KMEM_CACHE(jbd2_revoke_record_s, SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY); if (!jbd2_revoke_record_cache) { pr_emerg("JBD2: failed to create revoke_record cache\n"); return -ENOMEM; } return 0; } int __init jbd2_journal_init_revoke_table_cache(void) { J_ASSERT(!jbd2_revoke_table_cache); jbd2_revoke_table_cache = KMEM_CACHE(jbd2_revoke_table_s, SLAB_TEMPORARY); if (!jbd2_revoke_table_cache) { pr_emerg("JBD2: failed to create revoke_table cache\n"); return -ENOMEM; } return 0; } static struct jbd2_revoke_table_s *jbd2_journal_init_revoke_table(int hash_size) { int shift = 0; int tmp = hash_size; struct jbd2_revoke_table_s *table; table = kmem_cache_alloc(jbd2_revoke_table_cache, GFP_KERNEL); if (!table) goto out; while((tmp >>= 1UL) != 0UL) shift++; table->hash_size = hash_size; table->hash_shift = shift; table->hash_table = kmalloc_array(hash_size, sizeof(struct list_head), GFP_KERNEL); if (!table->hash_table) { kmem_cache_free(jbd2_revoke_table_cache, table); table = NULL; goto out; } for (tmp = 0; tmp < hash_size; tmp++) INIT_LIST_HEAD(&table->hash_table[tmp]); out: return table; } static void jbd2_journal_destroy_revoke_table(struct jbd2_revoke_table_s *table) { int i; struct list_head *hash_list; for (i = 0; i < table->hash_size; i++) { hash_list = &table->hash_table[i]; J_ASSERT(list_empty(hash_list)); } kfree(table->hash_table); kmem_cache_free(jbd2_revoke_table_cache, table); } /* Initialise the revoke table for a given journal to a given size. */ int jbd2_journal_init_revoke(journal_t *journal, int hash_size) { J_ASSERT(journal->j_revoke_table[0] == NULL); J_ASSERT(is_power_of_2(hash_size)); journal->j_revoke_table[0] = jbd2_journal_init_revoke_table(hash_size); if (!journal->j_revoke_table[0]) goto fail0; journal->j_revoke_table[1] = jbd2_journal_init_revoke_table(hash_size); if (!journal->j_revoke_table[1]) goto fail1; journal->j_revoke = journal->j_revoke_table[1]; spin_lock_init(&journal->j_revoke_lock); return 0; fail1: jbd2_journal_destroy_revoke_table(journal->j_revoke_table[0]); journal->j_revoke_table[0] = NULL; fail0: return -ENOMEM; } /* Destroy a journal's revoke table. The table must already be empty! */ void jbd2_journal_destroy_revoke(journal_t *journal) { journal->j_revoke = NULL; if (journal->j_revoke_table[0]) jbd2_journal_destroy_revoke_table(journal->j_revoke_table[0]); if (journal->j_revoke_table[1]) jbd2_journal_destroy_revoke_table(journal->j_revoke_table[1]); } #ifdef __KERNEL__ /* * jbd2_journal_revoke: revoke a given buffer_head from the journal. 
This * prevents the block from being replayed during recovery if we take a * crash after this current transaction commits. Any subsequent * metadata writes of the buffer in this transaction cancel the * revoke. * * Note that this call may block --- it is up to the caller to make * sure that there are no further calls to journal_write_metadata * before the revoke is complete. In ext3, this implies calling the * revoke before clearing the block bitmap when we are deleting * metadata. * * Revoke performs a jbd2_journal_forget on any buffer_head passed in as a * parameter, but does _not_ forget the buffer_head if the bh was only * found implicitly. * * bh_in may not be a journalled buffer - it may have come off * the hash tables without an attached journal_head. * * If bh_in is non-zero, jbd2_journal_revoke() will decrement its b_count * by one. */ int jbd2_journal_revoke(handle_t *handle, unsigned long long blocknr, struct buffer_head *bh_in) { struct buffer_head *bh = NULL; journal_t *journal; struct block_device *bdev; int err; might_sleep(); if (bh_in) BUFFER_TRACE(bh_in, "enter"); journal = handle->h_transaction->t_journal; if (!jbd2_journal_set_features(journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)){ J_ASSERT (!"Cannot set revoke feature!"); return -EINVAL; } bdev = journal->j_fs_dev; bh = bh_in; if (!bh) { bh = __find_get_block(bdev, blocknr, journal->j_blocksize); if (bh) BUFFER_TRACE(bh, "found on hash"); } #ifdef JBD2_EXPENSIVE_CHECKING else { struct buffer_head *bh2; /* If there is a different buffer_head lying around in * memory anywhere... */ bh2 = __find_get_block(bdev, blocknr, journal->j_blocksize); if (bh2) { /* ... and it has RevokeValid status... */ if (bh2 != bh && buffer_revokevalid(bh2)) /* ...then it better be revoked too, * since it's illegal to create a revoke * record against a buffer_head which is * not marked revoked --- that would * risk missing a subsequent revoke * cancel. */ J_ASSERT_BH(bh2, buffer_revoked(bh2)); put_bh(bh2); } } #endif if (WARN_ON_ONCE(handle->h_revoke_credits <= 0)) { if (!bh_in) brelse(bh); return -EIO; } /* We really ought not ever to revoke twice in a row without first having the revoke cancelled: it's illegal to free a block twice without allocating it in between! */ if (bh) { if (!J_EXPECT_BH(bh, !buffer_revoked(bh), "inconsistent data on disk")) { if (!bh_in) brelse(bh); return -EIO; } set_buffer_revoked(bh); set_buffer_revokevalid(bh); if (bh_in) { BUFFER_TRACE(bh_in, "call jbd2_journal_forget"); jbd2_journal_forget(handle, bh_in); } else { BUFFER_TRACE(bh, "call brelse"); __brelse(bh); } } handle->h_revoke_credits--; jbd2_debug(2, "insert revoke for block %llu, bh_in=%p\n",blocknr, bh_in); err = insert_revoke_hash(journal, blocknr, handle->h_transaction->t_tid); BUFFER_TRACE(bh_in, "exit"); return err; } /* * Cancel an outstanding revoke. For use only internally by the * journaling code (called from jbd2_journal_get_write_access). * * We trust buffer_revoked() on the buffer if the buffer is already * being journaled: if there is no revoke pending on the buffer, then we * don't do anything here. * * This would break if it were possible for a buffer to be revoked and * discarded, and then reallocated within the same transaction. In such * a case we would have lost the revoked bit, but when we arrived here * the second time we would still have a pending revoke to cancel. So, * do not trust the Revoked bit on buffers unless RevokeValid is also * set. 
*/ int jbd2_journal_cancel_revoke(handle_t *handle, struct journal_head *jh) { struct jbd2_revoke_record_s *record; journal_t *journal = handle->h_transaction->t_journal; int need_cancel; int did_revoke = 0; /* akpm: debug */ struct buffer_head *bh = jh2bh(jh); jbd2_debug(4, "journal_head %p, cancelling revoke\n", jh); /* Is the existing Revoke bit valid? If so, we trust it, and * only perform the full cancel if the revoke bit is set. If * not, we can't trust the revoke bit, and we need to do the * full search for a revoke record. */ if (test_set_buffer_revokevalid(bh)) { need_cancel = test_clear_buffer_revoked(bh); } else { need_cancel = 1; clear_buffer_revoked(bh); } if (need_cancel) { record = find_revoke_record(journal, bh->b_blocknr); if (record) { jbd2_debug(4, "cancelled existing revoke on " "blocknr %llu\n", (unsigned long long)bh->b_blocknr); spin_lock(&journal->j_revoke_lock); list_del(&record->hash); spin_unlock(&journal->j_revoke_lock); kmem_cache_free(jbd2_revoke_record_cache, record); did_revoke = 1; } } #ifdef JBD2_EXPENSIVE_CHECKING /* There better not be one left behind by now! */ record = find_revoke_record(journal, bh->b_blocknr); J_ASSERT_JH(jh, record == NULL); #endif /* Finally, have we just cleared revoke on an unhashed * buffer_head? If so, we'd better make sure we clear the * revoked status on any hashed alias too, otherwise the revoke * state machine will get very upset later on. */ if (need_cancel) { struct buffer_head *bh2; bh2 = __find_get_block(bh->b_bdev, bh->b_blocknr, bh->b_size); if (bh2) { if (bh2 != bh) clear_buffer_revoked(bh2); __brelse(bh2); } } return did_revoke; } /* * journal_clear_revoked_flag clears revoked flag of buffers in * revoke table to reflect there is no revoked buffers in the next * transaction which is going to be started. */ void jbd2_clear_buffer_revoked_flags(journal_t *journal) { struct jbd2_revoke_table_s *revoke = journal->j_revoke; int i = 0; for (i = 0; i < revoke->hash_size; i++) { struct list_head *hash_list; struct list_head *list_entry; hash_list = &revoke->hash_table[i]; list_for_each(list_entry, hash_list) { struct jbd2_revoke_record_s *record; struct buffer_head *bh; record = (struct jbd2_revoke_record_s *)list_entry; bh = __find_get_block(journal->j_fs_dev, record->blocknr, journal->j_blocksize); if (bh) { clear_buffer_revoked(bh); __brelse(bh); } } } } /* journal_switch_revoke table select j_revoke for next transaction * we do not want to suspend any processing until all revokes are * written -bzzz */ void jbd2_journal_switch_revoke_table(journal_t *journal) { int i; if (journal->j_revoke == journal->j_revoke_table[0]) journal->j_revoke = journal->j_revoke_table[1]; else journal->j_revoke = journal->j_revoke_table[0]; for (i = 0; i < journal->j_revoke->hash_size; i++) INIT_LIST_HEAD(&journal->j_revoke->hash_table[i]); } /* * Write revoke records to the journal for all entries in the current * revoke hash, deleting the entries as we go. */ void jbd2_journal_write_revoke_records(transaction_t *transaction, struct list_head *log_bufs) { journal_t *journal = transaction->t_journal; struct buffer_head *descriptor; struct jbd2_revoke_record_s *record; struct jbd2_revoke_table_s *revoke; struct list_head *hash_list; int i, offset, count; descriptor = NULL; offset = 0; count = 0; /* select revoke table for committing transaction */ revoke = journal->j_revoke == journal->j_revoke_table[0] ? 
journal->j_revoke_table[1] : journal->j_revoke_table[0]; for (i = 0; i < revoke->hash_size; i++) { hash_list = &revoke->hash_table[i]; while (!list_empty(hash_list)) { record = (struct jbd2_revoke_record_s *) hash_list->next; write_one_revoke_record(transaction, log_bufs, &descriptor, &offset, record); count++; list_del(&record->hash); kmem_cache_free(jbd2_revoke_record_cache, record); } } if (descriptor) flush_descriptor(journal, descriptor, offset); jbd2_debug(1, "Wrote %d revoke records\n", count); } /* * Write out one revoke record. We need to create a new descriptor * block if the old one is full or if we have not already created one. */ static void write_one_revoke_record(transaction_t *transaction, struct list_head *log_bufs, struct buffer_head **descriptorp, int *offsetp, struct jbd2_revoke_record_s *record) { journal_t *journal = transaction->t_journal; int csum_size = 0; struct buffer_head *descriptor; int sz, offset; /* If we are already aborting, this all becomes a noop. We still need to go round the loop in jbd2_journal_write_revoke_records in order to free all of the revoke records: only the IO to the journal is omitted. */ if (is_journal_aborted(journal)) return; descriptor = *descriptorp; offset = *offsetp; /* Do we need to leave space at the end for a checksum? */ if (jbd2_journal_has_csum_v2or3(journal)) csum_size = sizeof(struct jbd2_journal_block_tail); if (jbd2_has_feature_64bit(journal)) sz = 8; else sz = 4; /* Make sure we have a descriptor with space left for the record */ if (descriptor) { if (offset + sz > journal->j_blocksize - csum_size) { flush_descriptor(journal, descriptor, offset); descriptor = NULL; } } if (!descriptor) { descriptor = jbd2_journal_get_descriptor_buffer(transaction, JBD2_REVOKE_BLOCK); if (!descriptor) return; /* Record it so that we can wait for IO completion later */ BUFFER_TRACE(descriptor, "file in log_bufs"); jbd2_file_log_bh(log_bufs, descriptor); offset = sizeof(jbd2_journal_revoke_header_t); *descriptorp = descriptor; } if (jbd2_has_feature_64bit(journal)) * ((__be64 *)(&descriptor->b_data[offset])) = cpu_to_be64(record->blocknr); else * ((__be32 *)(&descriptor->b_data[offset])) = cpu_to_be32(record->blocknr); offset += sz; *offsetp = offset; } /* * Flush a revoke descriptor out to the journal. If we are aborting, * this is a noop; otherwise we are generating a buffer which needs to * be waited for during commit, so it has to go onto the appropriate * journal buffer list. */ static void flush_descriptor(journal_t *journal, struct buffer_head *descriptor, int offset) { jbd2_journal_revoke_header_t *header; if (is_journal_aborted(journal)) return; header = (jbd2_journal_revoke_header_t *)descriptor->b_data; header->r_count = cpu_to_be32(offset); jbd2_descriptor_block_csum_set(journal, descriptor); set_buffer_jwrite(descriptor); BUFFER_TRACE(descriptor, "write"); set_buffer_dirty(descriptor); write_dirty_buffer(descriptor, REQ_SYNC); } #endif /* * Revoke support for recovery. * * Recovery needs to be able to: * * record all revoke records, including the tid of the latest instance * of each revoke in the journal * * check whether a given block in a given transaction should be replayed * (ie. has not been revoked by a revoke record in that or a subsequent * transaction) * * empty the revoke table after recovery. */ /* * First, setting revoke records. We create a new revoke record for * every block ever revoked in the log as we scan it for recovery, and * we update the existing records if we find multiple revokes for a * single block. 
*/ int jbd2_journal_set_revoke(journal_t *journal, unsigned long long blocknr, tid_t sequence) { struct jbd2_revoke_record_s *record; record = find_revoke_record(journal, blocknr); if (record) { /* If we have multiple occurrences, only record the * latest sequence number in the hashed record */ if (tid_gt(sequence, record->sequence)) record->sequence = sequence; return 0; } return insert_revoke_hash(journal, blocknr, sequence); } /* * Test revoke records. For a given block referenced in the log, has * that block been revoked? A revoke record with a given transaction * sequence number revokes all blocks in that transaction and earlier * ones, but later transactions still need to be replayed. */ int jbd2_journal_test_revoke(journal_t *journal, unsigned long long blocknr, tid_t sequence) { struct jbd2_revoke_record_s *record; record = find_revoke_record(journal, blocknr); if (!record) return 0; if (tid_gt(sequence, record->sequence)) return 0; return 1; } /* * Finally, once recovery is over, we need to clear the revoke table so * that it can be reused by the running filesystem. */ void jbd2_journal_clear_revoke(journal_t *journal) { int i; struct list_head *hash_list; struct jbd2_revoke_record_s *record; struct jbd2_revoke_table_s *revoke; revoke = journal->j_revoke; for (i = 0; i < revoke->hash_size; i++) { hash_list = &revoke->hash_table[i]; while (!list_empty(hash_list)) { record = (struct jbd2_revoke_record_s*) hash_list->next; list_del(&record->hash); kmem_cache_free(jbd2_revoke_record_cache, record); } } }
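/*
 * Illustrative sketch (editor's addition, not part of fs/jbd2/revoke.c):
 * how a filesystem might use the jbd2_journal_revoke() API documented
 * above when it frees a metadata block inside a running handle.  The
 * helper name example_free_metadata_block() and its error handling are
 * hypothetical; only the jbd2 call reflects the interface defined in
 * this file, and the handle is assumed to have been started with enough
 * revoke credits.
 */
#include <linux/jbd2.h>
#include <linux/buffer_head.h>

static int example_free_metadata_block(handle_t *handle,
				       struct buffer_head *bh,
				       unsigned long long blocknr)
{
	int err;

	/*
	 * File the revoke record before the block is cleared in the
	 * allocation bitmaps, so that a crash after this transaction
	 * commits cannot replay stale metadata into the reused block.
	 * Per the comment above jbd2_journal_revoke(), the call also
	 * forgets bh and drops one reference on it.
	 */
	err = jbd2_journal_revoke(handle, blocknr, bh);
	if (err)
		return err;

	/* The caller would now clear the block in its bitmaps. */
	return 0;
}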
linux-master
fs/jbd2/revoke.c
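/*
 * Illustrative sketch (editor's addition, not taken from the kernel
 * sources): the recovery-time use of the revoke table described at the
 * end of fs/jbd2/revoke.c above.  During the revoke-scan pass each
 * revoke entry found in the log is recorded with
 * jbd2_journal_set_revoke(); during replay every candidate block is
 * filtered through jbd2_journal_test_revoke(); once recovery finishes
 * the table is emptied with jbd2_journal_clear_revoke().  The helper
 * names below are hypothetical; the real pass structure lives in
 * fs/jbd2/recovery.c.
 */
#include <linux/types.h>
#include <linux/jbd2.h>

/* Hypothetical: record one revoke entry found while scanning the log. */
static int example_scan_revoke_entry(journal_t *journal,
				     unsigned long long blocknr, tid_t tid)
{
	return jbd2_journal_set_revoke(journal, blocknr, tid);
}

/* Hypothetical: may a block logged in transaction tid be replayed? */
static bool example_should_replay(journal_t *journal,
				  unsigned long long blocknr, tid_t tid)
{
	/* A hit means the block was revoked in this or a later transaction. */
	return !jbd2_journal_test_revoke(journal, blocknr, tid);
}

/* Hypothetical: recovery is done, reset the table for the running fs. */
static void example_finish_recovery(journal_t *journal)
{
	jbd2_journal_clear_revoke(journal);
}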
// SPDX-License-Identifier: GPL-2.0+ /* * linux/fs/jbd2/journal.c * * Written by Stephen C. Tweedie <[email protected]>, 1998 * * Copyright 1998 Red Hat corp --- All Rights Reserved * * Generic filesystem journal-writing code; part of the ext2fs * journaling system. * * This file manages journals: areas of disk reserved for logging * transactional updates. This includes the kernel journaling thread * which is responsible for scheduling updates to the log. * * We do not actually manage the physical storage of the journal in this * file: that is left to a per-journal policy function, which allows us * to store the journal within a filesystem-specified area for ext2 * journaling (ext2 can use a reserved inode for storing the log). */ #include <linux/module.h> #include <linux/time.h> #include <linux/fs.h> #include <linux/jbd2.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/freezer.h> #include <linux/pagemap.h> #include <linux/kthread.h> #include <linux/poison.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/math64.h> #include <linux/hash.h> #include <linux/log2.h> #include <linux/vmalloc.h> #include <linux/backing-dev.h> #include <linux/bitops.h> #include <linux/ratelimit.h> #include <linux/sched/mm.h> #define CREATE_TRACE_POINTS #include <trace/events/jbd2.h> #include <linux/uaccess.h> #include <asm/page.h> #ifdef CONFIG_JBD2_DEBUG static ushort jbd2_journal_enable_debug __read_mostly; module_param_named(jbd2_debug, jbd2_journal_enable_debug, ushort, 0644); MODULE_PARM_DESC(jbd2_debug, "Debugging level for jbd2"); #endif EXPORT_SYMBOL(jbd2_journal_extend); EXPORT_SYMBOL(jbd2_journal_stop); EXPORT_SYMBOL(jbd2_journal_lock_updates); EXPORT_SYMBOL(jbd2_journal_unlock_updates); EXPORT_SYMBOL(jbd2_journal_get_write_access); EXPORT_SYMBOL(jbd2_journal_get_create_access); EXPORT_SYMBOL(jbd2_journal_get_undo_access); EXPORT_SYMBOL(jbd2_journal_set_triggers); EXPORT_SYMBOL(jbd2_journal_dirty_metadata); EXPORT_SYMBOL(jbd2_journal_forget); EXPORT_SYMBOL(jbd2_journal_flush); EXPORT_SYMBOL(jbd2_journal_revoke); EXPORT_SYMBOL(jbd2_journal_init_dev); EXPORT_SYMBOL(jbd2_journal_init_inode); EXPORT_SYMBOL(jbd2_journal_check_used_features); EXPORT_SYMBOL(jbd2_journal_check_available_features); EXPORT_SYMBOL(jbd2_journal_set_features); EXPORT_SYMBOL(jbd2_journal_load); EXPORT_SYMBOL(jbd2_journal_destroy); EXPORT_SYMBOL(jbd2_journal_abort); EXPORT_SYMBOL(jbd2_journal_errno); EXPORT_SYMBOL(jbd2_journal_ack_err); EXPORT_SYMBOL(jbd2_journal_clear_err); EXPORT_SYMBOL(jbd2_log_wait_commit); EXPORT_SYMBOL(jbd2_journal_start_commit); EXPORT_SYMBOL(jbd2_journal_force_commit_nested); EXPORT_SYMBOL(jbd2_journal_wipe); EXPORT_SYMBOL(jbd2_journal_blocks_per_page); EXPORT_SYMBOL(jbd2_journal_invalidate_folio); EXPORT_SYMBOL(jbd2_journal_try_to_free_buffers); EXPORT_SYMBOL(jbd2_journal_force_commit); EXPORT_SYMBOL(jbd2_journal_inode_ranged_write); EXPORT_SYMBOL(jbd2_journal_inode_ranged_wait); EXPORT_SYMBOL(jbd2_journal_finish_inode_data_buffers); EXPORT_SYMBOL(jbd2_journal_init_jbd_inode); EXPORT_SYMBOL(jbd2_journal_release_jbd_inode); EXPORT_SYMBOL(jbd2_journal_begin_ordered_truncate); EXPORT_SYMBOL(jbd2_inode_cache); static int jbd2_journal_create_slab(size_t slab_size); #ifdef CONFIG_JBD2_DEBUG void __jbd2_debug(int level, const char *file, const char *func, unsigned int line, const char *fmt, ...) 
{ struct va_format vaf; va_list args; if (level > jbd2_journal_enable_debug) return; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk(KERN_DEBUG "%s: (%s, %u): %pV", file, func, line, &vaf); va_end(args); } #endif /* Checksumming functions */ static __be32 jbd2_superblock_csum(journal_t *j, journal_superblock_t *sb) { __u32 csum; __be32 old_csum; old_csum = sb->s_checksum; sb->s_checksum = 0; csum = jbd2_chksum(j, ~0, (char *)sb, sizeof(journal_superblock_t)); sb->s_checksum = old_csum; return cpu_to_be32(csum); } /* * Helper function used to manage commit timeouts */ static void commit_timeout(struct timer_list *t) { journal_t *journal = from_timer(journal, t, j_commit_timer); wake_up_process(journal->j_task); } /* * kjournald2: The main thread function used to manage a logging device * journal. * * This kernel thread is responsible for two things: * * 1) COMMIT: Every so often we need to commit the current state of the * filesystem to disk. The journal thread is responsible for writing * all of the metadata buffers to disk. If a fast commit is ongoing, * the journal thread waits until it's done and then continues from * there on. * * 2) CHECKPOINT: We cannot reuse a used section of the log file until all * of the data in that part of the log has been rewritten elsewhere on * the disk. Flushing these old buffers to reclaim space in the log is * known as checkpointing, and this thread is responsible for that job. */ static int kjournald2(void *arg) { journal_t *journal = arg; transaction_t *transaction; /* * Set up an interval timer which can be used to trigger a commit wakeup * after the commit interval expires */ timer_setup(&journal->j_commit_timer, commit_timeout, 0); set_freezable(); /* Record that the journal thread is running */ journal->j_task = current; wake_up(&journal->j_wait_done_commit); /* * Make sure that no allocations from this kernel thread will ever * recurse to the fs layer because we are responsible for the * transaction commit and any fs involvement might get stuck waiting for * the transaction commit. */ memalloc_nofs_save(); /* * And now, wait forever for commit wakeup events. */ write_lock(&journal->j_state_lock); loop: if (journal->j_flags & JBD2_UNMOUNT) goto end_loop; jbd2_debug(1, "commit_sequence=%u, commit_request=%u\n", journal->j_commit_sequence, journal->j_commit_request); if (journal->j_commit_sequence != journal->j_commit_request) { jbd2_debug(1, "OK, requests differ\n"); write_unlock(&journal->j_state_lock); del_timer_sync(&journal->j_commit_timer); jbd2_journal_commit_transaction(journal); write_lock(&journal->j_state_lock); goto loop; } wake_up(&journal->j_wait_done_commit); if (freezing(current)) { /* * The simpler the better. Flushing journal isn't a * good idea, because that depends on threads that may * be already stopped. 
*/ jbd2_debug(1, "Now suspending kjournald2\n"); write_unlock(&journal->j_state_lock); try_to_freeze(); write_lock(&journal->j_state_lock); } else { /* * We assume on resume that commits are already there, * so we don't sleep */ DEFINE_WAIT(wait); int should_sleep = 1; prepare_to_wait(&journal->j_wait_commit, &wait, TASK_INTERRUPTIBLE); if (journal->j_commit_sequence != journal->j_commit_request) should_sleep = 0; transaction = journal->j_running_transaction; if (transaction && time_after_eq(jiffies, transaction->t_expires)) should_sleep = 0; if (journal->j_flags & JBD2_UNMOUNT) should_sleep = 0; if (should_sleep) { write_unlock(&journal->j_state_lock); schedule(); write_lock(&journal->j_state_lock); } finish_wait(&journal->j_wait_commit, &wait); } jbd2_debug(1, "kjournald2 wakes\n"); /* * Were we woken up by a commit wakeup event? */ transaction = journal->j_running_transaction; if (transaction && time_after_eq(jiffies, transaction->t_expires)) { journal->j_commit_request = transaction->t_tid; jbd2_debug(1, "woke because of timeout\n"); } goto loop; end_loop: del_timer_sync(&journal->j_commit_timer); journal->j_task = NULL; wake_up(&journal->j_wait_done_commit); jbd2_debug(1, "Journal thread exiting.\n"); write_unlock(&journal->j_state_lock); return 0; } static int jbd2_journal_start_thread(journal_t *journal) { struct task_struct *t; t = kthread_run(kjournald2, journal, "jbd2/%s", journal->j_devname); if (IS_ERR(t)) return PTR_ERR(t); wait_event(journal->j_wait_done_commit, journal->j_task != NULL); return 0; } static void journal_kill_thread(journal_t *journal) { write_lock(&journal->j_state_lock); journal->j_flags |= JBD2_UNMOUNT; while (journal->j_task) { write_unlock(&journal->j_state_lock); wake_up(&journal->j_wait_commit); wait_event(journal->j_wait_done_commit, journal->j_task == NULL); write_lock(&journal->j_state_lock); } write_unlock(&journal->j_state_lock); } /* * jbd2_journal_write_metadata_buffer: write a metadata buffer to the journal. * * Writes a metadata buffer to a given disk block. The actual IO is not * performed but a new buffer_head is constructed which labels the data * to be written with the correct destination disk block. * * Any magic-number escaping which needs to be done will cause a * copy-out here. If the buffer happens to start with the * JBD2_MAGIC_NUMBER, then we can't write it to the log directly: the * magic number is only written to the log for descriptor blocks. In * this case, we copy the data and replace the first word with 0, and we * return a result code which indicates that this buffer needs to be * marked as an escaped buffer in the corresponding log descriptor * block. The missing word can then be restored when the block is read * during recovery. * * If the source buffer has already been modified by a new transaction * since we took the last commit snapshot, we use the frozen copy of * that data for IO. If we end up using the existing buffer_head's data * for the write, then we have to make sure nobody modifies it while the * IO is in progress. do_get_write_access() handles this. * * The function returns a pointer to the buffer_head to be used for IO. 
* * * Return value: * <0: Error * >=0: Finished OK * * On success: * Bit 0 set == escape performed on the data * Bit 1 set == buffer copy-out performed (kfree the data after IO) */ int jbd2_journal_write_metadata_buffer(transaction_t *transaction, struct journal_head *jh_in, struct buffer_head **bh_out, sector_t blocknr) { int need_copy_out = 0; int done_copy_out = 0; int do_escape = 0; char *mapped_data; struct buffer_head *new_bh; struct folio *new_folio; unsigned int new_offset; struct buffer_head *bh_in = jh2bh(jh_in); journal_t *journal = transaction->t_journal; /* * The buffer really shouldn't be locked: only the current committing * transaction is allowed to write it, so nobody else is allowed * to do any IO. * * akpm: except if we're journalling data, and write() output is * also part of a shared mapping, and another thread has * decided to launch a writepage() against this buffer. */ J_ASSERT_BH(bh_in, buffer_jbddirty(bh_in)); new_bh = alloc_buffer_head(GFP_NOFS|__GFP_NOFAIL); /* keep subsequent assertions sane */ atomic_set(&new_bh->b_count, 1); spin_lock(&jh_in->b_state_lock); repeat: /* * If a new transaction has already done a buffer copy-out, then * we use that version of the data for the commit. */ if (jh_in->b_frozen_data) { done_copy_out = 1; new_folio = virt_to_folio(jh_in->b_frozen_data); new_offset = offset_in_folio(new_folio, jh_in->b_frozen_data); } else { new_folio = jh2bh(jh_in)->b_folio; new_offset = offset_in_folio(new_folio, jh2bh(jh_in)->b_data); } mapped_data = kmap_local_folio(new_folio, new_offset); /* * Fire data frozen trigger if data already wasn't frozen. Do this * before checking for escaping, as the trigger may modify the magic * offset. If a copy-out happens afterwards, it will have the correct * data in the buffer. */ if (!done_copy_out) jbd2_buffer_frozen_trigger(jh_in, mapped_data, jh_in->b_triggers); /* * Check for escaping */ if (*((__be32 *)mapped_data) == cpu_to_be32(JBD2_MAGIC_NUMBER)) { need_copy_out = 1; do_escape = 1; } kunmap_local(mapped_data); /* * Do we need to do a data copy? */ if (need_copy_out && !done_copy_out) { char *tmp; spin_unlock(&jh_in->b_state_lock); tmp = jbd2_alloc(bh_in->b_size, GFP_NOFS); if (!tmp) { brelse(new_bh); return -ENOMEM; } spin_lock(&jh_in->b_state_lock); if (jh_in->b_frozen_data) { jbd2_free(tmp, bh_in->b_size); goto repeat; } jh_in->b_frozen_data = tmp; memcpy_from_folio(tmp, new_folio, new_offset, bh_in->b_size); new_folio = virt_to_folio(tmp); new_offset = offset_in_folio(new_folio, tmp); done_copy_out = 1; /* * This isn't strictly necessary, as we're using frozen * data for the escaping, but it keeps consistency with * b_frozen_data usage. */ jh_in->b_frozen_triggers = jh_in->b_triggers; } /* * Did we need to do an escaping? Now we've done all the * copying, we can finally do so. */ if (do_escape) { mapped_data = kmap_local_folio(new_folio, new_offset); *((unsigned int *)mapped_data) = 0; kunmap_local(mapped_data); } folio_set_bh(new_bh, new_folio, new_offset); new_bh->b_size = bh_in->b_size; new_bh->b_bdev = journal->j_dev; new_bh->b_blocknr = blocknr; new_bh->b_private = bh_in; set_buffer_mapped(new_bh); set_buffer_dirty(new_bh); *bh_out = new_bh; /* * The to-be-written buffer needs to get moved to the io queue, * and the original buffer whose contents we are shadowing or * copying is moved to the transaction's shadow queue. 
*/ JBUFFER_TRACE(jh_in, "file as BJ_Shadow"); spin_lock(&journal->j_list_lock); __jbd2_journal_file_buffer(jh_in, transaction, BJ_Shadow); spin_unlock(&journal->j_list_lock); set_buffer_shadow(bh_in); spin_unlock(&jh_in->b_state_lock); return do_escape | (done_copy_out << 1); } /* * Allocation code for the journal file. Manage the space left in the * journal, so that we can begin checkpointing when appropriate. */ /* * Called with j_state_lock locked for writing. * Returns true if a transaction commit was started. */ static int __jbd2_log_start_commit(journal_t *journal, tid_t target) { /* Return if the txn has already requested to be committed */ if (journal->j_commit_request == target) return 0; /* * The only transaction we can possibly wait upon is the * currently running transaction (if it exists). Otherwise, * the target tid must be an old one. */ if (journal->j_running_transaction && journal->j_running_transaction->t_tid == target) { /* * We want a new commit: OK, mark the request and wakeup the * commit thread. We do _not_ do the commit ourselves. */ journal->j_commit_request = target; jbd2_debug(1, "JBD2: requesting commit %u/%u\n", journal->j_commit_request, journal->j_commit_sequence); journal->j_running_transaction->t_requested = jiffies; wake_up(&journal->j_wait_commit); return 1; } else if (!tid_geq(journal->j_commit_request, target)) /* This should never happen, but if it does, preserve the evidence before kjournald goes into a loop and increments j_commit_sequence beyond all recognition. */ WARN_ONCE(1, "JBD2: bad log_start_commit: %u %u %u %u\n", journal->j_commit_request, journal->j_commit_sequence, target, journal->j_running_transaction ? journal->j_running_transaction->t_tid : 0); return 0; } int jbd2_log_start_commit(journal_t *journal, tid_t tid) { int ret; write_lock(&journal->j_state_lock); ret = __jbd2_log_start_commit(journal, tid); write_unlock(&journal->j_state_lock); return ret; } /* * Force and wait any uncommitted transactions. We can only force the running * transaction if we don't have an active handle, otherwise, we will deadlock. * Returns: <0 in case of error, * 0 if nothing to commit, * 1 if transaction was successfully committed. */ static int __jbd2_journal_force_commit(journal_t *journal) { transaction_t *transaction = NULL; tid_t tid; int need_to_start = 0, ret = 0; read_lock(&journal->j_state_lock); if (journal->j_running_transaction && !current->journal_info) { transaction = journal->j_running_transaction; if (!tid_geq(journal->j_commit_request, transaction->t_tid)) need_to_start = 1; } else if (journal->j_committing_transaction) transaction = journal->j_committing_transaction; if (!transaction) { /* Nothing to commit */ read_unlock(&journal->j_state_lock); return 0; } tid = transaction->t_tid; read_unlock(&journal->j_state_lock); if (need_to_start) jbd2_log_start_commit(journal, tid); ret = jbd2_log_wait_commit(journal, tid); if (!ret) ret = 1; return ret; } /** * jbd2_journal_force_commit_nested - Force and wait upon a commit if the * calling process is not within transaction. * * @journal: journal to force * Returns true if progress was made. * * This is used for forcing out undo-protected data which contains * bitmaps, when the fs is running out of space. */ int jbd2_journal_force_commit_nested(journal_t *journal) { int ret; ret = __jbd2_journal_force_commit(journal); return ret > 0; } /** * jbd2_journal_force_commit() - force any uncommitted transactions * @journal: journal to force * * Caller want unconditional commit. 
We can only force the running transaction * if we don't have an active handle, otherwise, we will deadlock. */ int jbd2_journal_force_commit(journal_t *journal) { int ret; J_ASSERT(!current->journal_info); ret = __jbd2_journal_force_commit(journal); if (ret > 0) ret = 0; return ret; } /* * Start a commit of the current running transaction (if any). Returns true * if a transaction is going to be committed (or is currently already * committing), and fills its tid in at *ptid */ int jbd2_journal_start_commit(journal_t *journal, tid_t *ptid) { int ret = 0; write_lock(&journal->j_state_lock); if (journal->j_running_transaction) { tid_t tid = journal->j_running_transaction->t_tid; __jbd2_log_start_commit(journal, tid); /* There's a running transaction and we've just made sure * it's commit has been scheduled. */ if (ptid) *ptid = tid; ret = 1; } else if (journal->j_committing_transaction) { /* * If commit has been started, then we have to wait for * completion of that transaction. */ if (ptid) *ptid = journal->j_committing_transaction->t_tid; ret = 1; } write_unlock(&journal->j_state_lock); return ret; } /* * Return 1 if a given transaction has not yet sent barrier request * connected with a transaction commit. If 0 is returned, transaction * may or may not have sent the barrier. Used to avoid sending barrier * twice in common cases. */ int jbd2_trans_will_send_data_barrier(journal_t *journal, tid_t tid) { int ret = 0; transaction_t *commit_trans; if (!(journal->j_flags & JBD2_BARRIER)) return 0; read_lock(&journal->j_state_lock); /* Transaction already committed? */ if (tid_geq(journal->j_commit_sequence, tid)) goto out; commit_trans = journal->j_committing_transaction; if (!commit_trans || commit_trans->t_tid != tid) { ret = 1; goto out; } /* * Transaction is being committed and we already proceeded to * submitting a flush to fs partition? */ if (journal->j_fs_dev != journal->j_dev) { if (!commit_trans->t_need_data_flush || commit_trans->t_state >= T_COMMIT_DFLUSH) goto out; } else { if (commit_trans->t_state >= T_COMMIT_JFLUSH) goto out; } ret = 1; out: read_unlock(&journal->j_state_lock); return ret; } EXPORT_SYMBOL(jbd2_trans_will_send_data_barrier); /* * Wait for a specified commit to complete. * The caller may not hold the journal lock. */ int jbd2_log_wait_commit(journal_t *journal, tid_t tid) { int err = 0; read_lock(&journal->j_state_lock); #ifdef CONFIG_PROVE_LOCKING /* * Some callers make sure transaction is already committing and in that * case we cannot block on open handles anymore. So don't warn in that * case. */ if (tid_gt(tid, journal->j_commit_sequence) && (!journal->j_committing_transaction || journal->j_committing_transaction->t_tid != tid)) { read_unlock(&journal->j_state_lock); jbd2_might_wait_for_commit(journal); read_lock(&journal->j_state_lock); } #endif #ifdef CONFIG_JBD2_DEBUG if (!tid_geq(journal->j_commit_request, tid)) { printk(KERN_ERR "%s: error: j_commit_request=%u, tid=%u\n", __func__, journal->j_commit_request, tid); } #endif while (tid_gt(tid, journal->j_commit_sequence)) { jbd2_debug(1, "JBD2: want %u, j_commit_sequence=%u\n", tid, journal->j_commit_sequence); read_unlock(&journal->j_state_lock); wake_up(&journal->j_wait_commit); wait_event(journal->j_wait_done_commit, !tid_gt(tid, journal->j_commit_sequence)); read_lock(&journal->j_state_lock); } read_unlock(&journal->j_state_lock); if (unlikely(is_journal_aborted(journal))) err = -EIO; return err; } /* * Start a fast commit. If there's an ongoing fast or full commit wait for * it to complete. 
Returns 0 if a new fast commit was started. Returns -EALREADY * if a fast commit is not needed, either because there's already a commit * going on or this tid has already been committed. Returns -EINVAL if no jbd2 * commit has yet been performed. */ int jbd2_fc_begin_commit(journal_t *journal, tid_t tid) { if (unlikely(is_journal_aborted(journal))) return -EIO; /* * Fast commits only allowed if at least one full commit has * been processed. */ if (!journal->j_stats.ts_tid) return -EINVAL; write_lock(&journal->j_state_lock); if (tid <= journal->j_commit_sequence) { write_unlock(&journal->j_state_lock); return -EALREADY; } if (journal->j_flags & JBD2_FULL_COMMIT_ONGOING || (journal->j_flags & JBD2_FAST_COMMIT_ONGOING)) { DEFINE_WAIT(wait); prepare_to_wait(&journal->j_fc_wait, &wait, TASK_UNINTERRUPTIBLE); write_unlock(&journal->j_state_lock); schedule(); finish_wait(&journal->j_fc_wait, &wait); return -EALREADY; } journal->j_flags |= JBD2_FAST_COMMIT_ONGOING; write_unlock(&journal->j_state_lock); jbd2_journal_lock_updates(journal); return 0; } EXPORT_SYMBOL(jbd2_fc_begin_commit); /* * Stop a fast commit. If fallback is set, this function starts commit of * TID tid before any other fast commit can start. */ static int __jbd2_fc_end_commit(journal_t *journal, tid_t tid, bool fallback) { jbd2_journal_unlock_updates(journal); if (journal->j_fc_cleanup_callback) journal->j_fc_cleanup_callback(journal, 0, tid); write_lock(&journal->j_state_lock); journal->j_flags &= ~JBD2_FAST_COMMIT_ONGOING; if (fallback) journal->j_flags |= JBD2_FULL_COMMIT_ONGOING; write_unlock(&journal->j_state_lock); wake_up(&journal->j_fc_wait); if (fallback) return jbd2_complete_transaction(journal, tid); return 0; } int jbd2_fc_end_commit(journal_t *journal) { return __jbd2_fc_end_commit(journal, 0, false); } EXPORT_SYMBOL(jbd2_fc_end_commit); int jbd2_fc_end_commit_fallback(journal_t *journal) { tid_t tid; read_lock(&journal->j_state_lock); tid = journal->j_running_transaction ? journal->j_running_transaction->t_tid : 0; read_unlock(&journal->j_state_lock); return __jbd2_fc_end_commit(journal, tid, true); } EXPORT_SYMBOL(jbd2_fc_end_commit_fallback); /* Return 1 when transaction with given tid has already committed. */ int jbd2_transaction_committed(journal_t *journal, tid_t tid) { int ret = 1; read_lock(&journal->j_state_lock); if (journal->j_running_transaction && journal->j_running_transaction->t_tid == tid) ret = 0; if (journal->j_committing_transaction && journal->j_committing_transaction->t_tid == tid) ret = 0; read_unlock(&journal->j_state_lock); return ret; } EXPORT_SYMBOL(jbd2_transaction_committed); /* * When this function returns the transaction corresponding to tid * will be completed. If the transaction is currently running, start * committing that transaction before waiting for it to complete. If * the transaction id is stale, it is by definition already completed, * so just return SUCCESS. 
*/ int jbd2_complete_transaction(journal_t *journal, tid_t tid) { int need_to_wait = 1; read_lock(&journal->j_state_lock); if (journal->j_running_transaction && journal->j_running_transaction->t_tid == tid) { if (journal->j_commit_request != tid) { /* transaction not yet started, so request it */ read_unlock(&journal->j_state_lock); jbd2_log_start_commit(journal, tid); goto wait_commit; } } else if (!(journal->j_committing_transaction && journal->j_committing_transaction->t_tid == tid)) need_to_wait = 0; read_unlock(&journal->j_state_lock); if (!need_to_wait) return 0; wait_commit: return jbd2_log_wait_commit(journal, tid); } EXPORT_SYMBOL(jbd2_complete_transaction); /* * Log buffer allocation routines: */ int jbd2_journal_next_log_block(journal_t *journal, unsigned long long *retp) { unsigned long blocknr; write_lock(&journal->j_state_lock); J_ASSERT(journal->j_free > 1); blocknr = journal->j_head; journal->j_head++; journal->j_free--; if (journal->j_head == journal->j_last) journal->j_head = journal->j_first; write_unlock(&journal->j_state_lock); return jbd2_journal_bmap(journal, blocknr, retp); } /* Map one fast commit buffer for use by the file system */ int jbd2_fc_get_buf(journal_t *journal, struct buffer_head **bh_out) { unsigned long long pblock; unsigned long blocknr; int ret = 0; struct buffer_head *bh; int fc_off; *bh_out = NULL; if (journal->j_fc_off + journal->j_fc_first < journal->j_fc_last) { fc_off = journal->j_fc_off; blocknr = journal->j_fc_first + fc_off; journal->j_fc_off++; } else { ret = -EINVAL; } if (ret) return ret; ret = jbd2_journal_bmap(journal, blocknr, &pblock); if (ret) return ret; bh = __getblk(journal->j_dev, pblock, journal->j_blocksize); if (!bh) return -ENOMEM; journal->j_fc_wbuf[fc_off] = bh; *bh_out = bh; return 0; } EXPORT_SYMBOL(jbd2_fc_get_buf); /* * Wait on fast commit buffers that were allocated by jbd2_fc_get_buf * for completion. */ int jbd2_fc_wait_bufs(journal_t *journal, int num_blks) { struct buffer_head *bh; int i, j_fc_off; j_fc_off = journal->j_fc_off; /* * Wait in reverse order to minimize chances of us being woken up before * all IOs have completed */ for (i = j_fc_off - 1; i >= j_fc_off - num_blks; i--) { bh = journal->j_fc_wbuf[i]; wait_on_buffer(bh); /* * Update j_fc_off so jbd2_fc_release_bufs can release remain * buffer head. */ if (unlikely(!buffer_uptodate(bh))) { journal->j_fc_off = i + 1; return -EIO; } put_bh(bh); journal->j_fc_wbuf[i] = NULL; } return 0; } EXPORT_SYMBOL(jbd2_fc_wait_bufs); int jbd2_fc_release_bufs(journal_t *journal) { struct buffer_head *bh; int i, j_fc_off; j_fc_off = journal->j_fc_off; for (i = j_fc_off - 1; i >= 0; i--) { bh = journal->j_fc_wbuf[i]; if (!bh) break; put_bh(bh); journal->j_fc_wbuf[i] = NULL; } return 0; } EXPORT_SYMBOL(jbd2_fc_release_bufs); /* * Conversion of logical to physical block numbers for the journal * * On external journals the journal blocks are identity-mapped, so * this is a no-op. If needed, we can use j_blk_offset - everything is * ready. 
*/ int jbd2_journal_bmap(journal_t *journal, unsigned long blocknr, unsigned long long *retp) { int err = 0; unsigned long long ret; sector_t block = blocknr; if (journal->j_bmap) { err = journal->j_bmap(journal, &block); if (err == 0) *retp = block; } else if (journal->j_inode) { ret = bmap(journal->j_inode, &block); if (ret || !block) { printk(KERN_ALERT "%s: journal block not found " "at offset %lu on %s\n", __func__, blocknr, journal->j_devname); err = -EIO; jbd2_journal_abort(journal, err); } else { *retp = block; } } else { *retp = blocknr; /* +journal->j_blk_offset */ } return err; } /* * We play buffer_head aliasing tricks to write data/metadata blocks to * the journal without copying their contents, but for journal * descriptor blocks we do need to generate bona fide buffers. * * After the caller of jbd2_journal_get_descriptor_buffer() has finished modifying * the buffer's contents they really should run flush_dcache_page(bh->b_page). * But we don't bother doing that, so there will be coherency problems with * mmaps of blockdevs which hold live JBD-controlled filesystems. */ struct buffer_head * jbd2_journal_get_descriptor_buffer(transaction_t *transaction, int type) { journal_t *journal = transaction->t_journal; struct buffer_head *bh; unsigned long long blocknr; journal_header_t *header; int err; err = jbd2_journal_next_log_block(journal, &blocknr); if (err) return NULL; bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize); if (!bh) return NULL; atomic_dec(&transaction->t_outstanding_credits); lock_buffer(bh); memset(bh->b_data, 0, journal->j_blocksize); header = (journal_header_t *)bh->b_data; header->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER); header->h_blocktype = cpu_to_be32(type); header->h_sequence = cpu_to_be32(transaction->t_tid); set_buffer_uptodate(bh); unlock_buffer(bh); BUFFER_TRACE(bh, "return this buffer"); return bh; } void jbd2_descriptor_block_csum_set(journal_t *j, struct buffer_head *bh) { struct jbd2_journal_block_tail *tail; __u32 csum; if (!jbd2_journal_has_csum_v2or3(j)) return; tail = (struct jbd2_journal_block_tail *)(bh->b_data + j->j_blocksize - sizeof(struct jbd2_journal_block_tail)); tail->t_checksum = 0; csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize); tail->t_checksum = cpu_to_be32(csum); } /* * Return tid of the oldest transaction in the journal and block in the journal * where the transaction starts. * * If the journal is now empty, return which will be the next transaction ID * we will write and where will that transaction start. * * The return value is 0 if journal tail cannot be pushed any further, 1 if * it can. */ int jbd2_journal_get_log_tail(journal_t *journal, tid_t *tid, unsigned long *block) { transaction_t *transaction; int ret; read_lock(&journal->j_state_lock); spin_lock(&journal->j_list_lock); transaction = journal->j_checkpoint_transactions; if (transaction) { *tid = transaction->t_tid; *block = transaction->t_log_start; } else if ((transaction = journal->j_committing_transaction) != NULL) { *tid = transaction->t_tid; *block = transaction->t_log_start; } else if ((transaction = journal->j_running_transaction) != NULL) { *tid = transaction->t_tid; *block = journal->j_head; } else { *tid = journal->j_transaction_sequence; *block = journal->j_head; } ret = tid_gt(*tid, journal->j_tail_sequence); spin_unlock(&journal->j_list_lock); read_unlock(&journal->j_state_lock); return ret; } /* * Update information in journal structure and in on disk journal superblock * about log tail. 
This function does not check whether information passed in * really pushes log tail further. It's responsibility of the caller to make * sure provided log tail information is valid (e.g. by holding * j_checkpoint_mutex all the time between computing log tail and calling this * function as is the case with jbd2_cleanup_journal_tail()). * * Requires j_checkpoint_mutex */ int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block) { unsigned long freed; int ret; BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex)); /* * We cannot afford for write to remain in drive's caches since as * soon as we update j_tail, next transaction can start reusing journal * space and if we lose sb update during power failure we'd replay * old transaction with possibly newly overwritten data. */ ret = jbd2_journal_update_sb_log_tail(journal, tid, block, REQ_SYNC | REQ_FUA); if (ret) goto out; write_lock(&journal->j_state_lock); freed = block - journal->j_tail; if (block < journal->j_tail) freed += journal->j_last - journal->j_first; trace_jbd2_update_log_tail(journal, tid, block, freed); jbd2_debug(1, "Cleaning journal tail from %u to %u (offset %lu), " "freeing %lu\n", journal->j_tail_sequence, tid, block, freed); journal->j_free += freed; journal->j_tail_sequence = tid; journal->j_tail = block; write_unlock(&journal->j_state_lock); out: return ret; } /* * This is a variation of __jbd2_update_log_tail which checks for validity of * provided log tail and locks j_checkpoint_mutex. So it is safe against races * with other threads updating log tail. */ void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block) { mutex_lock_io(&journal->j_checkpoint_mutex); if (tid_gt(tid, journal->j_tail_sequence)) __jbd2_update_log_tail(journal, tid, block); mutex_unlock(&journal->j_checkpoint_mutex); } struct jbd2_stats_proc_session { journal_t *journal; struct transaction_stats_s *stats; int start; int max; }; static void *jbd2_seq_info_start(struct seq_file *seq, loff_t *pos) { return *pos ? NULL : SEQ_START_TOKEN; } static void *jbd2_seq_info_next(struct seq_file *seq, void *v, loff_t *pos) { (*pos)++; return NULL; } static int jbd2_seq_info_show(struct seq_file *seq, void *v) { struct jbd2_stats_proc_session *s = seq->private; if (v != SEQ_START_TOKEN) return 0; seq_printf(seq, "%lu transactions (%lu requested), " "each up to %u blocks\n", s->stats->ts_tid, s->stats->ts_requested, s->journal->j_max_transaction_buffers); if (s->stats->ts_tid == 0) return 0; seq_printf(seq, "average: \n %ums waiting for transaction\n", jiffies_to_msecs(s->stats->run.rs_wait / s->stats->ts_tid)); seq_printf(seq, " %ums request delay\n", (s->stats->ts_requested == 0) ? 
0 : jiffies_to_msecs(s->stats->run.rs_request_delay / s->stats->ts_requested)); seq_printf(seq, " %ums running transaction\n", jiffies_to_msecs(s->stats->run.rs_running / s->stats->ts_tid)); seq_printf(seq, " %ums transaction was being locked\n", jiffies_to_msecs(s->stats->run.rs_locked / s->stats->ts_tid)); seq_printf(seq, " %ums flushing data (in ordered mode)\n", jiffies_to_msecs(s->stats->run.rs_flushing / s->stats->ts_tid)); seq_printf(seq, " %ums logging transaction\n", jiffies_to_msecs(s->stats->run.rs_logging / s->stats->ts_tid)); seq_printf(seq, " %lluus average transaction commit time\n", div_u64(s->journal->j_average_commit_time, 1000)); seq_printf(seq, " %lu handles per transaction\n", s->stats->run.rs_handle_count / s->stats->ts_tid); seq_printf(seq, " %lu blocks per transaction\n", s->stats->run.rs_blocks / s->stats->ts_tid); seq_printf(seq, " %lu logged blocks per transaction\n", s->stats->run.rs_blocks_logged / s->stats->ts_tid); return 0; } static void jbd2_seq_info_stop(struct seq_file *seq, void *v) { } static const struct seq_operations jbd2_seq_info_ops = { .start = jbd2_seq_info_start, .next = jbd2_seq_info_next, .stop = jbd2_seq_info_stop, .show = jbd2_seq_info_show, }; static int jbd2_seq_info_open(struct inode *inode, struct file *file) { journal_t *journal = pde_data(inode); struct jbd2_stats_proc_session *s; int rc, size; s = kmalloc(sizeof(*s), GFP_KERNEL); if (s == NULL) return -ENOMEM; size = sizeof(struct transaction_stats_s); s->stats = kmalloc(size, GFP_KERNEL); if (s->stats == NULL) { kfree(s); return -ENOMEM; } spin_lock(&journal->j_history_lock); memcpy(s->stats, &journal->j_stats, size); s->journal = journal; spin_unlock(&journal->j_history_lock); rc = seq_open(file, &jbd2_seq_info_ops); if (rc == 0) { struct seq_file *m = file->private_data; m->private = s; } else { kfree(s->stats); kfree(s); } return rc; } static int jbd2_seq_info_release(struct inode *inode, struct file *file) { struct seq_file *seq = file->private_data; struct jbd2_stats_proc_session *s = seq->private; kfree(s->stats); kfree(s); return seq_release(inode, file); } static const struct proc_ops jbd2_info_proc_ops = { .proc_open = jbd2_seq_info_open, .proc_read = seq_read, .proc_lseek = seq_lseek, .proc_release = jbd2_seq_info_release, }; static struct proc_dir_entry *proc_jbd2_stats; static void jbd2_stats_proc_init(journal_t *journal) { journal->j_proc_entry = proc_mkdir(journal->j_devname, proc_jbd2_stats); if (journal->j_proc_entry) { proc_create_data("info", S_IRUGO, journal->j_proc_entry, &jbd2_info_proc_ops, journal); } } static void jbd2_stats_proc_exit(journal_t *journal) { remove_proc_entry("info", journal->j_proc_entry); remove_proc_entry(journal->j_devname, proc_jbd2_stats); } /* Minimum size of descriptor tag */ static int jbd2_min_tag_size(void) { /* * Tag with 32-bit block numbers does not use last four bytes of the * structure */ return sizeof(journal_block_tag_t) - 4; } /** * jbd2_journal_shrink_scan() * @shrink: shrinker to work on * @sc: reclaim request to process * * Scan the checkpointed buffer on the checkpoint list and release the * journal_head. 
*/ static unsigned long jbd2_journal_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) { journal_t *journal = container_of(shrink, journal_t, j_shrinker); unsigned long nr_to_scan = sc->nr_to_scan; unsigned long nr_shrunk; unsigned long count; count = percpu_counter_read_positive(&journal->j_checkpoint_jh_count); trace_jbd2_shrink_scan_enter(journal, sc->nr_to_scan, count); nr_shrunk = jbd2_journal_shrink_checkpoint_list(journal, &nr_to_scan); count = percpu_counter_read_positive(&journal->j_checkpoint_jh_count); trace_jbd2_shrink_scan_exit(journal, nr_to_scan, nr_shrunk, count); return nr_shrunk; } /** * jbd2_journal_shrink_count() * @shrink: shrinker to work on * @sc: reclaim request to process * * Count the number of checkpoint buffers on the checkpoint list. */ static unsigned long jbd2_journal_shrink_count(struct shrinker *shrink, struct shrink_control *sc) { journal_t *journal = container_of(shrink, journal_t, j_shrinker); unsigned long count; count = percpu_counter_read_positive(&journal->j_checkpoint_jh_count); trace_jbd2_shrink_count(journal, sc->nr_to_scan, count); return count; } /* * If the journal init or create aborts, we need to mark the journal * superblock as being NULL to prevent the journal destroy from writing * back a bogus superblock. */ static void journal_fail_superblock(journal_t *journal) { struct buffer_head *bh = journal->j_sb_buffer; brelse(bh); journal->j_sb_buffer = NULL; } /* * Check the superblock for a given journal, performing initial * validation of the format. */ static int journal_check_superblock(journal_t *journal) { journal_superblock_t *sb = journal->j_superblock; int num_fc_blks; int err = -EINVAL; if (sb->s_header.h_magic != cpu_to_be32(JBD2_MAGIC_NUMBER) || sb->s_blocksize != cpu_to_be32(journal->j_blocksize)) { printk(KERN_WARNING "JBD2: no valid journal superblock found\n"); return err; } if (be32_to_cpu(sb->s_header.h_blocktype) != JBD2_SUPERBLOCK_V1 && be32_to_cpu(sb->s_header.h_blocktype) != JBD2_SUPERBLOCK_V2) { printk(KERN_WARNING "JBD2: unrecognised superblock format ID\n"); return err; } if (be32_to_cpu(sb->s_maxlen) > journal->j_total_len) { printk(KERN_WARNING "JBD2: journal file too short\n"); return err; } if (be32_to_cpu(sb->s_first) == 0 || be32_to_cpu(sb->s_first) >= journal->j_total_len) { printk(KERN_WARNING "JBD2: Invalid start block of journal: %u\n", be32_to_cpu(sb->s_first)); return err; } /* * If this is a V2 superblock, then we have to check the * features flags on it. */ if (!jbd2_format_support_feature(journal)) return 0; if ((sb->s_feature_ro_compat & ~cpu_to_be32(JBD2_KNOWN_ROCOMPAT_FEATURES)) || (sb->s_feature_incompat & ~cpu_to_be32(JBD2_KNOWN_INCOMPAT_FEATURES))) { printk(KERN_WARNING "JBD2: Unrecognised features on journal\n"); return err; } num_fc_blks = jbd2_has_feature_fast_commit(journal) ? jbd2_journal_get_num_fc_blks(sb) : 0; if (be32_to_cpu(sb->s_maxlen) < JBD2_MIN_JOURNAL_BLOCKS || be32_to_cpu(sb->s_maxlen) - JBD2_MIN_JOURNAL_BLOCKS < num_fc_blks) { printk(KERN_ERR "JBD2: journal file too short %u,%d\n", be32_to_cpu(sb->s_maxlen), num_fc_blks); return err; } if (jbd2_has_feature_csum2(journal) && jbd2_has_feature_csum3(journal)) { /* Can't have checksum v2 and v3 at the same time! */ printk(KERN_ERR "JBD2: Can't enable checksumming v2 and v3 " "at the same time!\n"); return err; } if (jbd2_journal_has_csum_v2or3_feature(journal) && jbd2_has_feature_checksum(journal)) { /* Can't have checksum v1 and v2 on at the same time! 
*/ printk(KERN_ERR "JBD2: Can't enable checksumming v1 and v2/3 " "at the same time!\n"); return err; } /* Load the checksum driver */ if (jbd2_journal_has_csum_v2or3_feature(journal)) { if (sb->s_checksum_type != JBD2_CRC32C_CHKSUM) { printk(KERN_ERR "JBD2: Unknown checksum type\n"); return err; } journal->j_chksum_driver = crypto_alloc_shash("crc32c", 0, 0); if (IS_ERR(journal->j_chksum_driver)) { printk(KERN_ERR "JBD2: Cannot load crc32c driver.\n"); err = PTR_ERR(journal->j_chksum_driver); journal->j_chksum_driver = NULL; return err; } /* Check superblock checksum */ if (sb->s_checksum != jbd2_superblock_csum(journal, sb)) { printk(KERN_ERR "JBD2: journal checksum error\n"); err = -EFSBADCRC; return err; } } return 0; } static int journal_revoke_records_per_block(journal_t *journal) { int record_size; int space = journal->j_blocksize - sizeof(jbd2_journal_revoke_header_t); if (jbd2_has_feature_64bit(journal)) record_size = 8; else record_size = 4; if (jbd2_journal_has_csum_v2or3(journal)) space -= sizeof(struct jbd2_journal_block_tail); return space / record_size; } /* * Load the on-disk journal superblock and read the key fields into the * journal_t. */ static int journal_load_superblock(journal_t *journal) { int err; struct buffer_head *bh; journal_superblock_t *sb; bh = getblk_unmovable(journal->j_dev, journal->j_blk_offset, journal->j_blocksize); if (bh) err = bh_read(bh, 0); if (!bh || err < 0) { pr_err("%s: Cannot read journal superblock\n", __func__); brelse(bh); return -EIO; } journal->j_sb_buffer = bh; sb = (journal_superblock_t *)bh->b_data; journal->j_superblock = sb; err = journal_check_superblock(journal); if (err) { journal_fail_superblock(journal); return err; } journal->j_tail_sequence = be32_to_cpu(sb->s_sequence); journal->j_tail = be32_to_cpu(sb->s_start); journal->j_first = be32_to_cpu(sb->s_first); journal->j_errno = be32_to_cpu(sb->s_errno); journal->j_last = be32_to_cpu(sb->s_maxlen); if (be32_to_cpu(sb->s_maxlen) < journal->j_total_len) journal->j_total_len = be32_to_cpu(sb->s_maxlen); /* Precompute checksum seed for all metadata */ if (jbd2_journal_has_csum_v2or3(journal)) journal->j_csum_seed = jbd2_chksum(journal, ~0, sb->s_uuid, sizeof(sb->s_uuid)); journal->j_revoke_records_per_block = journal_revoke_records_per_block(journal); if (jbd2_has_feature_fast_commit(journal)) { journal->j_fc_last = be32_to_cpu(sb->s_maxlen); journal->j_last = journal->j_fc_last - jbd2_journal_get_num_fc_blks(sb); journal->j_fc_first = journal->j_last + 1; journal->j_fc_off = 0; } return 0; } /* * Management for journal control blocks: functions to create and * destroy journal_t structures, and to initialise and read existing * journal blocks from disk. */ /* First: create and setup a journal_t object in memory. We initialise * very few fields yet: that has to wait until we have created the * journal structures from from scratch, or loaded them from disk. 
*/ static journal_t *journal_init_common(struct block_device *bdev, struct block_device *fs_dev, unsigned long long start, int len, int blocksize) { static struct lock_class_key jbd2_trans_commit_key; journal_t *journal; int err; int n; journal = kzalloc(sizeof(*journal), GFP_KERNEL); if (!journal) return ERR_PTR(-ENOMEM); journal->j_blocksize = blocksize; journal->j_dev = bdev; journal->j_fs_dev = fs_dev; journal->j_blk_offset = start; journal->j_total_len = len; err = journal_load_superblock(journal); if (err) goto err_cleanup; init_waitqueue_head(&journal->j_wait_transaction_locked); init_waitqueue_head(&journal->j_wait_done_commit); init_waitqueue_head(&journal->j_wait_commit); init_waitqueue_head(&journal->j_wait_updates); init_waitqueue_head(&journal->j_wait_reserved); init_waitqueue_head(&journal->j_fc_wait); mutex_init(&journal->j_abort_mutex); mutex_init(&journal->j_barrier); mutex_init(&journal->j_checkpoint_mutex); spin_lock_init(&journal->j_revoke_lock); spin_lock_init(&journal->j_list_lock); spin_lock_init(&journal->j_history_lock); rwlock_init(&journal->j_state_lock); journal->j_commit_interval = (HZ * JBD2_DEFAULT_MAX_COMMIT_AGE); journal->j_min_batch_time = 0; journal->j_max_batch_time = 15000; /* 15ms */ atomic_set(&journal->j_reserved_credits, 0); lockdep_init_map(&journal->j_trans_commit_map, "jbd2_handle", &jbd2_trans_commit_key, 0); /* The journal is marked for error until we succeed with recovery! */ journal->j_flags = JBD2_ABORT; /* Set up a default-sized revoke table for the new mount. */ err = jbd2_journal_init_revoke(journal, JOURNAL_REVOKE_DEFAULT_HASH); if (err) goto err_cleanup; /* * journal descriptor can store up to n blocks, we need enough * buffers to write out full descriptor block. */ err = -ENOMEM; n = journal->j_blocksize / jbd2_min_tag_size(); journal->j_wbufsize = n; journal->j_fc_wbuf = NULL; journal->j_wbuf = kmalloc_array(n, sizeof(struct buffer_head *), GFP_KERNEL); if (!journal->j_wbuf) goto err_cleanup; err = percpu_counter_init(&journal->j_checkpoint_jh_count, 0, GFP_KERNEL); if (err) goto err_cleanup; journal->j_shrink_transaction = NULL; journal->j_shrinker.scan_objects = jbd2_journal_shrink_scan; journal->j_shrinker.count_objects = jbd2_journal_shrink_count; journal->j_shrinker.seeks = DEFAULT_SEEKS; journal->j_shrinker.batch = journal->j_max_transaction_buffers; err = register_shrinker(&journal->j_shrinker, "jbd2-journal:(%u:%u)", MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev)); if (err) goto err_cleanup; return journal; err_cleanup: percpu_counter_destroy(&journal->j_checkpoint_jh_count); if (journal->j_chksum_driver) crypto_free_shash(journal->j_chksum_driver); kfree(journal->j_wbuf); jbd2_journal_destroy_revoke(journal); journal_fail_superblock(journal); kfree(journal); return ERR_PTR(err); } /* jbd2_journal_init_dev and jbd2_journal_init_inode: * * Create a journal structure assigned some fixed set of disk blocks to * the journal. We don't actually touch those disk blocks yet, but we * need to set up all of the mapping information to tell the journaling * system where the journal blocks are. * */ /** * journal_t * jbd2_journal_init_dev() - creates and initialises a journal structure * @bdev: Block device on which to create the journal * @fs_dev: Device which hold journalled filesystem for this journal. * @start: Block nr Start of journal. * @len: Length of the journal in blocks. 
* @blocksize: blocksize of journalling device * * Returns: a newly created journal_t * * * jbd2_journal_init_dev creates a journal which maps a fixed contiguous * range of blocks on an arbitrary block device. * */ journal_t *jbd2_journal_init_dev(struct block_device *bdev, struct block_device *fs_dev, unsigned long long start, int len, int blocksize) { journal_t *journal; journal = journal_init_common(bdev, fs_dev, start, len, blocksize); if (IS_ERR(journal)) return ERR_CAST(journal); snprintf(journal->j_devname, sizeof(journal->j_devname), "%pg", journal->j_dev); strreplace(journal->j_devname, '/', '!'); jbd2_stats_proc_init(journal); return journal; } /** * journal_t * jbd2_journal_init_inode () - creates a journal which maps to a inode. * @inode: An inode to create the journal in * * jbd2_journal_init_inode creates a journal which maps an on-disk inode as * the journal. The inode must exist already, must support bmap() and * must have all data blocks preallocated. */ journal_t *jbd2_journal_init_inode(struct inode *inode) { journal_t *journal; sector_t blocknr; int err = 0; blocknr = 0; err = bmap(inode, &blocknr); if (err || !blocknr) { pr_err("%s: Cannot locate journal superblock\n", __func__); return err ? ERR_PTR(err) : ERR_PTR(-EINVAL); } jbd2_debug(1, "JBD2: inode %s/%ld, size %lld, bits %d, blksize %ld\n", inode->i_sb->s_id, inode->i_ino, (long long) inode->i_size, inode->i_sb->s_blocksize_bits, inode->i_sb->s_blocksize); journal = journal_init_common(inode->i_sb->s_bdev, inode->i_sb->s_bdev, blocknr, inode->i_size >> inode->i_sb->s_blocksize_bits, inode->i_sb->s_blocksize); if (IS_ERR(journal)) return ERR_CAST(journal); journal->j_inode = inode; snprintf(journal->j_devname, sizeof(journal->j_devname), "%pg-%lu", journal->j_dev, journal->j_inode->i_ino); strreplace(journal->j_devname, '/', '!'); jbd2_stats_proc_init(journal); return journal; } /* * Given a journal_t structure, initialise the various fields for * startup of a new journaling session. We use this both when creating * a journal, and after recovering an old journal to reset it for * subsequent use. */ static int journal_reset(journal_t *journal) { journal_superblock_t *sb = journal->j_superblock; unsigned long long first, last; first = be32_to_cpu(sb->s_first); last = be32_to_cpu(sb->s_maxlen); if (first + JBD2_MIN_JOURNAL_BLOCKS > last + 1) { printk(KERN_ERR "JBD2: Journal too short (blocks %llu-%llu).\n", first, last); journal_fail_superblock(journal); return -EINVAL; } journal->j_first = first; journal->j_last = last; if (journal->j_head != 0 && journal->j_flags & JBD2_CYCLE_RECORD) { /* * Disable the cycled recording mode if the journal head block * number is not correct. */ if (journal->j_head < first || journal->j_head >= last) { printk(KERN_WARNING "JBD2: Incorrect Journal head block %lu, " "disable journal_cycle_record\n", journal->j_head); journal->j_head = journal->j_first; } } else { journal->j_head = journal->j_first; } journal->j_tail = journal->j_head; journal->j_free = journal->j_last - journal->j_first; journal->j_tail_sequence = journal->j_transaction_sequence; journal->j_commit_sequence = journal->j_transaction_sequence - 1; journal->j_commit_request = journal->j_commit_sequence; journal->j_max_transaction_buffers = jbd2_journal_get_max_txn_bufs(journal); /* * Now that journal recovery is done, turn fast commits off here. This * way, if fast commit was enabled before the crash but if now FS has * disabled it, we don't enable fast commits. 
*/ jbd2_clear_feature_fast_commit(journal); /* * As a special case, if the on-disk copy is already marked as needing * no recovery (s_start == 0), then we can safely defer the superblock * update until the next commit by setting JBD2_FLUSHED. This avoids * attempting a write to a potential-readonly device. */ if (sb->s_start == 0) { jbd2_debug(1, "JBD2: Skipping superblock update on recovered sb " "(start %ld, seq %u, errno %d)\n", journal->j_tail, journal->j_tail_sequence, journal->j_errno); journal->j_flags |= JBD2_FLUSHED; } else { /* Lock here to make assertions happy... */ mutex_lock_io(&journal->j_checkpoint_mutex); /* * Update log tail information. We use REQ_FUA since new * transaction will start reusing journal space and so we * must make sure information about current log tail is on * disk before that. */ jbd2_journal_update_sb_log_tail(journal, journal->j_tail_sequence, journal->j_tail, REQ_SYNC | REQ_FUA); mutex_unlock(&journal->j_checkpoint_mutex); } return jbd2_journal_start_thread(journal); } /* * This function expects that the caller will have locked the journal * buffer head, and will return with it unlocked */ static int jbd2_write_superblock(journal_t *journal, blk_opf_t write_flags) { struct buffer_head *bh = journal->j_sb_buffer; journal_superblock_t *sb = journal->j_superblock; int ret = 0; /* Buffer got discarded which means block device got invalidated */ if (!buffer_mapped(bh)) { unlock_buffer(bh); return -EIO; } trace_jbd2_write_superblock(journal, write_flags); if (!(journal->j_flags & JBD2_BARRIER)) write_flags &= ~(REQ_FUA | REQ_PREFLUSH); if (buffer_write_io_error(bh)) { /* * Oh, dear. A previous attempt to write the journal * superblock failed. This could happen because the * USB device was yanked out. Or it could happen to * be a transient write error and maybe the block will * be remapped. Nothing we can do but to retry the * write and hope for the best. */ printk(KERN_ERR "JBD2: previous I/O error detected " "for journal superblock update for %s.\n", journal->j_devname); clear_buffer_write_io_error(bh); set_buffer_uptodate(bh); } if (jbd2_journal_has_csum_v2or3(journal)) sb->s_checksum = jbd2_superblock_csum(journal, sb); get_bh(bh); bh->b_end_io = end_buffer_write_sync; submit_bh(REQ_OP_WRITE | write_flags, bh); wait_on_buffer(bh); if (buffer_write_io_error(bh)) { clear_buffer_write_io_error(bh); set_buffer_uptodate(bh); ret = -EIO; } if (ret) { printk(KERN_ERR "JBD2: I/O error when updating journal superblock for %s.\n", journal->j_devname); if (!is_journal_aborted(journal)) jbd2_journal_abort(journal, ret); } return ret; } /** * jbd2_journal_update_sb_log_tail() - Update log tail in journal sb on disk. * @journal: The journal to update. * @tail_tid: TID of the new transaction at the tail of the log * @tail_block: The first block of the transaction at the tail of the log * @write_flags: Flags for the journal sb write operation * * Update a journal's superblock information about log tail and write it to * disk, waiting for the IO to complete. 
*/ int jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid, unsigned long tail_block, blk_opf_t write_flags) { journal_superblock_t *sb = journal->j_superblock; int ret; if (is_journal_aborted(journal)) return -EIO; if (test_bit(JBD2_CHECKPOINT_IO_ERROR, &journal->j_atomic_flags)) { jbd2_journal_abort(journal, -EIO); return -EIO; } BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex)); jbd2_debug(1, "JBD2: updating superblock (start %lu, seq %u)\n", tail_block, tail_tid); lock_buffer(journal->j_sb_buffer); sb->s_sequence = cpu_to_be32(tail_tid); sb->s_start = cpu_to_be32(tail_block); ret = jbd2_write_superblock(journal, write_flags); if (ret) goto out; /* Log is no longer empty */ write_lock(&journal->j_state_lock); WARN_ON(!sb->s_sequence); journal->j_flags &= ~JBD2_FLUSHED; write_unlock(&journal->j_state_lock); out: return ret; } /** * jbd2_mark_journal_empty() - Mark on disk journal as empty. * @journal: The journal to update. * @write_flags: Flags for the journal sb write operation * * Update a journal's dynamic superblock fields to show that journal is empty. * Write updated superblock to disk waiting for IO to complete. */ static void jbd2_mark_journal_empty(journal_t *journal, blk_opf_t write_flags) { journal_superblock_t *sb = journal->j_superblock; bool had_fast_commit = false; BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex)); lock_buffer(journal->j_sb_buffer); if (sb->s_start == 0) { /* Is it already empty? */ unlock_buffer(journal->j_sb_buffer); return; } jbd2_debug(1, "JBD2: Marking journal as empty (seq %u)\n", journal->j_tail_sequence); sb->s_sequence = cpu_to_be32(journal->j_tail_sequence); sb->s_start = cpu_to_be32(0); sb->s_head = cpu_to_be32(journal->j_head); if (jbd2_has_feature_fast_commit(journal)) { /* * When journal is clean, no need to commit fast commit flag and * make file system incompatible with older kernels. */ jbd2_clear_feature_fast_commit(journal); had_fast_commit = true; } jbd2_write_superblock(journal, write_flags); if (had_fast_commit) jbd2_set_feature_fast_commit(journal); /* Log is no longer empty */ write_lock(&journal->j_state_lock); journal->j_flags |= JBD2_FLUSHED; write_unlock(&journal->j_state_lock); } /** * __jbd2_journal_erase() - Discard or zeroout journal blocks (excluding superblock) * @journal: The journal to erase. * @flags: A discard/zeroout request is sent for each physically contigous * region of the journal. Either JBD2_JOURNAL_FLUSH_DISCARD or * JBD2_JOURNAL_FLUSH_ZEROOUT must be set to determine which operation * to perform. * * Note: JBD2_JOURNAL_FLUSH_ZEROOUT attempts to use hardware offload. Zeroes * will be explicitly written if no hardware offload is available, see * blkdev_issue_zeroout for more details. 
*/ static int __jbd2_journal_erase(journal_t *journal, unsigned int flags) { int err = 0; unsigned long block, log_offset; /* logical */ unsigned long long phys_block, block_start, block_stop; /* physical */ loff_t byte_start, byte_stop, byte_count; /* flags must be set to either discard or zeroout */ if ((flags & ~JBD2_JOURNAL_FLUSH_VALID) || !flags || ((flags & JBD2_JOURNAL_FLUSH_DISCARD) && (flags & JBD2_JOURNAL_FLUSH_ZEROOUT))) return -EINVAL; if ((flags & JBD2_JOURNAL_FLUSH_DISCARD) && !bdev_max_discard_sectors(journal->j_dev)) return -EOPNOTSUPP; /* * lookup block mapping and issue discard/zeroout for each * contiguous region */ log_offset = be32_to_cpu(journal->j_superblock->s_first); block_start = ~0ULL; for (block = log_offset; block < journal->j_total_len; block++) { err = jbd2_journal_bmap(journal, block, &phys_block); if (err) { pr_err("JBD2: bad block at offset %lu", block); return err; } if (block_start == ~0ULL) { block_start = phys_block; block_stop = block_start - 1; } /* * last block not contiguous with current block, * process last contiguous region and return to this block on * next loop */ if (phys_block != block_stop + 1) { block--; } else { block_stop++; /* * if this isn't the last block of journal, * no need to process now because next block may also * be part of this contiguous region */ if (block != journal->j_total_len - 1) continue; } /* * end of contiguous region or this is last block of journal, * take care of the region */ byte_start = block_start * journal->j_blocksize; byte_stop = block_stop * journal->j_blocksize; byte_count = (block_stop - block_start + 1) * journal->j_blocksize; truncate_inode_pages_range(journal->j_dev->bd_inode->i_mapping, byte_start, byte_stop); if (flags & JBD2_JOURNAL_FLUSH_DISCARD) { err = blkdev_issue_discard(journal->j_dev, byte_start >> SECTOR_SHIFT, byte_count >> SECTOR_SHIFT, GFP_NOFS); } else if (flags & JBD2_JOURNAL_FLUSH_ZEROOUT) { err = blkdev_issue_zeroout(journal->j_dev, byte_start >> SECTOR_SHIFT, byte_count >> SECTOR_SHIFT, GFP_NOFS, 0); } if (unlikely(err != 0)) { pr_err("JBD2: (error %d) unable to wipe journal at physical blocks %llu - %llu", err, block_start, block_stop); return err; } /* reset start and stop after processing a region */ block_start = ~0ULL; } return blkdev_issue_flush(journal->j_dev); } /** * jbd2_journal_update_sb_errno() - Update error in the journal. * @journal: The journal to update. * * Update a journal's errno. Write updated superblock to disk waiting for IO * to complete. */ void jbd2_journal_update_sb_errno(journal_t *journal) { journal_superblock_t *sb = journal->j_superblock; int errcode; lock_buffer(journal->j_sb_buffer); errcode = journal->j_errno; if (errcode == -ESHUTDOWN) errcode = 0; jbd2_debug(1, "JBD2: updating superblock error (errno %d)\n", errcode); sb->s_errno = cpu_to_be32(errcode); jbd2_write_superblock(journal, REQ_SYNC | REQ_FUA); } EXPORT_SYMBOL(jbd2_journal_update_sb_errno); /** * jbd2_journal_load() - Read journal from disk. * @journal: Journal to act on. * * Given a journal_t structure which tells us which disk blocks contain * a journal, read the journal from disk to initialise the in-memory * structures. */ int jbd2_journal_load(journal_t *journal) { int err; journal_superblock_t *sb = journal->j_superblock; /* * Create a slab for this blocksize */ err = jbd2_journal_create_slab(be32_to_cpu(sb->s_blocksize)); if (err) return err; /* Let the recovery code check whether it needs to recover any * data from the journal. 
*/ err = jbd2_journal_recover(journal); if (err) { pr_warn("JBD2: journal recovery failed\n"); return err; } if (journal->j_failed_commit) { printk(KERN_ERR "JBD2: journal transaction %u on %s " "is corrupt.\n", journal->j_failed_commit, journal->j_devname); return -EFSCORRUPTED; } /* * clear JBD2_ABORT flag initialized in journal_init_common * here to update log tail information with the newest seq. */ journal->j_flags &= ~JBD2_ABORT; /* OK, we've finished with the dynamic journal bits: * reinitialise the dynamic contents of the superblock in memory * and reset them on disk. */ err = journal_reset(journal); if (err) { pr_warn("JBD2: journal reset failed\n"); return err; } journal->j_flags |= JBD2_LOADED; return 0; } /** * jbd2_journal_destroy() - Release a journal_t structure. * @journal: Journal to act on. * * Release a journal_t structure once it is no longer in use by the * journaled object. * Return <0 if we couldn't clean up the journal. */ int jbd2_journal_destroy(journal_t *journal) { int err = 0; /* Wait for the commit thread to wake up and die. */ journal_kill_thread(journal); /* Force a final log commit */ if (journal->j_running_transaction) jbd2_journal_commit_transaction(journal); /* Force any old transactions to disk */ /* Totally anal locking here... */ spin_lock(&journal->j_list_lock); while (journal->j_checkpoint_transactions != NULL) { spin_unlock(&journal->j_list_lock); mutex_lock_io(&journal->j_checkpoint_mutex); err = jbd2_log_do_checkpoint(journal); mutex_unlock(&journal->j_checkpoint_mutex); /* * If checkpointing failed, just free the buffers to avoid * looping forever */ if (err) { jbd2_journal_destroy_checkpoint(journal); spin_lock(&journal->j_list_lock); break; } spin_lock(&journal->j_list_lock); } J_ASSERT(journal->j_running_transaction == NULL); J_ASSERT(journal->j_committing_transaction == NULL); J_ASSERT(journal->j_checkpoint_transactions == NULL); spin_unlock(&journal->j_list_lock); /* * OK, all checkpoint transactions have been checked, now check the * write out io error flag and abort the journal if some buffer failed * to write back to the original location, otherwise the filesystem * may become inconsistent. */ if (!is_journal_aborted(journal) && test_bit(JBD2_CHECKPOINT_IO_ERROR, &journal->j_atomic_flags)) jbd2_journal_abort(journal, -EIO); if (journal->j_sb_buffer) { if (!is_journal_aborted(journal)) { mutex_lock_io(&journal->j_checkpoint_mutex); write_lock(&journal->j_state_lock); journal->j_tail_sequence = ++journal->j_transaction_sequence; write_unlock(&journal->j_state_lock); jbd2_mark_journal_empty(journal, REQ_SYNC | REQ_PREFLUSH | REQ_FUA); mutex_unlock(&journal->j_checkpoint_mutex); } else err = -EIO; brelse(journal->j_sb_buffer); } if (journal->j_shrinker.flags & SHRINKER_REGISTERED) { percpu_counter_destroy(&journal->j_checkpoint_jh_count); unregister_shrinker(&journal->j_shrinker); } if (journal->j_proc_entry) jbd2_stats_proc_exit(journal); iput(journal->j_inode); if (journal->j_revoke) jbd2_journal_destroy_revoke(journal); if (journal->j_chksum_driver) crypto_free_shash(journal->j_chksum_driver); kfree(journal->j_fc_wbuf); kfree(journal->j_wbuf); kfree(journal); return err; } /** * jbd2_journal_check_used_features() - Check if features specified are used. * @journal: Journal to check. * @compat: bitmask of compatible features * @ro: bitmask of features that force read-only mount * @incompat: bitmask of incompatible features * * Check whether the journal uses all of a given set of * features. Return true (non-zero) if it does. 
**/ int jbd2_journal_check_used_features(journal_t *journal, unsigned long compat, unsigned long ro, unsigned long incompat) { journal_superblock_t *sb; if (!compat && !ro && !incompat) return 1; if (!jbd2_format_support_feature(journal)) return 0; sb = journal->j_superblock; if (((be32_to_cpu(sb->s_feature_compat) & compat) == compat) && ((be32_to_cpu(sb->s_feature_ro_compat) & ro) == ro) && ((be32_to_cpu(sb->s_feature_incompat) & incompat) == incompat)) return 1; return 0; } /** * jbd2_journal_check_available_features() - Check feature set in journalling layer * @journal: Journal to check. * @compat: bitmask of compatible features * @ro: bitmask of features that force read-only mount * @incompat: bitmask of incompatible features * * Check whether the journaling code supports the use of * all of a given set of features on this journal. Return true * (non-zero) if it can. */ int jbd2_journal_check_available_features(journal_t *journal, unsigned long compat, unsigned long ro, unsigned long incompat) { if (!compat && !ro && !incompat) return 1; if (!jbd2_format_support_feature(journal)) return 0; if ((compat & JBD2_KNOWN_COMPAT_FEATURES) == compat && (ro & JBD2_KNOWN_ROCOMPAT_FEATURES) == ro && (incompat & JBD2_KNOWN_INCOMPAT_FEATURES) == incompat) return 1; return 0; } static int jbd2_journal_initialize_fast_commit(journal_t *journal) { journal_superblock_t *sb = journal->j_superblock; unsigned long long num_fc_blks; num_fc_blks = jbd2_journal_get_num_fc_blks(sb); if (journal->j_last - num_fc_blks < JBD2_MIN_JOURNAL_BLOCKS) return -ENOSPC; /* Are we called twice? */ WARN_ON(journal->j_fc_wbuf != NULL); journal->j_fc_wbuf = kmalloc_array(num_fc_blks, sizeof(struct buffer_head *), GFP_KERNEL); if (!journal->j_fc_wbuf) return -ENOMEM; journal->j_fc_wbufsize = num_fc_blks; journal->j_fc_last = journal->j_last; journal->j_last = journal->j_fc_last - num_fc_blks; journal->j_fc_first = journal->j_last + 1; journal->j_fc_off = 0; journal->j_free = journal->j_last - journal->j_first; journal->j_max_transaction_buffers = jbd2_journal_get_max_txn_bufs(journal); return 0; } /** * jbd2_journal_set_features() - Mark a given journal feature in the superblock * @journal: Journal to act on. * @compat: bitmask of compatible features * @ro: bitmask of features that force read-only mount * @incompat: bitmask of incompatible features * * Mark a given journal feature as present on the * superblock. Returns true if the requested features could be set. * */ int jbd2_journal_set_features(journal_t *journal, unsigned long compat, unsigned long ro, unsigned long incompat) { #define INCOMPAT_FEATURE_ON(f) \ ((incompat & (f)) && !(sb->s_feature_incompat & cpu_to_be32(f))) #define COMPAT_FEATURE_ON(f) \ ((compat & (f)) && !(sb->s_feature_compat & cpu_to_be32(f))) journal_superblock_t *sb; if (jbd2_journal_check_used_features(journal, compat, ro, incompat)) return 1; if (!jbd2_journal_check_available_features(journal, compat, ro, incompat)) return 0; /* If enabling v2 checksums, turn on v3 instead */ if (incompat & JBD2_FEATURE_INCOMPAT_CSUM_V2) { incompat &= ~JBD2_FEATURE_INCOMPAT_CSUM_V2; incompat |= JBD2_FEATURE_INCOMPAT_CSUM_V3; } /* Asking for checksumming v3 and v1? Only give them v3. 
*/ if (incompat & JBD2_FEATURE_INCOMPAT_CSUM_V3 && compat & JBD2_FEATURE_COMPAT_CHECKSUM) compat &= ~JBD2_FEATURE_COMPAT_CHECKSUM; jbd2_debug(1, "Setting new features 0x%lx/0x%lx/0x%lx\n", compat, ro, incompat); sb = journal->j_superblock; if (incompat & JBD2_FEATURE_INCOMPAT_FAST_COMMIT) { if (jbd2_journal_initialize_fast_commit(journal)) { pr_err("JBD2: Cannot enable fast commits.\n"); return 0; } } /* Load the checksum driver if necessary */ if ((journal->j_chksum_driver == NULL) && INCOMPAT_FEATURE_ON(JBD2_FEATURE_INCOMPAT_CSUM_V3)) { journal->j_chksum_driver = crypto_alloc_shash("crc32c", 0, 0); if (IS_ERR(journal->j_chksum_driver)) { printk(KERN_ERR "JBD2: Cannot load crc32c driver.\n"); journal->j_chksum_driver = NULL; return 0; } /* Precompute checksum seed for all metadata */ journal->j_csum_seed = jbd2_chksum(journal, ~0, sb->s_uuid, sizeof(sb->s_uuid)); } lock_buffer(journal->j_sb_buffer); /* If enabling v3 checksums, update superblock */ if (INCOMPAT_FEATURE_ON(JBD2_FEATURE_INCOMPAT_CSUM_V3)) { sb->s_checksum_type = JBD2_CRC32C_CHKSUM; sb->s_feature_compat &= ~cpu_to_be32(JBD2_FEATURE_COMPAT_CHECKSUM); } /* If enabling v1 checksums, downgrade superblock */ if (COMPAT_FEATURE_ON(JBD2_FEATURE_COMPAT_CHECKSUM)) sb->s_feature_incompat &= ~cpu_to_be32(JBD2_FEATURE_INCOMPAT_CSUM_V2 | JBD2_FEATURE_INCOMPAT_CSUM_V3); sb->s_feature_compat |= cpu_to_be32(compat); sb->s_feature_ro_compat |= cpu_to_be32(ro); sb->s_feature_incompat |= cpu_to_be32(incompat); unlock_buffer(journal->j_sb_buffer); journal->j_revoke_records_per_block = journal_revoke_records_per_block(journal); return 1; #undef COMPAT_FEATURE_ON #undef INCOMPAT_FEATURE_ON } /* * jbd2_journal_clear_features() - Clear a given journal feature in the * superblock * @journal: Journal to act on. * @compat: bitmask of compatible features * @ro: bitmask of features that force read-only mount * @incompat: bitmask of incompatible features * * Clear a given journal feature as present on the * superblock. */ void jbd2_journal_clear_features(journal_t *journal, unsigned long compat, unsigned long ro, unsigned long incompat) { journal_superblock_t *sb; jbd2_debug(1, "Clear features 0x%lx/0x%lx/0x%lx\n", compat, ro, incompat); sb = journal->j_superblock; sb->s_feature_compat &= ~cpu_to_be32(compat); sb->s_feature_ro_compat &= ~cpu_to_be32(ro); sb->s_feature_incompat &= ~cpu_to_be32(incompat); journal->j_revoke_records_per_block = journal_revoke_records_per_block(journal); } EXPORT_SYMBOL(jbd2_journal_clear_features); /** * jbd2_journal_flush() - Flush journal * @journal: Journal to act on. * @flags: optional operation on the journal blocks after the flush (see below) * * Flush all data for a given journal to disk and empty the journal. * Filesystems can use this when remounting readonly to ensure that * recovery does not need to happen on remount. Optionally, a discard or zeroout * can be issued on the journal blocks after flushing. * * flags: * JBD2_JOURNAL_FLUSH_DISCARD: issues discards for the journal blocks * JBD2_JOURNAL_FLUSH_ZEROOUT: issues zeroouts for the journal blocks */ int jbd2_journal_flush(journal_t *journal, unsigned int flags) { int err = 0; transaction_t *transaction = NULL; write_lock(&journal->j_state_lock); /* Force everything buffered to the log... */ if (journal->j_running_transaction) { transaction = journal->j_running_transaction; __jbd2_log_start_commit(journal, transaction->t_tid); } else if (journal->j_committing_transaction) transaction = journal->j_committing_transaction; /* Wait for the log commit to complete... 
*/ if (transaction) { tid_t tid = transaction->t_tid; write_unlock(&journal->j_state_lock); jbd2_log_wait_commit(journal, tid); } else { write_unlock(&journal->j_state_lock); } /* ...and flush everything in the log out to disk. */ spin_lock(&journal->j_list_lock); while (!err && journal->j_checkpoint_transactions != NULL) { spin_unlock(&journal->j_list_lock); mutex_lock_io(&journal->j_checkpoint_mutex); err = jbd2_log_do_checkpoint(journal); mutex_unlock(&journal->j_checkpoint_mutex); spin_lock(&journal->j_list_lock); } spin_unlock(&journal->j_list_lock); if (is_journal_aborted(journal)) return -EIO; mutex_lock_io(&journal->j_checkpoint_mutex); if (!err) { err = jbd2_cleanup_journal_tail(journal); if (err < 0) { mutex_unlock(&journal->j_checkpoint_mutex); goto out; } err = 0; } /* Finally, mark the journal as really needing no recovery. * This sets s_start==0 in the underlying superblock, which is * the magic code for a fully-recovered superblock. Any future * commits of data to the journal will restore the current * s_start value. */ jbd2_mark_journal_empty(journal, REQ_SYNC | REQ_FUA); if (flags) err = __jbd2_journal_erase(journal, flags); mutex_unlock(&journal->j_checkpoint_mutex); write_lock(&journal->j_state_lock); J_ASSERT(!journal->j_running_transaction); J_ASSERT(!journal->j_committing_transaction); J_ASSERT(!journal->j_checkpoint_transactions); J_ASSERT(journal->j_head == journal->j_tail); J_ASSERT(journal->j_tail_sequence == journal->j_transaction_sequence); write_unlock(&journal->j_state_lock); out: return err; } /** * jbd2_journal_wipe() - Wipe journal contents * @journal: Journal to act on. * @write: flag (see below) * * Wipe out all of the contents of a journal, safely. This will produce * a warning if the journal contains any valid recovery information. * Must be called between journal_init_*() and jbd2_journal_load(). * * If 'write' is non-zero, then we wipe out the journal on disk; otherwise * we merely suppress recovery. */ int jbd2_journal_wipe(journal_t *journal, int write) { int err; J_ASSERT (!(journal->j_flags & JBD2_LOADED)); if (!journal->j_tail) return 0; printk(KERN_WARNING "JBD2: %s recovery information on journal\n", write ? "Clearing" : "Ignoring"); err = jbd2_journal_skip_recovery(journal); if (write) { /* Lock to make assertions happy... */ mutex_lock_io(&journal->j_checkpoint_mutex); jbd2_mark_journal_empty(journal, REQ_SYNC | REQ_FUA); mutex_unlock(&journal->j_checkpoint_mutex); } return err; } /** * jbd2_journal_abort () - Shutdown the journal immediately. * @journal: the journal to shutdown. * @errno: an error number to record in the journal indicating * the reason for the shutdown. * * Perform a complete, immediate shutdown of the ENTIRE * journal (not of a single transaction). This operation cannot be * undone without closing and reopening the journal. * * The jbd2_journal_abort function is intended to support higher level error * recovery mechanisms such as the ext2/ext3 remount-readonly error * mode. * * Journal abort has very specific semantics. Any existing dirty, * unjournaled buffers in the main filesystem will still be written to * disk by bdflush, but the journaling mechanism will be suspended * immediately and no further transaction commits will be honoured. * * Any dirty, journaled buffers will be written back to disk without * hitting the journal. Atomicity cannot be guaranteed on an aborted * filesystem, but we _do_ attempt to leave as much data as possible * behind for fsck to use for cleanup. 
* * Any attempt to get a new transaction handle on a journal which is in * ABORT state will just result in an -EROFS error return. A * jbd2_journal_stop on an existing handle will return -EIO if we have * entered abort state during the update. * * Recursive transactions are not disturbed by journal abort until the * final jbd2_journal_stop, which will receive the -EIO error. * * Finally, the jbd2_journal_abort call allows the caller to supply an errno * which will be recorded (if possible) in the journal superblock. This * allows a client to record failure conditions in the middle of a * transaction without having to complete the transaction to record the * failure to disk. ext3_error, for example, now uses this * functionality. * */ void jbd2_journal_abort(journal_t *journal, int errno) { transaction_t *transaction; /* * Lock the aborting procedure until everything is done, this avoid * races between filesystem's error handling flow (e.g. ext4_abort()), * ensure panic after the error info is written into journal's * superblock. */ mutex_lock(&journal->j_abort_mutex); /* * ESHUTDOWN always takes precedence because a file system check * caused by any other journal abort error is not required after * a shutdown triggered. */ write_lock(&journal->j_state_lock); if (journal->j_flags & JBD2_ABORT) { int old_errno = journal->j_errno; write_unlock(&journal->j_state_lock); if (old_errno != -ESHUTDOWN && errno == -ESHUTDOWN) { journal->j_errno = errno; jbd2_journal_update_sb_errno(journal); } mutex_unlock(&journal->j_abort_mutex); return; } /* * Mark the abort as occurred and start current running transaction * to release all journaled buffer. */ pr_err("Aborting journal on device %s.\n", journal->j_devname); journal->j_flags |= JBD2_ABORT; journal->j_errno = errno; transaction = journal->j_running_transaction; if (transaction) __jbd2_log_start_commit(journal, transaction->t_tid); write_unlock(&journal->j_state_lock); /* * Record errno to the journal super block, so that fsck and jbd2 * layer could realise that a filesystem check is needed. */ jbd2_journal_update_sb_errno(journal); mutex_unlock(&journal->j_abort_mutex); } /** * jbd2_journal_errno() - returns the journal's error state. * @journal: journal to examine. * * This is the errno number set with jbd2_journal_abort(), the last * time the journal was mounted - if the journal was stopped * without calling abort this will be 0. * * If the journal has been aborted on this mount time -EROFS will * be returned. */ int jbd2_journal_errno(journal_t *journal) { int err; read_lock(&journal->j_state_lock); if (journal->j_flags & JBD2_ABORT) err = -EROFS; else err = journal->j_errno; read_unlock(&journal->j_state_lock); return err; } /** * jbd2_journal_clear_err() - clears the journal's error state * @journal: journal to act on. * * An error must be cleared or acked to take a FS out of readonly * mode. */ int jbd2_journal_clear_err(journal_t *journal) { int err = 0; write_lock(&journal->j_state_lock); if (journal->j_flags & JBD2_ABORT) err = -EROFS; else journal->j_errno = 0; write_unlock(&journal->j_state_lock); return err; } /** * jbd2_journal_ack_err() - Ack journal err. * @journal: journal to act on. * * An error must be cleared or acked to take a FS out of readonly * mode. 
*/ void jbd2_journal_ack_err(journal_t *journal) { write_lock(&journal->j_state_lock); if (journal->j_errno) journal->j_flags |= JBD2_ACK_ERR; write_unlock(&journal->j_state_lock); } int jbd2_journal_blocks_per_page(struct inode *inode) { return 1 << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits); } /* * helper functions to deal with 32 or 64bit block numbers. */ size_t journal_tag_bytes(journal_t *journal) { size_t sz; if (jbd2_has_feature_csum3(journal)) return sizeof(journal_block_tag3_t); sz = sizeof(journal_block_tag_t); if (jbd2_has_feature_csum2(journal)) sz += sizeof(__u16); if (jbd2_has_feature_64bit(journal)) return sz; else return sz - sizeof(__u32); } /* * JBD memory management * * These functions are used to allocate block-sized chunks of memory * used for making copies of buffer_head data. Very often it will be * page-sized chunks of data, but sometimes it will be in * sub-page-size chunks. (For example, 16k pages on Power systems * with a 4k block file system.) For blocks smaller than a page, we * use a SLAB allocator. There are slab caches for each block size, * which are allocated at mount time, if necessary, and we only free * (all of) the slab caches when/if the jbd2 module is unloaded. For * this reason we don't need to a mutex to protect access to * jbd2_slab[] allocating or releasing memory; only in * jbd2_journal_create_slab(). */ #define JBD2_MAX_SLABS 8 static struct kmem_cache *jbd2_slab[JBD2_MAX_SLABS]; static const char *jbd2_slab_names[JBD2_MAX_SLABS] = { "jbd2_1k", "jbd2_2k", "jbd2_4k", "jbd2_8k", "jbd2_16k", "jbd2_32k", "jbd2_64k", "jbd2_128k" }; static void jbd2_journal_destroy_slabs(void) { int i; for (i = 0; i < JBD2_MAX_SLABS; i++) { kmem_cache_destroy(jbd2_slab[i]); jbd2_slab[i] = NULL; } } static int jbd2_journal_create_slab(size_t size) { static DEFINE_MUTEX(jbd2_slab_create_mutex); int i = order_base_2(size) - 10; size_t slab_size; if (size == PAGE_SIZE) return 0; if (i >= JBD2_MAX_SLABS) return -EINVAL; if (unlikely(i < 0)) i = 0; mutex_lock(&jbd2_slab_create_mutex); if (jbd2_slab[i]) { mutex_unlock(&jbd2_slab_create_mutex); return 0; /* Already created */ } slab_size = 1 << (i+10); jbd2_slab[i] = kmem_cache_create(jbd2_slab_names[i], slab_size, slab_size, 0, NULL); mutex_unlock(&jbd2_slab_create_mutex); if (!jbd2_slab[i]) { printk(KERN_EMERG "JBD2: no memory for jbd2_slab cache\n"); return -ENOMEM; } return 0; } static struct kmem_cache *get_slab(size_t size) { int i = order_base_2(size) - 10; BUG_ON(i >= JBD2_MAX_SLABS); if (unlikely(i < 0)) i = 0; BUG_ON(jbd2_slab[i] == NULL); return jbd2_slab[i]; } void *jbd2_alloc(size_t size, gfp_t flags) { void *ptr; BUG_ON(size & (size-1)); /* Must be a power of 2 */ if (size < PAGE_SIZE) ptr = kmem_cache_alloc(get_slab(size), flags); else ptr = (void *)__get_free_pages(flags, get_order(size)); /* Check alignment; SLUB has gotten this wrong in the past, * and this can lead to user data corruption! 
*/ BUG_ON(((unsigned long) ptr) & (size-1)); return ptr; } void jbd2_free(void *ptr, size_t size) { if (size < PAGE_SIZE) kmem_cache_free(get_slab(size), ptr); else free_pages((unsigned long)ptr, get_order(size)); }; /* * Journal_head storage management */ static struct kmem_cache *jbd2_journal_head_cache; #ifdef CONFIG_JBD2_DEBUG static atomic_t nr_journal_heads = ATOMIC_INIT(0); #endif static int __init jbd2_journal_init_journal_head_cache(void) { J_ASSERT(!jbd2_journal_head_cache); jbd2_journal_head_cache = kmem_cache_create("jbd2_journal_head", sizeof(struct journal_head), 0, /* offset */ SLAB_TEMPORARY | SLAB_TYPESAFE_BY_RCU, NULL); /* ctor */ if (!jbd2_journal_head_cache) { printk(KERN_EMERG "JBD2: no memory for journal_head cache\n"); return -ENOMEM; } return 0; } static void jbd2_journal_destroy_journal_head_cache(void) { kmem_cache_destroy(jbd2_journal_head_cache); jbd2_journal_head_cache = NULL; } /* * journal_head splicing and dicing */ static struct journal_head *journal_alloc_journal_head(void) { struct journal_head *ret; #ifdef CONFIG_JBD2_DEBUG atomic_inc(&nr_journal_heads); #endif ret = kmem_cache_zalloc(jbd2_journal_head_cache, GFP_NOFS); if (!ret) { jbd2_debug(1, "out of memory for journal_head\n"); pr_notice_ratelimited("ENOMEM in %s, retrying.\n", __func__); ret = kmem_cache_zalloc(jbd2_journal_head_cache, GFP_NOFS | __GFP_NOFAIL); } if (ret) spin_lock_init(&ret->b_state_lock); return ret; } static void journal_free_journal_head(struct journal_head *jh) { #ifdef CONFIG_JBD2_DEBUG atomic_dec(&nr_journal_heads); memset(jh, JBD2_POISON_FREE, sizeof(*jh)); #endif kmem_cache_free(jbd2_journal_head_cache, jh); } /* * A journal_head is attached to a buffer_head whenever JBD has an * interest in the buffer. * * Whenever a buffer has an attached journal_head, its ->b_state:BH_JBD bit * is set. This bit is tested in core kernel code where we need to take * JBD-specific actions. Testing the zeroness of ->b_private is not reliable * there. * * When a buffer has its BH_JBD bit set, its ->b_count is elevated by one. * * When a buffer has its BH_JBD bit set it is immune from being released by * core kernel code, mainly via ->b_count. * * A journal_head is detached from its buffer_head when the journal_head's * b_jcount reaches zero. Running transaction (b_transaction) and checkpoint * transaction (b_cp_transaction) hold their references to b_jcount. * * Various places in the kernel want to attach a journal_head to a buffer_head * _before_ attaching the journal_head to a transaction. To protect the * journal_head in this situation, jbd2_journal_add_journal_head elevates the * journal_head's b_jcount refcount by one. The caller must call * jbd2_journal_put_journal_head() to undo this. * * So the typical usage would be: * * (Attach a journal_head if needed. Increments b_jcount) * struct journal_head *jh = jbd2_journal_add_journal_head(bh); * ... * (Get another reference for transaction) * jbd2_journal_grab_journal_head(bh); * jh->b_transaction = xxx; * (Put original reference) * jbd2_journal_put_journal_head(jh); */ /* * Give a buffer_head a journal_head. * * May sleep. 
*/ struct journal_head *jbd2_journal_add_journal_head(struct buffer_head *bh) { struct journal_head *jh; struct journal_head *new_jh = NULL; repeat: if (!buffer_jbd(bh)) new_jh = journal_alloc_journal_head(); jbd_lock_bh_journal_head(bh); if (buffer_jbd(bh)) { jh = bh2jh(bh); } else { J_ASSERT_BH(bh, (atomic_read(&bh->b_count) > 0) || (bh->b_folio && bh->b_folio->mapping)); if (!new_jh) { jbd_unlock_bh_journal_head(bh); goto repeat; } jh = new_jh; new_jh = NULL; /* We consumed it */ set_buffer_jbd(bh); bh->b_private = jh; jh->b_bh = bh; get_bh(bh); BUFFER_TRACE(bh, "added journal_head"); } jh->b_jcount++; jbd_unlock_bh_journal_head(bh); if (new_jh) journal_free_journal_head(new_jh); return bh->b_private; } /* * Grab a ref against this buffer_head's journal_head. If it ended up not * having a journal_head, return NULL */ struct journal_head *jbd2_journal_grab_journal_head(struct buffer_head *bh) { struct journal_head *jh = NULL; jbd_lock_bh_journal_head(bh); if (buffer_jbd(bh)) { jh = bh2jh(bh); jh->b_jcount++; } jbd_unlock_bh_journal_head(bh); return jh; } EXPORT_SYMBOL(jbd2_journal_grab_journal_head); static void __journal_remove_journal_head(struct buffer_head *bh) { struct journal_head *jh = bh2jh(bh); J_ASSERT_JH(jh, jh->b_transaction == NULL); J_ASSERT_JH(jh, jh->b_next_transaction == NULL); J_ASSERT_JH(jh, jh->b_cp_transaction == NULL); J_ASSERT_JH(jh, jh->b_jlist == BJ_None); J_ASSERT_BH(bh, buffer_jbd(bh)); J_ASSERT_BH(bh, jh2bh(jh) == bh); BUFFER_TRACE(bh, "remove journal_head"); /* Unlink before dropping the lock */ bh->b_private = NULL; jh->b_bh = NULL; /* debug, really */ clear_buffer_jbd(bh); } static void journal_release_journal_head(struct journal_head *jh, size_t b_size) { if (jh->b_frozen_data) { printk(KERN_WARNING "%s: freeing b_frozen_data\n", __func__); jbd2_free(jh->b_frozen_data, b_size); } if (jh->b_committed_data) { printk(KERN_WARNING "%s: freeing b_committed_data\n", __func__); jbd2_free(jh->b_committed_data, b_size); } journal_free_journal_head(jh); } /* * Drop a reference on the passed journal_head. If it fell to zero then * release the journal_head from the buffer_head. */ void jbd2_journal_put_journal_head(struct journal_head *jh) { struct buffer_head *bh = jh2bh(jh); jbd_lock_bh_journal_head(bh); J_ASSERT_JH(jh, jh->b_jcount > 0); --jh->b_jcount; if (!jh->b_jcount) { __journal_remove_journal_head(bh); jbd_unlock_bh_journal_head(bh); journal_release_journal_head(jh, bh->b_size); __brelse(bh); } else { jbd_unlock_bh_journal_head(bh); } } EXPORT_SYMBOL(jbd2_journal_put_journal_head); /* * Initialize jbd inode head */ void jbd2_journal_init_jbd_inode(struct jbd2_inode *jinode, struct inode *inode) { jinode->i_transaction = NULL; jinode->i_next_transaction = NULL; jinode->i_vfs_inode = inode; jinode->i_flags = 0; jinode->i_dirty_start = 0; jinode->i_dirty_end = 0; INIT_LIST_HEAD(&jinode->i_list); } /* * Function to be called before we start removing inode from memory (i.e., * clear_inode() is a fine place to be called from). It removes inode from * transaction's lists. 
*/ void jbd2_journal_release_jbd_inode(journal_t *journal, struct jbd2_inode *jinode) { if (!journal) return; restart: spin_lock(&journal->j_list_lock); /* Is commit writing out inode - we have to wait */ if (jinode->i_flags & JI_COMMIT_RUNNING) { wait_queue_head_t *wq; DEFINE_WAIT_BIT(wait, &jinode->i_flags, __JI_COMMIT_RUNNING); wq = bit_waitqueue(&jinode->i_flags, __JI_COMMIT_RUNNING); prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE); spin_unlock(&journal->j_list_lock); schedule(); finish_wait(wq, &wait.wq_entry); goto restart; } if (jinode->i_transaction) { list_del(&jinode->i_list); jinode->i_transaction = NULL; } spin_unlock(&journal->j_list_lock); } #ifdef CONFIG_PROC_FS #define JBD2_STATS_PROC_NAME "fs/jbd2" static void __init jbd2_create_jbd_stats_proc_entry(void) { proc_jbd2_stats = proc_mkdir(JBD2_STATS_PROC_NAME, NULL); } static void __exit jbd2_remove_jbd_stats_proc_entry(void) { if (proc_jbd2_stats) remove_proc_entry(JBD2_STATS_PROC_NAME, NULL); } #else #define jbd2_create_jbd_stats_proc_entry() do {} while (0) #define jbd2_remove_jbd_stats_proc_entry() do {} while (0) #endif struct kmem_cache *jbd2_handle_cache, *jbd2_inode_cache; static int __init jbd2_journal_init_inode_cache(void) { J_ASSERT(!jbd2_inode_cache); jbd2_inode_cache = KMEM_CACHE(jbd2_inode, 0); if (!jbd2_inode_cache) { pr_emerg("JBD2: failed to create inode cache\n"); return -ENOMEM; } return 0; } static int __init jbd2_journal_init_handle_cache(void) { J_ASSERT(!jbd2_handle_cache); jbd2_handle_cache = KMEM_CACHE(jbd2_journal_handle, SLAB_TEMPORARY); if (!jbd2_handle_cache) { printk(KERN_EMERG "JBD2: failed to create handle cache\n"); return -ENOMEM; } return 0; } static void jbd2_journal_destroy_inode_cache(void) { kmem_cache_destroy(jbd2_inode_cache); jbd2_inode_cache = NULL; } static void jbd2_journal_destroy_handle_cache(void) { kmem_cache_destroy(jbd2_handle_cache); jbd2_handle_cache = NULL; } /* * Module startup and shutdown */ static int __init journal_init_caches(void) { int ret; ret = jbd2_journal_init_revoke_record_cache(); if (ret == 0) ret = jbd2_journal_init_revoke_table_cache(); if (ret == 0) ret = jbd2_journal_init_journal_head_cache(); if (ret == 0) ret = jbd2_journal_init_handle_cache(); if (ret == 0) ret = jbd2_journal_init_inode_cache(); if (ret == 0) ret = jbd2_journal_init_transaction_cache(); return ret; } static void jbd2_journal_destroy_caches(void) { jbd2_journal_destroy_revoke_record_cache(); jbd2_journal_destroy_revoke_table_cache(); jbd2_journal_destroy_journal_head_cache(); jbd2_journal_destroy_handle_cache(); jbd2_journal_destroy_inode_cache(); jbd2_journal_destroy_transaction_cache(); jbd2_journal_destroy_slabs(); } static int __init journal_init(void) { int ret; BUILD_BUG_ON(sizeof(struct journal_superblock_s) != 1024); ret = journal_init_caches(); if (ret == 0) { jbd2_create_jbd_stats_proc_entry(); } else { jbd2_journal_destroy_caches(); } return ret; } static void __exit journal_exit(void) { #ifdef CONFIG_JBD2_DEBUG int n = atomic_read(&nr_journal_heads); if (n) printk(KERN_ERR "JBD2: leaked %d journal_heads!\n", n); #endif jbd2_remove_jbd_stats_proc_entry(); jbd2_journal_destroy_caches(); } MODULE_LICENSE("GPL"); module_init(journal_init); module_exit(journal_exit);
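The journal.c record above ends with the module init/exit hooks. As a minimal sketch (not part of the file) of how the lifecycle API defined above is meant to be driven, the hypothetical helper below strings together jbd2_journal_init_inode(), jbd2_journal_wipe(), jbd2_journal_load() and jbd2_journal_destroy() in the order the kernel-doc above requires; the name myfs_load_journal and the readonly flag are invented for illustration, and real callers such as ext4 wrap more policy around each step.

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/jbd2.h>

/*
 * Hypothetical example, not part of fs/jbd2/journal.c: map a preallocated
 * journal inode to a journal_t, optionally suppress recovery, then replay
 * the log and start the commit thread.
 */
static journal_t *myfs_load_journal(struct inode *journal_inode, bool readonly)
{
	journal_t *journal;
	int err;

	/* Build the journal_t from the on-disk journal inode. */
	journal = jbd2_journal_init_inode(journal_inode);
	if (IS_ERR(journal))
		return journal;

	/*
	 * jbd2_journal_wipe(journal, 0) only suppresses recovery; per the
	 * kernel-doc above it must be called between journal_init_*() and
	 * jbd2_journal_load().
	 */
	if (readonly) {
		err = jbd2_journal_wipe(journal, 0);
		if (err)
			goto out_destroy;
	}

	/* Recover if needed, reset the log and start the commit thread. */
	err = jbd2_journal_load(journal);
	if (err)
		goto out_destroy;

	return journal;

out_destroy:
	jbd2_journal_destroy(journal);
	return ERR_PTR(err);
}

As the kernel-doc above notes, jbd2_journal_destroy() itself returns a negative value if the final checkpoint could not be cleaned up, which a real filesystem would also want to report.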
linux-master
fs/jbd2/journal.c
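Before the commit path in fs/jbd2/commit.c below, one more minimal sketch, this time of the feature-negotiation helpers from journal.c above (jbd2_journal_check_used_features(), jbd2_journal_check_available_features(), jbd2_journal_set_features()). The helper name myfs_enable_journal_csum and the errno choices are hypothetical; the 1-means-yes return convention of the jbd2 helpers follows their kernel-doc above.

#include <linux/errno.h>
#include <linux/jbd2.h>

/*
 * Hypothetical example, not part of the jbd2 sources: enable v3 checksums
 * on a loaded journal.  The jbd2 feature helpers return 1 for "yes"/success
 * and 0 otherwise, unlike the usual 0/-errno convention.
 */
static int myfs_enable_journal_csum(journal_t *journal)
{
	/* Feature already recorded in the journal superblock? */
	if (jbd2_journal_check_used_features(journal, 0, 0,
					     JBD2_FEATURE_INCOMPAT_CSUM_V3))
		return 0;

	/* Does this jbd2 build know about the feature at all? */
	if (!jbd2_journal_check_available_features(journal, 0, 0,
					JBD2_FEATURE_INCOMPAT_CSUM_V3))
		return -EOPNOTSUPP;

	/* Set the bit in the in-memory superblock; it is written out later. */
	if (!jbd2_journal_set_features(journal, 0, 0,
				       JBD2_FEATURE_INCOMPAT_CSUM_V3))
		return -EIO;

	return 0;
}

Note that jbd2_journal_set_features() already repeats the used/available checks internally and silently upgrades CSUM_V2 requests to CSUM_V3 (see the code above), so the explicit checks here only make the failure mode visible to the caller.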
// SPDX-License-Identifier: GPL-2.0+ /* * linux/fs/jbd2/commit.c * * Written by Stephen C. Tweedie <[email protected]>, 1998 * * Copyright 1998 Red Hat corp --- All Rights Reserved * * Journal commit routines for the generic filesystem journaling code; * part of the ext2fs journaling system. */ #include <linux/time.h> #include <linux/fs.h> #include <linux/jbd2.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/pagemap.h> #include <linux/jiffies.h> #include <linux/crc32.h> #include <linux/writeback.h> #include <linux/backing-dev.h> #include <linux/bio.h> #include <linux/blkdev.h> #include <linux/bitops.h> #include <trace/events/jbd2.h> /* * IO end handler for temporary buffer_heads handling writes to the journal. */ static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate) { struct buffer_head *orig_bh = bh->b_private; BUFFER_TRACE(bh, ""); if (uptodate) set_buffer_uptodate(bh); else clear_buffer_uptodate(bh); if (orig_bh) { clear_bit_unlock(BH_Shadow, &orig_bh->b_state); smp_mb__after_atomic(); wake_up_bit(&orig_bh->b_state, BH_Shadow); } unlock_buffer(bh); } /* * When an ext4 file is truncated, it is possible that some pages are not * successfully freed, because they are attached to a committing transaction. * After the transaction commits, these pages are left on the LRU, with no * ->mapping, and with attached buffers. These pages are trivially reclaimable * by the VM, but their apparent absence upsets the VM accounting, and it makes * the numbers in /proc/meminfo look odd. * * So here, we have a buffer which has just come off the forget list. Look to * see if we can strip all buffers from the backing page. * * Called under lock_journal(), and possibly under journal_datalist_lock. The * caller provided us with a ref against the buffer, and we drop that here. */ static void release_buffer_page(struct buffer_head *bh) { struct folio *folio; if (buffer_dirty(bh)) goto nope; if (atomic_read(&bh->b_count) != 1) goto nope; folio = bh->b_folio; if (folio->mapping) goto nope; /* OK, it's a truncated page */ if (!folio_trylock(folio)) goto nope; folio_get(folio); __brelse(bh); try_to_free_buffers(folio); folio_unlock(folio); folio_put(folio); return; nope: __brelse(bh); } static void jbd2_commit_block_csum_set(journal_t *j, struct buffer_head *bh) { struct commit_header *h; __u32 csum; if (!jbd2_journal_has_csum_v2or3(j)) return; h = (struct commit_header *)(bh->b_data); h->h_chksum_type = 0; h->h_chksum_size = 0; h->h_chksum[0] = 0; csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize); h->h_chksum[0] = cpu_to_be32(csum); } /* * Done it all: now submit the commit record. We should have * cleaned up our previous buffers by now, so if we are in abort * mode we can now just skip the rest of the journal write * entirely. 
* * Returns 1 if the journal needs to be aborted or 0 on success */ static int journal_submit_commit_record(journal_t *journal, transaction_t *commit_transaction, struct buffer_head **cbh, __u32 crc32_sum) { struct commit_header *tmp; struct buffer_head *bh; struct timespec64 now; blk_opf_t write_flags = REQ_OP_WRITE | REQ_SYNC; *cbh = NULL; if (is_journal_aborted(journal)) return 0; bh = jbd2_journal_get_descriptor_buffer(commit_transaction, JBD2_COMMIT_BLOCK); if (!bh) return 1; tmp = (struct commit_header *)bh->b_data; ktime_get_coarse_real_ts64(&now); tmp->h_commit_sec = cpu_to_be64(now.tv_sec); tmp->h_commit_nsec = cpu_to_be32(now.tv_nsec); if (jbd2_has_feature_checksum(journal)) { tmp->h_chksum_type = JBD2_CRC32_CHKSUM; tmp->h_chksum_size = JBD2_CRC32_CHKSUM_SIZE; tmp->h_chksum[0] = cpu_to_be32(crc32_sum); } jbd2_commit_block_csum_set(journal, bh); BUFFER_TRACE(bh, "submit commit block"); lock_buffer(bh); clear_buffer_dirty(bh); set_buffer_uptodate(bh); bh->b_end_io = journal_end_buffer_io_sync; if (journal->j_flags & JBD2_BARRIER && !jbd2_has_feature_async_commit(journal)) write_flags |= REQ_PREFLUSH | REQ_FUA; submit_bh(write_flags, bh); *cbh = bh; return 0; } /* * This function along with journal_submit_commit_record * allows to write the commit record asynchronously. */ static int journal_wait_on_commit_record(journal_t *journal, struct buffer_head *bh) { int ret = 0; clear_buffer_dirty(bh); wait_on_buffer(bh); if (unlikely(!buffer_uptodate(bh))) ret = -EIO; put_bh(bh); /* One for getblk() */ return ret; } /* Send all the data buffers related to an inode */ int jbd2_submit_inode_data(journal_t *journal, struct jbd2_inode *jinode) { if (!jinode || !(jinode->i_flags & JI_WRITE_DATA)) return 0; trace_jbd2_submit_inode_data(jinode->i_vfs_inode); return journal->j_submit_inode_data_buffers(jinode); } EXPORT_SYMBOL(jbd2_submit_inode_data); int jbd2_wait_inode_data(journal_t *journal, struct jbd2_inode *jinode) { if (!jinode || !(jinode->i_flags & JI_WAIT_DATA) || !jinode->i_vfs_inode || !jinode->i_vfs_inode->i_mapping) return 0; return filemap_fdatawait_range_keep_errors( jinode->i_vfs_inode->i_mapping, jinode->i_dirty_start, jinode->i_dirty_end); } EXPORT_SYMBOL(jbd2_wait_inode_data); /* * Submit all the data buffers of inode associated with the transaction to * disk. * * We are in a committing transaction. Therefore no new inode can be added to * our inode list. We use JI_COMMIT_RUNNING flag to protect inode we currently * operate on from being released while we write out pages. */ static int journal_submit_data_buffers(journal_t *journal, transaction_t *commit_transaction) { struct jbd2_inode *jinode; int err, ret = 0; spin_lock(&journal->j_list_lock); list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) { if (!(jinode->i_flags & JI_WRITE_DATA)) continue; jinode->i_flags |= JI_COMMIT_RUNNING; spin_unlock(&journal->j_list_lock); /* submit the inode data buffers. 
*/ trace_jbd2_submit_inode_data(jinode->i_vfs_inode); if (journal->j_submit_inode_data_buffers) { err = journal->j_submit_inode_data_buffers(jinode); if (!ret) ret = err; } spin_lock(&journal->j_list_lock); J_ASSERT(jinode->i_transaction == commit_transaction); jinode->i_flags &= ~JI_COMMIT_RUNNING; smp_mb(); wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING); } spin_unlock(&journal->j_list_lock); return ret; } int jbd2_journal_finish_inode_data_buffers(struct jbd2_inode *jinode) { struct address_space *mapping = jinode->i_vfs_inode->i_mapping; return filemap_fdatawait_range_keep_errors(mapping, jinode->i_dirty_start, jinode->i_dirty_end); } /* * Wait for data submitted for writeout, refile inodes to proper * transaction if needed. * */ static int journal_finish_inode_data_buffers(journal_t *journal, transaction_t *commit_transaction) { struct jbd2_inode *jinode, *next_i; int err, ret = 0; /* For locking, see the comment in journal_submit_data_buffers() */ spin_lock(&journal->j_list_lock); list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) { if (!(jinode->i_flags & JI_WAIT_DATA)) continue; jinode->i_flags |= JI_COMMIT_RUNNING; spin_unlock(&journal->j_list_lock); /* wait for the inode data buffers writeout. */ if (journal->j_finish_inode_data_buffers) { err = journal->j_finish_inode_data_buffers(jinode); if (!ret) ret = err; } spin_lock(&journal->j_list_lock); jinode->i_flags &= ~JI_COMMIT_RUNNING; smp_mb(); wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING); } /* Now refile inode to proper lists */ list_for_each_entry_safe(jinode, next_i, &commit_transaction->t_inode_list, i_list) { list_del(&jinode->i_list); if (jinode->i_next_transaction) { jinode->i_transaction = jinode->i_next_transaction; jinode->i_next_transaction = NULL; list_add(&jinode->i_list, &jinode->i_transaction->t_inode_list); } else { jinode->i_transaction = NULL; jinode->i_dirty_start = 0; jinode->i_dirty_end = 0; } } spin_unlock(&journal->j_list_lock); return ret; } static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh) { char *addr; __u32 checksum; addr = kmap_local_folio(bh->b_folio, bh_offset(bh)); checksum = crc32_be(crc32_sum, addr, bh->b_size); kunmap_local(addr); return checksum; } static void write_tag_block(journal_t *j, journal_block_tag_t *tag, unsigned long long block) { tag->t_blocknr = cpu_to_be32(block & (u32)~0); if (jbd2_has_feature_64bit(j)) tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1); } static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag, struct buffer_head *bh, __u32 sequence) { journal_block_tag3_t *tag3 = (journal_block_tag3_t *)tag; __u8 *addr; __u32 csum32; __be32 seq; if (!jbd2_journal_has_csum_v2or3(j)) return; seq = cpu_to_be32(sequence); addr = kmap_local_folio(bh->b_folio, bh_offset(bh)); csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&seq, sizeof(seq)); csum32 = jbd2_chksum(j, csum32, addr, bh->b_size); kunmap_local(addr); if (jbd2_has_feature_csum3(j)) tag3->t_checksum = cpu_to_be32(csum32); else tag->t_checksum = cpu_to_be16(csum32); } /* * jbd2_journal_commit_transaction * * The primary function for committing a transaction to the log. This * function is called by the journal thread to begin a complete commit. 
*/ void jbd2_journal_commit_transaction(journal_t *journal) { struct transaction_stats_s stats; transaction_t *commit_transaction; struct journal_head *jh; struct buffer_head *descriptor; struct buffer_head **wbuf = journal->j_wbuf; int bufs; int flags; int err; unsigned long long blocknr; ktime_t start_time; u64 commit_time; char *tagp = NULL; journal_block_tag_t *tag = NULL; int space_left = 0; int first_tag = 0; int tag_flag; int i; int tag_bytes = journal_tag_bytes(journal); struct buffer_head *cbh = NULL; /* For transactional checksums */ __u32 crc32_sum = ~0; struct blk_plug plug; /* Tail of the journal */ unsigned long first_block; tid_t first_tid; int update_tail; int csum_size = 0; LIST_HEAD(io_bufs); LIST_HEAD(log_bufs); if (jbd2_journal_has_csum_v2or3(journal)) csum_size = sizeof(struct jbd2_journal_block_tail); /* * First job: lock down the current transaction and wait for * all outstanding updates to complete. */ /* Do we need to erase the effects of a prior jbd2_journal_flush? */ if (journal->j_flags & JBD2_FLUSHED) { jbd2_debug(3, "super block updated\n"); mutex_lock_io(&journal->j_checkpoint_mutex); /* * We hold j_checkpoint_mutex so tail cannot change under us. * We don't need any special data guarantees for writing sb * since journal is empty and it is ok for write to be * flushed only with transaction commit. */ jbd2_journal_update_sb_log_tail(journal, journal->j_tail_sequence, journal->j_tail, REQ_SYNC); mutex_unlock(&journal->j_checkpoint_mutex); } else { jbd2_debug(3, "superblock not updated\n"); } J_ASSERT(journal->j_running_transaction != NULL); J_ASSERT(journal->j_committing_transaction == NULL); write_lock(&journal->j_state_lock); journal->j_flags |= JBD2_FULL_COMMIT_ONGOING; while (journal->j_flags & JBD2_FAST_COMMIT_ONGOING) { DEFINE_WAIT(wait); prepare_to_wait(&journal->j_fc_wait, &wait, TASK_UNINTERRUPTIBLE); write_unlock(&journal->j_state_lock); schedule(); write_lock(&journal->j_state_lock); finish_wait(&journal->j_fc_wait, &wait); /* * TODO: by blocking fast commits here, we are increasing * fsync() latency slightly. Strictly speaking, we don't need * to block fast commits until the transaction enters T_FLUSH * state. So an optimization is possible where we block new fast * commits here and wait for existing ones to complete * just before we enter T_FLUSH. That way, the existing fast * commits and this full commit can proceed parallely. */ } write_unlock(&journal->j_state_lock); commit_transaction = journal->j_running_transaction; trace_jbd2_start_commit(journal, commit_transaction); jbd2_debug(1, "JBD2: starting commit of transaction %d\n", commit_transaction->t_tid); write_lock(&journal->j_state_lock); journal->j_fc_off = 0; J_ASSERT(commit_transaction->t_state == T_RUNNING); commit_transaction->t_state = T_LOCKED; trace_jbd2_commit_locking(journal, commit_transaction); stats.run.rs_wait = commit_transaction->t_max_wait; stats.run.rs_request_delay = 0; stats.run.rs_locked = jiffies; if (commit_transaction->t_requested) stats.run.rs_request_delay = jbd2_time_diff(commit_transaction->t_requested, stats.run.rs_locked); stats.run.rs_running = jbd2_time_diff(commit_transaction->t_start, stats.run.rs_locked); // waits for any t_updates to finish jbd2_journal_wait_updates(journal); commit_transaction->t_state = T_SWITCH; J_ASSERT (atomic_read(&commit_transaction->t_outstanding_credits) <= journal->j_max_transaction_buffers); /* * First thing we are allowed to do is to discard any remaining * BJ_Reserved buffers. 
Note, it is _not_ permissible to assume * that there are no such buffers: if a large filesystem * operation like a truncate needs to split itself over multiple * transactions, then it may try to do a jbd2_journal_restart() while * there are still BJ_Reserved buffers outstanding. These must * be released cleanly from the current transaction. * * In this case, the filesystem must still reserve write access * again before modifying the buffer in the new transaction, but * we do not require it to remember exactly which old buffers it * has reserved. This is consistent with the existing behaviour * that multiple jbd2_journal_get_write_access() calls to the same * buffer are perfectly permissible. * We use journal->j_state_lock here to serialize processing of * t_reserved_list with eviction of buffers from journal_unmap_buffer(). */ while (commit_transaction->t_reserved_list) { jh = commit_transaction->t_reserved_list; JBUFFER_TRACE(jh, "reserved, unused: refile"); /* * A jbd2_journal_get_undo_access()+jbd2_journal_release_buffer() may * leave undo-committed data. */ if (jh->b_committed_data) { struct buffer_head *bh = jh2bh(jh); spin_lock(&jh->b_state_lock); jbd2_free(jh->b_committed_data, bh->b_size); jh->b_committed_data = NULL; spin_unlock(&jh->b_state_lock); } jbd2_journal_refile_buffer(journal, jh); } write_unlock(&journal->j_state_lock); /* * Now try to drop any written-back buffers from the journal's * checkpoint lists. We do this *before* commit because it potentially * frees some memory */ spin_lock(&journal->j_list_lock); __jbd2_journal_clean_checkpoint_list(journal, false); spin_unlock(&journal->j_list_lock); jbd2_debug(3, "JBD2: commit phase 1\n"); /* * Clear revoked flag to reflect there is no revoked buffers * in the next transaction which is going to be started. */ jbd2_clear_buffer_revoked_flags(journal); /* * Switch to a new revoke table. */ jbd2_journal_switch_revoke_table(journal); write_lock(&journal->j_state_lock); /* * Reserved credits cannot be claimed anymore, free them */ atomic_sub(atomic_read(&journal->j_reserved_credits), &commit_transaction->t_outstanding_credits); trace_jbd2_commit_flushing(journal, commit_transaction); stats.run.rs_flushing = jiffies; stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked, stats.run.rs_flushing); commit_transaction->t_state = T_FLUSH; journal->j_committing_transaction = commit_transaction; journal->j_running_transaction = NULL; start_time = ktime_get(); commit_transaction->t_log_start = journal->j_head; wake_up_all(&journal->j_wait_transaction_locked); write_unlock(&journal->j_state_lock); jbd2_debug(3, "JBD2: commit phase 2a\n"); /* * Now start flushing things to disk, in the order they appear * on the transaction lists. Data blocks go first. */ err = journal_submit_data_buffers(journal, commit_transaction); if (err) jbd2_journal_abort(journal, err); blk_start_plug(&plug); jbd2_journal_write_revoke_records(commit_transaction, &log_bufs); jbd2_debug(3, "JBD2: commit phase 2b\n"); /* * Way to go: we have now written out all of the data for a * transaction! Now comes the tricky part: we need to write out * metadata. 
Loop over the transaction's entire buffer list: */ write_lock(&journal->j_state_lock); commit_transaction->t_state = T_COMMIT; write_unlock(&journal->j_state_lock); trace_jbd2_commit_logging(journal, commit_transaction); stats.run.rs_logging = jiffies; stats.run.rs_flushing = jbd2_time_diff(stats.run.rs_flushing, stats.run.rs_logging); stats.run.rs_blocks = commit_transaction->t_nr_buffers; stats.run.rs_blocks_logged = 0; J_ASSERT(commit_transaction->t_nr_buffers <= atomic_read(&commit_transaction->t_outstanding_credits)); err = 0; bufs = 0; descriptor = NULL; while (commit_transaction->t_buffers) { /* Find the next buffer to be journaled... */ jh = commit_transaction->t_buffers; /* If we're in abort mode, we just un-journal the buffer and release it. */ if (is_journal_aborted(journal)) { clear_buffer_jbddirty(jh2bh(jh)); JBUFFER_TRACE(jh, "journal is aborting: refile"); jbd2_buffer_abort_trigger(jh, jh->b_frozen_data ? jh->b_frozen_triggers : jh->b_triggers); jbd2_journal_refile_buffer(journal, jh); /* If that was the last one, we need to clean up * any descriptor buffers which may have been * already allocated, even if we are now * aborting. */ if (!commit_transaction->t_buffers) goto start_journal_io; continue; } /* Make sure we have a descriptor block in which to record the metadata buffer. */ if (!descriptor) { J_ASSERT (bufs == 0); jbd2_debug(4, "JBD2: get descriptor\n"); descriptor = jbd2_journal_get_descriptor_buffer( commit_transaction, JBD2_DESCRIPTOR_BLOCK); if (!descriptor) { jbd2_journal_abort(journal, -EIO); continue; } jbd2_debug(4, "JBD2: got buffer %llu (%p)\n", (unsigned long long)descriptor->b_blocknr, descriptor->b_data); tagp = &descriptor->b_data[sizeof(journal_header_t)]; space_left = descriptor->b_size - sizeof(journal_header_t); first_tag = 1; set_buffer_jwrite(descriptor); set_buffer_dirty(descriptor); wbuf[bufs++] = descriptor; /* Record it so that we can wait for IO completion later */ BUFFER_TRACE(descriptor, "ph3: file as descriptor"); jbd2_file_log_bh(&log_bufs, descriptor); } /* Where is the buffer to be written? */ err = jbd2_journal_next_log_block(journal, &blocknr); /* If the block mapping failed, just abandon the buffer and repeat this loop: we'll fall into the refile-on-abort condition above. */ if (err) { jbd2_journal_abort(journal, err); continue; } /* * start_this_handle() uses t_outstanding_credits to determine * the free space in the log. */ atomic_dec(&commit_transaction->t_outstanding_credits); /* Bump b_count to prevent truncate from stumbling over the shadowed buffer! @@@ This can go if we ever get rid of the shadow pairing of buffers. */ atomic_inc(&jh2bh(jh)->b_count); /* * Make a temporary IO buffer with which to write it out * (this will requeue the metadata buffer to BJ_Shadow). 
*/ set_bit(BH_JWrite, &jh2bh(jh)->b_state); JBUFFER_TRACE(jh, "ph3: write metadata"); flags = jbd2_journal_write_metadata_buffer(commit_transaction, jh, &wbuf[bufs], blocknr); if (flags < 0) { jbd2_journal_abort(journal, flags); continue; } jbd2_file_log_bh(&io_bufs, wbuf[bufs]); /* Record the new block's tag in the current descriptor buffer */ tag_flag = 0; if (flags & 1) tag_flag |= JBD2_FLAG_ESCAPE; if (!first_tag) tag_flag |= JBD2_FLAG_SAME_UUID; tag = (journal_block_tag_t *) tagp; write_tag_block(journal, tag, jh2bh(jh)->b_blocknr); tag->t_flags = cpu_to_be16(tag_flag); jbd2_block_tag_csum_set(journal, tag, wbuf[bufs], commit_transaction->t_tid); tagp += tag_bytes; space_left -= tag_bytes; bufs++; if (first_tag) { memcpy (tagp, journal->j_uuid, 16); tagp += 16; space_left -= 16; first_tag = 0; } /* If there's no more to do, or if the descriptor is full, let the IO rip! */ if (bufs == journal->j_wbufsize || commit_transaction->t_buffers == NULL || space_left < tag_bytes + 16 + csum_size) { jbd2_debug(4, "JBD2: Submit %d IOs\n", bufs); /* Write an end-of-descriptor marker before submitting the IOs. "tag" still points to the last tag we set up. */ tag->t_flags |= cpu_to_be16(JBD2_FLAG_LAST_TAG); start_journal_io: if (descriptor) jbd2_descriptor_block_csum_set(journal, descriptor); for (i = 0; i < bufs; i++) { struct buffer_head *bh = wbuf[i]; /* * Compute checksum. */ if (jbd2_has_feature_checksum(journal)) { crc32_sum = jbd2_checksum_data(crc32_sum, bh); } lock_buffer(bh); clear_buffer_dirty(bh); set_buffer_uptodate(bh); bh->b_end_io = journal_end_buffer_io_sync; submit_bh(REQ_OP_WRITE | REQ_SYNC, bh); } cond_resched(); /* Force a new descriptor to be generated next time round the loop. */ descriptor = NULL; bufs = 0; } } err = journal_finish_inode_data_buffers(journal, commit_transaction); if (err) { printk(KERN_WARNING "JBD2: Detected IO errors while flushing file data " "on %s\n", journal->j_devname); if (journal->j_flags & JBD2_ABORT_ON_SYNCDATA_ERR) jbd2_journal_abort(journal, err); err = 0; } /* * Get current oldest transaction in the log before we issue flush * to the filesystem device. After the flush we can be sure that * blocks of all older transactions are checkpointed to persistent * storage and we will be safe to update journal start in the * superblock with the numbers we get here. */ update_tail = jbd2_journal_get_log_tail(journal, &first_tid, &first_block); write_lock(&journal->j_state_lock); if (update_tail) { long freed = first_block - journal->j_tail; if (first_block < journal->j_tail) freed += journal->j_last - journal->j_first; /* Update tail only if we free significant amount of space */ if (freed < jbd2_journal_get_max_txn_bufs(journal)) update_tail = 0; } J_ASSERT(commit_transaction->t_state == T_COMMIT); commit_transaction->t_state = T_COMMIT_DFLUSH; write_unlock(&journal->j_state_lock); /* * If the journal is not located on the file system device, * then we must flush the file system device before we issue * the commit record */ if (commit_transaction->t_need_data_flush && (journal->j_fs_dev != journal->j_dev) && (journal->j_flags & JBD2_BARRIER)) blkdev_issue_flush(journal->j_fs_dev); /* Done it all: now write the commit record asynchronously. */ if (jbd2_has_feature_async_commit(journal)) { err = journal_submit_commit_record(journal, commit_transaction, &cbh, crc32_sum); if (err) jbd2_journal_abort(journal, err); } blk_finish_plug(&plug); /* Lo and behold: we have just managed to send a transaction to the log. 
Before we can commit it, wait for the IO so far to complete. Control buffers being written are on the transaction's t_log_list queue, and metadata buffers are on the io_bufs list. Wait for the buffers in reverse order. That way we are less likely to be woken up until all IOs have completed, and so we incur less scheduling load. */ jbd2_debug(3, "JBD2: commit phase 3\n"); while (!list_empty(&io_bufs)) { struct buffer_head *bh = list_entry(io_bufs.prev, struct buffer_head, b_assoc_buffers); wait_on_buffer(bh); cond_resched(); if (unlikely(!buffer_uptodate(bh))) err = -EIO; jbd2_unfile_log_bh(bh); stats.run.rs_blocks_logged++; /* * The list contains temporary buffer heads created by * jbd2_journal_write_metadata_buffer(). */ BUFFER_TRACE(bh, "dumping temporary bh"); __brelse(bh); J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0); free_buffer_head(bh); /* We also have to refile the corresponding shadowed buffer */ jh = commit_transaction->t_shadow_list->b_tprev; bh = jh2bh(jh); clear_buffer_jwrite(bh); J_ASSERT_BH(bh, buffer_jbddirty(bh)); J_ASSERT_BH(bh, !buffer_shadow(bh)); /* The metadata is now released for reuse, but we need to remember it against this transaction so that when we finally commit, we can do any checkpointing required. */ JBUFFER_TRACE(jh, "file as BJ_Forget"); jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget); JBUFFER_TRACE(jh, "brelse shadowed buffer"); __brelse(bh); } J_ASSERT (commit_transaction->t_shadow_list == NULL); jbd2_debug(3, "JBD2: commit phase 4\n"); /* Here we wait for the revoke record and descriptor record buffers */ while (!list_empty(&log_bufs)) { struct buffer_head *bh; bh = list_entry(log_bufs.prev, struct buffer_head, b_assoc_buffers); wait_on_buffer(bh); cond_resched(); if (unlikely(!buffer_uptodate(bh))) err = -EIO; BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile"); clear_buffer_jwrite(bh); jbd2_unfile_log_bh(bh); stats.run.rs_blocks_logged++; __brelse(bh); /* One for getblk */ /* AKPM: bforget here */ } if (err) jbd2_journal_abort(journal, err); jbd2_debug(3, "JBD2: commit phase 5\n"); write_lock(&journal->j_state_lock); J_ASSERT(commit_transaction->t_state == T_COMMIT_DFLUSH); commit_transaction->t_state = T_COMMIT_JFLUSH; write_unlock(&journal->j_state_lock); if (!jbd2_has_feature_async_commit(journal)) { err = journal_submit_commit_record(journal, commit_transaction, &cbh, crc32_sum); if (err) jbd2_journal_abort(journal, err); } if (cbh) err = journal_wait_on_commit_record(journal, cbh); stats.run.rs_blocks_logged++; if (jbd2_has_feature_async_commit(journal) && journal->j_flags & JBD2_BARRIER) { blkdev_issue_flush(journal->j_dev); } if (err) jbd2_journal_abort(journal, err); WARN_ON_ONCE( atomic_read(&commit_transaction->t_outstanding_credits) < 0); /* * Now disk caches for filesystem device are flushed so we are safe to * erase checkpointed transactions from the log by updating journal * superblock. */ if (update_tail) jbd2_update_log_tail(journal, first_tid, first_block); /* End of a transaction! Finally, we can do checkpoint processing: any buffers committed as a result of this transaction can be removed from any checkpoint list it was on before. 
*/ jbd2_debug(3, "JBD2: commit phase 6\n"); J_ASSERT(list_empty(&commit_transaction->t_inode_list)); J_ASSERT(commit_transaction->t_buffers == NULL); J_ASSERT(commit_transaction->t_checkpoint_list == NULL); J_ASSERT(commit_transaction->t_shadow_list == NULL); restart_loop: /* * As there are other places (journal_unmap_buffer()) adding buffers * to this list we have to be careful and hold the j_list_lock. */ spin_lock(&journal->j_list_lock); while (commit_transaction->t_forget) { transaction_t *cp_transaction; struct buffer_head *bh; int try_to_free = 0; bool drop_ref; jh = commit_transaction->t_forget; spin_unlock(&journal->j_list_lock); bh = jh2bh(jh); /* * Get a reference so that bh cannot be freed before we are * done with it. */ get_bh(bh); spin_lock(&jh->b_state_lock); J_ASSERT_JH(jh, jh->b_transaction == commit_transaction); /* * If there is undo-protected committed data against * this buffer, then we can remove it now. If it is a * buffer needing such protection, the old frozen_data * field now points to a committed version of the * buffer, so rotate that field to the new committed * data. * * Otherwise, we can just throw away the frozen data now. * * We also know that the frozen data has already fired * its triggers if they exist, so we can clear that too. */ if (jh->b_committed_data) { jbd2_free(jh->b_committed_data, bh->b_size); jh->b_committed_data = NULL; if (jh->b_frozen_data) { jh->b_committed_data = jh->b_frozen_data; jh->b_frozen_data = NULL; jh->b_frozen_triggers = NULL; } } else if (jh->b_frozen_data) { jbd2_free(jh->b_frozen_data, bh->b_size); jh->b_frozen_data = NULL; jh->b_frozen_triggers = NULL; } spin_lock(&journal->j_list_lock); cp_transaction = jh->b_cp_transaction; if (cp_transaction) { JBUFFER_TRACE(jh, "remove from old cp transaction"); cp_transaction->t_chp_stats.cs_dropped++; __jbd2_journal_remove_checkpoint(jh); } /* Only re-checkpoint the buffer_head if it is marked * dirty. If the buffer was added to the BJ_Forget list * by jbd2_journal_forget, it may no longer be dirty and * there's no point in keeping a checkpoint record for * it. */ /* * A buffer which has been freed while still being journaled * by a previous transaction, refile the buffer to BJ_Forget of * the running transaction. If the just committed transaction * contains "add to orphan" operation, we can completely * invalidate the buffer now. We are rather through in that * since the buffer may be still accessible when blocksize < * pagesize and it is attached to the last partial page. */ if (buffer_freed(bh) && !jh->b_next_transaction) { struct address_space *mapping; clear_buffer_freed(bh); clear_buffer_jbddirty(bh); /* * Block device buffers need to stay mapped all the * time, so it is enough to clear buffer_jbddirty and * buffer_freed bits. For the file mapping buffers (i.e. * journalled data) we need to unmap buffer and clear * more bits. We also need to be careful about the check * because the data page mapping can get cleared under * our hands. Note that if mapping == NULL, we don't * need to make buffer unmapped because the page is * already detached from the mapping and buffers cannot * get reused. 
*/ mapping = READ_ONCE(bh->b_folio->mapping); if (mapping && !sb_is_blkdev_sb(mapping->host->i_sb)) { clear_buffer_mapped(bh); clear_buffer_new(bh); clear_buffer_req(bh); bh->b_bdev = NULL; } } if (buffer_jbddirty(bh)) { JBUFFER_TRACE(jh, "add to new checkpointing trans"); __jbd2_journal_insert_checkpoint(jh, commit_transaction); if (is_journal_aborted(journal)) clear_buffer_jbddirty(bh); } else { J_ASSERT_BH(bh, !buffer_dirty(bh)); /* * The buffer on BJ_Forget list and not jbddirty means * it has been freed by this transaction and hence it * could not have been reallocated until this * transaction has committed. *BUT* it could be * reallocated once we have written all the data to * disk and before we process the buffer on BJ_Forget * list. */ if (!jh->b_next_transaction) try_to_free = 1; } JBUFFER_TRACE(jh, "refile or unfile buffer"); drop_ref = __jbd2_journal_refile_buffer(jh); spin_unlock(&jh->b_state_lock); if (drop_ref) jbd2_journal_put_journal_head(jh); if (try_to_free) release_buffer_page(bh); /* Drops bh reference */ else __brelse(bh); cond_resched_lock(&journal->j_list_lock); } spin_unlock(&journal->j_list_lock); /* * This is a bit sleazy. We use j_list_lock to protect transition * of a transaction into T_FINISHED state and calling * __jbd2_journal_drop_transaction(). Otherwise we could race with * other checkpointing code processing the transaction... */ write_lock(&journal->j_state_lock); spin_lock(&journal->j_list_lock); /* * Now recheck if some buffers did not get attached to the transaction * while the lock was dropped... */ if (commit_transaction->t_forget) { spin_unlock(&journal->j_list_lock); write_unlock(&journal->j_state_lock); goto restart_loop; } /* Add the transaction to the checkpoint list * __journal_remove_checkpoint() can not destroy transaction * under us because it is not marked as T_FINISHED yet */ if (journal->j_checkpoint_transactions == NULL) { journal->j_checkpoint_transactions = commit_transaction; commit_transaction->t_cpnext = commit_transaction; commit_transaction->t_cpprev = commit_transaction; } else { commit_transaction->t_cpnext = journal->j_checkpoint_transactions; commit_transaction->t_cpprev = commit_transaction->t_cpnext->t_cpprev; commit_transaction->t_cpnext->t_cpprev = commit_transaction; commit_transaction->t_cpprev->t_cpnext = commit_transaction; } spin_unlock(&journal->j_list_lock); /* Done with this transaction! */ jbd2_debug(3, "JBD2: commit phase 7\n"); J_ASSERT(commit_transaction->t_state == T_COMMIT_JFLUSH); commit_transaction->t_start = jiffies; stats.run.rs_logging = jbd2_time_diff(stats.run.rs_logging, commit_transaction->t_start); /* * File the transaction statistics */ stats.ts_tid = commit_transaction->t_tid; stats.run.rs_handle_count = atomic_read(&commit_transaction->t_handle_count); trace_jbd2_run_stats(journal->j_fs_dev->bd_dev, commit_transaction->t_tid, &stats.run); stats.ts_requested = (commit_transaction->t_requested) ? 
1 : 0; commit_transaction->t_state = T_COMMIT_CALLBACK; J_ASSERT(commit_transaction == journal->j_committing_transaction); journal->j_commit_sequence = commit_transaction->t_tid; journal->j_committing_transaction = NULL; commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time)); /* * weight the commit time higher than the average time so we don't * react too strongly to vast changes in the commit time */ if (likely(journal->j_average_commit_time)) journal->j_average_commit_time = (commit_time + journal->j_average_commit_time*3) / 4; else journal->j_average_commit_time = commit_time; write_unlock(&journal->j_state_lock); if (journal->j_commit_callback) journal->j_commit_callback(journal, commit_transaction); if (journal->j_fc_cleanup_callback) journal->j_fc_cleanup_callback(journal, 1, commit_transaction->t_tid); trace_jbd2_end_commit(journal, commit_transaction); jbd2_debug(1, "JBD2: commit %d complete, head %d\n", journal->j_commit_sequence, journal->j_tail_sequence); write_lock(&journal->j_state_lock); journal->j_flags &= ~JBD2_FULL_COMMIT_ONGOING; journal->j_flags &= ~JBD2_FAST_COMMIT_ONGOING; spin_lock(&journal->j_list_lock); commit_transaction->t_state = T_FINISHED; /* Check if the transaction can be dropped now that we are finished */ if (commit_transaction->t_checkpoint_list == NULL) { __jbd2_journal_drop_transaction(journal, commit_transaction); jbd2_journal_free_transaction(commit_transaction); } spin_unlock(&journal->j_list_lock); write_unlock(&journal->j_state_lock); wake_up(&journal->j_wait_done_commit); wake_up(&journal->j_fc_wait); /* * Calculate overall stats */ spin_lock(&journal->j_history_lock); journal->j_stats.ts_tid++; journal->j_stats.ts_requested += stats.ts_requested; journal->j_stats.run.rs_wait += stats.run.rs_wait; journal->j_stats.run.rs_request_delay += stats.run.rs_request_delay; journal->j_stats.run.rs_running += stats.run.rs_running; journal->j_stats.run.rs_locked += stats.run.rs_locked; journal->j_stats.run.rs_flushing += stats.run.rs_flushing; journal->j_stats.run.rs_logging += stats.run.rs_logging; journal->j_stats.run.rs_handle_count += stats.run.rs_handle_count; journal->j_stats.run.rs_blocks += stats.run.rs_blocks; journal->j_stats.run.rs_blocks_logged += stats.run.rs_blocks_logged; spin_unlock(&journal->j_history_lock); }
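/*
 * Illustrative sketch, not part of fs/jbd2/commit.c: one plausible way a caller
 * on the filesystem side drives the commit machinery above.  It asks the journal
 * thread to commit the currently running transaction (which ends up in
 * jbd2_journal_commit_transaction() shown above) and then sleeps until that tid
 * has committed.  The function name is made up for illustration only;
 * jbd2_log_start_commit() and jbd2_log_wait_commit() are assumed from the
 * public jbd2 API.
 */
#include <linux/jbd2.h>

static int example_sync_running_transaction(journal_t *journal)
{
	tid_t tid = 0;
	int need_wait = 0;

	read_lock(&journal->j_state_lock);
	if (journal->j_running_transaction) {
		/* Request a commit of the transaction that is still running. */
		tid = journal->j_running_transaction->t_tid;
		need_wait = 1;
	} else if (journal->j_committing_transaction) {
		/* A commit is already in flight; just wait for that one. */
		tid = journal->j_committing_transaction->t_tid;
		need_wait = 1;
	}
	read_unlock(&journal->j_state_lock);

	if (!need_wait)
		return 0;

	/* Wake the journal thread; harmless if this commit was already requested. */
	jbd2_log_start_commit(journal, tid);
	/* Sleep until the commit of this tid has completed. */
	return jbd2_log_wait_commit(journal, tid);
}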
linux-master
fs/jbd2/commit.c
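/*
 * Illustrative sketch, not part of the kernel sources that follow: the typical
 * handle lifecycle that fs/jbd2/transaction.c below implements.  A filesystem
 * starts a handle with a credit estimate, declares each metadata buffer it will
 * touch, modifies it, marks it dirty against the handle, and finally stops the
 * handle.  example_journalled_update() is a made-up name and the buffer_head is
 * assumed to have been read by the caller; only the jbd2_journal_* calls are
 * real API from the file below.
 */
#include <linux/jbd2.h>
#include <linux/buffer_head.h>

static int example_journalled_update(journal_t *journal, struct buffer_head *bh)
{
	handle_t *handle;
	int err;

	/* Reserve credits for one modified metadata block. */
	handle = jbd2_journal_start(journal, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* Declare intent to modify bh; may copy-out against a committing transaction. */
	err = jbd2_journal_get_write_access(handle, bh);
	if (err)
		goto out_stop;

	/* ... modify bh->b_data here ... */

	/* File the buffer on the running transaction's metadata list. */
	err = jbd2_journal_dirty_metadata(handle, bh);

out_stop:
	/* Drop the handle; the commit code in the record above does the actual I/O. */
	jbd2_journal_stop(handle);
	return err;
}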
// SPDX-License-Identifier: GPL-2.0+ /* * linux/fs/jbd2/transaction.c * * Written by Stephen C. Tweedie <[email protected]>, 1998 * * Copyright 1998 Red Hat corp --- All Rights Reserved * * Generic filesystem transaction handling code; part of the ext2fs * journaling system. * * This file manages transactions (compound commits managed by the * journaling code) and handles (individual atomic operations by the * filesystem). */ #include <linux/time.h> #include <linux/fs.h> #include <linux/jbd2.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/timer.h> #include <linux/mm.h> #include <linux/highmem.h> #include <linux/hrtimer.h> #include <linux/backing-dev.h> #include <linux/bug.h> #include <linux/module.h> #include <linux/sched/mm.h> #include <trace/events/jbd2.h> static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh); static void __jbd2_journal_unfile_buffer(struct journal_head *jh); static struct kmem_cache *transaction_cache; int __init jbd2_journal_init_transaction_cache(void) { J_ASSERT(!transaction_cache); transaction_cache = kmem_cache_create("jbd2_transaction_s", sizeof(transaction_t), 0, SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY, NULL); if (!transaction_cache) { pr_emerg("JBD2: failed to create transaction cache\n"); return -ENOMEM; } return 0; } void jbd2_journal_destroy_transaction_cache(void) { kmem_cache_destroy(transaction_cache); transaction_cache = NULL; } void jbd2_journal_free_transaction(transaction_t *transaction) { if (unlikely(ZERO_OR_NULL_PTR(transaction))) return; kmem_cache_free(transaction_cache, transaction); } /* * Base amount of descriptor blocks we reserve for each transaction. */ static int jbd2_descriptor_blocks_per_trans(journal_t *journal) { int tag_space = journal->j_blocksize - sizeof(journal_header_t); int tags_per_block; /* Subtract UUID */ tag_space -= 16; if (jbd2_journal_has_csum_v2or3(journal)) tag_space -= sizeof(struct jbd2_journal_block_tail); /* Commit code leaves a slack space of 16 bytes at the end of block */ tags_per_block = (tag_space - 16) / journal_tag_bytes(journal); /* * Revoke descriptors are accounted separately so we need to reserve * space for commit block and normal transaction descriptor blocks. */ return 1 + DIV_ROUND_UP(journal->j_max_transaction_buffers, tags_per_block); } /* * jbd2_get_transaction: obtain a new transaction_t object. * * Simply initialise a new transaction. Initialize it in * RUNNING state and add it to the current journal (which should not * have an existing running transaction: we only make a new transaction * once we have started to commit the old one). * * Preconditions: * The journal MUST be locked. We don't perform atomic mallocs on the * new transaction and we can't block without protecting against other * processes trying to touch the journal while it is in transition. 
* */ static void jbd2_get_transaction(journal_t *journal, transaction_t *transaction) { transaction->t_journal = journal; transaction->t_state = T_RUNNING; transaction->t_start_time = ktime_get(); transaction->t_tid = journal->j_transaction_sequence++; transaction->t_expires = jiffies + journal->j_commit_interval; atomic_set(&transaction->t_updates, 0); atomic_set(&transaction->t_outstanding_credits, jbd2_descriptor_blocks_per_trans(journal) + atomic_read(&journal->j_reserved_credits)); atomic_set(&transaction->t_outstanding_revokes, 0); atomic_set(&transaction->t_handle_count, 0); INIT_LIST_HEAD(&transaction->t_inode_list); INIT_LIST_HEAD(&transaction->t_private_list); /* Set up the commit timer for the new transaction. */ journal->j_commit_timer.expires = round_jiffies_up(transaction->t_expires); add_timer(&journal->j_commit_timer); J_ASSERT(journal->j_running_transaction == NULL); journal->j_running_transaction = transaction; transaction->t_max_wait = 0; transaction->t_start = jiffies; transaction->t_requested = 0; } /* * Handle management. * * A handle_t is an object which represents a single atomic update to a * filesystem, and which tracks all of the modifications which form part * of that one update. */ /* * Update transaction's maximum wait time, if debugging is enabled. * * t_max_wait is carefully updated here with use of atomic compare exchange. * Note that there could be multiple threads trying to do this simultaneously * hence using cmpxchg to avoid any use of locks in this case. * With this t_max_wait can be updated w/o enabling jbd2_journal_enable_debug. */ static inline void update_t_max_wait(transaction_t *transaction, unsigned long ts) { unsigned long oldts, newts; if (time_after(transaction->t_start, ts)) { newts = jbd2_time_diff(ts, transaction->t_start); oldts = READ_ONCE(transaction->t_max_wait); while (oldts < newts) oldts = cmpxchg(&transaction->t_max_wait, oldts, newts); } } /* * Wait until running transaction passes to T_FLUSH state and new transaction * can thus be started. Also starts the commit if needed. The function expects * running transaction to exist and releases j_state_lock. */ static void wait_transaction_locked(journal_t *journal) __releases(journal->j_state_lock) { DEFINE_WAIT(wait); int need_to_start; tid_t tid = journal->j_running_transaction->t_tid; prepare_to_wait_exclusive(&journal->j_wait_transaction_locked, &wait, TASK_UNINTERRUPTIBLE); need_to_start = !tid_geq(journal->j_commit_request, tid); read_unlock(&journal->j_state_lock); if (need_to_start) jbd2_log_start_commit(journal, tid); jbd2_might_wait_for_commit(journal); schedule(); finish_wait(&journal->j_wait_transaction_locked, &wait); } /* * Wait until running transaction transitions from T_SWITCH to T_FLUSH * state and new transaction can thus be started. The function releases * j_state_lock. */ static void wait_transaction_switching(journal_t *journal) __releases(journal->j_state_lock) { DEFINE_WAIT(wait); if (WARN_ON(!journal->j_running_transaction || journal->j_running_transaction->t_state != T_SWITCH)) { read_unlock(&journal->j_state_lock); return; } prepare_to_wait_exclusive(&journal->j_wait_transaction_locked, &wait, TASK_UNINTERRUPTIBLE); read_unlock(&journal->j_state_lock); /* * We don't call jbd2_might_wait_for_commit() here as there's no * waiting for outstanding handles happening anymore in T_SWITCH state * and handling of reserved handles actually relies on that for * correctness. 
*/ schedule(); finish_wait(&journal->j_wait_transaction_locked, &wait); } static void sub_reserved_credits(journal_t *journal, int blocks) { atomic_sub(blocks, &journal->j_reserved_credits); wake_up(&journal->j_wait_reserved); } /* * Wait until we can add credits for handle to the running transaction. Called * with j_state_lock held for reading. Returns 0 if handle joined the running * transaction. Returns 1 if we had to wait, j_state_lock is dropped, and * caller must retry. * * Note: because j_state_lock may be dropped depending on the return * value, we need to fake out sparse so it doesn't complain about a * locking imbalance. Callers of add_transaction_credits will need to * make a similar accommodation. */ static int add_transaction_credits(journal_t *journal, int blocks, int rsv_blocks) __must_hold(&journal->j_state_lock) { transaction_t *t = journal->j_running_transaction; int needed; int total = blocks + rsv_blocks; /* * If the current transaction is locked down for commit, wait * for the lock to be released. */ if (t->t_state != T_RUNNING) { WARN_ON_ONCE(t->t_state >= T_FLUSH); wait_transaction_locked(journal); __acquire(&journal->j_state_lock); /* fake out sparse */ return 1; } /* * If there is not enough space left in the log to write all * potential buffers requested by this operation, we need to * stall pending a log checkpoint to free some more log space. */ needed = atomic_add_return(total, &t->t_outstanding_credits); if (needed > journal->j_max_transaction_buffers) { /* * If the current transaction is already too large, * then start to commit it: we can then go back and * attach this handle to a new transaction. */ atomic_sub(total, &t->t_outstanding_credits); /* * Is the number of reserved credits in the current transaction too * big to fit this handle? Wait until reserved credits are freed. */ if (atomic_read(&journal->j_reserved_credits) + total > journal->j_max_transaction_buffers) { read_unlock(&journal->j_state_lock); jbd2_might_wait_for_commit(journal); wait_event(journal->j_wait_reserved, atomic_read(&journal->j_reserved_credits) + total <= journal->j_max_transaction_buffers); __acquire(&journal->j_state_lock); /* fake out sparse */ return 1; } wait_transaction_locked(journal); __acquire(&journal->j_state_lock); /* fake out sparse */ return 1; } /* * The commit code assumes that it can get enough log space * without forcing a checkpoint. This is *critical* for * correctness: a checkpoint of a buffer which is also * associated with a committing transaction creates a deadlock, * so commit simply cannot force through checkpoints. * * We must therefore ensure the necessary space in the journal * *before* starting to dirty potentially checkpointed buffers * in the new transaction. */ if (jbd2_log_space_left(journal) < journal->j_max_transaction_buffers) { atomic_sub(total, &t->t_outstanding_credits); read_unlock(&journal->j_state_lock); jbd2_might_wait_for_commit(journal); write_lock(&journal->j_state_lock); if (jbd2_log_space_left(journal) < journal->j_max_transaction_buffers) __jbd2_log_wait_for_space(journal); write_unlock(&journal->j_state_lock); __acquire(&journal->j_state_lock); /* fake out sparse */ return 1; } /* No reservation? We are done... 
*/ if (!rsv_blocks) return 0; needed = atomic_add_return(rsv_blocks, &journal->j_reserved_credits); /* We allow at most half of a transaction to be reserved */ if (needed > journal->j_max_transaction_buffers / 2) { sub_reserved_credits(journal, rsv_blocks); atomic_sub(total, &t->t_outstanding_credits); read_unlock(&journal->j_state_lock); jbd2_might_wait_for_commit(journal); wait_event(journal->j_wait_reserved, atomic_read(&journal->j_reserved_credits) + rsv_blocks <= journal->j_max_transaction_buffers / 2); __acquire(&journal->j_state_lock); /* fake out sparse */ return 1; } return 0; } /* * start_this_handle: Given a handle, deal with any locking or stalling * needed to make sure that there is enough journal space for the handle * to begin. Attach the handle to a transaction and set up the * transaction's buffer credits. */ static int start_this_handle(journal_t *journal, handle_t *handle, gfp_t gfp_mask) { transaction_t *transaction, *new_transaction = NULL; int blocks = handle->h_total_credits; int rsv_blocks = 0; unsigned long ts = jiffies; if (handle->h_rsv_handle) rsv_blocks = handle->h_rsv_handle->h_total_credits; /* * Limit the number of reserved credits to 1/2 of maximum transaction * size and limit the number of total credits to not exceed maximum * transaction size per operation. */ if ((rsv_blocks > journal->j_max_transaction_buffers / 2) || (rsv_blocks + blocks > journal->j_max_transaction_buffers)) { printk(KERN_ERR "JBD2: %s wants too many credits " "credits:%d rsv_credits:%d max:%d\n", current->comm, blocks, rsv_blocks, journal->j_max_transaction_buffers); WARN_ON(1); return -ENOSPC; } alloc_transaction: /* * This check is racy but it is just an optimization of allocating new * transaction early if there are high chances we'll need it. If we * guess wrong, we'll retry or free unused transaction. */ if (!data_race(journal->j_running_transaction)) { /* * If __GFP_FS is not present, then we may be being called from * inside the fs writeback layer, so we MUST NOT fail. */ if ((gfp_mask & __GFP_FS) == 0) gfp_mask |= __GFP_NOFAIL; new_transaction = kmem_cache_zalloc(transaction_cache, gfp_mask); if (!new_transaction) return -ENOMEM; } jbd2_debug(3, "New handle %p going live.\n", handle); /* * We need to hold j_state_lock until t_updates has been incremented, * for proper journal barrier handling */ repeat: read_lock(&journal->j_state_lock); BUG_ON(journal->j_flags & JBD2_UNMOUNT); if (is_journal_aborted(journal) || (journal->j_errno != 0 && !(journal->j_flags & JBD2_ACK_ERR))) { read_unlock(&journal->j_state_lock); jbd2_journal_free_transaction(new_transaction); return -EROFS; } /* * Wait on the journal's transaction barrier if necessary. Specifically * we allow reserved handles to proceed because otherwise commit could * deadlock on page writeback not being able to complete. 
*/ if (!handle->h_reserved && journal->j_barrier_count) { read_unlock(&journal->j_state_lock); wait_event(journal->j_wait_transaction_locked, journal->j_barrier_count == 0); goto repeat; } if (!journal->j_running_transaction) { read_unlock(&journal->j_state_lock); if (!new_transaction) goto alloc_transaction; write_lock(&journal->j_state_lock); if (!journal->j_running_transaction && (handle->h_reserved || !journal->j_barrier_count)) { jbd2_get_transaction(journal, new_transaction); new_transaction = NULL; } write_unlock(&journal->j_state_lock); goto repeat; } transaction = journal->j_running_transaction; if (!handle->h_reserved) { /* We may have dropped j_state_lock - restart in that case */ if (add_transaction_credits(journal, blocks, rsv_blocks)) { /* * add_transaction_credits releases * j_state_lock on a non-zero return */ __release(&journal->j_state_lock); goto repeat; } } else { /* * We have handle reserved so we are allowed to join T_LOCKED * transaction and we don't have to check for transaction size * and journal space. But we still have to wait while running * transaction is being switched to a committing one as it * won't wait for any handles anymore. */ if (transaction->t_state == T_SWITCH) { wait_transaction_switching(journal); goto repeat; } sub_reserved_credits(journal, blocks); handle->h_reserved = 0; } /* OK, account for the buffers that this operation expects to * use and add the handle to the running transaction. */ update_t_max_wait(transaction, ts); handle->h_transaction = transaction; handle->h_requested_credits = blocks; handle->h_revoke_credits_requested = handle->h_revoke_credits; handle->h_start_jiffies = jiffies; atomic_inc(&transaction->t_updates); atomic_inc(&transaction->t_handle_count); jbd2_debug(4, "Handle %p given %d credits (total %d, free %lu)\n", handle, blocks, atomic_read(&transaction->t_outstanding_credits), jbd2_log_space_left(journal)); read_unlock(&journal->j_state_lock); current->journal_info = handle; rwsem_acquire_read(&journal->j_trans_commit_map, 0, 0, _THIS_IP_); jbd2_journal_free_transaction(new_transaction); /* * Ensure that no allocations done while the transaction is open are * going to recurse back to the fs layer. */ handle->saved_alloc_context = memalloc_nofs_save(); return 0; } /* Allocate a new handle. This should probably be in a slab... 
*/ static handle_t *new_handle(int nblocks) { handle_t *handle = jbd2_alloc_handle(GFP_NOFS); if (!handle) return NULL; handle->h_total_credits = nblocks; handle->h_ref = 1; return handle; } handle_t *jbd2__journal_start(journal_t *journal, int nblocks, int rsv_blocks, int revoke_records, gfp_t gfp_mask, unsigned int type, unsigned int line_no) { handle_t *handle = journal_current_handle(); int err; if (!journal) return ERR_PTR(-EROFS); if (handle) { J_ASSERT(handle->h_transaction->t_journal == journal); handle->h_ref++; return handle; } nblocks += DIV_ROUND_UP(revoke_records, journal->j_revoke_records_per_block); handle = new_handle(nblocks); if (!handle) return ERR_PTR(-ENOMEM); if (rsv_blocks) { handle_t *rsv_handle; rsv_handle = new_handle(rsv_blocks); if (!rsv_handle) { jbd2_free_handle(handle); return ERR_PTR(-ENOMEM); } rsv_handle->h_reserved = 1; rsv_handle->h_journal = journal; handle->h_rsv_handle = rsv_handle; } handle->h_revoke_credits = revoke_records; err = start_this_handle(journal, handle, gfp_mask); if (err < 0) { if (handle->h_rsv_handle) jbd2_free_handle(handle->h_rsv_handle); jbd2_free_handle(handle); return ERR_PTR(err); } handle->h_type = type; handle->h_line_no = line_no; trace_jbd2_handle_start(journal->j_fs_dev->bd_dev, handle->h_transaction->t_tid, type, line_no, nblocks); return handle; } EXPORT_SYMBOL(jbd2__journal_start); /** * jbd2_journal_start() - Obtain a new handle. * @journal: Journal to start transaction on. * @nblocks: number of block buffers we might modify * * We make sure that the transaction can guarantee at least nblocks of * modified buffers in the log. We block until the log can guarantee * that much space. Additionally, if rsv_blocks > 0, we also create another * handle with rsv_blocks reserved blocks in the journal. This handle is * stored in h_rsv_handle. It is not attached to any particular transaction * and thus doesn't block transaction commit. If the caller uses this reserved * handle, it has to set h_rsv_handle to NULL as otherwise jbd2_journal_stop() * on the parent handle will dispose the reserved one. Reserved handle has to * be converted to a normal handle using jbd2_journal_start_reserved() before * it can be used. * * Return a pointer to a newly allocated handle, or an ERR_PTR() value * on failure. */ handle_t *jbd2_journal_start(journal_t *journal, int nblocks) { return jbd2__journal_start(journal, nblocks, 0, 0, GFP_NOFS, 0, 0); } EXPORT_SYMBOL(jbd2_journal_start); static void __jbd2_journal_unreserve_handle(handle_t *handle, transaction_t *t) { journal_t *journal = handle->h_journal; WARN_ON(!handle->h_reserved); sub_reserved_credits(journal, handle->h_total_credits); if (t) atomic_sub(handle->h_total_credits, &t->t_outstanding_credits); } void jbd2_journal_free_reserved(handle_t *handle) { journal_t *journal = handle->h_journal; /* Get j_state_lock to pin running transaction if it exists */ read_lock(&journal->j_state_lock); __jbd2_journal_unreserve_handle(handle, journal->j_running_transaction); read_unlock(&journal->j_state_lock); jbd2_free_handle(handle); } EXPORT_SYMBOL(jbd2_journal_free_reserved); /** * jbd2_journal_start_reserved() - start reserved handle * @handle: handle to start * @type: for handle statistics * @line_no: for handle statistics * * Start handle that has been previously reserved with jbd2_journal_reserve(). * This attaches @handle to the running transaction (or creates one if there's * no transaction running). 
Unlike jbd2_journal_start() this function cannot * block on journal commit, checkpointing, or similar stuff. It can block on * memory allocation or frozen journal though. * * Return 0 on success, non-zero on error - handle is freed in that case. */ int jbd2_journal_start_reserved(handle_t *handle, unsigned int type, unsigned int line_no) { journal_t *journal = handle->h_journal; int ret = -EIO; if (WARN_ON(!handle->h_reserved)) { /* Someone passed in normal handle? Just stop it. */ jbd2_journal_stop(handle); return ret; } /* * Usefulness of mixing of reserved and unreserved handles is * questionable. So far nobody seems to need it so just error out. */ if (WARN_ON(current->journal_info)) { jbd2_journal_free_reserved(handle); return ret; } handle->h_journal = NULL; /* * GFP_NOFS is here because callers are likely from writeback or * similarly constrained call sites */ ret = start_this_handle(journal, handle, GFP_NOFS); if (ret < 0) { handle->h_journal = journal; jbd2_journal_free_reserved(handle); return ret; } handle->h_type = type; handle->h_line_no = line_no; trace_jbd2_handle_start(journal->j_fs_dev->bd_dev, handle->h_transaction->t_tid, type, line_no, handle->h_total_credits); return 0; } EXPORT_SYMBOL(jbd2_journal_start_reserved); /** * jbd2_journal_extend() - extend buffer credits. * @handle: handle to 'extend' * @nblocks: nr blocks to try to extend by. * @revoke_records: number of revoke records to try to extend by. * * Some transactions, such as large extends and truncates, can be done * atomically all at once or in several stages. The operation requests * a credit for a number of buffer modifications in advance, but can * extend its credit if it needs more. * * jbd2_journal_extend tries to give the running handle more buffer credits. * It does not guarantee that allocation - this is a best-effort only. * The calling process MUST be able to deal cleanly with a failure to * extend here. * * Return 0 on success, non-zero on failure. * * return code < 0 implies an error * return code > 0 implies normal transaction-full status. */ int jbd2_journal_extend(handle_t *handle, int nblocks, int revoke_records) { transaction_t *transaction = handle->h_transaction; journal_t *journal; int result; int wanted; if (is_handle_aborted(handle)) return -EROFS; journal = transaction->t_journal; result = 1; read_lock(&journal->j_state_lock); /* Don't extend a locked-down transaction! 
*/ if (transaction->t_state != T_RUNNING) { jbd2_debug(3, "denied handle %p %d blocks: " "transaction not running\n", handle, nblocks); goto error_out; } nblocks += DIV_ROUND_UP( handle->h_revoke_credits_requested + revoke_records, journal->j_revoke_records_per_block) - DIV_ROUND_UP( handle->h_revoke_credits_requested, journal->j_revoke_records_per_block); wanted = atomic_add_return(nblocks, &transaction->t_outstanding_credits); if (wanted > journal->j_max_transaction_buffers) { jbd2_debug(3, "denied handle %p %d blocks: " "transaction too large\n", handle, nblocks); atomic_sub(nblocks, &transaction->t_outstanding_credits); goto error_out; } trace_jbd2_handle_extend(journal->j_fs_dev->bd_dev, transaction->t_tid, handle->h_type, handle->h_line_no, handle->h_total_credits, nblocks); handle->h_total_credits += nblocks; handle->h_requested_credits += nblocks; handle->h_revoke_credits += revoke_records; handle->h_revoke_credits_requested += revoke_records; result = 0; jbd2_debug(3, "extended handle %p by %d\n", handle, nblocks); error_out: read_unlock(&journal->j_state_lock); return result; } static void stop_this_handle(handle_t *handle) { transaction_t *transaction = handle->h_transaction; journal_t *journal = transaction->t_journal; int revokes; J_ASSERT(journal_current_handle() == handle); J_ASSERT(atomic_read(&transaction->t_updates) > 0); current->journal_info = NULL; /* * Subtract necessary revoke descriptor blocks from handle credits. We * take care to account only for revoke descriptor blocks the * transaction will really need as large sequences of transactions with * small numbers of revokes are relatively common. */ revokes = handle->h_revoke_credits_requested - handle->h_revoke_credits; if (revokes) { int t_revokes, revoke_descriptors; int rr_per_blk = journal->j_revoke_records_per_block; WARN_ON_ONCE(DIV_ROUND_UP(revokes, rr_per_blk) > handle->h_total_credits); t_revokes = atomic_add_return(revokes, &transaction->t_outstanding_revokes); revoke_descriptors = DIV_ROUND_UP(t_revokes, rr_per_blk) - DIV_ROUND_UP(t_revokes - revokes, rr_per_blk); handle->h_total_credits -= revoke_descriptors; } atomic_sub(handle->h_total_credits, &transaction->t_outstanding_credits); if (handle->h_rsv_handle) __jbd2_journal_unreserve_handle(handle->h_rsv_handle, transaction); if (atomic_dec_and_test(&transaction->t_updates)) wake_up(&journal->j_wait_updates); rwsem_release(&journal->j_trans_commit_map, _THIS_IP_); /* * Scope of the GFP_NOFS context is over here and so we can restore the * original alloc context. */ memalloc_nofs_restore(handle->saved_alloc_context); } /** * jbd2__journal_restart() - restart a handle . * @handle: handle to restart * @nblocks: nr credits requested * @revoke_records: number of revoke record credits requested * @gfp_mask: memory allocation flags (for start_this_handle) * * Restart a handle for a multi-transaction filesystem * operation. * * If the jbd2_journal_extend() call above fails to grant new buffer credits * to a running handle, a call to jbd2_journal_restart will commit the * handle's transaction so far and reattach the handle to a new * transaction capable of guaranteeing the requested number of * credits. We preserve reserved handle if there's any attached to the * passed in handle. 
*/ int jbd2__journal_restart(handle_t *handle, int nblocks, int revoke_records, gfp_t gfp_mask) { transaction_t *transaction = handle->h_transaction; journal_t *journal; tid_t tid; int need_to_start; int ret; /* If we've had an abort of any type, don't even think about * actually doing the restart! */ if (is_handle_aborted(handle)) return 0; journal = transaction->t_journal; tid = transaction->t_tid; /* * First unlink the handle from its current transaction, and start the * commit on that. */ jbd2_debug(2, "restarting handle %p\n", handle); stop_this_handle(handle); handle->h_transaction = NULL; /* * TODO: If we use READ_ONCE / WRITE_ONCE for j_commit_request we can * get rid of pointless j_state_lock traffic like this. */ read_lock(&journal->j_state_lock); need_to_start = !tid_geq(journal->j_commit_request, tid); read_unlock(&journal->j_state_lock); if (need_to_start) jbd2_log_start_commit(journal, tid); handle->h_total_credits = nblocks + DIV_ROUND_UP(revoke_records, journal->j_revoke_records_per_block); handle->h_revoke_credits = revoke_records; ret = start_this_handle(journal, handle, gfp_mask); trace_jbd2_handle_restart(journal->j_fs_dev->bd_dev, ret ? 0 : handle->h_transaction->t_tid, handle->h_type, handle->h_line_no, handle->h_total_credits); return ret; } EXPORT_SYMBOL(jbd2__journal_restart); int jbd2_journal_restart(handle_t *handle, int nblocks) { return jbd2__journal_restart(handle, nblocks, 0, GFP_NOFS); } EXPORT_SYMBOL(jbd2_journal_restart); /* * Waits for any outstanding t_updates to finish. * This is called with write j_state_lock held. */ void jbd2_journal_wait_updates(journal_t *journal) { DEFINE_WAIT(wait); while (1) { /* * Note that the running transaction can get freed under us if * this transaction is getting committed in * jbd2_journal_commit_transaction() -> * jbd2_journal_free_transaction(). This can only happen when we * release j_state_lock -> schedule() -> acquire j_state_lock. * Hence we should retrieve the new j_running_transaction * value every time (after a j_state_lock release-acquire cycle), else it may * lead to use-after-free of the old freed transaction. */ transaction_t *transaction = journal->j_running_transaction; if (!transaction) break; prepare_to_wait(&journal->j_wait_updates, &wait, TASK_UNINTERRUPTIBLE); if (!atomic_read(&transaction->t_updates)) { finish_wait(&journal->j_wait_updates, &wait); break; } write_unlock(&journal->j_state_lock); schedule(); finish_wait(&journal->j_wait_updates, &wait); write_lock(&journal->j_state_lock); } } /** * jbd2_journal_lock_updates () - establish a transaction barrier. * @journal: Journal to establish a barrier on. * * This locks out any further updates from being started, and blocks * until all existing updates have completed, returning only once the * journal is in a quiescent state with no updates running. * * The journal lock should not be held on entry. 
*/ void jbd2_journal_lock_updates(journal_t *journal) { jbd2_might_wait_for_commit(journal); write_lock(&journal->j_state_lock); ++journal->j_barrier_count; /* Wait until there are no reserved handles */ if (atomic_read(&journal->j_reserved_credits)) { write_unlock(&journal->j_state_lock); wait_event(journal->j_wait_reserved, atomic_read(&journal->j_reserved_credits) == 0); write_lock(&journal->j_state_lock); } /* Wait until there are no running t_updates */ jbd2_journal_wait_updates(journal); write_unlock(&journal->j_state_lock); /* * We have now established a barrier against other normal updates, but * we also need to barrier against other jbd2_journal_lock_updates() calls * to make sure that we serialise special journal-locked operations * too. */ mutex_lock(&journal->j_barrier); } /** * jbd2_journal_unlock_updates () - release barrier * @journal: Journal to release the barrier on. * * Release a transaction barrier obtained with jbd2_journal_lock_updates(). * * Should be called without the journal lock held. */ void jbd2_journal_unlock_updates (journal_t *journal) { J_ASSERT(journal->j_barrier_count != 0); mutex_unlock(&journal->j_barrier); write_lock(&journal->j_state_lock); --journal->j_barrier_count; write_unlock(&journal->j_state_lock); wake_up_all(&journal->j_wait_transaction_locked); } static void warn_dirty_buffer(struct buffer_head *bh) { printk(KERN_WARNING "JBD2: Spotted dirty metadata buffer (dev = %pg, blocknr = %llu). " "There's a risk of filesystem corruption in case of system " "crash.\n", bh->b_bdev, (unsigned long long)bh->b_blocknr); } /* Call t_frozen trigger and copy buffer data into jh->b_frozen_data. */ static void jbd2_freeze_jh_data(struct journal_head *jh) { char *source; struct buffer_head *bh = jh2bh(jh); J_EXPECT_JH(jh, buffer_uptodate(bh), "Possible IO failure.\n"); source = kmap_local_folio(bh->b_folio, bh_offset(bh)); /* Fire data frozen trigger just before we copy the data */ jbd2_buffer_frozen_trigger(jh, source, jh->b_triggers); memcpy(jh->b_frozen_data, source, bh->b_size); kunmap_local(source); /* * Now that the frozen data is saved off, we need to store any matching * triggers. */ jh->b_frozen_triggers = jh->b_triggers; } /* * If the buffer is already part of the current transaction, then there * is nothing we need to do. If it is already part of a prior * transaction which we are still committing to disk, then we need to * make sure that we do not overwrite the old copy: we do copy-out to * preserve the copy going to disk. We also account the buffer against * the handle's metadata buffer credits (unless the buffer is already * part of the transaction, that is). * */ static int do_get_write_access(handle_t *handle, struct journal_head *jh, int force_copy) { struct buffer_head *bh; transaction_t *transaction = handle->h_transaction; journal_t *journal; int error; char *frozen_buffer = NULL; unsigned long start_lock, time_lock; journal = transaction->t_journal; jbd2_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy); JBUFFER_TRACE(jh, "entry"); repeat: bh = jh2bh(jh); /* @@@ Need to check for errors here at some point. */ start_lock = jiffies; lock_buffer(bh); spin_lock(&jh->b_state_lock); /* If it takes too long to lock the buffer, trace it */ time_lock = jbd2_time_diff(start_lock, jiffies); if (time_lock > HZ/10) trace_jbd2_lock_buffer_stall(bh->b_bdev->bd_dev, jiffies_to_msecs(time_lock)); /* We now hold the buffer lock so it is safe to query the buffer * state. Is the buffer dirty? * * If so, there are two possibilities. 
The buffer may be * non-journaled, and undergoing a quite legitimate writeback. * Otherwise, it is journaled, and we don't expect dirty buffers * in that state (the buffers should be marked JBD_Dirty * instead.) So either the IO is being done under our own * control and this is a bug, or it's a third party IO such as * dump(8) (which may leave the buffer scheduled for read --- * ie. locked but not dirty) or tune2fs (which may actually have * the buffer dirtied, ugh.) */ if (buffer_dirty(bh) && jh->b_transaction) { warn_dirty_buffer(bh); /* * We need to clean the dirty flag and we must do it under the * buffer lock to be sure we don't race with running write-out. */ JBUFFER_TRACE(jh, "Journalling dirty buffer"); clear_buffer_dirty(bh); /* * The buffer is going to be added to BJ_Reserved list now and * nothing guarantees jbd2_journal_dirty_metadata() will be * ever called for it. So we need to set jbddirty bit here to * make sure the buffer is dirtied and written out when the * journaling machinery is done with it. */ set_buffer_jbddirty(bh); } error = -EROFS; if (is_handle_aborted(handle)) { spin_unlock(&jh->b_state_lock); unlock_buffer(bh); goto out; } error = 0; /* * The buffer is already part of this transaction if b_transaction or * b_next_transaction points to it */ if (jh->b_transaction == transaction || jh->b_next_transaction == transaction) { unlock_buffer(bh); goto done; } /* * this is the first time this transaction is touching this buffer, * reset the modified flag */ jh->b_modified = 0; /* * If the buffer is not journaled right now, we need to make sure it * doesn't get written to disk before the caller actually commits the * new data */ if (!jh->b_transaction) { JBUFFER_TRACE(jh, "no transaction"); J_ASSERT_JH(jh, !jh->b_next_transaction); JBUFFER_TRACE(jh, "file as BJ_Reserved"); /* * Make sure all stores to jh (b_modified, b_frozen_data) are * visible before attaching it to the running transaction. * Paired with barrier in jbd2_write_access_granted() */ smp_wmb(); spin_lock(&journal->j_list_lock); if (test_clear_buffer_dirty(bh)) { /* * Execute buffer dirty clearing and jh->b_transaction * assignment under journal->j_list_lock locked to * prevent bh being removed from checkpoint list if * the buffer is in an intermediate state (not dirty * and jh->b_transaction is NULL). */ JBUFFER_TRACE(jh, "Journalling dirty buffer"); set_buffer_jbddirty(bh); } __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved); spin_unlock(&journal->j_list_lock); unlock_buffer(bh); goto done; } unlock_buffer(bh); /* * If there is already a copy-out version of this buffer, then we don't * need to make another one */ if (jh->b_frozen_data) { JBUFFER_TRACE(jh, "has frozen data"); J_ASSERT_JH(jh, jh->b_next_transaction == NULL); goto attach_next; } JBUFFER_TRACE(jh, "owned by older transaction"); J_ASSERT_JH(jh, jh->b_next_transaction == NULL); J_ASSERT_JH(jh, jh->b_transaction == journal->j_committing_transaction); /* * There is one case we have to be very careful about. If the * committing transaction is currently writing this buffer out to disk * and has NOT made a copy-out, then we cannot modify the buffer * contents at all right now. The essence of copy-out is that it is * the extra copy, not the primary copy, which gets journaled. If the * primary copy is already going to disk then we cannot do copy-out * here. 
*/ if (buffer_shadow(bh)) { JBUFFER_TRACE(jh, "on shadow: sleep"); spin_unlock(&jh->b_state_lock); wait_on_bit_io(&bh->b_state, BH_Shadow, TASK_UNINTERRUPTIBLE); goto repeat; } /* * Only do the copy if the currently-owning transaction still needs it. * If buffer isn't on BJ_Metadata list, the committing transaction is * past that stage (here we use the fact that BH_Shadow is set under * bh_state lock together with refiling to BJ_Shadow list and at this * point we know the buffer doesn't have BH_Shadow set). * * Subtle point, though: if this is a get_undo_access, then we will be * relying on the frozen_data to contain the new value of the * committed_data record after the transaction, so we HAVE to force the * frozen_data copy in that case. */ if (jh->b_jlist == BJ_Metadata || force_copy) { JBUFFER_TRACE(jh, "generate frozen data"); if (!frozen_buffer) { JBUFFER_TRACE(jh, "allocate memory for buffer"); spin_unlock(&jh->b_state_lock); frozen_buffer = jbd2_alloc(jh2bh(jh)->b_size, GFP_NOFS | __GFP_NOFAIL); goto repeat; } jh->b_frozen_data = frozen_buffer; frozen_buffer = NULL; jbd2_freeze_jh_data(jh); } attach_next: /* * Make sure all stores to jh (b_modified, b_frozen_data) are visible * before attaching it to the running transaction. Paired with barrier * in jbd2_write_access_granted() */ smp_wmb(); jh->b_next_transaction = transaction; done: spin_unlock(&jh->b_state_lock); /* * If we are about to journal a buffer, then any revoke pending on it is * no longer valid */ jbd2_journal_cancel_revoke(handle, jh); out: if (unlikely(frozen_buffer)) /* It's usually NULL */ jbd2_free(frozen_buffer, bh->b_size); JBUFFER_TRACE(jh, "exit"); return error; } /* Fast check whether buffer is already attached to the required transaction */ static bool jbd2_write_access_granted(handle_t *handle, struct buffer_head *bh, bool undo) { struct journal_head *jh; bool ret = false; /* Dirty buffers require special handling... */ if (buffer_dirty(bh)) return false; /* * RCU protects us from dereferencing freed pages. So the checks we do * are guaranteed not to oops. However the jh slab object can get freed * & reallocated while we work with it. So we have to be careful. When * we see jh attached to the running transaction, we know it must stay * so until the transaction is committed. Thus jh won't be freed and * will be attached to the same bh while we run. However it can * happen jh gets freed, reallocated, and attached to the transaction * just after we get pointer to it from bh. So we have to be careful * and recheck jh still belongs to our bh before we return success. */ rcu_read_lock(); if (!buffer_jbd(bh)) goto out; /* This should be bh2jh() but that doesn't work with inline functions */ jh = READ_ONCE(bh->b_private); if (!jh) goto out; /* For undo access buffer must have data copied */ if (undo && !jh->b_committed_data) goto out; if (READ_ONCE(jh->b_transaction) != handle->h_transaction && READ_ONCE(jh->b_next_transaction) != handle->h_transaction) goto out; /* * There are two reasons for the barrier here: * 1) Make sure to fetch b_bh after we did previous checks so that we * detect when jh went through free, realloc, attach to transaction * while we were checking. Paired with implicit barrier in that path. * 2) So that access to bh done after jbd2_write_access_granted() * doesn't get reordered and see inconsistent state of concurrent * do_get_write_access(). 
*/ smp_mb(); if (unlikely(jh->b_bh != bh)) goto out; ret = true; out: rcu_read_unlock(); return ret; } /** * jbd2_journal_get_write_access() - notify intent to modify a buffer * for metadata (not data) update. * @handle: transaction to add buffer modifications to * @bh: bh to be used for metadata writes * * Returns: error code or 0 on success. * * In full data journalling mode the buffer may be of type BJ_AsyncData, * because we're ``write()ing`` a buffer which is also part of a shared mapping. */ int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh) { struct journal_head *jh; int rc; if (is_handle_aborted(handle)) return -EROFS; if (jbd2_write_access_granted(handle, bh, false)) return 0; jh = jbd2_journal_add_journal_head(bh); /* We do not want to get caught playing with fields which the * log thread also manipulates. Make sure that the buffer * completes any outstanding IO before proceeding. */ rc = do_get_write_access(handle, jh, 0); jbd2_journal_put_journal_head(jh); return rc; } /* * When the user wants to journal a newly created buffer_head * (ie. getblk() returned a new buffer and we are going to populate it * manually rather than reading off disk), then we need to keep the * buffer_head locked until it has been completely filled with new * data. In this case, we should be able to make the assertion that * the bh is not already part of an existing transaction. * * The buffer should already be locked by the caller by this point. * There is no lock ranking violation: it was a newly created, * unlocked buffer beforehand. */ /** * jbd2_journal_get_create_access () - notify intent to use newly created bh * @handle: transaction to new buffer to * @bh: new buffer. * * Call this if you create a new bh. */ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh) { transaction_t *transaction = handle->h_transaction; journal_t *journal; struct journal_head *jh = jbd2_journal_add_journal_head(bh); int err; jbd2_debug(5, "journal_head %p\n", jh); err = -EROFS; if (is_handle_aborted(handle)) goto out; journal = transaction->t_journal; err = 0; JBUFFER_TRACE(jh, "entry"); /* * The buffer may already belong to this transaction due to pre-zeroing * in the filesystem's new_block code. It may also be on the previous, * committing transaction's lists, but it HAS to be in Forget state in * that case: the transaction must have deleted the buffer for it to be * reused here. */ spin_lock(&jh->b_state_lock); J_ASSERT_JH(jh, (jh->b_transaction == transaction || jh->b_transaction == NULL || (jh->b_transaction == journal->j_committing_transaction && jh->b_jlist == BJ_Forget))); J_ASSERT_JH(jh, jh->b_next_transaction == NULL); J_ASSERT_JH(jh, buffer_locked(jh2bh(jh))); if (jh->b_transaction == NULL) { /* * Previous jbd2_journal_forget() could have left the buffer * with jbddirty bit set because it was being committed. When * the commit finished, we've filed the buffer for * checkpointing and marked it dirty. Now we are reallocating * the buffer so the transaction freeing it must have * committed and so it's safe to clear the dirty bit. 
*/ clear_buffer_dirty(jh2bh(jh)); /* first access by this transaction */ jh->b_modified = 0; JBUFFER_TRACE(jh, "file as BJ_Reserved"); spin_lock(&journal->j_list_lock); __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved); spin_unlock(&journal->j_list_lock); } else if (jh->b_transaction == journal->j_committing_transaction) { /* first access by this transaction */ jh->b_modified = 0; JBUFFER_TRACE(jh, "set next transaction"); spin_lock(&journal->j_list_lock); jh->b_next_transaction = transaction; spin_unlock(&journal->j_list_lock); } spin_unlock(&jh->b_state_lock); /* * akpm: I added this. ext3_alloc_branch can pick up new indirect * blocks which contain freed but then revoked metadata. We need * to cancel the revoke in case we end up freeing it yet again * and the reallocating as data - this would cause a second revoke, * which hits an assertion error. */ JBUFFER_TRACE(jh, "cancelling revoke"); jbd2_journal_cancel_revoke(handle, jh); out: jbd2_journal_put_journal_head(jh); return err; } /** * jbd2_journal_get_undo_access() - Notify intent to modify metadata with * non-rewindable consequences * @handle: transaction * @bh: buffer to undo * * Sometimes there is a need to distinguish between metadata which has * been committed to disk and that which has not. The ext3fs code uses * this for freeing and allocating space, we have to make sure that we * do not reuse freed space until the deallocation has been committed, * since if we overwrote that space we would make the delete * un-rewindable in case of a crash. * * To deal with that, jbd2_journal_get_undo_access requests write access to a * buffer for parts of non-rewindable operations such as delete * operations on the bitmaps. The journaling code must keep a copy of * the buffer's contents prior to the undo_access call until such time * as we know that the buffer has definitely been committed to disk. * * We never need to know which transaction the committed data is part * of, buffers touched here are guaranteed to be dirtied later and so * will be committed to a new transaction in due course, at which point * we can discard the old committed data pointer. * * Returns error number or 0 on success. */ int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh) { int err; struct journal_head *jh; char *committed_data = NULL; if (is_handle_aborted(handle)) return -EROFS; if (jbd2_write_access_granted(handle, bh, true)) return 0; jh = jbd2_journal_add_journal_head(bh); JBUFFER_TRACE(jh, "entry"); /* * Do this first --- it can drop the journal lock, so we want to * make sure that obtaining the committed_data is done * atomically wrt. completion of any outstanding commits. */ err = do_get_write_access(handle, jh, 1); if (err) goto out; repeat: if (!jh->b_committed_data) committed_data = jbd2_alloc(jh2bh(jh)->b_size, GFP_NOFS|__GFP_NOFAIL); spin_lock(&jh->b_state_lock); if (!jh->b_committed_data) { /* Copy out the current buffer contents into the * preserved, committed copy. */ JBUFFER_TRACE(jh, "generate b_committed data"); if (!committed_data) { spin_unlock(&jh->b_state_lock); goto repeat; } jh->b_committed_data = committed_data; committed_data = NULL; memcpy(jh->b_committed_data, bh->b_data, bh->b_size); } spin_unlock(&jh->b_state_lock); out: jbd2_journal_put_journal_head(jh); if (unlikely(committed_data)) jbd2_free(committed_data, bh->b_size); return err; } /** * jbd2_journal_set_triggers() - Add triggers for commit writeout * @bh: buffer to trigger on * @type: struct jbd2_buffer_trigger_type containing the trigger(s). 
* * Set any triggers on this journal_head. This is always safe, because * triggers for a committing buffer will be saved off, and triggers for * a running transaction will match the buffer in that transaction. * * Call with NULL to clear the triggers. */ void jbd2_journal_set_triggers(struct buffer_head *bh, struct jbd2_buffer_trigger_type *type) { struct journal_head *jh = jbd2_journal_grab_journal_head(bh); if (WARN_ON_ONCE(!jh)) return; jh->b_triggers = type; jbd2_journal_put_journal_head(jh); } void jbd2_buffer_frozen_trigger(struct journal_head *jh, void *mapped_data, struct jbd2_buffer_trigger_type *triggers) { struct buffer_head *bh = jh2bh(jh); if (!triggers || !triggers->t_frozen) return; triggers->t_frozen(triggers, bh, mapped_data, bh->b_size); } void jbd2_buffer_abort_trigger(struct journal_head *jh, struct jbd2_buffer_trigger_type *triggers) { if (!triggers || !triggers->t_abort) return; triggers->t_abort(triggers, jh2bh(jh)); } /** * jbd2_journal_dirty_metadata() - mark a buffer as containing dirty metadata * @handle: transaction to add buffer to. * @bh: buffer to mark * * mark dirty metadata which needs to be journaled as part of the current * transaction. * * The buffer must have previously had jbd2_journal_get_write_access() * called so that it has a valid journal_head attached to the buffer * head. * * The buffer is placed on the transaction's metadata list and is marked * as belonging to the transaction. * * Returns error number or 0 on success. * * Special care needs to be taken if the buffer already belongs to the * current committing transaction (in which case we should have frozen * data present for that commit). In that case, we don't relink the * buffer: that only gets done when the old transaction finally * completes its commit. */ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) { transaction_t *transaction = handle->h_transaction; journal_t *journal; struct journal_head *jh; int ret = 0; if (!buffer_jbd(bh)) return -EUCLEAN; /* * We don't grab jh reference here since the buffer must be part * of the running transaction. */ jh = bh2jh(bh); jbd2_debug(5, "journal_head %p\n", jh); JBUFFER_TRACE(jh, "entry"); /* * This and the following assertions are unreliable since we may see jh * in inconsistent state unless we grab bh_state lock. But this is * crucial to catch bugs so let's do a reliable check until the * lockless handling is fully proven. */ if (data_race(jh->b_transaction != transaction && jh->b_next_transaction != transaction)) { spin_lock(&jh->b_state_lock); J_ASSERT_JH(jh, jh->b_transaction == transaction || jh->b_next_transaction == transaction); spin_unlock(&jh->b_state_lock); } if (jh->b_modified == 1) { /* If it's in our transaction it must be in BJ_Metadata list. 
*/ if (data_race(jh->b_transaction == transaction && jh->b_jlist != BJ_Metadata)) { spin_lock(&jh->b_state_lock); if (jh->b_transaction == transaction && jh->b_jlist != BJ_Metadata) pr_err("JBD2: assertion failure: h_type=%u " "h_line_no=%u block_no=%llu jlist=%u\n", handle->h_type, handle->h_line_no, (unsigned long long) bh->b_blocknr, jh->b_jlist); J_ASSERT_JH(jh, jh->b_transaction != transaction || jh->b_jlist == BJ_Metadata); spin_unlock(&jh->b_state_lock); } goto out; } journal = transaction->t_journal; spin_lock(&jh->b_state_lock); if (is_handle_aborted(handle)) { /* * Check journal aborting with @jh->b_state_lock locked, * since 'jh->b_transaction' could be replaced with * 'jh->b_next_transaction' during old transaction * committing if journal aborted, which may fail * assertion on 'jh->b_frozen_data == NULL'. */ ret = -EROFS; goto out_unlock_bh; } if (jh->b_modified == 0) { /* * This buffer's got modified and becoming part * of the transaction. This needs to be done * once a transaction -bzzz */ if (WARN_ON_ONCE(jbd2_handle_buffer_credits(handle) <= 0)) { ret = -ENOSPC; goto out_unlock_bh; } jh->b_modified = 1; handle->h_total_credits--; } /* * fastpath, to avoid expensive locking. If this buffer is already * on the running transaction's metadata list there is nothing to do. * Nobody can take it off again because there is a handle open. * I _think_ we're OK here with SMP barriers - a mistaken decision will * result in this test being false, so we go in and take the locks. */ if (jh->b_transaction == transaction && jh->b_jlist == BJ_Metadata) { JBUFFER_TRACE(jh, "fastpath"); if (unlikely(jh->b_transaction != journal->j_running_transaction)) { printk(KERN_ERR "JBD2: %s: " "jh->b_transaction (%llu, %p, %u) != " "journal->j_running_transaction (%p, %u)\n", journal->j_devname, (unsigned long long) bh->b_blocknr, jh->b_transaction, jh->b_transaction ? jh->b_transaction->t_tid : 0, journal->j_running_transaction, journal->j_running_transaction ? journal->j_running_transaction->t_tid : 0); ret = -EINVAL; } goto out_unlock_bh; } set_buffer_jbddirty(bh); /* * Metadata already on the current transaction list doesn't * need to be filed. Metadata on another transaction's list must * be committing, and will be refiled once the commit completes: * leave it alone for now. */ if (jh->b_transaction != transaction) { JBUFFER_TRACE(jh, "already on other transaction"); if (unlikely(((jh->b_transaction != journal->j_committing_transaction)) || (jh->b_next_transaction != transaction))) { printk(KERN_ERR "jbd2_journal_dirty_metadata: %s: " "bad jh for block %llu: " "transaction (%p, %u), " "jh->b_transaction (%p, %u), " "jh->b_next_transaction (%p, %u), jlist %u\n", journal->j_devname, (unsigned long long) bh->b_blocknr, transaction, transaction->t_tid, jh->b_transaction, jh->b_transaction ? jh->b_transaction->t_tid : 0, jh->b_next_transaction, jh->b_next_transaction ? jh->b_next_transaction->t_tid : 0, jh->b_jlist); WARN_ON(1); ret = -EINVAL; } /* And this case is illegal: we can't reuse another * transaction's data buffer, ever. */ goto out_unlock_bh; } /* That test should have eliminated the following case: */ J_ASSERT_JH(jh, jh->b_frozen_data == NULL); JBUFFER_TRACE(jh, "file as BJ_Metadata"); spin_lock(&journal->j_list_lock); __jbd2_journal_file_buffer(jh, transaction, BJ_Metadata); spin_unlock(&journal->j_list_lock); out_unlock_bh: spin_unlock(&jh->b_state_lock); out: JBUFFER_TRACE(jh, "exit"); return ret; } /** * jbd2_journal_forget() - bforget() for potentially-journaled buffers. 
* @handle: transaction handle * @bh: bh to 'forget' * * We can only do the bforget if there are no commits pending against the * buffer. If the buffer is dirty in the current running transaction we * can safely unlink it. * * bh may not be a journalled buffer at all - it may be a non-JBD * buffer which came off the hashtable. Check for this. * * Decrements bh->b_count by one. * * Allow this call even if the handle has aborted --- it may be part of * the caller's cleanup after an abort. */ int jbd2_journal_forget(handle_t *handle, struct buffer_head *bh) { transaction_t *transaction = handle->h_transaction; journal_t *journal; struct journal_head *jh; int drop_reserve = 0; int err = 0; int was_modified = 0; if (is_handle_aborted(handle)) return -EROFS; journal = transaction->t_journal; BUFFER_TRACE(bh, "entry"); jh = jbd2_journal_grab_journal_head(bh); if (!jh) { __bforget(bh); return 0; } spin_lock(&jh->b_state_lock); /* Critical error: attempting to delete a bitmap buffer, maybe? * Don't do any jbd operations, and return an error. */ if (!J_EXPECT_JH(jh, !jh->b_committed_data, "inconsistent data on disk")) { err = -EIO; goto drop; } /* keep track of whether or not this transaction modified us */ was_modified = jh->b_modified; /* * The buffer's going from the transaction, we must drop * all references -bzzz */ jh->b_modified = 0; if (jh->b_transaction == transaction) { J_ASSERT_JH(jh, !jh->b_frozen_data); /* If we are forgetting a buffer which is already part * of this transaction, then we can just drop it from * the transaction immediately. */ clear_buffer_dirty(bh); clear_buffer_jbddirty(bh); JBUFFER_TRACE(jh, "belongs to current transaction: unfile"); /* * we only want to drop a reference if this transaction * modified the buffer */ if (was_modified) drop_reserve = 1; /* * We are no longer going to journal this buffer. * However, the commit of this transaction is still * important to the buffer: the delete that we are now * processing might obsolete an old log entry, so by * committing, we can satisfy the buffer's checkpoint. * * So, if we have a checkpoint on the buffer, we should * now refile the buffer on our BJ_Forget list so that * we know to remove the checkpoint after we commit. */ spin_lock(&journal->j_list_lock); if (jh->b_cp_transaction) { __jbd2_journal_temp_unlink_buffer(jh); __jbd2_journal_file_buffer(jh, transaction, BJ_Forget); } else { __jbd2_journal_unfile_buffer(jh); jbd2_journal_put_journal_head(jh); } spin_unlock(&journal->j_list_lock); } else if (jh->b_transaction) { J_ASSERT_JH(jh, (jh->b_transaction == journal->j_committing_transaction)); /* However, if the buffer is still owned by a prior * (committing) transaction, we can't drop it yet... */ JBUFFER_TRACE(jh, "belongs to older transaction"); /* ... but we CAN drop it from the new transaction through * marking the buffer as freed and set j_next_transaction to * the new transaction, so that not only the commit code * knows it should clear dirty bits when it is done with the * buffer, but also the buffer can be checkpointed only * after the new transaction commits. */ set_buffer_freed(bh); if (!jh->b_next_transaction) { spin_lock(&journal->j_list_lock); jh->b_next_transaction = transaction; spin_unlock(&journal->j_list_lock); } else { J_ASSERT(jh->b_next_transaction == transaction); /* * only drop a reference if this transaction modified * the buffer */ if (was_modified) drop_reserve = 1; } } else { /* * Finally, if the buffer is not belongs to any * transaction, we can just drop it now if it has no * checkpoint. 
*/ spin_lock(&journal->j_list_lock); if (!jh->b_cp_transaction) { JBUFFER_TRACE(jh, "belongs to none transaction"); spin_unlock(&journal->j_list_lock); goto drop; } /* * Otherwise, if the buffer has been written to disk, * it is safe to remove the checkpoint and drop it. */ if (jbd2_journal_try_remove_checkpoint(jh) >= 0) { spin_unlock(&journal->j_list_lock); goto drop; } /* * The buffer is still not written to disk, we should * attach this buffer to current transaction so that the * buffer can be checkpointed only after the current * transaction commits. */ clear_buffer_dirty(bh); __jbd2_journal_file_buffer(jh, transaction, BJ_Forget); spin_unlock(&journal->j_list_lock); } drop: __brelse(bh); spin_unlock(&jh->b_state_lock); jbd2_journal_put_journal_head(jh); if (drop_reserve) { /* no need to reserve log space for this block -bzzz */ handle->h_total_credits++; } return err; } /** * jbd2_journal_stop() - complete a transaction * @handle: transaction to complete. * * All done for a particular handle. * * There is not much action needed here. We just return any remaining * buffer credits to the transaction and remove the handle. The only * complication is that we need to start a commit operation if the * filesystem is marked for synchronous update. * * jbd2_journal_stop itself will not usually return an error, but it may * do so in unusual circumstances. In particular, expect it to * return -EIO if a jbd2_journal_abort has been executed since the * transaction began. */ int jbd2_journal_stop(handle_t *handle) { transaction_t *transaction = handle->h_transaction; journal_t *journal; int err = 0, wait_for_commit = 0; tid_t tid; pid_t pid; if (--handle->h_ref > 0) { jbd2_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1, handle->h_ref); if (is_handle_aborted(handle)) return -EIO; return 0; } if (!transaction) { /* * Handle is already detached from the transaction so there is * nothing to do other than free the handle. */ memalloc_nofs_restore(handle->saved_alloc_context); goto free_and_exit; } journal = transaction->t_journal; tid = transaction->t_tid; if (is_handle_aborted(handle)) err = -EIO; jbd2_debug(4, "Handle %p going down\n", handle); trace_jbd2_handle_stats(journal->j_fs_dev->bd_dev, tid, handle->h_type, handle->h_line_no, jiffies - handle->h_start_jiffies, handle->h_sync, handle->h_requested_credits, (handle->h_requested_credits - handle->h_total_credits)); /* * Implement synchronous transaction batching. If the handle * was synchronous, don't force a commit immediately. Let's * yield and let another thread piggyback onto this * transaction. Keep doing that while new threads continue to * arrive. It doesn't cost much - we're about to run a commit * and sleep on IO anyway. Speeds up many-threaded, many-dir * operations by 30x or more... * * We try and optimize the sleep time against what the * underlying disk can do, instead of having a static sleep * time. This is useful for the case where our storage is so * fast that it is more optimal to go ahead and force a flush * and wait for the transaction to be committed than it is to * wait for an arbitrary amount of time for new writers to * join the transaction. We achieve this by measuring how * long it takes to commit a transaction, and compare it with * how long this transaction has been running, and if run time * < commit time then we sleep for the delta and commit. This * greatly helps super fast disks that would see slowdowns as * more threads started doing fsyncs. 
* * But don't do this if this process was the most recent one * to perform a synchronous write. We do this to detect the * case where a single process is doing a stream of sync * writes. No point in waiting for joiners in that case. * * Setting max_batch_time to 0 disables this completely. */ pid = current->pid; if (handle->h_sync && journal->j_last_sync_writer != pid && journal->j_max_batch_time) { u64 commit_time, trans_time; journal->j_last_sync_writer = pid; read_lock(&journal->j_state_lock); commit_time = journal->j_average_commit_time; read_unlock(&journal->j_state_lock); trans_time = ktime_to_ns(ktime_sub(ktime_get(), transaction->t_start_time)); commit_time = max_t(u64, commit_time, 1000*journal->j_min_batch_time); commit_time = min_t(u64, commit_time, 1000*journal->j_max_batch_time); if (trans_time < commit_time) { ktime_t expires = ktime_add_ns(ktime_get(), commit_time); set_current_state(TASK_UNINTERRUPTIBLE); schedule_hrtimeout(&expires, HRTIMER_MODE_ABS); } } if (handle->h_sync) transaction->t_synchronous_commit = 1; /* * If the handle is marked SYNC, we need to set another commit * going! We also want to force a commit if the transaction is too * old now. */ if (handle->h_sync || time_after_eq(jiffies, transaction->t_expires)) { /* Do this even for aborted journals: an abort still * completes the commit thread, it just doesn't write * anything to disk. */ jbd2_debug(2, "transaction too old, requesting commit for " "handle %p\n", handle); /* This is non-blocking */ jbd2_log_start_commit(journal, tid); /* * Special case: JBD2_SYNC synchronous updates require us * to wait for the commit to complete. */ if (handle->h_sync && !(current->flags & PF_MEMALLOC)) wait_for_commit = 1; } /* * Once stop_this_handle() drops t_updates, the transaction could start * committing on us and eventually disappear. So we must not * dereference transaction pointer again after calling * stop_this_handle(). */ stop_this_handle(handle); if (wait_for_commit) err = jbd2_log_wait_commit(journal, tid); free_and_exit: if (handle->h_rsv_handle) jbd2_free_handle(handle->h_rsv_handle); jbd2_free_handle(handle); return err; } /* * * List management code snippets: various functions for manipulating the * transaction buffer lists. * */ /* * Append a buffer to a transaction list, given the transaction's list head * pointer. * * j_list_lock is held. * * jh->b_state_lock is held. */ static inline void __blist_add_buffer(struct journal_head **list, struct journal_head *jh) { if (!*list) { jh->b_tnext = jh->b_tprev = jh; *list = jh; } else { /* Insert at the tail of the list to preserve order */ struct journal_head *first = *list, *last = first->b_tprev; jh->b_tprev = last; jh->b_tnext = first; last->b_tnext = first->b_tprev = jh; } } /* * Remove a buffer from a transaction list, given the transaction's list * head pointer. * * Called with j_list_lock held, and the journal may not be locked. * * jh->b_state_lock is held. */ static inline void __blist_del_buffer(struct journal_head **list, struct journal_head *jh) { if (*list == jh) { *list = jh->b_tnext; if (*list == jh) *list = NULL; } jh->b_tprev->b_tnext = jh->b_tnext; jh->b_tnext->b_tprev = jh->b_tprev; } /* * Remove a buffer from the appropriate transaction list. * * Note that this function can *change* the value of * bh->b_transaction->t_buffers, t_forget, t_shadow_list, t_log_list or * t_reserved_list. If the caller is holding onto a copy of one of these * pointers, it could go bad. Generally the caller needs to re-read the * pointer from the transaction_t. 
* * Called under j_list_lock. */ static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh) { struct journal_head **list = NULL; transaction_t *transaction; struct buffer_head *bh = jh2bh(jh); lockdep_assert_held(&jh->b_state_lock); transaction = jh->b_transaction; if (transaction) assert_spin_locked(&transaction->t_journal->j_list_lock); J_ASSERT_JH(jh, jh->b_jlist < BJ_Types); if (jh->b_jlist != BJ_None) J_ASSERT_JH(jh, transaction != NULL); switch (jh->b_jlist) { case BJ_None: return; case BJ_Metadata: transaction->t_nr_buffers--; J_ASSERT_JH(jh, transaction->t_nr_buffers >= 0); list = &transaction->t_buffers; break; case BJ_Forget: list = &transaction->t_forget; break; case BJ_Shadow: list = &transaction->t_shadow_list; break; case BJ_Reserved: list = &transaction->t_reserved_list; break; } __blist_del_buffer(list, jh); jh->b_jlist = BJ_None; if (transaction && is_journal_aborted(transaction->t_journal)) clear_buffer_jbddirty(bh); else if (test_clear_buffer_jbddirty(bh)) mark_buffer_dirty(bh); /* Expose it to the VM */ } /* * Remove buffer from all transactions. The caller is responsible for dropping * the jh reference that belonged to the transaction. * * Called with bh_state lock and j_list_lock */ static void __jbd2_journal_unfile_buffer(struct journal_head *jh) { J_ASSERT_JH(jh, jh->b_transaction != NULL); J_ASSERT_JH(jh, jh->b_next_transaction == NULL); __jbd2_journal_temp_unlink_buffer(jh); jh->b_transaction = NULL; } void jbd2_journal_unfile_buffer(journal_t *journal, struct journal_head *jh) { struct buffer_head *bh = jh2bh(jh); /* Get reference so that buffer cannot be freed before we unlock it */ get_bh(bh); spin_lock(&jh->b_state_lock); spin_lock(&journal->j_list_lock); __jbd2_journal_unfile_buffer(jh); spin_unlock(&journal->j_list_lock); spin_unlock(&jh->b_state_lock); jbd2_journal_put_journal_head(jh); __brelse(bh); } /** * jbd2_journal_try_to_free_buffers() - try to free page buffers. * @journal: journal for operation * @folio: Folio to detach data from. * * For all the buffers on this page, * if they are fully written out ordered data, move them onto BUF_CLEAN * so try_to_free_buffers() can reap them. * * This function returns non-zero if we wish try_to_free_buffers() * to be called. We do this if the page is releasable by try_to_free_buffers(). * We also do it if the page has locked or dirty buffers and the caller wants * us to perform sync or async writeout. * * This complicates JBD locking somewhat. We aren't protected by the * BKL here. We wish to remove the buffer from its committing or * running transaction's ->t_datalist via __jbd2_journal_unfile_buffer. * * This may *change* the value of transaction_t->t_datalist, so anyone * who looks at t_datalist needs to lock against this function. * * Even worse, someone may be doing a jbd2_journal_dirty_data on this * buffer. So we need to lock against that. jbd2_journal_dirty_data() * will come out of the lock with the buffer dirty, which makes it * ineligible for release here. * * Who else is affected by this? hmm... Really the only contender * is do_get_write_access() - it could be looking at the buffer while * journal_try_to_free_buffer() is changing its state. But that * cannot happen because we never reallocate freed data as metadata * while the data is part of a transaction. Yes? 
* * Return false on failure, true on success */ bool jbd2_journal_try_to_free_buffers(journal_t *journal, struct folio *folio) { struct buffer_head *head; struct buffer_head *bh; bool ret = false; J_ASSERT(folio_test_locked(folio)); head = folio_buffers(folio); bh = head; do { struct journal_head *jh; /* * We take our own ref against the journal_head here to avoid * having to add tons of locking around each instance of * jbd2_journal_put_journal_head(). */ jh = jbd2_journal_grab_journal_head(bh); if (!jh) continue; spin_lock(&jh->b_state_lock); if (!jh->b_transaction && !jh->b_next_transaction) { spin_lock(&journal->j_list_lock); /* Remove written-back checkpointed metadata buffer */ if (jh->b_cp_transaction != NULL) jbd2_journal_try_remove_checkpoint(jh); spin_unlock(&journal->j_list_lock); } spin_unlock(&jh->b_state_lock); jbd2_journal_put_journal_head(jh); if (buffer_jbd(bh)) goto busy; } while ((bh = bh->b_this_page) != head); ret = try_to_free_buffers(folio); busy: return ret; } /* * This buffer is no longer needed. If it is on an older transaction's * checkpoint list we need to record it on this transaction's forget list * to pin this buffer (and hence its checkpointing transaction) down until * this transaction commits. If the buffer isn't on a checkpoint list, we * release it. * Returns non-zero if JBD no longer has an interest in the buffer. * * Called under j_list_lock. * * Called under jh->b_state_lock. */ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction) { int may_free = 1; struct buffer_head *bh = jh2bh(jh); if (jh->b_cp_transaction) { JBUFFER_TRACE(jh, "on running+cp transaction"); __jbd2_journal_temp_unlink_buffer(jh); /* * We don't want to write the buffer anymore, clear the * bit so that we don't confuse checks in * __journal_file_buffer */ clear_buffer_dirty(bh); __jbd2_journal_file_buffer(jh, transaction, BJ_Forget); may_free = 0; } else { JBUFFER_TRACE(jh, "on running transaction"); __jbd2_journal_unfile_buffer(jh); jbd2_journal_put_journal_head(jh); } return may_free; } /* * jbd2_journal_invalidate_folio * * This code is tricky. It has a number of cases to deal with. * * There are two invariants which this code relies on: * * i_size must be updated on disk before we start calling invalidate_folio * on the data. * * This is done in ext3 by defining an ext3_setattr method which * updates i_size before truncate gets going. By maintaining this * invariant, we can be sure that it is safe to throw away any buffers * attached to the current transaction: once the transaction commits, * we know that the data will not be needed. * * Note however that we can *not* throw away data belonging to the * previous, committing transaction! * * Any disk blocks which *are* part of the previous, committing * transaction (and which therefore cannot be discarded immediately) are * not going to be reused in the new running transaction * * The bitmap committed_data images guarantee this: any block which is * allocated in one transaction and removed in the next will be marked * as in-use in the committed_data bitmap, so cannot be reused until * the next transaction to delete the block commits. This means that * leaving committing buffers dirty is quite safe: the disk blocks * cannot be reallocated to a different file and so buffer aliasing is * not possible. * * * The above applies mainly to ordered data mode. 
In writeback mode we * don't make guarantees about the order in which data hits disk --- in * particular we don't guarantee that new dirty data is flushed before * transaction commit --- so it is always safe just to discard data * immediately in that mode. --sct */ /* * The journal_unmap_buffer helper function returns zero if the buffer * concerned remains pinned as an anonymous buffer belonging to an older * transaction. * * We're outside-transaction here. Either or both of j_running_transaction * and j_committing_transaction may be NULL. */ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh, int partial_page) { transaction_t *transaction; struct journal_head *jh; int may_free = 1; BUFFER_TRACE(bh, "entry"); /* * It is safe to proceed here without the j_list_lock because the * buffers cannot be stolen by try_to_free_buffers as long as we are * holding the page lock. --sct */ jh = jbd2_journal_grab_journal_head(bh); if (!jh) goto zap_buffer_unlocked; /* OK, we have data buffer in journaled mode */ write_lock(&journal->j_state_lock); spin_lock(&jh->b_state_lock); spin_lock(&journal->j_list_lock); /* * We cannot remove the buffer from checkpoint lists until the * transaction adding inode to orphan list (let's call it T) * is committed. Otherwise if the transaction changing the * buffer would be cleaned from the journal before T is * committed, a crash will cause that the correct contents of * the buffer will be lost. On the other hand we have to * clear the buffer dirty bit at latest at the moment when the * transaction marking the buffer as freed in the filesystem * structures is committed because from that moment on the * block can be reallocated and used by a different page. * Since the block hasn't been freed yet but the inode has * already been added to orphan list, it is safe for us to add * the buffer to BJ_Forget list of the newest transaction. * * Also we have to clear buffer_mapped flag of a truncated buffer * because the buffer_head may be attached to the page straddling * i_size (can happen only when blocksize < pagesize) and thus the * buffer_head can be reused when the file is extended again. So we end * up keeping around invalidated buffers attached to transactions' * BJ_Forget list just to stop checkpointing code from cleaning up * the transaction this buffer was modified in. */ transaction = jh->b_transaction; if (transaction == NULL) { /* First case: not on any transaction. If it * has no checkpoint link, then we can zap it: * it's a writeback-mode buffer so we don't care * if it hits disk safely. */ if (!jh->b_cp_transaction) { JBUFFER_TRACE(jh, "not on any transaction: zap"); goto zap_buffer; } if (!buffer_dirty(bh)) { /* bdflush has written it. We can drop it now */ __jbd2_journal_remove_checkpoint(jh); goto zap_buffer; } /* OK, it must be in the journal but still not * written fully to disk: it's metadata or * journaled data... */ if (journal->j_running_transaction) { /* ... and once the current transaction has * committed, the buffer won't be needed any * longer. */ JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget"); may_free = __dispose_buffer(jh, journal->j_running_transaction); goto zap_buffer; } else { /* There is no currently-running transaction. So the * orphan record which we wrote for this file must have * passed into commit. We must attach this buffer to * the committing transaction, if it exists. 
*/ if (journal->j_committing_transaction) { JBUFFER_TRACE(jh, "give to committing trans"); may_free = __dispose_buffer(jh, journal->j_committing_transaction); goto zap_buffer; } else { /* The orphan record's transaction has * committed. We can cleanse this buffer */ clear_buffer_jbddirty(bh); __jbd2_journal_remove_checkpoint(jh); goto zap_buffer; } } } else if (transaction == journal->j_committing_transaction) { JBUFFER_TRACE(jh, "on committing transaction"); /* * The buffer is committing, we simply cannot touch * it. If the page is straddling i_size we have to wait * for commit and try again. */ if (partial_page) { spin_unlock(&journal->j_list_lock); spin_unlock(&jh->b_state_lock); write_unlock(&journal->j_state_lock); jbd2_journal_put_journal_head(jh); /* Already zapped buffer? Nothing to do... */ if (!bh->b_bdev) return 0; return -EBUSY; } /* * OK, buffer won't be reachable after truncate. We just clear * b_modified to not confuse transaction credit accounting, and * set j_next_transaction to the running transaction (if there * is one) and mark buffer as freed so that commit code knows * it should clear dirty bits when it is done with the buffer. */ set_buffer_freed(bh); if (journal->j_running_transaction && buffer_jbddirty(bh)) jh->b_next_transaction = journal->j_running_transaction; jh->b_modified = 0; spin_unlock(&journal->j_list_lock); spin_unlock(&jh->b_state_lock); write_unlock(&journal->j_state_lock); jbd2_journal_put_journal_head(jh); return 0; } else { /* Good, the buffer belongs to the running transaction. * We are writing our own transaction's data, not any * previous one's, so it is safe to throw it away * (remember that we expect the filesystem to have set * i_size already for this truncate so recovery will not * expose the disk blocks we are discarding here.) */ J_ASSERT_JH(jh, transaction == journal->j_running_transaction); JBUFFER_TRACE(jh, "on running transaction"); may_free = __dispose_buffer(jh, transaction); } zap_buffer: /* * This is tricky. Although the buffer is truncated, it may be reused * if blocksize < pagesize and it is attached to the page straddling * EOF. Since the buffer might have been added to BJ_Forget list of the * running transaction, journal_get_write_access() won't clear * b_modified and credit accounting gets confused. So clear b_modified * here. */ jh->b_modified = 0; spin_unlock(&journal->j_list_lock); spin_unlock(&jh->b_state_lock); write_unlock(&journal->j_state_lock); jbd2_journal_put_journal_head(jh); zap_buffer_unlocked: clear_buffer_dirty(bh); J_ASSERT_BH(bh, !buffer_jbddirty(bh)); clear_buffer_mapped(bh); clear_buffer_req(bh); clear_buffer_new(bh); clear_buffer_delay(bh); clear_buffer_unwritten(bh); bh->b_bdev = NULL; return may_free; } /** * jbd2_journal_invalidate_folio() * @journal: journal to use for flush... * @folio: folio to flush * @offset: start of the range to invalidate * @length: length of the range to invalidate * * Reap page buffers containing data after in the specified range in page. * Can return -EBUSY if buffers are part of the committing transaction and * the page is straddling i_size. Caller then has to wait for current commit * and try again. 
*/ int jbd2_journal_invalidate_folio(journal_t *journal, struct folio *folio, size_t offset, size_t length) { struct buffer_head *head, *bh, *next; unsigned int stop = offset + length; unsigned int curr_off = 0; int partial_page = (offset || length < folio_size(folio)); int may_free = 1; int ret = 0; if (!folio_test_locked(folio)) BUG(); head = folio_buffers(folio); if (!head) return 0; BUG_ON(stop > folio_size(folio) || stop < length); /* We will potentially be playing with lists other than just the * data lists (especially for journaled data mode), so be * cautious in our locking. */ bh = head; do { unsigned int next_off = curr_off + bh->b_size; next = bh->b_this_page; if (next_off > stop) return 0; if (offset <= curr_off) { /* This block is wholly outside the truncation point */ lock_buffer(bh); ret = journal_unmap_buffer(journal, bh, partial_page); unlock_buffer(bh); if (ret < 0) return ret; may_free &= ret; } curr_off = next_off; bh = next; } while (bh != head); if (!partial_page) { if (may_free && try_to_free_buffers(folio)) J_ASSERT(!folio_buffers(folio)); } return 0; } /* * File a buffer on the given transaction list. */ void __jbd2_journal_file_buffer(struct journal_head *jh, transaction_t *transaction, int jlist) { struct journal_head **list = NULL; int was_dirty = 0; struct buffer_head *bh = jh2bh(jh); lockdep_assert_held(&jh->b_state_lock); assert_spin_locked(&transaction->t_journal->j_list_lock); J_ASSERT_JH(jh, jh->b_jlist < BJ_Types); J_ASSERT_JH(jh, jh->b_transaction == transaction || jh->b_transaction == NULL); if (jh->b_transaction && jh->b_jlist == jlist) return; if (jlist == BJ_Metadata || jlist == BJ_Reserved || jlist == BJ_Shadow || jlist == BJ_Forget) { /* * For metadata buffers, we track dirty bit in buffer_jbddirty * instead of buffer_dirty. We should not see a dirty bit set * here because we clear it in do_get_write_access but e.g. * tune2fs can modify the sb and set the dirty bit at any time * so we try to gracefully handle that. */ if (buffer_dirty(bh)) warn_dirty_buffer(bh); if (test_clear_buffer_dirty(bh) || test_clear_buffer_jbddirty(bh)) was_dirty = 1; } if (jh->b_transaction) __jbd2_journal_temp_unlink_buffer(jh); else jbd2_journal_grab_journal_head(bh); jh->b_transaction = transaction; switch (jlist) { case BJ_None: J_ASSERT_JH(jh, !jh->b_committed_data); J_ASSERT_JH(jh, !jh->b_frozen_data); return; case BJ_Metadata: transaction->t_nr_buffers++; list = &transaction->t_buffers; break; case BJ_Forget: list = &transaction->t_forget; break; case BJ_Shadow: list = &transaction->t_shadow_list; break; case BJ_Reserved: list = &transaction->t_reserved_list; break; } __blist_add_buffer(list, jh); jh->b_jlist = jlist; if (was_dirty) set_buffer_jbddirty(bh); } void jbd2_journal_file_buffer(struct journal_head *jh, transaction_t *transaction, int jlist) { spin_lock(&jh->b_state_lock); spin_lock(&transaction->t_journal->j_list_lock); __jbd2_journal_file_buffer(jh, transaction, jlist); spin_unlock(&transaction->t_journal->j_list_lock); spin_unlock(&jh->b_state_lock); } /* * Remove a buffer from its current buffer list in preparation for * dropping it from its current transaction entirely. If the buffer has * already started to be used by a subsequent transaction, refile the * buffer on that transaction's metadata list. * * Called under j_list_lock * Called under jh->b_state_lock * * When this function returns true, there's no next transaction to refile to * and the caller has to drop jh reference through * jbd2_journal_put_journal_head(). 
*/ bool __jbd2_journal_refile_buffer(struct journal_head *jh) { int was_dirty, jlist; struct buffer_head *bh = jh2bh(jh); lockdep_assert_held(&jh->b_state_lock); if (jh->b_transaction) assert_spin_locked(&jh->b_transaction->t_journal->j_list_lock); /* If the buffer is now unused, just drop it. */ if (jh->b_next_transaction == NULL) { __jbd2_journal_unfile_buffer(jh); return true; } /* * It has been modified by a later transaction: add it to the new * transaction's metadata list. */ was_dirty = test_clear_buffer_jbddirty(bh); __jbd2_journal_temp_unlink_buffer(jh); /* * b_transaction must be set, otherwise the new b_transaction won't * be holding jh reference */ J_ASSERT_JH(jh, jh->b_transaction != NULL); /* * We set b_transaction here because b_next_transaction will inherit * our jh reference and thus __jbd2_journal_file_buffer() must not * take a new one. */ WRITE_ONCE(jh->b_transaction, jh->b_next_transaction); WRITE_ONCE(jh->b_next_transaction, NULL); if (buffer_freed(bh)) jlist = BJ_Forget; else if (jh->b_modified) jlist = BJ_Metadata; else jlist = BJ_Reserved; __jbd2_journal_file_buffer(jh, jh->b_transaction, jlist); J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING); if (was_dirty) set_buffer_jbddirty(bh); return false; } /* * __jbd2_journal_refile_buffer() with necessary locking added. We take our * bh reference so that we can safely unlock bh. * * The jh and bh may be freed by this call. */ void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh) { bool drop; spin_lock(&jh->b_state_lock); spin_lock(&journal->j_list_lock); drop = __jbd2_journal_refile_buffer(jh); spin_unlock(&jh->b_state_lock); spin_unlock(&journal->j_list_lock); if (drop) jbd2_journal_put_journal_head(jh); } /* * File inode in the inode list of the handle's transaction */ static int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode, unsigned long flags, loff_t start_byte, loff_t end_byte) { transaction_t *transaction = handle->h_transaction; journal_t *journal; if (is_handle_aborted(handle)) return -EROFS; journal = transaction->t_journal; jbd2_debug(4, "Adding inode %lu, tid:%d\n", jinode->i_vfs_inode->i_ino, transaction->t_tid); spin_lock(&journal->j_list_lock); jinode->i_flags |= flags; if (jinode->i_dirty_end) { jinode->i_dirty_start = min(jinode->i_dirty_start, start_byte); jinode->i_dirty_end = max(jinode->i_dirty_end, end_byte); } else { jinode->i_dirty_start = start_byte; jinode->i_dirty_end = end_byte; } /* Is inode already attached where we need it? */ if (jinode->i_transaction == transaction || jinode->i_next_transaction == transaction) goto done; /* * We only ever set this variable to 1 so the test is safe. Since * t_need_data_flush is likely to be set, we do the test to save some * cacheline bouncing */ if (!transaction->t_need_data_flush) transaction->t_need_data_flush = 1; /* On some different transaction's list - should be * the committing one */ if (jinode->i_transaction) { J_ASSERT(jinode->i_next_transaction == NULL); J_ASSERT(jinode->i_transaction == journal->j_committing_transaction); jinode->i_next_transaction = transaction; goto done; } /* Not on any transaction list... 
*/ J_ASSERT(!jinode->i_next_transaction); jinode->i_transaction = transaction; list_add(&jinode->i_list, &transaction->t_inode_list); done: spin_unlock(&journal->j_list_lock); return 0; } int jbd2_journal_inode_ranged_write(handle_t *handle, struct jbd2_inode *jinode, loff_t start_byte, loff_t length) { return jbd2_journal_file_inode(handle, jinode, JI_WRITE_DATA | JI_WAIT_DATA, start_byte, start_byte + length - 1); } int jbd2_journal_inode_ranged_wait(handle_t *handle, struct jbd2_inode *jinode, loff_t start_byte, loff_t length) { return jbd2_journal_file_inode(handle, jinode, JI_WAIT_DATA, start_byte, start_byte + length - 1); } /* * File truncate and transaction commit interact with each other in a * non-trivial way. If a transaction writing data block A is * committing, we cannot discard the data by truncate until we have * written them. Otherwise if we crashed after the transaction with * write has committed but before the transaction with truncate has * committed, we could see stale data in block A. This function is a * helper to solve this problem. It starts writeout of the truncated * part in case it is in the committing transaction. * * Filesystem code must call this function when inode is journaled in * ordered mode before truncation happens and after the inode has been * placed on orphan list with the new inode size. The second condition * avoids the race that someone writes new data and we start * committing the transaction after this function has been called but * before a transaction for truncate is started (and furthermore it * allows us to optimize the case where the addition to orphan list * happens in the same transaction as write --- we don't have to write * any data in such case). */ int jbd2_journal_begin_ordered_truncate(journal_t *journal, struct jbd2_inode *jinode, loff_t new_size) { transaction_t *inode_trans, *commit_trans; int ret = 0; /* This is a quick check to avoid locking if not necessary */ if (!jinode->i_transaction) goto out; /* Locks are here just to force reading of recent values, it is * enough that the transaction was not committing before we started * a transaction adding the inode to orphan list */ read_lock(&journal->j_state_lock); commit_trans = journal->j_committing_transaction; read_unlock(&journal->j_state_lock); spin_lock(&journal->j_list_lock); inode_trans = jinode->i_transaction; spin_unlock(&journal->j_list_lock); if (inode_trans == commit_trans) { ret = filemap_fdatawrite_range(jinode->i_vfs_inode->i_mapping, new_size, LLONG_MAX); if (ret) jbd2_journal_abort(journal, ret); } out: return ret; }
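/*
 * Editor's note: a minimal illustrative sketch (not part of the upstream
 * file) of how a filesystem typically drives the handle API documented
 * above.  The helper name example_update_block() and the credit count are
 * assumptions for illustration only; the jbd2 calls themselves
 * (jbd2_journal_start(), jbd2_journal_get_write_access(),
 * jbd2_journal_dirty_metadata(), jbd2_journal_stop()) are the real API
 * described in the kernel-doc comments in this file.
 */
static int __maybe_unused example_update_block(journal_t *journal,
					       struct buffer_head *bh)
{
	handle_t *handle;
	int err;

	/* Reserve credit for one metadata block in a new (or joined) handle. */
	handle = jbd2_journal_start(journal, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* Declare intent to modify bh; jbd2 may copy out frozen data. */
	err = jbd2_journal_get_write_access(handle, bh);
	if (err)
		goto out;

	/* ... the caller would modify bh->b_data here ... */

	/* File bh on the running transaction's BJ_Metadata list. */
	err = jbd2_journal_dirty_metadata(handle, bh);
out:
	/* Return remaining credits; may kick a commit for sync handles. */
	jbd2_journal_stop(handle);
	return err;
}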
linux-master
fs/jbd2/transaction.c
// SPDX-License-Identifier: GPL-2.0+ /* * linux/fs/jbd2/checkpoint.c * * Written by Stephen C. Tweedie <[email protected]>, 1999 * * Copyright 1999 Red Hat Software --- All Rights Reserved * * Checkpoint routines for the generic filesystem journaling code. * Part of the ext2fs journaling system. * * Checkpointing is the process of ensuring that a section of the log is * committed fully to disk, so that that portion of the log can be * reused. */ #include <linux/time.h> #include <linux/fs.h> #include <linux/jbd2.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/blkdev.h> #include <trace/events/jbd2.h> /* * Unlink a buffer from a transaction checkpoint list. * * Called with j_list_lock held. */ static inline void __buffer_unlink(struct journal_head *jh) { transaction_t *transaction = jh->b_cp_transaction; jh->b_cpnext->b_cpprev = jh->b_cpprev; jh->b_cpprev->b_cpnext = jh->b_cpnext; if (transaction->t_checkpoint_list == jh) { transaction->t_checkpoint_list = jh->b_cpnext; if (transaction->t_checkpoint_list == jh) transaction->t_checkpoint_list = NULL; } } /* * __jbd2_log_wait_for_space: wait until there is space in the journal. * * Called under j-state_lock *only*. It will be unlocked if we have to wait * for a checkpoint to free up some space in the log. */ void __jbd2_log_wait_for_space(journal_t *journal) __acquires(&journal->j_state_lock) __releases(&journal->j_state_lock) { int nblocks, space_left; /* assert_spin_locked(&journal->j_state_lock); */ nblocks = journal->j_max_transaction_buffers; while (jbd2_log_space_left(journal) < nblocks) { write_unlock(&journal->j_state_lock); mutex_lock_io(&journal->j_checkpoint_mutex); /* * Test again, another process may have checkpointed while we * were waiting for the checkpoint lock. If there are no * transactions ready to be checkpointed, try to recover * journal space by calling cleanup_journal_tail(), and if * that doesn't work, by waiting for the currently committing * transaction to complete. If there is absolutely no way * to make progress, this is either a BUG or corrupted * filesystem, so abort the journal and leave a stack * trace for forensic evidence. */ write_lock(&journal->j_state_lock); if (journal->j_flags & JBD2_ABORT) { mutex_unlock(&journal->j_checkpoint_mutex); return; } spin_lock(&journal->j_list_lock); space_left = jbd2_log_space_left(journal); if (space_left < nblocks) { int chkpt = journal->j_checkpoint_transactions != NULL; tid_t tid = 0; if (journal->j_committing_transaction) tid = journal->j_committing_transaction->t_tid; spin_unlock(&journal->j_list_lock); write_unlock(&journal->j_state_lock); if (chkpt) { jbd2_log_do_checkpoint(journal); } else if (jbd2_cleanup_journal_tail(journal) == 0) { /* We were able to recover space; yay! */ ; } else if (tid) { /* * jbd2_journal_commit_transaction() may want * to take the checkpoint_mutex if JBD2_FLUSHED * is set. So we need to temporarily drop it. 
*/ mutex_unlock(&journal->j_checkpoint_mutex); jbd2_log_wait_commit(journal, tid); write_lock(&journal->j_state_lock); continue; } else { printk(KERN_ERR "%s: needed %d blocks and " "only had %d space available\n", __func__, nblocks, space_left); printk(KERN_ERR "%s: no way to get more " "journal space in %s\n", __func__, journal->j_devname); WARN_ON(1); jbd2_journal_abort(journal, -EIO); } write_lock(&journal->j_state_lock); } else { spin_unlock(&journal->j_list_lock); } mutex_unlock(&journal->j_checkpoint_mutex); } } static void __flush_batch(journal_t *journal, int *batch_count) { int i; struct blk_plug plug; blk_start_plug(&plug); for (i = 0; i < *batch_count; i++) write_dirty_buffer(journal->j_chkpt_bhs[i], REQ_SYNC); blk_finish_plug(&plug); for (i = 0; i < *batch_count; i++) { struct buffer_head *bh = journal->j_chkpt_bhs[i]; BUFFER_TRACE(bh, "brelse"); __brelse(bh); journal->j_chkpt_bhs[i] = NULL; } *batch_count = 0; } /* * Perform an actual checkpoint. We take the first transaction on the * list of transactions to be checkpointed and send all its buffers * to disk. We submit larger chunks of data at once. * * The journal should be locked before calling this function. * Called with j_checkpoint_mutex held. */ int jbd2_log_do_checkpoint(journal_t *journal) { struct journal_head *jh; struct buffer_head *bh; transaction_t *transaction; tid_t this_tid; int result, batch_count = 0; jbd2_debug(1, "Start checkpoint\n"); /* * First thing: if there are any transactions in the log which * don't need checkpointing, just eliminate them from the * journal straight away. */ result = jbd2_cleanup_journal_tail(journal); trace_jbd2_checkpoint(journal, result); jbd2_debug(1, "cleanup_journal_tail returned %d\n", result); if (result <= 0) return result; /* * OK, we need to start writing disk blocks. Take one transaction * and write it. */ spin_lock(&journal->j_list_lock); if (!journal->j_checkpoint_transactions) goto out; transaction = journal->j_checkpoint_transactions; if (transaction->t_chp_stats.cs_chp_time == 0) transaction->t_chp_stats.cs_chp_time = jiffies; this_tid = transaction->t_tid; restart: /* * If someone cleaned up this transaction while we slept, we're * done (maybe it's a new transaction, but it fell at the same * address). */ if (journal->j_checkpoint_transactions != transaction || transaction->t_tid != this_tid) goto out; /* checkpoint all of the transaction's buffers */ while (transaction->t_checkpoint_list) { jh = transaction->t_checkpoint_list; bh = jh2bh(jh); if (jh->b_transaction != NULL) { transaction_t *t = jh->b_transaction; tid_t tid = t->t_tid; transaction->t_chp_stats.cs_forced_to_close++; spin_unlock(&journal->j_list_lock); if (unlikely(journal->j_flags & JBD2_UNMOUNT)) /* * The journal thread is dead; so * starting and waiting for a commit * to finish will cause us to wait for * a _very_ long time. */ printk(KERN_ERR "JBD2: %s: Waiting for Godot: block %llu\n", journal->j_devname, (unsigned long long) bh->b_blocknr); if (batch_count) __flush_batch(journal, &batch_count); jbd2_log_start_commit(journal, tid); /* * jbd2_journal_commit_transaction() may want * to take the checkpoint_mutex if JBD2_FLUSHED * is set, jbd2_update_log_tail() called by * jbd2_journal_commit_transaction() may also take * checkpoint_mutex. So we need to temporarily * drop it. 
*/ mutex_unlock(&journal->j_checkpoint_mutex); jbd2_log_wait_commit(journal, tid); mutex_lock_io(&journal->j_checkpoint_mutex); spin_lock(&journal->j_list_lock); goto restart; } if (!trylock_buffer(bh)) { /* * The buffer is locked, it may be writing back, or * flushing out in the last couple of cycles, or * re-adding into a new transaction, need to check * it again until it's unlocked. */ get_bh(bh); spin_unlock(&journal->j_list_lock); wait_on_buffer(bh); /* the journal_head may have gone by now */ BUFFER_TRACE(bh, "brelse"); __brelse(bh); goto retry; } else if (!buffer_dirty(bh)) { unlock_buffer(bh); BUFFER_TRACE(bh, "remove from checkpoint"); /* * If the transaction was released or the checkpoint * list was empty, we're done. */ if (__jbd2_journal_remove_checkpoint(jh) || !transaction->t_checkpoint_list) goto out; } else { unlock_buffer(bh); /* * We are about to write the buffer, it could be * raced by some other transaction shrink or buffer * re-log logic once we release the j_list_lock, * leave it on the checkpoint list and check status * again to make sure it's clean. */ BUFFER_TRACE(bh, "queue"); get_bh(bh); J_ASSERT_BH(bh, !buffer_jwrite(bh)); journal->j_chkpt_bhs[batch_count++] = bh; transaction->t_chp_stats.cs_written++; transaction->t_checkpoint_list = jh->b_cpnext; } if ((batch_count == JBD2_NR_BATCH) || need_resched() || spin_needbreak(&journal->j_list_lock) || jh2bh(transaction->t_checkpoint_list) == journal->j_chkpt_bhs[0]) goto unlock_and_flush; } if (batch_count) { unlock_and_flush: spin_unlock(&journal->j_list_lock); retry: if (batch_count) __flush_batch(journal, &batch_count); spin_lock(&journal->j_list_lock); goto restart; } out: spin_unlock(&journal->j_list_lock); result = jbd2_cleanup_journal_tail(journal); return (result < 0) ? result : 0; } /* * Check the list of checkpoint transactions for the journal to see if * we have already got rid of any since the last update of the log tail * in the journal superblock. If so, we can instantly roll the * superblock forward to remove those transactions from the log. * * Return <0 on error, 0 on success, 1 if there was nothing to clean up. * * Called with the journal lock held. * * This is the only part of the journaling code which really needs to be * aware of transaction aborts. Checkpointing involves writing to the * main filesystem area rather than to the journal, so it can proceed * even in abort state, but we must not update the super block if * checkpointing may have failed. Otherwise, we would lose some metadata * buffers which should be written-back to the filesystem. */ int jbd2_cleanup_journal_tail(journal_t *journal) { tid_t first_tid; unsigned long blocknr; if (is_journal_aborted(journal)) return -EIO; if (!jbd2_journal_get_log_tail(journal, &first_tid, &blocknr)) return 1; J_ASSERT(blocknr != 0); /* * We need to make sure that any blocks that were recently written out * --- perhaps by jbd2_log_do_checkpoint() --- are flushed out before * we drop the transactions from the journal. It's unlikely this will * be necessary, especially with an appropriately sized journal, but we * need this to guarantee correctness. Fortunately * jbd2_cleanup_journal_tail() doesn't get called all that often. 
*/ if (journal->j_flags & JBD2_BARRIER) blkdev_issue_flush(journal->j_fs_dev); return __jbd2_update_log_tail(journal, first_tid, blocknr); } /* Checkpoint list management */ enum shrink_type {SHRINK_DESTROY, SHRINK_BUSY_STOP, SHRINK_BUSY_SKIP}; /* * journal_shrink_one_cp_list * * Find all the written-back checkpoint buffers in the given list * and try to release them. If the whole transaction is released, set * the 'released' parameter. Return the number of released checkpointed * buffers. * * Called with j_list_lock held. */ static unsigned long journal_shrink_one_cp_list(struct journal_head *jh, enum shrink_type type, bool *released) { struct journal_head *last_jh; struct journal_head *next_jh = jh; unsigned long nr_freed = 0; int ret; *released = false; if (!jh) return 0; last_jh = jh->b_cpprev; do { jh = next_jh; next_jh = jh->b_cpnext; if (type == SHRINK_DESTROY) { ret = __jbd2_journal_remove_checkpoint(jh); } else { ret = jbd2_journal_try_remove_checkpoint(jh); if (ret < 0) { if (type == SHRINK_BUSY_SKIP) continue; break; } } nr_freed++; if (ret) { *released = true; break; } if (need_resched()) break; } while (jh != last_jh); return nr_freed; } /* * jbd2_journal_shrink_checkpoint_list * * Find 'nr_to_scan' written-back checkpoint buffers in the journal * and try to release them. Return the number of released checkpointed * buffers. * * Called with j_list_lock held. */ unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal, unsigned long *nr_to_scan) { transaction_t *transaction, *last_transaction, *next_transaction; bool __maybe_unused released; tid_t first_tid = 0, last_tid = 0, next_tid = 0; tid_t tid = 0; unsigned long nr_freed = 0; unsigned long freed; again: spin_lock(&journal->j_list_lock); if (!journal->j_checkpoint_transactions) { spin_unlock(&journal->j_list_lock); goto out; } /* * Get next shrink transaction, resume previous scan or start * over again. If some others do checkpoint and drop transaction * from the checkpoint list, we ignore saved j_shrink_transaction * and start over unconditionally. */ if (journal->j_shrink_transaction) transaction = journal->j_shrink_transaction; else transaction = journal->j_checkpoint_transactions; if (!first_tid) first_tid = transaction->t_tid; last_transaction = journal->j_checkpoint_transactions->t_cpprev; next_transaction = transaction; last_tid = last_transaction->t_tid; do { transaction = next_transaction; next_transaction = transaction->t_cpnext; tid = transaction->t_tid; freed = journal_shrink_one_cp_list(transaction->t_checkpoint_list, SHRINK_BUSY_SKIP, &released); nr_freed += freed; (*nr_to_scan) -= min(*nr_to_scan, freed); if (*nr_to_scan == 0) break; if (need_resched() || spin_needbreak(&journal->j_list_lock)) break; } while (transaction != last_transaction); if (transaction != last_transaction) { journal->j_shrink_transaction = next_transaction; next_tid = next_transaction->t_tid; } else { journal->j_shrink_transaction = NULL; next_tid = 0; } spin_unlock(&journal->j_list_lock); cond_resched(); if (*nr_to_scan && next_tid) goto again; out: trace_jbd2_shrink_checkpoint_list(journal, first_tid, tid, last_tid, nr_freed, next_tid); return nr_freed; } /* * journal_clean_checkpoint_list * * Find all the written-back checkpoint buffers in the journal and release them. * If 'destroy' is set, release all buffers unconditionally. * * Called with j_list_lock held. 
*/ void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy) { transaction_t *transaction, *last_transaction, *next_transaction; enum shrink_type type; bool released; transaction = journal->j_checkpoint_transactions; if (!transaction) return; type = destroy ? SHRINK_DESTROY : SHRINK_BUSY_STOP; last_transaction = transaction->t_cpprev; next_transaction = transaction; do { transaction = next_transaction; next_transaction = transaction->t_cpnext; journal_shrink_one_cp_list(transaction->t_checkpoint_list, type, &released); /* * This function only frees up some memory if possible so we * don't have an obligation to finish processing. Bail out if * preemption requested: */ if (need_resched()) return; /* * Stop scanning if we couldn't free the transaction. This * avoids pointless scanning of transactions which still * weren't checkpointed. */ if (!released) return; } while (transaction != last_transaction); } /* * Remove buffers from all checkpoint lists as journal is aborted and we just * need to free memory */ void jbd2_journal_destroy_checkpoint(journal_t *journal) { /* * We loop because __jbd2_journal_clean_checkpoint_list() may abort * early due to a need of rescheduling. */ while (1) { spin_lock(&journal->j_list_lock); if (!journal->j_checkpoint_transactions) { spin_unlock(&journal->j_list_lock); break; } __jbd2_journal_clean_checkpoint_list(journal, true); spin_unlock(&journal->j_list_lock); cond_resched(); } } /* * journal_remove_checkpoint: called after a buffer has been committed * to disk (either by being write-back flushed to disk, or being * committed to the log). * * We cannot safely clean a transaction out of the log until all of the * buffer updates committed in that transaction have safely been stored * elsewhere on disk. To achieve this, all of the buffers in a * transaction need to be maintained on the transaction's checkpoint * lists until they have been rewritten, at which point this function is * called to remove the buffer from the existing transaction's * checkpoint lists. * * The function returns 1 if it frees the transaction, 0 otherwise. * The function can free jh and bh. * * This function is called with j_list_lock held. */ int __jbd2_journal_remove_checkpoint(struct journal_head *jh) { struct transaction_chp_stats_s *stats; transaction_t *transaction; journal_t *journal; struct buffer_head *bh = jh2bh(jh); JBUFFER_TRACE(jh, "entry"); transaction = jh->b_cp_transaction; if (!transaction) { JBUFFER_TRACE(jh, "not on transaction"); return 0; } journal = transaction->t_journal; JBUFFER_TRACE(jh, "removing from transaction"); /* * If we have failed to write the buffer out to disk, the filesystem * may become inconsistent. We cannot abort the journal here since * we hold j_list_lock and we have to be careful about races with * jbd2_journal_destroy(). So mark the writeback IO error in the * journal here and we abort the journal later from a better context. */ if (buffer_write_io_error(bh)) set_bit(JBD2_CHECKPOINT_IO_ERROR, &journal->j_atomic_flags); __buffer_unlink(jh); jh->b_cp_transaction = NULL; percpu_counter_dec(&journal->j_checkpoint_jh_count); jbd2_journal_put_journal_head(jh); /* Is this transaction empty? */ if (transaction->t_checkpoint_list) return 0; /* * There is one special case to worry about: if we have just pulled the * buffer off a running or committing transaction's checkpoint list, * then even if the checkpoint list is empty, the transaction obviously * cannot be dropped! * * The locking here around t_state is a bit sleazy. 
* See the comment at the end of jbd2_journal_commit_transaction(). */ if (transaction->t_state != T_FINISHED) return 0; /* * OK, that was the last buffer for the transaction, we can now * safely remove this transaction from the log. */ stats = &transaction->t_chp_stats; if (stats->cs_chp_time) stats->cs_chp_time = jbd2_time_diff(stats->cs_chp_time, jiffies); trace_jbd2_checkpoint_stats(journal->j_fs_dev->bd_dev, transaction->t_tid, stats); __jbd2_journal_drop_transaction(journal, transaction); jbd2_journal_free_transaction(transaction); return 1; } /* * Check the checkpoint buffer and try to remove it from the checkpoint * list if it's clean. Returns -EBUSY if it is not clean, returns 1 if * it frees the transaction, 0 otherwise. * * This function is called with j_list_lock held. */ int jbd2_journal_try_remove_checkpoint(struct journal_head *jh) { struct buffer_head *bh = jh2bh(jh); if (jh->b_transaction) return -EBUSY; if (!trylock_buffer(bh)) return -EBUSY; if (buffer_dirty(bh)) { unlock_buffer(bh); return -EBUSY; } unlock_buffer(bh); /* * Buffer is clean and the IO has finished (we held the buffer * lock) so the checkpoint is done. We can safely remove the * buffer from this transaction. */ JBUFFER_TRACE(jh, "remove from checkpoint list"); return __jbd2_journal_remove_checkpoint(jh); } /* * journal_insert_checkpoint: put a committed buffer onto a checkpoint * list so that we know when it is safe to clean the transaction out of * the log. * * Called with the journal locked. * Called with j_list_lock held. */ void __jbd2_journal_insert_checkpoint(struct journal_head *jh, transaction_t *transaction) { JBUFFER_TRACE(jh, "entry"); J_ASSERT_JH(jh, buffer_dirty(jh2bh(jh)) || buffer_jbddirty(jh2bh(jh))); J_ASSERT_JH(jh, jh->b_cp_transaction == NULL); /* Get reference for checkpointing transaction */ jbd2_journal_grab_journal_head(jh2bh(jh)); jh->b_cp_transaction = transaction; if (!transaction->t_checkpoint_list) { jh->b_cpnext = jh->b_cpprev = jh; } else { jh->b_cpnext = transaction->t_checkpoint_list; jh->b_cpprev = transaction->t_checkpoint_list->b_cpprev; jh->b_cpprev->b_cpnext = jh; jh->b_cpnext->b_cpprev = jh; } transaction->t_checkpoint_list = jh; percpu_counter_inc(&transaction->t_journal->j_checkpoint_jh_count); } /* * We've finished with this transaction structure: adios... * * The transaction must have no links except for the checkpoint by this * point. * * Called with the journal locked. * Called with j_list_lock held. */ void __jbd2_journal_drop_transaction(journal_t *journal, transaction_t *transaction) { assert_spin_locked(&journal->j_list_lock); journal->j_shrink_transaction = NULL; if (transaction->t_cpnext) { transaction->t_cpnext->t_cpprev = transaction->t_cpprev; transaction->t_cpprev->t_cpnext = transaction->t_cpnext; if (journal->j_checkpoint_transactions == transaction) journal->j_checkpoint_transactions = transaction->t_cpnext; if (journal->j_checkpoint_transactions == transaction) journal->j_checkpoint_transactions = NULL; } J_ASSERT(transaction->t_state == T_FINISHED); J_ASSERT(transaction->t_buffers == NULL); J_ASSERT(transaction->t_forget == NULL); J_ASSERT(transaction->t_shadow_list == NULL); J_ASSERT(transaction->t_checkpoint_list == NULL); J_ASSERT(atomic_read(&transaction->t_updates) == 0); J_ASSERT(journal->j_committing_transaction != transaction); J_ASSERT(journal->j_running_transaction != transaction); trace_jbd2_drop_transaction(journal, transaction); jbd2_debug(1, "Dropping transaction %d, all done\n", transaction->t_tid); }
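/*
 * Editor's note: an illustrative sketch (not part of the upstream file)
 * showing the locking pattern with which callers typically drain the
 * checkpoint list via jbd2_log_do_checkpoint() above.  The helper name
 * example_drain_checkpoints() is an assumption for illustration; the
 * locking rules (j_checkpoint_mutex held around the checkpoint,
 * j_list_lock only while inspecting the list) follow the comments in
 * this file.
 */
static int __maybe_unused example_drain_checkpoints(journal_t *journal)
{
	int err = 0;

	spin_lock(&journal->j_list_lock);
	while (!err && journal->j_checkpoint_transactions != NULL) {
		spin_unlock(&journal->j_list_lock);
		/* jbd2_log_do_checkpoint() expects j_checkpoint_mutex held. */
		mutex_lock_io(&journal->j_checkpoint_mutex);
		err = jbd2_log_do_checkpoint(journal);
		mutex_unlock(&journal->j_checkpoint_mutex);
		spin_lock(&journal->j_list_lock);
	}
	spin_unlock(&journal->j_list_lock);

	return err;
}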
linux-master
fs/jbd2/checkpoint.c
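The checkpoint-list handling above threads buffers on a circular doubly-linked list via b_cpnext/b_cpprev, inserting at the list head and collapsing the head pointer when the last element goes away. A minimal userspace sketch of that same insert/unlink discipline, using an invented stand-in type (struct cp_node) rather than the real struct journal_head:

/* Standalone sketch of the circular cpnext/cpprev list pattern used by
 * __jbd2_journal_insert_checkpoint() above; names are illustrative only. */
#include <assert.h>
#include <stddef.h>

struct cp_node {
	struct cp_node *cpnext, *cpprev;
};

/* Insert node at the head of the circular list; *list plays the role of
 * t_checkpoint_list and is NULL when the list is empty. */
static void cp_insert(struct cp_node **list, struct cp_node *node)
{
	if (!*list) {
		node->cpnext = node->cpprev = node;
	} else {
		node->cpnext = *list;
		node->cpprev = (*list)->cpprev;
		node->cpprev->cpnext = node;
		node->cpnext->cpprev = node;
	}
	*list = node;
}

/* Unlink node; clear *list when it was the only element. */
static void cp_unlink(struct cp_node **list, struct cp_node *node)
{
	if (*list == node)
		*list = node->cpnext;
	if (*list == node)		/* node was the last element */
		*list = NULL;
	node->cpprev->cpnext = node->cpnext;
	node->cpnext->cpprev = node->cpprev;
}

int main(void)
{
	struct cp_node a = {0}, b = {0};
	struct cp_node *head = NULL;

	cp_insert(&head, &a);
	cp_insert(&head, &b);
	assert(head == &b && b.cpnext == &a && a.cpnext == &b);
	cp_unlink(&head, &b);
	assert(head == &a && a.cpnext == &a && a.cpprev == &a);
	cp_unlink(&head, &a);
	assert(head == NULL);
	return 0;
}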
// SPDX-License-Identifier: GPL-2.0-only /* * linux/fs/affs/inode.c * * (c) 1996 Hans-Joachim Widmaier - Rewritten * * (C) 1993 Ray Burr - Modified for Amiga FFS filesystem. * * (C) 1992 Eric Youngdale Modified for ISO 9660 filesystem. * * (C) 1991 Linus Torvalds - minix filesystem */ #include <linux/module.h> #include <linux/init.h> #include <linux/statfs.h> #include <linux/parser.h> #include <linux/magic.h> #include <linux/sched.h> #include <linux/cred.h> #include <linux/slab.h> #include <linux/writeback.h> #include <linux/blkdev.h> #include <linux/seq_file.h> #include <linux/iversion.h> #include "affs.h" static int affs_statfs(struct dentry *dentry, struct kstatfs *buf); static int affs_show_options(struct seq_file *m, struct dentry *root); static int affs_remount (struct super_block *sb, int *flags, char *data); static void affs_commit_super(struct super_block *sb, int wait) { struct affs_sb_info *sbi = AFFS_SB(sb); struct buffer_head *bh = sbi->s_root_bh; struct affs_root_tail *tail = AFFS_ROOT_TAIL(sb, bh); lock_buffer(bh); affs_secs_to_datestamp(ktime_get_real_seconds(), &tail->disk_change); affs_fix_checksum(sb, bh); unlock_buffer(bh); mark_buffer_dirty(bh); if (wait) sync_dirty_buffer(bh); } static void affs_put_super(struct super_block *sb) { struct affs_sb_info *sbi = AFFS_SB(sb); pr_debug("%s()\n", __func__); cancel_delayed_work_sync(&sbi->sb_work); } static int affs_sync_fs(struct super_block *sb, int wait) { affs_commit_super(sb, wait); return 0; } static void flush_superblock(struct work_struct *work) { struct affs_sb_info *sbi; struct super_block *sb; sbi = container_of(work, struct affs_sb_info, sb_work.work); sb = sbi->sb; spin_lock(&sbi->work_lock); sbi->work_queued = 0; spin_unlock(&sbi->work_lock); affs_commit_super(sb, 1); } void affs_mark_sb_dirty(struct super_block *sb) { struct affs_sb_info *sbi = AFFS_SB(sb); unsigned long delay; if (sb_rdonly(sb)) return; spin_lock(&sbi->work_lock); if (!sbi->work_queued) { delay = msecs_to_jiffies(dirty_writeback_interval * 10); queue_delayed_work(system_long_wq, &sbi->sb_work, delay); sbi->work_queued = 1; } spin_unlock(&sbi->work_lock); } static struct kmem_cache * affs_inode_cachep; static struct inode *affs_alloc_inode(struct super_block *sb) { struct affs_inode_info *i; i = alloc_inode_sb(sb, affs_inode_cachep, GFP_KERNEL); if (!i) return NULL; inode_set_iversion(&i->vfs_inode, 1); i->i_lc = NULL; i->i_ext_bh = NULL; i->i_pa_cnt = 0; return &i->vfs_inode; } static void affs_free_inode(struct inode *inode) { kmem_cache_free(affs_inode_cachep, AFFS_I(inode)); } static void init_once(void *foo) { struct affs_inode_info *ei = (struct affs_inode_info *) foo; mutex_init(&ei->i_link_lock); mutex_init(&ei->i_ext_lock); inode_init_once(&ei->vfs_inode); } static int __init init_inodecache(void) { affs_inode_cachep = kmem_cache_create("affs_inode_cache", sizeof(struct affs_inode_info), 0, (SLAB_RECLAIM_ACCOUNT| SLAB_MEM_SPREAD|SLAB_ACCOUNT), init_once); if (affs_inode_cachep == NULL) return -ENOMEM; return 0; } static void destroy_inodecache(void) { /* * Make sure all delayed rcu free inodes are flushed before we * destroy cache. 
*/ rcu_barrier(); kmem_cache_destroy(affs_inode_cachep); } static const struct super_operations affs_sops = { .alloc_inode = affs_alloc_inode, .free_inode = affs_free_inode, .write_inode = affs_write_inode, .evict_inode = affs_evict_inode, .put_super = affs_put_super, .sync_fs = affs_sync_fs, .statfs = affs_statfs, .remount_fs = affs_remount, .show_options = affs_show_options, }; enum { Opt_bs, Opt_mode, Opt_mufs, Opt_notruncate, Opt_prefix, Opt_protect, Opt_reserved, Opt_root, Opt_setgid, Opt_setuid, Opt_verbose, Opt_volume, Opt_ignore, Opt_err, }; static const match_table_t tokens = { {Opt_bs, "bs=%u"}, {Opt_mode, "mode=%o"}, {Opt_mufs, "mufs"}, {Opt_notruncate, "nofilenametruncate"}, {Opt_prefix, "prefix=%s"}, {Opt_protect, "protect"}, {Opt_reserved, "reserved=%u"}, {Opt_root, "root=%u"}, {Opt_setgid, "setgid=%u"}, {Opt_setuid, "setuid=%u"}, {Opt_verbose, "verbose"}, {Opt_volume, "volume=%s"}, {Opt_ignore, "grpquota"}, {Opt_ignore, "noquota"}, {Opt_ignore, "quota"}, {Opt_ignore, "usrquota"}, {Opt_err, NULL}, }; static int parse_options(char *options, kuid_t *uid, kgid_t *gid, int *mode, int *reserved, s32 *root, int *blocksize, char **prefix, char *volume, unsigned long *mount_opts) { char *p; substring_t args[MAX_OPT_ARGS]; /* Fill in defaults */ *uid = current_uid(); *gid = current_gid(); *reserved = 2; *root = -1; *blocksize = -1; volume[0] = ':'; volume[1] = 0; *mount_opts = 0; if (!options) return 1; while ((p = strsep(&options, ",")) != NULL) { int token, n, option; if (!*p) continue; token = match_token(p, tokens, args); switch (token) { case Opt_bs: if (match_int(&args[0], &n)) return 0; if (n != 512 && n != 1024 && n != 2048 && n != 4096) { pr_warn("Invalid blocksize (512, 1024, 2048, 4096 allowed)\n"); return 0; } *blocksize = n; break; case Opt_mode: if (match_octal(&args[0], &option)) return 0; *mode = option & 0777; affs_set_opt(*mount_opts, SF_SETMODE); break; case Opt_mufs: affs_set_opt(*mount_opts, SF_MUFS); break; case Opt_notruncate: affs_set_opt(*mount_opts, SF_NO_TRUNCATE); break; case Opt_prefix: kfree(*prefix); *prefix = match_strdup(&args[0]); if (!*prefix) return 0; affs_set_opt(*mount_opts, SF_PREFIX); break; case Opt_protect: affs_set_opt(*mount_opts, SF_IMMUTABLE); break; case Opt_reserved: if (match_int(&args[0], reserved)) return 0; break; case Opt_root: if (match_int(&args[0], root)) return 0; break; case Opt_setgid: if (match_int(&args[0], &option)) return 0; *gid = make_kgid(current_user_ns(), option); if (!gid_valid(*gid)) return 0; affs_set_opt(*mount_opts, SF_SETGID); break; case Opt_setuid: if (match_int(&args[0], &option)) return 0; *uid = make_kuid(current_user_ns(), option); if (!uid_valid(*uid)) return 0; affs_set_opt(*mount_opts, SF_SETUID); break; case Opt_verbose: affs_set_opt(*mount_opts, SF_VERBOSE); break; case Opt_volume: { char *vol = match_strdup(&args[0]); if (!vol) return 0; strscpy(volume, vol, 32); kfree(vol); break; } case Opt_ignore: /* Silently ignore the quota options */ break; default: pr_warn("Unrecognized mount option \"%s\" or missing value\n", p); return 0; } } return 1; } static int affs_show_options(struct seq_file *m, struct dentry *root) { struct super_block *sb = root->d_sb; struct affs_sb_info *sbi = AFFS_SB(sb); if (sb->s_blocksize) seq_printf(m, ",bs=%lu", sb->s_blocksize); if (affs_test_opt(sbi->s_flags, SF_SETMODE)) seq_printf(m, ",mode=%o", sbi->s_mode); if (affs_test_opt(sbi->s_flags, SF_MUFS)) seq_puts(m, ",mufs"); if (affs_test_opt(sbi->s_flags, SF_NO_TRUNCATE)) seq_puts(m, ",nofilenametruncate"); if 
(affs_test_opt(sbi->s_flags, SF_PREFIX)) seq_printf(m, ",prefix=%s", sbi->s_prefix); if (affs_test_opt(sbi->s_flags, SF_IMMUTABLE)) seq_puts(m, ",protect"); if (sbi->s_reserved != 2) seq_printf(m, ",reserved=%u", sbi->s_reserved); if (sbi->s_root_block != (sbi->s_reserved + sbi->s_partition_size - 1) / 2) seq_printf(m, ",root=%u", sbi->s_root_block); if (affs_test_opt(sbi->s_flags, SF_SETGID)) seq_printf(m, ",setgid=%u", from_kgid_munged(&init_user_ns, sbi->s_gid)); if (affs_test_opt(sbi->s_flags, SF_SETUID)) seq_printf(m, ",setuid=%u", from_kuid_munged(&init_user_ns, sbi->s_uid)); if (affs_test_opt(sbi->s_flags, SF_VERBOSE)) seq_puts(m, ",verbose"); if (sbi->s_volume[0]) seq_printf(m, ",volume=%s", sbi->s_volume); return 0; } /* This function definitely needs to be split up. Some fine day I'll * hopefully have the guts to do so. Until then: sorry for the mess. */ static int affs_fill_super(struct super_block *sb, void *data, int silent) { struct affs_sb_info *sbi; struct buffer_head *root_bh = NULL; struct buffer_head *boot_bh; struct inode *root_inode = NULL; s32 root_block; int size, blocksize; u32 chksum; int num_bm; int i, j; kuid_t uid; kgid_t gid; int reserved; unsigned long mount_flags; int tmp_flags; /* fix remount prototype... */ u8 sig[4]; int ret; pr_debug("read_super(%s)\n", data ? (const char *)data : "no options"); sb->s_magic = AFFS_SUPER_MAGIC; sb->s_op = &affs_sops; sb->s_flags |= SB_NODIRATIME; sb->s_time_gran = NSEC_PER_SEC; sb->s_time_min = sys_tz.tz_minuteswest * 60 + AFFS_EPOCH_DELTA; sb->s_time_max = 86400LL * U32_MAX + 86400 + sb->s_time_min; sbi = kzalloc(sizeof(struct affs_sb_info), GFP_KERNEL); if (!sbi) return -ENOMEM; sb->s_fs_info = sbi; sbi->sb = sb; mutex_init(&sbi->s_bmlock); spin_lock_init(&sbi->symlink_lock); spin_lock_init(&sbi->work_lock); INIT_DELAYED_WORK(&sbi->sb_work, flush_superblock); if (!parse_options(data,&uid,&gid,&i,&reserved,&root_block, &blocksize,&sbi->s_prefix, sbi->s_volume, &mount_flags)) { pr_err("Error parsing options\n"); return -EINVAL; } /* N.B. after this point s_prefix must be released */ sbi->s_flags = mount_flags; sbi->s_mode = i; sbi->s_uid = uid; sbi->s_gid = gid; sbi->s_reserved= reserved; /* Get the size of the device in 512-byte blocks. * If we later see that the partition uses bigger * blocks, we will have to change it. */ size = bdev_nr_sectors(sb->s_bdev); pr_debug("initial blocksize=%d, #blocks=%d\n", 512, size); affs_set_blocksize(sb, PAGE_SIZE); /* Try to find root block. Its location depends on the block size. */ i = bdev_logical_block_size(sb->s_bdev); j = PAGE_SIZE; if (blocksize > 0) { i = j = blocksize; size = size / (blocksize / 512); } for (blocksize = i; blocksize <= j; blocksize <<= 1, size >>= 1) { sbi->s_root_block = root_block; if (root_block < 0) sbi->s_root_block = (reserved + size - 1) / 2; pr_debug("setting blocksize to %d\n", blocksize); affs_set_blocksize(sb, blocksize); sbi->s_partition_size = size; /* The root block location that was calculated above is not * correct if the partition size is an odd number of 512- * byte blocks, which will be rounded down to a number of * 1024-byte blocks, and if there were an even number of * reserved blocks. Ideally, all partition checkers should * report the real number of blocks of the real blocksize, * but since this just cannot be done, we have to try to * find the root block anyways. In the above case, it is one * block behind the calculated one. So we check this one, too. 
*/ for (num_bm = 0; num_bm < 2; num_bm++) { pr_debug("Dev %s, trying root=%u, bs=%d, " "size=%d, reserved=%d\n", sb->s_id, sbi->s_root_block + num_bm, blocksize, size, reserved); root_bh = affs_bread(sb, sbi->s_root_block + num_bm); if (!root_bh) continue; if (!affs_checksum_block(sb, root_bh) && be32_to_cpu(AFFS_ROOT_HEAD(root_bh)->ptype) == T_SHORT && be32_to_cpu(AFFS_ROOT_TAIL(sb, root_bh)->stype) == ST_ROOT) { sbi->s_hashsize = blocksize / 4 - 56; sbi->s_root_block += num_bm; goto got_root; } affs_brelse(root_bh); root_bh = NULL; } } if (!silent) pr_err("No valid root block on device %s\n", sb->s_id); return -EINVAL; /* N.B. after this point bh must be released */ got_root: /* Keep super block in cache */ sbi->s_root_bh = root_bh; root_block = sbi->s_root_block; /* Find out which kind of FS we have */ boot_bh = sb_bread(sb, 0); if (!boot_bh) { pr_err("Cannot read boot block\n"); return -EINVAL; } memcpy(sig, boot_bh->b_data, 4); brelse(boot_bh); chksum = be32_to_cpu(*(__be32 *)sig); /* Dircache filesystems are compatible with non-dircache ones * when reading. As long as they aren't supported, writing is * not recommended. */ if ((chksum == FS_DCFFS || chksum == MUFS_DCFFS || chksum == FS_DCOFS || chksum == MUFS_DCOFS) && !sb_rdonly(sb)) { pr_notice("Dircache FS - mounting %s read only\n", sb->s_id); sb->s_flags |= SB_RDONLY; } switch (chksum) { case MUFS_FS: case MUFS_INTLFFS: case MUFS_DCFFS: affs_set_opt(sbi->s_flags, SF_MUFS); fallthrough; case FS_INTLFFS: case FS_DCFFS: affs_set_opt(sbi->s_flags, SF_INTL); break; case MUFS_FFS: affs_set_opt(sbi->s_flags, SF_MUFS); break; case FS_FFS: break; case MUFS_OFS: affs_set_opt(sbi->s_flags, SF_MUFS); fallthrough; case FS_OFS: affs_set_opt(sbi->s_flags, SF_OFS); sb->s_flags |= SB_NOEXEC; break; case MUFS_DCOFS: case MUFS_INTLOFS: affs_set_opt(sbi->s_flags, SF_MUFS); fallthrough; case FS_DCOFS: case FS_INTLOFS: affs_set_opt(sbi->s_flags, SF_INTL); affs_set_opt(sbi->s_flags, SF_OFS); sb->s_flags |= SB_NOEXEC; break; default: pr_err("Unknown filesystem on device %s: %08X\n", sb->s_id, chksum); return -EINVAL; } if (affs_test_opt(mount_flags, SF_VERBOSE)) { u8 len = AFFS_ROOT_TAIL(sb, root_bh)->disk_name[0]; pr_notice("Mounting volume \"%.*s\": Type=%.3s\\%c, Blocksize=%d\n", len > 31 ? 
31 : len, AFFS_ROOT_TAIL(sb, root_bh)->disk_name + 1, sig, sig[3] + '0', blocksize); } sb->s_flags |= SB_NODEV | SB_NOSUID; sbi->s_data_blksize = sb->s_blocksize; if (affs_test_opt(sbi->s_flags, SF_OFS)) sbi->s_data_blksize -= 24; tmp_flags = sb->s_flags; ret = affs_init_bitmap(sb, &tmp_flags); if (ret) return ret; sb->s_flags = tmp_flags; /* set up enough so that it can read an inode */ root_inode = affs_iget(sb, root_block); if (IS_ERR(root_inode)) return PTR_ERR(root_inode); if (affs_test_opt(AFFS_SB(sb)->s_flags, SF_INTL)) sb->s_d_op = &affs_intl_dentry_operations; else sb->s_d_op = &affs_dentry_operations; sb->s_root = d_make_root(root_inode); if (!sb->s_root) { pr_err("AFFS: Get root inode failed\n"); return -ENOMEM; } sb->s_export_op = &affs_export_ops; pr_debug("s_flags=%lX\n", sb->s_flags); return 0; } static int affs_remount(struct super_block *sb, int *flags, char *data) { struct affs_sb_info *sbi = AFFS_SB(sb); int blocksize; kuid_t uid; kgid_t gid; int mode; int reserved; int root_block; unsigned long mount_flags; int res = 0; char volume[32]; char *prefix = NULL; pr_debug("%s(flags=0x%x,opts=\"%s\")\n", __func__, *flags, data); sync_filesystem(sb); *flags |= SB_NODIRATIME; memcpy(volume, sbi->s_volume, 32); if (!parse_options(data, &uid, &gid, &mode, &reserved, &root_block, &blocksize, &prefix, volume, &mount_flags)) { kfree(prefix); return -EINVAL; } flush_delayed_work(&sbi->sb_work); sbi->s_flags = mount_flags; sbi->s_mode = mode; sbi->s_uid = uid; sbi->s_gid = gid; /* protect against readers */ spin_lock(&sbi->symlink_lock); if (prefix) { kfree(sbi->s_prefix); sbi->s_prefix = prefix; } memcpy(sbi->s_volume, volume, 32); spin_unlock(&sbi->symlink_lock); if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb)) return 0; if (*flags & SB_RDONLY) affs_free_bitmap(sb); else res = affs_init_bitmap(sb, flags); return res; } static int affs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; int free; u64 id = huge_encode_dev(sb->s_bdev->bd_dev); pr_debug("%s() partsize=%d, reserved=%d\n", __func__, AFFS_SB(sb)->s_partition_size, AFFS_SB(sb)->s_reserved); free = affs_count_free_blocks(sb); buf->f_type = AFFS_SUPER_MAGIC; buf->f_bsize = sb->s_blocksize; buf->f_blocks = AFFS_SB(sb)->s_partition_size - AFFS_SB(sb)->s_reserved; buf->f_bfree = free; buf->f_bavail = free; buf->f_fsid = u64_to_fsid(id); buf->f_namelen = AFFSNAMEMAX; return 0; } static struct dentry *affs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_bdev(fs_type, flags, dev_name, data, affs_fill_super); } static void affs_kill_sb(struct super_block *sb) { struct affs_sb_info *sbi = AFFS_SB(sb); kill_block_super(sb); if (sbi) { affs_free_bitmap(sb); affs_brelse(sbi->s_root_bh); kfree(sbi->s_prefix); mutex_destroy(&sbi->s_bmlock); kfree(sbi); } } static struct file_system_type affs_fs_type = { .owner = THIS_MODULE, .name = "affs", .mount = affs_mount, .kill_sb = affs_kill_sb, .fs_flags = FS_REQUIRES_DEV, }; MODULE_ALIAS_FS("affs"); static int __init init_affs_fs(void) { int err = init_inodecache(); if (err) goto out1; err = register_filesystem(&affs_fs_type); if (err) goto out; return 0; out: destroy_inodecache(); out1: return err; } static void __exit exit_affs_fs(void) { unregister_filesystem(&affs_fs_type); destroy_inodecache(); } MODULE_DESCRIPTION("Amiga filesystem support for Linux"); MODULE_LICENSE("GPL"); module_init(init_affs_fs) module_exit(exit_affs_fs)
linux-master
fs/affs/super.c
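affs_fill_super() above guesses the root block as the middle of the partition expressed in filesystem blocks, then probes that block and the next one because an odd sector count rounds down when rescaled to larger blocks. A hedged userspace sketch of that arithmetic, with no kernel structures involved (the helper name is invented):

/* Sketch of the root-block guess used by affs_fill_super() above. */
#include <stdint.h>
#include <stdio.h>

/* nr_sectors: partition size in 512-byte sectors
 * reserved:   reserved blocks at the start (AFFS default is 2)
 * blocksize:  filesystem block size in bytes (512..4096, power of two) */
static uint32_t affs_guess_root(uint32_t nr_sectors, uint32_t reserved,
				uint32_t blocksize)
{
	uint32_t size = nr_sectors / (blocksize / 512);	/* size in fs blocks */

	return (reserved + size - 1) / 2;
}

int main(void)
{
	/* e.g. an 880 KiB Amiga floppy: 1760 sectors, 512-byte blocks */
	uint32_t root = affs_guess_root(1760, 2, 512);

	/* The mount code tries "root" and "root + 1", as described in the
	 * comment above affs_fill_super()'s probe loop. */
	printf("probe root blocks %u and %u\n", root, root + 1);
	return 0;
}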
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/affs/bitmap.c * * (c) 1996 Hans-Joachim Widmaier * * bitmap.c contains the code that handles all bitmap related stuff - * block allocation, deallocation, calculation of free space. */ #include <linux/slab.h> #include "affs.h" u32 affs_count_free_blocks(struct super_block *sb) { struct affs_bm_info *bm; u32 free; int i; pr_debug("%s()\n", __func__); if (sb_rdonly(sb)) return 0; mutex_lock(&AFFS_SB(sb)->s_bmlock); bm = AFFS_SB(sb)->s_bitmap; free = 0; for (i = AFFS_SB(sb)->s_bmap_count; i > 0; bm++, i--) free += bm->bm_free; mutex_unlock(&AFFS_SB(sb)->s_bmlock); return free; } void affs_free_block(struct super_block *sb, u32 block) { struct affs_sb_info *sbi = AFFS_SB(sb); struct affs_bm_info *bm; struct buffer_head *bh; u32 blk, bmap, bit, mask, tmp; __be32 *data; pr_debug("%s(%u)\n", __func__, block); if (block > sbi->s_partition_size) goto err_range; blk = block - sbi->s_reserved; bmap = blk / sbi->s_bmap_bits; bit = blk % sbi->s_bmap_bits; bm = &sbi->s_bitmap[bmap]; mutex_lock(&sbi->s_bmlock); bh = sbi->s_bmap_bh; if (sbi->s_last_bmap != bmap) { affs_brelse(bh); bh = affs_bread(sb, bm->bm_key); if (!bh) goto err_bh_read; sbi->s_bmap_bh = bh; sbi->s_last_bmap = bmap; } mask = 1 << (bit & 31); data = (__be32 *)bh->b_data + bit / 32 + 1; /* mark block free */ tmp = be32_to_cpu(*data); if (tmp & mask) goto err_free; *data = cpu_to_be32(tmp | mask); /* fix checksum */ tmp = be32_to_cpu(*(__be32 *)bh->b_data); *(__be32 *)bh->b_data = cpu_to_be32(tmp - mask); mark_buffer_dirty(bh); affs_mark_sb_dirty(sb); bm->bm_free++; mutex_unlock(&sbi->s_bmlock); return; err_free: affs_warning(sb,"affs_free_block","Trying to free block %u which is already free", block); mutex_unlock(&sbi->s_bmlock); return; err_bh_read: affs_error(sb,"affs_free_block","Cannot read bitmap block %u", bm->bm_key); sbi->s_bmap_bh = NULL; sbi->s_last_bmap = ~0; mutex_unlock(&sbi->s_bmlock); return; err_range: affs_error(sb, "affs_free_block","Block %u outside partition", block); } /* * Allocate a block in the given allocation zone. * Since we have to byte-swap the bitmap on little-endian * machines, this is rather expensive. Therefore we will * preallocate up to 16 blocks from the same word, if * possible. We are not doing preallocations in the * header zone, though. 
*/ u32 affs_alloc_block(struct inode *inode, u32 goal) { struct super_block *sb; struct affs_sb_info *sbi; struct affs_bm_info *bm; struct buffer_head *bh; __be32 *data, *enddata; u32 blk, bmap, bit, mask, mask2, tmp; int i; sb = inode->i_sb; sbi = AFFS_SB(sb); pr_debug("balloc(inode=%lu,goal=%u): ", inode->i_ino, goal); if (AFFS_I(inode)->i_pa_cnt) { pr_debug("%d\n", AFFS_I(inode)->i_lastalloc+1); AFFS_I(inode)->i_pa_cnt--; return ++AFFS_I(inode)->i_lastalloc; } if (!goal || goal > sbi->s_partition_size) { if (goal) affs_warning(sb, "affs_balloc", "invalid goal %d", goal); //if (!AFFS_I(inode)->i_last_block) // affs_warning(sb, "affs_balloc", "no last alloc block"); goal = sbi->s_reserved; } blk = goal - sbi->s_reserved; bmap = blk / sbi->s_bmap_bits; bm = &sbi->s_bitmap[bmap]; mutex_lock(&sbi->s_bmlock); if (bm->bm_free) goto find_bmap_bit; find_bmap: /* search for the next bmap buffer with free bits */ i = sbi->s_bmap_count; do { if (--i < 0) goto err_full; bmap++; bm++; if (bmap < sbi->s_bmap_count) continue; /* restart search at zero */ bmap = 0; bm = sbi->s_bitmap; } while (!bm->bm_free); blk = bmap * sbi->s_bmap_bits; find_bmap_bit: bh = sbi->s_bmap_bh; if (sbi->s_last_bmap != bmap) { affs_brelse(bh); bh = affs_bread(sb, bm->bm_key); if (!bh) goto err_bh_read; sbi->s_bmap_bh = bh; sbi->s_last_bmap = bmap; } /* find an unused block in this bitmap block */ bit = blk % sbi->s_bmap_bits; data = (__be32 *)bh->b_data + bit / 32 + 1; enddata = (__be32 *)((u8 *)bh->b_data + sb->s_blocksize); mask = ~0UL << (bit & 31); blk &= ~31UL; tmp = be32_to_cpu(*data); if (tmp & mask) goto find_bit; /* scan the rest of the buffer */ do { blk += 32; if (++data >= enddata) /* didn't find something, can only happen * if scan didn't start at 0, try next bmap */ goto find_bmap; } while (!*data); tmp = be32_to_cpu(*data); mask = ~0; find_bit: /* finally look for a free bit in the word */ bit = ffs(tmp & mask) - 1; blk += bit + sbi->s_reserved; mask2 = mask = 1 << (bit & 31); AFFS_I(inode)->i_lastalloc = blk; /* prealloc as much as possible within this word */ while ((mask2 <<= 1)) { if (!(tmp & mask2)) break; AFFS_I(inode)->i_pa_cnt++; mask |= mask2; } bm->bm_free -= AFFS_I(inode)->i_pa_cnt + 1; *data = cpu_to_be32(tmp & ~mask); /* fix checksum */ tmp = be32_to_cpu(*(__be32 *)bh->b_data); *(__be32 *)bh->b_data = cpu_to_be32(tmp + mask); mark_buffer_dirty(bh); affs_mark_sb_dirty(sb); mutex_unlock(&sbi->s_bmlock); pr_debug("%d\n", blk); return blk; err_bh_read: affs_error(sb,"affs_read_block","Cannot read bitmap block %u", bm->bm_key); sbi->s_bmap_bh = NULL; sbi->s_last_bmap = ~0; err_full: mutex_unlock(&sbi->s_bmlock); pr_debug("failed\n"); return 0; } int affs_init_bitmap(struct super_block *sb, int *flags) { struct affs_bm_info *bm; struct buffer_head *bmap_bh = NULL, *bh = NULL; __be32 *bmap_blk; u32 size, blk, end, offset, mask; int i, res = 0; struct affs_sb_info *sbi = AFFS_SB(sb); if (*flags & SB_RDONLY) return 0; if (!AFFS_ROOT_TAIL(sb, sbi->s_root_bh)->bm_flag) { pr_notice("Bitmap invalid - mounting %s read only\n", sb->s_id); *flags |= SB_RDONLY; return 0; } sbi->s_last_bmap = ~0; sbi->s_bmap_bh = NULL; sbi->s_bmap_bits = sb->s_blocksize * 8 - 32; sbi->s_bmap_count = (sbi->s_partition_size - sbi->s_reserved + sbi->s_bmap_bits - 1) / sbi->s_bmap_bits; size = sbi->s_bmap_count * sizeof(*bm); bm = sbi->s_bitmap = kzalloc(size, GFP_KERNEL); if (!sbi->s_bitmap) { pr_err("Bitmap allocation failed\n"); return -ENOMEM; } bmap_blk = (__be32 *)sbi->s_root_bh->b_data; blk = sb->s_blocksize / 4 - 49; end = blk 
+ 25; for (i = sbi->s_bmap_count; i > 0; bm++, i--) { affs_brelse(bh); bm->bm_key = be32_to_cpu(bmap_blk[blk]); bh = affs_bread(sb, bm->bm_key); if (!bh) { pr_err("Cannot read bitmap\n"); res = -EIO; goto out; } if (affs_checksum_block(sb, bh)) { pr_warn("Bitmap %u invalid - mounting %s read only.\n", bm->bm_key, sb->s_id); *flags |= SB_RDONLY; goto out; } pr_debug("read bitmap block %d: %d\n", blk, bm->bm_key); bm->bm_free = memweight(bh->b_data + 4, sb->s_blocksize - 4); /* Don't try read the extension if this is the last block, * but we also need the right bm pointer below */ if (++blk < end || i == 1) continue; if (bmap_bh) affs_brelse(bmap_bh); bmap_bh = affs_bread(sb, be32_to_cpu(bmap_blk[blk])); if (!bmap_bh) { pr_err("Cannot read bitmap extension\n"); res = -EIO; goto out; } bmap_blk = (__be32 *)bmap_bh->b_data; blk = 0; end = sb->s_blocksize / 4 - 1; } offset = (sbi->s_partition_size - sbi->s_reserved) % sbi->s_bmap_bits; mask = ~(0xFFFFFFFFU << (offset & 31)); pr_debug("last word: %d %d %d\n", offset, offset / 32 + 1, mask); offset = offset / 32 + 1; if (mask) { u32 old, new; /* Mark unused bits in the last word as allocated */ old = be32_to_cpu(((__be32 *)bh->b_data)[offset]); new = old & mask; //if (old != new) { ((__be32 *)bh->b_data)[offset] = cpu_to_be32(new); /* fix checksum */ //new -= old; //old = be32_to_cpu(*(__be32 *)bh->b_data); //*(__be32 *)bh->b_data = cpu_to_be32(old - new); //mark_buffer_dirty(bh); //} /* correct offset for the bitmap count below */ //offset++; } while (++offset < sb->s_blocksize / 4) ((__be32 *)bh->b_data)[offset] = 0; ((__be32 *)bh->b_data)[0] = 0; ((__be32 *)bh->b_data)[0] = cpu_to_be32(-affs_checksum_block(sb, bh)); mark_buffer_dirty(bh); /* recalculate bitmap count for last block */ bm--; bm->bm_free = memweight(bh->b_data + 4, sb->s_blocksize - 4); out: affs_brelse(bh); affs_brelse(bmap_bh); return res; } void affs_free_bitmap(struct super_block *sb) { struct affs_sb_info *sbi = AFFS_SB(sb); if (!sbi->s_bitmap) return; affs_brelse(sbi->s_bmap_bh); sbi->s_bmap_bh = NULL; sbi->s_last_bmap = ~0; kfree(sbi->s_bitmap); sbi->s_bitmap = NULL; }
linux-master
fs/affs/bitmap.c
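affs_free_block() and affs_alloc_block() above both start from the same coordinate math: a disk block maps to a bitmap block, a 32-bit word inside it (word 0 holds the checksum), and a bit mask, where each bitmap block covers blocksize * 8 - 32 blocks. A small sketch of just that mapping; the struct and function names here are made up for the example:

/* Sketch of the block -> bitmap coordinate math from fs/affs/bitmap.c above. */
#include <stdint.h>
#include <stdio.h>

struct bm_coord {
	uint32_t bmap;	/* which bitmap block */
	uint32_t word;	/* 32-bit word inside that block (word 0 = checksum) */
	uint32_t mask;	/* bit within the word; a set bit means "free" */
};

static struct bm_coord affs_bm_coord(uint32_t block, uint32_t reserved,
				     uint32_t blocksize)
{
	uint32_t bmap_bits = blocksize * 8 - 32;
	uint32_t blk = block - reserved;
	uint32_t bit = blk % bmap_bits;
	struct bm_coord c = {
		.bmap = blk / bmap_bits,
		.word = bit / 32 + 1,		/* skip the checksum word */
		.mask = 1u << (bit & 31),
	};

	return c;
}

int main(void)
{
	struct bm_coord c = affs_bm_coord(881, 2, 512);

	printf("bitmap %u, word %u, mask 0x%08x\n", c.bmap, c.word, c.mask);
	return 0;
}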
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/affs/dir.c * * (c) 1996 Hans-Joachim Widmaier - Rewritten * * (C) 1993 Ray Burr - Modified for Amiga FFS filesystem. * * (C) 1992 Eric Youngdale Modified for ISO 9660 filesystem. * * (C) 1991 Linus Torvalds - minix filesystem * * affs directory handling functions * */ #include <linux/iversion.h> #include "affs.h" static int affs_readdir(struct file *, struct dir_context *); const struct file_operations affs_dir_operations = { .read = generic_read_dir, .llseek = generic_file_llseek, .iterate_shared = affs_readdir, .fsync = affs_file_fsync, }; /* * directories can handle most operations... */ const struct inode_operations affs_dir_inode_operations = { .create = affs_create, .lookup = affs_lookup, .link = affs_link, .unlink = affs_unlink, .symlink = affs_symlink, .mkdir = affs_mkdir, .rmdir = affs_rmdir, .rename = affs_rename2, .setattr = affs_notify_change, }; static int affs_readdir(struct file *file, struct dir_context *ctx) { struct inode *inode = file_inode(file); struct super_block *sb = inode->i_sb; struct buffer_head *dir_bh = NULL; struct buffer_head *fh_bh = NULL; unsigned char *name; int namelen; u32 i; int hash_pos; int chain_pos; u32 ino; int error = 0; pr_debug("%s(ino=%lu,f_pos=%llx)\n", __func__, inode->i_ino, ctx->pos); if (ctx->pos < 2) { file->private_data = (void *)0; if (!dir_emit_dots(file, ctx)) return 0; } affs_lock_dir(inode); chain_pos = (ctx->pos - 2) & 0xffff; hash_pos = (ctx->pos - 2) >> 16; if (chain_pos == 0xffff) { affs_warning(sb, "readdir", "More than 65535 entries in chain"); chain_pos = 0; hash_pos++; ctx->pos = ((hash_pos << 16) | chain_pos) + 2; } dir_bh = affs_bread(sb, inode->i_ino); if (!dir_bh) goto out_unlock_dir; /* If the directory hasn't changed since the last call to readdir(), * we can jump directly to where we left off. */ ino = (u32)(long)file->private_data; if (ino && inode_eq_iversion(inode, file->f_version)) { pr_debug("readdir() left off=%d\n", ino); goto inside; } ino = be32_to_cpu(AFFS_HEAD(dir_bh)->table[hash_pos]); for (i = 0; ino && i < chain_pos; i++) { fh_bh = affs_bread(sb, ino); if (!fh_bh) { affs_error(sb, "readdir","Cannot read block %d", i); error = -EIO; goto out_brelse_dir; } ino = be32_to_cpu(AFFS_TAIL(sb, fh_bh)->hash_chain); affs_brelse(fh_bh); fh_bh = NULL; } if (ino) goto inside; hash_pos++; for (; hash_pos < AFFS_SB(sb)->s_hashsize; hash_pos++) { ino = be32_to_cpu(AFFS_HEAD(dir_bh)->table[hash_pos]); if (!ino) continue; ctx->pos = (hash_pos << 16) + 2; inside: do { fh_bh = affs_bread(sb, ino); if (!fh_bh) { affs_error(sb, "readdir", "Cannot read block %d", ino); break; } namelen = min(AFFS_TAIL(sb, fh_bh)->name[0], (u8)AFFSNAMEMAX); name = AFFS_TAIL(sb, fh_bh)->name + 1; pr_debug("readdir(): dir_emit(\"%.*s\", ino=%u), hash=%d, f_pos=%llx\n", namelen, name, ino, hash_pos, ctx->pos); if (!dir_emit(ctx, name, namelen, ino, DT_UNKNOWN)) goto done; ctx->pos++; ino = be32_to_cpu(AFFS_TAIL(sb, fh_bh)->hash_chain); affs_brelse(fh_bh); fh_bh = NULL; } while (ino); } done: file->f_version = inode_query_iversion(inode); file->private_data = (void *)(long)ino; affs_brelse(fh_bh); out_brelse_dir: affs_brelse(dir_bh); out_unlock_dir: affs_unlock_dir(inode); return error; }
linux-master
fs/affs/dir.c
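affs_readdir() above encodes its resume position by packing the hash-table slot into the upper bits of f_pos and the position within that slot's chain into the lower 16 bits, offset by 2 for the "." and ".." entries. A tiny sketch of that encoding (helper names invented for the example):

/* Sketch of the f_pos packing used by affs_readdir() above. */
#include <stdint.h>
#include <stdio.h>

static uint64_t affs_pos_encode(uint32_t hash_pos, uint32_t chain_pos)
{
	return (((uint64_t)hash_pos << 16) | (chain_pos & 0xffff)) + 2;
}

static void affs_pos_decode(uint64_t pos, uint32_t *hash_pos,
			    uint32_t *chain_pos)
{
	*chain_pos = (pos - 2) & 0xffff;
	*hash_pos = (pos - 2) >> 16;
}

int main(void)
{
	uint32_t h, c;

	affs_pos_decode(affs_pos_encode(5, 3), &h, &c);
	printf("hash slot %u, chain entry %u\n", h, c);	/* prints 5, 3 */
	return 0;
}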
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/affs/amigaffs.c * * (c) 1996 Hans-Joachim Widmaier - Rewritten * * (C) 1993 Ray Burr - Amiga FFS filesystem. * * Please send bug reports to: [email protected] */ #include <linux/math64.h> #include <linux/iversion.h> #include "affs.h" /* * Functions for accessing Amiga-FFS structures. */ /* Insert a header block bh into the directory dir * caller must hold AFFS_DIR->i_hash_lock! */ int affs_insert_hash(struct inode *dir, struct buffer_head *bh) { struct super_block *sb = dir->i_sb; struct buffer_head *dir_bh; u32 ino, hash_ino; int offset; ino = bh->b_blocknr; offset = affs_hash_name(sb, AFFS_TAIL(sb, bh)->name + 1, AFFS_TAIL(sb, bh)->name[0]); pr_debug("%s(dir=%lu, ino=%d)\n", __func__, dir->i_ino, ino); dir_bh = affs_bread(sb, dir->i_ino); if (!dir_bh) return -EIO; hash_ino = be32_to_cpu(AFFS_HEAD(dir_bh)->table[offset]); while (hash_ino) { affs_brelse(dir_bh); dir_bh = affs_bread(sb, hash_ino); if (!dir_bh) return -EIO; hash_ino = be32_to_cpu(AFFS_TAIL(sb, dir_bh)->hash_chain); } AFFS_TAIL(sb, bh)->parent = cpu_to_be32(dir->i_ino); AFFS_TAIL(sb, bh)->hash_chain = 0; affs_fix_checksum(sb, bh); if (dir->i_ino == dir_bh->b_blocknr) AFFS_HEAD(dir_bh)->table[offset] = cpu_to_be32(ino); else AFFS_TAIL(sb, dir_bh)->hash_chain = cpu_to_be32(ino); affs_adjust_checksum(dir_bh, ino); mark_buffer_dirty_inode(dir_bh, dir); affs_brelse(dir_bh); dir->i_mtime = inode_set_ctime_current(dir); inode_inc_iversion(dir); mark_inode_dirty(dir); return 0; } /* Remove a header block from its directory. * caller must hold AFFS_DIR->i_hash_lock! */ int affs_remove_hash(struct inode *dir, struct buffer_head *rem_bh) { struct super_block *sb; struct buffer_head *bh; u32 rem_ino, hash_ino; __be32 ino; int offset, retval; sb = dir->i_sb; rem_ino = rem_bh->b_blocknr; offset = affs_hash_name(sb, AFFS_TAIL(sb, rem_bh)->name+1, AFFS_TAIL(sb, rem_bh)->name[0]); pr_debug("%s(dir=%lu, ino=%d, hashval=%d)\n", __func__, dir->i_ino, rem_ino, offset); bh = affs_bread(sb, dir->i_ino); if (!bh) return -EIO; retval = -ENOENT; hash_ino = be32_to_cpu(AFFS_HEAD(bh)->table[offset]); while (hash_ino) { if (hash_ino == rem_ino) { ino = AFFS_TAIL(sb, rem_bh)->hash_chain; if (dir->i_ino == bh->b_blocknr) AFFS_HEAD(bh)->table[offset] = ino; else AFFS_TAIL(sb, bh)->hash_chain = ino; affs_adjust_checksum(bh, be32_to_cpu(ino) - hash_ino); mark_buffer_dirty_inode(bh, dir); AFFS_TAIL(sb, rem_bh)->parent = 0; retval = 0; break; } affs_brelse(bh); bh = affs_bread(sb, hash_ino); if (!bh) return -EIO; hash_ino = be32_to_cpu(AFFS_TAIL(sb, bh)->hash_chain); } affs_brelse(bh); dir->i_mtime = inode_set_ctime_current(dir); inode_inc_iversion(dir); mark_inode_dirty(dir); return retval; } static void affs_fix_dcache(struct inode *inode, u32 entry_ino) { struct dentry *dentry; spin_lock(&inode->i_lock); hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) { if (entry_ino == (u32)(long)dentry->d_fsdata) { dentry->d_fsdata = (void *)inode->i_ino; break; } } spin_unlock(&inode->i_lock); } /* Remove header from link chain */ static int affs_remove_link(struct dentry *dentry) { struct inode *dir, *inode = d_inode(dentry); struct super_block *sb = inode->i_sb; struct buffer_head *bh, *link_bh = NULL; u32 link_ino, ino; int retval; pr_debug("%s(key=%ld)\n", __func__, inode->i_ino); retval = -EIO; bh = affs_bread(sb, inode->i_ino); if (!bh) goto done; link_ino = (u32)(long)dentry->d_fsdata; if (inode->i_ino == link_ino) { /* we can't remove the head of the link, as its blocknr is still used as ino, * so we 
remove the block of the first link instead. */ link_ino = be32_to_cpu(AFFS_TAIL(sb, bh)->link_chain); link_bh = affs_bread(sb, link_ino); if (!link_bh) goto done; dir = affs_iget(sb, be32_to_cpu(AFFS_TAIL(sb, link_bh)->parent)); if (IS_ERR(dir)) { retval = PTR_ERR(dir); goto done; } affs_lock_dir(dir); /* * if there's a dentry for that block, make it * refer to inode itself. */ affs_fix_dcache(inode, link_ino); retval = affs_remove_hash(dir, link_bh); if (retval) { affs_unlock_dir(dir); goto done; } mark_buffer_dirty_inode(link_bh, inode); memcpy(AFFS_TAIL(sb, bh)->name, AFFS_TAIL(sb, link_bh)->name, 32); retval = affs_insert_hash(dir, bh); if (retval) { affs_unlock_dir(dir); goto done; } mark_buffer_dirty_inode(bh, inode); affs_unlock_dir(dir); iput(dir); } else { link_bh = affs_bread(sb, link_ino); if (!link_bh) goto done; } while ((ino = be32_to_cpu(AFFS_TAIL(sb, bh)->link_chain)) != 0) { if (ino == link_ino) { __be32 ino2 = AFFS_TAIL(sb, link_bh)->link_chain; AFFS_TAIL(sb, bh)->link_chain = ino2; affs_adjust_checksum(bh, be32_to_cpu(ino2) - link_ino); mark_buffer_dirty_inode(bh, inode); retval = 0; /* Fix the link count, if bh is a normal header block without links */ switch (be32_to_cpu(AFFS_TAIL(sb, bh)->stype)) { case ST_LINKDIR: case ST_LINKFILE: break; default: if (!AFFS_TAIL(sb, bh)->link_chain) set_nlink(inode, 1); } affs_free_block(sb, link_ino); goto done; } affs_brelse(bh); bh = affs_bread(sb, ino); if (!bh) goto done; } retval = -ENOENT; done: affs_brelse(link_bh); affs_brelse(bh); return retval; } static int affs_empty_dir(struct inode *inode) { struct super_block *sb = inode->i_sb; struct buffer_head *bh; int retval, size; retval = -EIO; bh = affs_bread(sb, inode->i_ino); if (!bh) goto done; retval = -ENOTEMPTY; for (size = AFFS_SB(sb)->s_hashsize - 1; size >= 0; size--) if (AFFS_HEAD(bh)->table[size]) goto not_empty; retval = 0; not_empty: affs_brelse(bh); done: return retval; } /* Remove a filesystem object. If the object to be removed has * links to it, one of the links must be changed to inherit * the file or directory. As above, any inode will do. * The buffer will not be freed. If the header is a link, the * block will be marked as free. * This function returns a negative error number in case of * an error, else 0 if the inode is to be deleted or 1 if not. 
*/ int affs_remove_header(struct dentry *dentry) { struct super_block *sb; struct inode *inode, *dir; struct buffer_head *bh = NULL; int retval; dir = d_inode(dentry->d_parent); sb = dir->i_sb; retval = -ENOENT; inode = d_inode(dentry); if (!inode) goto done; pr_debug("%s(key=%ld)\n", __func__, inode->i_ino); retval = -EIO; bh = affs_bread(sb, (u32)(long)dentry->d_fsdata); if (!bh) goto done; affs_lock_link(inode); affs_lock_dir(dir); switch (be32_to_cpu(AFFS_TAIL(sb, bh)->stype)) { case ST_USERDIR: /* if we ever want to support links to dirs * i_hash_lock of the inode must only be * taken after some checks */ affs_lock_dir(inode); retval = affs_empty_dir(inode); affs_unlock_dir(inode); if (retval) goto done_unlock; break; default: break; } retval = affs_remove_hash(dir, bh); if (retval) goto done_unlock; mark_buffer_dirty_inode(bh, inode); affs_unlock_dir(dir); if (inode->i_nlink > 1) retval = affs_remove_link(dentry); else clear_nlink(inode); affs_unlock_link(inode); inode_set_ctime_current(inode); mark_inode_dirty(inode); done: affs_brelse(bh); return retval; done_unlock: affs_unlock_dir(dir); affs_unlock_link(inode); goto done; } /* Checksum a block, do various consistency checks and optionally return the blocks type number. DATA points to the block. If their pointers are non-null, *PTYPE and *STYPE are set to the primary and secondary block types respectively, *HASHSIZE is set to the size of the hashtable (which lets us calculate the block size). Returns non-zero if the block is not consistent. */ u32 affs_checksum_block(struct super_block *sb, struct buffer_head *bh) { __be32 *ptr = (__be32 *)bh->b_data; u32 sum; int bsize; sum = 0; for (bsize = sb->s_blocksize / sizeof(__be32); bsize > 0; bsize--) sum += be32_to_cpu(*ptr++); return sum; } /* * Calculate the checksum of a disk block and store it * at the indicated position. */ void affs_fix_checksum(struct super_block *sb, struct buffer_head *bh) { int cnt = sb->s_blocksize / sizeof(__be32); __be32 *ptr = (__be32 *)bh->b_data; u32 checksum; __be32 *checksumptr; checksumptr = ptr + 5; *checksumptr = 0; for (checksum = 0; cnt > 0; ptr++, cnt--) checksum += be32_to_cpu(*ptr); *checksumptr = cpu_to_be32(-checksum); } void affs_secs_to_datestamp(time64_t secs, struct affs_date *ds) { u32 days; u32 minute; s32 rem; secs -= sys_tz.tz_minuteswest * 60 + AFFS_EPOCH_DELTA; if (secs < 0) secs = 0; days = div_s64_rem(secs, 86400, &rem); minute = rem / 60; rem -= minute * 60; ds->days = cpu_to_be32(days); ds->mins = cpu_to_be32(minute); ds->ticks = cpu_to_be32(rem * 50); } umode_t affs_prot_to_mode(u32 prot) { umode_t mode = 0; if (!(prot & FIBF_NOWRITE)) mode |= 0200; if (!(prot & FIBF_NOREAD)) mode |= 0400; if (!(prot & FIBF_NOEXECUTE)) mode |= 0100; if (prot & FIBF_GRP_WRITE) mode |= 0020; if (prot & FIBF_GRP_READ) mode |= 0040; if (prot & FIBF_GRP_EXECUTE) mode |= 0010; if (prot & FIBF_OTR_WRITE) mode |= 0002; if (prot & FIBF_OTR_READ) mode |= 0004; if (prot & FIBF_OTR_EXECUTE) mode |= 0001; return mode; } void affs_mode_to_prot(struct inode *inode) { u32 prot = AFFS_I(inode)->i_protect; umode_t mode = inode->i_mode; /* * First, clear all RWED bits for owner, group, other. * Then, recalculate them afresh. * * We'll always clear the delete-inhibit bit for the owner, as that is * the classic single-user mode AmigaOS protection bit and we need to * stay compatible with all scenarios. * * Since multi-user AmigaOS is an extension, we'll only set the * delete-allow bit if any of the other bits in the same user class * (group/other) are used. 
*/ prot &= ~(FIBF_NOEXECUTE | FIBF_NOREAD | FIBF_NOWRITE | FIBF_NODELETE | FIBF_GRP_EXECUTE | FIBF_GRP_READ | FIBF_GRP_WRITE | FIBF_GRP_DELETE | FIBF_OTR_EXECUTE | FIBF_OTR_READ | FIBF_OTR_WRITE | FIBF_OTR_DELETE); /* Classic single-user AmigaOS flags. These are inverted. */ if (!(mode & 0100)) prot |= FIBF_NOEXECUTE; if (!(mode & 0400)) prot |= FIBF_NOREAD; if (!(mode & 0200)) prot |= FIBF_NOWRITE; /* Multi-user extended flags. Not inverted. */ if (mode & 0010) prot |= FIBF_GRP_EXECUTE; if (mode & 0040) prot |= FIBF_GRP_READ; if (mode & 0020) prot |= FIBF_GRP_WRITE; if (mode & 0070) prot |= FIBF_GRP_DELETE; if (mode & 0001) prot |= FIBF_OTR_EXECUTE; if (mode & 0004) prot |= FIBF_OTR_READ; if (mode & 0002) prot |= FIBF_OTR_WRITE; if (mode & 0007) prot |= FIBF_OTR_DELETE; AFFS_I(inode)->i_protect = prot; } void affs_error(struct super_block *sb, const char *function, const char *fmt, ...) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; pr_crit("error (device %s): %s(): %pV\n", sb->s_id, function, &vaf); if (!sb_rdonly(sb)) pr_warn("Remounting filesystem read-only\n"); sb->s_flags |= SB_RDONLY; va_end(args); } void affs_warning(struct super_block *sb, const char *function, const char *fmt, ...) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; pr_warn("(device %s): %s(): %pV\n", sb->s_id, function, &vaf); va_end(args); } bool affs_nofilenametruncate(const struct dentry *dentry) { return affs_test_opt(AFFS_SB(dentry->d_sb)->s_flags, SF_NO_TRUNCATE); } /* Check if the name is valid for a affs object. */ int affs_check_name(const unsigned char *name, int len, bool notruncate) { int i; if (len > AFFSNAMEMAX) { if (notruncate) return -ENAMETOOLONG; len = AFFSNAMEMAX; } for (i = 0; i < len; i++) { if (name[i] < ' ' || name[i] == ':' || (name[i] > 0x7e && name[i] < 0xa0)) return -EINVAL; } return 0; } /* This function copies name to bstr, with at most 30 * characters length. The bstr will be prepended by * a length byte. * NOTE: The name will must be already checked by * affs_check_name()! */ int affs_copy_name(unsigned char *bstr, struct dentry *dentry) { u32 len = min(dentry->d_name.len, AFFSNAMEMAX); *bstr++ = len; memcpy(bstr, dentry->d_name.name, len); return len; }
linux-master
fs/affs/amigaffs.c
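affs_checksum_block() and affs_fix_checksum() above rely on a simple invariant: the big-endian 32-bit words of a block sum to zero, and word 5 is the slot adjusted to make that true. A standalone userspace sketch of the same scheme, using ntohl()/htonl() for the byte swapping; block size and the word index follow the code above:

/* Sketch of the AFFS block checksum scheme from fs/affs/amigaffs.c above. */
#include <arpa/inet.h>		/* ntohl()/htonl() as be32 helpers */
#include <assert.h>
#include <stdint.h>
#include <string.h>

#define BLOCKSIZE	512
#define CHKSUM_WORD	5

static uint32_t affs_sum(const unsigned char *block)
{
	uint32_t sum = 0;
	unsigned int i;

	for (i = 0; i < BLOCKSIZE; i += 4) {
		uint32_t be;

		memcpy(&be, block + i, 4);
		sum += ntohl(be);
	}
	return sum;		/* 0 for a consistent block */
}

static void affs_fix(unsigned char *block)
{
	uint32_t sum, be;

	memset(block + CHKSUM_WORD * 4, 0, 4);	/* clear old checksum */
	sum = affs_sum(block);
	be = htonl((uint32_t)-sum);		/* store the negated sum */
	memcpy(block + CHKSUM_WORD * 4, &be, 4);
}

int main(void)
{
	unsigned char block[BLOCKSIZE] = { 0x00, 0x00, 0x00, 0x02 };	/* arbitrary first word */

	affs_fix(block);
	assert(affs_sum(block) == 0);
	return 0;
}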
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/affs/inode.c * * (c) 1996 Hans-Joachim Widmaier - Rewritten * * (C) 1993 Ray Burr - Modified for Amiga FFS filesystem. * * (C) 1992 Eric Youngdale Modified for ISO9660 filesystem. * * (C) 1991 Linus Torvalds - minix filesystem */ #include <linux/sched.h> #include <linux/cred.h> #include <linux/gfp.h> #include "affs.h" struct inode *affs_iget(struct super_block *sb, unsigned long ino) { struct affs_sb_info *sbi = AFFS_SB(sb); struct buffer_head *bh; struct affs_tail *tail; struct inode *inode; u32 block; u32 size; u32 prot; u16 id; inode = iget_locked(sb, ino); if (!inode) return ERR_PTR(-ENOMEM); if (!(inode->i_state & I_NEW)) return inode; pr_debug("affs_iget(%lu)\n", inode->i_ino); block = inode->i_ino; bh = affs_bread(sb, block); if (!bh) { affs_warning(sb, "read_inode", "Cannot read block %d", block); goto bad_inode; } if (affs_checksum_block(sb, bh) || be32_to_cpu(AFFS_HEAD(bh)->ptype) != T_SHORT) { affs_warning(sb,"read_inode", "Checksum or type (ptype=%d) error on inode %d", AFFS_HEAD(bh)->ptype, block); goto bad_inode; } tail = AFFS_TAIL(sb, bh); prot = be32_to_cpu(tail->protect); inode->i_size = 0; set_nlink(inode, 1); inode->i_mode = 0; AFFS_I(inode)->i_extcnt = 1; AFFS_I(inode)->i_ext_last = ~1; AFFS_I(inode)->i_protect = prot; atomic_set(&AFFS_I(inode)->i_opencnt, 0); AFFS_I(inode)->i_blkcnt = 0; AFFS_I(inode)->i_lc = NULL; AFFS_I(inode)->i_lc_size = 0; AFFS_I(inode)->i_lc_shift = 0; AFFS_I(inode)->i_lc_mask = 0; AFFS_I(inode)->i_ac = NULL; AFFS_I(inode)->i_ext_bh = NULL; AFFS_I(inode)->mmu_private = 0; AFFS_I(inode)->i_lastalloc = 0; AFFS_I(inode)->i_pa_cnt = 0; if (affs_test_opt(sbi->s_flags, SF_SETMODE)) inode->i_mode = sbi->s_mode; else inode->i_mode = affs_prot_to_mode(prot); id = be16_to_cpu(tail->uid); if (id == 0 || affs_test_opt(sbi->s_flags, SF_SETUID)) inode->i_uid = sbi->s_uid; else if (id == 0xFFFF && affs_test_opt(sbi->s_flags, SF_MUFS)) i_uid_write(inode, 0); else i_uid_write(inode, id); id = be16_to_cpu(tail->gid); if (id == 0 || affs_test_opt(sbi->s_flags, SF_SETGID)) inode->i_gid = sbi->s_gid; else if (id == 0xFFFF && affs_test_opt(sbi->s_flags, SF_MUFS)) i_gid_write(inode, 0); else i_gid_write(inode, id); switch (be32_to_cpu(tail->stype)) { case ST_ROOT: inode->i_uid = sbi->s_uid; inode->i_gid = sbi->s_gid; fallthrough; case ST_USERDIR: if (be32_to_cpu(tail->stype) == ST_USERDIR || affs_test_opt(sbi->s_flags, SF_SETMODE)) { if (inode->i_mode & S_IRUSR) inode->i_mode |= S_IXUSR; if (inode->i_mode & S_IRGRP) inode->i_mode |= S_IXGRP; if (inode->i_mode & S_IROTH) inode->i_mode |= S_IXOTH; inode->i_mode |= S_IFDIR; } else inode->i_mode = S_IRUGO | S_IXUGO | S_IWUSR | S_IFDIR; /* Maybe it should be controlled by mount parameter? */ //inode->i_mode |= S_ISVTX; inode->i_op = &affs_dir_inode_operations; inode->i_fop = &affs_dir_operations; break; case ST_LINKDIR: #if 0 affs_warning(sb, "read_inode", "inode is LINKDIR"); goto bad_inode; #else inode->i_mode |= S_IFDIR; /* ... and leave ->i_op and ->i_fop pointing to empty */ break; #endif case ST_LINKFILE: affs_warning(sb, "read_inode", "inode is LINKFILE"); goto bad_inode; case ST_FILE: size = be32_to_cpu(tail->size); inode->i_mode |= S_IFREG; AFFS_I(inode)->mmu_private = inode->i_size = size; if (inode->i_size) { AFFS_I(inode)->i_blkcnt = (size - 1) / sbi->s_data_blksize + 1; AFFS_I(inode)->i_extcnt = (AFFS_I(inode)->i_blkcnt - 1) / sbi->s_hashsize + 1; } if (tail->link_chain) set_nlink(inode, 2); inode->i_mapping->a_ops = affs_test_opt(sbi->s_flags, SF_OFS) ? 
&affs_aops_ofs : &affs_aops; inode->i_op = &affs_file_inode_operations; inode->i_fop = &affs_file_operations; break; case ST_SOFTLINK: inode->i_size = strlen((char *)AFFS_HEAD(bh)->table); inode->i_mode |= S_IFLNK; inode_nohighmem(inode); inode->i_op = &affs_symlink_inode_operations; inode->i_data.a_ops = &affs_symlink_aops; break; } inode->i_mtime.tv_sec = inode->i_atime.tv_sec = inode_set_ctime(inode, (be32_to_cpu(tail->change.days) * 86400LL + be32_to_cpu(tail->change.mins) * 60 + be32_to_cpu(tail->change.ticks) / 50 + AFFS_EPOCH_DELTA) + sys_tz.tz_minuteswest * 60, 0).tv_sec; inode->i_mtime.tv_nsec = inode->i_atime.tv_nsec = 0; affs_brelse(bh); unlock_new_inode(inode); return inode; bad_inode: affs_brelse(bh); iget_failed(inode); return ERR_PTR(-EIO); } int affs_write_inode(struct inode *inode, struct writeback_control *wbc) { struct super_block *sb = inode->i_sb; struct buffer_head *bh; struct affs_tail *tail; uid_t uid; gid_t gid; pr_debug("write_inode(%lu)\n", inode->i_ino); if (!inode->i_nlink) // possibly free block return 0; bh = affs_bread(sb, inode->i_ino); if (!bh) { affs_error(sb,"write_inode","Cannot read block %lu",inode->i_ino); return -EIO; } tail = AFFS_TAIL(sb, bh); if (tail->stype == cpu_to_be32(ST_ROOT)) { affs_secs_to_datestamp(inode->i_mtime.tv_sec, &AFFS_ROOT_TAIL(sb, bh)->root_change); } else { tail->protect = cpu_to_be32(AFFS_I(inode)->i_protect); tail->size = cpu_to_be32(inode->i_size); affs_secs_to_datestamp(inode->i_mtime.tv_sec, &tail->change); if (!(inode->i_ino == AFFS_SB(sb)->s_root_block)) { uid = i_uid_read(inode); gid = i_gid_read(inode); if (affs_test_opt(AFFS_SB(sb)->s_flags, SF_MUFS)) { if (uid == 0 || uid == 0xFFFF) uid = uid ^ ~0; if (gid == 0 || gid == 0xFFFF) gid = gid ^ ~0; } if (!affs_test_opt(AFFS_SB(sb)->s_flags, SF_SETUID)) tail->uid = cpu_to_be16(uid); if (!affs_test_opt(AFFS_SB(sb)->s_flags, SF_SETGID)) tail->gid = cpu_to_be16(gid); } } affs_fix_checksum(sb, bh); mark_buffer_dirty_inode(bh, inode); affs_brelse(bh); affs_free_prealloc(inode); return 0; } int affs_notify_change(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *attr) { struct inode *inode = d_inode(dentry); int error; pr_debug("notify_change(%lu,0x%x)\n", inode->i_ino, attr->ia_valid); error = setattr_prepare(&nop_mnt_idmap, dentry, attr); if (error) goto out; if (((attr->ia_valid & ATTR_UID) && affs_test_opt(AFFS_SB(inode->i_sb)->s_flags, SF_SETUID)) || ((attr->ia_valid & ATTR_GID) && affs_test_opt(AFFS_SB(inode->i_sb)->s_flags, SF_SETGID)) || ((attr->ia_valid & ATTR_MODE) && (AFFS_SB(inode->i_sb)->s_flags & (AFFS_MOUNT_SF_SETMODE | AFFS_MOUNT_SF_IMMUTABLE)))) { if (!affs_test_opt(AFFS_SB(inode->i_sb)->s_flags, SF_QUIET)) error = -EPERM; goto out; } if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size != i_size_read(inode)) { error = inode_newsize_ok(inode, attr->ia_size); if (error) return error; truncate_setsize(inode, attr->ia_size); affs_truncate(inode); } setattr_copy(&nop_mnt_idmap, inode, attr); mark_inode_dirty(inode); if (attr->ia_valid & ATTR_MODE) affs_mode_to_prot(inode); out: return error; } void affs_evict_inode(struct inode *inode) { unsigned long cache_page; pr_debug("evict_inode(ino=%lu, nlink=%u)\n", inode->i_ino, inode->i_nlink); truncate_inode_pages_final(&inode->i_data); if (!inode->i_nlink) { inode->i_size = 0; affs_truncate(inode); } invalidate_inode_buffers(inode); clear_inode(inode); affs_free_prealloc(inode); cache_page = (unsigned long)AFFS_I(inode)->i_lc; if (cache_page) { pr_debug("freeing ext cache\n"); AFFS_I(inode)->i_lc = NULL; 
AFFS_I(inode)->i_ac = NULL; free_page(cache_page); } affs_brelse(AFFS_I(inode)->i_ext_bh); AFFS_I(inode)->i_ext_last = ~1; AFFS_I(inode)->i_ext_bh = NULL; if (!inode->i_nlink) affs_free_block(inode->i_sb, inode->i_ino); } struct inode * affs_new_inode(struct inode *dir) { struct super_block *sb = dir->i_sb; struct inode *inode; u32 block; struct buffer_head *bh; if (!(inode = new_inode(sb))) goto err_inode; if (!(block = affs_alloc_block(dir, dir->i_ino))) goto err_block; bh = affs_getzeroblk(sb, block); if (!bh) goto err_bh; mark_buffer_dirty_inode(bh, inode); affs_brelse(bh); inode->i_uid = current_fsuid(); inode->i_gid = current_fsgid(); inode->i_ino = block; set_nlink(inode, 1); inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode); atomic_set(&AFFS_I(inode)->i_opencnt, 0); AFFS_I(inode)->i_blkcnt = 0; AFFS_I(inode)->i_lc = NULL; AFFS_I(inode)->i_lc_size = 0; AFFS_I(inode)->i_lc_shift = 0; AFFS_I(inode)->i_lc_mask = 0; AFFS_I(inode)->i_ac = NULL; AFFS_I(inode)->i_ext_bh = NULL; AFFS_I(inode)->mmu_private = 0; AFFS_I(inode)->i_protect = 0; AFFS_I(inode)->i_lastalloc = 0; AFFS_I(inode)->i_pa_cnt = 0; AFFS_I(inode)->i_extcnt = 1; AFFS_I(inode)->i_ext_last = ~1; insert_inode_hash(inode); return inode; err_bh: affs_free_block(sb, block); err_block: iput(inode); err_inode: return NULL; } /* * Add an entry to a directory. Create the header block * and insert it into the hash table. */ int affs_add_entry(struct inode *dir, struct inode *inode, struct dentry *dentry, s32 type) { struct super_block *sb = dir->i_sb; struct buffer_head *inode_bh = NULL; struct buffer_head *bh; u32 block = 0; int retval; pr_debug("%s(dir=%lu, inode=%lu, \"%pd\", type=%d)\n", __func__, dir->i_ino, inode->i_ino, dentry, type); retval = -EIO; bh = affs_bread(sb, inode->i_ino); if (!bh) goto done; affs_lock_link(inode); switch (type) { case ST_LINKFILE: case ST_LINKDIR: retval = -ENOSPC; block = affs_alloc_block(dir, dir->i_ino); if (!block) goto err; retval = -EIO; inode_bh = bh; bh = affs_getzeroblk(sb, block); if (!bh) goto err; break; default: break; } AFFS_HEAD(bh)->ptype = cpu_to_be32(T_SHORT); AFFS_HEAD(bh)->key = cpu_to_be32(bh->b_blocknr); affs_copy_name(AFFS_TAIL(sb, bh)->name, dentry); AFFS_TAIL(sb, bh)->stype = cpu_to_be32(type); AFFS_TAIL(sb, bh)->parent = cpu_to_be32(dir->i_ino); if (inode_bh) { __be32 chain; chain = AFFS_TAIL(sb, inode_bh)->link_chain; AFFS_TAIL(sb, bh)->original = cpu_to_be32(inode->i_ino); AFFS_TAIL(sb, bh)->link_chain = chain; AFFS_TAIL(sb, inode_bh)->link_chain = cpu_to_be32(block); affs_adjust_checksum(inode_bh, block - be32_to_cpu(chain)); mark_buffer_dirty_inode(inode_bh, inode); set_nlink(inode, 2); ihold(inode); } affs_fix_checksum(sb, bh); mark_buffer_dirty_inode(bh, inode); dentry->d_fsdata = (void *)(long)bh->b_blocknr; affs_lock_dir(dir); retval = affs_insert_hash(dir, bh); mark_buffer_dirty_inode(bh, inode); affs_unlock_dir(dir); affs_unlock_link(inode); d_instantiate(dentry, inode); done: affs_brelse(inode_bh); affs_brelse(bh); return retval; err: if (block) affs_free_block(sb, block); affs_unlock_link(inode); goto done; }
linux-master
fs/affs/inode.c
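affs_secs_to_datestamp() and the mtime calculation in affs_iget() above convert between Unix seconds and the AFFS days/minutes/ticks (1/50 s) representation, shifted by the timezone and the Amiga epoch. A round-trip sketch of that conversion, assuming the usual 1978-01-01 Amiga epoch (2922 days after the Unix epoch); struct and function names are invented for the example:

/* Sketch of the AFFS timestamp conversion mirrored by fs/affs/inode.c above. */
#include <stdint.h>
#include <stdio.h>

#define AFFS_EPOCH_DELTA ((8 * 365 + 2) * 24 * 60 * 60LL)	/* 1970 -> 1978 */

struct affs_datestamp {
	uint32_t days, mins, ticks;
};

static int64_t affs_datestamp_to_unix(const struct affs_datestamp *ds,
				      int tz_minuteswest)
{
	return (int64_t)ds->days * 86400 + ds->mins * 60 + ds->ticks / 50
		+ AFFS_EPOCH_DELTA + (int64_t)tz_minuteswest * 60;
}

static struct affs_datestamp affs_unix_to_datestamp(int64_t secs,
						    int tz_minuteswest)
{
	struct affs_datestamp ds;

	secs -= (int64_t)tz_minuteswest * 60 + AFFS_EPOCH_DELTA;
	if (secs < 0)
		secs = 0;
	ds.days = secs / 86400;
	secs %= 86400;
	ds.mins = secs / 60;
	ds.ticks = (secs % 60) * 50;
	return ds;
}

int main(void)
{
	struct affs_datestamp ds = affs_unix_to_datestamp(1700000000, 0);

	printf("days=%u mins=%u ticks=%u -> %lld\n", ds.days, ds.mins,
	       ds.ticks, (long long)affs_datestamp_to_unix(&ds, 0));
	return 0;
}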
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/affs/namei.c * * (c) 1996 Hans-Joachim Widmaier - Rewritten * * (C) 1993 Ray Burr - Modified for Amiga FFS filesystem. * * (C) 1991 Linus Torvalds - minix filesystem */ #include "affs.h" #include <linux/exportfs.h> typedef int (*toupper_t)(int); /* Simple toupper() for DOS\1 */ static int affs_toupper(int ch) { return ch >= 'a' && ch <= 'z' ? ch -= ('a' - 'A') : ch; } /* International toupper() for DOS\3 ("international") */ static int affs_intl_toupper(int ch) { return (ch >= 'a' && ch <= 'z') || (ch >= 0xE0 && ch <= 0xFE && ch != 0xF7) ? ch - ('a' - 'A') : ch; } static inline toupper_t affs_get_toupper(struct super_block *sb) { return affs_test_opt(AFFS_SB(sb)->s_flags, SF_INTL) ? affs_intl_toupper : affs_toupper; } /* * Note: the dentry argument is the parent dentry. */ static inline int __affs_hash_dentry(const struct dentry *dentry, struct qstr *qstr, toupper_t fn, bool notruncate) { const u8 *name = qstr->name; unsigned long hash; int retval; u32 len; retval = affs_check_name(qstr->name, qstr->len, notruncate); if (retval) return retval; hash = init_name_hash(dentry); len = min(qstr->len, AFFSNAMEMAX); for (; len > 0; name++, len--) hash = partial_name_hash(fn(*name), hash); qstr->hash = end_name_hash(hash); return 0; } static int affs_hash_dentry(const struct dentry *dentry, struct qstr *qstr) { return __affs_hash_dentry(dentry, qstr, affs_toupper, affs_nofilenametruncate(dentry)); } static int affs_intl_hash_dentry(const struct dentry *dentry, struct qstr *qstr) { return __affs_hash_dentry(dentry, qstr, affs_intl_toupper, affs_nofilenametruncate(dentry)); } static inline int __affs_compare_dentry(unsigned int len, const char *str, const struct qstr *name, toupper_t fn, bool notruncate) { const u8 *aname = str; const u8 *bname = name->name; /* * 'str' is the name of an already existing dentry, so the name * must be valid. 'name' must be validated first. */ if (affs_check_name(name->name, name->len, notruncate)) return 1; /* * If the names are longer than the allowed 30 chars, * the excess is ignored, so their length may differ. */ if (len >= AFFSNAMEMAX) { if (name->len < AFFSNAMEMAX) return 1; len = AFFSNAMEMAX; } else if (len != name->len) return 1; for (; len > 0; len--) if (fn(*aname++) != fn(*bname++)) return 1; return 0; } static int affs_compare_dentry(const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name) { return __affs_compare_dentry(len, str, name, affs_toupper, affs_nofilenametruncate(dentry)); } static int affs_intl_compare_dentry(const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name) { return __affs_compare_dentry(len, str, name, affs_intl_toupper, affs_nofilenametruncate(dentry)); } /* * NOTE! unlike strncmp, affs_match returns 1 for success, 0 for failure. 
*/ static inline int affs_match(struct dentry *dentry, const u8 *name2, toupper_t fn) { const u8 *name = dentry->d_name.name; int len = dentry->d_name.len; if (len >= AFFSNAMEMAX) { if (*name2 < AFFSNAMEMAX) return 0; len = AFFSNAMEMAX; } else if (len != *name2) return 0; for (name2++; len > 0; len--) if (fn(*name++) != fn(*name2++)) return 0; return 1; } int affs_hash_name(struct super_block *sb, const u8 *name, unsigned int len) { toupper_t fn = affs_get_toupper(sb); u32 hash; hash = len = min(len, AFFSNAMEMAX); for (; len > 0; len--) hash = (hash * 13 + fn(*name++)) & 0x7ff; return hash % AFFS_SB(sb)->s_hashsize; } static struct buffer_head * affs_find_entry(struct inode *dir, struct dentry *dentry) { struct super_block *sb = dir->i_sb; struct buffer_head *bh; toupper_t fn = affs_get_toupper(sb); u32 key; pr_debug("%s(\"%pd\")\n", __func__, dentry); bh = affs_bread(sb, dir->i_ino); if (!bh) return ERR_PTR(-EIO); key = be32_to_cpu(AFFS_HEAD(bh)->table[affs_hash_name(sb, dentry->d_name.name, dentry->d_name.len)]); for (;;) { affs_brelse(bh); if (key == 0) return NULL; bh = affs_bread(sb, key); if (!bh) return ERR_PTR(-EIO); if (affs_match(dentry, AFFS_TAIL(sb, bh)->name, fn)) return bh; key = be32_to_cpu(AFFS_TAIL(sb, bh)->hash_chain); } } struct dentry * affs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { struct super_block *sb = dir->i_sb; struct buffer_head *bh; struct inode *inode = NULL; struct dentry *res; pr_debug("%s(\"%pd\")\n", __func__, dentry); affs_lock_dir(dir); bh = affs_find_entry(dir, dentry); if (IS_ERR(bh)) { affs_unlock_dir(dir); return ERR_CAST(bh); } if (bh) { u32 ino = bh->b_blocknr; /* store the real header ino in d_fsdata for faster lookups */ dentry->d_fsdata = (void *)(long)ino; switch (be32_to_cpu(AFFS_TAIL(sb, bh)->stype)) { //link to dirs disabled //case ST_LINKDIR: case ST_LINKFILE: ino = be32_to_cpu(AFFS_TAIL(sb, bh)->original); } affs_brelse(bh); inode = affs_iget(sb, ino); } res = d_splice_alias(inode, dentry); if (!IS_ERR_OR_NULL(res)) res->d_fsdata = dentry->d_fsdata; affs_unlock_dir(dir); return res; } int affs_unlink(struct inode *dir, struct dentry *dentry) { pr_debug("%s(dir=%lu, %lu \"%pd\")\n", __func__, dir->i_ino, d_inode(dentry)->i_ino, dentry); return affs_remove_header(dentry); } int affs_create(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) { struct super_block *sb = dir->i_sb; struct inode *inode; int error; pr_debug("%s(%lu,\"%pd\",0%ho)\n", __func__, dir->i_ino, dentry, mode); inode = affs_new_inode(dir); if (!inode) return -ENOSPC; inode->i_mode = mode; affs_mode_to_prot(inode); mark_inode_dirty(inode); inode->i_op = &affs_file_inode_operations; inode->i_fop = &affs_file_operations; inode->i_mapping->a_ops = affs_test_opt(AFFS_SB(sb)->s_flags, SF_OFS) ? 
&affs_aops_ofs : &affs_aops; error = affs_add_entry(dir, inode, dentry, ST_FILE); if (error) { clear_nlink(inode); iput(inode); return error; } return 0; } int affs_mkdir(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode) { struct inode *inode; int error; pr_debug("%s(%lu,\"%pd\",0%ho)\n", __func__, dir->i_ino, dentry, mode); inode = affs_new_inode(dir); if (!inode) return -ENOSPC; inode->i_mode = S_IFDIR | mode; affs_mode_to_prot(inode); inode->i_op = &affs_dir_inode_operations; inode->i_fop = &affs_dir_operations; error = affs_add_entry(dir, inode, dentry, ST_USERDIR); if (error) { clear_nlink(inode); mark_inode_dirty(inode); iput(inode); return error; } return 0; } int affs_rmdir(struct inode *dir, struct dentry *dentry) { pr_debug("%s(dir=%lu, %lu \"%pd\")\n", __func__, dir->i_ino, d_inode(dentry)->i_ino, dentry); return affs_remove_header(dentry); } int affs_symlink(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, const char *symname) { struct super_block *sb = dir->i_sb; struct buffer_head *bh; struct inode *inode; char *p; int i, maxlen, error; char c, lc; pr_debug("%s(%lu,\"%pd\" -> \"%s\")\n", __func__, dir->i_ino, dentry, symname); maxlen = AFFS_SB(sb)->s_hashsize * sizeof(u32) - 1; inode = affs_new_inode(dir); if (!inode) return -ENOSPC; inode->i_op = &affs_symlink_inode_operations; inode_nohighmem(inode); inode->i_data.a_ops = &affs_symlink_aops; inode->i_mode = S_IFLNK | 0777; affs_mode_to_prot(inode); error = -EIO; bh = affs_bread(sb, inode->i_ino); if (!bh) goto err; i = 0; p = (char *)AFFS_HEAD(bh)->table; lc = '/'; if (*symname == '/') { struct affs_sb_info *sbi = AFFS_SB(sb); while (*symname == '/') symname++; spin_lock(&sbi->symlink_lock); while (sbi->s_volume[i]) /* Cannot overflow */ *p++ = sbi->s_volume[i++]; spin_unlock(&sbi->symlink_lock); } while (i < maxlen && (c = *symname++)) { if (c == '.' && lc == '/' && *symname == '.' && symname[1] == '/') { *p++ = '/'; i++; symname += 2; lc = '/'; } else if (c == '.' && lc == '/' && *symname == '/') { symname++; lc = '/'; } else { *p++ = c; lc = c; i++; } if (lc == '/') while (*symname == '/') symname++; } *p = 0; inode->i_size = i + 1; mark_buffer_dirty_inode(bh, inode); affs_brelse(bh); mark_inode_dirty(inode); error = affs_add_entry(dir, inode, dentry, ST_SOFTLINK); if (error) goto err; return 0; err: clear_nlink(inode); mark_inode_dirty(inode); iput(inode); return error; } int affs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { struct inode *inode = d_inode(old_dentry); pr_debug("%s(%lu, %lu, \"%pd\")\n", __func__, inode->i_ino, dir->i_ino, dentry); return affs_add_entry(dir, inode, dentry, ST_LINKFILE); } static int affs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { struct super_block *sb = old_dir->i_sb; struct buffer_head *bh = NULL; int retval; retval = affs_check_name(new_dentry->d_name.name, new_dentry->d_name.len, affs_nofilenametruncate(old_dentry)); if (retval) return retval; /* Unlink destination if it already exists */ if (d_really_is_positive(new_dentry)) { retval = affs_remove_header(new_dentry); if (retval) return retval; } bh = affs_bread(sb, d_inode(old_dentry)->i_ino); if (!bh) return -EIO; /* Remove header from its parent directory. */ affs_lock_dir(old_dir); retval = affs_remove_hash(old_dir, bh); affs_unlock_dir(old_dir); if (retval) goto done; /* And insert it into the new directory with the new name. 
*/ affs_copy_name(AFFS_TAIL(sb, bh)->name, new_dentry); affs_fix_checksum(sb, bh); affs_lock_dir(new_dir); retval = affs_insert_hash(new_dir, bh); affs_unlock_dir(new_dir); /* TODO: move it back to old_dir, if error? */ done: mark_buffer_dirty_inode(bh, retval ? old_dir : new_dir); affs_brelse(bh); return retval; } static int affs_xrename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { struct super_block *sb = old_dir->i_sb; struct buffer_head *bh_old = NULL; struct buffer_head *bh_new = NULL; int retval; bh_old = affs_bread(sb, d_inode(old_dentry)->i_ino); if (!bh_old) return -EIO; bh_new = affs_bread(sb, d_inode(new_dentry)->i_ino); if (!bh_new) { affs_brelse(bh_old); return -EIO; } /* Remove old header from its parent directory. */ affs_lock_dir(old_dir); retval = affs_remove_hash(old_dir, bh_old); affs_unlock_dir(old_dir); if (retval) goto done; /* Remove new header from its parent directory. */ affs_lock_dir(new_dir); retval = affs_remove_hash(new_dir, bh_new); affs_unlock_dir(new_dir); if (retval) goto done; /* Insert old into the new directory with the new name. */ affs_copy_name(AFFS_TAIL(sb, bh_old)->name, new_dentry); affs_fix_checksum(sb, bh_old); affs_lock_dir(new_dir); retval = affs_insert_hash(new_dir, bh_old); affs_unlock_dir(new_dir); /* Insert new into the old directory with the old name. */ affs_copy_name(AFFS_TAIL(sb, bh_new)->name, old_dentry); affs_fix_checksum(sb, bh_new); affs_lock_dir(old_dir); retval = affs_insert_hash(old_dir, bh_new); affs_unlock_dir(old_dir); done: mark_buffer_dirty_inode(bh_old, new_dir); mark_buffer_dirty_inode(bh_new, old_dir); affs_brelse(bh_old); affs_brelse(bh_new); return retval; } int affs_rename2(struct mnt_idmap *idmap, struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) { if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE)) return -EINVAL; pr_debug("%s(old=%lu,\"%pd\" to new=%lu,\"%pd\")\n", __func__, old_dir->i_ino, old_dentry, new_dir->i_ino, new_dentry); if (flags & RENAME_EXCHANGE) return affs_xrename(old_dir, old_dentry, new_dir, new_dentry); return affs_rename(old_dir, old_dentry, new_dir, new_dentry); } static struct dentry *affs_get_parent(struct dentry *child) { struct inode *parent; struct buffer_head *bh; bh = affs_bread(child->d_sb, d_inode(child)->i_ino); if (!bh) return ERR_PTR(-EIO); parent = affs_iget(child->d_sb, be32_to_cpu(AFFS_TAIL(child->d_sb, bh)->parent)); brelse(bh); if (IS_ERR(parent)) return ERR_CAST(parent); return d_obtain_alias(parent); } static struct inode *affs_nfs_get_inode(struct super_block *sb, u64 ino, u32 generation) { struct inode *inode; if (!affs_validblock(sb, ino)) return ERR_PTR(-ESTALE); inode = affs_iget(sb, ino); if (IS_ERR(inode)) return ERR_CAST(inode); return inode; } static struct dentry *affs_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { return generic_fh_to_dentry(sb, fid, fh_len, fh_type, affs_nfs_get_inode); } static struct dentry *affs_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { return generic_fh_to_parent(sb, fid, fh_len, fh_type, affs_nfs_get_inode); } const struct export_operations affs_export_ops = { .fh_to_dentry = affs_fh_to_dentry, .fh_to_parent = affs_fh_to_parent, .get_parent = affs_get_parent, }; const struct dentry_operations affs_dentry_operations = { .d_hash = affs_hash_dentry, .d_compare = affs_compare_dentry, }; const struct dentry_operations affs_intl_dentry_operations = { 
.d_hash = affs_intl_hash_dentry, .d_compare = affs_intl_compare_dentry, };
linux-master
fs/affs/namei.c
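affs_find_entry() and affs_hash_name() above locate a directory entry by hashing the case-folded name into one of the header block's hash-chain slots and then walking that chain. Below is a minimal userspace sketch of the same hash rule; the plain ASCII uppercase variant, AFFSNAMEMAX == 30 and the 72-slot table of a 512-byte block are assumptions of a classic layout, not values read from this file.

/*
 * Userspace sketch of the AFFS directory-name hash. The uppercase
 * rule, AFFSNAMEMAX and HASHSIZE below are illustrative assumptions.
 */
#include <stdio.h>
#include <string.h>

#define AFFSNAMEMAX 30u         /* assumed on-disk name length limit */
#define HASHSIZE    72u         /* assumed slots per directory header */

static unsigned char affs_upper(unsigned char c)
{
        return (c >= 'a' && c <= 'z') ? c - ('a' - 'A') : c;
}

static unsigned int affs_name_hash(const char *name)
{
        unsigned int len = strlen(name);
        unsigned int hash;

        if (len > AFFSNAMEMAX)
                len = AFFSNAMEMAX;
        hash = len;             /* hash seeds from the (clamped) length */
        while (len--)
                hash = (hash * 13 + affs_upper((unsigned char)*name++)) & 0x7ff;
        return hash % HASHSIZE;
}

int main(void)
{
        /* Case folding puts both spellings into the same slot. */
        printf("\"Readme\" -> slot %u\n", affs_name_hash("Readme"));
        printf("\"README\" -> slot %u\n", affs_name_hash("README"));
        return 0;
}

Because the hash and affs_match() fold case through the same toupper function, "Readme" and "README" land on the same chain and compare equal, which is what gives AFFS its case-insensitive lookups.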
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/affs/symlink.c * * 1995 Hans-Joachim Widmaier - Modified for affs. * * Copyright (C) 1991, 1992 Linus Torvalds * * affs symlink handling code */ #include "affs.h" static int affs_symlink_read_folio(struct file *file, struct folio *folio) { struct buffer_head *bh; struct inode *inode = folio->mapping->host; char *link = folio_address(folio); struct slink_front *lf; int i, j; char c; char lc; pr_debug("get_link(ino=%lu)\n", inode->i_ino); bh = affs_bread(inode->i_sb, inode->i_ino); if (!bh) goto fail; i = 0; j = 0; lf = (struct slink_front *)bh->b_data; lc = 0; if (strchr(lf->symname,':')) { /* Handle assign or volume name */ struct affs_sb_info *sbi = AFFS_SB(inode->i_sb); char *pf; spin_lock(&sbi->symlink_lock); pf = sbi->s_prefix ? sbi->s_prefix : "/"; while (i < 1023 && (c = pf[i])) link[i++] = c; spin_unlock(&sbi->symlink_lock); while (i < 1023 && lf->symname[j] != ':') link[i++] = lf->symname[j++]; if (i < 1023) link[i++] = '/'; j++; lc = '/'; } while (i < 1023 && (c = lf->symname[j])) { if (c == '/' && lc == '/' && i < 1020) { /* parent dir */ link[i++] = '.'; link[i++] = '.'; } link[i++] = c; lc = c; j++; } link[i] = '\0'; affs_brelse(bh); folio_mark_uptodate(folio); folio_unlock(folio); return 0; fail: folio_unlock(folio); return -EIO; } const struct address_space_operations affs_symlink_aops = { .read_folio = affs_symlink_read_folio, }; const struct inode_operations affs_symlink_inode_operations = { .get_link = page_get_link, .setattr = affs_notify_change, };
linux-master
fs/affs/symlink.c
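affs_symlink_read_folio() above rewrites an AmigaOS-style link target into a Linux path as it is read: a leading "Volume:" or assign name becomes the mount's configured prefix, and an empty path component ("//") means "go up one directory". The userspace sketch below reproduces just those two rules; the "/amiga/" prefix, the buffer handling and the helper name are invented for illustration, and the kernel's folio and locking details are left out.

/*
 * Rough sketch of the Amiga -> Linux link-target rewrite:
 *   "Vol:rest"  becomes  "<prefix>Vol/rest"
 *   "a//b"      becomes  "a/../b"   (empty component = parent dir)
 */
#include <stdio.h>
#include <string.h>

static void amiga_to_linux(const char *in, char *out, size_t outsz,
                           const char *prefix)
{
        size_t i = 0, j = 0;
        char c, lc = 0;

        if (strchr(in, ':')) {                  /* assign or volume name */
                while (i + 1 < outsz && prefix[i]) {
                        out[i] = prefix[i];
                        i++;
                }
                while (i + 1 < outsz && in[j] != ':')
                        out[i++] = in[j++];
                if (i + 1 < outsz)
                        out[i++] = '/';
                j++;                            /* skip the ':' itself */
                lc = '/';
        }
        while (i + 1 < outsz && (c = in[j]) != '\0') {
                if (c == '/' && lc == '/' && i + 3 < outsz) {
                        out[i++] = '.';         /* "//" -> "/../" */
                        out[i++] = '.';
                }
                out[i++] = c;
                lc = c;
                j++;
        }
        out[i] = '\0';
}

int main(void)
{
        char buf[256];

        amiga_to_linux("Work:src//include/file.h", buf, sizeof(buf), "/amiga/");
        printf("%s\n", buf);    /* /amiga/Work/src/../include/file.h */
        return 0;
}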
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/affs/file.c * * (c) 1996 Hans-Joachim Widmaier - Rewritten * * (C) 1993 Ray Burr - Modified for Amiga FFS filesystem. * * (C) 1992 Eric Youngdale Modified for ISO 9660 filesystem. * * (C) 1991 Linus Torvalds - minix filesystem * * affs regular file handling primitives */ #include <linux/uio.h> #include <linux/blkdev.h> #include <linux/mpage.h> #include "affs.h" static struct buffer_head *affs_get_extblock_slow(struct inode *inode, u32 ext); static int affs_file_open(struct inode *inode, struct file *filp) { pr_debug("open(%lu,%d)\n", inode->i_ino, atomic_read(&AFFS_I(inode)->i_opencnt)); atomic_inc(&AFFS_I(inode)->i_opencnt); return 0; } static int affs_file_release(struct inode *inode, struct file *filp) { pr_debug("release(%lu, %d)\n", inode->i_ino, atomic_read(&AFFS_I(inode)->i_opencnt)); if (atomic_dec_and_test(&AFFS_I(inode)->i_opencnt)) { inode_lock(inode); if (inode->i_size != AFFS_I(inode)->mmu_private) affs_truncate(inode); affs_free_prealloc(inode); inode_unlock(inode); } return 0; } static int affs_grow_extcache(struct inode *inode, u32 lc_idx) { struct super_block *sb = inode->i_sb; struct buffer_head *bh; u32 lc_max; int i, j, key; if (!AFFS_I(inode)->i_lc) { char *ptr = (char *)get_zeroed_page(GFP_NOFS); if (!ptr) return -ENOMEM; AFFS_I(inode)->i_lc = (u32 *)ptr; AFFS_I(inode)->i_ac = (struct affs_ext_key *)(ptr + AFFS_CACHE_SIZE / 2); } lc_max = AFFS_LC_SIZE << AFFS_I(inode)->i_lc_shift; if (AFFS_I(inode)->i_extcnt > lc_max) { u32 lc_shift, lc_mask, tmp, off; /* need to recalculate linear cache, start from old size */ lc_shift = AFFS_I(inode)->i_lc_shift; tmp = (AFFS_I(inode)->i_extcnt / AFFS_LC_SIZE) >> lc_shift; for (; tmp; tmp >>= 1) lc_shift++; lc_mask = (1 << lc_shift) - 1; /* fix idx and old size to new shift */ lc_idx >>= (lc_shift - AFFS_I(inode)->i_lc_shift); AFFS_I(inode)->i_lc_size >>= (lc_shift - AFFS_I(inode)->i_lc_shift); /* first shrink old cache to make more space */ off = 1 << (lc_shift - AFFS_I(inode)->i_lc_shift); for (i = 1, j = off; j < AFFS_LC_SIZE; i++, j += off) AFFS_I(inode)->i_ac[i] = AFFS_I(inode)->i_ac[j]; AFFS_I(inode)->i_lc_shift = lc_shift; AFFS_I(inode)->i_lc_mask = lc_mask; } /* fill cache to the needed index */ i = AFFS_I(inode)->i_lc_size; AFFS_I(inode)->i_lc_size = lc_idx + 1; for (; i <= lc_idx; i++) { if (!i) { AFFS_I(inode)->i_lc[0] = inode->i_ino; continue; } key = AFFS_I(inode)->i_lc[i - 1]; j = AFFS_I(inode)->i_lc_mask + 1; // unlock cache for (; j > 0; j--) { bh = affs_bread(sb, key); if (!bh) goto err; key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension); affs_brelse(bh); } // lock cache AFFS_I(inode)->i_lc[i] = key; } return 0; err: // lock cache return -EIO; } static struct buffer_head * affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext) { struct super_block *sb = inode->i_sb; struct buffer_head *new_bh; u32 blocknr, tmp; blocknr = affs_alloc_block(inode, bh->b_blocknr); if (!blocknr) return ERR_PTR(-ENOSPC); new_bh = affs_getzeroblk(sb, blocknr); if (!new_bh) { affs_free_block(sb, blocknr); return ERR_PTR(-EIO); } AFFS_HEAD(new_bh)->ptype = cpu_to_be32(T_LIST); AFFS_HEAD(new_bh)->key = cpu_to_be32(blocknr); AFFS_TAIL(sb, new_bh)->stype = cpu_to_be32(ST_FILE); AFFS_TAIL(sb, new_bh)->parent = cpu_to_be32(inode->i_ino); affs_fix_checksum(sb, new_bh); mark_buffer_dirty_inode(new_bh, inode); tmp = be32_to_cpu(AFFS_TAIL(sb, bh)->extension); if (tmp) affs_warning(sb, "alloc_ext", "previous extension set (%x)", tmp); AFFS_TAIL(sb, bh)->extension = cpu_to_be32(blocknr); 
affs_adjust_checksum(bh, blocknr - tmp); mark_buffer_dirty_inode(bh, inode); AFFS_I(inode)->i_extcnt++; mark_inode_dirty(inode); return new_bh; } static inline struct buffer_head * affs_get_extblock(struct inode *inode, u32 ext) { /* inline the simplest case: same extended block as last time */ struct buffer_head *bh = AFFS_I(inode)->i_ext_bh; if (ext == AFFS_I(inode)->i_ext_last) get_bh(bh); else /* we have to do more (not inlined) */ bh = affs_get_extblock_slow(inode, ext); return bh; } static struct buffer_head * affs_get_extblock_slow(struct inode *inode, u32 ext) { struct super_block *sb = inode->i_sb; struct buffer_head *bh; u32 ext_key; u32 lc_idx, lc_off, ac_idx; u32 tmp, idx; if (ext == AFFS_I(inode)->i_ext_last + 1) { /* read the next extended block from the current one */ bh = AFFS_I(inode)->i_ext_bh; ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension); if (ext < AFFS_I(inode)->i_extcnt) goto read_ext; BUG_ON(ext > AFFS_I(inode)->i_extcnt); bh = affs_alloc_extblock(inode, bh, ext); if (IS_ERR(bh)) return bh; goto store_ext; } if (ext == 0) { /* we seek back to the file header block */ ext_key = inode->i_ino; goto read_ext; } if (ext >= AFFS_I(inode)->i_extcnt) { struct buffer_head *prev_bh; /* allocate a new extended block */ BUG_ON(ext > AFFS_I(inode)->i_extcnt); /* get previous extended block */ prev_bh = affs_get_extblock(inode, ext - 1); if (IS_ERR(prev_bh)) return prev_bh; bh = affs_alloc_extblock(inode, prev_bh, ext); affs_brelse(prev_bh); if (IS_ERR(bh)) return bh; goto store_ext; } again: /* check if there is an extended cache and whether it's large enough */ lc_idx = ext >> AFFS_I(inode)->i_lc_shift; lc_off = ext & AFFS_I(inode)->i_lc_mask; if (lc_idx >= AFFS_I(inode)->i_lc_size) { int err; err = affs_grow_extcache(inode, lc_idx); if (err) return ERR_PTR(err); goto again; } /* every n'th key we find in the linear cache */ if (!lc_off) { ext_key = AFFS_I(inode)->i_lc[lc_idx]; goto read_ext; } /* maybe it's still in the associative cache */ ac_idx = (ext - lc_idx - 1) & AFFS_AC_MASK; if (AFFS_I(inode)->i_ac[ac_idx].ext == ext) { ext_key = AFFS_I(inode)->i_ac[ac_idx].key; goto read_ext; } /* try to find one of the previous extended blocks */ tmp = ext; idx = ac_idx; while (--tmp, --lc_off > 0) { idx = (idx - 1) & AFFS_AC_MASK; if (AFFS_I(inode)->i_ac[idx].ext == tmp) { ext_key = AFFS_I(inode)->i_ac[idx].key; goto find_ext; } } /* fall back to the linear cache */ ext_key = AFFS_I(inode)->i_lc[lc_idx]; find_ext: /* read all extended blocks until we find the one we need */ //unlock cache do { bh = affs_bread(sb, ext_key); if (!bh) goto err_bread; ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension); affs_brelse(bh); tmp++; } while (tmp < ext); //lock cache /* store it in the associative cache */ // recalculate ac_idx? 
AFFS_I(inode)->i_ac[ac_idx].ext = ext; AFFS_I(inode)->i_ac[ac_idx].key = ext_key; read_ext: /* finally read the right extended block */ //unlock cache bh = affs_bread(sb, ext_key); if (!bh) goto err_bread; //lock cache store_ext: /* release old cached extended block and store the new one */ affs_brelse(AFFS_I(inode)->i_ext_bh); AFFS_I(inode)->i_ext_last = ext; AFFS_I(inode)->i_ext_bh = bh; get_bh(bh); return bh; err_bread: affs_brelse(bh); return ERR_PTR(-EIO); } static int affs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create) { struct super_block *sb = inode->i_sb; struct buffer_head *ext_bh; u32 ext; pr_debug("%s(%lu, %llu)\n", __func__, inode->i_ino, (unsigned long long)block); BUG_ON(block > (sector_t)0x7fffffffUL); if (block >= AFFS_I(inode)->i_blkcnt) { if (block > AFFS_I(inode)->i_blkcnt || !create) goto err_big; } else create = 0; //lock cache affs_lock_ext(inode); ext = (u32)block / AFFS_SB(sb)->s_hashsize; block -= ext * AFFS_SB(sb)->s_hashsize; ext_bh = affs_get_extblock(inode, ext); if (IS_ERR(ext_bh)) goto err_ext; map_bh(bh_result, sb, (sector_t)be32_to_cpu(AFFS_BLOCK(sb, ext_bh, block))); if (create) { u32 blocknr = affs_alloc_block(inode, ext_bh->b_blocknr); if (!blocknr) goto err_alloc; set_buffer_new(bh_result); AFFS_I(inode)->mmu_private += AFFS_SB(sb)->s_data_blksize; AFFS_I(inode)->i_blkcnt++; /* store new block */ if (bh_result->b_blocknr) affs_warning(sb, "get_block", "block already set (%llx)", (unsigned long long)bh_result->b_blocknr); AFFS_BLOCK(sb, ext_bh, block) = cpu_to_be32(blocknr); AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(block + 1); affs_adjust_checksum(ext_bh, blocknr - bh_result->b_blocknr + 1); bh_result->b_blocknr = blocknr; if (!block) { /* insert first block into header block */ u32 tmp = be32_to_cpu(AFFS_HEAD(ext_bh)->first_data); if (tmp) affs_warning(sb, "get_block", "first block already set (%d)", tmp); AFFS_HEAD(ext_bh)->first_data = cpu_to_be32(blocknr); affs_adjust_checksum(ext_bh, blocknr - tmp); } } affs_brelse(ext_bh); //unlock cache affs_unlock_ext(inode); return 0; err_big: affs_error(inode->i_sb, "get_block", "strange block request %llu", (unsigned long long)block); return -EIO; err_ext: // unlock cache affs_unlock_ext(inode); return PTR_ERR(ext_bh); err_alloc: brelse(ext_bh); clear_buffer_mapped(bh_result); bh_result->b_bdev = NULL; // unlock cache affs_unlock_ext(inode); return -ENOSPC; } static int affs_writepages(struct address_space *mapping, struct writeback_control *wbc) { return mpage_writepages(mapping, wbc, affs_get_block); } static int affs_read_folio(struct file *file, struct folio *folio) { return block_read_full_folio(folio, affs_get_block); } static void affs_write_failed(struct address_space *mapping, loff_t to) { struct inode *inode = mapping->host; if (to > inode->i_size) { truncate_pagecache(inode, inode->i_size); affs_truncate(inode); } } static ssize_t affs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) { struct file *file = iocb->ki_filp; struct address_space *mapping = file->f_mapping; struct inode *inode = mapping->host; size_t count = iov_iter_count(iter); loff_t offset = iocb->ki_pos; ssize_t ret; if (iov_iter_rw(iter) == WRITE) { loff_t size = offset + count; if (AFFS_I(inode)->mmu_private < size) return 0; } ret = blockdev_direct_IO(iocb, inode, iter, affs_get_block); if (ret < 0 && iov_iter_rw(iter) == WRITE) affs_write_failed(mapping, offset + count); return ret; } static int affs_write_begin(struct file *file, struct address_space *mapping, loff_t pos, 
unsigned len, struct page **pagep, void **fsdata) { int ret; *pagep = NULL; ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata, affs_get_block, &AFFS_I(mapping->host)->mmu_private); if (unlikely(ret)) affs_write_failed(mapping, pos + len); return ret; } static int affs_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned int len, unsigned int copied, struct page *page, void *fsdata) { struct inode *inode = mapping->host; int ret; ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); /* Clear Archived bit on file writes, as AmigaOS would do */ if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) { AFFS_I(inode)->i_protect &= ~FIBF_ARCHIVED; mark_inode_dirty(inode); } return ret; } static sector_t _affs_bmap(struct address_space *mapping, sector_t block) { return generic_block_bmap(mapping,block,affs_get_block); } const struct address_space_operations affs_aops = { .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .read_folio = affs_read_folio, .writepages = affs_writepages, .write_begin = affs_write_begin, .write_end = affs_write_end, .direct_IO = affs_direct_IO, .migrate_folio = buffer_migrate_folio, .bmap = _affs_bmap }; static inline struct buffer_head * affs_bread_ino(struct inode *inode, int block, int create) { struct buffer_head *bh, tmp_bh; int err; tmp_bh.b_state = 0; err = affs_get_block(inode, block, &tmp_bh, create); if (!err) { bh = affs_bread(inode->i_sb, tmp_bh.b_blocknr); if (bh) { bh->b_state |= tmp_bh.b_state; return bh; } err = -EIO; } return ERR_PTR(err); } static inline struct buffer_head * affs_getzeroblk_ino(struct inode *inode, int block) { struct buffer_head *bh, tmp_bh; int err; tmp_bh.b_state = 0; err = affs_get_block(inode, block, &tmp_bh, 1); if (!err) { bh = affs_getzeroblk(inode->i_sb, tmp_bh.b_blocknr); if (bh) { bh->b_state |= tmp_bh.b_state; return bh; } err = -EIO; } return ERR_PTR(err); } static inline struct buffer_head * affs_getemptyblk_ino(struct inode *inode, int block) { struct buffer_head *bh, tmp_bh; int err; tmp_bh.b_state = 0; err = affs_get_block(inode, block, &tmp_bh, 1); if (!err) { bh = affs_getemptyblk(inode->i_sb, tmp_bh.b_blocknr); if (bh) { bh->b_state |= tmp_bh.b_state; return bh; } err = -EIO; } return ERR_PTR(err); } static int affs_do_read_folio_ofs(struct folio *folio, size_t to, int create) { struct inode *inode = folio->mapping->host; struct super_block *sb = inode->i_sb; struct buffer_head *bh; size_t pos = 0; size_t bidx, boff, bsize; u32 tmp; pr_debug("%s(%lu, %ld, 0, %zu)\n", __func__, inode->i_ino, folio->index, to); BUG_ON(to > folio_size(folio)); bsize = AFFS_SB(sb)->s_data_blksize; tmp = folio_pos(folio); bidx = tmp / bsize; boff = tmp % bsize; while (pos < to) { bh = affs_bread_ino(inode, bidx, create); if (IS_ERR(bh)) return PTR_ERR(bh); tmp = min(bsize - boff, to - pos); BUG_ON(pos + tmp > to || tmp > bsize); memcpy_to_folio(folio, pos, AFFS_DATA(bh) + boff, tmp); affs_brelse(bh); bidx++; pos += tmp; boff = 0; } return 0; } static int affs_extent_file_ofs(struct inode *inode, u32 newsize) { struct super_block *sb = inode->i_sb; struct buffer_head *bh, *prev_bh; u32 bidx, boff; u32 size, bsize; u32 tmp; pr_debug("%s(%lu, %d)\n", __func__, inode->i_ino, newsize); bsize = AFFS_SB(sb)->s_data_blksize; bh = NULL; size = AFFS_I(inode)->mmu_private; bidx = size / bsize; boff = size % bsize; if (boff) { bh = affs_bread_ino(inode, bidx, 0); if (IS_ERR(bh)) return PTR_ERR(bh); tmp = min(bsize - boff, newsize - size); BUG_ON(boff + tmp > bsize || tmp > 
bsize); memset(AFFS_DATA(bh) + boff, 0, tmp); be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp); affs_fix_checksum(sb, bh); mark_buffer_dirty_inode(bh, inode); size += tmp; bidx++; } else if (bidx) { bh = affs_bread_ino(inode, bidx - 1, 0); if (IS_ERR(bh)) return PTR_ERR(bh); } while (size < newsize) { prev_bh = bh; bh = affs_getzeroblk_ino(inode, bidx); if (IS_ERR(bh)) goto out; tmp = min(bsize, newsize - size); BUG_ON(tmp > bsize); AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA); AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino); AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx); AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp); affs_fix_checksum(sb, bh); bh->b_state &= ~(1UL << BH_New); mark_buffer_dirty_inode(bh, inode); if (prev_bh) { u32 tmp_next = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next); if (tmp_next) affs_warning(sb, "extent_file_ofs", "next block already set for %d (%d)", bidx, tmp_next); AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr); affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp_next); mark_buffer_dirty_inode(prev_bh, inode); affs_brelse(prev_bh); } size += bsize; bidx++; } affs_brelse(bh); inode->i_size = AFFS_I(inode)->mmu_private = newsize; return 0; out: inode->i_size = AFFS_I(inode)->mmu_private = newsize; return PTR_ERR(bh); } static int affs_read_folio_ofs(struct file *file, struct folio *folio) { struct inode *inode = folio->mapping->host; size_t to; int err; pr_debug("%s(%lu, %ld)\n", __func__, inode->i_ino, folio->index); to = folio_size(folio); if (folio_pos(folio) + to > inode->i_size) { to = inode->i_size - folio_pos(folio); folio_zero_segment(folio, to, folio_size(folio)); } err = affs_do_read_folio_ofs(folio, to, 0); if (!err) folio_mark_uptodate(folio); folio_unlock(folio); return err; } static int affs_write_begin_ofs(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, struct page **pagep, void **fsdata) { struct inode *inode = mapping->host; struct folio *folio; pgoff_t index; int err = 0; pr_debug("%s(%lu, %llu, %llu)\n", __func__, inode->i_ino, pos, pos + len); if (pos > AFFS_I(inode)->mmu_private) { /* XXX: this probably leaves a too-big i_size in case of * failure. Should really be updating i_size at write_end time */ err = affs_extent_file_ofs(inode, pos); if (err) return err; } index = pos >> PAGE_SHIFT; folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN, mapping_gfp_mask(mapping)); if (IS_ERR(folio)) return PTR_ERR(folio); *pagep = &folio->page; if (folio_test_uptodate(folio)) return 0; /* XXX: inefficient but safe in the face of short writes */ err = affs_do_read_folio_ofs(folio, folio_size(folio), 1); if (err) { folio_unlock(folio); folio_put(folio); } return err; } static int affs_write_end_ofs(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { struct folio *folio = page_folio(page); struct inode *inode = mapping->host; struct super_block *sb = inode->i_sb; struct buffer_head *bh, *prev_bh; char *data; u32 bidx, boff, bsize; unsigned from, to; u32 tmp; int written; from = pos & (PAGE_SIZE - 1); to = from + len; /* * XXX: not sure if this can handle short copies (len < copied), but * we don't have to, because the folio should always be uptodate here, * due to write_begin. 
*/ pr_debug("%s(%lu, %llu, %llu)\n", __func__, inode->i_ino, pos, pos + len); bsize = AFFS_SB(sb)->s_data_blksize; data = folio_address(folio); bh = NULL; written = 0; tmp = (folio->index << PAGE_SHIFT) + from; bidx = tmp / bsize; boff = tmp % bsize; if (boff) { bh = affs_bread_ino(inode, bidx, 0); if (IS_ERR(bh)) { written = PTR_ERR(bh); goto err_first_bh; } tmp = min(bsize - boff, to - from); BUG_ON(boff + tmp > bsize || tmp > bsize); memcpy(AFFS_DATA(bh) + boff, data + from, tmp); be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp); affs_fix_checksum(sb, bh); mark_buffer_dirty_inode(bh, inode); written += tmp; from += tmp; bidx++; } else if (bidx) { bh = affs_bread_ino(inode, bidx - 1, 0); if (IS_ERR(bh)) { written = PTR_ERR(bh); goto err_first_bh; } } while (from + bsize <= to) { prev_bh = bh; bh = affs_getemptyblk_ino(inode, bidx); if (IS_ERR(bh)) goto err_bh; memcpy(AFFS_DATA(bh), data + from, bsize); if (buffer_new(bh)) { AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA); AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino); AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx); AFFS_DATA_HEAD(bh)->size = cpu_to_be32(bsize); AFFS_DATA_HEAD(bh)->next = 0; bh->b_state &= ~(1UL << BH_New); if (prev_bh) { u32 tmp_next = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next); if (tmp_next) affs_warning(sb, "commit_write_ofs", "next block already set for %d (%d)", bidx, tmp_next); AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr); affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp_next); mark_buffer_dirty_inode(prev_bh, inode); } } affs_brelse(prev_bh); affs_fix_checksum(sb, bh); mark_buffer_dirty_inode(bh, inode); written += bsize; from += bsize; bidx++; } if (from < to) { prev_bh = bh; bh = affs_bread_ino(inode, bidx, 1); if (IS_ERR(bh)) goto err_bh; tmp = min(bsize, to - from); BUG_ON(tmp > bsize); memcpy(AFFS_DATA(bh), data + from, tmp); if (buffer_new(bh)) { AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA); AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino); AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx); AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp); AFFS_DATA_HEAD(bh)->next = 0; bh->b_state &= ~(1UL << BH_New); if (prev_bh) { u32 tmp_next = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next); if (tmp_next) affs_warning(sb, "commit_write_ofs", "next block already set for %d (%d)", bidx, tmp_next); AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr); affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp_next); mark_buffer_dirty_inode(prev_bh, inode); } } else if (be32_to_cpu(AFFS_DATA_HEAD(bh)->size) < tmp) AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp); affs_brelse(prev_bh); affs_fix_checksum(sb, bh); mark_buffer_dirty_inode(bh, inode); written += tmp; from += tmp; bidx++; } folio_mark_uptodate(folio); done: affs_brelse(bh); tmp = (folio->index << PAGE_SHIFT) + from; if (tmp > inode->i_size) inode->i_size = AFFS_I(inode)->mmu_private = tmp; /* Clear Archived bit on file writes, as AmigaOS would do */ if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) { AFFS_I(inode)->i_protect &= ~FIBF_ARCHIVED; mark_inode_dirty(inode); } err_first_bh: folio_unlock(folio); folio_put(folio); return written; err_bh: bh = prev_bh; if (!written) written = PTR_ERR(bh); goto done; } const struct address_space_operations affs_aops_ofs = { .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .read_folio = affs_read_folio_ofs, //.writepages = affs_writepages_ofs, .write_begin = affs_write_begin_ofs, .write_end = affs_write_end_ofs, .migrate_folio = filemap_migrate_folio, }; /* Free any preallocated blocks. 
*/ void affs_free_prealloc(struct inode *inode) { struct super_block *sb = inode->i_sb; pr_debug("free_prealloc(ino=%lu)\n", inode->i_ino); while (AFFS_I(inode)->i_pa_cnt) { AFFS_I(inode)->i_pa_cnt--; affs_free_block(sb, ++AFFS_I(inode)->i_lastalloc); } } /* Truncate (or enlarge) a file to the requested size. */ void affs_truncate(struct inode *inode) { struct super_block *sb = inode->i_sb; u32 ext, ext_key; u32 last_blk, blkcnt, blk; u32 size; struct buffer_head *ext_bh; int i; pr_debug("truncate(inode=%lu, oldsize=%llu, newsize=%llu)\n", inode->i_ino, AFFS_I(inode)->mmu_private, inode->i_size); last_blk = 0; ext = 0; if (inode->i_size) { last_blk = ((u32)inode->i_size - 1) / AFFS_SB(sb)->s_data_blksize; ext = last_blk / AFFS_SB(sb)->s_hashsize; } if (inode->i_size > AFFS_I(inode)->mmu_private) { struct address_space *mapping = inode->i_mapping; struct page *page; void *fsdata = NULL; loff_t isize = inode->i_size; int res; res = mapping->a_ops->write_begin(NULL, mapping, isize, 0, &page, &fsdata); if (!res) res = mapping->a_ops->write_end(NULL, mapping, isize, 0, 0, page, fsdata); else inode->i_size = AFFS_I(inode)->mmu_private; mark_inode_dirty(inode); return; } else if (inode->i_size == AFFS_I(inode)->mmu_private) return; // lock cache ext_bh = affs_get_extblock(inode, ext); if (IS_ERR(ext_bh)) { affs_warning(sb, "truncate", "unexpected read error for ext block %u (%ld)", ext, PTR_ERR(ext_bh)); return; } if (AFFS_I(inode)->i_lc) { /* clear linear cache */ i = (ext + 1) >> AFFS_I(inode)->i_lc_shift; if (AFFS_I(inode)->i_lc_size > i) { AFFS_I(inode)->i_lc_size = i; for (; i < AFFS_LC_SIZE; i++) AFFS_I(inode)->i_lc[i] = 0; } /* clear associative cache */ for (i = 0; i < AFFS_AC_SIZE; i++) if (AFFS_I(inode)->i_ac[i].ext >= ext) AFFS_I(inode)->i_ac[i].ext = 0; } ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension); blkcnt = AFFS_I(inode)->i_blkcnt; i = 0; blk = last_blk; if (inode->i_size) { i = last_blk % AFFS_SB(sb)->s_hashsize + 1; blk++; } else AFFS_HEAD(ext_bh)->first_data = 0; AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(i); size = AFFS_SB(sb)->s_hashsize; if (size > blkcnt - blk + i) size = blkcnt - blk + i; for (; i < size; i++, blk++) { affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i))); AFFS_BLOCK(sb, ext_bh, i) = 0; } AFFS_TAIL(sb, ext_bh)->extension = 0; affs_fix_checksum(sb, ext_bh); mark_buffer_dirty_inode(ext_bh, inode); affs_brelse(ext_bh); if (inode->i_size) { AFFS_I(inode)->i_blkcnt = last_blk + 1; AFFS_I(inode)->i_extcnt = ext + 1; if (affs_test_opt(AFFS_SB(sb)->s_flags, SF_OFS)) { struct buffer_head *bh = affs_bread_ino(inode, last_blk, 0); u32 tmp; if (IS_ERR(bh)) { affs_warning(sb, "truncate", "unexpected read error for last block %u (%ld)", ext, PTR_ERR(bh)); return; } tmp = be32_to_cpu(AFFS_DATA_HEAD(bh)->next); AFFS_DATA_HEAD(bh)->next = 0; affs_adjust_checksum(bh, -tmp); affs_brelse(bh); } } else { AFFS_I(inode)->i_blkcnt = 0; AFFS_I(inode)->i_extcnt = 1; } AFFS_I(inode)->mmu_private = inode->i_size; // unlock cache while (ext_key) { ext_bh = affs_bread(sb, ext_key); size = AFFS_SB(sb)->s_hashsize; if (size > blkcnt - blk) size = blkcnt - blk; for (i = 0; i < size; i++, blk++) affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i))); affs_free_block(sb, ext_key); ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension); affs_brelse(ext_bh); } affs_free_prealloc(inode); } int affs_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync) { struct inode *inode = filp->f_mapping->host; int ret, err; err = file_write_and_wait_range(filp, start, 
end); if (err) return err; inode_lock(inode); ret = write_inode_now(inode, 0); err = sync_blockdev(inode->i_sb->s_bdev); if (!ret) ret = err; inode_unlock(inode); return ret; } const struct file_operations affs_file_operations = { .llseek = generic_file_llseek, .read_iter = generic_file_read_iter, .write_iter = generic_file_write_iter, .mmap = generic_file_mmap, .open = affs_file_open, .release = affs_file_release, .fsync = affs_file_fsync, .splice_read = filemap_splice_read, }; const struct inode_operations affs_file_inode_operations = { .setattr = affs_notify_change, };
linux-master
fs/affs/file.c
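affs_get_block() above resolves a logical file block in two steps: the extent index is block / s_hashsize and the slot inside that extent's pointer table is block % s_hashsize. The sketch below walks that arithmetic for a single byte offset; the 72-pointer table and the 488-byte OFS payload (512-byte block minus the 24-byte data-block header) are assumptions for a classic layout rather than values taken from this file.

/*
 * Sketch of the logical-block arithmetic behind affs_get_block().
 * HASHSIZE and OFS_DATA_SIZE are illustrative assumptions.
 */
#include <stdio.h>

#define HASHSIZE        72ULL                   /* pointers per extent block */
#define OFS_DATA_SIZE   (512ULL - 24ULL)        /* usable bytes per OFS block */

int main(void)
{
        unsigned long long pos  = 100000;               /* byte offset in file */
        unsigned long long blk  = pos / OFS_DATA_SIZE;  /* logical data block  */
        unsigned long long ext  = blk / HASHSIZE;       /* which extent block  */
        unsigned long long slot = blk % HASHSIZE;       /* slot in its table   */
        unsigned long long off  = pos % OFS_DATA_SIZE;  /* offset inside block */

        printf("byte %llu -> data block %llu (extent %llu, slot %llu), +%llu bytes\n",
               pos, blk, ext, slot, off);
        return 0;
}

On FFS the data block carries no per-block header, so the payload divisor is simply the block size; the extent/slot split stays the same.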
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/lockd/svcproc.c * * Lockd server procedures. We don't implement the NLM_*_RES * procedures because we don't use the async procedures. * * Copyright (C) 1996, Olaf Kirch <[email protected]> */ #include <linux/types.h> #include <linux/time.h> #include <linux/lockd/lockd.h> #include <linux/lockd/share.h> #include <linux/sunrpc/svc_xprt.h> #define NLMDBG_FACILITY NLMDBG_CLIENT #ifdef CONFIG_LOCKD_V4 static __be32 cast_to_nlm(__be32 status, u32 vers) { /* Note: status is assumed to be in network byte order !!! */ if (vers != 4){ switch (status) { case nlm_granted: case nlm_lck_denied: case nlm_lck_denied_nolocks: case nlm_lck_blocked: case nlm_lck_denied_grace_period: case nlm_drop_reply: break; case nlm4_deadlock: status = nlm_lck_denied; break; default: status = nlm_lck_denied_nolocks; } } return (status); } #define cast_status(status) (cast_to_nlm(status, rqstp->rq_vers)) #else #define cast_status(status) (status) #endif /* * Obtain client and file from arguments */ static __be32 nlmsvc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp, struct nlm_host **hostp, struct nlm_file **filp) { struct nlm_host *host = NULL; struct nlm_file *file = NULL; struct nlm_lock *lock = &argp->lock; int mode; __be32 error = 0; /* nfsd callbacks must have been installed for this procedure */ if (!nlmsvc_ops) return nlm_lck_denied_nolocks; /* Obtain host handle */ if (!(host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len)) || (argp->monitor && nsm_monitor(host) < 0)) goto no_locks; *hostp = host; /* Obtain file pointer. Not used by FREE_ALL call. */ if (filp != NULL) { error = cast_status(nlm_lookup_file(rqstp, &file, lock)); if (error != 0) goto no_locks; *filp = file; /* Set up the missing parts of the file_lock structure */ mode = lock_to_openmode(&lock->fl); lock->fl.fl_flags = FL_POSIX; lock->fl.fl_file = file->f_file[mode]; lock->fl.fl_pid = current->tgid; lock->fl.fl_lmops = &nlmsvc_lock_operations; nlmsvc_locks_init_private(&lock->fl, host, (pid_t)lock->svid); if (!lock->fl.fl_owner) { /* lockowner allocation has failed */ nlmsvc_release_host(host); return nlm_lck_denied_nolocks; } } return 0; no_locks: nlmsvc_release_host(host); if (error) return error; return nlm_lck_denied_nolocks; } /* * NULL: Test for presence of service */ static __be32 nlmsvc_proc_null(struct svc_rqst *rqstp) { dprintk("lockd: NULL called\n"); return rpc_success; } /* * TEST: Check for conflicting lock */ static __be32 __nlmsvc_proc_test(struct svc_rqst *rqstp, struct nlm_res *resp) { struct nlm_args *argp = rqstp->rq_argp; struct nlm_host *host; struct nlm_file *file; struct nlm_lockowner *test_owner; __be32 rc = rpc_success; dprintk("lockd: TEST called\n"); resp->cookie = argp->cookie; /* Obtain client and file */ if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file))) return resp->status == nlm_drop_reply ? 
rpc_drop_reply :rpc_success; test_owner = argp->lock.fl.fl_owner; /* Now check for conflicting locks */ resp->status = cast_status(nlmsvc_testlock(rqstp, file, host, &argp->lock, &resp->lock, &resp->cookie)); if (resp->status == nlm_drop_reply) rc = rpc_drop_reply; else dprintk("lockd: TEST status %d vers %d\n", ntohl(resp->status), rqstp->rq_vers); nlmsvc_put_lockowner(test_owner); nlmsvc_release_host(host); nlm_release_file(file); return rc; } static __be32 nlmsvc_proc_test(struct svc_rqst *rqstp) { return __nlmsvc_proc_test(rqstp, rqstp->rq_resp); } static __be32 __nlmsvc_proc_lock(struct svc_rqst *rqstp, struct nlm_res *resp) { struct nlm_args *argp = rqstp->rq_argp; struct nlm_host *host; struct nlm_file *file; __be32 rc = rpc_success; dprintk("lockd: LOCK called\n"); resp->cookie = argp->cookie; /* Obtain client and file */ if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file))) return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success; #if 0 /* If supplied state doesn't match current state, we assume it's * an old request that time-warped somehow. Any error return would * do in this case because it's irrelevant anyway. * * NB: We don't retrieve the remote host's state yet. */ if (host->h_nsmstate && host->h_nsmstate != argp->state) { resp->status = nlm_lck_denied_nolocks; } else #endif /* Now try to lock the file */ resp->status = cast_status(nlmsvc_lock(rqstp, file, host, &argp->lock, argp->block, &argp->cookie, argp->reclaim)); if (resp->status == nlm_drop_reply) rc = rpc_drop_reply; else dprintk("lockd: LOCK status %d\n", ntohl(resp->status)); nlmsvc_release_lockowner(&argp->lock); nlmsvc_release_host(host); nlm_release_file(file); return rc; } static __be32 nlmsvc_proc_lock(struct svc_rqst *rqstp) { return __nlmsvc_proc_lock(rqstp, rqstp->rq_resp); } static __be32 __nlmsvc_proc_cancel(struct svc_rqst *rqstp, struct nlm_res *resp) { struct nlm_args *argp = rqstp->rq_argp; struct nlm_host *host; struct nlm_file *file; struct net *net = SVC_NET(rqstp); dprintk("lockd: CANCEL called\n"); resp->cookie = argp->cookie; /* Don't accept requests during grace period */ if (locks_in_grace(net)) { resp->status = nlm_lck_denied_grace_period; return rpc_success; } /* Obtain client and file */ if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file))) return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success; /* Try to cancel request. */ resp->status = cast_status(nlmsvc_cancel_blocked(net, file, &argp->lock)); dprintk("lockd: CANCEL status %d\n", ntohl(resp->status)); nlmsvc_release_lockowner(&argp->lock); nlmsvc_release_host(host); nlm_release_file(file); return rpc_success; } static __be32 nlmsvc_proc_cancel(struct svc_rqst *rqstp) { return __nlmsvc_proc_cancel(rqstp, rqstp->rq_resp); } /* * UNLOCK: release a lock */ static __be32 __nlmsvc_proc_unlock(struct svc_rqst *rqstp, struct nlm_res *resp) { struct nlm_args *argp = rqstp->rq_argp; struct nlm_host *host; struct nlm_file *file; struct net *net = SVC_NET(rqstp); dprintk("lockd: UNLOCK called\n"); resp->cookie = argp->cookie; /* Don't accept new lock requests during grace period */ if (locks_in_grace(net)) { resp->status = nlm_lck_denied_grace_period; return rpc_success; } /* Obtain client and file */ if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file))) return resp->status == nlm_drop_reply ? 
rpc_drop_reply :rpc_success; /* Now try to remove the lock */ resp->status = cast_status(nlmsvc_unlock(net, file, &argp->lock)); dprintk("lockd: UNLOCK status %d\n", ntohl(resp->status)); nlmsvc_release_lockowner(&argp->lock); nlmsvc_release_host(host); nlm_release_file(file); return rpc_success; } static __be32 nlmsvc_proc_unlock(struct svc_rqst *rqstp) { return __nlmsvc_proc_unlock(rqstp, rqstp->rq_resp); } /* * GRANTED: A server calls us to tell that a process' lock request * was granted */ static __be32 __nlmsvc_proc_granted(struct svc_rqst *rqstp, struct nlm_res *resp) { struct nlm_args *argp = rqstp->rq_argp; resp->cookie = argp->cookie; dprintk("lockd: GRANTED called\n"); resp->status = nlmclnt_grant(svc_addr(rqstp), &argp->lock); dprintk("lockd: GRANTED status %d\n", ntohl(resp->status)); return rpc_success; } static __be32 nlmsvc_proc_granted(struct svc_rqst *rqstp) { return __nlmsvc_proc_granted(rqstp, rqstp->rq_resp); } /* * This is the generic lockd callback for async RPC calls */ static void nlmsvc_callback_exit(struct rpc_task *task, void *data) { } void nlmsvc_release_call(struct nlm_rqst *call) { if (!refcount_dec_and_test(&call->a_count)) return; nlmsvc_release_host(call->a_host); kfree(call); } static void nlmsvc_callback_release(void *data) { nlmsvc_release_call(data); } static const struct rpc_call_ops nlmsvc_callback_ops = { .rpc_call_done = nlmsvc_callback_exit, .rpc_release = nlmsvc_callback_release, }; /* * `Async' versions of the above service routines. They aren't really, * because we send the callback before the reply proper. I hope this * doesn't break any clients. */ static __be32 nlmsvc_callback(struct svc_rqst *rqstp, u32 proc, __be32 (*func)(struct svc_rqst *, struct nlm_res *)) { struct nlm_args *argp = rqstp->rq_argp; struct nlm_host *host; struct nlm_rqst *call; __be32 stat; host = nlmsvc_lookup_host(rqstp, argp->lock.caller, argp->lock.len); if (host == NULL) return rpc_system_err; call = nlm_alloc_call(host); nlmsvc_release_host(host); if (call == NULL) return rpc_system_err; stat = func(rqstp, &call->a_res); if (stat != 0) { nlmsvc_release_call(call); return stat; } call->a_flags = RPC_TASK_ASYNC; if (nlm_async_reply(call, proc, &nlmsvc_callback_ops) < 0) return rpc_system_err; return rpc_success; } static __be32 nlmsvc_proc_test_msg(struct svc_rqst *rqstp) { dprintk("lockd: TEST_MSG called\n"); return nlmsvc_callback(rqstp, NLMPROC_TEST_RES, __nlmsvc_proc_test); } static __be32 nlmsvc_proc_lock_msg(struct svc_rqst *rqstp) { dprintk("lockd: LOCK_MSG called\n"); return nlmsvc_callback(rqstp, NLMPROC_LOCK_RES, __nlmsvc_proc_lock); } static __be32 nlmsvc_proc_cancel_msg(struct svc_rqst *rqstp) { dprintk("lockd: CANCEL_MSG called\n"); return nlmsvc_callback(rqstp, NLMPROC_CANCEL_RES, __nlmsvc_proc_cancel); } static __be32 nlmsvc_proc_unlock_msg(struct svc_rqst *rqstp) { dprintk("lockd: UNLOCK_MSG called\n"); return nlmsvc_callback(rqstp, NLMPROC_UNLOCK_RES, __nlmsvc_proc_unlock); } static __be32 nlmsvc_proc_granted_msg(struct svc_rqst *rqstp) { dprintk("lockd: GRANTED_MSG called\n"); return nlmsvc_callback(rqstp, NLMPROC_GRANTED_RES, __nlmsvc_proc_granted); } /* * SHARE: create a DOS share or alter existing share. 
*/ static __be32 nlmsvc_proc_share(struct svc_rqst *rqstp) { struct nlm_args *argp = rqstp->rq_argp; struct nlm_res *resp = rqstp->rq_resp; struct nlm_host *host; struct nlm_file *file; dprintk("lockd: SHARE called\n"); resp->cookie = argp->cookie; /* Don't accept new lock requests during grace period */ if (locks_in_grace(SVC_NET(rqstp)) && !argp->reclaim) { resp->status = nlm_lck_denied_grace_period; return rpc_success; } /* Obtain client and file */ if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file))) return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success; /* Now try to create the share */ resp->status = cast_status(nlmsvc_share_file(host, file, argp)); dprintk("lockd: SHARE status %d\n", ntohl(resp->status)); nlmsvc_release_lockowner(&argp->lock); nlmsvc_release_host(host); nlm_release_file(file); return rpc_success; } /* * UNSHARE: Release a DOS share. */ static __be32 nlmsvc_proc_unshare(struct svc_rqst *rqstp) { struct nlm_args *argp = rqstp->rq_argp; struct nlm_res *resp = rqstp->rq_resp; struct nlm_host *host; struct nlm_file *file; dprintk("lockd: UNSHARE called\n"); resp->cookie = argp->cookie; /* Don't accept requests during grace period */ if (locks_in_grace(SVC_NET(rqstp))) { resp->status = nlm_lck_denied_grace_period; return rpc_success; } /* Obtain client and file */ if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file))) return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success; /* Now try to unshare the file */ resp->status = cast_status(nlmsvc_unshare_file(host, file, argp)); dprintk("lockd: UNSHARE status %d\n", ntohl(resp->status)); nlmsvc_release_lockowner(&argp->lock); nlmsvc_release_host(host); nlm_release_file(file); return rpc_success; } /* * NM_LOCK: Create an unmonitored lock */ static __be32 nlmsvc_proc_nm_lock(struct svc_rqst *rqstp) { struct nlm_args *argp = rqstp->rq_argp; dprintk("lockd: NM_LOCK called\n"); argp->monitor = 0; /* just clean the monitor flag */ return nlmsvc_proc_lock(rqstp); } /* * FREE_ALL: Release all locks and shares held by client */ static __be32 nlmsvc_proc_free_all(struct svc_rqst *rqstp) { struct nlm_args *argp = rqstp->rq_argp; struct nlm_host *host; /* Obtain client */ if (nlmsvc_retrieve_args(rqstp, argp, &host, NULL)) return rpc_success; nlmsvc_free_host_resources(host); nlmsvc_release_host(host); return rpc_success; } /* * SM_NOTIFY: private callback from statd (not part of official NLM proto) */ static __be32 nlmsvc_proc_sm_notify(struct svc_rqst *rqstp) { struct nlm_reboot *argp = rqstp->rq_argp; dprintk("lockd: SM_NOTIFY called\n"); if (!nlm_privileged_requester(rqstp)) { char buf[RPC_MAX_ADDRBUFLEN]; printk(KERN_WARNING "lockd: rejected NSM callback from %s\n", svc_print_addr(rqstp, buf, sizeof(buf))); return rpc_system_err; } nlm_host_rebooted(SVC_NET(rqstp), argp); return rpc_success; } /* * client sent a GRANTED_RES, let's remove the associated block */ static __be32 nlmsvc_proc_granted_res(struct svc_rqst *rqstp) { struct nlm_res *argp = rqstp->rq_argp; if (!nlmsvc_ops) return rpc_success; dprintk("lockd: GRANTED_RES called\n"); nlmsvc_grant_reply(&argp->cookie, argp->status); return rpc_success; } static __be32 nlmsvc_proc_unused(struct svc_rqst *rqstp) { return rpc_proc_unavail; } /* * NLM Server procedures. 
*/ struct nlm_void { int dummy; }; #define Ck (1+XDR_QUADLEN(NLM_MAXCOOKIELEN)) /* cookie */ #define St 1 /* status */ #define No (1+1024/4) /* Net Obj */ #define Rg 2 /* range - offset + size */ const struct svc_procedure nlmsvc_procedures[24] = { [NLMPROC_NULL] = { .pc_func = nlmsvc_proc_null, .pc_decode = nlmsvc_decode_void, .pc_encode = nlmsvc_encode_void, .pc_argsize = sizeof(struct nlm_void), .pc_argzero = sizeof(struct nlm_void), .pc_ressize = sizeof(struct nlm_void), .pc_xdrressize = St, .pc_name = "NULL", }, [NLMPROC_TEST] = { .pc_func = nlmsvc_proc_test, .pc_decode = nlmsvc_decode_testargs, .pc_encode = nlmsvc_encode_testres, .pc_argsize = sizeof(struct nlm_args), .pc_argzero = sizeof(struct nlm_args), .pc_ressize = sizeof(struct nlm_res), .pc_xdrressize = Ck+St+2+No+Rg, .pc_name = "TEST", }, [NLMPROC_LOCK] = { .pc_func = nlmsvc_proc_lock, .pc_decode = nlmsvc_decode_lockargs, .pc_encode = nlmsvc_encode_res, .pc_argsize = sizeof(struct nlm_args), .pc_argzero = sizeof(struct nlm_args), .pc_ressize = sizeof(struct nlm_res), .pc_xdrressize = Ck+St, .pc_name = "LOCK", }, [NLMPROC_CANCEL] = { .pc_func = nlmsvc_proc_cancel, .pc_decode = nlmsvc_decode_cancargs, .pc_encode = nlmsvc_encode_res, .pc_argsize = sizeof(struct nlm_args), .pc_argzero = sizeof(struct nlm_args), .pc_ressize = sizeof(struct nlm_res), .pc_xdrressize = Ck+St, .pc_name = "CANCEL", }, [NLMPROC_UNLOCK] = { .pc_func = nlmsvc_proc_unlock, .pc_decode = nlmsvc_decode_unlockargs, .pc_encode = nlmsvc_encode_res, .pc_argsize = sizeof(struct nlm_args), .pc_argzero = sizeof(struct nlm_args), .pc_ressize = sizeof(struct nlm_res), .pc_xdrressize = Ck+St, .pc_name = "UNLOCK", }, [NLMPROC_GRANTED] = { .pc_func = nlmsvc_proc_granted, .pc_decode = nlmsvc_decode_testargs, .pc_encode = nlmsvc_encode_res, .pc_argsize = sizeof(struct nlm_args), .pc_argzero = sizeof(struct nlm_args), .pc_ressize = sizeof(struct nlm_res), .pc_xdrressize = Ck+St, .pc_name = "GRANTED", }, [NLMPROC_TEST_MSG] = { .pc_func = nlmsvc_proc_test_msg, .pc_decode = nlmsvc_decode_testargs, .pc_encode = nlmsvc_encode_void, .pc_argsize = sizeof(struct nlm_args), .pc_argzero = sizeof(struct nlm_args), .pc_ressize = sizeof(struct nlm_void), .pc_xdrressize = St, .pc_name = "TEST_MSG", }, [NLMPROC_LOCK_MSG] = { .pc_func = nlmsvc_proc_lock_msg, .pc_decode = nlmsvc_decode_lockargs, .pc_encode = nlmsvc_encode_void, .pc_argsize = sizeof(struct nlm_args), .pc_argzero = sizeof(struct nlm_args), .pc_ressize = sizeof(struct nlm_void), .pc_xdrressize = St, .pc_name = "LOCK_MSG", }, [NLMPROC_CANCEL_MSG] = { .pc_func = nlmsvc_proc_cancel_msg, .pc_decode = nlmsvc_decode_cancargs, .pc_encode = nlmsvc_encode_void, .pc_argsize = sizeof(struct nlm_args), .pc_argzero = sizeof(struct nlm_args), .pc_ressize = sizeof(struct nlm_void), .pc_xdrressize = St, .pc_name = "CANCEL_MSG", }, [NLMPROC_UNLOCK_MSG] = { .pc_func = nlmsvc_proc_unlock_msg, .pc_decode = nlmsvc_decode_unlockargs, .pc_encode = nlmsvc_encode_void, .pc_argsize = sizeof(struct nlm_args), .pc_argzero = sizeof(struct nlm_args), .pc_ressize = sizeof(struct nlm_void), .pc_xdrressize = St, .pc_name = "UNLOCK_MSG", }, [NLMPROC_GRANTED_MSG] = { .pc_func = nlmsvc_proc_granted_msg, .pc_decode = nlmsvc_decode_testargs, .pc_encode = nlmsvc_encode_void, .pc_argsize = sizeof(struct nlm_args), .pc_argzero = sizeof(struct nlm_args), .pc_ressize = sizeof(struct nlm_void), .pc_xdrressize = St, .pc_name = "GRANTED_MSG", }, [NLMPROC_TEST_RES] = { .pc_func = nlmsvc_proc_null, .pc_decode = nlmsvc_decode_void, .pc_encode = nlmsvc_encode_void, 
.pc_argsize = sizeof(struct nlm_res), .pc_argzero = sizeof(struct nlm_res), .pc_ressize = sizeof(struct nlm_void), .pc_xdrressize = St, .pc_name = "TEST_RES", }, [NLMPROC_LOCK_RES] = { .pc_func = nlmsvc_proc_null, .pc_decode = nlmsvc_decode_void, .pc_encode = nlmsvc_encode_void, .pc_argsize = sizeof(struct nlm_res), .pc_argzero = sizeof(struct nlm_res), .pc_ressize = sizeof(struct nlm_void), .pc_xdrressize = St, .pc_name = "LOCK_RES", }, [NLMPROC_CANCEL_RES] = { .pc_func = nlmsvc_proc_null, .pc_decode = nlmsvc_decode_void, .pc_encode = nlmsvc_encode_void, .pc_argsize = sizeof(struct nlm_res), .pc_argzero = sizeof(struct nlm_res), .pc_ressize = sizeof(struct nlm_void), .pc_xdrressize = St, .pc_name = "CANCEL_RES", }, [NLMPROC_UNLOCK_RES] = { .pc_func = nlmsvc_proc_null, .pc_decode = nlmsvc_decode_void, .pc_encode = nlmsvc_encode_void, .pc_argsize = sizeof(struct nlm_res), .pc_argzero = sizeof(struct nlm_res), .pc_ressize = sizeof(struct nlm_void), .pc_xdrressize = St, .pc_name = "UNLOCK_RES", }, [NLMPROC_GRANTED_RES] = { .pc_func = nlmsvc_proc_granted_res, .pc_decode = nlmsvc_decode_res, .pc_encode = nlmsvc_encode_void, .pc_argsize = sizeof(struct nlm_res), .pc_argzero = sizeof(struct nlm_res), .pc_ressize = sizeof(struct nlm_void), .pc_xdrressize = St, .pc_name = "GRANTED_RES", }, [NLMPROC_NSM_NOTIFY] = { .pc_func = nlmsvc_proc_sm_notify, .pc_decode = nlmsvc_decode_reboot, .pc_encode = nlmsvc_encode_void, .pc_argsize = sizeof(struct nlm_reboot), .pc_argzero = sizeof(struct nlm_reboot), .pc_ressize = sizeof(struct nlm_void), .pc_xdrressize = St, .pc_name = "SM_NOTIFY", }, [17] = { .pc_func = nlmsvc_proc_unused, .pc_decode = nlmsvc_decode_void, .pc_encode = nlmsvc_encode_void, .pc_argsize = sizeof(struct nlm_void), .pc_argzero = sizeof(struct nlm_void), .pc_ressize = sizeof(struct nlm_void), .pc_xdrressize = St, .pc_name = "UNUSED", }, [18] = { .pc_func = nlmsvc_proc_unused, .pc_decode = nlmsvc_decode_void, .pc_encode = nlmsvc_encode_void, .pc_argsize = sizeof(struct nlm_void), .pc_argzero = sizeof(struct nlm_void), .pc_ressize = sizeof(struct nlm_void), .pc_xdrressize = St, .pc_name = "UNUSED", }, [19] = { .pc_func = nlmsvc_proc_unused, .pc_decode = nlmsvc_decode_void, .pc_encode = nlmsvc_encode_void, .pc_argsize = sizeof(struct nlm_void), .pc_argzero = sizeof(struct nlm_void), .pc_ressize = sizeof(struct nlm_void), .pc_xdrressize = St, .pc_name = "UNUSED", }, [NLMPROC_SHARE] = { .pc_func = nlmsvc_proc_share, .pc_decode = nlmsvc_decode_shareargs, .pc_encode = nlmsvc_encode_shareres, .pc_argsize = sizeof(struct nlm_args), .pc_argzero = sizeof(struct nlm_args), .pc_ressize = sizeof(struct nlm_res), .pc_xdrressize = Ck+St+1, .pc_name = "SHARE", }, [NLMPROC_UNSHARE] = { .pc_func = nlmsvc_proc_unshare, .pc_decode = nlmsvc_decode_shareargs, .pc_encode = nlmsvc_encode_shareres, .pc_argsize = sizeof(struct nlm_args), .pc_argzero = sizeof(struct nlm_args), .pc_ressize = sizeof(struct nlm_res), .pc_xdrressize = Ck+St+1, .pc_name = "UNSHARE", }, [NLMPROC_NM_LOCK] = { .pc_func = nlmsvc_proc_nm_lock, .pc_decode = nlmsvc_decode_lockargs, .pc_encode = nlmsvc_encode_res, .pc_argsize = sizeof(struct nlm_args), .pc_argzero = sizeof(struct nlm_args), .pc_ressize = sizeof(struct nlm_res), .pc_xdrressize = Ck+St, .pc_name = "NM_LOCK", }, [NLMPROC_FREE_ALL] = { .pc_func = nlmsvc_proc_free_all, .pc_decode = nlmsvc_decode_notify, .pc_encode = nlmsvc_encode_void, .pc_argsize = sizeof(struct nlm_args), .pc_argzero = sizeof(struct nlm_args), .pc_ressize = sizeof(struct nlm_void), .pc_xdrressize = 0, .pc_name = 
"FREE_ALL", }, };
linux-master
fs/lockd/svcproc.c
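The pc_xdrressize fields in nlmsvc_procedures above are upper bounds on each reply, counted in 32-bit XDR words and assembled from the Ck/St/No/Rg macros defined just before the table. A small standalone sketch of that arithmetic follows; NLM_MAXCOOKIELEN is assumed to be 32 bytes here.

/*
 * Sketch of the reply-size bookkeeping behind pc_xdrressize.
 * All sizes are 32-bit XDR words; NLM_MAXCOOKIELEN is an assumption.
 */
#include <stdio.h>

#define XDR_QUADLEN(n)   (((n) + 3) >> 2)       /* bytes -> 32-bit words */
#define NLM_MAXCOOKIELEN 32                     /* assumed cookie limit  */

#define Ck (1 + XDR_QUADLEN(NLM_MAXCOOKIELEN))  /* length word + cookie  */
#define St 1                                    /* status                */
#define No (1 + 1024 / 4)                       /* length word + net obj */
#define Rg 2                                    /* offset + length       */

int main(void)
{
        /* TEST may return a conflicting-holder description ... */
        printf("TEST reply bound: %d words (%d bytes)\n",
               Ck + St + 2 + No + Rg, 4 * (Ck + St + 2 + No + Rg));
        /* ... while LOCK/CANCEL/UNLOCK replies are just cookie + status. */
        printf("LOCK reply bound: %d words (%d bytes)\n",
               Ck + St, 4 * (Ck + St));
        return 0;
}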
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/lockd/clnt4xdr.c * * XDR functions to encode/decode NLM version 4 RPC arguments and results. * * NLM client-side only. * * Copyright (C) 2010, Oracle. All rights reserved. */ #include <linux/types.h> #include <linux/sunrpc/xdr.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/stats.h> #include <linux/lockd/lockd.h> #include <uapi/linux/nfs3.h> #define NLMDBG_FACILITY NLMDBG_XDR #if (NLMCLNT_OHSIZE > XDR_MAX_NETOBJ) # error "NLM host name cannot be larger than XDR_MAX_NETOBJ!" #endif #if (NLMCLNT_OHSIZE > NLM_MAXSTRLEN) # error "NLM host name cannot be larger than NLM's maximum string length!" #endif /* * Declare the space requirements for NLM arguments and replies as * number of 32bit-words */ #define NLM4_void_sz (0) #define NLM4_cookie_sz (1+(NLM_MAXCOOKIELEN>>2)) #define NLM4_caller_sz (1+(NLMCLNT_OHSIZE>>2)) #define NLM4_owner_sz (1+(NLMCLNT_OHSIZE>>2)) #define NLM4_fhandle_sz (1+(NFS3_FHSIZE>>2)) #define NLM4_lock_sz (5+NLM4_caller_sz+NLM4_owner_sz+NLM4_fhandle_sz) #define NLM4_holder_sz (6+NLM4_owner_sz) #define NLM4_testargs_sz (NLM4_cookie_sz+1+NLM4_lock_sz) #define NLM4_lockargs_sz (NLM4_cookie_sz+4+NLM4_lock_sz) #define NLM4_cancargs_sz (NLM4_cookie_sz+2+NLM4_lock_sz) #define NLM4_unlockargs_sz (NLM4_cookie_sz+NLM4_lock_sz) #define NLM4_testres_sz (NLM4_cookie_sz+1+NLM4_holder_sz) #define NLM4_res_sz (NLM4_cookie_sz+1) #define NLM4_norep_sz (0) static s64 loff_t_to_s64(loff_t offset) { s64 res; if (offset >= NLM4_OFFSET_MAX) res = NLM4_OFFSET_MAX; else if (offset <= -NLM4_OFFSET_MAX) res = -NLM4_OFFSET_MAX; else res = offset; return res; } static void nlm4_compute_offsets(const struct nlm_lock *lock, u64 *l_offset, u64 *l_len) { const struct file_lock *fl = &lock->fl; *l_offset = loff_t_to_s64(fl->fl_start); if (fl->fl_end == OFFSET_MAX) *l_len = 0; else *l_len = loff_t_to_s64(fl->fl_end - fl->fl_start + 1); } /* * Encode/decode NLMv4 basic data types * * Basic NLMv4 data types are defined in Appendix II, section 6.1.4 * of RFC 1813: "NFS Version 3 Protocol Specification" and in Chapter * 10 of X/Open's "Protocols for Interworking: XNFS, Version 3W". * * Not all basic data types have their own encoding and decoding * functions. For run-time efficiency, some data types are encoded * or decoded inline. */ static void encode_bool(struct xdr_stream *xdr, const int value) { __be32 *p; p = xdr_reserve_space(xdr, 4); *p = value ? 
xdr_one : xdr_zero; } static void encode_int32(struct xdr_stream *xdr, const s32 value) { __be32 *p; p = xdr_reserve_space(xdr, 4); *p = cpu_to_be32(value); } /* * typedef opaque netobj<MAXNETOBJ_SZ> */ static void encode_netobj(struct xdr_stream *xdr, const u8 *data, const unsigned int length) { __be32 *p; p = xdr_reserve_space(xdr, 4 + length); xdr_encode_opaque(p, data, length); } static int decode_netobj(struct xdr_stream *xdr, struct xdr_netobj *obj) { ssize_t ret; ret = xdr_stream_decode_opaque_inline(xdr, (void *)&obj->data, XDR_MAX_NETOBJ); if (unlikely(ret < 0)) return -EIO; obj->len = ret; return 0; } /* * netobj cookie; */ static void encode_cookie(struct xdr_stream *xdr, const struct nlm_cookie *cookie) { encode_netobj(xdr, (u8 *)&cookie->data, cookie->len); } static int decode_cookie(struct xdr_stream *xdr, struct nlm_cookie *cookie) { u32 length; __be32 *p; p = xdr_inline_decode(xdr, 4); if (unlikely(p == NULL)) goto out_overflow; length = be32_to_cpup(p++); /* apparently HPUX can return empty cookies */ if (length == 0) goto out_hpux; if (length > NLM_MAXCOOKIELEN) goto out_size; p = xdr_inline_decode(xdr, length); if (unlikely(p == NULL)) goto out_overflow; cookie->len = length; memcpy(cookie->data, p, length); return 0; out_hpux: cookie->len = 4; memset(cookie->data, 0, 4); return 0; out_size: dprintk("NFS: returned cookie was too long: %u\n", length); return -EIO; out_overflow: return -EIO; } /* * netobj fh; */ static void encode_fh(struct xdr_stream *xdr, const struct nfs_fh *fh) { encode_netobj(xdr, (u8 *)&fh->data, fh->size); } /* * enum nlm4_stats { * NLM4_GRANTED = 0, * NLM4_DENIED = 1, * NLM4_DENIED_NOLOCKS = 2, * NLM4_BLOCKED = 3, * NLM4_DENIED_GRACE_PERIOD = 4, * NLM4_DEADLCK = 5, * NLM4_ROFS = 6, * NLM4_STALE_FH = 7, * NLM4_FBIG = 8, * NLM4_FAILED = 9 * }; * * struct nlm4_stat { * nlm4_stats stat; * }; * * NB: we don't swap bytes for the NLM status values. The upper * layers deal directly with the status value in network byte * order. 
*/ static void encode_nlm4_stat(struct xdr_stream *xdr, const __be32 stat) { __be32 *p; BUG_ON(be32_to_cpu(stat) > NLM_FAILED); p = xdr_reserve_space(xdr, 4); *p = stat; } static int decode_nlm4_stat(struct xdr_stream *xdr, __be32 *stat) { __be32 *p; p = xdr_inline_decode(xdr, 4); if (unlikely(p == NULL)) goto out_overflow; if (unlikely(ntohl(*p) > ntohl(nlm4_failed))) goto out_bad_xdr; *stat = *p; return 0; out_bad_xdr: dprintk("%s: server returned invalid nlm4_stats value: %u\n", __func__, be32_to_cpup(p)); return -EIO; out_overflow: return -EIO; } /* * struct nlm4_holder { * bool exclusive; * int32 svid; * netobj oh; * uint64 l_offset; * uint64 l_len; * }; */ static void encode_nlm4_holder(struct xdr_stream *xdr, const struct nlm_res *result) { const struct nlm_lock *lock = &result->lock; u64 l_offset, l_len; __be32 *p; encode_bool(xdr, lock->fl.fl_type == F_RDLCK); encode_int32(xdr, lock->svid); encode_netobj(xdr, lock->oh.data, lock->oh.len); p = xdr_reserve_space(xdr, 4 + 4); nlm4_compute_offsets(lock, &l_offset, &l_len); p = xdr_encode_hyper(p, l_offset); xdr_encode_hyper(p, l_len); } static int decode_nlm4_holder(struct xdr_stream *xdr, struct nlm_res *result) { struct nlm_lock *lock = &result->lock; struct file_lock *fl = &lock->fl; u64 l_offset, l_len; u32 exclusive; int error; __be32 *p; memset(lock, 0, sizeof(*lock)); locks_init_lock(fl); p = xdr_inline_decode(xdr, 4 + 4); if (unlikely(p == NULL)) goto out_overflow; exclusive = be32_to_cpup(p++); lock->svid = be32_to_cpup(p); fl->fl_pid = (pid_t)lock->svid; error = decode_netobj(xdr, &lock->oh); if (unlikely(error)) goto out; p = xdr_inline_decode(xdr, 8 + 8); if (unlikely(p == NULL)) goto out_overflow; fl->fl_flags = FL_POSIX; fl->fl_type = exclusive != 0 ? F_WRLCK : F_RDLCK; p = xdr_decode_hyper(p, &l_offset); xdr_decode_hyper(p, &l_len); nlm4svc_set_file_lock_range(fl, l_offset, l_len); error = 0; out: return error; out_overflow: return -EIO; } /* * string caller_name<LM_MAXSTRLEN>; */ static void encode_caller_name(struct xdr_stream *xdr, const char *name) { /* NB: client-side does not set lock->len */ u32 length = strlen(name); __be32 *p; p = xdr_reserve_space(xdr, 4 + length); xdr_encode_opaque(p, name, length); } /* * struct nlm4_lock { * string caller_name<LM_MAXSTRLEN>; * netobj fh; * netobj oh; * int32 svid; * uint64 l_offset; * uint64 l_len; * }; */ static void encode_nlm4_lock(struct xdr_stream *xdr, const struct nlm_lock *lock) { u64 l_offset, l_len; __be32 *p; encode_caller_name(xdr, lock->caller); encode_fh(xdr, &lock->fh); encode_netobj(xdr, lock->oh.data, lock->oh.len); p = xdr_reserve_space(xdr, 4 + 8 + 8); *p++ = cpu_to_be32(lock->svid); nlm4_compute_offsets(lock, &l_offset, &l_len); p = xdr_encode_hyper(p, l_offset); xdr_encode_hyper(p, l_len); } /* * NLMv4 XDR encode functions * * NLMv4 argument types are defined in Appendix II of RFC 1813: * "NFS Version 3 Protocol Specification" and Chapter 10 of X/Open's * "Protocols for Interworking: XNFS, Version 3W". 
*/ /* * struct nlm4_testargs { * netobj cookie; * bool exclusive; * struct nlm4_lock alock; * }; */ static void nlm4_xdr_enc_testargs(struct rpc_rqst *req, struct xdr_stream *xdr, const void *data) { const struct nlm_args *args = data; const struct nlm_lock *lock = &args->lock; encode_cookie(xdr, &args->cookie); encode_bool(xdr, lock->fl.fl_type == F_WRLCK); encode_nlm4_lock(xdr, lock); } /* * struct nlm4_lockargs { * netobj cookie; * bool block; * bool exclusive; * struct nlm4_lock alock; * bool reclaim; * int state; * }; */ static void nlm4_xdr_enc_lockargs(struct rpc_rqst *req, struct xdr_stream *xdr, const void *data) { const struct nlm_args *args = data; const struct nlm_lock *lock = &args->lock; encode_cookie(xdr, &args->cookie); encode_bool(xdr, args->block); encode_bool(xdr, lock->fl.fl_type == F_WRLCK); encode_nlm4_lock(xdr, lock); encode_bool(xdr, args->reclaim); encode_int32(xdr, args->state); } /* * struct nlm4_cancargs { * netobj cookie; * bool block; * bool exclusive; * struct nlm4_lock alock; * }; */ static void nlm4_xdr_enc_cancargs(struct rpc_rqst *req, struct xdr_stream *xdr, const void *data) { const struct nlm_args *args = data; const struct nlm_lock *lock = &args->lock; encode_cookie(xdr, &args->cookie); encode_bool(xdr, args->block); encode_bool(xdr, lock->fl.fl_type == F_WRLCK); encode_nlm4_lock(xdr, lock); } /* * struct nlm4_unlockargs { * netobj cookie; * struct nlm4_lock alock; * }; */ static void nlm4_xdr_enc_unlockargs(struct rpc_rqst *req, struct xdr_stream *xdr, const void *data) { const struct nlm_args *args = data; const struct nlm_lock *lock = &args->lock; encode_cookie(xdr, &args->cookie); encode_nlm4_lock(xdr, lock); } /* * struct nlm4_res { * netobj cookie; * nlm4_stat stat; * }; */ static void nlm4_xdr_enc_res(struct rpc_rqst *req, struct xdr_stream *xdr, const void *data) { const struct nlm_res *result = data; encode_cookie(xdr, &result->cookie); encode_nlm4_stat(xdr, result->status); } /* * union nlm4_testrply switch (nlm4_stats stat) { * case NLM4_DENIED: * struct nlm4_holder holder; * default: * void; * }; * * struct nlm4_testres { * netobj cookie; * nlm4_testrply test_stat; * }; */ static void nlm4_xdr_enc_testres(struct rpc_rqst *req, struct xdr_stream *xdr, const void *data) { const struct nlm_res *result = data; encode_cookie(xdr, &result->cookie); encode_nlm4_stat(xdr, result->status); if (result->status == nlm_lck_denied) encode_nlm4_holder(xdr, result); } /* * NLMv4 XDR decode functions * * NLMv4 argument types are defined in Appendix II of RFC 1813: * "NFS Version 3 Protocol Specification" and Chapter 10 of X/Open's * "Protocols for Interworking: XNFS, Version 3W". 
*/ /* * union nlm4_testrply switch (nlm4_stats stat) { * case NLM4_DENIED: * struct nlm4_holder holder; * default: * void; * }; * * struct nlm4_testres { * netobj cookie; * nlm4_testrply test_stat; * }; */ static int decode_nlm4_testrply(struct xdr_stream *xdr, struct nlm_res *result) { int error; error = decode_nlm4_stat(xdr, &result->status); if (unlikely(error)) goto out; if (result->status == nlm_lck_denied) error = decode_nlm4_holder(xdr, result); out: return error; } static int nlm4_xdr_dec_testres(struct rpc_rqst *req, struct xdr_stream *xdr, void *data) { struct nlm_res *result = data; int error; error = decode_cookie(xdr, &result->cookie); if (unlikely(error)) goto out; error = decode_nlm4_testrply(xdr, result); out: return error; } /* * struct nlm4_res { * netobj cookie; * nlm4_stat stat; * }; */ static int nlm4_xdr_dec_res(struct rpc_rqst *req, struct xdr_stream *xdr, void *data) { struct nlm_res *result = data; int error; error = decode_cookie(xdr, &result->cookie); if (unlikely(error)) goto out; error = decode_nlm4_stat(xdr, &result->status); out: return error; } /* * For NLM, a void procedure really returns nothing */ #define nlm4_xdr_dec_norep NULL #define PROC(proc, argtype, restype) \ [NLMPROC_##proc] = { \ .p_proc = NLMPROC_##proc, \ .p_encode = nlm4_xdr_enc_##argtype, \ .p_decode = nlm4_xdr_dec_##restype, \ .p_arglen = NLM4_##argtype##_sz, \ .p_replen = NLM4_##restype##_sz, \ .p_statidx = NLMPROC_##proc, \ .p_name = #proc, \ } static const struct rpc_procinfo nlm4_procedures[] = { PROC(TEST, testargs, testres), PROC(LOCK, lockargs, res), PROC(CANCEL, cancargs, res), PROC(UNLOCK, unlockargs, res), PROC(GRANTED, testargs, res), PROC(TEST_MSG, testargs, norep), PROC(LOCK_MSG, lockargs, norep), PROC(CANCEL_MSG, cancargs, norep), PROC(UNLOCK_MSG, unlockargs, norep), PROC(GRANTED_MSG, testargs, norep), PROC(TEST_RES, testres, norep), PROC(LOCK_RES, res, norep), PROC(CANCEL_RES, res, norep), PROC(UNLOCK_RES, res, norep), PROC(GRANTED_RES, res, norep), }; static unsigned int nlm_version4_counts[ARRAY_SIZE(nlm4_procedures)]; const struct rpc_version nlm_version4 = { .number = 4, .nrprocs = ARRAY_SIZE(nlm4_procedures), .procs = nlm4_procedures, .counts = nlm_version4_counts, };
linux-master
fs/lockd/clnt4xdr.c
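The NLMv4 encoders in clnt4xdr.c above express a byte range as a 64-bit l_offset/l_len pair (built by nlm4_compute_offsets() and written with xdr_encode_hyper()), where a length of zero means the lock runs to end of file. Below is a minimal standalone user-space sketch of that arithmetic and of the big-endian "hyper" layout; OFFSET_MAX_DEMO and the demo_* helpers are invented stand-ins for the kernel's OFFSET_MAX and XDR helpers, not kernel APIs.

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

#define OFFSET_MAX_DEMO INT64_MAX

/* Map a POSIX-style range [start, end] to NLMv4 l_offset/l_len;
 * a zero l_len on the wire means "to end of file". */
static void demo_compute_offsets(int64_t start, int64_t end,
                                 uint64_t *l_offset, uint64_t *l_len)
{
    *l_offset = (uint64_t)start;
    if (end == OFFSET_MAX_DEMO)
        *l_len = 0;
    else
        *l_len = (uint64_t)(end - start + 1);
}

/* Emit a 64-bit XDR "hyper" as two big-endian 32-bit words,
 * most significant word first. */
static uint32_t *demo_encode_hyper(uint32_t *p, uint64_t val)
{
    *p++ = htonl((uint32_t)(val >> 32));
    *p++ = htonl((uint32_t)(val & 0xffffffffu));
    return p;
}

int main(void)
{
    uint64_t l_offset, l_len;
    uint32_t buf[4], *p = buf;

    demo_compute_offsets(100, OFFSET_MAX_DEMO, &l_offset, &l_len);
    p = demo_encode_hyper(p, l_offset);
    demo_encode_hyper(p, l_len);
    printf("l_offset=%llu l_len=%llu (0 == whole file)\n",
           (unsigned long long)l_offset, (unsigned long long)l_len);
    return 0;
}

Compiled on its own (for example with cc demo.c), the sketch prints a zero l_len for a whole-file range, which is the convention the encoders above rely on.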
// SPDX-License-Identifier: GPL-2.0
#define CREATE_TRACE_POINTS
#include "trace.h"
linux-master
fs/lockd/trace.c
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/lockd/xdr.c * * XDR support for lockd and the lock client. * * Copyright (C) 1995, 1996 Olaf Kirch <[email protected]> */ #include <linux/types.h> #include <linux/sched.h> #include <linux/nfs.h> #include <linux/sunrpc/xdr.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/svc.h> #include <linux/sunrpc/stats.h> #include <linux/lockd/lockd.h> #include <uapi/linux/nfs2.h> #include "svcxdr.h" static inline loff_t s32_to_loff_t(__s32 offset) { return (loff_t)offset; } static inline __s32 loff_t_to_s32(loff_t offset) { __s32 res; if (offset >= NLM_OFFSET_MAX) res = NLM_OFFSET_MAX; else if (offset <= -NLM_OFFSET_MAX) res = -NLM_OFFSET_MAX; else res = offset; return res; } /* * NLM file handles are defined by specification to be a variable-length * XDR opaque no longer than 1024 bytes. However, this implementation * constrains their length to exactly the length of an NFSv2 file * handle. */ static bool svcxdr_decode_fhandle(struct xdr_stream *xdr, struct nfs_fh *fh) { __be32 *p; u32 len; if (xdr_stream_decode_u32(xdr, &len) < 0) return false; if (len != NFS2_FHSIZE) return false; p = xdr_inline_decode(xdr, len); if (!p) return false; fh->size = NFS2_FHSIZE; memcpy(fh->data, p, len); memset(fh->data + NFS2_FHSIZE, 0, sizeof(fh->data) - NFS2_FHSIZE); return true; } static bool svcxdr_decode_lock(struct xdr_stream *xdr, struct nlm_lock *lock) { struct file_lock *fl = &lock->fl; s32 start, len, end; if (!svcxdr_decode_string(xdr, &lock->caller, &lock->len)) return false; if (!svcxdr_decode_fhandle(xdr, &lock->fh)) return false; if (!svcxdr_decode_owner(xdr, &lock->oh)) return false; if (xdr_stream_decode_u32(xdr, &lock->svid) < 0) return false; if (xdr_stream_decode_u32(xdr, &start) < 0) return false; if (xdr_stream_decode_u32(xdr, &len) < 0) return false; locks_init_lock(fl); fl->fl_flags = FL_POSIX; fl->fl_type = F_RDLCK; end = start + len - 1; fl->fl_start = s32_to_loff_t(start); if (len == 0 || end < 0) fl->fl_end = OFFSET_MAX; else fl->fl_end = s32_to_loff_t(end); return true; } static bool svcxdr_encode_holder(struct xdr_stream *xdr, const struct nlm_lock *lock) { const struct file_lock *fl = &lock->fl; s32 start, len; /* exclusive */ if (xdr_stream_encode_bool(xdr, fl->fl_type != F_RDLCK) < 0) return false; if (xdr_stream_encode_u32(xdr, lock->svid) < 0) return false; if (!svcxdr_encode_owner(xdr, &lock->oh)) return false; start = loff_t_to_s32(fl->fl_start); if (fl->fl_end == OFFSET_MAX) len = 0; else len = loff_t_to_s32(fl->fl_end - fl->fl_start + 1); if (xdr_stream_encode_u32(xdr, start) < 0) return false; if (xdr_stream_encode_u32(xdr, len) < 0) return false; return true; } static bool svcxdr_encode_testrply(struct xdr_stream *xdr, const struct nlm_res *resp) { if (!svcxdr_encode_stats(xdr, resp->status)) return false; switch (resp->status) { case nlm_lck_denied: if (!svcxdr_encode_holder(xdr, &resp->lock)) return false; } return true; } /* * Decode Call arguments */ bool nlmsvc_decode_void(struct svc_rqst *rqstp, struct xdr_stream *xdr) { return true; } bool nlmsvc_decode_testargs(struct svc_rqst *rqstp, struct xdr_stream *xdr) { struct nlm_args *argp = rqstp->rq_argp; u32 exclusive; if (!svcxdr_decode_cookie(xdr, &argp->cookie)) return false; if (xdr_stream_decode_bool(xdr, &exclusive) < 0) return false; if (!svcxdr_decode_lock(xdr, &argp->lock)) return false; if (exclusive) argp->lock.fl.fl_type = F_WRLCK; return true; } bool nlmsvc_decode_lockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr) { struct nlm_args *argp = 
rqstp->rq_argp; u32 exclusive; if (!svcxdr_decode_cookie(xdr, &argp->cookie)) return false; if (xdr_stream_decode_bool(xdr, &argp->block) < 0) return false; if (xdr_stream_decode_bool(xdr, &exclusive) < 0) return false; if (!svcxdr_decode_lock(xdr, &argp->lock)) return false; if (exclusive) argp->lock.fl.fl_type = F_WRLCK; if (xdr_stream_decode_bool(xdr, &argp->reclaim) < 0) return false; if (xdr_stream_decode_u32(xdr, &argp->state) < 0) return false; argp->monitor = 1; /* monitor client by default */ return true; } bool nlmsvc_decode_cancargs(struct svc_rqst *rqstp, struct xdr_stream *xdr) { struct nlm_args *argp = rqstp->rq_argp; u32 exclusive; if (!svcxdr_decode_cookie(xdr, &argp->cookie)) return false; if (xdr_stream_decode_bool(xdr, &argp->block) < 0) return false; if (xdr_stream_decode_bool(xdr, &exclusive) < 0) return false; if (!svcxdr_decode_lock(xdr, &argp->lock)) return false; if (exclusive) argp->lock.fl.fl_type = F_WRLCK; return true; } bool nlmsvc_decode_unlockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr) { struct nlm_args *argp = rqstp->rq_argp; if (!svcxdr_decode_cookie(xdr, &argp->cookie)) return false; if (!svcxdr_decode_lock(xdr, &argp->lock)) return false; argp->lock.fl.fl_type = F_UNLCK; return true; } bool nlmsvc_decode_res(struct svc_rqst *rqstp, struct xdr_stream *xdr) { struct nlm_res *resp = rqstp->rq_argp; if (!svcxdr_decode_cookie(xdr, &resp->cookie)) return false; if (!svcxdr_decode_stats(xdr, &resp->status)) return false; return true; } bool nlmsvc_decode_reboot(struct svc_rqst *rqstp, struct xdr_stream *xdr) { struct nlm_reboot *argp = rqstp->rq_argp; __be32 *p; u32 len; if (xdr_stream_decode_u32(xdr, &len) < 0) return false; if (len > SM_MAXSTRLEN) return false; p = xdr_inline_decode(xdr, len); if (!p) return false; argp->len = len; argp->mon = (char *)p; if (xdr_stream_decode_u32(xdr, &argp->state) < 0) return false; p = xdr_inline_decode(xdr, SM_PRIV_SIZE); if (!p) return false; memcpy(&argp->priv.data, p, sizeof(argp->priv.data)); return true; } bool nlmsvc_decode_shareargs(struct svc_rqst *rqstp, struct xdr_stream *xdr) { struct nlm_args *argp = rqstp->rq_argp; struct nlm_lock *lock = &argp->lock; memset(lock, 0, sizeof(*lock)); locks_init_lock(&lock->fl); lock->svid = ~(u32)0; if (!svcxdr_decode_cookie(xdr, &argp->cookie)) return false; if (!svcxdr_decode_string(xdr, &lock->caller, &lock->len)) return false; if (!svcxdr_decode_fhandle(xdr, &lock->fh)) return false; if (!svcxdr_decode_owner(xdr, &lock->oh)) return false; /* XXX: Range checks are missing in the original code */ if (xdr_stream_decode_u32(xdr, &argp->fsm_mode) < 0) return false; if (xdr_stream_decode_u32(xdr, &argp->fsm_access) < 0) return false; return true; } bool nlmsvc_decode_notify(struct svc_rqst *rqstp, struct xdr_stream *xdr) { struct nlm_args *argp = rqstp->rq_argp; struct nlm_lock *lock = &argp->lock; if (!svcxdr_decode_string(xdr, &lock->caller, &lock->len)) return false; if (xdr_stream_decode_u32(xdr, &argp->state) < 0) return false; return true; } /* * Encode Reply results */ bool nlmsvc_encode_void(struct svc_rqst *rqstp, struct xdr_stream *xdr) { return true; } bool nlmsvc_encode_testres(struct svc_rqst *rqstp, struct xdr_stream *xdr) { struct nlm_res *resp = rqstp->rq_resp; return svcxdr_encode_cookie(xdr, &resp->cookie) && svcxdr_encode_testrply(xdr, resp); } bool nlmsvc_encode_res(struct svc_rqst *rqstp, struct xdr_stream *xdr) { struct nlm_res *resp = rqstp->rq_resp; return svcxdr_encode_cookie(xdr, &resp->cookie) && svcxdr_encode_stats(xdr, resp->status); } bool 
nlmsvc_encode_shareres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
	struct nlm_res *resp = rqstp->rq_resp;

	if (!svcxdr_encode_cookie(xdr, &resp->cookie))
		return false;
	if (!svcxdr_encode_stats(xdr, resp->status))
		return false;
	/* sequence */
	if (xdr_stream_encode_u32(xdr, 0) < 0)
		return false;

	return true;
}
linux-master
fs/lockd/xdr.c
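svcxdr_decode_lock() in xdr.c above converts the 32-bit l_offset/l_len fields of an NLMv1/v3 request into a struct file_lock range, treating a zero length, or an end offset that wraps past the signed 32-bit range, as a lock that extends to end of file. The following is a minimal standalone user-space sketch of that conversion; the demo_* names and OFFSET_MAX_DEMO are stand-ins, and the sketch widens to 64-bit arithmetic where the kernel relies on wrapping s32 math for its "end < 0" test.

#include <stdint.h>
#include <stdio.h>

#define OFFSET_MAX_DEMO INT64_MAX

/* Widen to 64 bits for the demo; an out-of-range or zero-length
 * request becomes a whole-file lock. */
static void demo_decode_range(int32_t start, uint32_t len,
                              int64_t *fl_start, int64_t *fl_end)
{
    int64_t end = (int64_t)start + (int64_t)len - 1;

    *fl_start = start;
    if (len == 0 || end < 0 || end > INT32_MAX)
        *fl_end = OFFSET_MAX_DEMO;      /* lock extends to end of file */
    else
        *fl_end = end;
}

int main(void)
{
    int64_t s, e;

    demo_decode_range(10, 0, &s, &e);   /* len 0: whole-file lock */
    printf("start=%lld end=%s\n", (long long)s,
           e == OFFSET_MAX_DEMO ? "EOF" : "finite");

    demo_decode_range(10, 90, &s, &e);  /* ordinary 90-byte range */
    printf("start=%lld end=%lld\n", (long long)s, (long long)e);
    return 0;
}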
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/lockd/clntxdr.c * * XDR functions to encode/decode NLM version 3 RPC arguments and results. * NLM version 3 is backwards compatible with NLM versions 1 and 2. * * NLM client-side only. * * Copyright (C) 2010, Oracle. All rights reserved. */ #include <linux/types.h> #include <linux/sunrpc/xdr.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/stats.h> #include <linux/lockd/lockd.h> #include <uapi/linux/nfs2.h> #define NLMDBG_FACILITY NLMDBG_XDR #if (NLMCLNT_OHSIZE > XDR_MAX_NETOBJ) # error "NLM host name cannot be larger than XDR_MAX_NETOBJ!" #endif /* * Declare the space requirements for NLM arguments and replies as * number of 32bit-words */ #define NLM_cookie_sz (1+(NLM_MAXCOOKIELEN>>2)) #define NLM_caller_sz (1+(NLMCLNT_OHSIZE>>2)) #define NLM_owner_sz (1+(NLMCLNT_OHSIZE>>2)) #define NLM_fhandle_sz (1+(NFS2_FHSIZE>>2)) #define NLM_lock_sz (3+NLM_caller_sz+NLM_owner_sz+NLM_fhandle_sz) #define NLM_holder_sz (4+NLM_owner_sz) #define NLM_testargs_sz (NLM_cookie_sz+1+NLM_lock_sz) #define NLM_lockargs_sz (NLM_cookie_sz+4+NLM_lock_sz) #define NLM_cancargs_sz (NLM_cookie_sz+2+NLM_lock_sz) #define NLM_unlockargs_sz (NLM_cookie_sz+NLM_lock_sz) #define NLM_testres_sz (NLM_cookie_sz+1+NLM_holder_sz) #define NLM_res_sz (NLM_cookie_sz+1) #define NLM_norep_sz (0) static s32 loff_t_to_s32(loff_t offset) { s32 res; if (offset >= NLM_OFFSET_MAX) res = NLM_OFFSET_MAX; else if (offset <= -NLM_OFFSET_MAX) res = -NLM_OFFSET_MAX; else res = offset; return res; } static void nlm_compute_offsets(const struct nlm_lock *lock, u32 *l_offset, u32 *l_len) { const struct file_lock *fl = &lock->fl; *l_offset = loff_t_to_s32(fl->fl_start); if (fl->fl_end == OFFSET_MAX) *l_len = 0; else *l_len = loff_t_to_s32(fl->fl_end - fl->fl_start + 1); } /* * Encode/decode NLMv3 basic data types * * Basic NLMv3 data types are not defined in an IETF standards * document. X/Open has a description of these data types that * is useful. See Chapter 10 of "Protocols for Interworking: * XNFS, Version 3W". * * Not all basic data types have their own encoding and decoding * functions. For run-time efficiency, some data types are encoded * or decoded inline. */ static void encode_bool(struct xdr_stream *xdr, const int value) { __be32 *p; p = xdr_reserve_space(xdr, 4); *p = value ? 
xdr_one : xdr_zero; } static void encode_int32(struct xdr_stream *xdr, const s32 value) { __be32 *p; p = xdr_reserve_space(xdr, 4); *p = cpu_to_be32(value); } /* * typedef opaque netobj<MAXNETOBJ_SZ> */ static void encode_netobj(struct xdr_stream *xdr, const u8 *data, const unsigned int length) { __be32 *p; p = xdr_reserve_space(xdr, 4 + length); xdr_encode_opaque(p, data, length); } static int decode_netobj(struct xdr_stream *xdr, struct xdr_netobj *obj) { ssize_t ret; ret = xdr_stream_decode_opaque_inline(xdr, (void *)&obj->data, XDR_MAX_NETOBJ); if (unlikely(ret < 0)) return -EIO; obj->len = ret; return 0; } /* * netobj cookie; */ static void encode_cookie(struct xdr_stream *xdr, const struct nlm_cookie *cookie) { encode_netobj(xdr, (u8 *)&cookie->data, cookie->len); } static int decode_cookie(struct xdr_stream *xdr, struct nlm_cookie *cookie) { u32 length; __be32 *p; p = xdr_inline_decode(xdr, 4); if (unlikely(p == NULL)) goto out_overflow; length = be32_to_cpup(p++); /* apparently HPUX can return empty cookies */ if (length == 0) goto out_hpux; if (length > NLM_MAXCOOKIELEN) goto out_size; p = xdr_inline_decode(xdr, length); if (unlikely(p == NULL)) goto out_overflow; cookie->len = length; memcpy(cookie->data, p, length); return 0; out_hpux: cookie->len = 4; memset(cookie->data, 0, 4); return 0; out_size: dprintk("NFS: returned cookie was too long: %u\n", length); return -EIO; out_overflow: return -EIO; } /* * netobj fh; */ static void encode_fh(struct xdr_stream *xdr, const struct nfs_fh *fh) { encode_netobj(xdr, (u8 *)&fh->data, NFS2_FHSIZE); } /* * enum nlm_stats { * LCK_GRANTED = 0, * LCK_DENIED = 1, * LCK_DENIED_NOLOCKS = 2, * LCK_BLOCKED = 3, * LCK_DENIED_GRACE_PERIOD = 4 * }; * * * struct nlm_stat { * nlm_stats stat; * }; * * NB: we don't swap bytes for the NLM status values. The upper * layers deal directly with the status value in network byte * order. 
*/ static void encode_nlm_stat(struct xdr_stream *xdr, const __be32 stat) { __be32 *p; WARN_ON_ONCE(be32_to_cpu(stat) > NLM_LCK_DENIED_GRACE_PERIOD); p = xdr_reserve_space(xdr, 4); *p = stat; } static int decode_nlm_stat(struct xdr_stream *xdr, __be32 *stat) { __be32 *p; p = xdr_inline_decode(xdr, 4); if (unlikely(p == NULL)) goto out_overflow; if (unlikely(ntohl(*p) > ntohl(nlm_lck_denied_grace_period))) goto out_enum; *stat = *p; return 0; out_enum: dprintk("%s: server returned invalid nlm_stats value: %u\n", __func__, be32_to_cpup(p)); return -EIO; out_overflow: return -EIO; } /* * struct nlm_holder { * bool exclusive; * int uppid; * netobj oh; * unsigned l_offset; * unsigned l_len; * }; */ static void encode_nlm_holder(struct xdr_stream *xdr, const struct nlm_res *result) { const struct nlm_lock *lock = &result->lock; u32 l_offset, l_len; __be32 *p; encode_bool(xdr, lock->fl.fl_type == F_RDLCK); encode_int32(xdr, lock->svid); encode_netobj(xdr, lock->oh.data, lock->oh.len); p = xdr_reserve_space(xdr, 4 + 4); nlm_compute_offsets(lock, &l_offset, &l_len); *p++ = cpu_to_be32(l_offset); *p = cpu_to_be32(l_len); } static int decode_nlm_holder(struct xdr_stream *xdr, struct nlm_res *result) { struct nlm_lock *lock = &result->lock; struct file_lock *fl = &lock->fl; u32 exclusive, l_offset, l_len; int error; __be32 *p; s32 end; memset(lock, 0, sizeof(*lock)); locks_init_lock(fl); p = xdr_inline_decode(xdr, 4 + 4); if (unlikely(p == NULL)) goto out_overflow; exclusive = be32_to_cpup(p++); lock->svid = be32_to_cpup(p); fl->fl_pid = (pid_t)lock->svid; error = decode_netobj(xdr, &lock->oh); if (unlikely(error)) goto out; p = xdr_inline_decode(xdr, 4 + 4); if (unlikely(p == NULL)) goto out_overflow; fl->fl_flags = FL_POSIX; fl->fl_type = exclusive != 0 ? F_WRLCK : F_RDLCK; l_offset = be32_to_cpup(p++); l_len = be32_to_cpup(p); end = l_offset + l_len - 1; fl->fl_start = (loff_t)l_offset; if (l_len == 0 || end < 0) fl->fl_end = OFFSET_MAX; else fl->fl_end = (loff_t)end; error = 0; out: return error; out_overflow: return -EIO; } /* * string caller_name<LM_MAXSTRLEN>; */ static void encode_caller_name(struct xdr_stream *xdr, const char *name) { /* NB: client-side does not set lock->len */ u32 length = strlen(name); __be32 *p; p = xdr_reserve_space(xdr, 4 + length); xdr_encode_opaque(p, name, length); } /* * struct nlm_lock { * string caller_name<LM_MAXSTRLEN>; * netobj fh; * netobj oh; * int uppid; * unsigned l_offset; * unsigned l_len; * }; */ static void encode_nlm_lock(struct xdr_stream *xdr, const struct nlm_lock *lock) { u32 l_offset, l_len; __be32 *p; encode_caller_name(xdr, lock->caller); encode_fh(xdr, &lock->fh); encode_netobj(xdr, lock->oh.data, lock->oh.len); p = xdr_reserve_space(xdr, 4 + 4 + 4); *p++ = cpu_to_be32(lock->svid); nlm_compute_offsets(lock, &l_offset, &l_len); *p++ = cpu_to_be32(l_offset); *p = cpu_to_be32(l_len); } /* * NLMv3 XDR encode functions * * NLMv3 argument types are defined in Chapter 10 of The Open Group's * "Protocols for Interworking: XNFS, Version 3W". 
*/ /* * struct nlm_testargs { * netobj cookie; * bool exclusive; * struct nlm_lock alock; * }; */ static void nlm_xdr_enc_testargs(struct rpc_rqst *req, struct xdr_stream *xdr, const void *data) { const struct nlm_args *args = data; const struct nlm_lock *lock = &args->lock; encode_cookie(xdr, &args->cookie); encode_bool(xdr, lock->fl.fl_type == F_WRLCK); encode_nlm_lock(xdr, lock); } /* * struct nlm_lockargs { * netobj cookie; * bool block; * bool exclusive; * struct nlm_lock alock; * bool reclaim; * int state; * }; */ static void nlm_xdr_enc_lockargs(struct rpc_rqst *req, struct xdr_stream *xdr, const void *data) { const struct nlm_args *args = data; const struct nlm_lock *lock = &args->lock; encode_cookie(xdr, &args->cookie); encode_bool(xdr, args->block); encode_bool(xdr, lock->fl.fl_type == F_WRLCK); encode_nlm_lock(xdr, lock); encode_bool(xdr, args->reclaim); encode_int32(xdr, args->state); } /* * struct nlm_cancargs { * netobj cookie; * bool block; * bool exclusive; * struct nlm_lock alock; * }; */ static void nlm_xdr_enc_cancargs(struct rpc_rqst *req, struct xdr_stream *xdr, const void *data) { const struct nlm_args *args = data; const struct nlm_lock *lock = &args->lock; encode_cookie(xdr, &args->cookie); encode_bool(xdr, args->block); encode_bool(xdr, lock->fl.fl_type == F_WRLCK); encode_nlm_lock(xdr, lock); } /* * struct nlm_unlockargs { * netobj cookie; * struct nlm_lock alock; * }; */ static void nlm_xdr_enc_unlockargs(struct rpc_rqst *req, struct xdr_stream *xdr, const void *data) { const struct nlm_args *args = data; const struct nlm_lock *lock = &args->lock; encode_cookie(xdr, &args->cookie); encode_nlm_lock(xdr, lock); } /* * struct nlm_res { * netobj cookie; * nlm_stat stat; * }; */ static void nlm_xdr_enc_res(struct rpc_rqst *req, struct xdr_stream *xdr, const void *data) { const struct nlm_res *result = data; encode_cookie(xdr, &result->cookie); encode_nlm_stat(xdr, result->status); } /* * union nlm_testrply switch (nlm_stats stat) { * case LCK_DENIED: * struct nlm_holder holder; * default: * void; * }; * * struct nlm_testres { * netobj cookie; * nlm_testrply test_stat; * }; */ static void encode_nlm_testrply(struct xdr_stream *xdr, const struct nlm_res *result) { if (result->status == nlm_lck_denied) encode_nlm_holder(xdr, result); } static void nlm_xdr_enc_testres(struct rpc_rqst *req, struct xdr_stream *xdr, const void *data) { const struct nlm_res *result = data; encode_cookie(xdr, &result->cookie); encode_nlm_stat(xdr, result->status); encode_nlm_testrply(xdr, result); } /* * NLMv3 XDR decode functions * * NLMv3 result types are defined in Chapter 10 of The Open Group's * "Protocols for Interworking: XNFS, Version 3W". 
*/ /* * union nlm_testrply switch (nlm_stats stat) { * case LCK_DENIED: * struct nlm_holder holder; * default: * void; * }; * * struct nlm_testres { * netobj cookie; * nlm_testrply test_stat; * }; */ static int decode_nlm_testrply(struct xdr_stream *xdr, struct nlm_res *result) { int error; error = decode_nlm_stat(xdr, &result->status); if (unlikely(error)) goto out; if (result->status == nlm_lck_denied) error = decode_nlm_holder(xdr, result); out: return error; } static int nlm_xdr_dec_testres(struct rpc_rqst *req, struct xdr_stream *xdr, void *data) { struct nlm_res *result = data; int error; error = decode_cookie(xdr, &result->cookie); if (unlikely(error)) goto out; error = decode_nlm_testrply(xdr, result); out: return error; } /* * struct nlm_res { * netobj cookie; * nlm_stat stat; * }; */ static int nlm_xdr_dec_res(struct rpc_rqst *req, struct xdr_stream *xdr, void *data) { struct nlm_res *result = data; int error; error = decode_cookie(xdr, &result->cookie); if (unlikely(error)) goto out; error = decode_nlm_stat(xdr, &result->status); out: return error; } /* * For NLM, a void procedure really returns nothing */ #define nlm_xdr_dec_norep NULL #define PROC(proc, argtype, restype) \ [NLMPROC_##proc] = { \ .p_proc = NLMPROC_##proc, \ .p_encode = nlm_xdr_enc_##argtype, \ .p_decode = nlm_xdr_dec_##restype, \ .p_arglen = NLM_##argtype##_sz, \ .p_replen = NLM_##restype##_sz, \ .p_statidx = NLMPROC_##proc, \ .p_name = #proc, \ } static const struct rpc_procinfo nlm_procedures[] = { PROC(TEST, testargs, testres), PROC(LOCK, lockargs, res), PROC(CANCEL, cancargs, res), PROC(UNLOCK, unlockargs, res), PROC(GRANTED, testargs, res), PROC(TEST_MSG, testargs, norep), PROC(LOCK_MSG, lockargs, norep), PROC(CANCEL_MSG, cancargs, norep), PROC(UNLOCK_MSG, unlockargs, norep), PROC(GRANTED_MSG, testargs, norep), PROC(TEST_RES, testres, norep), PROC(LOCK_RES, res, norep), PROC(CANCEL_RES, res, norep), PROC(UNLOCK_RES, res, norep), PROC(GRANTED_RES, res, norep), }; static unsigned int nlm_version1_counts[ARRAY_SIZE(nlm_procedures)]; static const struct rpc_version nlm_version1 = { .number = 1, .nrprocs = ARRAY_SIZE(nlm_procedures), .procs = nlm_procedures, .counts = nlm_version1_counts, }; static unsigned int nlm_version3_counts[ARRAY_SIZE(nlm_procedures)]; static const struct rpc_version nlm_version3 = { .number = 3, .nrprocs = ARRAY_SIZE(nlm_procedures), .procs = nlm_procedures, .counts = nlm_version3_counts, }; static const struct rpc_version *nlm_versions[] = { [1] = &nlm_version1, [3] = &nlm_version3, #ifdef CONFIG_LOCKD_V4 [4] = &nlm_version4, #endif }; static struct rpc_stat nlm_rpc_stats; const struct rpc_program nlm_program = { .name = "lockd", .number = NLM_PROGRAM, .nrvers = ARRAY_SIZE(nlm_versions), .version = nlm_versions, .stats = &nlm_rpc_stats, };
linux-master
fs/lockd/clntxdr.c
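Because NLM versions 1 and 3 only carry 32-bit offsets, loff_t_to_s32() in clntxdr.c above saturates a 64-bit loff_t at plus or minus NLM_OFFSET_MAX before encoding, in contrast to the 64-bit NLMv4 path in clnt4xdr.c. A minimal standalone user-space sketch of that clamping follows; NLM_OFFSET_MAX_DEMO and demo_loff_t_to_s32() are invented stand-ins for the kernel definitions.

#include <stdint.h>
#include <stdio.h>

#define NLM_OFFSET_MAX_DEMO ((int64_t)0x7fffffff)

/* Saturate a 64-bit file offset to the range an NLMv1/v3 offset
 * field can represent, mirroring the clamping shown above. */
static int32_t demo_loff_t_to_s32(int64_t offset)
{
    if (offset >= NLM_OFFSET_MAX_DEMO)
        return (int32_t)NLM_OFFSET_MAX_DEMO;
    if (offset <= -NLM_OFFSET_MAX_DEMO)
        return (int32_t)-NLM_OFFSET_MAX_DEMO;
    return (int32_t)offset;
}

int main(void)
{
    printf("%d\n", demo_loff_t_to_s32(5));              /* fits: 5 */
    printf("%d\n", demo_loff_t_to_s32(1LL << 40));      /* clamped to 2^31 - 1 */
    return 0;
}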
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/lockd/mon.c * * The kernel statd client. * * Copyright (C) 1996, Olaf Kirch <[email protected]> */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/ktime.h> #include <linux/slab.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/addr.h> #include <linux/sunrpc/xprtsock.h> #include <linux/sunrpc/svc.h> #include <linux/lockd/lockd.h> #include <asm/unaligned.h> #include "netns.h" #define NLMDBG_FACILITY NLMDBG_MONITOR #define NSM_PROGRAM 100024 #define NSM_VERSION 1 enum { NSMPROC_NULL, NSMPROC_STAT, NSMPROC_MON, NSMPROC_UNMON, NSMPROC_UNMON_ALL, NSMPROC_SIMU_CRASH, NSMPROC_NOTIFY, }; struct nsm_args { struct nsm_private *priv; u32 prog; /* RPC callback info */ u32 vers; u32 proc; char *mon_name; const char *nodename; }; struct nsm_res { u32 status; u32 state; }; static const struct rpc_program nsm_program; static DEFINE_SPINLOCK(nsm_lock); /* * Local NSM state */ u32 __read_mostly nsm_local_state; bool __read_mostly nsm_use_hostnames; static inline struct sockaddr *nsm_addr(const struct nsm_handle *nsm) { return (struct sockaddr *)&nsm->sm_addr; } static struct rpc_clnt *nsm_create(struct net *net, const char *nodename) { struct sockaddr_in sin = { .sin_family = AF_INET, .sin_addr.s_addr = htonl(INADDR_LOOPBACK), }; struct rpc_create_args args = { .net = net, .protocol = XPRT_TRANSPORT_TCP, .address = (struct sockaddr *)&sin, .addrsize = sizeof(sin), .servername = "rpc.statd", .nodename = nodename, .program = &nsm_program, .version = NSM_VERSION, .authflavor = RPC_AUTH_NULL, .flags = RPC_CLNT_CREATE_NOPING, .cred = current_cred(), }; return rpc_create(&args); } static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res, const struct nlm_host *host) { int status; struct rpc_clnt *clnt; struct nsm_args args = { .priv = &nsm->sm_priv, .prog = NLM_PROGRAM, .vers = 3, .proc = NLMPROC_NSM_NOTIFY, .mon_name = nsm->sm_mon_name, .nodename = host->nodename, }; struct rpc_message msg = { .rpc_argp = &args, .rpc_resp = res, }; memset(res, 0, sizeof(*res)); clnt = nsm_create(host->net, host->nodename); if (IS_ERR(clnt)) { dprintk("lockd: failed to create NSM upcall transport, " "status=%ld, net=%x\n", PTR_ERR(clnt), host->net->ns.inum); return PTR_ERR(clnt); } msg.rpc_proc = &clnt->cl_procinfo[proc]; status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFTCONN); if (status == -ECONNREFUSED) { dprintk("lockd: NSM upcall RPC failed, status=%d, forcing rebind\n", status); rpc_force_rebind(clnt); status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFTCONN); } if (status < 0) dprintk("lockd: NSM upcall RPC failed, status=%d\n", status); else status = 0; rpc_shutdown_client(clnt); return status; } /** * nsm_monitor - Notify a peer in case we reboot * @host: pointer to nlm_host of peer to notify * * If this peer is not already monitored, this function sends an * upcall to the local rpc.statd to record the name/address of * the peer to notify in case we reboot. * * Returns zero if the peer is monitored by the local rpc.statd; * otherwise a negative errno value is returned. */ int nsm_monitor(const struct nlm_host *host) { struct nsm_handle *nsm = host->h_nsmhandle; struct nsm_res res; int status; dprintk("lockd: nsm_monitor(%s)\n", nsm->sm_name); if (nsm->sm_monitored) return 0; /* * Choose whether to record the caller_name or IP address of * this peer in the local rpc.statd's database. */ nsm->sm_mon_name = nsm_use_hostnames ? 
nsm->sm_name : nsm->sm_addrbuf; status = nsm_mon_unmon(nsm, NSMPROC_MON, &res, host); if (unlikely(res.status != 0)) status = -EIO; if (unlikely(status < 0)) { pr_notice_ratelimited("lockd: cannot monitor %s\n", nsm->sm_name); return status; } nsm->sm_monitored = 1; if (unlikely(nsm_local_state != res.state)) { nsm_local_state = res.state; dprintk("lockd: NSM state changed to %d\n", nsm_local_state); } return 0; } /** * nsm_unmonitor - Unregister peer notification * @host: pointer to nlm_host of peer to stop monitoring * * If this peer is monitored, this function sends an upcall to * tell the local rpc.statd not to send this peer a notification * when we reboot. */ void nsm_unmonitor(const struct nlm_host *host) { struct nsm_handle *nsm = host->h_nsmhandle; struct nsm_res res; int status; if (refcount_read(&nsm->sm_count) == 1 && nsm->sm_monitored && !nsm->sm_sticky) { dprintk("lockd: nsm_unmonitor(%s)\n", nsm->sm_name); status = nsm_mon_unmon(nsm, NSMPROC_UNMON, &res, host); if (res.status != 0) status = -EIO; if (status < 0) printk(KERN_NOTICE "lockd: cannot unmonitor %s\n", nsm->sm_name); else nsm->sm_monitored = 0; } } static struct nsm_handle *nsm_lookup_hostname(const struct list_head *nsm_handles, const char *hostname, const size_t len) { struct nsm_handle *nsm; list_for_each_entry(nsm, nsm_handles, sm_link) if (strlen(nsm->sm_name) == len && memcmp(nsm->sm_name, hostname, len) == 0) return nsm; return NULL; } static struct nsm_handle *nsm_lookup_addr(const struct list_head *nsm_handles, const struct sockaddr *sap) { struct nsm_handle *nsm; list_for_each_entry(nsm, nsm_handles, sm_link) if (rpc_cmp_addr(nsm_addr(nsm), sap)) return nsm; return NULL; } static struct nsm_handle *nsm_lookup_priv(const struct list_head *nsm_handles, const struct nsm_private *priv) { struct nsm_handle *nsm; list_for_each_entry(nsm, nsm_handles, sm_link) if (memcmp(nsm->sm_priv.data, priv->data, sizeof(priv->data)) == 0) return nsm; return NULL; } /* * Construct a unique cookie to match this nsm_handle to this monitored * host. It is passed to the local rpc.statd via NSMPROC_MON, and * returned via NLMPROC_SM_NOTIFY, in the "priv" field of these * requests. * * The NSM protocol requires that these cookies be unique while the * system is running. We prefer a stronger requirement of making them * unique across reboots. If user space bugs cause a stale cookie to * be sent to the kernel, it could cause the wrong host to lose its * lock state if cookies were not unique across reboots. * * The cookies are exposed only to local user space via loopback. They * do not appear on the physical network. If we want greater security * for some reason, nsm_init_private() could perform a one-way hash to * obscure the contents of the cookie. 
*/ static void nsm_init_private(struct nsm_handle *nsm) { u64 *p = (u64 *)&nsm->sm_priv.data; s64 ns; ns = ktime_get_ns(); put_unaligned(ns, p); put_unaligned((unsigned long)nsm, p + 1); } static struct nsm_handle *nsm_create_handle(const struct sockaddr *sap, const size_t salen, const char *hostname, const size_t hostname_len) { struct nsm_handle *new; if (!hostname) return NULL; new = kzalloc(sizeof(*new) + hostname_len + 1, GFP_KERNEL); if (unlikely(new == NULL)) return NULL; refcount_set(&new->sm_count, 1); new->sm_name = (char *)(new + 1); memcpy(nsm_addr(new), sap, salen); new->sm_addrlen = salen; nsm_init_private(new); if (rpc_ntop(nsm_addr(new), new->sm_addrbuf, sizeof(new->sm_addrbuf)) == 0) (void)snprintf(new->sm_addrbuf, sizeof(new->sm_addrbuf), "unsupported address family"); memcpy(new->sm_name, hostname, hostname_len); new->sm_name[hostname_len] = '\0'; return new; } /** * nsm_get_handle - Find or create a cached nsm_handle * @net: network namespace * @sap: pointer to socket address of handle to find * @salen: length of socket address * @hostname: pointer to C string containing hostname to find * @hostname_len: length of C string * * Behavior is modulated by the global nsm_use_hostnames variable. * * Returns a cached nsm_handle after bumping its ref count, or * returns a fresh nsm_handle if a handle that matches @sap and/or * @hostname cannot be found in the handle cache. Returns NULL if * an error occurs. */ struct nsm_handle *nsm_get_handle(const struct net *net, const struct sockaddr *sap, const size_t salen, const char *hostname, const size_t hostname_len) { struct nsm_handle *cached, *new = NULL; struct lockd_net *ln = net_generic(net, lockd_net_id); if (hostname && memchr(hostname, '/', hostname_len) != NULL) { if (printk_ratelimit()) { printk(KERN_WARNING "Invalid hostname \"%.*s\" " "in NFS lock request\n", (int)hostname_len, hostname); } return NULL; } retry: spin_lock(&nsm_lock); if (nsm_use_hostnames && hostname != NULL) cached = nsm_lookup_hostname(&ln->nsm_handles, hostname, hostname_len); else cached = nsm_lookup_addr(&ln->nsm_handles, sap); if (cached != NULL) { refcount_inc(&cached->sm_count); spin_unlock(&nsm_lock); kfree(new); dprintk("lockd: found nsm_handle for %s (%s), " "cnt %d\n", cached->sm_name, cached->sm_addrbuf, refcount_read(&cached->sm_count)); return cached; } if (new != NULL) { list_add(&new->sm_link, &ln->nsm_handles); spin_unlock(&nsm_lock); dprintk("lockd: created nsm_handle for %s (%s)\n", new->sm_name, new->sm_addrbuf); return new; } spin_unlock(&nsm_lock); new = nsm_create_handle(sap, salen, hostname, hostname_len); if (unlikely(new == NULL)) return NULL; goto retry; } /** * nsm_reboot_lookup - match NLMPROC_SM_NOTIFY arguments to an nsm_handle * @net: network namespace * @info: pointer to NLMPROC_SM_NOTIFY arguments * * Returns a matching nsm_handle if found in the nsm cache. The returned * nsm_handle's reference count is bumped. Otherwise returns NULL if some * error occurred. 
*/ struct nsm_handle *nsm_reboot_lookup(const struct net *net, const struct nlm_reboot *info) { struct nsm_handle *cached; struct lockd_net *ln = net_generic(net, lockd_net_id); spin_lock(&nsm_lock); cached = nsm_lookup_priv(&ln->nsm_handles, &info->priv); if (unlikely(cached == NULL)) { spin_unlock(&nsm_lock); dprintk("lockd: never saw rebooted peer '%.*s' before\n", info->len, info->mon); return cached; } refcount_inc(&cached->sm_count); spin_unlock(&nsm_lock); dprintk("lockd: host %s (%s) rebooted, cnt %d\n", cached->sm_name, cached->sm_addrbuf, refcount_read(&cached->sm_count)); return cached; } /** * nsm_release - Release an NSM handle * @nsm: pointer to handle to be released * */ void nsm_release(struct nsm_handle *nsm) { if (refcount_dec_and_lock(&nsm->sm_count, &nsm_lock)) { list_del(&nsm->sm_link); spin_unlock(&nsm_lock); dprintk("lockd: destroyed nsm_handle for %s (%s)\n", nsm->sm_name, nsm->sm_addrbuf); kfree(nsm); } } /* * XDR functions for NSM. * * See https://www.opengroup.org/ for details on the Network * Status Monitor wire protocol. */ static void encode_nsm_string(struct xdr_stream *xdr, const char *string) { const u32 len = strlen(string); __be32 *p; p = xdr_reserve_space(xdr, 4 + len); xdr_encode_opaque(p, string, len); } /* * "mon_name" specifies the host to be monitored. */ static void encode_mon_name(struct xdr_stream *xdr, const struct nsm_args *argp) { encode_nsm_string(xdr, argp->mon_name); } /* * The "my_id" argument specifies the hostname and RPC procedure * to be called when the status manager receives notification * (via the NLMPROC_SM_NOTIFY call) that the state of host "mon_name" * has changed. */ static void encode_my_id(struct xdr_stream *xdr, const struct nsm_args *argp) { __be32 *p; encode_nsm_string(xdr, argp->nodename); p = xdr_reserve_space(xdr, 4 + 4 + 4); *p++ = cpu_to_be32(argp->prog); *p++ = cpu_to_be32(argp->vers); *p = cpu_to_be32(argp->proc); } /* * The "mon_id" argument specifies the non-private arguments * of an NSMPROC_MON or NSMPROC_UNMON call. */ static void encode_mon_id(struct xdr_stream *xdr, const struct nsm_args *argp) { encode_mon_name(xdr, argp); encode_my_id(xdr, argp); } /* * The "priv" argument may contain private information required * by the NSMPROC_MON call. This information will be supplied in the * NLMPROC_SM_NOTIFY call. 
*/ static void encode_priv(struct xdr_stream *xdr, const struct nsm_args *argp) { __be32 *p; p = xdr_reserve_space(xdr, SM_PRIV_SIZE); xdr_encode_opaque_fixed(p, argp->priv->data, SM_PRIV_SIZE); } static void nsm_xdr_enc_mon(struct rpc_rqst *req, struct xdr_stream *xdr, const void *argp) { encode_mon_id(xdr, argp); encode_priv(xdr, argp); } static void nsm_xdr_enc_unmon(struct rpc_rqst *req, struct xdr_stream *xdr, const void *argp) { encode_mon_id(xdr, argp); } static int nsm_xdr_dec_stat_res(struct rpc_rqst *rqstp, struct xdr_stream *xdr, void *data) { struct nsm_res *resp = data; __be32 *p; p = xdr_inline_decode(xdr, 4 + 4); if (unlikely(p == NULL)) return -EIO; resp->status = be32_to_cpup(p++); resp->state = be32_to_cpup(p); dprintk("lockd: %s status %d state %d\n", __func__, resp->status, resp->state); return 0; } static int nsm_xdr_dec_stat(struct rpc_rqst *rqstp, struct xdr_stream *xdr, void *data) { struct nsm_res *resp = data; __be32 *p; p = xdr_inline_decode(xdr, 4); if (unlikely(p == NULL)) return -EIO; resp->state = be32_to_cpup(p); dprintk("lockd: %s state %d\n", __func__, resp->state); return 0; } #define SM_my_name_sz (1+XDR_QUADLEN(SM_MAXSTRLEN)) #define SM_my_id_sz (SM_my_name_sz+3) #define SM_mon_name_sz (1+XDR_QUADLEN(SM_MAXSTRLEN)) #define SM_mon_id_sz (SM_mon_name_sz+SM_my_id_sz) #define SM_priv_sz (XDR_QUADLEN(SM_PRIV_SIZE)) #define SM_mon_sz (SM_mon_id_sz+SM_priv_sz) #define SM_monres_sz 2 #define SM_unmonres_sz 1 static const struct rpc_procinfo nsm_procedures[] = { [NSMPROC_MON] = { .p_proc = NSMPROC_MON, .p_encode = nsm_xdr_enc_mon, .p_decode = nsm_xdr_dec_stat_res, .p_arglen = SM_mon_sz, .p_replen = SM_monres_sz, .p_statidx = NSMPROC_MON, .p_name = "MONITOR", }, [NSMPROC_UNMON] = { .p_proc = NSMPROC_UNMON, .p_encode = nsm_xdr_enc_unmon, .p_decode = nsm_xdr_dec_stat, .p_arglen = SM_mon_id_sz, .p_replen = SM_unmonres_sz, .p_statidx = NSMPROC_UNMON, .p_name = "UNMONITOR", }, }; static unsigned int nsm_version1_counts[ARRAY_SIZE(nsm_procedures)]; static const struct rpc_version nsm_version1 = { .number = 1, .nrprocs = ARRAY_SIZE(nsm_procedures), .procs = nsm_procedures, .counts = nsm_version1_counts, }; static const struct rpc_version *nsm_version[] = { [1] = &nsm_version1, }; static struct rpc_stat nsm_stats; static const struct rpc_program nsm_program = { .name = "statd", .number = NSM_PROGRAM, .nrvers = ARRAY_SIZE(nsm_version), .version = nsm_version, .stats = &nsm_stats };
linux-master
fs/lockd/mon.c
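The block comment above nsm_init_private() in mon.c explains why each monitored peer is tagged with an opaque 16-byte "priv" cookie that the local rpc.statd hands back in SM_NOTIFY, letting nsm_reboot_lookup() identify the rebooted host without trusting its address. Below is a minimal standalone user-space sketch of the same idea, a timestamp plus object address matched byte for byte; SM_PRIV_SIZE_DEMO and the demo_* names are stand-ins, and the kernel version uses ktime_get_ns() and put_unaligned() rather than clock_gettime() and memcpy().

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

#define SM_PRIV_SIZE_DEMO 16

struct demo_handle {
    unsigned char priv[SM_PRIV_SIZE_DEMO];
};

/* Fill the opaque cookie with a nanosecond timestamp plus the handle's
 * own address, so handles created at different times, or live at the
 * same instant, get distinct cookies. */
static void demo_init_private(struct demo_handle *h)
{
    struct timespec ts;
    uint64_t words[2];

    clock_gettime(CLOCK_MONOTONIC, &ts);
    words[0] = (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
    words[1] = (uint64_t)(uintptr_t)h;
    memcpy(h->priv, words, sizeof(words));
}

/* Match an incoming notification's "priv" blob against a cached
 * handle, byte for byte, the way nsm_lookup_priv() above does. */
static int demo_priv_match(const struct demo_handle *h,
                           const unsigned char *priv)
{
    return memcmp(h->priv, priv, SM_PRIV_SIZE_DEMO) == 0;
}

int main(void)
{
    struct demo_handle h;

    demo_init_private(&h);
    printf("self match: %d\n", demo_priv_match(&h, h.priv));
    return 0;
}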
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/lockd/svclock.c * * Handling of server-side locks, mostly of the blocked variety. * This is the ugliest part of lockd because we tread on very thin ice. * GRANT and CANCEL calls may get stuck, meet in mid-flight, etc. * IMNSHO introducing the grant callback into the NLM protocol was one * of the worst ideas Sun ever had. Except maybe for the idea of doing * NFS file locking at all. * * I'm trying hard to avoid race conditions by protecting most accesses * to a file's list of blocked locks through a semaphore. The global * list of blocked locks is not protected in this fashion however. * Therefore, some functions (such as the RPC callback for the async grant * call) move blocked locks towards the head of the list *while some other * process might be traversing it*. This should not be a problem in * practice, because this will only cause functions traversing the list * to visit some blocks twice. * * Copyright (C) 1996, Olaf Kirch <[email protected]> */ #include <linux/types.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/svc_xprt.h> #include <linux/lockd/nlm.h> #include <linux/lockd/lockd.h> #include <linux/kthread.h> #include <linux/exportfs.h> #define NLMDBG_FACILITY NLMDBG_SVCLOCK #ifdef CONFIG_LOCKD_V4 #define nlm_deadlock nlm4_deadlock #else #define nlm_deadlock nlm_lck_denied #endif static void nlmsvc_release_block(struct nlm_block *block); static void nlmsvc_insert_block(struct nlm_block *block, unsigned long); static void nlmsvc_remove_block(struct nlm_block *block); static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock); static void nlmsvc_freegrantargs(struct nlm_rqst *call); static const struct rpc_call_ops nlmsvc_grant_ops; /* * The list of blocked locks to retry */ static LIST_HEAD(nlm_blocked); static DEFINE_SPINLOCK(nlm_blocked_lock); #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) static const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie) { /* * We can get away with a static buffer because this is only called * from lockd, which is single-threaded. 
*/ static char buf[2*NLM_MAXCOOKIELEN+1]; unsigned int i, len = sizeof(buf); char *p = buf; len--; /* allow for trailing \0 */ if (len < 3) return "???"; for (i = 0 ; i < cookie->len ; i++) { if (len < 2) { strcpy(p-3, "..."); break; } sprintf(p, "%02x", cookie->data[i]); p += 2; len -= 2; } *p = '\0'; return buf; } #endif /* * Insert a blocked lock into the global list */ static void nlmsvc_insert_block_locked(struct nlm_block *block, unsigned long when) { struct nlm_block *b; struct list_head *pos; dprintk("lockd: nlmsvc_insert_block(%p, %ld)\n", block, when); if (list_empty(&block->b_list)) { kref_get(&block->b_count); } else { list_del_init(&block->b_list); } pos = &nlm_blocked; if (when != NLM_NEVER) { if ((when += jiffies) == NLM_NEVER) when ++; list_for_each(pos, &nlm_blocked) { b = list_entry(pos, struct nlm_block, b_list); if (time_after(b->b_when,when) || b->b_when == NLM_NEVER) break; } /* On normal exit from the loop, pos == &nlm_blocked, * so we will be adding to the end of the list - good */ } list_add_tail(&block->b_list, pos); block->b_when = when; } static void nlmsvc_insert_block(struct nlm_block *block, unsigned long when) { spin_lock(&nlm_blocked_lock); nlmsvc_insert_block_locked(block, when); spin_unlock(&nlm_blocked_lock); } /* * Remove a block from the global list */ static inline void nlmsvc_remove_block(struct nlm_block *block) { spin_lock(&nlm_blocked_lock); if (!list_empty(&block->b_list)) { list_del_init(&block->b_list); spin_unlock(&nlm_blocked_lock); nlmsvc_release_block(block); return; } spin_unlock(&nlm_blocked_lock); } /* * Find a block for a given lock */ static struct nlm_block * nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock) { struct nlm_block *block; struct file_lock *fl; dprintk("lockd: nlmsvc_lookup_block f=%p pd=%d %Ld-%Ld ty=%d\n", file, lock->fl.fl_pid, (long long)lock->fl.fl_start, (long long)lock->fl.fl_end, lock->fl.fl_type); spin_lock(&nlm_blocked_lock); list_for_each_entry(block, &nlm_blocked, b_list) { fl = &block->b_call->a_args.lock.fl; dprintk("lockd: check f=%p pd=%d %Ld-%Ld ty=%d cookie=%s\n", block->b_file, fl->fl_pid, (long long)fl->fl_start, (long long)fl->fl_end, fl->fl_type, nlmdbg_cookie2a(&block->b_call->a_args.cookie)); if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) { kref_get(&block->b_count); spin_unlock(&nlm_blocked_lock); return block; } } spin_unlock(&nlm_blocked_lock); return NULL; } static inline int nlm_cookie_match(struct nlm_cookie *a, struct nlm_cookie *b) { if (a->len != b->len) return 0; if (memcmp(a->data, b->data, a->len)) return 0; return 1; } /* * Find a block with a given NLM cookie. */ static inline struct nlm_block * nlmsvc_find_block(struct nlm_cookie *cookie) { struct nlm_block *block; spin_lock(&nlm_blocked_lock); list_for_each_entry(block, &nlm_blocked, b_list) { if (nlm_cookie_match(&block->b_call->a_args.cookie,cookie)) goto found; } spin_unlock(&nlm_blocked_lock); return NULL; found: dprintk("nlmsvc_find_block(%s): block=%p\n", nlmdbg_cookie2a(cookie), block); kref_get(&block->b_count); spin_unlock(&nlm_blocked_lock); return block; } /* * Create a block and initialize it. * * Note: we explicitly set the cookie of the grant reply to that of * the blocked lock request. The spec explicitly mentions that the client * should _not_ rely on the callback containing the same cookie as the * request, but (as I found out later) that's because some implementations * do just this. Never mind the standards comittees, they support our * logging industries. 
* * 10 years later: I hope we can safely ignore these old and broken * clients by now. Let's fix this so we can uniquely identify an incoming * GRANTED_RES message by cookie, without having to rely on the client's IP * address. --okir */ static struct nlm_block * nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_host *host, struct nlm_file *file, struct nlm_lock *lock, struct nlm_cookie *cookie) { struct nlm_block *block; struct nlm_rqst *call = NULL; call = nlm_alloc_call(host); if (call == NULL) return NULL; /* Allocate memory for block, and initialize arguments */ block = kzalloc(sizeof(*block), GFP_KERNEL); if (block == NULL) goto failed; kref_init(&block->b_count); INIT_LIST_HEAD(&block->b_list); INIT_LIST_HEAD(&block->b_flist); if (!nlmsvc_setgrantargs(call, lock)) goto failed_free; /* Set notifier function for VFS, and init args */ call->a_args.lock.fl.fl_flags |= FL_SLEEP; call->a_args.lock.fl.fl_lmops = &nlmsvc_lock_operations; nlmclnt_next_cookie(&call->a_args.cookie); dprintk("lockd: created block %p...\n", block); /* Create and initialize the block */ block->b_daemon = rqstp->rq_server; block->b_host = host; block->b_file = file; file->f_count++; /* Add to file's list of blocks */ list_add(&block->b_flist, &file->f_blocks); /* Set up RPC arguments for callback */ block->b_call = call; call->a_flags = RPC_TASK_ASYNC; call->a_block = block; return block; failed_free: kfree(block); failed: nlmsvc_release_call(call); return NULL; } /* * Delete a block. * It is the caller's responsibility to check whether the file * can be closed hereafter. */ static int nlmsvc_unlink_block(struct nlm_block *block) { int status; dprintk("lockd: unlinking block %p...\n", block); /* Remove block from list */ status = locks_delete_block(&block->b_call->a_args.lock.fl); nlmsvc_remove_block(block); return status; } static void nlmsvc_free_block(struct kref *kref) { struct nlm_block *block = container_of(kref, struct nlm_block, b_count); struct nlm_file *file = block->b_file; dprintk("lockd: freeing block %p...\n", block); /* Remove block from file's list of blocks */ list_del_init(&block->b_flist); mutex_unlock(&file->f_mutex); nlmsvc_freegrantargs(block->b_call); nlmsvc_release_call(block->b_call); nlm_release_file(block->b_file); kfree(block); } static void nlmsvc_release_block(struct nlm_block *block) { if (block != NULL) kref_put_mutex(&block->b_count, nlmsvc_free_block, &block->b_file->f_mutex); } /* * Loop over all blocks and delete blocks held by * a matching host. */ void nlmsvc_traverse_blocks(struct nlm_host *host, struct nlm_file *file, nlm_host_match_fn_t match) { struct nlm_block *block, *next; restart: mutex_lock(&file->f_mutex); spin_lock(&nlm_blocked_lock); list_for_each_entry_safe(block, next, &file->f_blocks, b_flist) { if (!match(block->b_host, host)) continue; /* Do not destroy blocks that are not on * the global retry list - why? 
*/ if (list_empty(&block->b_list)) continue; kref_get(&block->b_count); spin_unlock(&nlm_blocked_lock); mutex_unlock(&file->f_mutex); nlmsvc_unlink_block(block); nlmsvc_release_block(block); goto restart; } spin_unlock(&nlm_blocked_lock); mutex_unlock(&file->f_mutex); } static struct nlm_lockowner * nlmsvc_get_lockowner(struct nlm_lockowner *lockowner) { refcount_inc(&lockowner->count); return lockowner; } void nlmsvc_put_lockowner(struct nlm_lockowner *lockowner) { if (!refcount_dec_and_lock(&lockowner->count, &lockowner->host->h_lock)) return; list_del(&lockowner->list); spin_unlock(&lockowner->host->h_lock); nlmsvc_release_host(lockowner->host); kfree(lockowner); } static struct nlm_lockowner *__nlmsvc_find_lockowner(struct nlm_host *host, pid_t pid) { struct nlm_lockowner *lockowner; list_for_each_entry(lockowner, &host->h_lockowners, list) { if (lockowner->pid != pid) continue; return nlmsvc_get_lockowner(lockowner); } return NULL; } static struct nlm_lockowner *nlmsvc_find_lockowner(struct nlm_host *host, pid_t pid) { struct nlm_lockowner *res, *new = NULL; spin_lock(&host->h_lock); res = __nlmsvc_find_lockowner(host, pid); if (res == NULL) { spin_unlock(&host->h_lock); new = kmalloc(sizeof(*res), GFP_KERNEL); spin_lock(&host->h_lock); res = __nlmsvc_find_lockowner(host, pid); if (res == NULL && new != NULL) { res = new; /* fs/locks.c will manage the refcount through lock_ops */ refcount_set(&new->count, 1); new->pid = pid; new->host = nlm_get_host(host); list_add(&new->list, &host->h_lockowners); new = NULL; } } spin_unlock(&host->h_lock); kfree(new); return res; } void nlmsvc_release_lockowner(struct nlm_lock *lock) { if (lock->fl.fl_owner) nlmsvc_put_lockowner(lock->fl.fl_owner); } void nlmsvc_locks_init_private(struct file_lock *fl, struct nlm_host *host, pid_t pid) { fl->fl_owner = nlmsvc_find_lockowner(host, pid); } /* * Initialize arguments for GRANTED call. The nlm_rqst structure * has been cleared already. */ static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock) { locks_copy_lock(&call->a_args.lock.fl, &lock->fl); memcpy(&call->a_args.lock.fh, &lock->fh, sizeof(call->a_args.lock.fh)); call->a_args.lock.caller = utsname()->nodename; call->a_args.lock.oh.len = lock->oh.len; /* set default data area */ call->a_args.lock.oh.data = call->a_owner; call->a_args.lock.svid = ((struct nlm_lockowner *)lock->fl.fl_owner)->pid; if (lock->oh.len > NLMCLNT_OHSIZE) { void *data = kmalloc(lock->oh.len, GFP_KERNEL); if (!data) return 0; call->a_args.lock.oh.data = (u8 *) data; } memcpy(call->a_args.lock.oh.data, lock->oh.data, lock->oh.len); return 1; } static void nlmsvc_freegrantargs(struct nlm_rqst *call) { if (call->a_args.lock.oh.data != call->a_owner) kfree(call->a_args.lock.oh.data); locks_release_private(&call->a_args.lock.fl); } /* * Deferred lock request handling for non-blocking lock */ static __be32 nlmsvc_defer_lock_rqst(struct svc_rqst *rqstp, struct nlm_block *block) { __be32 status = nlm_lck_denied_nolocks; block->b_flags |= B_QUEUED; nlmsvc_insert_block(block, NLM_TIMEOUT); block->b_cache_req = &rqstp->rq_chandle; if (rqstp->rq_chandle.defer) { block->b_deferred_req = rqstp->rq_chandle.defer(block->b_cache_req); if (block->b_deferred_req != NULL) status = nlm_drop_reply; } dprintk("lockd: nlmsvc_defer_lock_rqst block %p flags %d status %d\n", block, block->b_flags, ntohl(status)); return status; } /* * Attempt to establish a lock, and if it can't be granted, block it * if required. 
*/ __be32 nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file, struct nlm_host *host, struct nlm_lock *lock, int wait, struct nlm_cookie *cookie, int reclaim) { #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) struct inode *inode = nlmsvc_file_inode(file); #endif struct nlm_block *block = NULL; int error; int mode; int async_block = 0; __be32 ret; dprintk("lockd: nlmsvc_lock(%s/%ld, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n", inode->i_sb->s_id, inode->i_ino, lock->fl.fl_type, lock->fl.fl_pid, (long long)lock->fl.fl_start, (long long)lock->fl.fl_end, wait); if (nlmsvc_file_file(file)->f_op->lock) { async_block = wait; wait = 0; } /* Lock file against concurrent access */ mutex_lock(&file->f_mutex); /* Get existing block (in case client is busy-waiting) * or create new block */ block = nlmsvc_lookup_block(file, lock); if (block == NULL) { block = nlmsvc_create_block(rqstp, host, file, lock, cookie); ret = nlm_lck_denied_nolocks; if (block == NULL) goto out; lock = &block->b_call->a_args.lock; } else lock->fl.fl_flags &= ~FL_SLEEP; if (block->b_flags & B_QUEUED) { dprintk("lockd: nlmsvc_lock deferred block %p flags %d\n", block, block->b_flags); if (block->b_granted) { nlmsvc_unlink_block(block); ret = nlm_granted; goto out; } if (block->b_flags & B_TIMED_OUT) { nlmsvc_unlink_block(block); ret = nlm_lck_denied; goto out; } ret = nlm_drop_reply; goto out; } if (locks_in_grace(SVC_NET(rqstp)) && !reclaim) { ret = nlm_lck_denied_grace_period; goto out; } if (reclaim && !locks_in_grace(SVC_NET(rqstp))) { ret = nlm_lck_denied_grace_period; goto out; } if (!wait) lock->fl.fl_flags &= ~FL_SLEEP; mode = lock_to_openmode(&lock->fl); error = vfs_lock_file(file->f_file[mode], F_SETLK, &lock->fl, NULL); lock->fl.fl_flags &= ~FL_SLEEP; dprintk("lockd: vfs_lock_file returned %d\n", error); switch (error) { case 0: ret = nlm_granted; goto out; case -EAGAIN: /* * If this is a blocking request for an * already pending lock request then we need * to put it back on lockd's block list */ if (wait) break; ret = async_block ? nlm_lck_blocked : nlm_lck_denied; goto out; case FILE_LOCK_DEFERRED: if (wait) break; /* Filesystem lock operation is in progress Add it to the queue waiting for callback */ ret = nlmsvc_defer_lock_rqst(rqstp, block); goto out; case -EDEADLK: ret = nlm_deadlock; goto out; default: /* includes ENOLCK */ ret = nlm_lck_denied_nolocks; goto out; } ret = nlm_lck_blocked; /* Append to list of blocked */ nlmsvc_insert_block(block, NLM_NEVER); out: mutex_unlock(&file->f_mutex); nlmsvc_release_block(block); dprintk("lockd: nlmsvc_lock returned %u\n", ret); return ret; } /* * Test for presence of a conflicting lock. 
*/ __be32 nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file, struct nlm_host *host, struct nlm_lock *lock, struct nlm_lock *conflock, struct nlm_cookie *cookie) { int error; int mode; __be32 ret; dprintk("lockd: nlmsvc_testlock(%s/%ld, ty=%d, %Ld-%Ld)\n", nlmsvc_file_inode(file)->i_sb->s_id, nlmsvc_file_inode(file)->i_ino, lock->fl.fl_type, (long long)lock->fl.fl_start, (long long)lock->fl.fl_end); if (locks_in_grace(SVC_NET(rqstp))) { ret = nlm_lck_denied_grace_period; goto out; } mode = lock_to_openmode(&lock->fl); error = vfs_test_lock(file->f_file[mode], &lock->fl); if (error) { /* We can't currently deal with deferred test requests */ if (error == FILE_LOCK_DEFERRED) WARN_ON_ONCE(1); ret = nlm_lck_denied_nolocks; goto out; } if (lock->fl.fl_type == F_UNLCK) { ret = nlm_granted; goto out; } dprintk("lockd: conflicting lock(ty=%d, %Ld-%Ld)\n", lock->fl.fl_type, (long long)lock->fl.fl_start, (long long)lock->fl.fl_end); conflock->caller = "somehost"; /* FIXME */ conflock->len = strlen(conflock->caller); conflock->oh.len = 0; /* don't return OH info */ conflock->svid = lock->fl.fl_pid; conflock->fl.fl_type = lock->fl.fl_type; conflock->fl.fl_start = lock->fl.fl_start; conflock->fl.fl_end = lock->fl.fl_end; locks_release_private(&lock->fl); ret = nlm_lck_denied; out: return ret; } /* * Remove a lock. * This implies a CANCEL call: We send a GRANT_MSG, the client replies * with a GRANT_RES call which gets lost, and calls UNLOCK immediately * afterwards. In this case the block will still be there, and hence * must be removed. */ __be32 nlmsvc_unlock(struct net *net, struct nlm_file *file, struct nlm_lock *lock) { int error = 0; dprintk("lockd: nlmsvc_unlock(%s/%ld, pi=%d, %Ld-%Ld)\n", nlmsvc_file_inode(file)->i_sb->s_id, nlmsvc_file_inode(file)->i_ino, lock->fl.fl_pid, (long long)lock->fl.fl_start, (long long)lock->fl.fl_end); /* First, cancel any lock that might be there */ nlmsvc_cancel_blocked(net, file, lock); lock->fl.fl_type = F_UNLCK; lock->fl.fl_file = file->f_file[O_RDONLY]; if (lock->fl.fl_file) error = vfs_lock_file(lock->fl.fl_file, F_SETLK, &lock->fl, NULL); lock->fl.fl_file = file->f_file[O_WRONLY]; if (lock->fl.fl_file) error |= vfs_lock_file(lock->fl.fl_file, F_SETLK, &lock->fl, NULL); return (error < 0)? nlm_lck_denied_nolocks : nlm_granted; } /* * Cancel a previously blocked request. * * A cancel request always overrides any grant that may currently * be in progress. * The calling procedure must check whether the file can be closed. */ __be32 nlmsvc_cancel_blocked(struct net *net, struct nlm_file *file, struct nlm_lock *lock) { struct nlm_block *block; int status = 0; int mode; dprintk("lockd: nlmsvc_cancel(%s/%ld, pi=%d, %Ld-%Ld)\n", nlmsvc_file_inode(file)->i_sb->s_id, nlmsvc_file_inode(file)->i_ino, lock->fl.fl_pid, (long long)lock->fl.fl_start, (long long)lock->fl.fl_end); if (locks_in_grace(net)) return nlm_lck_denied_grace_period; mutex_lock(&file->f_mutex); block = nlmsvc_lookup_block(file, lock); mutex_unlock(&file->f_mutex); if (block != NULL) { struct file_lock *fl = &block->b_call->a_args.lock.fl; mode = lock_to_openmode(fl); vfs_cancel_lock(block->b_file->f_file[mode], fl); status = nlmsvc_unlink_block(block); nlmsvc_release_block(block); } return status ? nlm_lck_denied : nlm_granted; } /* * This is a callback from the filesystem for VFS file lock requests. * It will be used if lm_grant is defined and the filesystem can not * respond to the request immediately. * For SETLK or SETLKW request it will get the local posix lock. 
* In all cases it will move the block to the head of nlm_blocked q where * nlmsvc_retry_blocked() can send back a reply for SETLKW or revisit the * deferred rpc for GETLK and SETLK. */ static void nlmsvc_update_deferred_block(struct nlm_block *block, int result) { block->b_flags |= B_GOT_CALLBACK; if (result == 0) block->b_granted = 1; else block->b_flags |= B_TIMED_OUT; } static int nlmsvc_grant_deferred(struct file_lock *fl, int result) { struct nlm_block *block; int rc = -ENOENT; spin_lock(&nlm_blocked_lock); list_for_each_entry(block, &nlm_blocked, b_list) { if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) { dprintk("lockd: nlmsvc_notify_blocked block %p flags %d\n", block, block->b_flags); if (block->b_flags & B_QUEUED) { if (block->b_flags & B_TIMED_OUT) { rc = -ENOLCK; break; } nlmsvc_update_deferred_block(block, result); } else if (result == 0) block->b_granted = 1; nlmsvc_insert_block_locked(block, 0); svc_wake_up(block->b_daemon); rc = 0; break; } } spin_unlock(&nlm_blocked_lock); if (rc == -ENOENT) printk(KERN_WARNING "lockd: grant for unknown block\n"); return rc; } /* * Unblock a blocked lock request. This is a callback invoked from the * VFS layer when a lock on which we blocked is removed. * * This function doesn't grant the blocked lock instantly, but rather moves * the block to the head of nlm_blocked where it can be picked up by lockd. */ static void nlmsvc_notify_blocked(struct file_lock *fl) { struct nlm_block *block; dprintk("lockd: VFS unblock notification for block %p\n", fl); spin_lock(&nlm_blocked_lock); list_for_each_entry(block, &nlm_blocked, b_list) { if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) { nlmsvc_insert_block_locked(block, 0); spin_unlock(&nlm_blocked_lock); svc_wake_up(block->b_daemon); return; } } spin_unlock(&nlm_blocked_lock); printk(KERN_WARNING "lockd: notification for unknown block!\n"); } static fl_owner_t nlmsvc_get_owner(fl_owner_t owner) { return nlmsvc_get_lockowner(owner); } static void nlmsvc_put_owner(fl_owner_t owner) { nlmsvc_put_lockowner(owner); } const struct lock_manager_operations nlmsvc_lock_operations = { .lm_notify = nlmsvc_notify_blocked, .lm_grant = nlmsvc_grant_deferred, .lm_get_owner = nlmsvc_get_owner, .lm_put_owner = nlmsvc_put_owner, }; /* * Try to claim a lock that was previously blocked. * * Note that we use both the RPC_GRANTED_MSG call _and_ an async * RPC thread when notifying the client. This seems like overkill... * Here's why: * - we don't want to use a synchronous RPC thread, otherwise * we might find ourselves hanging on a dead portmapper. * - Some lockd implementations (e.g. HP) don't react to * RPC_GRANTED calls; they seem to insist on RPC_GRANTED_MSG calls. */ static void nlmsvc_grant_blocked(struct nlm_block *block) { struct nlm_file *file = block->b_file; struct nlm_lock *lock = &block->b_call->a_args.lock; int mode; int error; loff_t fl_start, fl_end; dprintk("lockd: grant blocked lock %p\n", block); kref_get(&block->b_count); /* Unlink block request from list */ nlmsvc_unlink_block(block); /* If b_granted is true this means we've been here before. 
* Just retry the grant callback, possibly refreshing the RPC * binding */ if (block->b_granted) { nlm_rebind_host(block->b_host); goto callback; } /* Try the lock operation again */ /* vfs_lock_file() can mangle fl_start and fl_end, but we need * them unchanged for the GRANT_MSG */ lock->fl.fl_flags |= FL_SLEEP; fl_start = lock->fl.fl_start; fl_end = lock->fl.fl_end; mode = lock_to_openmode(&lock->fl); error = vfs_lock_file(file->f_file[mode], F_SETLK, &lock->fl, NULL); lock->fl.fl_flags &= ~FL_SLEEP; lock->fl.fl_start = fl_start; lock->fl.fl_end = fl_end; switch (error) { case 0: break; case FILE_LOCK_DEFERRED: dprintk("lockd: lock still blocked error %d\n", error); nlmsvc_insert_block(block, NLM_NEVER); nlmsvc_release_block(block); return; default: printk(KERN_WARNING "lockd: unexpected error %d in %s!\n", -error, __func__); nlmsvc_insert_block(block, 10 * HZ); nlmsvc_release_block(block); return; } callback: /* Lock was granted by VFS. */ dprintk("lockd: GRANTing blocked lock.\n"); block->b_granted = 1; /* keep block on the list, but don't reattempt until the RPC * completes or the submission fails */ nlmsvc_insert_block(block, NLM_NEVER); /* Call the client -- use a soft RPC task since nlmsvc_retry_blocked * will queue up a new one if this one times out */ error = nlm_async_call(block->b_call, NLMPROC_GRANTED_MSG, &nlmsvc_grant_ops); /* RPC submission failed, wait a bit and retry */ if (error < 0) nlmsvc_insert_block(block, 10 * HZ); } /* * This is the callback from the RPC layer when the NLM_GRANTED_MSG * RPC call has succeeded or timed out. * Like all RPC callbacks, it is invoked by the rpciod process, so it * better not sleep. Therefore, we put the blocked lock on the nlm_blocked * chain once more in order to have it removed by lockd itself (which can * then sleep on the file semaphore without disrupting e.g. the nfs client). */ static void nlmsvc_grant_callback(struct rpc_task *task, void *data) { struct nlm_rqst *call = data; struct nlm_block *block = call->a_block; unsigned long timeout; dprintk("lockd: GRANT_MSG RPC callback\n"); spin_lock(&nlm_blocked_lock); /* if the block is not on a list at this point then it has * been invalidated. Don't try to requeue it. * * FIXME: it's possible that the block is removed from the list * after this check but before the nlmsvc_insert_block. In that * case it will be added back. Perhaps we need better locking * for nlm_blocked? */ if (list_empty(&block->b_list)) goto out; /* Technically, we should down the file semaphore here. Since we * move the block towards the head of the queue only, no harm * can be done, though. */ if (task->tk_status < 0) { /* RPC error: Re-insert for retransmission */ timeout = 10 * HZ; } else { /* Call was successful, now wait for client callback */ timeout = 60 * HZ; } nlmsvc_insert_block_locked(block, timeout); svc_wake_up(block->b_daemon); out: spin_unlock(&nlm_blocked_lock); } /* * FIXME: nlmsvc_release_block() grabs a mutex. This is not allowed for an * .rpc_release rpc_call_op */ static void nlmsvc_grant_release(void *data) { struct nlm_rqst *call = data; nlmsvc_release_block(call->a_block); } static const struct rpc_call_ops nlmsvc_grant_ops = { .rpc_call_done = nlmsvc_grant_callback, .rpc_release = nlmsvc_grant_release, }; /* * We received a GRANT_RES callback. Try to find the corresponding * block. 
*/ void nlmsvc_grant_reply(struct nlm_cookie *cookie, __be32 status) { struct nlm_block *block; struct file_lock *fl; int error; dprintk("grant_reply: looking for cookie %x, s=%d \n", *(unsigned int *)(cookie->data), status); if (!(block = nlmsvc_find_block(cookie))) return; switch (status) { case nlm_lck_denied_grace_period: /* Try again in a couple of seconds */ nlmsvc_insert_block(block, 10 * HZ); break; case nlm_lck_denied: /* Client doesn't want it, just unlock it */ nlmsvc_unlink_block(block); fl = &block->b_call->a_args.lock.fl; fl->fl_type = F_UNLCK; error = vfs_lock_file(fl->fl_file, F_SETLK, fl, NULL); if (error) pr_warn("lockd: unable to unlock lock rejected by client!\n"); break; default: /* * Either it was accepted or the status makes no sense * just unlink it either way. */ nlmsvc_unlink_block(block); } nlmsvc_release_block(block); } /* Helper function to handle retry of a deferred block. * If it is a blocking lock, call grant_blocked. * For a non-blocking lock or test lock, revisit the request. */ static void retry_deferred_block(struct nlm_block *block) { if (!(block->b_flags & B_GOT_CALLBACK)) block->b_flags |= B_TIMED_OUT; nlmsvc_insert_block(block, NLM_TIMEOUT); dprintk("revisit block %p flags %d\n", block, block->b_flags); if (block->b_deferred_req) { block->b_deferred_req->revisit(block->b_deferred_req, 0); block->b_deferred_req = NULL; } } /* * Retry all blocked locks that have been notified. This is where lockd * picks up locks that can be granted, or grant notifications that must * be retransmitted. */ void nlmsvc_retry_blocked(void) { unsigned long timeout = MAX_SCHEDULE_TIMEOUT; struct nlm_block *block; spin_lock(&nlm_blocked_lock); while (!list_empty(&nlm_blocked) && !kthread_should_stop()) { block = list_entry(nlm_blocked.next, struct nlm_block, b_list); if (block->b_when == NLM_NEVER) break; if (time_after(block->b_when, jiffies)) { timeout = block->b_when - jiffies; break; } spin_unlock(&nlm_blocked_lock); dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n", block, block->b_when); if (block->b_flags & B_QUEUED) { dprintk("nlmsvc_retry_blocked delete block (%p, granted=%d, flags=%d)\n", block, block->b_granted, block->b_flags); retry_deferred_block(block); } else nlmsvc_grant_blocked(block); spin_lock(&nlm_blocked_lock); } spin_unlock(&nlm_blocked_lock); if (timeout < MAX_SCHEDULE_TIMEOUT) mod_timer(&nlmsvc_retry, jiffies + timeout); }
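/* Illustrative aside (not part of svclock.c): nlmsvc_lock() and
 * nlmsvc_testlock() above ultimately funnel NLM requests into
 * vfs_lock_file()/vfs_test_lock(), i.e. ordinary POSIX record locks on the
 * exported file. The small userspace sketch below shows the same two
 * outcomes from the application side: a non-blocking F_SETLK refused
 * because another owner holds the range (the nlm_lck_denied path) and an
 * F_GETLK probe reporting the conflicting holder (the nlmsvc_testlock
 * path). The temp-file name and all demo logic are assumptions for
 * illustration only.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	char path[] = "/tmp/nlm-demo-XXXXXX";
	int fd = mkstemp(path);
	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET,
			    .l_start = 0, .l_len = 0 /* whole file */ };

	if (fd < 0 || fcntl(fd, F_SETLK, &fl) < 0) {
		perror("setup");
		return 1;
	}

	if (fork() == 0) {
		/* Child is a different lock owner, so the parent's lock conflicts. */
		int cfd = open(path, O_RDWR);
		struct flock probe = { .l_type = F_WRLCK, .l_whence = SEEK_SET };

		if (fcntl(cfd, F_SETLK, &probe) < 0)
			printf("child: lock denied (%s)\n", strerror(errno));
		if (fcntl(cfd, F_GETLK, &probe) == 0 && probe.l_type != F_UNLCK)
			printf("child: conflicting lock held by pid %d\n",
			       (int)probe.l_pid);
		_exit(0);
	}
	wait(NULL);
	unlink(path);
	return 0;
}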
linux-master
fs/lockd/svclock.c
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/lockd/svc4proc.c * * Lockd server procedures. We don't implement the NLM_*_RES * procedures because we don't use the async procedures. * * Copyright (C) 1996, Olaf Kirch <[email protected]> */ #include <linux/types.h> #include <linux/time.h> #include <linux/lockd/lockd.h> #include <linux/lockd/share.h> #include <linux/sunrpc/svc_xprt.h> #define NLMDBG_FACILITY NLMDBG_CLIENT /* * Obtain client and file from arguments */ static __be32 nlm4svc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp, struct nlm_host **hostp, struct nlm_file **filp) { struct nlm_host *host = NULL; struct nlm_file *file = NULL; struct nlm_lock *lock = &argp->lock; __be32 error = 0; /* nfsd callbacks must have been installed for this procedure */ if (!nlmsvc_ops) return nlm_lck_denied_nolocks; if (lock->lock_start > OFFSET_MAX || (lock->lock_len && ((lock->lock_len - 1) > (OFFSET_MAX - lock->lock_start)))) return nlm4_fbig; /* Obtain host handle */ if (!(host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len)) || (argp->monitor && nsm_monitor(host) < 0)) goto no_locks; *hostp = host; /* Obtain file pointer. Not used by FREE_ALL call. */ if (filp != NULL) { int mode = lock_to_openmode(&lock->fl); error = nlm_lookup_file(rqstp, &file, lock); if (error) goto no_locks; *filp = file; /* Set up the missing parts of the file_lock structure */ lock->fl.fl_flags = FL_POSIX; lock->fl.fl_file = file->f_file[mode]; lock->fl.fl_pid = current->tgid; lock->fl.fl_start = (loff_t)lock->lock_start; lock->fl.fl_end = lock->lock_len ? (loff_t)(lock->lock_start + lock->lock_len - 1) : OFFSET_MAX; lock->fl.fl_lmops = &nlmsvc_lock_operations; nlmsvc_locks_init_private(&lock->fl, host, (pid_t)lock->svid); if (!lock->fl.fl_owner) { /* lockowner allocation has failed */ nlmsvc_release_host(host); return nlm_lck_denied_nolocks; } } return 0; no_locks: nlmsvc_release_host(host); if (error) return error; return nlm_lck_denied_nolocks; } /* * NULL: Test for presence of service */ static __be32 nlm4svc_proc_null(struct svc_rqst *rqstp) { dprintk("lockd: NULL called\n"); return rpc_success; } /* * TEST: Check for conflicting lock */ static __be32 __nlm4svc_proc_test(struct svc_rqst *rqstp, struct nlm_res *resp) { struct nlm_args *argp = rqstp->rq_argp; struct nlm_host *host; struct nlm_file *file; struct nlm_lockowner *test_owner; __be32 rc = rpc_success; dprintk("lockd: TEST4 called\n"); resp->cookie = argp->cookie; /* Obtain client and file */ if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file))) return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success; test_owner = argp->lock.fl.fl_owner; /* Now check for conflicting locks */ resp->status = nlmsvc_testlock(rqstp, file, host, &argp->lock, &resp->lock, &resp->cookie); if (resp->status == nlm_drop_reply) rc = rpc_drop_reply; else dprintk("lockd: TEST4 status %d\n", ntohl(resp->status)); nlmsvc_put_lockowner(test_owner); nlmsvc_release_host(host); nlm_release_file(file); return rc; } static __be32 nlm4svc_proc_test(struct svc_rqst *rqstp) { return __nlm4svc_proc_test(rqstp, rqstp->rq_resp); } static __be32 __nlm4svc_proc_lock(struct svc_rqst *rqstp, struct nlm_res *resp) { struct nlm_args *argp = rqstp->rq_argp; struct nlm_host *host; struct nlm_file *file; __be32 rc = rpc_success; dprintk("lockd: LOCK called\n"); resp->cookie = argp->cookie; /* Obtain client and file */ if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file))) return resp->status == nlm_drop_reply ? 
rpc_drop_reply :rpc_success; #if 0 /* If supplied state doesn't match current state, we assume it's * an old request that time-warped somehow. Any error return would * do in this case because it's irrelevant anyway. * * NB: We don't retrieve the remote host's state yet. */ if (host->h_nsmstate && host->h_nsmstate != argp->state) { resp->status = nlm_lck_denied_nolocks; } else #endif /* Now try to lock the file */ resp->status = nlmsvc_lock(rqstp, file, host, &argp->lock, argp->block, &argp->cookie, argp->reclaim); if (resp->status == nlm_drop_reply) rc = rpc_drop_reply; else dprintk("lockd: LOCK status %d\n", ntohl(resp->status)); nlmsvc_release_lockowner(&argp->lock); nlmsvc_release_host(host); nlm_release_file(file); return rc; } static __be32 nlm4svc_proc_lock(struct svc_rqst *rqstp) { return __nlm4svc_proc_lock(rqstp, rqstp->rq_resp); } static __be32 __nlm4svc_proc_cancel(struct svc_rqst *rqstp, struct nlm_res *resp) { struct nlm_args *argp = rqstp->rq_argp; struct nlm_host *host; struct nlm_file *file; dprintk("lockd: CANCEL called\n"); resp->cookie = argp->cookie; /* Don't accept requests during grace period */ if (locks_in_grace(SVC_NET(rqstp))) { resp->status = nlm_lck_denied_grace_period; return rpc_success; } /* Obtain client and file */ if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file))) return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success; /* Try to cancel request. */ resp->status = nlmsvc_cancel_blocked(SVC_NET(rqstp), file, &argp->lock); dprintk("lockd: CANCEL status %d\n", ntohl(resp->status)); nlmsvc_release_lockowner(&argp->lock); nlmsvc_release_host(host); nlm_release_file(file); return rpc_success; } static __be32 nlm4svc_proc_cancel(struct svc_rqst *rqstp) { return __nlm4svc_proc_cancel(rqstp, rqstp->rq_resp); } /* * UNLOCK: release a lock */ static __be32 __nlm4svc_proc_unlock(struct svc_rqst *rqstp, struct nlm_res *resp) { struct nlm_args *argp = rqstp->rq_argp; struct nlm_host *host; struct nlm_file *file; dprintk("lockd: UNLOCK called\n"); resp->cookie = argp->cookie; /* Don't accept new lock requests during grace period */ if (locks_in_grace(SVC_NET(rqstp))) { resp->status = nlm_lck_denied_grace_period; return rpc_success; } /* Obtain client and file */ if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file))) return resp->status == nlm_drop_reply ? 
rpc_drop_reply :rpc_success; /* Now try to remove the lock */ resp->status = nlmsvc_unlock(SVC_NET(rqstp), file, &argp->lock); dprintk("lockd: UNLOCK status %d\n", ntohl(resp->status)); nlmsvc_release_lockowner(&argp->lock); nlmsvc_release_host(host); nlm_release_file(file); return rpc_success; } static __be32 nlm4svc_proc_unlock(struct svc_rqst *rqstp) { return __nlm4svc_proc_unlock(rqstp, rqstp->rq_resp); } /* * GRANTED: A server calls us to tell that a process' lock request * was granted */ static __be32 __nlm4svc_proc_granted(struct svc_rqst *rqstp, struct nlm_res *resp) { struct nlm_args *argp = rqstp->rq_argp; resp->cookie = argp->cookie; dprintk("lockd: GRANTED called\n"); resp->status = nlmclnt_grant(svc_addr(rqstp), &argp->lock); dprintk("lockd: GRANTED status %d\n", ntohl(resp->status)); return rpc_success; } static __be32 nlm4svc_proc_granted(struct svc_rqst *rqstp) { return __nlm4svc_proc_granted(rqstp, rqstp->rq_resp); } /* * This is the generic lockd callback for async RPC calls */ static void nlm4svc_callback_exit(struct rpc_task *task, void *data) { } static void nlm4svc_callback_release(void *data) { nlmsvc_release_call(data); } static const struct rpc_call_ops nlm4svc_callback_ops = { .rpc_call_done = nlm4svc_callback_exit, .rpc_release = nlm4svc_callback_release, }; /* * `Async' versions of the above service routines. They aren't really, * because we send the callback before the reply proper. I hope this * doesn't break any clients. */ static __be32 nlm4svc_callback(struct svc_rqst *rqstp, u32 proc, __be32 (*func)(struct svc_rqst *, struct nlm_res *)) { struct nlm_args *argp = rqstp->rq_argp; struct nlm_host *host; struct nlm_rqst *call; __be32 stat; host = nlmsvc_lookup_host(rqstp, argp->lock.caller, argp->lock.len); if (host == NULL) return rpc_system_err; call = nlm_alloc_call(host); nlmsvc_release_host(host); if (call == NULL) return rpc_system_err; stat = func(rqstp, &call->a_res); if (stat != 0) { nlmsvc_release_call(call); return stat; } call->a_flags = RPC_TASK_ASYNC; if (nlm_async_reply(call, proc, &nlm4svc_callback_ops) < 0) return rpc_system_err; return rpc_success; } static __be32 nlm4svc_proc_test_msg(struct svc_rqst *rqstp) { dprintk("lockd: TEST_MSG called\n"); return nlm4svc_callback(rqstp, NLMPROC_TEST_RES, __nlm4svc_proc_test); } static __be32 nlm4svc_proc_lock_msg(struct svc_rqst *rqstp) { dprintk("lockd: LOCK_MSG called\n"); return nlm4svc_callback(rqstp, NLMPROC_LOCK_RES, __nlm4svc_proc_lock); } static __be32 nlm4svc_proc_cancel_msg(struct svc_rqst *rqstp) { dprintk("lockd: CANCEL_MSG called\n"); return nlm4svc_callback(rqstp, NLMPROC_CANCEL_RES, __nlm4svc_proc_cancel); } static __be32 nlm4svc_proc_unlock_msg(struct svc_rqst *rqstp) { dprintk("lockd: UNLOCK_MSG called\n"); return nlm4svc_callback(rqstp, NLMPROC_UNLOCK_RES, __nlm4svc_proc_unlock); } static __be32 nlm4svc_proc_granted_msg(struct svc_rqst *rqstp) { dprintk("lockd: GRANTED_MSG called\n"); return nlm4svc_callback(rqstp, NLMPROC_GRANTED_RES, __nlm4svc_proc_granted); } /* * SHARE: create a DOS share or alter existing share. 
*/ static __be32 nlm4svc_proc_share(struct svc_rqst *rqstp) { struct nlm_args *argp = rqstp->rq_argp; struct nlm_res *resp = rqstp->rq_resp; struct nlm_host *host; struct nlm_file *file; dprintk("lockd: SHARE called\n"); resp->cookie = argp->cookie; /* Don't accept new lock requests during grace period */ if (locks_in_grace(SVC_NET(rqstp)) && !argp->reclaim) { resp->status = nlm_lck_denied_grace_period; return rpc_success; } /* Obtain client and file */ if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file))) return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success; /* Now try to create the share */ resp->status = nlmsvc_share_file(host, file, argp); dprintk("lockd: SHARE status %d\n", ntohl(resp->status)); nlmsvc_release_lockowner(&argp->lock); nlmsvc_release_host(host); nlm_release_file(file); return rpc_success; } /* * UNSHARE: Release a DOS share. */ static __be32 nlm4svc_proc_unshare(struct svc_rqst *rqstp) { struct nlm_args *argp = rqstp->rq_argp; struct nlm_res *resp = rqstp->rq_resp; struct nlm_host *host; struct nlm_file *file; dprintk("lockd: UNSHARE called\n"); resp->cookie = argp->cookie; /* Don't accept requests during grace period */ if (locks_in_grace(SVC_NET(rqstp))) { resp->status = nlm_lck_denied_grace_period; return rpc_success; } /* Obtain client and file */ if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file))) return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success; /* Now try to lock the file */ resp->status = nlmsvc_unshare_file(host, file, argp); dprintk("lockd: UNSHARE status %d\n", ntohl(resp->status)); nlmsvc_release_lockowner(&argp->lock); nlmsvc_release_host(host); nlm_release_file(file); return rpc_success; } /* * NM_LOCK: Create an unmonitored lock */ static __be32 nlm4svc_proc_nm_lock(struct svc_rqst *rqstp) { struct nlm_args *argp = rqstp->rq_argp; dprintk("lockd: NM_LOCK called\n"); argp->monitor = 0; /* just clean the monitor flag */ return nlm4svc_proc_lock(rqstp); } /* * FREE_ALL: Release all locks and shares held by client */ static __be32 nlm4svc_proc_free_all(struct svc_rqst *rqstp) { struct nlm_args *argp = rqstp->rq_argp; struct nlm_host *host; /* Obtain client */ if (nlm4svc_retrieve_args(rqstp, argp, &host, NULL)) return rpc_success; nlmsvc_free_host_resources(host); nlmsvc_release_host(host); return rpc_success; } /* * SM_NOTIFY: private callback from statd (not part of official NLM proto) */ static __be32 nlm4svc_proc_sm_notify(struct svc_rqst *rqstp) { struct nlm_reboot *argp = rqstp->rq_argp; dprintk("lockd: SM_NOTIFY called\n"); if (!nlm_privileged_requester(rqstp)) { char buf[RPC_MAX_ADDRBUFLEN]; printk(KERN_WARNING "lockd: rejected NSM callback from %s\n", svc_print_addr(rqstp, buf, sizeof(buf))); return rpc_system_err; } nlm_host_rebooted(SVC_NET(rqstp), argp); return rpc_success; } /* * client sent a GRANTED_RES, let's remove the associated block */ static __be32 nlm4svc_proc_granted_res(struct svc_rqst *rqstp) { struct nlm_res *argp = rqstp->rq_argp; if (!nlmsvc_ops) return rpc_success; dprintk("lockd: GRANTED_RES called\n"); nlmsvc_grant_reply(&argp->cookie, argp->status); return rpc_success; } static __be32 nlm4svc_proc_unused(struct svc_rqst *rqstp) { return rpc_proc_unavail; } /* * NLM Server procedures. 
*/ struct nlm_void { int dummy; }; #define Ck (1+XDR_QUADLEN(NLM_MAXCOOKIELEN)) /* cookie */ #define No (1+1024/4) /* netobj */ #define St 1 /* status */ #define Rg 4 /* range (offset + length) */ const struct svc_procedure nlmsvc_procedures4[24] = { [NLMPROC_NULL] = { .pc_func = nlm4svc_proc_null, .pc_decode = nlm4svc_decode_void, .pc_encode = nlm4svc_encode_void, .pc_argsize = sizeof(struct nlm_void), .pc_argzero = sizeof(struct nlm_void), .pc_ressize = sizeof(struct nlm_void), .pc_xdrressize = St, .pc_name = "NULL", }, [NLMPROC_TEST] = { .pc_func = nlm4svc_proc_test, .pc_decode = nlm4svc_decode_testargs, .pc_encode = nlm4svc_encode_testres, .pc_argsize = sizeof(struct nlm_args), .pc_argzero = sizeof(struct nlm_args), .pc_ressize = sizeof(struct nlm_res), .pc_xdrressize = Ck+St+2+No+Rg, .pc_name = "TEST", }, [NLMPROC_LOCK] = { .pc_func = nlm4svc_proc_lock, .pc_decode = nlm4svc_decode_lockargs, .pc_encode = nlm4svc_encode_res, .pc_argsize = sizeof(struct nlm_args), .pc_argzero = sizeof(struct nlm_args), .pc_ressize = sizeof(struct nlm_res), .pc_xdrressize = Ck+St, .pc_name = "LOCK", }, [NLMPROC_CANCEL] = { .pc_func = nlm4svc_proc_cancel, .pc_decode = nlm4svc_decode_cancargs, .pc_encode = nlm4svc_encode_res, .pc_argsize = sizeof(struct nlm_args), .pc_argzero = sizeof(struct nlm_args), .pc_ressize = sizeof(struct nlm_res), .pc_xdrressize = Ck+St, .pc_name = "CANCEL", }, [NLMPROC_UNLOCK] = { .pc_func = nlm4svc_proc_unlock, .pc_decode = nlm4svc_decode_unlockargs, .pc_encode = nlm4svc_encode_res, .pc_argsize = sizeof(struct nlm_args), .pc_argzero = sizeof(struct nlm_args), .pc_ressize = sizeof(struct nlm_res), .pc_xdrressize = Ck+St, .pc_name = "UNLOCK", }, [NLMPROC_GRANTED] = { .pc_func = nlm4svc_proc_granted, .pc_decode = nlm4svc_decode_testargs, .pc_encode = nlm4svc_encode_res, .pc_argsize = sizeof(struct nlm_args), .pc_argzero = sizeof(struct nlm_args), .pc_ressize = sizeof(struct nlm_res), .pc_xdrressize = Ck+St, .pc_name = "GRANTED", }, [NLMPROC_TEST_MSG] = { .pc_func = nlm4svc_proc_test_msg, .pc_decode = nlm4svc_decode_testargs, .pc_encode = nlm4svc_encode_void, .pc_argsize = sizeof(struct nlm_args), .pc_argzero = sizeof(struct nlm_args), .pc_ressize = sizeof(struct nlm_void), .pc_xdrressize = St, .pc_name = "TEST_MSG", }, [NLMPROC_LOCK_MSG] = { .pc_func = nlm4svc_proc_lock_msg, .pc_decode = nlm4svc_decode_lockargs, .pc_encode = nlm4svc_encode_void, .pc_argsize = sizeof(struct nlm_args), .pc_argzero = sizeof(struct nlm_args), .pc_ressize = sizeof(struct nlm_void), .pc_xdrressize = St, .pc_name = "LOCK_MSG", }, [NLMPROC_CANCEL_MSG] = { .pc_func = nlm4svc_proc_cancel_msg, .pc_decode = nlm4svc_decode_cancargs, .pc_encode = nlm4svc_encode_void, .pc_argsize = sizeof(struct nlm_args), .pc_argzero = sizeof(struct nlm_args), .pc_ressize = sizeof(struct nlm_void), .pc_xdrressize = St, .pc_name = "CANCEL_MSG", }, [NLMPROC_UNLOCK_MSG] = { .pc_func = nlm4svc_proc_unlock_msg, .pc_decode = nlm4svc_decode_unlockargs, .pc_encode = nlm4svc_encode_void, .pc_argsize = sizeof(struct nlm_args), .pc_argzero = sizeof(struct nlm_args), .pc_ressize = sizeof(struct nlm_void), .pc_xdrressize = St, .pc_name = "UNLOCK_MSG", }, [NLMPROC_GRANTED_MSG] = { .pc_func = nlm4svc_proc_granted_msg, .pc_decode = nlm4svc_decode_testargs, .pc_encode = nlm4svc_encode_void, .pc_argsize = sizeof(struct nlm_args), .pc_argzero = sizeof(struct nlm_args), .pc_ressize = sizeof(struct nlm_void), .pc_xdrressize = St, .pc_name = "GRANTED_MSG", }, [NLMPROC_TEST_RES] = { .pc_func = nlm4svc_proc_null, .pc_decode = nlm4svc_decode_void, 
.pc_encode = nlm4svc_encode_void, .pc_argsize = sizeof(struct nlm_res), .pc_argzero = sizeof(struct nlm_res), .pc_ressize = sizeof(struct nlm_void), .pc_xdrressize = St, .pc_name = "TEST_RES", }, [NLMPROC_LOCK_RES] = { .pc_func = nlm4svc_proc_null, .pc_decode = nlm4svc_decode_void, .pc_encode = nlm4svc_encode_void, .pc_argsize = sizeof(struct nlm_res), .pc_argzero = sizeof(struct nlm_res), .pc_ressize = sizeof(struct nlm_void), .pc_xdrressize = St, .pc_name = "LOCK_RES", }, [NLMPROC_CANCEL_RES] = { .pc_func = nlm4svc_proc_null, .pc_decode = nlm4svc_decode_void, .pc_encode = nlm4svc_encode_void, .pc_argsize = sizeof(struct nlm_res), .pc_argzero = sizeof(struct nlm_res), .pc_ressize = sizeof(struct nlm_void), .pc_xdrressize = St, .pc_name = "CANCEL_RES", }, [NLMPROC_UNLOCK_RES] = { .pc_func = nlm4svc_proc_null, .pc_decode = nlm4svc_decode_void, .pc_encode = nlm4svc_encode_void, .pc_argsize = sizeof(struct nlm_res), .pc_argzero = sizeof(struct nlm_res), .pc_ressize = sizeof(struct nlm_void), .pc_xdrressize = St, .pc_name = "UNLOCK_RES", }, [NLMPROC_GRANTED_RES] = { .pc_func = nlm4svc_proc_granted_res, .pc_decode = nlm4svc_decode_res, .pc_encode = nlm4svc_encode_void, .pc_argsize = sizeof(struct nlm_res), .pc_argzero = sizeof(struct nlm_res), .pc_ressize = sizeof(struct nlm_void), .pc_xdrressize = St, .pc_name = "GRANTED_RES", }, [NLMPROC_NSM_NOTIFY] = { .pc_func = nlm4svc_proc_sm_notify, .pc_decode = nlm4svc_decode_reboot, .pc_encode = nlm4svc_encode_void, .pc_argsize = sizeof(struct nlm_reboot), .pc_argzero = sizeof(struct nlm_reboot), .pc_ressize = sizeof(struct nlm_void), .pc_xdrressize = St, .pc_name = "SM_NOTIFY", }, [17] = { .pc_func = nlm4svc_proc_unused, .pc_decode = nlm4svc_decode_void, .pc_encode = nlm4svc_encode_void, .pc_argsize = sizeof(struct nlm_void), .pc_argzero = sizeof(struct nlm_void), .pc_ressize = sizeof(struct nlm_void), .pc_xdrressize = 0, .pc_name = "UNUSED", }, [18] = { .pc_func = nlm4svc_proc_unused, .pc_decode = nlm4svc_decode_void, .pc_encode = nlm4svc_encode_void, .pc_argsize = sizeof(struct nlm_void), .pc_argzero = sizeof(struct nlm_void), .pc_ressize = sizeof(struct nlm_void), .pc_xdrressize = 0, .pc_name = "UNUSED", }, [19] = { .pc_func = nlm4svc_proc_unused, .pc_decode = nlm4svc_decode_void, .pc_encode = nlm4svc_encode_void, .pc_argsize = sizeof(struct nlm_void), .pc_argzero = sizeof(struct nlm_void), .pc_ressize = sizeof(struct nlm_void), .pc_xdrressize = 0, .pc_name = "UNUSED", }, [NLMPROC_SHARE] = { .pc_func = nlm4svc_proc_share, .pc_decode = nlm4svc_decode_shareargs, .pc_encode = nlm4svc_encode_shareres, .pc_argsize = sizeof(struct nlm_args), .pc_argzero = sizeof(struct nlm_args), .pc_ressize = sizeof(struct nlm_res), .pc_xdrressize = Ck+St+1, .pc_name = "SHARE", }, [NLMPROC_UNSHARE] = { .pc_func = nlm4svc_proc_unshare, .pc_decode = nlm4svc_decode_shareargs, .pc_encode = nlm4svc_encode_shareres, .pc_argsize = sizeof(struct nlm_args), .pc_argzero = sizeof(struct nlm_args), .pc_ressize = sizeof(struct nlm_res), .pc_xdrressize = Ck+St+1, .pc_name = "UNSHARE", }, [NLMPROC_NM_LOCK] = { .pc_func = nlm4svc_proc_nm_lock, .pc_decode = nlm4svc_decode_lockargs, .pc_encode = nlm4svc_encode_res, .pc_argsize = sizeof(struct nlm_args), .pc_argzero = sizeof(struct nlm_args), .pc_ressize = sizeof(struct nlm_res), .pc_xdrressize = Ck+St, .pc_name = "NM_LOCK", }, [NLMPROC_FREE_ALL] = { .pc_func = nlm4svc_proc_free_all, .pc_decode = nlm4svc_decode_notify, .pc_encode = nlm4svc_encode_void, .pc_argsize = sizeof(struct nlm_args), .pc_argzero = sizeof(struct nlm_args), 
.pc_ressize = sizeof(struct nlm_void), .pc_xdrressize = St, .pc_name = "FREE_ALL", }, };
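/* Illustrative aside (not part of svc4proc.c): nlmsvc_procedures4[] maps
 * NLM procedure numbers to handlers plus their XDR decode/encode hooks and
 * reply-size hints, using designated array initializers. The userspace
 * sketch below mirrors only that dispatch-table shape; every demo_* name
 * is invented for illustration and nothing here is a kernel interface.
 */
#include <stdio.h>

struct demo_proc {
	const char *name;
	int (*func)(int arg);		/* rough stand-in for pc_func */
};

static int demo_null(int arg)   { (void)arg; return 0; }
static int demo_test(int arg)   { return arg != 0; }
static int demo_unused(int arg) { (void)arg; return -1; }

/* Array index doubles as the procedure number, as in nlmsvc_procedures4[]. */
static const struct demo_proc demo_table[] = {
	[0] = { "NULL",   demo_null   },
	[1] = { "TEST",   demo_test   },
	[2] = { "UNUSED", demo_unused },
};

static int demo_dispatch(unsigned int proc, int arg)
{
	if (proc >= sizeof(demo_table) / sizeof(demo_table[0]) ||
	    demo_table[proc].func == NULL)
		return -1;		/* loose analogue of rpc_proc_unavail */
	printf("dispatching %s\n", demo_table[proc].name);
	return demo_table[proc].func(arg);
}

int main(void)
{
	demo_dispatch(1, 0);		/* known procedure */
	demo_dispatch(9, 0);		/* unknown procedure number */
	return 0;
}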
linux-master
fs/lockd/svc4proc.c
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/lockd/xdr4.c * * XDR support for lockd and the lock client. * * Copyright (C) 1995, 1996 Olaf Kirch <[email protected]> * Copyright (C) 1999, Trond Myklebust <[email protected]> */ #include <linux/types.h> #include <linux/sched.h> #include <linux/nfs.h> #include <linux/sunrpc/xdr.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/svc.h> #include <linux/sunrpc/stats.h> #include <linux/lockd/lockd.h> #include "svcxdr.h" static inline s64 loff_t_to_s64(loff_t offset) { s64 res; if (offset > NLM4_OFFSET_MAX) res = NLM4_OFFSET_MAX; else if (offset < -NLM4_OFFSET_MAX) res = -NLM4_OFFSET_MAX; else res = offset; return res; } void nlm4svc_set_file_lock_range(struct file_lock *fl, u64 off, u64 len) { s64 end = off + len - 1; fl->fl_start = off; if (len == 0 || end < 0) fl->fl_end = OFFSET_MAX; else fl->fl_end = end; } /* * NLM file handles are defined by specification to be a variable-length * XDR opaque no longer than 1024 bytes. However, this implementation * limits their length to the size of an NFSv3 file handle. */ static bool svcxdr_decode_fhandle(struct xdr_stream *xdr, struct nfs_fh *fh) { __be32 *p; u32 len; if (xdr_stream_decode_u32(xdr, &len) < 0) return false; if (len > NFS_MAXFHSIZE) return false; p = xdr_inline_decode(xdr, len); if (!p) return false; fh->size = len; memcpy(fh->data, p, len); memset(fh->data + len, 0, sizeof(fh->data) - len); return true; } static bool svcxdr_decode_lock(struct xdr_stream *xdr, struct nlm_lock *lock) { struct file_lock *fl = &lock->fl; if (!svcxdr_decode_string(xdr, &lock->caller, &lock->len)) return false; if (!svcxdr_decode_fhandle(xdr, &lock->fh)) return false; if (!svcxdr_decode_owner(xdr, &lock->oh)) return false; if (xdr_stream_decode_u32(xdr, &lock->svid) < 0) return false; if (xdr_stream_decode_u64(xdr, &lock->lock_start) < 0) return false; if (xdr_stream_decode_u64(xdr, &lock->lock_len) < 0) return false; locks_init_lock(fl); fl->fl_flags = FL_POSIX; fl->fl_type = F_RDLCK; nlm4svc_set_file_lock_range(fl, lock->lock_start, lock->lock_len); return true; } static bool svcxdr_encode_holder(struct xdr_stream *xdr, const struct nlm_lock *lock) { const struct file_lock *fl = &lock->fl; s64 start, len; /* exclusive */ if (xdr_stream_encode_bool(xdr, fl->fl_type != F_RDLCK) < 0) return false; if (xdr_stream_encode_u32(xdr, lock->svid) < 0) return false; if (!svcxdr_encode_owner(xdr, &lock->oh)) return false; start = loff_t_to_s64(fl->fl_start); if (fl->fl_end == OFFSET_MAX) len = 0; else len = loff_t_to_s64(fl->fl_end - fl->fl_start + 1); if (xdr_stream_encode_u64(xdr, start) < 0) return false; if (xdr_stream_encode_u64(xdr, len) < 0) return false; return true; } static bool svcxdr_encode_testrply(struct xdr_stream *xdr, const struct nlm_res *resp) { if (!svcxdr_encode_stats(xdr, resp->status)) return false; switch (resp->status) { case nlm_lck_denied: if (!svcxdr_encode_holder(xdr, &resp->lock)) return false; } return true; } /* * Decode Call arguments */ bool nlm4svc_decode_void(struct svc_rqst *rqstp, struct xdr_stream *xdr) { return true; } bool nlm4svc_decode_testargs(struct svc_rqst *rqstp, struct xdr_stream *xdr) { struct nlm_args *argp = rqstp->rq_argp; u32 exclusive; if (!svcxdr_decode_cookie(xdr, &argp->cookie)) return false; if (xdr_stream_decode_bool(xdr, &exclusive) < 0) return false; if (!svcxdr_decode_lock(xdr, &argp->lock)) return false; if (exclusive) argp->lock.fl.fl_type = F_WRLCK; return true; } bool nlm4svc_decode_lockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr) { 
struct nlm_args *argp = rqstp->rq_argp; u32 exclusive; if (!svcxdr_decode_cookie(xdr, &argp->cookie)) return false; if (xdr_stream_decode_bool(xdr, &argp->block) < 0) return false; if (xdr_stream_decode_bool(xdr, &exclusive) < 0) return false; if (!svcxdr_decode_lock(xdr, &argp->lock)) return false; if (exclusive) argp->lock.fl.fl_type = F_WRLCK; if (xdr_stream_decode_bool(xdr, &argp->reclaim) < 0) return false; if (xdr_stream_decode_u32(xdr, &argp->state) < 0) return false; argp->monitor = 1; /* monitor client by default */ return true; } bool nlm4svc_decode_cancargs(struct svc_rqst *rqstp, struct xdr_stream *xdr) { struct nlm_args *argp = rqstp->rq_argp; u32 exclusive; if (!svcxdr_decode_cookie(xdr, &argp->cookie)) return false; if (xdr_stream_decode_bool(xdr, &argp->block) < 0) return false; if (xdr_stream_decode_bool(xdr, &exclusive) < 0) return false; if (!svcxdr_decode_lock(xdr, &argp->lock)) return false; if (exclusive) argp->lock.fl.fl_type = F_WRLCK; return true; } bool nlm4svc_decode_unlockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr) { struct nlm_args *argp = rqstp->rq_argp; if (!svcxdr_decode_cookie(xdr, &argp->cookie)) return false; if (!svcxdr_decode_lock(xdr, &argp->lock)) return false; argp->lock.fl.fl_type = F_UNLCK; return true; } bool nlm4svc_decode_res(struct svc_rqst *rqstp, struct xdr_stream *xdr) { struct nlm_res *resp = rqstp->rq_argp; if (!svcxdr_decode_cookie(xdr, &resp->cookie)) return false; if (!svcxdr_decode_stats(xdr, &resp->status)) return false; return true; } bool nlm4svc_decode_reboot(struct svc_rqst *rqstp, struct xdr_stream *xdr) { struct nlm_reboot *argp = rqstp->rq_argp; __be32 *p; u32 len; if (xdr_stream_decode_u32(xdr, &len) < 0) return false; if (len > SM_MAXSTRLEN) return false; p = xdr_inline_decode(xdr, len); if (!p) return false; argp->len = len; argp->mon = (char *)p; if (xdr_stream_decode_u32(xdr, &argp->state) < 0) return false; p = xdr_inline_decode(xdr, SM_PRIV_SIZE); if (!p) return false; memcpy(&argp->priv.data, p, sizeof(argp->priv.data)); return true; } bool nlm4svc_decode_shareargs(struct svc_rqst *rqstp, struct xdr_stream *xdr) { struct nlm_args *argp = rqstp->rq_argp; struct nlm_lock *lock = &argp->lock; memset(lock, 0, sizeof(*lock)); locks_init_lock(&lock->fl); lock->svid = ~(u32)0; if (!svcxdr_decode_cookie(xdr, &argp->cookie)) return false; if (!svcxdr_decode_string(xdr, &lock->caller, &lock->len)) return false; if (!svcxdr_decode_fhandle(xdr, &lock->fh)) return false; if (!svcxdr_decode_owner(xdr, &lock->oh)) return false; /* XXX: Range checks are missing in the original code */ if (xdr_stream_decode_u32(xdr, &argp->fsm_mode) < 0) return false; if (xdr_stream_decode_u32(xdr, &argp->fsm_access) < 0) return false; return true; } bool nlm4svc_decode_notify(struct svc_rqst *rqstp, struct xdr_stream *xdr) { struct nlm_args *argp = rqstp->rq_argp; struct nlm_lock *lock = &argp->lock; if (!svcxdr_decode_string(xdr, &lock->caller, &lock->len)) return false; if (xdr_stream_decode_u32(xdr, &argp->state) < 0) return false; return true; } /* * Encode Reply results */ bool nlm4svc_encode_void(struct svc_rqst *rqstp, struct xdr_stream *xdr) { return true; } bool nlm4svc_encode_testres(struct svc_rqst *rqstp, struct xdr_stream *xdr) { struct nlm_res *resp = rqstp->rq_resp; return svcxdr_encode_cookie(xdr, &resp->cookie) && svcxdr_encode_testrply(xdr, resp); } bool nlm4svc_encode_res(struct svc_rqst *rqstp, struct xdr_stream *xdr) { struct nlm_res *resp = rqstp->rq_resp; return svcxdr_encode_cookie(xdr, &resp->cookie) && 
svcxdr_encode_stats(xdr, resp->status); } bool nlm4svc_encode_shareres(struct svc_rqst *rqstp, struct xdr_stream *xdr) { struct nlm_res *resp = rqstp->rq_resp; if (!svcxdr_encode_cookie(xdr, &resp->cookie)) return false; if (!svcxdr_encode_stats(xdr, resp->status)) return false; /* sequence */ if (xdr_stream_encode_u32(xdr, 0) < 0) return false; return true; }
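/* Illustrative aside (not part of xdr4.c): NLMv4 carries 64-bit
 * offset/length pairs which nlm4svc_set_file_lock_range() converts into
 * the [fl_start, fl_end] form used by struct file_lock, where a length of
 * zero means "to end of file". The standalone sketch below reproduces
 * just that conversion; DEMO_OFFSET_MAX and the demo_* names are
 * placeholders, not kernel symbols.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_OFFSET_MAX INT64_MAX	/* stand-in for OFFSET_MAX */

static void demo_set_range(uint64_t off, uint64_t len,
			   int64_t *start, int64_t *end)
{
	int64_t e = (int64_t)(off + len - 1);

	*start = (int64_t)off;
	if (len == 0 || e < 0)
		*end = DEMO_OFFSET_MAX;	/* lock runs to end of file */
	else
		*end = e;
}

int main(void)
{
	int64_t s, e;

	demo_set_range(100, 50, &s, &e);
	printf("off=100 len=50 -> [%" PRId64 ", %" PRId64 "]\n", s, e);

	demo_set_range(100, 0, &s, &e);
	printf("off=100 len=0  -> [%" PRId64 ", %" PRId64 "]\n", s, e);
	return 0;
}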
linux-master
fs/lockd/xdr4.c
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/lockd/svcshare.c * * Management of DOS shares. * * Copyright (C) 1996 Olaf Kirch <[email protected]> */ #include <linux/time.h> #include <linux/unistd.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/svc.h> #include <linux/lockd/lockd.h> #include <linux/lockd/share.h> static inline int nlm_cmp_owner(struct nlm_share *share, struct xdr_netobj *oh) { return share->s_owner.len == oh->len && !memcmp(share->s_owner.data, oh->data, oh->len); } __be32 nlmsvc_share_file(struct nlm_host *host, struct nlm_file *file, struct nlm_args *argp) { struct nlm_share *share; struct xdr_netobj *oh = &argp->lock.oh; u8 *ohdata; for (share = file->f_shares; share; share = share->s_next) { if (share->s_host == host && nlm_cmp_owner(share, oh)) goto update; if ((argp->fsm_access & share->s_mode) || (argp->fsm_mode & share->s_access )) return nlm_lck_denied; } share = kmalloc(sizeof(*share) + oh->len, GFP_KERNEL); if (share == NULL) return nlm_lck_denied_nolocks; /* Copy owner handle */ ohdata = (u8 *) (share + 1); memcpy(ohdata, oh->data, oh->len); share->s_file = file; share->s_host = host; share->s_owner.data = ohdata; share->s_owner.len = oh->len; share->s_next = file->f_shares; file->f_shares = share; update: share->s_access = argp->fsm_access; share->s_mode = argp->fsm_mode; return nlm_granted; } /* * Delete a share. */ __be32 nlmsvc_unshare_file(struct nlm_host *host, struct nlm_file *file, struct nlm_args *argp) { struct nlm_share *share, **shpp; struct xdr_netobj *oh = &argp->lock.oh; for (shpp = &file->f_shares; (share = *shpp) != NULL; shpp = &share->s_next) { if (share->s_host == host && nlm_cmp_owner(share, oh)) { *shpp = share->s_next; kfree(share); return nlm_granted; } } /* X/Open spec says return success even if there was no * corresponding share. */ return nlm_granted; } /* * Traverse all shares for a given file, and delete * those owned by the given (type of) host */ void nlmsvc_traverse_shares(struct nlm_host *host, struct nlm_file *file, nlm_host_match_fn_t match) { struct nlm_share *share, **shpp; shpp = &file->f_shares; while ((share = *shpp) != NULL) { if (match(share->s_host, host)) { *shpp = share->s_next; kfree(share); continue; } shpp = &share->s_next; } }
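/* Illustrative aside (not part of svcshare.c): nlmsvc_share_file() refuses
 * a new DOS share when the requested access collides with an existing
 * holder's deny mask, or when the requested deny mask collides with what
 * an existing holder is already doing. The sketch below shows only that
 * two-way bitmask test; the SHR_* values and demo_* names are invented
 * and need not match the on-the-wire fsm_access/fsm_mode encoding.
 */
#include <stdio.h>

#define SHR_READ  0x1
#define SHR_WRITE 0x2

struct demo_share {
	unsigned int access;	/* what the holder does    (cf. s_access) */
	unsigned int deny;	/* what the holder forbids (cf. s_mode)   */
};

static int demo_share_conflicts(const struct demo_share *held,
				unsigned int want_access,
				unsigned int want_deny)
{
	return (want_access & held->deny) || (want_deny & held->access);
}

int main(void)
{
	struct demo_share held = { .access = SHR_READ, .deny = SHR_WRITE };

	printf("request read,  deny none : %s\n",
	       demo_share_conflicts(&held, SHR_READ, 0) ? "denied" : "granted");
	printf("request write, deny none : %s\n",
	       demo_share_conflicts(&held, SHR_WRITE, 0) ? "denied" : "granted");
	printf("request read,  deny read : %s\n",
	       demo_share_conflicts(&held, SHR_READ, SHR_READ) ? "denied" : "granted");
	return 0;
}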
linux-master
fs/lockd/svcshare.c
// SPDX-License-Identifier: GPL-2.0-only /* * linux/fs/lockd/clntproc.c * * RPC procedures for the client side NLM implementation * * Copyright (C) 1996, Olaf Kirch <[email protected]> */ #include <linux/module.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/filelock.h> #include <linux/nfs_fs.h> #include <linux/utsname.h> #include <linux/freezer.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/svc.h> #include <linux/lockd/lockd.h> #include "trace.h" #define NLMDBG_FACILITY NLMDBG_CLIENT #define NLMCLNT_GRACE_WAIT (5*HZ) #define NLMCLNT_POLL_TIMEOUT (30*HZ) #define NLMCLNT_MAX_RETRIES 3 static int nlmclnt_test(struct nlm_rqst *, struct file_lock *); static int nlmclnt_lock(struct nlm_rqst *, struct file_lock *); static int nlmclnt_unlock(struct nlm_rqst *, struct file_lock *); static int nlm_stat_to_errno(__be32 stat); static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host); static int nlmclnt_cancel(struct nlm_host *, int , struct file_lock *); static const struct rpc_call_ops nlmclnt_unlock_ops; static const struct rpc_call_ops nlmclnt_cancel_ops; /* * Cookie counter for NLM requests */ static atomic_t nlm_cookie = ATOMIC_INIT(0x1234); void nlmclnt_next_cookie(struct nlm_cookie *c) { u32 cookie = atomic_inc_return(&nlm_cookie); memcpy(c->data, &cookie, 4); c->len=4; } static struct nlm_lockowner * nlmclnt_get_lockowner(struct nlm_lockowner *lockowner) { refcount_inc(&lockowner->count); return lockowner; } static void nlmclnt_put_lockowner(struct nlm_lockowner *lockowner) { if (!refcount_dec_and_lock(&lockowner->count, &lockowner->host->h_lock)) return; list_del(&lockowner->list); spin_unlock(&lockowner->host->h_lock); nlmclnt_release_host(lockowner->host); kfree(lockowner); } static inline int nlm_pidbusy(struct nlm_host *host, uint32_t pid) { struct nlm_lockowner *lockowner; list_for_each_entry(lockowner, &host->h_lockowners, list) { if (lockowner->pid == pid) return -EBUSY; } return 0; } static inline uint32_t __nlm_alloc_pid(struct nlm_host *host) { uint32_t res; do { res = host->h_pidcount++; } while (nlm_pidbusy(host, res) < 0); return res; } static struct nlm_lockowner *__nlmclnt_find_lockowner(struct nlm_host *host, fl_owner_t owner) { struct nlm_lockowner *lockowner; list_for_each_entry(lockowner, &host->h_lockowners, list) { if (lockowner->owner != owner) continue; return nlmclnt_get_lockowner(lockowner); } return NULL; } static struct nlm_lockowner *nlmclnt_find_lockowner(struct nlm_host *host, fl_owner_t owner) { struct nlm_lockowner *res, *new = NULL; spin_lock(&host->h_lock); res = __nlmclnt_find_lockowner(host, owner); if (res == NULL) { spin_unlock(&host->h_lock); new = kmalloc(sizeof(*new), GFP_KERNEL); spin_lock(&host->h_lock); res = __nlmclnt_find_lockowner(host, owner); if (res == NULL && new != NULL) { res = new; refcount_set(&new->count, 1); new->owner = owner; new->pid = __nlm_alloc_pid(host); new->host = nlm_get_host(host); list_add(&new->list, &host->h_lockowners); new = NULL; } } spin_unlock(&host->h_lock); kfree(new); return res; } /* * Initialize arguments for TEST/LOCK/UNLOCK/CANCEL calls */ static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl) { struct nlm_args *argp = &req->a_args; struct nlm_lock *lock = &argp->lock; char *nodename = req->a_host->h_rpcclnt->cl_nodename; nlmclnt_next_cookie(&argp->cookie); memcpy(&lock->fh, NFS_FH(file_inode(fl->fl_file)), sizeof(struct nfs_fh)); lock->caller = nodename; lock->oh.data = req->a_owner; 
lock->oh.len = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s", (unsigned int)fl->fl_u.nfs_fl.owner->pid, nodename); lock->svid = fl->fl_u.nfs_fl.owner->pid; lock->fl.fl_start = fl->fl_start; lock->fl.fl_end = fl->fl_end; lock->fl.fl_type = fl->fl_type; } static void nlmclnt_release_lockargs(struct nlm_rqst *req) { WARN_ON_ONCE(req->a_args.lock.fl.fl_ops != NULL); } /** * nlmclnt_proc - Perform a single client-side lock request * @host: address of a valid nlm_host context representing the NLM server * @cmd: fcntl-style file lock operation to perform * @fl: address of arguments for the lock operation * @data: address of data to be sent to callback operations * */ int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl, void *data) { struct nlm_rqst *call; int status; const struct nlmclnt_operations *nlmclnt_ops = host->h_nlmclnt_ops; call = nlm_alloc_call(host); if (call == NULL) return -ENOMEM; if (nlmclnt_ops && nlmclnt_ops->nlmclnt_alloc_call) nlmclnt_ops->nlmclnt_alloc_call(data); nlmclnt_locks_init_private(fl, host); if (!fl->fl_u.nfs_fl.owner) { /* lockowner allocation has failed */ nlmclnt_release_call(call); return -ENOMEM; } /* Set up the argument struct */ nlmclnt_setlockargs(call, fl); call->a_callback_data = data; if (IS_SETLK(cmd) || IS_SETLKW(cmd)) { if (fl->fl_type != F_UNLCK) { call->a_args.block = IS_SETLKW(cmd) ? 1 : 0; status = nlmclnt_lock(call, fl); } else status = nlmclnt_unlock(call, fl); } else if (IS_GETLK(cmd)) status = nlmclnt_test(call, fl); else status = -EINVAL; fl->fl_ops->fl_release_private(fl); fl->fl_ops = NULL; dprintk("lockd: clnt proc returns %d\n", status); return status; } EXPORT_SYMBOL_GPL(nlmclnt_proc); /* * Allocate an NLM RPC call struct */ struct nlm_rqst *nlm_alloc_call(struct nlm_host *host) { struct nlm_rqst *call; for(;;) { call = kzalloc(sizeof(*call), GFP_KERNEL); if (call != NULL) { refcount_set(&call->a_count, 1); locks_init_lock(&call->a_args.lock.fl); locks_init_lock(&call->a_res.lock.fl); call->a_host = nlm_get_host(host); return call; } if (signalled()) break; printk("nlm_alloc_call: failed, waiting for memory\n"); schedule_timeout_interruptible(5*HZ); } return NULL; } void nlmclnt_release_call(struct nlm_rqst *call) { const struct nlmclnt_operations *nlmclnt_ops = call->a_host->h_nlmclnt_ops; if (!refcount_dec_and_test(&call->a_count)) return; if (nlmclnt_ops && nlmclnt_ops->nlmclnt_release_call) nlmclnt_ops->nlmclnt_release_call(call->a_callback_data); nlmclnt_release_host(call->a_host); nlmclnt_release_lockargs(call); kfree(call); } static void nlmclnt_rpc_release(void *data) { nlmclnt_release_call(data); } static int nlm_wait_on_grace(wait_queue_head_t *queue) { DEFINE_WAIT(wait); int status = -EINTR; prepare_to_wait(queue, &wait, TASK_INTERRUPTIBLE); if (!signalled ()) { schedule_timeout(NLMCLNT_GRACE_WAIT); try_to_freeze(); if (!signalled ()) status = 0; } finish_wait(queue, &wait); return status; } /* * Generic NLM call */ static int nlmclnt_call(const struct cred *cred, struct nlm_rqst *req, u32 proc) { struct nlm_host *host = req->a_host; struct rpc_clnt *clnt; struct nlm_args *argp = &req->a_args; struct nlm_res *resp = &req->a_res; struct rpc_message msg = { .rpc_argp = argp, .rpc_resp = resp, .rpc_cred = cred, }; int status; dprintk("lockd: call procedure %d on %s\n", (int)proc, host->h_name); do { if (host->h_reclaiming && !argp->reclaim) goto in_grace_period; /* If we have no RPC client yet, create one. 
*/ if ((clnt = nlm_bind_host(host)) == NULL) return -ENOLCK; msg.rpc_proc = &clnt->cl_procinfo[proc]; /* Perform the RPC call. If an error occurs, try again */ if ((status = rpc_call_sync(clnt, &msg, 0)) < 0) { dprintk("lockd: rpc_call returned error %d\n", -status); switch (status) { case -EPROTONOSUPPORT: status = -EINVAL; break; case -ECONNREFUSED: case -ETIMEDOUT: case -ENOTCONN: nlm_rebind_host(host); status = -EAGAIN; break; case -ERESTARTSYS: return signalled () ? -EINTR : status; default: break; } break; } else if (resp->status == nlm_lck_denied_grace_period) { dprintk("lockd: server in grace period\n"); if (argp->reclaim) { printk(KERN_WARNING "lockd: spurious grace period reject?!\n"); return -ENOLCK; } } else { if (!argp->reclaim) { /* We appear to be out of the grace period */ wake_up_all(&host->h_gracewait); } dprintk("lockd: server returns status %d\n", ntohl(resp->status)); return 0; /* Okay, call complete */ } in_grace_period: /* * The server has rebooted and appears to be in the grace * period during which locks are only allowed to be * reclaimed. * We can only back off and try again later. */ status = nlm_wait_on_grace(&host->h_gracewait); } while (status == 0); return status; } /* * Generic NLM call, async version. */ static struct rpc_task *__nlm_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops) { struct nlm_host *host = req->a_host; struct rpc_clnt *clnt; struct rpc_task_setup task_setup_data = { .rpc_message = msg, .callback_ops = tk_ops, .callback_data = req, .flags = RPC_TASK_ASYNC, }; dprintk("lockd: call procedure %d on %s (async)\n", (int)proc, host->h_name); /* If we have no RPC client yet, create one. */ clnt = nlm_bind_host(host); if (clnt == NULL) goto out_err; msg->rpc_proc = &clnt->cl_procinfo[proc]; task_setup_data.rpc_client = clnt; /* bootstrap and kick off the async RPC call */ return rpc_run_task(&task_setup_data); out_err: tk_ops->rpc_release(req); return ERR_PTR(-ENOLCK); } static int nlm_do_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops) { struct rpc_task *task; task = __nlm_async_call(req, proc, msg, tk_ops); if (IS_ERR(task)) return PTR_ERR(task); rpc_put_task(task); return 0; } /* * NLM asynchronous call. */ int nlm_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops) { struct rpc_message msg = { .rpc_argp = &req->a_args, .rpc_resp = &req->a_res, }; return nlm_do_async_call(req, proc, &msg, tk_ops); } int nlm_async_reply(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops) { struct rpc_message msg = { .rpc_argp = &req->a_res, }; return nlm_do_async_call(req, proc, &msg, tk_ops); } /* * NLM client asynchronous call. * * Note that although the calls are asynchronous, and are therefore * guaranteed to complete, we still always attempt to wait for * completion in order to be able to correctly track the lock * state. 
*/ static int nlmclnt_async_call(const struct cred *cred, struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops) { struct rpc_message msg = { .rpc_argp = &req->a_args, .rpc_resp = &req->a_res, .rpc_cred = cred, }; struct rpc_task *task; int err; task = __nlm_async_call(req, proc, &msg, tk_ops); if (IS_ERR(task)) return PTR_ERR(task); err = rpc_wait_for_completion_task(task); rpc_put_task(task); return err; } /* * TEST for the presence of a conflicting lock */ static int nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl) { int status; status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_TEST); if (status < 0) goto out; switch (req->a_res.status) { case nlm_granted: fl->fl_type = F_UNLCK; break; case nlm_lck_denied: /* * Report the conflicting lock back to the application. */ fl->fl_start = req->a_res.lock.fl.fl_start; fl->fl_end = req->a_res.lock.fl.fl_end; fl->fl_type = req->a_res.lock.fl.fl_type; fl->fl_pid = -req->a_res.lock.fl.fl_pid; break; default: status = nlm_stat_to_errno(req->a_res.status); } out: trace_nlmclnt_test(&req->a_args.lock, (const struct sockaddr *)&req->a_host->h_addr, req->a_host->h_addrlen, req->a_res.status); nlmclnt_release_call(req); return status; } static void nlmclnt_locks_copy_lock(struct file_lock *new, struct file_lock *fl) { spin_lock(&fl->fl_u.nfs_fl.owner->host->h_lock); new->fl_u.nfs_fl.state = fl->fl_u.nfs_fl.state; new->fl_u.nfs_fl.owner = nlmclnt_get_lockowner(fl->fl_u.nfs_fl.owner); list_add_tail(&new->fl_u.nfs_fl.list, &fl->fl_u.nfs_fl.owner->host->h_granted); spin_unlock(&fl->fl_u.nfs_fl.owner->host->h_lock); } static void nlmclnt_locks_release_private(struct file_lock *fl) { spin_lock(&fl->fl_u.nfs_fl.owner->host->h_lock); list_del(&fl->fl_u.nfs_fl.list); spin_unlock(&fl->fl_u.nfs_fl.owner->host->h_lock); nlmclnt_put_lockowner(fl->fl_u.nfs_fl.owner); } static const struct file_lock_operations nlmclnt_lock_ops = { .fl_copy_lock = nlmclnt_locks_copy_lock, .fl_release_private = nlmclnt_locks_release_private, }; static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host) { fl->fl_u.nfs_fl.state = 0; fl->fl_u.nfs_fl.owner = nlmclnt_find_lockowner(host, fl->fl_owner); INIT_LIST_HEAD(&fl->fl_u.nfs_fl.list); fl->fl_ops = &nlmclnt_lock_ops; } static int do_vfs_lock(struct file_lock *fl) { return locks_lock_file_wait(fl->fl_file, fl); } /* * LOCK: Try to create a lock * * Programmer Harassment Alert * * When given a blocking lock request in a sync RPC call, the HPUX lockd * will faithfully return LCK_BLOCKED but never cares to notify us when * the lock could be granted. This way, our local process could hang * around forever waiting for the callback. * * Solution A: Implement busy-waiting * Solution B: Use the async version of the call (NLM_LOCK_{MSG,RES}) * * For now I am implementing solution A, because I hate the idea of * re-implementing lockd for a third time in two months. The async * calls shouldn't be too hard to do, however. * * This is one of the lovely things about standards in the NFS area: * they're so soft and squishy you can't really blame HP for doing this. 
*/ static int nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl) { const struct cred *cred = nfs_file_cred(fl->fl_file); struct nlm_host *host = req->a_host; struct nlm_res *resp = &req->a_res; struct nlm_wait block; unsigned char fl_flags = fl->fl_flags; unsigned char fl_type; __be32 b_status; int status = -ENOLCK; if (nsm_monitor(host) < 0) goto out; req->a_args.state = nsm_local_state; fl->fl_flags |= FL_ACCESS; status = do_vfs_lock(fl); fl->fl_flags = fl_flags; if (status < 0) goto out; nlmclnt_prepare_block(&block, host, fl); again: /* * Initialise resp->status to a valid non-zero value, * since 0 == nlm_lck_granted */ resp->status = nlm_lck_blocked; /* * A GRANTED callback can come at any time -- even before the reply * to the LOCK request arrives, so we queue the wait before * requesting the lock. */ nlmclnt_queue_block(&block); for (;;) { /* Reboot protection */ fl->fl_u.nfs_fl.state = host->h_state; status = nlmclnt_call(cred, req, NLMPROC_LOCK); if (status < 0) break; /* Did a reclaimer thread notify us of a server reboot? */ if (resp->status == nlm_lck_denied_grace_period) continue; if (resp->status != nlm_lck_blocked) break; /* Wait on an NLM blocking lock */ status = nlmclnt_wait(&block, req, NLMCLNT_POLL_TIMEOUT); if (status < 0) break; if (block.b_status != nlm_lck_blocked) break; } b_status = nlmclnt_dequeue_block(&block); if (resp->status == nlm_lck_blocked) resp->status = b_status; /* if we were interrupted while blocking, then cancel the lock request * and exit */ if (resp->status == nlm_lck_blocked) { if (!req->a_args.block) goto out_unlock; if (nlmclnt_cancel(host, req->a_args.block, fl) == 0) goto out; } if (resp->status == nlm_granted) { down_read(&host->h_rwsem); /* Check whether or not the server has rebooted */ if (fl->fl_u.nfs_fl.state != host->h_state) { up_read(&host->h_rwsem); goto again; } /* Ensure the resulting lock will get added to granted list */ fl->fl_flags |= FL_SLEEP; if (do_vfs_lock(fl) < 0) printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __func__); up_read(&host->h_rwsem); fl->fl_flags = fl_flags; status = 0; } if (status < 0) goto out_unlock; /* * EAGAIN doesn't make sense for sleeping locks, and in some * cases NLM_LCK_DENIED is returned for a permanent error. So * turn it into an ENOLCK. 
*/ if (resp->status == nlm_lck_denied && (fl_flags & FL_SLEEP)) status = -ENOLCK; else status = nlm_stat_to_errno(resp->status); out: trace_nlmclnt_lock(&req->a_args.lock, (const struct sockaddr *)&req->a_host->h_addr, req->a_host->h_addrlen, req->a_res.status); nlmclnt_release_call(req); return status; out_unlock: /* Fatal error: ensure that we remove the lock altogether */ trace_nlmclnt_lock(&req->a_args.lock, (const struct sockaddr *)&req->a_host->h_addr, req->a_host->h_addrlen, req->a_res.status); dprintk("lockd: lock attempt ended in fatal error.\n" " Attempting to unlock.\n"); fl_type = fl->fl_type; fl->fl_type = F_UNLCK; down_read(&host->h_rwsem); do_vfs_lock(fl); up_read(&host->h_rwsem); fl->fl_type = fl_type; fl->fl_flags = fl_flags; nlmclnt_async_call(cred, req, NLMPROC_UNLOCK, &nlmclnt_unlock_ops); return status; } /* * RECLAIM: Try to reclaim a lock */ int nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl, struct nlm_rqst *req) { int status; memset(req, 0, sizeof(*req)); locks_init_lock(&req->a_args.lock.fl); locks_init_lock(&req->a_res.lock.fl); req->a_host = host; /* Set up the argument struct */ nlmclnt_setlockargs(req, fl); req->a_args.reclaim = 1; status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_LOCK); if (status >= 0 && req->a_res.status == nlm_granted) return 0; printk(KERN_WARNING "lockd: failed to reclaim lock for pid %d " "(errno %d, status %d)\n", fl->fl_pid, status, ntohl(req->a_res.status)); /* * FIXME: This is a serious failure. We can * * a. Ignore the problem * b. Send the owning process some signal (Linux doesn't have * SIGLOST, though...) * c. Retry the operation * * Until someone comes up with a simple implementation * for b or c, I'll choose option a. */ return -ENOLCK; } /* * UNLOCK: remove an existing lock */ static int nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl) { struct nlm_host *host = req->a_host; struct nlm_res *resp = &req->a_res; int status; unsigned char fl_flags = fl->fl_flags; /* * Note: the server is supposed to either grant us the unlock * request, or to deny it with NLM_LCK_DENIED_GRACE_PERIOD. In either * case, we want to unlock. */ fl->fl_flags |= FL_EXISTS; down_read(&host->h_rwsem); status = do_vfs_lock(fl); up_read(&host->h_rwsem); fl->fl_flags = fl_flags; if (status == -ENOENT) { status = 0; goto out; } refcount_inc(&req->a_count); status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req, NLMPROC_UNLOCK, &nlmclnt_unlock_ops); if (status < 0) goto out; if (resp->status == nlm_granted) goto out; if (resp->status != nlm_lck_denied_nolocks) printk("lockd: unexpected unlock status: %d\n", ntohl(resp->status)); /* What to do now? I'm out of my depth... 
*/ status = -ENOLCK; out: trace_nlmclnt_unlock(&req->a_args.lock, (const struct sockaddr *)&req->a_host->h_addr, req->a_host->h_addrlen, req->a_res.status); nlmclnt_release_call(req); return status; } static void nlmclnt_unlock_prepare(struct rpc_task *task, void *data) { struct nlm_rqst *req = data; const struct nlmclnt_operations *nlmclnt_ops = req->a_host->h_nlmclnt_ops; bool defer_call = false; if (nlmclnt_ops && nlmclnt_ops->nlmclnt_unlock_prepare) defer_call = nlmclnt_ops->nlmclnt_unlock_prepare(task, req->a_callback_data); if (!defer_call) rpc_call_start(task); } static void nlmclnt_unlock_callback(struct rpc_task *task, void *data) { struct nlm_rqst *req = data; u32 status = ntohl(req->a_res.status); if (RPC_SIGNALLED(task)) goto die; if (task->tk_status < 0) { dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status); switch (task->tk_status) { case -EACCES: case -EIO: goto die; default: goto retry_rebind; } } if (status == NLM_LCK_DENIED_GRACE_PERIOD) { rpc_delay(task, NLMCLNT_GRACE_WAIT); goto retry_unlock; } if (status != NLM_LCK_GRANTED) printk(KERN_WARNING "lockd: unexpected unlock status: %d\n", status); die: return; retry_rebind: nlm_rebind_host(req->a_host); retry_unlock: rpc_restart_call(task); } static const struct rpc_call_ops nlmclnt_unlock_ops = { .rpc_call_prepare = nlmclnt_unlock_prepare, .rpc_call_done = nlmclnt_unlock_callback, .rpc_release = nlmclnt_rpc_release, }; /* * Cancel a blocked lock request. * We always use an async RPC call for this in order not to hang a * process that has been Ctrl-C'ed. */ static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl) { struct nlm_rqst *req; int status; dprintk("lockd: blocking lock attempt was interrupted by a signal.\n" " Attempting to cancel lock.\n"); req = nlm_alloc_call(host); if (!req) return -ENOMEM; req->a_flags = RPC_TASK_ASYNC; nlmclnt_setlockargs(req, fl); req->a_args.block = block; refcount_inc(&req->a_count); status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req, NLMPROC_CANCEL, &nlmclnt_cancel_ops); if (status == 0 && req->a_res.status == nlm_lck_denied) status = -ENOLCK; nlmclnt_release_call(req); return status; } static void nlmclnt_cancel_callback(struct rpc_task *task, void *data) { struct nlm_rqst *req = data; u32 status = ntohl(req->a_res.status); if (RPC_SIGNALLED(task)) goto die; if (task->tk_status < 0) { dprintk("lockd: CANCEL call error %d, retrying.\n", task->tk_status); goto retry_cancel; } switch (status) { case NLM_LCK_GRANTED: case NLM_LCK_DENIED_GRACE_PERIOD: case NLM_LCK_DENIED: /* Everything's good */ break; case NLM_LCK_DENIED_NOLOCKS: dprintk("lockd: CANCEL failed (server has no locks)\n"); goto retry_cancel; default: printk(KERN_NOTICE "lockd: weird return %d for CANCEL call\n", status); } die: return; retry_cancel: /* Don't ever retry more than 3 times */ if (req->a_retries++ >= NLMCLNT_MAX_RETRIES) goto die; nlm_rebind_host(req->a_host); rpc_restart_call(task); rpc_delay(task, 30 * HZ); } static const struct rpc_call_ops nlmclnt_cancel_ops = { .rpc_call_done = nlmclnt_cancel_callback, .rpc_release = nlmclnt_rpc_release, }; /* * Convert an NLM status code to a generic kernel errno */ static int nlm_stat_to_errno(__be32 status) { switch(ntohl(status)) { case NLM_LCK_GRANTED: return 0; case NLM_LCK_DENIED: return -EAGAIN; case NLM_LCK_DENIED_NOLOCKS: case NLM_LCK_DENIED_GRACE_PERIOD: return -ENOLCK; case NLM_LCK_BLOCKED: printk(KERN_NOTICE "lockd: unexpected status NLM_BLOCKED\n"); return -ENOLCK; #ifdef CONFIG_LOCKD_V4 case NLM_DEADLCK: return 
-EDEADLK; case NLM_ROFS: return -EROFS; case NLM_STALE_FH: return -ESTALE; case NLM_FBIG: return -EOVERFLOW; case NLM_FAILED: return -ENOLCK; #endif } printk(KERN_NOTICE "lockd: unexpected server status %d\n", ntohl(status)); return -ENOLCK; }
linux-master
fs/lockd/clntproc.c
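Illustrative sketch (not part of the kernel sources above or below): nlm_stat_to_errno() in fs/lockd/clntproc.c translates NLM protocol status codes into generic errnos. The stand-alone userspace program below mirrors that translation with locally defined status constants; the numeric values are an assumption here (they follow the usual NLM wire protocol), and the function takes host-order values rather than the kernel's __be32.

#include <errno.h>
#include <stdio.h>

/* Local stand-ins for the NLM status codes (assumed protocol values). */
enum demo_nlm_stat {
	DEMO_LCK_GRANTED             = 0,
	DEMO_LCK_DENIED              = 1,
	DEMO_LCK_DENIED_NOLOCKS      = 2,
	DEMO_LCK_BLOCKED             = 3,
	DEMO_LCK_DENIED_GRACE_PERIOD = 4,
};

/* Userspace mirror of the kernel's nlm_stat_to_errno() for host-order codes. */
static int demo_stat_to_errno(unsigned int status)
{
	switch (status) {
	case DEMO_LCK_GRANTED:
		return 0;
	case DEMO_LCK_DENIED:
		return -EAGAIN;
	case DEMO_LCK_DENIED_NOLOCKS:
	case DEMO_LCK_DENIED_GRACE_PERIOD:
		return -ENOLCK;
	case DEMO_LCK_BLOCKED:
		/* A final reply should never say "blocked"; treat it as a failure. */
		return -ENOLCK;
	}
	/* Unknown status: fall back to -ENOLCK, as the kernel does. */
	return -ENOLCK;
}

int main(void)
{
	for (unsigned int s = 0; s <= 5; s++)
		printf("status %u -> errno %d\n", s, demo_stat_to_errno(s));
	return 0;
}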
// SPDX-License-Identifier: GPL-2.0 /* * Procfs support for lockd * * Copyright (c) 2014 Jeff Layton <[email protected]> */ #include <linux/fs.h> #include <linux/proc_fs.h> #include <linux/module.h> #include <linux/nsproxy.h> #include <net/net_namespace.h> #include "netns.h" #include "procfs.h" /* * We only allow strings that start with 'Y', 'y', or '1'. */ static ssize_t nlm_end_grace_write(struct file *file, const char __user *buf, size_t size, loff_t *pos) { char *data; struct lockd_net *ln = net_generic(current->nsproxy->net_ns, lockd_net_id); if (size < 1) return -EINVAL; data = simple_transaction_get(file, buf, size); if (IS_ERR(data)) return PTR_ERR(data); switch(data[0]) { case 'Y': case 'y': case '1': locks_end_grace(&ln->lockd_manager); break; default: return -EINVAL; } return size; } static ssize_t nlm_end_grace_read(struct file *file, char __user *buf, size_t size, loff_t *pos) { struct lockd_net *ln = net_generic(current->nsproxy->net_ns, lockd_net_id); char resp[3]; resp[0] = list_empty(&ln->lockd_manager.list) ? 'Y' : 'N'; resp[1] = '\n'; resp[2] = '\0'; return simple_read_from_buffer(buf, size, pos, resp, sizeof(resp)); } static const struct proc_ops lockd_end_grace_proc_ops = { .proc_write = nlm_end_grace_write, .proc_read = nlm_end_grace_read, .proc_lseek = default_llseek, .proc_release = simple_transaction_release, }; int __init lockd_create_procfs(void) { struct proc_dir_entry *entry; entry = proc_mkdir("fs/lockd", NULL); if (!entry) return -ENOMEM; entry = proc_create("nlm_end_grace", S_IRUGO|S_IWUSR, entry, &lockd_end_grace_proc_ops); if (!entry) { remove_proc_entry("fs/lockd", NULL); return -ENOMEM; } return 0; } void __exit lockd_remove_procfs(void) { remove_proc_entry("fs/lockd/nlm_end_grace", NULL); remove_proc_entry("fs/lockd", NULL); }
linux-master
fs/lockd/procfs.c
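Illustrative usage sketch (not part of the kernel sources): the procfs file created above lives at /proc/fs/lockd/nlm_end_grace once procfs is mounted. Writing a string that starts with 'Y', 'y' or '1' ends lockd's grace period; reading returns 'Y' if the grace period has already ended and 'N' otherwise. A minimal userspace caller, assuming the file exists and the caller has the required privileges (typically root for the write), might look like this:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char status[4] = "";
	int fd;

	/* Ask lockd to end its grace period early. */
	fd = open("/proc/fs/lockd/nlm_end_grace", O_WRONLY);
	if (fd >= 0) {
		if (write(fd, "Y", 1) != 1)
			perror("write nlm_end_grace");
		close(fd);
	} else {
		perror("open nlm_end_grace for writing");
	}

	/* Read back whether the grace period is over ('Y') or still running ('N'). */
	fd = open("/proc/fs/lockd/nlm_end_grace", O_RDONLY);
	if (fd >= 0) {
		if (read(fd, status, sizeof(status) - 1) > 0)
			printf("grace period ended: %c\n", status[0]);
		close(fd);
	} else {
		perror("open nlm_end_grace for reading");
	}
	return 0;
}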
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/lockd/host.c * * Management for NLM peer hosts. The nlm_host struct is shared * between client and server implementation. The only reason to * do so is to reduce code bloat. * * Copyright (C) 1996, Olaf Kirch <[email protected]> */ #include <linux/types.h> #include <linux/slab.h> #include <linux/in.h> #include <linux/in6.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/addr.h> #include <linux/sunrpc/svc.h> #include <linux/lockd/lockd.h> #include <linux/mutex.h> #include <linux/sunrpc/svc_xprt.h> #include <net/ipv6.h> #include "netns.h" #define NLMDBG_FACILITY NLMDBG_HOSTCACHE #define NLM_HOST_NRHASH 32 #define NLM_HOST_REBIND (60 * HZ) #define NLM_HOST_EXPIRE (300 * HZ) #define NLM_HOST_COLLECT (120 * HZ) static struct hlist_head nlm_server_hosts[NLM_HOST_NRHASH]; static struct hlist_head nlm_client_hosts[NLM_HOST_NRHASH]; #define for_each_host(host, chain, table) \ for ((chain) = (table); \ (chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \ hlist_for_each_entry((host), (chain), h_hash) #define for_each_host_safe(host, next, chain, table) \ for ((chain) = (table); \ (chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \ hlist_for_each_entry_safe((host), (next), \ (chain), h_hash) static unsigned long nrhosts; static DEFINE_MUTEX(nlm_host_mutex); static void nlm_gc_hosts(struct net *net); struct nlm_lookup_host_info { const int server; /* search for server|client */ const struct sockaddr *sap; /* address to search for */ const size_t salen; /* it's length */ const unsigned short protocol; /* transport to search for*/ const u32 version; /* NLM version to search for */ const char *hostname; /* remote's hostname */ const size_t hostname_len; /* it's length */ const int noresvport; /* use non-priv port */ struct net *net; /* network namespace to bind */ const struct cred *cred; }; /* * Hash function must work well on big- and little-endian platforms */ static unsigned int __nlm_hash32(const __be32 n) { unsigned int hash = (__force u32)n ^ ((__force u32)n >> 16); return hash ^ (hash >> 8); } static unsigned int __nlm_hash_addr4(const struct sockaddr *sap) { const struct sockaddr_in *sin = (struct sockaddr_in *)sap; return __nlm_hash32(sin->sin_addr.s_addr); } static unsigned int __nlm_hash_addr6(const struct sockaddr *sap) { const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap; const struct in6_addr addr = sin6->sin6_addr; return __nlm_hash32(addr.s6_addr32[0]) ^ __nlm_hash32(addr.s6_addr32[1]) ^ __nlm_hash32(addr.s6_addr32[2]) ^ __nlm_hash32(addr.s6_addr32[3]); } static unsigned int nlm_hash_address(const struct sockaddr *sap) { unsigned int hash; switch (sap->sa_family) { case AF_INET: hash = __nlm_hash_addr4(sap); break; case AF_INET6: hash = __nlm_hash_addr6(sap); break; default: hash = 0; } return hash & (NLM_HOST_NRHASH - 1); } /* * Allocate and initialize an nlm_host. Common to both client and server. 
*/ static struct nlm_host *nlm_alloc_host(struct nlm_lookup_host_info *ni, struct nsm_handle *nsm) { struct nlm_host *host = NULL; unsigned long now = jiffies; if (nsm != NULL) refcount_inc(&nsm->sm_count); else { host = NULL; nsm = nsm_get_handle(ni->net, ni->sap, ni->salen, ni->hostname, ni->hostname_len); if (unlikely(nsm == NULL)) { dprintk("lockd: %s failed; no nsm handle\n", __func__); goto out; } } host = kmalloc(sizeof(*host), GFP_KERNEL); if (unlikely(host == NULL)) { dprintk("lockd: %s failed; no memory\n", __func__); nsm_release(nsm); goto out; } memcpy(nlm_addr(host), ni->sap, ni->salen); host->h_addrlen = ni->salen; rpc_set_port(nlm_addr(host), 0); host->h_srcaddrlen = 0; host->h_rpcclnt = NULL; host->h_name = nsm->sm_name; host->h_version = ni->version; host->h_proto = ni->protocol; host->h_reclaiming = 0; host->h_server = ni->server; host->h_noresvport = ni->noresvport; host->h_inuse = 0; init_waitqueue_head(&host->h_gracewait); init_rwsem(&host->h_rwsem); host->h_state = 0; host->h_nsmstate = 0; host->h_pidcount = 0; refcount_set(&host->h_count, 1); mutex_init(&host->h_mutex); host->h_nextrebind = now + NLM_HOST_REBIND; host->h_expires = now + NLM_HOST_EXPIRE; INIT_LIST_HEAD(&host->h_lockowners); spin_lock_init(&host->h_lock); INIT_LIST_HEAD(&host->h_granted); INIT_LIST_HEAD(&host->h_reclaim); host->h_nsmhandle = nsm; host->h_addrbuf = nsm->sm_addrbuf; host->net = ni->net; host->h_cred = get_cred(ni->cred); strscpy(host->nodename, utsname()->nodename, sizeof(host->nodename)); out: return host; } /* * Destroy an nlm_host and free associated resources * * Caller must hold nlm_host_mutex. */ static void nlm_destroy_host_locked(struct nlm_host *host) { struct rpc_clnt *clnt; struct lockd_net *ln = net_generic(host->net, lockd_net_id); dprintk("lockd: destroy host %s\n", host->h_name); hlist_del_init(&host->h_hash); nsm_unmonitor(host); nsm_release(host->h_nsmhandle); clnt = host->h_rpcclnt; if (clnt != NULL) rpc_shutdown_client(clnt); put_cred(host->h_cred); kfree(host); ln->nrhosts--; nrhosts--; } /** * nlmclnt_lookup_host - Find an NLM host handle matching a remote server * @sap: network address of server * @salen: length of server address * @protocol: transport protocol to use * @version: NLM protocol version * @hostname: '\0'-terminated hostname of server * @noresvport: 1 if non-privileged port should be used * @net: pointer to net namespace * @cred: pointer to cred * * Returns an nlm_host structure that matches the passed-in * [server address, transport protocol, NLM version, server hostname]. * If one doesn't already exist in the host cache, a new handle is * created and returned. */ struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap, const size_t salen, const unsigned short protocol, const u32 version, const char *hostname, int noresvport, struct net *net, const struct cred *cred) { struct nlm_lookup_host_info ni = { .server = 0, .sap = sap, .salen = salen, .protocol = protocol, .version = version, .hostname = hostname, .hostname_len = strlen(hostname), .noresvport = noresvport, .net = net, .cred = cred, }; struct hlist_head *chain; struct nlm_host *host; struct nsm_handle *nsm = NULL; struct lockd_net *ln = net_generic(net, lockd_net_id); dprintk("lockd: %s(host='%s', vers=%u, proto=%s)\n", __func__, (hostname ? hostname : "<none>"), version, (protocol == IPPROTO_UDP ? 
"udp" : "tcp")); mutex_lock(&nlm_host_mutex); chain = &nlm_client_hosts[nlm_hash_address(sap)]; hlist_for_each_entry(host, chain, h_hash) { if (host->net != net) continue; if (!rpc_cmp_addr(nlm_addr(host), sap)) continue; /* Same address. Share an NSM handle if we already have one */ if (nsm == NULL) nsm = host->h_nsmhandle; if (host->h_proto != protocol) continue; if (host->h_version != version) continue; nlm_get_host(host); dprintk("lockd: %s found host %s (%s)\n", __func__, host->h_name, host->h_addrbuf); goto out; } host = nlm_alloc_host(&ni, nsm); if (unlikely(host == NULL)) goto out; hlist_add_head(&host->h_hash, chain); ln->nrhosts++; nrhosts++; dprintk("lockd: %s created host %s (%s)\n", __func__, host->h_name, host->h_addrbuf); out: mutex_unlock(&nlm_host_mutex); return host; } /** * nlmclnt_release_host - release client nlm_host * @host: nlm_host to release * */ void nlmclnt_release_host(struct nlm_host *host) { if (host == NULL) return; dprintk("lockd: release client host %s\n", host->h_name); WARN_ON_ONCE(host->h_server); if (refcount_dec_and_mutex_lock(&host->h_count, &nlm_host_mutex)) { WARN_ON_ONCE(!list_empty(&host->h_lockowners)); WARN_ON_ONCE(!list_empty(&host->h_granted)); WARN_ON_ONCE(!list_empty(&host->h_reclaim)); nlm_destroy_host_locked(host); mutex_unlock(&nlm_host_mutex); } } /** * nlmsvc_lookup_host - Find an NLM host handle matching a remote client * @rqstp: incoming NLM request * @hostname: name of client host * @hostname_len: length of client hostname * * Returns an nlm_host structure that matches the [client address, * transport protocol, NLM version, client hostname] of the passed-in * NLM request. If one doesn't already exist in the host cache, a * new handle is created and returned. * * Before possibly creating a new nlm_host, construct a sockaddr * for a specific source address in case the local system has * multiple network addresses. The family of the address in * rq_daddr is guaranteed to be the same as the family of the * address in rq_addr, so it's safe to use the same family for * the source address. */ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp, const char *hostname, const size_t hostname_len) { struct hlist_head *chain; struct nlm_host *host = NULL; struct nsm_handle *nsm = NULL; struct sockaddr *src_sap = svc_daddr(rqstp); size_t src_len = rqstp->rq_daddrlen; struct net *net = SVC_NET(rqstp); struct nlm_lookup_host_info ni = { .server = 1, .sap = svc_addr(rqstp), .salen = rqstp->rq_addrlen, .protocol = rqstp->rq_prot, .version = rqstp->rq_vers, .hostname = hostname, .hostname_len = hostname_len, .net = net, }; struct lockd_net *ln = net_generic(net, lockd_net_id); dprintk("lockd: %s(host='%.*s', vers=%u, proto=%s)\n", __func__, (int)hostname_len, hostname, rqstp->rq_vers, (rqstp->rq_prot == IPPROTO_UDP ? "udp" : "tcp")); mutex_lock(&nlm_host_mutex); if (time_after_eq(jiffies, ln->next_gc)) nlm_gc_hosts(net); chain = &nlm_server_hosts[nlm_hash_address(ni.sap)]; hlist_for_each_entry(host, chain, h_hash) { if (host->net != net) continue; if (!rpc_cmp_addr(nlm_addr(host), ni.sap)) continue; /* Same address. Share an NSM handle if we already have one */ if (nsm == NULL) nsm = host->h_nsmhandle; if (host->h_proto != ni.protocol) continue; if (host->h_version != ni.version) continue; if (!rpc_cmp_addr(nlm_srcaddr(host), src_sap)) continue; /* Move to head of hash chain. 
*/ hlist_del(&host->h_hash); hlist_add_head(&host->h_hash, chain); nlm_get_host(host); dprintk("lockd: %s found host %s (%s)\n", __func__, host->h_name, host->h_addrbuf); goto out; } host = nlm_alloc_host(&ni, nsm); if (unlikely(host == NULL)) goto out; memcpy(nlm_srcaddr(host), src_sap, src_len); host->h_srcaddrlen = src_len; hlist_add_head(&host->h_hash, chain); ln->nrhosts++; nrhosts++; refcount_inc(&host->h_count); dprintk("lockd: %s created host %s (%s)\n", __func__, host->h_name, host->h_addrbuf); out: mutex_unlock(&nlm_host_mutex); return host; } /** * nlmsvc_release_host - release server nlm_host * @host: nlm_host to release * * Host is destroyed later in nlm_gc_host(). */ void nlmsvc_release_host(struct nlm_host *host) { if (host == NULL) return; dprintk("lockd: release server host %s\n", host->h_name); WARN_ON_ONCE(!host->h_server); refcount_dec(&host->h_count); } /* * Create the NLM RPC client for an NLM peer */ struct rpc_clnt * nlm_bind_host(struct nlm_host *host) { struct rpc_clnt *clnt; dprintk("lockd: nlm_bind_host %s (%s)\n", host->h_name, host->h_addrbuf); /* Lock host handle */ mutex_lock(&host->h_mutex); /* If we've already created an RPC client, check whether * RPC rebind is required */ if ((clnt = host->h_rpcclnt) != NULL) { nlm_rebind_host(host); } else { unsigned long increment = nlmsvc_timeout; struct rpc_timeout timeparms = { .to_initval = increment, .to_increment = increment, .to_maxval = increment * 6UL, .to_retries = 5U, }; struct rpc_create_args args = { .net = host->net, .protocol = host->h_proto, .address = nlm_addr(host), .addrsize = host->h_addrlen, .timeout = &timeparms, .servername = host->h_name, .program = &nlm_program, .version = host->h_version, .authflavor = RPC_AUTH_UNIX, .flags = (RPC_CLNT_CREATE_NOPING | RPC_CLNT_CREATE_AUTOBIND | RPC_CLNT_CREATE_REUSEPORT), .cred = host->h_cred, }; /* * lockd retries server side blocks automatically so we want * those to be soft RPC calls. Client side calls need to be * hard RPC tasks. */ if (!host->h_server) args.flags |= RPC_CLNT_CREATE_HARDRTRY; if (host->h_noresvport) args.flags |= RPC_CLNT_CREATE_NONPRIVPORT; if (host->h_srcaddrlen) args.saddress = nlm_srcaddr(host); clnt = rpc_create(&args); if (!IS_ERR(clnt)) host->h_rpcclnt = clnt; else { printk("lockd: couldn't create RPC handle for %s\n", host->h_name); clnt = NULL; } } mutex_unlock(&host->h_mutex); return clnt; } /** * nlm_rebind_host - If needed, force a portmap lookup of the peer's lockd port * @host: NLM host handle for peer * * This is not needed when using a connection-oriented protocol, such as TCP. * The existing autobind mechanism is sufficient to force a rebind when * required, e.g. on connection state transitions. 
*/ void nlm_rebind_host(struct nlm_host *host) { if (host->h_proto != IPPROTO_UDP) return; if (host->h_rpcclnt && time_after_eq(jiffies, host->h_nextrebind)) { rpc_force_rebind(host->h_rpcclnt); host->h_nextrebind = jiffies + NLM_HOST_REBIND; } } /* * Increment NLM host count */ struct nlm_host * nlm_get_host(struct nlm_host *host) { if (host) { dprintk("lockd: get host %s\n", host->h_name); refcount_inc(&host->h_count); host->h_expires = jiffies + NLM_HOST_EXPIRE; } return host; } static struct nlm_host *next_host_state(struct hlist_head *cache, struct nsm_handle *nsm, const struct nlm_reboot *info) { struct nlm_host *host; struct hlist_head *chain; mutex_lock(&nlm_host_mutex); for_each_host(host, chain, cache) { if (host->h_nsmhandle == nsm && host->h_nsmstate != info->state) { host->h_nsmstate = info->state; host->h_state++; nlm_get_host(host); mutex_unlock(&nlm_host_mutex); return host; } } mutex_unlock(&nlm_host_mutex); return NULL; } /** * nlm_host_rebooted - Release all resources held by rebooted host * @net: network namespace * @info: pointer to decoded results of NLM_SM_NOTIFY call * * We were notified that the specified host has rebooted. Release * all resources held by that peer. */ void nlm_host_rebooted(const struct net *net, const struct nlm_reboot *info) { struct nsm_handle *nsm; struct nlm_host *host; nsm = nsm_reboot_lookup(net, info); if (unlikely(nsm == NULL)) return; /* Mark all hosts tied to this NSM state as having rebooted. * We run the loop repeatedly, because we drop the host table * lock for this. * To avoid processing a host several times, we match the nsmstate. */ while ((host = next_host_state(nlm_server_hosts, nsm, info)) != NULL) { nlmsvc_free_host_resources(host); nlmsvc_release_host(host); } while ((host = next_host_state(nlm_client_hosts, nsm, info)) != NULL) { nlmclnt_recovery(host); nlmclnt_release_host(host); } nsm_release(nsm); } static void nlm_complain_hosts(struct net *net) { struct hlist_head *chain; struct nlm_host *host; if (net) { struct lockd_net *ln = net_generic(net, lockd_net_id); if (ln->nrhosts == 0) return; pr_warn("lockd: couldn't shutdown host module for net %x!\n", net->ns.inum); dprintk("lockd: %lu hosts left in net %x:\n", ln->nrhosts, net->ns.inum); } else { if (nrhosts == 0) return; printk(KERN_WARNING "lockd: couldn't shutdown host module!\n"); dprintk("lockd: %lu hosts left:\n", nrhosts); } for_each_host(host, chain, nlm_server_hosts) { if (net && host->net != net) continue; dprintk(" %s (cnt %d use %d exp %ld net %x)\n", host->h_name, refcount_read(&host->h_count), host->h_inuse, host->h_expires, host->net->ns.inum); } } void nlm_shutdown_hosts_net(struct net *net) { struct hlist_head *chain; struct nlm_host *host; mutex_lock(&nlm_host_mutex); /* First, make all hosts eligible for gc */ dprintk("lockd: nuking all hosts in net %x...\n", net ? net->ns.inum : 0); for_each_host(host, chain, nlm_server_hosts) { if (net && host->net != net) continue; host->h_expires = jiffies - 1; if (host->h_rpcclnt) { rpc_shutdown_client(host->h_rpcclnt); host->h_rpcclnt = NULL; } nlmsvc_free_host_resources(host); } /* Then, perform a garbage collection pass */ nlm_gc_hosts(net); nlm_complain_hosts(net); mutex_unlock(&nlm_host_mutex); } /* * Shut down the hosts module. * Note that this routine is called only at server shutdown time. */ void nlm_shutdown_hosts(void) { dprintk("lockd: shutting down host module\n"); nlm_shutdown_hosts_net(NULL); } /* * Garbage collect any unused NLM hosts. 
* This GC combines reference counting for async operations with * mark & sweep for resources held by remote clients. */ static void nlm_gc_hosts(struct net *net) { struct hlist_head *chain; struct hlist_node *next; struct nlm_host *host; dprintk("lockd: host garbage collection for net %x\n", net ? net->ns.inum : 0); for_each_host(host, chain, nlm_server_hosts) { if (net && host->net != net) continue; host->h_inuse = 0; } /* Mark all hosts that hold locks, blocks or shares */ nlmsvc_mark_resources(net); for_each_host_safe(host, next, chain, nlm_server_hosts) { if (net && host->net != net) continue; if (host->h_inuse || time_before(jiffies, host->h_expires)) { dprintk("nlm_gc_hosts skipping %s " "(cnt %d use %d exp %ld net %x)\n", host->h_name, refcount_read(&host->h_count), host->h_inuse, host->h_expires, host->net->ns.inum); continue; } if (refcount_dec_if_one(&host->h_count)) nlm_destroy_host_locked(host); } if (net) { struct lockd_net *ln = net_generic(net, lockd_net_id); ln->next_gc = jiffies + NLM_HOST_COLLECT; } }
linux-master
fs/lockd/host.c
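Illustrative sketch (not part of the kernel sources): the host cache above hashes a peer address with __nlm_hash32() (xor the high and low halves of the 32-bit word, then xor adjacent bytes) and masks the result to one of NLM_HOST_NRHASH = 32 chains. The small program below reproduces that arithmetic in userspace for an IPv4 address supplied on the command line; it is a simplified illustration, not kernel code.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_HOST_NRHASH 32

/* Fold a 32-bit network-order word the way __nlm_hash32() does. */
static unsigned int demo_hash32(uint32_t n)
{
	unsigned int hash = n ^ (n >> 16);

	return hash ^ (hash >> 8);
}

int main(int argc, char **argv)
{
	struct in_addr addr;

	if (argc != 2 || inet_pton(AF_INET, argv[1], &addr) != 1) {
		fprintf(stderr, "usage: %s <ipv4-address>\n", argv[0]);
		return 1;
	}
	/* addr.s_addr is already in network byte order, like the cached nlm_addr(). */
	printf("%s -> hash chain %u of %d\n", argv[1],
	       demo_hash32(addr.s_addr) & (DEMO_HOST_NRHASH - 1),
	       DEMO_HOST_NRHASH);
	return 0;
}

Compiled with a plain "cc" invocation, this prints which of the 32 client/server host chains a given peer address would land on.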
// SPDX-License-Identifier: GPL-2.0-only /* * linux/fs/lockd/svcsubs.c * * Various support routines for the NLM server. * * Copyright (C) 1996, Olaf Kirch <[email protected]> */ #include <linux/types.h> #include <linux/string.h> #include <linux/time.h> #include <linux/in.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/sunrpc/svc.h> #include <linux/sunrpc/addr.h> #include <linux/lockd/lockd.h> #include <linux/lockd/share.h> #include <linux/module.h> #include <linux/mount.h> #include <uapi/linux/nfs2.h> #define NLMDBG_FACILITY NLMDBG_SVCSUBS /* * Global file hash table */ #define FILE_HASH_BITS 7 #define FILE_NRHASH (1<<FILE_HASH_BITS) static struct hlist_head nlm_files[FILE_NRHASH]; static DEFINE_MUTEX(nlm_file_mutex); #ifdef CONFIG_SUNRPC_DEBUG static inline void nlm_debug_print_fh(char *msg, struct nfs_fh *f) { u32 *fhp = (u32*)f->data; /* print the first 32 bytes of the fh */ dprintk("lockd: %s (%08x %08x %08x %08x %08x %08x %08x %08x)\n", msg, fhp[0], fhp[1], fhp[2], fhp[3], fhp[4], fhp[5], fhp[6], fhp[7]); } static inline void nlm_debug_print_file(char *msg, struct nlm_file *file) { struct inode *inode = nlmsvc_file_inode(file); dprintk("lockd: %s %s/%ld\n", msg, inode->i_sb->s_id, inode->i_ino); } #else static inline void nlm_debug_print_fh(char *msg, struct nfs_fh *f) { return; } static inline void nlm_debug_print_file(char *msg, struct nlm_file *file) { return; } #endif static inline unsigned int file_hash(struct nfs_fh *f) { unsigned int tmp=0; int i; for (i=0; i<NFS2_FHSIZE;i++) tmp += f->data[i]; return tmp & (FILE_NRHASH - 1); } int lock_to_openmode(struct file_lock *lock) { return (lock->fl_type == F_WRLCK) ? O_WRONLY : O_RDONLY; } /* * Open the file. Note that if we're reexporting, for example, * this could block the lockd thread for a while. * * We have to make sure we have the right credential to open * the file. */ static __be32 nlm_do_fopen(struct svc_rqst *rqstp, struct nlm_file *file, int mode) { struct file **fp = &file->f_file[mode]; __be32 nfserr; if (*fp) return 0; nfserr = nlmsvc_ops->fopen(rqstp, &file->f_handle, fp, mode); if (nfserr) dprintk("lockd: open failed (error %d)\n", nfserr); return nfserr; } /* * Lookup file info. If it doesn't exist, create a file info struct * and open a (VFS) file for the given inode. 
*/ __be32 nlm_lookup_file(struct svc_rqst *rqstp, struct nlm_file **result, struct nlm_lock *lock) { struct nlm_file *file; unsigned int hash; __be32 nfserr; int mode; nlm_debug_print_fh("nlm_lookup_file", &lock->fh); hash = file_hash(&lock->fh); mode = lock_to_openmode(&lock->fl); /* Lock file table */ mutex_lock(&nlm_file_mutex); hlist_for_each_entry(file, &nlm_files[hash], f_list) if (!nfs_compare_fh(&file->f_handle, &lock->fh)) { mutex_lock(&file->f_mutex); nfserr = nlm_do_fopen(rqstp, file, mode); mutex_unlock(&file->f_mutex); goto found; } nlm_debug_print_fh("creating file for", &lock->fh); nfserr = nlm_lck_denied_nolocks; file = kzalloc(sizeof(*file), GFP_KERNEL); if (!file) goto out_free; memcpy(&file->f_handle, &lock->fh, sizeof(struct nfs_fh)); mutex_init(&file->f_mutex); INIT_HLIST_NODE(&file->f_list); INIT_LIST_HEAD(&file->f_blocks); nfserr = nlm_do_fopen(rqstp, file, mode); if (nfserr) goto out_unlock; hlist_add_head(&file->f_list, &nlm_files[hash]); found: dprintk("lockd: found file %p (count %d)\n", file, file->f_count); *result = file; file->f_count++; out_unlock: mutex_unlock(&nlm_file_mutex); return nfserr; out_free: kfree(file); goto out_unlock; } /* * Delete a file after having released all locks, blocks and shares */ static inline void nlm_delete_file(struct nlm_file *file) { nlm_debug_print_file("closing file", file); if (!hlist_unhashed(&file->f_list)) { hlist_del(&file->f_list); if (file->f_file[O_RDONLY]) nlmsvc_ops->fclose(file->f_file[O_RDONLY]); if (file->f_file[O_WRONLY]) nlmsvc_ops->fclose(file->f_file[O_WRONLY]); kfree(file); } else { printk(KERN_WARNING "lockd: attempt to release unknown file!\n"); } } static int nlm_unlock_files(struct nlm_file *file, const struct file_lock *fl) { struct file_lock lock; locks_init_lock(&lock); lock.fl_type = F_UNLCK; lock.fl_start = 0; lock.fl_end = OFFSET_MAX; lock.fl_owner = fl->fl_owner; lock.fl_pid = fl->fl_pid; lock.fl_flags = FL_POSIX; lock.fl_file = file->f_file[O_RDONLY]; if (lock.fl_file && vfs_lock_file(lock.fl_file, F_SETLK, &lock, NULL)) goto out_err; lock.fl_file = file->f_file[O_WRONLY]; if (lock.fl_file && vfs_lock_file(lock.fl_file, F_SETLK, &lock, NULL)) goto out_err; return 0; out_err: pr_warn("lockd: unlock failure in %s:%d\n", __FILE__, __LINE__); return 1; } /* * Loop over all locks on the given file and perform the specified * action. 
*/ static int nlm_traverse_locks(struct nlm_host *host, struct nlm_file *file, nlm_host_match_fn_t match) { struct inode *inode = nlmsvc_file_inode(file); struct file_lock *fl; struct file_lock_context *flctx = locks_inode_context(inode); struct nlm_host *lockhost; if (!flctx || list_empty_careful(&flctx->flc_posix)) return 0; again: file->f_locks = 0; spin_lock(&flctx->flc_lock); list_for_each_entry(fl, &flctx->flc_posix, fl_list) { if (fl->fl_lmops != &nlmsvc_lock_operations) continue; /* update current lock count */ file->f_locks++; lockhost = ((struct nlm_lockowner *)fl->fl_owner)->host; if (match(lockhost, host)) { spin_unlock(&flctx->flc_lock); if (nlm_unlock_files(file, fl)) return 1; goto again; } } spin_unlock(&flctx->flc_lock); return 0; } static int nlmsvc_always_match(void *dummy1, struct nlm_host *dummy2) { return 1; } /* * Inspect a single file */ static inline int nlm_inspect_file(struct nlm_host *host, struct nlm_file *file, nlm_host_match_fn_t match) { nlmsvc_traverse_blocks(host, file, match); nlmsvc_traverse_shares(host, file, match); return nlm_traverse_locks(host, file, match); } /* * Quick check whether there are still any locks, blocks or * shares on a given file. */ static inline int nlm_file_inuse(struct nlm_file *file) { struct inode *inode = nlmsvc_file_inode(file); struct file_lock *fl; struct file_lock_context *flctx = locks_inode_context(inode); if (file->f_count || !list_empty(&file->f_blocks) || file->f_shares) return 1; if (flctx && !list_empty_careful(&flctx->flc_posix)) { spin_lock(&flctx->flc_lock); list_for_each_entry(fl, &flctx->flc_posix, fl_list) { if (fl->fl_lmops == &nlmsvc_lock_operations) { spin_unlock(&flctx->flc_lock); return 1; } } spin_unlock(&flctx->flc_lock); } file->f_locks = 0; return 0; } static void nlm_close_files(struct nlm_file *file) { if (file->f_file[O_RDONLY]) nlmsvc_ops->fclose(file->f_file[O_RDONLY]); if (file->f_file[O_WRONLY]) nlmsvc_ops->fclose(file->f_file[O_WRONLY]); } /* * Loop over all files in the file table. */ static int nlm_traverse_files(void *data, nlm_host_match_fn_t match, int (*is_failover_file)(void *data, struct nlm_file *file)) { struct hlist_node *next; struct nlm_file *file; int i, ret = 0; mutex_lock(&nlm_file_mutex); for (i = 0; i < FILE_NRHASH; i++) { hlist_for_each_entry_safe(file, next, &nlm_files[i], f_list) { if (is_failover_file && !is_failover_file(data, file)) continue; file->f_count++; mutex_unlock(&nlm_file_mutex); /* Traverse locks, blocks and shares of this file * and update file->f_locks count */ if (nlm_inspect_file(data, file, match)) ret = 1; mutex_lock(&nlm_file_mutex); file->f_count--; /* No more references to this file. Let go of it. */ if (list_empty(&file->f_blocks) && !file->f_locks && !file->f_shares && !file->f_count) { hlist_del(&file->f_list); nlm_close_files(file); kfree(file); } } } mutex_unlock(&nlm_file_mutex); return ret; } /* * Release file. If there are no more remote locks on this file, * close it and free the handle. * * Note that we can't do proper reference counting without major * contortions because the code in fs/locks.c creates, deletes and * splits locks without notification. Our only way is to walk the * entire lock list each time we remove a lock. 
*/ void nlm_release_file(struct nlm_file *file) { dprintk("lockd: nlm_release_file(%p, ct = %d)\n", file, file->f_count); /* Lock file table */ mutex_lock(&nlm_file_mutex); /* If there are no more locks etc, delete the file */ if (--file->f_count == 0 && !nlm_file_inuse(file)) nlm_delete_file(file); mutex_unlock(&nlm_file_mutex); } /* * Helpers function for resource traversal * * nlmsvc_mark_host: * used by the garbage collector; simply sets h_inuse only for those * hosts, which passed network check. * Always returns 0. * * nlmsvc_same_host: * returns 1 iff the two hosts match. Used to release * all resources bound to a specific host. * * nlmsvc_is_client: * returns 1 iff the host is a client. * Used by nlmsvc_invalidate_all */ static int nlmsvc_mark_host(void *data, struct nlm_host *hint) { struct nlm_host *host = data; if ((hint->net == NULL) || (host->net == hint->net)) host->h_inuse = 1; return 0; } static int nlmsvc_same_host(void *data, struct nlm_host *other) { struct nlm_host *host = data; return host == other; } static int nlmsvc_is_client(void *data, struct nlm_host *dummy) { struct nlm_host *host = data; if (host->h_server) { /* we are destroying locks even though the client * hasn't asked us too, so don't unmonitor the * client */ if (host->h_nsmhandle) host->h_nsmhandle->sm_sticky = 1; return 1; } else return 0; } /* * Mark all hosts that still hold resources */ void nlmsvc_mark_resources(struct net *net) { struct nlm_host hint; dprintk("lockd: %s for net %x\n", __func__, net ? net->ns.inum : 0); hint.net = net; nlm_traverse_files(&hint, nlmsvc_mark_host, NULL); } /* * Release all resources held by the given client */ void nlmsvc_free_host_resources(struct nlm_host *host) { dprintk("lockd: nlmsvc_free_host_resources\n"); if (nlm_traverse_files(host, nlmsvc_same_host, NULL)) { printk(KERN_WARNING "lockd: couldn't remove all locks held by %s\n", host->h_name); BUG(); } } /** * nlmsvc_invalidate_all - remove all locks held for clients * * Release all locks held by NFS clients. * */ void nlmsvc_invalidate_all(void) { /* * Previously, the code would call * nlmsvc_free_host_resources for each client in * turn, which is about as inefficient as it gets. * Now we just do it once in nlm_traverse_files. */ nlm_traverse_files(NULL, nlmsvc_is_client, NULL); } static int nlmsvc_match_sb(void *datap, struct nlm_file *file) { struct super_block *sb = datap; return sb == nlmsvc_file_inode(file)->i_sb; } /** * nlmsvc_unlock_all_by_sb - release locks held on this file system * @sb: super block * * Release all locks held by clients accessing this file system. */ int nlmsvc_unlock_all_by_sb(struct super_block *sb) { int ret; ret = nlm_traverse_files(sb, nlmsvc_always_match, nlmsvc_match_sb); return ret ? -EIO : 0; } EXPORT_SYMBOL_GPL(nlmsvc_unlock_all_by_sb); static int nlmsvc_match_ip(void *datap, struct nlm_host *host) { return rpc_cmp_addr(nlm_srcaddr(host), datap); } /** * nlmsvc_unlock_all_by_ip - release local locks by IP address * @server_addr: server's IP address as seen by clients * * Release all locks held by clients accessing this host * via the passed in IP address. */ int nlmsvc_unlock_all_by_ip(struct sockaddr *server_addr) { int ret; ret = nlm_traverse_files(server_addr, nlmsvc_match_ip, NULL); return ret ? -EIO : 0; } EXPORT_SYMBOL_GPL(nlmsvc_unlock_all_by_ip);
linux-master
fs/lockd/svcsubs.c
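Illustrative sketch (not part of the kernel sources): the server-side file table above hashes an NFSv2 file handle with file_hash(), which simply sums all NFS2_FHSIZE bytes of the opaque handle and masks the sum to one of FILE_NRHASH = 128 buckets. A stand-alone version of that arithmetic over a made-up handle:

#include <stdio.h>

#define DEMO_FHSIZE	32			/* NFSv2 file handle size in bytes */
#define DEMO_HASH_BITS	7
#define DEMO_NRHASH	(1 << DEMO_HASH_BITS)	/* 128 buckets */

/* Byte-sum hash over the opaque handle, mirroring the kernel's file_hash(). */
static unsigned int demo_file_hash(const unsigned char *data)
{
	unsigned int tmp = 0;
	int i;

	for (i = 0; i < DEMO_FHSIZE; i++)
		tmp += data[i];
	return tmp & (DEMO_NRHASH - 1);
}

int main(void)
{
	unsigned char fh[DEMO_FHSIZE];
	int i;

	/* Fill a dummy handle with a recognizable pattern. */
	for (i = 0; i < DEMO_FHSIZE; i++)
		fh[i] = (unsigned char)(i * 7);
	printf("dummy handle hashes to bucket %u of %d\n",
	       demo_file_hash(fh), DEMO_NRHASH);
	return 0;
}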
// SPDX-License-Identifier: GPL-2.0-only /* * linux/fs/lockd/svc.c * * This is the central lockd service. * * FIXME: Separate the lockd NFS server functionality from the lockd NFS * client functionality. Oh why didn't Sun create two separate * services in the first place? * * Authors: Olaf Kirch ([email protected]) * * Copyright (C) 1995, 1996 Olaf Kirch <[email protected]> */ #include <linux/module.h> #include <linux/init.h> #include <linux/sysctl.h> #include <linux/moduleparam.h> #include <linux/sched/signal.h> #include <linux/errno.h> #include <linux/in.h> #include <linux/uio.h> #include <linux/smp.h> #include <linux/mutex.h> #include <linux/kthread.h> #include <linux/freezer.h> #include <linux/inetdevice.h> #include <linux/sunrpc/types.h> #include <linux/sunrpc/stats.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/svc.h> #include <linux/sunrpc/svcsock.h> #include <linux/sunrpc/svc_xprt.h> #include <net/ip.h> #include <net/addrconf.h> #include <net/ipv6.h> #include <linux/lockd/lockd.h> #include <linux/nfs.h> #include "netns.h" #include "procfs.h" #define NLMDBG_FACILITY NLMDBG_SVC #define LOCKD_BUFSIZE (1024 + NLMSVC_XDRSIZE) static struct svc_program nlmsvc_program; const struct nlmsvc_binding *nlmsvc_ops; EXPORT_SYMBOL_GPL(nlmsvc_ops); static DEFINE_MUTEX(nlmsvc_mutex); static unsigned int nlmsvc_users; static struct svc_serv *nlmsvc_serv; unsigned long nlmsvc_timeout; static void nlmsvc_request_retry(struct timer_list *tl) { svc_wake_up(nlmsvc_serv); } DEFINE_TIMER(nlmsvc_retry, nlmsvc_request_retry); unsigned int lockd_net_id; /* * These can be set at insmod time (useful for NFS as root filesystem), * and also changed through the sysctl interface. -- Jamie Lokier, Aug 2003 */ static unsigned long nlm_grace_period; static unsigned long nlm_timeout = LOCKD_DFLT_TIMEO; static int nlm_udpport, nlm_tcpport; /* RLIM_NOFILE defaults to 1024. That seems like a reasonable default here. */ static unsigned int nlm_max_connections = 1024; /* * Constants needed for the sysctl interface. */ static const unsigned long nlm_grace_period_min = 0; static const unsigned long nlm_grace_period_max = 240; static const unsigned long nlm_timeout_min = 3; static const unsigned long nlm_timeout_max = 20; #ifdef CONFIG_SYSCTL static const int nlm_port_min = 0, nlm_port_max = 65535; static struct ctl_table_header * nlm_sysctl_table; #endif static unsigned long get_lockd_grace_period(void) { /* Note: nlm_timeout should always be nonzero */ if (nlm_grace_period) return roundup(nlm_grace_period, nlm_timeout) * HZ; else return nlm_timeout * 5 * HZ; } static void grace_ender(struct work_struct *grace) { struct delayed_work *dwork = to_delayed_work(grace); struct lockd_net *ln = container_of(dwork, struct lockd_net, grace_period_end); locks_end_grace(&ln->lockd_manager); } static void set_grace_period(struct net *net) { unsigned long grace_period = get_lockd_grace_period(); struct lockd_net *ln = net_generic(net, lockd_net_id); locks_start_grace(net, &ln->lockd_manager); cancel_delayed_work_sync(&ln->grace_period_end); schedule_delayed_work(&ln->grace_period_end, grace_period); } /* * This is the lockd kernel thread */ static int lockd(void *vrqstp) { struct svc_rqst *rqstp = vrqstp; struct net *net = &init_net; struct lockd_net *ln = net_generic(net, lockd_net_id); /* try_to_freeze() is called from svc_recv() */ set_freezable(); dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n"); /* * The main request loop. We don't terminate until the last * NFS mount or NFS daemon has gone away. 
*/ while (!kthread_should_stop()) { /* update sv_maxconn if it has changed */ rqstp->rq_server->sv_maxconn = nlm_max_connections; nlmsvc_retry_blocked(); svc_recv(rqstp); } if (nlmsvc_ops) nlmsvc_invalidate_all(); nlm_shutdown_hosts(); cancel_delayed_work_sync(&ln->grace_period_end); locks_end_grace(&ln->lockd_manager); dprintk("lockd_down: service stopped\n"); svc_exit_thread(rqstp); return 0; } static int create_lockd_listener(struct svc_serv *serv, const char *name, struct net *net, const int family, const unsigned short port, const struct cred *cred) { struct svc_xprt *xprt; xprt = svc_find_xprt(serv, name, net, family, 0); if (xprt == NULL) return svc_xprt_create(serv, name, net, family, port, SVC_SOCK_DEFAULTS, cred); svc_xprt_put(xprt); return 0; } static int create_lockd_family(struct svc_serv *serv, struct net *net, const int family, const struct cred *cred) { int err; err = create_lockd_listener(serv, "udp", net, family, nlm_udpport, cred); if (err < 0) return err; return create_lockd_listener(serv, "tcp", net, family, nlm_tcpport, cred); } /* * Ensure there are active UDP and TCP listeners for lockd. * * Even if we have only TCP NFS mounts and/or TCP NFSDs, some * local services (such as rpc.statd) still require UDP, and * some NFS servers do not yet support NLM over TCP. * * Returns zero if all listeners are available; otherwise a * negative errno value is returned. */ static int make_socks(struct svc_serv *serv, struct net *net, const struct cred *cred) { static int warned; int err; err = create_lockd_family(serv, net, PF_INET, cred); if (err < 0) goto out_err; err = create_lockd_family(serv, net, PF_INET6, cred); if (err < 0 && err != -EAFNOSUPPORT) goto out_err; warned = 0; return 0; out_err: if (warned++ == 0) printk(KERN_WARNING "lockd_up: makesock failed, error=%d\n", err); svc_xprt_destroy_all(serv, net); svc_rpcb_cleanup(serv, net); return err; } static int lockd_up_net(struct svc_serv *serv, struct net *net, const struct cred *cred) { struct lockd_net *ln = net_generic(net, lockd_net_id); int error; if (ln->nlmsvc_users++) return 0; error = svc_bind(serv, net); if (error) goto err_bind; error = make_socks(serv, net, cred); if (error < 0) goto err_bind; set_grace_period(net); dprintk("%s: per-net data created; net=%x\n", __func__, net->ns.inum); return 0; err_bind: ln->nlmsvc_users--; return error; } static void lockd_down_net(struct svc_serv *serv, struct net *net) { struct lockd_net *ln = net_generic(net, lockd_net_id); if (ln->nlmsvc_users) { if (--ln->nlmsvc_users == 0) { nlm_shutdown_hosts_net(net); cancel_delayed_work_sync(&ln->grace_period_end); locks_end_grace(&ln->lockd_manager); svc_xprt_destroy_all(serv, net); svc_rpcb_cleanup(serv, net); } } else { pr_err("%s: no users! 
net=%x\n", __func__, net->ns.inum); BUG(); } } static int lockd_inetaddr_event(struct notifier_block *this, unsigned long event, void *ptr) { struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; struct sockaddr_in sin; if (event != NETDEV_DOWN) goto out; if (nlmsvc_serv) { dprintk("lockd_inetaddr_event: removed %pI4\n", &ifa->ifa_local); sin.sin_family = AF_INET; sin.sin_addr.s_addr = ifa->ifa_local; svc_age_temp_xprts_now(nlmsvc_serv, (struct sockaddr *)&sin); } out: return NOTIFY_DONE; } static struct notifier_block lockd_inetaddr_notifier = { .notifier_call = lockd_inetaddr_event, }; #if IS_ENABLED(CONFIG_IPV6) static int lockd_inet6addr_event(struct notifier_block *this, unsigned long event, void *ptr) { struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr; struct sockaddr_in6 sin6; if (event != NETDEV_DOWN) goto out; if (nlmsvc_serv) { dprintk("lockd_inet6addr_event: removed %pI6\n", &ifa->addr); sin6.sin6_family = AF_INET6; sin6.sin6_addr = ifa->addr; if (ipv6_addr_type(&sin6.sin6_addr) & IPV6_ADDR_LINKLOCAL) sin6.sin6_scope_id = ifa->idev->dev->ifindex; svc_age_temp_xprts_now(nlmsvc_serv, (struct sockaddr *)&sin6); } out: return NOTIFY_DONE; } static struct notifier_block lockd_inet6addr_notifier = { .notifier_call = lockd_inet6addr_event, }; #endif static int lockd_get(void) { struct svc_serv *serv; int error; if (nlmsvc_serv) { nlmsvc_users++; return 0; } /* * Sanity check: if there's no pid, * we should be the first user ... */ if (nlmsvc_users) printk(KERN_WARNING "lockd_up: no pid, %d users??\n", nlmsvc_users); if (!nlm_timeout) nlm_timeout = LOCKD_DFLT_TIMEO; nlmsvc_timeout = nlm_timeout * HZ; serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, lockd); if (!serv) { printk(KERN_WARNING "lockd_up: create service failed\n"); return -ENOMEM; } serv->sv_maxconn = nlm_max_connections; error = svc_set_num_threads(serv, NULL, 1); /* The thread now holds the only reference */ svc_put(serv); if (error < 0) return error; nlmsvc_serv = serv; register_inetaddr_notifier(&lockd_inetaddr_notifier); #if IS_ENABLED(CONFIG_IPV6) register_inet6addr_notifier(&lockd_inet6addr_notifier); #endif dprintk("lockd_up: service created\n"); nlmsvc_users++; return 0; } static void lockd_put(void) { if (WARN(nlmsvc_users <= 0, "lockd_down: no users!\n")) return; if (--nlmsvc_users) return; unregister_inetaddr_notifier(&lockd_inetaddr_notifier); #if IS_ENABLED(CONFIG_IPV6) unregister_inet6addr_notifier(&lockd_inet6addr_notifier); #endif svc_set_num_threads(nlmsvc_serv, NULL, 0); timer_delete_sync(&nlmsvc_retry); nlmsvc_serv = NULL; dprintk("lockd_down: service destroyed\n"); } /* * Bring up the lockd process if it's not already up. */ int lockd_up(struct net *net, const struct cred *cred) { int error; mutex_lock(&nlmsvc_mutex); error = lockd_get(); if (error) goto err; error = lockd_up_net(nlmsvc_serv, net, cred); if (error < 0) { lockd_put(); goto err; } err: mutex_unlock(&nlmsvc_mutex); return error; } EXPORT_SYMBOL_GPL(lockd_up); /* * Decrement the user count and bring down lockd if we're the last. */ void lockd_down(struct net *net) { mutex_lock(&nlmsvc_mutex); lockd_down_net(nlmsvc_serv, net); lockd_put(); mutex_unlock(&nlmsvc_mutex); } EXPORT_SYMBOL_GPL(lockd_down); #ifdef CONFIG_SYSCTL /* * Sysctl parameters (same as module parameters, different interface). 
*/ static struct ctl_table nlm_sysctls[] = { { .procname = "nlm_grace_period", .data = &nlm_grace_period, .maxlen = sizeof(unsigned long), .mode = 0644, .proc_handler = proc_doulongvec_minmax, .extra1 = (unsigned long *) &nlm_grace_period_min, .extra2 = (unsigned long *) &nlm_grace_period_max, }, { .procname = "nlm_timeout", .data = &nlm_timeout, .maxlen = sizeof(unsigned long), .mode = 0644, .proc_handler = proc_doulongvec_minmax, .extra1 = (unsigned long *) &nlm_timeout_min, .extra2 = (unsigned long *) &nlm_timeout_max, }, { .procname = "nlm_udpport", .data = &nlm_udpport, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = (int *) &nlm_port_min, .extra2 = (int *) &nlm_port_max, }, { .procname = "nlm_tcpport", .data = &nlm_tcpport, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = (int *) &nlm_port_min, .extra2 = (int *) &nlm_port_max, }, { .procname = "nsm_use_hostnames", .data = &nsm_use_hostnames, .maxlen = sizeof(bool), .mode = 0644, .proc_handler = proc_dobool, }, { .procname = "nsm_local_state", .data = &nsm_local_state, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { } }; #endif /* CONFIG_SYSCTL */ /* * Module (and sysfs) parameters. */ #define param_set_min_max(name, type, which_strtol, min, max) \ static int param_set_##name(const char *val, const struct kernel_param *kp) \ { \ char *endp; \ __typeof__(type) num = which_strtol(val, &endp, 0); \ if (endp == val || *endp || num < (min) || num > (max)) \ return -EINVAL; \ *((type *) kp->arg) = num; \ return 0; \ } static inline int is_callback(u32 proc) { return proc == NLMPROC_GRANTED || proc == NLMPROC_GRANTED_MSG || proc == NLMPROC_TEST_RES || proc == NLMPROC_LOCK_RES || proc == NLMPROC_CANCEL_RES || proc == NLMPROC_UNLOCK_RES || proc == NLMPROC_NSM_NOTIFY; } static enum svc_auth_status lockd_authenticate(struct svc_rqst *rqstp) { rqstp->rq_client = NULL; switch (rqstp->rq_authop->flavour) { case RPC_AUTH_NULL: case RPC_AUTH_UNIX: rqstp->rq_auth_stat = rpc_auth_ok; if (rqstp->rq_proc == 0) return SVC_OK; if (is_callback(rqstp->rq_proc)) { /* Leave it to individual procedures to * call nlmsvc_lookup_host(rqstp) */ return SVC_OK; } return svc_set_client(rqstp); } rqstp->rq_auth_stat = rpc_autherr_badcred; return SVC_DENIED; } param_set_min_max(port, int, simple_strtol, 0, 65535) param_set_min_max(grace_period, unsigned long, simple_strtoul, nlm_grace_period_min, nlm_grace_period_max) param_set_min_max(timeout, unsigned long, simple_strtoul, nlm_timeout_min, nlm_timeout_max) MODULE_AUTHOR("Olaf Kirch <[email protected]>"); MODULE_DESCRIPTION("NFS file locking service version " LOCKD_VERSION "."); MODULE_LICENSE("GPL"); module_param_call(nlm_grace_period, param_set_grace_period, param_get_ulong, &nlm_grace_period, 0644); module_param_call(nlm_timeout, param_set_timeout, param_get_ulong, &nlm_timeout, 0644); module_param_call(nlm_udpport, param_set_port, param_get_int, &nlm_udpport, 0644); module_param_call(nlm_tcpport, param_set_port, param_get_int, &nlm_tcpport, 0644); module_param(nsm_use_hostnames, bool, 0644); module_param(nlm_max_connections, uint, 0644); static int lockd_init_net(struct net *net) { struct lockd_net *ln = net_generic(net, lockd_net_id); INIT_DELAYED_WORK(&ln->grace_period_end, grace_ender); INIT_LIST_HEAD(&ln->lockd_manager.list); ln->lockd_manager.block_opens = false; INIT_LIST_HEAD(&ln->nsm_handles); return 0; } static void lockd_exit_net(struct net *net) { struct lockd_net *ln = net_generic(net, lockd_net_id); 
WARN_ONCE(!list_empty(&ln->lockd_manager.list), "net %x %s: lockd_manager.list is not empty\n", net->ns.inum, __func__); WARN_ONCE(!list_empty(&ln->nsm_handles), "net %x %s: nsm_handles list is not empty\n", net->ns.inum, __func__); WARN_ONCE(delayed_work_pending(&ln->grace_period_end), "net %x %s: grace_period_end was not cancelled\n", net->ns.inum, __func__); } static struct pernet_operations lockd_net_ops = { .init = lockd_init_net, .exit = lockd_exit_net, .id = &lockd_net_id, .size = sizeof(struct lockd_net), }; /* * Initialising and terminating the module. */ static int __init init_nlm(void) { int err; #ifdef CONFIG_SYSCTL err = -ENOMEM; nlm_sysctl_table = register_sysctl("fs/nfs", nlm_sysctls); if (nlm_sysctl_table == NULL) goto err_sysctl; #endif err = register_pernet_subsys(&lockd_net_ops); if (err) goto err_pernet; err = lockd_create_procfs(); if (err) goto err_procfs; return 0; err_procfs: unregister_pernet_subsys(&lockd_net_ops); err_pernet: #ifdef CONFIG_SYSCTL unregister_sysctl_table(nlm_sysctl_table); err_sysctl: #endif return err; } static void __exit exit_nlm(void) { /* FIXME: delete all NLM clients */ nlm_shutdown_hosts(); lockd_remove_procfs(); unregister_pernet_subsys(&lockd_net_ops); #ifdef CONFIG_SYSCTL unregister_sysctl_table(nlm_sysctl_table); #endif } module_init(init_nlm); module_exit(exit_nlm); /** * nlmsvc_dispatch - Process an NLM Request * @rqstp: incoming request * * Return values: * %0: Processing complete; do not send a Reply * %1: Processing complete; send Reply in rqstp->rq_res */ static int nlmsvc_dispatch(struct svc_rqst *rqstp) { const struct svc_procedure *procp = rqstp->rq_procinfo; __be32 *statp = rqstp->rq_accept_statp; if (!procp->pc_decode(rqstp, &rqstp->rq_arg_stream)) goto out_decode_err; *statp = procp->pc_func(rqstp); if (*statp == rpc_drop_reply) return 0; if (*statp != rpc_success) return 1; if (!procp->pc_encode(rqstp, &rqstp->rq_res_stream)) goto out_encode_err; return 1; out_decode_err: *statp = rpc_garbage_args; return 1; out_encode_err: *statp = rpc_system_err; return 1; } /* * Define NLM program and procedures */ static DEFINE_PER_CPU_ALIGNED(unsigned long, nlmsvc_version1_count[17]); static const struct svc_version nlmsvc_version1 = { .vs_vers = 1, .vs_nproc = 17, .vs_proc = nlmsvc_procedures, .vs_count = nlmsvc_version1_count, .vs_dispatch = nlmsvc_dispatch, .vs_xdrsize = NLMSVC_XDRSIZE, }; static DEFINE_PER_CPU_ALIGNED(unsigned long, nlmsvc_version3_count[ARRAY_SIZE(nlmsvc_procedures)]); static const struct svc_version nlmsvc_version3 = { .vs_vers = 3, .vs_nproc = ARRAY_SIZE(nlmsvc_procedures), .vs_proc = nlmsvc_procedures, .vs_count = nlmsvc_version3_count, .vs_dispatch = nlmsvc_dispatch, .vs_xdrsize = NLMSVC_XDRSIZE, }; #ifdef CONFIG_LOCKD_V4 static DEFINE_PER_CPU_ALIGNED(unsigned long, nlmsvc_version4_count[ARRAY_SIZE(nlmsvc_procedures4)]); static const struct svc_version nlmsvc_version4 = { .vs_vers = 4, .vs_nproc = ARRAY_SIZE(nlmsvc_procedures4), .vs_proc = nlmsvc_procedures4, .vs_count = nlmsvc_version4_count, .vs_dispatch = nlmsvc_dispatch, .vs_xdrsize = NLMSVC_XDRSIZE, }; #endif static const struct svc_version *nlmsvc_version[] = { [1] = &nlmsvc_version1, [3] = &nlmsvc_version3, #ifdef CONFIG_LOCKD_V4 [4] = &nlmsvc_version4, #endif }; static struct svc_stat nlmsvc_stats; #define NLM_NRVERS ARRAY_SIZE(nlmsvc_version) static struct svc_program nlmsvc_program = { .pg_prog = NLM_PROGRAM, /* program number */ .pg_nvers = NLM_NRVERS, /* number of entries in nlmsvc_version */ .pg_vers = nlmsvc_version, /* version table */ .pg_name 
= "lockd", /* service name */ .pg_class = "nfsd", /* share authentication with nfsd */ .pg_stats = &nlmsvc_stats, /* stats table */ .pg_authenticate = &lockd_authenticate, /* export authentication */ .pg_init_request = svc_generic_init_request, .pg_rpcbind_set = svc_generic_rpcbind_set, };
linux-master
fs/lockd/svc.c
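Illustrative sketch (not part of the kernel sources): get_lockd_grace_period() above rounds a configured nlm_grace_period up to a multiple of nlm_timeout, and falls back to five lock timeouts when no grace period was set. The program below redoes that arithmetic in seconds (leaving out the HZ scaling) for a few invented parameter combinations.

#include <stdio.h>

/* Round x up to the next multiple of step (step > 0), like the kernel's roundup(). */
static unsigned long roundup_ul(unsigned long x, unsigned long step)
{
	return ((x + step - 1) / step) * step;
}

/* Effective grace period in seconds, mirroring get_lockd_grace_period(). */
static unsigned long demo_grace_seconds(unsigned long grace, unsigned long timeout)
{
	if (grace)
		return roundup_ul(grace, timeout);
	return timeout * 5;
}

int main(void)
{
	/* (nlm_grace_period, nlm_timeout) pairs chosen purely for illustration. */
	unsigned long cases[][2] = { { 0, 10 }, { 45, 10 }, { 90, 20 } };

	for (unsigned int i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
		printf("grace=%lus timeout=%lus -> effective grace %lus\n",
		       cases[i][0], cases[i][1],
		       demo_grace_seconds(cases[i][0], cases[i][1]));
	return 0;
}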
// SPDX-License-Identifier: GPL-2.0-only /* * linux/fs/lockd/clntlock.c * * Lock handling for the client side NLM implementation * * Copyright (C) 1996, Olaf Kirch <[email protected]> */ #include <linux/module.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/time.h> #include <linux/nfs_fs.h> #include <linux/sunrpc/addr.h> #include <linux/sunrpc/svc.h> #include <linux/sunrpc/svc_xprt.h> #include <linux/lockd/lockd.h> #include <linux/kthread.h> #include "trace.h" #define NLMDBG_FACILITY NLMDBG_CLIENT /* * Local function prototypes */ static int reclaimer(void *ptr); /* * The following functions handle blocking and granting from the * client perspective. */ static LIST_HEAD(nlm_blocked); static DEFINE_SPINLOCK(nlm_blocked_lock); /** * nlmclnt_init - Set up per-NFS mount point lockd data structures * @nlm_init: pointer to arguments structure * * Returns pointer to an appropriate nlm_host struct, * or an ERR_PTR value. */ struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init) { struct nlm_host *host; u32 nlm_version = (nlm_init->nfs_version == 2) ? 1 : 4; int status; status = lockd_up(nlm_init->net, nlm_init->cred); if (status < 0) return ERR_PTR(status); host = nlmclnt_lookup_host(nlm_init->address, nlm_init->addrlen, nlm_init->protocol, nlm_version, nlm_init->hostname, nlm_init->noresvport, nlm_init->net, nlm_init->cred); if (host == NULL) goto out_nohost; if (host->h_rpcclnt == NULL && nlm_bind_host(host) == NULL) goto out_nobind; host->h_nlmclnt_ops = nlm_init->nlmclnt_ops; return host; out_nobind: nlmclnt_release_host(host); out_nohost: lockd_down(nlm_init->net); return ERR_PTR(-ENOLCK); } EXPORT_SYMBOL_GPL(nlmclnt_init); /** * nlmclnt_done - Release resources allocated by nlmclnt_init() * @host: nlm_host structure reserved by nlmclnt_init() * */ void nlmclnt_done(struct nlm_host *host) { struct net *net = host->net; nlmclnt_release_host(host); lockd_down(net); } EXPORT_SYMBOL_GPL(nlmclnt_done); void nlmclnt_prepare_block(struct nlm_wait *block, struct nlm_host *host, struct file_lock *fl) { block->b_host = host; block->b_lock = fl; init_waitqueue_head(&block->b_wait); block->b_status = nlm_lck_blocked; } struct rpc_clnt *nlmclnt_rpc_clnt(struct nlm_host *host) { return host->h_rpcclnt; } EXPORT_SYMBOL_GPL(nlmclnt_rpc_clnt); /* * Queue up a lock for blocking so that the GRANTED request can see it */ void nlmclnt_queue_block(struct nlm_wait *block) { spin_lock(&nlm_blocked_lock); list_add(&block->b_list, &nlm_blocked); spin_unlock(&nlm_blocked_lock); } /* * Dequeue the block and return its final status */ __be32 nlmclnt_dequeue_block(struct nlm_wait *block) { __be32 status; spin_lock(&nlm_blocked_lock); list_del(&block->b_list); status = block->b_status; spin_unlock(&nlm_blocked_lock); return status; } /* * Block on a lock */ int nlmclnt_wait(struct nlm_wait *block, struct nlm_rqst *req, long timeout) { long ret; /* A borken server might ask us to block even if we didn't * request it. Just say no! */ if (block == NULL) return -EAGAIN; /* Go to sleep waiting for GRANT callback. Some servers seem * to lose callbacks, however, so we're going to poll from * time to time just to make sure. * * For now, the retry frequency is pretty high; normally * a 1 minute timeout would do. See the comment before * nlmclnt_lock for an explanation. 
*/ ret = wait_event_interruptible_timeout(block->b_wait, block->b_status != nlm_lck_blocked, timeout); if (ret < 0) return -ERESTARTSYS; /* Reset the lock status after a server reboot so we resend */ if (block->b_status == nlm_lck_denied_grace_period) block->b_status = nlm_lck_blocked; return 0; } /* * The server lockd has called us back to tell us the lock was granted */ __be32 nlmclnt_grant(const struct sockaddr *addr, const struct nlm_lock *lock) { const struct file_lock *fl = &lock->fl; const struct nfs_fh *fh = &lock->fh; struct nlm_wait *block; __be32 res = nlm_lck_denied; /* * Look up blocked request based on arguments. * Warning: must not use cookie to match it! */ spin_lock(&nlm_blocked_lock); list_for_each_entry(block, &nlm_blocked, b_list) { struct file_lock *fl_blocked = block->b_lock; if (fl_blocked->fl_start != fl->fl_start) continue; if (fl_blocked->fl_end != fl->fl_end) continue; /* * Careful! The NLM server will return the 32-bit "pid" that * we put on the wire: in this case the lockowner "pid". */ if (fl_blocked->fl_u.nfs_fl.owner->pid != lock->svid) continue; if (!rpc_cmp_addr(nlm_addr(block->b_host), addr)) continue; if (nfs_compare_fh(NFS_FH(file_inode(fl_blocked->fl_file)), fh) != 0) continue; /* Alright, we found a lock. Set the return status * and wake up the caller */ block->b_status = nlm_granted; wake_up(&block->b_wait); res = nlm_granted; } spin_unlock(&nlm_blocked_lock); trace_nlmclnt_grant(lock, addr, svc_addr_len(addr), res); return res; } /* * The following procedures deal with the recovery of locks after a * server crash. */ /* * Reclaim all locks on server host. We do this by spawning a separate * reclaimer thread. */ void nlmclnt_recovery(struct nlm_host *host) { struct task_struct *task; if (!host->h_reclaiming++) { nlm_get_host(host); task = kthread_run(reclaimer, host, "%s-reclaim", host->h_name); if (IS_ERR(task)) printk(KERN_ERR "lockd: unable to spawn reclaimer " "thread. Locks for %s won't be reclaimed! " "(%ld)\n", host->h_name, PTR_ERR(task)); } } static int reclaimer(void *ptr) { struct nlm_host *host = (struct nlm_host *) ptr; struct nlm_wait *block; struct nlm_rqst *req; struct file_lock *fl, *next; u32 nsmstate; struct net *net = host->net; req = kmalloc(sizeof(*req), GFP_KERNEL); if (!req) return 0; allow_signal(SIGKILL); down_write(&host->h_rwsem); lockd_up(net, NULL); /* note: this cannot fail as lockd is already running */ dprintk("lockd: reclaiming locks for host %s\n", host->h_name); restart: nsmstate = host->h_nsmstate; /* Force a portmap getport - the peer's lockd will * most likely end up on a different port. */ host->h_nextrebind = jiffies; nlm_rebind_host(host); /* First, reclaim all locks that have been granted. */ list_splice_init(&host->h_granted, &host->h_reclaim); list_for_each_entry_safe(fl, next, &host->h_reclaim, fl_u.nfs_fl.list) { list_del_init(&fl->fl_u.nfs_fl.list); /* * sending this thread a SIGKILL will result in any unreclaimed * locks being removed from the h_granted list. This means that * the kernel will not attempt to reclaim them again if a new * reclaimer thread is spawned for this host. */ if (signalled()) continue; if (nlmclnt_reclaim(host, fl, req) != 0) continue; list_add_tail(&fl->fl_u.nfs_fl.list, &host->h_granted); if (host->h_nsmstate != nsmstate) { /* Argh! The server rebooted again! 
*/ goto restart; } } host->h_reclaiming = 0; up_write(&host->h_rwsem); dprintk("NLM: done reclaiming locks for host %s\n", host->h_name); /* Now, wake up all processes that sleep on a blocked lock */ spin_lock(&nlm_blocked_lock); list_for_each_entry(block, &nlm_blocked, b_list) { if (block->b_host == host) { block->b_status = nlm_lck_denied_grace_period; wake_up(&block->b_wait); } } spin_unlock(&nlm_blocked_lock); /* Release host handle after use */ nlmclnt_release_host(host); lockd_down(net); kfree(req); return 0; }
linux-master
fs/lockd/clntlock.c
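Illustrative sketch (not part of the kernel sources): nlmclnt_grant() above deliberately ignores the cookie and instead matches a GRANTED callback against the queued blocked requests by byte range, lock-owner svid, server address and file handle. The simplified userspace predicate below captures that comparison; every type and field name here is an assumption introduced purely for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Simplified stand-ins for the fields nlmclnt_grant() actually compares. */
struct demo_fh  { unsigned char data[32]; };
struct demo_key {
	uint64_t	start, end;	/* byte range of the lock */
	uint32_t	svid;		/* lock owner "pid" sent on the wire */
	uint32_t	server_ip;	/* peer address (IPv4, network order) */
	struct demo_fh	fh;		/* file handle */
};

/* True when a GRANTED callback (grant) refers to a queued blocked lock (blocked). */
static bool grant_matches(const struct demo_key *blocked,
			  const struct demo_key *grant)
{
	return blocked->start == grant->start &&
	       blocked->end == grant->end &&
	       blocked->svid == grant->svid &&
	       blocked->server_ip == grant->server_ip &&
	       memcmp(&blocked->fh, &grant->fh, sizeof(blocked->fh)) == 0;
}

int main(void)
{
	struct demo_key a = { .start = 0, .end = 100, .svid = 7,
			      .server_ip = 0x0100007f };
	struct demo_key b = a;

	b.svid = 8;	/* different lock owner: must not match */
	return (grant_matches(&a, &a) && !grant_matches(&a, &b)) ? 0 : 1;
}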
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2019 Christoph Hellwig */ #include <linux/iomap.h> #include <linux/uio.h> /* * We include this last to have the helpers above available for the trace * event implementations. */ #define CREATE_TRACE_POINTS #include "trace.h"
linux-master
fs/iomap/trace.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2018 Oracle. All Rights Reserved. * Author: Darrick J. Wong <[email protected]> */ #include <linux/module.h> #include <linux/compiler.h> #include <linux/fs.h> #include <linux/iomap.h> #include <linux/swap.h> /* Swapfile activation */ struct iomap_swapfile_info { struct iomap iomap; /* accumulated iomap */ struct swap_info_struct *sis; uint64_t lowest_ppage; /* lowest physical addr seen (pages) */ uint64_t highest_ppage; /* highest physical addr seen (pages) */ unsigned long nr_pages; /* number of pages collected */ int nr_extents; /* extent count */ struct file *file; }; /* * Collect physical extents for this swap file. Physical extents reported to * the swap code must be trimmed to align to a page boundary. The logical * offset within the file is irrelevant since the swapfile code maps logical * page numbers of the swap device to the physical page-aligned extents. */ static int iomap_swapfile_add_extent(struct iomap_swapfile_info *isi) { struct iomap *iomap = &isi->iomap; unsigned long nr_pages; unsigned long max_pages; uint64_t first_ppage; uint64_t first_ppage_reported; uint64_t next_ppage; int error; if (unlikely(isi->nr_pages >= isi->sis->max)) return 0; max_pages = isi->sis->max - isi->nr_pages; /* * Round the start up and the end down so that the physical * extent aligns to a page boundary. */ first_ppage = ALIGN(iomap->addr, PAGE_SIZE) >> PAGE_SHIFT; next_ppage = ALIGN_DOWN(iomap->addr + iomap->length, PAGE_SIZE) >> PAGE_SHIFT; /* Skip too-short physical extents. */ if (first_ppage >= next_ppage) return 0; nr_pages = next_ppage - first_ppage; nr_pages = min(nr_pages, max_pages); /* * Calculate how much swap space we're adding; the first page contains * the swap header and doesn't count. The mm still wants that first * page fed to add_swap_extent, however. */ first_ppage_reported = first_ppage; if (iomap->offset == 0) first_ppage_reported++; if (isi->lowest_ppage > first_ppage_reported) isi->lowest_ppage = first_ppage_reported; if (isi->highest_ppage < (next_ppage - 1)) isi->highest_ppage = next_ppage - 1; /* Add extent, set up for the next call. */ error = add_swap_extent(isi->sis, isi->nr_pages, nr_pages, first_ppage); if (error < 0) return error; isi->nr_extents += error; isi->nr_pages += nr_pages; return 0; } static int iomap_swapfile_fail(struct iomap_swapfile_info *isi, const char *str) { char *buf, *p = ERR_PTR(-ENOMEM); buf = kmalloc(PATH_MAX, GFP_KERNEL); if (buf) p = file_path(isi->file, buf, PATH_MAX); pr_err("swapon: file %s %s\n", IS_ERR(p) ? "<unknown>" : p, str); kfree(buf); return -EINVAL; } /* * Accumulate iomaps for this swap file. We have to accumulate iomaps because * swap only cares about contiguous page-aligned physical extents and makes no * distinction between written and unwritten extents. */ static loff_t iomap_swapfile_iter(const struct iomap_iter *iter, struct iomap *iomap, struct iomap_swapfile_info *isi) { switch (iomap->type) { case IOMAP_MAPPED: case IOMAP_UNWRITTEN: /* Only real or unwritten extents. */ break; case IOMAP_INLINE: /* No inline data. */ return iomap_swapfile_fail(isi, "is inline"); default: return iomap_swapfile_fail(isi, "has unallocated extents"); } /* No uncommitted metadata or shared blocks. */ if (iomap->flags & IOMAP_F_DIRTY) return iomap_swapfile_fail(isi, "is not committed"); if (iomap->flags & IOMAP_F_SHARED) return iomap_swapfile_fail(isi, "has shared extents"); /* Only one bdev per swap file. 
*/ if (iomap->bdev != isi->sis->bdev) return iomap_swapfile_fail(isi, "outside the main device"); if (isi->iomap.length == 0) { /* No accumulated extent, so just store it. */ memcpy(&isi->iomap, iomap, sizeof(isi->iomap)); } else if (isi->iomap.addr + isi->iomap.length == iomap->addr) { /* Append this to the accumulated extent. */ isi->iomap.length += iomap->length; } else { /* Otherwise, add the retained iomap and store this one. */ int error = iomap_swapfile_add_extent(isi); if (error) return error; memcpy(&isi->iomap, iomap, sizeof(isi->iomap)); } return iomap_length(iter); } /* * Iterate a swap file's iomaps to construct physical extents that can be * passed to the swapfile subsystem. */ int iomap_swapfile_activate(struct swap_info_struct *sis, struct file *swap_file, sector_t *pagespan, const struct iomap_ops *ops) { struct inode *inode = swap_file->f_mapping->host; struct iomap_iter iter = { .inode = inode, .pos = 0, .len = ALIGN_DOWN(i_size_read(inode), PAGE_SIZE), .flags = IOMAP_REPORT, }; struct iomap_swapfile_info isi = { .sis = sis, .lowest_ppage = (sector_t)-1ULL, .file = swap_file, }; int ret; /* * Persist all file mapping metadata so that we won't have any * IOMAP_F_DIRTY iomaps. */ ret = vfs_fsync(swap_file, 1); if (ret) return ret; while ((ret = iomap_iter(&iter, ops)) > 0) iter.processed = iomap_swapfile_iter(&iter, &iter.iomap, &isi); if (ret < 0) return ret; if (isi.iomap.length) { ret = iomap_swapfile_add_extent(&isi); if (ret) return ret; } /* * If this swapfile doesn't contain even a single page-aligned * contiguous range of blocks, reject this useless swapfile to * prevent confusion later on. */ if (isi.nr_pages == 0) { pr_warn("swapon: Cannot find a single usable page in file.\n"); return -EINVAL; } *pagespan = 1 + isi.highest_ppage - isi.lowest_ppage; sis->max = isi.nr_pages; sis->pages = isi.nr_pages - 1; sis->highest_bit = isi.nr_pages - 1; return isi.nr_extents; } EXPORT_SYMBOL_GPL(iomap_swapfile_activate);
linux-master
fs/iomap/swapfile.c
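Not part of the original file: hypothetical glue showing how a filesystem might wire iomap_swapfile_activate() from the code above into its ->swap_activate address_space operation. The myfs_* names and myfs_report_iomap_ops are placeholders; only iomap_swapfile_activate() comes from the file above.

#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/swap.h>

extern const struct iomap_ops myfs_report_iomap_ops;	/* provided by the fs */

static int myfs_swap_activate(struct swap_info_struct *sis,
		struct file *swap_file, sector_t *span)
{
	/*
	 * iomap_swapfile_activate() walks the file's mappings with the given
	 * ops, registers page-aligned extents with the swap code, and returns
	 * the number of extents added or a negative errno.
	 */
	return iomap_swapfile_activate(sis, swap_file, span,
			&myfs_report_iomap_ops);
}

static const struct address_space_operations myfs_aops = {
	.swap_activate	= myfs_swap_activate,
	/* ...read_folio, writepages, etc. elided... */
};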
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2017 Red Hat, Inc. * Copyright (c) 2018-2021 Christoph Hellwig. */ #include <linux/module.h> #include <linux/compiler.h> #include <linux/fs.h> #include <linux/iomap.h> #include <linux/pagemap.h> #include <linux/pagevec.h> static loff_t iomap_seek_hole_iter(const struct iomap_iter *iter, loff_t *hole_pos) { loff_t length = iomap_length(iter); switch (iter->iomap.type) { case IOMAP_UNWRITTEN: *hole_pos = mapping_seek_hole_data(iter->inode->i_mapping, iter->pos, iter->pos + length, SEEK_HOLE); if (*hole_pos == iter->pos + length) return length; return 0; case IOMAP_HOLE: *hole_pos = iter->pos; return 0; default: return length; } } loff_t iomap_seek_hole(struct inode *inode, loff_t pos, const struct iomap_ops *ops) { loff_t size = i_size_read(inode); struct iomap_iter iter = { .inode = inode, .pos = pos, .flags = IOMAP_REPORT, }; int ret; /* Nothing to be found before or beyond the end of the file. */ if (pos < 0 || pos >= size) return -ENXIO; iter.len = size - pos; while ((ret = iomap_iter(&iter, ops)) > 0) iter.processed = iomap_seek_hole_iter(&iter, &pos); if (ret < 0) return ret; if (iter.len) /* found hole before EOF */ return pos; return size; } EXPORT_SYMBOL_GPL(iomap_seek_hole); static loff_t iomap_seek_data_iter(const struct iomap_iter *iter, loff_t *hole_pos) { loff_t length = iomap_length(iter); switch (iter->iomap.type) { case IOMAP_HOLE: return length; case IOMAP_UNWRITTEN: *hole_pos = mapping_seek_hole_data(iter->inode->i_mapping, iter->pos, iter->pos + length, SEEK_DATA); if (*hole_pos < 0) return length; return 0; default: *hole_pos = iter->pos; return 0; } } loff_t iomap_seek_data(struct inode *inode, loff_t pos, const struct iomap_ops *ops) { loff_t size = i_size_read(inode); struct iomap_iter iter = { .inode = inode, .pos = pos, .flags = IOMAP_REPORT, }; int ret; /* Nothing to be found before or beyond the end of the file. */ if (pos < 0 || pos >= size) return -ENXIO; iter.len = size - pos; while ((ret = iomap_iter(&iter, ops)) > 0) iter.processed = iomap_seek_data_iter(&iter, &pos); if (ret < 0) return ret; if (iter.len) /* found data before EOF */ return pos; /* We've reached the end of the file without finding data */ return -ENXIO; } EXPORT_SYMBOL_GPL(iomap_seek_data);
linux-master
fs/iomap/seek.c
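Not part of the original file: a hypothetical ->llseek sketch showing how the two helpers above are typically consumed. myfs_read_iomap_ops is a placeholder, and per-filesystem locking is elided; iomap_seek_hole() and iomap_seek_data() return the resolved offset or a negative errno.

#include <linux/fs.h>
#include <linux/iomap.h>

extern const struct iomap_ops myfs_read_iomap_ops;	/* provided by the fs */

static loff_t myfs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file_inode(file);
	loff_t ret;

	switch (whence) {
	case SEEK_HOLE:
		ret = iomap_seek_hole(inode, offset, &myfs_read_iomap_ops);
		break;
	case SEEK_DATA:
		ret = iomap_seek_data(inode, offset, &myfs_read_iomap_ops);
		break;
	default:
		return generic_file_llseek(file, offset, whence);
	}

	if (ret < 0)
		return ret;
	/* Clamp and store the new file position. */
	return vfs_setpos(file, ret, inode->i_sb->s_maxbytes);
}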
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016-2021 Christoph Hellwig.
 */
#include <linux/fs.h>
#include <linux/iomap.h>
#include "trace.h"

/*
 * Advance to the next range we need to map.
 *
 * If the iomap is marked IOMAP_F_STALE, it means the existing map was not
 * fully processed - it was aborted because the extent the iomap spanned may
 * have been changed during the operation. In this case, the iteration
 * behaviour is to remap the unprocessed range of the iter, and that means we
 * may need to remap even when we've made no progress (i.e. iter->processed
 * = 0). Hence the "finished iterating" case needs to distinguish between
 * (processed = 0) meaning we are done and (processed = 0 && stale) meaning we
 * need to remap the entire remaining range.
 */
static inline int iomap_iter_advance(struct iomap_iter *iter)
{
	bool stale = iter->iomap.flags & IOMAP_F_STALE;

	/* handle the previous iteration (if any) */
	if (iter->iomap.length) {
		if (iter->processed < 0)
			return iter->processed;
		if (!iter->processed && !stale)
			return 0;
		if (WARN_ON_ONCE(iter->processed > iomap_length(iter)))
			return -EIO;
		iter->pos += iter->processed;
		iter->len -= iter->processed;
		if (!iter->len)
			return 0;
	}

	/* clear the state for the next iteration */
	iter->processed = 0;
	memset(&iter->iomap, 0, sizeof(iter->iomap));
	memset(&iter->srcmap, 0, sizeof(iter->srcmap));
	return 1;
}

static inline void iomap_iter_done(struct iomap_iter *iter)
{
	WARN_ON_ONCE(iter->iomap.offset > iter->pos);
	WARN_ON_ONCE(iter->iomap.length == 0);
	WARN_ON_ONCE(iter->iomap.offset + iter->iomap.length <= iter->pos);
	WARN_ON_ONCE(iter->iomap.flags & IOMAP_F_STALE);

	trace_iomap_iter_dstmap(iter->inode, &iter->iomap);
	if (iter->srcmap.type != IOMAP_HOLE)
		trace_iomap_iter_srcmap(iter->inode, &iter->srcmap);
}

/**
 * iomap_iter - iterate over ranges in a file
 * @iter: iteration structure
 * @ops: iomap ops provided by the file system
 *
 * Iterate over filesystem-provided space mappings for the provided file range.
 *
 * This function handles cleanup of resources acquired for iteration when the
 * filesystem indicates there are no more space mappings, which means that this
 * function must be called in a loop that continues as long as it returns a
 * positive value. If 0 or a negative value is returned, the caller must not
 * return to the loop body. Within a loop body, there are two ways to break out
 * of the loop body: leave @iter.processed unchanged, or set it to a negative
 * errno.
 */
int iomap_iter(struct iomap_iter *iter, const struct iomap_ops *ops)
{
	int ret;

	if (iter->iomap.length && ops->iomap_end) {
		ret = ops->iomap_end(iter->inode, iter->pos, iomap_length(iter),
				iter->processed > 0 ? iter->processed : 0,
				iter->flags, &iter->iomap);
		if (ret < 0 && !iter->processed)
			return ret;
	}

	trace_iomap_iter(iter, ops, _RET_IP_);
	ret = iomap_iter_advance(iter);
	if (ret <= 0)
		return ret;

	ret = ops->iomap_begin(iter->inode, iter->pos, iter->len, iter->flags,
			       &iter->iomap, &iter->srcmap);
	if (ret < 0)
		return ret;
	iomap_iter_done(iter);
	return 1;
}
linux-master
fs/iomap/iter.c
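Not part of the original file: a sketch of the canonical iomap_iter() consumer loop described in the kerneldoc above - keep calling iomap_iter() while it returns a positive value and report the bytes handled for the current mapping through iter.processed (leaving it 0 ends the walk, a negative value reports an error). demo_handle_extent() and demo_walk_range() are hypothetical names.

#include <linux/fs.h>
#include <linux/iomap.h>

static loff_t demo_handle_extent(const struct iomap_iter *iter)
{
	/* Pretend we fully processed the current mapping. */
	return iomap_length(iter);
}

static int demo_walk_range(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode	= inode,
		.pos	= pos,
		.len	= len,
		.flags	= IOMAP_REPORT,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = demo_handle_extent(&iter);
	return ret;
}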
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2016-2021 Christoph Hellwig. */ #include <linux/module.h> #include <linux/compiler.h> #include <linux/fs.h> #include <linux/iomap.h> #include <linux/fiemap.h> #include <linux/pagemap.h> static int iomap_to_fiemap(struct fiemap_extent_info *fi, const struct iomap *iomap, u32 flags) { switch (iomap->type) { case IOMAP_HOLE: /* skip holes */ return 0; case IOMAP_DELALLOC: flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN; break; case IOMAP_MAPPED: break; case IOMAP_UNWRITTEN: flags |= FIEMAP_EXTENT_UNWRITTEN; break; case IOMAP_INLINE: flags |= FIEMAP_EXTENT_DATA_INLINE; break; } if (iomap->flags & IOMAP_F_MERGED) flags |= FIEMAP_EXTENT_MERGED; if (iomap->flags & IOMAP_F_SHARED) flags |= FIEMAP_EXTENT_SHARED; return fiemap_fill_next_extent(fi, iomap->offset, iomap->addr != IOMAP_NULL_ADDR ? iomap->addr : 0, iomap->length, flags); } static loff_t iomap_fiemap_iter(const struct iomap_iter *iter, struct fiemap_extent_info *fi, struct iomap *prev) { int ret; if (iter->iomap.type == IOMAP_HOLE) return iomap_length(iter); ret = iomap_to_fiemap(fi, prev, 0); *prev = iter->iomap; switch (ret) { case 0: /* success */ return iomap_length(iter); case 1: /* extent array full */ return 0; default: /* error */ return ret; } } int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi, u64 start, u64 len, const struct iomap_ops *ops) { struct iomap_iter iter = { .inode = inode, .pos = start, .len = len, .flags = IOMAP_REPORT, }; struct iomap prev = { .type = IOMAP_HOLE, }; int ret; ret = fiemap_prep(inode, fi, start, &iter.len, 0); if (ret) return ret; while ((ret = iomap_iter(&iter, ops)) > 0) iter.processed = iomap_fiemap_iter(&iter, fi, &prev); if (prev.type != IOMAP_HOLE) { ret = iomap_to_fiemap(fi, &prev, FIEMAP_EXTENT_LAST); if (ret < 0) return ret; } /* inode with no (attribute) mapping will give ENOENT */ if (ret < 0 && ret != -ENOENT) return ret; return 0; } EXPORT_SYMBOL_GPL(iomap_fiemap); /* legacy ->bmap interface. 0 is the error return (!) */ sector_t iomap_bmap(struct address_space *mapping, sector_t bno, const struct iomap_ops *ops) { struct iomap_iter iter = { .inode = mapping->host, .pos = (loff_t)bno << mapping->host->i_blkbits, .len = i_blocksize(mapping->host), .flags = IOMAP_REPORT, }; const unsigned int blkshift = mapping->host->i_blkbits - SECTOR_SHIFT; int ret; if (filemap_write_and_wait(mapping)) return 0; bno = 0; while ((ret = iomap_iter(&iter, ops)) > 0) { if (iter.iomap.type == IOMAP_MAPPED) bno = iomap_sector(&iter.iomap, iter.pos) >> blkshift; /* leave iter.processed unset to abort loop */ } if (ret) return 0; return bno; } EXPORT_SYMBOL_GPL(iomap_bmap);
linux-master
fs/iomap/fiemap.c
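Not part of the original file: hypothetical wiring for the two exports above - ->fiemap on the inode operations and the legacy ->bmap on the address space operations. The myfs_* names and myfs_report_iomap_ops are placeholders.

#include <linux/fs.h>
#include <linux/fiemap.h>
#include <linux/iomap.h>

extern const struct iomap_ops myfs_report_iomap_ops;	/* provided by the fs */

static int myfs_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
		u64 start, u64 len)
{
	return iomap_fiemap(inode, fi, start, len, &myfs_report_iomap_ops);
}

static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
	/* Returns 0 on any failure, per the legacy ->bmap contract. */
	return iomap_bmap(mapping, block, &myfs_report_iomap_ops);
}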
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2010 Red Hat, Inc. * Copyright (c) 2016-2021 Christoph Hellwig. */ #include <linux/module.h> #include <linux/compiler.h> #include <linux/fs.h> #include <linux/fscrypt.h> #include <linux/pagemap.h> #include <linux/iomap.h> #include <linux/backing-dev.h> #include <linux/uio.h> #include <linux/task_io_accounting_ops.h> #include "trace.h" #include "../internal.h" /* * Private flags for iomap_dio, must not overlap with the public ones in * iomap.h: */ #define IOMAP_DIO_CALLER_COMP (1U << 26) #define IOMAP_DIO_INLINE_COMP (1U << 27) #define IOMAP_DIO_WRITE_THROUGH (1U << 28) #define IOMAP_DIO_NEED_SYNC (1U << 29) #define IOMAP_DIO_WRITE (1U << 30) #define IOMAP_DIO_DIRTY (1U << 31) struct iomap_dio { struct kiocb *iocb; const struct iomap_dio_ops *dops; loff_t i_size; loff_t size; atomic_t ref; unsigned flags; int error; size_t done_before; bool wait_for_completion; union { /* used during submission and for synchronous completion: */ struct { struct iov_iter *iter; struct task_struct *waiter; } submit; /* used for aio completion: */ struct { struct work_struct work; } aio; }; }; static struct bio *iomap_dio_alloc_bio(const struct iomap_iter *iter, struct iomap_dio *dio, unsigned short nr_vecs, blk_opf_t opf) { if (dio->dops && dio->dops->bio_set) return bio_alloc_bioset(iter->iomap.bdev, nr_vecs, opf, GFP_KERNEL, dio->dops->bio_set); return bio_alloc(iter->iomap.bdev, nr_vecs, opf, GFP_KERNEL); } static void iomap_dio_submit_bio(const struct iomap_iter *iter, struct iomap_dio *dio, struct bio *bio, loff_t pos) { struct kiocb *iocb = dio->iocb; atomic_inc(&dio->ref); /* Sync dio can't be polled reliably */ if ((iocb->ki_flags & IOCB_HIPRI) && !is_sync_kiocb(iocb)) { bio_set_polled(bio, iocb); WRITE_ONCE(iocb->private, bio); } if (dio->dops && dio->dops->submit_io) dio->dops->submit_io(iter, bio, pos); else submit_bio(bio); } ssize_t iomap_dio_complete(struct iomap_dio *dio) { const struct iomap_dio_ops *dops = dio->dops; struct kiocb *iocb = dio->iocb; loff_t offset = iocb->ki_pos; ssize_t ret = dio->error; if (dops && dops->end_io) ret = dops->end_io(iocb, dio->size, ret, dio->flags); if (likely(!ret)) { ret = dio->size; /* check for short read */ if (offset + ret > dio->i_size && !(dio->flags & IOMAP_DIO_WRITE)) ret = dio->i_size - offset; } /* * Try again to invalidate clean pages which might have been cached by * non-direct readahead, or faulted in by get_user_pages() if the source * of the write was an mmap'ed region of the file we're writing. Either * one is a pretty crazy thing to do, so we don't support it 100%. If * this invalidation fails, tough, the write still worked... * * And this page cache invalidation has to be after ->end_io(), as some * filesystems convert unwritten extents to real allocations in * ->end_io() when necessary, otherwise a racing buffer read would cache * zeros from unwritten extents. */ if (!dio->error && dio->size && (dio->flags & IOMAP_DIO_WRITE)) kiocb_invalidate_post_direct_write(iocb, dio->size); inode_dio_end(file_inode(iocb->ki_filp)); if (ret > 0) { iocb->ki_pos += ret; /* * If this is a DSYNC write, make sure we push it to stable * storage now that we've written data. 
*/ if (dio->flags & IOMAP_DIO_NEED_SYNC) ret = generic_write_sync(iocb, ret); if (ret > 0) ret += dio->done_before; } trace_iomap_dio_complete(iocb, dio->error, ret); kfree(dio); return ret; } EXPORT_SYMBOL_GPL(iomap_dio_complete); static ssize_t iomap_dio_deferred_complete(void *data) { return iomap_dio_complete(data); } static void iomap_dio_complete_work(struct work_struct *work) { struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work); struct kiocb *iocb = dio->iocb; iocb->ki_complete(iocb, iomap_dio_complete(dio)); } /* * Set an error in the dio if none is set yet. We have to use cmpxchg * as the submission context and the completion context(s) can race to * update the error. */ static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret) { cmpxchg(&dio->error, 0, ret); } void iomap_dio_bio_end_io(struct bio *bio) { struct iomap_dio *dio = bio->bi_private; bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY); struct kiocb *iocb = dio->iocb; if (bio->bi_status) iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status)); if (!atomic_dec_and_test(&dio->ref)) goto release_bio; /* * Synchronous dio, task itself will handle any completion work * that needs after IO. All we need to do is wake the task. */ if (dio->wait_for_completion) { struct task_struct *waiter = dio->submit.waiter; WRITE_ONCE(dio->submit.waiter, NULL); blk_wake_io_task(waiter); goto release_bio; } /* * Flagged with IOMAP_DIO_INLINE_COMP, we can complete it inline */ if (dio->flags & IOMAP_DIO_INLINE_COMP) { WRITE_ONCE(iocb->private, NULL); iomap_dio_complete_work(&dio->aio.work); goto release_bio; } /* * If this dio is flagged with IOMAP_DIO_CALLER_COMP, then schedule * our completion that way to avoid an async punt to a workqueue. */ if (dio->flags & IOMAP_DIO_CALLER_COMP) { /* only polled IO cares about private cleared */ iocb->private = dio; iocb->dio_complete = iomap_dio_deferred_complete; /* * Invoke ->ki_complete() directly. We've assigned our * dio_complete callback handler, and since the issuer set * IOCB_DIO_CALLER_COMP, we know their ki_complete handler will * notice ->dio_complete being set and will defer calling that * handler until it can be done from a safe task context. * * Note that the 'res' being passed in here is not important * for this case. The actual completion value of the request * will be gotten from dio_complete when that is run by the * issuer. */ iocb->ki_complete(iocb, 0); goto release_bio; } /* * Async DIO completion that requires filesystem level completion work * gets punted to a work queue to complete as the operation may require * more IO to be issued to finalise filesystem metadata changes or * guarantee data integrity. 
*/ INIT_WORK(&dio->aio.work, iomap_dio_complete_work); queue_work(file_inode(iocb->ki_filp)->i_sb->s_dio_done_wq, &dio->aio.work); release_bio: if (should_dirty) { bio_check_pages_dirty(bio); } else { bio_release_pages(bio, false); bio_put(bio); } } EXPORT_SYMBOL_GPL(iomap_dio_bio_end_io); static void iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio, loff_t pos, unsigned len) { struct inode *inode = file_inode(dio->iocb->ki_filp); struct page *page = ZERO_PAGE(0); struct bio *bio; bio = iomap_dio_alloc_bio(iter, dio, 1, REQ_OP_WRITE | REQ_SYNC | REQ_IDLE); fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits, GFP_KERNEL); bio->bi_iter.bi_sector = iomap_sector(&iter->iomap, pos); bio->bi_private = dio; bio->bi_end_io = iomap_dio_bio_end_io; __bio_add_page(bio, page, len, 0); iomap_dio_submit_bio(iter, dio, bio, pos); } /* * Figure out the bio's operation flags from the dio request, the * mapping, and whether or not we want FUA. Note that we can end up * clearing the WRITE_THROUGH flag in the dio request. */ static inline blk_opf_t iomap_dio_bio_opflags(struct iomap_dio *dio, const struct iomap *iomap, bool use_fua) { blk_opf_t opflags = REQ_SYNC | REQ_IDLE; if (!(dio->flags & IOMAP_DIO_WRITE)) return REQ_OP_READ; opflags |= REQ_OP_WRITE; if (use_fua) opflags |= REQ_FUA; else dio->flags &= ~IOMAP_DIO_WRITE_THROUGH; return opflags; } static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter, struct iomap_dio *dio) { const struct iomap *iomap = &iter->iomap; struct inode *inode = iter->inode; unsigned int fs_block_size = i_blocksize(inode), pad; loff_t length = iomap_length(iter); loff_t pos = iter->pos; blk_opf_t bio_opf; struct bio *bio; bool need_zeroout = false; bool use_fua = false; int nr_pages, ret = 0; size_t copied = 0; size_t orig_count; if ((pos | length) & (bdev_logical_block_size(iomap->bdev) - 1) || !bdev_iter_is_aligned(iomap->bdev, dio->submit.iter)) return -EINVAL; if (iomap->type == IOMAP_UNWRITTEN) { dio->flags |= IOMAP_DIO_UNWRITTEN; need_zeroout = true; } if (iomap->flags & IOMAP_F_SHARED) dio->flags |= IOMAP_DIO_COW; if (iomap->flags & IOMAP_F_NEW) { need_zeroout = true; } else if (iomap->type == IOMAP_MAPPED) { /* * Use a FUA write if we need datasync semantics, this is a pure * data IO that doesn't require any metadata updates (including * after IO completion such as unwritten extent conversion) and * the underlying device either supports FUA or doesn't have * a volatile write cache. This allows us to avoid cache flushes * on IO completion. If we can't use writethrough and need to * sync, disable in-task completions as dio completion will * need to call generic_write_sync() which will do a blocking * fsync / cache flush call. */ if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) && (dio->flags & IOMAP_DIO_WRITE_THROUGH) && (bdev_fua(iomap->bdev) || !bdev_write_cache(iomap->bdev))) use_fua = true; else if (dio->flags & IOMAP_DIO_NEED_SYNC) dio->flags &= ~IOMAP_DIO_CALLER_COMP; } /* * Save the original count and trim the iter to just the extent we * are operating on right now. The iter will be re-expanded once * we are done. */ orig_count = iov_iter_count(dio->submit.iter); iov_iter_truncate(dio->submit.iter, length); if (!iov_iter_count(dio->submit.iter)) goto out; /* * We can only do deferred completion for pure overwrites that * don't require additional IO at completion. This rules out * writes that need zeroing or extent conversion, extend * the file size, or issue journal IO or cache flushes * during completion processing. 
*/ if (need_zeroout || ((dio->flags & IOMAP_DIO_NEED_SYNC) && !use_fua) || ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) dio->flags &= ~IOMAP_DIO_CALLER_COMP; /* * The rules for polled IO completions follow the guidelines as the * ones we set for inline and deferred completions. If none of those * are available for this IO, clear the polled flag. */ if (!(dio->flags & (IOMAP_DIO_INLINE_COMP|IOMAP_DIO_CALLER_COMP))) dio->iocb->ki_flags &= ~IOCB_HIPRI; if (need_zeroout) { /* zero out from the start of the block to the write offset */ pad = pos & (fs_block_size - 1); if (pad) iomap_dio_zero(iter, dio, pos - pad, pad); } /* * Set the operation flags early so that bio_iov_iter_get_pages * can set up the page vector appropriately for a ZONE_APPEND * operation. */ bio_opf = iomap_dio_bio_opflags(dio, iomap, use_fua); nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter, BIO_MAX_VECS); do { size_t n; if (dio->error) { iov_iter_revert(dio->submit.iter, copied); copied = ret = 0; goto out; } bio = iomap_dio_alloc_bio(iter, dio, nr_pages, bio_opf); fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits, GFP_KERNEL); bio->bi_iter.bi_sector = iomap_sector(iomap, pos); bio->bi_ioprio = dio->iocb->ki_ioprio; bio->bi_private = dio; bio->bi_end_io = iomap_dio_bio_end_io; ret = bio_iov_iter_get_pages(bio, dio->submit.iter); if (unlikely(ret)) { /* * We have to stop part way through an IO. We must fall * through to the sub-block tail zeroing here, otherwise * this short IO may expose stale data in the tail of * the block we haven't written data to. */ bio_put(bio); goto zero_tail; } n = bio->bi_iter.bi_size; if (dio->flags & IOMAP_DIO_WRITE) { task_io_account_write(n); } else { if (dio->flags & IOMAP_DIO_DIRTY) bio_set_pages_dirty(bio); } dio->size += n; copied += n; nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter, BIO_MAX_VECS); /* * We can only poll for single bio I/Os. */ if (nr_pages) dio->iocb->ki_flags &= ~IOCB_HIPRI; iomap_dio_submit_bio(iter, dio, bio, pos); pos += n; } while (nr_pages); /* * We need to zeroout the tail of a sub-block write if the extent type * requires zeroing or the write extends beyond EOF. If we don't zero * the block tail in the latter case, we can expose stale data via mmap * reads of the EOF block. 
*/ zero_tail: if (need_zeroout || ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) { /* zero out from the end of the write to the end of the block */ pad = pos & (fs_block_size - 1); if (pad) iomap_dio_zero(iter, dio, pos, fs_block_size - pad); } out: /* Undo iter limitation to current extent */ iov_iter_reexpand(dio->submit.iter, orig_count - copied); if (copied) return copied; return ret; } static loff_t iomap_dio_hole_iter(const struct iomap_iter *iter, struct iomap_dio *dio) { loff_t length = iov_iter_zero(iomap_length(iter), dio->submit.iter); dio->size += length; if (!length) return -EFAULT; return length; } static loff_t iomap_dio_inline_iter(const struct iomap_iter *iomi, struct iomap_dio *dio) { const struct iomap *iomap = &iomi->iomap; struct iov_iter *iter = dio->submit.iter; void *inline_data = iomap_inline_data(iomap, iomi->pos); loff_t length = iomap_length(iomi); loff_t pos = iomi->pos; size_t copied; if (WARN_ON_ONCE(!iomap_inline_data_valid(iomap))) return -EIO; if (dio->flags & IOMAP_DIO_WRITE) { loff_t size = iomi->inode->i_size; if (pos > size) memset(iomap_inline_data(iomap, size), 0, pos - size); copied = copy_from_iter(inline_data, length, iter); if (copied) { if (pos + copied > size) i_size_write(iomi->inode, pos + copied); mark_inode_dirty(iomi->inode); } } else { copied = copy_to_iter(inline_data, length, iter); } dio->size += copied; if (!copied) return -EFAULT; return copied; } static loff_t iomap_dio_iter(const struct iomap_iter *iter, struct iomap_dio *dio) { switch (iter->iomap.type) { case IOMAP_HOLE: if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE)) return -EIO; return iomap_dio_hole_iter(iter, dio); case IOMAP_UNWRITTEN: if (!(dio->flags & IOMAP_DIO_WRITE)) return iomap_dio_hole_iter(iter, dio); return iomap_dio_bio_iter(iter, dio); case IOMAP_MAPPED: return iomap_dio_bio_iter(iter, dio); case IOMAP_INLINE: return iomap_dio_inline_iter(iter, dio); case IOMAP_DELALLOC: /* * DIO is not serialised against mmap() access at all, and so * if the page_mkwrite occurs between the writeback and the * iomap_iter() call in the DIO path, then it will see the * DELALLOC block that the page-mkwrite allocated. */ pr_warn_ratelimited("Direct I/O collision with buffered writes! File: %pD4 Comm: %.20s\n", dio->iocb->ki_filp, current->comm); return -EIO; default: WARN_ON_ONCE(1); return -EIO; } } /* * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the IO * is being issued as AIO or not. This allows us to optimise pure data writes * to use REQ_FUA rather than requiring generic_write_sync() to issue a * REQ_FLUSH post write. This is slightly tricky because a single request here * can be mapped into multiple disjoint IOs and only a subset of the IOs issued * may be pure data writes. In that case, we still need to do a full data sync * completion. * * When page faults are disabled and @dio_flags includes IOMAP_DIO_PARTIAL, * __iomap_dio_rw can return a partial result if it encounters a non-resident * page in @iter after preparing a transfer. In that case, the non-resident * pages can be faulted in and the request resumed with @done_before set to the * number of bytes previously transferred. The request will then complete with * the correct total number of bytes transferred; this is essential for * completing partial requests asynchronously. * * Returns -ENOTBLK In case of a page invalidation invalidation failure for * writes. The callers needs to fall back to buffered I/O in this case. 
*/ struct iomap_dio * __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, const struct iomap_ops *ops, const struct iomap_dio_ops *dops, unsigned int dio_flags, void *private, size_t done_before) { struct inode *inode = file_inode(iocb->ki_filp); struct iomap_iter iomi = { .inode = inode, .pos = iocb->ki_pos, .len = iov_iter_count(iter), .flags = IOMAP_DIRECT, .private = private, }; bool wait_for_completion = is_sync_kiocb(iocb) || (dio_flags & IOMAP_DIO_FORCE_WAIT); struct blk_plug plug; struct iomap_dio *dio; loff_t ret = 0; trace_iomap_dio_rw_begin(iocb, iter, dio_flags, done_before); if (!iomi.len) return NULL; dio = kmalloc(sizeof(*dio), GFP_KERNEL); if (!dio) return ERR_PTR(-ENOMEM); dio->iocb = iocb; atomic_set(&dio->ref, 1); dio->size = 0; dio->i_size = i_size_read(inode); dio->dops = dops; dio->error = 0; dio->flags = 0; dio->done_before = done_before; dio->submit.iter = iter; dio->submit.waiter = current; if (iocb->ki_flags & IOCB_NOWAIT) iomi.flags |= IOMAP_NOWAIT; if (iov_iter_rw(iter) == READ) { /* reads can always complete inline */ dio->flags |= IOMAP_DIO_INLINE_COMP; if (iomi.pos >= dio->i_size) goto out_free_dio; if (user_backed_iter(iter)) dio->flags |= IOMAP_DIO_DIRTY; ret = kiocb_write_and_wait(iocb, iomi.len); if (ret) goto out_free_dio; } else { iomi.flags |= IOMAP_WRITE; dio->flags |= IOMAP_DIO_WRITE; /* * Flag as supporting deferred completions, if the issuer * groks it. This can avoid a workqueue punt for writes. * We may later clear this flag if we need to do other IO * as part of this IO completion. */ if (iocb->ki_flags & IOCB_DIO_CALLER_COMP) dio->flags |= IOMAP_DIO_CALLER_COMP; if (dio_flags & IOMAP_DIO_OVERWRITE_ONLY) { ret = -EAGAIN; if (iomi.pos >= dio->i_size || iomi.pos + iomi.len > dio->i_size) goto out_free_dio; iomi.flags |= IOMAP_OVERWRITE_ONLY; } /* for data sync or sync, we need sync completion processing */ if (iocb_is_dsync(iocb)) { dio->flags |= IOMAP_DIO_NEED_SYNC; /* * For datasync only writes, we optimistically try using * WRITE_THROUGH for this IO. This flag requires either * FUA writes through the device's write cache, or a * normal write to a device without a volatile write * cache. For the former, Any non-FUA write that occurs * will clear this flag, hence we know before completion * whether a cache flush is necessary. */ if (!(iocb->ki_flags & IOCB_SYNC)) dio->flags |= IOMAP_DIO_WRITE_THROUGH; } /* * Try to invalidate cache pages for the range we are writing. * If this invalidation fails, let the caller fall back to * buffered I/O. */ ret = kiocb_invalidate_pages(iocb, iomi.len); if (ret) { if (ret != -EAGAIN) { trace_iomap_dio_invalidate_fail(inode, iomi.pos, iomi.len); ret = -ENOTBLK; } goto out_free_dio; } if (!wait_for_completion && !inode->i_sb->s_dio_done_wq) { ret = sb_init_dio_done_wq(inode->i_sb); if (ret < 0) goto out_free_dio; } } inode_dio_begin(inode); blk_start_plug(&plug); while ((ret = iomap_iter(&iomi, ops)) > 0) { iomi.processed = iomap_dio_iter(&iomi, dio); /* * We can only poll for single bio I/Os. */ iocb->ki_flags &= ~IOCB_HIPRI; } blk_finish_plug(&plug); /* * We only report that we've read data up to i_size. * Revert iter to a state corresponding to that as some callers (such * as the splice code) rely on it. 
*/ if (iov_iter_rw(iter) == READ && iomi.pos >= dio->i_size) iov_iter_revert(iter, iomi.pos - dio->i_size); if (ret == -EFAULT && dio->size && (dio_flags & IOMAP_DIO_PARTIAL)) { if (!(iocb->ki_flags & IOCB_NOWAIT)) wait_for_completion = true; ret = 0; } /* magic error code to fall back to buffered I/O */ if (ret == -ENOTBLK) { wait_for_completion = true; ret = 0; } if (ret < 0) iomap_dio_set_error(dio, ret); /* * If all the writes we issued were already written through to the * media, we don't need to flush the cache on IO completion. Clear the * sync flag for this case. */ if (dio->flags & IOMAP_DIO_WRITE_THROUGH) dio->flags &= ~IOMAP_DIO_NEED_SYNC; /* * We are about to drop our additional submission reference, which * might be the last reference to the dio. There are three different * ways we can progress here: * * (a) If this is the last reference we will always complete and free * the dio ourselves. * (b) If this is not the last reference, and we serve an asynchronous * iocb, we must never touch the dio after the decrement, the * I/O completion handler will complete and free it. * (c) If this is not the last reference, but we serve a synchronous * iocb, the I/O completion handler will wake us up on the drop * of the final reference, and we will complete and free it here * after we got woken by the I/O completion handler. */ dio->wait_for_completion = wait_for_completion; if (!atomic_dec_and_test(&dio->ref)) { if (!wait_for_completion) { trace_iomap_dio_rw_queued(inode, iomi.pos, iomi.len); return ERR_PTR(-EIOCBQUEUED); } for (;;) { set_current_state(TASK_UNINTERRUPTIBLE); if (!READ_ONCE(dio->submit.waiter)) break; blk_io_schedule(); } __set_current_state(TASK_RUNNING); } return dio; out_free_dio: kfree(dio); if (ret) return ERR_PTR(ret); return NULL; } EXPORT_SYMBOL_GPL(__iomap_dio_rw); ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, const struct iomap_ops *ops, const struct iomap_dio_ops *dops, unsigned int dio_flags, void *private, size_t done_before) { struct iomap_dio *dio; dio = __iomap_dio_rw(iocb, iter, ops, dops, dio_flags, private, done_before); if (IS_ERR_OR_NULL(dio)) return PTR_ERR_OR_ZERO(dio); return iomap_dio_complete(dio); } EXPORT_SYMBOL_GPL(iomap_dio_rw);
linux-master
fs/iomap/direct-io.c
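Not part of the original file: a hypothetical O_DIRECT read path built on iomap_dio_rw() as exported above. Inode locking and the buffered fallback are elided; myfs_direct_iomap_ops and myfs_dio_ops are placeholders for the filesystem's mapping and completion callbacks.

#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/uio.h>

extern const struct iomap_ops myfs_direct_iomap_ops;	/* provided by the fs */
extern const struct iomap_dio_ops myfs_dio_ops;		/* optional ->end_io etc. */

static ssize_t myfs_dio_read(struct kiocb *iocb, struct iov_iter *to)
{
	if (!iov_iter_count(to))
		return 0;	/* nothing to do */

	/*
	 * iomap_dio_rw() maps the range via ->iomap_begin, submits bios, and
	 * either waits for completion (sync kiocb) or returns -EIOCBQUEUED
	 * for AIO.  Zero dio_flags and done_before is the common case.
	 */
	return iomap_dio_rw(iocb, to, &myfs_direct_iomap_ops, &myfs_dio_ops,
			0, NULL, 0);
}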
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2010 Red Hat, Inc. * Copyright (C) 2016-2019 Christoph Hellwig. */ #include <linux/module.h> #include <linux/compiler.h> #include <linux/fs.h> #include <linux/iomap.h> #include <linux/pagemap.h> #include <linux/uio.h> #include <linux/buffer_head.h> #include <linux/dax.h> #include <linux/writeback.h> #include <linux/list_sort.h> #include <linux/swap.h> #include <linux/bio.h> #include <linux/sched/signal.h> #include <linux/migrate.h> #include "trace.h" #include "../internal.h" #define IOEND_BATCH_SIZE 4096 typedef int (*iomap_punch_t)(struct inode *inode, loff_t offset, loff_t length); /* * Structure allocated for each folio to track per-block uptodate, dirty state * and I/O completions. */ struct iomap_folio_state { atomic_t read_bytes_pending; atomic_t write_bytes_pending; spinlock_t state_lock; /* * Each block has two bits in this bitmap: * Bits [0..blocks_per_folio) has the uptodate status. * Bits [b_p_f...(2*b_p_f)) has the dirty status. */ unsigned long state[]; }; static struct bio_set iomap_ioend_bioset; static inline bool ifs_is_fully_uptodate(struct folio *folio, struct iomap_folio_state *ifs) { struct inode *inode = folio->mapping->host; return bitmap_full(ifs->state, i_blocks_per_folio(inode, folio)); } static inline bool ifs_block_is_uptodate(struct iomap_folio_state *ifs, unsigned int block) { return test_bit(block, ifs->state); } static void ifs_set_range_uptodate(struct folio *folio, struct iomap_folio_state *ifs, size_t off, size_t len) { struct inode *inode = folio->mapping->host; unsigned int first_blk = off >> inode->i_blkbits; unsigned int last_blk = (off + len - 1) >> inode->i_blkbits; unsigned int nr_blks = last_blk - first_blk + 1; unsigned long flags; spin_lock_irqsave(&ifs->state_lock, flags); bitmap_set(ifs->state, first_blk, nr_blks); if (ifs_is_fully_uptodate(folio, ifs)) folio_mark_uptodate(folio); spin_unlock_irqrestore(&ifs->state_lock, flags); } static void iomap_set_range_uptodate(struct folio *folio, size_t off, size_t len) { struct iomap_folio_state *ifs = folio->private; if (ifs) ifs_set_range_uptodate(folio, ifs, off, len); else folio_mark_uptodate(folio); } static inline bool ifs_block_is_dirty(struct folio *folio, struct iomap_folio_state *ifs, int block) { struct inode *inode = folio->mapping->host; unsigned int blks_per_folio = i_blocks_per_folio(inode, folio); return test_bit(block + blks_per_folio, ifs->state); } static void ifs_clear_range_dirty(struct folio *folio, struct iomap_folio_state *ifs, size_t off, size_t len) { struct inode *inode = folio->mapping->host; unsigned int blks_per_folio = i_blocks_per_folio(inode, folio); unsigned int first_blk = (off >> inode->i_blkbits); unsigned int last_blk = (off + len - 1) >> inode->i_blkbits; unsigned int nr_blks = last_blk - first_blk + 1; unsigned long flags; spin_lock_irqsave(&ifs->state_lock, flags); bitmap_clear(ifs->state, first_blk + blks_per_folio, nr_blks); spin_unlock_irqrestore(&ifs->state_lock, flags); } static void iomap_clear_range_dirty(struct folio *folio, size_t off, size_t len) { struct iomap_folio_state *ifs = folio->private; if (ifs) ifs_clear_range_dirty(folio, ifs, off, len); } static void ifs_set_range_dirty(struct folio *folio, struct iomap_folio_state *ifs, size_t off, size_t len) { struct inode *inode = folio->mapping->host; unsigned int blks_per_folio = i_blocks_per_folio(inode, folio); unsigned int first_blk = (off >> inode->i_blkbits); unsigned int last_blk = (off + len - 1) >> inode->i_blkbits; unsigned int nr_blks = 
last_blk - first_blk + 1; unsigned long flags; spin_lock_irqsave(&ifs->state_lock, flags); bitmap_set(ifs->state, first_blk + blks_per_folio, nr_blks); spin_unlock_irqrestore(&ifs->state_lock, flags); } static void iomap_set_range_dirty(struct folio *folio, size_t off, size_t len) { struct iomap_folio_state *ifs = folio->private; if (ifs) ifs_set_range_dirty(folio, ifs, off, len); } static struct iomap_folio_state *ifs_alloc(struct inode *inode, struct folio *folio, unsigned int flags) { struct iomap_folio_state *ifs = folio->private; unsigned int nr_blocks = i_blocks_per_folio(inode, folio); gfp_t gfp; if (ifs || nr_blocks <= 1) return ifs; if (flags & IOMAP_NOWAIT) gfp = GFP_NOWAIT; else gfp = GFP_NOFS | __GFP_NOFAIL; /* * ifs->state tracks two sets of state flags when the * filesystem block size is smaller than the folio size. * The first state tracks per-block uptodate and the * second tracks per-block dirty state. */ ifs = kzalloc(struct_size(ifs, state, BITS_TO_LONGS(2 * nr_blocks)), gfp); if (!ifs) return ifs; spin_lock_init(&ifs->state_lock); if (folio_test_uptodate(folio)) bitmap_set(ifs->state, 0, nr_blocks); if (folio_test_dirty(folio)) bitmap_set(ifs->state, nr_blocks, nr_blocks); folio_attach_private(folio, ifs); return ifs; } static void ifs_free(struct folio *folio) { struct iomap_folio_state *ifs = folio_detach_private(folio); if (!ifs) return; WARN_ON_ONCE(atomic_read(&ifs->read_bytes_pending)); WARN_ON_ONCE(atomic_read(&ifs->write_bytes_pending)); WARN_ON_ONCE(ifs_is_fully_uptodate(folio, ifs) != folio_test_uptodate(folio)); kfree(ifs); } /* * Calculate the range inside the folio that we actually need to read. */ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio, loff_t *pos, loff_t length, size_t *offp, size_t *lenp) { struct iomap_folio_state *ifs = folio->private; loff_t orig_pos = *pos; loff_t isize = i_size_read(inode); unsigned block_bits = inode->i_blkbits; unsigned block_size = (1 << block_bits); size_t poff = offset_in_folio(folio, *pos); size_t plen = min_t(loff_t, folio_size(folio) - poff, length); unsigned first = poff >> block_bits; unsigned last = (poff + plen - 1) >> block_bits; /* * If the block size is smaller than the page size, we need to check the * per-block uptodate status and adjust the offset and length if needed * to avoid reading in already uptodate ranges. */ if (ifs) { unsigned int i; /* move forward for each leading block marked uptodate */ for (i = first; i <= last; i++) { if (!ifs_block_is_uptodate(ifs, i)) break; *pos += block_size; poff += block_size; plen -= block_size; first++; } /* truncate len if we find any trailing uptodate block(s) */ for ( ; i <= last; i++) { if (ifs_block_is_uptodate(ifs, i)) { plen -= (last - i + 1) * block_size; last = i - 1; break; } } } /* * If the extent spans the block that contains the i_size, we need to * handle both halves separately so that we properly zero data in the * page cache for blocks that are entirely outside of i_size. 
*/ if (orig_pos <= isize && orig_pos + length > isize) { unsigned end = offset_in_folio(folio, isize - 1) >> block_bits; if (first <= end && last > end) plen -= (last - end) * block_size; } *offp = poff; *lenp = plen; } static void iomap_finish_folio_read(struct folio *folio, size_t offset, size_t len, int error) { struct iomap_folio_state *ifs = folio->private; if (unlikely(error)) { folio_clear_uptodate(folio); folio_set_error(folio); } else { iomap_set_range_uptodate(folio, offset, len); } if (!ifs || atomic_sub_and_test(len, &ifs->read_bytes_pending)) folio_unlock(folio); } static void iomap_read_end_io(struct bio *bio) { int error = blk_status_to_errno(bio->bi_status); struct folio_iter fi; bio_for_each_folio_all(fi, bio) iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error); bio_put(bio); } struct iomap_readpage_ctx { struct folio *cur_folio; bool cur_folio_in_bio; struct bio *bio; struct readahead_control *rac; }; /** * iomap_read_inline_data - copy inline data into the page cache * @iter: iteration structure * @folio: folio to copy to * * Copy the inline data in @iter into @folio and zero out the rest of the folio. * Only a single IOMAP_INLINE extent is allowed at the end of each file. * Returns zero for success to complete the read, or the usual negative errno. */ static int iomap_read_inline_data(const struct iomap_iter *iter, struct folio *folio) { const struct iomap *iomap = iomap_iter_srcmap(iter); size_t size = i_size_read(iter->inode) - iomap->offset; size_t poff = offset_in_page(iomap->offset); size_t offset = offset_in_folio(folio, iomap->offset); void *addr; if (folio_test_uptodate(folio)) return 0; if (WARN_ON_ONCE(size > PAGE_SIZE - poff)) return -EIO; if (WARN_ON_ONCE(size > PAGE_SIZE - offset_in_page(iomap->inline_data))) return -EIO; if (WARN_ON_ONCE(size > iomap->length)) return -EIO; if (offset > 0) ifs_alloc(iter->inode, folio, iter->flags); addr = kmap_local_folio(folio, offset); memcpy(addr, iomap->inline_data, size); memset(addr + size, 0, PAGE_SIZE - poff - size); kunmap_local(addr); iomap_set_range_uptodate(folio, offset, PAGE_SIZE - poff); return 0; } static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter, loff_t pos) { const struct iomap *srcmap = iomap_iter_srcmap(iter); return srcmap->type != IOMAP_MAPPED || (srcmap->flags & IOMAP_F_NEW) || pos >= i_size_read(iter->inode); } static loff_t iomap_readpage_iter(const struct iomap_iter *iter, struct iomap_readpage_ctx *ctx, loff_t offset) { const struct iomap *iomap = &iter->iomap; loff_t pos = iter->pos + offset; loff_t length = iomap_length(iter) - offset; struct folio *folio = ctx->cur_folio; struct iomap_folio_state *ifs; loff_t orig_pos = pos; size_t poff, plen; sector_t sector; if (iomap->type == IOMAP_INLINE) return iomap_read_inline_data(iter, folio); /* zero post-eof blocks as the page may be mapped */ ifs = ifs_alloc(iter->inode, folio, iter->flags); iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen); if (plen == 0) goto done; if (iomap_block_needs_zeroing(iter, pos)) { folio_zero_range(folio, poff, plen); iomap_set_range_uptodate(folio, poff, plen); goto done; } ctx->cur_folio_in_bio = true; if (ifs) atomic_add(plen, &ifs->read_bytes_pending); sector = iomap_sector(iomap, pos); if (!ctx->bio || bio_end_sector(ctx->bio) != sector || !bio_add_folio(ctx->bio, folio, plen, poff)) { gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL); gfp_t orig_gfp = gfp; unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE); if (ctx->bio) 
submit_bio(ctx->bio); if (ctx->rac) /* same as readahead_gfp_mask */ gfp |= __GFP_NORETRY | __GFP_NOWARN; ctx->bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs), REQ_OP_READ, gfp); /* * If the bio_alloc fails, try it again for a single page to * avoid having to deal with partial page reads. This emulates * what do_mpage_read_folio does. */ if (!ctx->bio) { ctx->bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ, orig_gfp); } if (ctx->rac) ctx->bio->bi_opf |= REQ_RAHEAD; ctx->bio->bi_iter.bi_sector = sector; ctx->bio->bi_end_io = iomap_read_end_io; bio_add_folio_nofail(ctx->bio, folio, plen, poff); } done: /* * Move the caller beyond our range so that it keeps making progress. * For that, we have to include any leading non-uptodate ranges, but * we can skip trailing ones as they will be handled in the next * iteration. */ return pos - orig_pos + plen; } int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops) { struct iomap_iter iter = { .inode = folio->mapping->host, .pos = folio_pos(folio), .len = folio_size(folio), }; struct iomap_readpage_ctx ctx = { .cur_folio = folio, }; int ret; trace_iomap_readpage(iter.inode, 1); while ((ret = iomap_iter(&iter, ops)) > 0) iter.processed = iomap_readpage_iter(&iter, &ctx, 0); if (ret < 0) folio_set_error(folio); if (ctx.bio) { submit_bio(ctx.bio); WARN_ON_ONCE(!ctx.cur_folio_in_bio); } else { WARN_ON_ONCE(ctx.cur_folio_in_bio); folio_unlock(folio); } /* * Just like mpage_readahead and block_read_full_folio, we always * return 0 and just set the folio error flag on errors. This * should be cleaned up throughout the stack eventually. */ return 0; } EXPORT_SYMBOL_GPL(iomap_read_folio); static loff_t iomap_readahead_iter(const struct iomap_iter *iter, struct iomap_readpage_ctx *ctx) { loff_t length = iomap_length(iter); loff_t done, ret; for (done = 0; done < length; done += ret) { if (ctx->cur_folio && offset_in_folio(ctx->cur_folio, iter->pos + done) == 0) { if (!ctx->cur_folio_in_bio) folio_unlock(ctx->cur_folio); ctx->cur_folio = NULL; } if (!ctx->cur_folio) { ctx->cur_folio = readahead_folio(ctx->rac); ctx->cur_folio_in_bio = false; } ret = iomap_readpage_iter(iter, ctx, done); if (ret <= 0) return ret; } return done; } /** * iomap_readahead - Attempt to read pages from a file. * @rac: Describes the pages to be read. * @ops: The operations vector for the filesystem. * * This function is for filesystems to call to implement their readahead * address_space operation. * * Context: The @ops callbacks may submit I/O (eg to read the addresses of * blocks from disc), and may wait for it. The caller may be trying to * access a different page, and so sleeping excessively should be avoided. * It may allocate memory, but should avoid costly allocations. This * function is called with memalloc_nofs set, so allocations will not cause * the filesystem to be reentered. */ void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops) { struct iomap_iter iter = { .inode = rac->mapping->host, .pos = readahead_pos(rac), .len = readahead_length(rac), }; struct iomap_readpage_ctx ctx = { .rac = rac, }; trace_iomap_readahead(rac->mapping->host, readahead_count(rac)); while (iomap_iter(&iter, ops) > 0) iter.processed = iomap_readahead_iter(&iter, &ctx); if (ctx.bio) submit_bio(ctx.bio); if (ctx.cur_folio) { if (!ctx.cur_folio_in_bio) folio_unlock(ctx.cur_folio); } } EXPORT_SYMBOL_GPL(iomap_readahead); /* * iomap_is_partially_uptodate checks whether blocks within a folio are * uptodate or not. 
* * Returns true if all blocks which correspond to the specified part * of the folio are uptodate. */ bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count) { struct iomap_folio_state *ifs = folio->private; struct inode *inode = folio->mapping->host; unsigned first, last, i; if (!ifs) return false; /* Caller's range may extend past the end of this folio */ count = min(folio_size(folio) - from, count); /* First and last blocks in range within folio */ first = from >> inode->i_blkbits; last = (from + count - 1) >> inode->i_blkbits; for (i = first; i <= last; i++) if (!ifs_block_is_uptodate(ifs, i)) return false; return true; } EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate); /** * iomap_get_folio - get a folio reference for writing * @iter: iteration structure * @pos: start offset of write * @len: Suggested size of folio to create. * * Returns a locked reference to the folio at @pos, or an error pointer if the * folio could not be obtained. */ struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len) { fgf_t fgp = FGP_WRITEBEGIN | FGP_NOFS; if (iter->flags & IOMAP_NOWAIT) fgp |= FGP_NOWAIT; fgp |= fgf_set_order(len); return __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT, fgp, mapping_gfp_mask(iter->inode->i_mapping)); } EXPORT_SYMBOL_GPL(iomap_get_folio); bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags) { trace_iomap_release_folio(folio->mapping->host, folio_pos(folio), folio_size(folio)); /* * If the folio is dirty, we refuse to release our metadata because * it may be partially dirty. Once we track per-block dirty state, * we can release the metadata if every block is dirty. */ if (folio_test_dirty(folio)) return false; ifs_free(folio); return true; } EXPORT_SYMBOL_GPL(iomap_release_folio); void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len) { trace_iomap_invalidate_folio(folio->mapping->host, folio_pos(folio) + offset, len); /* * If we're invalidating the entire folio, clear the dirty state * from it and release it to avoid unnecessary buildup of the LRU. */ if (offset == 0 && len == folio_size(folio)) { WARN_ON_ONCE(folio_test_writeback(folio)); folio_cancel_dirty(folio); ifs_free(folio); } } EXPORT_SYMBOL_GPL(iomap_invalidate_folio); bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio) { struct inode *inode = mapping->host; size_t len = folio_size(folio); ifs_alloc(inode, folio, 0); iomap_set_range_dirty(folio, 0, len); return filemap_dirty_folio(mapping, folio); } EXPORT_SYMBOL_GPL(iomap_dirty_folio); static void iomap_write_failed(struct inode *inode, loff_t pos, unsigned len) { loff_t i_size = i_size_read(inode); /* * Only truncate newly allocated pages beyoned EOF, even if the * write started inside the existing inode size. 
*/ if (pos + len > i_size) truncate_pagecache_range(inode, max(pos, i_size), pos + len - 1); } static int iomap_read_folio_sync(loff_t block_start, struct folio *folio, size_t poff, size_t plen, const struct iomap *iomap) { struct bio_vec bvec; struct bio bio; bio_init(&bio, iomap->bdev, &bvec, 1, REQ_OP_READ); bio.bi_iter.bi_sector = iomap_sector(iomap, block_start); bio_add_folio_nofail(&bio, folio, plen, poff); return submit_bio_wait(&bio); } static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos, size_t len, struct folio *folio) { const struct iomap *srcmap = iomap_iter_srcmap(iter); struct iomap_folio_state *ifs; loff_t block_size = i_blocksize(iter->inode); loff_t block_start = round_down(pos, block_size); loff_t block_end = round_up(pos + len, block_size); unsigned int nr_blocks = i_blocks_per_folio(iter->inode, folio); size_t from = offset_in_folio(folio, pos), to = from + len; size_t poff, plen; /* * If the write or zeroing completely overlaps the current folio, then * entire folio will be dirtied so there is no need for * per-block state tracking structures to be attached to this folio. * For the unshare case, we must read in the ondisk contents because we * are not changing pagecache contents. */ if (!(iter->flags & IOMAP_UNSHARE) && pos <= folio_pos(folio) && pos + len >= folio_pos(folio) + folio_size(folio)) return 0; ifs = ifs_alloc(iter->inode, folio, iter->flags); if ((iter->flags & IOMAP_NOWAIT) && !ifs && nr_blocks > 1) return -EAGAIN; if (folio_test_uptodate(folio)) return 0; folio_clear_error(folio); do { iomap_adjust_read_range(iter->inode, folio, &block_start, block_end - block_start, &poff, &plen); if (plen == 0) break; if (!(iter->flags & IOMAP_UNSHARE) && (from <= poff || from >= poff + plen) && (to <= poff || to >= poff + plen)) continue; if (iomap_block_needs_zeroing(iter, block_start)) { if (WARN_ON_ONCE(iter->flags & IOMAP_UNSHARE)) return -EIO; folio_zero_segments(folio, poff, from, to, poff + plen); } else { int status; if (iter->flags & IOMAP_NOWAIT) return -EAGAIN; status = iomap_read_folio_sync(block_start, folio, poff, plen, srcmap); if (status) return status; } iomap_set_range_uptodate(folio, poff, plen); } while ((block_start += plen) < block_end); return 0; } static struct folio *__iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len) { const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops; if (folio_ops && folio_ops->get_folio) return folio_ops->get_folio(iter, pos, len); else return iomap_get_folio(iter, pos, len); } static void __iomap_put_folio(struct iomap_iter *iter, loff_t pos, size_t ret, struct folio *folio) { const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops; if (folio_ops && folio_ops->put_folio) { folio_ops->put_folio(iter->inode, pos, ret, folio); } else { folio_unlock(folio); folio_put(folio); } } static int iomap_write_begin_inline(const struct iomap_iter *iter, struct folio *folio) { /* needs more work for the tailpacking case; disable for now */ if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0)) return -EIO; return iomap_read_inline_data(iter, folio); } static int iomap_write_begin(struct iomap_iter *iter, loff_t pos, size_t len, struct folio **foliop) { const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops; const struct iomap *srcmap = iomap_iter_srcmap(iter); struct folio *folio; int status = 0; BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length); if (srcmap != &iter->iomap) BUG_ON(pos + len > srcmap->offset + srcmap->length); if 
(fatal_signal_pending(current)) return -EINTR; if (!mapping_large_folio_support(iter->inode->i_mapping)) len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos)); folio = __iomap_get_folio(iter, pos, len); if (IS_ERR(folio)) return PTR_ERR(folio); /* * Now we have a locked folio, before we do anything with it we need to * check that the iomap we have cached is not stale. The inode extent * mapping can change due to concurrent IO in flight (e.g. * IOMAP_UNWRITTEN state can change and memory reclaim could have * reclaimed a previously partially written page at this index after IO * completion before this write reaches this file offset) and hence we * could do the wrong thing here (zero a page range incorrectly or fail * to zero) and corrupt data. */ if (folio_ops && folio_ops->iomap_valid) { bool iomap_valid = folio_ops->iomap_valid(iter->inode, &iter->iomap); if (!iomap_valid) { iter->iomap.flags |= IOMAP_F_STALE; status = 0; goto out_unlock; } } if (pos + len > folio_pos(folio) + folio_size(folio)) len = folio_pos(folio) + folio_size(folio) - pos; if (srcmap->type == IOMAP_INLINE) status = iomap_write_begin_inline(iter, folio); else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) status = __block_write_begin_int(folio, pos, len, NULL, srcmap); else status = __iomap_write_begin(iter, pos, len, folio); if (unlikely(status)) goto out_unlock; *foliop = folio; return 0; out_unlock: __iomap_put_folio(iter, pos, 0, folio); iomap_write_failed(iter->inode, pos, len); return status; } static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len, size_t copied, struct folio *folio) { flush_dcache_folio(folio); /* * The blocks that were entirely written will now be uptodate, so we * don't have to worry about a read_folio reading them and overwriting a * partial write. However, if we've encountered a short write and only * partially written into a block, it will not be marked uptodate, so a * read_folio might come in and destroy our partial write. * * Do the simplest thing and just treat any short write to a * non-uptodate page as a zero-length write, and force the caller to * redo the whole thing. */ if (unlikely(copied < len && !folio_test_uptodate(folio))) return 0; iomap_set_range_uptodate(folio, offset_in_folio(folio, pos), len); iomap_set_range_dirty(folio, offset_in_folio(folio, pos), copied); filemap_dirty_folio(inode->i_mapping, folio); return copied; } static size_t iomap_write_end_inline(const struct iomap_iter *iter, struct folio *folio, loff_t pos, size_t copied) { const struct iomap *iomap = &iter->iomap; void *addr; WARN_ON_ONCE(!folio_test_uptodate(folio)); BUG_ON(!iomap_inline_data_valid(iomap)); flush_dcache_folio(folio); addr = kmap_local_folio(folio, pos); memcpy(iomap_inline_data(iomap, pos), addr, copied); kunmap_local(addr); mark_inode_dirty(iter->inode); return copied; } /* Returns the number of bytes copied. May be 0. Cannot be an errno. */ static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len, size_t copied, struct folio *folio) { const struct iomap *srcmap = iomap_iter_srcmap(iter); loff_t old_size = iter->inode->i_size; size_t ret; if (srcmap->type == IOMAP_INLINE) { ret = iomap_write_end_inline(iter, folio, pos, copied); } else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) { ret = block_write_end(NULL, iter->inode->i_mapping, pos, len, copied, &folio->page, NULL); } else { ret = __iomap_write_end(iter->inode, pos, len, copied, folio); } /* * Update the in-memory inode size after copying the data into the page * cache. 
It's up to the file system to write the updated size to disk, * preferably after I/O completion so that no stale data is exposed. */ if (pos + ret > old_size) { i_size_write(iter->inode, pos + ret); iter->iomap.flags |= IOMAP_F_SIZE_CHANGED; } __iomap_put_folio(iter, pos, ret, folio); if (old_size < pos) pagecache_isize_extended(iter->inode, old_size, pos); if (ret < len) iomap_write_failed(iter->inode, pos + ret, len - ret); return ret; } static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i) { loff_t length = iomap_length(iter); size_t chunk = PAGE_SIZE << MAX_PAGECACHE_ORDER; loff_t pos = iter->pos; ssize_t written = 0; long status = 0; struct address_space *mapping = iter->inode->i_mapping; unsigned int bdp_flags = (iter->flags & IOMAP_NOWAIT) ? BDP_ASYNC : 0; do { struct folio *folio; size_t offset; /* Offset into folio */ size_t bytes; /* Bytes to write to folio */ size_t copied; /* Bytes copied from user */ offset = pos & (chunk - 1); bytes = min(chunk - offset, iov_iter_count(i)); status = balance_dirty_pages_ratelimited_flags(mapping, bdp_flags); if (unlikely(status)) break; if (bytes > length) bytes = length; /* * Bring in the user page that we'll copy from _first_. * Otherwise there's a nasty deadlock on copying from the * same page as we're writing to, without it being marked * up-to-date. * * For async buffered writes the assumption is that the user * page has already been faulted in. This can be optimized by * faulting the user page. */ if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) { status = -EFAULT; break; } status = iomap_write_begin(iter, pos, bytes, &folio); if (unlikely(status)) break; if (iter->iomap.flags & IOMAP_F_STALE) break; offset = offset_in_folio(folio, pos); if (bytes > folio_size(folio) - offset) bytes = folio_size(folio) - offset; if (mapping_writably_mapped(mapping)) flush_dcache_folio(folio); copied = copy_folio_from_iter_atomic(folio, offset, bytes, i); status = iomap_write_end(iter, pos, bytes, copied, folio); if (unlikely(copied != status)) iov_iter_revert(i, copied - status); cond_resched(); if (unlikely(status == 0)) { /* * A short copy made iomap_write_end() reject the * thing entirely. Might be memory poisoning * halfway through, might be a race with munmap, * might be severe memory pressure. */ if (copied) bytes = copied; if (chunk > PAGE_SIZE) chunk /= 2; } else { pos += status; written += status; length -= status; } } while (iov_iter_count(i) && length); if (status == -EAGAIN) { iov_iter_revert(i, written); return -EAGAIN; } return written ? 
written : status; } ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i, const struct iomap_ops *ops) { struct iomap_iter iter = { .inode = iocb->ki_filp->f_mapping->host, .pos = iocb->ki_pos, .len = iov_iter_count(i), .flags = IOMAP_WRITE, }; ssize_t ret; if (iocb->ki_flags & IOCB_NOWAIT) iter.flags |= IOMAP_NOWAIT; while ((ret = iomap_iter(&iter, ops)) > 0) iter.processed = iomap_write_iter(&iter, i); if (unlikely(iter.pos == iocb->ki_pos)) return ret; ret = iter.pos - iocb->ki_pos; iocb->ki_pos = iter.pos; return ret; } EXPORT_SYMBOL_GPL(iomap_file_buffered_write); static int iomap_write_delalloc_ifs_punch(struct inode *inode, struct folio *folio, loff_t start_byte, loff_t end_byte, iomap_punch_t punch) { unsigned int first_blk, last_blk, i; loff_t last_byte; u8 blkbits = inode->i_blkbits; struct iomap_folio_state *ifs; int ret = 0; /* * When we have per-block dirty tracking, there can be * blocks within a folio which are marked uptodate * but not dirty. In that case it is necessary to punch * out such blocks to avoid leaking any delalloc blocks. */ ifs = folio->private; if (!ifs) return ret; last_byte = min_t(loff_t, end_byte - 1, folio_pos(folio) + folio_size(folio) - 1); first_blk = offset_in_folio(folio, start_byte) >> blkbits; last_blk = offset_in_folio(folio, last_byte) >> blkbits; for (i = first_blk; i <= last_blk; i++) { if (!ifs_block_is_dirty(folio, ifs, i)) { ret = punch(inode, folio_pos(folio) + (i << blkbits), 1 << blkbits); if (ret) return ret; } } return ret; } static int iomap_write_delalloc_punch(struct inode *inode, struct folio *folio, loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte, iomap_punch_t punch) { int ret = 0; if (!folio_test_dirty(folio)) return ret; /* if dirty, punch up to offset */ if (start_byte > *punch_start_byte) { ret = punch(inode, *punch_start_byte, start_byte - *punch_start_byte); if (ret) return ret; } /* Punch non-dirty blocks within folio */ ret = iomap_write_delalloc_ifs_punch(inode, folio, start_byte, end_byte, punch); if (ret) return ret; /* * Make sure the next punch start is correctly bound to * the end of this data range, not the end of the folio. */ *punch_start_byte = min_t(loff_t, end_byte, folio_pos(folio) + folio_size(folio)); return ret; } /* * Scan the data range passed to us for dirty page cache folios. If we find a * dirty folio, punch out the preceeding range and update the offset from which * the next punch will start from. * * We can punch out storage reservations under clean pages because they either * contain data that has been written back - in which case the delalloc punch * over that range is a no-op - or they have been read faults in which case they * contain zeroes and we can remove the delalloc backing range and any new * writes to those pages will do the normal hole filling operation... * * This makes the logic simple: we only need to keep the delalloc extents only * over the dirty ranges of the page cache. * * This function uses [start_byte, end_byte) intervals (i.e. open ended) to * simplify range iterations. 
*/ static int iomap_write_delalloc_scan(struct inode *inode, loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte, iomap_punch_t punch) { while (start_byte < end_byte) { struct folio *folio; int ret; /* grab locked page */ folio = filemap_lock_folio(inode->i_mapping, start_byte >> PAGE_SHIFT); if (IS_ERR(folio)) { start_byte = ALIGN_DOWN(start_byte, PAGE_SIZE) + PAGE_SIZE; continue; } ret = iomap_write_delalloc_punch(inode, folio, punch_start_byte, start_byte, end_byte, punch); if (ret) { folio_unlock(folio); folio_put(folio); return ret; } /* move offset to start of next folio in range */ start_byte = folio_next_index(folio) << PAGE_SHIFT; folio_unlock(folio); folio_put(folio); } return 0; } /* * Punch out all the delalloc blocks in the range given except for those that * have dirty data still pending in the page cache - those are going to be * written and so must still retain the delalloc backing for writeback. * * As we are scanning the page cache for data, we don't need to reimplement the * wheel - mapping_seek_hole_data() does exactly what we need to identify the * start and end of data ranges correctly even for sub-folio block sizes. This * byte range based iteration is especially convenient because it means we * don't have to care about variable size folios, nor where the start or end of * the data range lies within a folio, if they lie within the same folio or even * if there are multiple discontiguous data ranges within the folio. * * It should be noted that mapping_seek_hole_data() is not aware of EOF, and so * can return data ranges that exist in the cache beyond EOF. e.g. a page fault * spanning EOF will initialise the post-EOF data to zeroes and mark it up to * date. A write page fault can then mark it dirty. If we then fail a write() * beyond EOF into that up to date cached range, we allocate a delalloc block * beyond EOF and then have to punch it out. Because the range is up to date, * mapping_seek_hole_data() will return it, and we will skip the punch because * the folio is dirty. THis is incorrect - we always need to punch out delalloc * beyond EOF in this case as writeback will never write back and covert that * delalloc block beyond EOF. Hence we limit the cached data scan range to EOF, * resulting in always punching out the range from the EOF to the end of the * range the iomap spans. * * Intervals are of the form [start_byte, end_byte) (i.e. open ended) because it * matches the intervals returned by mapping_seek_hole_data(). i.e. SEEK_DATA * returns the start of a data range (start_byte), and SEEK_HOLE(start_byte) * returns the end of the data range (data_end). Using closed intervals would * require sprinkling this code with magic "+ 1" and "- 1" arithmetic and expose * the code to subtle off-by-one bugs.... */ static int iomap_write_delalloc_release(struct inode *inode, loff_t start_byte, loff_t end_byte, iomap_punch_t punch) { loff_t punch_start_byte = start_byte; loff_t scan_end_byte = min(i_size_read(inode), end_byte); int error = 0; /* * Lock the mapping to avoid races with page faults re-instantiating * folios and dirtying them via ->page_mkwrite whilst we walk the * cache and perform delalloc extent removal. Failing to do this can * leave dirty pages with no space reservation in the cache. 
*/ filemap_invalidate_lock(inode->i_mapping); while (start_byte < scan_end_byte) { loff_t data_end; start_byte = mapping_seek_hole_data(inode->i_mapping, start_byte, scan_end_byte, SEEK_DATA); /* * If there is no more data to scan, all that is left is to * punch out the remaining range. */ if (start_byte == -ENXIO || start_byte == scan_end_byte) break; if (start_byte < 0) { error = start_byte; goto out_unlock; } WARN_ON_ONCE(start_byte < punch_start_byte); WARN_ON_ONCE(start_byte > scan_end_byte); /* * We find the end of this contiguous cached data range by * seeking from start_byte to the beginning of the next hole. */ data_end = mapping_seek_hole_data(inode->i_mapping, start_byte, scan_end_byte, SEEK_HOLE); if (data_end < 0) { error = data_end; goto out_unlock; } WARN_ON_ONCE(data_end <= start_byte); WARN_ON_ONCE(data_end > scan_end_byte); error = iomap_write_delalloc_scan(inode, &punch_start_byte, start_byte, data_end, punch); if (error) goto out_unlock; /* The next data search starts at the end of this one. */ start_byte = data_end; } if (punch_start_byte < end_byte) error = punch(inode, punch_start_byte, end_byte - punch_start_byte); out_unlock: filemap_invalidate_unlock(inode->i_mapping); return error; } /* * When a short write occurs, the filesystem may need to remove reserved space * that was allocated in ->iomap_begin from it's ->iomap_end method. For * filesystems that use delayed allocation, we need to punch out delalloc * extents from the range that are not dirty in the page cache. As the write can * race with page faults, there can be dirty pages over the delalloc extent * outside the range of a short write but still within the delalloc extent * allocated for this iomap. * * This function uses [start_byte, end_byte) intervals (i.e. open ended) to * simplify range iterations. * * The punch() callback *must* only punch delalloc extents in the range passed * to it. It must skip over all other types of extents in the range and leave * them completely unchanged. It must do this punch atomically with respect to * other extent modifications. * * The punch() callback may be called with a folio locked to prevent writeback * extent allocation racing at the edge of the range we are currently punching. * The locked folio may or may not cover the range being punched, so it is not * safe for the punch() callback to lock folios itself. * * Lock order is: * * inode->i_rwsem (shared or exclusive) * inode->i_mapping->invalidate_lock (exclusive) * folio_lock() * ->punch * internal filesystem allocation lock */ int iomap_file_buffered_write_punch_delalloc(struct inode *inode, struct iomap *iomap, loff_t pos, loff_t length, ssize_t written, iomap_punch_t punch) { loff_t start_byte; loff_t end_byte; unsigned int blocksize = i_blocksize(inode); if (iomap->type != IOMAP_DELALLOC) return 0; /* If we didn't reserve the blocks, we're not allowed to punch them. */ if (!(iomap->flags & IOMAP_F_NEW)) return 0; /* * start_byte refers to the first unused block after a short write. If * nothing was written, round offset down to point at the first block in * the range. 
*/ if (unlikely(!written)) start_byte = round_down(pos, blocksize); else start_byte = round_up(pos + written, blocksize); end_byte = round_up(pos + length, blocksize); /* Nothing to do if we've written the entire delalloc extent */ if (start_byte >= end_byte) return 0; return iomap_write_delalloc_release(inode, start_byte, end_byte, punch); } EXPORT_SYMBOL_GPL(iomap_file_buffered_write_punch_delalloc); static loff_t iomap_unshare_iter(struct iomap_iter *iter) { struct iomap *iomap = &iter->iomap; const struct iomap *srcmap = iomap_iter_srcmap(iter); loff_t pos = iter->pos; loff_t length = iomap_length(iter); loff_t written = 0; /* don't bother with blocks that are not shared to start with */ if (!(iomap->flags & IOMAP_F_SHARED)) return length; /* don't bother with holes or unwritten extents */ if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN) return length; do { struct folio *folio; int status; size_t offset; size_t bytes = min_t(u64, SIZE_MAX, length); status = iomap_write_begin(iter, pos, bytes, &folio); if (unlikely(status)) return status; if (iomap->flags & IOMAP_F_STALE) break; offset = offset_in_folio(folio, pos); if (bytes > folio_size(folio) - offset) bytes = folio_size(folio) - offset; bytes = iomap_write_end(iter, pos, bytes, bytes, folio); if (WARN_ON_ONCE(bytes == 0)) return -EIO; cond_resched(); pos += bytes; written += bytes; length -= bytes; balance_dirty_pages_ratelimited(iter->inode->i_mapping); } while (length > 0); return written; } int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len, const struct iomap_ops *ops) { struct iomap_iter iter = { .inode = inode, .pos = pos, .len = len, .flags = IOMAP_WRITE | IOMAP_UNSHARE, }; int ret; while ((ret = iomap_iter(&iter, ops)) > 0) iter.processed = iomap_unshare_iter(&iter); return ret; } EXPORT_SYMBOL_GPL(iomap_file_unshare); static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero) { const struct iomap *srcmap = iomap_iter_srcmap(iter); loff_t pos = iter->pos; loff_t length = iomap_length(iter); loff_t written = 0; /* already zeroed? we're done. */ if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN) return length; do { struct folio *folio; int status; size_t offset; size_t bytes = min_t(u64, SIZE_MAX, length); status = iomap_write_begin(iter, pos, bytes, &folio); if (status) return status; if (iter->iomap.flags & IOMAP_F_STALE) break; offset = offset_in_folio(folio, pos); if (bytes > folio_size(folio) - offset) bytes = folio_size(folio) - offset; folio_zero_range(folio, offset, bytes); folio_mark_accessed(folio); bytes = iomap_write_end(iter, pos, bytes, bytes, folio); if (WARN_ON_ONCE(bytes == 0)) return -EIO; pos += bytes; length -= bytes; written += bytes; } while (length > 0); if (did_zero) *did_zero = true; return written; } int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero, const struct iomap_ops *ops) { struct iomap_iter iter = { .inode = inode, .pos = pos, .len = len, .flags = IOMAP_ZERO, }; int ret; while ((ret = iomap_iter(&iter, ops)) > 0) iter.processed = iomap_zero_iter(&iter, did_zero); return ret; } EXPORT_SYMBOL_GPL(iomap_zero_range); int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, const struct iomap_ops *ops) { unsigned int blocksize = i_blocksize(inode); unsigned int off = pos & (blocksize - 1); /* Block boundary? 
Nothing to do */ if (!off) return 0; return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops); } EXPORT_SYMBOL_GPL(iomap_truncate_page); static loff_t iomap_folio_mkwrite_iter(struct iomap_iter *iter, struct folio *folio) { loff_t length = iomap_length(iter); int ret; if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) { ret = __block_write_begin_int(folio, iter->pos, length, NULL, &iter->iomap); if (ret) return ret; block_commit_write(&folio->page, 0, length); } else { WARN_ON_ONCE(!folio_test_uptodate(folio)); folio_mark_dirty(folio); } return length; } vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops) { struct iomap_iter iter = { .inode = file_inode(vmf->vma->vm_file), .flags = IOMAP_WRITE | IOMAP_FAULT, }; struct folio *folio = page_folio(vmf->page); ssize_t ret; folio_lock(folio); ret = folio_mkwrite_check_truncate(folio, iter.inode); if (ret < 0) goto out_unlock; iter.pos = folio_pos(folio); iter.len = ret; while ((ret = iomap_iter(&iter, ops)) > 0) iter.processed = iomap_folio_mkwrite_iter(&iter, folio); if (ret < 0) goto out_unlock; folio_wait_stable(folio); return VM_FAULT_LOCKED; out_unlock: folio_unlock(folio); return vmf_fs_error(ret); } EXPORT_SYMBOL_GPL(iomap_page_mkwrite); static void iomap_finish_folio_write(struct inode *inode, struct folio *folio, size_t len, int error) { struct iomap_folio_state *ifs = folio->private; if (error) { folio_set_error(folio); mapping_set_error(inode->i_mapping, error); } WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !ifs); WARN_ON_ONCE(ifs && atomic_read(&ifs->write_bytes_pending) <= 0); if (!ifs || atomic_sub_and_test(len, &ifs->write_bytes_pending)) folio_end_writeback(folio); } /* * We're now finished for good with this ioend structure. Update the page * state, release holds on bios, and finally free up memory. Do not use the * ioend after this. */ static u32 iomap_finish_ioend(struct iomap_ioend *ioend, int error) { struct inode *inode = ioend->io_inode; struct bio *bio = &ioend->io_inline_bio; struct bio *last = ioend->io_bio, *next; u64 start = bio->bi_iter.bi_sector; loff_t offset = ioend->io_offset; bool quiet = bio_flagged(bio, BIO_QUIET); u32 folio_count = 0; for (bio = &ioend->io_inline_bio; bio; bio = next) { struct folio_iter fi; /* * For the last bio, bi_private points to the ioend, so we * need to explicitly end the iteration here. */ if (bio == last) next = NULL; else next = bio->bi_private; /* walk all folios in bio, ending page IO on them */ bio_for_each_folio_all(fi, bio) { iomap_finish_folio_write(inode, fi.folio, fi.length, error); folio_count++; } bio_put(bio); } /* The ioend has been freed by bio_put() */ if (unlikely(error && !quiet)) { printk_ratelimited(KERN_ERR "%s: writeback error on inode %lu, offset %lld, sector %llu", inode->i_sb->s_id, inode->i_ino, offset, start); } return folio_count; } /* * Ioend completion routine for merged bios. This can only be called from task * contexts as merged ioends can be of unbound length. Hence we have to break up * the writeback completions into manageable chunks to avoid long scheduler * holdoffs. We aim to keep scheduler holdoffs down below 10ms so that we get * good batch processing throughput without creating adverse scheduler latency * conditions. 
*/ void iomap_finish_ioends(struct iomap_ioend *ioend, int error) { struct list_head tmp; u32 completions; might_sleep(); list_replace_init(&ioend->io_list, &tmp); completions = iomap_finish_ioend(ioend, error); while (!list_empty(&tmp)) { if (completions > IOEND_BATCH_SIZE * 8) { cond_resched(); completions = 0; } ioend = list_first_entry(&tmp, struct iomap_ioend, io_list); list_del_init(&ioend->io_list); completions += iomap_finish_ioend(ioend, error); } } EXPORT_SYMBOL_GPL(iomap_finish_ioends); /* * We can merge two adjacent ioends if they have the same set of work to do. */ static bool iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next) { if (ioend->io_bio->bi_status != next->io_bio->bi_status) return false; if ((ioend->io_flags & IOMAP_F_SHARED) ^ (next->io_flags & IOMAP_F_SHARED)) return false; if ((ioend->io_type == IOMAP_UNWRITTEN) ^ (next->io_type == IOMAP_UNWRITTEN)) return false; if (ioend->io_offset + ioend->io_size != next->io_offset) return false; /* * Do not merge physically discontiguous ioends. The filesystem * completion functions will have to iterate the physical * discontiguities even if we merge the ioends at a logical level, so * we don't gain anything by merging physical discontiguities here. * * We cannot use bio->bi_iter.bi_sector here as it is modified during * submission so does not point to the start sector of the bio at * completion. */ if (ioend->io_sector + (ioend->io_size >> 9) != next->io_sector) return false; return true; } void iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends) { struct iomap_ioend *next; INIT_LIST_HEAD(&ioend->io_list); while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend, io_list))) { if (!iomap_ioend_can_merge(ioend, next)) break; list_move_tail(&next->io_list, &ioend->io_list); ioend->io_size += next->io_size; } } EXPORT_SYMBOL_GPL(iomap_ioend_try_merge); static int iomap_ioend_compare(void *priv, const struct list_head *a, const struct list_head *b) { struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list); struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list); if (ia->io_offset < ib->io_offset) return -1; if (ia->io_offset > ib->io_offset) return 1; return 0; } void iomap_sort_ioends(struct list_head *ioend_list) { list_sort(NULL, ioend_list, iomap_ioend_compare); } EXPORT_SYMBOL_GPL(iomap_sort_ioends); static void iomap_writepage_end_bio(struct bio *bio) { struct iomap_ioend *ioend = bio->bi_private; iomap_finish_ioend(ioend, blk_status_to_errno(bio->bi_status)); } /* * Submit the final bio for an ioend. * * If @error is non-zero, it means that we have a situation where some part of * the submission process has failed after we've marked pages for writeback * and unlocked them. In this situation, we need to fail the bio instead of * submitting it. This typically only happens on a filesystem shutdown. */ static int iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend, int error) { ioend->io_bio->bi_private = ioend; ioend->io_bio->bi_end_io = iomap_writepage_end_bio; if (wpc->ops->prepare_ioend) error = wpc->ops->prepare_ioend(ioend, error); if (error) { /* * If we're failing the IO now, just mark the ioend with an * error and finish it. This will run IO completion immediately * as there is only one reference to the ioend at this point in * time. 
*/ ioend->io_bio->bi_status = errno_to_blk_status(error); bio_endio(ioend->io_bio); return error; } submit_bio(ioend->io_bio); return 0; } static struct iomap_ioend * iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc, loff_t offset, sector_t sector, struct writeback_control *wbc) { struct iomap_ioend *ioend; struct bio *bio; bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_VECS, REQ_OP_WRITE | wbc_to_write_flags(wbc), GFP_NOFS, &iomap_ioend_bioset); bio->bi_iter.bi_sector = sector; wbc_init_bio(wbc, bio); ioend = container_of(bio, struct iomap_ioend, io_inline_bio); INIT_LIST_HEAD(&ioend->io_list); ioend->io_type = wpc->iomap.type; ioend->io_flags = wpc->iomap.flags; ioend->io_inode = inode; ioend->io_size = 0; ioend->io_folios = 0; ioend->io_offset = offset; ioend->io_bio = bio; ioend->io_sector = sector; return ioend; } /* * Allocate a new bio, and chain the old bio to the new one. * * Note that we have to perform the chaining in this unintuitive order * so that the bi_private linkage is set up in the right direction for the * traversal in iomap_finish_ioend(). */ static struct bio * iomap_chain_bio(struct bio *prev) { struct bio *new; new = bio_alloc(prev->bi_bdev, BIO_MAX_VECS, prev->bi_opf, GFP_NOFS); bio_clone_blkg_association(new, prev); new->bi_iter.bi_sector = bio_end_sector(prev); bio_chain(prev, new); bio_get(prev); /* for iomap_finish_ioend */ submit_bio(prev); return new; } static bool iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset, sector_t sector) { if ((wpc->iomap.flags & IOMAP_F_SHARED) != (wpc->ioend->io_flags & IOMAP_F_SHARED)) return false; if (wpc->iomap.type != wpc->ioend->io_type) return false; if (offset != wpc->ioend->io_offset + wpc->ioend->io_size) return false; if (sector != bio_end_sector(wpc->ioend->io_bio)) return false; /* * Limit ioend bio chain lengths to minimise IO completion latency. This * also prevents long tight loops ending page writeback on all the * folios in the ioend. */ if (wpc->ioend->io_folios >= IOEND_BATCH_SIZE) return false; return true; } /* * Test to see if we have an existing ioend structure that we could append to * first; otherwise finish off the current ioend and start another. */ static void iomap_add_to_ioend(struct inode *inode, loff_t pos, struct folio *folio, struct iomap_folio_state *ifs, struct iomap_writepage_ctx *wpc, struct writeback_control *wbc, struct list_head *iolist) { sector_t sector = iomap_sector(&wpc->iomap, pos); unsigned len = i_blocksize(inode); size_t poff = offset_in_folio(folio, pos); if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, pos, sector)) { if (wpc->ioend) list_add(&wpc->ioend->io_list, iolist); wpc->ioend = iomap_alloc_ioend(inode, wpc, pos, sector, wbc); } if (!bio_add_folio(wpc->ioend->io_bio, folio, len, poff)) { wpc->ioend->io_bio = iomap_chain_bio(wpc->ioend->io_bio); bio_add_folio_nofail(wpc->ioend->io_bio, folio, len, poff); } if (ifs) atomic_add(len, &ifs->write_bytes_pending); wpc->ioend->io_size += len; wbc_account_cgroup_owner(wbc, &folio->page, len); } /* * We implement an immediate ioend submission policy here to avoid needing to * chain multiple ioends and hence nest mempool allocations which can violate * the forward progress guarantees we need to provide. The current ioend we're * adding blocks to is cached in the writepage context, and if the new block * doesn't append to the cached ioend, it will create a new ioend and cache that * instead. 
* * If a new ioend is created and cached, the old ioend is returned and queued * locally for submission once the entire page is processed or an error has been * detected. While ioends are submitted immediately after they are completed, * batching optimisations are provided by higher level block plugging. * * At the end of a writeback pass, there will be a cached ioend remaining on the * writepage context that the caller will need to submit. */ static int iomap_writepage_map(struct iomap_writepage_ctx *wpc, struct writeback_control *wbc, struct inode *inode, struct folio *folio, u64 end_pos) { struct iomap_folio_state *ifs = folio->private; struct iomap_ioend *ioend, *next; unsigned len = i_blocksize(inode); unsigned nblocks = i_blocks_per_folio(inode, folio); u64 pos = folio_pos(folio); int error = 0, count = 0, i; LIST_HEAD(submit_list); WARN_ON_ONCE(end_pos <= pos); if (!ifs && nblocks > 1) { ifs = ifs_alloc(inode, folio, 0); iomap_set_range_dirty(folio, 0, end_pos - pos); } WARN_ON_ONCE(ifs && atomic_read(&ifs->write_bytes_pending) != 0); /* * Walk through the folio to find areas to write back. If we * run off the end of the current map or find the current map * invalid, grab a new one. */ for (i = 0; i < nblocks && pos < end_pos; i++, pos += len) { if (ifs && !ifs_block_is_dirty(folio, ifs, i)) continue; error = wpc->ops->map_blocks(wpc, inode, pos); if (error) break; trace_iomap_writepage_map(inode, &wpc->iomap); if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE)) continue; if (wpc->iomap.type == IOMAP_HOLE) continue; iomap_add_to_ioend(inode, pos, folio, ifs, wpc, wbc, &submit_list); count++; } if (count) wpc->ioend->io_folios++; WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list)); WARN_ON_ONCE(!folio_test_locked(folio)); WARN_ON_ONCE(folio_test_writeback(folio)); WARN_ON_ONCE(folio_test_dirty(folio)); /* * We cannot cancel the ioend directly here on error. We may have * already set other pages under writeback and hence we have to run I/O * completion to mark the error state of the pages under writeback * appropriately. */ if (unlikely(error)) { /* * Let the filesystem know what portion of the current page * failed to map. If the page hasn't been added to ioend, it * won't be affected by I/O completion and we must unlock it * now. */ if (wpc->ops->discard_folio) wpc->ops->discard_folio(folio, pos); if (!count) { folio_unlock(folio); goto done; } } /* * We can have dirty bits set past end of file in page_mkwrite path * while mapping the last partial folio. Hence it's better to clear * all the dirty bits in the folio here. */ iomap_clear_range_dirty(folio, 0, folio_size(folio)); folio_start_writeback(folio); folio_unlock(folio); /* * Preserve the original error if there was one; catch * submission errors here and propagate into subsequent ioend * submissions. */ list_for_each_entry_safe(ioend, next, &submit_list, io_list) { int error2; list_del_init(&ioend->io_list); error2 = iomap_submit_ioend(wpc, ioend, error); if (error2 && !error) error = error2; } /* * We can end up here with no error and nothing to write only if we race * with a partial page truncate on a sub-page block sized filesystem. */ if (!count) folio_end_writeback(folio); done: mapping_set_error(inode->i_mapping, error); return error; } /* * Write out a dirty page. * * For delalloc space on the page, we need to allocate space and flush it. * For unwritten space on the page, we need to start the conversion to * regular allocated space. 
*/ static int iomap_do_writepage(struct folio *folio, struct writeback_control *wbc, void *data) { struct iomap_writepage_ctx *wpc = data; struct inode *inode = folio->mapping->host; u64 end_pos, isize; trace_iomap_writepage(inode, folio_pos(folio), folio_size(folio)); /* * Refuse to write the folio out if we're called from reclaim context. * * This avoids stack overflows when called from deeply used stacks in * random callers for direct reclaim or memcg reclaim. We explicitly * allow reclaim from kswapd as the stack usage there is relatively low. * * This should never happen except in the case of a VM regression so * warn about it. */ if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) == PF_MEMALLOC)) goto redirty; /* * Is this folio beyond the end of the file? * * The folio index is less than the end_index, adjust the end_pos * to the highest offset that this folio should represent. * ----------------------------------------------------- * | file mapping | <EOF> | * ----------------------------------------------------- * | Page ... | Page N-2 | Page N-1 | Page N | | * ^--------------------------------^----------|-------- * | desired writeback range | see else | * ---------------------------------^------------------| */ isize = i_size_read(inode); end_pos = folio_pos(folio) + folio_size(folio); if (end_pos > isize) { /* * Check whether the page to write out is beyond or straddles * i_size or not. * ------------------------------------------------------- * | file mapping | <EOF> | * ------------------------------------------------------- * | Page ... | Page N-2 | Page N-1 | Page N | Beyond | * ^--------------------------------^-----------|--------- * | | Straddles | * ---------------------------------^-----------|--------| */ size_t poff = offset_in_folio(folio, isize); pgoff_t end_index = isize >> PAGE_SHIFT; /* * Skip the page if it's fully outside i_size, e.g. * due to a truncate operation that's in progress. We've * cleaned this page and truncate will finish things off for * us. * * Note that the end_index is unsigned long. If the given * offset is greater than 16TB on a 32-bit system then if we * checked if the page is fully outside i_size with * "if (page->index >= end_index + 1)", "end_index + 1" would * overflow and evaluate to 0. Hence this page would be * redirtied and written out repeatedly, which would result in * an infinite loop; the user program performing this operation * would hang. Instead, we can detect this situation by * checking if the page is totally beyond i_size or if its * offset is just equal to the EOF. */ if (folio->index > end_index || (folio->index == end_index && poff == 0)) goto unlock; /* * The page straddles i_size. It must be zeroed out on each * and every writepage invocation because it may be mmapped. * "A file is mapped in multiples of the page size. For a file * that is not a multiple of the page size, the remaining * memory is zeroed when mapped, and writes to that region are * not written out to the file." 
*/ folio_zero_segment(folio, poff, folio_size(folio)); end_pos = isize; } return iomap_writepage_map(wpc, wbc, inode, folio, end_pos); redirty: folio_redirty_for_writepage(wbc, folio); unlock: folio_unlock(folio); return 0; } int iomap_writepages(struct address_space *mapping, struct writeback_control *wbc, struct iomap_writepage_ctx *wpc, const struct iomap_writeback_ops *ops) { int ret; wpc->ops = ops; ret = write_cache_pages(mapping, wbc, iomap_do_writepage, wpc); if (!wpc->ioend) return ret; return iomap_submit_ioend(wpc, wpc->ioend, ret); } EXPORT_SYMBOL_GPL(iomap_writepages); static int __init iomap_init(void) { return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE), offsetof(struct iomap_ioend, io_inline_bio), BIOSET_NEED_BVECS); } fs_initcall(iomap_init);
linux-master
fs/iomap/buffered-io.c
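The buffered-write path above is driven entirely through the iomap_ops callbacks a filesystem supplies. As a rough illustration, the sketch below shows how a filesystem's ->write_iter might hand off to iomap_file_buffered_write(); the myfs_* names and myfs_iomap_ops are hypothetical placeholders, and real callers add their own locking and error handling around this shape.

/*
 * Hypothetical sketch: calling iomap_file_buffered_write() from a
 * filesystem's ->write_iter. "myfs_iomap_ops" is an assumed iomap_ops
 * instance (its ->iomap_begin/->iomap_end map file offsets to extents)
 * and is not defined here.
 */
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/uio.h>

extern const struct iomap_ops myfs_iomap_ops;	/* assumption: provided elsewhere */

static ssize_t myfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);	/* honour O_APPEND, rlimits, etc. */
	if (ret > 0)
		ret = iomap_file_buffered_write(iocb, from, &myfs_iomap_ops);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);	/* flush for O_SYNC/O_DSYNC */
	return ret;
}

Note that IOCB_NOWAIT handling is already taken care of inside iomap_file_buffered_write(), which translates it into IOMAP_NOWAIT as shown in the code above.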
// SPDX-License-Identifier: GPL-2.0 /* * fs/bfs/dir.c * BFS directory operations. * Copyright (C) 1999-2018 Tigran Aivazian <[email protected]> * Made endianness-clean by Andrew Stribblehill <[email protected]> 2005 */ #include <linux/time.h> #include <linux/string.h> #include <linux/fs.h> #include <linux/buffer_head.h> #include <linux/sched.h> #include "bfs.h" #undef DEBUG #ifdef DEBUG #define dprintf(x...) printf(x) #else #define dprintf(x...) #endif static int bfs_add_entry(struct inode *dir, const struct qstr *child, int ino); static struct buffer_head *bfs_find_entry(struct inode *dir, const struct qstr *child, struct bfs_dirent **res_dir); static int bfs_readdir(struct file *f, struct dir_context *ctx) { struct inode *dir = file_inode(f); struct buffer_head *bh; struct bfs_dirent *de; unsigned int offset; int block; if (ctx->pos & (BFS_DIRENT_SIZE - 1)) { printf("Bad f_pos=%08lx for %s:%08lx\n", (unsigned long)ctx->pos, dir->i_sb->s_id, dir->i_ino); return -EINVAL; } while (ctx->pos < dir->i_size) { offset = ctx->pos & (BFS_BSIZE - 1); block = BFS_I(dir)->i_sblock + (ctx->pos >> BFS_BSIZE_BITS); bh = sb_bread(dir->i_sb, block); if (!bh) { ctx->pos += BFS_BSIZE - offset; continue; } do { de = (struct bfs_dirent *)(bh->b_data + offset); if (de->ino) { int size = strnlen(de->name, BFS_NAMELEN); if (!dir_emit(ctx, de->name, size, le16_to_cpu(de->ino), DT_UNKNOWN)) { brelse(bh); return 0; } } offset += BFS_DIRENT_SIZE; ctx->pos += BFS_DIRENT_SIZE; } while ((offset < BFS_BSIZE) && (ctx->pos < dir->i_size)); brelse(bh); } return 0; } const struct file_operations bfs_dir_operations = { .read = generic_read_dir, .iterate_shared = bfs_readdir, .fsync = generic_file_fsync, .llseek = generic_file_llseek, }; static int bfs_create(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) { int err; struct inode *inode; struct super_block *s = dir->i_sb; struct bfs_sb_info *info = BFS_SB(s); unsigned long ino; inode = new_inode(s); if (!inode) return -ENOMEM; mutex_lock(&info->bfs_lock); ino = find_first_zero_bit(info->si_imap, info->si_lasti + 1); if (ino > info->si_lasti) { mutex_unlock(&info->bfs_lock); iput(inode); return -ENOSPC; } set_bit(ino, info->si_imap); info->si_freei--; inode_init_owner(&nop_mnt_idmap, inode, dir, mode); inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode); inode->i_blocks = 0; inode->i_op = &bfs_file_inops; inode->i_fop = &bfs_file_operations; inode->i_mapping->a_ops = &bfs_aops; inode->i_ino = ino; BFS_I(inode)->i_dsk_ino = ino; BFS_I(inode)->i_sblock = 0; BFS_I(inode)->i_eblock = 0; insert_inode_hash(inode); mark_inode_dirty(inode); bfs_dump_imap("create", s); err = bfs_add_entry(dir, &dentry->d_name, inode->i_ino); if (err) { inode_dec_link_count(inode); mutex_unlock(&info->bfs_lock); iput(inode); return err; } mutex_unlock(&info->bfs_lock); d_instantiate(dentry, inode); return 0; } static struct dentry *bfs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { struct inode *inode = NULL; struct buffer_head *bh; struct bfs_dirent *de; struct bfs_sb_info *info = BFS_SB(dir->i_sb); if (dentry->d_name.len > BFS_NAMELEN) return ERR_PTR(-ENAMETOOLONG); mutex_lock(&info->bfs_lock); bh = bfs_find_entry(dir, &dentry->d_name, &de); if (bh) { unsigned long ino = (unsigned long)le16_to_cpu(de->ino); brelse(bh); inode = bfs_iget(dir->i_sb, ino); } mutex_unlock(&info->bfs_lock); return d_splice_alias(inode, dentry); } static int bfs_link(struct dentry *old, struct inode *dir, struct dentry *new) { struct inode 
*inode = d_inode(old); struct bfs_sb_info *info = BFS_SB(inode->i_sb); int err; mutex_lock(&info->bfs_lock); err = bfs_add_entry(dir, &new->d_name, inode->i_ino); if (err) { mutex_unlock(&info->bfs_lock); return err; } inc_nlink(inode); inode_set_ctime_current(inode); mark_inode_dirty(inode); ihold(inode); d_instantiate(new, inode); mutex_unlock(&info->bfs_lock); return 0; } static int bfs_unlink(struct inode *dir, struct dentry *dentry) { int error = -ENOENT; struct inode *inode = d_inode(dentry); struct buffer_head *bh; struct bfs_dirent *de; struct bfs_sb_info *info = BFS_SB(inode->i_sb); mutex_lock(&info->bfs_lock); bh = bfs_find_entry(dir, &dentry->d_name, &de); if (!bh || (le16_to_cpu(de->ino) != inode->i_ino)) goto out_brelse; if (!inode->i_nlink) { printf("unlinking non-existent file %s:%lu (nlink=%d)\n", inode->i_sb->s_id, inode->i_ino, inode->i_nlink); set_nlink(inode, 1); } de->ino = 0; mark_buffer_dirty_inode(bh, dir); dir->i_mtime = inode_set_ctime_current(dir); mark_inode_dirty(dir); inode_set_ctime_to_ts(inode, inode_get_ctime(dir)); inode_dec_link_count(inode); error = 0; out_brelse: brelse(bh); mutex_unlock(&info->bfs_lock); return error; } static int bfs_rename(struct mnt_idmap *idmap, struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) { struct inode *old_inode, *new_inode; struct buffer_head *old_bh, *new_bh; struct bfs_dirent *old_de, *new_de; struct bfs_sb_info *info; int error = -ENOENT; if (flags & ~RENAME_NOREPLACE) return -EINVAL; old_bh = new_bh = NULL; old_inode = d_inode(old_dentry); if (S_ISDIR(old_inode->i_mode)) return -EINVAL; info = BFS_SB(old_inode->i_sb); mutex_lock(&info->bfs_lock); old_bh = bfs_find_entry(old_dir, &old_dentry->d_name, &old_de); if (!old_bh || (le16_to_cpu(old_de->ino) != old_inode->i_ino)) goto end_rename; error = -EPERM; new_inode = d_inode(new_dentry); new_bh = bfs_find_entry(new_dir, &new_dentry->d_name, &new_de); if (new_bh && !new_inode) { brelse(new_bh); new_bh = NULL; } if (!new_bh) { error = bfs_add_entry(new_dir, &new_dentry->d_name, old_inode->i_ino); if (error) goto end_rename; } old_de->ino = 0; old_dir->i_mtime = inode_set_ctime_current(old_dir); mark_inode_dirty(old_dir); if (new_inode) { inode_set_ctime_current(new_inode); inode_dec_link_count(new_inode); } mark_buffer_dirty_inode(old_bh, old_dir); error = 0; end_rename: mutex_unlock(&info->bfs_lock); brelse(old_bh); brelse(new_bh); return error; } const struct inode_operations bfs_dir_inops = { .create = bfs_create, .lookup = bfs_lookup, .link = bfs_link, .unlink = bfs_unlink, .rename = bfs_rename, }; static int bfs_add_entry(struct inode *dir, const struct qstr *child, int ino) { const unsigned char *name = child->name; int namelen = child->len; struct buffer_head *bh; struct bfs_dirent *de; int block, sblock, eblock, off, pos; int i; dprintf("name=%s, namelen=%d\n", name, namelen); if (!namelen) return -ENOENT; if (namelen > BFS_NAMELEN) return -ENAMETOOLONG; sblock = BFS_I(dir)->i_sblock; eblock = BFS_I(dir)->i_eblock; for (block = sblock; block <= eblock; block++) { bh = sb_bread(dir->i_sb, block); if (!bh) return -EIO; for (off = 0; off < BFS_BSIZE; off += BFS_DIRENT_SIZE) { de = (struct bfs_dirent *)(bh->b_data + off); if (!de->ino) { pos = (block - sblock) * BFS_BSIZE + off; if (pos >= dir->i_size) { dir->i_size += BFS_DIRENT_SIZE; inode_set_ctime_current(dir); } dir->i_mtime = inode_set_ctime_current(dir); mark_inode_dirty(dir); de->ino = cpu_to_le16((u16)ino); for (i = 0; i < BFS_NAMELEN; i++) 
de->name[i] = (i < namelen) ? name[i] : 0; mark_buffer_dirty_inode(bh, dir); brelse(bh); return 0; } } brelse(bh); } return -ENOSPC; } static inline int bfs_namecmp(int len, const unsigned char *name, const char *buffer) { if ((len < BFS_NAMELEN) && buffer[len]) return 0; return !memcmp(name, buffer, len); } static struct buffer_head *bfs_find_entry(struct inode *dir, const struct qstr *child, struct bfs_dirent **res_dir) { unsigned long block = 0, offset = 0; struct buffer_head *bh = NULL; struct bfs_dirent *de; const unsigned char *name = child->name; int namelen = child->len; *res_dir = NULL; if (namelen > BFS_NAMELEN) return NULL; while (block * BFS_BSIZE + offset < dir->i_size) { if (!bh) { bh = sb_bread(dir->i_sb, BFS_I(dir)->i_sblock + block); if (!bh) { block++; continue; } } de = (struct bfs_dirent *)(bh->b_data + offset); offset += BFS_DIRENT_SIZE; if (le16_to_cpu(de->ino) && bfs_namecmp(namelen, name, de->name)) { *res_dir = de; return bh; } if (offset < bh->b_size) continue; brelse(bh); bh = NULL; offset = 0; block++; } brelse(bh); return NULL; }
linux-master
fs/bfs/dir.c
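bfs_find_entry() and bfs_namecmp() above walk fixed-size directory entries within fixed-size blocks. The stand-alone user-space sketch below mirrors that scan; the layout constants are assumptions taken to match bfs.h (512-byte blocks, 16-byte entries holding a little-endian 16-bit inode number and a 14-byte zero-padded name), and the code deliberately ignores endianness conversion, as noted in the comments.

/*
 * User-space sketch of the directory scan performed by
 * bfs_find_entry()/bfs_namecmp(). Layout values are assumed to mirror
 * bfs.h; a zero inode number marks a free slot.
 */
#include <stdint.h>
#include <string.h>

#define BLK_SIZE  512			/* assumed BFS_BSIZE */
#define NAME_LEN  14			/* assumed BFS_NAMELEN */

struct dirent16 {
	uint16_t ino;			/* 0 means "free slot" */
	char name[NAME_LEN];		/* zero padded, no NUL if full length */
};

/* Same rule as bfs_namecmp(): reject if the on-disk name is longer than @len. */
static int namecmp(size_t len, const char *name, const char *buf)
{
	if (len < NAME_LEN && buf[len])
		return 0;
	return !memcmp(name, buf, len);
}

/* Return the inode number for @name within one directory block, or 0. */
static uint16_t find_in_block(const unsigned char *block, const char *name)
{
	size_t off, len = strlen(name);

	for (off = 0; off + sizeof(struct dirent16) <= BLK_SIZE;
	     off += sizeof(struct dirent16)) {
		const struct dirent16 *de = (const void *)(block + off);

		if (de->ino && namecmp(len, name, de->name))
			return de->ino;	/* kernel code byte-swaps via le16_to_cpu() */
	}
	return 0;
}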
// SPDX-License-Identifier: GPL-2.0-only /* * fs/bfs/inode.c * BFS superblock and inode operations. * Copyright (C) 1999-2018 Tigran Aivazian <[email protected]> * From fs/minix, Copyright (C) 1991, 1992 Linus Torvalds. * Made endianness-clean by Andrew Stribblehill <[email protected]>, 2005. */ #include <linux/module.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/buffer_head.h> #include <linux/vfs.h> #include <linux/writeback.h> #include <linux/uio.h> #include <linux/uaccess.h> #include "bfs.h" MODULE_AUTHOR("Tigran Aivazian <[email protected]>"); MODULE_DESCRIPTION("SCO UnixWare BFS filesystem for Linux"); MODULE_LICENSE("GPL"); #undef DEBUG #ifdef DEBUG #define dprintf(x...) printf(x) #else #define dprintf(x...) #endif struct inode *bfs_iget(struct super_block *sb, unsigned long ino) { struct bfs_inode *di; struct inode *inode; struct buffer_head *bh; int block, off; inode = iget_locked(sb, ino); if (!inode) return ERR_PTR(-ENOMEM); if (!(inode->i_state & I_NEW)) return inode; if ((ino < BFS_ROOT_INO) || (ino > BFS_SB(inode->i_sb)->si_lasti)) { printf("Bad inode number %s:%08lx\n", inode->i_sb->s_id, ino); goto error; } block = (ino - BFS_ROOT_INO) / BFS_INODES_PER_BLOCK + 1; bh = sb_bread(inode->i_sb, block); if (!bh) { printf("Unable to read inode %s:%08lx\n", inode->i_sb->s_id, ino); goto error; } off = (ino - BFS_ROOT_INO) % BFS_INODES_PER_BLOCK; di = (struct bfs_inode *)bh->b_data + off; inode->i_mode = 0x0000FFFF & le32_to_cpu(di->i_mode); if (le32_to_cpu(di->i_vtype) == BFS_VDIR) { inode->i_mode |= S_IFDIR; inode->i_op = &bfs_dir_inops; inode->i_fop = &bfs_dir_operations; } else if (le32_to_cpu(di->i_vtype) == BFS_VREG) { inode->i_mode |= S_IFREG; inode->i_op = &bfs_file_inops; inode->i_fop = &bfs_file_operations; inode->i_mapping->a_ops = &bfs_aops; } BFS_I(inode)->i_sblock = le32_to_cpu(di->i_sblock); BFS_I(inode)->i_eblock = le32_to_cpu(di->i_eblock); BFS_I(inode)->i_dsk_ino = le16_to_cpu(di->i_ino); i_uid_write(inode, le32_to_cpu(di->i_uid)); i_gid_write(inode, le32_to_cpu(di->i_gid)); set_nlink(inode, le32_to_cpu(di->i_nlink)); inode->i_size = BFS_FILESIZE(di); inode->i_blocks = BFS_FILEBLOCKS(di); inode->i_atime.tv_sec = le32_to_cpu(di->i_atime); inode->i_mtime.tv_sec = le32_to_cpu(di->i_mtime); inode_set_ctime(inode, le32_to_cpu(di->i_ctime), 0); inode->i_atime.tv_nsec = 0; inode->i_mtime.tv_nsec = 0; brelse(bh); unlock_new_inode(inode); return inode; error: iget_failed(inode); return ERR_PTR(-EIO); } static struct bfs_inode *find_inode(struct super_block *sb, u16 ino, struct buffer_head **p) { if ((ino < BFS_ROOT_INO) || (ino > BFS_SB(sb)->si_lasti)) { printf("Bad inode number %s:%08x\n", sb->s_id, ino); return ERR_PTR(-EIO); } ino -= BFS_ROOT_INO; *p = sb_bread(sb, 1 + ino / BFS_INODES_PER_BLOCK); if (!*p) { printf("Unable to read inode %s:%08x\n", sb->s_id, ino); return ERR_PTR(-EIO); } return (struct bfs_inode *)(*p)->b_data + ino % BFS_INODES_PER_BLOCK; } static int bfs_write_inode(struct inode *inode, struct writeback_control *wbc) { struct bfs_sb_info *info = BFS_SB(inode->i_sb); unsigned int ino = (u16)inode->i_ino; unsigned long i_sblock; struct bfs_inode *di; struct buffer_head *bh; int err = 0; dprintf("ino=%08x\n", ino); di = find_inode(inode->i_sb, ino, &bh); if (IS_ERR(di)) return PTR_ERR(di); mutex_lock(&info->bfs_lock); if (ino == BFS_ROOT_INO) di->i_vtype = cpu_to_le32(BFS_VDIR); else di->i_vtype = cpu_to_le32(BFS_VREG); di->i_ino = cpu_to_le16(ino); di->i_mode = cpu_to_le32(inode->i_mode); 
di->i_uid = cpu_to_le32(i_uid_read(inode)); di->i_gid = cpu_to_le32(i_gid_read(inode)); di->i_nlink = cpu_to_le32(inode->i_nlink); di->i_atime = cpu_to_le32(inode->i_atime.tv_sec); di->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec); di->i_ctime = cpu_to_le32(inode_get_ctime(inode).tv_sec); i_sblock = BFS_I(inode)->i_sblock; di->i_sblock = cpu_to_le32(i_sblock); di->i_eblock = cpu_to_le32(BFS_I(inode)->i_eblock); di->i_eoffset = cpu_to_le32(i_sblock * BFS_BSIZE + inode->i_size - 1); mark_buffer_dirty(bh); if (wbc->sync_mode == WB_SYNC_ALL) { sync_dirty_buffer(bh); if (buffer_req(bh) && !buffer_uptodate(bh)) err = -EIO; } brelse(bh); mutex_unlock(&info->bfs_lock); return err; } static void bfs_evict_inode(struct inode *inode) { unsigned long ino = inode->i_ino; struct bfs_inode *di; struct buffer_head *bh; struct super_block *s = inode->i_sb; struct bfs_sb_info *info = BFS_SB(s); struct bfs_inode_info *bi = BFS_I(inode); dprintf("ino=%08lx\n", ino); truncate_inode_pages_final(&inode->i_data); invalidate_inode_buffers(inode); clear_inode(inode); if (inode->i_nlink) return; di = find_inode(s, inode->i_ino, &bh); if (IS_ERR(di)) return; mutex_lock(&info->bfs_lock); /* clear on-disk inode */ memset(di, 0, sizeof(struct bfs_inode)); mark_buffer_dirty(bh); brelse(bh); if (bi->i_dsk_ino) { if (bi->i_sblock) info->si_freeb += bi->i_eblock + 1 - bi->i_sblock; info->si_freei++; clear_bit(ino, info->si_imap); bfs_dump_imap("evict_inode", s); } /* * If this was the last file, make the previous block * "last block of the last file" even if there is no * real file there, saves us 1 gap. */ if (info->si_lf_eblk == bi->i_eblock) info->si_lf_eblk = bi->i_sblock - 1; mutex_unlock(&info->bfs_lock); } static void bfs_put_super(struct super_block *s) { struct bfs_sb_info *info = BFS_SB(s); if (!info) return; mutex_destroy(&info->bfs_lock); kfree(info); s->s_fs_info = NULL; } static int bfs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *s = dentry->d_sb; struct bfs_sb_info *info = BFS_SB(s); u64 id = huge_encode_dev(s->s_bdev->bd_dev); buf->f_type = BFS_MAGIC; buf->f_bsize = s->s_blocksize; buf->f_blocks = info->si_blocks; buf->f_bfree = buf->f_bavail = info->si_freeb; buf->f_files = info->si_lasti + 1 - BFS_ROOT_INO; buf->f_ffree = info->si_freei; buf->f_fsid = u64_to_fsid(id); buf->f_namelen = BFS_NAMELEN; return 0; } static struct kmem_cache *bfs_inode_cachep; static struct inode *bfs_alloc_inode(struct super_block *sb) { struct bfs_inode_info *bi; bi = alloc_inode_sb(sb, bfs_inode_cachep, GFP_KERNEL); if (!bi) return NULL; return &bi->vfs_inode; } static void bfs_free_inode(struct inode *inode) { kmem_cache_free(bfs_inode_cachep, BFS_I(inode)); } static void init_once(void *foo) { struct bfs_inode_info *bi = foo; inode_init_once(&bi->vfs_inode); } static int __init init_inodecache(void) { bfs_inode_cachep = kmem_cache_create("bfs_inode_cache", sizeof(struct bfs_inode_info), 0, (SLAB_RECLAIM_ACCOUNT| SLAB_MEM_SPREAD|SLAB_ACCOUNT), init_once); if (bfs_inode_cachep == NULL) return -ENOMEM; return 0; } static void destroy_inodecache(void) { /* * Make sure all delayed rcu free inodes are flushed before we * destroy cache. 
*/ rcu_barrier(); kmem_cache_destroy(bfs_inode_cachep); } static const struct super_operations bfs_sops = { .alloc_inode = bfs_alloc_inode, .free_inode = bfs_free_inode, .write_inode = bfs_write_inode, .evict_inode = bfs_evict_inode, .put_super = bfs_put_super, .statfs = bfs_statfs, }; void bfs_dump_imap(const char *prefix, struct super_block *s) { #ifdef DEBUG int i; char *tmpbuf = (char *)get_zeroed_page(GFP_KERNEL); if (!tmpbuf) return; for (i = BFS_SB(s)->si_lasti; i >= 0; i--) { if (i > PAGE_SIZE - 100) break; if (test_bit(i, BFS_SB(s)->si_imap)) strcat(tmpbuf, "1"); else strcat(tmpbuf, "0"); } printf("%s: lasti=%08lx <%s>\n", prefix, BFS_SB(s)->si_lasti, tmpbuf); free_page((unsigned long)tmpbuf); #endif } static int bfs_fill_super(struct super_block *s, void *data, int silent) { struct buffer_head *bh, *sbh; struct bfs_super_block *bfs_sb; struct inode *inode; unsigned i; struct bfs_sb_info *info; int ret = -EINVAL; unsigned long i_sblock, i_eblock, i_eoff, s_size; info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; mutex_init(&info->bfs_lock); s->s_fs_info = info; s->s_time_min = 0; s->s_time_max = U32_MAX; sb_set_blocksize(s, BFS_BSIZE); sbh = sb_bread(s, 0); if (!sbh) goto out; bfs_sb = (struct bfs_super_block *)sbh->b_data; if (le32_to_cpu(bfs_sb->s_magic) != BFS_MAGIC) { if (!silent) printf("No BFS filesystem on %s (magic=%08x)\n", s->s_id, le32_to_cpu(bfs_sb->s_magic)); goto out1; } if (BFS_UNCLEAN(bfs_sb, s) && !silent) printf("%s is unclean, continuing\n", s->s_id); s->s_magic = BFS_MAGIC; if (le32_to_cpu(bfs_sb->s_start) > le32_to_cpu(bfs_sb->s_end) || le32_to_cpu(bfs_sb->s_start) < sizeof(struct bfs_super_block) + sizeof(struct bfs_dirent)) { printf("Superblock is corrupted on %s\n", s->s_id); goto out1; } info->si_lasti = (le32_to_cpu(bfs_sb->s_start) - BFS_BSIZE) / sizeof(struct bfs_inode) + BFS_ROOT_INO - 1; if (info->si_lasti == BFS_MAX_LASTI) printf("NOTE: filesystem %s was created with 512 inodes, the real maximum is 511, mounting anyway\n", s->s_id); else if (info->si_lasti > BFS_MAX_LASTI) { printf("Impossible last inode number %lu > %d on %s\n", info->si_lasti, BFS_MAX_LASTI, s->s_id); goto out1; } for (i = 0; i < BFS_ROOT_INO; i++) set_bit(i, info->si_imap); s->s_op = &bfs_sops; inode = bfs_iget(s, BFS_ROOT_INO); if (IS_ERR(inode)) { ret = PTR_ERR(inode); goto out1; } s->s_root = d_make_root(inode); if (!s->s_root) { ret = -ENOMEM; goto out1; } info->si_blocks = (le32_to_cpu(bfs_sb->s_end) + 1) >> BFS_BSIZE_BITS; info->si_freeb = (le32_to_cpu(bfs_sb->s_end) + 1 - le32_to_cpu(bfs_sb->s_start)) >> BFS_BSIZE_BITS; info->si_freei = 0; info->si_lf_eblk = 0; /* can we read the last block? 
*/ bh = sb_bread(s, info->si_blocks - 1); if (!bh) { printf("Last block not available on %s: %lu\n", s->s_id, info->si_blocks - 1); ret = -EIO; goto out2; } brelse(bh); bh = NULL; for (i = BFS_ROOT_INO; i <= info->si_lasti; i++) { struct bfs_inode *di; int block = (i - BFS_ROOT_INO) / BFS_INODES_PER_BLOCK + 1; int off = (i - BFS_ROOT_INO) % BFS_INODES_PER_BLOCK; unsigned long eblock; if (!off) { brelse(bh); bh = sb_bread(s, block); } if (!bh) continue; di = (struct bfs_inode *)bh->b_data + off; /* test if filesystem is not corrupted */ i_eoff = le32_to_cpu(di->i_eoffset); i_sblock = le32_to_cpu(di->i_sblock); i_eblock = le32_to_cpu(di->i_eblock); s_size = le32_to_cpu(bfs_sb->s_end); if (i_sblock > info->si_blocks || i_eblock > info->si_blocks || i_sblock > i_eblock || (i_eoff != le32_to_cpu(-1) && i_eoff > s_size) || i_sblock * BFS_BSIZE > i_eoff) { printf("Inode 0x%08x corrupted on %s\n", i, s->s_id); brelse(bh); ret = -EIO; goto out2; } if (!di->i_ino) { info->si_freei++; continue; } set_bit(i, info->si_imap); info->si_freeb -= BFS_FILEBLOCKS(di); eblock = le32_to_cpu(di->i_eblock); if (eblock > info->si_lf_eblk) info->si_lf_eblk = eblock; } brelse(bh); brelse(sbh); bfs_dump_imap("fill_super", s); return 0; out2: dput(s->s_root); s->s_root = NULL; out1: brelse(sbh); out: mutex_destroy(&info->bfs_lock); kfree(info); s->s_fs_info = NULL; return ret; } static struct dentry *bfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_bdev(fs_type, flags, dev_name, data, bfs_fill_super); } static struct file_system_type bfs_fs_type = { .owner = THIS_MODULE, .name = "bfs", .mount = bfs_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; MODULE_ALIAS_FS("bfs"); static int __init init_bfs_fs(void) { int err = init_inodecache(); if (err) goto out1; err = register_filesystem(&bfs_fs_type); if (err) goto out; return 0; out: destroy_inodecache(); out1: return err; } static void __exit exit_bfs_fs(void) { unregister_filesystem(&bfs_fs_type); destroy_inodecache(); } module_init(init_bfs_fs) module_exit(exit_bfs_fs)
linux-master
fs/bfs/inode.c
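bfs_iget() and find_inode() above locate an on-disk inode purely by arithmetic: the inode table starts at block 1 (block 0 holds the superblock) and each block holds a fixed number of inode slots. The small worked sketch below reproduces that arithmetic; the constants are assumptions mirroring bfs.h (root inode number 2, 64-byte on-disk inodes, hence 8 per 512-byte block).

/*
 * Sketch of the inode lookup arithmetic in bfs_iget()/find_inode().
 * ROOT_INO and INODES_PER_BLOCK are assumed values matching bfs.h.
 */
#include <stdio.h>

#define ROOT_INO          2	/* assumed BFS_ROOT_INO */
#define INODES_PER_BLOCK  8	/* assumed BFS_INODES_PER_BLOCK */

static void locate_inode(unsigned long ino)
{
	unsigned long block = (ino - ROOT_INO) / INODES_PER_BLOCK + 1;
	unsigned long slot  = (ino - ROOT_INO) % INODES_PER_BLOCK;

	printf("inode %lu -> block %lu, slot %lu\n", ino, block, slot);
}

int main(void)
{
	locate_inode(2);	/* root inode: block 1, slot 0 */
	locate_inode(9);	/* block 1, slot 7 */
	locate_inode(10);	/* block 2, slot 0 */
	return 0;
}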
// SPDX-License-Identifier: GPL-2.0 /* * fs/bfs/file.c * BFS file operations. * Copyright (C) 1999-2018 Tigran Aivazian <[email protected]> * * Make the file block allocation algorithm understand the size * of the underlying block device. * Copyright (C) 2007 Dmitri Vorobiev <[email protected]> * */ #include <linux/fs.h> #include <linux/buffer_head.h> #include "bfs.h" #undef DEBUG #ifdef DEBUG #define dprintf(x...) printf(x) #else #define dprintf(x...) #endif const struct file_operations bfs_file_operations = { .llseek = generic_file_llseek, .read_iter = generic_file_read_iter, .write_iter = generic_file_write_iter, .mmap = generic_file_mmap, .splice_read = filemap_splice_read, }; static int bfs_move_block(unsigned long from, unsigned long to, struct super_block *sb) { struct buffer_head *bh, *new; bh = sb_bread(sb, from); if (!bh) return -EIO; new = sb_getblk(sb, to); memcpy(new->b_data, bh->b_data, bh->b_size); mark_buffer_dirty(new); bforget(bh); brelse(new); return 0; } static int bfs_move_blocks(struct super_block *sb, unsigned long start, unsigned long end, unsigned long where) { unsigned long i; dprintf("%08lx-%08lx->%08lx\n", start, end, where); for (i = start; i <= end; i++) if(bfs_move_block(i, where + i, sb)) { dprintf("failed to move block %08lx -> %08lx\n", i, where + i); return -EIO; } return 0; } static int bfs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create) { unsigned long phys; int err; struct super_block *sb = inode->i_sb; struct bfs_sb_info *info = BFS_SB(sb); struct bfs_inode_info *bi = BFS_I(inode); phys = bi->i_sblock + block; if (!create) { if (phys <= bi->i_eblock) { dprintf("c=%d, b=%08lx, phys=%09lx (granted)\n", create, (unsigned long)block, phys); map_bh(bh_result, sb, phys); } return 0; } /* * If the file is not empty and the requested block is within the * range of blocks allocated for this file, we can grant it. */ if (bi->i_sblock && (phys <= bi->i_eblock)) { dprintf("c=%d, b=%08lx, phys=%08lx (interim block granted)\n", create, (unsigned long)block, phys); map_bh(bh_result, sb, phys); return 0; } /* The file will be extended, so let's see if there is enough space. */ if (phys >= info->si_blocks) return -ENOSPC; /* The rest has to be protected against itself. */ mutex_lock(&info->bfs_lock); /* * If the last data block for this file is the last allocated * block, we can extend the file trivially, without moving it * anywhere. */ if (bi->i_eblock == info->si_lf_eblk) { dprintf("c=%d, b=%08lx, phys=%08lx (simple extension)\n", create, (unsigned long)block, phys); map_bh(bh_result, sb, phys); info->si_freeb -= phys - bi->i_eblock; info->si_lf_eblk = bi->i_eblock = phys; mark_inode_dirty(inode); err = 0; goto out; } /* Ok, we have to move this entire file to the next free block. */ phys = info->si_lf_eblk + 1; if (phys + block >= info->si_blocks) { err = -ENOSPC; goto out; } if (bi->i_sblock) { err = bfs_move_blocks(inode->i_sb, bi->i_sblock, bi->i_eblock, phys); if (err) { dprintf("failed to move ino=%08lx -> fs corruption\n", inode->i_ino); goto out; } } else err = 0; dprintf("c=%d, b=%08lx, phys=%08lx (moved)\n", create, (unsigned long)block, phys); bi->i_sblock = phys; phys += block; info->si_lf_eblk = bi->i_eblock = phys; /* * This assumes nothing can write the inode back while we are here * and thus update inode->i_blocks! 
(XXX) */ info->si_freeb -= bi->i_eblock - bi->i_sblock + 1 - inode->i_blocks; mark_inode_dirty(inode); map_bh(bh_result, sb, phys); out: mutex_unlock(&info->bfs_lock); return err; } static int bfs_writepage(struct page *page, struct writeback_control *wbc) { return block_write_full_page(page, bfs_get_block, wbc); } static int bfs_read_folio(struct file *file, struct folio *folio) { return block_read_full_folio(folio, bfs_get_block); } static void bfs_write_failed(struct address_space *mapping, loff_t to) { struct inode *inode = mapping->host; if (to > inode->i_size) truncate_pagecache(inode, inode->i_size); } static int bfs_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, struct page **pagep, void **fsdata) { int ret; ret = block_write_begin(mapping, pos, len, pagep, bfs_get_block); if (unlikely(ret)) bfs_write_failed(mapping, pos + len); return ret; } static sector_t bfs_bmap(struct address_space *mapping, sector_t block) { return generic_block_bmap(mapping, block, bfs_get_block); } const struct address_space_operations bfs_aops = { .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .read_folio = bfs_read_folio, .writepage = bfs_writepage, .write_begin = bfs_write_begin, .write_end = generic_write_end, .bmap = bfs_bmap, }; const struct inode_operations bfs_file_inops;
linux-master
fs/bfs/file.c
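The key property behind bfs_get_block() above is that every BFS file occupies a single contiguous run of blocks [i_sblock, i_eblock]: a write that grows the file either extends that run in place (only possible when the file is the last one on disk) or relocates the whole run just past the last allocated block. The user-space sketch below models that decision; the toy structures and names are illustrative, not the kernel data structures, and it omits the actual block copying done by bfs_move_blocks().

/*
 * Simplified model of the allocation decision in bfs_get_block(): map a
 * logical block inside the existing extent, extend in place if this is
 * the last file on disk, otherwise relocate the file past the last
 * allocated block. Purely illustrative.
 */
struct toy_fs {
	unsigned long total_blocks;	/* cf. si_blocks */
	unsigned long last_alloc;	/* cf. si_lf_eblk: last block used by any file */
};

struct toy_file {
	unsigned long sblock, eblock;	/* contiguous extent of this file */
};

/* Map logical block @blk of @f, growing or moving the file if needed. */
static long map_block(struct toy_fs *fs, struct toy_file *f, unsigned long blk)
{
	unsigned long phys = f->sblock + blk;

	if (f->sblock && phys <= f->eblock)
		return phys;			/* already inside the extent */

	if (f->eblock == fs->last_alloc) {	/* last file: extend in place */
		if (phys >= fs->total_blocks)
			return -1;		/* -ENOSPC in the real code */
		fs->last_alloc = f->eblock = phys;
		return phys;
	}

	/* Otherwise relocate the whole file after the last allocated block. */
	phys = fs->last_alloc + 1;
	if (phys + blk >= fs->total_blocks)
		return -1;
	f->sblock = phys;			/* real code copies old blocks here */
	fs->last_alloc = f->eblock = phys + blk;
	return phys + blk;
}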
// SPDX-License-Identifier: GPL-2.0+ /* * NILFS module and super block management. * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * * Written by Ryusuke Konishi. */ /* * linux/fs/ext2/super.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card ([email protected]) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/fs/minix/inode.c * * Copyright (C) 1991, 1992 Linus Torvalds * * Big-endian to little-endian byte-swapping/bitmaps by * David S. Miller ([email protected]), 1995 */ #include <linux/module.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/parser.h> #include <linux/crc32.h> #include <linux/vfs.h> #include <linux/writeback.h> #include <linux/seq_file.h> #include <linux/mount.h> #include <linux/fs_context.h> #include "nilfs.h" #include "export.h" #include "mdt.h" #include "alloc.h" #include "btree.h" #include "btnode.h" #include "page.h" #include "cpfile.h" #include "sufile.h" /* nilfs_sufile_resize(), nilfs_sufile_set_alloc_range() */ #include "ifile.h" #include "dat.h" #include "segment.h" #include "segbuf.h" MODULE_AUTHOR("NTT Corp."); MODULE_DESCRIPTION("A New Implementation of the Log-structured Filesystem " "(NILFS)"); MODULE_LICENSE("GPL"); static struct kmem_cache *nilfs_inode_cachep; struct kmem_cache *nilfs_transaction_cachep; struct kmem_cache *nilfs_segbuf_cachep; struct kmem_cache *nilfs_btree_path_cache; static int nilfs_setup_super(struct super_block *sb, int is_mount); static int nilfs_remount(struct super_block *sb, int *flags, char *data); void __nilfs_msg(struct super_block *sb, const char *fmt, ...) { struct va_format vaf; va_list args; int level; va_start(args, fmt); level = printk_get_level(fmt); vaf.fmt = printk_skip_level(fmt); vaf.va = &args; if (sb) printk("%c%cNILFS (%s): %pV\n", KERN_SOH_ASCII, level, sb->s_id, &vaf); else printk("%c%cNILFS: %pV\n", KERN_SOH_ASCII, level, &vaf); va_end(args); } static void nilfs_set_error(struct super_block *sb) { struct the_nilfs *nilfs = sb->s_fs_info; struct nilfs_super_block **sbp; down_write(&nilfs->ns_sem); if (!(nilfs->ns_mount_state & NILFS_ERROR_FS)) { nilfs->ns_mount_state |= NILFS_ERROR_FS; sbp = nilfs_prepare_super(sb, 0); if (likely(sbp)) { sbp[0]->s_state |= cpu_to_le16(NILFS_ERROR_FS); if (sbp[1]) sbp[1]->s_state |= cpu_to_le16(NILFS_ERROR_FS); nilfs_commit_super(sb, NILFS_SB_COMMIT_ALL); } } up_write(&nilfs->ns_sem); } /** * __nilfs_error() - report failure condition on a filesystem * * __nilfs_error() sets an ERROR_FS flag on the superblock as well as * reporting an error message. This function should be called when * NILFS detects incoherences or defects of meta data on disk. * * This implements the body of nilfs_error() macro. Normally, * nilfs_error() should be used. As for sustainable errors such as a * single-shot I/O error, nilfs_err() should be used instead. * * Callers should not add a trailing newline since this will do it. */ void __nilfs_error(struct super_block *sb, const char *function, const char *fmt, ...) 
{ struct the_nilfs *nilfs = sb->s_fs_info; struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk(KERN_CRIT "NILFS error (device %s): %s: %pV\n", sb->s_id, function, &vaf); va_end(args); if (!sb_rdonly(sb)) { nilfs_set_error(sb); if (nilfs_test_opt(nilfs, ERRORS_RO)) { printk(KERN_CRIT "Remounting filesystem read-only\n"); sb->s_flags |= SB_RDONLY; } } if (nilfs_test_opt(nilfs, ERRORS_PANIC)) panic("NILFS (device %s): panic forced after error\n", sb->s_id); } struct inode *nilfs_alloc_inode(struct super_block *sb) { struct nilfs_inode_info *ii; ii = alloc_inode_sb(sb, nilfs_inode_cachep, GFP_NOFS); if (!ii) return NULL; ii->i_bh = NULL; ii->i_state = 0; ii->i_cno = 0; ii->i_assoc_inode = NULL; ii->i_bmap = &ii->i_bmap_data; return &ii->vfs_inode; } static void nilfs_free_inode(struct inode *inode) { if (nilfs_is_metadata_file_inode(inode)) nilfs_mdt_destroy(inode); kmem_cache_free(nilfs_inode_cachep, NILFS_I(inode)); } static int nilfs_sync_super(struct super_block *sb, int flag) { struct the_nilfs *nilfs = sb->s_fs_info; int err; retry: set_buffer_dirty(nilfs->ns_sbh[0]); if (nilfs_test_opt(nilfs, BARRIER)) { err = __sync_dirty_buffer(nilfs->ns_sbh[0], REQ_SYNC | REQ_PREFLUSH | REQ_FUA); } else { err = sync_dirty_buffer(nilfs->ns_sbh[0]); } if (unlikely(err)) { nilfs_err(sb, "unable to write superblock: err=%d", err); if (err == -EIO && nilfs->ns_sbh[1]) { /* * sbp[0] points to newer log than sbp[1], * so copy sbp[0] to sbp[1] to take over sbp[0]. */ memcpy(nilfs->ns_sbp[1], nilfs->ns_sbp[0], nilfs->ns_sbsize); nilfs_fall_back_super_block(nilfs); goto retry; } } else { struct nilfs_super_block *sbp = nilfs->ns_sbp[0]; nilfs->ns_sbwcount++; /* * The latest segment becomes trailable from the position * written in superblock. */ clear_nilfs_discontinued(nilfs); /* update GC protection for recent segments */ if (nilfs->ns_sbh[1]) { if (flag == NILFS_SB_COMMIT_ALL) { set_buffer_dirty(nilfs->ns_sbh[1]); if (sync_dirty_buffer(nilfs->ns_sbh[1]) < 0) goto out; } if (le64_to_cpu(nilfs->ns_sbp[1]->s_last_cno) < le64_to_cpu(nilfs->ns_sbp[0]->s_last_cno)) sbp = nilfs->ns_sbp[1]; } spin_lock(&nilfs->ns_last_segment_lock); nilfs->ns_prot_seq = le64_to_cpu(sbp->s_last_seq); spin_unlock(&nilfs->ns_last_segment_lock); } out: return err; } void nilfs_set_log_cursor(struct nilfs_super_block *sbp, struct the_nilfs *nilfs) { sector_t nfreeblocks; /* nilfs->ns_sem must be locked by the caller. */ nilfs_count_free_blocks(nilfs, &nfreeblocks); sbp->s_free_blocks_count = cpu_to_le64(nfreeblocks); spin_lock(&nilfs->ns_last_segment_lock); sbp->s_last_seq = cpu_to_le64(nilfs->ns_last_seq); sbp->s_last_pseg = cpu_to_le64(nilfs->ns_last_pseg); sbp->s_last_cno = cpu_to_le64(nilfs->ns_last_cno); spin_unlock(&nilfs->ns_last_segment_lock); } struct nilfs_super_block **nilfs_prepare_super(struct super_block *sb, int flip) { struct the_nilfs *nilfs = sb->s_fs_info; struct nilfs_super_block **sbp = nilfs->ns_sbp; /* nilfs->ns_sem must be locked by the caller. 
*/ if (sbp[0]->s_magic != cpu_to_le16(NILFS_SUPER_MAGIC)) { if (sbp[1] && sbp[1]->s_magic == cpu_to_le16(NILFS_SUPER_MAGIC)) { memcpy(sbp[0], sbp[1], nilfs->ns_sbsize); } else { nilfs_crit(sb, "superblock broke"); return NULL; } } else if (sbp[1] && sbp[1]->s_magic != cpu_to_le16(NILFS_SUPER_MAGIC)) { memcpy(sbp[1], sbp[0], nilfs->ns_sbsize); } if (flip && sbp[1]) nilfs_swap_super_block(nilfs); return sbp; } int nilfs_commit_super(struct super_block *sb, int flag) { struct the_nilfs *nilfs = sb->s_fs_info; struct nilfs_super_block **sbp = nilfs->ns_sbp; time64_t t; /* nilfs->ns_sem must be locked by the caller. */ t = ktime_get_real_seconds(); nilfs->ns_sbwtime = t; sbp[0]->s_wtime = cpu_to_le64(t); sbp[0]->s_sum = 0; sbp[0]->s_sum = cpu_to_le32(crc32_le(nilfs->ns_crc_seed, (unsigned char *)sbp[0], nilfs->ns_sbsize)); if (flag == NILFS_SB_COMMIT_ALL && sbp[1]) { sbp[1]->s_wtime = sbp[0]->s_wtime; sbp[1]->s_sum = 0; sbp[1]->s_sum = cpu_to_le32(crc32_le(nilfs->ns_crc_seed, (unsigned char *)sbp[1], nilfs->ns_sbsize)); } clear_nilfs_sb_dirty(nilfs); nilfs->ns_flushed_device = 1; /* make sure store to ns_flushed_device cannot be reordered */ smp_wmb(); return nilfs_sync_super(sb, flag); } /** * nilfs_cleanup_super() - write filesystem state for cleanup * @sb: super block instance to be unmounted or degraded to read-only * * This function restores state flags in the on-disk super block. * This will set "clean" flag (i.e. NILFS_VALID_FS) unless the * filesystem was not clean previously. */ int nilfs_cleanup_super(struct super_block *sb) { struct the_nilfs *nilfs = sb->s_fs_info; struct nilfs_super_block **sbp; int flag = NILFS_SB_COMMIT; int ret = -EIO; sbp = nilfs_prepare_super(sb, 0); if (sbp) { sbp[0]->s_state = cpu_to_le16(nilfs->ns_mount_state); nilfs_set_log_cursor(sbp[0], nilfs); if (sbp[1] && sbp[0]->s_last_cno == sbp[1]->s_last_cno) { /* * make the "clean" flag also to the opposite * super block if both super blocks point to * the same checkpoint. */ sbp[1]->s_state = sbp[0]->s_state; flag = NILFS_SB_COMMIT_ALL; } ret = nilfs_commit_super(sb, flag); } return ret; } /** * nilfs_move_2nd_super - relocate secondary super block * @sb: super block instance * @sb2off: new offset of the secondary super block (in bytes) */ static int nilfs_move_2nd_super(struct super_block *sb, loff_t sb2off) { struct the_nilfs *nilfs = sb->s_fs_info; struct buffer_head *nsbh; struct nilfs_super_block *nsbp; sector_t blocknr, newblocknr; unsigned long offset; int sb2i; /* array index of the secondary superblock */ int ret = 0; /* nilfs->ns_sem must be locked by the caller. 
*/ if (nilfs->ns_sbh[1] && nilfs->ns_sbh[1]->b_blocknr > nilfs->ns_first_data_block) { sb2i = 1; blocknr = nilfs->ns_sbh[1]->b_blocknr; } else if (nilfs->ns_sbh[0]->b_blocknr > nilfs->ns_first_data_block) { sb2i = 0; blocknr = nilfs->ns_sbh[0]->b_blocknr; } else { sb2i = -1; blocknr = 0; } if (sb2i >= 0 && (u64)blocknr << nilfs->ns_blocksize_bits == sb2off) goto out; /* super block location is unchanged */ /* Get new super block buffer */ newblocknr = sb2off >> nilfs->ns_blocksize_bits; offset = sb2off & (nilfs->ns_blocksize - 1); nsbh = sb_getblk(sb, newblocknr); if (!nsbh) { nilfs_warn(sb, "unable to move secondary superblock to block %llu", (unsigned long long)newblocknr); ret = -EIO; goto out; } nsbp = (void *)nsbh->b_data + offset; lock_buffer(nsbh); if (sb2i >= 0) { /* * The position of the second superblock only changes by 4KiB, * which is larger than the maximum superblock data size * (= 1KiB), so there is no need to use memmove() to allow * overlap between source and destination. */ memcpy(nsbp, nilfs->ns_sbp[sb2i], nilfs->ns_sbsize); /* * Zero fill after copy to avoid overwriting in case of move * within the same block. */ memset(nsbh->b_data, 0, offset); memset((void *)nsbp + nilfs->ns_sbsize, 0, nsbh->b_size - offset - nilfs->ns_sbsize); } else { memset(nsbh->b_data, 0, nsbh->b_size); } set_buffer_uptodate(nsbh); unlock_buffer(nsbh); if (sb2i >= 0) { brelse(nilfs->ns_sbh[sb2i]); nilfs->ns_sbh[sb2i] = nsbh; nilfs->ns_sbp[sb2i] = nsbp; } else if (nilfs->ns_sbh[0]->b_blocknr < nilfs->ns_first_data_block) { /* secondary super block will be restored to index 1 */ nilfs->ns_sbh[1] = nsbh; nilfs->ns_sbp[1] = nsbp; } else { brelse(nsbh); } out: return ret; } /** * nilfs_resize_fs - resize the filesystem * @sb: super block instance * @newsize: new size of the filesystem (in bytes) */ int nilfs_resize_fs(struct super_block *sb, __u64 newsize) { struct the_nilfs *nilfs = sb->s_fs_info; struct nilfs_super_block **sbp; __u64 devsize, newnsegs; loff_t sb2off; int ret; ret = -ERANGE; devsize = bdev_nr_bytes(sb->s_bdev); if (newsize > devsize) goto out; /* * Prevent underflow in second superblock position calculation. * The exact minimum size check is done in nilfs_sufile_resize(). */ if (newsize < 4096) { ret = -ENOSPC; goto out; } /* * Write lock is required to protect some functions depending * on the number of segments, the number of reserved segments, * and so forth. */ down_write(&nilfs->ns_segctor_sem); sb2off = NILFS_SB2_OFFSET_BYTES(newsize); newnsegs = sb2off >> nilfs->ns_blocksize_bits; do_div(newnsegs, nilfs->ns_blocks_per_segment); ret = nilfs_sufile_resize(nilfs->ns_sufile, newnsegs); up_write(&nilfs->ns_segctor_sem); if (ret < 0) goto out; ret = nilfs_construct_segment(sb); if (ret < 0) goto out; down_write(&nilfs->ns_sem); nilfs_move_2nd_super(sb, sb2off); ret = -EIO; sbp = nilfs_prepare_super(sb, 0); if (likely(sbp)) { nilfs_set_log_cursor(sbp[0], nilfs); /* * Drop NILFS_RESIZE_FS flag for compatibility with * mount-time resize which may be implemented in a * future release. */ sbp[0]->s_state = cpu_to_le16(le16_to_cpu(sbp[0]->s_state) & ~NILFS_RESIZE_FS); sbp[0]->s_dev_size = cpu_to_le64(newsize); sbp[0]->s_nsegments = cpu_to_le64(nilfs->ns_nsegments); if (sbp[1]) memcpy(sbp[1], sbp[0], nilfs->ns_sbsize); ret = nilfs_commit_super(sb, NILFS_SB_COMMIT_ALL); } up_write(&nilfs->ns_sem); /* * Reset the range of allocatable segments last. 
This order * is important in the case of expansion because the secondary * superblock must be protected from log write until migration * completes. */ if (!ret) nilfs_sufile_set_alloc_range(nilfs->ns_sufile, 0, newnsegs - 1); out: return ret; } static void nilfs_put_super(struct super_block *sb) { struct the_nilfs *nilfs = sb->s_fs_info; nilfs_detach_log_writer(sb); if (!sb_rdonly(sb)) { down_write(&nilfs->ns_sem); nilfs_cleanup_super(sb); up_write(&nilfs->ns_sem); } nilfs_sysfs_delete_device_group(nilfs); iput(nilfs->ns_sufile); iput(nilfs->ns_cpfile); iput(nilfs->ns_dat); destroy_nilfs(nilfs); sb->s_fs_info = NULL; } static int nilfs_sync_fs(struct super_block *sb, int wait) { struct the_nilfs *nilfs = sb->s_fs_info; struct nilfs_super_block **sbp; int err = 0; /* This function is called when super block should be written back */ if (wait) err = nilfs_construct_segment(sb); down_write(&nilfs->ns_sem); if (nilfs_sb_dirty(nilfs)) { sbp = nilfs_prepare_super(sb, nilfs_sb_will_flip(nilfs)); if (likely(sbp)) { nilfs_set_log_cursor(sbp[0], nilfs); nilfs_commit_super(sb, NILFS_SB_COMMIT); } } up_write(&nilfs->ns_sem); if (!err) err = nilfs_flush_device(nilfs); return err; } int nilfs_attach_checkpoint(struct super_block *sb, __u64 cno, int curr_mnt, struct nilfs_root **rootp) { struct the_nilfs *nilfs = sb->s_fs_info; struct nilfs_root *root; struct nilfs_checkpoint *raw_cp; struct buffer_head *bh_cp; int err = -ENOMEM; root = nilfs_find_or_create_root( nilfs, curr_mnt ? NILFS_CPTREE_CURRENT_CNO : cno); if (!root) return err; if (root->ifile) goto reuse; /* already attached checkpoint */ down_read(&nilfs->ns_segctor_sem); err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, cno, 0, &raw_cp, &bh_cp); up_read(&nilfs->ns_segctor_sem); if (unlikely(err)) { if (err == -ENOENT || err == -EINVAL) { nilfs_err(sb, "Invalid checkpoint (checkpoint number=%llu)", (unsigned long long)cno); err = -EINVAL; } goto failed; } err = nilfs_ifile_read(sb, root, nilfs->ns_inode_size, &raw_cp->cp_ifile_inode, &root->ifile); if (err) goto failed_bh; atomic64_set(&root->inodes_count, le64_to_cpu(raw_cp->cp_inodes_count)); atomic64_set(&root->blocks_count, le64_to_cpu(raw_cp->cp_blocks_count)); nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, cno, bh_cp); reuse: *rootp = root; return 0; failed_bh: nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, cno, bh_cp); failed: nilfs_put_root(root); return err; } static int nilfs_freeze(struct super_block *sb) { struct the_nilfs *nilfs = sb->s_fs_info; int err; if (sb_rdonly(sb)) return 0; /* Mark super block clean */ down_write(&nilfs->ns_sem); err = nilfs_cleanup_super(sb); up_write(&nilfs->ns_sem); return err; } static int nilfs_unfreeze(struct super_block *sb) { struct the_nilfs *nilfs = sb->s_fs_info; if (sb_rdonly(sb)) return 0; down_write(&nilfs->ns_sem); nilfs_setup_super(sb, false); up_write(&nilfs->ns_sem); return 0; } static int nilfs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; struct nilfs_root *root = NILFS_I(d_inode(dentry))->i_root; struct the_nilfs *nilfs = root->nilfs; u64 id = huge_encode_dev(sb->s_bdev->bd_dev); unsigned long long blocks; unsigned long overhead; unsigned long nrsvblocks; sector_t nfreeblocks; u64 nmaxinodes, nfreeinodes; int err; /* * Compute all of the segment blocks * * The blocks before first segment and after last segment * are excluded. 
*/ blocks = nilfs->ns_blocks_per_segment * nilfs->ns_nsegments - nilfs->ns_first_data_block; nrsvblocks = nilfs->ns_nrsvsegs * nilfs->ns_blocks_per_segment; /* * Compute the overhead * * When distributing meta data blocks outside segment structure, * We must count them as the overhead. */ overhead = 0; err = nilfs_count_free_blocks(nilfs, &nfreeblocks); if (unlikely(err)) return err; err = nilfs_ifile_count_free_inodes(root->ifile, &nmaxinodes, &nfreeinodes); if (unlikely(err)) { nilfs_warn(sb, "failed to count free inodes: err=%d", err); if (err == -ERANGE) { /* * If nilfs_palloc_count_max_entries() returns * -ERANGE error code then we simply treat * curent inodes count as maximum possible and * zero as free inodes value. */ nmaxinodes = atomic64_read(&root->inodes_count); nfreeinodes = 0; err = 0; } else return err; } buf->f_type = NILFS_SUPER_MAGIC; buf->f_bsize = sb->s_blocksize; buf->f_blocks = blocks - overhead; buf->f_bfree = nfreeblocks; buf->f_bavail = (buf->f_bfree >= nrsvblocks) ? (buf->f_bfree - nrsvblocks) : 0; buf->f_files = nmaxinodes; buf->f_ffree = nfreeinodes; buf->f_namelen = NILFS_NAME_LEN; buf->f_fsid = u64_to_fsid(id); return 0; } static int nilfs_show_options(struct seq_file *seq, struct dentry *dentry) { struct super_block *sb = dentry->d_sb; struct the_nilfs *nilfs = sb->s_fs_info; struct nilfs_root *root = NILFS_I(d_inode(dentry))->i_root; if (!nilfs_test_opt(nilfs, BARRIER)) seq_puts(seq, ",nobarrier"); if (root->cno != NILFS_CPTREE_CURRENT_CNO) seq_printf(seq, ",cp=%llu", (unsigned long long)root->cno); if (nilfs_test_opt(nilfs, ERRORS_PANIC)) seq_puts(seq, ",errors=panic"); if (nilfs_test_opt(nilfs, ERRORS_CONT)) seq_puts(seq, ",errors=continue"); if (nilfs_test_opt(nilfs, STRICT_ORDER)) seq_puts(seq, ",order=strict"); if (nilfs_test_opt(nilfs, NORECOVERY)) seq_puts(seq, ",norecovery"); if (nilfs_test_opt(nilfs, DISCARD)) seq_puts(seq, ",discard"); return 0; } static const struct super_operations nilfs_sops = { .alloc_inode = nilfs_alloc_inode, .free_inode = nilfs_free_inode, .dirty_inode = nilfs_dirty_inode, .evict_inode = nilfs_evict_inode, .put_super = nilfs_put_super, .sync_fs = nilfs_sync_fs, .freeze_fs = nilfs_freeze, .unfreeze_fs = nilfs_unfreeze, .statfs = nilfs_statfs, .remount_fs = nilfs_remount, .show_options = nilfs_show_options }; enum { Opt_err_cont, Opt_err_panic, Opt_err_ro, Opt_barrier, Opt_nobarrier, Opt_snapshot, Opt_order, Opt_norecovery, Opt_discard, Opt_nodiscard, Opt_err, }; static match_table_t tokens = { {Opt_err_cont, "errors=continue"}, {Opt_err_panic, "errors=panic"}, {Opt_err_ro, "errors=remount-ro"}, {Opt_barrier, "barrier"}, {Opt_nobarrier, "nobarrier"}, {Opt_snapshot, "cp=%u"}, {Opt_order, "order=%s"}, {Opt_norecovery, "norecovery"}, {Opt_discard, "discard"}, {Opt_nodiscard, "nodiscard"}, {Opt_err, NULL} }; static int parse_options(char *options, struct super_block *sb, int is_remount) { struct the_nilfs *nilfs = sb->s_fs_info; char *p; substring_t args[MAX_OPT_ARGS]; if (!options) return 1; while ((p = strsep(&options, ",")) != NULL) { int token; if (!*p) continue; token = match_token(p, tokens, args); switch (token) { case Opt_barrier: nilfs_set_opt(nilfs, BARRIER); break; case Opt_nobarrier: nilfs_clear_opt(nilfs, BARRIER); break; case Opt_order: if (strcmp(args[0].from, "relaxed") == 0) /* Ordered data semantics */ nilfs_clear_opt(nilfs, STRICT_ORDER); else if (strcmp(args[0].from, "strict") == 0) /* Strict in-order semantics */ nilfs_set_opt(nilfs, STRICT_ORDER); else return 0; break; case Opt_err_panic: 
nilfs_write_opt(nilfs, ERROR_MODE, ERRORS_PANIC); break; case Opt_err_ro: nilfs_write_opt(nilfs, ERROR_MODE, ERRORS_RO); break; case Opt_err_cont: nilfs_write_opt(nilfs, ERROR_MODE, ERRORS_CONT); break; case Opt_snapshot: if (is_remount) { nilfs_err(sb, "\"%s\" option is invalid for remount", p); return 0; } break; case Opt_norecovery: nilfs_set_opt(nilfs, NORECOVERY); break; case Opt_discard: nilfs_set_opt(nilfs, DISCARD); break; case Opt_nodiscard: nilfs_clear_opt(nilfs, DISCARD); break; default: nilfs_err(sb, "unrecognized mount option \"%s\"", p); return 0; } } return 1; } static inline void nilfs_set_default_options(struct super_block *sb, struct nilfs_super_block *sbp) { struct the_nilfs *nilfs = sb->s_fs_info; nilfs->ns_mount_opt = NILFS_MOUNT_ERRORS_RO | NILFS_MOUNT_BARRIER; } static int nilfs_setup_super(struct super_block *sb, int is_mount) { struct the_nilfs *nilfs = sb->s_fs_info; struct nilfs_super_block **sbp; int max_mnt_count; int mnt_count; /* nilfs->ns_sem must be locked by the caller. */ sbp = nilfs_prepare_super(sb, 0); if (!sbp) return -EIO; if (!is_mount) goto skip_mount_setup; max_mnt_count = le16_to_cpu(sbp[0]->s_max_mnt_count); mnt_count = le16_to_cpu(sbp[0]->s_mnt_count); if (nilfs->ns_mount_state & NILFS_ERROR_FS) { nilfs_warn(sb, "mounting fs with errors"); #if 0 } else if (max_mnt_count >= 0 && mnt_count >= max_mnt_count) { nilfs_warn(sb, "maximal mount count reached"); #endif } if (!max_mnt_count) sbp[0]->s_max_mnt_count = cpu_to_le16(NILFS_DFL_MAX_MNT_COUNT); sbp[0]->s_mnt_count = cpu_to_le16(mnt_count + 1); sbp[0]->s_mtime = cpu_to_le64(ktime_get_real_seconds()); skip_mount_setup: sbp[0]->s_state = cpu_to_le16(le16_to_cpu(sbp[0]->s_state) & ~NILFS_VALID_FS); /* synchronize sbp[1] with sbp[0] */ if (sbp[1]) memcpy(sbp[1], sbp[0], nilfs->ns_sbsize); return nilfs_commit_super(sb, NILFS_SB_COMMIT_ALL); } struct nilfs_super_block *nilfs_read_super_block(struct super_block *sb, u64 pos, int blocksize, struct buffer_head **pbh) { unsigned long long sb_index = pos; unsigned long offset; offset = do_div(sb_index, blocksize); *pbh = sb_bread(sb, sb_index); if (!*pbh) return NULL; return (struct nilfs_super_block *)((char *)(*pbh)->b_data + offset); } int nilfs_store_magic_and_option(struct super_block *sb, struct nilfs_super_block *sbp, char *data) { struct the_nilfs *nilfs = sb->s_fs_info; sb->s_magic = le16_to_cpu(sbp->s_magic); /* FS independent flags */ #ifdef NILFS_ATIME_DISABLE sb->s_flags |= SB_NOATIME; #endif nilfs_set_default_options(sb, sbp); nilfs->ns_resuid = le16_to_cpu(sbp->s_def_resuid); nilfs->ns_resgid = le16_to_cpu(sbp->s_def_resgid); nilfs->ns_interval = le32_to_cpu(sbp->s_c_interval); nilfs->ns_watermark = le32_to_cpu(sbp->s_c_block_max); return !parse_options(data, sb, 0) ? 
-EINVAL : 0; } int nilfs_check_feature_compatibility(struct super_block *sb, struct nilfs_super_block *sbp) { __u64 features; features = le64_to_cpu(sbp->s_feature_incompat) & ~NILFS_FEATURE_INCOMPAT_SUPP; if (features) { nilfs_err(sb, "couldn't mount because of unsupported optional features (%llx)", (unsigned long long)features); return -EINVAL; } features = le64_to_cpu(sbp->s_feature_compat_ro) & ~NILFS_FEATURE_COMPAT_RO_SUPP; if (!sb_rdonly(sb) && features) { nilfs_err(sb, "couldn't mount RDWR because of unsupported optional features (%llx)", (unsigned long long)features); return -EINVAL; } return 0; } static int nilfs_get_root_dentry(struct super_block *sb, struct nilfs_root *root, struct dentry **root_dentry) { struct inode *inode; struct dentry *dentry; int ret = 0; inode = nilfs_iget(sb, root, NILFS_ROOT_INO); if (IS_ERR(inode)) { ret = PTR_ERR(inode); nilfs_err(sb, "error %d getting root inode", ret); goto out; } if (!S_ISDIR(inode->i_mode) || !inode->i_blocks || !inode->i_size) { iput(inode); nilfs_err(sb, "corrupt root inode"); ret = -EINVAL; goto out; } if (root->cno == NILFS_CPTREE_CURRENT_CNO) { dentry = d_find_alias(inode); if (!dentry) { dentry = d_make_root(inode); if (!dentry) { ret = -ENOMEM; goto failed_dentry; } } else { iput(inode); } } else { dentry = d_obtain_root(inode); if (IS_ERR(dentry)) { ret = PTR_ERR(dentry); goto failed_dentry; } } *root_dentry = dentry; out: return ret; failed_dentry: nilfs_err(sb, "error %d getting root dentry", ret); goto out; } static int nilfs_attach_snapshot(struct super_block *s, __u64 cno, struct dentry **root_dentry) { struct the_nilfs *nilfs = s->s_fs_info; struct nilfs_root *root; int ret; mutex_lock(&nilfs->ns_snapshot_mount_mutex); down_read(&nilfs->ns_segctor_sem); ret = nilfs_cpfile_is_snapshot(nilfs->ns_cpfile, cno); up_read(&nilfs->ns_segctor_sem); if (ret < 0) { ret = (ret == -ENOENT) ? -EINVAL : ret; goto out; } else if (!ret) { nilfs_err(s, "The specified checkpoint is not a snapshot (checkpoint number=%llu)", (unsigned long long)cno); ret = -EINVAL; goto out; } ret = nilfs_attach_checkpoint(s, cno, false, &root); if (ret) { nilfs_err(s, "error %d while loading snapshot (checkpoint number=%llu)", ret, (unsigned long long)cno); goto out; } ret = nilfs_get_root_dentry(s, root, root_dentry); nilfs_put_root(root); out: mutex_unlock(&nilfs->ns_snapshot_mount_mutex); return ret; } /** * nilfs_tree_is_busy() - try to shrink dentries of a checkpoint * @root_dentry: root dentry of the tree to be shrunk * * This function returns true if the tree was in-use. */ static bool nilfs_tree_is_busy(struct dentry *root_dentry) { shrink_dcache_parent(root_dentry); return d_count(root_dentry) > 1; } int nilfs_checkpoint_is_mounted(struct super_block *sb, __u64 cno) { struct the_nilfs *nilfs = sb->s_fs_info; struct nilfs_root *root; struct inode *inode; struct dentry *dentry; int ret; if (cno > nilfs->ns_cno) return false; if (cno >= nilfs_last_cno(nilfs)) return true; /* protect recent checkpoints */ ret = false; root = nilfs_lookup_root(nilfs, cno); if (root) { inode = nilfs_ilookup(sb, root, NILFS_ROOT_INO); if (inode) { dentry = d_find_alias(inode); if (dentry) { ret = nilfs_tree_is_busy(dentry); dput(dentry); } iput(inode); } nilfs_put_root(root); } return ret; } /** * nilfs_fill_super() - initialize a super block instance * @sb: super_block * @data: mount options * @silent: silent mode flag * * This function is called exclusively by nilfs->ns_mount_mutex. * So, the recovery process is protected from other simultaneous mounts. 
*/ static int nilfs_fill_super(struct super_block *sb, void *data, int silent) { struct the_nilfs *nilfs; struct nilfs_root *fsroot; __u64 cno; int err; nilfs = alloc_nilfs(sb); if (!nilfs) return -ENOMEM; sb->s_fs_info = nilfs; err = init_nilfs(nilfs, sb, (char *)data); if (err) goto failed_nilfs; sb->s_op = &nilfs_sops; sb->s_export_op = &nilfs_export_ops; sb->s_root = NULL; sb->s_time_gran = 1; sb->s_max_links = NILFS_LINK_MAX; sb->s_bdi = bdi_get(sb->s_bdev->bd_disk->bdi); err = load_nilfs(nilfs, sb); if (err) goto failed_nilfs; cno = nilfs_last_cno(nilfs); err = nilfs_attach_checkpoint(sb, cno, true, &fsroot); if (err) { nilfs_err(sb, "error %d while loading last checkpoint (checkpoint number=%llu)", err, (unsigned long long)cno); goto failed_unload; } if (!sb_rdonly(sb)) { err = nilfs_attach_log_writer(sb, fsroot); if (err) goto failed_checkpoint; } err = nilfs_get_root_dentry(sb, fsroot, &sb->s_root); if (err) goto failed_segctor; nilfs_put_root(fsroot); if (!sb_rdonly(sb)) { down_write(&nilfs->ns_sem); nilfs_setup_super(sb, true); up_write(&nilfs->ns_sem); } return 0; failed_segctor: nilfs_detach_log_writer(sb); failed_checkpoint: nilfs_put_root(fsroot); failed_unload: nilfs_sysfs_delete_device_group(nilfs); iput(nilfs->ns_sufile); iput(nilfs->ns_cpfile); iput(nilfs->ns_dat); failed_nilfs: destroy_nilfs(nilfs); return err; } static int nilfs_remount(struct super_block *sb, int *flags, char *data) { struct the_nilfs *nilfs = sb->s_fs_info; unsigned long old_sb_flags; unsigned long old_mount_opt; int err; sync_filesystem(sb); old_sb_flags = sb->s_flags; old_mount_opt = nilfs->ns_mount_opt; if (!parse_options(data, sb, 1)) { err = -EINVAL; goto restore_opts; } sb->s_flags = (sb->s_flags & ~SB_POSIXACL); err = -EINVAL; if (!nilfs_valid_fs(nilfs)) { nilfs_warn(sb, "couldn't remount because the filesystem is in an incomplete recovery state"); goto restore_opts; } if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb)) goto out; if (*flags & SB_RDONLY) { sb->s_flags |= SB_RDONLY; /* * Remounting a valid RW partition RDONLY, so set * the RDONLY flag and then mark the partition as valid again. */ down_write(&nilfs->ns_sem); nilfs_cleanup_super(sb); up_write(&nilfs->ns_sem); } else { __u64 features; struct nilfs_root *root; /* * Mounting a RDONLY partition read-write, so reread and * store the current valid flag. (It may have been changed * by fsck since we originally mounted the partition.) 
*/ down_read(&nilfs->ns_sem); features = le64_to_cpu(nilfs->ns_sbp[0]->s_feature_compat_ro) & ~NILFS_FEATURE_COMPAT_RO_SUPP; up_read(&nilfs->ns_sem); if (features) { nilfs_warn(sb, "couldn't remount RDWR because of unsupported optional features (%llx)", (unsigned long long)features); err = -EROFS; goto restore_opts; } sb->s_flags &= ~SB_RDONLY; root = NILFS_I(d_inode(sb->s_root))->i_root; err = nilfs_attach_log_writer(sb, root); if (err) goto restore_opts; down_write(&nilfs->ns_sem); nilfs_setup_super(sb, true); up_write(&nilfs->ns_sem); } out: return 0; restore_opts: sb->s_flags = old_sb_flags; nilfs->ns_mount_opt = old_mount_opt; return err; } struct nilfs_super_data { __u64 cno; int flags; }; static int nilfs_parse_snapshot_option(const char *option, const substring_t *arg, struct nilfs_super_data *sd) { unsigned long long val; const char *msg = NULL; int err; if (!(sd->flags & SB_RDONLY)) { msg = "read-only option is not specified"; goto parse_error; } err = kstrtoull(arg->from, 0, &val); if (err) { if (err == -ERANGE) msg = "too large checkpoint number"; else msg = "malformed argument"; goto parse_error; } else if (val == 0) { msg = "invalid checkpoint number 0"; goto parse_error; } sd->cno = val; return 0; parse_error: nilfs_err(NULL, "invalid option \"%s\": %s", option, msg); return 1; } /** * nilfs_identify - pre-read mount options needed to identify mount instance * @data: mount options * @sd: nilfs_super_data */ static int nilfs_identify(char *data, struct nilfs_super_data *sd) { char *p, *options = data; substring_t args[MAX_OPT_ARGS]; int token; int ret = 0; do { p = strsep(&options, ","); if (p != NULL && *p) { token = match_token(p, tokens, args); if (token == Opt_snapshot) ret = nilfs_parse_snapshot_option(p, &args[0], sd); } if (!options) break; BUG_ON(options == data); *(options - 1) = ','; } while (!ret); return ret; } static int nilfs_set_bdev_super(struct super_block *s, void *data) { s->s_dev = *(dev_t *)data; return 0; } static int nilfs_test_bdev_super(struct super_block *s, void *data) { return !(s->s_iflags & SB_I_RETIRED) && s->s_dev == *(dev_t *)data; } static struct dentry * nilfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { struct nilfs_super_data sd = { .flags = flags }; struct super_block *s; dev_t dev; int err; if (nilfs_identify(data, &sd)) return ERR_PTR(-EINVAL); err = lookup_bdev(dev_name, &dev); if (err) return ERR_PTR(err); s = sget(fs_type, nilfs_test_bdev_super, nilfs_set_bdev_super, flags, &dev); if (IS_ERR(s)) return ERR_CAST(s); if (!s->s_root) { /* * We drop s_umount here because we need to open the bdev and * bdev->open_mutex ranks above s_umount (blkdev_put() -> * __invalidate_device()). It is safe because we have active sb * reference and SB_BORN is not set yet. */ up_write(&s->s_umount); err = setup_bdev_super(s, flags, NULL); down_write(&s->s_umount); if (!err) err = nilfs_fill_super(s, data, flags & SB_SILENT ? 1 : 0); if (err) goto failed_super; s->s_flags |= SB_ACTIVE; } else if (!sd.cno) { if (nilfs_tree_is_busy(s->s_root)) { if ((flags ^ s->s_flags) & SB_RDONLY) { nilfs_err(s, "the device already has a %s mount.", sb_rdonly(s) ? "read-only" : "read/write"); err = -EBUSY; goto failed_super; } } else { /* * Try remount to setup mount states if the current * tree is not mounted and only snapshots use this sb. 
*/ err = nilfs_remount(s, &flags, data); if (err) goto failed_super; } } if (sd.cno) { struct dentry *root_dentry; err = nilfs_attach_snapshot(s, sd.cno, &root_dentry); if (err) goto failed_super; return root_dentry; } return dget(s->s_root); failed_super: deactivate_locked_super(s); return ERR_PTR(err); } struct file_system_type nilfs_fs_type = { .owner = THIS_MODULE, .name = "nilfs2", .mount = nilfs_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; MODULE_ALIAS_FS("nilfs2"); static void nilfs_inode_init_once(void *obj) { struct nilfs_inode_info *ii = obj; INIT_LIST_HEAD(&ii->i_dirty); #ifdef CONFIG_NILFS_XATTR init_rwsem(&ii->xattr_sem); #endif inode_init_once(&ii->vfs_inode); } static void nilfs_segbuf_init_once(void *obj) { memset(obj, 0, sizeof(struct nilfs_segment_buffer)); } static void nilfs_destroy_cachep(void) { /* * Make sure all delayed rcu free inodes are flushed before we * destroy cache. */ rcu_barrier(); kmem_cache_destroy(nilfs_inode_cachep); kmem_cache_destroy(nilfs_transaction_cachep); kmem_cache_destroy(nilfs_segbuf_cachep); kmem_cache_destroy(nilfs_btree_path_cache); } static int __init nilfs_init_cachep(void) { nilfs_inode_cachep = kmem_cache_create("nilfs2_inode_cache", sizeof(struct nilfs_inode_info), 0, SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, nilfs_inode_init_once); if (!nilfs_inode_cachep) goto fail; nilfs_transaction_cachep = kmem_cache_create("nilfs2_transaction_cache", sizeof(struct nilfs_transaction_info), 0, SLAB_RECLAIM_ACCOUNT, NULL); if (!nilfs_transaction_cachep) goto fail; nilfs_segbuf_cachep = kmem_cache_create("nilfs2_segbuf_cache", sizeof(struct nilfs_segment_buffer), 0, SLAB_RECLAIM_ACCOUNT, nilfs_segbuf_init_once); if (!nilfs_segbuf_cachep) goto fail; nilfs_btree_path_cache = kmem_cache_create("nilfs2_btree_path_cache", sizeof(struct nilfs_btree_path) * NILFS_BTREE_LEVEL_MAX, 0, 0, NULL); if (!nilfs_btree_path_cache) goto fail; return 0; fail: nilfs_destroy_cachep(); return -ENOMEM; } static int __init init_nilfs_fs(void) { int err; err = nilfs_init_cachep(); if (err) goto fail; err = nilfs_sysfs_init(); if (err) goto free_cachep; err = register_filesystem(&nilfs_fs_type); if (err) goto deinit_sysfs_entry; printk(KERN_INFO "NILFS version 2 loaded\n"); return 0; deinit_sysfs_entry: nilfs_sysfs_exit(); free_cachep: nilfs_destroy_cachep(); fail: return err; } static void __exit exit_nilfs_fs(void) { nilfs_destroy_cachep(); nilfs_sysfs_exit(); unregister_filesystem(&nilfs_fs_type); } module_init(init_nilfs_fs) module_exit(exit_nilfs_fs)
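/*
 * Editor's note -- illustrative sketch only, not part of fs/nilfs2/super.c:
 * nilfs_commit_super() above protects each on-disk super block by zeroing
 * s_sum, running a seeded little-endian CRC32 over the whole structure, and
 * storing the result back into s_sum.  The standalone snippet below mirrors
 * that zero-then-checksum-then-store pattern in plain userspace C.  The
 * struct layout, the demo_* names, and the seed value are simplified
 * assumptions; the bit-by-bit CRC helper uses the reflected polynomial
 * 0xEDB88320 with caller-supplied seeding and no pre/post inversion, which
 * approximates how crc32_le() behaves, but treat it as a sketch rather than
 * a drop-in equivalent.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_super_block {		/* simplified stand-in for nilfs_super_block */
	uint32_t s_sum;			/* checksum, must be zero while hashing */
	uint32_t s_pad;			/* explicit padding, keeps layout fixed */
	uint64_t s_wtime;		/* write time, set before checksumming */
	uint8_t  s_payload[64];		/* rest of the on-disk super block */
};

/* Bit-by-bit reflected CRC32, seeded by the caller. */
static uint32_t demo_crc32_le(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int k = 0; k < 8; k++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320u : 0);
	}
	return crc;
}

/* Mirror of the commit step: timestamp, zero s_sum, checksum, store. */
static void demo_commit_super(struct demo_super_block *sbp, uint64_t now,
			      uint32_t crc_seed)
{
	sbp->s_wtime = now;
	sbp->s_sum = 0;
	sbp->s_sum = demo_crc32_le(crc_seed, (const uint8_t *)sbp, sizeof(*sbp));
}

int main(void)
{
	struct demo_super_block sb = { 0 };

	memset(sb.s_payload, 0xab, sizeof(sb.s_payload));
	demo_commit_super(&sb, 1700000000ULL, 0x1badcafeu); /* seed is hypothetical */
	printf("s_sum = 0x%08x\n", sb.s_sum);
	return 0;
}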
linux-master
fs/nilfs2/super.c
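/*
 * Editor's note -- illustrative sketch only, not part of the kernel tree:
 * the NILFS B-tree code that follows locates a key inside a node with
 * nilfs_btree_node_lookup(): a plain binary search over the sorted key
 * array, followed by an index adjustment.  Nodes above the minimum level
 * step the index back so it names the child whose key range covers the
 * search key; the lowest level instead leaves the insertion point when the
 * key is absent.  The standalone function below reproduces that adjustment
 * over a flat sorted array; the demo_* names and the bool leaf parameter
 * are simplified assumptions, not kernel interfaces.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Returns true when key is present.  *indexp receives the matching slot,
 * or (interior) the covering child slot, or (leaf) the insertion point.
 */
static bool demo_node_lookup(const uint64_t *keys, int nchildren, bool leaf,
			     uint64_t key, int *indexp)
{
	int low = 0, high = nchildren - 1, index = 0, s = 0;

	while (low <= high) {
		index = (low + high) / 2;
		if (keys[index] == key) {
			s = 0;
			goto out;
		} else if (keys[index] < key) {
			low = index + 1;
			s = -1;		/* probed key was below the target */
		} else {
			high = index - 1;
			s = 1;		/* probed key was above the target */
		}
	}
	/* adjust the index the same way the kernel helper does */
	if (!leaf) {
		if (s > 0 && index > 0)
			index--;	/* step back to the covering child */
	} else if (s < 0) {
		index++;		/* report the insertion point */
	}
out:
	*indexp = index;
	return s == 0;
}

int main(void)
{
	const uint64_t keys[] = { 10, 20, 30, 40 };
	int idx;
	bool found;

	found = demo_node_lookup(keys, 4, true, 25, &idx);
	printf("found=%d insert_at=%d\n", found, idx);	/* found=0 insert_at=2 */

	found = demo_node_lookup(keys, 4, false, 25, &idx);
	printf("found=%d child_slot=%d\n", found, idx);	/* found=0 child_slot=1 */
	return 0;
}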
// SPDX-License-Identifier: GPL-2.0+ /* * NILFS B-tree. * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * * Written by Koji Sato. */ #include <linux/slab.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/pagevec.h> #include "nilfs.h" #include "page.h" #include "btnode.h" #include "btree.h" #include "alloc.h" #include "dat.h" static void __nilfs_btree_init(struct nilfs_bmap *bmap); static struct nilfs_btree_path *nilfs_btree_alloc_path(void) { struct nilfs_btree_path *path; int level = NILFS_BTREE_LEVEL_DATA; path = kmem_cache_alloc(nilfs_btree_path_cache, GFP_NOFS); if (path == NULL) goto out; for (; level < NILFS_BTREE_LEVEL_MAX; level++) { path[level].bp_bh = NULL; path[level].bp_sib_bh = NULL; path[level].bp_index = 0; path[level].bp_oldreq.bpr_ptr = NILFS_BMAP_INVALID_PTR; path[level].bp_newreq.bpr_ptr = NILFS_BMAP_INVALID_PTR; path[level].bp_op = NULL; } out: return path; } static void nilfs_btree_free_path(struct nilfs_btree_path *path) { int level = NILFS_BTREE_LEVEL_DATA; for (; level < NILFS_BTREE_LEVEL_MAX; level++) brelse(path[level].bp_bh); kmem_cache_free(nilfs_btree_path_cache, path); } /* * B-tree node operations */ static int nilfs_btree_get_new_block(const struct nilfs_bmap *btree, __u64 ptr, struct buffer_head **bhp) { struct inode *btnc_inode = NILFS_BMAP_I(btree)->i_assoc_inode; struct address_space *btnc = btnc_inode->i_mapping; struct buffer_head *bh; bh = nilfs_btnode_create_block(btnc, ptr); if (!bh) return -ENOMEM; set_buffer_nilfs_volatile(bh); *bhp = bh; return 0; } static int nilfs_btree_node_get_flags(const struct nilfs_btree_node *node) { return node->bn_flags; } static void nilfs_btree_node_set_flags(struct nilfs_btree_node *node, int flags) { node->bn_flags = flags; } static int nilfs_btree_node_root(const struct nilfs_btree_node *node) { return nilfs_btree_node_get_flags(node) & NILFS_BTREE_NODE_ROOT; } static int nilfs_btree_node_get_level(const struct nilfs_btree_node *node) { return node->bn_level; } static void nilfs_btree_node_set_level(struct nilfs_btree_node *node, int level) { node->bn_level = level; } static int nilfs_btree_node_get_nchildren(const struct nilfs_btree_node *node) { return le16_to_cpu(node->bn_nchildren); } static void nilfs_btree_node_set_nchildren(struct nilfs_btree_node *node, int nchildren) { node->bn_nchildren = cpu_to_le16(nchildren); } static int nilfs_btree_node_size(const struct nilfs_bmap *btree) { return i_blocksize(btree->b_inode); } static int nilfs_btree_nchildren_per_block(const struct nilfs_bmap *btree) { return btree->b_nchildren_per_block; } static __le64 * nilfs_btree_node_dkeys(const struct nilfs_btree_node *node) { return (__le64 *)((char *)(node + 1) + (nilfs_btree_node_root(node) ? 
0 : NILFS_BTREE_NODE_EXTRA_PAD_SIZE)); } static __le64 * nilfs_btree_node_dptrs(const struct nilfs_btree_node *node, int ncmax) { return (__le64 *)(nilfs_btree_node_dkeys(node) + ncmax); } static __u64 nilfs_btree_node_get_key(const struct nilfs_btree_node *node, int index) { return le64_to_cpu(*(nilfs_btree_node_dkeys(node) + index)); } static void nilfs_btree_node_set_key(struct nilfs_btree_node *node, int index, __u64 key) { *(nilfs_btree_node_dkeys(node) + index) = cpu_to_le64(key); } static __u64 nilfs_btree_node_get_ptr(const struct nilfs_btree_node *node, int index, int ncmax) { return le64_to_cpu(*(nilfs_btree_node_dptrs(node, ncmax) + index)); } static void nilfs_btree_node_set_ptr(struct nilfs_btree_node *node, int index, __u64 ptr, int ncmax) { *(nilfs_btree_node_dptrs(node, ncmax) + index) = cpu_to_le64(ptr); } static void nilfs_btree_node_init(struct nilfs_btree_node *node, int flags, int level, int nchildren, int ncmax, const __u64 *keys, const __u64 *ptrs) { __le64 *dkeys; __le64 *dptrs; int i; nilfs_btree_node_set_flags(node, flags); nilfs_btree_node_set_level(node, level); nilfs_btree_node_set_nchildren(node, nchildren); dkeys = nilfs_btree_node_dkeys(node); dptrs = nilfs_btree_node_dptrs(node, ncmax); for (i = 0; i < nchildren; i++) { dkeys[i] = cpu_to_le64(keys[i]); dptrs[i] = cpu_to_le64(ptrs[i]); } } /* Assume the buffer heads corresponding to left and right are locked. */ static void nilfs_btree_node_move_left(struct nilfs_btree_node *left, struct nilfs_btree_node *right, int n, int lncmax, int rncmax) { __le64 *ldkeys, *rdkeys; __le64 *ldptrs, *rdptrs; int lnchildren, rnchildren; ldkeys = nilfs_btree_node_dkeys(left); ldptrs = nilfs_btree_node_dptrs(left, lncmax); lnchildren = nilfs_btree_node_get_nchildren(left); rdkeys = nilfs_btree_node_dkeys(right); rdptrs = nilfs_btree_node_dptrs(right, rncmax); rnchildren = nilfs_btree_node_get_nchildren(right); memcpy(ldkeys + lnchildren, rdkeys, n * sizeof(*rdkeys)); memcpy(ldptrs + lnchildren, rdptrs, n * sizeof(*rdptrs)); memmove(rdkeys, rdkeys + n, (rnchildren - n) * sizeof(*rdkeys)); memmove(rdptrs, rdptrs + n, (rnchildren - n) * sizeof(*rdptrs)); lnchildren += n; rnchildren -= n; nilfs_btree_node_set_nchildren(left, lnchildren); nilfs_btree_node_set_nchildren(right, rnchildren); } /* Assume that the buffer heads corresponding to left and right are locked. */ static void nilfs_btree_node_move_right(struct nilfs_btree_node *left, struct nilfs_btree_node *right, int n, int lncmax, int rncmax) { __le64 *ldkeys, *rdkeys; __le64 *ldptrs, *rdptrs; int lnchildren, rnchildren; ldkeys = nilfs_btree_node_dkeys(left); ldptrs = nilfs_btree_node_dptrs(left, lncmax); lnchildren = nilfs_btree_node_get_nchildren(left); rdkeys = nilfs_btree_node_dkeys(right); rdptrs = nilfs_btree_node_dptrs(right, rncmax); rnchildren = nilfs_btree_node_get_nchildren(right); memmove(rdkeys + n, rdkeys, rnchildren * sizeof(*rdkeys)); memmove(rdptrs + n, rdptrs, rnchildren * sizeof(*rdptrs)); memcpy(rdkeys, ldkeys + lnchildren - n, n * sizeof(*rdkeys)); memcpy(rdptrs, ldptrs + lnchildren - n, n * sizeof(*rdptrs)); lnchildren -= n; rnchildren += n; nilfs_btree_node_set_nchildren(left, lnchildren); nilfs_btree_node_set_nchildren(right, rnchildren); } /* Assume that the buffer head corresponding to node is locked. 
*/ static void nilfs_btree_node_insert(struct nilfs_btree_node *node, int index, __u64 key, __u64 ptr, int ncmax) { __le64 *dkeys; __le64 *dptrs; int nchildren; dkeys = nilfs_btree_node_dkeys(node); dptrs = nilfs_btree_node_dptrs(node, ncmax); nchildren = nilfs_btree_node_get_nchildren(node); if (index < nchildren) { memmove(dkeys + index + 1, dkeys + index, (nchildren - index) * sizeof(*dkeys)); memmove(dptrs + index + 1, dptrs + index, (nchildren - index) * sizeof(*dptrs)); } dkeys[index] = cpu_to_le64(key); dptrs[index] = cpu_to_le64(ptr); nchildren++; nilfs_btree_node_set_nchildren(node, nchildren); } /* Assume that the buffer head corresponding to node is locked. */ static void nilfs_btree_node_delete(struct nilfs_btree_node *node, int index, __u64 *keyp, __u64 *ptrp, int ncmax) { __u64 key; __u64 ptr; __le64 *dkeys; __le64 *dptrs; int nchildren; dkeys = nilfs_btree_node_dkeys(node); dptrs = nilfs_btree_node_dptrs(node, ncmax); key = le64_to_cpu(dkeys[index]); ptr = le64_to_cpu(dptrs[index]); nchildren = nilfs_btree_node_get_nchildren(node); if (keyp != NULL) *keyp = key; if (ptrp != NULL) *ptrp = ptr; if (index < nchildren - 1) { memmove(dkeys + index, dkeys + index + 1, (nchildren - index - 1) * sizeof(*dkeys)); memmove(dptrs + index, dptrs + index + 1, (nchildren - index - 1) * sizeof(*dptrs)); } nchildren--; nilfs_btree_node_set_nchildren(node, nchildren); } static int nilfs_btree_node_lookup(const struct nilfs_btree_node *node, __u64 key, int *indexp) { __u64 nkey; int index, low, high, s; /* binary search */ low = 0; high = nilfs_btree_node_get_nchildren(node) - 1; index = 0; s = 0; while (low <= high) { index = (low + high) / 2; nkey = nilfs_btree_node_get_key(node, index); if (nkey == key) { s = 0; goto out; } else if (nkey < key) { low = index + 1; s = -1; } else { high = index - 1; s = 1; } } /* adjust index */ if (nilfs_btree_node_get_level(node) > NILFS_BTREE_LEVEL_NODE_MIN) { if (s > 0 && index > 0) index--; } else if (s < 0) index++; out: *indexp = index; return s == 0; } /** * nilfs_btree_node_broken - verify consistency of btree node * @node: btree node block to be examined * @size: node size (in bytes) * @inode: host inode of btree * @blocknr: block number * * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned. */ static int nilfs_btree_node_broken(const struct nilfs_btree_node *node, size_t size, struct inode *inode, sector_t blocknr) { int level, flags, nchildren; int ret = 0; level = nilfs_btree_node_get_level(node); flags = nilfs_btree_node_get_flags(node); nchildren = nilfs_btree_node_get_nchildren(node); if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN || level >= NILFS_BTREE_LEVEL_MAX || (flags & NILFS_BTREE_NODE_ROOT) || nchildren < 0 || nchildren > NILFS_BTREE_NODE_NCHILDREN_MAX(size))) { nilfs_crit(inode->i_sb, "bad btree node (ino=%lu, blocknr=%llu): level = %d, flags = 0x%x, nchildren = %d", inode->i_ino, (unsigned long long)blocknr, level, flags, nchildren); ret = 1; } return ret; } /** * nilfs_btree_root_broken - verify consistency of btree root node * @node: btree root node to be examined * @inode: host inode of btree * * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned. 
*/ static int nilfs_btree_root_broken(const struct nilfs_btree_node *node, struct inode *inode) { int level, flags, nchildren; int ret = 0; level = nilfs_btree_node_get_level(node); flags = nilfs_btree_node_get_flags(node); nchildren = nilfs_btree_node_get_nchildren(node); if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN || level >= NILFS_BTREE_LEVEL_MAX || nchildren < 0 || nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) { nilfs_crit(inode->i_sb, "bad btree root (ino=%lu): level = %d, flags = 0x%x, nchildren = %d", inode->i_ino, level, flags, nchildren); ret = 1; } return ret; } int nilfs_btree_broken_node_block(struct buffer_head *bh) { struct inode *inode; int ret; if (buffer_nilfs_checked(bh)) return 0; inode = bh->b_folio->mapping->host; ret = nilfs_btree_node_broken((struct nilfs_btree_node *)bh->b_data, bh->b_size, inode, bh->b_blocknr); if (likely(!ret)) set_buffer_nilfs_checked(bh); return ret; } static struct nilfs_btree_node * nilfs_btree_get_root(const struct nilfs_bmap *btree) { return (struct nilfs_btree_node *)btree->b_u.u_data; } static struct nilfs_btree_node * nilfs_btree_get_nonroot_node(const struct nilfs_btree_path *path, int level) { return (struct nilfs_btree_node *)path[level].bp_bh->b_data; } static struct nilfs_btree_node * nilfs_btree_get_sib_node(const struct nilfs_btree_path *path, int level) { return (struct nilfs_btree_node *)path[level].bp_sib_bh->b_data; } static int nilfs_btree_height(const struct nilfs_bmap *btree) { return nilfs_btree_node_get_level(nilfs_btree_get_root(btree)) + 1; } static struct nilfs_btree_node * nilfs_btree_get_node(const struct nilfs_bmap *btree, const struct nilfs_btree_path *path, int level, int *ncmaxp) { struct nilfs_btree_node *node; if (level == nilfs_btree_height(btree) - 1) { node = nilfs_btree_get_root(btree); *ncmaxp = NILFS_BTREE_ROOT_NCHILDREN_MAX; } else { node = nilfs_btree_get_nonroot_node(path, level); *ncmaxp = nilfs_btree_nchildren_per_block(btree); } return node; } static int nilfs_btree_bad_node(const struct nilfs_bmap *btree, struct nilfs_btree_node *node, int level) { if (unlikely(nilfs_btree_node_get_level(node) != level)) { dump_stack(); nilfs_crit(btree->b_inode->i_sb, "btree level mismatch (ino=%lu): %d != %d", btree->b_inode->i_ino, nilfs_btree_node_get_level(node), level); return 1; } return 0; } struct nilfs_btree_readahead_info { struct nilfs_btree_node *node; /* parent node */ int max_ra_blocks; /* max nof blocks to read ahead */ int index; /* current index on the parent node */ int ncmax; /* nof children in the parent node */ }; static int __nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr, struct buffer_head **bhp, const struct nilfs_btree_readahead_info *ra) { struct inode *btnc_inode = NILFS_BMAP_I(btree)->i_assoc_inode; struct address_space *btnc = btnc_inode->i_mapping; struct buffer_head *bh, *ra_bh; sector_t submit_ptr = 0; int ret; ret = nilfs_btnode_submit_block(btnc, ptr, 0, REQ_OP_READ, &bh, &submit_ptr); if (ret) { if (likely(ret == -EEXIST)) goto out_check; if (ret == -ENOENT) { /* * Block address translation failed due to invalid * value of 'ptr'. In this case, return internal code * -EINVAL (broken bmap) to notify bmap layer of fatal * metadata corruption. 
*/ ret = -EINVAL; } return ret; } if (ra) { int i, n; __u64 ptr2; /* read ahead sibling nodes */ for (n = ra->max_ra_blocks, i = ra->index + 1; n > 0 && i < ra->ncmax; n--, i++) { ptr2 = nilfs_btree_node_get_ptr(ra->node, i, ra->ncmax); ret = nilfs_btnode_submit_block(btnc, ptr2, 0, REQ_OP_READ | REQ_RAHEAD, &ra_bh, &submit_ptr); if (likely(!ret || ret == -EEXIST)) brelse(ra_bh); else if (ret != -EBUSY) break; if (!buffer_locked(bh)) goto out_no_wait; } } wait_on_buffer(bh); out_no_wait: if (!buffer_uptodate(bh)) { nilfs_err(btree->b_inode->i_sb, "I/O error reading b-tree node block (ino=%lu, blocknr=%llu)", btree->b_inode->i_ino, (unsigned long long)ptr); brelse(bh); return -EIO; } out_check: if (nilfs_btree_broken_node_block(bh)) { clear_buffer_uptodate(bh); brelse(bh); return -EINVAL; } *bhp = bh; return 0; } static int nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr, struct buffer_head **bhp) { return __nilfs_btree_get_block(btree, ptr, bhp, NULL); } static int nilfs_btree_do_lookup(const struct nilfs_bmap *btree, struct nilfs_btree_path *path, __u64 key, __u64 *ptrp, int minlevel, int readahead) { struct nilfs_btree_node *node; struct nilfs_btree_readahead_info p, *ra; __u64 ptr; int level, index, found, ncmax, ret; node = nilfs_btree_get_root(btree); level = nilfs_btree_node_get_level(node); if (level < minlevel || nilfs_btree_node_get_nchildren(node) <= 0) return -ENOENT; found = nilfs_btree_node_lookup(node, key, &index); ptr = nilfs_btree_node_get_ptr(node, index, NILFS_BTREE_ROOT_NCHILDREN_MAX); path[level].bp_bh = NULL; path[level].bp_index = index; ncmax = nilfs_btree_nchildren_per_block(btree); while (--level >= minlevel) { ra = NULL; if (level == NILFS_BTREE_LEVEL_NODE_MIN && readahead) { p.node = nilfs_btree_get_node(btree, path, level + 1, &p.ncmax); p.index = index; p.max_ra_blocks = 7; ra = &p; } ret = __nilfs_btree_get_block(btree, ptr, &path[level].bp_bh, ra); if (ret < 0) return ret; node = nilfs_btree_get_nonroot_node(path, level); if (nilfs_btree_bad_node(btree, node, level)) return -EINVAL; if (!found) found = nilfs_btree_node_lookup(node, key, &index); else index = 0; if (index < ncmax) { ptr = nilfs_btree_node_get_ptr(node, index, ncmax); } else { WARN_ON(found || level != NILFS_BTREE_LEVEL_NODE_MIN); /* insert */ ptr = NILFS_BMAP_INVALID_PTR; } path[level].bp_index = index; } if (!found) return -ENOENT; if (ptrp != NULL) *ptrp = ptr; return 0; } static int nilfs_btree_do_lookup_last(const struct nilfs_bmap *btree, struct nilfs_btree_path *path, __u64 *keyp, __u64 *ptrp) { struct nilfs_btree_node *node; __u64 ptr; int index, level, ncmax, ret; node = nilfs_btree_get_root(btree); index = nilfs_btree_node_get_nchildren(node) - 1; if (index < 0) return -ENOENT; level = nilfs_btree_node_get_level(node); ptr = nilfs_btree_node_get_ptr(node, index, NILFS_BTREE_ROOT_NCHILDREN_MAX); path[level].bp_bh = NULL; path[level].bp_index = index; ncmax = nilfs_btree_nchildren_per_block(btree); for (level--; level > 0; level--) { ret = nilfs_btree_get_block(btree, ptr, &path[level].bp_bh); if (ret < 0) return ret; node = nilfs_btree_get_nonroot_node(path, level); if (nilfs_btree_bad_node(btree, node, level)) return -EINVAL; index = nilfs_btree_node_get_nchildren(node) - 1; ptr = nilfs_btree_node_get_ptr(node, index, ncmax); path[level].bp_index = index; } if (keyp != NULL) *keyp = nilfs_btree_node_get_key(node, index); if (ptrp != NULL) *ptrp = ptr; return 0; } /** * nilfs_btree_get_next_key - get next valid key from btree path array * @btree: bmap struct of btree * 
@path: array of nilfs_btree_path struct * @minlevel: start level * @nextkey: place to store the next valid key * * Return Value: If a next key was found, 0 is returned. Otherwise, * -ENOENT is returned. */ static int nilfs_btree_get_next_key(const struct nilfs_bmap *btree, const struct nilfs_btree_path *path, int minlevel, __u64 *nextkey) { struct nilfs_btree_node *node; int maxlevel = nilfs_btree_height(btree) - 1; int index, next_adj, level; /* Next index is already set to bp_index for leaf nodes. */ next_adj = 0; for (level = minlevel; level <= maxlevel; level++) { if (level == maxlevel) node = nilfs_btree_get_root(btree); else node = nilfs_btree_get_nonroot_node(path, level); index = path[level].bp_index + next_adj; if (index < nilfs_btree_node_get_nchildren(node)) { /* Next key is in this node */ *nextkey = nilfs_btree_node_get_key(node, index); return 0; } /* For non-leaf nodes, next index is stored at bp_index + 1. */ next_adj = 1; } return -ENOENT; } static int nilfs_btree_lookup(const struct nilfs_bmap *btree, __u64 key, int level, __u64 *ptrp) { struct nilfs_btree_path *path; int ret; path = nilfs_btree_alloc_path(); if (path == NULL) return -ENOMEM; ret = nilfs_btree_do_lookup(btree, path, key, ptrp, level, 0); nilfs_btree_free_path(path); return ret; } static int nilfs_btree_lookup_contig(const struct nilfs_bmap *btree, __u64 key, __u64 *ptrp, unsigned int maxblocks) { struct nilfs_btree_path *path; struct nilfs_btree_node *node; struct inode *dat = NULL; __u64 ptr, ptr2; sector_t blocknr; int level = NILFS_BTREE_LEVEL_NODE_MIN; int ret, cnt, index, maxlevel, ncmax; struct nilfs_btree_readahead_info p; path = nilfs_btree_alloc_path(); if (path == NULL) return -ENOMEM; ret = nilfs_btree_do_lookup(btree, path, key, &ptr, level, 1); if (ret < 0) goto out; if (NILFS_BMAP_USE_VBN(btree)) { dat = nilfs_bmap_get_dat(btree); ret = nilfs_dat_translate(dat, ptr, &blocknr); if (ret < 0) goto out; ptr = blocknr; } cnt = 1; if (cnt == maxblocks) goto end; maxlevel = nilfs_btree_height(btree) - 1; node = nilfs_btree_get_node(btree, path, level, &ncmax); index = path[level].bp_index + 1; for (;;) { while (index < nilfs_btree_node_get_nchildren(node)) { if (nilfs_btree_node_get_key(node, index) != key + cnt) goto end; ptr2 = nilfs_btree_node_get_ptr(node, index, ncmax); if (dat) { ret = nilfs_dat_translate(dat, ptr2, &blocknr); if (ret < 0) goto out; ptr2 = blocknr; } if (ptr2 != ptr + cnt || ++cnt == maxblocks) goto end; index++; } if (level == maxlevel) break; /* look-up right sibling node */ p.node = nilfs_btree_get_node(btree, path, level + 1, &p.ncmax); p.index = path[level + 1].bp_index + 1; p.max_ra_blocks = 7; if (p.index >= nilfs_btree_node_get_nchildren(p.node) || nilfs_btree_node_get_key(p.node, p.index) != key + cnt) break; ptr2 = nilfs_btree_node_get_ptr(p.node, p.index, p.ncmax); path[level + 1].bp_index = p.index; brelse(path[level].bp_bh); path[level].bp_bh = NULL; ret = __nilfs_btree_get_block(btree, ptr2, &path[level].bp_bh, &p); if (ret < 0) goto out; node = nilfs_btree_get_nonroot_node(path, level); ncmax = nilfs_btree_nchildren_per_block(btree); index = 0; path[level].bp_index = index; } end: *ptrp = ptr; ret = cnt; out: nilfs_btree_free_path(path); return ret; } static void nilfs_btree_promote_key(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, __u64 key) { if (level < nilfs_btree_height(btree) - 1) { do { nilfs_btree_node_set_key( nilfs_btree_get_nonroot_node(path, level), path[level].bp_index, key); if (!buffer_dirty(path[level].bp_bh)) 
mark_buffer_dirty(path[level].bp_bh); } while ((path[level].bp_index == 0) && (++level < nilfs_btree_height(btree) - 1)); } /* root */ if (level == nilfs_btree_height(btree) - 1) { nilfs_btree_node_set_key(nilfs_btree_get_root(btree), path[level].bp_index, key); } } static void nilfs_btree_do_insert(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, __u64 *keyp, __u64 *ptrp) { struct nilfs_btree_node *node; int ncblk; if (level < nilfs_btree_height(btree) - 1) { node = nilfs_btree_get_nonroot_node(path, level); ncblk = nilfs_btree_nchildren_per_block(btree); nilfs_btree_node_insert(node, path[level].bp_index, *keyp, *ptrp, ncblk); if (!buffer_dirty(path[level].bp_bh)) mark_buffer_dirty(path[level].bp_bh); if (path[level].bp_index == 0) nilfs_btree_promote_key(btree, path, level + 1, nilfs_btree_node_get_key(node, 0)); } else { node = nilfs_btree_get_root(btree); nilfs_btree_node_insert(node, path[level].bp_index, *keyp, *ptrp, NILFS_BTREE_ROOT_NCHILDREN_MAX); } } static void nilfs_btree_carry_left(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, __u64 *keyp, __u64 *ptrp) { struct nilfs_btree_node *node, *left; int nchildren, lnchildren, n, move, ncblk; node = nilfs_btree_get_nonroot_node(path, level); left = nilfs_btree_get_sib_node(path, level); nchildren = nilfs_btree_node_get_nchildren(node); lnchildren = nilfs_btree_node_get_nchildren(left); ncblk = nilfs_btree_nchildren_per_block(btree); move = 0; n = (nchildren + lnchildren + 1) / 2 - lnchildren; if (n > path[level].bp_index) { /* move insert point */ n--; move = 1; } nilfs_btree_node_move_left(left, node, n, ncblk, ncblk); if (!buffer_dirty(path[level].bp_bh)) mark_buffer_dirty(path[level].bp_bh); if (!buffer_dirty(path[level].bp_sib_bh)) mark_buffer_dirty(path[level].bp_sib_bh); nilfs_btree_promote_key(btree, path, level + 1, nilfs_btree_node_get_key(node, 0)); if (move) { brelse(path[level].bp_bh); path[level].bp_bh = path[level].bp_sib_bh; path[level].bp_sib_bh = NULL; path[level].bp_index += lnchildren; path[level + 1].bp_index--; } else { brelse(path[level].bp_sib_bh); path[level].bp_sib_bh = NULL; path[level].bp_index -= n; } nilfs_btree_do_insert(btree, path, level, keyp, ptrp); } static void nilfs_btree_carry_right(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, __u64 *keyp, __u64 *ptrp) { struct nilfs_btree_node *node, *right; int nchildren, rnchildren, n, move, ncblk; node = nilfs_btree_get_nonroot_node(path, level); right = nilfs_btree_get_sib_node(path, level); nchildren = nilfs_btree_node_get_nchildren(node); rnchildren = nilfs_btree_node_get_nchildren(right); ncblk = nilfs_btree_nchildren_per_block(btree); move = 0; n = (nchildren + rnchildren + 1) / 2 - rnchildren; if (n > nchildren - path[level].bp_index) { /* move insert point */ n--; move = 1; } nilfs_btree_node_move_right(node, right, n, ncblk, ncblk); if (!buffer_dirty(path[level].bp_bh)) mark_buffer_dirty(path[level].bp_bh); if (!buffer_dirty(path[level].bp_sib_bh)) mark_buffer_dirty(path[level].bp_sib_bh); path[level + 1].bp_index++; nilfs_btree_promote_key(btree, path, level + 1, nilfs_btree_node_get_key(right, 0)); path[level + 1].bp_index--; if (move) { brelse(path[level].bp_bh); path[level].bp_bh = path[level].bp_sib_bh; path[level].bp_sib_bh = NULL; path[level].bp_index -= nilfs_btree_node_get_nchildren(node); path[level + 1].bp_index++; } else { brelse(path[level].bp_sib_bh); path[level].bp_sib_bh = NULL; } nilfs_btree_do_insert(btree, path, level, keyp, ptrp); } static void nilfs_btree_split(struct 
nilfs_bmap *btree, struct nilfs_btree_path *path, int level, __u64 *keyp, __u64 *ptrp) { struct nilfs_btree_node *node, *right; int nchildren, n, move, ncblk; node = nilfs_btree_get_nonroot_node(path, level); right = nilfs_btree_get_sib_node(path, level); nchildren = nilfs_btree_node_get_nchildren(node); ncblk = nilfs_btree_nchildren_per_block(btree); move = 0; n = (nchildren + 1) / 2; if (n > nchildren - path[level].bp_index) { n--; move = 1; } nilfs_btree_node_move_right(node, right, n, ncblk, ncblk); if (!buffer_dirty(path[level].bp_bh)) mark_buffer_dirty(path[level].bp_bh); if (!buffer_dirty(path[level].bp_sib_bh)) mark_buffer_dirty(path[level].bp_sib_bh); if (move) { path[level].bp_index -= nilfs_btree_node_get_nchildren(node); nilfs_btree_node_insert(right, path[level].bp_index, *keyp, *ptrp, ncblk); *keyp = nilfs_btree_node_get_key(right, 0); *ptrp = path[level].bp_newreq.bpr_ptr; brelse(path[level].bp_bh); path[level].bp_bh = path[level].bp_sib_bh; path[level].bp_sib_bh = NULL; } else { nilfs_btree_do_insert(btree, path, level, keyp, ptrp); *keyp = nilfs_btree_node_get_key(right, 0); *ptrp = path[level].bp_newreq.bpr_ptr; brelse(path[level].bp_sib_bh); path[level].bp_sib_bh = NULL; } path[level + 1].bp_index++; } static void nilfs_btree_grow(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, __u64 *keyp, __u64 *ptrp) { struct nilfs_btree_node *root, *child; int n, ncblk; root = nilfs_btree_get_root(btree); child = nilfs_btree_get_sib_node(path, level); ncblk = nilfs_btree_nchildren_per_block(btree); n = nilfs_btree_node_get_nchildren(root); nilfs_btree_node_move_right(root, child, n, NILFS_BTREE_ROOT_NCHILDREN_MAX, ncblk); nilfs_btree_node_set_level(root, level + 1); if (!buffer_dirty(path[level].bp_sib_bh)) mark_buffer_dirty(path[level].bp_sib_bh); path[level].bp_bh = path[level].bp_sib_bh; path[level].bp_sib_bh = NULL; nilfs_btree_do_insert(btree, path, level, keyp, ptrp); *keyp = nilfs_btree_node_get_key(child, 0); *ptrp = path[level].bp_newreq.bpr_ptr; } static __u64 nilfs_btree_find_near(const struct nilfs_bmap *btree, const struct nilfs_btree_path *path) { struct nilfs_btree_node *node; int level, ncmax; if (path == NULL) return NILFS_BMAP_INVALID_PTR; /* left sibling */ level = NILFS_BTREE_LEVEL_NODE_MIN; if (path[level].bp_index > 0) { node = nilfs_btree_get_node(btree, path, level, &ncmax); return nilfs_btree_node_get_ptr(node, path[level].bp_index - 1, ncmax); } /* parent */ level = NILFS_BTREE_LEVEL_NODE_MIN + 1; if (level <= nilfs_btree_height(btree) - 1) { node = nilfs_btree_get_node(btree, path, level, &ncmax); return nilfs_btree_node_get_ptr(node, path[level].bp_index, ncmax); } return NILFS_BMAP_INVALID_PTR; } static __u64 nilfs_btree_find_target_v(const struct nilfs_bmap *btree, const struct nilfs_btree_path *path, __u64 key) { __u64 ptr; ptr = nilfs_bmap_find_target_seq(btree, key); if (ptr != NILFS_BMAP_INVALID_PTR) /* sequential access */ return ptr; ptr = nilfs_btree_find_near(btree, path); if (ptr != NILFS_BMAP_INVALID_PTR) /* near */ return ptr; /* block group */ return nilfs_bmap_find_target_in_group(btree); } static int nilfs_btree_prepare_insert(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int *levelp, __u64 key, __u64 ptr, struct nilfs_bmap_stats *stats) { struct buffer_head *bh; struct nilfs_btree_node *node, *parent, *sib; __u64 sibptr; int pindex, level, ncmax, ncblk, ret; struct inode *dat = NULL; stats->bs_nblocks = 0; level = NILFS_BTREE_LEVEL_DATA; /* allocate a new ptr for data block */ if (NILFS_BMAP_USE_VBN(btree)) { 
path[level].bp_newreq.bpr_ptr = nilfs_btree_find_target_v(btree, path, key); dat = nilfs_bmap_get_dat(btree); } ret = nilfs_bmap_prepare_alloc_ptr(btree, &path[level].bp_newreq, dat); if (ret < 0) goto err_out_data; ncblk = nilfs_btree_nchildren_per_block(btree); for (level = NILFS_BTREE_LEVEL_NODE_MIN; level < nilfs_btree_height(btree) - 1; level++) { node = nilfs_btree_get_nonroot_node(path, level); if (nilfs_btree_node_get_nchildren(node) < ncblk) { path[level].bp_op = nilfs_btree_do_insert; stats->bs_nblocks++; goto out; } parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax); pindex = path[level + 1].bp_index; /* left sibling */ if (pindex > 0) { sibptr = nilfs_btree_node_get_ptr(parent, pindex - 1, ncmax); ret = nilfs_btree_get_block(btree, sibptr, &bh); if (ret < 0) goto err_out_child_node; sib = (struct nilfs_btree_node *)bh->b_data; if (nilfs_btree_node_get_nchildren(sib) < ncblk) { path[level].bp_sib_bh = bh; path[level].bp_op = nilfs_btree_carry_left; stats->bs_nblocks++; goto out; } else { brelse(bh); } } /* right sibling */ if (pindex < nilfs_btree_node_get_nchildren(parent) - 1) { sibptr = nilfs_btree_node_get_ptr(parent, pindex + 1, ncmax); ret = nilfs_btree_get_block(btree, sibptr, &bh); if (ret < 0) goto err_out_child_node; sib = (struct nilfs_btree_node *)bh->b_data; if (nilfs_btree_node_get_nchildren(sib) < ncblk) { path[level].bp_sib_bh = bh; path[level].bp_op = nilfs_btree_carry_right; stats->bs_nblocks++; goto out; } else { brelse(bh); } } /* split */ path[level].bp_newreq.bpr_ptr = path[level - 1].bp_newreq.bpr_ptr + 1; ret = nilfs_bmap_prepare_alloc_ptr(btree, &path[level].bp_newreq, dat); if (ret < 0) goto err_out_child_node; ret = nilfs_btree_get_new_block(btree, path[level].bp_newreq.bpr_ptr, &bh); if (ret < 0) goto err_out_curr_node; stats->bs_nblocks++; sib = (struct nilfs_btree_node *)bh->b_data; nilfs_btree_node_init(sib, 0, level, 0, ncblk, NULL, NULL); path[level].bp_sib_bh = bh; path[level].bp_op = nilfs_btree_split; } /* root */ node = nilfs_btree_get_root(btree); if (nilfs_btree_node_get_nchildren(node) < NILFS_BTREE_ROOT_NCHILDREN_MAX) { path[level].bp_op = nilfs_btree_do_insert; stats->bs_nblocks++; goto out; } /* grow */ path[level].bp_newreq.bpr_ptr = path[level - 1].bp_newreq.bpr_ptr + 1; ret = nilfs_bmap_prepare_alloc_ptr(btree, &path[level].bp_newreq, dat); if (ret < 0) goto err_out_child_node; ret = nilfs_btree_get_new_block(btree, path[level].bp_newreq.bpr_ptr, &bh); if (ret < 0) goto err_out_curr_node; nilfs_btree_node_init((struct nilfs_btree_node *)bh->b_data, 0, level, 0, ncblk, NULL, NULL); path[level].bp_sib_bh = bh; path[level].bp_op = nilfs_btree_grow; level++; path[level].bp_op = nilfs_btree_do_insert; /* a newly-created node block and a data block are added */ stats->bs_nblocks += 2; /* success */ out: *levelp = level; return ret; /* error */ err_out_curr_node: nilfs_bmap_abort_alloc_ptr(btree, &path[level].bp_newreq, dat); err_out_child_node: for (level--; level > NILFS_BTREE_LEVEL_DATA; level--) { nilfs_btnode_delete(path[level].bp_sib_bh); nilfs_bmap_abort_alloc_ptr(btree, &path[level].bp_newreq, dat); } nilfs_bmap_abort_alloc_ptr(btree, &path[level].bp_newreq, dat); err_out_data: *levelp = level; stats->bs_nblocks = 0; return ret; } static void nilfs_btree_commit_insert(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int maxlevel, __u64 key, __u64 ptr) { struct inode *dat = NULL; int level; set_buffer_nilfs_volatile((struct buffer_head *)((unsigned long)ptr)); ptr = path[NILFS_BTREE_LEVEL_DATA].bp_newreq.bpr_ptr; 
if (NILFS_BMAP_USE_VBN(btree)) { nilfs_bmap_set_target_v(btree, key, ptr); dat = nilfs_bmap_get_dat(btree); } for (level = NILFS_BTREE_LEVEL_NODE_MIN; level <= maxlevel; level++) { nilfs_bmap_commit_alloc_ptr(btree, &path[level - 1].bp_newreq, dat); path[level].bp_op(btree, path, level, &key, &ptr); } if (!nilfs_bmap_dirty(btree)) nilfs_bmap_set_dirty(btree); } static int nilfs_btree_insert(struct nilfs_bmap *btree, __u64 key, __u64 ptr) { struct nilfs_btree_path *path; struct nilfs_bmap_stats stats; int level, ret; path = nilfs_btree_alloc_path(); if (path == NULL) return -ENOMEM; ret = nilfs_btree_do_lookup(btree, path, key, NULL, NILFS_BTREE_LEVEL_NODE_MIN, 0); if (ret != -ENOENT) { if (ret == 0) ret = -EEXIST; goto out; } ret = nilfs_btree_prepare_insert(btree, path, &level, key, ptr, &stats); if (ret < 0) goto out; nilfs_btree_commit_insert(btree, path, level, key, ptr); nilfs_inode_add_blocks(btree->b_inode, stats.bs_nblocks); out: nilfs_btree_free_path(path); return ret; } static void nilfs_btree_do_delete(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, __u64 *keyp, __u64 *ptrp) { struct nilfs_btree_node *node; int ncblk; if (level < nilfs_btree_height(btree) - 1) { node = nilfs_btree_get_nonroot_node(path, level); ncblk = nilfs_btree_nchildren_per_block(btree); nilfs_btree_node_delete(node, path[level].bp_index, keyp, ptrp, ncblk); if (!buffer_dirty(path[level].bp_bh)) mark_buffer_dirty(path[level].bp_bh); if (path[level].bp_index == 0) nilfs_btree_promote_key(btree, path, level + 1, nilfs_btree_node_get_key(node, 0)); } else { node = nilfs_btree_get_root(btree); nilfs_btree_node_delete(node, path[level].bp_index, keyp, ptrp, NILFS_BTREE_ROOT_NCHILDREN_MAX); } } static void nilfs_btree_borrow_left(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, __u64 *keyp, __u64 *ptrp) { struct nilfs_btree_node *node, *left; int nchildren, lnchildren, n, ncblk; nilfs_btree_do_delete(btree, path, level, keyp, ptrp); node = nilfs_btree_get_nonroot_node(path, level); left = nilfs_btree_get_sib_node(path, level); nchildren = nilfs_btree_node_get_nchildren(node); lnchildren = nilfs_btree_node_get_nchildren(left); ncblk = nilfs_btree_nchildren_per_block(btree); n = (nchildren + lnchildren) / 2 - nchildren; nilfs_btree_node_move_right(left, node, n, ncblk, ncblk); if (!buffer_dirty(path[level].bp_bh)) mark_buffer_dirty(path[level].bp_bh); if (!buffer_dirty(path[level].bp_sib_bh)) mark_buffer_dirty(path[level].bp_sib_bh); nilfs_btree_promote_key(btree, path, level + 1, nilfs_btree_node_get_key(node, 0)); brelse(path[level].bp_sib_bh); path[level].bp_sib_bh = NULL; path[level].bp_index += n; } static void nilfs_btree_borrow_right(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, __u64 *keyp, __u64 *ptrp) { struct nilfs_btree_node *node, *right; int nchildren, rnchildren, n, ncblk; nilfs_btree_do_delete(btree, path, level, keyp, ptrp); node = nilfs_btree_get_nonroot_node(path, level); right = nilfs_btree_get_sib_node(path, level); nchildren = nilfs_btree_node_get_nchildren(node); rnchildren = nilfs_btree_node_get_nchildren(right); ncblk = nilfs_btree_nchildren_per_block(btree); n = (nchildren + rnchildren) / 2 - nchildren; nilfs_btree_node_move_left(node, right, n, ncblk, ncblk); if (!buffer_dirty(path[level].bp_bh)) mark_buffer_dirty(path[level].bp_bh); if (!buffer_dirty(path[level].bp_sib_bh)) mark_buffer_dirty(path[level].bp_sib_bh); path[level + 1].bp_index++; nilfs_btree_promote_key(btree, path, level + 1, nilfs_btree_node_get_key(right, 0)); 
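	/*
	 * The parent index was bumped above only so that the right
	 * sibling's new first key is the one promoted; it is restored
	 * immediately below.
	 */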
path[level + 1].bp_index--; brelse(path[level].bp_sib_bh); path[level].bp_sib_bh = NULL; } static void nilfs_btree_concat_left(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, __u64 *keyp, __u64 *ptrp) { struct nilfs_btree_node *node, *left; int n, ncblk; nilfs_btree_do_delete(btree, path, level, keyp, ptrp); node = nilfs_btree_get_nonroot_node(path, level); left = nilfs_btree_get_sib_node(path, level); ncblk = nilfs_btree_nchildren_per_block(btree); n = nilfs_btree_node_get_nchildren(node); nilfs_btree_node_move_left(left, node, n, ncblk, ncblk); if (!buffer_dirty(path[level].bp_sib_bh)) mark_buffer_dirty(path[level].bp_sib_bh); nilfs_btnode_delete(path[level].bp_bh); path[level].bp_bh = path[level].bp_sib_bh; path[level].bp_sib_bh = NULL; path[level].bp_index += nilfs_btree_node_get_nchildren(left); } static void nilfs_btree_concat_right(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, __u64 *keyp, __u64 *ptrp) { struct nilfs_btree_node *node, *right; int n, ncblk; nilfs_btree_do_delete(btree, path, level, keyp, ptrp); node = nilfs_btree_get_nonroot_node(path, level); right = nilfs_btree_get_sib_node(path, level); ncblk = nilfs_btree_nchildren_per_block(btree); n = nilfs_btree_node_get_nchildren(right); nilfs_btree_node_move_left(node, right, n, ncblk, ncblk); if (!buffer_dirty(path[level].bp_bh)) mark_buffer_dirty(path[level].bp_bh); nilfs_btnode_delete(path[level].bp_sib_bh); path[level].bp_sib_bh = NULL; path[level + 1].bp_index++; } static void nilfs_btree_shrink(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, __u64 *keyp, __u64 *ptrp) { struct nilfs_btree_node *root, *child; int n, ncblk; nilfs_btree_do_delete(btree, path, level, keyp, ptrp); root = nilfs_btree_get_root(btree); child = nilfs_btree_get_nonroot_node(path, level); ncblk = nilfs_btree_nchildren_per_block(btree); nilfs_btree_node_delete(root, 0, NULL, NULL, NILFS_BTREE_ROOT_NCHILDREN_MAX); nilfs_btree_node_set_level(root, level); n = nilfs_btree_node_get_nchildren(child); nilfs_btree_node_move_left(root, child, n, NILFS_BTREE_ROOT_NCHILDREN_MAX, ncblk); nilfs_btnode_delete(path[level].bp_bh); path[level].bp_bh = NULL; } static void nilfs_btree_nop(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, __u64 *keyp, __u64 *ptrp) { } static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int *levelp, struct nilfs_bmap_stats *stats, struct inode *dat) { struct buffer_head *bh; struct nilfs_btree_node *node, *parent, *sib; __u64 sibptr; int pindex, dindex, level, ncmin, ncmax, ncblk, ret; ret = 0; stats->bs_nblocks = 0; ncmin = NILFS_BTREE_NODE_NCHILDREN_MIN(nilfs_btree_node_size(btree)); ncblk = nilfs_btree_nchildren_per_block(btree); for (level = NILFS_BTREE_LEVEL_NODE_MIN, dindex = path[level].bp_index; level < nilfs_btree_height(btree) - 1; level++) { node = nilfs_btree_get_nonroot_node(path, level); path[level].bp_oldreq.bpr_ptr = nilfs_btree_node_get_ptr(node, dindex, ncblk); ret = nilfs_bmap_prepare_end_ptr(btree, &path[level].bp_oldreq, dat); if (ret < 0) goto err_out_child_node; if (nilfs_btree_node_get_nchildren(node) > ncmin) { path[level].bp_op = nilfs_btree_do_delete; stats->bs_nblocks++; goto out; } parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax); pindex = path[level + 1].bp_index; dindex = pindex; if (pindex > 0) { /* left sibling */ sibptr = nilfs_btree_node_get_ptr(parent, pindex - 1, ncmax); ret = nilfs_btree_get_block(btree, sibptr, &bh); if (ret < 0) goto err_out_curr_node; sib = 
(struct nilfs_btree_node *)bh->b_data; if (nilfs_btree_node_get_nchildren(sib) > ncmin) { path[level].bp_sib_bh = bh; path[level].bp_op = nilfs_btree_borrow_left; stats->bs_nblocks++; goto out; } else { path[level].bp_sib_bh = bh; path[level].bp_op = nilfs_btree_concat_left; stats->bs_nblocks++; /* continue; */ } } else if (pindex < nilfs_btree_node_get_nchildren(parent) - 1) { /* right sibling */ sibptr = nilfs_btree_node_get_ptr(parent, pindex + 1, ncmax); ret = nilfs_btree_get_block(btree, sibptr, &bh); if (ret < 0) goto err_out_curr_node; sib = (struct nilfs_btree_node *)bh->b_data; if (nilfs_btree_node_get_nchildren(sib) > ncmin) { path[level].bp_sib_bh = bh; path[level].bp_op = nilfs_btree_borrow_right; stats->bs_nblocks++; goto out; } else { path[level].bp_sib_bh = bh; path[level].bp_op = nilfs_btree_concat_right; stats->bs_nblocks++; /* * When merging right sibling node * into the current node, pointer to * the right sibling node must be * terminated instead. The adjustment * below is required for that. */ dindex = pindex + 1; /* continue; */ } } else { /* no siblings */ /* the only child of the root node */ WARN_ON(level != nilfs_btree_height(btree) - 2); if (nilfs_btree_node_get_nchildren(node) - 1 <= NILFS_BTREE_ROOT_NCHILDREN_MAX) { path[level].bp_op = nilfs_btree_shrink; stats->bs_nblocks += 2; level++; path[level].bp_op = nilfs_btree_nop; goto shrink_root_child; } else { path[level].bp_op = nilfs_btree_do_delete; stats->bs_nblocks++; goto out; } } } /* child of the root node is deleted */ path[level].bp_op = nilfs_btree_do_delete; stats->bs_nblocks++; shrink_root_child: node = nilfs_btree_get_root(btree); path[level].bp_oldreq.bpr_ptr = nilfs_btree_node_get_ptr(node, dindex, NILFS_BTREE_ROOT_NCHILDREN_MAX); ret = nilfs_bmap_prepare_end_ptr(btree, &path[level].bp_oldreq, dat); if (ret < 0) goto err_out_child_node; /* success */ out: *levelp = level; return ret; /* error */ err_out_curr_node: nilfs_bmap_abort_end_ptr(btree, &path[level].bp_oldreq, dat); err_out_child_node: for (level--; level >= NILFS_BTREE_LEVEL_NODE_MIN; level--) { brelse(path[level].bp_sib_bh); nilfs_bmap_abort_end_ptr(btree, &path[level].bp_oldreq, dat); } *levelp = level; stats->bs_nblocks = 0; return ret; } static void nilfs_btree_commit_delete(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int maxlevel, struct inode *dat) { int level; for (level = NILFS_BTREE_LEVEL_NODE_MIN; level <= maxlevel; level++) { nilfs_bmap_commit_end_ptr(btree, &path[level].bp_oldreq, dat); path[level].bp_op(btree, path, level, NULL, NULL); } if (!nilfs_bmap_dirty(btree)) nilfs_bmap_set_dirty(btree); } static int nilfs_btree_delete(struct nilfs_bmap *btree, __u64 key) { struct nilfs_btree_path *path; struct nilfs_bmap_stats stats; struct inode *dat; int level, ret; path = nilfs_btree_alloc_path(); if (path == NULL) return -ENOMEM; ret = nilfs_btree_do_lookup(btree, path, key, NULL, NILFS_BTREE_LEVEL_NODE_MIN, 0); if (ret < 0) goto out; dat = NILFS_BMAP_USE_VBN(btree) ? 
nilfs_bmap_get_dat(btree) : NULL; ret = nilfs_btree_prepare_delete(btree, path, &level, &stats, dat); if (ret < 0) goto out; nilfs_btree_commit_delete(btree, path, level, dat); nilfs_inode_sub_blocks(btree->b_inode, stats.bs_nblocks); out: nilfs_btree_free_path(path); return ret; } static int nilfs_btree_seek_key(const struct nilfs_bmap *btree, __u64 start, __u64 *keyp) { struct nilfs_btree_path *path; const int minlevel = NILFS_BTREE_LEVEL_NODE_MIN; int ret; path = nilfs_btree_alloc_path(); if (!path) return -ENOMEM; ret = nilfs_btree_do_lookup(btree, path, start, NULL, minlevel, 0); if (!ret) *keyp = start; else if (ret == -ENOENT) ret = nilfs_btree_get_next_key(btree, path, minlevel, keyp); nilfs_btree_free_path(path); return ret; } static int nilfs_btree_last_key(const struct nilfs_bmap *btree, __u64 *keyp) { struct nilfs_btree_path *path; int ret; path = nilfs_btree_alloc_path(); if (path == NULL) return -ENOMEM; ret = nilfs_btree_do_lookup_last(btree, path, keyp, NULL); nilfs_btree_free_path(path); return ret; } static int nilfs_btree_check_delete(struct nilfs_bmap *btree, __u64 key) { struct buffer_head *bh; struct nilfs_btree_node *root, *node; __u64 maxkey, nextmaxkey; __u64 ptr; int nchildren, ret; root = nilfs_btree_get_root(btree); switch (nilfs_btree_height(btree)) { case 2: bh = NULL; node = root; break; case 3: nchildren = nilfs_btree_node_get_nchildren(root); if (nchildren > 1) return 0; ptr = nilfs_btree_node_get_ptr(root, nchildren - 1, NILFS_BTREE_ROOT_NCHILDREN_MAX); ret = nilfs_btree_get_block(btree, ptr, &bh); if (ret < 0) return ret; node = (struct nilfs_btree_node *)bh->b_data; break; default: return 0; } nchildren = nilfs_btree_node_get_nchildren(node); maxkey = nilfs_btree_node_get_key(node, nchildren - 1); nextmaxkey = (nchildren > 1) ? 
nilfs_btree_node_get_key(node, nchildren - 2) : 0; brelse(bh); return (maxkey == key) && (nextmaxkey < NILFS_BMAP_LARGE_LOW); } static int nilfs_btree_gather_data(struct nilfs_bmap *btree, __u64 *keys, __u64 *ptrs, int nitems) { struct buffer_head *bh; struct nilfs_btree_node *node, *root; __le64 *dkeys; __le64 *dptrs; __u64 ptr; int nchildren, ncmax, i, ret; root = nilfs_btree_get_root(btree); switch (nilfs_btree_height(btree)) { case 2: bh = NULL; node = root; ncmax = NILFS_BTREE_ROOT_NCHILDREN_MAX; break; case 3: nchildren = nilfs_btree_node_get_nchildren(root); WARN_ON(nchildren > 1); ptr = nilfs_btree_node_get_ptr(root, nchildren - 1, NILFS_BTREE_ROOT_NCHILDREN_MAX); ret = nilfs_btree_get_block(btree, ptr, &bh); if (ret < 0) return ret; node = (struct nilfs_btree_node *)bh->b_data; ncmax = nilfs_btree_nchildren_per_block(btree); break; default: node = NULL; return -EINVAL; } nchildren = nilfs_btree_node_get_nchildren(node); if (nchildren < nitems) nitems = nchildren; dkeys = nilfs_btree_node_dkeys(node); dptrs = nilfs_btree_node_dptrs(node, ncmax); for (i = 0; i < nitems; i++) { keys[i] = le64_to_cpu(dkeys[i]); ptrs[i] = le64_to_cpu(dptrs[i]); } brelse(bh); return nitems; } static int nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *btree, __u64 key, union nilfs_bmap_ptr_req *dreq, union nilfs_bmap_ptr_req *nreq, struct buffer_head **bhp, struct nilfs_bmap_stats *stats) { struct buffer_head *bh; struct inode *dat = NULL; int ret; stats->bs_nblocks = 0; /* for data */ /* cannot find near ptr */ if (NILFS_BMAP_USE_VBN(btree)) { dreq->bpr_ptr = nilfs_btree_find_target_v(btree, NULL, key); dat = nilfs_bmap_get_dat(btree); } ret = nilfs_attach_btree_node_cache(&NILFS_BMAP_I(btree)->vfs_inode); if (ret < 0) return ret; ret = nilfs_bmap_prepare_alloc_ptr(btree, dreq, dat); if (ret < 0) return ret; *bhp = NULL; stats->bs_nblocks++; if (nreq != NULL) { nreq->bpr_ptr = dreq->bpr_ptr + 1; ret = nilfs_bmap_prepare_alloc_ptr(btree, nreq, dat); if (ret < 0) goto err_out_dreq; ret = nilfs_btree_get_new_block(btree, nreq->bpr_ptr, &bh); if (ret < 0) goto err_out_nreq; *bhp = bh; stats->bs_nblocks++; } /* success */ return 0; /* error */ err_out_nreq: nilfs_bmap_abort_alloc_ptr(btree, nreq, dat); err_out_dreq: nilfs_bmap_abort_alloc_ptr(btree, dreq, dat); stats->bs_nblocks = 0; return ret; } static void nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *btree, __u64 key, __u64 ptr, const __u64 *keys, const __u64 *ptrs, int n, union nilfs_bmap_ptr_req *dreq, union nilfs_bmap_ptr_req *nreq, struct buffer_head *bh) { struct nilfs_btree_node *node; struct inode *dat; __u64 tmpptr; int ncblk; /* free resources */ if (btree->b_ops->bop_clear != NULL) btree->b_ops->bop_clear(btree); /* ptr must be a pointer to a buffer head. */ set_buffer_nilfs_volatile((struct buffer_head *)((unsigned long)ptr)); /* convert and insert */ dat = NILFS_BMAP_USE_VBN(btree) ? 
nilfs_bmap_get_dat(btree) : NULL; __nilfs_btree_init(btree); if (nreq != NULL) { nilfs_bmap_commit_alloc_ptr(btree, dreq, dat); nilfs_bmap_commit_alloc_ptr(btree, nreq, dat); /* create child node at level 1 */ node = (struct nilfs_btree_node *)bh->b_data; ncblk = nilfs_btree_nchildren_per_block(btree); nilfs_btree_node_init(node, 0, 1, n, ncblk, keys, ptrs); nilfs_btree_node_insert(node, n, key, dreq->bpr_ptr, ncblk); if (!buffer_dirty(bh)) mark_buffer_dirty(bh); if (!nilfs_bmap_dirty(btree)) nilfs_bmap_set_dirty(btree); brelse(bh); /* create root node at level 2 */ node = nilfs_btree_get_root(btree); tmpptr = nreq->bpr_ptr; nilfs_btree_node_init(node, NILFS_BTREE_NODE_ROOT, 2, 1, NILFS_BTREE_ROOT_NCHILDREN_MAX, &keys[0], &tmpptr); } else { nilfs_bmap_commit_alloc_ptr(btree, dreq, dat); /* create root node at level 1 */ node = nilfs_btree_get_root(btree); nilfs_btree_node_init(node, NILFS_BTREE_NODE_ROOT, 1, n, NILFS_BTREE_ROOT_NCHILDREN_MAX, keys, ptrs); nilfs_btree_node_insert(node, n, key, dreq->bpr_ptr, NILFS_BTREE_ROOT_NCHILDREN_MAX); if (!nilfs_bmap_dirty(btree)) nilfs_bmap_set_dirty(btree); } if (NILFS_BMAP_USE_VBN(btree)) nilfs_bmap_set_target_v(btree, key, dreq->bpr_ptr); } /** * nilfs_btree_convert_and_insert - * @bmap: * @key: * @ptr: * @keys: * @ptrs: * @n: */ int nilfs_btree_convert_and_insert(struct nilfs_bmap *btree, __u64 key, __u64 ptr, const __u64 *keys, const __u64 *ptrs, int n) { struct buffer_head *bh = NULL; union nilfs_bmap_ptr_req dreq, nreq, *di, *ni; struct nilfs_bmap_stats stats; int ret; if (n + 1 <= NILFS_BTREE_ROOT_NCHILDREN_MAX) { di = &dreq; ni = NULL; } else if ((n + 1) <= NILFS_BTREE_NODE_NCHILDREN_MAX( nilfs_btree_node_size(btree))) { di = &dreq; ni = &nreq; } else { di = NULL; ni = NULL; BUG(); } ret = nilfs_btree_prepare_convert_and_insert(btree, key, di, ni, &bh, &stats); if (ret < 0) return ret; nilfs_btree_commit_convert_and_insert(btree, key, ptr, keys, ptrs, n, di, ni, bh); nilfs_inode_add_blocks(btree->b_inode, stats.bs_nblocks); return 0; } static int nilfs_btree_propagate_p(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, struct buffer_head *bh) { while ((++level < nilfs_btree_height(btree) - 1) && !buffer_dirty(path[level].bp_bh)) mark_buffer_dirty(path[level].bp_bh); return 0; } static int nilfs_btree_prepare_update_v(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, struct inode *dat) { struct nilfs_btree_node *parent; int ncmax, ret; parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax); path[level].bp_oldreq.bpr_ptr = nilfs_btree_node_get_ptr(parent, path[level + 1].bp_index, ncmax); path[level].bp_newreq.bpr_ptr = path[level].bp_oldreq.bpr_ptr + 1; ret = nilfs_dat_prepare_update(dat, &path[level].bp_oldreq.bpr_req, &path[level].bp_newreq.bpr_req); if (ret < 0) return ret; if (buffer_nilfs_node(path[level].bp_bh)) { path[level].bp_ctxt.oldkey = path[level].bp_oldreq.bpr_ptr; path[level].bp_ctxt.newkey = path[level].bp_newreq.bpr_ptr; path[level].bp_ctxt.bh = path[level].bp_bh; ret = nilfs_btnode_prepare_change_key( NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping, &path[level].bp_ctxt); if (ret < 0) { nilfs_dat_abort_update(dat, &path[level].bp_oldreq.bpr_req, &path[level].bp_newreq.bpr_req); return ret; } } return 0; } static void nilfs_btree_commit_update_v(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, struct inode *dat) { struct nilfs_btree_node *parent; int ncmax; nilfs_dat_commit_update(dat, &path[level].bp_oldreq.bpr_req, &path[level].bp_newreq.bpr_req, btree->b_ptr_type 
== NILFS_BMAP_PTR_VS); if (buffer_nilfs_node(path[level].bp_bh)) { nilfs_btnode_commit_change_key( NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping, &path[level].bp_ctxt); path[level].bp_bh = path[level].bp_ctxt.bh; } set_buffer_nilfs_volatile(path[level].bp_bh); parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax); nilfs_btree_node_set_ptr(parent, path[level + 1].bp_index, path[level].bp_newreq.bpr_ptr, ncmax); } static void nilfs_btree_abort_update_v(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, struct inode *dat) { nilfs_dat_abort_update(dat, &path[level].bp_oldreq.bpr_req, &path[level].bp_newreq.bpr_req); if (buffer_nilfs_node(path[level].bp_bh)) nilfs_btnode_abort_change_key( NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping, &path[level].bp_ctxt); } static int nilfs_btree_prepare_propagate_v(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int minlevel, int *maxlevelp, struct inode *dat) { int level, ret; level = minlevel; if (!buffer_nilfs_volatile(path[level].bp_bh)) { ret = nilfs_btree_prepare_update_v(btree, path, level, dat); if (ret < 0) return ret; } while ((++level < nilfs_btree_height(btree) - 1) && !buffer_dirty(path[level].bp_bh)) { WARN_ON(buffer_nilfs_volatile(path[level].bp_bh)); ret = nilfs_btree_prepare_update_v(btree, path, level, dat); if (ret < 0) goto out; } /* success */ *maxlevelp = level - 1; return 0; /* error */ out: while (--level > minlevel) nilfs_btree_abort_update_v(btree, path, level, dat); if (!buffer_nilfs_volatile(path[level].bp_bh)) nilfs_btree_abort_update_v(btree, path, level, dat); return ret; } static void nilfs_btree_commit_propagate_v(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int minlevel, int maxlevel, struct buffer_head *bh, struct inode *dat) { int level; if (!buffer_nilfs_volatile(path[minlevel].bp_bh)) nilfs_btree_commit_update_v(btree, path, minlevel, dat); for (level = minlevel + 1; level <= maxlevel; level++) nilfs_btree_commit_update_v(btree, path, level, dat); } static int nilfs_btree_propagate_v(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, struct buffer_head *bh) { int maxlevel = 0, ret; struct nilfs_btree_node *parent; struct inode *dat = nilfs_bmap_get_dat(btree); __u64 ptr; int ncmax; get_bh(bh); path[level].bp_bh = bh; ret = nilfs_btree_prepare_propagate_v(btree, path, level, &maxlevel, dat); if (ret < 0) goto out; if (buffer_nilfs_volatile(path[level].bp_bh)) { parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax); ptr = nilfs_btree_node_get_ptr(parent, path[level + 1].bp_index, ncmax); ret = nilfs_dat_mark_dirty(dat, ptr); if (ret < 0) goto out; } nilfs_btree_commit_propagate_v(btree, path, level, maxlevel, bh, dat); out: brelse(path[level].bp_bh); path[level].bp_bh = NULL; return ret; } static int nilfs_btree_propagate(struct nilfs_bmap *btree, struct buffer_head *bh) { struct nilfs_btree_path *path; struct nilfs_btree_node *node; __u64 key; int level, ret; WARN_ON(!buffer_dirty(bh)); path = nilfs_btree_alloc_path(); if (path == NULL) return -ENOMEM; if (buffer_nilfs_node(bh)) { node = (struct nilfs_btree_node *)bh->b_data; key = nilfs_btree_node_get_key(node, 0); level = nilfs_btree_node_get_level(node); } else { key = nilfs_bmap_data_get_key(btree, bh); level = NILFS_BTREE_LEVEL_DATA; } ret = nilfs_btree_do_lookup(btree, path, key, NULL, level + 1, 0); if (ret < 0) { if (unlikely(ret == -ENOENT)) nilfs_crit(btree->b_inode->i_sb, "writing node/leaf block does not appear in b-tree (ino=%lu) at key=%llu, level=%d", btree->b_inode->i_ino, (unsigned 
long long)key, level); goto out; } ret = NILFS_BMAP_USE_VBN(btree) ? nilfs_btree_propagate_v(btree, path, level, bh) : nilfs_btree_propagate_p(btree, path, level, bh); out: nilfs_btree_free_path(path); return ret; } static int nilfs_btree_propagate_gc(struct nilfs_bmap *btree, struct buffer_head *bh) { return nilfs_dat_mark_dirty(nilfs_bmap_get_dat(btree), bh->b_blocknr); } static void nilfs_btree_add_dirty_buffer(struct nilfs_bmap *btree, struct list_head *lists, struct buffer_head *bh) { struct list_head *head; struct buffer_head *cbh; struct nilfs_btree_node *node, *cnode; __u64 key, ckey; int level; get_bh(bh); node = (struct nilfs_btree_node *)bh->b_data; key = nilfs_btree_node_get_key(node, 0); level = nilfs_btree_node_get_level(node); if (level < NILFS_BTREE_LEVEL_NODE_MIN || level >= NILFS_BTREE_LEVEL_MAX) { dump_stack(); nilfs_warn(btree->b_inode->i_sb, "invalid btree level: %d (key=%llu, ino=%lu, blocknr=%llu)", level, (unsigned long long)key, btree->b_inode->i_ino, (unsigned long long)bh->b_blocknr); return; } list_for_each(head, &lists[level]) { cbh = list_entry(head, struct buffer_head, b_assoc_buffers); cnode = (struct nilfs_btree_node *)cbh->b_data; ckey = nilfs_btree_node_get_key(cnode, 0); if (key < ckey) break; } list_add_tail(&bh->b_assoc_buffers, head); } static void nilfs_btree_lookup_dirty_buffers(struct nilfs_bmap *btree, struct list_head *listp) { struct inode *btnc_inode = NILFS_BMAP_I(btree)->i_assoc_inode; struct address_space *btcache = btnc_inode->i_mapping; struct list_head lists[NILFS_BTREE_LEVEL_MAX]; struct folio_batch fbatch; struct buffer_head *bh, *head; pgoff_t index = 0; int level, i; for (level = NILFS_BTREE_LEVEL_NODE_MIN; level < NILFS_BTREE_LEVEL_MAX; level++) INIT_LIST_HEAD(&lists[level]); folio_batch_init(&fbatch); while (filemap_get_folios_tag(btcache, &index, (pgoff_t)-1, PAGECACHE_TAG_DIRTY, &fbatch)) { for (i = 0; i < folio_batch_count(&fbatch); i++) { bh = head = folio_buffers(fbatch.folios[i]); do { if (buffer_dirty(bh)) nilfs_btree_add_dirty_buffer(btree, lists, bh); } while ((bh = bh->b_this_page) != head); } folio_batch_release(&fbatch); cond_resched(); } for (level = NILFS_BTREE_LEVEL_NODE_MIN; level < NILFS_BTREE_LEVEL_MAX; level++) list_splice_tail(&lists[level], listp); } static int nilfs_btree_assign_p(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, struct buffer_head **bh, sector_t blocknr, union nilfs_binfo *binfo) { struct nilfs_btree_node *parent; __u64 key; __u64 ptr; int ncmax, ret; parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax); ptr = nilfs_btree_node_get_ptr(parent, path[level + 1].bp_index, ncmax); if (buffer_nilfs_node(*bh)) { path[level].bp_ctxt.oldkey = ptr; path[level].bp_ctxt.newkey = blocknr; path[level].bp_ctxt.bh = *bh; ret = nilfs_btnode_prepare_change_key( NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping, &path[level].bp_ctxt); if (ret < 0) return ret; nilfs_btnode_commit_change_key( NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping, &path[level].bp_ctxt); *bh = path[level].bp_ctxt.bh; } nilfs_btree_node_set_ptr(parent, path[level + 1].bp_index, blocknr, ncmax); key = nilfs_btree_node_get_key(parent, path[level + 1].bp_index); /* on-disk format */ binfo->bi_dat.bi_blkoff = cpu_to_le64(key); binfo->bi_dat.bi_level = level; memset(binfo->bi_dat.bi_pad, 0, sizeof(binfo->bi_dat.bi_pad)); return 0; } static int nilfs_btree_assign_v(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, struct buffer_head **bh, sector_t blocknr, union nilfs_binfo *binfo) { struct nilfs_btree_node 
*parent; struct inode *dat = nilfs_bmap_get_dat(btree); __u64 key; __u64 ptr; union nilfs_bmap_ptr_req req; int ncmax, ret; parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax); ptr = nilfs_btree_node_get_ptr(parent, path[level + 1].bp_index, ncmax); req.bpr_ptr = ptr; ret = nilfs_dat_prepare_start(dat, &req.bpr_req); if (ret < 0) return ret; nilfs_dat_commit_start(dat, &req.bpr_req, blocknr); key = nilfs_btree_node_get_key(parent, path[level + 1].bp_index); /* on-disk format */ binfo->bi_v.bi_vblocknr = cpu_to_le64(ptr); binfo->bi_v.bi_blkoff = cpu_to_le64(key); return 0; } static int nilfs_btree_assign(struct nilfs_bmap *btree, struct buffer_head **bh, sector_t blocknr, union nilfs_binfo *binfo) { struct nilfs_btree_path *path; struct nilfs_btree_node *node; __u64 key; int level, ret; path = nilfs_btree_alloc_path(); if (path == NULL) return -ENOMEM; if (buffer_nilfs_node(*bh)) { node = (struct nilfs_btree_node *)(*bh)->b_data; key = nilfs_btree_node_get_key(node, 0); level = nilfs_btree_node_get_level(node); } else { key = nilfs_bmap_data_get_key(btree, *bh); level = NILFS_BTREE_LEVEL_DATA; } ret = nilfs_btree_do_lookup(btree, path, key, NULL, level + 1, 0); if (ret < 0) { WARN_ON(ret == -ENOENT); goto out; } ret = NILFS_BMAP_USE_VBN(btree) ? nilfs_btree_assign_v(btree, path, level, bh, blocknr, binfo) : nilfs_btree_assign_p(btree, path, level, bh, blocknr, binfo); out: nilfs_btree_free_path(path); return ret; } static int nilfs_btree_assign_gc(struct nilfs_bmap *btree, struct buffer_head **bh, sector_t blocknr, union nilfs_binfo *binfo) { struct nilfs_btree_node *node; __u64 key; int ret; ret = nilfs_dat_move(nilfs_bmap_get_dat(btree), (*bh)->b_blocknr, blocknr); if (ret < 0) return ret; if (buffer_nilfs_node(*bh)) { node = (struct nilfs_btree_node *)(*bh)->b_data; key = nilfs_btree_node_get_key(node, 0); } else key = nilfs_bmap_data_get_key(btree, *bh); /* on-disk format */ binfo->bi_v.bi_vblocknr = cpu_to_le64((*bh)->b_blocknr); binfo->bi_v.bi_blkoff = cpu_to_le64(key); return 0; } static int nilfs_btree_mark(struct nilfs_bmap *btree, __u64 key, int level) { struct buffer_head *bh; struct nilfs_btree_path *path; __u64 ptr; int ret; path = nilfs_btree_alloc_path(); if (path == NULL) return -ENOMEM; ret = nilfs_btree_do_lookup(btree, path, key, &ptr, level + 1, 0); if (ret < 0) { WARN_ON(ret == -ENOENT); goto out; } ret = nilfs_btree_get_block(btree, ptr, &bh); if (ret < 0) { WARN_ON(ret == -ENOENT); goto out; } if (!buffer_dirty(bh)) mark_buffer_dirty(bh); brelse(bh); if (!nilfs_bmap_dirty(btree)) nilfs_bmap_set_dirty(btree); out: nilfs_btree_free_path(path); return ret; } static const struct nilfs_bmap_operations nilfs_btree_ops = { .bop_lookup = nilfs_btree_lookup, .bop_lookup_contig = nilfs_btree_lookup_contig, .bop_insert = nilfs_btree_insert, .bop_delete = nilfs_btree_delete, .bop_clear = NULL, .bop_propagate = nilfs_btree_propagate, .bop_lookup_dirty_buffers = nilfs_btree_lookup_dirty_buffers, .bop_assign = nilfs_btree_assign, .bop_mark = nilfs_btree_mark, .bop_seek_key = nilfs_btree_seek_key, .bop_last_key = nilfs_btree_last_key, .bop_check_insert = NULL, .bop_check_delete = nilfs_btree_check_delete, .bop_gather_data = nilfs_btree_gather_data, }; static const struct nilfs_bmap_operations nilfs_btree_ops_gc = { .bop_lookup = NULL, .bop_lookup_contig = NULL, .bop_insert = NULL, .bop_delete = NULL, .bop_clear = NULL, .bop_propagate = nilfs_btree_propagate_gc, .bop_lookup_dirty_buffers = nilfs_btree_lookup_dirty_buffers, .bop_assign = nilfs_btree_assign_gc, .bop_mark = 
NULL, .bop_seek_key = NULL, .bop_last_key = NULL, .bop_check_insert = NULL, .bop_check_delete = NULL, .bop_gather_data = NULL, }; static void __nilfs_btree_init(struct nilfs_bmap *bmap) { bmap->b_ops = &nilfs_btree_ops; bmap->b_nchildren_per_block = NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(bmap)); } int nilfs_btree_init(struct nilfs_bmap *bmap) { int ret = 0; __nilfs_btree_init(bmap); if (nilfs_btree_root_broken(nilfs_btree_get_root(bmap), bmap->b_inode)) ret = -EIO; else ret = nilfs_attach_btree_node_cache( &NILFS_BMAP_I(bmap)->vfs_inode); return ret; } void nilfs_btree_init_gc(struct nilfs_bmap *bmap) { bmap->b_ops = &nilfs_btree_ops_gc; bmap->b_nchildren_per_block = NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(bmap)); }
linux-master
fs/nilfs2/btree.c
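The carry/borrow helpers in btree.c above redistribute children between a node and one of its siblings so that both end up roughly half full. As a rough illustration only (a user-space toy with hypothetical names, no kernel types, buffers, or key promotion), the counting rules they apply boil down to:

/* balance_sketch.c - illustrative only; not NILFS code. */
#include <assert.h>
#include <stdio.h>

/*
 * Split used when an insert overflows a node and the right sibling has
 * room (cf. nilfs_btree_carry_right above): about half of the children
 * move right, and the new entry follows them if its index falls into
 * the moved half.
 */
static void carry_right_counts(int nchildren, int insert_index,
			       int *nmoved, int *insert_goes_right)
{
	int n = (nchildren + 1) / 2;
	int move = 0;

	if (n > nchildren - insert_index) {
		n--;
		move = 1;
	}
	*nmoved = n;
	*insert_goes_right = move;
}

/*
 * Number of entries pulled from a fuller left sibling on delete
 * (cf. nilfs_btree_borrow_left above): even the two nodes out.
 */
static int borrow_left_count(int nchildren, int left_nchildren)
{
	return (nchildren + left_nchildren) / 2 - nchildren;
}

int main(void)
{
	int nmoved, goes_right;

	carry_right_counts(7, 6, &nmoved, &goes_right);
	printf("carry right: move %d children, new entry goes %s\n",
	       nmoved, goes_right ? "right" : "left");

	assert(borrow_left_count(2, 8) == 3);
	printf("borrow left: pull %d entries\n", borrow_left_count(2, 8));
	return 0;
}

Compiled with a plain C compiler, this prints the split a carry-right style rebalance would choose for a 7-entry node whose new entry lands at index 6, and how many entries a borrow-left would pull when a 2-entry node sits next to an 8-entry left sibling.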
// SPDX-License-Identifier: GPL-2.0+ /* * NILFS recovery logic * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * * Written by Ryusuke Konishi. */ #include <linux/buffer_head.h> #include <linux/blkdev.h> #include <linux/swap.h> #include <linux/slab.h> #include <linux/crc32.h> #include "nilfs.h" #include "segment.h" #include "sufile.h" #include "page.h" #include "segbuf.h" /* * Segment check result */ enum { NILFS_SEG_VALID, NILFS_SEG_NO_SUPER_ROOT, NILFS_SEG_FAIL_IO, NILFS_SEG_FAIL_MAGIC, NILFS_SEG_FAIL_SEQ, NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT, NILFS_SEG_FAIL_CHECKSUM_FULL, NILFS_SEG_FAIL_CONSISTENCY, }; /* work structure for recovery */ struct nilfs_recovery_block { ino_t ino; /* * Inode number of the file that this block * belongs to */ sector_t blocknr; /* block number */ __u64 vblocknr; /* virtual block number */ unsigned long blkoff; /* File offset of the data block (per block) */ struct list_head list; }; static int nilfs_warn_segment_error(struct super_block *sb, int err) { const char *msg = NULL; switch (err) { case NILFS_SEG_FAIL_IO: nilfs_err(sb, "I/O error reading segment"); return -EIO; case NILFS_SEG_FAIL_MAGIC: msg = "Magic number mismatch"; break; case NILFS_SEG_FAIL_SEQ: msg = "Sequence number mismatch"; break; case NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT: msg = "Checksum error in super root"; break; case NILFS_SEG_FAIL_CHECKSUM_FULL: msg = "Checksum error in segment payload"; break; case NILFS_SEG_FAIL_CONSISTENCY: msg = "Inconsistency found"; break; case NILFS_SEG_NO_SUPER_ROOT: msg = "No super root in the last segment"; break; default: nilfs_err(sb, "unrecognized segment error %d", err); return -EINVAL; } nilfs_warn(sb, "invalid segment: %s", msg); return -EINVAL; } /** * nilfs_compute_checksum - compute checksum of blocks continuously * @nilfs: nilfs object * @bhs: buffer head of start block * @sum: place to store result * @offset: offset bytes in the first block * @check_bytes: number of bytes to be checked * @start: DBN of start block * @nblock: number of blocks to be checked */ static int nilfs_compute_checksum(struct the_nilfs *nilfs, struct buffer_head *bhs, u32 *sum, unsigned long offset, u64 check_bytes, sector_t start, unsigned long nblock) { unsigned int blocksize = nilfs->ns_blocksize; unsigned long size; u32 crc; BUG_ON(offset >= blocksize); check_bytes -= offset; size = min_t(u64, check_bytes, blocksize - offset); crc = crc32_le(nilfs->ns_crc_seed, (unsigned char *)bhs->b_data + offset, size); if (--nblock > 0) { do { struct buffer_head *bh; bh = __bread(nilfs->ns_bdev, ++start, blocksize); if (!bh) return -EIO; check_bytes -= size; size = min_t(u64, check_bytes, blocksize); crc = crc32_le(crc, bh->b_data, size); brelse(bh); } while (--nblock > 0); } *sum = crc; return 0; } /** * nilfs_read_super_root_block - read super root block * @nilfs: nilfs object * @sr_block: disk block number of the super root block * @pbh: address of a buffer_head pointer to return super root buffer * @check: CRC check flag */ int nilfs_read_super_root_block(struct the_nilfs *nilfs, sector_t sr_block, struct buffer_head **pbh, int check) { struct buffer_head *bh_sr; struct nilfs_super_root *sr; u32 crc; int ret; *pbh = NULL; bh_sr = __bread(nilfs->ns_bdev, sr_block, nilfs->ns_blocksize); if (unlikely(!bh_sr)) { ret = NILFS_SEG_FAIL_IO; goto failed; } sr = (struct nilfs_super_root *)bh_sr->b_data; if (check) { unsigned int bytes = le16_to_cpu(sr->sr_bytes); if (bytes == 0 || bytes > nilfs->ns_blocksize) { ret = NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT; goto failed_bh; } if 
(nilfs_compute_checksum( nilfs, bh_sr, &crc, sizeof(sr->sr_sum), bytes, sr_block, 1)) { ret = NILFS_SEG_FAIL_IO; goto failed_bh; } if (crc != le32_to_cpu(sr->sr_sum)) { ret = NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT; goto failed_bh; } } *pbh = bh_sr; return 0; failed_bh: brelse(bh_sr); failed: return nilfs_warn_segment_error(nilfs->ns_sb, ret); } /** * nilfs_read_log_header - read summary header of the specified log * @nilfs: nilfs object * @start_blocknr: start block number of the log * @sum: pointer to return segment summary structure */ static struct buffer_head * nilfs_read_log_header(struct the_nilfs *nilfs, sector_t start_blocknr, struct nilfs_segment_summary **sum) { struct buffer_head *bh_sum; bh_sum = __bread(nilfs->ns_bdev, start_blocknr, nilfs->ns_blocksize); if (bh_sum) *sum = (struct nilfs_segment_summary *)bh_sum->b_data; return bh_sum; } /** * nilfs_validate_log - verify consistency of log * @nilfs: nilfs object * @seg_seq: sequence number of segment * @bh_sum: buffer head of summary block * @sum: segment summary struct */ static int nilfs_validate_log(struct the_nilfs *nilfs, u64 seg_seq, struct buffer_head *bh_sum, struct nilfs_segment_summary *sum) { unsigned long nblock; u32 crc; int ret; ret = NILFS_SEG_FAIL_MAGIC; if (le32_to_cpu(sum->ss_magic) != NILFS_SEGSUM_MAGIC) goto out; ret = NILFS_SEG_FAIL_SEQ; if (le64_to_cpu(sum->ss_seq) != seg_seq) goto out; nblock = le32_to_cpu(sum->ss_nblocks); ret = NILFS_SEG_FAIL_CONSISTENCY; if (unlikely(nblock == 0 || nblock > nilfs->ns_blocks_per_segment)) /* This limits the number of blocks read in the CRC check */ goto out; ret = NILFS_SEG_FAIL_IO; if (nilfs_compute_checksum(nilfs, bh_sum, &crc, sizeof(sum->ss_datasum), ((u64)nblock << nilfs->ns_blocksize_bits), bh_sum->b_blocknr, nblock)) goto out; ret = NILFS_SEG_FAIL_CHECKSUM_FULL; if (crc != le32_to_cpu(sum->ss_datasum)) goto out; ret = 0; out: return ret; } /** * nilfs_read_summary_info - read an item on summary blocks of a log * @nilfs: nilfs object * @pbh: the current buffer head on summary blocks [in, out] * @offset: the current byte offset on summary blocks [in, out] * @bytes: byte size of the item to be read */ static void *nilfs_read_summary_info(struct the_nilfs *nilfs, struct buffer_head **pbh, unsigned int *offset, unsigned int bytes) { void *ptr; sector_t blocknr; BUG_ON((*pbh)->b_size < *offset); if (bytes > (*pbh)->b_size - *offset) { blocknr = (*pbh)->b_blocknr; brelse(*pbh); *pbh = __bread(nilfs->ns_bdev, blocknr + 1, nilfs->ns_blocksize); if (unlikely(!*pbh)) return NULL; *offset = 0; } ptr = (*pbh)->b_data + *offset; *offset += bytes; return ptr; } /** * nilfs_skip_summary_info - skip items on summary blocks of a log * @nilfs: nilfs object * @pbh: the current buffer head on summary blocks [in, out] * @offset: the current byte offset on summary blocks [in, out] * @bytes: byte size of the item to be skipped * @count: number of items to be skipped */ static void nilfs_skip_summary_info(struct the_nilfs *nilfs, struct buffer_head **pbh, unsigned int *offset, unsigned int bytes, unsigned long count) { unsigned int rest_item_in_current_block = ((*pbh)->b_size - *offset) / bytes; if (count <= rest_item_in_current_block) { *offset += bytes * count; } else { sector_t blocknr = (*pbh)->b_blocknr; unsigned int nitem_per_block = (*pbh)->b_size / bytes; unsigned int bcnt; count -= rest_item_in_current_block; bcnt = DIV_ROUND_UP(count, nitem_per_block); *offset = bytes * (count - (bcnt - 1) * nitem_per_block); brelse(*pbh); *pbh = __bread(nilfs->ns_bdev, blocknr + bcnt, 
nilfs->ns_blocksize); } } /** * nilfs_scan_dsync_log - get block information of a log written for data sync * @nilfs: nilfs object * @start_blocknr: start block number of the log * @sum: log summary information * @head: list head to add nilfs_recovery_block struct */ static int nilfs_scan_dsync_log(struct the_nilfs *nilfs, sector_t start_blocknr, struct nilfs_segment_summary *sum, struct list_head *head) { struct buffer_head *bh; unsigned int offset; u32 nfinfo, sumbytes; sector_t blocknr; ino_t ino; int err = -EIO; nfinfo = le32_to_cpu(sum->ss_nfinfo); if (!nfinfo) return 0; sumbytes = le32_to_cpu(sum->ss_sumbytes); blocknr = start_blocknr + DIV_ROUND_UP(sumbytes, nilfs->ns_blocksize); bh = __bread(nilfs->ns_bdev, start_blocknr, nilfs->ns_blocksize); if (unlikely(!bh)) goto out; offset = le16_to_cpu(sum->ss_bytes); for (;;) { unsigned long nblocks, ndatablk, nnodeblk; struct nilfs_finfo *finfo; finfo = nilfs_read_summary_info(nilfs, &bh, &offset, sizeof(*finfo)); if (unlikely(!finfo)) goto out; ino = le64_to_cpu(finfo->fi_ino); nblocks = le32_to_cpu(finfo->fi_nblocks); ndatablk = le32_to_cpu(finfo->fi_ndatablk); nnodeblk = nblocks - ndatablk; while (ndatablk-- > 0) { struct nilfs_recovery_block *rb; struct nilfs_binfo_v *binfo; binfo = nilfs_read_summary_info(nilfs, &bh, &offset, sizeof(*binfo)); if (unlikely(!binfo)) goto out; rb = kmalloc(sizeof(*rb), GFP_NOFS); if (unlikely(!rb)) { err = -ENOMEM; goto out; } rb->ino = ino; rb->blocknr = blocknr++; rb->vblocknr = le64_to_cpu(binfo->bi_vblocknr); rb->blkoff = le64_to_cpu(binfo->bi_blkoff); /* INIT_LIST_HEAD(&rb->list); */ list_add_tail(&rb->list, head); } if (--nfinfo == 0) break; blocknr += nnodeblk; /* always 0 for data sync logs */ nilfs_skip_summary_info(nilfs, &bh, &offset, sizeof(__le64), nnodeblk); if (unlikely(!bh)) goto out; } err = 0; out: brelse(bh); /* brelse(NULL) is just ignored */ return err; } static void dispose_recovery_list(struct list_head *head) { while (!list_empty(head)) { struct nilfs_recovery_block *rb; rb = list_first_entry(head, struct nilfs_recovery_block, list); list_del(&rb->list); kfree(rb); } } struct nilfs_segment_entry { struct list_head list; __u64 segnum; }; static int nilfs_segment_list_add(struct list_head *head, __u64 segnum) { struct nilfs_segment_entry *ent = kmalloc(sizeof(*ent), GFP_NOFS); if (unlikely(!ent)) return -ENOMEM; ent->segnum = segnum; INIT_LIST_HEAD(&ent->list); list_add_tail(&ent->list, head); return 0; } void nilfs_dispose_segment_list(struct list_head *head) { while (!list_empty(head)) { struct nilfs_segment_entry *ent; ent = list_first_entry(head, struct nilfs_segment_entry, list); list_del(&ent->list); kfree(ent); } } static int nilfs_prepare_segment_for_recovery(struct the_nilfs *nilfs, struct super_block *sb, struct nilfs_recovery_info *ri) { struct list_head *head = &ri->ri_used_segments; struct nilfs_segment_entry *ent, *n; struct inode *sufile = nilfs->ns_sufile; __u64 segnum[4]; int err; int i; segnum[0] = nilfs->ns_segnum; segnum[1] = nilfs->ns_nextnum; segnum[2] = ri->ri_segnum; segnum[3] = ri->ri_nextnum; /* * Releasing the next segment of the latest super root. * The next segment is invalidated by this recovery. */ err = nilfs_sufile_free(sufile, segnum[1]); if (unlikely(err)) goto failed; for (i = 1; i < 4; i++) { err = nilfs_segment_list_add(head, segnum[i]); if (unlikely(err)) goto failed; } /* * Collecting segments written after the latest super root. * These are marked dirty to avoid being reallocated in the next write. 
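 * All collected segments except segnum[0] (the one holding that super
 * root) are scrapped below, and the list entries are freed as they go.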
*/ list_for_each_entry_safe(ent, n, head, list) { if (ent->segnum != segnum[0]) { err = nilfs_sufile_scrap(sufile, ent->segnum); if (unlikely(err)) goto failed; } list_del(&ent->list); kfree(ent); } /* Allocate new segments for recovery */ err = nilfs_sufile_alloc(sufile, &segnum[0]); if (unlikely(err)) goto failed; nilfs->ns_pseg_offset = 0; nilfs->ns_seg_seq = ri->ri_seq + 2; nilfs->ns_nextnum = nilfs->ns_segnum = segnum[0]; failed: /* No need to recover sufile because it will be destroyed on error */ return err; } static int nilfs_recovery_copy_block(struct the_nilfs *nilfs, struct nilfs_recovery_block *rb, struct page *page) { struct buffer_head *bh_org; void *kaddr; bh_org = __bread(nilfs->ns_bdev, rb->blocknr, nilfs->ns_blocksize); if (unlikely(!bh_org)) return -EIO; kaddr = kmap_atomic(page); memcpy(kaddr + bh_offset(bh_org), bh_org->b_data, bh_org->b_size); kunmap_atomic(kaddr); brelse(bh_org); return 0; } static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs, struct super_block *sb, struct nilfs_root *root, struct list_head *head, unsigned long *nr_salvaged_blocks) { struct inode *inode; struct nilfs_recovery_block *rb, *n; unsigned int blocksize = nilfs->ns_blocksize; struct page *page; loff_t pos; int err = 0, err2 = 0; list_for_each_entry_safe(rb, n, head, list) { inode = nilfs_iget(sb, root, rb->ino); if (IS_ERR(inode)) { err = PTR_ERR(inode); inode = NULL; goto failed_inode; } pos = rb->blkoff << inode->i_blkbits; err = block_write_begin(inode->i_mapping, pos, blocksize, &page, nilfs_get_block); if (unlikely(err)) { loff_t isize = inode->i_size; if (pos + blocksize > isize) nilfs_write_failed(inode->i_mapping, pos + blocksize); goto failed_inode; } err = nilfs_recovery_copy_block(nilfs, rb, page); if (unlikely(err)) goto failed_page; err = nilfs_set_file_dirty(inode, 1); if (unlikely(err)) goto failed_page; block_write_end(NULL, inode->i_mapping, pos, blocksize, blocksize, page, NULL); unlock_page(page); put_page(page); (*nr_salvaged_blocks)++; goto next; failed_page: unlock_page(page); put_page(page); failed_inode: nilfs_warn(sb, "error %d recovering data block (ino=%lu, block-offset=%llu)", err, (unsigned long)rb->ino, (unsigned long long)rb->blkoff); if (!err2) err2 = err; next: iput(inode); /* iput(NULL) is just ignored */ list_del_init(&rb->list); kfree(rb); } return err2; } /** * nilfs_do_roll_forward - salvage logical segments newer than the latest * checkpoint * @nilfs: nilfs object * @sb: super block instance * @ri: pointer to a nilfs_recovery_info */ static int nilfs_do_roll_forward(struct the_nilfs *nilfs, struct super_block *sb, struct nilfs_root *root, struct nilfs_recovery_info *ri) { struct buffer_head *bh_sum = NULL; struct nilfs_segment_summary *sum = NULL; sector_t pseg_start; sector_t seg_start, seg_end; /* Starting/ending DBN of full segment */ unsigned long nsalvaged_blocks = 0; unsigned int flags; u64 seg_seq; __u64 segnum, nextnum = 0; int empty_seg = 0; int err = 0, ret; LIST_HEAD(dsync_blocks); /* list of data blocks to be recovered */ enum { RF_INIT_ST, RF_DSYNC_ST, /* scanning data-sync segments */ }; int state = RF_INIT_ST; pseg_start = ri->ri_lsegs_start; seg_seq = ri->ri_lsegs_start_seq; segnum = nilfs_get_segnum_of_block(nilfs, pseg_start); nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end); while (segnum != ri->ri_segnum || pseg_start <= ri->ri_pseg_start) { brelse(bh_sum); bh_sum = nilfs_read_log_header(nilfs, pseg_start, &sum); if (!bh_sum) { err = -EIO; goto failed; } ret = nilfs_validate_log(nilfs, seg_seq, bh_sum, sum); 
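		/*
		 * A read error (NILFS_SEG_FAIL_IO) aborts the roll forward
		 * with -EIO; any other validation failure branches to
		 * strayed, which either stops at ri_lsegs_end or feeds the
		 * next full segment.
		 */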
if (ret) { if (ret == NILFS_SEG_FAIL_IO) { err = -EIO; goto failed; } goto strayed; } flags = le16_to_cpu(sum->ss_flags); if (flags & NILFS_SS_SR) goto confused; /* Found a valid partial segment; do recovery actions */ nextnum = nilfs_get_segnum_of_block(nilfs, le64_to_cpu(sum->ss_next)); empty_seg = 0; nilfs->ns_ctime = le64_to_cpu(sum->ss_create); if (!(flags & NILFS_SS_GC)) nilfs->ns_nongc_ctime = nilfs->ns_ctime; switch (state) { case RF_INIT_ST: if (!(flags & NILFS_SS_LOGBGN) || !(flags & NILFS_SS_SYNDT)) goto try_next_pseg; state = RF_DSYNC_ST; fallthrough; case RF_DSYNC_ST: if (!(flags & NILFS_SS_SYNDT)) goto confused; err = nilfs_scan_dsync_log(nilfs, pseg_start, sum, &dsync_blocks); if (unlikely(err)) goto failed; if (flags & NILFS_SS_LOGEND) { err = nilfs_recover_dsync_blocks( nilfs, sb, root, &dsync_blocks, &nsalvaged_blocks); if (unlikely(err)) goto failed; state = RF_INIT_ST; } break; /* Fall through to try_next_pseg */ } try_next_pseg: if (pseg_start == ri->ri_lsegs_end) break; pseg_start += le32_to_cpu(sum->ss_nblocks); if (pseg_start < seg_end) continue; goto feed_segment; strayed: if (pseg_start == ri->ri_lsegs_end) break; feed_segment: /* Looking to the next full segment */ if (empty_seg++) break; seg_seq++; segnum = nextnum; nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end); pseg_start = seg_start; } if (nsalvaged_blocks) { nilfs_info(sb, "salvaged %lu blocks", nsalvaged_blocks); ri->ri_need_recovery = NILFS_RECOVERY_ROLLFORWARD_DONE; } out: brelse(bh_sum); dispose_recovery_list(&dsync_blocks); return err; confused: err = -EINVAL; failed: nilfs_err(sb, "error %d roll-forwarding partial segment at blocknr = %llu", err, (unsigned long long)pseg_start); goto out; } static void nilfs_finish_roll_forward(struct the_nilfs *nilfs, struct nilfs_recovery_info *ri) { struct buffer_head *bh; int err; if (nilfs_get_segnum_of_block(nilfs, ri->ri_lsegs_start) != nilfs_get_segnum_of_block(nilfs, ri->ri_super_root)) return; bh = __getblk(nilfs->ns_bdev, ri->ri_lsegs_start, nilfs->ns_blocksize); BUG_ON(!bh); memset(bh->b_data, 0, bh->b_size); set_buffer_dirty(bh); err = sync_dirty_buffer(bh); if (unlikely(err)) nilfs_warn(nilfs->ns_sb, "buffer sync write failed during post-cleaning of recovery."); brelse(bh); } /** * nilfs_salvage_orphan_logs - salvage logs written after the latest checkpoint * @nilfs: nilfs object * @sb: super block instance * @ri: pointer to a nilfs_recovery_info struct to store search results. * * Return Value: On success, 0 is returned. On error, one of the following * negative error code is returned. * * %-EINVAL - Inconsistent filesystem state. * * %-EIO - I/O error * * %-ENOSPC - No space left on device (only in a panic state). * * %-ERESTARTSYS - Interrupted. * * %-ENOMEM - Insufficient memory available. 
*/ int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs, struct super_block *sb, struct nilfs_recovery_info *ri) { struct nilfs_root *root; int err; if (ri->ri_lsegs_start == 0 || ri->ri_lsegs_end == 0) return 0; err = nilfs_attach_checkpoint(sb, ri->ri_cno, true, &root); if (unlikely(err)) { nilfs_err(sb, "error %d loading the latest checkpoint", err); return err; } err = nilfs_do_roll_forward(nilfs, sb, root, ri); if (unlikely(err)) goto failed; if (ri->ri_need_recovery == NILFS_RECOVERY_ROLLFORWARD_DONE) { err = nilfs_prepare_segment_for_recovery(nilfs, sb, ri); if (unlikely(err)) { nilfs_err(sb, "error %d preparing segment for recovery", err); goto failed; } err = nilfs_attach_log_writer(sb, root); if (unlikely(err)) goto failed; set_nilfs_discontinued(nilfs); err = nilfs_construct_segment(sb); nilfs_detach_log_writer(sb); if (unlikely(err)) { nilfs_err(sb, "error %d writing segment for recovery", err); goto failed; } nilfs_finish_roll_forward(nilfs, ri); } failed: nilfs_put_root(root); return err; } /** * nilfs_search_super_root - search the latest valid super root * @nilfs: the_nilfs * @ri: pointer to a nilfs_recovery_info struct to store search results. * * nilfs_search_super_root() looks for the latest super-root from a partial * segment pointed by the superblock. It sets up struct the_nilfs through * this search. It fills nilfs_recovery_info (ri) required for recovery. * * Return Value: On success, 0 is returned. On error, one of the following * negative error code is returned. * * %-EINVAL - No valid segment found * * %-EIO - I/O error * * %-ENOMEM - Insufficient memory available. */ int nilfs_search_super_root(struct the_nilfs *nilfs, struct nilfs_recovery_info *ri) { struct buffer_head *bh_sum = NULL; struct nilfs_segment_summary *sum = NULL; sector_t pseg_start, pseg_end, sr_pseg_start = 0; sector_t seg_start, seg_end; /* range of full segment (block number) */ sector_t b, end; unsigned long nblocks; unsigned int flags; u64 seg_seq; __u64 segnum, nextnum = 0; __u64 cno; LIST_HEAD(segments); int empty_seg = 0, scan_newer = 0; int ret; pseg_start = nilfs->ns_last_pseg; seg_seq = nilfs->ns_last_seq; cno = nilfs->ns_last_cno; segnum = nilfs_get_segnum_of_block(nilfs, pseg_start); /* Calculate range of segment */ nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end); /* Read ahead segment */ b = seg_start; while (b <= seg_end) __breadahead(nilfs->ns_bdev, b++, nilfs->ns_blocksize); for (;;) { brelse(bh_sum); ret = NILFS_SEG_FAIL_IO; bh_sum = nilfs_read_log_header(nilfs, pseg_start, &sum); if (!bh_sum) goto failed; ret = nilfs_validate_log(nilfs, seg_seq, bh_sum, sum); if (ret) { if (ret == NILFS_SEG_FAIL_IO) goto failed; goto strayed; } nblocks = le32_to_cpu(sum->ss_nblocks); pseg_end = pseg_start + nblocks - 1; if (unlikely(pseg_end > seg_end)) { ret = NILFS_SEG_FAIL_CONSISTENCY; goto strayed; } /* A valid partial segment */ ri->ri_pseg_start = pseg_start; ri->ri_seq = seg_seq; ri->ri_segnum = segnum; nextnum = nilfs_get_segnum_of_block(nilfs, le64_to_cpu(sum->ss_next)); ri->ri_nextnum = nextnum; empty_seg = 0; flags = le16_to_cpu(sum->ss_flags); if (!(flags & NILFS_SS_SR) && !scan_newer) { /* * This will never happen because a superblock * (last_segment) always points to a pseg with * a super root. 
*/ ret = NILFS_SEG_FAIL_CONSISTENCY; goto failed; } if (pseg_start == seg_start) { nilfs_get_segment_range(nilfs, nextnum, &b, &end); while (b <= end) __breadahead(nilfs->ns_bdev, b++, nilfs->ns_blocksize); } if (!(flags & NILFS_SS_SR)) { if (!ri->ri_lsegs_start && (flags & NILFS_SS_LOGBGN)) { ri->ri_lsegs_start = pseg_start; ri->ri_lsegs_start_seq = seg_seq; } if (flags & NILFS_SS_LOGEND) ri->ri_lsegs_end = pseg_start; goto try_next_pseg; } /* A valid super root was found. */ ri->ri_cno = cno++; ri->ri_super_root = pseg_end; ri->ri_lsegs_start = ri->ri_lsegs_end = 0; nilfs_dispose_segment_list(&segments); sr_pseg_start = pseg_start; nilfs->ns_pseg_offset = pseg_start + nblocks - seg_start; nilfs->ns_seg_seq = seg_seq; nilfs->ns_segnum = segnum; nilfs->ns_cno = cno; /* nilfs->ns_cno = ri->ri_cno + 1 */ nilfs->ns_ctime = le64_to_cpu(sum->ss_create); nilfs->ns_nextnum = nextnum; if (scan_newer) ri->ri_need_recovery = NILFS_RECOVERY_SR_UPDATED; else { if (nilfs->ns_mount_state & NILFS_VALID_FS) goto super_root_found; scan_newer = 1; } try_next_pseg: /* Standing on a course, or met an inconsistent state */ pseg_start += nblocks; if (pseg_start < seg_end) continue; goto feed_segment; strayed: /* Off the trail */ if (!scan_newer) /* * This can happen if a checkpoint was written without * barriers, or as a result of an I/O failure. */ goto failed; feed_segment: /* Looking to the next full segment */ if (empty_seg++) goto super_root_found; /* found a valid super root */ ret = nilfs_segment_list_add(&segments, segnum); if (unlikely(ret)) goto failed; seg_seq++; segnum = nextnum; nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end); pseg_start = seg_start; } super_root_found: /* Updating pointers relating to the latest checkpoint */ brelse(bh_sum); list_splice_tail(&segments, &ri->ri_used_segments); nilfs->ns_last_pseg = sr_pseg_start; nilfs->ns_last_seq = nilfs->ns_seg_seq; nilfs->ns_last_cno = ri->ri_cno; return 0; failed: brelse(bh_sum); nilfs_dispose_segment_list(&segments); return ret < 0 ? ret : nilfs_warn_segment_error(nilfs->ns_sb, ret); }
linux-master
fs/nilfs2/recovery.c
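nilfs_compute_checksum() above checksums a byte range that starts at an offset inside one block and then runs across consecutive blocks, chaining the CRC from block to block. Below is a minimal user-space sketch of just that iteration, assuming an ordinary file stands in for the block device and a placeholder chain_crc() stands in for the kernel's seeded crc32_le(), so the result is not bit-compatible with what the log actually stores:

/* checksum_sketch.c - illustrative only; not bit-compatible with NILFS. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Placeholder mixing step standing in for crc32_le(); NOT a real CRC. */
static uint32_t chain_crc(uint32_t crc, const unsigned char *buf, size_t len)
{
	while (len--)
		crc = (crc << 5) + crc + *buf++;
	return crc;
}

/*
 * Checksum check_bytes bytes beginning at byte `offset` of block `start`,
 * reading whole blocks after the first one, the way
 * nilfs_compute_checksum() walks consecutive blocks with __bread().
 */
int checksum_blocks(FILE *dev, unsigned int blocksize, unsigned long offset,
		    uint64_t check_bytes, uint64_t start, unsigned long nblock,
		    uint32_t *sum)
{
	unsigned char *buf = malloc(blocksize);
	uint32_t crc = 0;
	size_t size;

	if (!buf)
		return -1;
	check_bytes -= offset;
	size = check_bytes < blocksize - offset ? check_bytes
						: blocksize - offset;
	if (fseek(dev, (long)(start * blocksize + offset), SEEK_SET) ||
	    fread(buf, 1, size, dev) != size)
		goto fail;
	crc = chain_crc(crc, buf, size);		/* tail of first block */
	while (--nblock > 0) {
		check_bytes -= size;
		size = check_bytes < blocksize ? check_bytes : blocksize;
		if (fseek(dev, (long)(++start * blocksize), SEEK_SET) ||
		    fread(buf, 1, size, dev) != size)
			goto fail;
		crc = chain_crc(crc, buf, size);	/* next whole block */
	}
	free(buf);
	*sum = crc;
	return 0;
fail:
	free(buf);
	return -1;
}

The loop shape is the point: a partial first block measured from the offset, then whole blocks with the CRC chained through, which is also why nilfs_validate_log() above caps ss_nblocks at ns_blocks_per_segment before handing the count to the checksum routine.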
// SPDX-License-Identifier: GPL-2.0+ /* * Sysfs support implementation. * * Copyright (C) 2005-2014 Nippon Telegraph and Telephone Corporation. * Copyright (C) 2014 HGST, Inc., a Western Digital Company. * * Written by Vyacheslav Dubeyko <[email protected]> */ #include <linux/kobject.h> #include "nilfs.h" #include "mdt.h" #include "sufile.h" #include "cpfile.h" #include "sysfs.h" /* /sys/fs/<nilfs>/ */ static struct kset *nilfs_kset; #define NILFS_DEV_INT_GROUP_OPS(name, parent_name) \ static ssize_t nilfs_##name##_attr_show(struct kobject *kobj, \ struct attribute *attr, char *buf) \ { \ struct the_nilfs *nilfs = container_of(kobj->parent, \ struct the_nilfs, \ ns_##parent_name##_kobj); \ struct nilfs_##name##_attr *a = container_of(attr, \ struct nilfs_##name##_attr, \ attr); \ return a->show ? a->show(a, nilfs, buf) : 0; \ } \ static ssize_t nilfs_##name##_attr_store(struct kobject *kobj, \ struct attribute *attr, \ const char *buf, size_t len) \ { \ struct the_nilfs *nilfs = container_of(kobj->parent, \ struct the_nilfs, \ ns_##parent_name##_kobj); \ struct nilfs_##name##_attr *a = container_of(attr, \ struct nilfs_##name##_attr, \ attr); \ return a->store ? a->store(a, nilfs, buf, len) : 0; \ } \ static const struct sysfs_ops nilfs_##name##_attr_ops = { \ .show = nilfs_##name##_attr_show, \ .store = nilfs_##name##_attr_store, \ } #define NILFS_DEV_INT_GROUP_TYPE(name, parent_name) \ static void nilfs_##name##_attr_release(struct kobject *kobj) \ { \ struct nilfs_sysfs_##parent_name##_subgroups *subgroups = container_of(kobj, \ struct nilfs_sysfs_##parent_name##_subgroups, \ sg_##name##_kobj); \ complete(&subgroups->sg_##name##_kobj_unregister); \ } \ static struct kobj_type nilfs_##name##_ktype = { \ .default_groups = nilfs_##name##_groups, \ .sysfs_ops = &nilfs_##name##_attr_ops, \ .release = nilfs_##name##_attr_release, \ } #define NILFS_DEV_INT_GROUP_FNS(name, parent_name) \ static int nilfs_sysfs_create_##name##_group(struct the_nilfs *nilfs) \ { \ struct kobject *parent; \ struct kobject *kobj; \ struct completion *kobj_unregister; \ struct nilfs_sysfs_##parent_name##_subgroups *subgroups; \ int err; \ subgroups = nilfs->ns_##parent_name##_subgroups; \ kobj = &subgroups->sg_##name##_kobj; \ kobj_unregister = &subgroups->sg_##name##_kobj_unregister; \ parent = &nilfs->ns_##parent_name##_kobj; \ kobj->kset = nilfs_kset; \ init_completion(kobj_unregister); \ err = kobject_init_and_add(kobj, &nilfs_##name##_ktype, parent, \ #name); \ if (err) \ kobject_put(kobj); \ return err; \ } \ static void nilfs_sysfs_delete_##name##_group(struct the_nilfs *nilfs) \ { \ kobject_put(&nilfs->ns_##parent_name##_subgroups->sg_##name##_kobj); \ } /************************************************************************ * NILFS snapshot attrs * ************************************************************************/ static ssize_t nilfs_snapshot_inodes_count_show(struct nilfs_snapshot_attr *attr, struct nilfs_root *root, char *buf) { return sysfs_emit(buf, "%llu\n", (unsigned long long)atomic64_read(&root->inodes_count)); } static ssize_t nilfs_snapshot_blocks_count_show(struct nilfs_snapshot_attr *attr, struct nilfs_root *root, char *buf) { return sysfs_emit(buf, "%llu\n", (unsigned long long)atomic64_read(&root->blocks_count)); } static const char snapshot_readme_str[] = "The group contains details about mounted snapshot.\n\n" "(1) inodes_count\n\tshow number of inodes for snapshot.\n\n" "(2) blocks_count\n\tshow number of blocks for snapshot.\n\n"; static ssize_t nilfs_snapshot_README_show(struct 
nilfs_snapshot_attr *attr, struct nilfs_root *root, char *buf) { return sysfs_emit(buf, snapshot_readme_str); } NILFS_SNAPSHOT_RO_ATTR(inodes_count); NILFS_SNAPSHOT_RO_ATTR(blocks_count); NILFS_SNAPSHOT_RO_ATTR(README); static struct attribute *nilfs_snapshot_attrs[] = { NILFS_SNAPSHOT_ATTR_LIST(inodes_count), NILFS_SNAPSHOT_ATTR_LIST(blocks_count), NILFS_SNAPSHOT_ATTR_LIST(README), NULL, }; ATTRIBUTE_GROUPS(nilfs_snapshot); static ssize_t nilfs_snapshot_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct nilfs_root *root = container_of(kobj, struct nilfs_root, snapshot_kobj); struct nilfs_snapshot_attr *a = container_of(attr, struct nilfs_snapshot_attr, attr); return a->show ? a->show(a, root, buf) : 0; } static ssize_t nilfs_snapshot_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t len) { struct nilfs_root *root = container_of(kobj, struct nilfs_root, snapshot_kobj); struct nilfs_snapshot_attr *a = container_of(attr, struct nilfs_snapshot_attr, attr); return a->store ? a->store(a, root, buf, len) : 0; } static void nilfs_snapshot_attr_release(struct kobject *kobj) { struct nilfs_root *root = container_of(kobj, struct nilfs_root, snapshot_kobj); complete(&root->snapshot_kobj_unregister); } static const struct sysfs_ops nilfs_snapshot_attr_ops = { .show = nilfs_snapshot_attr_show, .store = nilfs_snapshot_attr_store, }; static struct kobj_type nilfs_snapshot_ktype = { .default_groups = nilfs_snapshot_groups, .sysfs_ops = &nilfs_snapshot_attr_ops, .release = nilfs_snapshot_attr_release, }; int nilfs_sysfs_create_snapshot_group(struct nilfs_root *root) { struct the_nilfs *nilfs; struct kobject *parent; int err; nilfs = root->nilfs; parent = &nilfs->ns_dev_subgroups->sg_mounted_snapshots_kobj; root->snapshot_kobj.kset = nilfs_kset; init_completion(&root->snapshot_kobj_unregister); if (root->cno == NILFS_CPTREE_CURRENT_CNO) { err = kobject_init_and_add(&root->snapshot_kobj, &nilfs_snapshot_ktype, &nilfs->ns_dev_kobj, "current_checkpoint"); } else { err = kobject_init_and_add(&root->snapshot_kobj, &nilfs_snapshot_ktype, parent, "%llu", root->cno); } if (err) kobject_put(&root->snapshot_kobj); return err; } void nilfs_sysfs_delete_snapshot_group(struct nilfs_root *root) { kobject_put(&root->snapshot_kobj); } /************************************************************************ * NILFS mounted snapshots attrs * ************************************************************************/ static const char mounted_snapshots_readme_str[] = "The mounted_snapshots group contains group for\n" "every mounted snapshot.\n"; static ssize_t nilfs_mounted_snapshots_README_show(struct nilfs_mounted_snapshots_attr *attr, struct the_nilfs *nilfs, char *buf) { return sysfs_emit(buf, mounted_snapshots_readme_str); } NILFS_MOUNTED_SNAPSHOTS_RO_ATTR(README); static struct attribute *nilfs_mounted_snapshots_attrs[] = { NILFS_MOUNTED_SNAPSHOTS_ATTR_LIST(README), NULL, }; ATTRIBUTE_GROUPS(nilfs_mounted_snapshots); NILFS_DEV_INT_GROUP_OPS(mounted_snapshots, dev); NILFS_DEV_INT_GROUP_TYPE(mounted_snapshots, dev); NILFS_DEV_INT_GROUP_FNS(mounted_snapshots, dev); /************************************************************************ * NILFS checkpoints attrs * ************************************************************************/ static ssize_t nilfs_checkpoints_checkpoints_number_show(struct nilfs_checkpoints_attr *attr, struct the_nilfs *nilfs, char *buf) { __u64 ncheckpoints; struct nilfs_cpstat cpstat; int err; down_read(&nilfs->ns_segctor_sem); err = 
nilfs_cpfile_get_stat(nilfs->ns_cpfile, &cpstat); up_read(&nilfs->ns_segctor_sem); if (err < 0) { nilfs_err(nilfs->ns_sb, "unable to get checkpoint stat: err=%d", err); return err; } ncheckpoints = cpstat.cs_ncps; return sysfs_emit(buf, "%llu\n", ncheckpoints); } static ssize_t nilfs_checkpoints_snapshots_number_show(struct nilfs_checkpoints_attr *attr, struct the_nilfs *nilfs, char *buf) { __u64 nsnapshots; struct nilfs_cpstat cpstat; int err; down_read(&nilfs->ns_segctor_sem); err = nilfs_cpfile_get_stat(nilfs->ns_cpfile, &cpstat); up_read(&nilfs->ns_segctor_sem); if (err < 0) { nilfs_err(nilfs->ns_sb, "unable to get checkpoint stat: err=%d", err); return err; } nsnapshots = cpstat.cs_nsss; return sysfs_emit(buf, "%llu\n", nsnapshots); } static ssize_t nilfs_checkpoints_last_seg_checkpoint_show(struct nilfs_checkpoints_attr *attr, struct the_nilfs *nilfs, char *buf) { __u64 last_cno; spin_lock(&nilfs->ns_last_segment_lock); last_cno = nilfs->ns_last_cno; spin_unlock(&nilfs->ns_last_segment_lock); return sysfs_emit(buf, "%llu\n", last_cno); } static ssize_t nilfs_checkpoints_next_checkpoint_show(struct nilfs_checkpoints_attr *attr, struct the_nilfs *nilfs, char *buf) { __u64 cno; down_read(&nilfs->ns_segctor_sem); cno = nilfs->ns_cno; up_read(&nilfs->ns_segctor_sem); return sysfs_emit(buf, "%llu\n", cno); } static const char checkpoints_readme_str[] = "The checkpoints group contains attributes that describe\n" "details about volume's checkpoints.\n\n" "(1) checkpoints_number\n\tshow number of checkpoints on volume.\n\n" "(2) snapshots_number\n\tshow number of snapshots on volume.\n\n" "(3) last_seg_checkpoint\n" "\tshow checkpoint number of the latest segment.\n\n" "(4) next_checkpoint\n\tshow next checkpoint number.\n\n"; static ssize_t nilfs_checkpoints_README_show(struct nilfs_checkpoints_attr *attr, struct the_nilfs *nilfs, char *buf) { return sysfs_emit(buf, checkpoints_readme_str); } NILFS_CHECKPOINTS_RO_ATTR(checkpoints_number); NILFS_CHECKPOINTS_RO_ATTR(snapshots_number); NILFS_CHECKPOINTS_RO_ATTR(last_seg_checkpoint); NILFS_CHECKPOINTS_RO_ATTR(next_checkpoint); NILFS_CHECKPOINTS_RO_ATTR(README); static struct attribute *nilfs_checkpoints_attrs[] = { NILFS_CHECKPOINTS_ATTR_LIST(checkpoints_number), NILFS_CHECKPOINTS_ATTR_LIST(snapshots_number), NILFS_CHECKPOINTS_ATTR_LIST(last_seg_checkpoint), NILFS_CHECKPOINTS_ATTR_LIST(next_checkpoint), NILFS_CHECKPOINTS_ATTR_LIST(README), NULL, }; ATTRIBUTE_GROUPS(nilfs_checkpoints); NILFS_DEV_INT_GROUP_OPS(checkpoints, dev); NILFS_DEV_INT_GROUP_TYPE(checkpoints, dev); NILFS_DEV_INT_GROUP_FNS(checkpoints, dev); /************************************************************************ * NILFS segments attrs * ************************************************************************/ static ssize_t nilfs_segments_segments_number_show(struct nilfs_segments_attr *attr, struct the_nilfs *nilfs, char *buf) { return sysfs_emit(buf, "%lu\n", nilfs->ns_nsegments); } static ssize_t nilfs_segments_blocks_per_segment_show(struct nilfs_segments_attr *attr, struct the_nilfs *nilfs, char *buf) { return sysfs_emit(buf, "%lu\n", nilfs->ns_blocks_per_segment); } static ssize_t nilfs_segments_clean_segments_show(struct nilfs_segments_attr *attr, struct the_nilfs *nilfs, char *buf) { unsigned long ncleansegs; down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem); ncleansegs = nilfs_sufile_get_ncleansegs(nilfs->ns_sufile); up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem); return sysfs_emit(buf, "%lu\n", ncleansegs); } static ssize_t nilfs_segments_dirty_segments_show(struct 
nilfs_segments_attr *attr, struct the_nilfs *nilfs, char *buf) { struct nilfs_sustat sustat; int err; down_read(&nilfs->ns_segctor_sem); err = nilfs_sufile_get_stat(nilfs->ns_sufile, &sustat); up_read(&nilfs->ns_segctor_sem); if (err < 0) { nilfs_err(nilfs->ns_sb, "unable to get segment stat: err=%d", err); return err; } return sysfs_emit(buf, "%llu\n", sustat.ss_ndirtysegs); } static const char segments_readme_str[] = "The segments group contains attributes that describe\n" "details about volume's segments.\n\n" "(1) segments_number\n\tshow number of segments on volume.\n\n" "(2) blocks_per_segment\n\tshow number of blocks in segment.\n\n" "(3) clean_segments\n\tshow count of clean segments.\n\n" "(4) dirty_segments\n\tshow count of dirty segments.\n\n"; static ssize_t nilfs_segments_README_show(struct nilfs_segments_attr *attr, struct the_nilfs *nilfs, char *buf) { return sysfs_emit(buf, segments_readme_str); } NILFS_SEGMENTS_RO_ATTR(segments_number); NILFS_SEGMENTS_RO_ATTR(blocks_per_segment); NILFS_SEGMENTS_RO_ATTR(clean_segments); NILFS_SEGMENTS_RO_ATTR(dirty_segments); NILFS_SEGMENTS_RO_ATTR(README); static struct attribute *nilfs_segments_attrs[] = { NILFS_SEGMENTS_ATTR_LIST(segments_number), NILFS_SEGMENTS_ATTR_LIST(blocks_per_segment), NILFS_SEGMENTS_ATTR_LIST(clean_segments), NILFS_SEGMENTS_ATTR_LIST(dirty_segments), NILFS_SEGMENTS_ATTR_LIST(README), NULL, }; ATTRIBUTE_GROUPS(nilfs_segments); NILFS_DEV_INT_GROUP_OPS(segments, dev); NILFS_DEV_INT_GROUP_TYPE(segments, dev); NILFS_DEV_INT_GROUP_FNS(segments, dev); /************************************************************************ * NILFS segctor attrs * ************************************************************************/ static ssize_t nilfs_segctor_last_pseg_block_show(struct nilfs_segctor_attr *attr, struct the_nilfs *nilfs, char *buf) { sector_t last_pseg; spin_lock(&nilfs->ns_last_segment_lock); last_pseg = nilfs->ns_last_pseg; spin_unlock(&nilfs->ns_last_segment_lock); return sysfs_emit(buf, "%llu\n", (unsigned long long)last_pseg); } static ssize_t nilfs_segctor_last_seg_sequence_show(struct nilfs_segctor_attr *attr, struct the_nilfs *nilfs, char *buf) { u64 last_seq; spin_lock(&nilfs->ns_last_segment_lock); last_seq = nilfs->ns_last_seq; spin_unlock(&nilfs->ns_last_segment_lock); return sysfs_emit(buf, "%llu\n", last_seq); } static ssize_t nilfs_segctor_last_seg_checkpoint_show(struct nilfs_segctor_attr *attr, struct the_nilfs *nilfs, char *buf) { __u64 last_cno; spin_lock(&nilfs->ns_last_segment_lock); last_cno = nilfs->ns_last_cno; spin_unlock(&nilfs->ns_last_segment_lock); return sysfs_emit(buf, "%llu\n", last_cno); } static ssize_t nilfs_segctor_current_seg_sequence_show(struct nilfs_segctor_attr *attr, struct the_nilfs *nilfs, char *buf) { u64 seg_seq; down_read(&nilfs->ns_segctor_sem); seg_seq = nilfs->ns_seg_seq; up_read(&nilfs->ns_segctor_sem); return sysfs_emit(buf, "%llu\n", seg_seq); } static ssize_t nilfs_segctor_current_last_full_seg_show(struct nilfs_segctor_attr *attr, struct the_nilfs *nilfs, char *buf) { __u64 segnum; down_read(&nilfs->ns_segctor_sem); segnum = nilfs->ns_segnum; up_read(&nilfs->ns_segctor_sem); return sysfs_emit(buf, "%llu\n", segnum); } static ssize_t nilfs_segctor_next_full_seg_show(struct nilfs_segctor_attr *attr, struct the_nilfs *nilfs, char *buf) { __u64 nextnum; down_read(&nilfs->ns_segctor_sem); nextnum = nilfs->ns_nextnum; up_read(&nilfs->ns_segctor_sem); return sysfs_emit(buf, "%llu\n", nextnum); } static ssize_t nilfs_segctor_next_pseg_offset_show(struct 
nilfs_segctor_attr *attr, struct the_nilfs *nilfs, char *buf) { unsigned long pseg_offset; down_read(&nilfs->ns_segctor_sem); pseg_offset = nilfs->ns_pseg_offset; up_read(&nilfs->ns_segctor_sem); return sysfs_emit(buf, "%lu\n", pseg_offset); } static ssize_t nilfs_segctor_next_checkpoint_show(struct nilfs_segctor_attr *attr, struct the_nilfs *nilfs, char *buf) { __u64 cno; down_read(&nilfs->ns_segctor_sem); cno = nilfs->ns_cno; up_read(&nilfs->ns_segctor_sem); return sysfs_emit(buf, "%llu\n", cno); } static ssize_t nilfs_segctor_last_seg_write_time_show(struct nilfs_segctor_attr *attr, struct the_nilfs *nilfs, char *buf) { time64_t ctime; down_read(&nilfs->ns_segctor_sem); ctime = nilfs->ns_ctime; up_read(&nilfs->ns_segctor_sem); return sysfs_emit(buf, "%ptTs\n", &ctime); } static ssize_t nilfs_segctor_last_seg_write_time_secs_show(struct nilfs_segctor_attr *attr, struct the_nilfs *nilfs, char *buf) { time64_t ctime; down_read(&nilfs->ns_segctor_sem); ctime = nilfs->ns_ctime; up_read(&nilfs->ns_segctor_sem); return sysfs_emit(buf, "%llu\n", ctime); } static ssize_t nilfs_segctor_last_nongc_write_time_show(struct nilfs_segctor_attr *attr, struct the_nilfs *nilfs, char *buf) { time64_t nongc_ctime; down_read(&nilfs->ns_segctor_sem); nongc_ctime = nilfs->ns_nongc_ctime; up_read(&nilfs->ns_segctor_sem); return sysfs_emit(buf, "%ptTs\n", &nongc_ctime); } static ssize_t nilfs_segctor_last_nongc_write_time_secs_show(struct nilfs_segctor_attr *attr, struct the_nilfs *nilfs, char *buf) { time64_t nongc_ctime; down_read(&nilfs->ns_segctor_sem); nongc_ctime = nilfs->ns_nongc_ctime; up_read(&nilfs->ns_segctor_sem); return sysfs_emit(buf, "%llu\n", nongc_ctime); } static ssize_t nilfs_segctor_dirty_data_blocks_count_show(struct nilfs_segctor_attr *attr, struct the_nilfs *nilfs, char *buf) { u32 ndirtyblks; down_read(&nilfs->ns_segctor_sem); ndirtyblks = atomic_read(&nilfs->ns_ndirtyblks); up_read(&nilfs->ns_segctor_sem); return sysfs_emit(buf, "%u\n", ndirtyblks); } static const char segctor_readme_str[] = "The segctor group contains attributes that describe\n" "segctor thread activity details.\n\n" "(1) last_pseg_block\n" "\tshow start block number of the latest segment.\n\n" "(2) last_seg_sequence\n" "\tshow sequence value of the latest segment.\n\n" "(3) last_seg_checkpoint\n" "\tshow checkpoint number of the latest segment.\n\n" "(4) current_seg_sequence\n\tshow segment sequence counter.\n\n" "(5) current_last_full_seg\n" "\tshow index number of the latest full segment.\n\n" "(6) next_full_seg\n" "\tshow index number of the full segment index to be used next.\n\n" "(7) next_pseg_offset\n" "\tshow offset of next partial segment in the current full segment.\n\n" "(8) next_checkpoint\n\tshow next checkpoint number.\n\n" "(9) last_seg_write_time\n" "\tshow write time of the last segment in human-readable format.\n\n" "(10) last_seg_write_time_secs\n" "\tshow write time of the last segment in seconds.\n\n" "(11) last_nongc_write_time\n" "\tshow write time of the last segment not for cleaner operation " "in human-readable format.\n\n" "(12) last_nongc_write_time_secs\n" "\tshow write time of the last segment not for cleaner operation " "in seconds.\n\n" "(13) dirty_data_blocks_count\n" "\tshow number of dirty data blocks.\n\n"; static ssize_t nilfs_segctor_README_show(struct nilfs_segctor_attr *attr, struct the_nilfs *nilfs, char *buf) { return sysfs_emit(buf, segctor_readme_str); } NILFS_SEGCTOR_RO_ATTR(last_pseg_block); NILFS_SEGCTOR_RO_ATTR(last_seg_sequence); 
NILFS_SEGCTOR_RO_ATTR(last_seg_checkpoint); NILFS_SEGCTOR_RO_ATTR(current_seg_sequence); NILFS_SEGCTOR_RO_ATTR(current_last_full_seg); NILFS_SEGCTOR_RO_ATTR(next_full_seg); NILFS_SEGCTOR_RO_ATTR(next_pseg_offset); NILFS_SEGCTOR_RO_ATTR(next_checkpoint); NILFS_SEGCTOR_RO_ATTR(last_seg_write_time); NILFS_SEGCTOR_RO_ATTR(last_seg_write_time_secs); NILFS_SEGCTOR_RO_ATTR(last_nongc_write_time); NILFS_SEGCTOR_RO_ATTR(last_nongc_write_time_secs); NILFS_SEGCTOR_RO_ATTR(dirty_data_blocks_count); NILFS_SEGCTOR_RO_ATTR(README); static struct attribute *nilfs_segctor_attrs[] = { NILFS_SEGCTOR_ATTR_LIST(last_pseg_block), NILFS_SEGCTOR_ATTR_LIST(last_seg_sequence), NILFS_SEGCTOR_ATTR_LIST(last_seg_checkpoint), NILFS_SEGCTOR_ATTR_LIST(current_seg_sequence), NILFS_SEGCTOR_ATTR_LIST(current_last_full_seg), NILFS_SEGCTOR_ATTR_LIST(next_full_seg), NILFS_SEGCTOR_ATTR_LIST(next_pseg_offset), NILFS_SEGCTOR_ATTR_LIST(next_checkpoint), NILFS_SEGCTOR_ATTR_LIST(last_seg_write_time), NILFS_SEGCTOR_ATTR_LIST(last_seg_write_time_secs), NILFS_SEGCTOR_ATTR_LIST(last_nongc_write_time), NILFS_SEGCTOR_ATTR_LIST(last_nongc_write_time_secs), NILFS_SEGCTOR_ATTR_LIST(dirty_data_blocks_count), NILFS_SEGCTOR_ATTR_LIST(README), NULL, }; ATTRIBUTE_GROUPS(nilfs_segctor); NILFS_DEV_INT_GROUP_OPS(segctor, dev); NILFS_DEV_INT_GROUP_TYPE(segctor, dev); NILFS_DEV_INT_GROUP_FNS(segctor, dev); /************************************************************************ * NILFS superblock attrs * ************************************************************************/ static ssize_t nilfs_superblock_sb_write_time_show(struct nilfs_superblock_attr *attr, struct the_nilfs *nilfs, char *buf) { time64_t sbwtime; down_read(&nilfs->ns_sem); sbwtime = nilfs->ns_sbwtime; up_read(&nilfs->ns_sem); return sysfs_emit(buf, "%ptTs\n", &sbwtime); } static ssize_t nilfs_superblock_sb_write_time_secs_show(struct nilfs_superblock_attr *attr, struct the_nilfs *nilfs, char *buf) { time64_t sbwtime; down_read(&nilfs->ns_sem); sbwtime = nilfs->ns_sbwtime; up_read(&nilfs->ns_sem); return sysfs_emit(buf, "%llu\n", sbwtime); } static ssize_t nilfs_superblock_sb_write_count_show(struct nilfs_superblock_attr *attr, struct the_nilfs *nilfs, char *buf) { unsigned int sbwcount; down_read(&nilfs->ns_sem); sbwcount = nilfs->ns_sbwcount; up_read(&nilfs->ns_sem); return sysfs_emit(buf, "%u\n", sbwcount); } static ssize_t nilfs_superblock_sb_update_frequency_show(struct nilfs_superblock_attr *attr, struct the_nilfs *nilfs, char *buf) { unsigned int sb_update_freq; down_read(&nilfs->ns_sem); sb_update_freq = nilfs->ns_sb_update_freq; up_read(&nilfs->ns_sem); return sysfs_emit(buf, "%u\n", sb_update_freq); } static ssize_t nilfs_superblock_sb_update_frequency_store(struct nilfs_superblock_attr *attr, struct the_nilfs *nilfs, const char *buf, size_t count) { unsigned int val; int err; err = kstrtouint(skip_spaces(buf), 0, &val); if (err) { nilfs_err(nilfs->ns_sb, "unable to convert string: err=%d", err); return err; } if (val < NILFS_SB_FREQ) { val = NILFS_SB_FREQ; nilfs_warn(nilfs->ns_sb, "superblock update frequency cannot be lesser than 10 seconds"); } down_write(&nilfs->ns_sem); nilfs->ns_sb_update_freq = val; up_write(&nilfs->ns_sem); return count; } static const char sb_readme_str[] = "The superblock group contains attributes that describe\n" "superblock's details.\n\n" "(1) sb_write_time\n\tshow previous write time of super block " "in human-readable format.\n\n" "(2) sb_write_time_secs\n\tshow previous write time of super block " "in seconds.\n\n" "(3) 
sb_write_count\n\tshow write count of super block.\n\n" "(4) sb_update_frequency\n" "\tshow/set interval of periodical update of superblock (in seconds).\n\n" "\tYou can set preferable frequency of superblock update by command:\n\n" "\t'echo <val> > /sys/fs/<nilfs>/<dev>/superblock/sb_update_frequency'\n"; static ssize_t nilfs_superblock_README_show(struct nilfs_superblock_attr *attr, struct the_nilfs *nilfs, char *buf) { return sysfs_emit(buf, sb_readme_str); } NILFS_SUPERBLOCK_RO_ATTR(sb_write_time); NILFS_SUPERBLOCK_RO_ATTR(sb_write_time_secs); NILFS_SUPERBLOCK_RO_ATTR(sb_write_count); NILFS_SUPERBLOCK_RW_ATTR(sb_update_frequency); NILFS_SUPERBLOCK_RO_ATTR(README); static struct attribute *nilfs_superblock_attrs[] = { NILFS_SUPERBLOCK_ATTR_LIST(sb_write_time), NILFS_SUPERBLOCK_ATTR_LIST(sb_write_time_secs), NILFS_SUPERBLOCK_ATTR_LIST(sb_write_count), NILFS_SUPERBLOCK_ATTR_LIST(sb_update_frequency), NILFS_SUPERBLOCK_ATTR_LIST(README), NULL, }; ATTRIBUTE_GROUPS(nilfs_superblock); NILFS_DEV_INT_GROUP_OPS(superblock, dev); NILFS_DEV_INT_GROUP_TYPE(superblock, dev); NILFS_DEV_INT_GROUP_FNS(superblock, dev); /************************************************************************ * NILFS device attrs * ************************************************************************/ static ssize_t nilfs_dev_revision_show(struct nilfs_dev_attr *attr, struct the_nilfs *nilfs, char *buf) { struct nilfs_super_block **sbp = nilfs->ns_sbp; u32 major = le32_to_cpu(sbp[0]->s_rev_level); u16 minor = le16_to_cpu(sbp[0]->s_minor_rev_level); return sysfs_emit(buf, "%d.%d\n", major, minor); } static ssize_t nilfs_dev_blocksize_show(struct nilfs_dev_attr *attr, struct the_nilfs *nilfs, char *buf) { return sysfs_emit(buf, "%u\n", nilfs->ns_blocksize); } static ssize_t nilfs_dev_device_size_show(struct nilfs_dev_attr *attr, struct the_nilfs *nilfs, char *buf) { struct nilfs_super_block **sbp = nilfs->ns_sbp; u64 dev_size = le64_to_cpu(sbp[0]->s_dev_size); return sysfs_emit(buf, "%llu\n", dev_size); } static ssize_t nilfs_dev_free_blocks_show(struct nilfs_dev_attr *attr, struct the_nilfs *nilfs, char *buf) { sector_t free_blocks = 0; nilfs_count_free_blocks(nilfs, &free_blocks); return sysfs_emit(buf, "%llu\n", (unsigned long long)free_blocks); } static ssize_t nilfs_dev_uuid_show(struct nilfs_dev_attr *attr, struct the_nilfs *nilfs, char *buf) { struct nilfs_super_block **sbp = nilfs->ns_sbp; return sysfs_emit(buf, "%pUb\n", sbp[0]->s_uuid); } static ssize_t nilfs_dev_volume_name_show(struct nilfs_dev_attr *attr, struct the_nilfs *nilfs, char *buf) { struct nilfs_super_block **sbp = nilfs->ns_sbp; return scnprintf(buf, sizeof(sbp[0]->s_volume_name), "%s\n", sbp[0]->s_volume_name); } static const char dev_readme_str[] = "The <device> group contains attributes that describe file system\n" "partition's details.\n\n" "(1) revision\n\tshow NILFS file system revision.\n\n" "(2) blocksize\n\tshow volume block size in bytes.\n\n" "(3) device_size\n\tshow volume size in bytes.\n\n" "(4) free_blocks\n\tshow count of free blocks on volume.\n\n" "(5) uuid\n\tshow volume's UUID.\n\n" "(6) volume_name\n\tshow volume's name.\n\n"; static ssize_t nilfs_dev_README_show(struct nilfs_dev_attr *attr, struct the_nilfs *nilfs, char *buf) { return sysfs_emit(buf, dev_readme_str); } NILFS_DEV_RO_ATTR(revision); NILFS_DEV_RO_ATTR(blocksize); NILFS_DEV_RO_ATTR(device_size); NILFS_DEV_RO_ATTR(free_blocks); NILFS_DEV_RO_ATTR(uuid); NILFS_DEV_RO_ATTR(volume_name); NILFS_DEV_RO_ATTR(README); static struct attribute *nilfs_dev_attrs[] = { 
NILFS_DEV_ATTR_LIST(revision), NILFS_DEV_ATTR_LIST(blocksize), NILFS_DEV_ATTR_LIST(device_size), NILFS_DEV_ATTR_LIST(free_blocks), NILFS_DEV_ATTR_LIST(uuid), NILFS_DEV_ATTR_LIST(volume_name), NILFS_DEV_ATTR_LIST(README), NULL, }; ATTRIBUTE_GROUPS(nilfs_dev); static ssize_t nilfs_dev_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct the_nilfs *nilfs = container_of(kobj, struct the_nilfs, ns_dev_kobj); struct nilfs_dev_attr *a = container_of(attr, struct nilfs_dev_attr, attr); return a->show ? a->show(a, nilfs, buf) : 0; } static ssize_t nilfs_dev_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t len) { struct the_nilfs *nilfs = container_of(kobj, struct the_nilfs, ns_dev_kobj); struct nilfs_dev_attr *a = container_of(attr, struct nilfs_dev_attr, attr); return a->store ? a->store(a, nilfs, buf, len) : 0; } static void nilfs_dev_attr_release(struct kobject *kobj) { struct the_nilfs *nilfs = container_of(kobj, struct the_nilfs, ns_dev_kobj); complete(&nilfs->ns_dev_kobj_unregister); } static const struct sysfs_ops nilfs_dev_attr_ops = { .show = nilfs_dev_attr_show, .store = nilfs_dev_attr_store, }; static struct kobj_type nilfs_dev_ktype = { .default_groups = nilfs_dev_groups, .sysfs_ops = &nilfs_dev_attr_ops, .release = nilfs_dev_attr_release, }; int nilfs_sysfs_create_device_group(struct super_block *sb) { struct the_nilfs *nilfs = sb->s_fs_info; size_t devgrp_size = sizeof(struct nilfs_sysfs_dev_subgroups); int err; nilfs->ns_dev_subgroups = kzalloc(devgrp_size, GFP_KERNEL); if (unlikely(!nilfs->ns_dev_subgroups)) { err = -ENOMEM; nilfs_err(sb, "unable to allocate memory for device group"); goto failed_create_device_group; } nilfs->ns_dev_kobj.kset = nilfs_kset; init_completion(&nilfs->ns_dev_kobj_unregister); err = kobject_init_and_add(&nilfs->ns_dev_kobj, &nilfs_dev_ktype, NULL, "%s", sb->s_id); if (err) goto cleanup_dev_kobject; err = nilfs_sysfs_create_mounted_snapshots_group(nilfs); if (err) goto cleanup_dev_kobject; err = nilfs_sysfs_create_checkpoints_group(nilfs); if (err) goto delete_mounted_snapshots_group; err = nilfs_sysfs_create_segments_group(nilfs); if (err) goto delete_checkpoints_group; err = nilfs_sysfs_create_superblock_group(nilfs); if (err) goto delete_segments_group; err = nilfs_sysfs_create_segctor_group(nilfs); if (err) goto delete_superblock_group; return 0; delete_superblock_group: nilfs_sysfs_delete_superblock_group(nilfs); delete_segments_group: nilfs_sysfs_delete_segments_group(nilfs); delete_checkpoints_group: nilfs_sysfs_delete_checkpoints_group(nilfs); delete_mounted_snapshots_group: nilfs_sysfs_delete_mounted_snapshots_group(nilfs); cleanup_dev_kobject: kobject_put(&nilfs->ns_dev_kobj); kfree(nilfs->ns_dev_subgroups); failed_create_device_group: return err; } void nilfs_sysfs_delete_device_group(struct the_nilfs *nilfs) { nilfs_sysfs_delete_mounted_snapshots_group(nilfs); nilfs_sysfs_delete_checkpoints_group(nilfs); nilfs_sysfs_delete_segments_group(nilfs); nilfs_sysfs_delete_superblock_group(nilfs); nilfs_sysfs_delete_segctor_group(nilfs); kobject_del(&nilfs->ns_dev_kobj); kobject_put(&nilfs->ns_dev_kobj); kfree(nilfs->ns_dev_subgroups); } /************************************************************************ * NILFS feature attrs * ************************************************************************/ static ssize_t nilfs_feature_revision_show(struct kobject *kobj, struct attribute *attr, char *buf) { return sysfs_emit(buf, "%d.%d\n", NILFS_CURRENT_REV, NILFS_MINOR_REV); } static const char 
features_readme_str[] = "The features group contains attributes that describe NILFS file\n" "system driver features.\n\n" "(1) revision\n\tshow current revision of NILFS file system driver.\n"; static ssize_t nilfs_feature_README_show(struct kobject *kobj, struct attribute *attr, char *buf) { return sysfs_emit(buf, features_readme_str); } NILFS_FEATURE_RO_ATTR(revision); NILFS_FEATURE_RO_ATTR(README); static struct attribute *nilfs_feature_attrs[] = { NILFS_FEATURE_ATTR_LIST(revision), NILFS_FEATURE_ATTR_LIST(README), NULL, }; static const struct attribute_group nilfs_feature_attr_group = { .name = "features", .attrs = nilfs_feature_attrs, }; int __init nilfs_sysfs_init(void) { int err; nilfs_kset = kset_create_and_add(NILFS_ROOT_GROUP_NAME, NULL, fs_kobj); if (!nilfs_kset) { err = -ENOMEM; nilfs_err(NULL, "unable to create sysfs entry: err=%d", err); goto failed_sysfs_init; } err = sysfs_create_group(&nilfs_kset->kobj, &nilfs_feature_attr_group); if (unlikely(err)) { nilfs_err(NULL, "unable to create feature group: err=%d", err); goto cleanup_sysfs_init; } return 0; cleanup_sysfs_init: kset_unregister(nilfs_kset); failed_sysfs_init: return err; } void nilfs_sysfs_exit(void) { sysfs_remove_group(&nilfs_kset->kobj, &nilfs_feature_attr_group); kset_unregister(nilfs_kset); }
linux-master
fs/nilfs2/sysfs.c
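The sysfs.c record above builds the per-device hierarchy (mounted_snapshots, checkpoints, segments, segctor, superblock groups) out of kobjects and attribute groups, and its README strings describe exactly one writable knob, superblock/sb_update_frequency. As a quick illustration of how those attributes are consumed, here is a hypothetical userspace C sketch using only standard POSIX stdio; the root group name "nilfs2" and the device name "sda1" in the paths are assumptions for the example, not values taken from this dump.

/* Hypothetical userspace sketch: read/write NILFS sysfs attributes.
 * Paths assume the kset created in nilfs_sysfs_init() is named "nilfs2"
 * and that a volume on /dev/sda1 is mounted; adjust as needed.
 */
#include <stdio.h>
#include <string.h>

static int read_attr(const char *path, char *buf, size_t len)
{
	FILE *f = fopen(path, "r");

	if (!f)
		return -1;
	if (!fgets(buf, len, f)) {
		fclose(f);
		return -1;
	}
	fclose(f);
	buf[strcspn(buf, "\n")] = '\0';
	return 0;
}

int main(void)
{
	char val[128];
	FILE *f;

	/* Driver-wide attribute from nilfs_feature_attr_group. */
	if (read_attr("/sys/fs/nilfs2/features/revision", val, sizeof(val)) == 0)
		printf("driver revision: %s\n", val);

	/* Per-device group registered by nilfs_sysfs_create_device_group(). */
	if (read_attr("/sys/fs/nilfs2/sda1/checkpoints/checkpoints_number",
		      val, sizeof(val)) == 0)
		printf("checkpoints: %s\n", val);

	/* The only writable attribute shown above; the store handler clamps
	 * values below 10 seconds. */
	f = fopen("/sys/fs/nilfs2/sda1/superblock/sb_update_frequency", "w");
	if (f) {
		fputs("30\n", f);
		fclose(f);
	}
	return 0;
}

Writing a value smaller than the minimum is not rejected: per the store handler above, it is silently raised to NILFS_SB_FREQ and a warning is logged.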
// SPDX-License-Identifier: GPL-2.0+ /* * Dummy inodes to buffer blocks for garbage collection * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * * Written by Seiji Kihara, Amagai Yoshiji, and Ryusuke Konishi. * Revised by Ryusuke Konishi. * */ /* * This file adds the cache of on-disk blocks to be moved in garbage * collection. The disk blocks are held with dummy inodes (called * gcinodes), and this file provides lookup function of the dummy * inodes and their buffer read function. * * Buffers and pages held by the dummy inodes will be released each * time after they are copied to a new log. Dirty blocks made on the * current generation and the blocks to be moved by GC never overlap * because the dirty blocks make a new generation; they rather must be * written individually. */ #include <linux/buffer_head.h> #include <linux/mpage.h> #include <linux/hash.h> #include <linux/slab.h> #include <linux/swap.h> #include "nilfs.h" #include "btree.h" #include "btnode.h" #include "page.h" #include "mdt.h" #include "dat.h" #include "ifile.h" /* * nilfs_gccache_submit_read_data() - add data buffer and submit read request * @inode - gc inode * @blkoff - dummy offset treated as the key for the page cache * @pbn - physical block number of the block * @vbn - virtual block number of the block, 0 for non-virtual block * @out_bh - indirect pointer to a buffer_head struct to receive the results * * Description: nilfs_gccache_submit_read_data() registers the data buffer * specified by @pbn to the GC pagecache with the key @blkoff. * This function sets @vbn (@pbn if @vbn is zero) in b_blocknr of the buffer. * * Return Value: On success, 0 is returned. On Error, one of the following * negative error code is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. * * %-ENOENT - The block specified with @pbn does not exist. */ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff, sector_t pbn, __u64 vbn, struct buffer_head **out_bh) { struct buffer_head *bh; int err; bh = nilfs_grab_buffer(inode, inode->i_mapping, blkoff, 0); if (unlikely(!bh)) return -ENOMEM; if (buffer_uptodate(bh)) goto out; if (pbn == 0) { struct the_nilfs *nilfs = inode->i_sb->s_fs_info; err = nilfs_dat_translate(nilfs->ns_dat, vbn, &pbn); if (unlikely(err)) { /* -EIO, -ENOMEM, -ENOENT */ brelse(bh); goto failed; } } lock_buffer(bh); if (buffer_uptodate(bh)) { unlock_buffer(bh); goto out; } if (!buffer_mapped(bh)) { bh->b_bdev = inode->i_sb->s_bdev; set_buffer_mapped(bh); } bh->b_blocknr = pbn; bh->b_end_io = end_buffer_read_sync; get_bh(bh); submit_bh(REQ_OP_READ, bh); if (vbn) bh->b_blocknr = vbn; out: err = 0; *out_bh = bh; failed: unlock_page(bh->b_page); put_page(bh->b_page); return err; } /* * nilfs_gccache_submit_read_node() - add node buffer and submit read request * @inode - gc inode * @pbn - physical block number for the block * @vbn - virtual block number for the block * @out_bh - indirect pointer to a buffer_head struct to receive the results * * Description: nilfs_gccache_submit_read_node() registers the node buffer * specified by @vbn to the GC pagecache. @pbn can be supplied by the * caller to avoid translation of the disk block address. * * Return Value: On success, 0 is returned. On Error, one of the following * negative error code is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. 
*/ int nilfs_gccache_submit_read_node(struct inode *inode, sector_t pbn, __u64 vbn, struct buffer_head **out_bh) { struct inode *btnc_inode = NILFS_I(inode)->i_assoc_inode; int ret; ret = nilfs_btnode_submit_block(btnc_inode->i_mapping, vbn ? : pbn, pbn, REQ_OP_READ, out_bh, &pbn); if (ret == -EEXIST) /* internal code (cache hit) */ ret = 0; return ret; } int nilfs_gccache_wait_and_mark_dirty(struct buffer_head *bh) { wait_on_buffer(bh); if (!buffer_uptodate(bh)) { struct inode *inode = bh->b_folio->mapping->host; nilfs_err(inode->i_sb, "I/O error reading %s block for GC (ino=%lu, vblocknr=%llu)", buffer_nilfs_node(bh) ? "node" : "data", inode->i_ino, (unsigned long long)bh->b_blocknr); return -EIO; } if (buffer_dirty(bh)) return -EEXIST; if (buffer_nilfs_node(bh) && nilfs_btree_broken_node_block(bh)) { clear_buffer_uptodate(bh); return -EIO; } mark_buffer_dirty(bh); return 0; } int nilfs_init_gcinode(struct inode *inode) { struct nilfs_inode_info *ii = NILFS_I(inode); inode->i_mode = S_IFREG; mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS); inode->i_mapping->a_ops = &empty_aops; ii->i_flags = 0; nilfs_bmap_init_gc(ii->i_bmap); return nilfs_attach_btree_node_cache(inode); } /** * nilfs_remove_all_gcinodes() - remove all unprocessed gc inodes */ void nilfs_remove_all_gcinodes(struct the_nilfs *nilfs) { struct list_head *head = &nilfs->ns_gc_inodes; struct nilfs_inode_info *ii; while (!list_empty(head)) { ii = list_first_entry(head, struct nilfs_inode_info, i_dirty); list_del_init(&ii->i_dirty); truncate_inode_pages(&ii->vfs_inode.i_data, 0); nilfs_btnode_cache_clear(ii->i_assoc_inode->i_mapping); iput(&ii->vfs_inode); } }
linux-master
fs/nilfs2/gcinode.c
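gcinode.c splits GC block handling into a submit phase (nilfs_gccache_submit_read_data/_node) and a later wait-and-validate phase (nilfs_gccache_wait_and_mark_dirty), which uses -EEXIST as an internal "already dirty, nothing to do" code. The sketch below is only a hypothetical userspace model of that second phase's decision logic; struct gc_buf and its flags are invented stand-ins for buffer_head state bits, included to make the return-value convention (0, -EIO, -EEXIST) explicit.

/* Hypothetical model of the checks in nilfs_gccache_wait_and_mark_dirty();
 * not the kernel buffer_head API.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct gc_buf {
	bool uptodate;	/* read completed successfully             */
	bool dirty;	/* already queued for writing to a new log */
	bool node;	/* b-tree node block (vs. data block)      */
	bool broken;	/* node block failed sanity checks         */
};

/* 0: mark dirty and move the block; -EIO: read/verify failure;
 * -EEXIST: already dirty, no further GC handling needed. */
static int gc_check_and_mark_dirty(struct gc_buf *b)
{
	if (!b->uptodate)
		return -EIO;
	if (b->dirty)
		return -EEXIST;
	if (b->node && b->broken) {
		b->uptodate = false;	/* force a re-read next time */
		return -EIO;
	}
	b->dirty = true;		/* schedule copy into the new log */
	return 0;
}

int main(void)
{
	struct gc_buf ok = { .uptodate = true };
	struct gc_buf busy = { .uptodate = true, .dirty = true };

	printf("clean data block: %d\n", gc_check_and_mark_dirty(&ok));
	printf("already dirty:    %d\n", gc_check_and_mark_dirty(&busy));
	return 0;
}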
// SPDX-License-Identifier: GPL-2.0+ /* * NILFS segment buffer * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * * Written by Ryusuke Konishi. * */ #include <linux/buffer_head.h> #include <linux/writeback.h> #include <linux/crc32.h> #include <linux/backing-dev.h> #include <linux/slab.h> #include "page.h" #include "segbuf.h" struct nilfs_write_info { struct the_nilfs *nilfs; struct bio *bio; int start, end; /* The region to be submitted */ int rest_blocks; int max_pages; int nr_vecs; sector_t blocknr; }; static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf, struct the_nilfs *nilfs); static int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf); struct nilfs_segment_buffer *nilfs_segbuf_new(struct super_block *sb) { struct nilfs_segment_buffer *segbuf; segbuf = kmem_cache_alloc(nilfs_segbuf_cachep, GFP_NOFS); if (unlikely(!segbuf)) return NULL; segbuf->sb_super = sb; INIT_LIST_HEAD(&segbuf->sb_list); INIT_LIST_HEAD(&segbuf->sb_segsum_buffers); INIT_LIST_HEAD(&segbuf->sb_payload_buffers); segbuf->sb_super_root = NULL; init_completion(&segbuf->sb_bio_event); atomic_set(&segbuf->sb_err, 0); segbuf->sb_nbio = 0; return segbuf; } void nilfs_segbuf_free(struct nilfs_segment_buffer *segbuf) { kmem_cache_free(nilfs_segbuf_cachep, segbuf); } void nilfs_segbuf_map(struct nilfs_segment_buffer *segbuf, __u64 segnum, unsigned long offset, struct the_nilfs *nilfs) { segbuf->sb_segnum = segnum; nilfs_get_segment_range(nilfs, segnum, &segbuf->sb_fseg_start, &segbuf->sb_fseg_end); segbuf->sb_pseg_start = segbuf->sb_fseg_start + offset; segbuf->sb_rest_blocks = segbuf->sb_fseg_end - segbuf->sb_pseg_start + 1; } /** * nilfs_segbuf_map_cont - map a new log behind a given log * @segbuf: new segment buffer * @prev: segment buffer containing a log to be continued */ void nilfs_segbuf_map_cont(struct nilfs_segment_buffer *segbuf, struct nilfs_segment_buffer *prev) { segbuf->sb_segnum = prev->sb_segnum; segbuf->sb_fseg_start = prev->sb_fseg_start; segbuf->sb_fseg_end = prev->sb_fseg_end; segbuf->sb_pseg_start = prev->sb_pseg_start + prev->sb_sum.nblocks; segbuf->sb_rest_blocks = segbuf->sb_fseg_end - segbuf->sb_pseg_start + 1; } void nilfs_segbuf_set_next_segnum(struct nilfs_segment_buffer *segbuf, __u64 nextnum, struct the_nilfs *nilfs) { segbuf->sb_nextnum = nextnum; segbuf->sb_sum.next = nilfs_get_segment_start_blocknr(nilfs, nextnum); } int nilfs_segbuf_extend_segsum(struct nilfs_segment_buffer *segbuf) { struct buffer_head *bh; bh = sb_getblk(segbuf->sb_super, segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk); if (unlikely(!bh)) return -ENOMEM; lock_buffer(bh); if (!buffer_uptodate(bh)) { memset(bh->b_data, 0, bh->b_size); set_buffer_uptodate(bh); } unlock_buffer(bh); nilfs_segbuf_add_segsum_buffer(segbuf, bh); return 0; } int nilfs_segbuf_extend_payload(struct nilfs_segment_buffer *segbuf, struct buffer_head **bhp) { struct buffer_head *bh; bh = sb_getblk(segbuf->sb_super, segbuf->sb_pseg_start + segbuf->sb_sum.nblocks); if (unlikely(!bh)) return -ENOMEM; nilfs_segbuf_add_payload_buffer(segbuf, bh); *bhp = bh; return 0; } int nilfs_segbuf_reset(struct nilfs_segment_buffer *segbuf, unsigned int flags, time64_t ctime, __u64 cno) { int err; segbuf->sb_sum.nblocks = segbuf->sb_sum.nsumblk = 0; err = nilfs_segbuf_extend_segsum(segbuf); if (unlikely(err)) return err; segbuf->sb_sum.flags = flags; segbuf->sb_sum.sumbytes = sizeof(struct nilfs_segment_summary); segbuf->sb_sum.nfinfo = segbuf->sb_sum.nfileblk = 0; segbuf->sb_sum.ctime = ctime; segbuf->sb_sum.cno = cno; return 0; } 
/* * Setup segment summary */ void nilfs_segbuf_fill_in_segsum(struct nilfs_segment_buffer *segbuf) { struct nilfs_segment_summary *raw_sum; struct buffer_head *bh_sum; bh_sum = list_entry(segbuf->sb_segsum_buffers.next, struct buffer_head, b_assoc_buffers); raw_sum = (struct nilfs_segment_summary *)bh_sum->b_data; raw_sum->ss_magic = cpu_to_le32(NILFS_SEGSUM_MAGIC); raw_sum->ss_bytes = cpu_to_le16(sizeof(*raw_sum)); raw_sum->ss_flags = cpu_to_le16(segbuf->sb_sum.flags); raw_sum->ss_seq = cpu_to_le64(segbuf->sb_sum.seg_seq); raw_sum->ss_create = cpu_to_le64(segbuf->sb_sum.ctime); raw_sum->ss_next = cpu_to_le64(segbuf->sb_sum.next); raw_sum->ss_nblocks = cpu_to_le32(segbuf->sb_sum.nblocks); raw_sum->ss_nfinfo = cpu_to_le32(segbuf->sb_sum.nfinfo); raw_sum->ss_sumbytes = cpu_to_le32(segbuf->sb_sum.sumbytes); raw_sum->ss_pad = 0; raw_sum->ss_cno = cpu_to_le64(segbuf->sb_sum.cno); } /* * CRC calculation routines */ static void nilfs_segbuf_fill_in_segsum_crc(struct nilfs_segment_buffer *segbuf, u32 seed) { struct buffer_head *bh; struct nilfs_segment_summary *raw_sum; unsigned long size, bytes = segbuf->sb_sum.sumbytes; u32 crc; bh = list_entry(segbuf->sb_segsum_buffers.next, struct buffer_head, b_assoc_buffers); raw_sum = (struct nilfs_segment_summary *)bh->b_data; size = min_t(unsigned long, bytes, bh->b_size); crc = crc32_le(seed, (unsigned char *)raw_sum + sizeof(raw_sum->ss_datasum) + sizeof(raw_sum->ss_sumsum), size - (sizeof(raw_sum->ss_datasum) + sizeof(raw_sum->ss_sumsum))); list_for_each_entry_continue(bh, &segbuf->sb_segsum_buffers, b_assoc_buffers) { bytes -= size; size = min_t(unsigned long, bytes, bh->b_size); crc = crc32_le(crc, bh->b_data, size); } raw_sum->ss_sumsum = cpu_to_le32(crc); } static void nilfs_segbuf_fill_in_data_crc(struct nilfs_segment_buffer *segbuf, u32 seed) { struct buffer_head *bh; struct nilfs_segment_summary *raw_sum; void *kaddr; u32 crc; bh = list_entry(segbuf->sb_segsum_buffers.next, struct buffer_head, b_assoc_buffers); raw_sum = (struct nilfs_segment_summary *)bh->b_data; crc = crc32_le(seed, (unsigned char *)raw_sum + sizeof(raw_sum->ss_datasum), bh->b_size - sizeof(raw_sum->ss_datasum)); list_for_each_entry_continue(bh, &segbuf->sb_segsum_buffers, b_assoc_buffers) { crc = crc32_le(crc, bh->b_data, bh->b_size); } list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) { kaddr = kmap_atomic(bh->b_page); crc = crc32_le(crc, kaddr + bh_offset(bh), bh->b_size); kunmap_atomic(kaddr); } raw_sum->ss_datasum = cpu_to_le32(crc); } static void nilfs_segbuf_fill_in_super_root_crc(struct nilfs_segment_buffer *segbuf, u32 seed) { struct nilfs_super_root *raw_sr; struct the_nilfs *nilfs = segbuf->sb_super->s_fs_info; unsigned int srsize; u32 crc; raw_sr = (struct nilfs_super_root *)segbuf->sb_super_root->b_data; srsize = NILFS_SR_BYTES(nilfs->ns_inode_size); crc = crc32_le(seed, (unsigned char *)raw_sr + sizeof(raw_sr->sr_sum), srsize - sizeof(raw_sr->sr_sum)); raw_sr->sr_sum = cpu_to_le32(crc); } static void nilfs_release_buffers(struct list_head *list) { struct buffer_head *bh, *n; list_for_each_entry_safe(bh, n, list, b_assoc_buffers) { list_del_init(&bh->b_assoc_buffers); brelse(bh); } } static void nilfs_segbuf_clear(struct nilfs_segment_buffer *segbuf) { nilfs_release_buffers(&segbuf->sb_segsum_buffers); nilfs_release_buffers(&segbuf->sb_payload_buffers); segbuf->sb_super_root = NULL; } /* * Iterators for segment buffers */ void nilfs_clear_logs(struct list_head *logs) { struct nilfs_segment_buffer *segbuf; list_for_each_entry(segbuf, logs, 
sb_list) nilfs_segbuf_clear(segbuf); } void nilfs_truncate_logs(struct list_head *logs, struct nilfs_segment_buffer *last) { struct nilfs_segment_buffer *n, *segbuf; segbuf = list_prepare_entry(last, logs, sb_list); list_for_each_entry_safe_continue(segbuf, n, logs, sb_list) { list_del_init(&segbuf->sb_list); nilfs_segbuf_clear(segbuf); nilfs_segbuf_free(segbuf); } } int nilfs_write_logs(struct list_head *logs, struct the_nilfs *nilfs) { struct nilfs_segment_buffer *segbuf; int ret = 0; list_for_each_entry(segbuf, logs, sb_list) { ret = nilfs_segbuf_write(segbuf, nilfs); if (ret) break; } return ret; } int nilfs_wait_on_logs(struct list_head *logs) { struct nilfs_segment_buffer *segbuf; int err, ret = 0; list_for_each_entry(segbuf, logs, sb_list) { err = nilfs_segbuf_wait(segbuf); if (err && !ret) ret = err; } return ret; } /** * nilfs_add_checksums_on_logs - add checksums on the logs * @logs: list of segment buffers storing target logs * @seed: checksum seed value */ void nilfs_add_checksums_on_logs(struct list_head *logs, u32 seed) { struct nilfs_segment_buffer *segbuf; list_for_each_entry(segbuf, logs, sb_list) { if (segbuf->sb_super_root) nilfs_segbuf_fill_in_super_root_crc(segbuf, seed); nilfs_segbuf_fill_in_segsum_crc(segbuf, seed); nilfs_segbuf_fill_in_data_crc(segbuf, seed); } } /* * BIO operations */ static void nilfs_end_bio_write(struct bio *bio) { struct nilfs_segment_buffer *segbuf = bio->bi_private; if (bio->bi_status) atomic_inc(&segbuf->sb_err); bio_put(bio); complete(&segbuf->sb_bio_event); } static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf, struct nilfs_write_info *wi) { struct bio *bio = wi->bio; bio->bi_end_io = nilfs_end_bio_write; bio->bi_private = segbuf; submit_bio(bio); segbuf->sb_nbio++; wi->bio = NULL; wi->rest_blocks -= wi->end - wi->start; wi->nr_vecs = min(wi->max_pages, wi->rest_blocks); wi->start = wi->end; return 0; } static void nilfs_segbuf_prepare_write(struct nilfs_segment_buffer *segbuf, struct nilfs_write_info *wi) { wi->bio = NULL; wi->rest_blocks = segbuf->sb_sum.nblocks; wi->max_pages = BIO_MAX_VECS; wi->nr_vecs = min(wi->max_pages, wi->rest_blocks); wi->start = wi->end = 0; wi->blocknr = segbuf->sb_pseg_start; } static int nilfs_segbuf_submit_bh(struct nilfs_segment_buffer *segbuf, struct nilfs_write_info *wi, struct buffer_head *bh) { int len, err; BUG_ON(wi->nr_vecs <= 0); repeat: if (!wi->bio) { wi->bio = bio_alloc(wi->nilfs->ns_bdev, wi->nr_vecs, REQ_OP_WRITE, GFP_NOIO); wi->bio->bi_iter.bi_sector = (wi->blocknr + wi->end) << (wi->nilfs->ns_blocksize_bits - 9); } len = bio_add_page(wi->bio, bh->b_page, bh->b_size, bh_offset(bh)); if (len == bh->b_size) { wi->end++; return 0; } /* bio is FULL */ err = nilfs_segbuf_submit_bio(segbuf, wi); /* never submit current bh */ if (likely(!err)) goto repeat; return err; } /** * nilfs_segbuf_write - submit write requests of a log * @segbuf: buffer storing a log to be written * @nilfs: nilfs object * * Return Value: On Success, 0 is returned. On Error, one of the following * negative error code is returned. * * %-EIO - I/O error * * %-ENOMEM - Insufficient memory available. 
*/ static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf, struct the_nilfs *nilfs) { struct nilfs_write_info wi; struct buffer_head *bh; int res = 0; wi.nilfs = nilfs; nilfs_segbuf_prepare_write(segbuf, &wi); list_for_each_entry(bh, &segbuf->sb_segsum_buffers, b_assoc_buffers) { res = nilfs_segbuf_submit_bh(segbuf, &wi, bh); if (unlikely(res)) goto failed_bio; } list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) { res = nilfs_segbuf_submit_bh(segbuf, &wi, bh); if (unlikely(res)) goto failed_bio; } if (wi.bio) { /* * Last BIO is always sent through the following * submission. */ wi.bio->bi_opf |= REQ_SYNC; res = nilfs_segbuf_submit_bio(segbuf, &wi); } failed_bio: return res; } /** * nilfs_segbuf_wait - wait for completion of requested BIOs * @segbuf: segment buffer * * Return Value: On Success, 0 is returned. On Error, one of the following * negative error code is returned. * * %-EIO - I/O error */ static int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf) { int err = 0; if (!segbuf->sb_nbio) return 0; do { wait_for_completion(&segbuf->sb_bio_event); } while (--segbuf->sb_nbio > 0); if (unlikely(atomic_read(&segbuf->sb_err) > 0)) { nilfs_err(segbuf->sb_super, "I/O error writing log (start-blocknr=%llu, block-count=%lu) in segment %llu", (unsigned long long)segbuf->sb_pseg_start, segbuf->sb_sum.nblocks, (unsigned long long)segbuf->sb_segnum); err = -EIO; } return err; }
linux-master
fs/nilfs2/segbuf.c
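The checksum helpers in segbuf.c seed crc32_le with a per-filesystem value and deliberately start hashing just past the summary's own ss_datasum/ss_sumsum words, so the stored checksums never cover themselves. The sketch below reproduces that skip-your-own-checksum pattern in plain C; the record layout and seed are invented for the demo, and the bitwise, bit-reflected, no-final-XOR CRC here is only my approximation of the kernel's crc32_le convention.

/* Hypothetical sketch of the "checksum everything except the checksum
 * fields themselves" pattern from nilfs_segbuf_fill_in_segsum_crc().
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Bit-reflected CRC-32, no pre/post inversion (crc32_le-style). */
static uint32_t crc32_le(uint32_t crc, const unsigned char *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0xedb88320u & -(crc & 1u));
	}
	return crc;
}

struct demo_summary {
	uint32_t datasum;	/* covers the payload blocks, filled in last */
	uint32_t sumsum;	/* covers the summary itself, filled in last */
	uint32_t nblocks;
	uint32_t flags;
	char payload[32];
};

int main(void)
{
	struct demo_summary s = { .nblocks = 4, .flags = 1 };
	const uint32_t seed = 0x12345678;	/* arbitrary demo seed */
	size_t skip = sizeof(s.datasum) + sizeof(s.sumsum);

	strcpy(s.payload, "segment payload goes here");

	/* Hash the record starting just past its own checksum words. */
	s.sumsum = crc32_le(seed, (unsigned char *)&s + skip,
			    sizeof(s) - skip);
	printf("sumsum = 0x%08x\n", (unsigned int)s.sumsum);
	return 0;
}

Because the checksum words sit at the front of the structure and are excluded from the hashed range, they can be written into place after the fact without invalidating the value just computed.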
// SPDX-License-Identifier: GPL-2.0+ /* * NILFS direct block pointer. * * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. * * Written by Koji Sato. */ #include <linux/errno.h> #include "nilfs.h" #include "page.h" #include "direct.h" #include "alloc.h" #include "dat.h" static inline __le64 *nilfs_direct_dptrs(const struct nilfs_bmap *direct) { return (__le64 *) ((struct nilfs_direct_node *)direct->b_u.u_data + 1); } static inline __u64 nilfs_direct_get_ptr(const struct nilfs_bmap *direct, __u64 key) { return le64_to_cpu(*(nilfs_direct_dptrs(direct) + key)); } static inline void nilfs_direct_set_ptr(struct nilfs_bmap *direct, __u64 key, __u64 ptr) { *(nilfs_direct_dptrs(direct) + key) = cpu_to_le64(ptr); } static int nilfs_direct_lookup(const struct nilfs_bmap *direct, __u64 key, int level, __u64 *ptrp) { __u64 ptr; if (key > NILFS_DIRECT_KEY_MAX || level != 1) return -ENOENT; ptr = nilfs_direct_get_ptr(direct, key); if (ptr == NILFS_BMAP_INVALID_PTR) return -ENOENT; *ptrp = ptr; return 0; } static int nilfs_direct_lookup_contig(const struct nilfs_bmap *direct, __u64 key, __u64 *ptrp, unsigned int maxblocks) { struct inode *dat = NULL; __u64 ptr, ptr2; sector_t blocknr; int ret, cnt; if (key > NILFS_DIRECT_KEY_MAX) return -ENOENT; ptr = nilfs_direct_get_ptr(direct, key); if (ptr == NILFS_BMAP_INVALID_PTR) return -ENOENT; if (NILFS_BMAP_USE_VBN(direct)) { dat = nilfs_bmap_get_dat(direct); ret = nilfs_dat_translate(dat, ptr, &blocknr); if (ret < 0) return ret; ptr = blocknr; } maxblocks = min_t(unsigned int, maxblocks, NILFS_DIRECT_KEY_MAX - key + 1); for (cnt = 1; cnt < maxblocks && (ptr2 = nilfs_direct_get_ptr(direct, key + cnt)) != NILFS_BMAP_INVALID_PTR; cnt++) { if (dat) { ret = nilfs_dat_translate(dat, ptr2, &blocknr); if (ret < 0) return ret; ptr2 = blocknr; } if (ptr2 != ptr + cnt) break; } *ptrp = ptr; return cnt; } static __u64 nilfs_direct_find_target_v(const struct nilfs_bmap *direct, __u64 key) { __u64 ptr; ptr = nilfs_bmap_find_target_seq(direct, key); if (ptr != NILFS_BMAP_INVALID_PTR) /* sequential access */ return ptr; /* block group */ return nilfs_bmap_find_target_in_group(direct); } static int nilfs_direct_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr) { union nilfs_bmap_ptr_req req; struct inode *dat = NULL; struct buffer_head *bh; int ret; if (key > NILFS_DIRECT_KEY_MAX) return -ENOENT; if (nilfs_direct_get_ptr(bmap, key) != NILFS_BMAP_INVALID_PTR) return -EEXIST; if (NILFS_BMAP_USE_VBN(bmap)) { req.bpr_ptr = nilfs_direct_find_target_v(bmap, key); dat = nilfs_bmap_get_dat(bmap); } ret = nilfs_bmap_prepare_alloc_ptr(bmap, &req, dat); if (!ret) { /* ptr must be a pointer to a buffer head. */ bh = (struct buffer_head *)((unsigned long)ptr); set_buffer_nilfs_volatile(bh); nilfs_bmap_commit_alloc_ptr(bmap, &req, dat); nilfs_direct_set_ptr(bmap, key, req.bpr_ptr); if (!nilfs_bmap_dirty(bmap)) nilfs_bmap_set_dirty(bmap); if (NILFS_BMAP_USE_VBN(bmap)) nilfs_bmap_set_target_v(bmap, key, req.bpr_ptr); nilfs_inode_add_blocks(bmap->b_inode, 1); } return ret; } static int nilfs_direct_delete(struct nilfs_bmap *bmap, __u64 key) { union nilfs_bmap_ptr_req req; struct inode *dat; int ret; if (key > NILFS_DIRECT_KEY_MAX || nilfs_direct_get_ptr(bmap, key) == NILFS_BMAP_INVALID_PTR) return -ENOENT; dat = NILFS_BMAP_USE_VBN(bmap) ? 
nilfs_bmap_get_dat(bmap) : NULL; req.bpr_ptr = nilfs_direct_get_ptr(bmap, key); ret = nilfs_bmap_prepare_end_ptr(bmap, &req, dat); if (!ret) { nilfs_bmap_commit_end_ptr(bmap, &req, dat); nilfs_direct_set_ptr(bmap, key, NILFS_BMAP_INVALID_PTR); nilfs_inode_sub_blocks(bmap->b_inode, 1); } return ret; } static int nilfs_direct_seek_key(const struct nilfs_bmap *direct, __u64 start, __u64 *keyp) { __u64 key; for (key = start; key <= NILFS_DIRECT_KEY_MAX; key++) { if (nilfs_direct_get_ptr(direct, key) != NILFS_BMAP_INVALID_PTR) { *keyp = key; return 0; } } return -ENOENT; } static int nilfs_direct_last_key(const struct nilfs_bmap *direct, __u64 *keyp) { __u64 key, lastkey; lastkey = NILFS_DIRECT_KEY_MAX + 1; for (key = NILFS_DIRECT_KEY_MIN; key <= NILFS_DIRECT_KEY_MAX; key++) if (nilfs_direct_get_ptr(direct, key) != NILFS_BMAP_INVALID_PTR) lastkey = key; if (lastkey == NILFS_DIRECT_KEY_MAX + 1) return -ENOENT; *keyp = lastkey; return 0; } static int nilfs_direct_check_insert(const struct nilfs_bmap *bmap, __u64 key) { return key > NILFS_DIRECT_KEY_MAX; } static int nilfs_direct_gather_data(struct nilfs_bmap *direct, __u64 *keys, __u64 *ptrs, int nitems) { __u64 key; __u64 ptr; int n; if (nitems > NILFS_DIRECT_NBLOCKS) nitems = NILFS_DIRECT_NBLOCKS; n = 0; for (key = 0; key < nitems; key++) { ptr = nilfs_direct_get_ptr(direct, key); if (ptr != NILFS_BMAP_INVALID_PTR) { keys[n] = key; ptrs[n] = ptr; n++; } } return n; } int nilfs_direct_delete_and_convert(struct nilfs_bmap *bmap, __u64 key, __u64 *keys, __u64 *ptrs, int n) { __le64 *dptrs; int ret, i, j; /* no need to allocate any resource for conversion */ /* delete */ ret = bmap->b_ops->bop_delete(bmap, key); if (ret < 0) return ret; /* free resources */ if (bmap->b_ops->bop_clear != NULL) bmap->b_ops->bop_clear(bmap); /* convert */ dptrs = nilfs_direct_dptrs(bmap); for (i = 0, j = 0; i < NILFS_DIRECT_NBLOCKS; i++) { if ((j < n) && (i == keys[j])) { dptrs[i] = (i != key) ? 
cpu_to_le64(ptrs[j]) : NILFS_BMAP_INVALID_PTR; j++; } else dptrs[i] = NILFS_BMAP_INVALID_PTR; } nilfs_direct_init(bmap); return 0; } static int nilfs_direct_propagate(struct nilfs_bmap *bmap, struct buffer_head *bh) { struct nilfs_palloc_req oldreq, newreq; struct inode *dat; __u64 key; __u64 ptr; int ret; if (!NILFS_BMAP_USE_VBN(bmap)) return 0; dat = nilfs_bmap_get_dat(bmap); key = nilfs_bmap_data_get_key(bmap, bh); ptr = nilfs_direct_get_ptr(bmap, key); if (!buffer_nilfs_volatile(bh)) { oldreq.pr_entry_nr = ptr; newreq.pr_entry_nr = ptr; ret = nilfs_dat_prepare_update(dat, &oldreq, &newreq); if (ret < 0) return ret; nilfs_dat_commit_update(dat, &oldreq, &newreq, bmap->b_ptr_type == NILFS_BMAP_PTR_VS); set_buffer_nilfs_volatile(bh); nilfs_direct_set_ptr(bmap, key, newreq.pr_entry_nr); } else ret = nilfs_dat_mark_dirty(dat, ptr); return ret; } static int nilfs_direct_assign_v(struct nilfs_bmap *direct, __u64 key, __u64 ptr, struct buffer_head **bh, sector_t blocknr, union nilfs_binfo *binfo) { struct inode *dat = nilfs_bmap_get_dat(direct); union nilfs_bmap_ptr_req req; int ret; req.bpr_ptr = ptr; ret = nilfs_dat_prepare_start(dat, &req.bpr_req); if (!ret) { nilfs_dat_commit_start(dat, &req.bpr_req, blocknr); binfo->bi_v.bi_vblocknr = cpu_to_le64(ptr); binfo->bi_v.bi_blkoff = cpu_to_le64(key); } return ret; } static int nilfs_direct_assign_p(struct nilfs_bmap *direct, __u64 key, __u64 ptr, struct buffer_head **bh, sector_t blocknr, union nilfs_binfo *binfo) { nilfs_direct_set_ptr(direct, key, blocknr); binfo->bi_dat.bi_blkoff = cpu_to_le64(key); binfo->bi_dat.bi_level = 0; memset(binfo->bi_dat.bi_pad, 0, sizeof(binfo->bi_dat.bi_pad)); return 0; } static int nilfs_direct_assign(struct nilfs_bmap *bmap, struct buffer_head **bh, sector_t blocknr, union nilfs_binfo *binfo) { __u64 key; __u64 ptr; key = nilfs_bmap_data_get_key(bmap, *bh); if (unlikely(key > NILFS_DIRECT_KEY_MAX)) { nilfs_crit(bmap->b_inode->i_sb, "%s (ino=%lu): invalid key: %llu", __func__, bmap->b_inode->i_ino, (unsigned long long)key); return -EINVAL; } ptr = nilfs_direct_get_ptr(bmap, key); if (unlikely(ptr == NILFS_BMAP_INVALID_PTR)) { nilfs_crit(bmap->b_inode->i_sb, "%s (ino=%lu): invalid pointer: %llu", __func__, bmap->b_inode->i_ino, (unsigned long long)ptr); return -EINVAL; } return NILFS_BMAP_USE_VBN(bmap) ? nilfs_direct_assign_v(bmap, key, ptr, bh, blocknr, binfo) : nilfs_direct_assign_p(bmap, key, ptr, bh, blocknr, binfo); } static const struct nilfs_bmap_operations nilfs_direct_ops = { .bop_lookup = nilfs_direct_lookup, .bop_lookup_contig = nilfs_direct_lookup_contig, .bop_insert = nilfs_direct_insert, .bop_delete = nilfs_direct_delete, .bop_clear = NULL, .bop_propagate = nilfs_direct_propagate, .bop_lookup_dirty_buffers = NULL, .bop_assign = nilfs_direct_assign, .bop_mark = NULL, .bop_seek_key = nilfs_direct_seek_key, .bop_last_key = nilfs_direct_last_key, .bop_check_insert = nilfs_direct_check_insert, .bop_check_delete = NULL, .bop_gather_data = nilfs_direct_gather_data, }; int nilfs_direct_init(struct nilfs_bmap *bmap) { bmap->b_ops = &nilfs_direct_ops; return 0; }
linux-master
fs/nilfs2/direct.c
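direct.c maps the first few block offsets of a file with a flat pointer array, and nilfs_direct_lookup_contig() extends a single lookup by counting how many of the following keys map to physically consecutive blocks. Below is a hypothetical, self-contained rendering of that contiguity scan; the table size of 7 and the 0 "unmapped" sentinel are placeholders rather than the on-disk constants, and the real code additionally translates virtual block numbers through the DAT before comparing.

/* Hypothetical sketch of the contiguity scan in nilfs_direct_lookup_contig(). */
#include <stdint.h>
#include <stdio.h>

#define DIRECT_NBLOCKS	7	/* placeholder table size */
#define INVALID_PTR	0	/* placeholder "no mapping" sentinel */

/* Returns the number of contiguous mapped blocks starting at @key
 * (at most @maxblocks) and stores the first physical block in *ptrp,
 * or -1 if @key is out of range or unmapped. */
static int direct_lookup_contig(const uint64_t *dptrs, unsigned int key,
				uint64_t *ptrp, unsigned int maxblocks)
{
	uint64_t ptr;
	unsigned int cnt;

	if (key >= DIRECT_NBLOCKS)
		return -1;
	ptr = dptrs[key];
	if (ptr == INVALID_PTR)
		return -1;

	if (maxblocks > DIRECT_NBLOCKS - key)
		maxblocks = DIRECT_NBLOCKS - key;
	for (cnt = 1; cnt < maxblocks; cnt++) {
		uint64_t next = dptrs[key + cnt];

		if (next == INVALID_PTR || next != ptr + cnt)
			break;	/* hole or non-contiguous mapping */
	}
	*ptrp = ptr;
	return cnt;
}

int main(void)
{
	/* offsets 0..2 are contiguous on disk, offset 3 jumps elsewhere */
	uint64_t dptrs[DIRECT_NBLOCKS] = { 1000, 1001, 1002, 2000, 0, 0, 0 };
	uint64_t pbn;
	int n = direct_lookup_contig(dptrs, 0, &pbn, DIRECT_NBLOCKS);

	printf("found %d contiguous block(s) starting at %llu\n",
	       n, (unsigned long long)pbn);
	return 0;
}

Run against the table above, the scan reports 3 contiguous blocks starting at physical block 1000, which is exactly the count a caller would use to size a single multi-block read.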
// SPDX-License-Identifier: GPL-2.0+ /* * NILFS checkpoint file. * * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. * * Written by Koji Sato. */ #include <linux/kernel.h> #include <linux/fs.h> #include <linux/string.h> #include <linux/buffer_head.h> #include <linux/errno.h> #include "mdt.h" #include "cpfile.h" static inline unsigned long nilfs_cpfile_checkpoints_per_block(const struct inode *cpfile) { return NILFS_MDT(cpfile)->mi_entries_per_block; } /* block number from the beginning of the file */ static unsigned long nilfs_cpfile_get_blkoff(const struct inode *cpfile, __u64 cno) { __u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1; do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile)); return (unsigned long)tcno; } /* offset in block */ static unsigned long nilfs_cpfile_get_offset(const struct inode *cpfile, __u64 cno) { __u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1; return do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile)); } static __u64 nilfs_cpfile_first_checkpoint_in_block(const struct inode *cpfile, unsigned long blkoff) { return (__u64)nilfs_cpfile_checkpoints_per_block(cpfile) * blkoff + 1 - NILFS_MDT(cpfile)->mi_first_entry_offset; } static unsigned long nilfs_cpfile_checkpoints_in_block(const struct inode *cpfile, __u64 curr, __u64 max) { return min_t(__u64, nilfs_cpfile_checkpoints_per_block(cpfile) - nilfs_cpfile_get_offset(cpfile, curr), max - curr); } static inline int nilfs_cpfile_is_in_first(const struct inode *cpfile, __u64 cno) { return nilfs_cpfile_get_blkoff(cpfile, cno) == 0; } static unsigned int nilfs_cpfile_block_add_valid_checkpoints(const struct inode *cpfile, struct buffer_head *bh, void *kaddr, unsigned int n) { struct nilfs_checkpoint *cp = kaddr + bh_offset(bh); unsigned int count; count = le32_to_cpu(cp->cp_checkpoints_count) + n; cp->cp_checkpoints_count = cpu_to_le32(count); return count; } static unsigned int nilfs_cpfile_block_sub_valid_checkpoints(const struct inode *cpfile, struct buffer_head *bh, void *kaddr, unsigned int n) { struct nilfs_checkpoint *cp = kaddr + bh_offset(bh); unsigned int count; WARN_ON(le32_to_cpu(cp->cp_checkpoints_count) < n); count = le32_to_cpu(cp->cp_checkpoints_count) - n; cp->cp_checkpoints_count = cpu_to_le32(count); return count; } static inline struct nilfs_cpfile_header * nilfs_cpfile_block_get_header(const struct inode *cpfile, struct buffer_head *bh, void *kaddr) { return kaddr + bh_offset(bh); } static struct nilfs_checkpoint * nilfs_cpfile_block_get_checkpoint(const struct inode *cpfile, __u64 cno, struct buffer_head *bh, void *kaddr) { return kaddr + bh_offset(bh) + nilfs_cpfile_get_offset(cpfile, cno) * NILFS_MDT(cpfile)->mi_entry_size; } static void nilfs_cpfile_block_init(struct inode *cpfile, struct buffer_head *bh, void *kaddr) { struct nilfs_checkpoint *cp = kaddr + bh_offset(bh); size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size; int n = nilfs_cpfile_checkpoints_per_block(cpfile); while (n-- > 0) { nilfs_checkpoint_set_invalid(cp); cp = (void *)cp + cpsz; } } static inline int nilfs_cpfile_get_header_block(struct inode *cpfile, struct buffer_head **bhp) { return nilfs_mdt_get_block(cpfile, 0, 0, NULL, bhp); } static inline int nilfs_cpfile_get_checkpoint_block(struct inode *cpfile, __u64 cno, int create, struct buffer_head **bhp) { return nilfs_mdt_get_block(cpfile, nilfs_cpfile_get_blkoff(cpfile, cno), create, nilfs_cpfile_block_init, bhp); } /** * nilfs_cpfile_find_checkpoint_block - find and get a buffer on cpfile * @cpfile: inode of cpfile * 
@start_cno: start checkpoint number (inclusive) * @end_cno: end checkpoint number (inclusive) * @cnop: place to store the next checkpoint number * @bhp: place to store a pointer to buffer_head struct * * Return Value: On success, it returns 0. On error, the following negative * error code is returned. * * %-ENOMEM - Insufficient memory available. * * %-EIO - I/O error * * %-ENOENT - no block exists in the range. */ static int nilfs_cpfile_find_checkpoint_block(struct inode *cpfile, __u64 start_cno, __u64 end_cno, __u64 *cnop, struct buffer_head **bhp) { unsigned long start, end, blkoff; int ret; if (unlikely(start_cno > end_cno)) return -ENOENT; start = nilfs_cpfile_get_blkoff(cpfile, start_cno); end = nilfs_cpfile_get_blkoff(cpfile, end_cno); ret = nilfs_mdt_find_block(cpfile, start, end, &blkoff, bhp); if (!ret) *cnop = (blkoff == start) ? start_cno : nilfs_cpfile_first_checkpoint_in_block(cpfile, blkoff); return ret; } static inline int nilfs_cpfile_delete_checkpoint_block(struct inode *cpfile, __u64 cno) { return nilfs_mdt_delete_block(cpfile, nilfs_cpfile_get_blkoff(cpfile, cno)); } /** * nilfs_cpfile_get_checkpoint - get a checkpoint * @cpfile: inode of checkpoint file * @cno: checkpoint number * @create: create flag * @cpp: pointer to a checkpoint * @bhp: pointer to a buffer head * * Description: nilfs_cpfile_get_checkpoint() acquires the checkpoint * specified by @cno. A new checkpoint will be created if @cno is the current * checkpoint number and @create is nonzero. * * Return Value: On success, 0 is returned, and the checkpoint and the * buffer head of the buffer on which the checkpoint is located are stored in * the place pointed by @cpp and @bhp, respectively. On error, one of the * following negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. * * %-ENOENT - No such checkpoint. * * %-EINVAL - invalid checkpoint. */ int nilfs_cpfile_get_checkpoint(struct inode *cpfile, __u64 cno, int create, struct nilfs_checkpoint **cpp, struct buffer_head **bhp) { struct buffer_head *header_bh, *cp_bh; struct nilfs_cpfile_header *header; struct nilfs_checkpoint *cp; void *kaddr; int ret; if (unlikely(cno < 1 || cno > nilfs_mdt_cno(cpfile) || (cno < nilfs_mdt_cno(cpfile) && create))) return -EINVAL; down_write(&NILFS_MDT(cpfile)->mi_sem); ret = nilfs_cpfile_get_header_block(cpfile, &header_bh); if (ret < 0) goto out_sem; ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, create, &cp_bh); if (ret < 0) goto out_header; kaddr = kmap(cp_bh->b_page); cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr); if (nilfs_checkpoint_invalid(cp)) { if (!create) { kunmap(cp_bh->b_page); brelse(cp_bh); ret = -ENOENT; goto out_header; } /* a newly-created checkpoint */ nilfs_checkpoint_clear_invalid(cp); if (!nilfs_cpfile_is_in_first(cpfile, cno)) nilfs_cpfile_block_add_valid_checkpoints(cpfile, cp_bh, kaddr, 1); mark_buffer_dirty(cp_bh); kaddr = kmap_atomic(header_bh->b_page); header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr); le64_add_cpu(&header->ch_ncheckpoints, 1); kunmap_atomic(kaddr); mark_buffer_dirty(header_bh); nilfs_mdt_mark_dirty(cpfile); } if (cpp != NULL) *cpp = cp; *bhp = cp_bh; out_header: brelse(header_bh); out_sem: up_write(&NILFS_MDT(cpfile)->mi_sem); return ret; } /** * nilfs_cpfile_put_checkpoint - put a checkpoint * @cpfile: inode of checkpoint file * @cno: checkpoint number * @bh: buffer head * * Description: nilfs_cpfile_put_checkpoint() releases the checkpoint * specified by @cno. 
@bh must be the buffer head which has been returned by * a previous call to nilfs_cpfile_get_checkpoint() with @cno. */ void nilfs_cpfile_put_checkpoint(struct inode *cpfile, __u64 cno, struct buffer_head *bh) { kunmap(bh->b_page); brelse(bh); } /** * nilfs_cpfile_delete_checkpoints - delete checkpoints * @cpfile: inode of checkpoint file * @start: start checkpoint number * @end: end checkpoint number * * Description: nilfs_cpfile_delete_checkpoints() deletes the checkpoints in * the period from @start to @end, excluding @end itself. The checkpoints * which have been already deleted are ignored. * * Return Value: On success, 0 is returned. On error, one of the following * negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. * * %-EINVAL - invalid checkpoints. */ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile, __u64 start, __u64 end) { struct buffer_head *header_bh, *cp_bh; struct nilfs_cpfile_header *header; struct nilfs_checkpoint *cp; size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size; __u64 cno; void *kaddr; unsigned long tnicps; int ret, ncps, nicps, nss, count, i; if (unlikely(start == 0 || start > end)) { nilfs_err(cpfile->i_sb, "cannot delete checkpoints: invalid range [%llu, %llu)", (unsigned long long)start, (unsigned long long)end); return -EINVAL; } down_write(&NILFS_MDT(cpfile)->mi_sem); ret = nilfs_cpfile_get_header_block(cpfile, &header_bh); if (ret < 0) goto out_sem; tnicps = 0; nss = 0; for (cno = start; cno < end; cno += ncps) { ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, end); ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh); if (ret < 0) { if (ret != -ENOENT) break; /* skip hole */ ret = 0; continue; } kaddr = kmap_atomic(cp_bh->b_page); cp = nilfs_cpfile_block_get_checkpoint( cpfile, cno, cp_bh, kaddr); nicps = 0; for (i = 0; i < ncps; i++, cp = (void *)cp + cpsz) { if (nilfs_checkpoint_snapshot(cp)) { nss++; } else if (!nilfs_checkpoint_invalid(cp)) { nilfs_checkpoint_set_invalid(cp); nicps++; } } if (nicps > 0) { tnicps += nicps; mark_buffer_dirty(cp_bh); nilfs_mdt_mark_dirty(cpfile); if (!nilfs_cpfile_is_in_first(cpfile, cno)) { count = nilfs_cpfile_block_sub_valid_checkpoints( cpfile, cp_bh, kaddr, nicps); if (count == 0) { /* make hole */ kunmap_atomic(kaddr); brelse(cp_bh); ret = nilfs_cpfile_delete_checkpoint_block( cpfile, cno); if (ret == 0) continue; nilfs_err(cpfile->i_sb, "error %d deleting checkpoint block", ret); break; } } } kunmap_atomic(kaddr); brelse(cp_bh); } if (tnicps > 0) { kaddr = kmap_atomic(header_bh->b_page); header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr); le64_add_cpu(&header->ch_ncheckpoints, -(u64)tnicps); mark_buffer_dirty(header_bh); nilfs_mdt_mark_dirty(cpfile); kunmap_atomic(kaddr); } brelse(header_bh); if (nss > 0) ret = -EBUSY; out_sem: up_write(&NILFS_MDT(cpfile)->mi_sem); return ret; } static void nilfs_cpfile_checkpoint_to_cpinfo(struct inode *cpfile, struct nilfs_checkpoint *cp, struct nilfs_cpinfo *ci) { ci->ci_flags = le32_to_cpu(cp->cp_flags); ci->ci_cno = le64_to_cpu(cp->cp_cno); ci->ci_create = le64_to_cpu(cp->cp_create); ci->ci_nblk_inc = le64_to_cpu(cp->cp_nblk_inc); ci->ci_inodes_count = le64_to_cpu(cp->cp_inodes_count); ci->ci_blocks_count = le64_to_cpu(cp->cp_blocks_count); ci->ci_next = le64_to_cpu(cp->cp_snapshot_list.ssl_next); } static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop, void *buf, unsigned int cisz, size_t nci) { struct nilfs_checkpoint *cp; struct nilfs_cpinfo *ci = buf; 
struct buffer_head *bh; size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size; __u64 cur_cno = nilfs_mdt_cno(cpfile), cno = *cnop; void *kaddr; int n, ret; int ncps, i; if (cno == 0) return -ENOENT; /* checkpoint number 0 is invalid */ down_read(&NILFS_MDT(cpfile)->mi_sem); for (n = 0; n < nci; cno += ncps) { ret = nilfs_cpfile_find_checkpoint_block( cpfile, cno, cur_cno - 1, &cno, &bh); if (ret < 0) { if (likely(ret == -ENOENT)) break; goto out; } ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, cur_cno); kaddr = kmap_atomic(bh->b_page); cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr); for (i = 0; i < ncps && n < nci; i++, cp = (void *)cp + cpsz) { if (!nilfs_checkpoint_invalid(cp)) { nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp, ci); ci = (void *)ci + cisz; n++; } } kunmap_atomic(kaddr); brelse(bh); } ret = n; if (n > 0) { ci = (void *)ci - cisz; *cnop = ci->ci_cno + 1; } out: up_read(&NILFS_MDT(cpfile)->mi_sem); return ret; } static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop, void *buf, unsigned int cisz, size_t nci) { struct buffer_head *bh; struct nilfs_cpfile_header *header; struct nilfs_checkpoint *cp; struct nilfs_cpinfo *ci = buf; __u64 curr = *cnop, next; unsigned long curr_blkoff, next_blkoff; void *kaddr; int n = 0, ret; down_read(&NILFS_MDT(cpfile)->mi_sem); if (curr == 0) { ret = nilfs_cpfile_get_header_block(cpfile, &bh); if (ret < 0) goto out; kaddr = kmap_atomic(bh->b_page); header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr); curr = le64_to_cpu(header->ch_snapshot_list.ssl_next); kunmap_atomic(kaddr); brelse(bh); if (curr == 0) { ret = 0; goto out; } } else if (unlikely(curr == ~(__u64)0)) { ret = 0; goto out; } curr_blkoff = nilfs_cpfile_get_blkoff(cpfile, curr); ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr, 0, &bh); if (unlikely(ret < 0)) { if (ret == -ENOENT) ret = 0; /* No snapshots (started from a hole block) */ goto out; } kaddr = kmap_atomic(bh->b_page); while (n < nci) { cp = nilfs_cpfile_block_get_checkpoint(cpfile, curr, bh, kaddr); curr = ~(__u64)0; /* Terminator */ if (unlikely(nilfs_checkpoint_invalid(cp) || !nilfs_checkpoint_snapshot(cp))) break; nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp, ci); ci = (void *)ci + cisz; n++; next = le64_to_cpu(cp->cp_snapshot_list.ssl_next); if (next == 0) break; /* reach end of the snapshot list */ next_blkoff = nilfs_cpfile_get_blkoff(cpfile, next); if (curr_blkoff != next_blkoff) { kunmap_atomic(kaddr); brelse(bh); ret = nilfs_cpfile_get_checkpoint_block(cpfile, next, 0, &bh); if (unlikely(ret < 0)) { WARN_ON(ret == -ENOENT); goto out; } kaddr = kmap_atomic(bh->b_page); } curr = next; curr_blkoff = next_blkoff; } kunmap_atomic(kaddr); brelse(bh); *cnop = curr; ret = n; out: up_read(&NILFS_MDT(cpfile)->mi_sem); return ret; } /** * nilfs_cpfile_get_cpinfo - * @cpfile: * @cno: * @ci: * @nci: */ ssize_t nilfs_cpfile_get_cpinfo(struct inode *cpfile, __u64 *cnop, int mode, void *buf, unsigned int cisz, size_t nci) { switch (mode) { case NILFS_CHECKPOINT: return nilfs_cpfile_do_get_cpinfo(cpfile, cnop, buf, cisz, nci); case NILFS_SNAPSHOT: return nilfs_cpfile_do_get_ssinfo(cpfile, cnop, buf, cisz, nci); default: return -EINVAL; } } /** * nilfs_cpfile_delete_checkpoint - * @cpfile: * @cno: */ int nilfs_cpfile_delete_checkpoint(struct inode *cpfile, __u64 cno) { struct nilfs_cpinfo ci; __u64 tcno = cno; ssize_t nci; nci = nilfs_cpfile_do_get_cpinfo(cpfile, &tcno, &ci, sizeof(ci), 1); if (nci < 0) return nci; else if (nci == 0 || ci.ci_cno != cno) return -ENOENT; else if 
(nilfs_cpinfo_snapshot(&ci)) return -EBUSY; return nilfs_cpfile_delete_checkpoints(cpfile, cno, cno + 1); } static struct nilfs_snapshot_list * nilfs_cpfile_block_get_snapshot_list(const struct inode *cpfile, __u64 cno, struct buffer_head *bh, void *kaddr) { struct nilfs_cpfile_header *header; struct nilfs_checkpoint *cp; struct nilfs_snapshot_list *list; if (cno != 0) { cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr); list = &cp->cp_snapshot_list; } else { header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr); list = &header->ch_snapshot_list; } return list; } static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno) { struct buffer_head *header_bh, *curr_bh, *prev_bh, *cp_bh; struct nilfs_cpfile_header *header; struct nilfs_checkpoint *cp; struct nilfs_snapshot_list *list; __u64 curr, prev; unsigned long curr_blkoff, prev_blkoff; void *kaddr; int ret; if (cno == 0) return -ENOENT; /* checkpoint number 0 is invalid */ down_write(&NILFS_MDT(cpfile)->mi_sem); ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh); if (ret < 0) goto out_sem; kaddr = kmap_atomic(cp_bh->b_page); cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr); if (nilfs_checkpoint_invalid(cp)) { ret = -ENOENT; kunmap_atomic(kaddr); goto out_cp; } if (nilfs_checkpoint_snapshot(cp)) { ret = 0; kunmap_atomic(kaddr); goto out_cp; } kunmap_atomic(kaddr); ret = nilfs_cpfile_get_header_block(cpfile, &header_bh); if (ret < 0) goto out_cp; kaddr = kmap_atomic(header_bh->b_page); header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr); list = &header->ch_snapshot_list; curr_bh = header_bh; get_bh(curr_bh); curr = 0; curr_blkoff = 0; prev = le64_to_cpu(list->ssl_prev); while (prev > cno) { prev_blkoff = nilfs_cpfile_get_blkoff(cpfile, prev); curr = prev; if (curr_blkoff != prev_blkoff) { kunmap_atomic(kaddr); brelse(curr_bh); ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr, 0, &curr_bh); if (ret < 0) goto out_header; kaddr = kmap_atomic(curr_bh->b_page); } curr_blkoff = prev_blkoff; cp = nilfs_cpfile_block_get_checkpoint( cpfile, curr, curr_bh, kaddr); list = &cp->cp_snapshot_list; prev = le64_to_cpu(list->ssl_prev); } kunmap_atomic(kaddr); if (prev != 0) { ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0, &prev_bh); if (ret < 0) goto out_curr; } else { prev_bh = header_bh; get_bh(prev_bh); } kaddr = kmap_atomic(curr_bh->b_page); list = nilfs_cpfile_block_get_snapshot_list( cpfile, curr, curr_bh, kaddr); list->ssl_prev = cpu_to_le64(cno); kunmap_atomic(kaddr); kaddr = kmap_atomic(cp_bh->b_page); cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr); cp->cp_snapshot_list.ssl_next = cpu_to_le64(curr); cp->cp_snapshot_list.ssl_prev = cpu_to_le64(prev); nilfs_checkpoint_set_snapshot(cp); kunmap_atomic(kaddr); kaddr = kmap_atomic(prev_bh->b_page); list = nilfs_cpfile_block_get_snapshot_list( cpfile, prev, prev_bh, kaddr); list->ssl_next = cpu_to_le64(cno); kunmap_atomic(kaddr); kaddr = kmap_atomic(header_bh->b_page); header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr); le64_add_cpu(&header->ch_nsnapshots, 1); kunmap_atomic(kaddr); mark_buffer_dirty(prev_bh); mark_buffer_dirty(curr_bh); mark_buffer_dirty(cp_bh); mark_buffer_dirty(header_bh); nilfs_mdt_mark_dirty(cpfile); brelse(prev_bh); out_curr: brelse(curr_bh); out_header: brelse(header_bh); out_cp: brelse(cp_bh); out_sem: up_write(&NILFS_MDT(cpfile)->mi_sem); return ret; } static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno) { struct buffer_head *header_bh, *next_bh, 
*prev_bh, *cp_bh; struct nilfs_cpfile_header *header; struct nilfs_checkpoint *cp; struct nilfs_snapshot_list *list; __u64 next, prev; void *kaddr; int ret; if (cno == 0) return -ENOENT; /* checkpoint number 0 is invalid */ down_write(&NILFS_MDT(cpfile)->mi_sem); ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh); if (ret < 0) goto out_sem; kaddr = kmap_atomic(cp_bh->b_page); cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr); if (nilfs_checkpoint_invalid(cp)) { ret = -ENOENT; kunmap_atomic(kaddr); goto out_cp; } if (!nilfs_checkpoint_snapshot(cp)) { ret = 0; kunmap_atomic(kaddr); goto out_cp; } list = &cp->cp_snapshot_list; next = le64_to_cpu(list->ssl_next); prev = le64_to_cpu(list->ssl_prev); kunmap_atomic(kaddr); ret = nilfs_cpfile_get_header_block(cpfile, &header_bh); if (ret < 0) goto out_cp; if (next != 0) { ret = nilfs_cpfile_get_checkpoint_block(cpfile, next, 0, &next_bh); if (ret < 0) goto out_header; } else { next_bh = header_bh; get_bh(next_bh); } if (prev != 0) { ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0, &prev_bh); if (ret < 0) goto out_next; } else { prev_bh = header_bh; get_bh(prev_bh); } kaddr = kmap_atomic(next_bh->b_page); list = nilfs_cpfile_block_get_snapshot_list( cpfile, next, next_bh, kaddr); list->ssl_prev = cpu_to_le64(prev); kunmap_atomic(kaddr); kaddr = kmap_atomic(prev_bh->b_page); list = nilfs_cpfile_block_get_snapshot_list( cpfile, prev, prev_bh, kaddr); list->ssl_next = cpu_to_le64(next); kunmap_atomic(kaddr); kaddr = kmap_atomic(cp_bh->b_page); cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr); cp->cp_snapshot_list.ssl_next = cpu_to_le64(0); cp->cp_snapshot_list.ssl_prev = cpu_to_le64(0); nilfs_checkpoint_clear_snapshot(cp); kunmap_atomic(kaddr); kaddr = kmap_atomic(header_bh->b_page); header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr); le64_add_cpu(&header->ch_nsnapshots, -1); kunmap_atomic(kaddr); mark_buffer_dirty(next_bh); mark_buffer_dirty(prev_bh); mark_buffer_dirty(cp_bh); mark_buffer_dirty(header_bh); nilfs_mdt_mark_dirty(cpfile); brelse(prev_bh); out_next: brelse(next_bh); out_header: brelse(header_bh); out_cp: brelse(cp_bh); out_sem: up_write(&NILFS_MDT(cpfile)->mi_sem); return ret; } /** * nilfs_cpfile_is_snapshot - * @cpfile: inode of checkpoint file * @cno: checkpoint number * * Description: * * Return Value: On success, 1 is returned if the checkpoint specified by * @cno is a snapshot, or 0 if not. On error, one of the following negative * error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. * * %-ENOENT - No such checkpoint. */ int nilfs_cpfile_is_snapshot(struct inode *cpfile, __u64 cno) { struct buffer_head *bh; struct nilfs_checkpoint *cp; void *kaddr; int ret; /* * CP number is invalid if it's zero or larger than the * largest existing one. 
*/ if (cno == 0 || cno >= nilfs_mdt_cno(cpfile)) return -ENOENT; down_read(&NILFS_MDT(cpfile)->mi_sem); ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh); if (ret < 0) goto out; kaddr = kmap_atomic(bh->b_page); cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr); if (nilfs_checkpoint_invalid(cp)) ret = -ENOENT; else ret = nilfs_checkpoint_snapshot(cp); kunmap_atomic(kaddr); brelse(bh); out: up_read(&NILFS_MDT(cpfile)->mi_sem); return ret; } /** * nilfs_cpfile_change_cpmode - change checkpoint mode * @cpfile: inode of checkpoint file * @cno: checkpoint number * @mode: mode of checkpoint * * Description: nilfs_change_cpmode() changes the mode of the checkpoint * specified by @cno. The mode @mode is NILFS_CHECKPOINT or NILFS_SNAPSHOT. * * Return Value: On success, 0 is returned. On error, one of the following * negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. * * %-ENOENT - No such checkpoint. */ int nilfs_cpfile_change_cpmode(struct inode *cpfile, __u64 cno, int mode) { int ret; switch (mode) { case NILFS_CHECKPOINT: if (nilfs_checkpoint_is_mounted(cpfile->i_sb, cno)) /* * Current implementation does not have to protect * plain read-only mounts since they are exclusive * with a read/write mount and are protected from the * cleaner. */ ret = -EBUSY; else ret = nilfs_cpfile_clear_snapshot(cpfile, cno); return ret; case NILFS_SNAPSHOT: return nilfs_cpfile_set_snapshot(cpfile, cno); default: return -EINVAL; } } /** * nilfs_cpfile_get_stat - get checkpoint statistics * @cpfile: inode of checkpoint file * @cpstat: pointer to a structure of checkpoint statistics * * Description: nilfs_cpfile_get_stat() returns information about checkpoints. * * Return Value: On success, 0 is returned, and checkpoints information is * stored in the place pointed by @cpstat. On error, one of the following * negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. 
*/ int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat) { struct buffer_head *bh; struct nilfs_cpfile_header *header; void *kaddr; int ret; down_read(&NILFS_MDT(cpfile)->mi_sem); ret = nilfs_cpfile_get_header_block(cpfile, &bh); if (ret < 0) goto out_sem; kaddr = kmap_atomic(bh->b_page); header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr); cpstat->cs_cno = nilfs_mdt_cno(cpfile); cpstat->cs_ncps = le64_to_cpu(header->ch_ncheckpoints); cpstat->cs_nsss = le64_to_cpu(header->ch_nsnapshots); kunmap_atomic(kaddr); brelse(bh); out_sem: up_read(&NILFS_MDT(cpfile)->mi_sem); return ret; } /** * nilfs_cpfile_read - read or get cpfile inode * @sb: super block instance * @cpsize: size of a checkpoint entry * @raw_inode: on-disk cpfile inode * @inodep: buffer to store the inode */ int nilfs_cpfile_read(struct super_block *sb, size_t cpsize, struct nilfs_inode *raw_inode, struct inode **inodep) { struct inode *cpfile; int err; if (cpsize > sb->s_blocksize) { nilfs_err(sb, "too large checkpoint size: %zu bytes", cpsize); return -EINVAL; } else if (cpsize < NILFS_MIN_CHECKPOINT_SIZE) { nilfs_err(sb, "too small checkpoint size: %zu bytes", cpsize); return -EINVAL; } cpfile = nilfs_iget_locked(sb, NULL, NILFS_CPFILE_INO); if (unlikely(!cpfile)) return -ENOMEM; if (!(cpfile->i_state & I_NEW)) goto out; err = nilfs_mdt_init(cpfile, NILFS_MDT_GFP, 0); if (err) goto failed; nilfs_mdt_set_entry_size(cpfile, cpsize, sizeof(struct nilfs_cpfile_header)); err = nilfs_read_inode_common(cpfile, raw_inode); if (err) goto failed; unlock_new_inode(cpfile); out: *inodep = cpfile; return 0; failed: iget_failed(cpfile); return err; }
linux-master
fs/nilfs2/cpfile.c
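A minimal userspace sketch of the snapshot-list handling in the checkpoint file above (all struct and function names here are invented for illustration; this is not the kernel API). cpfile threads a doubly-linked list through the on-disk checkpoint entries via cp_snapshot_list.ssl_next/ssl_prev, with the cpfile header (checkpoint number 0) acting as the sentinel; nilfs_cpfile_set_snapshot() walks the ssl_prev chain from the header until it finds the insertion point, so the list stays sorted by checkpoint number, and nilfs_cpfile_clear_snapshot() splices the entry back out. The standalone program below models just that splice arithmetic with in-memory nodes:

#include <stdio.h>

/* In-memory stand-in for the on-disk snapshot-list links (illustrative only). */
struct snap_node {
	unsigned long long cno;		/* 0 == header sentinel */
	struct snap_node *next, *prev;
};

/*
 * Insert @node so the list stays sorted in ascending checkpoint order,
 * mirroring the ssl_prev walk in nilfs_cpfile_set_snapshot().
 */
static void snap_list_insert(struct snap_node *head, struct snap_node *node)
{
	struct snap_node *prev = head->prev;	/* largest cno so far */

	while (prev != head && prev->cno > node->cno)
		prev = prev->prev;

	node->next = prev->next;
	node->prev = prev;
	prev->next->prev = node;
	prev->next = node;
}

/* Unlink @node, mirroring nilfs_cpfile_clear_snapshot(). */
static void snap_list_remove(struct snap_node *node)
{
	node->prev->next = node->next;
	node->next->prev = node->prev;
	node->next = node->prev = node;
}

int main(void)
{
	struct snap_node head = { 0, &head, &head };
	struct snap_node a = { 7 }, b = { 3 }, c = { 12 };
	struct snap_node *p;

	snap_list_insert(&head, &a);
	snap_list_insert(&head, &b);
	snap_list_insert(&head, &c);
	snap_list_remove(&a);		/* like clearing the snapshot flag */

	for (p = head.next; p != &head; p = p->next)
		printf("snapshot cno %llu\n", p->cno);
	return 0;
}

The traversal in main() prints the remaining snapshots in ascending order (3, then 12), which is the same order nilfs_cpfile_do_get_ssinfo() produces by following ssl_next from the header.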
// SPDX-License-Identifier: GPL-2.0+ /* * NILFS segment usage file. * * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. * * Written by Koji Sato. * Revised by Ryusuke Konishi. */ #include <linux/kernel.h> #include <linux/fs.h> #include <linux/string.h> #include <linux/buffer_head.h> #include <linux/errno.h> #include "mdt.h" #include "sufile.h" #include <trace/events/nilfs2.h> /** * struct nilfs_sufile_info - on-memory private data of sufile * @mi: on-memory private data of metadata file * @ncleansegs: number of clean segments * @allocmin: lower limit of allocatable segment range * @allocmax: upper limit of allocatable segment range */ struct nilfs_sufile_info { struct nilfs_mdt_info mi; unsigned long ncleansegs;/* number of clean segments */ __u64 allocmin; /* lower limit of allocatable segment range */ __u64 allocmax; /* upper limit of allocatable segment range */ }; static inline struct nilfs_sufile_info *NILFS_SUI(struct inode *sufile) { return (struct nilfs_sufile_info *)NILFS_MDT(sufile); } static inline unsigned long nilfs_sufile_segment_usages_per_block(const struct inode *sufile) { return NILFS_MDT(sufile)->mi_entries_per_block; } static unsigned long nilfs_sufile_get_blkoff(const struct inode *sufile, __u64 segnum) { __u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset; do_div(t, nilfs_sufile_segment_usages_per_block(sufile)); return (unsigned long)t; } static unsigned long nilfs_sufile_get_offset(const struct inode *sufile, __u64 segnum) { __u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset; return do_div(t, nilfs_sufile_segment_usages_per_block(sufile)); } static unsigned long nilfs_sufile_segment_usages_in_block(const struct inode *sufile, __u64 curr, __u64 max) { return min_t(unsigned long, nilfs_sufile_segment_usages_per_block(sufile) - nilfs_sufile_get_offset(sufile, curr), max - curr + 1); } static struct nilfs_segment_usage * nilfs_sufile_block_get_segment_usage(const struct inode *sufile, __u64 segnum, struct buffer_head *bh, void *kaddr) { return kaddr + bh_offset(bh) + nilfs_sufile_get_offset(sufile, segnum) * NILFS_MDT(sufile)->mi_entry_size; } static inline int nilfs_sufile_get_header_block(struct inode *sufile, struct buffer_head **bhp) { return nilfs_mdt_get_block(sufile, 0, 0, NULL, bhp); } static inline int nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum, int create, struct buffer_head **bhp) { return nilfs_mdt_get_block(sufile, nilfs_sufile_get_blkoff(sufile, segnum), create, NULL, bhp); } static int nilfs_sufile_delete_segment_usage_block(struct inode *sufile, __u64 segnum) { return nilfs_mdt_delete_block(sufile, nilfs_sufile_get_blkoff(sufile, segnum)); } static void nilfs_sufile_mod_counter(struct buffer_head *header_bh, u64 ncleanadd, u64 ndirtyadd) { struct nilfs_sufile_header *header; void *kaddr; kaddr = kmap_atomic(header_bh->b_page); header = kaddr + bh_offset(header_bh); le64_add_cpu(&header->sh_ncleansegs, ncleanadd); le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd); kunmap_atomic(kaddr); mark_buffer_dirty(header_bh); } /** * nilfs_sufile_get_ncleansegs - return the number of clean segments * @sufile: inode of segment usage file */ unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile) { return NILFS_SUI(sufile)->ncleansegs; } /** * nilfs_sufile_updatev - modify multiple segment usages at a time * @sufile: inode of segment usage file * @segnumv: array of segment numbers * @nsegs: size of @segnumv array * @create: creation flag * @ndone: place to store number of modified segments on 
@segnumv * @dofunc: primitive operation for the update * * Description: nilfs_sufile_updatev() repeatedly calls @dofunc * against the given array of segments. The @dofunc is called with * buffers of a header block and the sufile block in which the target * segment usage entry is contained. If @ndone is given, the number * of successfully modified segments from the head is stored in the * place @ndone points to. * * Return Value: On success, zero is returned. On error, one of the * following negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. * * %-ENOENT - Given segment usage is in hole block (may be returned if * @create is zero) * * %-EINVAL - Invalid segment usage number */ int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs, int create, size_t *ndone, void (*dofunc)(struct inode *, __u64, struct buffer_head *, struct buffer_head *)) { struct buffer_head *header_bh, *bh; unsigned long blkoff, prev_blkoff; __u64 *seg; size_t nerr = 0, n = 0; int ret = 0; if (unlikely(nsegs == 0)) goto out; down_write(&NILFS_MDT(sufile)->mi_sem); for (seg = segnumv; seg < segnumv + nsegs; seg++) { if (unlikely(*seg >= nilfs_sufile_get_nsegments(sufile))) { nilfs_warn(sufile->i_sb, "%s: invalid segment number: %llu", __func__, (unsigned long long)*seg); nerr++; } } if (nerr > 0) { ret = -EINVAL; goto out_sem; } ret = nilfs_sufile_get_header_block(sufile, &header_bh); if (ret < 0) goto out_sem; seg = segnumv; blkoff = nilfs_sufile_get_blkoff(sufile, *seg); ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh); if (ret < 0) goto out_header; for (;;) { dofunc(sufile, *seg, header_bh, bh); if (++seg >= segnumv + nsegs) break; prev_blkoff = blkoff; blkoff = nilfs_sufile_get_blkoff(sufile, *seg); if (blkoff == prev_blkoff) continue; /* get different block */ brelse(bh); ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh); if (unlikely(ret < 0)) goto out_header; } brelse(bh); out_header: n = seg - segnumv; brelse(header_bh); out_sem: up_write(&NILFS_MDT(sufile)->mi_sem); out: if (ndone) *ndone = n; return ret; } int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create, void (*dofunc)(struct inode *, __u64, struct buffer_head *, struct buffer_head *)) { struct buffer_head *header_bh, *bh; int ret; if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) { nilfs_warn(sufile->i_sb, "%s: invalid segment number: %llu", __func__, (unsigned long long)segnum); return -EINVAL; } down_write(&NILFS_MDT(sufile)->mi_sem); ret = nilfs_sufile_get_header_block(sufile, &header_bh); if (ret < 0) goto out_sem; ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, create, &bh); if (!ret) { dofunc(sufile, segnum, header_bh, bh); brelse(bh); } brelse(header_bh); out_sem: up_write(&NILFS_MDT(sufile)->mi_sem); return ret; } /** * nilfs_sufile_set_alloc_range - limit range of segment to be allocated * @sufile: inode of segment usage file * @start: minimum segment number of allocatable region (inclusive) * @end: maximum segment number of allocatable region (inclusive) * * Return Value: On success, 0 is returned. On error, one of the * following negative error codes is returned. 
* * %-ERANGE - invalid segment region */ int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end) { struct nilfs_sufile_info *sui = NILFS_SUI(sufile); __u64 nsegs; int ret = -ERANGE; down_write(&NILFS_MDT(sufile)->mi_sem); nsegs = nilfs_sufile_get_nsegments(sufile); if (start <= end && end < nsegs) { sui->allocmin = start; sui->allocmax = end; ret = 0; } up_write(&NILFS_MDT(sufile)->mi_sem); return ret; } /** * nilfs_sufile_alloc - allocate a segment * @sufile: inode of segment usage file * @segnump: pointer to segment number * * Description: nilfs_sufile_alloc() allocates a clean segment. * * Return Value: On success, 0 is returned and the segment number of the * allocated segment is stored in the place pointed by @segnump. On error, one * of the following negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. * * %-ENOSPC - No clean segment left. */ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump) { struct buffer_head *header_bh, *su_bh; struct nilfs_sufile_header *header; struct nilfs_segment_usage *su; struct nilfs_sufile_info *sui = NILFS_SUI(sufile); size_t susz = NILFS_MDT(sufile)->mi_entry_size; __u64 segnum, maxsegnum, last_alloc; void *kaddr; unsigned long nsegments, nsus, cnt; int ret, j; down_write(&NILFS_MDT(sufile)->mi_sem); ret = nilfs_sufile_get_header_block(sufile, &header_bh); if (ret < 0) goto out_sem; kaddr = kmap_atomic(header_bh->b_page); header = kaddr + bh_offset(header_bh); last_alloc = le64_to_cpu(header->sh_last_alloc); kunmap_atomic(kaddr); nsegments = nilfs_sufile_get_nsegments(sufile); maxsegnum = sui->allocmax; segnum = last_alloc + 1; if (segnum < sui->allocmin || segnum > sui->allocmax) segnum = sui->allocmin; for (cnt = 0; cnt < nsegments; cnt += nsus) { if (segnum > maxsegnum) { if (cnt < sui->allocmax - sui->allocmin + 1) { /* * wrap around in the limited region. * if allocation started from * sui->allocmin, this never happens. 
*/ segnum = sui->allocmin; maxsegnum = last_alloc; } else if (segnum > sui->allocmin && sui->allocmax + 1 < nsegments) { segnum = sui->allocmax + 1; maxsegnum = nsegments - 1; } else if (sui->allocmin > 0) { segnum = 0; maxsegnum = sui->allocmin - 1; } else { break; /* never happens */ } } trace_nilfs2_segment_usage_check(sufile, segnum, cnt); ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1, &su_bh); if (ret < 0) goto out_header; kaddr = kmap_atomic(su_bh->b_page); su = nilfs_sufile_block_get_segment_usage( sufile, segnum, su_bh, kaddr); nsus = nilfs_sufile_segment_usages_in_block( sufile, segnum, maxsegnum); for (j = 0; j < nsus; j++, su = (void *)su + susz, segnum++) { if (!nilfs_segment_usage_clean(su)) continue; /* found a clean segment */ nilfs_segment_usage_set_dirty(su); kunmap_atomic(kaddr); kaddr = kmap_atomic(header_bh->b_page); header = kaddr + bh_offset(header_bh); le64_add_cpu(&header->sh_ncleansegs, -1); le64_add_cpu(&header->sh_ndirtysegs, 1); header->sh_last_alloc = cpu_to_le64(segnum); kunmap_atomic(kaddr); sui->ncleansegs--; mark_buffer_dirty(header_bh); mark_buffer_dirty(su_bh); nilfs_mdt_mark_dirty(sufile); brelse(su_bh); *segnump = segnum; trace_nilfs2_segment_usage_allocated(sufile, segnum); goto out_header; } kunmap_atomic(kaddr); brelse(su_bh); } /* no segments left */ ret = -ENOSPC; out_header: brelse(header_bh); out_sem: up_write(&NILFS_MDT(sufile)->mi_sem); return ret; } void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum, struct buffer_head *header_bh, struct buffer_head *su_bh) { struct nilfs_segment_usage *su; void *kaddr; kaddr = kmap_atomic(su_bh->b_page); su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr); if (unlikely(!nilfs_segment_usage_clean(su))) { nilfs_warn(sufile->i_sb, "%s: segment %llu must be clean", __func__, (unsigned long long)segnum); kunmap_atomic(kaddr); return; } nilfs_segment_usage_set_dirty(su); kunmap_atomic(kaddr); nilfs_sufile_mod_counter(header_bh, -1, 1); NILFS_SUI(sufile)->ncleansegs--; mark_buffer_dirty(su_bh); nilfs_mdt_mark_dirty(sufile); } void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum, struct buffer_head *header_bh, struct buffer_head *su_bh) { struct nilfs_segment_usage *su; void *kaddr; int clean, dirty; kaddr = kmap_atomic(su_bh->b_page); su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr); if (su->su_flags == cpu_to_le32(BIT(NILFS_SEGMENT_USAGE_DIRTY)) && su->su_nblocks == cpu_to_le32(0)) { kunmap_atomic(kaddr); return; } clean = nilfs_segment_usage_clean(su); dirty = nilfs_segment_usage_dirty(su); /* make the segment garbage */ su->su_lastmod = cpu_to_le64(0); su->su_nblocks = cpu_to_le32(0); su->su_flags = cpu_to_le32(BIT(NILFS_SEGMENT_USAGE_DIRTY)); kunmap_atomic(kaddr); nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 
0 : 1); NILFS_SUI(sufile)->ncleansegs -= clean; mark_buffer_dirty(su_bh); nilfs_mdt_mark_dirty(sufile); } void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum, struct buffer_head *header_bh, struct buffer_head *su_bh) { struct nilfs_segment_usage *su; void *kaddr; int sudirty; kaddr = kmap_atomic(su_bh->b_page); su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr); if (nilfs_segment_usage_clean(su)) { nilfs_warn(sufile->i_sb, "%s: segment %llu is already clean", __func__, (unsigned long long)segnum); kunmap_atomic(kaddr); return; } WARN_ON(nilfs_segment_usage_error(su)); WARN_ON(!nilfs_segment_usage_dirty(su)); sudirty = nilfs_segment_usage_dirty(su); nilfs_segment_usage_set_clean(su); kunmap_atomic(kaddr); mark_buffer_dirty(su_bh); nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0); NILFS_SUI(sufile)->ncleansegs++; nilfs_mdt_mark_dirty(sufile); trace_nilfs2_segment_usage_freed(sufile, segnum); } /** * nilfs_sufile_mark_dirty - mark the buffer having a segment usage dirty * @sufile: inode of segment usage file * @segnum: segment number */ int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum) { struct buffer_head *bh; void *kaddr; struct nilfs_segment_usage *su; int ret; down_write(&NILFS_MDT(sufile)->mi_sem); ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh); if (!ret) { mark_buffer_dirty(bh); nilfs_mdt_mark_dirty(sufile); kaddr = kmap_atomic(bh->b_page); su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr); nilfs_segment_usage_set_dirty(su); kunmap_atomic(kaddr); brelse(bh); } up_write(&NILFS_MDT(sufile)->mi_sem); return ret; } /** * nilfs_sufile_set_segment_usage - set usage of a segment * @sufile: inode of segment usage file * @segnum: segment number * @nblocks: number of live blocks in the segment * @modtime: modification time (option) */ int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum, unsigned long nblocks, time64_t modtime) { struct buffer_head *bh; struct nilfs_segment_usage *su; void *kaddr; int ret; down_write(&NILFS_MDT(sufile)->mi_sem); ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh); if (ret < 0) goto out_sem; kaddr = kmap_atomic(bh->b_page); su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr); WARN_ON(nilfs_segment_usage_error(su)); if (modtime) su->su_lastmod = cpu_to_le64(modtime); su->su_nblocks = cpu_to_le32(nblocks); kunmap_atomic(kaddr); mark_buffer_dirty(bh); nilfs_mdt_mark_dirty(sufile); brelse(bh); out_sem: up_write(&NILFS_MDT(sufile)->mi_sem); return ret; } /** * nilfs_sufile_get_stat - get segment usage statistics * @sufile: inode of segment usage file * @sustat: pointer to a structure of segment usage statistics * * Description: nilfs_sufile_get_stat() returns information about segment * usage. * * Return Value: On success, 0 is returned, and segment usage information is * stored in the place pointed by @sustat. On error, one of the following * negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. 
*/ int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat) { struct buffer_head *header_bh; struct nilfs_sufile_header *header; struct the_nilfs *nilfs = sufile->i_sb->s_fs_info; void *kaddr; int ret; down_read(&NILFS_MDT(sufile)->mi_sem); ret = nilfs_sufile_get_header_block(sufile, &header_bh); if (ret < 0) goto out_sem; kaddr = kmap_atomic(header_bh->b_page); header = kaddr + bh_offset(header_bh); sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile); sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs); sustat->ss_ndirtysegs = le64_to_cpu(header->sh_ndirtysegs); sustat->ss_ctime = nilfs->ns_ctime; sustat->ss_nongc_ctime = nilfs->ns_nongc_ctime; spin_lock(&nilfs->ns_last_segment_lock); sustat->ss_prot_seq = nilfs->ns_prot_seq; spin_unlock(&nilfs->ns_last_segment_lock); kunmap_atomic(kaddr); brelse(header_bh); out_sem: up_read(&NILFS_MDT(sufile)->mi_sem); return ret; } void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum, struct buffer_head *header_bh, struct buffer_head *su_bh) { struct nilfs_segment_usage *su; void *kaddr; int suclean; kaddr = kmap_atomic(su_bh->b_page); su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr); if (nilfs_segment_usage_error(su)) { kunmap_atomic(kaddr); return; } suclean = nilfs_segment_usage_clean(su); nilfs_segment_usage_set_error(su); kunmap_atomic(kaddr); if (suclean) { nilfs_sufile_mod_counter(header_bh, -1, 0); NILFS_SUI(sufile)->ncleansegs--; } mark_buffer_dirty(su_bh); nilfs_mdt_mark_dirty(sufile); } /** * nilfs_sufile_truncate_range - truncate range of segment array * @sufile: inode of segment usage file * @start: start segment number (inclusive) * @end: end segment number (inclusive) * * Return Value: On success, 0 is returned. On error, one of the * following negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. 
* * %-EINVAL - Invalid number of segments specified * * %-EBUSY - Dirty or active segments are present in the range */ static int nilfs_sufile_truncate_range(struct inode *sufile, __u64 start, __u64 end) { struct the_nilfs *nilfs = sufile->i_sb->s_fs_info; struct buffer_head *header_bh; struct buffer_head *su_bh; struct nilfs_segment_usage *su, *su2; size_t susz = NILFS_MDT(sufile)->mi_entry_size; unsigned long segusages_per_block; unsigned long nsegs, ncleaned; __u64 segnum; void *kaddr; ssize_t n, nc; int ret; int j; nsegs = nilfs_sufile_get_nsegments(sufile); ret = -EINVAL; if (start > end || start >= nsegs) goto out; ret = nilfs_sufile_get_header_block(sufile, &header_bh); if (ret < 0) goto out; segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile); ncleaned = 0; for (segnum = start; segnum <= end; segnum += n) { n = min_t(unsigned long, segusages_per_block - nilfs_sufile_get_offset(sufile, segnum), end - segnum + 1); ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &su_bh); if (ret < 0) { if (ret != -ENOENT) goto out_header; /* hole */ continue; } kaddr = kmap_atomic(su_bh->b_page); su = nilfs_sufile_block_get_segment_usage( sufile, segnum, su_bh, kaddr); su2 = su; for (j = 0; j < n; j++, su = (void *)su + susz) { if ((le32_to_cpu(su->su_flags) & ~BIT(NILFS_SEGMENT_USAGE_ERROR)) || nilfs_segment_is_active(nilfs, segnum + j)) { ret = -EBUSY; kunmap_atomic(kaddr); brelse(su_bh); goto out_header; } } nc = 0; for (su = su2, j = 0; j < n; j++, su = (void *)su + susz) { if (nilfs_segment_usage_error(su)) { nilfs_segment_usage_set_clean(su); nc++; } } kunmap_atomic(kaddr); if (nc > 0) { mark_buffer_dirty(su_bh); ncleaned += nc; } brelse(su_bh); if (n == segusages_per_block) { /* make hole */ nilfs_sufile_delete_segment_usage_block(sufile, segnum); } } ret = 0; out_header: if (ncleaned > 0) { NILFS_SUI(sufile)->ncleansegs += ncleaned; nilfs_sufile_mod_counter(header_bh, ncleaned, 0); nilfs_mdt_mark_dirty(sufile); } brelse(header_bh); out: return ret; } /** * nilfs_sufile_resize - resize segment array * @sufile: inode of segment usage file * @newnsegs: new number of segments * * Return Value: On success, 0 is returned. On error, one of the * following negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. * * %-ENOSPC - Enough free space is not left for shrinking * * %-EBUSY - Dirty or active segments exist in the region to be truncated */ int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs) { struct the_nilfs *nilfs = sufile->i_sb->s_fs_info; struct buffer_head *header_bh; struct nilfs_sufile_header *header; struct nilfs_sufile_info *sui = NILFS_SUI(sufile); void *kaddr; unsigned long nsegs, nrsvsegs; int ret = 0; down_write(&NILFS_MDT(sufile)->mi_sem); nsegs = nilfs_sufile_get_nsegments(sufile); if (nsegs == newnsegs) goto out; ret = -ENOSPC; nrsvsegs = nilfs_nrsvsegs(nilfs, newnsegs); if (newnsegs < nsegs && nsegs - newnsegs + nrsvsegs > sui->ncleansegs) goto out; ret = nilfs_sufile_get_header_block(sufile, &header_bh); if (ret < 0) goto out; if (newnsegs > nsegs) { sui->ncleansegs += newnsegs - nsegs; } else /* newnsegs < nsegs */ { ret = nilfs_sufile_truncate_range(sufile, newnsegs, nsegs - 1); if (ret < 0) goto out_header; sui->ncleansegs -= nsegs - newnsegs; /* * If the sufile is successfully truncated, immediately adjust * the segment allocation space while locking the semaphore * "mi_sem" so that nilfs_sufile_alloc() never allocates * segments in the truncated space. 
*/ sui->allocmax = newnsegs - 1; sui->allocmin = 0; } kaddr = kmap_atomic(header_bh->b_page); header = kaddr + bh_offset(header_bh); header->sh_ncleansegs = cpu_to_le64(sui->ncleansegs); kunmap_atomic(kaddr); mark_buffer_dirty(header_bh); nilfs_mdt_mark_dirty(sufile); nilfs_set_nsegments(nilfs, newnsegs); out_header: brelse(header_bh); out: up_write(&NILFS_MDT(sufile)->mi_sem); return ret; } /** * nilfs_sufile_get_suinfo - * @sufile: inode of segment usage file * @segnum: segment number to start looking * @buf: array of suinfo * @sisz: byte size of suinfo * @nsi: size of suinfo array * * Description: * * Return Value: On success, 0 is returned and .... On error, one of the * following negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. */ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf, unsigned int sisz, size_t nsi) { struct buffer_head *su_bh; struct nilfs_segment_usage *su; struct nilfs_suinfo *si = buf; size_t susz = NILFS_MDT(sufile)->mi_entry_size; struct the_nilfs *nilfs = sufile->i_sb->s_fs_info; void *kaddr; unsigned long nsegs, segusages_per_block; ssize_t n; int ret, i, j; down_read(&NILFS_MDT(sufile)->mi_sem); segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile); nsegs = min_t(unsigned long, nilfs_sufile_get_nsegments(sufile) - segnum, nsi); for (i = 0; i < nsegs; i += n, segnum += n) { n = min_t(unsigned long, segusages_per_block - nilfs_sufile_get_offset(sufile, segnum), nsegs - i); ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &su_bh); if (ret < 0) { if (ret != -ENOENT) goto out; /* hole */ memset(si, 0, sisz * n); si = (void *)si + sisz * n; continue; } kaddr = kmap_atomic(su_bh->b_page); su = nilfs_sufile_block_get_segment_usage( sufile, segnum, su_bh, kaddr); for (j = 0; j < n; j++, su = (void *)su + susz, si = (void *)si + sisz) { si->sui_lastmod = le64_to_cpu(su->su_lastmod); si->sui_nblocks = le32_to_cpu(su->su_nblocks); si->sui_flags = le32_to_cpu(su->su_flags) & ~BIT(NILFS_SEGMENT_USAGE_ACTIVE); if (nilfs_segment_is_active(nilfs, segnum + j)) si->sui_flags |= BIT(NILFS_SEGMENT_USAGE_ACTIVE); } kunmap_atomic(kaddr); brelse(su_bh); } ret = nsegs; out: up_read(&NILFS_MDT(sufile)->mi_sem); return ret; } /** * nilfs_sufile_set_suinfo - sets segment usage info * @sufile: inode of segment usage file * @buf: array of suinfo_update * @supsz: byte size of suinfo_update * @nsup: size of suinfo_update array * * Description: Takes an array of nilfs_suinfo_update structs and updates * segment usage accordingly. Only the fields indicated by the sup_flags * are updated. * * Return Value: On success, 0 is returned. On error, one of the * following negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. 
* * %-EINVAL - Invalid values in input (segment number, flags or nblocks) */ ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf, unsigned int supsz, size_t nsup) { struct the_nilfs *nilfs = sufile->i_sb->s_fs_info; struct buffer_head *header_bh, *bh; struct nilfs_suinfo_update *sup, *supend = buf + supsz * nsup; struct nilfs_segment_usage *su; void *kaddr; unsigned long blkoff, prev_blkoff; int cleansi, cleansu, dirtysi, dirtysu; long ncleaned = 0, ndirtied = 0; int ret = 0; if (unlikely(nsup == 0)) return ret; for (sup = buf; sup < supend; sup = (void *)sup + supsz) { if (sup->sup_segnum >= nilfs->ns_nsegments || (sup->sup_flags & (~0UL << __NR_NILFS_SUINFO_UPDATE_FIELDS)) || (nilfs_suinfo_update_nblocks(sup) && sup->sup_sui.sui_nblocks > nilfs->ns_blocks_per_segment)) return -EINVAL; } down_write(&NILFS_MDT(sufile)->mi_sem); ret = nilfs_sufile_get_header_block(sufile, &header_bh); if (ret < 0) goto out_sem; sup = buf; blkoff = nilfs_sufile_get_blkoff(sufile, sup->sup_segnum); ret = nilfs_mdt_get_block(sufile, blkoff, 1, NULL, &bh); if (ret < 0) goto out_header; for (;;) { kaddr = kmap_atomic(bh->b_page); su = nilfs_sufile_block_get_segment_usage( sufile, sup->sup_segnum, bh, kaddr); if (nilfs_suinfo_update_lastmod(sup)) su->su_lastmod = cpu_to_le64(sup->sup_sui.sui_lastmod); if (nilfs_suinfo_update_nblocks(sup)) su->su_nblocks = cpu_to_le32(sup->sup_sui.sui_nblocks); if (nilfs_suinfo_update_flags(sup)) { /* * Active flag is a virtual flag projected by running * nilfs kernel code - drop it not to write it to * disk. */ sup->sup_sui.sui_flags &= ~BIT(NILFS_SEGMENT_USAGE_ACTIVE); cleansi = nilfs_suinfo_clean(&sup->sup_sui); cleansu = nilfs_segment_usage_clean(su); dirtysi = nilfs_suinfo_dirty(&sup->sup_sui); dirtysu = nilfs_segment_usage_dirty(su); if (cleansi && !cleansu) ++ncleaned; else if (!cleansi && cleansu) --ncleaned; if (dirtysi && !dirtysu) ++ndirtied; else if (!dirtysi && dirtysu) --ndirtied; su->su_flags = cpu_to_le32(sup->sup_sui.sui_flags); } kunmap_atomic(kaddr); sup = (void *)sup + supsz; if (sup >= supend) break; prev_blkoff = blkoff; blkoff = nilfs_sufile_get_blkoff(sufile, sup->sup_segnum); if (blkoff == prev_blkoff) continue; /* get different block */ mark_buffer_dirty(bh); put_bh(bh); ret = nilfs_mdt_get_block(sufile, blkoff, 1, NULL, &bh); if (unlikely(ret < 0)) goto out_mark; } mark_buffer_dirty(bh); put_bh(bh); out_mark: if (ncleaned || ndirtied) { nilfs_sufile_mod_counter(header_bh, (u64)ncleaned, (u64)ndirtied); NILFS_SUI(sufile)->ncleansegs += ncleaned; } nilfs_mdt_mark_dirty(sufile); out_header: put_bh(header_bh); out_sem: up_write(&NILFS_MDT(sufile)->mi_sem); return ret; } /** * nilfs_sufile_trim_fs() - trim ioctl handle function * @sufile: inode of segment usage file * @range: fstrim_range structure * * start: First Byte to trim * len: number of Bytes to trim from start * minlen: minimum extent length in Bytes * * Decription: nilfs_sufile_trim_fs goes through all segments containing bytes * from start to start+len. start is rounded up to the next block boundary * and start+len is rounded down. For each clean segment blkdev_issue_discard * function is invoked. * * Return Value: On success, 0 is returned or negative error code, otherwise. 
*/ int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range) { struct the_nilfs *nilfs = sufile->i_sb->s_fs_info; struct buffer_head *su_bh; struct nilfs_segment_usage *su; void *kaddr; size_t n, i, susz = NILFS_MDT(sufile)->mi_entry_size; sector_t seg_start, seg_end, start_block, end_block; sector_t start = 0, nblocks = 0; u64 segnum, segnum_end, minlen, len, max_blocks, ndiscarded = 0; int ret = 0; unsigned int sects_per_block; sects_per_block = (1 << nilfs->ns_blocksize_bits) / bdev_logical_block_size(nilfs->ns_bdev); len = range->len >> nilfs->ns_blocksize_bits; minlen = range->minlen >> nilfs->ns_blocksize_bits; max_blocks = ((u64)nilfs->ns_nsegments * nilfs->ns_blocks_per_segment); if (!len || range->start >= max_blocks << nilfs->ns_blocksize_bits) return -EINVAL; start_block = (range->start + nilfs->ns_blocksize - 1) >> nilfs->ns_blocksize_bits; /* * range->len can be very large (actually, it is set to * ULLONG_MAX by default) - truncate upper end of the range * carefully so as not to overflow. */ if (max_blocks - start_block < len) end_block = max_blocks - 1; else end_block = start_block + len - 1; segnum = nilfs_get_segnum_of_block(nilfs, start_block); segnum_end = nilfs_get_segnum_of_block(nilfs, end_block); down_read(&NILFS_MDT(sufile)->mi_sem); while (segnum <= segnum_end) { n = nilfs_sufile_segment_usages_in_block(sufile, segnum, segnum_end); ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &su_bh); if (ret < 0) { if (ret != -ENOENT) goto out_sem; /* hole */ segnum += n; continue; } kaddr = kmap_atomic(su_bh->b_page); su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr); for (i = 0; i < n; ++i, ++segnum, su = (void *)su + susz) { if (!nilfs_segment_usage_clean(su)) continue; nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end); if (!nblocks) { /* start new extent */ start = seg_start; nblocks = seg_end - seg_start + 1; continue; } if (start + nblocks == seg_start) { /* add to previous extent */ nblocks += seg_end - seg_start + 1; continue; } /* discard previous extent */ if (start < start_block) { nblocks -= start_block - start; start = start_block; } if (nblocks >= minlen) { kunmap_atomic(kaddr); ret = blkdev_issue_discard(nilfs->ns_bdev, start * sects_per_block, nblocks * sects_per_block, GFP_NOFS); if (ret < 0) { put_bh(su_bh); goto out_sem; } ndiscarded += nblocks; kaddr = kmap_atomic(su_bh->b_page); su = nilfs_sufile_block_get_segment_usage( sufile, segnum, su_bh, kaddr); } /* start new extent */ start = seg_start; nblocks = seg_end - seg_start + 1; } kunmap_atomic(kaddr); put_bh(su_bh); } if (nblocks) { /* discard last extent */ if (start < start_block) { nblocks -= start_block - start; start = start_block; } if (start + nblocks > end_block + 1) nblocks = end_block - start + 1; if (nblocks >= minlen) { ret = blkdev_issue_discard(nilfs->ns_bdev, start * sects_per_block, nblocks * sects_per_block, GFP_NOFS); if (!ret) ndiscarded += nblocks; } } out_sem: up_read(&NILFS_MDT(sufile)->mi_sem); range->len = ndiscarded << nilfs->ns_blocksize_bits; return ret; } /** * nilfs_sufile_read - read or get sufile inode * @sb: super block instance * @susize: size of a segment usage entry * @raw_inode: on-disk sufile inode * @inodep: buffer to store the inode */ int nilfs_sufile_read(struct super_block *sb, size_t susize, struct nilfs_inode *raw_inode, struct inode **inodep) { struct inode *sufile; struct nilfs_sufile_info *sui; struct buffer_head *header_bh; struct nilfs_sufile_header *header; void *kaddr; int err; if (susize > 
sb->s_blocksize) { nilfs_err(sb, "too large segment usage size: %zu bytes", susize); return -EINVAL; } else if (susize < NILFS_MIN_SEGMENT_USAGE_SIZE) { nilfs_err(sb, "too small segment usage size: %zu bytes", susize); return -EINVAL; } sufile = nilfs_iget_locked(sb, NULL, NILFS_SUFILE_INO); if (unlikely(!sufile)) return -ENOMEM; if (!(sufile->i_state & I_NEW)) goto out; err = nilfs_mdt_init(sufile, NILFS_MDT_GFP, sizeof(*sui)); if (err) goto failed; nilfs_mdt_set_entry_size(sufile, susize, sizeof(struct nilfs_sufile_header)); err = nilfs_read_inode_common(sufile, raw_inode); if (err) goto failed; err = nilfs_sufile_get_header_block(sufile, &header_bh); if (err) goto failed; sui = NILFS_SUI(sufile); kaddr = kmap_atomic(header_bh->b_page); header = kaddr + bh_offset(header_bh); sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs); kunmap_atomic(kaddr); brelse(header_bh); sui->allocmax = nilfs_sufile_get_nsegments(sufile) - 1; sui->allocmin = 0; unlock_new_inode(sufile); out: *inodep = sufile; return 0; failed: iget_failed(sufile); return err; }
linux-master
fs/nilfs2/sufile.c
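A simplified model of the allocation scan in nilfs_sufile_alloc() above (the function and variable names below are invented, and a plain bitmap stands in for the on-disk segment-usage entries). The kernel starts scanning just after the last allocated segment inside the [allocmin, allocmax] window, wraps around once within that window, and only then falls back to the segment ranges above and below it, giving up with -ENOSPC when every candidate has been visited:

#include <stdbool.h>
#include <stdio.h>

#define NSEGMENTS 16UL

/*
 * Return the next clean segment following the sufile scan order, or -1
 * (the kernel returns -ENOSPC) when none is found.
 */
static long alloc_segment(const bool *clean, unsigned long nsegments,
			  unsigned long allocmin, unsigned long allocmax,
			  unsigned long last_alloc)
{
	unsigned long segnum = last_alloc + 1;
	unsigned long maxsegnum = allocmax;
	unsigned long cnt = 0;

	if (segnum < allocmin || segnum > allocmax)
		segnum = allocmin;

	while (cnt < nsegments) {
		if (segnum > maxsegnum) {
			if (cnt < allocmax - allocmin + 1) {
				/* wrap around inside the limited window */
				segnum = allocmin;
				maxsegnum = last_alloc;
			} else if (segnum > allocmin &&
				   allocmax + 1 < nsegments) {
				/* window exhausted: try the range above it */
				segnum = allocmax + 1;
				maxsegnum = nsegments - 1;
			} else if (allocmin > 0) {
				/* finally try the range below the window */
				segnum = 0;
				maxsegnum = allocmin - 1;
			} else {
				break;
			}
		}
		if (clean[segnum])
			return (long)segnum;	/* found a clean segment */
		segnum++;
		cnt++;
	}
	return -1;
}

int main(void)
{
	bool clean[NSEGMENTS] = { false };

	clean[2] = clean[13] = true;
	/*
	 * Window [4, 11], last allocation at 9: both clean segments lie
	 * outside the window, so the fallback above it wins (prints 13).
	 */
	printf("allocated segment %ld\n",
	       alloc_segment(clean, NSEGMENTS, 4, 11, 9));
	return 0;
}

The real nilfs_sufile_alloc() examines one segment-usage block at a time and updates the header counters as it goes; the sketch collapses that to one segment per step but keeps the same wrap-around branch structure.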
// SPDX-License-Identifier: GPL-2.0+ /* * NILFS disk address translation. * * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. * * Written by Koji Sato. */ #include <linux/types.h> #include <linux/buffer_head.h> #include <linux/string.h> #include <linux/errno.h> #include "nilfs.h" #include "mdt.h" #include "alloc.h" #include "dat.h" #define NILFS_CNO_MIN ((__u64)1) #define NILFS_CNO_MAX (~(__u64)0) /** * struct nilfs_dat_info - on-memory private data of DAT file * @mi: on-memory private data of metadata file * @palloc_cache: persistent object allocator cache of DAT file * @shadow: shadow map of DAT file */ struct nilfs_dat_info { struct nilfs_mdt_info mi; struct nilfs_palloc_cache palloc_cache; struct nilfs_shadow_map shadow; }; static inline struct nilfs_dat_info *NILFS_DAT_I(struct inode *dat) { return (struct nilfs_dat_info *)NILFS_MDT(dat); } static int nilfs_dat_prepare_entry(struct inode *dat, struct nilfs_palloc_req *req, int create) { int ret; ret = nilfs_palloc_get_entry_block(dat, req->pr_entry_nr, create, &req->pr_entry_bh); if (unlikely(ret == -ENOENT)) { nilfs_err(dat->i_sb, "DAT doesn't have a block to manage vblocknr = %llu", (unsigned long long)req->pr_entry_nr); /* * Return internal code -EINVAL to notify bmap layer of * metadata corruption. */ ret = -EINVAL; } return ret; } static void nilfs_dat_commit_entry(struct inode *dat, struct nilfs_palloc_req *req) { mark_buffer_dirty(req->pr_entry_bh); nilfs_mdt_mark_dirty(dat); brelse(req->pr_entry_bh); } static void nilfs_dat_abort_entry(struct inode *dat, struct nilfs_palloc_req *req) { brelse(req->pr_entry_bh); } int nilfs_dat_prepare_alloc(struct inode *dat, struct nilfs_palloc_req *req) { int ret; ret = nilfs_palloc_prepare_alloc_entry(dat, req); if (ret < 0) return ret; ret = nilfs_dat_prepare_entry(dat, req, 1); if (ret < 0) nilfs_palloc_abort_alloc_entry(dat, req); return ret; } void nilfs_dat_commit_alloc(struct inode *dat, struct nilfs_palloc_req *req) { struct nilfs_dat_entry *entry; void *kaddr; kaddr = kmap_atomic(req->pr_entry_bh->b_page); entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, req->pr_entry_bh, kaddr); entry->de_start = cpu_to_le64(NILFS_CNO_MIN); entry->de_end = cpu_to_le64(NILFS_CNO_MAX); entry->de_blocknr = cpu_to_le64(0); kunmap_atomic(kaddr); nilfs_palloc_commit_alloc_entry(dat, req); nilfs_dat_commit_entry(dat, req); } void nilfs_dat_abort_alloc(struct inode *dat, struct nilfs_palloc_req *req) { nilfs_dat_abort_entry(dat, req); nilfs_palloc_abort_alloc_entry(dat, req); } static void nilfs_dat_commit_free(struct inode *dat, struct nilfs_palloc_req *req) { struct nilfs_dat_entry *entry; void *kaddr; kaddr = kmap_atomic(req->pr_entry_bh->b_page); entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, req->pr_entry_bh, kaddr); entry->de_start = cpu_to_le64(NILFS_CNO_MIN); entry->de_end = cpu_to_le64(NILFS_CNO_MIN); entry->de_blocknr = cpu_to_le64(0); kunmap_atomic(kaddr); nilfs_dat_commit_entry(dat, req); if (unlikely(req->pr_desc_bh == NULL || req->pr_bitmap_bh == NULL)) { nilfs_error(dat->i_sb, "state inconsistency probably due to duplicate use of vblocknr = %llu", (unsigned long long)req->pr_entry_nr); return; } nilfs_palloc_commit_free_entry(dat, req); } int nilfs_dat_prepare_start(struct inode *dat, struct nilfs_palloc_req *req) { return nilfs_dat_prepare_entry(dat, req, 0); } void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req, sector_t blocknr) { struct nilfs_dat_entry *entry; void *kaddr; kaddr = kmap_atomic(req->pr_entry_bh->b_page); 
entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, req->pr_entry_bh, kaddr); entry->de_start = cpu_to_le64(nilfs_mdt_cno(dat)); entry->de_blocknr = cpu_to_le64(blocknr); kunmap_atomic(kaddr); nilfs_dat_commit_entry(dat, req); } int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req) { struct nilfs_dat_entry *entry; __u64 start; sector_t blocknr; void *kaddr; int ret; ret = nilfs_dat_prepare_entry(dat, req, 0); if (ret < 0) return ret; kaddr = kmap_atomic(req->pr_entry_bh->b_page); entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, req->pr_entry_bh, kaddr); start = le64_to_cpu(entry->de_start); blocknr = le64_to_cpu(entry->de_blocknr); kunmap_atomic(kaddr); if (blocknr == 0) { ret = nilfs_palloc_prepare_free_entry(dat, req); if (ret < 0) { nilfs_dat_abort_entry(dat, req); return ret; } } if (unlikely(start > nilfs_mdt_cno(dat))) { nilfs_err(dat->i_sb, "vblocknr = %llu has abnormal lifetime: start cno (= %llu) > current cno (= %llu)", (unsigned long long)req->pr_entry_nr, (unsigned long long)start, (unsigned long long)nilfs_mdt_cno(dat)); nilfs_dat_abort_entry(dat, req); return -EINVAL; } return 0; } void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req, int dead) { struct nilfs_dat_entry *entry; __u64 start, end; sector_t blocknr; void *kaddr; kaddr = kmap_atomic(req->pr_entry_bh->b_page); entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, req->pr_entry_bh, kaddr); end = start = le64_to_cpu(entry->de_start); if (!dead) { end = nilfs_mdt_cno(dat); WARN_ON(start > end); } entry->de_end = cpu_to_le64(end); blocknr = le64_to_cpu(entry->de_blocknr); kunmap_atomic(kaddr); if (blocknr == 0) nilfs_dat_commit_free(dat, req); else nilfs_dat_commit_entry(dat, req); } void nilfs_dat_abort_end(struct inode *dat, struct nilfs_palloc_req *req) { struct nilfs_dat_entry *entry; __u64 start; sector_t blocknr; void *kaddr; kaddr = kmap_atomic(req->pr_entry_bh->b_page); entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, req->pr_entry_bh, kaddr); start = le64_to_cpu(entry->de_start); blocknr = le64_to_cpu(entry->de_blocknr); kunmap_atomic(kaddr); if (start == nilfs_mdt_cno(dat) && blocknr == 0) nilfs_palloc_abort_free_entry(dat, req); nilfs_dat_abort_entry(dat, req); } int nilfs_dat_prepare_update(struct inode *dat, struct nilfs_palloc_req *oldreq, struct nilfs_palloc_req *newreq) { int ret; ret = nilfs_dat_prepare_end(dat, oldreq); if (!ret) { ret = nilfs_dat_prepare_alloc(dat, newreq); if (ret < 0) nilfs_dat_abort_end(dat, oldreq); } return ret; } void nilfs_dat_commit_update(struct inode *dat, struct nilfs_palloc_req *oldreq, struct nilfs_palloc_req *newreq, int dead) { nilfs_dat_commit_end(dat, oldreq, dead); nilfs_dat_commit_alloc(dat, newreq); } void nilfs_dat_abort_update(struct inode *dat, struct nilfs_palloc_req *oldreq, struct nilfs_palloc_req *newreq) { nilfs_dat_abort_end(dat, oldreq); nilfs_dat_abort_alloc(dat, newreq); } /** * nilfs_dat_mark_dirty - * @dat: DAT file inode * @vblocknr: virtual block number * * Description: * * Return Value: On success, 0 is returned. On error, one of the following * negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. 
*/ int nilfs_dat_mark_dirty(struct inode *dat, __u64 vblocknr) { struct nilfs_palloc_req req; int ret; req.pr_entry_nr = vblocknr; ret = nilfs_dat_prepare_entry(dat, &req, 0); if (ret == 0) nilfs_dat_commit_entry(dat, &req); return ret; } /** * nilfs_dat_freev - free virtual block numbers * @dat: DAT file inode * @vblocknrs: array of virtual block numbers * @nitems: number of virtual block numbers * * Description: nilfs_dat_freev() frees the virtual block numbers specified by * @vblocknrs and @nitems. * * Return Value: On success, 0 is returned. On error, one of the following * negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. * * %-ENOENT - The virtual block number have not been allocated. */ int nilfs_dat_freev(struct inode *dat, __u64 *vblocknrs, size_t nitems) { return nilfs_palloc_freev(dat, vblocknrs, nitems); } /** * nilfs_dat_move - change a block number * @dat: DAT file inode * @vblocknr: virtual block number * @blocknr: block number * * Description: nilfs_dat_move() changes the block number associated with * @vblocknr to @blocknr. * * Return Value: On success, 0 is returned. On error, one of the following * negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. */ int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr) { struct buffer_head *entry_bh; struct nilfs_dat_entry *entry; void *kaddr; int ret; ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh); if (ret < 0) return ret; /* * The given disk block number (blocknr) is not yet written to * the device at this point. * * To prevent nilfs_dat_translate() from returning the * uncommitted block number, this makes a copy of the entry * buffer and redirects nilfs_dat_translate() to the copy. */ if (!buffer_nilfs_redirected(entry_bh)) { ret = nilfs_mdt_freeze_buffer(dat, entry_bh); if (ret) { brelse(entry_bh); return ret; } } kaddr = kmap_atomic(entry_bh->b_page); entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr); if (unlikely(entry->de_blocknr == cpu_to_le64(0))) { nilfs_crit(dat->i_sb, "%s: invalid vblocknr = %llu, [%llu, %llu)", __func__, (unsigned long long)vblocknr, (unsigned long long)le64_to_cpu(entry->de_start), (unsigned long long)le64_to_cpu(entry->de_end)); kunmap_atomic(kaddr); brelse(entry_bh); return -EINVAL; } WARN_ON(blocknr == 0); entry->de_blocknr = cpu_to_le64(blocknr); kunmap_atomic(kaddr); mark_buffer_dirty(entry_bh); nilfs_mdt_mark_dirty(dat); brelse(entry_bh); return 0; } /** * nilfs_dat_translate - translate a virtual block number to a block number * @dat: DAT file inode * @vblocknr: virtual block number * @blocknrp: pointer to a block number * * Description: nilfs_dat_translate() maps the virtual block number @vblocknr * to the corresponding block number. * * Return Value: On success, 0 is returned and the block number associated * with @vblocknr is stored in the place pointed by @blocknrp. On error, one * of the following negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. * * %-ENOENT - A block number associated with @vblocknr does not exist. 
*/ int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp) { struct buffer_head *entry_bh, *bh; struct nilfs_dat_entry *entry; sector_t blocknr; void *kaddr; int ret; ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh); if (ret < 0) return ret; if (!nilfs_doing_gc() && buffer_nilfs_redirected(entry_bh)) { bh = nilfs_mdt_get_frozen_buffer(dat, entry_bh); if (bh) { WARN_ON(!buffer_uptodate(bh)); brelse(entry_bh); entry_bh = bh; } } kaddr = kmap_atomic(entry_bh->b_page); entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr); blocknr = le64_to_cpu(entry->de_blocknr); if (blocknr == 0) { ret = -ENOENT; goto out; } *blocknrp = blocknr; out: kunmap_atomic(kaddr); brelse(entry_bh); return ret; } ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned int visz, size_t nvi) { struct buffer_head *entry_bh; struct nilfs_dat_entry *entry; struct nilfs_vinfo *vinfo = buf; __u64 first, last; void *kaddr; unsigned long entries_per_block = NILFS_MDT(dat)->mi_entries_per_block; int i, j, n, ret; for (i = 0; i < nvi; i += n) { ret = nilfs_palloc_get_entry_block(dat, vinfo->vi_vblocknr, 0, &entry_bh); if (ret < 0) return ret; kaddr = kmap_atomic(entry_bh->b_page); /* last virtual block number in this block */ first = vinfo->vi_vblocknr; do_div(first, entries_per_block); first *= entries_per_block; last = first + entries_per_block - 1; for (j = i, n = 0; j < nvi && vinfo->vi_vblocknr >= first && vinfo->vi_vblocknr <= last; j++, n++, vinfo = (void *)vinfo + visz) { entry = nilfs_palloc_block_get_entry( dat, vinfo->vi_vblocknr, entry_bh, kaddr); vinfo->vi_start = le64_to_cpu(entry->de_start); vinfo->vi_end = le64_to_cpu(entry->de_end); vinfo->vi_blocknr = le64_to_cpu(entry->de_blocknr); } kunmap_atomic(kaddr); brelse(entry_bh); } return nvi; } /** * nilfs_dat_read - read or get dat inode * @sb: super block instance * @entry_size: size of a dat entry * @raw_inode: on-disk dat inode * @inodep: buffer to store the inode */ int nilfs_dat_read(struct super_block *sb, size_t entry_size, struct nilfs_inode *raw_inode, struct inode **inodep) { static struct lock_class_key dat_lock_key; struct inode *dat; struct nilfs_dat_info *di; int err; if (entry_size > sb->s_blocksize) { nilfs_err(sb, "too large DAT entry size: %zu bytes", entry_size); return -EINVAL; } else if (entry_size < NILFS_MIN_DAT_ENTRY_SIZE) { nilfs_err(sb, "too small DAT entry size: %zu bytes", entry_size); return -EINVAL; } dat = nilfs_iget_locked(sb, NULL, NILFS_DAT_INO); if (unlikely(!dat)) return -ENOMEM; if (!(dat->i_state & I_NEW)) goto out; err = nilfs_mdt_init(dat, NILFS_MDT_GFP, sizeof(*di)); if (err) goto failed; err = nilfs_palloc_init_blockgroup(dat, entry_size); if (err) goto failed; di = NILFS_DAT_I(dat); lockdep_set_class(&di->mi.mi_sem, &dat_lock_key); nilfs_palloc_setup_cache(dat, &di->palloc_cache); err = nilfs_mdt_setup_shadow_map(dat, &di->shadow); if (err) goto failed; err = nilfs_read_inode_common(dat, raw_inode); if (err) goto failed; unlock_new_inode(dat); out: *inodep = dat; return 0; failed: iget_failed(dat); return err; }
linux-master
fs/nilfs2/dat.c
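The DAT code above rests on one invariant: an entry whose de_blocknr field is zero has no disk block assigned yet, so nilfs_dat_translate() fails with -ENOENT, while any non-zero value is returned as the physical block number. A minimal userspace sketch of that rule follows; the struct and helper names are illustrative stand-ins, not the kernel's on-disk layout.

/* Userspace sketch of the translation rule used by nilfs_dat_translate().
 * demo_dat_entry is a simplified stand-in for the on-disk DAT entry.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct demo_dat_entry {
	uint64_t de_blocknr;	/* 0 means "no block assigned yet" */
	uint64_t de_start;	/* first checkpoint where the block is live */
	uint64_t de_end;	/* checkpoint where the block dies */
};

/* Return 0 and store the block number, or -ENOENT if unassigned. */
static int demo_dat_translate(const struct demo_dat_entry *entry,
			      uint64_t *blocknrp)
{
	if (entry->de_blocknr == 0)
		return -ENOENT;
	*blocknrp = entry->de_blocknr;
	return 0;
}

int main(void)
{
	struct demo_dat_entry e = {
		.de_blocknr = 4096, .de_start = 7, .de_end = ~0ULL,
	};
	uint64_t blocknr;

	if (demo_dat_translate(&e, &blocknr) == 0)
		printf("vblock maps to block %llu\n",
		       (unsigned long long)blocknr);
	return 0;
}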
// SPDX-License-Identifier: GPL-2.0+ /* * NILFS dat/inode allocator * * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. * * Originally written by Koji Sato. * Two allocators were unified by Ryusuke Konishi and Amagai Yoshiji. */ #include <linux/types.h> #include <linux/buffer_head.h> #include <linux/fs.h> #include <linux/bitops.h> #include <linux/slab.h> #include "mdt.h" #include "alloc.h" /** * nilfs_palloc_groups_per_desc_block - get the number of groups that a group * descriptor block can maintain * @inode: inode of metadata file using this allocator */ static inline unsigned long nilfs_palloc_groups_per_desc_block(const struct inode *inode) { return i_blocksize(inode) / sizeof(struct nilfs_palloc_group_desc); } /** * nilfs_palloc_groups_count - get maximum number of groups * @inode: inode of metadata file using this allocator */ static inline unsigned long nilfs_palloc_groups_count(const struct inode *inode) { return 1UL << (BITS_PER_LONG - (inode->i_blkbits + 3 /* log2(8) */)); } /** * nilfs_palloc_init_blockgroup - initialize private variables for allocator * @inode: inode of metadata file using this allocator * @entry_size: size of the persistent object */ int nilfs_palloc_init_blockgroup(struct inode *inode, unsigned int entry_size) { struct nilfs_mdt_info *mi = NILFS_MDT(inode); mi->mi_bgl = kmalloc(sizeof(*mi->mi_bgl), GFP_NOFS); if (!mi->mi_bgl) return -ENOMEM; bgl_lock_init(mi->mi_bgl); nilfs_mdt_set_entry_size(inode, entry_size, 0); mi->mi_blocks_per_group = DIV_ROUND_UP(nilfs_palloc_entries_per_group(inode), mi->mi_entries_per_block) + 1; /* * Number of blocks in a group including entry blocks * and a bitmap block */ mi->mi_blocks_per_desc_block = nilfs_palloc_groups_per_desc_block(inode) * mi->mi_blocks_per_group + 1; /* * Number of blocks per descriptor including the * descriptor block */ return 0; } /** * nilfs_palloc_group - get group number and offset from an entry number * @inode: inode of metadata file using this allocator * @nr: serial number of the entry (e.g. inode number) * @offset: pointer to store offset number in the group */ static unsigned long nilfs_palloc_group(const struct inode *inode, __u64 nr, unsigned long *offset) { __u64 group = nr; *offset = do_div(group, nilfs_palloc_entries_per_group(inode)); return group; } /** * nilfs_palloc_desc_blkoff - get block offset of a group descriptor block * @inode: inode of metadata file using this allocator * @group: group number * * nilfs_palloc_desc_blkoff() returns block offset of the descriptor * block which contains a descriptor of the specified group. */ static unsigned long nilfs_palloc_desc_blkoff(const struct inode *inode, unsigned long group) { unsigned long desc_block = group / nilfs_palloc_groups_per_desc_block(inode); return desc_block * NILFS_MDT(inode)->mi_blocks_per_desc_block; } /** * nilfs_palloc_bitmap_blkoff - get block offset of a bitmap block * @inode: inode of metadata file using this allocator * @group: group number * * nilfs_palloc_bitmap_blkoff() returns block offset of the bitmap * block used to allocate/deallocate entries in the specified group. 
*/ static unsigned long nilfs_palloc_bitmap_blkoff(const struct inode *inode, unsigned long group) { unsigned long desc_offset = group % nilfs_palloc_groups_per_desc_block(inode); return nilfs_palloc_desc_blkoff(inode, group) + 1 + desc_offset * NILFS_MDT(inode)->mi_blocks_per_group; } /** * nilfs_palloc_group_desc_nfrees - get the number of free entries in a group * @desc: pointer to descriptor structure for the group * @lock: spin lock protecting @desc */ static unsigned long nilfs_palloc_group_desc_nfrees(const struct nilfs_palloc_group_desc *desc, spinlock_t *lock) { unsigned long nfree; spin_lock(lock); nfree = le32_to_cpu(desc->pg_nfrees); spin_unlock(lock); return nfree; } /** * nilfs_palloc_group_desc_add_entries - adjust count of free entries * @desc: pointer to descriptor structure for the group * @lock: spin lock protecting @desc * @n: delta to be added */ static u32 nilfs_palloc_group_desc_add_entries(struct nilfs_palloc_group_desc *desc, spinlock_t *lock, u32 n) { u32 nfree; spin_lock(lock); le32_add_cpu(&desc->pg_nfrees, n); nfree = le32_to_cpu(desc->pg_nfrees); spin_unlock(lock); return nfree; } /** * nilfs_palloc_entry_blkoff - get block offset of an entry block * @inode: inode of metadata file using this allocator * @nr: serial number of the entry (e.g. inode number) */ static unsigned long nilfs_palloc_entry_blkoff(const struct inode *inode, __u64 nr) { unsigned long group, group_offset; group = nilfs_palloc_group(inode, nr, &group_offset); return nilfs_palloc_bitmap_blkoff(inode, group) + 1 + group_offset / NILFS_MDT(inode)->mi_entries_per_block; } /** * nilfs_palloc_desc_block_init - initialize buffer of a group descriptor block * @inode: inode of metadata file * @bh: buffer head of the buffer to be initialized * @kaddr: kernel address mapped for the page including the buffer */ static void nilfs_palloc_desc_block_init(struct inode *inode, struct buffer_head *bh, void *kaddr) { struct nilfs_palloc_group_desc *desc = kaddr + bh_offset(bh); unsigned long n = nilfs_palloc_groups_per_desc_block(inode); __le32 nfrees; nfrees = cpu_to_le32(nilfs_palloc_entries_per_group(inode)); while (n-- > 0) { desc->pg_nfrees = nfrees; desc++; } } static int nilfs_palloc_get_block(struct inode *inode, unsigned long blkoff, int create, void (*init_block)(struct inode *, struct buffer_head *, void *), struct buffer_head **bhp, struct nilfs_bh_assoc *prev, spinlock_t *lock) { int ret; spin_lock(lock); if (prev->bh && blkoff == prev->blkoff && likely(buffer_uptodate(prev->bh))) { get_bh(prev->bh); *bhp = prev->bh; spin_unlock(lock); return 0; } spin_unlock(lock); ret = nilfs_mdt_get_block(inode, blkoff, create, init_block, bhp); if (!ret) { spin_lock(lock); /* * The following code must be safe for change of the * cache contents during the get block call. 
*/ brelse(prev->bh); get_bh(*bhp); prev->bh = *bhp; prev->blkoff = blkoff; spin_unlock(lock); } return ret; } /** * nilfs_palloc_delete_block - delete a block on the persistent allocator file * @inode: inode of metadata file using this allocator * @blkoff: block offset * @prev: nilfs_bh_assoc struct of the last used buffer * @lock: spin lock protecting @prev */ static int nilfs_palloc_delete_block(struct inode *inode, unsigned long blkoff, struct nilfs_bh_assoc *prev, spinlock_t *lock) { spin_lock(lock); if (prev->bh && blkoff == prev->blkoff) { brelse(prev->bh); prev->bh = NULL; } spin_unlock(lock); return nilfs_mdt_delete_block(inode, blkoff); } /** * nilfs_palloc_get_desc_block - get buffer head of a group descriptor block * @inode: inode of metadata file using this allocator * @group: group number * @create: create flag * @bhp: pointer to store the resultant buffer head */ static int nilfs_palloc_get_desc_block(struct inode *inode, unsigned long group, int create, struct buffer_head **bhp) { struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache; return nilfs_palloc_get_block(inode, nilfs_palloc_desc_blkoff(inode, group), create, nilfs_palloc_desc_block_init, bhp, &cache->prev_desc, &cache->lock); } /** * nilfs_palloc_get_bitmap_block - get buffer head of a bitmap block * @inode: inode of metadata file using this allocator * @group: group number * @create: create flag * @bhp: pointer to store the resultant buffer head */ static int nilfs_palloc_get_bitmap_block(struct inode *inode, unsigned long group, int create, struct buffer_head **bhp) { struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache; return nilfs_palloc_get_block(inode, nilfs_palloc_bitmap_blkoff(inode, group), create, NULL, bhp, &cache->prev_bitmap, &cache->lock); } /** * nilfs_palloc_delete_bitmap_block - delete a bitmap block * @inode: inode of metadata file using this allocator * @group: group number */ static int nilfs_palloc_delete_bitmap_block(struct inode *inode, unsigned long group) { struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache; return nilfs_palloc_delete_block(inode, nilfs_palloc_bitmap_blkoff(inode, group), &cache->prev_bitmap, &cache->lock); } /** * nilfs_palloc_get_entry_block - get buffer head of an entry block * @inode: inode of metadata file using this allocator * @nr: serial number of the entry (e.g. 
inode number) * @create: create flag * @bhp: pointer to store the resultant buffer head */ int nilfs_palloc_get_entry_block(struct inode *inode, __u64 nr, int create, struct buffer_head **bhp) { struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache; return nilfs_palloc_get_block(inode, nilfs_palloc_entry_blkoff(inode, nr), create, NULL, bhp, &cache->prev_entry, &cache->lock); } /** * nilfs_palloc_delete_entry_block - delete an entry block * @inode: inode of metadata file using this allocator * @nr: serial number of the entry */ static int nilfs_palloc_delete_entry_block(struct inode *inode, __u64 nr) { struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache; return nilfs_palloc_delete_block(inode, nilfs_palloc_entry_blkoff(inode, nr), &cache->prev_entry, &cache->lock); } /** * nilfs_palloc_block_get_group_desc - get kernel address of a group descriptor * @inode: inode of metadata file using this allocator * @group: group number * @bh: buffer head of the buffer storing the group descriptor block * @kaddr: kernel address mapped for the page including the buffer */ static struct nilfs_palloc_group_desc * nilfs_palloc_block_get_group_desc(const struct inode *inode, unsigned long group, const struct buffer_head *bh, void *kaddr) { return (struct nilfs_palloc_group_desc *)(kaddr + bh_offset(bh)) + group % nilfs_palloc_groups_per_desc_block(inode); } /** * nilfs_palloc_block_get_entry - get kernel address of an entry * @inode: inode of metadata file using this allocator * @nr: serial number of the entry (e.g. inode number) * @bh: buffer head of the buffer storing the entry block * @kaddr: kernel address mapped for the page including the buffer */ void *nilfs_palloc_block_get_entry(const struct inode *inode, __u64 nr, const struct buffer_head *bh, void *kaddr) { unsigned long entry_offset, group_offset; nilfs_palloc_group(inode, nr, &group_offset); entry_offset = group_offset % NILFS_MDT(inode)->mi_entries_per_block; return kaddr + bh_offset(bh) + entry_offset * NILFS_MDT(inode)->mi_entry_size; } /** * nilfs_palloc_find_available_slot - find available slot in a group * @bitmap: bitmap of the group * @target: offset number of an entry in the group (start point) * @bsize: size in bits * @lock: spin lock protecting @bitmap */ static int nilfs_palloc_find_available_slot(unsigned char *bitmap, unsigned long target, unsigned int bsize, spinlock_t *lock) { int pos, end = bsize; if (likely(target < bsize)) { pos = target; do { pos = nilfs_find_next_zero_bit(bitmap, end, pos); if (pos >= end) break; if (!nilfs_set_bit_atomic(lock, pos, bitmap)) return pos; } while (++pos < end); end = target; } /* wrap around */ for (pos = 0; pos < end; pos++) { pos = nilfs_find_next_zero_bit(bitmap, end, pos); if (pos >= end) break; if (!nilfs_set_bit_atomic(lock, pos, bitmap)) return pos; } return -ENOSPC; } /** * nilfs_palloc_rest_groups_in_desc_block - get the remaining number of groups * in a group descriptor block * @inode: inode of metadata file using this allocator * @curr: current group number * @max: maximum number of groups */ static unsigned long nilfs_palloc_rest_groups_in_desc_block(const struct inode *inode, unsigned long curr, unsigned long max) { return min_t(unsigned long, nilfs_palloc_groups_per_desc_block(inode) - curr % nilfs_palloc_groups_per_desc_block(inode), max - curr + 1); } /** * nilfs_palloc_count_desc_blocks - count descriptor blocks number * @inode: inode of metadata file using this allocator * @desc_blocks: descriptor blocks number [out] */ static int 
nilfs_palloc_count_desc_blocks(struct inode *inode, unsigned long *desc_blocks) { __u64 blknum; int ret; ret = nilfs_bmap_last_key(NILFS_I(inode)->i_bmap, &blknum); if (likely(!ret)) *desc_blocks = DIV_ROUND_UP( (unsigned long)blknum, NILFS_MDT(inode)->mi_blocks_per_desc_block); return ret; } /** * nilfs_palloc_mdt_file_can_grow - check potential opportunity for * MDT file growing * @inode: inode of metadata file using this allocator * @desc_blocks: known current descriptor blocks count */ static inline bool nilfs_palloc_mdt_file_can_grow(struct inode *inode, unsigned long desc_blocks) { return (nilfs_palloc_groups_per_desc_block(inode) * desc_blocks) < nilfs_palloc_groups_count(inode); } /** * nilfs_palloc_count_max_entries - count max number of entries that can be * described by descriptor blocks count * @inode: inode of metadata file using this allocator * @nused: current number of used entries * @nmaxp: max number of entries [out] */ int nilfs_palloc_count_max_entries(struct inode *inode, u64 nused, u64 *nmaxp) { unsigned long desc_blocks = 0; u64 entries_per_desc_block, nmax; int err; err = nilfs_palloc_count_desc_blocks(inode, &desc_blocks); if (unlikely(err)) return err; entries_per_desc_block = (u64)nilfs_palloc_entries_per_group(inode) * nilfs_palloc_groups_per_desc_block(inode); nmax = entries_per_desc_block * desc_blocks; if (nused == nmax && nilfs_palloc_mdt_file_can_grow(inode, desc_blocks)) nmax += entries_per_desc_block; if (nused > nmax) return -ERANGE; *nmaxp = nmax; return 0; } /** * nilfs_palloc_prepare_alloc_entry - prepare to allocate a persistent object * @inode: inode of metadata file using this allocator * @req: nilfs_palloc_req structure exchanged for the allocation */ int nilfs_palloc_prepare_alloc_entry(struct inode *inode, struct nilfs_palloc_req *req) { struct buffer_head *desc_bh, *bitmap_bh; struct nilfs_palloc_group_desc *desc; unsigned char *bitmap; void *desc_kaddr, *bitmap_kaddr; unsigned long group, maxgroup, ngroups; unsigned long group_offset, maxgroup_offset; unsigned long n, entries_per_group; unsigned long i, j; spinlock_t *lock; int pos, ret; ngroups = nilfs_palloc_groups_count(inode); maxgroup = ngroups - 1; group = nilfs_palloc_group(inode, req->pr_entry_nr, &group_offset); entries_per_group = nilfs_palloc_entries_per_group(inode); for (i = 0; i < ngroups; i += n) { if (group >= ngroups) { /* wrap around */ group = 0; maxgroup = nilfs_palloc_group(inode, req->pr_entry_nr, &maxgroup_offset) - 1; } ret = nilfs_palloc_get_desc_block(inode, group, 1, &desc_bh); if (ret < 0) return ret; desc_kaddr = kmap(desc_bh->b_page); desc = nilfs_palloc_block_get_group_desc( inode, group, desc_bh, desc_kaddr); n = nilfs_palloc_rest_groups_in_desc_block(inode, group, maxgroup); for (j = 0; j < n; j++, desc++, group++) { lock = nilfs_mdt_bgl_lock(inode, group); if (nilfs_palloc_group_desc_nfrees(desc, lock) > 0) { ret = nilfs_palloc_get_bitmap_block( inode, group, 1, &bitmap_bh); if (ret < 0) goto out_desc; bitmap_kaddr = kmap(bitmap_bh->b_page); bitmap = bitmap_kaddr + bh_offset(bitmap_bh); pos = nilfs_palloc_find_available_slot( bitmap, group_offset, entries_per_group, lock); if (pos >= 0) { /* found a free entry */ nilfs_palloc_group_desc_add_entries( desc, lock, -1); req->pr_entry_nr = entries_per_group * group + pos; kunmap(desc_bh->b_page); kunmap(bitmap_bh->b_page); req->pr_desc_bh = desc_bh; req->pr_bitmap_bh = bitmap_bh; return 0; } kunmap(bitmap_bh->b_page); brelse(bitmap_bh); } group_offset = 0; } kunmap(desc_bh->b_page); brelse(desc_bh); } /* no entries 
left */ return -ENOSPC; out_desc: kunmap(desc_bh->b_page); brelse(desc_bh); return ret; } /** * nilfs_palloc_commit_alloc_entry - finish allocation of a persistent object * @inode: inode of metadata file using this allocator * @req: nilfs_palloc_req structure exchanged for the allocation */ void nilfs_palloc_commit_alloc_entry(struct inode *inode, struct nilfs_palloc_req *req) { mark_buffer_dirty(req->pr_bitmap_bh); mark_buffer_dirty(req->pr_desc_bh); nilfs_mdt_mark_dirty(inode); brelse(req->pr_bitmap_bh); brelse(req->pr_desc_bh); } /** * nilfs_palloc_commit_free_entry - finish deallocating a persistent object * @inode: inode of metadata file using this allocator * @req: nilfs_palloc_req structure exchanged for the removal */ void nilfs_palloc_commit_free_entry(struct inode *inode, struct nilfs_palloc_req *req) { struct nilfs_palloc_group_desc *desc; unsigned long group, group_offset; unsigned char *bitmap; void *desc_kaddr, *bitmap_kaddr; spinlock_t *lock; group = nilfs_palloc_group(inode, req->pr_entry_nr, &group_offset); desc_kaddr = kmap(req->pr_desc_bh->b_page); desc = nilfs_palloc_block_get_group_desc(inode, group, req->pr_desc_bh, desc_kaddr); bitmap_kaddr = kmap(req->pr_bitmap_bh->b_page); bitmap = bitmap_kaddr + bh_offset(req->pr_bitmap_bh); lock = nilfs_mdt_bgl_lock(inode, group); if (!nilfs_clear_bit_atomic(lock, group_offset, bitmap)) nilfs_warn(inode->i_sb, "%s (ino=%lu): entry number %llu already freed", __func__, inode->i_ino, (unsigned long long)req->pr_entry_nr); else nilfs_palloc_group_desc_add_entries(desc, lock, 1); kunmap(req->pr_bitmap_bh->b_page); kunmap(req->pr_desc_bh->b_page); mark_buffer_dirty(req->pr_desc_bh); mark_buffer_dirty(req->pr_bitmap_bh); nilfs_mdt_mark_dirty(inode); brelse(req->pr_bitmap_bh); brelse(req->pr_desc_bh); } /** * nilfs_palloc_abort_alloc_entry - cancel allocation of a persistent object * @inode: inode of metadata file using this allocator * @req: nilfs_palloc_req structure exchanged for the allocation */ void nilfs_palloc_abort_alloc_entry(struct inode *inode, struct nilfs_palloc_req *req) { struct nilfs_palloc_group_desc *desc; void *desc_kaddr, *bitmap_kaddr; unsigned char *bitmap; unsigned long group, group_offset; spinlock_t *lock; group = nilfs_palloc_group(inode, req->pr_entry_nr, &group_offset); desc_kaddr = kmap(req->pr_desc_bh->b_page); desc = nilfs_palloc_block_get_group_desc(inode, group, req->pr_desc_bh, desc_kaddr); bitmap_kaddr = kmap(req->pr_bitmap_bh->b_page); bitmap = bitmap_kaddr + bh_offset(req->pr_bitmap_bh); lock = nilfs_mdt_bgl_lock(inode, group); if (!nilfs_clear_bit_atomic(lock, group_offset, bitmap)) nilfs_warn(inode->i_sb, "%s (ino=%lu): entry number %llu already freed", __func__, inode->i_ino, (unsigned long long)req->pr_entry_nr); else nilfs_palloc_group_desc_add_entries(desc, lock, 1); kunmap(req->pr_bitmap_bh->b_page); kunmap(req->pr_desc_bh->b_page); brelse(req->pr_bitmap_bh); brelse(req->pr_desc_bh); req->pr_entry_nr = 0; req->pr_bitmap_bh = NULL; req->pr_desc_bh = NULL; } /** * nilfs_palloc_prepare_free_entry - prepare to deallocate a persistent object * @inode: inode of metadata file using this allocator * @req: nilfs_palloc_req structure exchanged for the removal */ int nilfs_palloc_prepare_free_entry(struct inode *inode, struct nilfs_palloc_req *req) { struct buffer_head *desc_bh, *bitmap_bh; unsigned long group, group_offset; int ret; group = nilfs_palloc_group(inode, req->pr_entry_nr, &group_offset); ret = nilfs_palloc_get_desc_block(inode, group, 1, &desc_bh); if (ret < 0) return ret; ret = 
nilfs_palloc_get_bitmap_block(inode, group, 1, &bitmap_bh); if (ret < 0) { brelse(desc_bh); return ret; } req->pr_desc_bh = desc_bh; req->pr_bitmap_bh = bitmap_bh; return 0; } /** * nilfs_palloc_abort_free_entry - cancel deallocating a persistent object * @inode: inode of metadata file using this allocator * @req: nilfs_palloc_req structure exchanged for the removal */ void nilfs_palloc_abort_free_entry(struct inode *inode, struct nilfs_palloc_req *req) { brelse(req->pr_bitmap_bh); brelse(req->pr_desc_bh); req->pr_entry_nr = 0; req->pr_bitmap_bh = NULL; req->pr_desc_bh = NULL; } /** * nilfs_palloc_freev - deallocate a set of persistent objects * @inode: inode of metadata file using this allocator * @entry_nrs: array of entry numbers to be deallocated * @nitems: number of entries stored in @entry_nrs */ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems) { struct buffer_head *desc_bh, *bitmap_bh; struct nilfs_palloc_group_desc *desc; unsigned char *bitmap; void *desc_kaddr, *bitmap_kaddr; unsigned long group, group_offset; __u64 group_min_nr, last_nrs[8]; const unsigned long epg = nilfs_palloc_entries_per_group(inode); const unsigned int epb = NILFS_MDT(inode)->mi_entries_per_block; unsigned int entry_start, end, pos; spinlock_t *lock; int i, j, k, ret; u32 nfree; for (i = 0; i < nitems; i = j) { int change_group = false; int nempties = 0, n = 0; group = nilfs_palloc_group(inode, entry_nrs[i], &group_offset); ret = nilfs_palloc_get_desc_block(inode, group, 0, &desc_bh); if (ret < 0) return ret; ret = nilfs_palloc_get_bitmap_block(inode, group, 0, &bitmap_bh); if (ret < 0) { brelse(desc_bh); return ret; } /* Get the first entry number of the group */ group_min_nr = (__u64)group * epg; bitmap_kaddr = kmap(bitmap_bh->b_page); bitmap = bitmap_kaddr + bh_offset(bitmap_bh); lock = nilfs_mdt_bgl_lock(inode, group); j = i; entry_start = rounddown(group_offset, epb); do { if (!nilfs_clear_bit_atomic(lock, group_offset, bitmap)) { nilfs_warn(inode->i_sb, "%s (ino=%lu): entry number %llu already freed", __func__, inode->i_ino, (unsigned long long)entry_nrs[j]); } else { n++; } j++; if (j >= nitems || entry_nrs[j] < group_min_nr || entry_nrs[j] >= group_min_nr + epg) { change_group = true; } else { group_offset = entry_nrs[j] - group_min_nr; if (group_offset >= entry_start && group_offset < entry_start + epb) { /* This entry is in the same block */ continue; } } /* Test if the entry block is empty or not */ end = entry_start + epb; pos = nilfs_find_next_bit(bitmap, end, entry_start); if (pos >= end) { last_nrs[nempties++] = entry_nrs[j - 1]; if (nempties >= ARRAY_SIZE(last_nrs)) break; } if (change_group) break; /* Go on to the next entry block */ entry_start = rounddown(group_offset, epb); } while (true); kunmap(bitmap_bh->b_page); mark_buffer_dirty(bitmap_bh); brelse(bitmap_bh); for (k = 0; k < nempties; k++) { ret = nilfs_palloc_delete_entry_block(inode, last_nrs[k]); if (ret && ret != -ENOENT) nilfs_warn(inode->i_sb, "error %d deleting block that object (entry=%llu, ino=%lu) belongs to", ret, (unsigned long long)last_nrs[k], inode->i_ino); } desc_kaddr = kmap_atomic(desc_bh->b_page); desc = nilfs_palloc_block_get_group_desc( inode, group, desc_bh, desc_kaddr); nfree = nilfs_palloc_group_desc_add_entries(desc, lock, n); kunmap_atomic(desc_kaddr); mark_buffer_dirty(desc_bh); nilfs_mdt_mark_dirty(inode); brelse(desc_bh); if (nfree == nilfs_palloc_entries_per_group(inode)) { ret = nilfs_palloc_delete_bitmap_block(inode, group); if (ret && ret != -ENOENT) nilfs_warn(inode->i_sb, 
"error %d deleting bitmap block of group=%lu, ino=%lu", ret, group, inode->i_ino); } } return 0; } void nilfs_palloc_setup_cache(struct inode *inode, struct nilfs_palloc_cache *cache) { NILFS_MDT(inode)->mi_palloc_cache = cache; spin_lock_init(&cache->lock); } void nilfs_palloc_clear_cache(struct inode *inode) { struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache; spin_lock(&cache->lock); brelse(cache->prev_desc.bh); brelse(cache->prev_bitmap.bh); brelse(cache->prev_entry.bh); cache->prev_desc.bh = NULL; cache->prev_bitmap.bh = NULL; cache->prev_entry.bh = NULL; spin_unlock(&cache->lock); } void nilfs_palloc_destroy_cache(struct inode *inode) { nilfs_palloc_clear_cache(inode); NILFS_MDT(inode)->mi_palloc_cache = NULL; }
linux-master
fs/nilfs2/alloc.c
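The allocator above lays each group out as one bitmap block followed by its entry blocks, and prefixes every run of groups with a descriptor block; the *_blkoff() helpers are pure arithmetic over that layout. The sketch below reproduces the same arithmetic in standalone C so the mapping from an entry number to a file block offset can be checked by hand; the parameter values in main() are made up, not NILFS defaults.

/* Userspace sketch of the persistent-allocator block geometry:
 * [descriptor block][group 0: bitmap + entry blocks][group 1: ...]...
 * Field names mirror the kernel's nilfs_mdt_info fields.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_palloc_geometry {
	unsigned long entries_per_group;
	unsigned long entries_per_block;
	unsigned long groups_per_desc_block;
	unsigned long blocks_per_group;		/* entry blocks + 1 bitmap block */
	unsigned long blocks_per_desc_block;	/* groups' blocks + 1 descriptor block */
};

static void demo_init(struct demo_palloc_geometry *g)
{
	/* DIV_ROUND_UP(entries_per_group, entries_per_block) + bitmap block */
	g->blocks_per_group =
		(g->entries_per_group + g->entries_per_block - 1) /
		g->entries_per_block + 1;
	g->blocks_per_desc_block =
		g->groups_per_desc_block * g->blocks_per_group + 1;
}

static unsigned long demo_desc_blkoff(const struct demo_palloc_geometry *g,
				      unsigned long group)
{
	return (group / g->groups_per_desc_block) * g->blocks_per_desc_block;
}

static unsigned long demo_bitmap_blkoff(const struct demo_palloc_geometry *g,
					unsigned long group)
{
	return demo_desc_blkoff(g, group) + 1 +
		(group % g->groups_per_desc_block) * g->blocks_per_group;
}

static unsigned long demo_entry_blkoff(const struct demo_palloc_geometry *g,
				       uint64_t nr)
{
	unsigned long group = nr / g->entries_per_group;
	unsigned long group_offset = nr % g->entries_per_group;

	return demo_bitmap_blkoff(g, group) + 1 +
		group_offset / g->entries_per_block;
}

int main(void)
{
	struct demo_palloc_geometry g = {
		.entries_per_group = 8 * 4096,	/* one bit per entry in a 4 KiB bitmap */
		.entries_per_block = 32,
		.groups_per_desc_block = 128,
	};

	demo_init(&g);
	printf("entry 100000 lives in file block %lu\n",
	       demo_entry_blkoff(&g, 100000));
	return 0;
}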
// SPDX-License-Identifier: GPL-2.0+ /* * the_nilfs shared structure. * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * * Written by Ryusuke Konishi. * */ #include <linux/buffer_head.h> #include <linux/slab.h> #include <linux/blkdev.h> #include <linux/backing-dev.h> #include <linux/random.h> #include <linux/log2.h> #include <linux/crc32.h> #include "nilfs.h" #include "segment.h" #include "alloc.h" #include "cpfile.h" #include "sufile.h" #include "dat.h" #include "segbuf.h" static int nilfs_valid_sb(struct nilfs_super_block *sbp); void nilfs_set_last_segment(struct the_nilfs *nilfs, sector_t start_blocknr, u64 seq, __u64 cno) { spin_lock(&nilfs->ns_last_segment_lock); nilfs->ns_last_pseg = start_blocknr; nilfs->ns_last_seq = seq; nilfs->ns_last_cno = cno; if (!nilfs_sb_dirty(nilfs)) { if (nilfs->ns_prev_seq == nilfs->ns_last_seq) goto stay_cursor; set_nilfs_sb_dirty(nilfs); } nilfs->ns_prev_seq = nilfs->ns_last_seq; stay_cursor: spin_unlock(&nilfs->ns_last_segment_lock); } /** * alloc_nilfs - allocate a nilfs object * @sb: super block instance * * Return Value: On success, pointer to the_nilfs is returned. * On error, NULL is returned. */ struct the_nilfs *alloc_nilfs(struct super_block *sb) { struct the_nilfs *nilfs; nilfs = kzalloc(sizeof(*nilfs), GFP_KERNEL); if (!nilfs) return NULL; nilfs->ns_sb = sb; nilfs->ns_bdev = sb->s_bdev; atomic_set(&nilfs->ns_ndirtyblks, 0); init_rwsem(&nilfs->ns_sem); mutex_init(&nilfs->ns_snapshot_mount_mutex); INIT_LIST_HEAD(&nilfs->ns_dirty_files); INIT_LIST_HEAD(&nilfs->ns_gc_inodes); spin_lock_init(&nilfs->ns_inode_lock); spin_lock_init(&nilfs->ns_next_gen_lock); spin_lock_init(&nilfs->ns_last_segment_lock); nilfs->ns_cptree = RB_ROOT; spin_lock_init(&nilfs->ns_cptree_lock); init_rwsem(&nilfs->ns_segctor_sem); nilfs->ns_sb_update_freq = NILFS_SB_FREQ; return nilfs; } /** * destroy_nilfs - destroy nilfs object * @nilfs: nilfs object to be released */ void destroy_nilfs(struct the_nilfs *nilfs) { might_sleep(); if (nilfs_init(nilfs)) { brelse(nilfs->ns_sbh[0]); brelse(nilfs->ns_sbh[1]); } kfree(nilfs); } static int nilfs_load_super_root(struct the_nilfs *nilfs, struct super_block *sb, sector_t sr_block) { struct buffer_head *bh_sr; struct nilfs_super_root *raw_sr; struct nilfs_super_block **sbp = nilfs->ns_sbp; struct nilfs_inode *rawi; unsigned int dat_entry_size, segment_usage_size, checkpoint_size; unsigned int inode_size; int err; err = nilfs_read_super_root_block(nilfs, sr_block, &bh_sr, 1); if (unlikely(err)) return err; down_read(&nilfs->ns_sem); dat_entry_size = le16_to_cpu(sbp[0]->s_dat_entry_size); checkpoint_size = le16_to_cpu(sbp[0]->s_checkpoint_size); segment_usage_size = le16_to_cpu(sbp[0]->s_segment_usage_size); up_read(&nilfs->ns_sem); inode_size = nilfs->ns_inode_size; rawi = (void *)bh_sr->b_data + NILFS_SR_DAT_OFFSET(inode_size); err = nilfs_dat_read(sb, dat_entry_size, rawi, &nilfs->ns_dat); if (err) goto failed; rawi = (void *)bh_sr->b_data + NILFS_SR_CPFILE_OFFSET(inode_size); err = nilfs_cpfile_read(sb, checkpoint_size, rawi, &nilfs->ns_cpfile); if (err) goto failed_dat; rawi = (void *)bh_sr->b_data + NILFS_SR_SUFILE_OFFSET(inode_size); err = nilfs_sufile_read(sb, segment_usage_size, rawi, &nilfs->ns_sufile); if (err) goto failed_cpfile; raw_sr = (struct nilfs_super_root *)bh_sr->b_data; nilfs->ns_nongc_ctime = le64_to_cpu(raw_sr->sr_nongc_ctime); failed: brelse(bh_sr); return err; failed_cpfile: iput(nilfs->ns_cpfile); failed_dat: iput(nilfs->ns_dat); goto failed; } static void nilfs_init_recovery_info(struct 
nilfs_recovery_info *ri) { memset(ri, 0, sizeof(*ri)); INIT_LIST_HEAD(&ri->ri_used_segments); } static void nilfs_clear_recovery_info(struct nilfs_recovery_info *ri) { nilfs_dispose_segment_list(&ri->ri_used_segments); } /** * nilfs_store_log_cursor - load log cursor from a super block * @nilfs: nilfs object * @sbp: buffer storing super block to be read * * nilfs_store_log_cursor() reads the last position of the log * containing a super root from a given super block, and initializes * relevant information on the nilfs object preparatory for log * scanning and recovery. */ static int nilfs_store_log_cursor(struct the_nilfs *nilfs, struct nilfs_super_block *sbp) { int ret = 0; nilfs->ns_last_pseg = le64_to_cpu(sbp->s_last_pseg); nilfs->ns_last_cno = le64_to_cpu(sbp->s_last_cno); nilfs->ns_last_seq = le64_to_cpu(sbp->s_last_seq); nilfs->ns_prev_seq = nilfs->ns_last_seq; nilfs->ns_seg_seq = nilfs->ns_last_seq; nilfs->ns_segnum = nilfs_get_segnum_of_block(nilfs, nilfs->ns_last_pseg); nilfs->ns_cno = nilfs->ns_last_cno + 1; if (nilfs->ns_segnum >= nilfs->ns_nsegments) { nilfs_err(nilfs->ns_sb, "pointed segment number is out of range: segnum=%llu, nsegments=%lu", (unsigned long long)nilfs->ns_segnum, nilfs->ns_nsegments); ret = -EINVAL; } return ret; } /** * nilfs_get_blocksize - get block size from raw superblock data * @sb: super block instance * @sbp: superblock raw data buffer * @blocksize: place to store block size * * nilfs_get_blocksize() calculates the block size from the block size * exponent information written in @sbp and stores it in @blocksize, * or aborts with an error message if it's too large. * * Return Value: On success, 0 is returned. If the block size is too * large, -EINVAL is returned. */ static int nilfs_get_blocksize(struct super_block *sb, struct nilfs_super_block *sbp, int *blocksize) { unsigned int shift_bits = le32_to_cpu(sbp->s_log_block_size); if (unlikely(shift_bits > ilog2(NILFS_MAX_BLOCK_SIZE) - BLOCK_SIZE_BITS)) { nilfs_err(sb, "too large filesystem blocksize: 2 ^ %u KiB", shift_bits); return -EINVAL; } *blocksize = BLOCK_SIZE << shift_bits; return 0; } /** * load_nilfs - load and recover the nilfs * @nilfs: the_nilfs structure to be released * @sb: super block instance used to recover past segment * * load_nilfs() searches and load the latest super root, * attaches the last segment, and does recovery if needed. * The caller must call this exclusively for simultaneous mounts. */ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb) { struct nilfs_recovery_info ri; unsigned int s_flags = sb->s_flags; int really_read_only = bdev_read_only(nilfs->ns_bdev); int valid_fs = nilfs_valid_fs(nilfs); int err; if (!valid_fs) { nilfs_warn(sb, "mounting unchecked fs"); if (s_flags & SB_RDONLY) { nilfs_info(sb, "recovery required for readonly filesystem"); nilfs_info(sb, "write access will be enabled during recovery"); } } nilfs_init_recovery_info(&ri); err = nilfs_search_super_root(nilfs, &ri); if (unlikely(err)) { struct nilfs_super_block **sbp = nilfs->ns_sbp; int blocksize; if (err != -EINVAL) goto scan_error; if (!nilfs_valid_sb(sbp[1])) { nilfs_warn(sb, "unable to fall back to spare super block"); goto scan_error; } nilfs_info(sb, "trying rollback from an earlier position"); /* * restore super block with its spare and reconfigure * relevant states of the nilfs object. 
*/ memcpy(sbp[0], sbp[1], nilfs->ns_sbsize); nilfs->ns_crc_seed = le32_to_cpu(sbp[0]->s_crc_seed); nilfs->ns_sbwtime = le64_to_cpu(sbp[0]->s_wtime); /* verify consistency between two super blocks */ err = nilfs_get_blocksize(sb, sbp[0], &blocksize); if (err) goto scan_error; if (blocksize != nilfs->ns_blocksize) { nilfs_warn(sb, "blocksize differs between two super blocks (%d != %d)", blocksize, nilfs->ns_blocksize); err = -EINVAL; goto scan_error; } err = nilfs_store_log_cursor(nilfs, sbp[0]); if (err) goto scan_error; /* drop clean flag to allow roll-forward and recovery */ nilfs->ns_mount_state &= ~NILFS_VALID_FS; valid_fs = 0; err = nilfs_search_super_root(nilfs, &ri); if (err) goto scan_error; } err = nilfs_load_super_root(nilfs, sb, ri.ri_super_root); if (unlikely(err)) { nilfs_err(sb, "error %d while loading super root", err); goto failed; } err = nilfs_sysfs_create_device_group(sb); if (unlikely(err)) goto sysfs_error; if (valid_fs) goto skip_recovery; if (s_flags & SB_RDONLY) { __u64 features; if (nilfs_test_opt(nilfs, NORECOVERY)) { nilfs_info(sb, "norecovery option specified, skipping roll-forward recovery"); goto skip_recovery; } features = le64_to_cpu(nilfs->ns_sbp[0]->s_feature_compat_ro) & ~NILFS_FEATURE_COMPAT_RO_SUPP; if (features) { nilfs_err(sb, "couldn't proceed with recovery because of unsupported optional features (%llx)", (unsigned long long)features); err = -EROFS; goto failed_unload; } if (really_read_only) { nilfs_err(sb, "write access unavailable, cannot proceed"); err = -EROFS; goto failed_unload; } sb->s_flags &= ~SB_RDONLY; } else if (nilfs_test_opt(nilfs, NORECOVERY)) { nilfs_err(sb, "recovery cancelled because norecovery option was specified for a read/write mount"); err = -EINVAL; goto failed_unload; } err = nilfs_salvage_orphan_logs(nilfs, sb, &ri); if (err) goto failed_unload; down_write(&nilfs->ns_sem); nilfs->ns_mount_state |= NILFS_VALID_FS; /* set "clean" flag */ err = nilfs_cleanup_super(sb); up_write(&nilfs->ns_sem); if (err) { nilfs_err(sb, "error %d updating super block. 
recovery unfinished.", err); goto failed_unload; } nilfs_info(sb, "recovery complete"); skip_recovery: nilfs_clear_recovery_info(&ri); sb->s_flags = s_flags; return 0; scan_error: nilfs_err(sb, "error %d while searching super root", err); goto failed; failed_unload: nilfs_sysfs_delete_device_group(nilfs); sysfs_error: iput(nilfs->ns_cpfile); iput(nilfs->ns_sufile); iput(nilfs->ns_dat); failed: nilfs_clear_recovery_info(&ri); sb->s_flags = s_flags; return err; } static unsigned long long nilfs_max_size(unsigned int blkbits) { unsigned int max_bits; unsigned long long res = MAX_LFS_FILESIZE; /* page cache limit */ max_bits = blkbits + NILFS_BMAP_KEY_BIT; /* bmap size limit */ if (max_bits < 64) res = min_t(unsigned long long, res, (1ULL << max_bits) - 1); return res; } /** * nilfs_nrsvsegs - calculate the number of reserved segments * @nilfs: nilfs object * @nsegs: total number of segments */ unsigned long nilfs_nrsvsegs(struct the_nilfs *nilfs, unsigned long nsegs) { return max_t(unsigned long, NILFS_MIN_NRSVSEGS, DIV_ROUND_UP(nsegs * nilfs->ns_r_segments_percentage, 100)); } /** * nilfs_max_segment_count - calculate the maximum number of segments * @nilfs: nilfs object */ static u64 nilfs_max_segment_count(struct the_nilfs *nilfs) { u64 max_count = U64_MAX; do_div(max_count, nilfs->ns_blocks_per_segment); return min_t(u64, max_count, ULONG_MAX); } void nilfs_set_nsegments(struct the_nilfs *nilfs, unsigned long nsegs) { nilfs->ns_nsegments = nsegs; nilfs->ns_nrsvsegs = nilfs_nrsvsegs(nilfs, nsegs); } static int nilfs_store_disk_layout(struct the_nilfs *nilfs, struct nilfs_super_block *sbp) { u64 nsegments, nblocks; if (le32_to_cpu(sbp->s_rev_level) < NILFS_MIN_SUPP_REV) { nilfs_err(nilfs->ns_sb, "unsupported revision (superblock rev.=%d.%d, current rev.=%d.%d). 
Please check the version of mkfs.nilfs(2).", le32_to_cpu(sbp->s_rev_level), le16_to_cpu(sbp->s_minor_rev_level), NILFS_CURRENT_REV, NILFS_MINOR_REV); return -EINVAL; } nilfs->ns_sbsize = le16_to_cpu(sbp->s_bytes); if (nilfs->ns_sbsize > BLOCK_SIZE) return -EINVAL; nilfs->ns_inode_size = le16_to_cpu(sbp->s_inode_size); if (nilfs->ns_inode_size > nilfs->ns_blocksize) { nilfs_err(nilfs->ns_sb, "too large inode size: %d bytes", nilfs->ns_inode_size); return -EINVAL; } else if (nilfs->ns_inode_size < NILFS_MIN_INODE_SIZE) { nilfs_err(nilfs->ns_sb, "too small inode size: %d bytes", nilfs->ns_inode_size); return -EINVAL; } nilfs->ns_first_ino = le32_to_cpu(sbp->s_first_ino); nilfs->ns_blocks_per_segment = le32_to_cpu(sbp->s_blocks_per_segment); if (nilfs->ns_blocks_per_segment < NILFS_SEG_MIN_BLOCKS) { nilfs_err(nilfs->ns_sb, "too short segment: %lu blocks", nilfs->ns_blocks_per_segment); return -EINVAL; } nilfs->ns_first_data_block = le64_to_cpu(sbp->s_first_data_block); nilfs->ns_r_segments_percentage = le32_to_cpu(sbp->s_r_segments_percentage); if (nilfs->ns_r_segments_percentage < 1 || nilfs->ns_r_segments_percentage > 99) { nilfs_err(nilfs->ns_sb, "invalid reserved segments percentage: %lu", nilfs->ns_r_segments_percentage); return -EINVAL; } nsegments = le64_to_cpu(sbp->s_nsegments); if (nsegments > nilfs_max_segment_count(nilfs)) { nilfs_err(nilfs->ns_sb, "segment count %llu exceeds upper limit (%llu segments)", (unsigned long long)nsegments, (unsigned long long)nilfs_max_segment_count(nilfs)); return -EINVAL; } nblocks = sb_bdev_nr_blocks(nilfs->ns_sb); if (nblocks) { u64 min_block_count = nsegments * nilfs->ns_blocks_per_segment; /* * To avoid failing to mount early device images without a * second superblock, exclude that block count from the * "min_block_count" calculation. */ if (nblocks < min_block_count) { nilfs_err(nilfs->ns_sb, "total number of segment blocks %llu exceeds device size (%llu blocks)", (unsigned long long)min_block_count, (unsigned long long)nblocks); return -EINVAL; } } nilfs_set_nsegments(nilfs, nsegments); nilfs->ns_crc_seed = le32_to_cpu(sbp->s_crc_seed); return 0; } static int nilfs_valid_sb(struct nilfs_super_block *sbp) { static unsigned char sum[4]; const int sumoff = offsetof(struct nilfs_super_block, s_sum); size_t bytes; u32 crc; if (!sbp || le16_to_cpu(sbp->s_magic) != NILFS_SUPER_MAGIC) return 0; bytes = le16_to_cpu(sbp->s_bytes); if (bytes < sumoff + 4 || bytes > BLOCK_SIZE) return 0; crc = crc32_le(le32_to_cpu(sbp->s_crc_seed), (unsigned char *)sbp, sumoff); crc = crc32_le(crc, sum, 4); crc = crc32_le(crc, (unsigned char *)sbp + sumoff + 4, bytes - sumoff - 4); return crc == le32_to_cpu(sbp->s_sum); } /** * nilfs_sb2_bad_offset - check the location of the second superblock * @sbp: superblock raw data buffer * @offset: byte offset of second superblock calculated from device size * * nilfs_sb2_bad_offset() checks if the position on the second * superblock is valid or not based on the filesystem parameters * stored in @sbp. If @offset points to a location within the segment * area, or if the parameters themselves are not normal, it is * determined to be invalid. * * Return Value: true if invalid, false if valid. 
*/ static bool nilfs_sb2_bad_offset(struct nilfs_super_block *sbp, u64 offset) { unsigned int shift_bits = le32_to_cpu(sbp->s_log_block_size); u32 blocks_per_segment = le32_to_cpu(sbp->s_blocks_per_segment); u64 nsegments = le64_to_cpu(sbp->s_nsegments); u64 index; if (blocks_per_segment < NILFS_SEG_MIN_BLOCKS || shift_bits > ilog2(NILFS_MAX_BLOCK_SIZE) - BLOCK_SIZE_BITS) return true; index = offset >> (shift_bits + BLOCK_SIZE_BITS); do_div(index, blocks_per_segment); return index < nsegments; } static void nilfs_release_super_block(struct the_nilfs *nilfs) { int i; for (i = 0; i < 2; i++) { if (nilfs->ns_sbp[i]) { brelse(nilfs->ns_sbh[i]); nilfs->ns_sbh[i] = NULL; nilfs->ns_sbp[i] = NULL; } } } void nilfs_fall_back_super_block(struct the_nilfs *nilfs) { brelse(nilfs->ns_sbh[0]); nilfs->ns_sbh[0] = nilfs->ns_sbh[1]; nilfs->ns_sbp[0] = nilfs->ns_sbp[1]; nilfs->ns_sbh[1] = NULL; nilfs->ns_sbp[1] = NULL; } void nilfs_swap_super_block(struct the_nilfs *nilfs) { struct buffer_head *tsbh = nilfs->ns_sbh[0]; struct nilfs_super_block *tsbp = nilfs->ns_sbp[0]; nilfs->ns_sbh[0] = nilfs->ns_sbh[1]; nilfs->ns_sbp[0] = nilfs->ns_sbp[1]; nilfs->ns_sbh[1] = tsbh; nilfs->ns_sbp[1] = tsbp; } static int nilfs_load_super_block(struct the_nilfs *nilfs, struct super_block *sb, int blocksize, struct nilfs_super_block **sbpp) { struct nilfs_super_block **sbp = nilfs->ns_sbp; struct buffer_head **sbh = nilfs->ns_sbh; u64 sb2off, devsize = bdev_nr_bytes(nilfs->ns_bdev); int valid[2], swp = 0; if (devsize < NILFS_SEG_MIN_BLOCKS * NILFS_MIN_BLOCK_SIZE + 4096) { nilfs_err(sb, "device size too small"); return -EINVAL; } sb2off = NILFS_SB2_OFFSET_BYTES(devsize); sbp[0] = nilfs_read_super_block(sb, NILFS_SB_OFFSET_BYTES, blocksize, &sbh[0]); sbp[1] = nilfs_read_super_block(sb, sb2off, blocksize, &sbh[1]); if (!sbp[0]) { if (!sbp[1]) { nilfs_err(sb, "unable to read superblock"); return -EIO; } nilfs_warn(sb, "unable to read primary superblock (blocksize = %d)", blocksize); } else if (!sbp[1]) { nilfs_warn(sb, "unable to read secondary superblock (blocksize = %d)", blocksize); } /* * Compare two super blocks and set 1 in swp if the secondary * super block is valid and newer. Otherwise, set 0 in swp. */ valid[0] = nilfs_valid_sb(sbp[0]); valid[1] = nilfs_valid_sb(sbp[1]); swp = valid[1] && (!valid[0] || le64_to_cpu(sbp[1]->s_last_cno) > le64_to_cpu(sbp[0]->s_last_cno)); if (valid[swp] && nilfs_sb2_bad_offset(sbp[swp], sb2off)) { brelse(sbh[1]); sbh[1] = NULL; sbp[1] = NULL; valid[1] = 0; swp = 0; } if (!valid[swp]) { nilfs_release_super_block(nilfs); nilfs_err(sb, "couldn't find nilfs on the device"); return -EINVAL; } if (!valid[!swp]) nilfs_warn(sb, "broken superblock, retrying with spare superblock (blocksize = %d)", blocksize); if (swp) nilfs_swap_super_block(nilfs); nilfs->ns_sbwcount = 0; nilfs->ns_sbwtime = le64_to_cpu(sbp[0]->s_wtime); nilfs->ns_prot_seq = le64_to_cpu(sbp[valid[1] & !swp]->s_last_seq); *sbpp = sbp[0]; return 0; } /** * init_nilfs - initialize a NILFS instance. * @nilfs: the_nilfs structure * @sb: super block * @data: mount options * * init_nilfs() performs common initialization per block device (e.g. * reading the super block, getting disk layout information, initializing * shared fields in the_nilfs). * * Return Value: On success, 0 is returned. On error, a negative error * code is returned. 
*/ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data) { struct nilfs_super_block *sbp; int blocksize; int err; down_write(&nilfs->ns_sem); blocksize = sb_min_blocksize(sb, NILFS_MIN_BLOCK_SIZE); if (!blocksize) { nilfs_err(sb, "unable to set blocksize"); err = -EINVAL; goto out; } err = nilfs_load_super_block(nilfs, sb, blocksize, &sbp); if (err) goto out; err = nilfs_store_magic_and_option(sb, sbp, data); if (err) goto failed_sbh; err = nilfs_check_feature_compatibility(sb, sbp); if (err) goto failed_sbh; err = nilfs_get_blocksize(sb, sbp, &blocksize); if (err) goto failed_sbh; if (blocksize < NILFS_MIN_BLOCK_SIZE) { nilfs_err(sb, "couldn't mount because of unsupported filesystem blocksize %d", blocksize); err = -EINVAL; goto failed_sbh; } if (sb->s_blocksize != blocksize) { int hw_blocksize = bdev_logical_block_size(sb->s_bdev); if (blocksize < hw_blocksize) { nilfs_err(sb, "blocksize %d too small for device (sector-size = %d)", blocksize, hw_blocksize); err = -EINVAL; goto failed_sbh; } nilfs_release_super_block(nilfs); sb_set_blocksize(sb, blocksize); err = nilfs_load_super_block(nilfs, sb, blocksize, &sbp); if (err) goto out; /* * Not to failed_sbh; sbh is released automatically * when reloading fails. */ } nilfs->ns_blocksize_bits = sb->s_blocksize_bits; nilfs->ns_blocksize = blocksize; get_random_bytes(&nilfs->ns_next_generation, sizeof(nilfs->ns_next_generation)); err = nilfs_store_disk_layout(nilfs, sbp); if (err) goto failed_sbh; sb->s_maxbytes = nilfs_max_size(sb->s_blocksize_bits); nilfs->ns_mount_state = le16_to_cpu(sbp->s_state); err = nilfs_store_log_cursor(nilfs, sbp); if (err) goto failed_sbh; set_nilfs_init(nilfs); err = 0; out: up_write(&nilfs->ns_sem); return err; failed_sbh: nilfs_release_super_block(nilfs); goto out; } int nilfs_discard_segments(struct the_nilfs *nilfs, __u64 *segnump, size_t nsegs) { sector_t seg_start, seg_end; sector_t start = 0, nblocks = 0; unsigned int sects_per_block; __u64 *sn; int ret = 0; sects_per_block = (1 << nilfs->ns_blocksize_bits) / bdev_logical_block_size(nilfs->ns_bdev); for (sn = segnump; sn < segnump + nsegs; sn++) { nilfs_get_segment_range(nilfs, *sn, &seg_start, &seg_end); if (!nblocks) { start = seg_start; nblocks = seg_end - seg_start + 1; } else if (start + nblocks == seg_start) { nblocks += seg_end - seg_start + 1; } else { ret = blkdev_issue_discard(nilfs->ns_bdev, start * sects_per_block, nblocks * sects_per_block, GFP_NOFS); if (ret < 0) return ret; nblocks = 0; } } if (nblocks) ret = blkdev_issue_discard(nilfs->ns_bdev, start * sects_per_block, nblocks * sects_per_block, GFP_NOFS); return ret; } int nilfs_count_free_blocks(struct the_nilfs *nilfs, sector_t *nblocks) { unsigned long ncleansegs; ncleansegs = nilfs_sufile_get_ncleansegs(nilfs->ns_sufile); *nblocks = (sector_t)ncleansegs * nilfs->ns_blocks_per_segment; return 0; } int nilfs_near_disk_full(struct the_nilfs *nilfs) { unsigned long ncleansegs, nincsegs; ncleansegs = nilfs_sufile_get_ncleansegs(nilfs->ns_sufile); nincsegs = atomic_read(&nilfs->ns_ndirtyblks) / nilfs->ns_blocks_per_segment + 1; return ncleansegs <= nilfs->ns_nrsvsegs + nincsegs; } struct nilfs_root *nilfs_lookup_root(struct the_nilfs *nilfs, __u64 cno) { struct rb_node *n; struct nilfs_root *root; spin_lock(&nilfs->ns_cptree_lock); n = nilfs->ns_cptree.rb_node; while (n) { root = rb_entry(n, struct nilfs_root, rb_node); if (cno < root->cno) { n = n->rb_left; } else if (cno > root->cno) { n = n->rb_right; } else { refcount_inc(&root->count); 
			spin_unlock(&nilfs->ns_cptree_lock);
			return root;
		}
	}
	spin_unlock(&nilfs->ns_cptree_lock);
	return NULL;
}

struct nilfs_root *
nilfs_find_or_create_root(struct the_nilfs *nilfs, __u64 cno)
{
	struct rb_node **p, *parent;
	struct nilfs_root *root, *new;
	int err;

	root = nilfs_lookup_root(nilfs, cno);
	if (root)
		return root;

	new = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!new)
		return NULL;

	spin_lock(&nilfs->ns_cptree_lock);

	p = &nilfs->ns_cptree.rb_node;
	parent = NULL;

	while (*p) {
		parent = *p;
		root = rb_entry(parent, struct nilfs_root, rb_node);

		if (cno < root->cno) {
			p = &(*p)->rb_left;
		} else if (cno > root->cno) {
			p = &(*p)->rb_right;
		} else {
			refcount_inc(&root->count);
			spin_unlock(&nilfs->ns_cptree_lock);
			kfree(new);
			return root;
		}
	}

	new->cno = cno;
	new->ifile = NULL;
	new->nilfs = nilfs;
	refcount_set(&new->count, 1);
	atomic64_set(&new->inodes_count, 0);
	atomic64_set(&new->blocks_count, 0);
	rb_link_node(&new->rb_node, parent, p);
	rb_insert_color(&new->rb_node, &nilfs->ns_cptree);

	spin_unlock(&nilfs->ns_cptree_lock);

	err = nilfs_sysfs_create_snapshot_group(new);
	if (err) {
		kfree(new);
		new = NULL;
	}

	return new;
}

void nilfs_put_root(struct nilfs_root *root)
{
	struct the_nilfs *nilfs = root->nilfs;

	if (refcount_dec_and_lock(&root->count, &nilfs->ns_cptree_lock)) {
		rb_erase(&root->rb_node, &nilfs->ns_cptree);
		spin_unlock(&nilfs->ns_cptree_lock);

		nilfs_sysfs_delete_snapshot_group(root);
		iput(root->ifile);
		kfree(root);
	}
}
linux-master
fs/nilfs2/the_nilfs.c
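nilfs_nrsvsegs() and nilfs_near_disk_full() above are simple arithmetic: reserve at least NILFS_MIN_NRSVSEGS segments or the configured percentage of all segments (rounded up), and report "near full" once the clean segments would no longer cover that reserve plus the segments implied by the dirty block count. A standalone sketch of the same arithmetic follows; DEMO_MIN_NRSVSEGS and the sample numbers are placeholders rather than the kernel's constants.

/* Userspace sketch of the reserved-segment arithmetic in the_nilfs.c. */
#include <stdio.h>

#define DEMO_MIN_NRSVSEGS 8UL	/* stand-in for NILFS_MIN_NRSVSEGS */

static unsigned long demo_nrsvsegs(unsigned long nsegs, unsigned long r_pct)
{
	unsigned long rsv = (nsegs * r_pct + 99) / 100;	/* DIV_ROUND_UP */

	return rsv > DEMO_MIN_NRSVSEGS ? rsv : DEMO_MIN_NRSVSEGS;
}

static int demo_near_disk_full(unsigned long ncleansegs,
			       unsigned long ndirtyblks,
			       unsigned long blocks_per_segment,
			       unsigned long nrsvsegs)
{
	/* Segments the currently dirty blocks will roughly consume. */
	unsigned long nincsegs = ndirtyblks / blocks_per_segment + 1;

	return ncleansegs <= nrsvsegs + nincsegs;
}

int main(void)
{
	unsigned long nsegs = 2048, r_pct = 5;
	unsigned long rsv = demo_nrsvsegs(nsegs, r_pct);

	printf("reserved segments: %lu\n", rsv);
	printf("near full? %d\n",
	       demo_near_disk_full(64, 4096, 2048, rsv));
	return 0;
}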
// SPDX-License-Identifier: GPL-2.0+ /* * NILFS directory entry operations * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * * Modified for NILFS by Amagai Yoshiji. */ /* * linux/fs/ext2/dir.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card ([email protected]) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/fs/minix/dir.c * * Copyright (C) 1991, 1992 Linus Torvalds * * ext2 directory handling functions * * Big-endian to little-endian byte-swapping/bitmaps by * David S. Miller ([email protected]), 1995 * * All code that works with directory layout had been switched to pagecache * and moved here. AV */ #include <linux/pagemap.h> #include "nilfs.h" #include "page.h" static inline unsigned int nilfs_rec_len_from_disk(__le16 dlen) { unsigned int len = le16_to_cpu(dlen); #if (PAGE_SIZE >= 65536) if (len == NILFS_MAX_REC_LEN) return 1 << 16; #endif return len; } static inline __le16 nilfs_rec_len_to_disk(unsigned int len) { #if (PAGE_SIZE >= 65536) if (len == (1 << 16)) return cpu_to_le16(NILFS_MAX_REC_LEN); BUG_ON(len > (1 << 16)); #endif return cpu_to_le16(len); } /* * nilfs uses block-sized chunks. Arguably, sector-sized ones would be * more robust, but we have what we have */ static inline unsigned int nilfs_chunk_size(struct inode *inode) { return inode->i_sb->s_blocksize; } static inline void nilfs_put_page(struct page *page) { kunmap(page); put_page(page); } /* * Return the offset into page `page_nr' of the last valid * byte in that page, plus one. */ static unsigned int nilfs_last_byte(struct inode *inode, unsigned long page_nr) { unsigned int last_byte = inode->i_size; last_byte -= page_nr << PAGE_SHIFT; if (last_byte > PAGE_SIZE) last_byte = PAGE_SIZE; return last_byte; } static int nilfs_prepare_chunk(struct page *page, unsigned int from, unsigned int to) { loff_t pos = page_offset(page) + from; return __block_write_begin(page, pos, to - from, nilfs_get_block); } static void nilfs_commit_chunk(struct page *page, struct address_space *mapping, unsigned int from, unsigned int to) { struct inode *dir = mapping->host; loff_t pos = page_offset(page) + from; unsigned int len = to - from; unsigned int nr_dirty, copied; int err; nr_dirty = nilfs_page_count_clean_buffers(page, from, to); copied = block_write_end(NULL, mapping, pos, len, len, page, NULL); if (pos + copied > dir->i_size) i_size_write(dir, pos + copied); if (IS_DIRSYNC(dir)) nilfs_set_transaction_flag(NILFS_TI_SYNC); err = nilfs_set_file_dirty(dir, nr_dirty); WARN_ON(err); /* do not happen */ unlock_page(page); } static bool nilfs_check_page(struct page *page) { struct inode *dir = page->mapping->host; struct super_block *sb = dir->i_sb; unsigned int chunk_size = nilfs_chunk_size(dir); char *kaddr = page_address(page); unsigned int offs, rec_len; unsigned int limit = PAGE_SIZE; struct nilfs_dir_entry *p; char *error; if ((dir->i_size >> PAGE_SHIFT) == page->index) { limit = dir->i_size & ~PAGE_MASK; if (limit & (chunk_size - 1)) goto Ebadsize; if (!limit) goto out; } for (offs = 0; offs <= limit - NILFS_DIR_REC_LEN(1); offs += rec_len) { p = (struct nilfs_dir_entry *)(kaddr + offs); rec_len = nilfs_rec_len_from_disk(p->rec_len); if (rec_len < NILFS_DIR_REC_LEN(1)) goto Eshort; if (rec_len & 3) goto Ealign; if (rec_len < NILFS_DIR_REC_LEN(p->name_len)) goto Enamelen; if (((offs + rec_len - 1) ^ offs) & ~(chunk_size-1)) goto Espan; } if (offs != limit) goto Eend; out: SetPageChecked(page); return true; /* Too bad, we had an error */ Ebadsize: 
nilfs_error(sb, "size of directory #%lu is not a multiple of chunk size", dir->i_ino); goto fail; Eshort: error = "rec_len is smaller than minimal"; goto bad_entry; Ealign: error = "unaligned directory entry"; goto bad_entry; Enamelen: error = "rec_len is too small for name_len"; goto bad_entry; Espan: error = "directory entry across blocks"; bad_entry: nilfs_error(sb, "bad entry in directory #%lu: %s - offset=%lu, inode=%lu, rec_len=%d, name_len=%d", dir->i_ino, error, (page->index << PAGE_SHIFT) + offs, (unsigned long)le64_to_cpu(p->inode), rec_len, p->name_len); goto fail; Eend: p = (struct nilfs_dir_entry *)(kaddr + offs); nilfs_error(sb, "entry in directory #%lu spans the page boundary offset=%lu, inode=%lu", dir->i_ino, (page->index << PAGE_SHIFT) + offs, (unsigned long)le64_to_cpu(p->inode)); fail: SetPageError(page); return false; } static struct page *nilfs_get_page(struct inode *dir, unsigned long n) { struct address_space *mapping = dir->i_mapping; struct page *page = read_mapping_page(mapping, n, NULL); if (!IS_ERR(page)) { kmap(page); if (unlikely(!PageChecked(page))) { if (!nilfs_check_page(page)) goto fail; } } return page; fail: nilfs_put_page(page); return ERR_PTR(-EIO); } /* * NOTE! unlike strncmp, nilfs_match returns 1 for success, 0 for failure. * * len <= NILFS_NAME_LEN and de != NULL are guaranteed by caller. */ static int nilfs_match(int len, const unsigned char *name, struct nilfs_dir_entry *de) { if (len != de->name_len) return 0; if (!de->inode) return 0; return !memcmp(name, de->name, len); } /* * p is at least 6 bytes before the end of page */ static struct nilfs_dir_entry *nilfs_next_entry(struct nilfs_dir_entry *p) { return (struct nilfs_dir_entry *)((char *)p + nilfs_rec_len_from_disk(p->rec_len)); } static unsigned char nilfs_filetype_table[NILFS_FT_MAX] = { [NILFS_FT_UNKNOWN] = DT_UNKNOWN, [NILFS_FT_REG_FILE] = DT_REG, [NILFS_FT_DIR] = DT_DIR, [NILFS_FT_CHRDEV] = DT_CHR, [NILFS_FT_BLKDEV] = DT_BLK, [NILFS_FT_FIFO] = DT_FIFO, [NILFS_FT_SOCK] = DT_SOCK, [NILFS_FT_SYMLINK] = DT_LNK, }; #define S_SHIFT 12 static unsigned char nilfs_type_by_mode[S_IFMT >> S_SHIFT] = { [S_IFREG >> S_SHIFT] = NILFS_FT_REG_FILE, [S_IFDIR >> S_SHIFT] = NILFS_FT_DIR, [S_IFCHR >> S_SHIFT] = NILFS_FT_CHRDEV, [S_IFBLK >> S_SHIFT] = NILFS_FT_BLKDEV, [S_IFIFO >> S_SHIFT] = NILFS_FT_FIFO, [S_IFSOCK >> S_SHIFT] = NILFS_FT_SOCK, [S_IFLNK >> S_SHIFT] = NILFS_FT_SYMLINK, }; static void nilfs_set_de_type(struct nilfs_dir_entry *de, struct inode *inode) { umode_t mode = inode->i_mode; de->file_type = nilfs_type_by_mode[(mode & S_IFMT)>>S_SHIFT]; } static int nilfs_readdir(struct file *file, struct dir_context *ctx) { loff_t pos = ctx->pos; struct inode *inode = file_inode(file); struct super_block *sb = inode->i_sb; unsigned int offset = pos & ~PAGE_MASK; unsigned long n = pos >> PAGE_SHIFT; unsigned long npages = dir_pages(inode); if (pos > inode->i_size - NILFS_DIR_REC_LEN(1)) return 0; for ( ; n < npages; n++, offset = 0) { char *kaddr, *limit; struct nilfs_dir_entry *de; struct page *page = nilfs_get_page(inode, n); if (IS_ERR(page)) { nilfs_error(sb, "bad page in #%lu", inode->i_ino); ctx->pos += PAGE_SIZE - offset; return -EIO; } kaddr = page_address(page); de = (struct nilfs_dir_entry *)(kaddr + offset); limit = kaddr + nilfs_last_byte(inode, n) - NILFS_DIR_REC_LEN(1); for ( ; (char *)de <= limit; de = nilfs_next_entry(de)) { if (de->rec_len == 0) { nilfs_error(sb, "zero-length directory entry"); nilfs_put_page(page); return -EIO; } if (de->inode) { unsigned char t; if (de->file_type < 
NILFS_FT_MAX) t = nilfs_filetype_table[de->file_type]; else t = DT_UNKNOWN; if (!dir_emit(ctx, de->name, de->name_len, le64_to_cpu(de->inode), t)) { nilfs_put_page(page); return 0; } } ctx->pos += nilfs_rec_len_from_disk(de->rec_len); } nilfs_put_page(page); } return 0; } /* * nilfs_find_entry() * * finds an entry in the specified directory with the wanted name. It * returns the page in which the entry was found, and the entry itself * (as a parameter - res_dir). Page is returned mapped and unlocked. * Entry is guaranteed to be valid. */ struct nilfs_dir_entry * nilfs_find_entry(struct inode *dir, const struct qstr *qstr, struct page **res_page) { const unsigned char *name = qstr->name; int namelen = qstr->len; unsigned int reclen = NILFS_DIR_REC_LEN(namelen); unsigned long start, n; unsigned long npages = dir_pages(dir); struct page *page = NULL; struct nilfs_inode_info *ei = NILFS_I(dir); struct nilfs_dir_entry *de; if (npages == 0) goto out; /* OFFSET_CACHE */ *res_page = NULL; start = ei->i_dir_start_lookup; if (start >= npages) start = 0; n = start; do { char *kaddr; page = nilfs_get_page(dir, n); if (!IS_ERR(page)) { kaddr = page_address(page); de = (struct nilfs_dir_entry *)kaddr; kaddr += nilfs_last_byte(dir, n) - reclen; while ((char *) de <= kaddr) { if (de->rec_len == 0) { nilfs_error(dir->i_sb, "zero-length directory entry"); nilfs_put_page(page); goto out; } if (nilfs_match(namelen, name, de)) goto found; de = nilfs_next_entry(de); } nilfs_put_page(page); } if (++n >= npages) n = 0; /* next page is past the blocks we've got */ if (unlikely(n > (dir->i_blocks >> (PAGE_SHIFT - 9)))) { nilfs_error(dir->i_sb, "dir %lu size %lld exceeds block count %llu", dir->i_ino, dir->i_size, (unsigned long long)dir->i_blocks); goto out; } } while (n != start); out: return NULL; found: *res_page = page; ei->i_dir_start_lookup = n; return de; } struct nilfs_dir_entry *nilfs_dotdot(struct inode *dir, struct page **p) { struct page *page = nilfs_get_page(dir, 0); struct nilfs_dir_entry *de = NULL; if (!IS_ERR(page)) { de = nilfs_next_entry( (struct nilfs_dir_entry *)page_address(page)); *p = page; } return de; } ino_t nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr) { ino_t res = 0; struct nilfs_dir_entry *de; struct page *page; de = nilfs_find_entry(dir, qstr, &page); if (de) { res = le64_to_cpu(de->inode); kunmap(page); put_page(page); } return res; } /* Releases the page */ void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de, struct page *page, struct inode *inode) { unsigned int from = (char *)de - (char *)page_address(page); unsigned int to = from + nilfs_rec_len_from_disk(de->rec_len); struct address_space *mapping = page->mapping; int err; lock_page(page); err = nilfs_prepare_chunk(page, from, to); BUG_ON(err); de->inode = cpu_to_le64(inode->i_ino); nilfs_set_de_type(de, inode); nilfs_commit_chunk(page, mapping, from, to); nilfs_put_page(page); dir->i_mtime = inode_set_ctime_current(dir); } /* * Parent is locked. */ int nilfs_add_link(struct dentry *dentry, struct inode *inode) { struct inode *dir = d_inode(dentry->d_parent); const unsigned char *name = dentry->d_name.name; int namelen = dentry->d_name.len; unsigned int chunk_size = nilfs_chunk_size(dir); unsigned int reclen = NILFS_DIR_REC_LEN(namelen); unsigned short rec_len, name_len; struct page *page = NULL; struct nilfs_dir_entry *de; unsigned long npages = dir_pages(dir); unsigned long n; char *kaddr; unsigned int from, to; int err; /* * We take care of directory expansion in the same loop. 
* This code plays outside i_size, so it locks the page * to protect that region. */ for (n = 0; n <= npages; n++) { char *dir_end; page = nilfs_get_page(dir, n); err = PTR_ERR(page); if (IS_ERR(page)) goto out; lock_page(page); kaddr = page_address(page); dir_end = kaddr + nilfs_last_byte(dir, n); de = (struct nilfs_dir_entry *)kaddr; kaddr += PAGE_SIZE - reclen; while ((char *)de <= kaddr) { if ((char *)de == dir_end) { /* We hit i_size */ name_len = 0; rec_len = chunk_size; de->rec_len = nilfs_rec_len_to_disk(chunk_size); de->inode = 0; goto got_it; } if (de->rec_len == 0) { nilfs_error(dir->i_sb, "zero-length directory entry"); err = -EIO; goto out_unlock; } err = -EEXIST; if (nilfs_match(namelen, name, de)) goto out_unlock; name_len = NILFS_DIR_REC_LEN(de->name_len); rec_len = nilfs_rec_len_from_disk(de->rec_len); if (!de->inode && rec_len >= reclen) goto got_it; if (rec_len >= name_len + reclen) goto got_it; de = (struct nilfs_dir_entry *)((char *)de + rec_len); } unlock_page(page); nilfs_put_page(page); } BUG(); return -EINVAL; got_it: from = (char *)de - (char *)page_address(page); to = from + rec_len; err = nilfs_prepare_chunk(page, from, to); if (err) goto out_unlock; if (de->inode) { struct nilfs_dir_entry *de1; de1 = (struct nilfs_dir_entry *)((char *)de + name_len); de1->rec_len = nilfs_rec_len_to_disk(rec_len - name_len); de->rec_len = nilfs_rec_len_to_disk(name_len); de = de1; } de->name_len = namelen; memcpy(de->name, name, namelen); de->inode = cpu_to_le64(inode->i_ino); nilfs_set_de_type(de, inode); nilfs_commit_chunk(page, page->mapping, from, to); dir->i_mtime = inode_set_ctime_current(dir); nilfs_mark_inode_dirty(dir); /* OFFSET_CACHE */ out_put: nilfs_put_page(page); out: return err; out_unlock: unlock_page(page); goto out_put; } /* * nilfs_delete_entry deletes a directory entry by merging it with the * previous entry. Page is up-to-date. Releases the page. */ int nilfs_delete_entry(struct nilfs_dir_entry *dir, struct page *page) { struct address_space *mapping = page->mapping; struct inode *inode = mapping->host; char *kaddr = page_address(page); unsigned int from, to; struct nilfs_dir_entry *de, *pde = NULL; int err; from = ((char *)dir - kaddr) & ~(nilfs_chunk_size(inode) - 1); to = ((char *)dir - kaddr) + nilfs_rec_len_from_disk(dir->rec_len); de = (struct nilfs_dir_entry *)(kaddr + from); while ((char *)de < (char *)dir) { if (de->rec_len == 0) { nilfs_error(inode->i_sb, "zero-length directory entry"); err = -EIO; goto out; } pde = de; de = nilfs_next_entry(de); } if (pde) from = (char *)pde - (char *)page_address(page); lock_page(page); err = nilfs_prepare_chunk(page, from, to); BUG_ON(err); if (pde) pde->rec_len = nilfs_rec_len_to_disk(to - from); dir->inode = 0; nilfs_commit_chunk(page, mapping, from, to); inode->i_mtime = inode_set_ctime_current(inode); out: nilfs_put_page(page); return err; } /* * Set the first fragment of directory. 
*/ int nilfs_make_empty(struct inode *inode, struct inode *parent) { struct address_space *mapping = inode->i_mapping; struct page *page = grab_cache_page(mapping, 0); unsigned int chunk_size = nilfs_chunk_size(inode); struct nilfs_dir_entry *de; int err; void *kaddr; if (!page) return -ENOMEM; err = nilfs_prepare_chunk(page, 0, chunk_size); if (unlikely(err)) { unlock_page(page); goto fail; } kaddr = kmap_atomic(page); memset(kaddr, 0, chunk_size); de = (struct nilfs_dir_entry *)kaddr; de->name_len = 1; de->rec_len = nilfs_rec_len_to_disk(NILFS_DIR_REC_LEN(1)); memcpy(de->name, ".\0\0", 4); de->inode = cpu_to_le64(inode->i_ino); nilfs_set_de_type(de, inode); de = (struct nilfs_dir_entry *)(kaddr + NILFS_DIR_REC_LEN(1)); de->name_len = 2; de->rec_len = nilfs_rec_len_to_disk(chunk_size - NILFS_DIR_REC_LEN(1)); de->inode = cpu_to_le64(parent->i_ino); memcpy(de->name, "..\0", 4); nilfs_set_de_type(de, inode); kunmap_atomic(kaddr); nilfs_commit_chunk(page, mapping, 0, chunk_size); fail: put_page(page); return err; } /* * routine to check that the specified directory is empty (for rmdir) */ int nilfs_empty_dir(struct inode *inode) { struct page *page = NULL; unsigned long i, npages = dir_pages(inode); for (i = 0; i < npages; i++) { char *kaddr; struct nilfs_dir_entry *de; page = nilfs_get_page(inode, i); if (IS_ERR(page)) continue; kaddr = page_address(page); de = (struct nilfs_dir_entry *)kaddr; kaddr += nilfs_last_byte(inode, i) - NILFS_DIR_REC_LEN(1); while ((char *)de <= kaddr) { if (de->rec_len == 0) { nilfs_error(inode->i_sb, "zero-length directory entry (kaddr=%p, de=%p)", kaddr, de); goto not_empty; } if (de->inode != 0) { /* check for . and .. */ if (de->name[0] != '.') goto not_empty; if (de->name_len > 2) goto not_empty; if (de->name_len < 2) { if (de->inode != cpu_to_le64(inode->i_ino)) goto not_empty; } else if (de->name[1] != '.') goto not_empty; } de = nilfs_next_entry(de); } nilfs_put_page(page); } return 1; not_empty: nilfs_put_page(page); return 0; } const struct file_operations nilfs_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, .iterate_shared = nilfs_readdir, .unlocked_ioctl = nilfs_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = nilfs_compat_ioctl, #endif /* CONFIG_COMPAT */ .fsync = nilfs_sync_file, };
linux-master
fs/nilfs2/dir.c
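A minimal userspace sketch of the slot-fitting arithmetic that nilfs_add_link() in the file above relies on. The 12-byte entry header and 8-byte alignment are restated here as assumptions taken from the nilfs2 on-disk format header (they are not defined in dir.c itself), and the sketch reproduces only the two "goto got_it" tests, not the i_size expansion case, page locking, or chunk commit.

/*
 * Sketch of the slot-fitting test in nilfs_add_link().
 * The 12-byte header size and 8-byte alignment are assumptions restated
 * from the on-disk format header, not definitions taken from dir.c.
 */
#include <stdio.h>

#define DIR_PAD        8                       /* entries are 8-byte aligned */
#define DIR_ROUND      (DIR_PAD - 1)
#define DIR_REC_LEN(n) (((n) + 12 + DIR_ROUND) & ~DIR_ROUND)

/*
 * Can a new name of length namelen be placed in a slot described by
 * (slot_inode, slot_name_len, slot_rec_len)?  Mirrors the two "goto got_it"
 * tests in nilfs_add_link().
 */
static int slot_fits(unsigned long slot_inode, int slot_name_len,
                     int slot_rec_len, int namelen)
{
    int need = DIR_REC_LEN(namelen);

    if (slot_inode == 0 && slot_rec_len >= need)
        return 1;       /* reuse an unused entry as-is */
    /*
     * Live entry: keep DIR_REC_LEN(slot_name_len) bytes for it and
     * hand the remainder of its record to the new entry.
     */
    return slot_rec_len >= DIR_REC_LEN(slot_name_len) + need;
}

int main(void)
{
    /* minimum record sizes for one- and two-character names */
    printf("min rec_len: name_len=1 -> %d, name_len=2 -> %d\n",
           DIR_REC_LEN(1), DIR_REC_LEN(2));
    /*
     * A 32-byte live entry with a 2-char name has room for a 3-char name:
     * 16 bytes stay with the old entry, 16 go to the new one.
     */
    printf("fits: %d\n", slot_fits(42, 2, 32, 3));
    return 0;
}

Under these assumptions both "." and ".." need a 16-byte minimum record, which is consistent with nilfs_make_empty() copying four name bytes for each of them.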
// SPDX-License-Identifier: GPL-2.0+ /* * NILFS inode operations. * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * * Written by Ryusuke Konishi. * */ #include <linux/buffer_head.h> #include <linux/gfp.h> #include <linux/mpage.h> #include <linux/pagemap.h> #include <linux/writeback.h> #include <linux/uio.h> #include <linux/fiemap.h> #include "nilfs.h" #include "btnode.h" #include "segment.h" #include "page.h" #include "mdt.h" #include "cpfile.h" #include "ifile.h" /** * struct nilfs_iget_args - arguments used during comparison between inodes * @ino: inode number * @cno: checkpoint number * @root: pointer on NILFS root object (mounted checkpoint) * @for_gc: inode for GC flag * @for_btnc: inode for B-tree node cache flag * @for_shadow: inode for shadowed page cache flag */ struct nilfs_iget_args { u64 ino; __u64 cno; struct nilfs_root *root; bool for_gc; bool for_btnc; bool for_shadow; }; static int nilfs_iget_test(struct inode *inode, void *opaque); void nilfs_inode_add_blocks(struct inode *inode, int n) { struct nilfs_root *root = NILFS_I(inode)->i_root; inode_add_bytes(inode, i_blocksize(inode) * n); if (root) atomic64_add(n, &root->blocks_count); } void nilfs_inode_sub_blocks(struct inode *inode, int n) { struct nilfs_root *root = NILFS_I(inode)->i_root; inode_sub_bytes(inode, i_blocksize(inode) * n); if (root) atomic64_sub(n, &root->blocks_count); } /** * nilfs_get_block() - get a file block on the filesystem (callback function) * @inode: inode struct of the target file * @blkoff: file block number * @bh_result: buffer head to be mapped on * @create: indicate whether allocating the block or not when it has not * been allocated yet. * * This function does not issue actual read request of the specified data * block. It is done by VFS. */ int nilfs_get_block(struct inode *inode, sector_t blkoff, struct buffer_head *bh_result, int create) { struct nilfs_inode_info *ii = NILFS_I(inode); struct the_nilfs *nilfs = inode->i_sb->s_fs_info; __u64 blknum = 0; int err = 0, ret; unsigned int maxblocks = bh_result->b_size >> inode->i_blkbits; down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem); ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks); up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem); if (ret >= 0) { /* found */ map_bh(bh_result, inode->i_sb, blknum); if (ret > 0) bh_result->b_size = (ret << inode->i_blkbits); goto out; } /* data block was not found */ if (ret == -ENOENT && create) { struct nilfs_transaction_info ti; bh_result->b_blocknr = 0; err = nilfs_transaction_begin(inode->i_sb, &ti, 1); if (unlikely(err)) goto out; err = nilfs_bmap_insert(ii->i_bmap, blkoff, (unsigned long)bh_result); if (unlikely(err != 0)) { if (err == -EEXIST) { /* * The get_block() function could be called * from multiple callers for an inode. * However, the page having this block must * be locked in this case. */ nilfs_warn(inode->i_sb, "%s (ino=%lu): a race condition while inserting a data block at offset=%llu", __func__, inode->i_ino, (unsigned long long)blkoff); err = 0; } nilfs_transaction_abort(inode->i_sb); goto out; } nilfs_mark_inode_dirty_sync(inode); nilfs_transaction_commit(inode->i_sb); /* never fails */ /* Error handling should be detailed */ set_buffer_new(bh_result); set_buffer_delay(bh_result); map_bh(bh_result, inode->i_sb, 0); /* Disk block number must be changed to proper value */ } else if (ret == -ENOENT) { /* * not found is not error (e.g. hole); must return without * the mapped state flag. 
*/ ; } else { err = ret; } out: return err; } /** * nilfs_read_folio() - implement read_folio() method of nilfs_aops {} * address_space_operations. * @file: file struct of the file to be read * @folio: the folio to be read */ static int nilfs_read_folio(struct file *file, struct folio *folio) { return mpage_read_folio(folio, nilfs_get_block); } static void nilfs_readahead(struct readahead_control *rac) { mpage_readahead(rac, nilfs_get_block); } static int nilfs_writepages(struct address_space *mapping, struct writeback_control *wbc) { struct inode *inode = mapping->host; int err = 0; if (sb_rdonly(inode->i_sb)) { nilfs_clear_dirty_pages(mapping, false); return -EROFS; } if (wbc->sync_mode == WB_SYNC_ALL) err = nilfs_construct_dsync_segment(inode->i_sb, inode, wbc->range_start, wbc->range_end); return err; } static int nilfs_writepage(struct page *page, struct writeback_control *wbc) { struct inode *inode = page->mapping->host; int err; if (sb_rdonly(inode->i_sb)) { /* * It means that filesystem was remounted in read-only * mode because of error or metadata corruption. But we * have dirty pages that try to be flushed in background. * So, here we simply discard this dirty page. */ nilfs_clear_dirty_page(page, false); unlock_page(page); return -EROFS; } redirty_page_for_writepage(wbc, page); unlock_page(page); if (wbc->sync_mode == WB_SYNC_ALL) { err = nilfs_construct_segment(inode->i_sb); if (unlikely(err)) return err; } else if (wbc->for_reclaim) nilfs_flush_segment(inode->i_sb, inode->i_ino); return 0; } static bool nilfs_dirty_folio(struct address_space *mapping, struct folio *folio) { struct inode *inode = mapping->host; struct buffer_head *head; unsigned int nr_dirty = 0; bool ret = filemap_dirty_folio(mapping, folio); /* * The page may not be locked, eg if called from try_to_unmap_one() */ spin_lock(&mapping->private_lock); head = folio_buffers(folio); if (head) { struct buffer_head *bh = head; do { /* Do not mark hole blocks dirty */ if (buffer_dirty(bh) || !buffer_mapped(bh)) continue; set_buffer_dirty(bh); nr_dirty++; } while (bh = bh->b_this_page, bh != head); } else if (ret) { nr_dirty = 1 << (folio_shift(folio) - inode->i_blkbits); } spin_unlock(&mapping->private_lock); if (nr_dirty) nilfs_set_file_dirty(inode, nr_dirty); return ret; } void nilfs_write_failed(struct address_space *mapping, loff_t to) { struct inode *inode = mapping->host; if (to > inode->i_size) { truncate_pagecache(inode, inode->i_size); nilfs_truncate(inode); } } static int nilfs_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, struct page **pagep, void **fsdata) { struct inode *inode = mapping->host; int err = nilfs_transaction_begin(inode->i_sb, NULL, 1); if (unlikely(err)) return err; err = block_write_begin(mapping, pos, len, pagep, nilfs_get_block); if (unlikely(err)) { nilfs_write_failed(mapping, pos + len); nilfs_transaction_abort(inode->i_sb); } return err; } static int nilfs_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { struct inode *inode = mapping->host; unsigned int start = pos & (PAGE_SIZE - 1); unsigned int nr_dirty; int err; nr_dirty = nilfs_page_count_clean_buffers(page, start, start + copied); copied = generic_write_end(file, mapping, pos, len, copied, page, fsdata); nilfs_set_file_dirty(inode, nr_dirty); err = nilfs_transaction_commit(inode->i_sb); return err ? 
: copied; } static ssize_t nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) { struct inode *inode = file_inode(iocb->ki_filp); if (iov_iter_rw(iter) == WRITE) return 0; /* Needs synchronization with the cleaner */ return blockdev_direct_IO(iocb, inode, iter, nilfs_get_block); } const struct address_space_operations nilfs_aops = { .writepage = nilfs_writepage, .read_folio = nilfs_read_folio, .writepages = nilfs_writepages, .dirty_folio = nilfs_dirty_folio, .readahead = nilfs_readahead, .write_begin = nilfs_write_begin, .write_end = nilfs_write_end, .invalidate_folio = block_invalidate_folio, .direct_IO = nilfs_direct_IO, .is_partially_uptodate = block_is_partially_uptodate, }; static int nilfs_insert_inode_locked(struct inode *inode, struct nilfs_root *root, unsigned long ino) { struct nilfs_iget_args args = { .ino = ino, .root = root, .cno = 0, .for_gc = false, .for_btnc = false, .for_shadow = false }; return insert_inode_locked4(inode, ino, nilfs_iget_test, &args); } struct inode *nilfs_new_inode(struct inode *dir, umode_t mode) { struct super_block *sb = dir->i_sb; struct the_nilfs *nilfs = sb->s_fs_info; struct inode *inode; struct nilfs_inode_info *ii; struct nilfs_root *root; struct buffer_head *bh; int err = -ENOMEM; ino_t ino; inode = new_inode(sb); if (unlikely(!inode)) goto failed; mapping_set_gfp_mask(inode->i_mapping, mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS)); root = NILFS_I(dir)->i_root; ii = NILFS_I(inode); ii->i_state = BIT(NILFS_I_NEW); ii->i_root = root; err = nilfs_ifile_create_inode(root->ifile, &ino, &bh); if (unlikely(err)) goto failed_ifile_create_inode; /* reference count of i_bh inherits from nilfs_mdt_read_block() */ if (unlikely(ino < NILFS_USER_INO)) { nilfs_warn(sb, "inode bitmap is inconsistent for reserved inodes"); do { brelse(bh); err = nilfs_ifile_create_inode(root->ifile, &ino, &bh); if (unlikely(err)) goto failed_ifile_create_inode; } while (ino < NILFS_USER_INO); nilfs_info(sb, "repaired inode bitmap for reserved inodes"); } ii->i_bh = bh; atomic64_inc(&root->inodes_count); inode_init_owner(&nop_mnt_idmap, inode, dir, mode); inode->i_ino = ino; inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode); if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) { err = nilfs_bmap_read(ii->i_bmap, NULL); if (err < 0) goto failed_after_creation; set_bit(NILFS_I_BMAP, &ii->i_state); /* No lock is needed; iget() ensures it. */ } ii->i_flags = nilfs_mask_flags( mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED); /* ii->i_file_acl = 0; */ /* ii->i_dir_acl = 0; */ ii->i_dir_start_lookup = 0; nilfs_set_inode_flags(inode); spin_lock(&nilfs->ns_next_gen_lock); inode->i_generation = nilfs->ns_next_generation++; spin_unlock(&nilfs->ns_next_gen_lock); if (nilfs_insert_inode_locked(inode, root, ino) < 0) { err = -EIO; goto failed_after_creation; } err = nilfs_init_acl(inode, dir); if (unlikely(err)) /* * Never occur. When supporting nilfs_init_acl(), * proper cancellation of above jobs should be considered. */ goto failed_after_creation; return inode; failed_after_creation: clear_nlink(inode); if (inode->i_state & I_NEW) unlock_new_inode(inode); iput(inode); /* * raw_inode will be deleted through * nilfs_evict_inode(). 
*/ goto failed; failed_ifile_create_inode: make_bad_inode(inode); iput(inode); failed: return ERR_PTR(err); } void nilfs_set_inode_flags(struct inode *inode) { unsigned int flags = NILFS_I(inode)->i_flags; unsigned int new_fl = 0; if (flags & FS_SYNC_FL) new_fl |= S_SYNC; if (flags & FS_APPEND_FL) new_fl |= S_APPEND; if (flags & FS_IMMUTABLE_FL) new_fl |= S_IMMUTABLE; if (flags & FS_NOATIME_FL) new_fl |= S_NOATIME; if (flags & FS_DIRSYNC_FL) new_fl |= S_DIRSYNC; inode_set_flags(inode, new_fl, S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME | S_DIRSYNC); } int nilfs_read_inode_common(struct inode *inode, struct nilfs_inode *raw_inode) { struct nilfs_inode_info *ii = NILFS_I(inode); int err; inode->i_mode = le16_to_cpu(raw_inode->i_mode); i_uid_write(inode, le32_to_cpu(raw_inode->i_uid)); i_gid_write(inode, le32_to_cpu(raw_inode->i_gid)); set_nlink(inode, le16_to_cpu(raw_inode->i_links_count)); inode->i_size = le64_to_cpu(raw_inode->i_size); inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime); inode_set_ctime(inode, le64_to_cpu(raw_inode->i_ctime), le32_to_cpu(raw_inode->i_ctime_nsec)); inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime); inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec); inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec); if (nilfs_is_metadata_file_inode(inode) && !S_ISREG(inode->i_mode)) return -EIO; /* this inode is for metadata and corrupted */ if (inode->i_nlink == 0) return -ESTALE; /* this inode is deleted */ inode->i_blocks = le64_to_cpu(raw_inode->i_blocks); ii->i_flags = le32_to_cpu(raw_inode->i_flags); #if 0 ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl); ii->i_dir_acl = S_ISREG(inode->i_mode) ? 0 : le32_to_cpu(raw_inode->i_dir_acl); #endif ii->i_dir_start_lookup = 0; inode->i_generation = le32_to_cpu(raw_inode->i_generation); if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) { err = nilfs_bmap_read(ii->i_bmap, raw_inode); if (err < 0) return err; set_bit(NILFS_I_BMAP, &ii->i_state); /* No lock is needed; iget() ensures it. 
*/ } return 0; } static int __nilfs_read_inode(struct super_block *sb, struct nilfs_root *root, unsigned long ino, struct inode *inode) { struct the_nilfs *nilfs = sb->s_fs_info; struct buffer_head *bh; struct nilfs_inode *raw_inode; int err; down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem); err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh); if (unlikely(err)) goto bad_inode; raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh); err = nilfs_read_inode_common(inode, raw_inode); if (err) goto failed_unmap; if (S_ISREG(inode->i_mode)) { inode->i_op = &nilfs_file_inode_operations; inode->i_fop = &nilfs_file_operations; inode->i_mapping->a_ops = &nilfs_aops; } else if (S_ISDIR(inode->i_mode)) { inode->i_op = &nilfs_dir_inode_operations; inode->i_fop = &nilfs_dir_operations; inode->i_mapping->a_ops = &nilfs_aops; } else if (S_ISLNK(inode->i_mode)) { inode->i_op = &nilfs_symlink_inode_operations; inode_nohighmem(inode); inode->i_mapping->a_ops = &nilfs_aops; } else { inode->i_op = &nilfs_special_inode_operations; init_special_inode( inode, inode->i_mode, huge_decode_dev(le64_to_cpu(raw_inode->i_device_code))); } nilfs_ifile_unmap_inode(root->ifile, ino, bh); brelse(bh); up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem); nilfs_set_inode_flags(inode); mapping_set_gfp_mask(inode->i_mapping, mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS)); return 0; failed_unmap: nilfs_ifile_unmap_inode(root->ifile, ino, bh); brelse(bh); bad_inode: up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem); return err; } static int nilfs_iget_test(struct inode *inode, void *opaque) { struct nilfs_iget_args *args = opaque; struct nilfs_inode_info *ii; if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root) return 0; ii = NILFS_I(inode); if (test_bit(NILFS_I_BTNC, &ii->i_state)) { if (!args->for_btnc) return 0; } else if (args->for_btnc) { return 0; } if (test_bit(NILFS_I_SHADOW, &ii->i_state)) { if (!args->for_shadow) return 0; } else if (args->for_shadow) { return 0; } if (!test_bit(NILFS_I_GCINODE, &ii->i_state)) return !args->for_gc; return args->for_gc && args->cno == ii->i_cno; } static int nilfs_iget_set(struct inode *inode, void *opaque) { struct nilfs_iget_args *args = opaque; inode->i_ino = args->ino; NILFS_I(inode)->i_cno = args->cno; NILFS_I(inode)->i_root = args->root; if (args->root && args->ino == NILFS_ROOT_INO) nilfs_get_root(args->root); if (args->for_gc) NILFS_I(inode)->i_state = BIT(NILFS_I_GCINODE); if (args->for_btnc) NILFS_I(inode)->i_state |= BIT(NILFS_I_BTNC); if (args->for_shadow) NILFS_I(inode)->i_state |= BIT(NILFS_I_SHADOW); return 0; } struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root, unsigned long ino) { struct nilfs_iget_args args = { .ino = ino, .root = root, .cno = 0, .for_gc = false, .for_btnc = false, .for_shadow = false }; return ilookup5(sb, ino, nilfs_iget_test, &args); } struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root, unsigned long ino) { struct nilfs_iget_args args = { .ino = ino, .root = root, .cno = 0, .for_gc = false, .for_btnc = false, .for_shadow = false }; return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args); } struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root, unsigned long ino) { struct inode *inode; int err; inode = nilfs_iget_locked(sb, root, ino); if (unlikely(!inode)) return ERR_PTR(-ENOMEM); if (!(inode->i_state & I_NEW)) return inode; err = __nilfs_read_inode(sb, root, ino, inode); if (unlikely(err)) { iget_failed(inode); return ERR_PTR(err); } 
unlock_new_inode(inode); return inode; } struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino, __u64 cno) { struct nilfs_iget_args args = { .ino = ino, .root = NULL, .cno = cno, .for_gc = true, .for_btnc = false, .for_shadow = false }; struct inode *inode; int err; inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args); if (unlikely(!inode)) return ERR_PTR(-ENOMEM); if (!(inode->i_state & I_NEW)) return inode; err = nilfs_init_gcinode(inode); if (unlikely(err)) { iget_failed(inode); return ERR_PTR(err); } unlock_new_inode(inode); return inode; } /** * nilfs_attach_btree_node_cache - attach a B-tree node cache to the inode * @inode: inode object * * nilfs_attach_btree_node_cache() attaches a B-tree node cache to @inode, * or does nothing if the inode already has it. This function allocates * an additional inode to maintain page cache of B-tree nodes one-on-one. * * Return Value: On success, 0 is returned. On errors, one of the following * negative error code is returned. * * %-ENOMEM - Insufficient memory available. */ int nilfs_attach_btree_node_cache(struct inode *inode) { struct nilfs_inode_info *ii = NILFS_I(inode); struct inode *btnc_inode; struct nilfs_iget_args args; if (ii->i_assoc_inode) return 0; args.ino = inode->i_ino; args.root = ii->i_root; args.cno = ii->i_cno; args.for_gc = test_bit(NILFS_I_GCINODE, &ii->i_state) != 0; args.for_btnc = true; args.for_shadow = test_bit(NILFS_I_SHADOW, &ii->i_state) != 0; btnc_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test, nilfs_iget_set, &args); if (unlikely(!btnc_inode)) return -ENOMEM; if (btnc_inode->i_state & I_NEW) { nilfs_init_btnc_inode(btnc_inode); unlock_new_inode(btnc_inode); } NILFS_I(btnc_inode)->i_assoc_inode = inode; NILFS_I(btnc_inode)->i_bmap = ii->i_bmap; ii->i_assoc_inode = btnc_inode; return 0; } /** * nilfs_detach_btree_node_cache - detach the B-tree node cache from the inode * @inode: inode object * * nilfs_detach_btree_node_cache() detaches the B-tree node cache and its * holder inode bound to @inode, or does nothing if @inode doesn't have it. */ void nilfs_detach_btree_node_cache(struct inode *inode) { struct nilfs_inode_info *ii = NILFS_I(inode); struct inode *btnc_inode = ii->i_assoc_inode; if (btnc_inode) { NILFS_I(btnc_inode)->i_assoc_inode = NULL; ii->i_assoc_inode = NULL; iput(btnc_inode); } } /** * nilfs_iget_for_shadow - obtain inode for shadow mapping * @inode: inode object that uses shadow mapping * * nilfs_iget_for_shadow() allocates a pair of inodes that holds page * caches for shadow mapping. The page cache for data pages is set up * in one inode and the one for b-tree node pages is set up in the * other inode, which is attached to the former inode. * * Return Value: On success, a pointer to the inode for data pages is * returned. On errors, one of the following negative error code is returned * in a pointer type. * * %-ENOMEM - Insufficient memory available. 
*/ struct inode *nilfs_iget_for_shadow(struct inode *inode) { struct nilfs_iget_args args = { .ino = inode->i_ino, .root = NULL, .cno = 0, .for_gc = false, .for_btnc = false, .for_shadow = true }; struct inode *s_inode; int err; s_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test, nilfs_iget_set, &args); if (unlikely(!s_inode)) return ERR_PTR(-ENOMEM); if (!(s_inode->i_state & I_NEW)) return inode; NILFS_I(s_inode)->i_flags = 0; memset(NILFS_I(s_inode)->i_bmap, 0, sizeof(struct nilfs_bmap)); mapping_set_gfp_mask(s_inode->i_mapping, GFP_NOFS); err = nilfs_attach_btree_node_cache(s_inode); if (unlikely(err)) { iget_failed(s_inode); return ERR_PTR(err); } unlock_new_inode(s_inode); return s_inode; } void nilfs_write_inode_common(struct inode *inode, struct nilfs_inode *raw_inode, int has_bmap) { struct nilfs_inode_info *ii = NILFS_I(inode); raw_inode->i_mode = cpu_to_le16(inode->i_mode); raw_inode->i_uid = cpu_to_le32(i_uid_read(inode)); raw_inode->i_gid = cpu_to_le32(i_gid_read(inode)); raw_inode->i_links_count = cpu_to_le16(inode->i_nlink); raw_inode->i_size = cpu_to_le64(inode->i_size); raw_inode->i_ctime = cpu_to_le64(inode_get_ctime(inode).tv_sec); raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec); raw_inode->i_ctime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec); raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec); raw_inode->i_blocks = cpu_to_le64(inode->i_blocks); raw_inode->i_flags = cpu_to_le32(ii->i_flags); raw_inode->i_generation = cpu_to_le32(inode->i_generation); if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) { struct the_nilfs *nilfs = inode->i_sb->s_fs_info; /* zero-fill unused portion in the case of super root block */ raw_inode->i_xattr = 0; raw_inode->i_pad = 0; memset((void *)raw_inode + sizeof(*raw_inode), 0, nilfs->ns_inode_size - sizeof(*raw_inode)); } if (has_bmap) nilfs_bmap_write(ii->i_bmap, raw_inode); else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) raw_inode->i_device_code = cpu_to_le64(huge_encode_dev(inode->i_rdev)); /* * When extending inode, nilfs->ns_inode_size should be checked * for substitutions of appended fields. */ } void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh, int flags) { ino_t ino = inode->i_ino; struct nilfs_inode_info *ii = NILFS_I(inode); struct inode *ifile = ii->i_root->ifile; struct nilfs_inode *raw_inode; raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh); if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state)) memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size); if (flags & I_DIRTY_DATASYNC) set_bit(NILFS_I_INODE_SYNC, &ii->i_state); nilfs_write_inode_common(inode, raw_inode, 0); /* * XXX: call with has_bmap = 0 is a workaround to avoid * deadlock of bmap. This delays update of i_bmap to just * before writing. 
*/ nilfs_ifile_unmap_inode(ifile, ino, ibh); } #define NILFS_MAX_TRUNCATE_BLOCKS 16384 /* 64MB for 4KB block */ static void nilfs_truncate_bmap(struct nilfs_inode_info *ii, unsigned long from) { __u64 b; int ret; if (!test_bit(NILFS_I_BMAP, &ii->i_state)) return; repeat: ret = nilfs_bmap_last_key(ii->i_bmap, &b); if (ret == -ENOENT) return; else if (ret < 0) goto failed; if (b < from) return; b -= min_t(__u64, NILFS_MAX_TRUNCATE_BLOCKS, b - from); ret = nilfs_bmap_truncate(ii->i_bmap, b); nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb); if (!ret || (ret == -ENOMEM && nilfs_bmap_truncate(ii->i_bmap, b) == 0)) goto repeat; failed: nilfs_warn(ii->vfs_inode.i_sb, "error %d truncating bmap (ino=%lu)", ret, ii->vfs_inode.i_ino); } void nilfs_truncate(struct inode *inode) { unsigned long blkoff; unsigned int blocksize; struct nilfs_transaction_info ti; struct super_block *sb = inode->i_sb; struct nilfs_inode_info *ii = NILFS_I(inode); if (!test_bit(NILFS_I_BMAP, &ii->i_state)) return; if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) return; blocksize = sb->s_blocksize; blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits; nilfs_transaction_begin(sb, &ti, 0); /* never fails */ block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block); nilfs_truncate_bmap(ii, blkoff); inode->i_mtime = inode_set_ctime_current(inode); if (IS_SYNC(inode)) nilfs_set_transaction_flag(NILFS_TI_SYNC); nilfs_mark_inode_dirty(inode); nilfs_set_file_dirty(inode, 0); nilfs_transaction_commit(sb); /* * May construct a logical segment and may fail in sync mode. * But truncate has no return value. */ } static void nilfs_clear_inode(struct inode *inode) { struct nilfs_inode_info *ii = NILFS_I(inode); /* * Free resources allocated in nilfs_read_inode(), here. */ BUG_ON(!list_empty(&ii->i_dirty)); brelse(ii->i_bh); ii->i_bh = NULL; if (nilfs_is_metadata_file_inode(inode)) nilfs_mdt_clear(inode); if (test_bit(NILFS_I_BMAP, &ii->i_state)) nilfs_bmap_clear(ii->i_bmap); if (!test_bit(NILFS_I_BTNC, &ii->i_state)) nilfs_detach_btree_node_cache(inode); if (ii->i_root && inode->i_ino == NILFS_ROOT_INO) nilfs_put_root(ii->i_root); } void nilfs_evict_inode(struct inode *inode) { struct nilfs_transaction_info ti; struct super_block *sb = inode->i_sb; struct nilfs_inode_info *ii = NILFS_I(inode); struct the_nilfs *nilfs; int ret; if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) { truncate_inode_pages_final(&inode->i_data); clear_inode(inode); nilfs_clear_inode(inode); return; } nilfs_transaction_begin(sb, &ti, 0); /* never fails */ truncate_inode_pages_final(&inode->i_data); nilfs = sb->s_fs_info; if (unlikely(sb_rdonly(sb) || !nilfs->ns_writer)) { /* * If this inode is about to be disposed after the file system * has been degraded to read-only due to file system corruption * or after the writer has been detached, do not make any * changes that cause writes, just clear it. * Do this check after read-locking ns_segctor_sem by * nilfs_transaction_begin() in order to avoid a race with * the writer detach operation. */ clear_inode(inode); nilfs_clear_inode(inode); nilfs_transaction_abort(sb); return; } /* TODO: some of the following operations may fail. 
*/ nilfs_truncate_bmap(ii, 0); nilfs_mark_inode_dirty(inode); clear_inode(inode); ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino); if (!ret) atomic64_dec(&ii->i_root->inodes_count); nilfs_clear_inode(inode); if (IS_SYNC(inode)) nilfs_set_transaction_flag(NILFS_TI_SYNC); nilfs_transaction_commit(sb); /* * May construct a logical segment and may fail in sync mode. * But delete_inode has no return value. */ } int nilfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *iattr) { struct nilfs_transaction_info ti; struct inode *inode = d_inode(dentry); struct super_block *sb = inode->i_sb; int err; err = setattr_prepare(&nop_mnt_idmap, dentry, iattr); if (err) return err; err = nilfs_transaction_begin(sb, &ti, 0); if (unlikely(err)) return err; if ((iattr->ia_valid & ATTR_SIZE) && iattr->ia_size != i_size_read(inode)) { inode_dio_wait(inode); truncate_setsize(inode, iattr->ia_size); nilfs_truncate(inode); } setattr_copy(&nop_mnt_idmap, inode, iattr); mark_inode_dirty(inode); if (iattr->ia_valid & ATTR_MODE) { err = nilfs_acl_chmod(inode); if (unlikely(err)) goto out_err; } return nilfs_transaction_commit(sb); out_err: nilfs_transaction_abort(sb); return err; } int nilfs_permission(struct mnt_idmap *idmap, struct inode *inode, int mask) { struct nilfs_root *root = NILFS_I(inode)->i_root; if ((mask & MAY_WRITE) && root && root->cno != NILFS_CPTREE_CURRENT_CNO) return -EROFS; /* snapshot is not writable */ return generic_permission(&nop_mnt_idmap, inode, mask); } int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh) { struct the_nilfs *nilfs = inode->i_sb->s_fs_info; struct nilfs_inode_info *ii = NILFS_I(inode); int err; spin_lock(&nilfs->ns_inode_lock); if (ii->i_bh == NULL || unlikely(!buffer_uptodate(ii->i_bh))) { spin_unlock(&nilfs->ns_inode_lock); err = nilfs_ifile_get_inode_block(ii->i_root->ifile, inode->i_ino, pbh); if (unlikely(err)) return err; spin_lock(&nilfs->ns_inode_lock); if (ii->i_bh == NULL) ii->i_bh = *pbh; else if (unlikely(!buffer_uptodate(ii->i_bh))) { __brelse(ii->i_bh); ii->i_bh = *pbh; } else { brelse(*pbh); *pbh = ii->i_bh; } } else *pbh = ii->i_bh; get_bh(*pbh); spin_unlock(&nilfs->ns_inode_lock); return 0; } int nilfs_inode_dirty(struct inode *inode) { struct nilfs_inode_info *ii = NILFS_I(inode); struct the_nilfs *nilfs = inode->i_sb->s_fs_info; int ret = 0; if (!list_empty(&ii->i_dirty)) { spin_lock(&nilfs->ns_inode_lock); ret = test_bit(NILFS_I_DIRTY, &ii->i_state) || test_bit(NILFS_I_BUSY, &ii->i_state); spin_unlock(&nilfs->ns_inode_lock); } return ret; } int nilfs_set_file_dirty(struct inode *inode, unsigned int nr_dirty) { struct nilfs_inode_info *ii = NILFS_I(inode); struct the_nilfs *nilfs = inode->i_sb->s_fs_info; atomic_add(nr_dirty, &nilfs->ns_ndirtyblks); if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state)) return 0; spin_lock(&nilfs->ns_inode_lock); if (!test_bit(NILFS_I_QUEUED, &ii->i_state) && !test_bit(NILFS_I_BUSY, &ii->i_state)) { /* * Because this routine may race with nilfs_dispose_list(), * we have to check NILFS_I_QUEUED here, too. */ if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) { /* * This will happen when somebody is freeing * this inode. */ nilfs_warn(inode->i_sb, "cannot set file dirty (ino=%lu): the file is being freed", inode->i_ino); spin_unlock(&nilfs->ns_inode_lock); return -EINVAL; /* * NILFS_I_DIRTY may remain for * freeing inode. 
*/ } list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files); set_bit(NILFS_I_QUEUED, &ii->i_state); } spin_unlock(&nilfs->ns_inode_lock); return 0; } int __nilfs_mark_inode_dirty(struct inode *inode, int flags) { struct the_nilfs *nilfs = inode->i_sb->s_fs_info; struct buffer_head *ibh; int err; /* * Do not dirty inodes after the log writer has been detached * and its nilfs_root struct has been freed. */ if (unlikely(nilfs_purging(nilfs))) return 0; err = nilfs_load_inode_block(inode, &ibh); if (unlikely(err)) { nilfs_warn(inode->i_sb, "cannot mark inode dirty (ino=%lu): error %d loading inode block", inode->i_ino, err); return err; } nilfs_update_inode(inode, ibh, flags); mark_buffer_dirty(ibh); nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile); brelse(ibh); return 0; } /** * nilfs_dirty_inode - reflect changes on given inode to an inode block. * @inode: inode of the file to be registered. * @flags: flags to determine the dirty state of the inode * * nilfs_dirty_inode() loads a inode block containing the specified * @inode and copies data from a nilfs_inode to a corresponding inode * entry in the inode block. This operation is excluded from the segment * construction. This function can be called both as a single operation * and as a part of indivisible file operations. */ void nilfs_dirty_inode(struct inode *inode, int flags) { struct nilfs_transaction_info ti; struct nilfs_mdt_info *mdi = NILFS_MDT(inode); if (is_bad_inode(inode)) { nilfs_warn(inode->i_sb, "tried to mark bad_inode dirty. ignored."); dump_stack(); return; } if (mdi) { nilfs_mdt_mark_dirty(inode); return; } nilfs_transaction_begin(inode->i_sb, &ti, 0); __nilfs_mark_inode_dirty(inode, flags); nilfs_transaction_commit(inode->i_sb); /* never fails */ } int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, __u64 start, __u64 len) { struct the_nilfs *nilfs = inode->i_sb->s_fs_info; __u64 logical = 0, phys = 0, size = 0; __u32 flags = 0; loff_t isize; sector_t blkoff, end_blkoff; sector_t delalloc_blkoff; unsigned long delalloc_blklen; unsigned int blkbits = inode->i_blkbits; int ret, n; ret = fiemap_prep(inode, fieinfo, start, &len, 0); if (ret) return ret; inode_lock(inode); isize = i_size_read(inode); blkoff = start >> blkbits; end_blkoff = (start + len - 1) >> blkbits; delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff, &delalloc_blkoff); do { __u64 blkphy; unsigned int maxblocks; if (delalloc_blklen && blkoff == delalloc_blkoff) { if (size) { /* End of the current extent */ ret = fiemap_fill_next_extent( fieinfo, logical, phys, size, flags); if (ret) break; } if (blkoff > end_blkoff) break; flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC; logical = blkoff << blkbits; phys = 0; size = delalloc_blklen << blkbits; blkoff = delalloc_blkoff + delalloc_blklen; delalloc_blklen = nilfs_find_uncommitted_extent( inode, blkoff, &delalloc_blkoff); continue; } /* * Limit the number of blocks that we look up so as * not to get into the next delayed allocation extent. 
*/ maxblocks = INT_MAX; if (delalloc_blklen) maxblocks = min_t(sector_t, delalloc_blkoff - blkoff, maxblocks); blkphy = 0; down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem); n = nilfs_bmap_lookup_contig( NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks); up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem); if (n < 0) { int past_eof; if (unlikely(n != -ENOENT)) break; /* error */ /* HOLE */ blkoff++; past_eof = ((blkoff << blkbits) >= isize); if (size) { /* End of the current extent */ if (past_eof) flags |= FIEMAP_EXTENT_LAST; ret = fiemap_fill_next_extent( fieinfo, logical, phys, size, flags); if (ret) break; size = 0; } if (blkoff > end_blkoff || past_eof) break; } else { if (size) { if (phys && blkphy << blkbits == phys + size) { /* The current extent goes on */ size += n << blkbits; } else { /* Terminate the current extent */ ret = fiemap_fill_next_extent( fieinfo, logical, phys, size, flags); if (ret || blkoff > end_blkoff) break; /* Start another extent */ flags = FIEMAP_EXTENT_MERGED; logical = blkoff << blkbits; phys = blkphy << blkbits; size = n << blkbits; } } else { /* Start a new extent */ flags = FIEMAP_EXTENT_MERGED; logical = blkoff << blkbits; phys = blkphy << blkbits; size = n << blkbits; } blkoff += n; } cond_resched(); } while (true); /* If ret is 1 then we just hit the end of the extent array */ if (ret == 1) ret = 0; inode_unlock(inode); return ret; }
linux-master
fs/nilfs2/inode.c
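The fiemap handler at the end of inode.c above is reachable from userspace through the generic FS_IOC_FIEMAP ioctl. The sketch below is a plain userspace caller, not part of the kernel sources; the mount point and the extent count are arbitrary examples. Extents that nilfs_fiemap() reports for data still sitting in delayed allocation carry FIEMAP_EXTENT_DELALLOC, and the flag handling here only makes that visible.

/*
 * Userspace sketch: query the extents that nilfs_fiemap() reports for a
 * file on a mounted nilfs2 volume via the generic FS_IOC_FIEMAP ioctl.
 * The default path below is just an example; any regular file works.
 */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

int main(int argc, char **argv)
{
    const char *path = argc > 1 ? argv[1] : "/mnt/nilfs2/testfile";
    int fd = open(path, O_RDONLY);
    if (fd < 0) { perror("open"); return 1; }

    unsigned int next = 64;        /* ask for up to 64 extents */
    struct fiemap *fm = calloc(1, sizeof(*fm) +
                               next * sizeof(struct fiemap_extent));
    if (!fm) { close(fd); return 1; }

    fm->fm_start = 0;
    fm->fm_length = ~0ULL;         /* whole file */
    fm->fm_extent_count = next;

    if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
        perror("FS_IOC_FIEMAP");
    } else {
        for (unsigned int i = 0; i < fm->fm_mapped_extents; i++) {
            struct fiemap_extent *fe = &fm->fm_extents[i];
            printf("logical=%llu phys=%llu len=%llu%s%s\n",
                   (unsigned long long)fe->fe_logical,
                   (unsigned long long)fe->fe_physical,
                   (unsigned long long)fe->fe_length,
                   (fe->fe_flags & FIEMAP_EXTENT_DELALLOC) ? " [delalloc]" : "",
                   (fe->fe_flags & FIEMAP_EXTENT_LAST) ? " [last]" : "");
        }
    }
    free(fm);
    close(fd);
    return 0;
}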
// SPDX-License-Identifier: GPL-2.0+ /* * Buffer/page management specific to NILFS * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * * Written by Ryusuke Konishi and Seiji Kihara. */ #include <linux/pagemap.h> #include <linux/writeback.h> #include <linux/swap.h> #include <linux/bitops.h> #include <linux/page-flags.h> #include <linux/list.h> #include <linux/highmem.h> #include <linux/pagevec.h> #include <linux/gfp.h> #include "nilfs.h" #include "page.h" #include "mdt.h" #define NILFS_BUFFER_INHERENT_BITS \ (BIT(BH_Uptodate) | BIT(BH_Mapped) | BIT(BH_NILFS_Node) | \ BIT(BH_NILFS_Volatile) | BIT(BH_NILFS_Checked)) static struct buffer_head * __nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index, int blkbits, unsigned long b_state) { unsigned long first_block; struct buffer_head *bh; if (!page_has_buffers(page)) create_empty_buffers(page, 1 << blkbits, b_state); first_block = (unsigned long)index << (PAGE_SHIFT - blkbits); bh = nilfs_page_get_nth_block(page, block - first_block); touch_buffer(bh); wait_on_buffer(bh); return bh; } struct buffer_head *nilfs_grab_buffer(struct inode *inode, struct address_space *mapping, unsigned long blkoff, unsigned long b_state) { int blkbits = inode->i_blkbits; pgoff_t index = blkoff >> (PAGE_SHIFT - blkbits); struct page *page; struct buffer_head *bh; page = grab_cache_page(mapping, index); if (unlikely(!page)) return NULL; bh = __nilfs_get_page_block(page, blkoff, index, blkbits, b_state); if (unlikely(!bh)) { unlock_page(page); put_page(page); return NULL; } return bh; } /** * nilfs_forget_buffer - discard dirty state * @bh: buffer head of the buffer to be discarded */ void nilfs_forget_buffer(struct buffer_head *bh) { struct page *page = bh->b_page; const unsigned long clear_bits = (BIT(BH_Uptodate) | BIT(BH_Dirty) | BIT(BH_Mapped) | BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) | BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected)); lock_buffer(bh); set_mask_bits(&bh->b_state, clear_bits, 0); if (nilfs_page_buffers_clean(page)) __nilfs_clear_page_dirty(page); bh->b_blocknr = -1; ClearPageUptodate(page); ClearPageMappedToDisk(page); unlock_buffer(bh); brelse(bh); } /** * nilfs_copy_buffer -- copy buffer data and flags * @dbh: destination buffer * @sbh: source buffer */ void nilfs_copy_buffer(struct buffer_head *dbh, struct buffer_head *sbh) { void *kaddr0, *kaddr1; unsigned long bits; struct page *spage = sbh->b_page, *dpage = dbh->b_page; struct buffer_head *bh; kaddr0 = kmap_atomic(spage); kaddr1 = kmap_atomic(dpage); memcpy(kaddr1 + bh_offset(dbh), kaddr0 + bh_offset(sbh), sbh->b_size); kunmap_atomic(kaddr1); kunmap_atomic(kaddr0); dbh->b_state = sbh->b_state & NILFS_BUFFER_INHERENT_BITS; dbh->b_blocknr = sbh->b_blocknr; dbh->b_bdev = sbh->b_bdev; bh = dbh; bits = sbh->b_state & (BIT(BH_Uptodate) | BIT(BH_Mapped)); while ((bh = bh->b_this_page) != dbh) { lock_buffer(bh); bits &= bh->b_state; unlock_buffer(bh); } if (bits & BIT(BH_Uptodate)) SetPageUptodate(dpage); else ClearPageUptodate(dpage); if (bits & BIT(BH_Mapped)) SetPageMappedToDisk(dpage); else ClearPageMappedToDisk(dpage); } /** * nilfs_page_buffers_clean - check if a page has dirty buffers or not. * @page: page to be checked * * nilfs_page_buffers_clean() returns zero if the page has dirty buffers. * Otherwise, it returns non-zero value. 
*/ int nilfs_page_buffers_clean(struct page *page) { struct buffer_head *bh, *head; bh = head = page_buffers(page); do { if (buffer_dirty(bh)) return 0; bh = bh->b_this_page; } while (bh != head); return 1; } void nilfs_page_bug(struct page *page) { struct address_space *m; unsigned long ino; if (unlikely(!page)) { printk(KERN_CRIT "NILFS_PAGE_BUG(NULL)\n"); return; } m = page->mapping; ino = m ? m->host->i_ino : 0; printk(KERN_CRIT "NILFS_PAGE_BUG(%p): cnt=%d index#=%llu flags=0x%lx " "mapping=%p ino=%lu\n", page, page_ref_count(page), (unsigned long long)page->index, page->flags, m, ino); if (page_has_buffers(page)) { struct buffer_head *bh, *head; int i = 0; bh = head = page_buffers(page); do { printk(KERN_CRIT " BH[%d] %p: cnt=%d block#=%llu state=0x%lx\n", i++, bh, atomic_read(&bh->b_count), (unsigned long long)bh->b_blocknr, bh->b_state); bh = bh->b_this_page; } while (bh != head); } } /** * nilfs_copy_page -- copy the page with buffers * @dst: destination page * @src: source page * @copy_dirty: flag whether to copy dirty states on the page's buffer heads. * * This function is for both data pages and btnode pages. The dirty flag * should be treated by caller. The page must not be under i/o. * Both src and dst page must be locked */ static void nilfs_copy_page(struct page *dst, struct page *src, int copy_dirty) { struct buffer_head *dbh, *dbufs, *sbh; unsigned long mask = NILFS_BUFFER_INHERENT_BITS; BUG_ON(PageWriteback(dst)); sbh = page_buffers(src); if (!page_has_buffers(dst)) create_empty_buffers(dst, sbh->b_size, 0); if (copy_dirty) mask |= BIT(BH_Dirty); dbh = dbufs = page_buffers(dst); do { lock_buffer(sbh); lock_buffer(dbh); dbh->b_state = sbh->b_state & mask; dbh->b_blocknr = sbh->b_blocknr; dbh->b_bdev = sbh->b_bdev; sbh = sbh->b_this_page; dbh = dbh->b_this_page; } while (dbh != dbufs); copy_highpage(dst, src); if (PageUptodate(src) && !PageUptodate(dst)) SetPageUptodate(dst); else if (!PageUptodate(src) && PageUptodate(dst)) ClearPageUptodate(dst); if (PageMappedToDisk(src) && !PageMappedToDisk(dst)) SetPageMappedToDisk(dst); else if (!PageMappedToDisk(src) && PageMappedToDisk(dst)) ClearPageMappedToDisk(dst); do { unlock_buffer(sbh); unlock_buffer(dbh); sbh = sbh->b_this_page; dbh = dbh->b_this_page; } while (dbh != dbufs); } int nilfs_copy_dirty_pages(struct address_space *dmap, struct address_space *smap) { struct folio_batch fbatch; unsigned int i; pgoff_t index = 0; int err = 0; folio_batch_init(&fbatch); repeat: if (!filemap_get_folios_tag(smap, &index, (pgoff_t)-1, PAGECACHE_TAG_DIRTY, &fbatch)) return 0; for (i = 0; i < folio_batch_count(&fbatch); i++) { struct folio *folio = fbatch.folios[i], *dfolio; folio_lock(folio); if (unlikely(!folio_test_dirty(folio))) NILFS_PAGE_BUG(&folio->page, "inconsistent dirty state"); dfolio = filemap_grab_folio(dmap, folio->index); if (unlikely(IS_ERR(dfolio))) { /* No empty page is added to the page cache */ folio_unlock(folio); err = PTR_ERR(dfolio); break; } if (unlikely(!folio_buffers(folio))) NILFS_PAGE_BUG(&folio->page, "found empty page in dat page cache"); nilfs_copy_page(&dfolio->page, &folio->page, 1); filemap_dirty_folio(folio_mapping(dfolio), dfolio); folio_unlock(dfolio); folio_put(dfolio); folio_unlock(folio); } folio_batch_release(&fbatch); cond_resched(); if (likely(!err)) goto repeat; return err; } /** * nilfs_copy_back_pages -- copy back pages to original cache from shadow cache * @dmap: destination page cache * @smap: source page cache * * No pages must be added to the cache during this process. 
* This must be ensured by the caller. */ void nilfs_copy_back_pages(struct address_space *dmap, struct address_space *smap) { struct folio_batch fbatch; unsigned int i, n; pgoff_t start = 0; folio_batch_init(&fbatch); repeat: n = filemap_get_folios(smap, &start, ~0UL, &fbatch); if (!n) return; for (i = 0; i < folio_batch_count(&fbatch); i++) { struct folio *folio = fbatch.folios[i], *dfolio; pgoff_t index = folio->index; folio_lock(folio); dfolio = filemap_lock_folio(dmap, index); if (!IS_ERR(dfolio)) { /* overwrite existing folio in the destination cache */ WARN_ON(folio_test_dirty(dfolio)); nilfs_copy_page(&dfolio->page, &folio->page, 0); folio_unlock(dfolio); folio_put(dfolio); /* Do we not need to remove folio from smap here? */ } else { struct folio *f; /* move the folio to the destination cache */ xa_lock_irq(&smap->i_pages); f = __xa_erase(&smap->i_pages, index); WARN_ON(folio != f); smap->nrpages--; xa_unlock_irq(&smap->i_pages); xa_lock_irq(&dmap->i_pages); f = __xa_store(&dmap->i_pages, index, folio, GFP_NOFS); if (unlikely(f)) { /* Probably -ENOMEM */ folio->mapping = NULL; folio_put(folio); } else { folio->mapping = dmap; dmap->nrpages++; if (folio_test_dirty(folio)) __xa_set_mark(&dmap->i_pages, index, PAGECACHE_TAG_DIRTY); } xa_unlock_irq(&dmap->i_pages); } folio_unlock(folio); } folio_batch_release(&fbatch); cond_resched(); goto repeat; } /** * nilfs_clear_dirty_pages - discard dirty pages in address space * @mapping: address space with dirty pages for discarding * @silent: suppress [true] or print [false] warning messages */ void nilfs_clear_dirty_pages(struct address_space *mapping, bool silent) { struct folio_batch fbatch; unsigned int i; pgoff_t index = 0; folio_batch_init(&fbatch); while (filemap_get_folios_tag(mapping, &index, (pgoff_t)-1, PAGECACHE_TAG_DIRTY, &fbatch)) { for (i = 0; i < folio_batch_count(&fbatch); i++) { struct folio *folio = fbatch.folios[i]; folio_lock(folio); /* * This folio may have been removed from the address * space by truncation or invalidation when the lock * was acquired. Skip processing in that case. 
*/ if (likely(folio->mapping == mapping)) nilfs_clear_dirty_page(&folio->page, silent); folio_unlock(folio); } folio_batch_release(&fbatch); cond_resched(); } } /** * nilfs_clear_dirty_page - discard dirty page * @page: dirty page that will be discarded * @silent: suppress [true] or print [false] warning messages */ void nilfs_clear_dirty_page(struct page *page, bool silent) { struct inode *inode = page->mapping->host; struct super_block *sb = inode->i_sb; BUG_ON(!PageLocked(page)); if (!silent) nilfs_warn(sb, "discard dirty page: offset=%lld, ino=%lu", page_offset(page), inode->i_ino); ClearPageUptodate(page); ClearPageMappedToDisk(page); if (page_has_buffers(page)) { struct buffer_head *bh, *head; const unsigned long clear_bits = (BIT(BH_Uptodate) | BIT(BH_Dirty) | BIT(BH_Mapped) | BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) | BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected)); bh = head = page_buffers(page); do { lock_buffer(bh); if (!silent) nilfs_warn(sb, "discard dirty block: blocknr=%llu, size=%zu", (u64)bh->b_blocknr, bh->b_size); set_mask_bits(&bh->b_state, clear_bits, 0); unlock_buffer(bh); } while (bh = bh->b_this_page, bh != head); } __nilfs_clear_page_dirty(page); } unsigned int nilfs_page_count_clean_buffers(struct page *page, unsigned int from, unsigned int to) { unsigned int block_start, block_end; struct buffer_head *bh, *head; unsigned int nc = 0; for (bh = head = page_buffers(page), block_start = 0; bh != head || !block_start; block_start = block_end, bh = bh->b_this_page) { block_end = block_start + bh->b_size; if (block_end > from && block_start < to && !buffer_dirty(bh)) nc++; } return nc; } /* * NILFS2 needs clear_page_dirty() in the following two cases: * * 1) For B-tree node pages and data pages of DAT file, NILFS2 clears dirty * flag of pages when it copies back pages from shadow cache to the * original cache. * * 2) Some B-tree operations like insertion or deletion may dispose buffers * in dirty state, and this needs to cancel the dirty state of their pages. */ int __nilfs_clear_page_dirty(struct page *page) { struct address_space *mapping = page->mapping; if (mapping) { xa_lock_irq(&mapping->i_pages); if (test_bit(PG_dirty, &page->flags)) { __xa_clear_mark(&mapping->i_pages, page_index(page), PAGECACHE_TAG_DIRTY); xa_unlock_irq(&mapping->i_pages); return clear_page_dirty_for_io(page); } xa_unlock_irq(&mapping->i_pages); return 0; } return TestClearPageDirty(page); } /** * nilfs_find_uncommitted_extent - find extent of uncommitted data * @inode: inode * @start_blk: start block offset (in) * @blkoff: start offset of the found extent (out) * * This function searches an extent of buffers marked "delayed" which * starts from a block offset equal to or larger than @start_blk. If * such an extent was found, this will store the start offset in * @blkoff and return its length in blocks. Otherwise, zero is * returned. 
*/ unsigned long nilfs_find_uncommitted_extent(struct inode *inode, sector_t start_blk, sector_t *blkoff) { unsigned int i, nr_folios; pgoff_t index; unsigned long length = 0; struct folio_batch fbatch; struct folio *folio; if (inode->i_mapping->nrpages == 0) return 0; index = start_blk >> (PAGE_SHIFT - inode->i_blkbits); folio_batch_init(&fbatch); repeat: nr_folios = filemap_get_folios_contig(inode->i_mapping, &index, ULONG_MAX, &fbatch); if (nr_folios == 0) return length; i = 0; do { folio = fbatch.folios[i]; folio_lock(folio); if (folio_buffers(folio)) { struct buffer_head *bh, *head; sector_t b; b = folio->index << (PAGE_SHIFT - inode->i_blkbits); bh = head = folio_buffers(folio); do { if (b < start_blk) continue; if (buffer_delay(bh)) { if (length == 0) *blkoff = b; length++; } else if (length > 0) { goto out_locked; } } while (++b, bh = bh->b_this_page, bh != head); } else { if (length > 0) goto out_locked; } folio_unlock(folio); } while (++i < nr_folios); folio_batch_release(&fbatch); cond_resched(); goto repeat; out_locked: folio_unlock(folio); folio_batch_release(&fbatch); return length; }
linux-master
fs/nilfs2/page.c
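nilfs_find_uncommitted_extent() in the file above walks each folio's buffer ring looking for a run of buffer_delay() blocks. Below is a small userspace model of that per-folio scan, with a plain int array standing in for the buffer heads; it is an illustration of the control flow only, under the stated assumptions, and uses no kernel interfaces.

/*
 * Userspace model of the per-folio scan in nilfs_find_uncommitted_extent():
 * walk the blocks of one "page", skip blocks before start_blk, and collect
 * the first contiguous run of delayed (uncommitted) blocks.  The flag array
 * stands in for the buffer_delay() state of the page's buffer heads.
 */
#include <stdio.h>

/*
 * Returns the run length; *blkoff receives the first block of the run.
 * *stopped is set when a clean block ends the run inside the page (the
 * kernel version then stops scanning further pages).
 */
static unsigned long scan_page(unsigned long first_block,
                               const int *delayed, int nblocks,
                               unsigned long start_blk,
                               unsigned long *blkoff, int *stopped)
{
    unsigned long length = 0;

    *stopped = 0;
    for (int i = 0; i < nblocks; i++) {
        unsigned long b = first_block + i;

        if (b < start_blk)
            continue;              /* before the search window */
        if (delayed[i]) {
            if (length == 0)
                *blkoff = b;       /* start of the run */
            length++;
        } else if (length > 0) {
            *stopped = 1;          /* run ended inside the page */
            break;
        }
    }
    return length;
}

int main(void)
{
    /* blocks 4..11 of a file; blocks 6, 7 and 8 are delayed */
    int delayed[8] = { 0, 0, 1, 1, 1, 0, 0, 0 };
    unsigned long blkoff = 0;
    int stopped;
    unsigned long len = scan_page(4, delayed, 8, 0, &blkoff, &stopped);

    printf("uncommitted extent: start=%lu len=%lu stopped=%d\n",
           blkoff, len, stopped);  /* prints start=6 len=3 stopped=1 */
    return 0;
}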
// SPDX-License-Identifier: GPL-2.0+ /* * NILFS ioctl operations. * * Copyright (C) 2007, 2008 Nippon Telegraph and Telephone Corporation. * * Written by Koji Sato. */ #include <linux/fs.h> #include <linux/wait.h> #include <linux/slab.h> #include <linux/capability.h> /* capable() */ #include <linux/uaccess.h> /* copy_from_user(), copy_to_user() */ #include <linux/vmalloc.h> #include <linux/compat.h> /* compat_ptr() */ #include <linux/mount.h> /* mnt_want_write_file(), mnt_drop_write_file() */ #include <linux/buffer_head.h> #include <linux/fileattr.h> #include "nilfs.h" #include "segment.h" #include "bmap.h" #include "cpfile.h" #include "sufile.h" #include "dat.h" /** * nilfs_ioctl_wrap_copy - wrapping function of get/set metadata info * @nilfs: nilfs object * @argv: vector of arguments from userspace * @dir: set of direction flags * @dofunc: concrete function of get/set metadata info * * Description: nilfs_ioctl_wrap_copy() gets/sets metadata info by means of * calling dofunc() function on the basis of @argv argument. * * Return Value: On success, 0 is returned and requested metadata info * is copied into userspace. On error, one of the following * negative error codes is returned. * * %-EINVAL - Invalid arguments from userspace. * * %-ENOMEM - Insufficient amount of memory available. * * %-EFAULT - Failure during execution of requested operation. */ static int nilfs_ioctl_wrap_copy(struct the_nilfs *nilfs, struct nilfs_argv *argv, int dir, ssize_t (*dofunc)(struct the_nilfs *, __u64 *, int, void *, size_t, size_t)) { void *buf; void __user *base = (void __user *)(unsigned long)argv->v_base; size_t maxmembs, total, n; ssize_t nr; int ret, i; __u64 pos, ppos; if (argv->v_nmembs == 0) return 0; if (argv->v_size > PAGE_SIZE) return -EINVAL; /* * Reject pairs of a start item position (argv->v_index) and a * total count (argv->v_nmembs) which leads position 'pos' to * overflow by the increment at the end of the loop. */ if (argv->v_index > ~(__u64)0 - argv->v_nmembs) return -EINVAL; buf = (void *)get_zeroed_page(GFP_NOFS); if (unlikely(!buf)) return -ENOMEM; maxmembs = PAGE_SIZE / argv->v_size; ret = 0; total = 0; pos = argv->v_index; for (i = 0; i < argv->v_nmembs; i += n) { n = (argv->v_nmembs - i < maxmembs) ? 
argv->v_nmembs - i : maxmembs; if ((dir & _IOC_WRITE) && copy_from_user(buf, base + argv->v_size * i, argv->v_size * n)) { ret = -EFAULT; break; } ppos = pos; nr = dofunc(nilfs, &pos, argv->v_flags, buf, argv->v_size, n); if (nr < 0) { ret = nr; break; } if ((dir & _IOC_READ) && copy_to_user(base + argv->v_size * i, buf, argv->v_size * nr)) { ret = -EFAULT; break; } total += nr; if ((size_t)nr < n) break; if (pos == ppos) pos += n; } argv->v_nmembs = total; free_pages((unsigned long)buf, 0); return ret; } /** * nilfs_fileattr_get - ioctl to support lsattr */ int nilfs_fileattr_get(struct dentry *dentry, struct fileattr *fa) { struct inode *inode = d_inode(dentry); fileattr_fill_flags(fa, NILFS_I(inode)->i_flags & FS_FL_USER_VISIBLE); return 0; } /** * nilfs_fileattr_set - ioctl to support chattr */ int nilfs_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry, struct fileattr *fa) { struct inode *inode = d_inode(dentry); struct nilfs_transaction_info ti; unsigned int flags, oldflags; int ret; if (fileattr_has_fsx(fa)) return -EOPNOTSUPP; flags = nilfs_mask_flags(inode->i_mode, fa->flags); ret = nilfs_transaction_begin(inode->i_sb, &ti, 0); if (ret) return ret; oldflags = NILFS_I(inode)->i_flags & ~FS_FL_USER_MODIFIABLE; NILFS_I(inode)->i_flags = oldflags | (flags & FS_FL_USER_MODIFIABLE); nilfs_set_inode_flags(inode); inode_set_ctime_current(inode); if (IS_SYNC(inode)) nilfs_set_transaction_flag(NILFS_TI_SYNC); nilfs_mark_inode_dirty(inode); return nilfs_transaction_commit(inode->i_sb); } /** * nilfs_ioctl_getversion - get info about a file's version (generation number) */ static int nilfs_ioctl_getversion(struct inode *inode, void __user *argp) { return put_user(inode->i_generation, (int __user *)argp); } /** * nilfs_ioctl_change_cpmode - change checkpoint mode (checkpoint/snapshot) * @inode: inode object * @filp: file object * @cmd: ioctl's request code * @argp: pointer on argument from userspace * * Description: nilfs_ioctl_change_cpmode() function changes mode of * given checkpoint between checkpoint and snapshot state. This ioctl * is used in chcp and mkcp utilities. * * Return Value: On success, 0 is returned and mode of a checkpoint is * changed. On error, one of the following negative error codes * is returned. * * %-EPERM - Operation not permitted. * * %-EFAULT - Failure during checkpoint mode changing. */ static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp, unsigned int cmd, void __user *argp) { struct the_nilfs *nilfs = inode->i_sb->s_fs_info; struct nilfs_transaction_info ti; struct nilfs_cpmode cpmode; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; ret = mnt_want_write_file(filp); if (ret) return ret; ret = -EFAULT; if (copy_from_user(&cpmode, argp, sizeof(cpmode))) goto out; mutex_lock(&nilfs->ns_snapshot_mount_mutex); nilfs_transaction_begin(inode->i_sb, &ti, 0); ret = nilfs_cpfile_change_cpmode( nilfs->ns_cpfile, cpmode.cm_cno, cpmode.cm_mode); if (unlikely(ret < 0)) nilfs_transaction_abort(inode->i_sb); else nilfs_transaction_commit(inode->i_sb); /* never fails */ mutex_unlock(&nilfs->ns_snapshot_mount_mutex); out: mnt_drop_write_file(filp); return ret; } /** * nilfs_ioctl_delete_checkpoint - remove checkpoint * @inode: inode object * @filp: file object * @cmd: ioctl's request code * @argp: pointer on argument from userspace * * Description: nilfs_ioctl_delete_checkpoint() function removes * checkpoint from NILFS2 file system. This ioctl is used in rmcp * utility. 
* * Return Value: On success, 0 is returned and a checkpoint is * removed. On error, one of the following negative error codes * is returned. * * %-EPERM - Operation not permitted. * * %-EFAULT - Failure during checkpoint removing. */ static int nilfs_ioctl_delete_checkpoint(struct inode *inode, struct file *filp, unsigned int cmd, void __user *argp) { struct the_nilfs *nilfs = inode->i_sb->s_fs_info; struct nilfs_transaction_info ti; __u64 cno; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; ret = mnt_want_write_file(filp); if (ret) return ret; ret = -EFAULT; if (copy_from_user(&cno, argp, sizeof(cno))) goto out; nilfs_transaction_begin(inode->i_sb, &ti, 0); ret = nilfs_cpfile_delete_checkpoint(nilfs->ns_cpfile, cno); if (unlikely(ret < 0)) nilfs_transaction_abort(inode->i_sb); else nilfs_transaction_commit(inode->i_sb); /* never fails */ out: mnt_drop_write_file(filp); return ret; } /** * nilfs_ioctl_do_get_cpinfo - callback method getting info about checkpoints * @nilfs: nilfs object * @posp: pointer on array of checkpoint's numbers * @flags: checkpoint mode (checkpoint or snapshot) * @buf: buffer for storing checkponts' info * @size: size in bytes of one checkpoint info item in array * @nmembs: number of checkpoints in array (numbers and infos) * * Description: nilfs_ioctl_do_get_cpinfo() function returns info about * requested checkpoints. The NILFS_IOCTL_GET_CPINFO ioctl is used in * lscp utility and by nilfs_cleanerd daemon. * * Return value: count of nilfs_cpinfo structures in output buffer. */ static ssize_t nilfs_ioctl_do_get_cpinfo(struct the_nilfs *nilfs, __u64 *posp, int flags, void *buf, size_t size, size_t nmembs) { int ret; down_read(&nilfs->ns_segctor_sem); ret = nilfs_cpfile_get_cpinfo(nilfs->ns_cpfile, posp, flags, buf, size, nmembs); up_read(&nilfs->ns_segctor_sem); return ret; } /** * nilfs_ioctl_get_cpstat - get checkpoints statistics * @inode: inode object * @filp: file object * @cmd: ioctl's request code * @argp: pointer on argument from userspace * * Description: nilfs_ioctl_get_cpstat() returns information about checkpoints. * The NILFS_IOCTL_GET_CPSTAT ioctl is used by lscp, rmcp utilities * and by nilfs_cleanerd daemon. * * Return Value: On success, 0 is returned, and checkpoints information is * copied into userspace pointer @argp. On error, one of the following * negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. * * %-EFAULT - Failure during getting checkpoints statistics. */ static int nilfs_ioctl_get_cpstat(struct inode *inode, struct file *filp, unsigned int cmd, void __user *argp) { struct the_nilfs *nilfs = inode->i_sb->s_fs_info; struct nilfs_cpstat cpstat; int ret; down_read(&nilfs->ns_segctor_sem); ret = nilfs_cpfile_get_stat(nilfs->ns_cpfile, &cpstat); up_read(&nilfs->ns_segctor_sem); if (ret < 0) return ret; if (copy_to_user(argp, &cpstat, sizeof(cpstat))) ret = -EFAULT; return ret; } /** * nilfs_ioctl_do_get_suinfo - callback method getting segment usage info * @nilfs: nilfs object * @posp: pointer on array of segment numbers * @flags: *not used* * @buf: buffer for storing suinfo array * @size: size in bytes of one suinfo item in array * @nmembs: count of segment numbers and suinfos in array * * Description: nilfs_ioctl_do_get_suinfo() function returns segment usage * info about requested segments. The NILFS_IOCTL_GET_SUINFO ioctl is used * in lssu, nilfs_resize utilities and by nilfs_cleanerd daemon. * * Return value: count of nilfs_suinfo structures in output buffer. 
*/ static ssize_t nilfs_ioctl_do_get_suinfo(struct the_nilfs *nilfs, __u64 *posp, int flags, void *buf, size_t size, size_t nmembs) { int ret; down_read(&nilfs->ns_segctor_sem); ret = nilfs_sufile_get_suinfo(nilfs->ns_sufile, *posp, buf, size, nmembs); up_read(&nilfs->ns_segctor_sem); return ret; } /** * nilfs_ioctl_get_sustat - get segment usage statistics * @inode: inode object * @filp: file object * @cmd: ioctl's request code * @argp: pointer on argument from userspace * * Description: nilfs_ioctl_get_sustat() returns segment usage statistics. * The NILFS_IOCTL_GET_SUSTAT ioctl is used in lssu, nilfs_resize utilities * and by nilfs_cleanerd daemon. * * Return Value: On success, 0 is returned, and segment usage information is * copied into userspace pointer @argp. On error, one of the following * negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. * * %-EFAULT - Failure during getting segment usage statistics. */ static int nilfs_ioctl_get_sustat(struct inode *inode, struct file *filp, unsigned int cmd, void __user *argp) { struct the_nilfs *nilfs = inode->i_sb->s_fs_info; struct nilfs_sustat sustat; int ret; down_read(&nilfs->ns_segctor_sem); ret = nilfs_sufile_get_stat(nilfs->ns_sufile, &sustat); up_read(&nilfs->ns_segctor_sem); if (ret < 0) return ret; if (copy_to_user(argp, &sustat, sizeof(sustat))) ret = -EFAULT; return ret; } /** * nilfs_ioctl_do_get_vinfo - callback method getting virtual blocks info * @nilfs: nilfs object * @posp: *not used* * @flags: *not used* * @buf: buffer for storing array of nilfs_vinfo structures * @size: size in bytes of one vinfo item in array * @nmembs: count of vinfos in array * * Description: nilfs_ioctl_do_get_vinfo() function returns information * on virtual block addresses. The NILFS_IOCTL_GET_VINFO ioctl is used * by nilfs_cleanerd daemon. * * Return value: count of nilfs_vinfo structures in output buffer. */ static ssize_t nilfs_ioctl_do_get_vinfo(struct the_nilfs *nilfs, __u64 *posp, int flags, void *buf, size_t size, size_t nmembs) { int ret; down_read(&nilfs->ns_segctor_sem); ret = nilfs_dat_get_vinfo(nilfs->ns_dat, buf, size, nmembs); up_read(&nilfs->ns_segctor_sem); return ret; } /** * nilfs_ioctl_do_get_bdescs - callback method getting disk block descriptors * @nilfs: nilfs object * @posp: *not used* * @flags: *not used* * @buf: buffer for storing array of nilfs_bdesc structures * @size: size in bytes of one bdesc item in array * @nmembs: count of bdescs in array * * Description: nilfs_ioctl_do_get_bdescs() function returns information * about descriptors of disk block numbers. The NILFS_IOCTL_GET_BDESCS ioctl * is used by nilfs_cleanerd daemon. * * Return value: count of nilfs_bdescs structures in output buffer. 
*/ static ssize_t nilfs_ioctl_do_get_bdescs(struct the_nilfs *nilfs, __u64 *posp, int flags, void *buf, size_t size, size_t nmembs) { struct nilfs_bmap *bmap = NILFS_I(nilfs->ns_dat)->i_bmap; struct nilfs_bdesc *bdescs = buf; int ret, i; down_read(&nilfs->ns_segctor_sem); for (i = 0; i < nmembs; i++) { ret = nilfs_bmap_lookup_at_level(bmap, bdescs[i].bd_offset, bdescs[i].bd_level + 1, &bdescs[i].bd_blocknr); if (ret < 0) { if (ret != -ENOENT) { up_read(&nilfs->ns_segctor_sem); return ret; } bdescs[i].bd_blocknr = 0; } } up_read(&nilfs->ns_segctor_sem); return nmembs; } /** * nilfs_ioctl_get_bdescs - get disk block descriptors * @inode: inode object * @filp: file object * @cmd: ioctl's request code * @argp: pointer on argument from userspace * * Description: nilfs_ioctl_do_get_bdescs() function returns information * about descriptors of disk block numbers. The NILFS_IOCTL_GET_BDESCS ioctl * is used by nilfs_cleanerd daemon. * * Return Value: On success, 0 is returned, and disk block descriptors are * copied into userspace pointer @argp. On error, one of the following * negative error codes is returned. * * %-EINVAL - Invalid arguments from userspace. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. * * %-EFAULT - Failure during getting disk block descriptors. */ static int nilfs_ioctl_get_bdescs(struct inode *inode, struct file *filp, unsigned int cmd, void __user *argp) { struct the_nilfs *nilfs = inode->i_sb->s_fs_info; struct nilfs_argv argv; int ret; if (copy_from_user(&argv, argp, sizeof(argv))) return -EFAULT; if (argv.v_size != sizeof(struct nilfs_bdesc)) return -EINVAL; ret = nilfs_ioctl_wrap_copy(nilfs, &argv, _IOC_DIR(cmd), nilfs_ioctl_do_get_bdescs); if (ret < 0) return ret; if (copy_to_user(argp, &argv, sizeof(argv))) ret = -EFAULT; return ret; } /** * nilfs_ioctl_move_inode_block - prepare data/node block for moving by GC * @inode: inode object * @vdesc: descriptor of virtual block number * @buffers: list of moving buffers * * Description: nilfs_ioctl_move_inode_block() function registers data/node * buffer in the GC pagecache and submit read request. * * Return Value: On success, 0 is returned. On error, one of the following * negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. * * %-ENOENT - Requested block doesn't exist. * * %-EEXIST - Blocks conflict is detected. */ static int nilfs_ioctl_move_inode_block(struct inode *inode, struct nilfs_vdesc *vdesc, struct list_head *buffers) { struct buffer_head *bh; int ret; if (vdesc->vd_flags == 0) ret = nilfs_gccache_submit_read_data( inode, vdesc->vd_offset, vdesc->vd_blocknr, vdesc->vd_vblocknr, &bh); else ret = nilfs_gccache_submit_read_node( inode, vdesc->vd_blocknr, vdesc->vd_vblocknr, &bh); if (unlikely(ret < 0)) { if (ret == -ENOENT) nilfs_crit(inode->i_sb, "%s: invalid virtual block address (%s): ino=%llu, cno=%llu, offset=%llu, blocknr=%llu, vblocknr=%llu", __func__, vdesc->vd_flags ? "node" : "data", (unsigned long long)vdesc->vd_ino, (unsigned long long)vdesc->vd_cno, (unsigned long long)vdesc->vd_offset, (unsigned long long)vdesc->vd_blocknr, (unsigned long long)vdesc->vd_vblocknr); return ret; } if (unlikely(!list_empty(&bh->b_assoc_buffers))) { nilfs_crit(inode->i_sb, "%s: conflicting %s buffer: ino=%llu, cno=%llu, offset=%llu, blocknr=%llu, vblocknr=%llu", __func__, vdesc->vd_flags ? 
"node" : "data", (unsigned long long)vdesc->vd_ino, (unsigned long long)vdesc->vd_cno, (unsigned long long)vdesc->vd_offset, (unsigned long long)vdesc->vd_blocknr, (unsigned long long)vdesc->vd_vblocknr); brelse(bh); return -EEXIST; } list_add_tail(&bh->b_assoc_buffers, buffers); return 0; } /** * nilfs_ioctl_move_blocks - move valid inode's blocks during garbage collection * @sb: superblock object * @argv: vector of arguments from userspace * @buf: array of nilfs_vdesc structures * * Description: nilfs_ioctl_move_blocks() function reads valid data/node * blocks that garbage collector specified with the array of nilfs_vdesc * structures and stores them into page caches of GC inodes. * * Return Value: Number of processed nilfs_vdesc structures or * error code, otherwise. */ static int nilfs_ioctl_move_blocks(struct super_block *sb, struct nilfs_argv *argv, void *buf) { size_t nmembs = argv->v_nmembs; struct the_nilfs *nilfs = sb->s_fs_info; struct inode *inode; struct nilfs_vdesc *vdesc; struct buffer_head *bh, *n; LIST_HEAD(buffers); ino_t ino; __u64 cno; int i, ret; for (i = 0, vdesc = buf; i < nmembs; ) { ino = vdesc->vd_ino; cno = vdesc->vd_cno; inode = nilfs_iget_for_gc(sb, ino, cno); if (IS_ERR(inode)) { ret = PTR_ERR(inode); goto failed; } if (list_empty(&NILFS_I(inode)->i_dirty)) { /* * Add the inode to GC inode list. Garbage Collection * is serialized and no two processes manipulate the * list simultaneously. */ igrab(inode); list_add(&NILFS_I(inode)->i_dirty, &nilfs->ns_gc_inodes); } do { ret = nilfs_ioctl_move_inode_block(inode, vdesc, &buffers); if (unlikely(ret < 0)) { iput(inode); goto failed; } vdesc++; } while (++i < nmembs && vdesc->vd_ino == ino && vdesc->vd_cno == cno); iput(inode); /* The inode still remains in GC inode list */ } list_for_each_entry_safe(bh, n, &buffers, b_assoc_buffers) { ret = nilfs_gccache_wait_and_mark_dirty(bh); if (unlikely(ret < 0)) { WARN_ON(ret == -EEXIST); goto failed; } list_del_init(&bh->b_assoc_buffers); brelse(bh); } return nmembs; failed: list_for_each_entry_safe(bh, n, &buffers, b_assoc_buffers) { list_del_init(&bh->b_assoc_buffers); brelse(bh); } return ret; } /** * nilfs_ioctl_delete_checkpoints - delete checkpoints * @nilfs: nilfs object * @argv: vector of arguments from userspace * @buf: array of periods of checkpoints numbers * * Description: nilfs_ioctl_delete_checkpoints() function deletes checkpoints * in the period from p_start to p_end, excluding p_end itself. The checkpoints * which have been already deleted are ignored. * * Return Value: Number of processed nilfs_period structures or * error code, otherwise. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. * * %-EINVAL - invalid checkpoints. */ static int nilfs_ioctl_delete_checkpoints(struct the_nilfs *nilfs, struct nilfs_argv *argv, void *buf) { size_t nmembs = argv->v_nmembs; struct inode *cpfile = nilfs->ns_cpfile; struct nilfs_period *periods = buf; int ret, i; for (i = 0; i < nmembs; i++) { ret = nilfs_cpfile_delete_checkpoints( cpfile, periods[i].p_start, periods[i].p_end); if (ret < 0) return ret; } return nmembs; } /** * nilfs_ioctl_free_vblocknrs - free virtual block numbers * @nilfs: nilfs object * @argv: vector of arguments from userspace * @buf: array of virtual block numbers * * Description: nilfs_ioctl_free_vblocknrs() function frees * the virtual block numbers specified by @buf and @argv->v_nmembs. * * Return Value: Number of processed virtual block numbers or * error code, otherwise. * * %-EIO - I/O error. 
* * %-ENOMEM - Insufficient amount of memory available. * * %-ENOENT - The virtual block number have not been allocated. */ static int nilfs_ioctl_free_vblocknrs(struct the_nilfs *nilfs, struct nilfs_argv *argv, void *buf) { size_t nmembs = argv->v_nmembs; int ret; ret = nilfs_dat_freev(nilfs->ns_dat, buf, nmembs); return (ret < 0) ? ret : nmembs; } /** * nilfs_ioctl_mark_blocks_dirty - mark blocks dirty * @nilfs: nilfs object * @argv: vector of arguments from userspace * @buf: array of block descriptors * * Description: nilfs_ioctl_mark_blocks_dirty() function marks * metadata file or data blocks as dirty. * * Return Value: Number of processed block descriptors or * error code, otherwise. * * %-ENOMEM - Insufficient memory available. * * %-EIO - I/O error * * %-ENOENT - the specified block does not exist (hole block) */ static int nilfs_ioctl_mark_blocks_dirty(struct the_nilfs *nilfs, struct nilfs_argv *argv, void *buf) { size_t nmembs = argv->v_nmembs; struct nilfs_bmap *bmap = NILFS_I(nilfs->ns_dat)->i_bmap; struct nilfs_bdesc *bdescs = buf; struct buffer_head *bh; int ret, i; for (i = 0; i < nmembs; i++) { /* XXX: use macro or inline func to check liveness */ ret = nilfs_bmap_lookup_at_level(bmap, bdescs[i].bd_offset, bdescs[i].bd_level + 1, &bdescs[i].bd_blocknr); if (ret < 0) { if (ret != -ENOENT) return ret; bdescs[i].bd_blocknr = 0; } if (bdescs[i].bd_blocknr != bdescs[i].bd_oblocknr) /* skip dead block */ continue; if (bdescs[i].bd_level == 0) { ret = nilfs_mdt_get_block(nilfs->ns_dat, bdescs[i].bd_offset, false, NULL, &bh); if (unlikely(ret)) { WARN_ON(ret == -ENOENT); return ret; } mark_buffer_dirty(bh); nilfs_mdt_mark_dirty(nilfs->ns_dat); put_bh(bh); } else { ret = nilfs_bmap_mark(bmap, bdescs[i].bd_offset, bdescs[i].bd_level); if (ret < 0) { WARN_ON(ret == -ENOENT); return ret; } } } return nmembs; } int nilfs_ioctl_prepare_clean_segments(struct the_nilfs *nilfs, struct nilfs_argv *argv, void **kbufs) { const char *msg; int ret; ret = nilfs_ioctl_delete_checkpoints(nilfs, &argv[1], kbufs[1]); if (ret < 0) { /* * can safely abort because checkpoints can be removed * independently. */ msg = "cannot delete checkpoints"; goto failed; } ret = nilfs_ioctl_free_vblocknrs(nilfs, &argv[2], kbufs[2]); if (ret < 0) { /* * can safely abort because DAT file is updated atomically * using a copy-on-write technique. */ msg = "cannot delete virtual blocks from DAT file"; goto failed; } ret = nilfs_ioctl_mark_blocks_dirty(nilfs, &argv[3], kbufs[3]); if (ret < 0) { /* * can safely abort because the operation is nondestructive. */ msg = "cannot mark copying blocks dirty"; goto failed; } return 0; failed: nilfs_err(nilfs->ns_sb, "error %d preparing GC: %s", ret, msg); return ret; } /** * nilfs_ioctl_clean_segments - clean segments * @inode: inode object * @filp: file object * @cmd: ioctl's request code * @argp: pointer on argument from userspace * * Description: nilfs_ioctl_clean_segments() function makes garbage * collection operation in the environment of requested parameters * from userspace. The NILFS_IOCTL_CLEAN_SEGMENTS ioctl is used by * nilfs_cleanerd daemon. * * Return Value: On success, 0 is returned or error code, otherwise. 
*/ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp, unsigned int cmd, void __user *argp) { struct nilfs_argv argv[5]; static const size_t argsz[5] = { sizeof(struct nilfs_vdesc), sizeof(struct nilfs_period), sizeof(__u64), sizeof(struct nilfs_bdesc), sizeof(__u64), }; void __user *base; void *kbufs[5]; struct the_nilfs *nilfs; size_t len, nsegs; int n, ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; ret = mnt_want_write_file(filp); if (ret) return ret; ret = -EFAULT; if (copy_from_user(argv, argp, sizeof(argv))) goto out; ret = -EINVAL; nsegs = argv[4].v_nmembs; if (argv[4].v_size != argsz[4]) goto out; if (nsegs > UINT_MAX / sizeof(__u64)) goto out; /* * argv[4] points to segment numbers this ioctl cleans. We * use kmalloc() for its buffer because memory used for the * segment numbers is enough small. */ kbufs[4] = memdup_user((void __user *)(unsigned long)argv[4].v_base, nsegs * sizeof(__u64)); if (IS_ERR(kbufs[4])) { ret = PTR_ERR(kbufs[4]); goto out; } nilfs = inode->i_sb->s_fs_info; for (n = 0; n < 4; n++) { ret = -EINVAL; if (argv[n].v_size != argsz[n]) goto out_free; if (argv[n].v_nmembs > nsegs * nilfs->ns_blocks_per_segment) goto out_free; if (argv[n].v_nmembs >= UINT_MAX / argv[n].v_size) goto out_free; len = argv[n].v_size * argv[n].v_nmembs; base = (void __user *)(unsigned long)argv[n].v_base; if (len == 0) { kbufs[n] = NULL; continue; } kbufs[n] = vmalloc(len); if (!kbufs[n]) { ret = -ENOMEM; goto out_free; } if (copy_from_user(kbufs[n], base, len)) { ret = -EFAULT; vfree(kbufs[n]); goto out_free; } } /* * nilfs_ioctl_move_blocks() will call nilfs_iget_for_gc(), * which will operates an inode list without blocking. * To protect the list from concurrent operations, * nilfs_ioctl_move_blocks should be atomic operation. */ if (test_and_set_bit(THE_NILFS_GC_RUNNING, &nilfs->ns_flags)) { ret = -EBUSY; goto out_free; } ret = nilfs_ioctl_move_blocks(inode->i_sb, &argv[0], kbufs[0]); if (ret < 0) { nilfs_err(inode->i_sb, "error %d preparing GC: cannot read source blocks", ret); } else { if (nilfs_sb_need_update(nilfs)) set_nilfs_discontinued(nilfs); ret = nilfs_clean_segments(inode->i_sb, argv, kbufs); } nilfs_remove_all_gcinodes(nilfs); clear_nilfs_gc_running(nilfs); out_free: while (--n >= 0) vfree(kbufs[n]); kfree(kbufs[4]); out: mnt_drop_write_file(filp); return ret; } /** * nilfs_ioctl_sync - make a checkpoint * @inode: inode object * @filp: file object * @cmd: ioctl's request code * @argp: pointer on argument from userspace * * Description: nilfs_ioctl_sync() function constructs a logical segment * for checkpointing. This function guarantees that all modified data * and metadata are written out to the device when it successfully * returned. * * Return Value: On success, 0 is retured. On errors, one of the following * negative error code is returned. * * %-EROFS - Read only filesystem. * * %-EIO - I/O error * * %-ENOSPC - No space left on device (only in a panic state). * * %-ERESTARTSYS - Interrupted. * * %-ENOMEM - Insufficient memory available. * * %-EFAULT - Failure during execution of requested operation. 
*/ static int nilfs_ioctl_sync(struct inode *inode, struct file *filp, unsigned int cmd, void __user *argp) { __u64 cno; int ret; struct the_nilfs *nilfs; ret = nilfs_construct_segment(inode->i_sb); if (ret < 0) return ret; nilfs = inode->i_sb->s_fs_info; ret = nilfs_flush_device(nilfs); if (ret < 0) return ret; if (argp != NULL) { down_read(&nilfs->ns_segctor_sem); cno = nilfs->ns_cno - 1; up_read(&nilfs->ns_segctor_sem); if (copy_to_user(argp, &cno, sizeof(cno))) return -EFAULT; } return 0; } /** * nilfs_ioctl_resize - resize NILFS2 volume * @inode: inode object * @filp: file object * @argp: pointer on argument from userspace * * Return Value: On success, 0 is returned or error code, otherwise. */ static int nilfs_ioctl_resize(struct inode *inode, struct file *filp, void __user *argp) { __u64 newsize; int ret = -EPERM; if (!capable(CAP_SYS_ADMIN)) goto out; ret = mnt_want_write_file(filp); if (ret) goto out; ret = -EFAULT; if (copy_from_user(&newsize, argp, sizeof(newsize))) goto out_drop_write; ret = nilfs_resize_fs(inode->i_sb, newsize); out_drop_write: mnt_drop_write_file(filp); out: return ret; } /** * nilfs_ioctl_trim_fs() - trim ioctl handle function * @inode: inode object * @argp: pointer on argument from userspace * * Description: nilfs_ioctl_trim_fs is the FITRIM ioctl handle function. It * checks the arguments from userspace and calls nilfs_sufile_trim_fs, which * performs the actual trim operation. * * Return Value: On success, 0 is returned or negative error code, otherwise. */ static int nilfs_ioctl_trim_fs(struct inode *inode, void __user *argp) { struct the_nilfs *nilfs = inode->i_sb->s_fs_info; struct fstrim_range range; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (!bdev_max_discard_sectors(nilfs->ns_bdev)) return -EOPNOTSUPP; if (copy_from_user(&range, argp, sizeof(range))) return -EFAULT; range.minlen = max_t(u64, range.minlen, bdev_discard_granularity(nilfs->ns_bdev)); down_read(&nilfs->ns_segctor_sem); ret = nilfs_sufile_trim_fs(nilfs->ns_sufile, &range); up_read(&nilfs->ns_segctor_sem); if (ret < 0) return ret; if (copy_to_user(argp, &range, sizeof(range))) return -EFAULT; return 0; } /** * nilfs_ioctl_set_alloc_range - limit range of segments to be allocated * @inode: inode object * @argp: pointer on argument from userspace * * Description: nilfs_ioctl_set_alloc_range() function defines lower limit * of segments in bytes and upper limit of segments in bytes. * The NILFS_IOCTL_SET_ALLOC_RANGE is used by nilfs_resize utility. * * Return Value: On success, 0 is returned or error code, otherwise. 
*/ static int nilfs_ioctl_set_alloc_range(struct inode *inode, void __user *argp) { struct the_nilfs *nilfs = inode->i_sb->s_fs_info; __u64 range[2]; __u64 minseg, maxseg; unsigned long segbytes; int ret = -EPERM; if (!capable(CAP_SYS_ADMIN)) goto out; ret = -EFAULT; if (copy_from_user(range, argp, sizeof(__u64[2]))) goto out; ret = -ERANGE; if (range[1] > bdev_nr_bytes(inode->i_sb->s_bdev)) goto out; segbytes = nilfs->ns_blocks_per_segment * nilfs->ns_blocksize; minseg = range[0] + segbytes - 1; do_div(minseg, segbytes); if (range[1] < 4096) goto out; maxseg = NILFS_SB2_OFFSET_BYTES(range[1]); if (maxseg < segbytes) goto out; do_div(maxseg, segbytes); maxseg--; ret = nilfs_sufile_set_alloc_range(nilfs->ns_sufile, minseg, maxseg); out: return ret; } /** * nilfs_ioctl_get_info - wrapping function of get metadata info * @inode: inode object * @filp: file object * @cmd: ioctl's request code * @argp: pointer on argument from userspace * @membsz: size of an item in bytes * @dofunc: concrete function of getting metadata info * * Description: nilfs_ioctl_get_info() gets metadata info by means of * calling dofunc() function. * * Return Value: On success, 0 is returned and requested metadata info * is copied into userspace. On error, one of the following * negative error codes is returned. * * %-EINVAL - Invalid arguments from userspace. * * %-ENOMEM - Insufficient amount of memory available. * * %-EFAULT - Failure during execution of requested operation. */ static int nilfs_ioctl_get_info(struct inode *inode, struct file *filp, unsigned int cmd, void __user *argp, size_t membsz, ssize_t (*dofunc)(struct the_nilfs *, __u64 *, int, void *, size_t, size_t)) { struct the_nilfs *nilfs = inode->i_sb->s_fs_info; struct nilfs_argv argv; int ret; if (copy_from_user(&argv, argp, sizeof(argv))) return -EFAULT; if (argv.v_size < membsz) return -EINVAL; ret = nilfs_ioctl_wrap_copy(nilfs, &argv, _IOC_DIR(cmd), dofunc); if (ret < 0) return ret; if (copy_to_user(argp, &argv, sizeof(argv))) ret = -EFAULT; return ret; } /** * nilfs_ioctl_set_suinfo - set segment usage info * @inode: inode object * @filp: file object * @cmd: ioctl's request code * @argp: pointer on argument from userspace * * Description: Expects an array of nilfs_suinfo_update structures * encapsulated in nilfs_argv and updates the segment usage info * according to the flags in nilfs_suinfo_update. * * Return Value: On success, 0 is returned. On error, one of the * following negative error codes is returned. * * %-EPERM - Not enough permissions * * %-EFAULT - Error copying input data * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. 
* * %-EINVAL - Invalid values in input (segment number, flags or nblocks) */ static int nilfs_ioctl_set_suinfo(struct inode *inode, struct file *filp, unsigned int cmd, void __user *argp) { struct the_nilfs *nilfs = inode->i_sb->s_fs_info; struct nilfs_transaction_info ti; struct nilfs_argv argv; size_t len; void __user *base; void *kbuf; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; ret = mnt_want_write_file(filp); if (ret) return ret; ret = -EFAULT; if (copy_from_user(&argv, argp, sizeof(argv))) goto out; ret = -EINVAL; if (argv.v_size < sizeof(struct nilfs_suinfo_update)) goto out; if (argv.v_nmembs > nilfs->ns_nsegments) goto out; if (argv.v_nmembs >= UINT_MAX / argv.v_size) goto out; len = argv.v_size * argv.v_nmembs; if (!len) { ret = 0; goto out; } base = (void __user *)(unsigned long)argv.v_base; kbuf = vmalloc(len); if (!kbuf) { ret = -ENOMEM; goto out; } if (copy_from_user(kbuf, base, len)) { ret = -EFAULT; goto out_free; } nilfs_transaction_begin(inode->i_sb, &ti, 0); ret = nilfs_sufile_set_suinfo(nilfs->ns_sufile, kbuf, argv.v_size, argv.v_nmembs); if (unlikely(ret < 0)) nilfs_transaction_abort(inode->i_sb); else nilfs_transaction_commit(inode->i_sb); /* never fails */ out_free: vfree(kbuf); out: mnt_drop_write_file(filp); return ret; } long nilfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct inode *inode = file_inode(filp); void __user *argp = (void __user *)arg; switch (cmd) { case FS_IOC_GETVERSION: return nilfs_ioctl_getversion(inode, argp); case NILFS_IOCTL_CHANGE_CPMODE: return nilfs_ioctl_change_cpmode(inode, filp, cmd, argp); case NILFS_IOCTL_DELETE_CHECKPOINT: return nilfs_ioctl_delete_checkpoint(inode, filp, cmd, argp); case NILFS_IOCTL_GET_CPINFO: return nilfs_ioctl_get_info(inode, filp, cmd, argp, sizeof(struct nilfs_cpinfo), nilfs_ioctl_do_get_cpinfo); case NILFS_IOCTL_GET_CPSTAT: return nilfs_ioctl_get_cpstat(inode, filp, cmd, argp); case NILFS_IOCTL_GET_SUINFO: return nilfs_ioctl_get_info(inode, filp, cmd, argp, sizeof(struct nilfs_suinfo), nilfs_ioctl_do_get_suinfo); case NILFS_IOCTL_SET_SUINFO: return nilfs_ioctl_set_suinfo(inode, filp, cmd, argp); case NILFS_IOCTL_GET_SUSTAT: return nilfs_ioctl_get_sustat(inode, filp, cmd, argp); case NILFS_IOCTL_GET_VINFO: return nilfs_ioctl_get_info(inode, filp, cmd, argp, sizeof(struct nilfs_vinfo), nilfs_ioctl_do_get_vinfo); case NILFS_IOCTL_GET_BDESCS: return nilfs_ioctl_get_bdescs(inode, filp, cmd, argp); case NILFS_IOCTL_CLEAN_SEGMENTS: return nilfs_ioctl_clean_segments(inode, filp, cmd, argp); case NILFS_IOCTL_SYNC: return nilfs_ioctl_sync(inode, filp, cmd, argp); case NILFS_IOCTL_RESIZE: return nilfs_ioctl_resize(inode, filp, argp); case NILFS_IOCTL_SET_ALLOC_RANGE: return nilfs_ioctl_set_alloc_range(inode, argp); case FITRIM: return nilfs_ioctl_trim_fs(inode, argp); default: return -ENOTTY; } } #ifdef CONFIG_COMPAT long nilfs_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { switch (cmd) { case FS_IOC32_GETVERSION: cmd = FS_IOC_GETVERSION; break; case NILFS_IOCTL_CHANGE_CPMODE: case NILFS_IOCTL_DELETE_CHECKPOINT: case NILFS_IOCTL_GET_CPINFO: case NILFS_IOCTL_GET_CPSTAT: case NILFS_IOCTL_GET_SUINFO: case NILFS_IOCTL_SET_SUINFO: case NILFS_IOCTL_GET_SUSTAT: case NILFS_IOCTL_GET_VINFO: case NILFS_IOCTL_GET_BDESCS: case NILFS_IOCTL_CLEAN_SEGMENTS: case NILFS_IOCTL_SYNC: case NILFS_IOCTL_RESIZE: case NILFS_IOCTL_SET_ALLOC_RANGE: case FITRIM: break; default: return -ENOIOCTLCMD; } return nilfs_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)); } #endif
linux-master
fs/nilfs2/ioctl.c
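The ioctl dispatch above is the whole userspace surface of nilfs2: checkpoint control for chcp/rmcp/lscp, the GC entry points used by nilfs_cleanerd, FITRIM, and NILFS_IOCTL_SYNC for forcing a checkpoint. Below is a minimal userspace sketch of the sync call, not part of the kernel sources: it assumes a nilfs2 volume mounted at /mnt/nilfs (hypothetical path) and the UAPI header <linux/nilfs2_api.h> for the NILFS_IOCTL_SYNC definition, matching the nilfs_ioctl_sync() handler above, which optionally writes the new checkpoint number back through the argument pointer.

/*
 * Hedged sketch: force a checkpoint on a nilfs2 mount and print its number.
 * /mnt/nilfs is a hypothetical mount point; <linux/nilfs2_api.h> is assumed
 * to provide NILFS_IOCTL_SYNC as in current kernel UAPI headers.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <linux/nilfs2_api.h>

int main(void)
{
	__u64 cno = 0;
	int fd = open("/mnt/nilfs", O_RDONLY);	/* hypothetical mount point */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* argp may be NULL; passing &cno also retrieves the checkpoint number. */
	if (ioctl(fd, NILFS_IOCTL_SYNC, &cno) < 0) {
		perror("NILFS_IOCTL_SYNC");
		close(fd);
		return 1;
	}
	printf("checkpoint %llu written\n", (unsigned long long)cno);
	close(fd);
	return 0;
}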
// SPDX-License-Identifier: GPL-2.0+ /* * Meta data file for NILFS * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * * Written by Ryusuke Konishi. */ #include <linux/buffer_head.h> #include <linux/mpage.h> #include <linux/mm.h> #include <linux/writeback.h> #include <linux/backing-dev.h> #include <linux/swap.h> #include <linux/slab.h> #include "nilfs.h" #include "btnode.h" #include "segment.h" #include "page.h" #include "mdt.h" #include "alloc.h" /* nilfs_palloc_destroy_cache() */ #include <trace/events/nilfs2.h> #define NILFS_MDT_MAX_RA_BLOCKS (16 - 1) static int nilfs_mdt_insert_new_block(struct inode *inode, unsigned long block, struct buffer_head *bh, void (*init_block)(struct inode *, struct buffer_head *, void *)) { struct nilfs_inode_info *ii = NILFS_I(inode); void *kaddr; int ret; /* Caller exclude read accesses using page lock */ /* set_buffer_new(bh); */ bh->b_blocknr = 0; ret = nilfs_bmap_insert(ii->i_bmap, block, (unsigned long)bh); if (unlikely(ret)) return ret; set_buffer_mapped(bh); kaddr = kmap_atomic(bh->b_page); memset(kaddr + bh_offset(bh), 0, i_blocksize(inode)); if (init_block) init_block(inode, bh, kaddr); flush_dcache_page(bh->b_page); kunmap_atomic(kaddr); set_buffer_uptodate(bh); mark_buffer_dirty(bh); nilfs_mdt_mark_dirty(inode); trace_nilfs2_mdt_insert_new_block(inode, inode->i_ino, block); return 0; } static int nilfs_mdt_create_block(struct inode *inode, unsigned long block, struct buffer_head **out_bh, void (*init_block)(struct inode *, struct buffer_head *, void *)) { struct super_block *sb = inode->i_sb; struct nilfs_transaction_info ti; struct buffer_head *bh; int err; nilfs_transaction_begin(sb, &ti, 0); err = -ENOMEM; bh = nilfs_grab_buffer(inode, inode->i_mapping, block, 0); if (unlikely(!bh)) goto failed_unlock; err = -EEXIST; if (buffer_uptodate(bh)) goto failed_bh; wait_on_buffer(bh); if (buffer_uptodate(bh)) goto failed_bh; bh->b_bdev = sb->s_bdev; err = nilfs_mdt_insert_new_block(inode, block, bh, init_block); if (likely(!err)) { get_bh(bh); *out_bh = bh; } failed_bh: unlock_page(bh->b_page); put_page(bh->b_page); brelse(bh); failed_unlock: if (likely(!err)) err = nilfs_transaction_commit(sb); else nilfs_transaction_abort(sb); return err; } static int nilfs_mdt_submit_block(struct inode *inode, unsigned long blkoff, blk_opf_t opf, struct buffer_head **out_bh) { struct buffer_head *bh; __u64 blknum = 0; int ret = -ENOMEM; bh = nilfs_grab_buffer(inode, inode->i_mapping, blkoff, 0); if (unlikely(!bh)) goto failed; ret = -EEXIST; /* internal code */ if (buffer_uptodate(bh)) goto out; if (opf & REQ_RAHEAD) { if (!trylock_buffer(bh)) { ret = -EBUSY; goto failed_bh; } } else /* opf == REQ_OP_READ */ lock_buffer(bh); if (buffer_uptodate(bh)) { unlock_buffer(bh); goto out; } ret = nilfs_bmap_lookup(NILFS_I(inode)->i_bmap, blkoff, &blknum); if (unlikely(ret)) { unlock_buffer(bh); goto failed_bh; } map_bh(bh, inode->i_sb, (sector_t)blknum); bh->b_end_io = end_buffer_read_sync; get_bh(bh); submit_bh(opf, bh); ret = 0; trace_nilfs2_mdt_submit_block(inode, inode->i_ino, blkoff, opf & REQ_OP_MASK); out: get_bh(bh); *out_bh = bh; failed_bh: unlock_page(bh->b_page); put_page(bh->b_page); brelse(bh); failed: return ret; } static int nilfs_mdt_read_block(struct inode *inode, unsigned long block, int readahead, struct buffer_head **out_bh) { struct buffer_head *first_bh, *bh; unsigned long blkoff; int i, nr_ra_blocks = NILFS_MDT_MAX_RA_BLOCKS; int err; err = nilfs_mdt_submit_block(inode, block, REQ_OP_READ, &first_bh); if (err == -EEXIST) /* 
internal code */ goto out; if (unlikely(err)) goto failed; if (readahead) { blkoff = block + 1; for (i = 0; i < nr_ra_blocks; i++, blkoff++) { err = nilfs_mdt_submit_block(inode, blkoff, REQ_OP_READ | REQ_RAHEAD, &bh); if (likely(!err || err == -EEXIST)) brelse(bh); else if (err != -EBUSY) break; /* abort readahead if bmap lookup failed */ if (!buffer_locked(first_bh)) goto out_no_wait; } } wait_on_buffer(first_bh); out_no_wait: err = -EIO; if (!buffer_uptodate(first_bh)) { nilfs_err(inode->i_sb, "I/O error reading meta-data file (ino=%lu, block-offset=%lu)", inode->i_ino, block); goto failed_bh; } out: *out_bh = first_bh; return 0; failed_bh: brelse(first_bh); failed: return err; } /** * nilfs_mdt_get_block - read or create a buffer on meta data file. * @inode: inode of the meta data file * @blkoff: block offset * @create: create flag * @init_block: initializer used for newly allocated block * @out_bh: output of a pointer to the buffer_head * * nilfs_mdt_get_block() looks up the specified buffer and tries to create * a new buffer if @create is not zero. On success, the returned buffer is * assured to be either existing or formatted using a buffer lock on success. * @out_bh is substituted only when zero is returned. * * Return Value: On success, it returns 0. On error, the following negative * error code is returned. * * %-ENOMEM - Insufficient memory available. * * %-EIO - I/O error * * %-ENOENT - the specified block does not exist (hole block) * * %-EROFS - Read only filesystem (for create mode) */ int nilfs_mdt_get_block(struct inode *inode, unsigned long blkoff, int create, void (*init_block)(struct inode *, struct buffer_head *, void *), struct buffer_head **out_bh) { int ret; /* Should be rewritten with merging nilfs_mdt_read_block() */ retry: ret = nilfs_mdt_read_block(inode, blkoff, !create, out_bh); if (!create || ret != -ENOENT) return ret; ret = nilfs_mdt_create_block(inode, blkoff, out_bh, init_block); if (unlikely(ret == -EEXIST)) { /* create = 0; */ /* limit read-create loop retries */ goto retry; } return ret; } /** * nilfs_mdt_find_block - find and get a buffer on meta data file. * @inode: inode of the meta data file * @start: start block offset (inclusive) * @end: end block offset (inclusive) * @blkoff: block offset * @out_bh: place to store a pointer to buffer_head struct * * nilfs_mdt_find_block() looks up an existing block in range of * [@start, @end] and stores pointer to a buffer head of the block to * @out_bh, and block offset to @blkoff, respectively. @out_bh and * @blkoff are substituted only when zero is returned. * * Return Value: On success, it returns 0. On error, the following negative * error code is returned. * * %-ENOMEM - Insufficient memory available. * * %-EIO - I/O error * * %-ENOENT - no block was found in the range */ int nilfs_mdt_find_block(struct inode *inode, unsigned long start, unsigned long end, unsigned long *blkoff, struct buffer_head **out_bh) { __u64 next; int ret; if (unlikely(start > end)) return -ENOENT; ret = nilfs_mdt_read_block(inode, start, true, out_bh); if (!ret) { *blkoff = start; goto out; } if (unlikely(ret != -ENOENT || start == ULONG_MAX)) goto out; ret = nilfs_bmap_seek_key(NILFS_I(inode)->i_bmap, start + 1, &next); if (!ret) { if (next <= end) { ret = nilfs_mdt_read_block(inode, next, true, out_bh); if (!ret) *blkoff = next; } else { ret = -ENOENT; } } out: return ret; } /** * nilfs_mdt_delete_block - make a hole on the meta data file. 
* @inode: inode of the meta data file * @block: block offset * * Return Value: On success, zero is returned. * On error, one of the following negative error code is returned. * * %-ENOMEM - Insufficient memory available. * * %-EIO - I/O error */ int nilfs_mdt_delete_block(struct inode *inode, unsigned long block) { struct nilfs_inode_info *ii = NILFS_I(inode); int err; err = nilfs_bmap_delete(ii->i_bmap, block); if (!err || err == -ENOENT) { nilfs_mdt_mark_dirty(inode); nilfs_mdt_forget_block(inode, block); } return err; } /** * nilfs_mdt_forget_block - discard dirty state and try to remove the page * @inode: inode of the meta data file * @block: block offset * * nilfs_mdt_forget_block() clears a dirty flag of the specified buffer, and * tries to release the page including the buffer from a page cache. * * Return Value: On success, 0 is returned. On error, one of the following * negative error code is returned. * * %-EBUSY - page has an active buffer. * * %-ENOENT - page cache has no page addressed by the offset. */ int nilfs_mdt_forget_block(struct inode *inode, unsigned long block) { pgoff_t index = (pgoff_t)block >> (PAGE_SHIFT - inode->i_blkbits); struct page *page; unsigned long first_block; int ret = 0; int still_dirty; page = find_lock_page(inode->i_mapping, index); if (!page) return -ENOENT; wait_on_page_writeback(page); first_block = (unsigned long)index << (PAGE_SHIFT - inode->i_blkbits); if (page_has_buffers(page)) { struct buffer_head *bh; bh = nilfs_page_get_nth_block(page, block - first_block); nilfs_forget_buffer(bh); } still_dirty = PageDirty(page); unlock_page(page); put_page(page); if (still_dirty || invalidate_inode_pages2_range(inode->i_mapping, index, index) != 0) ret = -EBUSY; return ret; } int nilfs_mdt_fetch_dirty(struct inode *inode) { struct nilfs_inode_info *ii = NILFS_I(inode); if (nilfs_bmap_test_and_clear_dirty(ii->i_bmap)) { set_bit(NILFS_I_DIRTY, &ii->i_state); return 1; } return test_bit(NILFS_I_DIRTY, &ii->i_state); } static int nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc) { struct inode *inode = page->mapping->host; struct super_block *sb; int err = 0; if (inode && sb_rdonly(inode->i_sb)) { /* * It means that filesystem was remounted in read-only * mode because of error or metadata corruption. But we * have dirty pages that try to be flushed in background. * So, here we simply discard this dirty page. 
*/ nilfs_clear_dirty_page(page, false); unlock_page(page); return -EROFS; } redirty_page_for_writepage(wbc, page); unlock_page(page); if (!inode) return 0; sb = inode->i_sb; if (wbc->sync_mode == WB_SYNC_ALL) err = nilfs_construct_segment(sb); else if (wbc->for_reclaim) nilfs_flush_segment(sb, inode->i_ino); return err; } static const struct address_space_operations def_mdt_aops = { .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .writepage = nilfs_mdt_write_page, }; static const struct inode_operations def_mdt_iops; static const struct file_operations def_mdt_fops; int nilfs_mdt_init(struct inode *inode, gfp_t gfp_mask, size_t objsz) { struct nilfs_mdt_info *mi; mi = kzalloc(max(sizeof(*mi), objsz), GFP_NOFS); if (!mi) return -ENOMEM; init_rwsem(&mi->mi_sem); inode->i_private = mi; inode->i_mode = S_IFREG; mapping_set_gfp_mask(inode->i_mapping, gfp_mask); inode->i_op = &def_mdt_iops; inode->i_fop = &def_mdt_fops; inode->i_mapping->a_ops = &def_mdt_aops; return 0; } /** * nilfs_mdt_clear - do cleanup for the metadata file * @inode: inode of the metadata file */ void nilfs_mdt_clear(struct inode *inode) { struct nilfs_mdt_info *mdi = NILFS_MDT(inode); struct nilfs_shadow_map *shadow = mdi->mi_shadow; if (mdi->mi_palloc_cache) nilfs_palloc_destroy_cache(inode); if (shadow) { struct inode *s_inode = shadow->inode; shadow->inode = NULL; iput(s_inode); mdi->mi_shadow = NULL; } } /** * nilfs_mdt_destroy - release resources used by the metadata file * @inode: inode of the metadata file */ void nilfs_mdt_destroy(struct inode *inode) { struct nilfs_mdt_info *mdi = NILFS_MDT(inode); kfree(mdi->mi_bgl); /* kfree(NULL) is safe */ kfree(mdi); } void nilfs_mdt_set_entry_size(struct inode *inode, unsigned int entry_size, unsigned int header_size) { struct nilfs_mdt_info *mi = NILFS_MDT(inode); mi->mi_entry_size = entry_size; mi->mi_entries_per_block = i_blocksize(inode) / entry_size; mi->mi_first_entry_offset = DIV_ROUND_UP(header_size, entry_size); } /** * nilfs_mdt_setup_shadow_map - setup shadow map and bind it to metadata file * @inode: inode of the metadata file * @shadow: shadow mapping */ int nilfs_mdt_setup_shadow_map(struct inode *inode, struct nilfs_shadow_map *shadow) { struct nilfs_mdt_info *mi = NILFS_MDT(inode); struct inode *s_inode; INIT_LIST_HEAD(&shadow->frozen_buffers); s_inode = nilfs_iget_for_shadow(inode); if (IS_ERR(s_inode)) return PTR_ERR(s_inode); shadow->inode = s_inode; mi->mi_shadow = shadow; return 0; } /** * nilfs_mdt_save_to_shadow_map - copy bmap and dirty pages to shadow map * @inode: inode of the metadata file */ int nilfs_mdt_save_to_shadow_map(struct inode *inode) { struct nilfs_mdt_info *mi = NILFS_MDT(inode); struct nilfs_inode_info *ii = NILFS_I(inode); struct nilfs_shadow_map *shadow = mi->mi_shadow; struct inode *s_inode = shadow->inode; int ret; ret = nilfs_copy_dirty_pages(s_inode->i_mapping, inode->i_mapping); if (ret) goto out; ret = nilfs_copy_dirty_pages(NILFS_I(s_inode)->i_assoc_inode->i_mapping, ii->i_assoc_inode->i_mapping); if (ret) goto out; nilfs_bmap_save(ii->i_bmap, &shadow->bmap_store); out: return ret; } int nilfs_mdt_freeze_buffer(struct inode *inode, struct buffer_head *bh) { struct nilfs_shadow_map *shadow = NILFS_MDT(inode)->mi_shadow; struct buffer_head *bh_frozen; struct page *page; int blkbits = inode->i_blkbits; page = grab_cache_page(shadow->inode->i_mapping, bh->b_folio->index); if (!page) return -ENOMEM; if (!page_has_buffers(page)) create_empty_buffers(page, 1 << blkbits, 0); bh_frozen = 
nilfs_page_get_nth_block(page, bh_offset(bh) >> blkbits); if (!buffer_uptodate(bh_frozen)) nilfs_copy_buffer(bh_frozen, bh); if (list_empty(&bh_frozen->b_assoc_buffers)) { list_add_tail(&bh_frozen->b_assoc_buffers, &shadow->frozen_buffers); set_buffer_nilfs_redirected(bh); } else { brelse(bh_frozen); /* already frozen */ } unlock_page(page); put_page(page); return 0; } struct buffer_head * nilfs_mdt_get_frozen_buffer(struct inode *inode, struct buffer_head *bh) { struct nilfs_shadow_map *shadow = NILFS_MDT(inode)->mi_shadow; struct buffer_head *bh_frozen = NULL; struct page *page; int n; page = find_lock_page(shadow->inode->i_mapping, bh->b_folio->index); if (page) { if (page_has_buffers(page)) { n = bh_offset(bh) >> inode->i_blkbits; bh_frozen = nilfs_page_get_nth_block(page, n); } unlock_page(page); put_page(page); } return bh_frozen; } static void nilfs_release_frozen_buffers(struct nilfs_shadow_map *shadow) { struct list_head *head = &shadow->frozen_buffers; struct buffer_head *bh; while (!list_empty(head)) { bh = list_first_entry(head, struct buffer_head, b_assoc_buffers); list_del_init(&bh->b_assoc_buffers); brelse(bh); /* drop ref-count to make it releasable */ } } /** * nilfs_mdt_restore_from_shadow_map - restore dirty pages and bmap state * @inode: inode of the metadata file */ void nilfs_mdt_restore_from_shadow_map(struct inode *inode) { struct nilfs_mdt_info *mi = NILFS_MDT(inode); struct nilfs_inode_info *ii = NILFS_I(inode); struct nilfs_shadow_map *shadow = mi->mi_shadow; down_write(&mi->mi_sem); if (mi->mi_palloc_cache) nilfs_palloc_clear_cache(inode); nilfs_clear_dirty_pages(inode->i_mapping, true); nilfs_copy_back_pages(inode->i_mapping, shadow->inode->i_mapping); nilfs_clear_dirty_pages(ii->i_assoc_inode->i_mapping, true); nilfs_copy_back_pages(ii->i_assoc_inode->i_mapping, NILFS_I(shadow->inode)->i_assoc_inode->i_mapping); nilfs_bmap_restore(ii->i_bmap, &shadow->bmap_store); up_write(&mi->mi_sem); } /** * nilfs_mdt_clear_shadow_map - truncate pages in shadow map caches * @inode: inode of the metadata file */ void nilfs_mdt_clear_shadow_map(struct inode *inode) { struct nilfs_mdt_info *mi = NILFS_MDT(inode); struct nilfs_shadow_map *shadow = mi->mi_shadow; struct inode *shadow_btnc_inode = NILFS_I(shadow->inode)->i_assoc_inode; down_write(&mi->mi_sem); nilfs_release_frozen_buffers(shadow); truncate_inode_pages(shadow->inode->i_mapping, 0); truncate_inode_pages(shadow_btnc_inode->i_mapping, 0); up_write(&mi->mi_sem); }
linux-master
fs/nilfs2/mdt.c
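nilfs_mdt_forget_block() in the file above turns a per-file block offset into a page-cache index and an in-page buffer index using the difference between the page shift and the inode's block bits. The standalone sketch below reproduces only that arithmetic for illustration; the 4096-byte page and 1024-byte block sizes are example assumptions, not values read from the kernel.

/*
 * Hedged sketch of the offset arithmetic in nilfs_mdt_forget_block():
 * page_index = block >> (PAGE_SHIFT - blkbits), and the buffer within the
 * page is block - (page_index << (PAGE_SHIFT - blkbits)).
 * PAGE_SHIFT_EX and BLKBITS_EX are example values, not kernel constants.
 */
#include <stdio.h>

#define PAGE_SHIFT_EX	12	/* 4096-byte pages (assumption for the example) */
#define BLKBITS_EX	10	/* 1024-byte metadata blocks (assumption) */

int main(void)
{
	unsigned long block = 10;
	unsigned long shift = PAGE_SHIFT_EX - BLKBITS_EX;
	unsigned long page_index = block >> shift;
	unsigned long first_block = page_index << shift;
	unsigned long nth = block - first_block;

	/* With 4 blocks per page, block 10 lands in page 2 as its buffer 2. */
	printf("block %lu -> page %lu, buffer %lu\n", block, page_index, nth);
	return 0;
}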
// SPDX-License-Identifier: GPL-2.0+ /* * NILFS block mapping. * * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. * * Written by Koji Sato. */ #include <linux/fs.h> #include <linux/string.h> #include <linux/errno.h> #include "nilfs.h" #include "bmap.h" #include "btree.h" #include "direct.h" #include "btnode.h" #include "mdt.h" #include "dat.h" #include "alloc.h" struct inode *nilfs_bmap_get_dat(const struct nilfs_bmap *bmap) { struct the_nilfs *nilfs = bmap->b_inode->i_sb->s_fs_info; return nilfs->ns_dat; } static int nilfs_bmap_convert_error(struct nilfs_bmap *bmap, const char *fname, int err) { struct inode *inode = bmap->b_inode; if (err == -EINVAL) { __nilfs_error(inode->i_sb, fname, "broken bmap (inode number=%lu)", inode->i_ino); err = -EIO; } return err; } /** * nilfs_bmap_lookup_at_level - find a data block or node block * @bmap: bmap * @key: key * @level: level * @ptrp: place to store the value associated to @key * * Description: nilfs_bmap_lookup_at_level() finds a record whose key * matches @key in the block at @level of the bmap. * * Return Value: On success, 0 is returned and the record associated with @key * is stored in the place pointed by @ptrp. On error, one of the following * negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. * * %-ENOENT - A record associated with @key does not exist. */ int nilfs_bmap_lookup_at_level(struct nilfs_bmap *bmap, __u64 key, int level, __u64 *ptrp) { sector_t blocknr; int ret; down_read(&bmap->b_sem); ret = bmap->b_ops->bop_lookup(bmap, key, level, ptrp); if (ret < 0) goto out; if (NILFS_BMAP_USE_VBN(bmap)) { ret = nilfs_dat_translate(nilfs_bmap_get_dat(bmap), *ptrp, &blocknr); if (!ret) *ptrp = blocknr; else if (ret == -ENOENT) { /* * If there was no valid entry in DAT for the block * address obtained by b_ops->bop_lookup, then pass * internal code -EINVAL to nilfs_bmap_convert_error * to treat it as metadata corruption. */ ret = -EINVAL; } } out: up_read(&bmap->b_sem); return nilfs_bmap_convert_error(bmap, __func__, ret); } int nilfs_bmap_lookup_contig(struct nilfs_bmap *bmap, __u64 key, __u64 *ptrp, unsigned int maxblocks) { int ret; down_read(&bmap->b_sem); ret = bmap->b_ops->bop_lookup_contig(bmap, key, ptrp, maxblocks); up_read(&bmap->b_sem); return nilfs_bmap_convert_error(bmap, __func__, ret); } static int nilfs_bmap_do_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr) { __u64 keys[NILFS_BMAP_SMALL_HIGH + 1]; __u64 ptrs[NILFS_BMAP_SMALL_HIGH + 1]; int ret, n; if (bmap->b_ops->bop_check_insert != NULL) { ret = bmap->b_ops->bop_check_insert(bmap, key); if (ret > 0) { n = bmap->b_ops->bop_gather_data( bmap, keys, ptrs, NILFS_BMAP_SMALL_HIGH + 1); if (n < 0) return n; ret = nilfs_btree_convert_and_insert( bmap, key, ptr, keys, ptrs, n); if (ret == 0) bmap->b_u.u_flags |= NILFS_BMAP_LARGE; return ret; } else if (ret < 0) return ret; } return bmap->b_ops->bop_insert(bmap, key, ptr); } /** * nilfs_bmap_insert - insert a new key-record pair into a bmap * @bmap: bmap * @key: key * @rec: record * * Description: nilfs_bmap_insert() inserts the new key-record pair specified * by @key and @rec into @bmap. * * Return Value: On success, 0 is returned. On error, one of the following * negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. * * %-EEXIST - A record associated with @key already exist. 
*/ int nilfs_bmap_insert(struct nilfs_bmap *bmap, __u64 key, unsigned long rec) { int ret; down_write(&bmap->b_sem); ret = nilfs_bmap_do_insert(bmap, key, rec); up_write(&bmap->b_sem); return nilfs_bmap_convert_error(bmap, __func__, ret); } static int nilfs_bmap_do_delete(struct nilfs_bmap *bmap, __u64 key) { __u64 keys[NILFS_BMAP_LARGE_LOW + 1]; __u64 ptrs[NILFS_BMAP_LARGE_LOW + 1]; int ret, n; if (bmap->b_ops->bop_check_delete != NULL) { ret = bmap->b_ops->bop_check_delete(bmap, key); if (ret > 0) { n = bmap->b_ops->bop_gather_data( bmap, keys, ptrs, NILFS_BMAP_LARGE_LOW + 1); if (n < 0) return n; ret = nilfs_direct_delete_and_convert( bmap, key, keys, ptrs, n); if (ret == 0) bmap->b_u.u_flags &= ~NILFS_BMAP_LARGE; return ret; } else if (ret < 0) return ret; } return bmap->b_ops->bop_delete(bmap, key); } /** * nilfs_bmap_seek_key - seek a valid entry and return its key * @bmap: bmap struct * @start: start key number * @keyp: place to store valid key * * Description: nilfs_bmap_seek_key() seeks a valid key on @bmap * starting from @start, and stores it to @keyp if found. * * Return Value: On success, 0 is returned. On error, one of the following * negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. * * %-ENOENT - No valid entry was found */ int nilfs_bmap_seek_key(struct nilfs_bmap *bmap, __u64 start, __u64 *keyp) { int ret; down_read(&bmap->b_sem); ret = bmap->b_ops->bop_seek_key(bmap, start, keyp); up_read(&bmap->b_sem); if (ret < 0) ret = nilfs_bmap_convert_error(bmap, __func__, ret); return ret; } int nilfs_bmap_last_key(struct nilfs_bmap *bmap, __u64 *keyp) { int ret; down_read(&bmap->b_sem); ret = bmap->b_ops->bop_last_key(bmap, keyp); up_read(&bmap->b_sem); if (ret < 0) ret = nilfs_bmap_convert_error(bmap, __func__, ret); return ret; } /** * nilfs_bmap_delete - delete a key-record pair from a bmap * @bmap: bmap * @key: key * * Description: nilfs_bmap_delete() deletes the key-record pair specified by * @key from @bmap. * * Return Value: On success, 0 is returned. On error, one of the following * negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. * * %-ENOENT - A record associated with @key does not exist. */ int nilfs_bmap_delete(struct nilfs_bmap *bmap, __u64 key) { int ret; down_write(&bmap->b_sem); ret = nilfs_bmap_do_delete(bmap, key); up_write(&bmap->b_sem); return nilfs_bmap_convert_error(bmap, __func__, ret); } static int nilfs_bmap_do_truncate(struct nilfs_bmap *bmap, __u64 key) { __u64 lastkey; int ret; ret = bmap->b_ops->bop_last_key(bmap, &lastkey); if (ret < 0) { if (ret == -ENOENT) ret = 0; return ret; } while (key <= lastkey) { ret = nilfs_bmap_do_delete(bmap, lastkey); if (ret < 0) return ret; ret = bmap->b_ops->bop_last_key(bmap, &lastkey); if (ret < 0) { if (ret == -ENOENT) ret = 0; return ret; } } return 0; } /** * nilfs_bmap_truncate - truncate a bmap to a specified key * @bmap: bmap * @key: key * * Description: nilfs_bmap_truncate() removes key-record pairs whose keys are * greater than or equal to @key from @bmap. * * Return Value: On success, 0 is returned. On error, one of the following * negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. 
*/ int nilfs_bmap_truncate(struct nilfs_bmap *bmap, __u64 key) { int ret; down_write(&bmap->b_sem); ret = nilfs_bmap_do_truncate(bmap, key); up_write(&bmap->b_sem); return nilfs_bmap_convert_error(bmap, __func__, ret); } /** * nilfs_bmap_clear - free resources a bmap holds * @bmap: bmap * * Description: nilfs_bmap_clear() frees resources associated with @bmap. */ void nilfs_bmap_clear(struct nilfs_bmap *bmap) { down_write(&bmap->b_sem); if (bmap->b_ops->bop_clear != NULL) bmap->b_ops->bop_clear(bmap); up_write(&bmap->b_sem); } /** * nilfs_bmap_propagate - propagate dirty state * @bmap: bmap * @bh: buffer head * * Description: nilfs_bmap_propagate() marks the buffers that directly or * indirectly refer to the block specified by @bh dirty. * * Return Value: On success, 0 is returned. On error, one of the following * negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. */ int nilfs_bmap_propagate(struct nilfs_bmap *bmap, struct buffer_head *bh) { int ret; down_write(&bmap->b_sem); ret = bmap->b_ops->bop_propagate(bmap, bh); up_write(&bmap->b_sem); return nilfs_bmap_convert_error(bmap, __func__, ret); } /** * nilfs_bmap_lookup_dirty_buffers - * @bmap: bmap * @listp: pointer to buffer head list */ void nilfs_bmap_lookup_dirty_buffers(struct nilfs_bmap *bmap, struct list_head *listp) { if (bmap->b_ops->bop_lookup_dirty_buffers != NULL) bmap->b_ops->bop_lookup_dirty_buffers(bmap, listp); } /** * nilfs_bmap_assign - assign a new block number to a block * @bmap: bmap * @bh: pointer to buffer head * @blocknr: block number * @binfo: block information * * Description: nilfs_bmap_assign() assigns the block number @blocknr to the * buffer specified by @bh. * * Return Value: On success, 0 is returned and the buffer head of a newly * create buffer and the block information associated with the buffer are * stored in the place pointed by @bh and @binfo, respectively. On error, one * of the following negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. */ int nilfs_bmap_assign(struct nilfs_bmap *bmap, struct buffer_head **bh, unsigned long blocknr, union nilfs_binfo *binfo) { int ret; down_write(&bmap->b_sem); ret = bmap->b_ops->bop_assign(bmap, bh, blocknr, binfo); up_write(&bmap->b_sem); return nilfs_bmap_convert_error(bmap, __func__, ret); } /** * nilfs_bmap_mark - mark block dirty * @bmap: bmap * @key: key * @level: level * * Description: nilfs_bmap_mark() marks the block specified by @key and @level * as dirty. * * Return Value: On success, 0 is returned. On error, one of the following * negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. */ int nilfs_bmap_mark(struct nilfs_bmap *bmap, __u64 key, int level) { int ret; if (bmap->b_ops->bop_mark == NULL) return 0; down_write(&bmap->b_sem); ret = bmap->b_ops->bop_mark(bmap, key, level); up_write(&bmap->b_sem); return nilfs_bmap_convert_error(bmap, __func__, ret); } /** * nilfs_bmap_test_and_clear_dirty - test and clear a bmap dirty state * @bmap: bmap * * Description: nilfs_test_and_clear() is the atomic operation to test and * clear the dirty state of @bmap. * * Return Value: 1 is returned if @bmap is dirty, or 0 if clear. 
*/ int nilfs_bmap_test_and_clear_dirty(struct nilfs_bmap *bmap) { int ret; down_write(&bmap->b_sem); ret = nilfs_bmap_dirty(bmap); nilfs_bmap_clear_dirty(bmap); up_write(&bmap->b_sem); return ret; } /* * Internal use only */ __u64 nilfs_bmap_data_get_key(const struct nilfs_bmap *bmap, const struct buffer_head *bh) { struct buffer_head *pbh; __u64 key; key = page_index(bh->b_page) << (PAGE_SHIFT - bmap->b_inode->i_blkbits); for (pbh = page_buffers(bh->b_page); pbh != bh; pbh = pbh->b_this_page) key++; return key; } __u64 nilfs_bmap_find_target_seq(const struct nilfs_bmap *bmap, __u64 key) { __s64 diff; diff = key - bmap->b_last_allocated_key; if ((nilfs_bmap_keydiff_abs(diff) < NILFS_INODE_BMAP_SIZE) && (bmap->b_last_allocated_ptr != NILFS_BMAP_INVALID_PTR) && (bmap->b_last_allocated_ptr + diff > 0)) return bmap->b_last_allocated_ptr + diff; else return NILFS_BMAP_INVALID_PTR; } #define NILFS_BMAP_GROUP_DIV 8 __u64 nilfs_bmap_find_target_in_group(const struct nilfs_bmap *bmap) { struct inode *dat = nilfs_bmap_get_dat(bmap); unsigned long entries_per_group = nilfs_palloc_entries_per_group(dat); unsigned long group = bmap->b_inode->i_ino / entries_per_group; return group * entries_per_group + (bmap->b_inode->i_ino % NILFS_BMAP_GROUP_DIV) * (entries_per_group / NILFS_BMAP_GROUP_DIV); } static struct lock_class_key nilfs_bmap_dat_lock_key; static struct lock_class_key nilfs_bmap_mdt_lock_key; /** * nilfs_bmap_read - read a bmap from an inode * @bmap: bmap * @raw_inode: on-disk inode * * Description: nilfs_bmap_read() initializes the bmap @bmap. * * Return Value: On success, 0 is returned. On error, the following negative * error code is returned. * * %-ENOMEM - Insufficient amount of memory available. */ int nilfs_bmap_read(struct nilfs_bmap *bmap, struct nilfs_inode *raw_inode) { if (raw_inode == NULL) memset(bmap->b_u.u_data, 0, NILFS_BMAP_SIZE); else memcpy(bmap->b_u.u_data, raw_inode->i_bmap, NILFS_BMAP_SIZE); init_rwsem(&bmap->b_sem); bmap->b_state = 0; bmap->b_inode = &NILFS_BMAP_I(bmap)->vfs_inode; switch (bmap->b_inode->i_ino) { case NILFS_DAT_INO: bmap->b_ptr_type = NILFS_BMAP_PTR_P; bmap->b_last_allocated_key = 0; bmap->b_last_allocated_ptr = NILFS_BMAP_NEW_PTR_INIT; lockdep_set_class(&bmap->b_sem, &nilfs_bmap_dat_lock_key); break; case NILFS_CPFILE_INO: case NILFS_SUFILE_INO: bmap->b_ptr_type = NILFS_BMAP_PTR_VS; bmap->b_last_allocated_key = 0; bmap->b_last_allocated_ptr = NILFS_BMAP_INVALID_PTR; lockdep_set_class(&bmap->b_sem, &nilfs_bmap_mdt_lock_key); break; case NILFS_IFILE_INO: lockdep_set_class(&bmap->b_sem, &nilfs_bmap_mdt_lock_key); fallthrough; default: bmap->b_ptr_type = NILFS_BMAP_PTR_VM; bmap->b_last_allocated_key = 0; bmap->b_last_allocated_ptr = NILFS_BMAP_INVALID_PTR; break; } return (bmap->b_u.u_flags & NILFS_BMAP_LARGE) ? nilfs_btree_init(bmap) : nilfs_direct_init(bmap); } /** * nilfs_bmap_write - write back a bmap to an inode * @bmap: bmap * @raw_inode: on-disk inode * * Description: nilfs_bmap_write() stores @bmap in @raw_inode. 
*/ void nilfs_bmap_write(struct nilfs_bmap *bmap, struct nilfs_inode *raw_inode) { down_write(&bmap->b_sem); memcpy(raw_inode->i_bmap, bmap->b_u.u_data, NILFS_INODE_BMAP_SIZE * sizeof(__le64)); if (bmap->b_inode->i_ino == NILFS_DAT_INO) bmap->b_last_allocated_ptr = NILFS_BMAP_NEW_PTR_INIT; up_write(&bmap->b_sem); } void nilfs_bmap_init_gc(struct nilfs_bmap *bmap) { memset(&bmap->b_u, 0, NILFS_BMAP_SIZE); init_rwsem(&bmap->b_sem); bmap->b_inode = &NILFS_BMAP_I(bmap)->vfs_inode; bmap->b_ptr_type = NILFS_BMAP_PTR_U; bmap->b_last_allocated_key = 0; bmap->b_last_allocated_ptr = NILFS_BMAP_INVALID_PTR; bmap->b_state = 0; nilfs_btree_init_gc(bmap); } void nilfs_bmap_save(const struct nilfs_bmap *bmap, struct nilfs_bmap_store *store) { memcpy(store->data, bmap->b_u.u_data, sizeof(store->data)); store->last_allocated_key = bmap->b_last_allocated_key; store->last_allocated_ptr = bmap->b_last_allocated_ptr; store->state = bmap->b_state; } void nilfs_bmap_restore(struct nilfs_bmap *bmap, const struct nilfs_bmap_store *store) { memcpy(bmap->b_u.u_data, store->data, sizeof(store->data)); bmap->b_last_allocated_key = store->last_allocated_key; bmap->b_last_allocated_ptr = store->last_allocated_ptr; bmap->b_state = store->state; }
linux-master
fs/nilfs2/bmap.c
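nilfs_bmap_find_target_seq() in the file above produces a disk-address hint for a new key by extrapolating from the previous allocation whenever the key falls close to the last allocated key. The sketch below restates that heuristic outside the kernel purely as an illustration; INODE_BMAP_SIZE_EX and INVALID_PTR_EX are stand-in constants for the example, not the kernel's NILFS_INODE_BMAP_SIZE or NILFS_BMAP_INVALID_PTR definitions.

/*
 * Hedged sketch of the sequential-allocation hint in nilfs_bmap_find_target_seq():
 * if the new key is within INODE_BMAP_SIZE_EX of the last allocated key and a
 * valid last pointer exists, extrapolate from it; otherwise report "no hint".
 * The constants below are example stand-ins, not the kernel's values.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define INODE_BMAP_SIZE_EX	7		/* assumption for the example */
#define INVALID_PTR_EX		UINT64_MAX	/* stand-in "no hint" marker */

static uint64_t find_target_seq(uint64_t last_key, uint64_t last_ptr, uint64_t key)
{
	int64_t diff = (int64_t)(key - last_key);

	if (llabs(diff) < INODE_BMAP_SIZE_EX &&
	    last_ptr != INVALID_PTR_EX &&
	    (int64_t)last_ptr + diff > 0)
		return last_ptr + (uint64_t)diff;
	return INVALID_PTR_EX;
}

int main(void)
{
	/* Last block was placed at disk address 1000 for key 50. */
	printf("hint for key 52: %llu\n",
	       (unsigned long long)find_target_seq(50, 1000, 52));	/* 1002 */
	printf("hint for key 90: %llu\n",
	       (unsigned long long)find_target_seq(50, 1000, 90));	/* INVALID_PTR_EX, i.e. no hint */
	return 0;
}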
// SPDX-License-Identifier: GPL-2.0+ /* * NILFS pathname lookup operations. * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * * Modified for NILFS by Amagai Yoshiji and Ryusuke Konishi. */ /* * linux/fs/ext2/namei.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card ([email protected]) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/fs/minix/namei.c * * Copyright (C) 1991, 1992 Linus Torvalds * * Big-endian to little-endian byte-swapping/bitmaps by * David S. Miller ([email protected]), 1995 */ #include <linux/pagemap.h> #include "nilfs.h" #include "export.h" #define NILFS_FID_SIZE_NON_CONNECTABLE \ (offsetof(struct nilfs_fid, parent_gen) / 4) #define NILFS_FID_SIZE_CONNECTABLE (sizeof(struct nilfs_fid) / 4) static inline int nilfs_add_nondir(struct dentry *dentry, struct inode *inode) { int err = nilfs_add_link(dentry, inode); if (!err) { d_instantiate_new(dentry, inode); return 0; } inode_dec_link_count(inode); unlock_new_inode(inode); iput(inode); return err; } /* * Methods themselves. */ static struct dentry * nilfs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { struct inode *inode; ino_t ino; if (dentry->d_name.len > NILFS_NAME_LEN) return ERR_PTR(-ENAMETOOLONG); ino = nilfs_inode_by_name(dir, &dentry->d_name); inode = ino ? nilfs_iget(dir->i_sb, NILFS_I(dir)->i_root, ino) : NULL; return d_splice_alias(inode, dentry); } /* * By the time this is called, we already have created * the directory cache entry for the new file, but it * is so far negative - it has no inode. * * If the create succeeds, we fill in the inode information * with d_instantiate(). */ static int nilfs_create(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) { struct inode *inode; struct nilfs_transaction_info ti; int err; err = nilfs_transaction_begin(dir->i_sb, &ti, 1); if (err) return err; inode = nilfs_new_inode(dir, mode); err = PTR_ERR(inode); if (!IS_ERR(inode)) { inode->i_op = &nilfs_file_inode_operations; inode->i_fop = &nilfs_file_operations; inode->i_mapping->a_ops = &nilfs_aops; nilfs_mark_inode_dirty(inode); err = nilfs_add_nondir(dentry, inode); } if (!err) err = nilfs_transaction_commit(dir->i_sb); else nilfs_transaction_abort(dir->i_sb); return err; } static int nilfs_mknod(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev) { struct inode *inode; struct nilfs_transaction_info ti; int err; err = nilfs_transaction_begin(dir->i_sb, &ti, 1); if (err) return err; inode = nilfs_new_inode(dir, mode); err = PTR_ERR(inode); if (!IS_ERR(inode)) { init_special_inode(inode, inode->i_mode, rdev); nilfs_mark_inode_dirty(inode); err = nilfs_add_nondir(dentry, inode); } if (!err) err = nilfs_transaction_commit(dir->i_sb); else nilfs_transaction_abort(dir->i_sb); return err; } static int nilfs_symlink(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, const char *symname) { struct nilfs_transaction_info ti; struct super_block *sb = dir->i_sb; unsigned int l = strlen(symname) + 1; struct inode *inode; int err; if (l > sb->s_blocksize) return -ENAMETOOLONG; err = nilfs_transaction_begin(dir->i_sb, &ti, 1); if (err) return err; inode = nilfs_new_inode(dir, S_IFLNK | 0777); err = PTR_ERR(inode); if (IS_ERR(inode)) goto out; /* slow symlink */ inode->i_op = &nilfs_symlink_inode_operations; inode_nohighmem(inode); inode->i_mapping->a_ops = &nilfs_aops; err = page_symlink(inode, symname, l); if (err) goto out_fail; /* 
mark_inode_dirty(inode); */ /* page_symlink() do this */ err = nilfs_add_nondir(dentry, inode); out: if (!err) err = nilfs_transaction_commit(dir->i_sb); else nilfs_transaction_abort(dir->i_sb); return err; out_fail: drop_nlink(inode); nilfs_mark_inode_dirty(inode); unlock_new_inode(inode); iput(inode); goto out; } static int nilfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { struct inode *inode = d_inode(old_dentry); struct nilfs_transaction_info ti; int err; err = nilfs_transaction_begin(dir->i_sb, &ti, 1); if (err) return err; inode_set_ctime_current(inode); inode_inc_link_count(inode); ihold(inode); err = nilfs_add_link(dentry, inode); if (!err) { d_instantiate(dentry, inode); err = nilfs_transaction_commit(dir->i_sb); } else { inode_dec_link_count(inode); iput(inode); nilfs_transaction_abort(dir->i_sb); } return err; } static int nilfs_mkdir(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode) { struct inode *inode; struct nilfs_transaction_info ti; int err; err = nilfs_transaction_begin(dir->i_sb, &ti, 1); if (err) return err; inc_nlink(dir); inode = nilfs_new_inode(dir, S_IFDIR | mode); err = PTR_ERR(inode); if (IS_ERR(inode)) goto out_dir; inode->i_op = &nilfs_dir_inode_operations; inode->i_fop = &nilfs_dir_operations; inode->i_mapping->a_ops = &nilfs_aops; inc_nlink(inode); err = nilfs_make_empty(inode, dir); if (err) goto out_fail; err = nilfs_add_link(dentry, inode); if (err) goto out_fail; nilfs_mark_inode_dirty(inode); d_instantiate_new(dentry, inode); out: if (!err) err = nilfs_transaction_commit(dir->i_sb); else nilfs_transaction_abort(dir->i_sb); return err; out_fail: drop_nlink(inode); drop_nlink(inode); nilfs_mark_inode_dirty(inode); unlock_new_inode(inode); iput(inode); out_dir: drop_nlink(dir); nilfs_mark_inode_dirty(dir); goto out; } static int nilfs_do_unlink(struct inode *dir, struct dentry *dentry) { struct inode *inode; struct nilfs_dir_entry *de; struct page *page; int err; err = -ENOENT; de = nilfs_find_entry(dir, &dentry->d_name, &page); if (!de) goto out; inode = d_inode(dentry); err = -EIO; if (le64_to_cpu(de->inode) != inode->i_ino) goto out; if (!inode->i_nlink) { nilfs_warn(inode->i_sb, "deleting nonexistent file (ino=%lu), %d", inode->i_ino, inode->i_nlink); set_nlink(inode, 1); } err = nilfs_delete_entry(de, page); if (err) goto out; inode_set_ctime_to_ts(inode, inode_get_ctime(dir)); drop_nlink(inode); err = 0; out: return err; } static int nilfs_unlink(struct inode *dir, struct dentry *dentry) { struct nilfs_transaction_info ti; int err; err = nilfs_transaction_begin(dir->i_sb, &ti, 0); if (err) return err; err = nilfs_do_unlink(dir, dentry); if (!err) { nilfs_mark_inode_dirty(dir); nilfs_mark_inode_dirty(d_inode(dentry)); err = nilfs_transaction_commit(dir->i_sb); } else nilfs_transaction_abort(dir->i_sb); return err; } static int nilfs_rmdir(struct inode *dir, struct dentry *dentry) { struct inode *inode = d_inode(dentry); struct nilfs_transaction_info ti; int err; err = nilfs_transaction_begin(dir->i_sb, &ti, 0); if (err) return err; err = -ENOTEMPTY; if (nilfs_empty_dir(inode)) { err = nilfs_do_unlink(dir, dentry); if (!err) { inode->i_size = 0; drop_nlink(inode); nilfs_mark_inode_dirty(inode); drop_nlink(dir); nilfs_mark_inode_dirty(dir); } } if (!err) err = nilfs_transaction_commit(dir->i_sb); else nilfs_transaction_abort(dir->i_sb); return err; } static int nilfs_rename(struct mnt_idmap *idmap, struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry 
*new_dentry, unsigned int flags) { struct inode *old_inode = d_inode(old_dentry); struct inode *new_inode = d_inode(new_dentry); struct page *dir_page = NULL; struct nilfs_dir_entry *dir_de = NULL; struct page *old_page; struct nilfs_dir_entry *old_de; struct nilfs_transaction_info ti; int err; if (flags & ~RENAME_NOREPLACE) return -EINVAL; err = nilfs_transaction_begin(old_dir->i_sb, &ti, 1); if (unlikely(err)) return err; err = -ENOENT; old_de = nilfs_find_entry(old_dir, &old_dentry->d_name, &old_page); if (!old_de) goto out; if (S_ISDIR(old_inode->i_mode)) { err = -EIO; dir_de = nilfs_dotdot(old_inode, &dir_page); if (!dir_de) goto out_old; } if (new_inode) { struct page *new_page; struct nilfs_dir_entry *new_de; err = -ENOTEMPTY; if (dir_de && !nilfs_empty_dir(new_inode)) goto out_dir; err = -ENOENT; new_de = nilfs_find_entry(new_dir, &new_dentry->d_name, &new_page); if (!new_de) goto out_dir; nilfs_set_link(new_dir, new_de, new_page, old_inode); nilfs_mark_inode_dirty(new_dir); inode_set_ctime_current(new_inode); if (dir_de) drop_nlink(new_inode); drop_nlink(new_inode); nilfs_mark_inode_dirty(new_inode); } else { err = nilfs_add_link(new_dentry, old_inode); if (err) goto out_dir; if (dir_de) { inc_nlink(new_dir); nilfs_mark_inode_dirty(new_dir); } } /* * Like most other Unix systems, set the ctime for inodes on a * rename. */ inode_set_ctime_current(old_inode); nilfs_delete_entry(old_de, old_page); if (dir_de) { nilfs_set_link(old_inode, dir_de, dir_page, new_dir); drop_nlink(old_dir); } nilfs_mark_inode_dirty(old_dir); nilfs_mark_inode_dirty(old_inode); err = nilfs_transaction_commit(old_dir->i_sb); return err; out_dir: if (dir_de) { kunmap(dir_page); put_page(dir_page); } out_old: kunmap(old_page); put_page(old_page); out: nilfs_transaction_abort(old_dir->i_sb); return err; } /* * Export operations */ static struct dentry *nilfs_get_parent(struct dentry *child) { unsigned long ino; struct inode *inode; struct nilfs_root *root; ino = nilfs_inode_by_name(d_inode(child), &dotdot_name); if (!ino) return ERR_PTR(-ENOENT); root = NILFS_I(d_inode(child))->i_root; inode = nilfs_iget(child->d_sb, root, ino); if (IS_ERR(inode)) return ERR_CAST(inode); return d_obtain_alias(inode); } static struct dentry *nilfs_get_dentry(struct super_block *sb, u64 cno, u64 ino, u32 gen) { struct nilfs_root *root; struct inode *inode; if (ino < NILFS_FIRST_INO(sb) && ino != NILFS_ROOT_INO) return ERR_PTR(-ESTALE); root = nilfs_lookup_root(sb->s_fs_info, cno); if (!root) return ERR_PTR(-ESTALE); inode = nilfs_iget(sb, root, ino); nilfs_put_root(root); if (IS_ERR(inode)) return ERR_CAST(inode); if (gen && inode->i_generation != gen) { iput(inode); return ERR_PTR(-ESTALE); } return d_obtain_alias(inode); } static struct dentry *nilfs_fh_to_dentry(struct super_block *sb, struct fid *fh, int fh_len, int fh_type) { struct nilfs_fid *fid = (struct nilfs_fid *)fh; if (fh_len < NILFS_FID_SIZE_NON_CONNECTABLE || (fh_type != FILEID_NILFS_WITH_PARENT && fh_type != FILEID_NILFS_WITHOUT_PARENT)) return NULL; return nilfs_get_dentry(sb, fid->cno, fid->ino, fid->gen); } static struct dentry *nilfs_fh_to_parent(struct super_block *sb, struct fid *fh, int fh_len, int fh_type) { struct nilfs_fid *fid = (struct nilfs_fid *)fh; if (fh_len < NILFS_FID_SIZE_CONNECTABLE || fh_type != FILEID_NILFS_WITH_PARENT) return NULL; return nilfs_get_dentry(sb, fid->cno, fid->parent_ino, fid->parent_gen); } static int nilfs_encode_fh(struct inode *inode, __u32 *fh, int *lenp, struct inode *parent) { struct nilfs_fid *fid = (struct nilfs_fid 
*)fh; struct nilfs_root *root = NILFS_I(inode)->i_root; int type; if (parent && *lenp < NILFS_FID_SIZE_CONNECTABLE) { *lenp = NILFS_FID_SIZE_CONNECTABLE; return FILEID_INVALID; } if (*lenp < NILFS_FID_SIZE_NON_CONNECTABLE) { *lenp = NILFS_FID_SIZE_NON_CONNECTABLE; return FILEID_INVALID; } fid->cno = root->cno; fid->ino = inode->i_ino; fid->gen = inode->i_generation; if (parent) { fid->parent_ino = parent->i_ino; fid->parent_gen = parent->i_generation; type = FILEID_NILFS_WITH_PARENT; *lenp = NILFS_FID_SIZE_CONNECTABLE; } else { type = FILEID_NILFS_WITHOUT_PARENT; *lenp = NILFS_FID_SIZE_NON_CONNECTABLE; } return type; } const struct inode_operations nilfs_dir_inode_operations = { .create = nilfs_create, .lookup = nilfs_lookup, .link = nilfs_link, .unlink = nilfs_unlink, .symlink = nilfs_symlink, .mkdir = nilfs_mkdir, .rmdir = nilfs_rmdir, .mknod = nilfs_mknod, .rename = nilfs_rename, .setattr = nilfs_setattr, .permission = nilfs_permission, .fiemap = nilfs_fiemap, .fileattr_get = nilfs_fileattr_get, .fileattr_set = nilfs_fileattr_set, }; const struct inode_operations nilfs_special_inode_operations = { .setattr = nilfs_setattr, .permission = nilfs_permission, }; const struct inode_operations nilfs_symlink_inode_operations = { .get_link = page_get_link, .permission = nilfs_permission, }; const struct export_operations nilfs_export_ops = { .encode_fh = nilfs_encode_fh, .fh_to_dentry = nilfs_fh_to_dentry, .fh_to_parent = nilfs_fh_to_parent, .get_parent = nilfs_get_parent, };
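/*
 * Usage sketch (illustrative only): every directory-modifying operation in
 * this file follows the same pattern -- wrap the update in a NILFS
 * transaction, mark the touched inodes dirty, then commit on success or
 * abort on failure.  The helper below is hypothetical and only restates
 * that flow with the functions already used above.
 */
#if 0
static int nilfs_example_touch_dir(struct inode *dir)
{
	struct nilfs_transaction_info ti;
	int err;

	/* Third argument enables the free-space (vacancy) check, as in nilfs_create(). */
	err = nilfs_transaction_begin(dir->i_sb, &ti, 1);
	if (err)
		return err;

	/* ... perform the directory update here, setting err on failure ... */
	nilfs_mark_inode_dirty(dir);

	if (!err)
		err = nilfs_transaction_commit(dir->i_sb);
	else
		nilfs_transaction_abort(dir->i_sb);
	return err;
}
#endif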
linux-master
fs/nilfs2/namei.c
// SPDX-License-Identifier: GPL-2.0+ /* * NILFS segment constructor. * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * * Written by Ryusuke Konishi. * */ #include <linux/pagemap.h> #include <linux/buffer_head.h> #include <linux/writeback.h> #include <linux/bitops.h> #include <linux/bio.h> #include <linux/completion.h> #include <linux/blkdev.h> #include <linux/backing-dev.h> #include <linux/freezer.h> #include <linux/kthread.h> #include <linux/crc32.h> #include <linux/pagevec.h> #include <linux/slab.h> #include <linux/sched/signal.h> #include "nilfs.h" #include "btnode.h" #include "page.h" #include "segment.h" #include "sufile.h" #include "cpfile.h" #include "ifile.h" #include "segbuf.h" /* * Segment constructor */ #define SC_N_INODEVEC 16 /* Size of locally allocated inode vector */ #define SC_MAX_SEGDELTA 64 /* * Upper limit of the number of segments * appended in collection retry loop */ /* Construction mode */ enum { SC_LSEG_SR = 1, /* Make a logical segment having a super root */ SC_LSEG_DSYNC, /* * Flush data blocks of a given file and make * a logical segment without a super root. */ SC_FLUSH_FILE, /* * Flush data files, leads to segment writes without * creating a checkpoint. */ SC_FLUSH_DAT, /* * Flush DAT file. This also creates segments * without a checkpoint. */ }; /* Stage numbers of dirty block collection */ enum { NILFS_ST_INIT = 0, NILFS_ST_GC, /* Collecting dirty blocks for GC */ NILFS_ST_FILE, NILFS_ST_IFILE, NILFS_ST_CPFILE, NILFS_ST_SUFILE, NILFS_ST_DAT, NILFS_ST_SR, /* Super root */ NILFS_ST_DSYNC, /* Data sync blocks */ NILFS_ST_DONE, }; #define CREATE_TRACE_POINTS #include <trace/events/nilfs2.h> /* * nilfs_sc_cstage_inc(), nilfs_sc_cstage_set(), nilfs_sc_cstage_get() are * wrapper functions of stage count (nilfs_sc_info->sc_stage.scnt). Users of * the variable must use them because transition of stage count must involve * trace events (trace_nilfs2_collection_stage_transition). * * nilfs_sc_cstage_get() isn't required for the above purpose because it doesn't * produce tracepoint events. It is provided just for making the intention * clear. 
*/ static inline void nilfs_sc_cstage_inc(struct nilfs_sc_info *sci) { sci->sc_stage.scnt++; trace_nilfs2_collection_stage_transition(sci); } static inline void nilfs_sc_cstage_set(struct nilfs_sc_info *sci, int next_scnt) { sci->sc_stage.scnt = next_scnt; trace_nilfs2_collection_stage_transition(sci); } static inline int nilfs_sc_cstage_get(struct nilfs_sc_info *sci) { return sci->sc_stage.scnt; } /* State flags of collection */ #define NILFS_CF_NODE 0x0001 /* Collecting node blocks */ #define NILFS_CF_IFILE_STARTED 0x0002 /* IFILE stage has started */ #define NILFS_CF_SUFREED 0x0004 /* segment usages has been freed */ #define NILFS_CF_HISTORY_MASK (NILFS_CF_IFILE_STARTED | NILFS_CF_SUFREED) /* Operations depending on the construction mode and file type */ struct nilfs_sc_operations { int (*collect_data)(struct nilfs_sc_info *, struct buffer_head *, struct inode *); int (*collect_node)(struct nilfs_sc_info *, struct buffer_head *, struct inode *); int (*collect_bmap)(struct nilfs_sc_info *, struct buffer_head *, struct inode *); void (*write_data_binfo)(struct nilfs_sc_info *, struct nilfs_segsum_pointer *, union nilfs_binfo *); void (*write_node_binfo)(struct nilfs_sc_info *, struct nilfs_segsum_pointer *, union nilfs_binfo *); }; /* * Other definitions */ static void nilfs_segctor_start_timer(struct nilfs_sc_info *); static void nilfs_segctor_do_flush(struct nilfs_sc_info *, int); static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *); static void nilfs_dispose_list(struct the_nilfs *, struct list_head *, int); #define nilfs_cnt32_ge(a, b) \ (typecheck(__u32, a) && typecheck(__u32, b) && \ ((__s32)(a) - (__s32)(b) >= 0)) static int nilfs_prepare_segment_lock(struct super_block *sb, struct nilfs_transaction_info *ti) { struct nilfs_transaction_info *cur_ti = current->journal_info; void *save = NULL; if (cur_ti) { if (cur_ti->ti_magic == NILFS_TI_MAGIC) return ++cur_ti->ti_count; /* * If journal_info field is occupied by other FS, * it is saved and will be restored on * nilfs_transaction_commit(). */ nilfs_warn(sb, "journal info from a different FS"); save = current->journal_info; } if (!ti) { ti = kmem_cache_alloc(nilfs_transaction_cachep, GFP_NOFS); if (!ti) return -ENOMEM; ti->ti_flags = NILFS_TI_DYNAMIC_ALLOC; } else { ti->ti_flags = 0; } ti->ti_count = 0; ti->ti_save = save; ti->ti_magic = NILFS_TI_MAGIC; current->journal_info = ti; return 0; } /** * nilfs_transaction_begin - start indivisible file operations. * @sb: super block * @ti: nilfs_transaction_info * @vacancy_check: flags for vacancy rate checks * * nilfs_transaction_begin() acquires a reader/writer semaphore, called * the segment semaphore, to make a segment construction and write tasks * exclusive. The function is used with nilfs_transaction_commit() in pairs. * The region enclosed by these two functions can be nested. To avoid a * deadlock, the semaphore is only acquired or released in the outermost call. * * This function allocates a nilfs_transaction_info struct to keep context * information on it. It is initialized and hooked onto the current task in * the outermost call. If a pre-allocated struct is given to @ti, it is used * instead; otherwise a new struct is assigned from a slab. * * When @vacancy_check flag is set, this function will check the amount of * free space, and will wait for the GC to reclaim disk space if low capacity. * * Return Value: On success, 0 is returned. On error, one of the following * negative error code is returned. * * %-ENOMEM - Insufficient memory available. 
* * %-ENOSPC - No space left on device */ int nilfs_transaction_begin(struct super_block *sb, struct nilfs_transaction_info *ti, int vacancy_check) { struct the_nilfs *nilfs; int ret = nilfs_prepare_segment_lock(sb, ti); struct nilfs_transaction_info *trace_ti; if (unlikely(ret < 0)) return ret; if (ret > 0) { trace_ti = current->journal_info; trace_nilfs2_transaction_transition(sb, trace_ti, trace_ti->ti_count, trace_ti->ti_flags, TRACE_NILFS2_TRANSACTION_BEGIN); return 0; } sb_start_intwrite(sb); nilfs = sb->s_fs_info; down_read(&nilfs->ns_segctor_sem); if (vacancy_check && nilfs_near_disk_full(nilfs)) { up_read(&nilfs->ns_segctor_sem); ret = -ENOSPC; goto failed; } trace_ti = current->journal_info; trace_nilfs2_transaction_transition(sb, trace_ti, trace_ti->ti_count, trace_ti->ti_flags, TRACE_NILFS2_TRANSACTION_BEGIN); return 0; failed: ti = current->journal_info; current->journal_info = ti->ti_save; if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC) kmem_cache_free(nilfs_transaction_cachep, ti); sb_end_intwrite(sb); return ret; } /** * nilfs_transaction_commit - commit indivisible file operations. * @sb: super block * * nilfs_transaction_commit() releases the read semaphore which is * acquired by nilfs_transaction_begin(). This is only performed * in outermost call of this function. If a commit flag is set, * nilfs_transaction_commit() sets a timer to start the segment * constructor. If a sync flag is set, it starts construction * directly. */ int nilfs_transaction_commit(struct super_block *sb) { struct nilfs_transaction_info *ti = current->journal_info; struct the_nilfs *nilfs = sb->s_fs_info; int err = 0; BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC); ti->ti_flags |= NILFS_TI_COMMIT; if (ti->ti_count > 0) { ti->ti_count--; trace_nilfs2_transaction_transition(sb, ti, ti->ti_count, ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT); return 0; } if (nilfs->ns_writer) { struct nilfs_sc_info *sci = nilfs->ns_writer; if (ti->ti_flags & NILFS_TI_COMMIT) nilfs_segctor_start_timer(sci); if (atomic_read(&nilfs->ns_ndirtyblks) > sci->sc_watermark) nilfs_segctor_do_flush(sci, 0); } up_read(&nilfs->ns_segctor_sem); trace_nilfs2_transaction_transition(sb, ti, ti->ti_count, ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT); current->journal_info = ti->ti_save; if (ti->ti_flags & NILFS_TI_SYNC) err = nilfs_construct_segment(sb); if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC) kmem_cache_free(nilfs_transaction_cachep, ti); sb_end_intwrite(sb); return err; } void nilfs_transaction_abort(struct super_block *sb) { struct nilfs_transaction_info *ti = current->journal_info; struct the_nilfs *nilfs = sb->s_fs_info; BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC); if (ti->ti_count > 0) { ti->ti_count--; trace_nilfs2_transaction_transition(sb, ti, ti->ti_count, ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT); return; } up_read(&nilfs->ns_segctor_sem); trace_nilfs2_transaction_transition(sb, ti, ti->ti_count, ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT); current->journal_info = ti->ti_save; if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC) kmem_cache_free(nilfs_transaction_cachep, ti); sb_end_intwrite(sb); } void nilfs_relax_pressure_in_lock(struct super_block *sb) { struct the_nilfs *nilfs = sb->s_fs_info; struct nilfs_sc_info *sci = nilfs->ns_writer; if (sb_rdonly(sb) || unlikely(!sci) || !sci->sc_flush_request) return; set_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags); up_read(&nilfs->ns_segctor_sem); down_write(&nilfs->ns_segctor_sem); if (sci->sc_flush_request && test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags)) { struct 
nilfs_transaction_info *ti = current->journal_info; ti->ti_flags |= NILFS_TI_WRITER; nilfs_segctor_do_immediate_flush(sci); ti->ti_flags &= ~NILFS_TI_WRITER; } downgrade_write(&nilfs->ns_segctor_sem); } static void nilfs_transaction_lock(struct super_block *sb, struct nilfs_transaction_info *ti, int gcflag) { struct nilfs_transaction_info *cur_ti = current->journal_info; struct the_nilfs *nilfs = sb->s_fs_info; struct nilfs_sc_info *sci = nilfs->ns_writer; WARN_ON(cur_ti); ti->ti_flags = NILFS_TI_WRITER; ti->ti_count = 0; ti->ti_save = cur_ti; ti->ti_magic = NILFS_TI_MAGIC; current->journal_info = ti; for (;;) { trace_nilfs2_transaction_transition(sb, ti, ti->ti_count, ti->ti_flags, TRACE_NILFS2_TRANSACTION_TRYLOCK); down_write(&nilfs->ns_segctor_sem); if (!test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags)) break; nilfs_segctor_do_immediate_flush(sci); up_write(&nilfs->ns_segctor_sem); cond_resched(); } if (gcflag) ti->ti_flags |= NILFS_TI_GC; trace_nilfs2_transaction_transition(sb, ti, ti->ti_count, ti->ti_flags, TRACE_NILFS2_TRANSACTION_LOCK); } static void nilfs_transaction_unlock(struct super_block *sb) { struct nilfs_transaction_info *ti = current->journal_info; struct the_nilfs *nilfs = sb->s_fs_info; BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC); BUG_ON(ti->ti_count > 0); up_write(&nilfs->ns_segctor_sem); current->journal_info = ti->ti_save; trace_nilfs2_transaction_transition(sb, ti, ti->ti_count, ti->ti_flags, TRACE_NILFS2_TRANSACTION_UNLOCK); } static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci, struct nilfs_segsum_pointer *ssp, unsigned int bytes) { struct nilfs_segment_buffer *segbuf = sci->sc_curseg; unsigned int blocksize = sci->sc_super->s_blocksize; void *p; if (unlikely(ssp->offset + bytes > blocksize)) { ssp->offset = 0; BUG_ON(NILFS_SEGBUF_BH_IS_LAST(ssp->bh, &segbuf->sb_segsum_buffers)); ssp->bh = NILFS_SEGBUF_NEXT_BH(ssp->bh); } p = ssp->bh->b_data + ssp->offset; ssp->offset += bytes; return p; } /** * nilfs_segctor_reset_segment_buffer - reset the current segment buffer * @sci: nilfs_sc_info */ static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci) { struct nilfs_segment_buffer *segbuf = sci->sc_curseg; struct buffer_head *sumbh; unsigned int sumbytes; unsigned int flags = 0; int err; if (nilfs_doing_gc()) flags = NILFS_SS_GC; err = nilfs_segbuf_reset(segbuf, flags, sci->sc_seg_ctime, sci->sc_cno); if (unlikely(err)) return err; sumbh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers); sumbytes = segbuf->sb_sum.sumbytes; sci->sc_finfo_ptr.bh = sumbh; sci->sc_finfo_ptr.offset = sumbytes; sci->sc_binfo_ptr.bh = sumbh; sci->sc_binfo_ptr.offset = sumbytes; sci->sc_blk_cnt = sci->sc_datablk_cnt = 0; return 0; } /** * nilfs_segctor_zeropad_segsum - zero pad the rest of the segment summary area * @sci: segment constructor object * * nilfs_segctor_zeropad_segsum() zero-fills unallocated space at the end of * the current segment summary block. */ static void nilfs_segctor_zeropad_segsum(struct nilfs_sc_info *sci) { struct nilfs_segsum_pointer *ssp; ssp = sci->sc_blk_cnt > 0 ? 
&sci->sc_binfo_ptr : &sci->sc_finfo_ptr; if (ssp->offset < ssp->bh->b_size) memset(ssp->bh->b_data + ssp->offset, 0, ssp->bh->b_size - ssp->offset); } static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci) { sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks; if (NILFS_SEGBUF_IS_LAST(sci->sc_curseg, &sci->sc_segbufs)) return -E2BIG; /* * The current segment is filled up * (internal code) */ nilfs_segctor_zeropad_segsum(sci); sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg); return nilfs_segctor_reset_segment_buffer(sci); } static int nilfs_segctor_add_super_root(struct nilfs_sc_info *sci) { struct nilfs_segment_buffer *segbuf = sci->sc_curseg; int err; if (segbuf->sb_sum.nblocks >= segbuf->sb_rest_blocks) { err = nilfs_segctor_feed_segment(sci); if (err) return err; segbuf = sci->sc_curseg; } err = nilfs_segbuf_extend_payload(segbuf, &segbuf->sb_super_root); if (likely(!err)) segbuf->sb_sum.flags |= NILFS_SS_SR; return err; } /* * Functions for making segment summary and payloads */ static int nilfs_segctor_segsum_block_required( struct nilfs_sc_info *sci, const struct nilfs_segsum_pointer *ssp, unsigned int binfo_size) { unsigned int blocksize = sci->sc_super->s_blocksize; /* Size of finfo and binfo is enough small against blocksize */ return ssp->offset + binfo_size + (!sci->sc_blk_cnt ? sizeof(struct nilfs_finfo) : 0) > blocksize; } static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci, struct inode *inode) { sci->sc_curseg->sb_sum.nfinfo++; sci->sc_binfo_ptr = sci->sc_finfo_ptr; nilfs_segctor_map_segsum_entry( sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo)); if (NILFS_I(inode)->i_root && !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags)) set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags); /* skip finfo */ } static void nilfs_segctor_end_finfo(struct nilfs_sc_info *sci, struct inode *inode) { struct nilfs_finfo *finfo; struct nilfs_inode_info *ii; struct nilfs_segment_buffer *segbuf; __u64 cno; if (sci->sc_blk_cnt == 0) return; ii = NILFS_I(inode); if (test_bit(NILFS_I_GCINODE, &ii->i_state)) cno = ii->i_cno; else if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) cno = 0; else cno = sci->sc_cno; finfo = nilfs_segctor_map_segsum_entry(sci, &sci->sc_finfo_ptr, sizeof(*finfo)); finfo->fi_ino = cpu_to_le64(inode->i_ino); finfo->fi_nblocks = cpu_to_le32(sci->sc_blk_cnt); finfo->fi_ndatablk = cpu_to_le32(sci->sc_datablk_cnt); finfo->fi_cno = cpu_to_le64(cno); segbuf = sci->sc_curseg; segbuf->sb_sum.sumbytes = sci->sc_binfo_ptr.offset + sci->sc_super->s_blocksize * (segbuf->sb_sum.nsumblk - 1); sci->sc_finfo_ptr = sci->sc_binfo_ptr; sci->sc_blk_cnt = sci->sc_datablk_cnt = 0; } static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci, struct buffer_head *bh, struct inode *inode, unsigned int binfo_size) { struct nilfs_segment_buffer *segbuf; int required, err = 0; retry: segbuf = sci->sc_curseg; required = nilfs_segctor_segsum_block_required( sci, &sci->sc_binfo_ptr, binfo_size); if (segbuf->sb_sum.nblocks + required + 1 > segbuf->sb_rest_blocks) { nilfs_segctor_end_finfo(sci, inode); err = nilfs_segctor_feed_segment(sci); if (err) return err; goto retry; } if (unlikely(required)) { nilfs_segctor_zeropad_segsum(sci); err = nilfs_segbuf_extend_segsum(segbuf); if (unlikely(err)) goto failed; } if (sci->sc_blk_cnt == 0) nilfs_segctor_begin_finfo(sci, inode); nilfs_segctor_map_segsum_entry(sci, &sci->sc_binfo_ptr, binfo_size); /* Substitution to vblocknr is delayed until update_blocknr() */ nilfs_segbuf_add_file_buffer(segbuf, bh); sci->sc_blk_cnt++; failed: return err; } 
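/*
 * Packing sketch (illustrative only): finfo headers and per-block binfo
 * entries are appended sequentially into the segment summary buffers by
 * nilfs_segctor_map_segsum_entry().  The hypothetical helper below mirrors
 * nilfs_segctor_segsum_block_required(): a new summary block is needed when
 * the next binfo -- plus a finfo header if this is the first block of the
 * file -- would not fit in the current block.
 */
#if 0
static bool nilfs_example_segsum_full(unsigned int offset,
				      unsigned int binfo_size,
				      unsigned int blk_cnt,
				      unsigned int blocksize)
{
	unsigned int need = binfo_size;

	if (blk_cnt == 0)	/* first block of a file: finfo header needed */
		need += sizeof(struct nilfs_finfo);

	return offset + need > blocksize;
}
#endif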
/* * Callback functions that enumerate, mark, and collect dirty blocks */ static int nilfs_collect_file_data(struct nilfs_sc_info *sci, struct buffer_head *bh, struct inode *inode) { int err; err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh); if (err < 0) return err; err = nilfs_segctor_add_file_block(sci, bh, inode, sizeof(struct nilfs_binfo_v)); if (!err) sci->sc_datablk_cnt++; return err; } static int nilfs_collect_file_node(struct nilfs_sc_info *sci, struct buffer_head *bh, struct inode *inode) { return nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh); } static int nilfs_collect_file_bmap(struct nilfs_sc_info *sci, struct buffer_head *bh, struct inode *inode) { WARN_ON(!buffer_dirty(bh)); return nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64)); } static void nilfs_write_file_data_binfo(struct nilfs_sc_info *sci, struct nilfs_segsum_pointer *ssp, union nilfs_binfo *binfo) { struct nilfs_binfo_v *binfo_v = nilfs_segctor_map_segsum_entry( sci, ssp, sizeof(*binfo_v)); *binfo_v = binfo->bi_v; } static void nilfs_write_file_node_binfo(struct nilfs_sc_info *sci, struct nilfs_segsum_pointer *ssp, union nilfs_binfo *binfo) { __le64 *vblocknr = nilfs_segctor_map_segsum_entry( sci, ssp, sizeof(*vblocknr)); *vblocknr = binfo->bi_v.bi_vblocknr; } static const struct nilfs_sc_operations nilfs_sc_file_ops = { .collect_data = nilfs_collect_file_data, .collect_node = nilfs_collect_file_node, .collect_bmap = nilfs_collect_file_bmap, .write_data_binfo = nilfs_write_file_data_binfo, .write_node_binfo = nilfs_write_file_node_binfo, }; static int nilfs_collect_dat_data(struct nilfs_sc_info *sci, struct buffer_head *bh, struct inode *inode) { int err; err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh); if (err < 0) return err; err = nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64)); if (!err) sci->sc_datablk_cnt++; return err; } static int nilfs_collect_dat_bmap(struct nilfs_sc_info *sci, struct buffer_head *bh, struct inode *inode) { WARN_ON(!buffer_dirty(bh)); return nilfs_segctor_add_file_block(sci, bh, inode, sizeof(struct nilfs_binfo_dat)); } static void nilfs_write_dat_data_binfo(struct nilfs_sc_info *sci, struct nilfs_segsum_pointer *ssp, union nilfs_binfo *binfo) { __le64 *blkoff = nilfs_segctor_map_segsum_entry(sci, ssp, sizeof(*blkoff)); *blkoff = binfo->bi_dat.bi_blkoff; } static void nilfs_write_dat_node_binfo(struct nilfs_sc_info *sci, struct nilfs_segsum_pointer *ssp, union nilfs_binfo *binfo) { struct nilfs_binfo_dat *binfo_dat = nilfs_segctor_map_segsum_entry(sci, ssp, sizeof(*binfo_dat)); *binfo_dat = binfo->bi_dat; } static const struct nilfs_sc_operations nilfs_sc_dat_ops = { .collect_data = nilfs_collect_dat_data, .collect_node = nilfs_collect_file_node, .collect_bmap = nilfs_collect_dat_bmap, .write_data_binfo = nilfs_write_dat_data_binfo, .write_node_binfo = nilfs_write_dat_node_binfo, }; static const struct nilfs_sc_operations nilfs_sc_dsync_ops = { .collect_data = nilfs_collect_file_data, .collect_node = NULL, .collect_bmap = NULL, .write_data_binfo = nilfs_write_file_data_binfo, .write_node_binfo = NULL, }; static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode, struct list_head *listp, size_t nlimit, loff_t start, loff_t end) { struct address_space *mapping = inode->i_mapping; struct folio_batch fbatch; pgoff_t index = 0, last = ULONG_MAX; size_t ndirties = 0; int i; if (unlikely(start != 0 || end != LLONG_MAX)) { /* * A valid range is given for sync-ing data pages. 
The * range is rounded to per-page; extra dirty buffers * may be included if blocksize < pagesize. */ index = start >> PAGE_SHIFT; last = end >> PAGE_SHIFT; } folio_batch_init(&fbatch); repeat: if (unlikely(index > last) || !filemap_get_folios_tag(mapping, &index, last, PAGECACHE_TAG_DIRTY, &fbatch)) return ndirties; for (i = 0; i < folio_batch_count(&fbatch); i++) { struct buffer_head *bh, *head; struct folio *folio = fbatch.folios[i]; folio_lock(folio); if (unlikely(folio->mapping != mapping)) { /* Exclude folios removed from the address space */ folio_unlock(folio); continue; } head = folio_buffers(folio); if (!head) { create_empty_buffers(&folio->page, i_blocksize(inode), 0); head = folio_buffers(folio); } folio_unlock(folio); bh = head; do { if (!buffer_dirty(bh) || buffer_async_write(bh)) continue; get_bh(bh); list_add_tail(&bh->b_assoc_buffers, listp); ndirties++; if (unlikely(ndirties >= nlimit)) { folio_batch_release(&fbatch); cond_resched(); return ndirties; } } while (bh = bh->b_this_page, bh != head); } folio_batch_release(&fbatch); cond_resched(); goto repeat; } static void nilfs_lookup_dirty_node_buffers(struct inode *inode, struct list_head *listp) { struct nilfs_inode_info *ii = NILFS_I(inode); struct inode *btnc_inode = ii->i_assoc_inode; struct folio_batch fbatch; struct buffer_head *bh, *head; unsigned int i; pgoff_t index = 0; if (!btnc_inode) return; folio_batch_init(&fbatch); while (filemap_get_folios_tag(btnc_inode->i_mapping, &index, (pgoff_t)-1, PAGECACHE_TAG_DIRTY, &fbatch)) { for (i = 0; i < folio_batch_count(&fbatch); i++) { bh = head = folio_buffers(fbatch.folios[i]); do { if (buffer_dirty(bh) && !buffer_async_write(bh)) { get_bh(bh); list_add_tail(&bh->b_assoc_buffers, listp); } bh = bh->b_this_page; } while (bh != head); } folio_batch_release(&fbatch); cond_resched(); } } static void nilfs_dispose_list(struct the_nilfs *nilfs, struct list_head *head, int force) { struct nilfs_inode_info *ii, *n; struct nilfs_inode_info *ivec[SC_N_INODEVEC], **pii; unsigned int nv = 0; while (!list_empty(head)) { spin_lock(&nilfs->ns_inode_lock); list_for_each_entry_safe(ii, n, head, i_dirty) { list_del_init(&ii->i_dirty); if (force) { if (unlikely(ii->i_bh)) { brelse(ii->i_bh); ii->i_bh = NULL; } } else if (test_bit(NILFS_I_DIRTY, &ii->i_state)) { set_bit(NILFS_I_QUEUED, &ii->i_state); list_add_tail(&ii->i_dirty, &nilfs->ns_dirty_files); continue; } ivec[nv++] = ii; if (nv == SC_N_INODEVEC) break; } spin_unlock(&nilfs->ns_inode_lock); for (pii = ivec; nv > 0; pii++, nv--) iput(&(*pii)->vfs_inode); } } static void nilfs_iput_work_func(struct work_struct *work) { struct nilfs_sc_info *sci = container_of(work, struct nilfs_sc_info, sc_iput_work); struct the_nilfs *nilfs = sci->sc_super->s_fs_info; nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 0); } static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs, struct nilfs_root *root) { int ret = 0; if (nilfs_mdt_fetch_dirty(root->ifile)) ret++; if (nilfs_mdt_fetch_dirty(nilfs->ns_cpfile)) ret++; if (nilfs_mdt_fetch_dirty(nilfs->ns_sufile)) ret++; if ((ret || nilfs_doing_gc()) && nilfs_mdt_fetch_dirty(nilfs->ns_dat)) ret++; return ret; } static int nilfs_segctor_clean(struct nilfs_sc_info *sci) { return list_empty(&sci->sc_dirty_files) && !test_bit(NILFS_SC_DIRTY, &sci->sc_flags) && sci->sc_nfreesegs == 0 && (!nilfs_doing_gc() || list_empty(&sci->sc_gc_inodes)); } static int nilfs_segctor_confirm(struct nilfs_sc_info *sci) { struct the_nilfs *nilfs = sci->sc_super->s_fs_info; int ret = 0; if (nilfs_test_metadata_dirty(nilfs, 
sci->sc_root)) set_bit(NILFS_SC_DIRTY, &sci->sc_flags); spin_lock(&nilfs->ns_inode_lock); if (list_empty(&nilfs->ns_dirty_files) && nilfs_segctor_clean(sci)) ret++; spin_unlock(&nilfs->ns_inode_lock); return ret; } static void nilfs_segctor_clear_metadata_dirty(struct nilfs_sc_info *sci) { struct the_nilfs *nilfs = sci->sc_super->s_fs_info; nilfs_mdt_clear_dirty(sci->sc_root->ifile); nilfs_mdt_clear_dirty(nilfs->ns_cpfile); nilfs_mdt_clear_dirty(nilfs->ns_sufile); nilfs_mdt_clear_dirty(nilfs->ns_dat); } static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci) { struct the_nilfs *nilfs = sci->sc_super->s_fs_info; struct buffer_head *bh_cp; struct nilfs_checkpoint *raw_cp; int err; /* XXX: this interface will be changed */ err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 1, &raw_cp, &bh_cp); if (likely(!err)) { /* * The following code is duplicated with cpfile. But, it is * needed to collect the checkpoint even if it was not newly * created. */ mark_buffer_dirty(bh_cp); nilfs_mdt_mark_dirty(nilfs->ns_cpfile); nilfs_cpfile_put_checkpoint( nilfs->ns_cpfile, nilfs->ns_cno, bh_cp); } else if (err == -EINVAL || err == -ENOENT) { nilfs_error(sci->sc_super, "checkpoint creation failed due to metadata corruption."); err = -EIO; } return err; } static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci) { struct the_nilfs *nilfs = sci->sc_super->s_fs_info; struct buffer_head *bh_cp; struct nilfs_checkpoint *raw_cp; int err; err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 0, &raw_cp, &bh_cp); if (unlikely(err)) { if (err == -EINVAL || err == -ENOENT) { nilfs_error(sci->sc_super, "checkpoint finalization failed due to metadata corruption."); err = -EIO; } goto failed_ibh; } raw_cp->cp_snapshot_list.ssl_next = 0; raw_cp->cp_snapshot_list.ssl_prev = 0; raw_cp->cp_inodes_count = cpu_to_le64(atomic64_read(&sci->sc_root->inodes_count)); raw_cp->cp_blocks_count = cpu_to_le64(atomic64_read(&sci->sc_root->blocks_count)); raw_cp->cp_nblk_inc = cpu_to_le64(sci->sc_nblk_inc + sci->sc_nblk_this_inc); raw_cp->cp_create = cpu_to_le64(sci->sc_seg_ctime); raw_cp->cp_cno = cpu_to_le64(nilfs->ns_cno); if (test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags)) nilfs_checkpoint_clear_minor(raw_cp); else nilfs_checkpoint_set_minor(raw_cp); nilfs_write_inode_common(sci->sc_root->ifile, &raw_cp->cp_ifile_inode, 1); nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, bh_cp); return 0; failed_ibh: return err; } static void nilfs_fill_in_file_bmap(struct inode *ifile, struct nilfs_inode_info *ii) { struct buffer_head *ibh; struct nilfs_inode *raw_inode; if (test_bit(NILFS_I_BMAP, &ii->i_state)) { ibh = ii->i_bh; BUG_ON(!ibh); raw_inode = nilfs_ifile_map_inode(ifile, ii->vfs_inode.i_ino, ibh); nilfs_bmap_write(ii->i_bmap, raw_inode); nilfs_ifile_unmap_inode(ifile, ii->vfs_inode.i_ino, ibh); } } static void nilfs_segctor_fill_in_file_bmap(struct nilfs_sc_info *sci) { struct nilfs_inode_info *ii; list_for_each_entry(ii, &sci->sc_dirty_files, i_dirty) { nilfs_fill_in_file_bmap(sci->sc_root->ifile, ii); set_bit(NILFS_I_COLLECTED, &ii->i_state); } } static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci, struct the_nilfs *nilfs) { struct buffer_head *bh_sr; struct nilfs_super_root *raw_sr; unsigned int isz, srsz; bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root; lock_buffer(bh_sr); raw_sr = (struct nilfs_super_root *)bh_sr->b_data; isz = nilfs->ns_inode_size; srsz = NILFS_SR_BYTES(isz); raw_sr->sr_sum = 0; /* Ensure initialization within this 
update */ raw_sr->sr_bytes = cpu_to_le16(srsz); raw_sr->sr_nongc_ctime = cpu_to_le64(nilfs_doing_gc() ? nilfs->ns_nongc_ctime : sci->sc_seg_ctime); raw_sr->sr_flags = 0; nilfs_write_inode_common(nilfs->ns_dat, (void *)raw_sr + NILFS_SR_DAT_OFFSET(isz), 1); nilfs_write_inode_common(nilfs->ns_cpfile, (void *)raw_sr + NILFS_SR_CPFILE_OFFSET(isz), 1); nilfs_write_inode_common(nilfs->ns_sufile, (void *)raw_sr + NILFS_SR_SUFILE_OFFSET(isz), 1); memset((void *)raw_sr + srsz, 0, nilfs->ns_blocksize - srsz); set_buffer_uptodate(bh_sr); unlock_buffer(bh_sr); } static void nilfs_redirty_inodes(struct list_head *head) { struct nilfs_inode_info *ii; list_for_each_entry(ii, head, i_dirty) { if (test_bit(NILFS_I_COLLECTED, &ii->i_state)) clear_bit(NILFS_I_COLLECTED, &ii->i_state); } } static void nilfs_drop_collected_inodes(struct list_head *head) { struct nilfs_inode_info *ii; list_for_each_entry(ii, head, i_dirty) { if (!test_and_clear_bit(NILFS_I_COLLECTED, &ii->i_state)) continue; clear_bit(NILFS_I_INODE_SYNC, &ii->i_state); set_bit(NILFS_I_UPDATED, &ii->i_state); } } static int nilfs_segctor_apply_buffers(struct nilfs_sc_info *sci, struct inode *inode, struct list_head *listp, int (*collect)(struct nilfs_sc_info *, struct buffer_head *, struct inode *)) { struct buffer_head *bh, *n; int err = 0; if (collect) { list_for_each_entry_safe(bh, n, listp, b_assoc_buffers) { list_del_init(&bh->b_assoc_buffers); err = collect(sci, bh, inode); brelse(bh); if (unlikely(err)) goto dispose_buffers; } return 0; } dispose_buffers: while (!list_empty(listp)) { bh = list_first_entry(listp, struct buffer_head, b_assoc_buffers); list_del_init(&bh->b_assoc_buffers); brelse(bh); } return err; } static size_t nilfs_segctor_buffer_rest(struct nilfs_sc_info *sci) { /* Remaining number of blocks within segment buffer */ return sci->sc_segbuf_nblocks - (sci->sc_nblk_this_inc + sci->sc_curseg->sb_sum.nblocks); } static int nilfs_segctor_scan_file(struct nilfs_sc_info *sci, struct inode *inode, const struct nilfs_sc_operations *sc_ops) { LIST_HEAD(data_buffers); LIST_HEAD(node_buffers); int err; if (!(sci->sc_stage.flags & NILFS_CF_NODE)) { size_t n, rest = nilfs_segctor_buffer_rest(sci); n = nilfs_lookup_dirty_data_buffers( inode, &data_buffers, rest + 1, 0, LLONG_MAX); if (n > rest) { err = nilfs_segctor_apply_buffers( sci, inode, &data_buffers, sc_ops->collect_data); BUG_ON(!err); /* always receive -E2BIG or true error */ goto break_or_fail; } } nilfs_lookup_dirty_node_buffers(inode, &node_buffers); if (!(sci->sc_stage.flags & NILFS_CF_NODE)) { err = nilfs_segctor_apply_buffers( sci, inode, &data_buffers, sc_ops->collect_data); if (unlikely(err)) { /* dispose node list */ nilfs_segctor_apply_buffers( sci, inode, &node_buffers, NULL); goto break_or_fail; } sci->sc_stage.flags |= NILFS_CF_NODE; } /* Collect node */ err = nilfs_segctor_apply_buffers( sci, inode, &node_buffers, sc_ops->collect_node); if (unlikely(err)) goto break_or_fail; nilfs_bmap_lookup_dirty_buffers(NILFS_I(inode)->i_bmap, &node_buffers); err = nilfs_segctor_apply_buffers( sci, inode, &node_buffers, sc_ops->collect_bmap); if (unlikely(err)) goto break_or_fail; nilfs_segctor_end_finfo(sci, inode); sci->sc_stage.flags &= ~NILFS_CF_NODE; break_or_fail: return err; } static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci, struct inode *inode) { LIST_HEAD(data_buffers); size_t n, rest = nilfs_segctor_buffer_rest(sci); int err; n = nilfs_lookup_dirty_data_buffers(inode, &data_buffers, rest + 1, sci->sc_dsync_start, sci->sc_dsync_end); err = 
nilfs_segctor_apply_buffers(sci, inode, &data_buffers, nilfs_collect_file_data); if (!err) { nilfs_segctor_end_finfo(sci, inode); BUG_ON(n > rest); /* always receive -E2BIG or true error if n > rest */ } return err; } static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode) { struct the_nilfs *nilfs = sci->sc_super->s_fs_info; struct list_head *head; struct nilfs_inode_info *ii; size_t ndone; int err = 0; switch (nilfs_sc_cstage_get(sci)) { case NILFS_ST_INIT: /* Pre-processes */ sci->sc_stage.flags = 0; if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags)) { sci->sc_nblk_inc = 0; sci->sc_curseg->sb_sum.flags = NILFS_SS_LOGBGN; if (mode == SC_LSEG_DSYNC) { nilfs_sc_cstage_set(sci, NILFS_ST_DSYNC); goto dsync_mode; } } sci->sc_stage.dirty_file_ptr = NULL; sci->sc_stage.gc_inode_ptr = NULL; if (mode == SC_FLUSH_DAT) { nilfs_sc_cstage_set(sci, NILFS_ST_DAT); goto dat_stage; } nilfs_sc_cstage_inc(sci); fallthrough; case NILFS_ST_GC: if (nilfs_doing_gc()) { head = &sci->sc_gc_inodes; ii = list_prepare_entry(sci->sc_stage.gc_inode_ptr, head, i_dirty); list_for_each_entry_continue(ii, head, i_dirty) { err = nilfs_segctor_scan_file( sci, &ii->vfs_inode, &nilfs_sc_file_ops); if (unlikely(err)) { sci->sc_stage.gc_inode_ptr = list_entry( ii->i_dirty.prev, struct nilfs_inode_info, i_dirty); goto break_or_fail; } set_bit(NILFS_I_COLLECTED, &ii->i_state); } sci->sc_stage.gc_inode_ptr = NULL; } nilfs_sc_cstage_inc(sci); fallthrough; case NILFS_ST_FILE: head = &sci->sc_dirty_files; ii = list_prepare_entry(sci->sc_stage.dirty_file_ptr, head, i_dirty); list_for_each_entry_continue(ii, head, i_dirty) { clear_bit(NILFS_I_DIRTY, &ii->i_state); err = nilfs_segctor_scan_file(sci, &ii->vfs_inode, &nilfs_sc_file_ops); if (unlikely(err)) { sci->sc_stage.dirty_file_ptr = list_entry(ii->i_dirty.prev, struct nilfs_inode_info, i_dirty); goto break_or_fail; } /* sci->sc_stage.dirty_file_ptr = NILFS_I(inode); */ /* XXX: required ? 
*/ } sci->sc_stage.dirty_file_ptr = NULL; if (mode == SC_FLUSH_FILE) { nilfs_sc_cstage_set(sci, NILFS_ST_DONE); return 0; } nilfs_sc_cstage_inc(sci); sci->sc_stage.flags |= NILFS_CF_IFILE_STARTED; fallthrough; case NILFS_ST_IFILE: err = nilfs_segctor_scan_file(sci, sci->sc_root->ifile, &nilfs_sc_file_ops); if (unlikely(err)) break; nilfs_sc_cstage_inc(sci); /* Creating a checkpoint */ err = nilfs_segctor_create_checkpoint(sci); if (unlikely(err)) break; fallthrough; case NILFS_ST_CPFILE: err = nilfs_segctor_scan_file(sci, nilfs->ns_cpfile, &nilfs_sc_file_ops); if (unlikely(err)) break; nilfs_sc_cstage_inc(sci); fallthrough; case NILFS_ST_SUFILE: err = nilfs_sufile_freev(nilfs->ns_sufile, sci->sc_freesegs, sci->sc_nfreesegs, &ndone); if (unlikely(err)) { nilfs_sufile_cancel_freev(nilfs->ns_sufile, sci->sc_freesegs, ndone, NULL); break; } sci->sc_stage.flags |= NILFS_CF_SUFREED; err = nilfs_segctor_scan_file(sci, nilfs->ns_sufile, &nilfs_sc_file_ops); if (unlikely(err)) break; nilfs_sc_cstage_inc(sci); fallthrough; case NILFS_ST_DAT: dat_stage: err = nilfs_segctor_scan_file(sci, nilfs->ns_dat, &nilfs_sc_dat_ops); if (unlikely(err)) break; if (mode == SC_FLUSH_DAT) { nilfs_sc_cstage_set(sci, NILFS_ST_DONE); return 0; } nilfs_sc_cstage_inc(sci); fallthrough; case NILFS_ST_SR: if (mode == SC_LSEG_SR) { /* Appending a super root */ err = nilfs_segctor_add_super_root(sci); if (unlikely(err)) break; } /* End of a logical segment */ sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND; nilfs_sc_cstage_set(sci, NILFS_ST_DONE); return 0; case NILFS_ST_DSYNC: dsync_mode: sci->sc_curseg->sb_sum.flags |= NILFS_SS_SYNDT; ii = sci->sc_dsync_inode; if (!test_bit(NILFS_I_BUSY, &ii->i_state)) break; err = nilfs_segctor_scan_file_dsync(sci, &ii->vfs_inode); if (unlikely(err)) break; sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND; nilfs_sc_cstage_set(sci, NILFS_ST_DONE); return 0; case NILFS_ST_DONE: return 0; default: BUG(); } break_or_fail: return err; } /** * nilfs_segctor_begin_construction - setup segment buffer to make a new log * @sci: nilfs_sc_info * @nilfs: nilfs object */ static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci, struct the_nilfs *nilfs) { struct nilfs_segment_buffer *segbuf, *prev; __u64 nextnum; int err, alloc = 0; segbuf = nilfs_segbuf_new(sci->sc_super); if (unlikely(!segbuf)) return -ENOMEM; if (list_empty(&sci->sc_write_logs)) { nilfs_segbuf_map(segbuf, nilfs->ns_segnum, nilfs->ns_pseg_offset, nilfs); if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) { nilfs_shift_to_next_segment(nilfs); nilfs_segbuf_map(segbuf, nilfs->ns_segnum, 0, nilfs); } segbuf->sb_sum.seg_seq = nilfs->ns_seg_seq; nextnum = nilfs->ns_nextnum; if (nilfs->ns_segnum == nilfs->ns_nextnum) /* Start from the head of a new full segment */ alloc++; } else { /* Continue logs */ prev = NILFS_LAST_SEGBUF(&sci->sc_write_logs); nilfs_segbuf_map_cont(segbuf, prev); segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq; nextnum = prev->sb_nextnum; if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) { nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs); segbuf->sb_sum.seg_seq++; alloc++; } } err = nilfs_sufile_mark_dirty(nilfs->ns_sufile, segbuf->sb_segnum); if (err) goto failed; if (alloc) { err = nilfs_sufile_alloc(nilfs->ns_sufile, &nextnum); if (err) goto failed; } nilfs_segbuf_set_next_segnum(segbuf, nextnum, nilfs); BUG_ON(!list_empty(&sci->sc_segbufs)); list_add_tail(&segbuf->sb_list, &sci->sc_segbufs); sci->sc_segbuf_nblocks = segbuf->sb_rest_blocks; return 0; failed: nilfs_segbuf_free(segbuf); return err; } 
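/*
 * Stage-order sketch (illustrative only): nilfs_segctor_collect_blocks()
 * above walks the stages below in order for a full construction
 * (SC_LSEG_SR).  SC_FLUSH_FILE stops after NILFS_ST_FILE, SC_FLUSH_DAT
 * jumps directly to NILFS_ST_DAT, and SC_LSEG_DSYNC branches to
 * NILFS_ST_DSYNC; the hypothetical array only spells that ordering out.
 */
#if 0
static const int nilfs_example_full_stage_order[] = {
	NILFS_ST_INIT, NILFS_ST_GC, NILFS_ST_FILE, NILFS_ST_IFILE,
	NILFS_ST_CPFILE, NILFS_ST_SUFILE, NILFS_ST_DAT, NILFS_ST_SR,
	NILFS_ST_DONE,
};
#endif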
static int nilfs_segctor_extend_segments(struct nilfs_sc_info *sci, struct the_nilfs *nilfs, int nadd) { struct nilfs_segment_buffer *segbuf, *prev; struct inode *sufile = nilfs->ns_sufile; __u64 nextnextnum; LIST_HEAD(list); int err, ret, i; prev = NILFS_LAST_SEGBUF(&sci->sc_segbufs); /* * Since the segment specified with nextnum might be allocated during * the previous construction, the buffer including its segusage may * not be dirty. The following call ensures that the buffer is dirty * and will pin the buffer on memory until the sufile is written. */ err = nilfs_sufile_mark_dirty(sufile, prev->sb_nextnum); if (unlikely(err)) return err; for (i = 0; i < nadd; i++) { /* extend segment info */ err = -ENOMEM; segbuf = nilfs_segbuf_new(sci->sc_super); if (unlikely(!segbuf)) goto failed; /* map this buffer to region of segment on-disk */ nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs); sci->sc_segbuf_nblocks += segbuf->sb_rest_blocks; /* allocate the next next full segment */ err = nilfs_sufile_alloc(sufile, &nextnextnum); if (unlikely(err)) goto failed_segbuf; segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq + 1; nilfs_segbuf_set_next_segnum(segbuf, nextnextnum, nilfs); list_add_tail(&segbuf->sb_list, &list); prev = segbuf; } list_splice_tail(&list, &sci->sc_segbufs); return 0; failed_segbuf: nilfs_segbuf_free(segbuf); failed: list_for_each_entry(segbuf, &list, sb_list) { ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum); WARN_ON(ret); /* never fails */ } nilfs_destroy_logs(&list); return err; } static void nilfs_free_incomplete_logs(struct list_head *logs, struct the_nilfs *nilfs) { struct nilfs_segment_buffer *segbuf, *prev; struct inode *sufile = nilfs->ns_sufile; int ret; segbuf = NILFS_FIRST_SEGBUF(logs); if (nilfs->ns_nextnum != segbuf->sb_nextnum) { ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum); WARN_ON(ret); /* never fails */ } if (atomic_read(&segbuf->sb_err)) { /* Case 1: The first segment failed */ if (segbuf->sb_pseg_start != segbuf->sb_fseg_start) /* * Case 1a: Partial segment appended into an existing * segment */ nilfs_terminate_segment(nilfs, segbuf->sb_fseg_start, segbuf->sb_fseg_end); else /* Case 1b: New full segment */ set_nilfs_discontinued(nilfs); } prev = segbuf; list_for_each_entry_continue(segbuf, logs, sb_list) { if (prev->sb_nextnum != segbuf->sb_nextnum) { ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum); WARN_ON(ret); /* never fails */ } if (atomic_read(&segbuf->sb_err) && segbuf->sb_segnum != nilfs->ns_nextnum) /* Case 2: extended segment (!= next) failed */ nilfs_sufile_set_error(sufile, segbuf->sb_segnum); prev = segbuf; } } static void nilfs_segctor_update_segusage(struct nilfs_sc_info *sci, struct inode *sufile) { struct nilfs_segment_buffer *segbuf; unsigned long live_blocks; int ret; list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) { live_blocks = segbuf->sb_sum.nblocks + (segbuf->sb_pseg_start - segbuf->sb_fseg_start); ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum, live_blocks, sci->sc_seg_ctime); WARN_ON(ret); /* always succeed because the segusage is dirty */ } } static void nilfs_cancel_segusage(struct list_head *logs, struct inode *sufile) { struct nilfs_segment_buffer *segbuf; int ret; segbuf = NILFS_FIRST_SEGBUF(logs); ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum, segbuf->sb_pseg_start - segbuf->sb_fseg_start, 0); WARN_ON(ret); /* always succeed because the segusage is dirty */ list_for_each_entry_continue(segbuf, logs, sb_list) { ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum, 
0, 0); WARN_ON(ret); /* always succeed */ } } static void nilfs_segctor_truncate_segments(struct nilfs_sc_info *sci, struct nilfs_segment_buffer *last, struct inode *sufile) { struct nilfs_segment_buffer *segbuf = last; int ret; list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) { sci->sc_segbuf_nblocks -= segbuf->sb_rest_blocks; ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum); WARN_ON(ret); } nilfs_truncate_logs(&sci->sc_segbufs, last); } static int nilfs_segctor_collect(struct nilfs_sc_info *sci, struct the_nilfs *nilfs, int mode) { struct nilfs_cstage prev_stage = sci->sc_stage; int err, nadd = 1; /* Collection retry loop */ for (;;) { sci->sc_nblk_this_inc = 0; sci->sc_curseg = NILFS_FIRST_SEGBUF(&sci->sc_segbufs); err = nilfs_segctor_reset_segment_buffer(sci); if (unlikely(err)) goto failed; err = nilfs_segctor_collect_blocks(sci, mode); sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks; if (!err) break; if (unlikely(err != -E2BIG)) goto failed; /* The current segment is filled up */ if (mode != SC_LSEG_SR || nilfs_sc_cstage_get(sci) < NILFS_ST_CPFILE) break; nilfs_clear_logs(&sci->sc_segbufs); if (sci->sc_stage.flags & NILFS_CF_SUFREED) { err = nilfs_sufile_cancel_freev(nilfs->ns_sufile, sci->sc_freesegs, sci->sc_nfreesegs, NULL); WARN_ON(err); /* do not happen */ sci->sc_stage.flags &= ~NILFS_CF_SUFREED; } err = nilfs_segctor_extend_segments(sci, nilfs, nadd); if (unlikely(err)) return err; nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA); sci->sc_stage = prev_stage; } nilfs_segctor_zeropad_segsum(sci); nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile); return 0; failed: return err; } static void nilfs_list_replace_buffer(struct buffer_head *old_bh, struct buffer_head *new_bh) { BUG_ON(!list_empty(&new_bh->b_assoc_buffers)); list_replace_init(&old_bh->b_assoc_buffers, &new_bh->b_assoc_buffers); /* The caller must release old_bh */ } static int nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info *sci, struct nilfs_segment_buffer *segbuf, int mode) { struct inode *inode = NULL; sector_t blocknr; unsigned long nfinfo = segbuf->sb_sum.nfinfo; unsigned long nblocks = 0, ndatablk = 0; const struct nilfs_sc_operations *sc_op = NULL; struct nilfs_segsum_pointer ssp; struct nilfs_finfo *finfo = NULL; union nilfs_binfo binfo; struct buffer_head *bh, *bh_org; ino_t ino = 0; int err = 0; if (!nfinfo) goto out; blocknr = segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk; ssp.bh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers); ssp.offset = sizeof(struct nilfs_segment_summary); list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) { if (bh == segbuf->sb_super_root) break; if (!finfo) { finfo = nilfs_segctor_map_segsum_entry( sci, &ssp, sizeof(*finfo)); ino = le64_to_cpu(finfo->fi_ino); nblocks = le32_to_cpu(finfo->fi_nblocks); ndatablk = le32_to_cpu(finfo->fi_ndatablk); inode = bh->b_folio->mapping->host; if (mode == SC_LSEG_DSYNC) sc_op = &nilfs_sc_dsync_ops; else if (ino == NILFS_DAT_INO) sc_op = &nilfs_sc_dat_ops; else /* file blocks */ sc_op = &nilfs_sc_file_ops; } bh_org = bh; get_bh(bh_org); err = nilfs_bmap_assign(NILFS_I(inode)->i_bmap, &bh, blocknr, &binfo); if (bh != bh_org) nilfs_list_replace_buffer(bh_org, bh); brelse(bh_org); if (unlikely(err)) goto failed_bmap; if (ndatablk > 0) sc_op->write_data_binfo(sci, &ssp, &binfo); else sc_op->write_node_binfo(sci, &ssp, &binfo); blocknr++; if (--nblocks == 0) { finfo = NULL; if (--nfinfo == 0) break; } else if (ndatablk > 0) ndatablk--; } out: return 0; failed_bmap: return err; } 
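/*
 * Retry-growth sketch (illustrative only): when collection overflows the
 * currently mapped segments (-E2BIG), nilfs_segctor_collect() above extends
 * the construction by @nadd more segments and doubles @nadd for the next
 * retry, capped at SC_MAX_SEGDELTA.  The hypothetical helper restates that
 * progression: 1, 2, 4, 8, ..., 64, 64, ...
 */
#if 0
static int nilfs_example_next_nadd(int nadd)
{
	return min_t(int, nadd << 1, SC_MAX_SEGDELTA);
}
#endif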
static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode) { struct nilfs_segment_buffer *segbuf; int err; list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) { err = nilfs_segctor_update_payload_blocknr(sci, segbuf, mode); if (unlikely(err)) return err; nilfs_segbuf_fill_in_segsum(segbuf); } return 0; } static void nilfs_begin_page_io(struct page *page) { if (!page || PageWriteback(page)) /* * For split b-tree node pages, this function may be called * twice. We ignore the 2nd or later calls by this check. */ return; lock_page(page); clear_page_dirty_for_io(page); set_page_writeback(page); unlock_page(page); } static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci) { struct nilfs_segment_buffer *segbuf; struct page *bd_page = NULL, *fs_page = NULL; list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) { struct buffer_head *bh; list_for_each_entry(bh, &segbuf->sb_segsum_buffers, b_assoc_buffers) { if (bh->b_page != bd_page) { if (bd_page) { lock_page(bd_page); clear_page_dirty_for_io(bd_page); set_page_writeback(bd_page); unlock_page(bd_page); } bd_page = bh->b_page; } } list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) { set_buffer_async_write(bh); if (bh == segbuf->sb_super_root) { if (bh->b_page != bd_page) { lock_page(bd_page); clear_page_dirty_for_io(bd_page); set_page_writeback(bd_page); unlock_page(bd_page); bd_page = bh->b_page; } break; } if (bh->b_page != fs_page) { nilfs_begin_page_io(fs_page); fs_page = bh->b_page; } } } if (bd_page) { lock_page(bd_page); clear_page_dirty_for_io(bd_page); set_page_writeback(bd_page); unlock_page(bd_page); } nilfs_begin_page_io(fs_page); } static int nilfs_segctor_write(struct nilfs_sc_info *sci, struct the_nilfs *nilfs) { int ret; ret = nilfs_write_logs(&sci->sc_segbufs, nilfs); list_splice_tail_init(&sci->sc_segbufs, &sci->sc_write_logs); return ret; } static void nilfs_end_page_io(struct page *page, int err) { if (!page) return; if (buffer_nilfs_node(page_buffers(page)) && !PageWriteback(page)) { /* * For b-tree node pages, this function may be called twice * or more because they might be split in a segment. */ if (PageDirty(page)) { /* * For pages holding split b-tree node buffers, dirty * flag on the buffers may be cleared discretely. * In that case, the page is once redirtied for * remaining buffers, and it must be cancelled if * all the buffers get cleaned later. 
*/ lock_page(page); if (nilfs_page_buffers_clean(page)) __nilfs_clear_page_dirty(page); unlock_page(page); } return; } if (!err) { if (!nilfs_page_buffers_clean(page)) __set_page_dirty_nobuffers(page); ClearPageError(page); } else { __set_page_dirty_nobuffers(page); SetPageError(page); } end_page_writeback(page); } static void nilfs_abort_logs(struct list_head *logs, int err) { struct nilfs_segment_buffer *segbuf; struct page *bd_page = NULL, *fs_page = NULL; struct buffer_head *bh; if (list_empty(logs)) return; list_for_each_entry(segbuf, logs, sb_list) { list_for_each_entry(bh, &segbuf->sb_segsum_buffers, b_assoc_buffers) { clear_buffer_uptodate(bh); if (bh->b_page != bd_page) { if (bd_page) end_page_writeback(bd_page); bd_page = bh->b_page; } } list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) { clear_buffer_async_write(bh); if (bh == segbuf->sb_super_root) { clear_buffer_uptodate(bh); if (bh->b_page != bd_page) { end_page_writeback(bd_page); bd_page = bh->b_page; } break; } if (bh->b_page != fs_page) { nilfs_end_page_io(fs_page, err); fs_page = bh->b_page; } } } if (bd_page) end_page_writeback(bd_page); nilfs_end_page_io(fs_page, err); } static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci, struct the_nilfs *nilfs, int err) { LIST_HEAD(logs); int ret; list_splice_tail_init(&sci->sc_write_logs, &logs); ret = nilfs_wait_on_logs(&logs); nilfs_abort_logs(&logs, ret ? : err); list_splice_tail_init(&sci->sc_segbufs, &logs); nilfs_cancel_segusage(&logs, nilfs->ns_sufile); nilfs_free_incomplete_logs(&logs, nilfs); if (sci->sc_stage.flags & NILFS_CF_SUFREED) { ret = nilfs_sufile_cancel_freev(nilfs->ns_sufile, sci->sc_freesegs, sci->sc_nfreesegs, NULL); WARN_ON(ret); /* do not happen */ } nilfs_destroy_logs(&logs); } static void nilfs_set_next_segment(struct the_nilfs *nilfs, struct nilfs_segment_buffer *segbuf) { nilfs->ns_segnum = segbuf->sb_segnum; nilfs->ns_nextnum = segbuf->sb_nextnum; nilfs->ns_pseg_offset = segbuf->sb_pseg_start - segbuf->sb_fseg_start + segbuf->sb_sum.nblocks; nilfs->ns_seg_seq = segbuf->sb_sum.seg_seq; nilfs->ns_ctime = segbuf->sb_sum.ctime; } static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci) { struct nilfs_segment_buffer *segbuf; struct page *bd_page = NULL, *fs_page = NULL; struct the_nilfs *nilfs = sci->sc_super->s_fs_info; int update_sr = false; list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) { struct buffer_head *bh; list_for_each_entry(bh, &segbuf->sb_segsum_buffers, b_assoc_buffers) { set_buffer_uptodate(bh); clear_buffer_dirty(bh); if (bh->b_page != bd_page) { if (bd_page) end_page_writeback(bd_page); bd_page = bh->b_page; } } /* * We assume that the buffers which belong to the same page * continue over the buffer list. * Under this assumption, the last BHs of pages is * identifiable by the discontinuity of bh->b_page * (page != fs_page). * * For B-tree node blocks, however, this assumption is not * guaranteed. The cleanup code of B-tree node pages needs * special care. 
*/ list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) { const unsigned long set_bits = BIT(BH_Uptodate); const unsigned long clear_bits = (BIT(BH_Dirty) | BIT(BH_Async_Write) | BIT(BH_Delay) | BIT(BH_NILFS_Volatile) | BIT(BH_NILFS_Redirected)); set_mask_bits(&bh->b_state, clear_bits, set_bits); if (bh == segbuf->sb_super_root) { if (bh->b_page != bd_page) { end_page_writeback(bd_page); bd_page = bh->b_page; } update_sr = true; break; } if (bh->b_page != fs_page) { nilfs_end_page_io(fs_page, 0); fs_page = bh->b_page; } } if (!nilfs_segbuf_simplex(segbuf)) { if (segbuf->sb_sum.flags & NILFS_SS_LOGBGN) { set_bit(NILFS_SC_UNCLOSED, &sci->sc_flags); sci->sc_lseg_stime = jiffies; } if (segbuf->sb_sum.flags & NILFS_SS_LOGEND) clear_bit(NILFS_SC_UNCLOSED, &sci->sc_flags); } } /* * Since pages may continue over multiple segment buffers, * end of the last page must be checked outside of the loop. */ if (bd_page) end_page_writeback(bd_page); nilfs_end_page_io(fs_page, 0); nilfs_drop_collected_inodes(&sci->sc_dirty_files); if (nilfs_doing_gc()) nilfs_drop_collected_inodes(&sci->sc_gc_inodes); else nilfs->ns_nongc_ctime = sci->sc_seg_ctime; sci->sc_nblk_inc += sci->sc_nblk_this_inc; segbuf = NILFS_LAST_SEGBUF(&sci->sc_write_logs); nilfs_set_next_segment(nilfs, segbuf); if (update_sr) { nilfs->ns_flushed_device = 0; nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start, segbuf->sb_sum.seg_seq, nilfs->ns_cno++); clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags); clear_bit(NILFS_SC_DIRTY, &sci->sc_flags); set_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags); nilfs_segctor_clear_metadata_dirty(sci); } else clear_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags); } static int nilfs_segctor_wait(struct nilfs_sc_info *sci) { int ret; ret = nilfs_wait_on_logs(&sci->sc_write_logs); if (!ret) { nilfs_segctor_complete_write(sci); nilfs_destroy_logs(&sci->sc_write_logs); } return ret; } static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci, struct the_nilfs *nilfs) { struct nilfs_inode_info *ii, *n; struct inode *ifile = sci->sc_root->ifile; spin_lock(&nilfs->ns_inode_lock); retry: list_for_each_entry_safe(ii, n, &nilfs->ns_dirty_files, i_dirty) { if (!ii->i_bh) { struct buffer_head *ibh; int err; spin_unlock(&nilfs->ns_inode_lock); err = nilfs_ifile_get_inode_block( ifile, ii->vfs_inode.i_ino, &ibh); if (unlikely(err)) { nilfs_warn(sci->sc_super, "log writer: error %d getting inode block (ino=%lu)", err, ii->vfs_inode.i_ino); return err; } spin_lock(&nilfs->ns_inode_lock); if (likely(!ii->i_bh)) ii->i_bh = ibh; else brelse(ibh); goto retry; } // Always redirty the buffer to avoid race condition mark_buffer_dirty(ii->i_bh); nilfs_mdt_mark_dirty(ifile); clear_bit(NILFS_I_QUEUED, &ii->i_state); set_bit(NILFS_I_BUSY, &ii->i_state); list_move_tail(&ii->i_dirty, &sci->sc_dirty_files); } spin_unlock(&nilfs->ns_inode_lock); return 0; } static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci, struct the_nilfs *nilfs) { struct nilfs_inode_info *ii, *n; int during_mount = !(sci->sc_super->s_flags & SB_ACTIVE); int defer_iput = false; spin_lock(&nilfs->ns_inode_lock); list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) { if (!test_and_clear_bit(NILFS_I_UPDATED, &ii->i_state) || test_bit(NILFS_I_DIRTY, &ii->i_state)) continue; clear_bit(NILFS_I_BUSY, &ii->i_state); brelse(ii->i_bh); ii->i_bh = NULL; list_del_init(&ii->i_dirty); if (!ii->vfs_inode.i_nlink || during_mount) { /* * Defer calling iput() to avoid deadlocks if * i_nlink == 0 or mount is not yet finished. 
*/ list_add_tail(&ii->i_dirty, &sci->sc_iput_queue); defer_iput = true; } else { spin_unlock(&nilfs->ns_inode_lock); iput(&ii->vfs_inode); spin_lock(&nilfs->ns_inode_lock); } } spin_unlock(&nilfs->ns_inode_lock); if (defer_iput) schedule_work(&sci->sc_iput_work); } /* * Main procedure of segment constructor */ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode) { struct the_nilfs *nilfs = sci->sc_super->s_fs_info; int err; if (sb_rdonly(sci->sc_super)) return -EROFS; nilfs_sc_cstage_set(sci, NILFS_ST_INIT); sci->sc_cno = nilfs->ns_cno; err = nilfs_segctor_collect_dirty_files(sci, nilfs); if (unlikely(err)) goto out; if (nilfs_test_metadata_dirty(nilfs, sci->sc_root)) set_bit(NILFS_SC_DIRTY, &sci->sc_flags); if (nilfs_segctor_clean(sci)) goto out; do { sci->sc_stage.flags &= ~NILFS_CF_HISTORY_MASK; err = nilfs_segctor_begin_construction(sci, nilfs); if (unlikely(err)) goto out; /* Update time stamp */ sci->sc_seg_ctime = ktime_get_real_seconds(); err = nilfs_segctor_collect(sci, nilfs, mode); if (unlikely(err)) goto failed; /* Avoid empty segment */ if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE && nilfs_segbuf_empty(sci->sc_curseg)) { nilfs_segctor_abort_construction(sci, nilfs, 1); goto out; } err = nilfs_segctor_assign(sci, mode); if (unlikely(err)) goto failed; if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED) nilfs_segctor_fill_in_file_bmap(sci); if (mode == SC_LSEG_SR && nilfs_sc_cstage_get(sci) >= NILFS_ST_CPFILE) { err = nilfs_segctor_fill_in_checkpoint(sci); if (unlikely(err)) goto failed_to_write; nilfs_segctor_fill_in_super_root(sci, nilfs); } nilfs_segctor_update_segusage(sci, nilfs->ns_sufile); /* Write partial segments */ nilfs_segctor_prepare_write(sci); nilfs_add_checksums_on_logs(&sci->sc_segbufs, nilfs->ns_crc_seed); err = nilfs_segctor_write(sci, nilfs); if (unlikely(err)) goto failed_to_write; if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE || nilfs->ns_blocksize_bits != PAGE_SHIFT) { /* * At this point, we avoid double buffering * for blocksize < pagesize because page dirty * flag is turned off during write and dirty * buffers are not properly collected for * pages crossing over segments. */ err = nilfs_segctor_wait(sci); if (err) goto failed_to_write; } } while (nilfs_sc_cstage_get(sci) != NILFS_ST_DONE); out: nilfs_segctor_drop_written_files(sci, nilfs); return err; failed_to_write: if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED) nilfs_redirty_inodes(&sci->sc_dirty_files); failed: if (nilfs_doing_gc()) nilfs_redirty_inodes(&sci->sc_gc_inodes); nilfs_segctor_abort_construction(sci, nilfs, err); goto out; } /** * nilfs_segctor_start_timer - set timer of background write * @sci: nilfs_sc_info * * If the timer has already been set, it ignores the new request. * This function MUST be called within a section locking the segment * semaphore. 
*/ static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci) { spin_lock(&sci->sc_state_lock); if (!(sci->sc_state & NILFS_SEGCTOR_COMMIT)) { sci->sc_timer.expires = jiffies + sci->sc_interval; add_timer(&sci->sc_timer); sci->sc_state |= NILFS_SEGCTOR_COMMIT; } spin_unlock(&sci->sc_state_lock); } static void nilfs_segctor_do_flush(struct nilfs_sc_info *sci, int bn) { spin_lock(&sci->sc_state_lock); if (!(sci->sc_flush_request & BIT(bn))) { unsigned long prev_req = sci->sc_flush_request; sci->sc_flush_request |= BIT(bn); if (!prev_req) wake_up(&sci->sc_wait_daemon); } spin_unlock(&sci->sc_state_lock); } /** * nilfs_flush_segment - trigger a segment construction for resource control * @sb: super block * @ino: inode number of the file to be flushed out. */ void nilfs_flush_segment(struct super_block *sb, ino_t ino) { struct the_nilfs *nilfs = sb->s_fs_info; struct nilfs_sc_info *sci = nilfs->ns_writer; if (!sci || nilfs_doing_construction()) return; nilfs_segctor_do_flush(sci, NILFS_MDT_INODE(sb, ino) ? ino : 0); /* assign bit 0 to data files */ } struct nilfs_segctor_wait_request { wait_queue_entry_t wq; __u32 seq; int err; atomic_t done; }; static int nilfs_segctor_sync(struct nilfs_sc_info *sci) { struct nilfs_segctor_wait_request wait_req; int err = 0; spin_lock(&sci->sc_state_lock); init_wait(&wait_req.wq); wait_req.err = 0; atomic_set(&wait_req.done, 0); wait_req.seq = ++sci->sc_seq_request; spin_unlock(&sci->sc_state_lock); init_waitqueue_entry(&wait_req.wq, current); add_wait_queue(&sci->sc_wait_request, &wait_req.wq); set_current_state(TASK_INTERRUPTIBLE); wake_up(&sci->sc_wait_daemon); for (;;) { if (atomic_read(&wait_req.done)) { err = wait_req.err; break; } if (!signal_pending(current)) { schedule(); continue; } err = -ERESTARTSYS; break; } finish_wait(&sci->sc_wait_request, &wait_req.wq); return err; } static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err) { struct nilfs_segctor_wait_request *wrq, *n; unsigned long flags; spin_lock_irqsave(&sci->sc_wait_request.lock, flags); list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.head, wq.entry) { if (!atomic_read(&wrq->done) && nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq)) { wrq->err = err; atomic_set(&wrq->done, 1); } if (atomic_read(&wrq->done)) { wrq->wq.func(&wrq->wq, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0, NULL); } } spin_unlock_irqrestore(&sci->sc_wait_request.lock, flags); } /** * nilfs_construct_segment - construct a logical segment * @sb: super block * * Return Value: On success, 0 is returned. On errors, one of the following * negative error code is returned. * * %-EROFS - Read only filesystem. * * %-EIO - I/O error * * %-ENOSPC - No space left on device (only in a panic state). * * %-ERESTARTSYS - Interrupted. * * %-ENOMEM - Insufficient memory available. */ int nilfs_construct_segment(struct super_block *sb) { struct the_nilfs *nilfs = sb->s_fs_info; struct nilfs_sc_info *sci = nilfs->ns_writer; struct nilfs_transaction_info *ti; if (sb_rdonly(sb) || unlikely(!sci)) return -EROFS; /* A call inside transactions causes a deadlock. */ BUG_ON((ti = current->journal_info) && ti->ti_magic == NILFS_TI_MAGIC); return nilfs_segctor_sync(sci); } /** * nilfs_construct_dsync_segment - construct a data-only logical segment * @sb: super block * @inode: inode whose data blocks should be written out * @start: start byte offset * @end: end byte offset (inclusive) * * Return Value: On success, 0 is returned. On errors, one of the following * negative error code is returned. * * %-EROFS - Read only filesystem. 
* * %-EIO - I/O error * * %-ENOSPC - No space left on device (only in a panic state). * * %-ERESTARTSYS - Interrupted. * * %-ENOMEM - Insufficient memory available. */ int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode, loff_t start, loff_t end) { struct the_nilfs *nilfs = sb->s_fs_info; struct nilfs_sc_info *sci = nilfs->ns_writer; struct nilfs_inode_info *ii; struct nilfs_transaction_info ti; int err = 0; if (sb_rdonly(sb) || unlikely(!sci)) return -EROFS; nilfs_transaction_lock(sb, &ti, 0); ii = NILFS_I(inode); if (test_bit(NILFS_I_INODE_SYNC, &ii->i_state) || nilfs_test_opt(nilfs, STRICT_ORDER) || test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) || nilfs_discontinued(nilfs)) { nilfs_transaction_unlock(sb); err = nilfs_segctor_sync(sci); return err; } spin_lock(&nilfs->ns_inode_lock); if (!test_bit(NILFS_I_QUEUED, &ii->i_state) && !test_bit(NILFS_I_BUSY, &ii->i_state)) { spin_unlock(&nilfs->ns_inode_lock); nilfs_transaction_unlock(sb); return 0; } spin_unlock(&nilfs->ns_inode_lock); sci->sc_dsync_inode = ii; sci->sc_dsync_start = start; sci->sc_dsync_end = end; err = nilfs_segctor_do_construct(sci, SC_LSEG_DSYNC); if (!err) nilfs->ns_flushed_device = 0; nilfs_transaction_unlock(sb); return err; } #define FLUSH_FILE_BIT (0x1) /* data file only */ #define FLUSH_DAT_BIT BIT(NILFS_DAT_INO) /* DAT only */ /** * nilfs_segctor_accept - record accepted sequence count of log-write requests * @sci: segment constructor object */ static void nilfs_segctor_accept(struct nilfs_sc_info *sci) { spin_lock(&sci->sc_state_lock); sci->sc_seq_accepted = sci->sc_seq_request; spin_unlock(&sci->sc_state_lock); del_timer_sync(&sci->sc_timer); } /** * nilfs_segctor_notify - notify the result of request to caller threads * @sci: segment constructor object * @mode: mode of log forming * @err: error code to be notified */ static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err) { /* Clear requests (even when the construction failed) */ spin_lock(&sci->sc_state_lock); if (mode == SC_LSEG_SR) { sci->sc_state &= ~NILFS_SEGCTOR_COMMIT; sci->sc_seq_done = sci->sc_seq_accepted; nilfs_segctor_wakeup(sci, err); sci->sc_flush_request = 0; } else { if (mode == SC_FLUSH_FILE) sci->sc_flush_request &= ~FLUSH_FILE_BIT; else if (mode == SC_FLUSH_DAT) sci->sc_flush_request &= ~FLUSH_DAT_BIT; /* re-enable timer if checkpoint creation was not done */ if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) && time_before(jiffies, sci->sc_timer.expires)) add_timer(&sci->sc_timer); } spin_unlock(&sci->sc_state_lock); } /** * nilfs_segctor_construct - form logs and write them to disk * @sci: segment constructor object * @mode: mode of log forming */ static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode) { struct the_nilfs *nilfs = sci->sc_super->s_fs_info; struct nilfs_super_block **sbp; int err = 0; nilfs_segctor_accept(sci); if (nilfs_discontinued(nilfs)) mode = SC_LSEG_SR; if (!nilfs_segctor_confirm(sci)) err = nilfs_segctor_do_construct(sci, mode); if (likely(!err)) { if (mode != SC_FLUSH_DAT) atomic_set(&nilfs->ns_ndirtyblks, 0); if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) && nilfs_discontinued(nilfs)) { down_write(&nilfs->ns_sem); err = -EIO; sbp = nilfs_prepare_super(sci->sc_super, nilfs_sb_will_flip(nilfs)); if (likely(sbp)) { nilfs_set_log_cursor(sbp[0], nilfs); err = nilfs_commit_super(sci->sc_super, NILFS_SB_COMMIT); } up_write(&nilfs->ns_sem); } } nilfs_segctor_notify(sci, mode, err); return err; } static void nilfs_construction_timeout(struct timer_list *t) { struct 
nilfs_sc_info *sci = from_timer(sci, t, sc_timer); wake_up_process(sci->sc_timer_task); } static void nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head) { struct nilfs_inode_info *ii, *n; list_for_each_entry_safe(ii, n, head, i_dirty) { if (!test_bit(NILFS_I_UPDATED, &ii->i_state)) continue; list_del_init(&ii->i_dirty); truncate_inode_pages(&ii->vfs_inode.i_data, 0); nilfs_btnode_cache_clear(ii->i_assoc_inode->i_mapping); iput(&ii->vfs_inode); } } int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv, void **kbufs) { struct the_nilfs *nilfs = sb->s_fs_info; struct nilfs_sc_info *sci = nilfs->ns_writer; struct nilfs_transaction_info ti; int err; if (unlikely(!sci)) return -EROFS; nilfs_transaction_lock(sb, &ti, 1); err = nilfs_mdt_save_to_shadow_map(nilfs->ns_dat); if (unlikely(err)) goto out_unlock; err = nilfs_ioctl_prepare_clean_segments(nilfs, argv, kbufs); if (unlikely(err)) { nilfs_mdt_restore_from_shadow_map(nilfs->ns_dat); goto out_unlock; } sci->sc_freesegs = kbufs[4]; sci->sc_nfreesegs = argv[4].v_nmembs; list_splice_tail_init(&nilfs->ns_gc_inodes, &sci->sc_gc_inodes); for (;;) { err = nilfs_segctor_construct(sci, SC_LSEG_SR); nilfs_remove_written_gcinodes(nilfs, &sci->sc_gc_inodes); if (likely(!err)) break; nilfs_warn(sb, "error %d cleaning segments", err); set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(sci->sc_interval); } if (nilfs_test_opt(nilfs, DISCARD)) { int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs, sci->sc_nfreesegs); if (ret) { nilfs_warn(sb, "error %d on discard request, turning discards off for the device", ret); nilfs_clear_opt(nilfs, DISCARD); } } out_unlock: sci->sc_freesegs = NULL; sci->sc_nfreesegs = 0; nilfs_mdt_clear_shadow_map(nilfs->ns_dat); nilfs_transaction_unlock(sb); return err; } static void nilfs_segctor_thread_construct(struct nilfs_sc_info *sci, int mode) { struct nilfs_transaction_info ti; nilfs_transaction_lock(sci->sc_super, &ti, 0); nilfs_segctor_construct(sci, mode); /* * Unclosed segment should be retried. We do this using sc_timer. * Timeout of sc_timer will invoke complete construction which leads * to close the current logical segment. */ if (test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags)) nilfs_segctor_start_timer(sci); nilfs_transaction_unlock(sci->sc_super); } static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *sci) { int mode = 0; spin_lock(&sci->sc_state_lock); mode = (sci->sc_flush_request & FLUSH_DAT_BIT) ? SC_FLUSH_DAT : SC_FLUSH_FILE; spin_unlock(&sci->sc_state_lock); if (mode) { nilfs_segctor_do_construct(sci, mode); spin_lock(&sci->sc_state_lock); sci->sc_flush_request &= (mode == SC_FLUSH_FILE) ? ~FLUSH_FILE_BIT : ~FLUSH_DAT_BIT; spin_unlock(&sci->sc_state_lock); } clear_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags); } static int nilfs_segctor_flush_mode(struct nilfs_sc_info *sci) { if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) || time_before(jiffies, sci->sc_lseg_stime + sci->sc_mjcp_freq)) { if (!(sci->sc_flush_request & ~FLUSH_FILE_BIT)) return SC_FLUSH_FILE; else if (!(sci->sc_flush_request & ~FLUSH_DAT_BIT)) return SC_FLUSH_DAT; } return SC_LSEG_SR; } /** * nilfs_segctor_thread - main loop of the segment constructor thread. * @arg: pointer to a struct nilfs_sc_info. * * nilfs_segctor_thread() initializes a timer and serves as a daemon * to execute segment constructions. 
*/ static int nilfs_segctor_thread(void *arg) { struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg; struct the_nilfs *nilfs = sci->sc_super->s_fs_info; int timeout = 0; sci->sc_timer_task = current; /* start sync. */ sci->sc_task = current; wake_up(&sci->sc_wait_task); /* for nilfs_segctor_start_thread() */ nilfs_info(sci->sc_super, "segctord starting. Construction interval = %lu seconds, CP frequency < %lu seconds", sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ); spin_lock(&sci->sc_state_lock); loop: for (;;) { int mode; if (sci->sc_state & NILFS_SEGCTOR_QUIT) goto end_thread; if (timeout || sci->sc_seq_request != sci->sc_seq_done) mode = SC_LSEG_SR; else if (sci->sc_flush_request) mode = nilfs_segctor_flush_mode(sci); else break; spin_unlock(&sci->sc_state_lock); nilfs_segctor_thread_construct(sci, mode); spin_lock(&sci->sc_state_lock); timeout = 0; } if (freezing(current)) { spin_unlock(&sci->sc_state_lock); try_to_freeze(); spin_lock(&sci->sc_state_lock); } else { DEFINE_WAIT(wait); int should_sleep = 1; prepare_to_wait(&sci->sc_wait_daemon, &wait, TASK_INTERRUPTIBLE); if (sci->sc_seq_request != sci->sc_seq_done) should_sleep = 0; else if (sci->sc_flush_request) should_sleep = 0; else if (sci->sc_state & NILFS_SEGCTOR_COMMIT) should_sleep = time_before(jiffies, sci->sc_timer.expires); if (should_sleep) { spin_unlock(&sci->sc_state_lock); schedule(); spin_lock(&sci->sc_state_lock); } finish_wait(&sci->sc_wait_daemon, &wait); timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) && time_after_eq(jiffies, sci->sc_timer.expires)); if (nilfs_sb_dirty(nilfs) && nilfs_sb_need_update(nilfs)) set_nilfs_discontinued(nilfs); } goto loop; end_thread: /* end sync. */ sci->sc_task = NULL; wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */ spin_unlock(&sci->sc_state_lock); return 0; } static int nilfs_segctor_start_thread(struct nilfs_sc_info *sci) { struct task_struct *t; t = kthread_run(nilfs_segctor_thread, sci, "segctord"); if (IS_ERR(t)) { int err = PTR_ERR(t); nilfs_err(sci->sc_super, "error %d creating segctord thread", err); return err; } wait_event(sci->sc_wait_task, sci->sc_task != NULL); return 0; } static void nilfs_segctor_kill_thread(struct nilfs_sc_info *sci) __acquires(&sci->sc_state_lock) __releases(&sci->sc_state_lock) { sci->sc_state |= NILFS_SEGCTOR_QUIT; while (sci->sc_task) { wake_up(&sci->sc_wait_daemon); spin_unlock(&sci->sc_state_lock); wait_event(sci->sc_wait_task, sci->sc_task == NULL); spin_lock(&sci->sc_state_lock); } } /* * Setup & clean-up functions */ static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb, struct nilfs_root *root) { struct the_nilfs *nilfs = sb->s_fs_info; struct nilfs_sc_info *sci; sci = kzalloc(sizeof(*sci), GFP_KERNEL); if (!sci) return NULL; sci->sc_super = sb; nilfs_get_root(root); sci->sc_root = root; init_waitqueue_head(&sci->sc_wait_request); init_waitqueue_head(&sci->sc_wait_daemon); init_waitqueue_head(&sci->sc_wait_task); spin_lock_init(&sci->sc_state_lock); INIT_LIST_HEAD(&sci->sc_dirty_files); INIT_LIST_HEAD(&sci->sc_segbufs); INIT_LIST_HEAD(&sci->sc_write_logs); INIT_LIST_HEAD(&sci->sc_gc_inodes); INIT_LIST_HEAD(&sci->sc_iput_queue); INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func); timer_setup(&sci->sc_timer, nilfs_construction_timeout, 0); sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT; sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ; sci->sc_watermark = NILFS_SC_DEFAULT_WATERMARK; if (nilfs->ns_interval) sci->sc_interval = HZ * nilfs->ns_interval; if (nilfs->ns_watermark) sci->sc_watermark = 
nilfs->ns_watermark; return sci; } static void nilfs_segctor_write_out(struct nilfs_sc_info *sci) { int ret, retrycount = NILFS_SC_CLEANUP_RETRY; /* * The segctord thread was stopped and its timer was removed. * But some tasks remain. */ do { struct nilfs_transaction_info ti; nilfs_transaction_lock(sci->sc_super, &ti, 0); ret = nilfs_segctor_construct(sci, SC_LSEG_SR); nilfs_transaction_unlock(sci->sc_super); flush_work(&sci->sc_iput_work); } while (ret && ret != -EROFS && retrycount-- > 0); } /** * nilfs_segctor_destroy - destroy the segment constructor. * @sci: nilfs_sc_info * * nilfs_segctor_destroy() kills the segctord thread and frees * the nilfs_sc_info struct. * Caller must hold the segment semaphore. */ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci) { struct the_nilfs *nilfs = sci->sc_super->s_fs_info; int flag; up_write(&nilfs->ns_segctor_sem); spin_lock(&sci->sc_state_lock); nilfs_segctor_kill_thread(sci); flag = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) || sci->sc_flush_request || sci->sc_seq_request != sci->sc_seq_done); spin_unlock(&sci->sc_state_lock); if (flush_work(&sci->sc_iput_work)) flag = true; if (flag || !nilfs_segctor_confirm(sci)) nilfs_segctor_write_out(sci); if (!list_empty(&sci->sc_dirty_files)) { nilfs_warn(sci->sc_super, "disposed unprocessed dirty file(s) when stopping log writer"); nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1); } if (!list_empty(&sci->sc_iput_queue)) { nilfs_warn(sci->sc_super, "disposed unprocessed inode(s) in iput queue when stopping log writer"); nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1); } WARN_ON(!list_empty(&sci->sc_segbufs)); WARN_ON(!list_empty(&sci->sc_write_logs)); nilfs_put_root(sci->sc_root); down_write(&nilfs->ns_segctor_sem); timer_shutdown_sync(&sci->sc_timer); kfree(sci); } /** * nilfs_attach_log_writer - attach log writer * @sb: super block instance * @root: root object of the current filesystem tree * * This allocates a log writer object, initializes it, and starts the * log writer. * * Return Value: On success, 0 is returned. On error, one of the following * negative error code is returned. * * %-ENOMEM - Insufficient memory available. */ int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root) { struct the_nilfs *nilfs = sb->s_fs_info; int err; if (nilfs->ns_writer) { /* * This happens if the filesystem is made read-only by * __nilfs_error or nilfs_remount and then remounted * read/write. In these cases, reuse the existing * writer. */ return 0; } nilfs->ns_writer = nilfs_segctor_new(sb, root); if (!nilfs->ns_writer) return -ENOMEM; inode_attach_wb(nilfs->ns_bdev->bd_inode, NULL); err = nilfs_segctor_start_thread(nilfs->ns_writer); if (unlikely(err)) nilfs_detach_log_writer(sb); return err; } /** * nilfs_detach_log_writer - destroy log writer * @sb: super block instance * * This kills log writer daemon, frees the log writer object, and * destroys list of dirty files. 
*/ void nilfs_detach_log_writer(struct super_block *sb) { struct the_nilfs *nilfs = sb->s_fs_info; LIST_HEAD(garbage_list); down_write(&nilfs->ns_segctor_sem); if (nilfs->ns_writer) { nilfs_segctor_destroy(nilfs->ns_writer); nilfs->ns_writer = NULL; } set_nilfs_purging(nilfs); /* Force to free the list of dirty files */ spin_lock(&nilfs->ns_inode_lock); if (!list_empty(&nilfs->ns_dirty_files)) { list_splice_init(&nilfs->ns_dirty_files, &garbage_list); nilfs_warn(sb, "disposed unprocessed dirty file(s) when detaching log writer"); } spin_unlock(&nilfs->ns_inode_lock); up_write(&nilfs->ns_segctor_sem); nilfs_dispose_list(nilfs, &garbage_list, 1); clear_nilfs_purging(nilfs); }
linux-master
fs/nilfs2/segment.c
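A minimal stand-alone sketch of how the segment constructor above encodes pending flush requests in a bitmask and picks a construction mode, following the shape of nilfs_segctor_do_flush() and nilfs_segctor_flush_mode(). This is not kernel code: the timer and timestamp checks are dropped, and the numeric value assumed for NILFS_DAT_INO is only for illustration.

#include <stdio.h>

/* illustrative stand-ins; only the two FLUSH_* definitions mirror the file */
#define BIT(n)           (1UL << (n))
#define NILFS_DAT_INO    3                    /* assumed ino of the DAT file */
#define FLUSH_FILE_BIT   (0x1)                /* data file only */
#define FLUSH_DAT_BIT    BIT(NILFS_DAT_INO)   /* DAT only */

enum { SC_FLUSH_FILE = 1, SC_FLUSH_DAT, SC_LSEG_SR };

/*
 * Simplified nilfs_segctor_flush_mode(): when only one kind of flush
 * request is pending, do the corresponding partial flush; otherwise fall
 * back to a full logical segment with super root (SC_LSEG_SR).
 */
static int flush_mode(unsigned long flush_request)
{
	if (!(flush_request & ~FLUSH_FILE_BIT))
		return SC_FLUSH_FILE;
	if (!(flush_request & ~FLUSH_DAT_BIT))
		return SC_FLUSH_DAT;
	return SC_LSEG_SR;
}

int main(void)
{
	unsigned long requests[] = {
		FLUSH_FILE_BIT,                 /* data files only -> SC_FLUSH_FILE */
		FLUSH_DAT_BIT,                  /* DAT only        -> SC_FLUSH_DAT  */
		FLUSH_FILE_BIT | FLUSH_DAT_BIT, /* both            -> SC_LSEG_SR    */
	};
	unsigned int i;

	for (i = 0; i < sizeof(requests) / sizeof(requests[0]); i++)
		printf("request=0x%02lx -> mode=%d\n", requests[i],
		       flush_mode(requests[i]));
	return 0;
}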
// SPDX-License-Identifier: GPL-2.0+ /* * NILFS inode file * * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. * * Written by Amagai Yoshiji. * Revised by Ryusuke Konishi. * */ #include <linux/types.h> #include <linux/buffer_head.h> #include "nilfs.h" #include "mdt.h" #include "alloc.h" #include "ifile.h" /** * struct nilfs_ifile_info - on-memory private data of ifile * @mi: on-memory private data of metadata file * @palloc_cache: persistent object allocator cache of ifile */ struct nilfs_ifile_info { struct nilfs_mdt_info mi; struct nilfs_palloc_cache palloc_cache; }; static inline struct nilfs_ifile_info *NILFS_IFILE_I(struct inode *ifile) { return (struct nilfs_ifile_info *)NILFS_MDT(ifile); } /** * nilfs_ifile_create_inode - create a new disk inode * @ifile: ifile inode * @out_ino: pointer to a variable to store inode number * @out_bh: buffer_head contains newly allocated disk inode * * Return Value: On success, 0 is returned and the newly allocated inode * number is stored in the place pointed by @ino, and buffer_head pointer * that contains newly allocated disk inode structure is stored in the * place pointed by @out_bh * On error, one of the following negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. * * %-ENOSPC - No inode left. */ int nilfs_ifile_create_inode(struct inode *ifile, ino_t *out_ino, struct buffer_head **out_bh) { struct nilfs_palloc_req req; int ret; req.pr_entry_nr = 0; /* * 0 says find free inode from beginning * of a group. dull code!! */ req.pr_entry_bh = NULL; ret = nilfs_palloc_prepare_alloc_entry(ifile, &req); if (!ret) { ret = nilfs_palloc_get_entry_block(ifile, req.pr_entry_nr, 1, &req.pr_entry_bh); if (ret < 0) nilfs_palloc_abort_alloc_entry(ifile, &req); } if (ret < 0) { brelse(req.pr_entry_bh); return ret; } nilfs_palloc_commit_alloc_entry(ifile, &req); mark_buffer_dirty(req.pr_entry_bh); nilfs_mdt_mark_dirty(ifile); *out_ino = (ino_t)req.pr_entry_nr; *out_bh = req.pr_entry_bh; return 0; } /** * nilfs_ifile_delete_inode - delete a disk inode * @ifile: ifile inode * @ino: inode number * * Return Value: On success, 0 is returned. On error, one of the following * negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. * * %-ENOENT - The inode number @ino have not been allocated. 
*/ int nilfs_ifile_delete_inode(struct inode *ifile, ino_t ino) { struct nilfs_palloc_req req = { .pr_entry_nr = ino, .pr_entry_bh = NULL }; struct nilfs_inode *raw_inode; void *kaddr; int ret; ret = nilfs_palloc_prepare_free_entry(ifile, &req); if (!ret) { ret = nilfs_palloc_get_entry_block(ifile, req.pr_entry_nr, 0, &req.pr_entry_bh); if (ret < 0) nilfs_palloc_abort_free_entry(ifile, &req); } if (ret < 0) { brelse(req.pr_entry_bh); return ret; } kaddr = kmap_atomic(req.pr_entry_bh->b_page); raw_inode = nilfs_palloc_block_get_entry(ifile, req.pr_entry_nr, req.pr_entry_bh, kaddr); raw_inode->i_flags = 0; kunmap_atomic(kaddr); mark_buffer_dirty(req.pr_entry_bh); brelse(req.pr_entry_bh); nilfs_palloc_commit_free_entry(ifile, &req); return 0; } int nilfs_ifile_get_inode_block(struct inode *ifile, ino_t ino, struct buffer_head **out_bh) { struct super_block *sb = ifile->i_sb; int err; if (unlikely(!NILFS_VALID_INODE(sb, ino))) { nilfs_error(sb, "bad inode number: %lu", (unsigned long)ino); return -EINVAL; } err = nilfs_palloc_get_entry_block(ifile, ino, 0, out_bh); if (unlikely(err)) nilfs_warn(sb, "error %d reading inode: ino=%lu", err, (unsigned long)ino); return err; } /** * nilfs_ifile_count_free_inodes - calculate free inodes count * @ifile: ifile inode * @nmaxinodes: current maximum of available inodes count [out] * @nfreeinodes: free inodes count [out] */ int nilfs_ifile_count_free_inodes(struct inode *ifile, u64 *nmaxinodes, u64 *nfreeinodes) { u64 nused; int err; *nmaxinodes = 0; *nfreeinodes = 0; nused = atomic64_read(&NILFS_I(ifile)->i_root->inodes_count); err = nilfs_palloc_count_max_entries(ifile, nused, nmaxinodes); if (likely(!err)) *nfreeinodes = *nmaxinodes - nused; return err; } /** * nilfs_ifile_read - read or get ifile inode * @sb: super block instance * @root: root object * @inode_size: size of an inode * @raw_inode: on-disk ifile inode * @inodep: buffer to store the inode */ int nilfs_ifile_read(struct super_block *sb, struct nilfs_root *root, size_t inode_size, struct nilfs_inode *raw_inode, struct inode **inodep) { struct inode *ifile; int err; ifile = nilfs_iget_locked(sb, root, NILFS_IFILE_INO); if (unlikely(!ifile)) return -ENOMEM; if (!(ifile->i_state & I_NEW)) goto out; err = nilfs_mdt_init(ifile, NILFS_MDT_GFP, sizeof(struct nilfs_ifile_info)); if (err) goto failed; err = nilfs_palloc_init_blockgroup(ifile, inode_size); if (err) goto failed; nilfs_palloc_setup_cache(ifile, &NILFS_IFILE_I(ifile)->palloc_cache); err = nilfs_read_inode_common(ifile, raw_inode); if (err) goto failed; unlock_new_inode(ifile); out: *inodep = ifile; return 0; failed: iget_failed(ifile); return err; }
linux-master
fs/nilfs2/ifile.c
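A user-space model of the prepare/commit/abort pattern that nilfs_ifile_create_inode() follows with the persistent object allocator: reserve an entry, try to fetch its entry block, then either commit the reservation or roll it back. The in-memory bitmap and helper names below are invented for the sketch; only the three-phase structure is taken from the file.

#include <stdio.h>

#define NENTRIES 64

static unsigned char allocated[NENTRIES]; /* 1 = committed */
static unsigned char reserved[NENTRIES];  /* 1 = reserved by a prepare step */

/* prepare: reserve the first free entry without publishing it */
static int prepare_alloc(unsigned long *nr)
{
	unsigned long i;

	for (i = 0; i < NENTRIES; i++) {
		if (!allocated[i] && !reserved[i]) {
			reserved[i] = 1;
			*nr = i;
			return 0;
		}
	}
	return -1; /* no entry left; the real code reports -ENOSPC */
}

static void commit_alloc(unsigned long nr)
{
	reserved[nr] = 0;
	allocated[nr] = 1;
}

static void abort_alloc(unsigned long nr)
{
	reserved[nr] = 0; /* roll back; nothing was published */
}

/*
 * Shape of nilfs_ifile_create_inode(): prepare the allocation, try the
 * intermediate step (getting the entry block), then commit or abort.
 */
static int create_entry(unsigned long *out_nr, int simulate_io_error)
{
	unsigned long nr;

	if (prepare_alloc(&nr))
		return -1;
	if (simulate_io_error) { /* stands in for a failing entry-block read */
		abort_alloc(nr);
		return -1;
	}
	commit_alloc(nr);
	*out_nr = nr;
	return 0;
}

int main(void)
{
	unsigned long ino;

	if (!create_entry(&ino, 0))
		printf("allocated entry %lu\n", ino);
	if (create_entry(&ino, 1))
		printf("second allocation aborted; reservation rolled back\n");
	return 0;
}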
// SPDX-License-Identifier: GPL-2.0+ /* * NILFS regular file handling primitives including fsync(). * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * * Written by Amagai Yoshiji and Ryusuke Konishi. */ #include <linux/fs.h> #include <linux/mm.h> #include <linux/writeback.h> #include "nilfs.h" #include "segment.h" int nilfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) { /* * Called from fsync() system call * This is the only entry point that can catch write and synch * timing for both data blocks and intermediate blocks. * * This function should be implemented when the writeback function * will be implemented. */ struct the_nilfs *nilfs; struct inode *inode = file->f_mapping->host; int err = 0; if (nilfs_inode_dirty(inode)) { if (datasync) err = nilfs_construct_dsync_segment(inode->i_sb, inode, start, end); else err = nilfs_construct_segment(inode->i_sb); } nilfs = inode->i_sb->s_fs_info; if (!err) err = nilfs_flush_device(nilfs); return err; } static vm_fault_t nilfs_page_mkwrite(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct page *page = vmf->page; struct inode *inode = file_inode(vma->vm_file); struct nilfs_transaction_info ti; int ret = 0; if (unlikely(nilfs_near_disk_full(inode->i_sb->s_fs_info))) return VM_FAULT_SIGBUS; /* -ENOSPC */ sb_start_pagefault(inode->i_sb); lock_page(page); if (page->mapping != inode->i_mapping || page_offset(page) >= i_size_read(inode) || !PageUptodate(page)) { unlock_page(page); ret = -EFAULT; /* make the VM retry the fault */ goto out; } /* * check to see if the page is mapped already (no holes) */ if (PageMappedToDisk(page)) goto mapped; if (page_has_buffers(page)) { struct buffer_head *bh, *head; int fully_mapped = 1; bh = head = page_buffers(page); do { if (!buffer_mapped(bh)) { fully_mapped = 0; break; } } while (bh = bh->b_this_page, bh != head); if (fully_mapped) { SetPageMappedToDisk(page); goto mapped; } } unlock_page(page); /* * fill hole blocks */ ret = nilfs_transaction_begin(inode->i_sb, &ti, 1); /* never returns -ENOMEM, but may return -ENOSPC */ if (unlikely(ret)) goto out; file_update_time(vma->vm_file); ret = block_page_mkwrite(vma, vmf, nilfs_get_block); if (ret) { nilfs_transaction_abort(inode->i_sb); goto out; } nilfs_set_file_dirty(inode, 1 << (PAGE_SHIFT - inode->i_blkbits)); nilfs_transaction_commit(inode->i_sb); mapped: wait_for_stable_page(page); out: sb_end_pagefault(inode->i_sb); return vmf_fs_error(ret); } static const struct vm_operations_struct nilfs_file_vm_ops = { .fault = filemap_fault, .map_pages = filemap_map_pages, .page_mkwrite = nilfs_page_mkwrite, }; static int nilfs_file_mmap(struct file *file, struct vm_area_struct *vma) { file_accessed(file); vma->vm_ops = &nilfs_file_vm_ops; return 0; } /* * We have mostly NULL's here: the current defaults are ok for * the nilfs filesystem. 
*/ const struct file_operations nilfs_file_operations = { .llseek = generic_file_llseek, .read_iter = generic_file_read_iter, .write_iter = generic_file_write_iter, .unlocked_ioctl = nilfs_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = nilfs_compat_ioctl, #endif /* CONFIG_COMPAT */ .mmap = nilfs_file_mmap, .open = generic_file_open, /* .release = nilfs_release_file, */ .fsync = nilfs_sync_file, .splice_read = filemap_splice_read, .splice_write = iter_file_splice_write, }; const struct inode_operations nilfs_file_inode_operations = { .setattr = nilfs_setattr, .permission = nilfs_permission, .fiemap = nilfs_fiemap, .fileattr_get = nilfs_fileattr_get, .fileattr_set = nilfs_fileattr_set, }; /* end of file */
linux-master
fs/nilfs2/file.c
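nilfs_page_mkwrite() above marks 1 << (PAGE_SHIFT - inode->i_blkbits) blocks dirty, i.e. one page's worth of filesystem blocks. The tiny program below just evaluates that expression for a few block sizes; the 4 KiB page size and the block-size range are assumed values for illustration, not read from a running system.

#include <stdio.h>

#define PAGE_SHIFT 12 /* 4 KiB pages assumed */

/* number of filesystem blocks covered by one page */
static unsigned int blocks_per_page(unsigned int blkbits)
{
	return 1u << (PAGE_SHIFT - blkbits);
}

int main(void)
{
	unsigned int blkbits;

	/* block sizes from 1 KiB (blkbits = 10) up to the page size */
	for (blkbits = 10; blkbits <= PAGE_SHIFT; blkbits++)
		printf("block size %4u bytes -> %u block(s) dirtied per page\n",
		       1u << blkbits, blocks_per_page(blkbits));
	return 0;
}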
// SPDX-License-Identifier: GPL-2.0+ /* * NILFS B-tree node cache * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * * Originally written by Seiji Kihara. * Fully revised by Ryusuke Konishi for stabilization and simplification. * */ #include <linux/types.h> #include <linux/buffer_head.h> #include <linux/mm.h> #include <linux/backing-dev.h> #include <linux/gfp.h> #include "nilfs.h" #include "mdt.h" #include "dat.h" #include "page.h" #include "btnode.h" /** * nilfs_init_btnc_inode - initialize B-tree node cache inode * @btnc_inode: inode to be initialized * * nilfs_init_btnc_inode() sets up an inode for B-tree node cache. */ void nilfs_init_btnc_inode(struct inode *btnc_inode) { struct nilfs_inode_info *ii = NILFS_I(btnc_inode); btnc_inode->i_mode = S_IFREG; ii->i_flags = 0; memset(&ii->i_bmap_data, 0, sizeof(struct nilfs_bmap)); mapping_set_gfp_mask(btnc_inode->i_mapping, GFP_NOFS); } void nilfs_btnode_cache_clear(struct address_space *btnc) { invalidate_mapping_pages(btnc, 0, -1); truncate_inode_pages(btnc, 0); } struct buffer_head * nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr) { struct inode *inode = btnc->host; struct buffer_head *bh; bh = nilfs_grab_buffer(inode, btnc, blocknr, BIT(BH_NILFS_Node)); if (unlikely(!bh)) return NULL; if (unlikely(buffer_mapped(bh) || buffer_uptodate(bh) || buffer_dirty(bh))) { brelse(bh); BUG(); } memset(bh->b_data, 0, i_blocksize(inode)); bh->b_bdev = inode->i_sb->s_bdev; bh->b_blocknr = blocknr; set_buffer_mapped(bh); set_buffer_uptodate(bh); unlock_page(bh->b_page); put_page(bh->b_page); return bh; } int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr, sector_t pblocknr, blk_opf_t opf, struct buffer_head **pbh, sector_t *submit_ptr) { struct buffer_head *bh; struct inode *inode = btnc->host; struct page *page; int err; bh = nilfs_grab_buffer(inode, btnc, blocknr, BIT(BH_NILFS_Node)); if (unlikely(!bh)) return -ENOMEM; err = -EEXIST; /* internal code */ page = bh->b_page; if (buffer_uptodate(bh) || buffer_dirty(bh)) goto found; if (pblocknr == 0) { pblocknr = blocknr; if (inode->i_ino != NILFS_DAT_INO) { struct the_nilfs *nilfs = inode->i_sb->s_fs_info; /* blocknr is a virtual block number */ err = nilfs_dat_translate(nilfs->ns_dat, blocknr, &pblocknr); if (unlikely(err)) { brelse(bh); goto out_locked; } } } if (opf & REQ_RAHEAD) { if (pblocknr != *submit_ptr + 1 || !trylock_buffer(bh)) { err = -EBUSY; /* internal code */ brelse(bh); goto out_locked; } } else { /* opf == REQ_OP_READ */ lock_buffer(bh); } if (buffer_uptodate(bh)) { unlock_buffer(bh); err = -EEXIST; /* internal code */ goto found; } set_buffer_mapped(bh); bh->b_bdev = inode->i_sb->s_bdev; bh->b_blocknr = pblocknr; /* set block address for read */ bh->b_end_io = end_buffer_read_sync; get_bh(bh); submit_bh(opf, bh); bh->b_blocknr = blocknr; /* set back to the given block address */ *submit_ptr = pblocknr; err = 0; found: *pbh = bh; out_locked: unlock_page(page); put_page(page); return err; } /** * nilfs_btnode_delete - delete B-tree node buffer * @bh: buffer to be deleted * * nilfs_btnode_delete() invalidates the specified buffer and delete the page * including the buffer if the page gets unbusy. 
*/ void nilfs_btnode_delete(struct buffer_head *bh) { struct address_space *mapping; struct page *page = bh->b_page; pgoff_t index = page_index(page); int still_dirty; get_page(page); lock_page(page); wait_on_page_writeback(page); nilfs_forget_buffer(bh); still_dirty = PageDirty(page); mapping = page->mapping; unlock_page(page); put_page(page); if (!still_dirty && mapping) invalidate_inode_pages2_range(mapping, index, index); } /** * nilfs_btnode_prepare_change_key * prepare to move contents of the block for old key to one of new key. * the old buffer will not be removed, but might be reused for new buffer. * it might return -ENOMEM because of memory allocation errors, * and might return -EIO because of disk read errors. */ int nilfs_btnode_prepare_change_key(struct address_space *btnc, struct nilfs_btnode_chkey_ctxt *ctxt) { struct buffer_head *obh, *nbh; struct inode *inode = btnc->host; __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey; int err; if (oldkey == newkey) return 0; obh = ctxt->bh; ctxt->newbh = NULL; if (inode->i_blkbits == PAGE_SHIFT) { struct page *opage = obh->b_page; lock_page(opage); retry: /* BUG_ON(oldkey != obh->b_folio->index); */ if (unlikely(oldkey != opage->index)) NILFS_PAGE_BUG(opage, "invalid oldkey %lld (newkey=%lld)", (unsigned long long)oldkey, (unsigned long long)newkey); xa_lock_irq(&btnc->i_pages); err = __xa_insert(&btnc->i_pages, newkey, opage, GFP_NOFS); xa_unlock_irq(&btnc->i_pages); /* * Note: page->index will not change to newkey until * nilfs_btnode_commit_change_key() will be called. * To protect the page in intermediate state, the page lock * is held. */ if (!err) return 0; else if (err != -EBUSY) goto failed_unlock; err = invalidate_inode_pages2_range(btnc, newkey, newkey); if (!err) goto retry; /* fallback to copy mode */ unlock_page(opage); } nbh = nilfs_btnode_create_block(btnc, newkey); if (!nbh) return -ENOMEM; BUG_ON(nbh == obh); ctxt->newbh = nbh; return 0; failed_unlock: unlock_page(obh->b_page); return err; } /** * nilfs_btnode_commit_change_key * commit the change_key operation prepared by prepare_change_key(). */ void nilfs_btnode_commit_change_key(struct address_space *btnc, struct nilfs_btnode_chkey_ctxt *ctxt) { struct buffer_head *obh = ctxt->bh, *nbh = ctxt->newbh; __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey; struct page *opage; if (oldkey == newkey) return; if (nbh == NULL) { /* blocksize == pagesize */ opage = obh->b_page; if (unlikely(oldkey != opage->index)) NILFS_PAGE_BUG(opage, "invalid oldkey %lld (newkey=%lld)", (unsigned long long)oldkey, (unsigned long long)newkey); mark_buffer_dirty(obh); xa_lock_irq(&btnc->i_pages); __xa_erase(&btnc->i_pages, oldkey); __xa_set_mark(&btnc->i_pages, newkey, PAGECACHE_TAG_DIRTY); xa_unlock_irq(&btnc->i_pages); opage->index = obh->b_blocknr = newkey; unlock_page(opage); } else { nilfs_copy_buffer(nbh, obh); mark_buffer_dirty(nbh); nbh->b_blocknr = newkey; ctxt->bh = nbh; nilfs_btnode_delete(obh); /* will decrement bh->b_count */ } } /** * nilfs_btnode_abort_change_key * abort the change_key operation prepared by prepare_change_key(). 
*/ void nilfs_btnode_abort_change_key(struct address_space *btnc, struct nilfs_btnode_chkey_ctxt *ctxt) { struct buffer_head *nbh = ctxt->newbh; __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey; if (oldkey == newkey) return; if (nbh == NULL) { /* blocksize == pagesize */ xa_erase_irq(&btnc->i_pages, newkey); unlock_page(ctxt->bh->b_page); } else { /* * When canceling a buffer that a prepare operation has * allocated to copy a node block to another location, use * nilfs_btnode_delete() to initialize and release the buffer * so that the buffer flags will not be in an inconsistent * state when it is reallocated. */ nilfs_btnode_delete(nbh); } }
linux-master
fs/nilfs2/btnode.c
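A user-space model of the change-key protocol implemented by nilfs_btnode_prepare_change_key(), nilfs_btnode_commit_change_key() and nilfs_btnode_abort_change_key() above: during prepare the node is reachable under both the old and the new key, commit drops the old key, and abort drops the new one, so the index stays consistent whichever way the caller finishes. A plain array stands in for the page-cache xarray, and the fallback copy path is omitted.

#include <stdio.h>

#define NKEYS 16

static const char *slot[NKEYS]; /* key -> node payload (NULL = empty) */

static const char *show(unsigned int key)
{
	return slot[key] ? slot[key] : "(empty)";
}

/* prepare: publish the node under the new key while keeping the old one */
static int prepare_change_key(unsigned int oldkey, unsigned int newkey)
{
	if (oldkey == newkey)
		return 0;
	if (slot[newkey])
		return -1; /* target busy; the real code falls back to a copy */
	slot[newkey] = slot[oldkey];
	return 0;
}

/* commit: the move is final, drop the old mapping */
static void commit_change_key(unsigned int oldkey, unsigned int newkey)
{
	if (oldkey != newkey)
		slot[oldkey] = NULL;
}

/* abort: undo the prepare step, drop the new mapping */
static void abort_change_key(unsigned int oldkey, unsigned int newkey)
{
	if (oldkey != newkey)
		slot[newkey] = NULL;
}

int main(void)
{
	slot[3] = "btree node";

	if (prepare_change_key(3, 7) == 0)
		commit_change_key(3, 7);
	printf("after commit: key 3 -> %s, key 7 -> %s\n", show(3), show(7));

	if (prepare_change_key(7, 2) == 0)
		abort_change_key(7, 2); /* abandon the second move */
	printf("after abort:  key 7 -> %s, key 2 -> %s\n", show(7), show(2));
	return 0;
}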
/* * linux/fs/hfs/super.c * * Copyright (C) 1995-1997 Paul H. Hargrove * (C) 2003 Ardis Technologies <[email protected]> * This file may be distributed under the terms of the GNU General Public License. * * This file contains hfs_read_super(), some of the super_ops and * init_hfs_fs() and exit_hfs_fs(). The remaining super_ops are in * inode.c since they deal with inodes. * * Based on the minix file system code, (C) 1991, 1992 by Linus Torvalds */ #include <linux/module.h> #include <linux/blkdev.h> #include <linux/backing-dev.h> #include <linux/mount.h> #include <linux/init.h> #include <linux/nls.h> #include <linux/parser.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/vfs.h> #include "hfs_fs.h" #include "btree.h" static struct kmem_cache *hfs_inode_cachep; MODULE_LICENSE("GPL"); static int hfs_sync_fs(struct super_block *sb, int wait) { hfs_mdb_commit(sb); return 0; } /* * hfs_put_super() * * This is the put_super() entry in the super_operations structure for * HFS filesystems. The purpose is to release the resources * associated with the superblock sb. */ static void hfs_put_super(struct super_block *sb) { cancel_delayed_work_sync(&HFS_SB(sb)->mdb_work); hfs_mdb_close(sb); /* release the MDB's resources */ hfs_mdb_put(sb); } static void flush_mdb(struct work_struct *work) { struct hfs_sb_info *sbi; struct super_block *sb; sbi = container_of(work, struct hfs_sb_info, mdb_work.work); sb = sbi->sb; spin_lock(&sbi->work_lock); sbi->work_queued = 0; spin_unlock(&sbi->work_lock); hfs_mdb_commit(sb); } void hfs_mark_mdb_dirty(struct super_block *sb) { struct hfs_sb_info *sbi = HFS_SB(sb); unsigned long delay; if (sb_rdonly(sb)) return; spin_lock(&sbi->work_lock); if (!sbi->work_queued) { delay = msecs_to_jiffies(dirty_writeback_interval * 10); queue_delayed_work(system_long_wq, &sbi->mdb_work, delay); sbi->work_queued = 1; } spin_unlock(&sbi->work_lock); } /* * hfs_statfs() * * This is the statfs() entry in the super_operations structure for * HFS filesystems. The purpose is to return various data about the * filesystem. * * changed f_files/f_ffree to reflect the fs_ablock/free_ablocks. */ static int hfs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; u64 id = huge_encode_dev(sb->s_bdev->bd_dev); buf->f_type = HFS_SUPER_MAGIC; buf->f_bsize = sb->s_blocksize; buf->f_blocks = (u32)HFS_SB(sb)->fs_ablocks * HFS_SB(sb)->fs_div; buf->f_bfree = (u32)HFS_SB(sb)->free_ablocks * HFS_SB(sb)->fs_div; buf->f_bavail = buf->f_bfree; buf->f_files = HFS_SB(sb)->fs_ablocks; buf->f_ffree = HFS_SB(sb)->free_ablocks; buf->f_fsid = u64_to_fsid(id); buf->f_namelen = HFS_NAMELEN; return 0; } static int hfs_remount(struct super_block *sb, int *flags, char *data) { sync_filesystem(sb); *flags |= SB_NODIRATIME; if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb)) return 0; if (!(*flags & SB_RDONLY)) { if (!(HFS_SB(sb)->mdb->drAtrb & cpu_to_be16(HFS_SB_ATTRIB_UNMNT))) { pr_warn("filesystem was not cleanly unmounted, running fsck.hfs is recommended. 
leaving read-only.\n"); sb->s_flags |= SB_RDONLY; *flags |= SB_RDONLY; } else if (HFS_SB(sb)->mdb->drAtrb & cpu_to_be16(HFS_SB_ATTRIB_SLOCK)) { pr_warn("filesystem is marked locked, leaving read-only.\n"); sb->s_flags |= SB_RDONLY; *flags |= SB_RDONLY; } } return 0; } static int hfs_show_options(struct seq_file *seq, struct dentry *root) { struct hfs_sb_info *sbi = HFS_SB(root->d_sb); if (sbi->s_creator != cpu_to_be32(0x3f3f3f3f)) seq_show_option_n(seq, "creator", (char *)&sbi->s_creator, 4); if (sbi->s_type != cpu_to_be32(0x3f3f3f3f)) seq_show_option_n(seq, "type", (char *)&sbi->s_type, 4); seq_printf(seq, ",uid=%u,gid=%u", from_kuid_munged(&init_user_ns, sbi->s_uid), from_kgid_munged(&init_user_ns, sbi->s_gid)); if (sbi->s_file_umask != 0133) seq_printf(seq, ",file_umask=%o", sbi->s_file_umask); if (sbi->s_dir_umask != 0022) seq_printf(seq, ",dir_umask=%o", sbi->s_dir_umask); if (sbi->part >= 0) seq_printf(seq, ",part=%u", sbi->part); if (sbi->session >= 0) seq_printf(seq, ",session=%u", sbi->session); if (sbi->nls_disk) seq_printf(seq, ",codepage=%s", sbi->nls_disk->charset); if (sbi->nls_io) seq_printf(seq, ",iocharset=%s", sbi->nls_io->charset); if (sbi->s_quiet) seq_printf(seq, ",quiet"); return 0; } static struct inode *hfs_alloc_inode(struct super_block *sb) { struct hfs_inode_info *i; i = alloc_inode_sb(sb, hfs_inode_cachep, GFP_KERNEL); return i ? &i->vfs_inode : NULL; } static void hfs_free_inode(struct inode *inode) { kmem_cache_free(hfs_inode_cachep, HFS_I(inode)); } static const struct super_operations hfs_super_operations = { .alloc_inode = hfs_alloc_inode, .free_inode = hfs_free_inode, .write_inode = hfs_write_inode, .evict_inode = hfs_evict_inode, .put_super = hfs_put_super, .sync_fs = hfs_sync_fs, .statfs = hfs_statfs, .remount_fs = hfs_remount, .show_options = hfs_show_options, }; enum { opt_uid, opt_gid, opt_umask, opt_file_umask, opt_dir_umask, opt_part, opt_session, opt_type, opt_creator, opt_quiet, opt_codepage, opt_iocharset, opt_err }; static const match_table_t tokens = { { opt_uid, "uid=%u" }, { opt_gid, "gid=%u" }, { opt_umask, "umask=%o" }, { opt_file_umask, "file_umask=%o" }, { opt_dir_umask, "dir_umask=%o" }, { opt_part, "part=%u" }, { opt_session, "session=%u" }, { opt_type, "type=%s" }, { opt_creator, "creator=%s" }, { opt_quiet, "quiet" }, { opt_codepage, "codepage=%s" }, { opt_iocharset, "iocharset=%s" }, { opt_err, NULL } }; static inline int match_fourchar(substring_t *arg, u32 *result) { if (arg->to - arg->from != 4) return -EINVAL; memcpy(result, arg->from, 4); return 0; } /* * parse_options() * * adapted from linux/fs/msdos/inode.c written 1992,93 by Werner Almesberger * This function is called by hfs_read_super() to parse the mount options. */ static int parse_options(char *options, struct hfs_sb_info *hsb) { char *p; substring_t args[MAX_OPT_ARGS]; int tmp, token; /* initialize the sb with defaults */ hsb->s_uid = current_uid(); hsb->s_gid = current_gid(); hsb->s_file_umask = 0133; hsb->s_dir_umask = 0022; hsb->s_type = hsb->s_creator = cpu_to_be32(0x3f3f3f3f); /* == '????' 
*/ hsb->s_quiet = 0; hsb->part = -1; hsb->session = -1; if (!options) return 1; while ((p = strsep(&options, ",")) != NULL) { if (!*p) continue; token = match_token(p, tokens, args); switch (token) { case opt_uid: if (match_int(&args[0], &tmp)) { pr_err("uid requires an argument\n"); return 0; } hsb->s_uid = make_kuid(current_user_ns(), (uid_t)tmp); if (!uid_valid(hsb->s_uid)) { pr_err("invalid uid %d\n", tmp); return 0; } break; case opt_gid: if (match_int(&args[0], &tmp)) { pr_err("gid requires an argument\n"); return 0; } hsb->s_gid = make_kgid(current_user_ns(), (gid_t)tmp); if (!gid_valid(hsb->s_gid)) { pr_err("invalid gid %d\n", tmp); return 0; } break; case opt_umask: if (match_octal(&args[0], &tmp)) { pr_err("umask requires a value\n"); return 0; } hsb->s_file_umask = (umode_t)tmp; hsb->s_dir_umask = (umode_t)tmp; break; case opt_file_umask: if (match_octal(&args[0], &tmp)) { pr_err("file_umask requires a value\n"); return 0; } hsb->s_file_umask = (umode_t)tmp; break; case opt_dir_umask: if (match_octal(&args[0], &tmp)) { pr_err("dir_umask requires a value\n"); return 0; } hsb->s_dir_umask = (umode_t)tmp; break; case opt_part: if (match_int(&args[0], &hsb->part)) { pr_err("part requires an argument\n"); return 0; } break; case opt_session: if (match_int(&args[0], &hsb->session)) { pr_err("session requires an argument\n"); return 0; } break; case opt_type: if (match_fourchar(&args[0], &hsb->s_type)) { pr_err("type requires a 4 character value\n"); return 0; } break; case opt_creator: if (match_fourchar(&args[0], &hsb->s_creator)) { pr_err("creator requires a 4 character value\n"); return 0; } break; case opt_quiet: hsb->s_quiet = 1; break; case opt_codepage: if (hsb->nls_disk) { pr_err("unable to change codepage\n"); return 0; } p = match_strdup(&args[0]); if (p) hsb->nls_disk = load_nls(p); if (!hsb->nls_disk) { pr_err("unable to load codepage \"%s\"\n", p); kfree(p); return 0; } kfree(p); break; case opt_iocharset: if (hsb->nls_io) { pr_err("unable to change iocharset\n"); return 0; } p = match_strdup(&args[0]); if (p) hsb->nls_io = load_nls(p); if (!hsb->nls_io) { pr_err("unable to load iocharset \"%s\"\n", p); kfree(p); return 0; } kfree(p); break; default: return 0; } } if (hsb->nls_disk && !hsb->nls_io) { hsb->nls_io = load_nls_default(); if (!hsb->nls_io) { pr_err("unable to load default iocharset\n"); return 0; } } hsb->s_dir_umask &= 0777; hsb->s_file_umask &= 0577; return 1; } /* * hfs_read_super() * * This is the function that is responsible for mounting an HFS * filesystem. It performs all the tasks necessary to get enough data * from the disk to read the root inode. This includes parsing the * mount options, dealing with Macintosh partitions, reading the * superblock and the allocation bitmap blocks, calling * hfs_btree_init() to get the necessary data about the extents and * catalog B-trees and, finally, reading the root inode into memory. 
*/ static int hfs_fill_super(struct super_block *sb, void *data, int silent) { struct hfs_sb_info *sbi; struct hfs_find_data fd; hfs_cat_rec rec; struct inode *root_inode; int res; sbi = kzalloc(sizeof(struct hfs_sb_info), GFP_KERNEL); if (!sbi) return -ENOMEM; sbi->sb = sb; sb->s_fs_info = sbi; spin_lock_init(&sbi->work_lock); INIT_DELAYED_WORK(&sbi->mdb_work, flush_mdb); res = -EINVAL; if (!parse_options((char *)data, sbi)) { pr_err("unable to parse mount options\n"); goto bail; } sb->s_op = &hfs_super_operations; sb->s_xattr = hfs_xattr_handlers; sb->s_flags |= SB_NODIRATIME; mutex_init(&sbi->bitmap_lock); res = hfs_mdb_get(sb); if (res) { if (!silent) pr_warn("can't find a HFS filesystem on dev %s\n", hfs_mdb_name(sb)); res = -EINVAL; goto bail; } /* try to get the root inode */ res = hfs_find_init(HFS_SB(sb)->cat_tree, &fd); if (res) goto bail_no_root; res = hfs_cat_find_brec(sb, HFS_ROOT_CNID, &fd); if (!res) { if (fd.entrylength > sizeof(rec) || fd.entrylength < 0) { res = -EIO; goto bail_hfs_find; } hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, fd.entrylength); } if (res) goto bail_hfs_find; res = -EINVAL; root_inode = hfs_iget(sb, &fd.search_key->cat, &rec); hfs_find_exit(&fd); if (!root_inode) goto bail_no_root; sb->s_d_op = &hfs_dentry_operations; res = -ENOMEM; sb->s_root = d_make_root(root_inode); if (!sb->s_root) goto bail_no_root; /* everything's okay */ return 0; bail_hfs_find: hfs_find_exit(&fd); bail_no_root: pr_err("get root inode failed\n"); bail: hfs_mdb_put(sb); return res; } static struct dentry *hfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_bdev(fs_type, flags, dev_name, data, hfs_fill_super); } static struct file_system_type hfs_fs_type = { .owner = THIS_MODULE, .name = "hfs", .mount = hfs_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; MODULE_ALIAS_FS("hfs"); static void hfs_init_once(void *p) { struct hfs_inode_info *i = p; inode_init_once(&i->vfs_inode); } static int __init init_hfs_fs(void) { int err; hfs_inode_cachep = kmem_cache_create("hfs_inode_cache", sizeof(struct hfs_inode_info), 0, SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, hfs_init_once); if (!hfs_inode_cachep) return -ENOMEM; err = register_filesystem(&hfs_fs_type); if (err) kmem_cache_destroy(hfs_inode_cachep); return err; } static void __exit exit_hfs_fs(void) { unregister_filesystem(&hfs_fs_type); /* * Make sure all delayed rcu free inodes are flushed before we * destroy cache. */ rcu_barrier(); kmem_cache_destroy(hfs_inode_cachep); } module_init(init_hfs_fs) module_exit(exit_hfs_fs)
linux-master
fs/hfs/super.c
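The "type=" and "creator=" mount options above are packed as four-character codes: match_fourchar() rejects anything that is not exactly four bytes and memcpy()s the bytes into a 32-bit value. The stand-alone program below mimics that check on plain strings; it uses host byte order, whereas the superblock fields are stored big-endian.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* return 0 and fill *result only when arg is exactly four characters */
static int match_fourchar(const char *arg, uint32_t *result)
{
	if (strlen(arg) != 4)
		return -1;
	memcpy(result, arg, 4);
	return 0;
}

int main(void)
{
	const char *args[] = { "TEXT", "ttxt", "toolong", "ab" };
	uint32_t code;
	size_t i;

	for (i = 0; i < sizeof(args) / sizeof(args[0]); i++) {
		if (match_fourchar(args[i], &code) == 0)
			printf("'%s' -> 0x%08x\n", args[i], (unsigned int)code);
		else
			printf("'%s' rejected: a 4 character value is required\n",
			       args[i]);
	}
	return 0;
}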
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/hfs/bnode.c * * Copyright (C) 2001 * Brad Boyer ([email protected]) * (C) 2003 Ardis Technologies <[email protected]> * * Handle basic btree node operations */ #include <linux/pagemap.h> #include <linux/slab.h> #include <linux/swap.h> #include "btree.h" void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len) { struct page *page; int pagenum; int bytes_read; int bytes_to_read; off += node->page_offset; pagenum = off >> PAGE_SHIFT; off &= ~PAGE_MASK; /* compute page offset for the first page */ for (bytes_read = 0; bytes_read < len; bytes_read += bytes_to_read) { if (pagenum >= node->tree->pages_per_bnode) break; page = node->page[pagenum]; bytes_to_read = min_t(int, len - bytes_read, PAGE_SIZE - off); memcpy_from_page(buf + bytes_read, page, off, bytes_to_read); pagenum++; off = 0; /* page offset only applies to the first page */ } } u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off) { __be16 data; // optimize later... hfs_bnode_read(node, &data, off, 2); return be16_to_cpu(data); } u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off) { u8 data; // optimize later... hfs_bnode_read(node, &data, off, 1); return data; } void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off) { struct hfs_btree *tree; int key_len; tree = node->tree; if (node->type == HFS_NODE_LEAF || tree->attributes & HFS_TREE_VARIDXKEYS) key_len = hfs_bnode_read_u8(node, off) + 1; else key_len = tree->max_key_len + 1; hfs_bnode_read(node, key, off, key_len); } void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len) { struct page *page; off += node->page_offset; page = node->page[0]; memcpy_to_page(page, off, buf, len); set_page_dirty(page); } void hfs_bnode_write_u16(struct hfs_bnode *node, int off, u16 data) { __be16 v = cpu_to_be16(data); // optimize later... hfs_bnode_write(node, &v, off, 2); } void hfs_bnode_write_u8(struct hfs_bnode *node, int off, u8 data) { // optimize later... 
hfs_bnode_write(node, &data, off, 1); } void hfs_bnode_clear(struct hfs_bnode *node, int off, int len) { struct page *page; off += node->page_offset; page = node->page[0]; memzero_page(page, off, len); set_page_dirty(page); } void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst, struct hfs_bnode *src_node, int src, int len) { struct page *src_page, *dst_page; hfs_dbg(BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len); if (!len) return; src += src_node->page_offset; dst += dst_node->page_offset; src_page = src_node->page[0]; dst_page = dst_node->page[0]; memcpy_page(dst_page, dst, src_page, src, len); set_page_dirty(dst_page); } void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len) { struct page *page; void *ptr; hfs_dbg(BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len); if (!len) return; src += node->page_offset; dst += node->page_offset; page = node->page[0]; ptr = kmap_local_page(page); memmove(ptr + dst, ptr + src, len); kunmap_local(ptr); set_page_dirty(page); } void hfs_bnode_dump(struct hfs_bnode *node) { struct hfs_bnode_desc desc; __be32 cnid; int i, off, key_off; hfs_dbg(BNODE_MOD, "bnode: %d\n", node->this); hfs_bnode_read(node, &desc, 0, sizeof(desc)); hfs_dbg(BNODE_MOD, "%d, %d, %d, %d, %d\n", be32_to_cpu(desc.next), be32_to_cpu(desc.prev), desc.type, desc.height, be16_to_cpu(desc.num_recs)); off = node->tree->node_size - 2; for (i = be16_to_cpu(desc.num_recs); i >= 0; off -= 2, i--) { key_off = hfs_bnode_read_u16(node, off); hfs_dbg_cont(BNODE_MOD, " %d", key_off); if (i && node->type == HFS_NODE_INDEX) { int tmp; if (node->tree->attributes & HFS_TREE_VARIDXKEYS) tmp = (hfs_bnode_read_u8(node, key_off) | 1) + 1; else tmp = node->tree->max_key_len + 1; hfs_dbg_cont(BNODE_MOD, " (%d,%d", tmp, hfs_bnode_read_u8(node, key_off)); hfs_bnode_read(node, &cnid, key_off + tmp, 4); hfs_dbg_cont(BNODE_MOD, ",%d)", be32_to_cpu(cnid)); } else if (i && node->type == HFS_NODE_LEAF) { int tmp; tmp = hfs_bnode_read_u8(node, key_off); hfs_dbg_cont(BNODE_MOD, " (%d)", tmp); } } hfs_dbg_cont(BNODE_MOD, "\n"); } void hfs_bnode_unlink(struct hfs_bnode *node) { struct hfs_btree *tree; struct hfs_bnode *tmp; __be32 cnid; tree = node->tree; if (node->prev) { tmp = hfs_bnode_find(tree, node->prev); if (IS_ERR(tmp)) return; tmp->next = node->next; cnid = cpu_to_be32(tmp->next); hfs_bnode_write(tmp, &cnid, offsetof(struct hfs_bnode_desc, next), 4); hfs_bnode_put(tmp); } else if (node->type == HFS_NODE_LEAF) tree->leaf_head = node->next; if (node->next) { tmp = hfs_bnode_find(tree, node->next); if (IS_ERR(tmp)) return; tmp->prev = node->prev; cnid = cpu_to_be32(tmp->prev); hfs_bnode_write(tmp, &cnid, offsetof(struct hfs_bnode_desc, prev), 4); hfs_bnode_put(tmp); } else if (node->type == HFS_NODE_LEAF) tree->leaf_tail = node->prev; // move down? 
if (!node->prev && !node->next) { printk(KERN_DEBUG "hfs_btree_del_level\n"); } if (!node->parent) { tree->root = 0; tree->depth = 0; } set_bit(HFS_BNODE_DELETED, &node->flags); } static inline int hfs_bnode_hash(u32 num) { num = (num >> 16) + num; num += num >> 8; return num & (NODE_HASH_SIZE - 1); } struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid) { struct hfs_bnode *node; if (cnid >= tree->node_count) { pr_err("request for non-existent node %d in B*Tree\n", cnid); return NULL; } for (node = tree->node_hash[hfs_bnode_hash(cnid)]; node; node = node->next_hash) { if (node->this == cnid) { return node; } } return NULL; } static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid) { struct hfs_bnode *node, *node2; struct address_space *mapping; struct page *page; int size, block, i, hash; loff_t off; if (cnid >= tree->node_count) { pr_err("request for non-existent node %d in B*Tree\n", cnid); return NULL; } size = sizeof(struct hfs_bnode) + tree->pages_per_bnode * sizeof(struct page *); node = kzalloc(size, GFP_KERNEL); if (!node) return NULL; node->tree = tree; node->this = cnid; set_bit(HFS_BNODE_NEW, &node->flags); atomic_set(&node->refcnt, 1); hfs_dbg(BNODE_REFS, "new_node(%d:%d): 1\n", node->tree->cnid, node->this); init_waitqueue_head(&node->lock_wq); spin_lock(&tree->hash_lock); node2 = hfs_bnode_findhash(tree, cnid); if (!node2) { hash = hfs_bnode_hash(cnid); node->next_hash = tree->node_hash[hash]; tree->node_hash[hash] = node; tree->node_hash_cnt++; } else { hfs_bnode_get(node2); spin_unlock(&tree->hash_lock); kfree(node); wait_event(node2->lock_wq, !test_bit(HFS_BNODE_NEW, &node2->flags)); return node2; } spin_unlock(&tree->hash_lock); mapping = tree->inode->i_mapping; off = (loff_t)cnid * tree->node_size; block = off >> PAGE_SHIFT; node->page_offset = off & ~PAGE_MASK; for (i = 0; i < tree->pages_per_bnode; i++) { page = read_mapping_page(mapping, block++, NULL); if (IS_ERR(page)) goto fail; node->page[i] = page; } return node; fail: set_bit(HFS_BNODE_ERROR, &node->flags); return node; } void hfs_bnode_unhash(struct hfs_bnode *node) { struct hfs_bnode **p; hfs_dbg(BNODE_REFS, "remove_node(%d:%d): %d\n", node->tree->cnid, node->this, atomic_read(&node->refcnt)); for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)]; *p && *p != node; p = &(*p)->next_hash) ; BUG_ON(!*p); *p = node->next_hash; node->tree->node_hash_cnt--; } /* Load a particular node out of a tree */ struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num) { struct hfs_bnode *node; struct hfs_bnode_desc *desc; int i, rec_off, off, next_off; int entry_size, key_size; spin_lock(&tree->hash_lock); node = hfs_bnode_findhash(tree, num); if (node) { hfs_bnode_get(node); spin_unlock(&tree->hash_lock); wait_event(node->lock_wq, !test_bit(HFS_BNODE_NEW, &node->flags)); if (test_bit(HFS_BNODE_ERROR, &node->flags)) goto node_error; return node; } spin_unlock(&tree->hash_lock); node = __hfs_bnode_create(tree, num); if (!node) return ERR_PTR(-ENOMEM); if (test_bit(HFS_BNODE_ERROR, &node->flags)) goto node_error; if (!test_bit(HFS_BNODE_NEW, &node->flags)) return node; desc = (struct hfs_bnode_desc *)(kmap_local_page(node->page[0]) + node->page_offset); node->prev = be32_to_cpu(desc->prev); node->next = be32_to_cpu(desc->next); node->num_recs = be16_to_cpu(desc->num_recs); node->type = desc->type; node->height = desc->height; kunmap_local(desc); switch (node->type) { case HFS_NODE_HEADER: case HFS_NODE_MAP: if (node->height != 0) goto node_error; break; case HFS_NODE_LEAF: if 
(node->height != 1) goto node_error; break; case HFS_NODE_INDEX: if (node->height <= 1 || node->height > tree->depth) goto node_error; break; default: goto node_error; } rec_off = tree->node_size - 2; off = hfs_bnode_read_u16(node, rec_off); if (off != sizeof(struct hfs_bnode_desc)) goto node_error; for (i = 1; i <= node->num_recs; off = next_off, i++) { rec_off -= 2; next_off = hfs_bnode_read_u16(node, rec_off); if (next_off <= off || next_off > tree->node_size || next_off & 1) goto node_error; entry_size = next_off - off; if (node->type != HFS_NODE_INDEX && node->type != HFS_NODE_LEAF) continue; key_size = hfs_bnode_read_u8(node, off) + 1; if (key_size >= entry_size /*|| key_size & 1*/) goto node_error; } clear_bit(HFS_BNODE_NEW, &node->flags); wake_up(&node->lock_wq); return node; node_error: set_bit(HFS_BNODE_ERROR, &node->flags); clear_bit(HFS_BNODE_NEW, &node->flags); wake_up(&node->lock_wq); hfs_bnode_put(node); return ERR_PTR(-EIO); } void hfs_bnode_free(struct hfs_bnode *node) { int i; for (i = 0; i < node->tree->pages_per_bnode; i++) if (node->page[i]) put_page(node->page[i]); kfree(node); } struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num) { struct hfs_bnode *node; struct page **pagep; int i; spin_lock(&tree->hash_lock); node = hfs_bnode_findhash(tree, num); spin_unlock(&tree->hash_lock); if (node) { pr_crit("new node %u already hashed?\n", num); WARN_ON(1); return node; } node = __hfs_bnode_create(tree, num); if (!node) return ERR_PTR(-ENOMEM); if (test_bit(HFS_BNODE_ERROR, &node->flags)) { hfs_bnode_put(node); return ERR_PTR(-EIO); } pagep = node->page; memzero_page(*pagep, node->page_offset, min((int)PAGE_SIZE, (int)tree->node_size)); set_page_dirty(*pagep); for (i = 1; i < tree->pages_per_bnode; i++) { memzero_page(*++pagep, 0, PAGE_SIZE); set_page_dirty(*pagep); } clear_bit(HFS_BNODE_NEW, &node->flags); wake_up(&node->lock_wq); return node; } void hfs_bnode_get(struct hfs_bnode *node) { if (node) { atomic_inc(&node->refcnt); hfs_dbg(BNODE_REFS, "get_node(%d:%d): %d\n", node->tree->cnid, node->this, atomic_read(&node->refcnt)); } } /* Dispose of resources used by a node */ void hfs_bnode_put(struct hfs_bnode *node) { if (node) { struct hfs_btree *tree = node->tree; int i; hfs_dbg(BNODE_REFS, "put_node(%d:%d): %d\n", node->tree->cnid, node->this, atomic_read(&node->refcnt)); BUG_ON(!atomic_read(&node->refcnt)); if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock)) return; for (i = 0; i < tree->pages_per_bnode; i++) { if (!node->page[i]) continue; mark_page_accessed(node->page[i]); } if (test_bit(HFS_BNODE_DELETED, &node->flags)) { hfs_bnode_unhash(node); spin_unlock(&tree->hash_lock); hfs_bmap_free(node); hfs_bnode_free(node); return; } spin_unlock(&tree->hash_lock); } }
linux-master
fs/hfs/bnode.c
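A note on the node cache in fs/hfs/bnode.c above: lookups hash the 32-bit node number (CNID) into a small bucket array by folding the high bits onto the low ones. The following is a minimal standalone sketch of that folding, assuming a NODE_HASH_SIZE of 256 purely for illustration (the real value comes from the HFS btree header, which is not part of this record):

#include <stdio.h>
#include <stdint.h>

#define NODE_HASH_SIZE 256	/* illustrative stand-in, not the kernel's header value */

/* Same folding as hfs_bnode_hash(): add the high half onto the low half,
 * fold once more by eight bits, then mask down to a bucket index. */
static unsigned int bnode_hash(uint32_t num)
{
	num = (num >> 16) + num;
	num += num >> 8;
	return num & (NODE_HASH_SIZE - 1);
}

int main(void)
{
	uint32_t cnids[] = { 0, 1, 255, 256, 65536, 0x12345678 };

	for (unsigned int i = 0; i < sizeof(cnids) / sizeof(cnids[0]); i++)
		printf("cnid %u -> bucket %u\n", (unsigned int)cnids[i],
		       bnode_hash(cnids[i]));
	return 0;
}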
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/hfs/btree.c * * Copyright (C) 2001 * Brad Boyer ([email protected]) * (C) 2003 Ardis Technologies <[email protected]> * * Handle opening/closing btree */ #include <linux/pagemap.h> #include <linux/slab.h> #include <linux/log2.h> #include "btree.h" /* Get a reference to a B*Tree and do some initial checks */ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp keycmp) { struct hfs_btree *tree; struct hfs_btree_header_rec *head; struct address_space *mapping; struct page *page; unsigned int size; tree = kzalloc(sizeof(*tree), GFP_KERNEL); if (!tree) return NULL; mutex_init(&tree->tree_lock); spin_lock_init(&tree->hash_lock); /* Set the correct compare function */ tree->sb = sb; tree->cnid = id; tree->keycmp = keycmp; tree->inode = iget_locked(sb, id); if (!tree->inode) goto free_tree; BUG_ON(!(tree->inode->i_state & I_NEW)); { struct hfs_mdb *mdb = HFS_SB(sb)->mdb; HFS_I(tree->inode)->flags = 0; mutex_init(&HFS_I(tree->inode)->extents_lock); switch (id) { case HFS_EXT_CNID: hfs_inode_read_fork(tree->inode, mdb->drXTExtRec, mdb->drXTFlSize, mdb->drXTFlSize, be32_to_cpu(mdb->drXTClpSiz)); if (HFS_I(tree->inode)->alloc_blocks > HFS_I(tree->inode)->first_blocks) { pr_err("invalid btree extent records\n"); unlock_new_inode(tree->inode); goto free_inode; } tree->inode->i_mapping->a_ops = &hfs_btree_aops; break; case HFS_CAT_CNID: hfs_inode_read_fork(tree->inode, mdb->drCTExtRec, mdb->drCTFlSize, mdb->drCTFlSize, be32_to_cpu(mdb->drCTClpSiz)); if (!HFS_I(tree->inode)->first_blocks) { pr_err("invalid btree extent records (0 size)\n"); unlock_new_inode(tree->inode); goto free_inode; } tree->inode->i_mapping->a_ops = &hfs_btree_aops; break; default: BUG(); } } unlock_new_inode(tree->inode); mapping = tree->inode->i_mapping; page = read_mapping_page(mapping, 0, NULL); if (IS_ERR(page)) goto free_inode; /* Load the header */ head = (struct hfs_btree_header_rec *)(kmap_local_page(page) + sizeof(struct hfs_bnode_desc)); tree->root = be32_to_cpu(head->root); tree->leaf_count = be32_to_cpu(head->leaf_count); tree->leaf_head = be32_to_cpu(head->leaf_head); tree->leaf_tail = be32_to_cpu(head->leaf_tail); tree->node_count = be32_to_cpu(head->node_count); tree->free_nodes = be32_to_cpu(head->free_nodes); tree->attributes = be32_to_cpu(head->attributes); tree->node_size = be16_to_cpu(head->node_size); tree->max_key_len = be16_to_cpu(head->max_key_len); tree->depth = be16_to_cpu(head->depth); size = tree->node_size; if (!is_power_of_2(size)) goto fail_page; if (!tree->node_count) goto fail_page; switch (id) { case HFS_EXT_CNID: if (tree->max_key_len != HFS_MAX_EXT_KEYLEN) { pr_err("invalid extent max_key_len %d\n", tree->max_key_len); goto fail_page; } break; case HFS_CAT_CNID: if (tree->max_key_len != HFS_MAX_CAT_KEYLEN) { pr_err("invalid catalog max_key_len %d\n", tree->max_key_len); goto fail_page; } break; default: BUG(); } tree->node_size_shift = ffs(size) - 1; tree->pages_per_bnode = (tree->node_size + PAGE_SIZE - 1) >> PAGE_SHIFT; kunmap_local(head); put_page(page); return tree; fail_page: kunmap_local(head); put_page(page); free_inode: tree->inode->i_mapping->a_ops = &hfs_aops; iput(tree->inode); free_tree: kfree(tree); return NULL; } /* Release resources used by a btree */ void hfs_btree_close(struct hfs_btree *tree) { struct hfs_bnode *node; int i; if (!tree) return; for (i = 0; i < NODE_HASH_SIZE; i++) { while ((node = tree->node_hash[i])) { tree->node_hash[i] = node->next_hash; if (atomic_read(&node->refcnt)) pr_err("node %d:%d 
still has %d user(s)!\n", node->tree->cnid, node->this, atomic_read(&node->refcnt)); hfs_bnode_free(node); tree->node_hash_cnt--; } } iput(tree->inode); kfree(tree); } void hfs_btree_write(struct hfs_btree *tree) { struct hfs_btree_header_rec *head; struct hfs_bnode *node; struct page *page; node = hfs_bnode_find(tree, 0); if (IS_ERR(node)) /* panic? */ return; /* Load the header */ page = node->page[0]; head = (struct hfs_btree_header_rec *)(kmap_local_page(page) + sizeof(struct hfs_bnode_desc)); head->root = cpu_to_be32(tree->root); head->leaf_count = cpu_to_be32(tree->leaf_count); head->leaf_head = cpu_to_be32(tree->leaf_head); head->leaf_tail = cpu_to_be32(tree->leaf_tail); head->node_count = cpu_to_be32(tree->node_count); head->free_nodes = cpu_to_be32(tree->free_nodes); head->attributes = cpu_to_be32(tree->attributes); head->depth = cpu_to_be16(tree->depth); kunmap_local(head); set_page_dirty(page); hfs_bnode_put(node); } static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx) { struct hfs_btree *tree = prev->tree; struct hfs_bnode *node; struct hfs_bnode_desc desc; __be32 cnid; node = hfs_bnode_create(tree, idx); if (IS_ERR(node)) return node; if (!tree->free_nodes) panic("FIXME!!!"); tree->free_nodes--; prev->next = idx; cnid = cpu_to_be32(idx); hfs_bnode_write(prev, &cnid, offsetof(struct hfs_bnode_desc, next), 4); node->type = HFS_NODE_MAP; node->num_recs = 1; hfs_bnode_clear(node, 0, tree->node_size); desc.next = 0; desc.prev = 0; desc.type = HFS_NODE_MAP; desc.height = 0; desc.num_recs = cpu_to_be16(1); desc.reserved = 0; hfs_bnode_write(node, &desc, 0, sizeof(desc)); hfs_bnode_write_u16(node, 14, 0x8000); hfs_bnode_write_u16(node, tree->node_size - 2, 14); hfs_bnode_write_u16(node, tree->node_size - 4, tree->node_size - 6); return node; } /* Make sure @tree has enough space for the @rsvd_nodes */ int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes) { struct inode *inode = tree->inode; u32 count; int res; while (tree->free_nodes < rsvd_nodes) { res = hfs_extend_file(inode); if (res) return res; HFS_I(inode)->phys_size = inode->i_size = (loff_t)HFS_I(inode)->alloc_blocks * HFS_SB(tree->sb)->alloc_blksz; HFS_I(inode)->fs_blocks = inode->i_size >> tree->sb->s_blocksize_bits; inode_set_bytes(inode, inode->i_size); count = inode->i_size >> tree->node_size_shift; tree->free_nodes += count - tree->node_count; tree->node_count = count; } return 0; } struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree) { struct hfs_bnode *node, *next_node; struct page **pagep; u32 nidx, idx; unsigned off; u16 off16; u16 len; u8 *data, byte, m; int i, res; res = hfs_bmap_reserve(tree, 1); if (res) return ERR_PTR(res); nidx = 0; node = hfs_bnode_find(tree, nidx); if (IS_ERR(node)) return node; len = hfs_brec_lenoff(node, 2, &off16); off = off16; off += node->page_offset; pagep = node->page + (off >> PAGE_SHIFT); data = kmap_local_page(*pagep); off &= ~PAGE_MASK; idx = 0; for (;;) { while (len) { byte = data[off]; if (byte != 0xff) { for (m = 0x80, i = 0; i < 8; m >>= 1, i++) { if (!(byte & m)) { idx += i; data[off] |= m; set_page_dirty(*pagep); kunmap_local(data); tree->free_nodes--; mark_inode_dirty(tree->inode); hfs_bnode_put(node); return hfs_bnode_create(tree, idx); } } } if (++off >= PAGE_SIZE) { kunmap_local(data); data = kmap_local_page(*++pagep); off = 0; } idx += 8; len--; } kunmap_local(data); nidx = node->next; if (!nidx) { printk(KERN_DEBUG "create new bmap node...\n"); next_node = hfs_bmap_new_bmap(node, idx); } else next_node = hfs_bnode_find(tree, nidx); 
hfs_bnode_put(node); if (IS_ERR(next_node)) return next_node; node = next_node; len = hfs_brec_lenoff(node, 0, &off16); off = off16; off += node->page_offset; pagep = node->page + (off >> PAGE_SHIFT); data = kmap_local_page(*pagep); off &= ~PAGE_MASK; } } void hfs_bmap_free(struct hfs_bnode *node) { struct hfs_btree *tree; struct page *page; u16 off, len; u32 nidx; u8 *data, byte, m; hfs_dbg(BNODE_MOD, "btree_free_node: %u\n", node->this); tree = node->tree; nidx = node->this; node = hfs_bnode_find(tree, 0); if (IS_ERR(node)) return; len = hfs_brec_lenoff(node, 2, &off); while (nidx >= len * 8) { u32 i; nidx -= len * 8; i = node->next; if (!i) { /* panic */; pr_crit("unable to free bnode %u. bmap not found!\n", node->this); hfs_bnode_put(node); return; } hfs_bnode_put(node); node = hfs_bnode_find(tree, i); if (IS_ERR(node)) return; if (node->type != HFS_NODE_MAP) { /* panic */; pr_crit("invalid bmap found! (%u,%d)\n", node->this, node->type); hfs_bnode_put(node); return; } len = hfs_brec_lenoff(node, 0, &off); } off += node->page_offset + nidx / 8; page = node->page[off >> PAGE_SHIFT]; data = kmap_local_page(page); off &= ~PAGE_MASK; m = 1 << (~nidx & 7); byte = data[off]; if (!(byte & m)) { pr_crit("trying to free free bnode %u(%d)\n", node->this, node->type); kunmap_local(data); hfs_bnode_put(node); return; } data[off] = byte & ~m; set_page_dirty(page); kunmap_local(data); hfs_bnode_put(node); tree->free_nodes++; mark_inode_dirty(tree->inode); }
linux-master
fs/hfs/btree.c
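One detail of fs/hfs/btree.c above that is easy to miss in hfs_bmap_alloc(): the allocation map is walked byte by byte, most significant bit first, and the first clear bit both gets set and becomes the new node number. A minimal userspace sketch of just that scan, over a made-up map buffer:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/*
 * Find and set the first clear bit in a byte-oriented allocation map,
 * scanning each byte MSB-first the way hfs_bmap_alloc() walks the map
 * records. Returns the bit index (the new node number), or (size_t)-1
 * if the map is full. The map contents in main() are sample data.
 */
static size_t alloc_first_clear(uint8_t *map, size_t nbytes)
{
	for (size_t i = 0; i < nbytes; i++) {
		if (map[i] == 0xff)
			continue;	/* every node covered by this byte is in use */
		for (uint8_t m = 0x80, bit = 0; m; m >>= 1, bit++) {
			if (!(map[i] & m)) {
				map[i] |= m;	/* mark the node allocated */
				return i * 8 + bit;
			}
		}
	}
	return (size_t)-1;
}

int main(void)
{
	uint8_t map[4] = { 0xff, 0xfd, 0x00, 0x00 };	/* bit 14 is the first free one */

	printf("allocated node %zu\n", alloc_first_clear(map, sizeof(map)));
	printf("allocated node %zu\n", alloc_first_clear(map, sizeof(map)));
	return 0;
}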
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/hfs/bfind.c * * Copyright (C) 2001 * Brad Boyer ([email protected]) * (C) 2003 Ardis Technologies <[email protected]> * * Search routines for btrees */ #include <linux/slab.h> #include "btree.h" int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd) { void *ptr; fd->tree = tree; fd->bnode = NULL; ptr = kmalloc(tree->max_key_len * 2 + 4, GFP_KERNEL); if (!ptr) return -ENOMEM; fd->search_key = ptr; fd->key = ptr + tree->max_key_len + 2; hfs_dbg(BNODE_REFS, "find_init: %d (%p)\n", tree->cnid, __builtin_return_address(0)); switch (tree->cnid) { case HFS_CAT_CNID: mutex_lock_nested(&tree->tree_lock, CATALOG_BTREE_MUTEX); break; case HFS_EXT_CNID: mutex_lock_nested(&tree->tree_lock, EXTENTS_BTREE_MUTEX); break; case HFS_ATTR_CNID: mutex_lock_nested(&tree->tree_lock, ATTR_BTREE_MUTEX); break; default: return -EINVAL; } return 0; } void hfs_find_exit(struct hfs_find_data *fd) { hfs_bnode_put(fd->bnode); kfree(fd->search_key); hfs_dbg(BNODE_REFS, "find_exit: %d (%p)\n", fd->tree->cnid, __builtin_return_address(0)); mutex_unlock(&fd->tree->tree_lock); fd->tree = NULL; } /* Find the record in bnode that best matches key (not greater than...)*/ int __hfs_brec_find(struct hfs_bnode *bnode, struct hfs_find_data *fd) { int cmpval; u16 off, len, keylen; int rec; int b, e; int res; b = 0; e = bnode->num_recs - 1; res = -ENOENT; do { rec = (e + b) / 2; len = hfs_brec_lenoff(bnode, rec, &off); keylen = hfs_brec_keylen(bnode, rec); if (keylen == 0) { res = -EINVAL; goto fail; } hfs_bnode_read(bnode, fd->key, off, keylen); cmpval = bnode->tree->keycmp(fd->key, fd->search_key); if (!cmpval) { e = rec; res = 0; goto done; } if (cmpval < 0) b = rec + 1; else e = rec - 1; } while (b <= e); if (rec != e && e >= 0) { len = hfs_brec_lenoff(bnode, e, &off); keylen = hfs_brec_keylen(bnode, e); if (keylen == 0) { res = -EINVAL; goto fail; } hfs_bnode_read(bnode, fd->key, off, keylen); } done: fd->record = e; fd->keyoffset = off; fd->keylength = keylen; fd->entryoffset = off + keylen; fd->entrylength = len - keylen; fail: return res; } /* Traverse a B*Tree from the root to a leaf finding best fit to key */ /* Return allocated copy of node found, set recnum to best record */ int hfs_brec_find(struct hfs_find_data *fd) { struct hfs_btree *tree; struct hfs_bnode *bnode; u32 nidx, parent; __be32 data; int height, res; tree = fd->tree; if (fd->bnode) hfs_bnode_put(fd->bnode); fd->bnode = NULL; nidx = tree->root; if (!nidx) return -ENOENT; height = tree->depth; res = 0; parent = 0; for (;;) { bnode = hfs_bnode_find(tree, nidx); if (IS_ERR(bnode)) { res = PTR_ERR(bnode); bnode = NULL; break; } if (bnode->height != height) goto invalid; if (bnode->type != (--height ? 
HFS_NODE_INDEX : HFS_NODE_LEAF)) goto invalid; bnode->parent = parent; res = __hfs_brec_find(bnode, fd); if (!height) break; if (fd->record < 0) goto release; parent = nidx; hfs_bnode_read(bnode, &data, fd->entryoffset, 4); nidx = be32_to_cpu(data); hfs_bnode_put(bnode); } fd->bnode = bnode; return res; invalid: pr_err("inconsistency in B*Tree (%d,%d,%d,%u,%u)\n", height, bnode->height, bnode->type, nidx, parent); res = -EIO; release: hfs_bnode_put(bnode); return res; } int hfs_brec_read(struct hfs_find_data *fd, void *rec, int rec_len) { int res; res = hfs_brec_find(fd); if (res) return res; if (fd->entrylength > rec_len) return -EINVAL; hfs_bnode_read(fd->bnode, rec, fd->entryoffset, fd->entrylength); return 0; } int hfs_brec_goto(struct hfs_find_data *fd, int cnt) { struct hfs_btree *tree; struct hfs_bnode *bnode; int idx, res = 0; u16 off, len, keylen; bnode = fd->bnode; tree = bnode->tree; if (cnt < 0) { cnt = -cnt; while (cnt > fd->record) { cnt -= fd->record + 1; fd->record = bnode->num_recs - 1; idx = bnode->prev; if (!idx) { res = -ENOENT; goto out; } hfs_bnode_put(bnode); bnode = hfs_bnode_find(tree, idx); if (IS_ERR(bnode)) { res = PTR_ERR(bnode); bnode = NULL; goto out; } } fd->record -= cnt; } else { while (cnt >= bnode->num_recs - fd->record) { cnt -= bnode->num_recs - fd->record; fd->record = 0; idx = bnode->next; if (!idx) { res = -ENOENT; goto out; } hfs_bnode_put(bnode); bnode = hfs_bnode_find(tree, idx); if (IS_ERR(bnode)) { res = PTR_ERR(bnode); bnode = NULL; goto out; } } fd->record += cnt; } len = hfs_brec_lenoff(bnode, fd->record, &off); keylen = hfs_brec_keylen(bnode, fd->record); if (keylen == 0) { res = -EINVAL; goto out; } fd->keyoffset = off; fd->keylength = keylen; fd->entryoffset = off + keylen; fd->entrylength = len - keylen; hfs_bnode_read(bnode, fd->key, off, keylen); out: fd->bnode = bnode; return res; }
linux-master
fs/hfs/bfind.c
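The core of fs/hfs/bfind.c above is the binary search in __hfs_brec_find(), and it does not look only for an exact match: it ends with e pointing at the last record whose key is less than or equal to the search key, or -1 if none is. The same search over a plain sorted array, as a standalone sketch with sample keys:

#include <stdio.h>

/*
 * Same search shape as __hfs_brec_find(): narrow [b, e] until e is the last
 * index whose key is <= target, or -1 if the target sorts before everything.
 * keys[] is made-up sample data standing in for the record keys of a bnode.
 */
static int find_not_greater(const int *keys, int nrecs, int target, int *exact)
{
	int b = 0, e = nrecs - 1;

	*exact = 0;
	while (b <= e) {
		int rec = (b + e) / 2;

		if (keys[rec] == target) {
			*exact = 1;
			return rec;
		}
		if (keys[rec] < target)
			b = rec + 1;
		else
			e = rec - 1;
	}
	return e;	/* mirrors fd->record = e (with -ENOENT when no exact hit) */
}

int main(void)
{
	int keys[] = { 10, 20, 30, 40 };
	int exact, rec;

	rec = find_not_greater(keys, 4, 25, &exact);
	printf("25 -> record %d, exact=%d\n", rec, exact);
	rec = find_not_greater(keys, 4, 30, &exact);
	printf("30 -> record %d, exact=%d\n", rec, exact);
	rec = find_not_greater(keys, 4, 5, &exact);
	printf("5  -> record %d, exact=%d\n", rec, exact);
	return 0;
}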
/* * linux/fs/hfs/part_tbl.c * * Copyright (C) 1996-1997 Paul H. Hargrove * (C) 2003 Ardis Technologies <[email protected]> * This file may be distributed under the terms of the GNU General Public License. * * Original code to handle the new style Mac partition table based on * a patch contributed by Holger Schemel ([email protected]). */ #include "hfs_fs.h" /* * The new style Mac partition map * * For each partition on the media there is a physical block (512-byte * block) containing one of these structures. These blocks are * contiguous starting at block 1. */ struct new_pmap { __be16 pmSig; /* signature */ __be16 reSigPad; /* padding */ __be32 pmMapBlkCnt; /* partition blocks count */ __be32 pmPyPartStart; /* physical block start of partition */ __be32 pmPartBlkCnt; /* physical block count of partition */ u8 pmPartName[32]; /* (null terminated?) string giving the name of this partition */ u8 pmPartType[32]; /* (null terminated?) string giving the type of this partition */ /* a bunch more stuff we don't need */ } __packed; /* * The old style Mac partition map * * The partition map consists for a 2-byte signature followed by an * array of these structures. The map is terminated with an all-zero * one of these. */ struct old_pmap { __be16 pdSig; /* Signature bytes */ struct old_pmap_entry { __be32 pdStart; __be32 pdSize; __be32 pdFSID; } pdEntry[42]; } __packed; /* * hfs_part_find() * * Parse the partition map looking for the * start and length of the 'part'th HFS partition. */ int hfs_part_find(struct super_block *sb, sector_t *part_start, sector_t *part_size) { struct buffer_head *bh; __be16 *data; int i, size, res; res = -ENOENT; bh = sb_bread512(sb, *part_start + HFS_PMAP_BLK, data); if (!bh) return -EIO; switch (be16_to_cpu(*data)) { case HFS_OLD_PMAP_MAGIC: { struct old_pmap *pm; struct old_pmap_entry *p; pm = (struct old_pmap *)bh->b_data; p = pm->pdEntry; size = 42; for (i = 0; i < size; p++, i++) { if (p->pdStart && p->pdSize && p->pdFSID == cpu_to_be32(0x54465331)/*"TFS1"*/ && (HFS_SB(sb)->part < 0 || HFS_SB(sb)->part == i)) { *part_start += be32_to_cpu(p->pdStart); *part_size = be32_to_cpu(p->pdSize); res = 0; } } break; } case HFS_NEW_PMAP_MAGIC: { struct new_pmap *pm; pm = (struct new_pmap *)bh->b_data; size = be32_to_cpu(pm->pmMapBlkCnt); for (i = 0; i < size;) { if (!memcmp(pm->pmPartType,"Apple_HFS", 9) && (HFS_SB(sb)->part < 0 || HFS_SB(sb)->part == i)) { *part_start += be32_to_cpu(pm->pmPyPartStart); *part_size = be32_to_cpu(pm->pmPartBlkCnt); res = 0; break; } brelse(bh); bh = sb_bread512(sb, *part_start + HFS_PMAP_BLK + ++i, pm); if (!bh) return -EIO; if (pm->pmSig != cpu_to_be16(HFS_NEW_PMAP_MAGIC)) break; } break; } } brelse(bh); return res; }
linux-master
fs/hfs/part_tbl.c
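To make the old-style map walk in fs/hfs/part_tbl.c above concrete: the map holds 42 fixed-size entries of big-endian start/size/FSID triples, and the HFS partitions are the ones whose FSID is "TFS1" (0x54465331, as in the code). A userspace sketch of that walk over fabricated entries; the 2-byte map signature and the 512-byte block framing are left out:

#include <stdio.h>
#include <stdint.h>

/* One old-style map entry: all fields big-endian on disk. */
struct old_entry {
	uint8_t start[4];	/* pdStart */
	uint8_t size[4];	/* pdSize  */
	uint8_t fsid[4];	/* pdFSID  */
};

static uint32_t get_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | p[3];
}

int main(void)
{
	/* Fabricated map: two HFS partitions, the other 40 entries all zero. */
	struct old_entry map[42] = {
		{ { 0, 0, 0, 1 },  { 0, 0, 0, 63 }, { 'T', 'F', 'S', '1' } },
		{ { 0, 0, 0, 64 }, { 0, 0, 4, 0 },  { 'T', 'F', 'S', '1' } },
	};

	for (int i = 0; i < 42; i++) {
		uint32_t start = get_be32(map[i].start);
		uint32_t size = get_be32(map[i].size);
		uint32_t fsid = get_be32(map[i].fsid);

		if (start && size && fsid == 0x54465331)	/* "TFS1" */
			printf("HFS partition %d: start %u, %u blocks\n",
			       i, start, size);
	}
	return 0;
}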
/* * linux/fs/hfs/bitmap.c * * Copyright (C) 1996-1997 Paul H. Hargrove * (C) 2003 Ardis Technologies <[email protected]> * This file may be distributed under the terms of the GNU General Public License. * * Based on GPLed code Copyright (C) 1995 Michael Dreher * * This file contains the code to modify the volume bitmap: * search/set/clear bits. */ #include "hfs_fs.h" /* * hfs_find_zero_bit() * * Description: * Given a block of memory, its length in bits, and a starting bit number, * determine the number of the first zero bits (in left-to-right ordering) * in that range. * * Returns >= 'size' if no zero bits are found in the range. * * Accesses memory in 32-bit aligned chunks of 32-bits and thus * may read beyond the 'size'th bit. */ static u32 hfs_find_set_zero_bits(__be32 *bitmap, u32 size, u32 offset, u32 *max) { __be32 *curr, *end; u32 mask, start, len, n; __be32 val; int i; len = *max; if (!len) return size; curr = bitmap + (offset / 32); end = bitmap + ((size + 31) / 32); /* scan the first partial u32 for zero bits */ val = *curr; if (~val) { n = be32_to_cpu(val); i = offset % 32; mask = (1U << 31) >> i; for (; i < 32; mask >>= 1, i++) { if (!(n & mask)) goto found; } } /* scan complete u32s for the first zero bit */ while (++curr < end) { val = *curr; if (~val) { n = be32_to_cpu(val); mask = 1 << 31; for (i = 0; i < 32; mask >>= 1, i++) { if (!(n & mask)) goto found; } } } return size; found: start = (curr - bitmap) * 32 + i; if (start >= size) return start; /* do any partial u32 at the start */ len = min(size - start, len); while (1) { n |= mask; if (++i >= 32) break; mask >>= 1; if (!--len || n & mask) goto done; } if (!--len) goto done; *curr++ = cpu_to_be32(n); /* do full u32s */ while (1) { n = be32_to_cpu(*curr); if (len < 32) break; if (n) { len = 32; break; } *curr++ = cpu_to_be32(0xffffffff); len -= 32; } /* do any partial u32 at end */ mask = 1U << 31; for (i = 0; i < len; i++) { if (n & mask) break; n |= mask; mask >>= 1; } done: *curr = cpu_to_be32(n); *max = (curr - bitmap) * 32 + i - start; return start; } /* * hfs_vbm_search_free() * * Description: * Search for 'num_bits' consecutive cleared bits in the bitmap blocks of * the hfs MDB. 'mdb' had better be locked or the returned range * may be no longer free, when this functions returns! * XXX Currently the search starts from bit 0, but it should start with * the bit number stored in 's_alloc_ptr' of the MDB. * Input Variable(s): * struct hfs_mdb *mdb: Pointer to the hfs MDB * u16 *num_bits: Pointer to the number of cleared bits * to search for * Output Variable(s): * u16 *num_bits: The number of consecutive clear bits of the * returned range. If the bitmap is fragmented, this will be less than * requested and it will be zero, when the disk is full. * Returns: * The number of the first bit of the range of cleared bits which has been * found. When 'num_bits' is zero, this is invalid! * Preconditions: * 'mdb' points to a "valid" (struct hfs_mdb). * 'num_bits' points to a variable of type (u16), which contains * the number of cleared bits to find. * Postconditions: * 'num_bits' is set to the length of the found sequence. 
*/ u32 hfs_vbm_search_free(struct super_block *sb, u32 goal, u32 *num_bits) { void *bitmap; u32 pos; /* make sure we have actual work to perform */ if (!*num_bits) return 0; mutex_lock(&HFS_SB(sb)->bitmap_lock); bitmap = HFS_SB(sb)->bitmap; pos = hfs_find_set_zero_bits(bitmap, HFS_SB(sb)->fs_ablocks, goal, num_bits); if (pos >= HFS_SB(sb)->fs_ablocks) { if (goal) pos = hfs_find_set_zero_bits(bitmap, goal, 0, num_bits); if (pos >= HFS_SB(sb)->fs_ablocks) { *num_bits = pos = 0; goto out; } } hfs_dbg(BITMAP, "alloc_bits: %u,%u\n", pos, *num_bits); HFS_SB(sb)->free_ablocks -= *num_bits; hfs_bitmap_dirty(sb); out: mutex_unlock(&HFS_SB(sb)->bitmap_lock); return pos; } /* * hfs_clear_vbm_bits() * * Description: * Clear the requested bits in the volume bitmap of the hfs filesystem * Input Variable(s): * struct hfs_mdb *mdb: Pointer to the hfs MDB * u16 start: The offset of the first bit * u16 count: The number of bits * Output Variable(s): * None * Returns: * 0: no error * -1: One of the bits was already clear. This is a strange * error and when it happens, the filesystem must be repaired! * -2: One or more of the bits are out of range of the bitmap. * Preconditions: * 'mdb' points to a "valid" (struct hfs_mdb). * Postconditions: * Starting with bit number 'start', 'count' bits in the volume bitmap * are cleared. The affected bitmap blocks are marked "dirty", the free * block count of the MDB is updated and the MDB is marked dirty. */ int hfs_clear_vbm_bits(struct super_block *sb, u16 start, u16 count) { __be32 *curr; u32 mask; int i, len; /* is there any actual work to be done? */ if (!count) return 0; hfs_dbg(BITMAP, "clear_bits: %u,%u\n", start, count); /* are all of the bits in range? */ if ((start + count) > HFS_SB(sb)->fs_ablocks) return -2; mutex_lock(&HFS_SB(sb)->bitmap_lock); /* bitmap is always on a 32-bit boundary */ curr = HFS_SB(sb)->bitmap + (start / 32); len = count; /* do any partial u32 at the start */ i = start % 32; if (i) { int j = 32 - i; mask = 0xffffffffU << j; if (j > count) { mask |= 0xffffffffU >> (i + count); *curr &= cpu_to_be32(mask); goto out; } *curr++ &= cpu_to_be32(mask); count -= j; } /* do full u32s */ while (count >= 32) { *curr++ = 0; count -= 32; } /* do any partial u32 at end */ if (count) { mask = 0xffffffffU >> count; *curr &= cpu_to_be32(mask); } out: HFS_SB(sb)->free_ablocks += len; mutex_unlock(&HFS_SB(sb)->bitmap_lock); hfs_bitmap_dirty(sb); return 0; }
linux-master
fs/hfs/bitmap.c
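The long comment in fs/hfs/bitmap.c above describes hfs_find_set_zero_bits() operating on big-endian 32-bit words, scanned from the most significant bit down. Byte for byte that is the same as walking the raw bitmap in order, high bit first, which is what this sketch does; it covers only the "find the first zero bit" half, not the run extension or the write-back, and the bitmap contents are sample data:

#include <stdio.h>
#include <stdint.h>

/*
 * Find the first clear bit of the volume bitmap, high bit of each byte
 * first. Returns size_bits when the bitmap is full, matching the
 * ">= 'size' if no zero bits are found" convention of the kernel helper.
 */
static uint32_t first_zero_bit(const uint8_t *bitmap, uint32_t size_bits)
{
	for (uint32_t idx = 0; idx < size_bits; idx++) {
		uint8_t mask = 0x80 >> (idx % 8);

		if (!(bitmap[idx / 8] & mask))
			return idx;
	}
	return size_bits;
}

int main(void)
{
	/* first 10 allocation blocks in use, block 10 free */
	uint8_t bitmap[8] = { 0xff, 0xc0 };

	printf("first free block: %u\n", first_zero_bit(bitmap, 64));
	return 0;
}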
/* * linux/fs/hfs/string.c * * Copyright (C) 1995-1997 Paul H. Hargrove * (C) 2003 Ardis Technologies <[email protected]> * This file may be distributed under the terms of the GNU General Public License. * * This file contains the string comparison function for the * Macintosh character set. * * The code in this file is derived from code which is copyright * 1986, 1989, 1990 by Abacus Research and Development, Inc. (ARDI) * It is used here by the permission of ARDI's president Cliff Matthews. */ #include "hfs_fs.h" #include <linux/dcache.h> /*================ File-local variables ================*/ /* * unsigned char caseorder[] * * Defines the lexical ordering of characters on the Macintosh * * Composition of the 'casefold' and 'order' tables from ARDI's code * with the entry for 0x20 changed to match that for 0xCA to remove * special case for those two characters. */ static unsigned char caseorder[256] = { 0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D,0x0E,0x0F, 0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17,0x18,0x19,0x1A,0x1B,0x1C,0x1D,0x1E,0x1F, 0x20,0x22,0x23,0x28,0x29,0x2A,0x2B,0x2C,0x2F,0x30,0x31,0x32,0x33,0x34,0x35,0x36, 0x37,0x38,0x39,0x3A,0x3B,0x3C,0x3D,0x3E,0x3F,0x40,0x41,0x42,0x43,0x44,0x45,0x46, 0x47,0x48,0x57,0x59,0x5D,0x5F,0x66,0x68,0x6A,0x6C,0x72,0x74,0x76,0x78,0x7A,0x7E, 0x8C,0x8E,0x90,0x92,0x95,0x97,0x9E,0xA0,0xA2,0xA4,0xA7,0xA9,0xAA,0xAB,0xAC,0xAD, 0x4E,0x48,0x57,0x59,0x5D,0x5F,0x66,0x68,0x6A,0x6C,0x72,0x74,0x76,0x78,0x7A,0x7E, 0x8C,0x8E,0x90,0x92,0x95,0x97,0x9E,0xA0,0xA2,0xA4,0xA7,0xAF,0xB0,0xB1,0xB2,0xB3, 0x4A,0x4C,0x5A,0x60,0x7B,0x7F,0x98,0x4F,0x49,0x51,0x4A,0x4B,0x4C,0x5A,0x60,0x63, 0x64,0x65,0x6E,0x6F,0x70,0x71,0x7B,0x84,0x85,0x86,0x7F,0x80,0x9A,0x9B,0x9C,0x98, 0xB4,0xB5,0xB6,0xB7,0xB8,0xB9,0xBA,0x94,0xBB,0xBC,0xBD,0xBE,0xBF,0xC0,0x4D,0x81, 0xC1,0xC2,0xC3,0xC4,0xC5,0xC6,0xC7,0xC8,0xC9,0xCA,0xCB,0x55,0x8A,0xCC,0x4D,0x81, 0xCD,0xCE,0xCF,0xD0,0xD1,0xD2,0xD3,0x26,0x27,0xD4,0x20,0x49,0x4B,0x80,0x82,0x82, 0xD5,0xD6,0x24,0x25,0x2D,0x2E,0xD7,0xD8,0xA6,0xD9,0xDA,0xDB,0xDC,0xDD,0xDE,0xDF, 0xE0,0xE1,0xE2,0xE3,0xE4,0xE5,0xE6,0xE7,0xE8,0xE9,0xEA,0xEB,0xEC,0xED,0xEE,0xEF, 0xF0,0xF1,0xF2,0xF3,0xF4,0xF5,0xF6,0xF7,0xF8,0xF9,0xFA,0xFB,0xFC,0xFD,0xFE,0xFF }; /*================ Global functions ================*/ /* * Hash a string to an integer in a case-independent way */ int hfs_hash_dentry(const struct dentry *dentry, struct qstr *this) { const unsigned char *name = this->name; unsigned int hash, len = this->len; if (len > HFS_NAMELEN) len = HFS_NAMELEN; hash = init_name_hash(dentry); for (; len; len--) hash = partial_name_hash(caseorder[*name++], hash); this->hash = end_name_hash(hash); return 0; } /* * Compare two strings in the HFS filename character ordering * Returns positive, negative, or zero, not just 0 or (+/-)1 * * Equivalent to ARDI's call: * ROMlib_RelString(s1+1, s2+1, true, false, (s1[0]<<16) | s2[0]) */ int hfs_strcmp(const unsigned char *s1, unsigned int len1, const unsigned char *s2, unsigned int len2) { int len, tmp; len = (len1 > len2) ? len2 : len1; while (len--) { tmp = (int)caseorder[*(s1++)] - (int)caseorder[*(s2++)]; if (tmp) return tmp; } return len1 - len2; } /* * Test for equality of two strings in the HFS filename character ordering. 
* return 1 on failure and 0 on success */ int hfs_compare_dentry(const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name) { const unsigned char *n1, *n2; if (len >= HFS_NAMELEN) { if (name->len < HFS_NAMELEN) return 1; len = HFS_NAMELEN; } else if (len != name->len) return 1; n1 = str; n2 = name->name; while (len--) { if (caseorder[*n1++] != caseorder[*n2++]) return 1; } return 0; }
linux-master
fs/hfs/string.c
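fs/hfs/string.c above funnels every byte through the Mac caseorder[] table and falls back to the length difference, so names differing only in case compare equal while a shorter name sorts before a longer one sharing its prefix. This sketch keeps that shape but substitutes plain ASCII tolower() for the Mac table, so it illustrates the structure only, not the real HFS collation:

#include <stdio.h>
#include <string.h>
#include <ctype.h>

/*
 * Same shape as hfs_strcmp(): compare byte by byte through a case-folding
 * table (tolower() here stands in for the Mac caseorder[] table), then
 * break ties on length.
 */
static int hfs_like_strcmp(const unsigned char *s1, unsigned int len1,
			   const unsigned char *s2, unsigned int len2)
{
	unsigned int len = (len1 < len2) ? len1 : len2;

	while (len--) {
		int tmp = tolower(*s1++) - tolower(*s2++);

		if (tmp)
			return tmp;
	}
	return (int)len1 - (int)len2;
}

int main(void)
{
	const char *a = "ReadMe";
	const char *b = "readme.txt";

	printf("cmp(\"%s\", \"%s\") = %d\n", a, b,
	       hfs_like_strcmp((const unsigned char *)a, strlen(a),
			       (const unsigned char *)b, strlen(b)));
	return 0;
}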
/* * linux/fs/hfs/dir.c * * Copyright (C) 1995-1997 Paul H. Hargrove * (C) 2003 Ardis Technologies <[email protected]> * This file may be distributed under the terms of the GNU General Public License. * * This file contains directory-related functions independent of which * scheme is being used to represent forks. * * Based on the minix file system code, (C) 1991, 1992 by Linus Torvalds */ #include "hfs_fs.h" #include "btree.h" /* * hfs_lookup() */ static struct dentry *hfs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { hfs_cat_rec rec; struct hfs_find_data fd; struct inode *inode = NULL; int res; res = hfs_find_init(HFS_SB(dir->i_sb)->cat_tree, &fd); if (res) return ERR_PTR(res); hfs_cat_build_key(dir->i_sb, fd.search_key, dir->i_ino, &dentry->d_name); res = hfs_brec_read(&fd, &rec, sizeof(rec)); if (res) { if (res != -ENOENT) inode = ERR_PTR(res); } else { inode = hfs_iget(dir->i_sb, &fd.search_key->cat, &rec); if (!inode) inode = ERR_PTR(-EACCES); } hfs_find_exit(&fd); return d_splice_alias(inode, dentry); } /* * hfs_readdir */ static int hfs_readdir(struct file *file, struct dir_context *ctx) { struct inode *inode = file_inode(file); struct super_block *sb = inode->i_sb; int len, err; char strbuf[HFS_MAX_NAMELEN]; union hfs_cat_rec entry; struct hfs_find_data fd; struct hfs_readdir_data *rd; u16 type; if (ctx->pos >= inode->i_size) return 0; err = hfs_find_init(HFS_SB(sb)->cat_tree, &fd); if (err) return err; hfs_cat_build_key(sb, fd.search_key, inode->i_ino, NULL); err = hfs_brec_find(&fd); if (err) goto out; if (ctx->pos == 0) { /* This is completely artificial... */ if (!dir_emit_dot(file, ctx)) goto out; ctx->pos = 1; } if (ctx->pos == 1) { if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) { err = -EIO; goto out; } hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength); if (entry.type != HFS_CDR_THD) { pr_err("bad catalog folder thread\n"); err = -EIO; goto out; } //if (fd.entrylength < HFS_MIN_THREAD_SZ) { // pr_err("truncated catalog thread\n"); // err = -EIO; // goto out; //} if (!dir_emit(ctx, "..", 2, be32_to_cpu(entry.thread.ParID), DT_DIR)) goto out; ctx->pos = 2; } if (ctx->pos >= inode->i_size) goto out; err = hfs_brec_goto(&fd, ctx->pos - 1); if (err) goto out; for (;;) { if (be32_to_cpu(fd.key->cat.ParID) != inode->i_ino) { pr_err("walked past end of dir\n"); err = -EIO; goto out; } if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) { err = -EIO; goto out; } hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength); type = entry.type; len = hfs_mac2asc(sb, strbuf, &fd.key->cat.CName); if (type == HFS_CDR_DIR) { if (fd.entrylength < sizeof(struct hfs_cat_dir)) { pr_err("small dir entry\n"); err = -EIO; goto out; } if (!dir_emit(ctx, strbuf, len, be32_to_cpu(entry.dir.DirID), DT_DIR)) break; } else if (type == HFS_CDR_FIL) { if (fd.entrylength < sizeof(struct hfs_cat_file)) { pr_err("small file entry\n"); err = -EIO; goto out; } if (!dir_emit(ctx, strbuf, len, be32_to_cpu(entry.file.FlNum), DT_REG)) break; } else { pr_err("bad catalog entry type %d\n", type); err = -EIO; goto out; } ctx->pos++; if (ctx->pos >= inode->i_size) goto out; err = hfs_brec_goto(&fd, 1); if (err) goto out; } rd = file->private_data; if (!rd) { rd = kmalloc(sizeof(struct hfs_readdir_data), GFP_KERNEL); if (!rd) { err = -ENOMEM; goto out; } file->private_data = rd; rd->file = file; spin_lock(&HFS_I(inode)->open_dir_lock); list_add(&rd->list, &HFS_I(inode)->open_dir_list); spin_unlock(&HFS_I(inode)->open_dir_lock); } /* * Can be done after the 
list insertion; exclusion with * hfs_delete_cat() is provided by directory lock. */ memcpy(&rd->key, &fd.key->cat, sizeof(struct hfs_cat_key)); out: hfs_find_exit(&fd); return err; } static int hfs_dir_release(struct inode *inode, struct file *file) { struct hfs_readdir_data *rd = file->private_data; if (rd) { spin_lock(&HFS_I(inode)->open_dir_lock); list_del(&rd->list); spin_unlock(&HFS_I(inode)->open_dir_lock); kfree(rd); } return 0; } /* * hfs_create() * * This is the create() entry in the inode_operations structure for * regular HFS directories. The purpose is to create a new file in * a directory and return a corresponding inode, given the inode for * the directory and the name (and its length) of the new file. */ static int hfs_create(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) { struct inode *inode; int res; inode = hfs_new_inode(dir, &dentry->d_name, mode); if (!inode) return -ENOMEM; res = hfs_cat_create(inode->i_ino, dir, &dentry->d_name, inode); if (res) { clear_nlink(inode); hfs_delete_inode(inode); iput(inode); return res; } d_instantiate(dentry, inode); mark_inode_dirty(inode); return 0; } /* * hfs_mkdir() * * This is the mkdir() entry in the inode_operations structure for * regular HFS directories. The purpose is to create a new directory * in a directory, given the inode for the parent directory and the * name (and its length) of the new directory. */ static int hfs_mkdir(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode) { struct inode *inode; int res; inode = hfs_new_inode(dir, &dentry->d_name, S_IFDIR | mode); if (!inode) return -ENOMEM; res = hfs_cat_create(inode->i_ino, dir, &dentry->d_name, inode); if (res) { clear_nlink(inode); hfs_delete_inode(inode); iput(inode); return res; } d_instantiate(dentry, inode); mark_inode_dirty(inode); return 0; } /* * hfs_remove() * * This serves as both unlink() and rmdir() in the inode_operations * structure for regular HFS directories. The purpose is to delete * an existing child, given the inode for the parent directory and * the name (and its length) of the existing directory. * * HFS does not have hardlinks, so both rmdir and unlink set the * link count to 0. The only difference is the emptiness check. */ static int hfs_remove(struct inode *dir, struct dentry *dentry) { struct inode *inode = d_inode(dentry); int res; if (S_ISDIR(inode->i_mode) && inode->i_size != 2) return -ENOTEMPTY; res = hfs_cat_delete(inode->i_ino, dir, &dentry->d_name); if (res) return res; clear_nlink(inode); inode_set_ctime_current(inode); hfs_delete_inode(inode); mark_inode_dirty(inode); return 0; } /* * hfs_rename() * * This is the rename() entry in the inode_operations structure for * regular HFS directories. The purpose is to rename an existing * file or directory, given the inode for the current directory and * the name (and its length) of the existing file/directory and the * inode for the new directory and the name (and its length) of the * new file/directory. * XXX: how do you handle must_be dir? 
*/ static int hfs_rename(struct mnt_idmap *idmap, struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) { int res; if (flags & ~RENAME_NOREPLACE) return -EINVAL; /* Unlink destination if it already exists */ if (d_really_is_positive(new_dentry)) { res = hfs_remove(new_dir, new_dentry); if (res) return res; } res = hfs_cat_move(d_inode(old_dentry)->i_ino, old_dir, &old_dentry->d_name, new_dir, &new_dentry->d_name); if (!res) hfs_cat_build_key(old_dir->i_sb, (btree_key *)&HFS_I(d_inode(old_dentry))->cat_key, new_dir->i_ino, &new_dentry->d_name); return res; } const struct file_operations hfs_dir_operations = { .read = generic_read_dir, .iterate_shared = hfs_readdir, .llseek = generic_file_llseek, .release = hfs_dir_release, }; const struct inode_operations hfs_dir_inode_operations = { .create = hfs_create, .lookup = hfs_lookup, .unlink = hfs_remove, .mkdir = hfs_mkdir, .rmdir = hfs_remove, .rename = hfs_rename, .setattr = hfs_inode_setattr, };
linux-master
fs/hfs/dir.c
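A toy model of the readdir bookkeeping in fs/hfs/dir.c above: position 0 is a synthetic ".", position 1 is ".." taken from the catalog thread record, and catalog entries start at position 2, which is also why a directory's i_size is its entry count plus two and why hfs_remove() treats i_size == 2 as empty. Names and CNIDs below are made up:

#include <stdio.h>

struct toy_entry {
	const char *name;
	unsigned int cnid;
};

int main(void)
{
	unsigned int dir_cnid = 19, parent_cnid = 2;	/* made-up catalog IDs */
	struct toy_entry entries[] = { { "Notes", 21 }, { "Photos", 22 } };
	unsigned long nentries = sizeof(entries) / sizeof(entries[0]);
	unsigned long i_size = nentries + 2;	/* entries plus "." and ".." */

	for (unsigned long pos = 0; pos < i_size; pos++) {
		if (pos == 0)
			printf("pos %lu: .  (cnid %u)\n", pos, dir_cnid);
		else if (pos == 1)
			printf("pos %lu: .. (cnid %u)\n", pos, parent_cnid);
		else
			printf("pos %lu: %s (cnid %u)\n", pos,
			       entries[pos - 2].name, entries[pos - 2].cnid);
	}
	return 0;
}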
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/hfs/attr.c * * (C) 2003 Ardis Technologies <[email protected]> * * Export hfs data via xattr */ #include <linux/fs.h> #include <linux/xattr.h> #include "hfs_fs.h" #include "btree.h" enum hfs_xattr_type { HFS_TYPE, HFS_CREATOR, }; static int __hfs_setxattr(struct inode *inode, enum hfs_xattr_type type, const void *value, size_t size, int flags) { struct hfs_find_data fd; hfs_cat_rec rec; struct hfs_cat_file *file; int res; if (!S_ISREG(inode->i_mode) || HFS_IS_RSRC(inode)) return -EOPNOTSUPP; res = hfs_find_init(HFS_SB(inode->i_sb)->cat_tree, &fd); if (res) return res; fd.search_key->cat = HFS_I(inode)->cat_key; res = hfs_brec_find(&fd); if (res) goto out; hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, sizeof(struct hfs_cat_file)); file = &rec.file; switch (type) { case HFS_TYPE: if (size == 4) memcpy(&file->UsrWds.fdType, value, 4); else res = -ERANGE; break; case HFS_CREATOR: if (size == 4) memcpy(&file->UsrWds.fdCreator, value, 4); else res = -ERANGE; break; } if (!res) hfs_bnode_write(fd.bnode, &rec, fd.entryoffset, sizeof(struct hfs_cat_file)); out: hfs_find_exit(&fd); return res; } static ssize_t __hfs_getxattr(struct inode *inode, enum hfs_xattr_type type, void *value, size_t size) { struct hfs_find_data fd; hfs_cat_rec rec; struct hfs_cat_file *file; ssize_t res = 0; if (!S_ISREG(inode->i_mode) || HFS_IS_RSRC(inode)) return -EOPNOTSUPP; if (size) { res = hfs_find_init(HFS_SB(inode->i_sb)->cat_tree, &fd); if (res) return res; fd.search_key->cat = HFS_I(inode)->cat_key; res = hfs_brec_find(&fd); if (res) goto out; hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, sizeof(struct hfs_cat_file)); } file = &rec.file; switch (type) { case HFS_TYPE: if (size >= 4) { memcpy(value, &file->UsrWds.fdType, 4); res = 4; } else res = size ? -ERANGE : 4; break; case HFS_CREATOR: if (size >= 4) { memcpy(value, &file->UsrWds.fdCreator, 4); res = 4; } else res = size ? -ERANGE : 4; break; } out: if (size) hfs_find_exit(&fd); return res; } static int hfs_xattr_get(const struct xattr_handler *handler, struct dentry *unused, struct inode *inode, const char *name, void *value, size_t size) { return __hfs_getxattr(inode, handler->flags, value, size); } static int hfs_xattr_set(const struct xattr_handler *handler, struct mnt_idmap *idmap, struct dentry *unused, struct inode *inode, const char *name, const void *value, size_t size, int flags) { if (!value) return -EOPNOTSUPP; return __hfs_setxattr(inode, handler->flags, value, size, flags); } static const struct xattr_handler hfs_creator_handler = { .name = "hfs.creator", .flags = HFS_CREATOR, .get = hfs_xattr_get, .set = hfs_xattr_set, }; static const struct xattr_handler hfs_type_handler = { .name = "hfs.type", .flags = HFS_TYPE, .get = hfs_xattr_get, .set = hfs_xattr_set, }; const struct xattr_handler *hfs_xattr_handlers[] = { &hfs_creator_handler, &hfs_type_handler, NULL };
linux-master
fs/hfs/attr.c
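fs/hfs/attr.c above exposes the classic four-byte Mac type and creator codes as the "hfs.type" and "hfs.creator" extended attributes, and rejects any other size with -ERANGE. A sketch of how userspace would drive those handlers; the mount path and the "TEXT"/"ttxt" codes are example values only:

#include <stdio.h>
#include <sys/xattr.h>

int main(void)
{
	const char *path = "/mnt/hfs/ReadMe";	/* hypothetical file on an HFS mount */
	char type[4];

	/* Both attributes must be exactly four bytes, per the handlers above. */
	if (setxattr(path, "hfs.type", "TEXT", 4, 0) != 0)
		perror("setxattr hfs.type");

	if (getxattr(path, "hfs.type", type, sizeof(type)) == 4)
		printf("type code: %.4s\n", type);
	else
		perror("getxattr hfs.type");

	return 0;
}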
/*
 * linux/fs/hfs/sysdep.c
 *
 * Copyright (C) 1996 Paul H. Hargrove
 * (C) 2003 Ardis Technologies <[email protected]>
 * This file may be distributed under the terms of the GNU General Public License.
 *
 * This file contains the code to do various system dependent things.
 */

#include <linux/namei.h>
#include "hfs_fs.h"

/* dentry case-handling: just lowercase everything */
static int hfs_revalidate_dentry(struct dentry *dentry, unsigned int flags)
{
	struct inode *inode;
	int diff;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	inode = d_inode(dentry);
	if (!inode)
		return 1;

	/* fix up inode on a timezone change */
	diff = sys_tz.tz_minuteswest * 60 - HFS_I(inode)->tz_secondswest;
	if (diff) {
		struct timespec64 ctime = inode_get_ctime(inode);

		inode_set_ctime(inode, ctime.tv_sec + diff, ctime.tv_nsec);
		inode->i_atime.tv_sec += diff;
		inode->i_mtime.tv_sec += diff;
		HFS_I(inode)->tz_secondswest += diff;
	}
	return 1;
}

const struct dentry_operations hfs_dentry_operations = {
	.d_revalidate = hfs_revalidate_dentry,
	.d_hash = hfs_hash_dentry,
	.d_compare = hfs_compare_dentry,
};
linux-master
fs/hfs/sysdep.c
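The only subtle part of fs/hfs/sysdep.c above is the timestamp fix-up: HFS stores times as local time, so when the kernel's timezone setting changes, d_revalidate shifts the cached inode times by the difference. The same arithmetic in userspace, with made-up offsets:

#include <stdio.h>
#include <time.h>

int main(void)
{
	long old_minuteswest = 300;	/* offset the inode was loaded with (UTC-5) */
	long new_minuteswest = 360;	/* timezone after a settimeofday() change (UTC-6) */
	time_t mtime = 1700000000;	/* sample cached mtime, in seconds */

	/* diff = sys_tz.tz_minuteswest * 60 - HFS_I(inode)->tz_secondswest */
	long diff = new_minuteswest * 60 - old_minuteswest * 60;

	printf("shift cached timestamps by %ld seconds\n", diff);
	printf("mtime %ld -> %ld\n", (long)mtime, (long)(mtime + diff));
	return 0;
}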
/* * linux/fs/hfs/inode.c * * Copyright (C) 1995-1997 Paul H. Hargrove * (C) 2003 Ardis Technologies <[email protected]> * This file may be distributed under the terms of the GNU General Public License. * * This file contains inode-related functions which do not depend on * which scheme is being used to represent forks. * * Based on the minix file system code, (C) 1991, 1992 by Linus Torvalds */ #include <linux/pagemap.h> #include <linux/mpage.h> #include <linux/sched.h> #include <linux/cred.h> #include <linux/uio.h> #include <linux/xattr.h> #include <linux/blkdev.h> #include "hfs_fs.h" #include "btree.h" static const struct file_operations hfs_file_operations; static const struct inode_operations hfs_file_inode_operations; /*================ Variable-like macros ================*/ #define HFS_VALID_MODE_BITS (S_IFREG | S_IFDIR | S_IRWXUGO) static int hfs_writepage(struct page *page, struct writeback_control *wbc) { return block_write_full_page(page, hfs_get_block, wbc); } static int hfs_read_folio(struct file *file, struct folio *folio) { return block_read_full_folio(folio, hfs_get_block); } static void hfs_write_failed(struct address_space *mapping, loff_t to) { struct inode *inode = mapping->host; if (to > inode->i_size) { truncate_pagecache(inode, inode->i_size); hfs_file_truncate(inode); } } int hfs_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, struct page **pagep, void **fsdata) { int ret; *pagep = NULL; ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata, hfs_get_block, &HFS_I(mapping->host)->phys_size); if (unlikely(ret)) hfs_write_failed(mapping, pos + len); return ret; } static sector_t hfs_bmap(struct address_space *mapping, sector_t block) { return generic_block_bmap(mapping, block, hfs_get_block); } static bool hfs_release_folio(struct folio *folio, gfp_t mask) { struct inode *inode = folio->mapping->host; struct super_block *sb = inode->i_sb; struct hfs_btree *tree; struct hfs_bnode *node; u32 nidx; int i; bool res = true; switch (inode->i_ino) { case HFS_EXT_CNID: tree = HFS_SB(sb)->ext_tree; break; case HFS_CAT_CNID: tree = HFS_SB(sb)->cat_tree; break; default: BUG(); return false; } if (!tree) return false; if (tree->node_size >= PAGE_SIZE) { nidx = folio->index >> (tree->node_size_shift - PAGE_SHIFT); spin_lock(&tree->hash_lock); node = hfs_bnode_findhash(tree, nidx); if (!node) ; else if (atomic_read(&node->refcnt)) res = false; if (res && node) { hfs_bnode_unhash(node); hfs_bnode_free(node); } spin_unlock(&tree->hash_lock); } else { nidx = folio->index << (PAGE_SHIFT - tree->node_size_shift); i = 1 << (PAGE_SHIFT - tree->node_size_shift); spin_lock(&tree->hash_lock); do { node = hfs_bnode_findhash(tree, nidx++); if (!node) continue; if (atomic_read(&node->refcnt)) { res = false; break; } hfs_bnode_unhash(node); hfs_bnode_free(node); } while (--i && nidx < tree->node_count); spin_unlock(&tree->hash_lock); } return res ? try_to_free_buffers(folio) : false; } static ssize_t hfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) { struct file *file = iocb->ki_filp; struct address_space *mapping = file->f_mapping; struct inode *inode = mapping->host; size_t count = iov_iter_count(iter); ssize_t ret; ret = blockdev_direct_IO(iocb, inode, iter, hfs_get_block); /* * In case of error extending write may have instantiated a few * blocks outside i_size. Trim these off again. 
*/ if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) { loff_t isize = i_size_read(inode); loff_t end = iocb->ki_pos + count; if (end > isize) hfs_write_failed(mapping, end); } return ret; } static int hfs_writepages(struct address_space *mapping, struct writeback_control *wbc) { return mpage_writepages(mapping, wbc, hfs_get_block); } const struct address_space_operations hfs_btree_aops = { .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .read_folio = hfs_read_folio, .writepage = hfs_writepage, .write_begin = hfs_write_begin, .write_end = generic_write_end, .bmap = hfs_bmap, .release_folio = hfs_release_folio, }; const struct address_space_operations hfs_aops = { .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .read_folio = hfs_read_folio, .write_begin = hfs_write_begin, .write_end = generic_write_end, .bmap = hfs_bmap, .direct_IO = hfs_direct_IO, .writepages = hfs_writepages, .migrate_folio = buffer_migrate_folio, }; /* * hfs_new_inode */ struct inode *hfs_new_inode(struct inode *dir, const struct qstr *name, umode_t mode) { struct super_block *sb = dir->i_sb; struct inode *inode = new_inode(sb); if (!inode) return NULL; mutex_init(&HFS_I(inode)->extents_lock); INIT_LIST_HEAD(&HFS_I(inode)->open_dir_list); spin_lock_init(&HFS_I(inode)->open_dir_lock); hfs_cat_build_key(sb, (btree_key *)&HFS_I(inode)->cat_key, dir->i_ino, name); inode->i_ino = HFS_SB(sb)->next_id++; inode->i_mode = mode; inode->i_uid = current_fsuid(); inode->i_gid = current_fsgid(); set_nlink(inode, 1); inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode); HFS_I(inode)->flags = 0; HFS_I(inode)->rsrc_inode = NULL; HFS_I(inode)->fs_blocks = 0; if (S_ISDIR(mode)) { inode->i_size = 2; HFS_SB(sb)->folder_count++; if (dir->i_ino == HFS_ROOT_CNID) HFS_SB(sb)->root_dirs++; inode->i_op = &hfs_dir_inode_operations; inode->i_fop = &hfs_dir_operations; inode->i_mode |= S_IRWXUGO; inode->i_mode &= ~HFS_SB(inode->i_sb)->s_dir_umask; } else if (S_ISREG(mode)) { HFS_I(inode)->clump_blocks = HFS_SB(sb)->clumpablks; HFS_SB(sb)->file_count++; if (dir->i_ino == HFS_ROOT_CNID) HFS_SB(sb)->root_files++; inode->i_op = &hfs_file_inode_operations; inode->i_fop = &hfs_file_operations; inode->i_mapping->a_ops = &hfs_aops; inode->i_mode |= S_IRUGO|S_IXUGO; if (mode & S_IWUSR) inode->i_mode |= S_IWUGO; inode->i_mode &= ~HFS_SB(inode->i_sb)->s_file_umask; HFS_I(inode)->phys_size = 0; HFS_I(inode)->alloc_blocks = 0; HFS_I(inode)->first_blocks = 0; HFS_I(inode)->cached_start = 0; HFS_I(inode)->cached_blocks = 0; memset(HFS_I(inode)->first_extents, 0, sizeof(hfs_extent_rec)); memset(HFS_I(inode)->cached_extents, 0, sizeof(hfs_extent_rec)); } insert_inode_hash(inode); mark_inode_dirty(inode); set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags); hfs_mark_mdb_dirty(sb); return inode; } void hfs_delete_inode(struct inode *inode) { struct super_block *sb = inode->i_sb; hfs_dbg(INODE, "delete_inode: %lu\n", inode->i_ino); if (S_ISDIR(inode->i_mode)) { HFS_SB(sb)->folder_count--; if (HFS_I(inode)->cat_key.ParID == cpu_to_be32(HFS_ROOT_CNID)) HFS_SB(sb)->root_dirs--; set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags); hfs_mark_mdb_dirty(sb); return; } HFS_SB(sb)->file_count--; if (HFS_I(inode)->cat_key.ParID == cpu_to_be32(HFS_ROOT_CNID)) HFS_SB(sb)->root_files--; if (S_ISREG(inode->i_mode)) { if (!inode->i_nlink) { inode->i_size = 0; hfs_file_truncate(inode); } } set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags); hfs_mark_mdb_dirty(sb); } void hfs_inode_read_fork(struct inode *inode, struct 
hfs_extent *ext, __be32 __log_size, __be32 phys_size, u32 clump_size) { struct super_block *sb = inode->i_sb; u32 log_size = be32_to_cpu(__log_size); u16 count; int i; memcpy(HFS_I(inode)->first_extents, ext, sizeof(hfs_extent_rec)); for (count = 0, i = 0; i < 3; i++) count += be16_to_cpu(ext[i].count); HFS_I(inode)->first_blocks = count; inode->i_size = HFS_I(inode)->phys_size = log_size; HFS_I(inode)->fs_blocks = (log_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits; inode_set_bytes(inode, HFS_I(inode)->fs_blocks << sb->s_blocksize_bits); HFS_I(inode)->alloc_blocks = be32_to_cpu(phys_size) / HFS_SB(sb)->alloc_blksz; HFS_I(inode)->clump_blocks = clump_size / HFS_SB(sb)->alloc_blksz; if (!HFS_I(inode)->clump_blocks) HFS_I(inode)->clump_blocks = HFS_SB(sb)->clumpablks; } struct hfs_iget_data { struct hfs_cat_key *key; hfs_cat_rec *rec; }; static int hfs_test_inode(struct inode *inode, void *data) { struct hfs_iget_data *idata = data; hfs_cat_rec *rec; rec = idata->rec; switch (rec->type) { case HFS_CDR_DIR: return inode->i_ino == be32_to_cpu(rec->dir.DirID); case HFS_CDR_FIL: return inode->i_ino == be32_to_cpu(rec->file.FlNum); default: BUG(); return 1; } } /* * hfs_read_inode */ static int hfs_read_inode(struct inode *inode, void *data) { struct hfs_iget_data *idata = data; struct hfs_sb_info *hsb = HFS_SB(inode->i_sb); hfs_cat_rec *rec; HFS_I(inode)->flags = 0; HFS_I(inode)->rsrc_inode = NULL; mutex_init(&HFS_I(inode)->extents_lock); INIT_LIST_HEAD(&HFS_I(inode)->open_dir_list); spin_lock_init(&HFS_I(inode)->open_dir_lock); /* Initialize the inode */ inode->i_uid = hsb->s_uid; inode->i_gid = hsb->s_gid; set_nlink(inode, 1); if (idata->key) HFS_I(inode)->cat_key = *idata->key; else HFS_I(inode)->flags |= HFS_FLG_RSRC; HFS_I(inode)->tz_secondswest = sys_tz.tz_minuteswest * 60; rec = idata->rec; switch (rec->type) { case HFS_CDR_FIL: if (!HFS_IS_RSRC(inode)) { hfs_inode_read_fork(inode, rec->file.ExtRec, rec->file.LgLen, rec->file.PyLen, be16_to_cpu(rec->file.ClpSize)); } else { hfs_inode_read_fork(inode, rec->file.RExtRec, rec->file.RLgLen, rec->file.RPyLen, be16_to_cpu(rec->file.ClpSize)); } inode->i_ino = be32_to_cpu(rec->file.FlNum); inode->i_mode = S_IRUGO | S_IXUGO; if (!(rec->file.Flags & HFS_FIL_LOCK)) inode->i_mode |= S_IWUGO; inode->i_mode &= ~hsb->s_file_umask; inode->i_mode |= S_IFREG; inode->i_atime = inode->i_mtime = inode_set_ctime_to_ts(inode, hfs_m_to_utime(rec->file.MdDat)); inode->i_op = &hfs_file_inode_operations; inode->i_fop = &hfs_file_operations; inode->i_mapping->a_ops = &hfs_aops; break; case HFS_CDR_DIR: inode->i_ino = be32_to_cpu(rec->dir.DirID); inode->i_size = be16_to_cpu(rec->dir.Val) + 2; HFS_I(inode)->fs_blocks = 0; inode->i_mode = S_IFDIR | (S_IRWXUGO & ~hsb->s_dir_umask); inode->i_atime = inode->i_mtime = inode_set_ctime_to_ts(inode, hfs_m_to_utime(rec->dir.MdDat)); inode->i_op = &hfs_dir_inode_operations; inode->i_fop = &hfs_dir_operations; break; default: make_bad_inode(inode); } return 0; } /* * __hfs_iget() * * Given the MDB for a HFS filesystem, a 'key' and an 'entry' in * the catalog B-tree and the 'type' of the desired file return the * inode for that file/directory or NULL. Note that 'type' indicates * whether we want the actual file or directory, or the corresponding * metadata (AppleDouble header file or CAP metadata file). 
*/ struct inode *hfs_iget(struct super_block *sb, struct hfs_cat_key *key, hfs_cat_rec *rec) { struct hfs_iget_data data = { key, rec }; struct inode *inode; u32 cnid; switch (rec->type) { case HFS_CDR_DIR: cnid = be32_to_cpu(rec->dir.DirID); break; case HFS_CDR_FIL: cnid = be32_to_cpu(rec->file.FlNum); break; default: return NULL; } inode = iget5_locked(sb, cnid, hfs_test_inode, hfs_read_inode, &data); if (inode && (inode->i_state & I_NEW)) unlock_new_inode(inode); return inode; } void hfs_inode_write_fork(struct inode *inode, struct hfs_extent *ext, __be32 *log_size, __be32 *phys_size) { memcpy(ext, HFS_I(inode)->first_extents, sizeof(hfs_extent_rec)); if (log_size) *log_size = cpu_to_be32(inode->i_size); if (phys_size) *phys_size = cpu_to_be32(HFS_I(inode)->alloc_blocks * HFS_SB(inode->i_sb)->alloc_blksz); } int hfs_write_inode(struct inode *inode, struct writeback_control *wbc) { struct inode *main_inode = inode; struct hfs_find_data fd; hfs_cat_rec rec; int res; hfs_dbg(INODE, "hfs_write_inode: %lu\n", inode->i_ino); res = hfs_ext_write_extent(inode); if (res) return res; if (inode->i_ino < HFS_FIRSTUSER_CNID) { switch (inode->i_ino) { case HFS_ROOT_CNID: break; case HFS_EXT_CNID: hfs_btree_write(HFS_SB(inode->i_sb)->ext_tree); return 0; case HFS_CAT_CNID: hfs_btree_write(HFS_SB(inode->i_sb)->cat_tree); return 0; default: BUG(); return -EIO; } } if (HFS_IS_RSRC(inode)) main_inode = HFS_I(inode)->rsrc_inode; if (!main_inode->i_nlink) return 0; if (hfs_find_init(HFS_SB(main_inode->i_sb)->cat_tree, &fd)) /* panic? */ return -EIO; res = -EIO; if (HFS_I(main_inode)->cat_key.CName.len > HFS_NAMELEN) goto out; fd.search_key->cat = HFS_I(main_inode)->cat_key; if (hfs_brec_find(&fd)) goto out; if (S_ISDIR(main_inode->i_mode)) { if (fd.entrylength < sizeof(struct hfs_cat_dir)) goto out; hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, sizeof(struct hfs_cat_dir)); if (rec.type != HFS_CDR_DIR || be32_to_cpu(rec.dir.DirID) != inode->i_ino) { } rec.dir.MdDat = hfs_u_to_mtime(inode->i_mtime); rec.dir.Val = cpu_to_be16(inode->i_size - 2); hfs_bnode_write(fd.bnode, &rec, fd.entryoffset, sizeof(struct hfs_cat_dir)); } else if (HFS_IS_RSRC(inode)) { if (fd.entrylength < sizeof(struct hfs_cat_file)) goto out; hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, sizeof(struct hfs_cat_file)); hfs_inode_write_fork(inode, rec.file.RExtRec, &rec.file.RLgLen, &rec.file.RPyLen); hfs_bnode_write(fd.bnode, &rec, fd.entryoffset, sizeof(struct hfs_cat_file)); } else { if (fd.entrylength < sizeof(struct hfs_cat_file)) goto out; hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, sizeof(struct hfs_cat_file)); if (rec.type != HFS_CDR_FIL || be32_to_cpu(rec.file.FlNum) != inode->i_ino) { } if (inode->i_mode & S_IWUSR) rec.file.Flags &= ~HFS_FIL_LOCK; else rec.file.Flags |= HFS_FIL_LOCK; hfs_inode_write_fork(inode, rec.file.ExtRec, &rec.file.LgLen, &rec.file.PyLen); rec.file.MdDat = hfs_u_to_mtime(inode->i_mtime); hfs_bnode_write(fd.bnode, &rec, fd.entryoffset, sizeof(struct hfs_cat_file)); } res = 0; out: hfs_find_exit(&fd); return res; } static struct dentry *hfs_file_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { struct inode *inode = NULL; hfs_cat_rec rec; struct hfs_find_data fd; int res; if (HFS_IS_RSRC(dir) || strcmp(dentry->d_name.name, "rsrc")) goto out; inode = HFS_I(dir)->rsrc_inode; if (inode) goto out; inode = new_inode(dir->i_sb); if (!inode) return ERR_PTR(-ENOMEM); res = hfs_find_init(HFS_SB(dir->i_sb)->cat_tree, &fd); if (res) { iput(inode); return ERR_PTR(res); } fd.search_key->cat = 
HFS_I(dir)->cat_key; res = hfs_brec_read(&fd, &rec, sizeof(rec)); if (!res) { struct hfs_iget_data idata = { NULL, &rec }; hfs_read_inode(inode, &idata); } hfs_find_exit(&fd); if (res) { iput(inode); return ERR_PTR(res); } HFS_I(inode)->rsrc_inode = dir; HFS_I(dir)->rsrc_inode = inode; igrab(dir); inode_fake_hash(inode); mark_inode_dirty(inode); dont_mount(dentry); out: return d_splice_alias(inode, dentry); } void hfs_evict_inode(struct inode *inode) { truncate_inode_pages_final(&inode->i_data); clear_inode(inode); if (HFS_IS_RSRC(inode) && HFS_I(inode)->rsrc_inode) { HFS_I(HFS_I(inode)->rsrc_inode)->rsrc_inode = NULL; iput(HFS_I(inode)->rsrc_inode); } } static int hfs_file_open(struct inode *inode, struct file *file) { if (HFS_IS_RSRC(inode)) inode = HFS_I(inode)->rsrc_inode; atomic_inc(&HFS_I(inode)->opencnt); return 0; } static int hfs_file_release(struct inode *inode, struct file *file) { //struct super_block *sb = inode->i_sb; if (HFS_IS_RSRC(inode)) inode = HFS_I(inode)->rsrc_inode; if (atomic_dec_and_test(&HFS_I(inode)->opencnt)) { inode_lock(inode); hfs_file_truncate(inode); //if (inode->i_flags & S_DEAD) { // hfs_delete_cat(inode->i_ino, HFSPLUS_SB(sb).hidden_dir, NULL); // hfs_delete_inode(inode); //} inode_unlock(inode); } return 0; } /* * hfs_notify_change() * * Based very closely on fs/msdos/inode.c by Werner Almesberger * * This is the notify_change() field in the super_operations structure * for HFS file systems. The purpose is to take that changes made to * an inode and apply then in a filesystem-dependent manner. In this * case the process has a few of tasks to do: * 1) prevent changes to the i_uid and i_gid fields. * 2) map file permissions to the closest allowable permissions * 3) Since multiple Linux files can share the same on-disk inode under * HFS (for instance the data and resource forks of a file) a change * to permissions must be applied to all other in-core inodes which * correspond to the same HFS file. */ int hfs_inode_setattr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *attr) { struct inode *inode = d_inode(dentry); struct hfs_sb_info *hsb = HFS_SB(inode->i_sb); int error; error = setattr_prepare(&nop_mnt_idmap, dentry, attr); /* basic permission checks */ if (error) return error; /* no uig/gid changes and limit which mode bits can be set */ if (((attr->ia_valid & ATTR_UID) && (!uid_eq(attr->ia_uid, hsb->s_uid))) || ((attr->ia_valid & ATTR_GID) && (!gid_eq(attr->ia_gid, hsb->s_gid))) || ((attr->ia_valid & ATTR_MODE) && ((S_ISDIR(inode->i_mode) && (attr->ia_mode != inode->i_mode)) || (attr->ia_mode & ~HFS_VALID_MODE_BITS)))) { return hsb->s_quiet ? 0 : error; } if (attr->ia_valid & ATTR_MODE) { /* Only the 'w' bits can ever change and only all together. */ if (attr->ia_mode & S_IWUSR) attr->ia_mode = inode->i_mode | S_IWUGO; else attr->ia_mode = inode->i_mode & ~S_IWUGO; attr->ia_mode &= S_ISDIR(inode->i_mode) ? 
~hsb->s_dir_umask: ~hsb->s_file_umask; } if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size != i_size_read(inode)) { inode_dio_wait(inode); error = inode_newsize_ok(inode, attr->ia_size); if (error) return error; truncate_setsize(inode, attr->ia_size); hfs_file_truncate(inode); inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode); } setattr_copy(&nop_mnt_idmap, inode, attr); mark_inode_dirty(inode); return 0; } static int hfs_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync) { struct inode *inode = filp->f_mapping->host; struct super_block * sb; int ret, err; ret = file_write_and_wait_range(filp, start, end); if (ret) return ret; inode_lock(inode); /* sync the inode to buffers */ ret = write_inode_now(inode, 0); /* sync the superblock to buffers */ sb = inode->i_sb; flush_delayed_work(&HFS_SB(sb)->mdb_work); /* .. finally sync the buffers to disk */ err = sync_blockdev(sb->s_bdev); if (!ret) ret = err; inode_unlock(inode); return ret; } static const struct file_operations hfs_file_operations = { .llseek = generic_file_llseek, .read_iter = generic_file_read_iter, .write_iter = generic_file_write_iter, .mmap = generic_file_mmap, .splice_read = filemap_splice_read, .fsync = hfs_file_fsync, .open = hfs_file_open, .release = hfs_file_release, }; static const struct inode_operations hfs_file_inode_operations = { .lookup = hfs_file_lookup, .setattr = hfs_inode_setattr, .listxattr = generic_listxattr, };
linux-master
fs/hfs/inode.c
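Among the many things fs/hfs/inode.c above does, hfs_inode_setattr() is worth isolating: HFS has no per-user permission bits, so a chmod() can only toggle the write bits, and it toggles them for user, group and other together based on the owner write bit of the request. A standalone sketch of that clamping (the per-mount umask applied afterwards is omitted):

#include <stdio.h>
#include <sys/stat.h>

/*
 * Clamp a requested mode the way hfs_inode_setattr() does for regular
 * files: only the write bits may change, all three of them together,
 * driven by the owner write bit of the request.
 */
static mode_t hfs_clamp_mode(mode_t cur_mode, mode_t requested)
{
	if (requested & S_IWUSR)
		return cur_mode | (S_IWUSR | S_IWGRP | S_IWOTH);
	return cur_mode & ~(mode_t)(S_IWUSR | S_IWGRP | S_IWOTH);
}

int main(void)
{
	mode_t cur = 0555;	/* sample current mode */

	printf("chmod 0644 -> %04o\n", (unsigned int)hfs_clamp_mode(cur, 0644));
	printf("chmod 0400 -> %04o\n", (unsigned int)hfs_clamp_mode(cur, 0400));
	return 0;
}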
/* * linux/fs/hfs/trans.c * * Copyright (C) 1995-1997 Paul H. Hargrove * This file may be distributed under the terms of the GNU General Public License. * * This file contains routines for converting between the Macintosh * character set and various other encodings. This includes dealing * with ':' vs. '/' as the path-element separator. */ #include <linux/types.h> #include <linux/nls.h> #include "hfs_fs.h" /*================ Global functions ================*/ /* * hfs_mac2asc() * * Given a 'Pascal String' (a string preceded by a length byte) in * the Macintosh character set produce the corresponding filename using * the 'trivial' name-mangling scheme, returning the length of the * mangled filename. Note that the output string is not NULL * terminated. * * The name-mangling works as follows: * The character '/', which is illegal in Linux filenames is replaced * by ':' which never appears in HFS filenames. All other characters * are passed unchanged from input to output. */ int hfs_mac2asc(struct super_block *sb, char *out, const struct hfs_name *in) { struct nls_table *nls_disk = HFS_SB(sb)->nls_disk; struct nls_table *nls_io = HFS_SB(sb)->nls_io; const char *src; char *dst; int srclen, dstlen, size; src = in->name; srclen = in->len; if (srclen > HFS_NAMELEN) srclen = HFS_NAMELEN; dst = out; dstlen = HFS_MAX_NAMELEN; if (nls_io) { wchar_t ch; while (srclen > 0) { if (nls_disk) { size = nls_disk->char2uni(src, srclen, &ch); if (size <= 0) { ch = '?'; size = 1; } src += size; srclen -= size; } else { ch = *src++; srclen--; } if (ch == '/') ch = ':'; size = nls_io->uni2char(ch, dst, dstlen); if (size < 0) { if (size == -ENAMETOOLONG) goto out; *dst = '?'; size = 1; } dst += size; dstlen -= size; } } else { char ch; while (--srclen >= 0) *dst++ = (ch = *src++) == '/' ? ':' : ch; } out: return dst - out; } /* * hfs_asc2mac() * * Given an ASCII string (not null-terminated) and its length, * generate the corresponding filename in the Macintosh character set * using the 'trivial' name-mangling scheme, returning the length of * the mangled filename. Note that the output string is not NULL * terminated. * * This routine is a inverse to hfs_mac2triv(). * A ':' is replaced by a '/'. */ void hfs_asc2mac(struct super_block *sb, struct hfs_name *out, const struct qstr *in) { struct nls_table *nls_disk = HFS_SB(sb)->nls_disk; struct nls_table *nls_io = HFS_SB(sb)->nls_io; const char *src; char *dst; int srclen, dstlen, size; src = in->name; srclen = in->len; dst = out->name; dstlen = HFS_NAMELEN; if (nls_io) { wchar_t ch; while (srclen > 0 && dstlen > 0) { size = nls_io->char2uni(src, srclen, &ch); if (size < 0) { ch = '?'; size = 1; } src += size; srclen -= size; if (ch == ':') ch = '/'; if (nls_disk) { size = nls_disk->uni2char(ch, dst, dstlen); if (size < 0) { if (size == -ENAMETOOLONG) goto out; *dst = '?'; size = 1; } dst += size; dstlen -= size; } else { *dst++ = ch > 0xff ? '?' : ch; dstlen--; } } } else { char ch; if (dstlen > srclen) dstlen = srclen; while (--dstlen >= 0) *dst++ = (ch = *src++) == ':' ? '/' : ch; } out: out->len = dst - (char *)out->name; dstlen = HFS_NAMELEN - out->len; while (--dstlen >= 0) *dst++ = 0; }
linux-master
fs/hfs/trans.c
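A self-contained sketch of the "trivial" name-mangling described in the comments above, covering only the fallback path without NLS tables: a Pascal string (length byte, no NUL terminator) is copied with '/' mapped to ':' on the way out and ':' mapped back to '/' on the way in. The 31-byte limit stands in for the kernel's HFS_NAMELEN and is an assumption of this sketch.

#include <stdio.h>
#include <string.h>

#define NAMELEN 31	/* illustrative; the kernel uses HFS_NAMELEN */

/* Pascal string: length byte followed by up to NAMELEN bytes, no NUL. */
struct pstr {
	unsigned char len;
	char name[NAMELEN];
};

/* Mac -> Linux: '/' is illegal on Linux, so it becomes ':' (never valid in HFS names). */
static int mac2asc(char *out, const struct pstr *in)
{
	int n = in->len > NAMELEN ? NAMELEN : in->len;
	int i;

	for (i = 0; i < n; i++)
		out[i] = in->name[i] == '/' ? ':' : in->name[i];
	return n;	/* output is not NUL terminated, as in hfs_mac2asc() */
}

/* Linux -> Mac: the inverse mapping, ':' back to '/'. */
static void asc2mac(struct pstr *out, const char *in, int len)
{
	int n = len > NAMELEN ? NAMELEN : len;
	int i;

	for (i = 0; i < n; i++)
		out->name[i] = in[i] == ':' ? '/' : in[i];
	out->len = n;
	memset(out->name + n, 0, NAMELEN - n);
}

int main(void)
{
	struct pstr mac = { 5, "a/b:c" };
	char buf[NAMELEN];
	int n = mac2asc(buf, &mac);

	printf("%.*s\n", n, buf);			/* prints "a:b:c" */
	asc2mac(&mac, buf, n);
	printf("%.*s\n", mac.len, mac.name);		/* prints "a/b/c" */
	return 0;
}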
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/hfs/brec.c * * Copyright (C) 2001 * Brad Boyer ([email protected]) * (C) 2003 Ardis Technologies <[email protected]> * * Handle individual btree records */ #include "btree.h" static struct hfs_bnode *hfs_bnode_split(struct hfs_find_data *fd); static int hfs_brec_update_parent(struct hfs_find_data *fd); static int hfs_btree_inc_height(struct hfs_btree *tree); /* Get the length and offset of the given record in the given node */ u16 hfs_brec_lenoff(struct hfs_bnode *node, u16 rec, u16 *off) { __be16 retval[2]; u16 dataoff; dataoff = node->tree->node_size - (rec + 2) * 2; hfs_bnode_read(node, retval, dataoff, 4); *off = be16_to_cpu(retval[1]); return be16_to_cpu(retval[0]) - *off; } /* Get the length of the key from a keyed record */ u16 hfs_brec_keylen(struct hfs_bnode *node, u16 rec) { u16 retval, recoff; if (node->type != HFS_NODE_INDEX && node->type != HFS_NODE_LEAF) return 0; if ((node->type == HFS_NODE_INDEX) && !(node->tree->attributes & HFS_TREE_VARIDXKEYS)) { if (node->tree->attributes & HFS_TREE_BIGKEYS) retval = node->tree->max_key_len + 2; else retval = node->tree->max_key_len + 1; } else { recoff = hfs_bnode_read_u16(node, node->tree->node_size - (rec + 1) * 2); if (!recoff) return 0; if (node->tree->attributes & HFS_TREE_BIGKEYS) { retval = hfs_bnode_read_u16(node, recoff) + 2; if (retval > node->tree->max_key_len + 2) { pr_err("keylen %d too large\n", retval); retval = 0; } } else { retval = (hfs_bnode_read_u8(node, recoff) | 1) + 1; if (retval > node->tree->max_key_len + 1) { pr_err("keylen %d too large\n", retval); retval = 0; } } } return retval; } int hfs_brec_insert(struct hfs_find_data *fd, void *entry, int entry_len) { struct hfs_btree *tree; struct hfs_bnode *node, *new_node; int size, key_len, rec; int data_off, end_off; int idx_rec_off, data_rec_off, end_rec_off; __be32 cnid; tree = fd->tree; if (!fd->bnode) { if (!tree->root) hfs_btree_inc_height(tree); node = hfs_bnode_find(tree, tree->leaf_head); if (IS_ERR(node)) return PTR_ERR(node); fd->bnode = node; fd->record = -1; } new_node = NULL; key_len = (fd->search_key->key_len | 1) + 1; again: /* new record idx and complete record size */ rec = fd->record + 1; size = key_len + entry_len; node = fd->bnode; hfs_bnode_dump(node); /* get last offset */ end_rec_off = tree->node_size - (node->num_recs + 1) * 2; end_off = hfs_bnode_read_u16(node, end_rec_off); end_rec_off -= 2; hfs_dbg(BNODE_MOD, "insert_rec: %d, %d, %d, %d\n", rec, size, end_off, end_rec_off); if (size > end_rec_off - end_off) { if (new_node) panic("not enough room!\n"); new_node = hfs_bnode_split(fd); if (IS_ERR(new_node)) return PTR_ERR(new_node); goto again; } if (node->type == HFS_NODE_LEAF) { tree->leaf_count++; mark_inode_dirty(tree->inode); } node->num_recs++; /* write new last offset */ hfs_bnode_write_u16(node, offsetof(struct hfs_bnode_desc, num_recs), node->num_recs); hfs_bnode_write_u16(node, end_rec_off, end_off + size); data_off = end_off; data_rec_off = end_rec_off + 2; idx_rec_off = tree->node_size - (rec + 1) * 2; if (idx_rec_off == data_rec_off) goto skip; /* move all following entries */ do { data_off = hfs_bnode_read_u16(node, data_rec_off + 2); hfs_bnode_write_u16(node, data_rec_off, data_off + size); data_rec_off += 2; } while (data_rec_off < idx_rec_off); /* move data away */ hfs_bnode_move(node, data_off + size, data_off, end_off - data_off); skip: hfs_bnode_write(node, fd->search_key, data_off, key_len); hfs_bnode_write(node, entry, data_off + key_len, entry_len); 
hfs_bnode_dump(node); /* * update parent key if we inserted a key * at the start of the node and it is not the new node */ if (!rec && new_node != node) { hfs_bnode_read_key(node, fd->search_key, data_off + size); hfs_brec_update_parent(fd); } if (new_node) { hfs_bnode_put(fd->bnode); if (!new_node->parent) { hfs_btree_inc_height(tree); new_node->parent = tree->root; } fd->bnode = hfs_bnode_find(tree, new_node->parent); /* create index data entry */ cnid = cpu_to_be32(new_node->this); entry = &cnid; entry_len = sizeof(cnid); /* get index key */ hfs_bnode_read_key(new_node, fd->search_key, 14); __hfs_brec_find(fd->bnode, fd); hfs_bnode_put(new_node); new_node = NULL; if (tree->attributes & HFS_TREE_VARIDXKEYS) key_len = fd->search_key->key_len + 1; else { fd->search_key->key_len = tree->max_key_len; key_len = tree->max_key_len + 1; } goto again; } return 0; } int hfs_brec_remove(struct hfs_find_data *fd) { struct hfs_btree *tree; struct hfs_bnode *node, *parent; int end_off, rec_off, data_off, size; tree = fd->tree; node = fd->bnode; again: rec_off = tree->node_size - (fd->record + 2) * 2; end_off = tree->node_size - (node->num_recs + 1) * 2; if (node->type == HFS_NODE_LEAF) { tree->leaf_count--; mark_inode_dirty(tree->inode); } hfs_bnode_dump(node); hfs_dbg(BNODE_MOD, "remove_rec: %d, %d\n", fd->record, fd->keylength + fd->entrylength); if (!--node->num_recs) { hfs_bnode_unlink(node); if (!node->parent) return 0; parent = hfs_bnode_find(tree, node->parent); if (IS_ERR(parent)) return PTR_ERR(parent); hfs_bnode_put(node); node = fd->bnode = parent; __hfs_brec_find(node, fd); goto again; } hfs_bnode_write_u16(node, offsetof(struct hfs_bnode_desc, num_recs), node->num_recs); if (rec_off == end_off) goto skip; size = fd->keylength + fd->entrylength; do { data_off = hfs_bnode_read_u16(node, rec_off); hfs_bnode_write_u16(node, rec_off + 2, data_off - size); rec_off -= 2; } while (rec_off >= end_off); /* fill hole */ hfs_bnode_move(node, fd->keyoffset, fd->keyoffset + size, data_off - fd->keyoffset - size); skip: hfs_bnode_dump(node); if (!fd->record) hfs_brec_update_parent(fd); return 0; } static struct hfs_bnode *hfs_bnode_split(struct hfs_find_data *fd) { struct hfs_btree *tree; struct hfs_bnode *node, *new_node, *next_node; struct hfs_bnode_desc node_desc; int num_recs, new_rec_off, new_off, old_rec_off; int data_start, data_end, size; tree = fd->tree; node = fd->bnode; new_node = hfs_bmap_alloc(tree); if (IS_ERR(new_node)) return new_node; hfs_bnode_get(node); hfs_dbg(BNODE_MOD, "split_nodes: %d - %d - %d\n", node->this, new_node->this, node->next); new_node->next = node->next; new_node->prev = node->this; new_node->parent = node->parent; new_node->type = node->type; new_node->height = node->height; if (node->next) next_node = hfs_bnode_find(tree, node->next); else next_node = NULL; if (IS_ERR(next_node)) { hfs_bnode_put(node); hfs_bnode_put(new_node); return next_node; } size = tree->node_size / 2 - node->num_recs * 2 - 14; old_rec_off = tree->node_size - 4; num_recs = 1; for (;;) { data_start = hfs_bnode_read_u16(node, old_rec_off); if (data_start > size) break; old_rec_off -= 2; if (++num_recs < node->num_recs) continue; /* panic? 
*/ hfs_bnode_put(node); hfs_bnode_put(new_node); if (next_node) hfs_bnode_put(next_node); return ERR_PTR(-ENOSPC); } if (fd->record + 1 < num_recs) { /* new record is in the lower half, * so leave some more space there */ old_rec_off += 2; num_recs--; data_start = hfs_bnode_read_u16(node, old_rec_off); } else { hfs_bnode_put(node); hfs_bnode_get(new_node); fd->bnode = new_node; fd->record -= num_recs; fd->keyoffset -= data_start - 14; fd->entryoffset -= data_start - 14; } new_node->num_recs = node->num_recs - num_recs; node->num_recs = num_recs; new_rec_off = tree->node_size - 2; new_off = 14; size = data_start - new_off; num_recs = new_node->num_recs; data_end = data_start; while (num_recs) { hfs_bnode_write_u16(new_node, new_rec_off, new_off); old_rec_off -= 2; new_rec_off -= 2; data_end = hfs_bnode_read_u16(node, old_rec_off); new_off = data_end - size; num_recs--; } hfs_bnode_write_u16(new_node, new_rec_off, new_off); hfs_bnode_copy(new_node, 14, node, data_start, data_end - data_start); /* update new bnode header */ node_desc.next = cpu_to_be32(new_node->next); node_desc.prev = cpu_to_be32(new_node->prev); node_desc.type = new_node->type; node_desc.height = new_node->height; node_desc.num_recs = cpu_to_be16(new_node->num_recs); node_desc.reserved = 0; hfs_bnode_write(new_node, &node_desc, 0, sizeof(node_desc)); /* update previous bnode header */ node->next = new_node->this; hfs_bnode_read(node, &node_desc, 0, sizeof(node_desc)); node_desc.next = cpu_to_be32(node->next); node_desc.num_recs = cpu_to_be16(node->num_recs); hfs_bnode_write(node, &node_desc, 0, sizeof(node_desc)); /* update next bnode header */ if (next_node) { next_node->prev = new_node->this; hfs_bnode_read(next_node, &node_desc, 0, sizeof(node_desc)); node_desc.prev = cpu_to_be32(next_node->prev); hfs_bnode_write(next_node, &node_desc, 0, sizeof(node_desc)); hfs_bnode_put(next_node); } else if (node->this == tree->leaf_tail) { /* if there is no next node, this might be the new tail */ tree->leaf_tail = new_node->this; mark_inode_dirty(tree->inode); } hfs_bnode_dump(node); hfs_bnode_dump(new_node); hfs_bnode_put(node); return new_node; } static int hfs_brec_update_parent(struct hfs_find_data *fd) { struct hfs_btree *tree; struct hfs_bnode *node, *new_node, *parent; int newkeylen, diff; int rec, rec_off, end_rec_off; int start_off, end_off; tree = fd->tree; node = fd->bnode; new_node = NULL; if (!node->parent) return 0; again: parent = hfs_bnode_find(tree, node->parent); if (IS_ERR(parent)) return PTR_ERR(parent); __hfs_brec_find(parent, fd); if (fd->record < 0) return -ENOENT; hfs_bnode_dump(parent); rec = fd->record; /* size difference between old and new key */ if (tree->attributes & HFS_TREE_VARIDXKEYS) newkeylen = (hfs_bnode_read_u8(node, 14) | 1) + 1; else fd->keylength = newkeylen = tree->max_key_len + 1; hfs_dbg(BNODE_MOD, "update_rec: %d, %d, %d\n", rec, fd->keylength, newkeylen); rec_off = tree->node_size - (rec + 2) * 2; end_rec_off = tree->node_size - (parent->num_recs + 1) * 2; diff = newkeylen - fd->keylength; if (!diff) goto skip; if (diff > 0) { end_off = hfs_bnode_read_u16(parent, end_rec_off); if (end_rec_off - end_off < diff) { printk(KERN_DEBUG "splitting index node...\n"); fd->bnode = parent; new_node = hfs_bnode_split(fd); if (IS_ERR(new_node)) return PTR_ERR(new_node); parent = fd->bnode; rec = fd->record; rec_off = tree->node_size - (rec + 2) * 2; end_rec_off = tree->node_size - (parent->num_recs + 1) * 2; } } end_off = start_off = hfs_bnode_read_u16(parent, rec_off); hfs_bnode_write_u16(parent, 
rec_off, start_off + diff); start_off -= 4; /* move previous cnid too */ while (rec_off > end_rec_off) { rec_off -= 2; end_off = hfs_bnode_read_u16(parent, rec_off); hfs_bnode_write_u16(parent, rec_off, end_off + diff); } hfs_bnode_move(parent, start_off + diff, start_off, end_off - start_off); skip: hfs_bnode_copy(parent, fd->keyoffset, node, 14, newkeylen); if (!(tree->attributes & HFS_TREE_VARIDXKEYS)) hfs_bnode_write_u8(parent, fd->keyoffset, newkeylen - 1); hfs_bnode_dump(parent); hfs_bnode_put(node); node = parent; if (new_node) { __be32 cnid; if (!new_node->parent) { hfs_btree_inc_height(tree); new_node->parent = tree->root; } fd->bnode = hfs_bnode_find(tree, new_node->parent); /* create index key and entry */ hfs_bnode_read_key(new_node, fd->search_key, 14); cnid = cpu_to_be32(new_node->this); __hfs_brec_find(fd->bnode, fd); hfs_brec_insert(fd, &cnid, sizeof(cnid)); hfs_bnode_put(fd->bnode); hfs_bnode_put(new_node); if (!rec) { if (new_node == node) goto out; /* restore search_key */ hfs_bnode_read_key(node, fd->search_key, 14); } new_node = NULL; } if (!rec && node->parent) goto again; out: fd->bnode = node; return 0; } static int hfs_btree_inc_height(struct hfs_btree *tree) { struct hfs_bnode *node, *new_node; struct hfs_bnode_desc node_desc; int key_size, rec; __be32 cnid; node = NULL; if (tree->root) { node = hfs_bnode_find(tree, tree->root); if (IS_ERR(node)) return PTR_ERR(node); } new_node = hfs_bmap_alloc(tree); if (IS_ERR(new_node)) { hfs_bnode_put(node); return PTR_ERR(new_node); } tree->root = new_node->this; if (!tree->depth) { tree->leaf_head = tree->leaf_tail = new_node->this; new_node->type = HFS_NODE_LEAF; new_node->num_recs = 0; } else { new_node->type = HFS_NODE_INDEX; new_node->num_recs = 1; } new_node->parent = 0; new_node->next = 0; new_node->prev = 0; new_node->height = ++tree->depth; node_desc.next = cpu_to_be32(new_node->next); node_desc.prev = cpu_to_be32(new_node->prev); node_desc.type = new_node->type; node_desc.height = new_node->height; node_desc.num_recs = cpu_to_be16(new_node->num_recs); node_desc.reserved = 0; hfs_bnode_write(new_node, &node_desc, 0, sizeof(node_desc)); rec = tree->node_size - 2; hfs_bnode_write_u16(new_node, rec, 14); if (node) { /* insert old root idx into new root */ node->parent = tree->root; if (node->type == HFS_NODE_LEAF || tree->attributes & HFS_TREE_VARIDXKEYS) key_size = hfs_bnode_read_u8(node, 14) + 1; else key_size = tree->max_key_len + 1; hfs_bnode_copy(new_node, 14, node, 14, key_size); if (!(tree->attributes & HFS_TREE_VARIDXKEYS)) { key_size = tree->max_key_len + 1; hfs_bnode_write_u8(new_node, 14, tree->max_key_len); } key_size = (key_size + 1) & -2; cnid = cpu_to_be32(node->this); hfs_bnode_write(new_node, &cnid, 14 + key_size, 4); rec -= 2; hfs_bnode_write_u16(new_node, rec, 14 + key_size + 4); hfs_bnode_put(node); } hfs_bnode_put(new_node); mark_inode_dirty(tree->inode); return 0; }
linux-master
fs/hfs/brec.c
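The record bookkeeping above relies on a table of big-endian 16-bit offsets growing backwards from the end of each node: record r's offset lives at node_size - (r + 1) * 2, and a record's length is the following offset minus its own, which is exactly what hfs_brec_lenoff() reads in a single 4-byte access. A toy userspace sketch of that layout; the node size and record offsets are made up for illustration.

#include <stdint.h>
#include <stdio.h>

#define NODE_SIZE 64	/* illustrative; real HFS B-tree nodes are larger */

static uint16_t get_be16(const uint8_t *p)
{
	return (uint16_t)(p[0] << 8 | p[1]);
}

static void put_be16(uint8_t *p, uint16_t v)
{
	p[0] = v >> 8;
	p[1] = v & 0xff;
}

/* Mirror of hfs_brec_lenoff(): return length of record 'rec', store its offset. */
static uint16_t brec_lenoff(const uint8_t *node, uint16_t rec, uint16_t *off)
{
	const uint8_t *tbl = node + NODE_SIZE - (rec + 2) * 2;

	*off = get_be16(tbl + 2);	/* offset of record 'rec' */
	return get_be16(tbl) - *off;	/* next offset minus this offset */
}

int main(void)
{
	uint8_t node[NODE_SIZE] = { 0 };
	uint16_t off, len;

	/* Two records: record 0 at offset 14 (right after the node header),
	 * record 1 at offset 20, free space starting at offset 29. */
	put_be16(node + NODE_SIZE - 2, 14);
	put_be16(node + NODE_SIZE - 4, 20);
	put_be16(node + NODE_SIZE - 6, 29);

	len = brec_lenoff(node, 0, &off);
	printf("rec 0: off=%u len=%u\n", off, len);	/* off=14 len=6 */
	len = brec_lenoff(node, 1, &off);
	printf("rec 1: off=%u len=%u\n", off, len);	/* off=20 len=9 */
	return 0;
}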
/* * linux/fs/hfs/mdb.c * * Copyright (C) 1995-1997 Paul H. Hargrove * (C) 2003 Ardis Technologies <[email protected]> * This file may be distributed under the terms of the GNU General Public License. * * This file contains functions for reading/writing the MDB. */ #include <linux/cdrom.h> #include <linux/blkdev.h> #include <linux/nls.h> #include <linux/slab.h> #include "hfs_fs.h" #include "btree.h" /*================ File-local data types ================*/ /* * The HFS Master Directory Block (MDB). * * Also known as the Volume Information Block (VIB), this structure is * the HFS equivalent of a superblock. * * Reference: _Inside Macintosh: Files_ pages 2-59 through 2-62 * * modified for HFS Extended */ static int hfs_get_last_session(struct super_block *sb, sector_t *start, sector_t *size) { struct cdrom_device_info *cdi = disk_to_cdi(sb->s_bdev->bd_disk); /* default values */ *start = 0; *size = bdev_nr_sectors(sb->s_bdev); if (HFS_SB(sb)->session >= 0) { struct cdrom_tocentry te; if (!cdi) return -EINVAL; te.cdte_track = HFS_SB(sb)->session; te.cdte_format = CDROM_LBA; if (cdrom_read_tocentry(cdi, &te) || (te.cdte_ctrl & CDROM_DATA_TRACK) != 4) { pr_err("invalid session number or type of track\n"); return -EINVAL; } *start = (sector_t)te.cdte_addr.lba << 2; } else if (cdi) { struct cdrom_multisession ms_info; ms_info.addr_format = CDROM_LBA; if (cdrom_multisession(cdi, &ms_info) == 0 && ms_info.xa_flag) *start = (sector_t)ms_info.addr.lba << 2; } return 0; } /* * hfs_mdb_get() * * Build the in-core MDB for a filesystem, including * the B-trees and the volume bitmap. */ int hfs_mdb_get(struct super_block *sb) { struct buffer_head *bh; struct hfs_mdb *mdb, *mdb2; unsigned int block; char *ptr; int off2, len, size, sect; sector_t part_start, part_size; loff_t off; __be16 attrib; /* set the device driver to 512-byte blocks */ size = sb_min_blocksize(sb, HFS_SECTOR_SIZE); if (!size) return -EINVAL; if (hfs_get_last_session(sb, &part_start, &part_size)) return -EINVAL; while (1) { /* See if this is an HFS filesystem */ bh = sb_bread512(sb, part_start + HFS_MDB_BLK, mdb); if (!bh) goto out; if (mdb->drSigWord == cpu_to_be16(HFS_SUPER_MAGIC)) break; brelse(bh); /* check for a partition block * (should do this only for cdrom/loop though) */ if (hfs_part_find(sb, &part_start, &part_size)) goto out; } HFS_SB(sb)->alloc_blksz = size = be32_to_cpu(mdb->drAlBlkSiz); if (!size || (size & (HFS_SECTOR_SIZE - 1))) { pr_err("bad allocation block size %d\n", size); goto out_bh; } size = min(HFS_SB(sb)->alloc_blksz, (u32)PAGE_SIZE); /* size must be a multiple of 512 */ while (size & (size - 1)) size -= HFS_SECTOR_SIZE; sect = be16_to_cpu(mdb->drAlBlSt) + part_start; /* align block size to first sector */ while (sect & ((size - 1) >> HFS_SECTOR_SIZE_BITS)) size >>= 1; /* align block size to weird alloc size */ while (HFS_SB(sb)->alloc_blksz & (size - 1)) size >>= 1; brelse(bh); if (!sb_set_blocksize(sb, size)) { pr_err("unable to set blocksize to %u\n", size); goto out; } bh = sb_bread512(sb, part_start + HFS_MDB_BLK, mdb); if (!bh) goto out; if (mdb->drSigWord != cpu_to_be16(HFS_SUPER_MAGIC)) goto out_bh; HFS_SB(sb)->mdb_bh = bh; HFS_SB(sb)->mdb = mdb; /* These parameters are read from the MDB, and never written */ HFS_SB(sb)->part_start = part_start; HFS_SB(sb)->fs_ablocks = be16_to_cpu(mdb->drNmAlBlks); HFS_SB(sb)->fs_div = HFS_SB(sb)->alloc_blksz >> sb->s_blocksize_bits; HFS_SB(sb)->clumpablks = be32_to_cpu(mdb->drClpSiz) / HFS_SB(sb)->alloc_blksz; if (!HFS_SB(sb)->clumpablks) HFS_SB(sb)->clumpablks = 
1; HFS_SB(sb)->fs_start = (be16_to_cpu(mdb->drAlBlSt) + part_start) >> (sb->s_blocksize_bits - HFS_SECTOR_SIZE_BITS); /* These parameters are read from and written to the MDB */ HFS_SB(sb)->free_ablocks = be16_to_cpu(mdb->drFreeBks); HFS_SB(sb)->next_id = be32_to_cpu(mdb->drNxtCNID); HFS_SB(sb)->root_files = be16_to_cpu(mdb->drNmFls); HFS_SB(sb)->root_dirs = be16_to_cpu(mdb->drNmRtDirs); HFS_SB(sb)->file_count = be32_to_cpu(mdb->drFilCnt); HFS_SB(sb)->folder_count = be32_to_cpu(mdb->drDirCnt); /* TRY to get the alternate (backup) MDB. */ sect = part_start + part_size - 2; bh = sb_bread512(sb, sect, mdb2); if (bh) { if (mdb2->drSigWord == cpu_to_be16(HFS_SUPER_MAGIC)) { HFS_SB(sb)->alt_mdb_bh = bh; HFS_SB(sb)->alt_mdb = mdb2; } else brelse(bh); } if (!HFS_SB(sb)->alt_mdb) { pr_warn("unable to locate alternate MDB\n"); pr_warn("continuing without an alternate MDB\n"); } HFS_SB(sb)->bitmap = kmalloc(8192, GFP_KERNEL); if (!HFS_SB(sb)->bitmap) goto out; /* read in the bitmap */ block = be16_to_cpu(mdb->drVBMSt) + part_start; off = (loff_t)block << HFS_SECTOR_SIZE_BITS; size = (HFS_SB(sb)->fs_ablocks + 8) / 8; ptr = (u8 *)HFS_SB(sb)->bitmap; while (size) { bh = sb_bread(sb, off >> sb->s_blocksize_bits); if (!bh) { pr_err("unable to read volume bitmap\n"); goto out; } off2 = off & (sb->s_blocksize - 1); len = min((int)sb->s_blocksize - off2, size); memcpy(ptr, bh->b_data + off2, len); brelse(bh); ptr += len; off += len; size -= len; } HFS_SB(sb)->ext_tree = hfs_btree_open(sb, HFS_EXT_CNID, hfs_ext_keycmp); if (!HFS_SB(sb)->ext_tree) { pr_err("unable to open extent tree\n"); goto out; } HFS_SB(sb)->cat_tree = hfs_btree_open(sb, HFS_CAT_CNID, hfs_cat_keycmp); if (!HFS_SB(sb)->cat_tree) { pr_err("unable to open catalog tree\n"); goto out; } attrib = mdb->drAtrb; if (!(attrib & cpu_to_be16(HFS_SB_ATTRIB_UNMNT))) { pr_warn("filesystem was not cleanly unmounted, running fsck.hfs is recommended. mounting read-only.\n"); sb->s_flags |= SB_RDONLY; } if ((attrib & cpu_to_be16(HFS_SB_ATTRIB_SLOCK))) { pr_warn("filesystem is marked locked, mounting read-only.\n"); sb->s_flags |= SB_RDONLY; } if (!sb_rdonly(sb)) { /* Mark the volume uncleanly unmounted in case we crash */ attrib &= cpu_to_be16(~HFS_SB_ATTRIB_UNMNT); attrib |= cpu_to_be16(HFS_SB_ATTRIB_INCNSTNT); mdb->drAtrb = attrib; be32_add_cpu(&mdb->drWrCnt, 1); mdb->drLsMod = hfs_mtime(); mark_buffer_dirty(HFS_SB(sb)->mdb_bh); sync_dirty_buffer(HFS_SB(sb)->mdb_bh); } return 0; out_bh: brelse(bh); out: hfs_mdb_put(sb); return -EIO; } /* * hfs_mdb_commit() * * Description: * This updates the MDB on disk. * It does not check, if the superblock has been modified, or * if the filesystem has been mounted read-only. It is mainly * called by hfs_sync_fs() and flush_mdb(). * Input Variable(s): * struct hfs_mdb *mdb: Pointer to the hfs MDB * int backup; * Output Variable(s): * NONE * Returns: * void * Preconditions: * 'mdb' points to a "valid" (struct hfs_mdb). * Postconditions: * The HFS MDB and on disk will be updated, by copying the possibly * modified fields from the in memory MDB (in native byte order) to * the disk block buffer. * If 'backup' is non-zero then the alternate MDB is also written * and the function doesn't return until it is actually on disk. 
*/ void hfs_mdb_commit(struct super_block *sb) { struct hfs_mdb *mdb = HFS_SB(sb)->mdb; if (sb_rdonly(sb)) return; lock_buffer(HFS_SB(sb)->mdb_bh); if (test_and_clear_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags)) { /* These parameters may have been modified, so write them back */ mdb->drLsMod = hfs_mtime(); mdb->drFreeBks = cpu_to_be16(HFS_SB(sb)->free_ablocks); mdb->drNxtCNID = cpu_to_be32(HFS_SB(sb)->next_id); mdb->drNmFls = cpu_to_be16(HFS_SB(sb)->root_files); mdb->drNmRtDirs = cpu_to_be16(HFS_SB(sb)->root_dirs); mdb->drFilCnt = cpu_to_be32(HFS_SB(sb)->file_count); mdb->drDirCnt = cpu_to_be32(HFS_SB(sb)->folder_count); /* write MDB to disk */ mark_buffer_dirty(HFS_SB(sb)->mdb_bh); } /* write the backup MDB, not returning until it is written. * we only do this when either the catalog or extents overflow * files grow. */ if (test_and_clear_bit(HFS_FLG_ALT_MDB_DIRTY, &HFS_SB(sb)->flags) && HFS_SB(sb)->alt_mdb) { hfs_inode_write_fork(HFS_SB(sb)->ext_tree->inode, mdb->drXTExtRec, &mdb->drXTFlSize, NULL); hfs_inode_write_fork(HFS_SB(sb)->cat_tree->inode, mdb->drCTExtRec, &mdb->drCTFlSize, NULL); lock_buffer(HFS_SB(sb)->alt_mdb_bh); memcpy(HFS_SB(sb)->alt_mdb, HFS_SB(sb)->mdb, HFS_SECTOR_SIZE); HFS_SB(sb)->alt_mdb->drAtrb |= cpu_to_be16(HFS_SB_ATTRIB_UNMNT); HFS_SB(sb)->alt_mdb->drAtrb &= cpu_to_be16(~HFS_SB_ATTRIB_INCNSTNT); unlock_buffer(HFS_SB(sb)->alt_mdb_bh); mark_buffer_dirty(HFS_SB(sb)->alt_mdb_bh); sync_dirty_buffer(HFS_SB(sb)->alt_mdb_bh); } if (test_and_clear_bit(HFS_FLG_BITMAP_DIRTY, &HFS_SB(sb)->flags)) { struct buffer_head *bh; sector_t block; char *ptr; int off, size, len; block = be16_to_cpu(HFS_SB(sb)->mdb->drVBMSt) + HFS_SB(sb)->part_start; off = (block << HFS_SECTOR_SIZE_BITS) & (sb->s_blocksize - 1); block >>= sb->s_blocksize_bits - HFS_SECTOR_SIZE_BITS; size = (HFS_SB(sb)->fs_ablocks + 7) / 8; ptr = (u8 *)HFS_SB(sb)->bitmap; while (size) { bh = sb_bread(sb, block); if (!bh) { pr_err("unable to read volume bitmap\n"); break; } len = min((int)sb->s_blocksize - off, size); lock_buffer(bh); memcpy(bh->b_data + off, ptr, len); unlock_buffer(bh); mark_buffer_dirty(bh); brelse(bh); block++; off = 0; ptr += len; size -= len; } } unlock_buffer(HFS_SB(sb)->mdb_bh); } void hfs_mdb_close(struct super_block *sb) { /* update volume attributes */ if (sb_rdonly(sb)) return; HFS_SB(sb)->mdb->drAtrb |= cpu_to_be16(HFS_SB_ATTRIB_UNMNT); HFS_SB(sb)->mdb->drAtrb &= cpu_to_be16(~HFS_SB_ATTRIB_INCNSTNT); mark_buffer_dirty(HFS_SB(sb)->mdb_bh); } /* * hfs_mdb_put() * * Release the resources associated with the in-core MDB. */ void hfs_mdb_put(struct super_block *sb) { if (!HFS_SB(sb)) return; /* free the B-trees */ hfs_btree_close(HFS_SB(sb)->ext_tree); hfs_btree_close(HFS_SB(sb)->cat_tree); /* free the buffers holding the primary and alternate MDBs */ brelse(HFS_SB(sb)->mdb_bh); brelse(HFS_SB(sb)->alt_mdb_bh); unload_nls(HFS_SB(sb)->nls_io); unload_nls(HFS_SB(sb)->nls_disk); kfree(HFS_SB(sb)->bitmap); kfree(HFS_SB(sb)); sb->s_fs_info = NULL; }
linux-master
fs/hfs/mdb.c
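Outside the kernel, the MDB described above can be spotted in a raw volume image: it occupies a 512-byte sector near the start of the volume and begins with the big-endian signature word drSigWord. A hedged userspace sketch that probes an image for it; the sector number 2 and the magic value 0x4244 ('BD') are the classic HFS on-disk constants, assumed here rather than taken from hfs_fs.h.

#include <stdio.h>
#include <stdlib.h>

#define SECTOR_SIZE	512
#define MDB_SECTOR	2	/* assumed: classic HFS keeps the MDB in sector 2 */
#define HFS_MAGIC	0x4244	/* assumed value of the big-endian drSigWord ('BD') */

int main(int argc, char **argv)
{
	unsigned char sector[SECTOR_SIZE];
	unsigned int sig;
	FILE *f;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <image>\n", argv[0]);
		return EXIT_FAILURE;
	}
	f = fopen(argv[1], "rb");
	if (!f) {
		perror("fopen");
		return EXIT_FAILURE;
	}
	if (fseek(f, (long)MDB_SECTOR * SECTOR_SIZE, SEEK_SET) != 0 ||
	    fread(sector, 1, SECTOR_SIZE, f) != SECTOR_SIZE) {
		fprintf(stderr, "short read\n");
		fclose(f);
		return EXIT_FAILURE;
	}
	fclose(f);

	sig = sector[0] << 8 | sector[1];	/* drSigWord is stored big-endian */
	if (sig == HFS_MAGIC)
		printf("HFS MDB signature found in sector %d\n", MDB_SECTOR);
	else
		printf("no HFS signature at sector %d\n", MDB_SECTOR);
	return sig == HFS_MAGIC ? EXIT_SUCCESS : EXIT_FAILURE;
}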
/* * linux/fs/hfs/extent.c * * Copyright (C) 1995-1997 Paul H. Hargrove * (C) 2003 Ardis Technologies <[email protected]> * This file may be distributed under the terms of the GNU General Public License. * * This file contains the functions related to the extents B-tree. */ #include <linux/pagemap.h> #include "hfs_fs.h" #include "btree.h" /*================ File-local functions ================*/ /* * build_key */ static void hfs_ext_build_key(hfs_btree_key *key, u32 cnid, u16 block, u8 type) { key->key_len = 7; key->ext.FkType = type; key->ext.FNum = cpu_to_be32(cnid); key->ext.FABN = cpu_to_be16(block); } /* * hfs_ext_compare() * * Description: * This is the comparison function used for the extents B-tree. In * comparing extent B-tree entries, the file id is the most * significant field (compared as unsigned ints); the fork type is * the second most significant field (compared as unsigned chars); * and the allocation block number field is the least significant * (compared as unsigned ints). * Input Variable(s): * struct hfs_ext_key *key1: pointer to the first key to compare * struct hfs_ext_key *key2: pointer to the second key to compare * Output Variable(s): * NONE * Returns: * int: negative if key1<key2, positive if key1>key2, and 0 if key1==key2 * Preconditions: * key1 and key2 point to "valid" (struct hfs_ext_key)s. * Postconditions: * This function has no side-effects */ int hfs_ext_keycmp(const btree_key *key1, const btree_key *key2) { __be32 fnum1, fnum2; __be16 block1, block2; fnum1 = key1->ext.FNum; fnum2 = key2->ext.FNum; if (fnum1 != fnum2) return be32_to_cpu(fnum1) < be32_to_cpu(fnum2) ? -1 : 1; if (key1->ext.FkType != key2->ext.FkType) return key1->ext.FkType < key2->ext.FkType ? -1 : 1; block1 = key1->ext.FABN; block2 = key2->ext.FABN; if (block1 == block2) return 0; return be16_to_cpu(block1) < be16_to_cpu(block2) ? -1 : 1; } /* * hfs_ext_find_block * * Find a block within an extent record */ static u16 hfs_ext_find_block(struct hfs_extent *ext, u16 off) { int i; u16 count; for (i = 0; i < 3; ext++, i++) { count = be16_to_cpu(ext->count); if (off < count) return be16_to_cpu(ext->block) + off; off -= count; } /* panic? */ return 0; } static int hfs_ext_block_count(struct hfs_extent *ext) { int i; u16 count = 0; for (i = 0; i < 3; ext++, i++) count += be16_to_cpu(ext->count); return count; } static u16 hfs_ext_lastblock(struct hfs_extent *ext) { int i; ext += 2; for (i = 0; i < 2; ext--, i++) if (ext->count) break; return be16_to_cpu(ext->block) + be16_to_cpu(ext->count); } static int __hfs_ext_write_extent(struct inode *inode, struct hfs_find_data *fd) { int res; hfs_ext_build_key(fd->search_key, inode->i_ino, HFS_I(inode)->cached_start, HFS_IS_RSRC(inode) ? 
HFS_FK_RSRC : HFS_FK_DATA); res = hfs_brec_find(fd); if (HFS_I(inode)->flags & HFS_FLG_EXT_NEW) { if (res != -ENOENT) return res; /* Fail early and avoid ENOSPC during the btree operation */ res = hfs_bmap_reserve(fd->tree, fd->tree->depth + 1); if (res) return res; hfs_brec_insert(fd, HFS_I(inode)->cached_extents, sizeof(hfs_extent_rec)); HFS_I(inode)->flags &= ~(HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW); } else { if (res) return res; hfs_bnode_write(fd->bnode, HFS_I(inode)->cached_extents, fd->entryoffset, fd->entrylength); HFS_I(inode)->flags &= ~HFS_FLG_EXT_DIRTY; } return 0; } int hfs_ext_write_extent(struct inode *inode) { struct hfs_find_data fd; int res = 0; if (HFS_I(inode)->flags & HFS_FLG_EXT_DIRTY) { res = hfs_find_init(HFS_SB(inode->i_sb)->ext_tree, &fd); if (res) return res; res = __hfs_ext_write_extent(inode, &fd); hfs_find_exit(&fd); } return res; } static inline int __hfs_ext_read_extent(struct hfs_find_data *fd, struct hfs_extent *extent, u32 cnid, u32 block, u8 type) { int res; hfs_ext_build_key(fd->search_key, cnid, block, type); fd->key->ext.FNum = 0; res = hfs_brec_find(fd); if (res && res != -ENOENT) return res; if (fd->key->ext.FNum != fd->search_key->ext.FNum || fd->key->ext.FkType != fd->search_key->ext.FkType) return -ENOENT; if (fd->entrylength != sizeof(hfs_extent_rec)) return -EIO; hfs_bnode_read(fd->bnode, extent, fd->entryoffset, sizeof(hfs_extent_rec)); return 0; } static inline int __hfs_ext_cache_extent(struct hfs_find_data *fd, struct inode *inode, u32 block) { int res; if (HFS_I(inode)->flags & HFS_FLG_EXT_DIRTY) { res = __hfs_ext_write_extent(inode, fd); if (res) return res; } res = __hfs_ext_read_extent(fd, HFS_I(inode)->cached_extents, inode->i_ino, block, HFS_IS_RSRC(inode) ? HFS_FK_RSRC : HFS_FK_DATA); if (!res) { HFS_I(inode)->cached_start = be16_to_cpu(fd->key->ext.FABN); HFS_I(inode)->cached_blocks = hfs_ext_block_count(HFS_I(inode)->cached_extents); } else { HFS_I(inode)->cached_start = HFS_I(inode)->cached_blocks = 0; HFS_I(inode)->flags &= ~(HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW); } return res; } static int hfs_ext_read_extent(struct inode *inode, u16 block) { struct hfs_find_data fd; int res; if (block >= HFS_I(inode)->cached_start && block < HFS_I(inode)->cached_start + HFS_I(inode)->cached_blocks) return 0; res = hfs_find_init(HFS_SB(inode->i_sb)->ext_tree, &fd); if (!res) { res = __hfs_ext_cache_extent(&fd, inode, block); hfs_find_exit(&fd); } return res; } static void hfs_dump_extent(struct hfs_extent *extent) { int i; hfs_dbg(EXTENT, " "); for (i = 0; i < 3; i++) hfs_dbg_cont(EXTENT, " %u:%u", be16_to_cpu(extent[i].block), be16_to_cpu(extent[i].count)); hfs_dbg_cont(EXTENT, "\n"); } static int hfs_add_extent(struct hfs_extent *extent, u16 offset, u16 alloc_block, u16 block_count) { u16 count, start; int i; hfs_dump_extent(extent); for (i = 0; i < 3; extent++, i++) { count = be16_to_cpu(extent->count); if (offset == count) { start = be16_to_cpu(extent->block); if (alloc_block != start + count) { if (++i >= 3) return -ENOSPC; extent++; extent->block = cpu_to_be16(alloc_block); } else block_count += count; extent->count = cpu_to_be16(block_count); return 0; } else if (offset < count) break; offset -= count; } /* panic? 
*/ return -EIO; } static int hfs_free_extents(struct super_block *sb, struct hfs_extent *extent, u16 offset, u16 block_nr) { u16 count, start; int i; hfs_dump_extent(extent); for (i = 0; i < 3; extent++, i++) { count = be16_to_cpu(extent->count); if (offset == count) goto found; else if (offset < count) break; offset -= count; } /* panic? */ return -EIO; found: for (;;) { start = be16_to_cpu(extent->block); if (count <= block_nr) { hfs_clear_vbm_bits(sb, start, count); extent->block = 0; extent->count = 0; block_nr -= count; } else { count -= block_nr; hfs_clear_vbm_bits(sb, start + count, block_nr); extent->count = cpu_to_be16(count); block_nr = 0; } if (!block_nr || !i) return 0; i--; extent--; count = be16_to_cpu(extent->count); } } int hfs_free_fork(struct super_block *sb, struct hfs_cat_file *file, int type) { struct hfs_find_data fd; u32 total_blocks, blocks, start; u32 cnid = be32_to_cpu(file->FlNum); struct hfs_extent *extent; int res, i; if (type == HFS_FK_DATA) { total_blocks = be32_to_cpu(file->PyLen); extent = file->ExtRec; } else { total_blocks = be32_to_cpu(file->RPyLen); extent = file->RExtRec; } total_blocks /= HFS_SB(sb)->alloc_blksz; if (!total_blocks) return 0; blocks = 0; for (i = 0; i < 3; i++) blocks += be16_to_cpu(extent[i].count); res = hfs_free_extents(sb, extent, blocks, blocks); if (res) return res; if (total_blocks == blocks) return 0; res = hfs_find_init(HFS_SB(sb)->ext_tree, &fd); if (res) return res; do { res = __hfs_ext_read_extent(&fd, extent, cnid, total_blocks, type); if (res) break; start = be16_to_cpu(fd.key->ext.FABN); hfs_free_extents(sb, extent, total_blocks - start, total_blocks); hfs_brec_remove(&fd); total_blocks = start; } while (total_blocks > blocks); hfs_find_exit(&fd); return res; } /* * hfs_get_block */ int hfs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create) { struct super_block *sb; u16 dblock, ablock; int res; sb = inode->i_sb; /* Convert inode block to disk allocation block */ ablock = (u32)block / HFS_SB(sb)->fs_div; if (block >= HFS_I(inode)->fs_blocks) { if (!create) return 0; if (block > HFS_I(inode)->fs_blocks) return -EIO; if (ablock >= HFS_I(inode)->alloc_blocks) { res = hfs_extend_file(inode); if (res) return res; } } else create = 0; if (ablock < HFS_I(inode)->first_blocks) { dblock = hfs_ext_find_block(HFS_I(inode)->first_extents, ablock); goto done; } mutex_lock(&HFS_I(inode)->extents_lock); res = hfs_ext_read_extent(inode, ablock); if (!res) dblock = hfs_ext_find_block(HFS_I(inode)->cached_extents, ablock - HFS_I(inode)->cached_start); else { mutex_unlock(&HFS_I(inode)->extents_lock); return -EIO; } mutex_unlock(&HFS_I(inode)->extents_lock); done: map_bh(bh_result, sb, HFS_SB(sb)->fs_start + dblock * HFS_SB(sb)->fs_div + (u32)block % HFS_SB(sb)->fs_div); if (create) { set_buffer_new(bh_result); HFS_I(inode)->phys_size += sb->s_blocksize; HFS_I(inode)->fs_blocks++; inode_add_bytes(inode, sb->s_blocksize); mark_inode_dirty(inode); } return 0; } int hfs_extend_file(struct inode *inode) { struct super_block *sb = inode->i_sb; u32 start, len, goal; int res; mutex_lock(&HFS_I(inode)->extents_lock); if (HFS_I(inode)->alloc_blocks == HFS_I(inode)->first_blocks) goal = hfs_ext_lastblock(HFS_I(inode)->first_extents); else { res = hfs_ext_read_extent(inode, HFS_I(inode)->alloc_blocks); if (res) goto out; goal = hfs_ext_lastblock(HFS_I(inode)->cached_extents); } len = HFS_I(inode)->clump_blocks; start = hfs_vbm_search_free(sb, goal, &len); if (!len) { res = -ENOSPC; goto out; } hfs_dbg(EXTENT, 
"extend %lu: %u,%u\n", inode->i_ino, start, len); if (HFS_I(inode)->alloc_blocks == HFS_I(inode)->first_blocks) { if (!HFS_I(inode)->first_blocks) { hfs_dbg(EXTENT, "first extents\n"); /* no extents yet */ HFS_I(inode)->first_extents[0].block = cpu_to_be16(start); HFS_I(inode)->first_extents[0].count = cpu_to_be16(len); res = 0; } else { /* try to append to extents in inode */ res = hfs_add_extent(HFS_I(inode)->first_extents, HFS_I(inode)->alloc_blocks, start, len); if (res == -ENOSPC) goto insert_extent; } if (!res) { hfs_dump_extent(HFS_I(inode)->first_extents); HFS_I(inode)->first_blocks += len; } } else { res = hfs_add_extent(HFS_I(inode)->cached_extents, HFS_I(inode)->alloc_blocks - HFS_I(inode)->cached_start, start, len); if (!res) { hfs_dump_extent(HFS_I(inode)->cached_extents); HFS_I(inode)->flags |= HFS_FLG_EXT_DIRTY; HFS_I(inode)->cached_blocks += len; } else if (res == -ENOSPC) goto insert_extent; } out: mutex_unlock(&HFS_I(inode)->extents_lock); if (!res) { HFS_I(inode)->alloc_blocks += len; mark_inode_dirty(inode); if (inode->i_ino < HFS_FIRSTUSER_CNID) set_bit(HFS_FLG_ALT_MDB_DIRTY, &HFS_SB(sb)->flags); set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags); hfs_mark_mdb_dirty(sb); } return res; insert_extent: hfs_dbg(EXTENT, "insert new extent\n"); res = hfs_ext_write_extent(inode); if (res) goto out; memset(HFS_I(inode)->cached_extents, 0, sizeof(hfs_extent_rec)); HFS_I(inode)->cached_extents[0].block = cpu_to_be16(start); HFS_I(inode)->cached_extents[0].count = cpu_to_be16(len); hfs_dump_extent(HFS_I(inode)->cached_extents); HFS_I(inode)->flags |= HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW; HFS_I(inode)->cached_start = HFS_I(inode)->alloc_blocks; HFS_I(inode)->cached_blocks = len; res = 0; goto out; } void hfs_file_truncate(struct inode *inode) { struct super_block *sb = inode->i_sb; struct hfs_find_data fd; u16 blk_cnt, alloc_cnt, start; u32 size; int res; hfs_dbg(INODE, "truncate: %lu, %Lu -> %Lu\n", inode->i_ino, (long long)HFS_I(inode)->phys_size, inode->i_size); if (inode->i_size > HFS_I(inode)->phys_size) { struct address_space *mapping = inode->i_mapping; void *fsdata = NULL; struct page *page; /* XXX: Can use generic_cont_expand? 
*/ size = inode->i_size - 1; res = hfs_write_begin(NULL, mapping, size + 1, 0, &page, &fsdata); if (!res) { res = generic_write_end(NULL, mapping, size + 1, 0, 0, page, fsdata); } if (res) inode->i_size = HFS_I(inode)->phys_size; return; } else if (inode->i_size == HFS_I(inode)->phys_size) return; size = inode->i_size + HFS_SB(sb)->alloc_blksz - 1; blk_cnt = size / HFS_SB(sb)->alloc_blksz; alloc_cnt = HFS_I(inode)->alloc_blocks; if (blk_cnt == alloc_cnt) goto out; mutex_lock(&HFS_I(inode)->extents_lock); res = hfs_find_init(HFS_SB(sb)->ext_tree, &fd); if (res) { mutex_unlock(&HFS_I(inode)->extents_lock); /* XXX: We lack error handling of hfs_file_truncate() */ return; } while (1) { if (alloc_cnt == HFS_I(inode)->first_blocks) { hfs_free_extents(sb, HFS_I(inode)->first_extents, alloc_cnt, alloc_cnt - blk_cnt); hfs_dump_extent(HFS_I(inode)->first_extents); HFS_I(inode)->first_blocks = blk_cnt; break; } res = __hfs_ext_cache_extent(&fd, inode, alloc_cnt); if (res) break; start = HFS_I(inode)->cached_start; hfs_free_extents(sb, HFS_I(inode)->cached_extents, alloc_cnt - start, alloc_cnt - blk_cnt); hfs_dump_extent(HFS_I(inode)->cached_extents); if (blk_cnt > start) { HFS_I(inode)->flags |= HFS_FLG_EXT_DIRTY; break; } alloc_cnt = start; HFS_I(inode)->cached_start = HFS_I(inode)->cached_blocks = 0; HFS_I(inode)->flags &= ~(HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW); hfs_brec_remove(&fd); } hfs_find_exit(&fd); mutex_unlock(&HFS_I(inode)->extents_lock); HFS_I(inode)->alloc_blocks = blk_cnt; out: HFS_I(inode)->phys_size = inode->i_size; HFS_I(inode)->fs_blocks = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits; inode_set_bytes(inode, HFS_I(inode)->fs_blocks << sb->s_blocksize_bits); mark_inode_dirty(inode); }
linux-master
fs/hfs/extent.c
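Each HFS extent record above holds three (start block, count) runs, and mapping a file-relative block to a disk allocation block is a linear walk over those runs, as hfs_ext_find_block() does. A small self-contained sketch of that walk; the run values are made up and kept in host byte order for readability, whereas the on-disk record is big-endian.

#include <stdint.h>
#include <stdio.h>

struct extent {
	uint16_t block;	/* first allocation block of the run */
	uint16_t count;	/* number of allocation blocks in the run */
};

/* Walk a 3-slot extent record; returns 0 when 'off' is past the record,
 * matching the "panic?" fallback in hfs_ext_find_block(). */
static uint16_t ext_find_block(const struct extent ext[3], uint16_t off)
{
	int i;

	for (i = 0; i < 3; i++) {
		if (off < ext[i].count)
			return ext[i].block + off;
		off -= ext[i].count;
	}
	return 0;
}

int main(void)
{
	/* Three runs: blocks 100..103, 200..201, 300..309. */
	const struct extent rec[3] = { { 100, 4 }, { 200, 2 }, { 300, 10 } };
	uint16_t off;

	for (off = 0; off < 16; off++)
		printf("file block %2u -> alloc block %u\n",
		       off, ext_find_block(rec, off));
	return 0;
}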
/* * linux/fs/hfs/catalog.c * * Copyright (C) 1995-1997 Paul H. Hargrove * (C) 2003 Ardis Technologies <[email protected]> * This file may be distributed under the terms of the GNU General Public License. * * This file contains the functions related to the catalog B-tree. * * Cache code shamelessly stolen from * linux/fs/inode.c Copyright (C) 1991, 1992 Linus Torvalds * re-shamelessly stolen Copyright (C) 1997 Linus Torvalds */ #include "hfs_fs.h" #include "btree.h" /* * hfs_cat_build_key() * * Given the ID of the parent and the name build a search key. */ void hfs_cat_build_key(struct super_block *sb, btree_key *key, u32 parent, const struct qstr *name) { key->cat.reserved = 0; key->cat.ParID = cpu_to_be32(parent); if (name) { hfs_asc2mac(sb, &key->cat.CName, name); key->key_len = 6 + key->cat.CName.len; } else { memset(&key->cat.CName, 0, sizeof(struct hfs_name)); key->key_len = 6; } } static int hfs_cat_build_record(hfs_cat_rec *rec, u32 cnid, struct inode *inode) { __be32 mtime = hfs_mtime(); memset(rec, 0, sizeof(*rec)); if (S_ISDIR(inode->i_mode)) { rec->type = HFS_CDR_DIR; rec->dir.DirID = cpu_to_be32(cnid); rec->dir.CrDat = mtime; rec->dir.MdDat = mtime; rec->dir.BkDat = 0; rec->dir.UsrInfo.frView = cpu_to_be16(0xff); return sizeof(struct hfs_cat_dir); } else { /* init some fields for the file record */ rec->type = HFS_CDR_FIL; rec->file.Flags = HFS_FIL_USED | HFS_FIL_THD; if (!(inode->i_mode & S_IWUSR)) rec->file.Flags |= HFS_FIL_LOCK; rec->file.FlNum = cpu_to_be32(cnid); rec->file.CrDat = mtime; rec->file.MdDat = mtime; rec->file.BkDat = 0; rec->file.UsrWds.fdType = HFS_SB(inode->i_sb)->s_type; rec->file.UsrWds.fdCreator = HFS_SB(inode->i_sb)->s_creator; return sizeof(struct hfs_cat_file); } } static int hfs_cat_build_thread(struct super_block *sb, hfs_cat_rec *rec, int type, u32 parentid, const struct qstr *name) { rec->type = type; memset(rec->thread.reserved, 0, sizeof(rec->thread.reserved)); rec->thread.ParID = cpu_to_be32(parentid); hfs_asc2mac(sb, &rec->thread.CName, name); return sizeof(struct hfs_cat_thread); } /* * create_entry() * * Add a new file or directory to the catalog B-tree and * return a (struct hfs_cat_entry) for it in '*result'. */ int hfs_cat_create(u32 cnid, struct inode *dir, const struct qstr *str, struct inode *inode) { struct hfs_find_data fd; struct super_block *sb; union hfs_cat_rec entry; int entry_size; int err; hfs_dbg(CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink); if (dir->i_size >= HFS_MAX_VALENCE) return -ENOSPC; sb = dir->i_sb; err = hfs_find_init(HFS_SB(sb)->cat_tree, &fd); if (err) return err; /* * Fail early and avoid ENOSPC during the btree operations. We may * have to split the root node at most once. */ err = hfs_bmap_reserve(fd.tree, 2 * fd.tree->depth); if (err) goto err2; hfs_cat_build_key(sb, fd.search_key, cnid, NULL); entry_size = hfs_cat_build_thread(sb, &entry, S_ISDIR(inode->i_mode) ? HFS_CDR_THD : HFS_CDR_FTH, dir->i_ino, str); err = hfs_brec_find(&fd); if (err != -ENOENT) { if (!err) err = -EEXIST; goto err2; } err = hfs_brec_insert(&fd, &entry, entry_size); if (err) goto err2; hfs_cat_build_key(sb, fd.search_key, dir->i_ino, str); entry_size = hfs_cat_build_record(&entry, cnid, inode); err = hfs_brec_find(&fd); if (err != -ENOENT) { /* panic? 
*/ if (!err) err = -EEXIST; goto err1; } err = hfs_brec_insert(&fd, &entry, entry_size); if (err) goto err1; dir->i_size++; dir->i_mtime = inode_set_ctime_current(dir); mark_inode_dirty(dir); hfs_find_exit(&fd); return 0; err1: hfs_cat_build_key(sb, fd.search_key, cnid, NULL); if (!hfs_brec_find(&fd)) hfs_brec_remove(&fd); err2: hfs_find_exit(&fd); return err; } /* * hfs_cat_compare() * * Description: * This is the comparison function used for the catalog B-tree. In * comparing catalog B-tree entries, the parent id is the most * significant field (compared as unsigned ints). The name field is * the least significant (compared in "Macintosh lexical order", * see hfs_strcmp() in string.c) * Input Variable(s): * struct hfs_cat_key *key1: pointer to the first key to compare * struct hfs_cat_key *key2: pointer to the second key to compare * Output Variable(s): * NONE * Returns: * int: negative if key1<key2, positive if key1>key2, and 0 if key1==key2 * Preconditions: * key1 and key2 point to "valid" (struct hfs_cat_key)s. * Postconditions: * This function has no side-effects */ int hfs_cat_keycmp(const btree_key *key1, const btree_key *key2) { __be32 k1p, k2p; k1p = key1->cat.ParID; k2p = key2->cat.ParID; if (k1p != k2p) return be32_to_cpu(k1p) < be32_to_cpu(k2p) ? -1 : 1; return hfs_strcmp(key1->cat.CName.name, key1->cat.CName.len, key2->cat.CName.name, key2->cat.CName.len); } /* Try to get a catalog entry for given catalog id */ // move to read_super??? int hfs_cat_find_brec(struct super_block *sb, u32 cnid, struct hfs_find_data *fd) { hfs_cat_rec rec; int res, len, type; hfs_cat_build_key(sb, fd->search_key, cnid, NULL); res = hfs_brec_read(fd, &rec, sizeof(rec)); if (res) return res; type = rec.type; if (type != HFS_CDR_THD && type != HFS_CDR_FTH) { pr_err("found bad thread record in catalog\n"); return -EIO; } fd->search_key->cat.ParID = rec.thread.ParID; len = fd->search_key->cat.CName.len = rec.thread.CName.len; if (len > HFS_NAMELEN) { pr_err("bad catalog namelength\n"); return -EIO; } memcpy(fd->search_key->cat.CName.name, rec.thread.CName.name, len); return hfs_brec_find(fd); } /* * hfs_cat_delete() * * Delete the indicated file or directory. * The associated thread is also removed unless ('with_thread'==0). */ int hfs_cat_delete(u32 cnid, struct inode *dir, const struct qstr *str) { struct super_block *sb; struct hfs_find_data fd; struct hfs_readdir_data *rd; int res, type; hfs_dbg(CAT_MOD, "delete_cat: %s,%u\n", str ? 
str->name : NULL, cnid); sb = dir->i_sb; res = hfs_find_init(HFS_SB(sb)->cat_tree, &fd); if (res) return res; hfs_cat_build_key(sb, fd.search_key, dir->i_ino, str); res = hfs_brec_find(&fd); if (res) goto out; type = hfs_bnode_read_u8(fd.bnode, fd.entryoffset); if (type == HFS_CDR_FIL) { struct hfs_cat_file file; hfs_bnode_read(fd.bnode, &file, fd.entryoffset, sizeof(file)); if (be32_to_cpu(file.FlNum) == cnid) { #if 0 hfs_free_fork(sb, &file, HFS_FK_DATA); #endif hfs_free_fork(sb, &file, HFS_FK_RSRC); } } /* we only need to take spinlock for exclusion with ->release() */ spin_lock(&HFS_I(dir)->open_dir_lock); list_for_each_entry(rd, &HFS_I(dir)->open_dir_list, list) { if (fd.tree->keycmp(fd.search_key, (void *)&rd->key) < 0) rd->file->f_pos--; } spin_unlock(&HFS_I(dir)->open_dir_lock); res = hfs_brec_remove(&fd); if (res) goto out; hfs_cat_build_key(sb, fd.search_key, cnid, NULL); res = hfs_brec_find(&fd); if (!res) { res = hfs_brec_remove(&fd); if (res) goto out; } dir->i_size--; dir->i_mtime = inode_set_ctime_current(dir); mark_inode_dirty(dir); res = 0; out: hfs_find_exit(&fd); return res; } /* * hfs_cat_move() * * Rename a file or directory, possibly to a new directory. * If the destination exists it is removed and a * (struct hfs_cat_entry) for it is returned in '*result'. */ int hfs_cat_move(u32 cnid, struct inode *src_dir, const struct qstr *src_name, struct inode *dst_dir, const struct qstr *dst_name) { struct super_block *sb; struct hfs_find_data src_fd, dst_fd; union hfs_cat_rec entry; int entry_size, type; int err; hfs_dbg(CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name, dst_dir->i_ino, dst_name->name); sb = src_dir->i_sb; err = hfs_find_init(HFS_SB(sb)->cat_tree, &src_fd); if (err) return err; dst_fd = src_fd; /* * Fail early and avoid ENOSPC during the btree operations. We may * have to split the root node at most once. */ err = hfs_bmap_reserve(src_fd.tree, 2 * src_fd.tree->depth); if (err) goto out; /* find the old dir entry and read the data */ hfs_cat_build_key(sb, src_fd.search_key, src_dir->i_ino, src_name); err = hfs_brec_find(&src_fd); if (err) goto out; if (src_fd.entrylength > sizeof(entry) || src_fd.entrylength < 0) { err = -EIO; goto out; } hfs_bnode_read(src_fd.bnode, &entry, src_fd.entryoffset, src_fd.entrylength); /* create new dir entry with the data from the old entry */ hfs_cat_build_key(sb, dst_fd.search_key, dst_dir->i_ino, dst_name); err = hfs_brec_find(&dst_fd); if (err != -ENOENT) { if (!err) err = -EEXIST; goto out; } err = hfs_brec_insert(&dst_fd, &entry, src_fd.entrylength); if (err) goto out; dst_dir->i_size++; dst_dir->i_mtime = inode_set_ctime_current(dst_dir); mark_inode_dirty(dst_dir); /* finally remove the old entry */ hfs_cat_build_key(sb, src_fd.search_key, src_dir->i_ino, src_name); err = hfs_brec_find(&src_fd); if (err) goto out; err = hfs_brec_remove(&src_fd); if (err) goto out; src_dir->i_size--; src_dir->i_mtime = inode_set_ctime_current(src_dir); mark_inode_dirty(src_dir); type = entry.type; if (type == HFS_CDR_FIL && !(entry.file.Flags & HFS_FIL_THD)) goto out; /* remove old thread entry */ hfs_cat_build_key(sb, src_fd.search_key, cnid, NULL); err = hfs_brec_find(&src_fd); if (err) goto out; err = hfs_brec_remove(&src_fd); if (err) goto out; /* create new thread entry */ hfs_cat_build_key(sb, dst_fd.search_key, cnid, NULL); entry_size = hfs_cat_build_thread(sb, &entry, type == HFS_CDR_FIL ? 
HFS_CDR_FTH : HFS_CDR_THD, dst_dir->i_ino, dst_name); err = hfs_brec_find(&dst_fd); if (err != -ENOENT) { if (!err) err = -EEXIST; goto out; } err = hfs_brec_insert(&dst_fd, &entry, entry_size); out: hfs_bnode_put(dst_fd.bnode); hfs_find_exit(&src_fd); return err; }
linux-master
fs/hfs/catalog.c
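The catalog key ordering above compares the parent CNID first, as an unsigned 32-bit value, and falls back to the name only when the parents match. A hedged sketch of that two-level compare; strcasecmp() is only a stand-in for the kernel's Macintosh-lexical hfs_strcmp(), and the fixed-size NUL-terminated name field is a simplification of the on-disk Pascal string.

#include <stdint.h>
#include <stdio.h>
#include <strings.h>

struct cat_key {
	uint32_t parent_cnid;	/* parent directory ID, most significant field */
	char name[32];		/* NUL-terminated here for simplicity */
};

/* Parent ID decides first; only equal parents fall back to the name. */
static int cat_keycmp(const struct cat_key *a, const struct cat_key *b)
{
	if (a->parent_cnid != b->parent_cnid)
		return a->parent_cnid < b->parent_cnid ? -1 : 1;
	return strcasecmp(a->name, b->name);
}

int main(void)
{
	struct cat_key k1 = { 2, "System Folder" };
	struct cat_key k2 = { 2, "applications" };
	struct cat_key k3 = { 17, "a" };

	printf("%d\n", cat_keycmp(&k1, &k2) > 0);	/* same parent, name decides: 1 */
	printf("%d\n", cat_keycmp(&k1, &k3) < 0);	/* parent decides: 1 */
	return 0;
}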
// SPDX-License-Identifier: GPL-2.0-only /****************************************************************************** ******************************************************************************* ** ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. ** Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved. ** ** ******************************************************************************* ******************************************************************************/ #include <linux/module.h> #include "dlm_internal.h" #include "lockspace.h" #include "member.h" #include "recoverd.h" #include "dir.h" #include "midcomms.h" #include "config.h" #include "memory.h" #include "lock.h" #include "recover.h" #include "requestqueue.h" #include "user.h" #include "ast.h" static int ls_count; static struct mutex ls_lock; static struct list_head lslist; static spinlock_t lslist_lock; static struct task_struct * scand_task; static ssize_t dlm_control_store(struct dlm_ls *ls, const char *buf, size_t len) { ssize_t ret = len; int n; int rc = kstrtoint(buf, 0, &n); if (rc) return rc; ls = dlm_find_lockspace_local(ls->ls_local_handle); if (!ls) return -EINVAL; switch (n) { case 0: dlm_ls_stop(ls); break; case 1: dlm_ls_start(ls); break; default: ret = -EINVAL; } dlm_put_lockspace(ls); return ret; } static ssize_t dlm_event_store(struct dlm_ls *ls, const char *buf, size_t len) { int rc = kstrtoint(buf, 0, &ls->ls_uevent_result); if (rc) return rc; set_bit(LSFL_UEVENT_WAIT, &ls->ls_flags); wake_up(&ls->ls_uevent_wait); return len; } static ssize_t dlm_id_show(struct dlm_ls *ls, char *buf) { return snprintf(buf, PAGE_SIZE, "%u\n", ls->ls_global_id); } static ssize_t dlm_id_store(struct dlm_ls *ls, const char *buf, size_t len) { int rc = kstrtouint(buf, 0, &ls->ls_global_id); if (rc) return rc; return len; } static ssize_t dlm_nodir_show(struct dlm_ls *ls, char *buf) { return snprintf(buf, PAGE_SIZE, "%u\n", dlm_no_directory(ls)); } static ssize_t dlm_nodir_store(struct dlm_ls *ls, const char *buf, size_t len) { int val; int rc = kstrtoint(buf, 0, &val); if (rc) return rc; if (val == 1) set_bit(LSFL_NODIR, &ls->ls_flags); return len; } static ssize_t dlm_recover_status_show(struct dlm_ls *ls, char *buf) { uint32_t status = dlm_recover_status(ls); return snprintf(buf, PAGE_SIZE, "%x\n", status); } static ssize_t dlm_recover_nodeid_show(struct dlm_ls *ls, char *buf) { return snprintf(buf, PAGE_SIZE, "%d\n", ls->ls_recover_nodeid); } struct dlm_attr { struct attribute attr; ssize_t (*show)(struct dlm_ls *, char *); ssize_t (*store)(struct dlm_ls *, const char *, size_t); }; static struct dlm_attr dlm_attr_control = { .attr = {.name = "control", .mode = S_IWUSR}, .store = dlm_control_store }; static struct dlm_attr dlm_attr_event = { .attr = {.name = "event_done", .mode = S_IWUSR}, .store = dlm_event_store }; static struct dlm_attr dlm_attr_id = { .attr = {.name = "id", .mode = S_IRUGO | S_IWUSR}, .show = dlm_id_show, .store = dlm_id_store }; static struct dlm_attr dlm_attr_nodir = { .attr = {.name = "nodir", .mode = S_IRUGO | S_IWUSR}, .show = dlm_nodir_show, .store = dlm_nodir_store }; static struct dlm_attr dlm_attr_recover_status = { .attr = {.name = "recover_status", .mode = S_IRUGO}, .show = dlm_recover_status_show }; static struct dlm_attr dlm_attr_recover_nodeid = { .attr = {.name = "recover_nodeid", .mode = S_IRUGO}, .show = dlm_recover_nodeid_show }; static struct attribute *dlm_attrs[] = { &dlm_attr_control.attr, &dlm_attr_event.attr, &dlm_attr_id.attr, &dlm_attr_nodir.attr, 
&dlm_attr_recover_status.attr, &dlm_attr_recover_nodeid.attr, NULL, }; ATTRIBUTE_GROUPS(dlm); static ssize_t dlm_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj); struct dlm_attr *a = container_of(attr, struct dlm_attr, attr); return a->show ? a->show(ls, buf) : 0; } static ssize_t dlm_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t len) { struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj); struct dlm_attr *a = container_of(attr, struct dlm_attr, attr); return a->store ? a->store(ls, buf, len) : len; } static void lockspace_kobj_release(struct kobject *k) { struct dlm_ls *ls = container_of(k, struct dlm_ls, ls_kobj); kfree(ls); } static const struct sysfs_ops dlm_attr_ops = { .show = dlm_attr_show, .store = dlm_attr_store, }; static struct kobj_type dlm_ktype = { .default_groups = dlm_groups, .sysfs_ops = &dlm_attr_ops, .release = lockspace_kobj_release, }; static struct kset *dlm_kset; static int do_uevent(struct dlm_ls *ls, int in) { if (in) kobject_uevent(&ls->ls_kobj, KOBJ_ONLINE); else kobject_uevent(&ls->ls_kobj, KOBJ_OFFLINE); log_rinfo(ls, "%s the lockspace group...", in ? "joining" : "leaving"); /* dlm_controld will see the uevent, do the necessary group management and then write to sysfs to wake us */ wait_event(ls->ls_uevent_wait, test_and_clear_bit(LSFL_UEVENT_WAIT, &ls->ls_flags)); log_rinfo(ls, "group event done %d", ls->ls_uevent_result); return ls->ls_uevent_result; } static int dlm_uevent(const struct kobject *kobj, struct kobj_uevent_env *env) { const struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj); add_uevent_var(env, "LOCKSPACE=%s", ls->ls_name); return 0; } static const struct kset_uevent_ops dlm_uevent_ops = { .uevent = dlm_uevent, }; int __init dlm_lockspace_init(void) { ls_count = 0; mutex_init(&ls_lock); INIT_LIST_HEAD(&lslist); spin_lock_init(&lslist_lock); dlm_kset = kset_create_and_add("dlm", &dlm_uevent_ops, kernel_kobj); if (!dlm_kset) { printk(KERN_WARNING "%s: can not create kset\n", __func__); return -ENOMEM; } return 0; } void dlm_lockspace_exit(void) { kset_unregister(dlm_kset); } static struct dlm_ls *find_ls_to_scan(void) { struct dlm_ls *ls; spin_lock(&lslist_lock); list_for_each_entry(ls, &lslist, ls_list) { if (time_after_eq(jiffies, ls->ls_scan_time + dlm_config.ci_scan_secs * HZ)) { spin_unlock(&lslist_lock); return ls; } } spin_unlock(&lslist_lock); return NULL; } static int dlm_scand(void *data) { struct dlm_ls *ls; while (!kthread_should_stop()) { ls = find_ls_to_scan(); if (ls) { if (dlm_lock_recovery_try(ls)) { ls->ls_scan_time = jiffies; dlm_scan_rsbs(ls); dlm_unlock_recovery(ls); } else { ls->ls_scan_time += HZ; } continue; } schedule_timeout_interruptible(dlm_config.ci_scan_secs * HZ); } return 0; } static int dlm_scand_start(void) { struct task_struct *p; int error = 0; p = kthread_run(dlm_scand, NULL, "dlm_scand"); if (IS_ERR(p)) error = PTR_ERR(p); else scand_task = p; return error; } static void dlm_scand_stop(void) { kthread_stop(scand_task); } struct dlm_ls *dlm_find_lockspace_global(uint32_t id) { struct dlm_ls *ls; spin_lock(&lslist_lock); list_for_each_entry(ls, &lslist, ls_list) { if (ls->ls_global_id == id) { atomic_inc(&ls->ls_count); goto out; } } ls = NULL; out: spin_unlock(&lslist_lock); return ls; } struct dlm_ls *dlm_find_lockspace_local(dlm_lockspace_t *lockspace) { struct dlm_ls *ls; spin_lock(&lslist_lock); list_for_each_entry(ls, &lslist, ls_list) { if (ls->ls_local_handle == 
lockspace) { atomic_inc(&ls->ls_count); goto out; } } ls = NULL; out: spin_unlock(&lslist_lock); return ls; } struct dlm_ls *dlm_find_lockspace_device(int minor) { struct dlm_ls *ls; spin_lock(&lslist_lock); list_for_each_entry(ls, &lslist, ls_list) { if (ls->ls_device.minor == minor) { atomic_inc(&ls->ls_count); goto out; } } ls = NULL; out: spin_unlock(&lslist_lock); return ls; } void dlm_put_lockspace(struct dlm_ls *ls) { if (atomic_dec_and_test(&ls->ls_count)) wake_up(&ls->ls_count_wait); } static void remove_lockspace(struct dlm_ls *ls) { retry: wait_event(ls->ls_count_wait, atomic_read(&ls->ls_count) == 0); spin_lock(&lslist_lock); if (atomic_read(&ls->ls_count) != 0) { spin_unlock(&lslist_lock); goto retry; } WARN_ON(ls->ls_create_count != 0); list_del(&ls->ls_list); spin_unlock(&lslist_lock); } static int threads_start(void) { int error; /* Thread for sending/receiving messages for all lockspace's */ error = dlm_midcomms_start(); if (error) { log_print("cannot start dlm midcomms %d", error); goto fail; } error = dlm_scand_start(); if (error) { log_print("cannot start dlm_scand thread %d", error); goto midcomms_fail; } return 0; midcomms_fail: dlm_midcomms_stop(); fail: return error; } static int new_lockspace(const char *name, const char *cluster, uint32_t flags, int lvblen, const struct dlm_lockspace_ops *ops, void *ops_arg, int *ops_result, dlm_lockspace_t **lockspace) { struct dlm_ls *ls; int i, size, error; int do_unreg = 0; int namelen = strlen(name); if (namelen > DLM_LOCKSPACE_LEN || namelen == 0) return -EINVAL; if (lvblen % 8) return -EINVAL; if (!try_module_get(THIS_MODULE)) return -EINVAL; if (!dlm_user_daemon_available()) { log_print("dlm user daemon not available"); error = -EUNATCH; goto out; } if (ops && ops_result) { if (!dlm_config.ci_recover_callbacks) *ops_result = -EOPNOTSUPP; else *ops_result = 0; } if (!cluster) log_print("dlm cluster name '%s' is being used without an application provided cluster name", dlm_config.ci_cluster_name); if (dlm_config.ci_recover_callbacks && cluster && strncmp(cluster, dlm_config.ci_cluster_name, DLM_LOCKSPACE_LEN)) { log_print("dlm cluster name '%s' does not match " "the application cluster name '%s'", dlm_config.ci_cluster_name, cluster); error = -EBADR; goto out; } error = 0; spin_lock(&lslist_lock); list_for_each_entry(ls, &lslist, ls_list) { WARN_ON(ls->ls_create_count <= 0); if (ls->ls_namelen != namelen) continue; if (memcmp(ls->ls_name, name, namelen)) continue; if (flags & DLM_LSFL_NEWEXCL) { error = -EEXIST; break; } ls->ls_create_count++; *lockspace = ls; error = 1; break; } spin_unlock(&lslist_lock); if (error) goto out; error = -ENOMEM; ls = kzalloc(sizeof(*ls), GFP_NOFS); if (!ls) goto out; memcpy(ls->ls_name, name, namelen); ls->ls_namelen = namelen; ls->ls_lvblen = lvblen; atomic_set(&ls->ls_count, 0); init_waitqueue_head(&ls->ls_count_wait); ls->ls_flags = 0; ls->ls_scan_time = jiffies; if (ops && dlm_config.ci_recover_callbacks) { ls->ls_ops = ops; ls->ls_ops_arg = ops_arg; } /* ls_exflags are forced to match among nodes, and we don't * need to require all nodes to have some flags set */ ls->ls_exflags = (flags & ~(DLM_LSFL_FS | DLM_LSFL_NEWEXCL)); size = READ_ONCE(dlm_config.ci_rsbtbl_size); ls->ls_rsbtbl_size = size; ls->ls_rsbtbl = vmalloc(array_size(size, sizeof(struct dlm_rsbtable))); if (!ls->ls_rsbtbl) goto out_lsfree; for (i = 0; i < size; i++) { ls->ls_rsbtbl[i].keep.rb_node = NULL; ls->ls_rsbtbl[i].toss.rb_node = NULL; spin_lock_init(&ls->ls_rsbtbl[i].lock); } for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++) 
{ ls->ls_remove_names[i] = kzalloc(DLM_RESNAME_MAXLEN+1, GFP_KERNEL); if (!ls->ls_remove_names[i]) goto out_rsbtbl; } idr_init(&ls->ls_lkbidr); spin_lock_init(&ls->ls_lkbidr_spin); INIT_LIST_HEAD(&ls->ls_waiters); mutex_init(&ls->ls_waiters_mutex); INIT_LIST_HEAD(&ls->ls_orphans); mutex_init(&ls->ls_orphans_mutex); INIT_LIST_HEAD(&ls->ls_new_rsb); spin_lock_init(&ls->ls_new_rsb_spin); INIT_LIST_HEAD(&ls->ls_nodes); INIT_LIST_HEAD(&ls->ls_nodes_gone); ls->ls_num_nodes = 0; ls->ls_low_nodeid = 0; ls->ls_total_weight = 0; ls->ls_node_array = NULL; memset(&ls->ls_local_rsb, 0, sizeof(struct dlm_rsb)); ls->ls_local_rsb.res_ls = ls; ls->ls_debug_rsb_dentry = NULL; ls->ls_debug_waiters_dentry = NULL; init_waitqueue_head(&ls->ls_uevent_wait); ls->ls_uevent_result = 0; init_completion(&ls->ls_recovery_done); ls->ls_recovery_result = -1; spin_lock_init(&ls->ls_cb_lock); INIT_LIST_HEAD(&ls->ls_cb_delay); ls->ls_recoverd_task = NULL; mutex_init(&ls->ls_recoverd_active); spin_lock_init(&ls->ls_recover_lock); spin_lock_init(&ls->ls_rcom_spin); get_random_bytes(&ls->ls_rcom_seq, sizeof(uint64_t)); ls->ls_recover_status = 0; ls->ls_recover_seq = get_random_u64(); ls->ls_recover_args = NULL; init_rwsem(&ls->ls_in_recovery); init_rwsem(&ls->ls_recv_active); INIT_LIST_HEAD(&ls->ls_requestqueue); atomic_set(&ls->ls_requestqueue_cnt, 0); init_waitqueue_head(&ls->ls_requestqueue_wait); mutex_init(&ls->ls_requestqueue_mutex); spin_lock_init(&ls->ls_clear_proc_locks); /* Due backwards compatibility with 3.1 we need to use maximum * possible dlm message size to be sure the message will fit and * not having out of bounds issues. However on sending side 3.2 * might send less. */ ls->ls_recover_buf = kmalloc(DLM_MAX_SOCKET_BUFSIZE, GFP_NOFS); if (!ls->ls_recover_buf) goto out_lkbidr; ls->ls_slot = 0; ls->ls_num_slots = 0; ls->ls_slots_size = 0; ls->ls_slots = NULL; INIT_LIST_HEAD(&ls->ls_recover_list); spin_lock_init(&ls->ls_recover_list_lock); idr_init(&ls->ls_recover_idr); spin_lock_init(&ls->ls_recover_idr_lock); ls->ls_recover_list_count = 0; ls->ls_local_handle = ls; init_waitqueue_head(&ls->ls_wait_general); INIT_LIST_HEAD(&ls->ls_root_list); init_rwsem(&ls->ls_root_sem); spin_lock(&lslist_lock); ls->ls_create_count = 1; list_add(&ls->ls_list, &lslist); spin_unlock(&lslist_lock); if (flags & DLM_LSFL_FS) { error = dlm_callback_start(ls); if (error) { log_error(ls, "can't start dlm_callback %d", error); goto out_delist; } } init_waitqueue_head(&ls->ls_recover_lock_wait); /* * Once started, dlm_recoverd first looks for ls in lslist, then * initializes ls_in_recovery as locked in "down" mode. We need * to wait for the wakeup from dlm_recoverd because in_recovery * has to start out in down mode. */ error = dlm_recoverd_start(ls); if (error) { log_error(ls, "can't start dlm_recoverd %d", error); goto out_callback; } wait_event(ls->ls_recover_lock_wait, test_bit(LSFL_RECOVER_LOCK, &ls->ls_flags)); /* let kobject handle freeing of ls if there's an error */ do_unreg = 1; ls->ls_kobj.kset = dlm_kset; error = kobject_init_and_add(&ls->ls_kobj, &dlm_ktype, NULL, "%s", ls->ls_name); if (error) goto out_recoverd; kobject_uevent(&ls->ls_kobj, KOBJ_ADD); /* This uevent triggers dlm_controld in userspace to add us to the group of nodes that are members of this lockspace (managed by the cluster infrastructure.) Once it's done that, it tells us who the current lockspace members are (via configfs) and then tells the lockspace to start running (via sysfs) in dlm_ls_start(). 
*/ error = do_uevent(ls, 1); if (error) goto out_recoverd; /* wait until recovery is successful or failed */ wait_for_completion(&ls->ls_recovery_done); error = ls->ls_recovery_result; if (error) goto out_members; dlm_create_debug_file(ls); log_rinfo(ls, "join complete"); *lockspace = ls; return 0; out_members: do_uevent(ls, 0); dlm_clear_members(ls); kfree(ls->ls_node_array); out_recoverd: dlm_recoverd_stop(ls); out_callback: dlm_callback_stop(ls); out_delist: spin_lock(&lslist_lock); list_del(&ls->ls_list); spin_unlock(&lslist_lock); idr_destroy(&ls->ls_recover_idr); kfree(ls->ls_recover_buf); out_lkbidr: idr_destroy(&ls->ls_lkbidr); out_rsbtbl: for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++) kfree(ls->ls_remove_names[i]); vfree(ls->ls_rsbtbl); out_lsfree: if (do_unreg) kobject_put(&ls->ls_kobj); else kfree(ls); out: module_put(THIS_MODULE); return error; } static int __dlm_new_lockspace(const char *name, const char *cluster, uint32_t flags, int lvblen, const struct dlm_lockspace_ops *ops, void *ops_arg, int *ops_result, dlm_lockspace_t **lockspace) { int error = 0; mutex_lock(&ls_lock); if (!ls_count) error = threads_start(); if (error) goto out; error = new_lockspace(name, cluster, flags, lvblen, ops, ops_arg, ops_result, lockspace); if (!error) ls_count++; if (error > 0) error = 0; if (!ls_count) { dlm_scand_stop(); dlm_midcomms_shutdown(); dlm_midcomms_stop(); } out: mutex_unlock(&ls_lock); return error; } int dlm_new_lockspace(const char *name, const char *cluster, uint32_t flags, int lvblen, const struct dlm_lockspace_ops *ops, void *ops_arg, int *ops_result, dlm_lockspace_t **lockspace) { return __dlm_new_lockspace(name, cluster, flags | DLM_LSFL_FS, lvblen, ops, ops_arg, ops_result, lockspace); } int dlm_new_user_lockspace(const char *name, const char *cluster, uint32_t flags, int lvblen, const struct dlm_lockspace_ops *ops, void *ops_arg, int *ops_result, dlm_lockspace_t **lockspace) { return __dlm_new_lockspace(name, cluster, flags, lvblen, ops, ops_arg, ops_result, lockspace); } static int lkb_idr_is_local(int id, void *p, void *data) { struct dlm_lkb *lkb = p; return lkb->lkb_nodeid == 0 && lkb->lkb_grmode != DLM_LOCK_IV; } static int lkb_idr_is_any(int id, void *p, void *data) { return 1; } static int lkb_idr_free(int id, void *p, void *data) { struct dlm_lkb *lkb = p; if (lkb->lkb_lvbptr && test_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags)) dlm_free_lvb(lkb->lkb_lvbptr); dlm_free_lkb(lkb); return 0; } /* NOTE: We check the lkbidr here rather than the resource table. 
This is because there may be LKBs queued as ASTs that have been unlinked from their RSBs and are pending deletion once the AST has been delivered */ static int lockspace_busy(struct dlm_ls *ls, int force) { int rv; spin_lock(&ls->ls_lkbidr_spin); if (force == 0) { rv = idr_for_each(&ls->ls_lkbidr, lkb_idr_is_any, ls); } else if (force == 1) { rv = idr_for_each(&ls->ls_lkbidr, lkb_idr_is_local, ls); } else { rv = 0; } spin_unlock(&ls->ls_lkbidr_spin); return rv; } static int release_lockspace(struct dlm_ls *ls, int force) { struct dlm_rsb *rsb; struct rb_node *n; int i, busy, rv; busy = lockspace_busy(ls, force); spin_lock(&lslist_lock); if (ls->ls_create_count == 1) { if (busy) { rv = -EBUSY; } else { /* remove_lockspace takes ls off lslist */ ls->ls_create_count = 0; rv = 0; } } else if (ls->ls_create_count > 1) { rv = --ls->ls_create_count; } else { rv = -EINVAL; } spin_unlock(&lslist_lock); if (rv) { log_debug(ls, "release_lockspace no remove %d", rv); return rv; } if (ls_count == 1) dlm_midcomms_version_wait(); dlm_device_deregister(ls); if (force < 3 && dlm_user_daemon_available()) do_uevent(ls, 0); dlm_recoverd_stop(ls); if (ls_count == 1) { dlm_scand_stop(); dlm_clear_members(ls); dlm_midcomms_shutdown(); } dlm_callback_stop(ls); remove_lockspace(ls); dlm_delete_debug_file(ls); idr_destroy(&ls->ls_recover_idr); kfree(ls->ls_recover_buf); /* * Free all lkb's in idr */ idr_for_each(&ls->ls_lkbidr, lkb_idr_free, ls); idr_destroy(&ls->ls_lkbidr); /* * Free all rsb's on rsbtbl[] lists */ for (i = 0; i < ls->ls_rsbtbl_size; i++) { while ((n = rb_first(&ls->ls_rsbtbl[i].keep))) { rsb = rb_entry(n, struct dlm_rsb, res_hashnode); rb_erase(n, &ls->ls_rsbtbl[i].keep); dlm_free_rsb(rsb); } while ((n = rb_first(&ls->ls_rsbtbl[i].toss))) { rsb = rb_entry(n, struct dlm_rsb, res_hashnode); rb_erase(n, &ls->ls_rsbtbl[i].toss); dlm_free_rsb(rsb); } } vfree(ls->ls_rsbtbl); for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++) kfree(ls->ls_remove_names[i]); while (!list_empty(&ls->ls_new_rsb)) { rsb = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb, res_hashchain); list_del(&rsb->res_hashchain); dlm_free_rsb(rsb); } /* * Free structures on any other lists */ dlm_purge_requestqueue(ls); kfree(ls->ls_recover_args); dlm_clear_members(ls); dlm_clear_members_gone(ls); kfree(ls->ls_node_array); log_rinfo(ls, "release_lockspace final free"); kobject_put(&ls->ls_kobj); /* The ls structure will be freed when the kobject is done with */ module_put(THIS_MODULE); return 0; } /* * Called when a system has released all its locks and is not going to use the * lockspace any longer. We free everything we're managing for this lockspace. * Remaining nodes will go through the recovery process as if we'd died. The * lockspace must continue to function as usual, participating in recoveries, * until this returns. 
* * Force has 4 possible values: * 0 - don't destroy lockspace if it has any LKBs * 1 - destroy lockspace if it has remote LKBs but not if it has local LKBs * 2 - destroy lockspace regardless of LKBs * 3 - destroy lockspace as part of a forced shutdown */ int dlm_release_lockspace(void *lockspace, int force) { struct dlm_ls *ls; int error; ls = dlm_find_lockspace_local(lockspace); if (!ls) return -EINVAL; dlm_put_lockspace(ls); mutex_lock(&ls_lock); error = release_lockspace(ls, force); if (!error) ls_count--; if (!ls_count) dlm_midcomms_stop(); mutex_unlock(&ls_lock); return error; } void dlm_stop_lockspaces(void) { struct dlm_ls *ls; int count; restart: count = 0; spin_lock(&lslist_lock); list_for_each_entry(ls, &lslist, ls_list) { if (!test_bit(LSFL_RUNNING, &ls->ls_flags)) { count++; continue; } spin_unlock(&lslist_lock); log_error(ls, "no userland control daemon, stopping lockspace"); dlm_ls_stop(ls); goto restart; } spin_unlock(&lslist_lock); if (count) log_print("dlm user daemon left %d lockspaces", count); }
linux-master
fs/dlm/lockspace.c
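The lockspace code above is driven through dlm_new_lockspace()/dlm_release_lockspace(), the entry points used by in-kernel callers such as cluster filesystems. Below is a minimal, hypothetical caller sketch (the module, the lockspace name "example-ls" and the lvblen value of 32 are made up); it assumes a configured cluster with dlm_controld running, since new_lockspace() otherwise fails with -EUNATCH.

/* Hypothetical caller of the API above; not part of the DLM sources. */
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/dlm.h>

static dlm_lockspace_t *example_ls;

static int __init example_init(void)
{
	int error;

	/*
	 * NULL cluster name skips the cluster-name comparison in
	 * new_lockspace(); lvblen must be a multiple of 8;
	 * DLM_LSFL_NEWEXCL makes a second create fail with -EEXIST
	 * instead of sharing the existing lockspace.
	 */
	error = dlm_new_lockspace("example-ls", NULL, DLM_LSFL_NEWEXCL, 32,
				  NULL, NULL, NULL, &example_ls);
	if (error)
		pr_err("example: dlm_new_lockspace failed %d\n", error);
	return error;
}

static void __exit example_exit(void)
{
	/* force=0: refuse to tear the lockspace down while LKBs remain */
	dlm_release_lockspace(example_ls, 0);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

The force argument passed at exit follows the 0-3 scheme documented in the comment above dlm_release_lockspace() in the source above.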
// SPDX-License-Identifier: GPL-2.0-only /****************************************************************************** ******************************************************************************* ** ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. ** Copyright (C) 2004-2010 Red Hat, Inc. All rights reserved. ** ** ******************************************************************************* ******************************************************************************/ #include <trace/events/dlm.h> #include "dlm_internal.h" #include "memory.h" #include "lock.h" #include "user.h" #include "ast.h" void dlm_release_callback(struct kref *ref) { struct dlm_callback *cb = container_of(ref, struct dlm_callback, ref); dlm_free_cb(cb); } void dlm_callback_set_last_ptr(struct dlm_callback **from, struct dlm_callback *to) { if (*from) kref_put(&(*from)->ref, dlm_release_callback); if (to) kref_get(&to->ref); *from = to; } int dlm_enqueue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode, int status, uint32_t sbflags) { struct dlm_ls *ls = lkb->lkb_resource->res_ls; int rv = DLM_ENQUEUE_CALLBACK_SUCCESS; struct dlm_callback *cb; int prev_mode; if (flags & DLM_CB_BAST) { /* if cb is a bast, it should be skipped if the blocking mode is * compatible with the last granted mode */ if (lkb->lkb_last_cast) { if (dlm_modes_compat(mode, lkb->lkb_last_cast->mode)) { log_debug(ls, "skip %x bast mode %d for cast mode %d", lkb->lkb_id, mode, lkb->lkb_last_cast->mode); goto out; } } /* * Suppress some redundant basts here, do more on removal. * Don't even add a bast if the callback just before it * is a bast for the same mode or a more restrictive mode. * (the addional > PR check is needed for PR/CW inversion) */ if (lkb->lkb_last_cb && lkb->lkb_last_cb->flags & DLM_CB_BAST) { prev_mode = lkb->lkb_last_cb->mode; if ((prev_mode == mode) || (prev_mode > mode && prev_mode > DLM_LOCK_PR)) { log_debug(ls, "skip %x add bast mode %d for bast mode %d", lkb->lkb_id, mode, prev_mode); goto out; } } } cb = dlm_allocate_cb(); if (!cb) { rv = DLM_ENQUEUE_CALLBACK_FAILURE; goto out; } cb->flags = flags; cb->mode = mode; cb->sb_status = status; cb->sb_flags = (sbflags & 0x000000FF); kref_init(&cb->ref); if (!test_and_set_bit(DLM_IFL_CB_PENDING_BIT, &lkb->lkb_iflags)) rv = DLM_ENQUEUE_CALLBACK_NEED_SCHED; list_add_tail(&cb->list, &lkb->lkb_callbacks); if (flags & DLM_CB_CAST) dlm_callback_set_last_ptr(&lkb->lkb_last_cast, cb); dlm_callback_set_last_ptr(&lkb->lkb_last_cb, cb); out: return rv; } int dlm_dequeue_lkb_callback(struct dlm_lkb *lkb, struct dlm_callback **cb) { /* oldest undelivered cb is callbacks first entry */ *cb = list_first_entry_or_null(&lkb->lkb_callbacks, struct dlm_callback, list); if (!*cb) return DLM_DEQUEUE_CALLBACK_EMPTY; /* remove it from callbacks so shift others down */ list_del(&(*cb)->list); if (list_empty(&lkb->lkb_callbacks)) return DLM_DEQUEUE_CALLBACK_LAST; return DLM_DEQUEUE_CALLBACK_SUCCESS; } void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status, uint32_t sbflags) { struct dlm_ls *ls = lkb->lkb_resource->res_ls; int rv; if (test_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags)) { dlm_user_add_ast(lkb, flags, mode, status, sbflags); return; } spin_lock(&lkb->lkb_cb_lock); rv = dlm_enqueue_lkb_callback(lkb, flags, mode, status, sbflags); switch (rv) { case DLM_ENQUEUE_CALLBACK_NEED_SCHED: kref_get(&lkb->lkb_ref); spin_lock(&ls->ls_cb_lock); if (test_bit(LSFL_CB_DELAY, &ls->ls_flags)) { list_add(&lkb->lkb_cb_list, &ls->ls_cb_delay); } else { 
queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work); } spin_unlock(&ls->ls_cb_lock); break; case DLM_ENQUEUE_CALLBACK_FAILURE: WARN_ON_ONCE(1); break; case DLM_ENQUEUE_CALLBACK_SUCCESS: break; default: WARN_ON_ONCE(1); break; } spin_unlock(&lkb->lkb_cb_lock); } void dlm_callback_work(struct work_struct *work) { struct dlm_lkb *lkb = container_of(work, struct dlm_lkb, lkb_cb_work); struct dlm_ls *ls = lkb->lkb_resource->res_ls; void (*castfn) (void *astparam); void (*bastfn) (void *astparam, int mode); struct dlm_callback *cb; int rv; spin_lock(&lkb->lkb_cb_lock); rv = dlm_dequeue_lkb_callback(lkb, &cb); if (WARN_ON_ONCE(rv == DLM_DEQUEUE_CALLBACK_EMPTY)) { clear_bit(DLM_IFL_CB_PENDING_BIT, &lkb->lkb_iflags); spin_unlock(&lkb->lkb_cb_lock); goto out; } spin_unlock(&lkb->lkb_cb_lock); for (;;) { castfn = lkb->lkb_astfn; bastfn = lkb->lkb_bastfn; if (cb->flags & DLM_CB_BAST) { trace_dlm_bast(ls, lkb, cb->mode); lkb->lkb_last_bast_time = ktime_get(); lkb->lkb_last_bast_mode = cb->mode; bastfn(lkb->lkb_astparam, cb->mode); } else if (cb->flags & DLM_CB_CAST) { lkb->lkb_lksb->sb_status = cb->sb_status; lkb->lkb_lksb->sb_flags = cb->sb_flags; trace_dlm_ast(ls, lkb); lkb->lkb_last_cast_time = ktime_get(); castfn(lkb->lkb_astparam); } kref_put(&cb->ref, dlm_release_callback); spin_lock(&lkb->lkb_cb_lock); rv = dlm_dequeue_lkb_callback(lkb, &cb); if (rv == DLM_DEQUEUE_CALLBACK_EMPTY) { clear_bit(DLM_IFL_CB_PENDING_BIT, &lkb->lkb_iflags); spin_unlock(&lkb->lkb_cb_lock); break; } spin_unlock(&lkb->lkb_cb_lock); } out: /* undo kref_get from dlm_add_callback, may cause lkb to be freed */ dlm_put_lkb(lkb); } int dlm_callback_start(struct dlm_ls *ls) { ls->ls_callback_wq = alloc_workqueue("dlm_callback", WQ_HIGHPRI | WQ_MEM_RECLAIM, 0); if (!ls->ls_callback_wq) { log_print("can't start dlm_callback workqueue"); return -ENOMEM; } return 0; } void dlm_callback_stop(struct dlm_ls *ls) { if (ls->ls_callback_wq) destroy_workqueue(ls->ls_callback_wq); } void dlm_callback_suspend(struct dlm_ls *ls) { if (ls->ls_callback_wq) { spin_lock(&ls->ls_cb_lock); set_bit(LSFL_CB_DELAY, &ls->ls_flags); spin_unlock(&ls->ls_cb_lock); flush_workqueue(ls->ls_callback_wq); } } #define MAX_CB_QUEUE 25 void dlm_callback_resume(struct dlm_ls *ls) { struct dlm_lkb *lkb, *safe; int count = 0, sum = 0; bool empty; if (!ls->ls_callback_wq) return; more: spin_lock(&ls->ls_cb_lock); list_for_each_entry_safe(lkb, safe, &ls->ls_cb_delay, lkb_cb_list) { list_del_init(&lkb->lkb_cb_list); queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work); count++; if (count == MAX_CB_QUEUE) break; } empty = list_empty(&ls->ls_cb_delay); if (empty) clear_bit(LSFL_CB_DELAY, &ls->ls_flags); spin_unlock(&ls->ls_cb_lock); sum += count; if (!empty) { count = 0; cond_resched(); goto more; } if (sum) log_rinfo(ls, "%s %d", __func__, sum); }
linux-master
fs/dlm/ast.c
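dlm_callback_work() above invokes the completion AST (lkb_astfn) and blocking AST (lkb_bastfn) that an in-kernel caller supplied to dlm_lock(). The sketch below shows the two callback shapes plus a blocking wrapper; it is hypothetical (the example_* names and the completion-based wait are made up) and assumes the dlm_lock() prototype exported through <linux/dlm.h>.

/* Hypothetical callbacks of the kind dlm_callback_work() delivers. */
#include <linux/completion.h>
#include <linux/printk.h>
#include <linux/string.h>
#include <linux/dlm.h>

struct example_lock {
	struct dlm_lksb lksb;
	struct completion granted;
};

/* completion AST: runs from the dlm_callback workqueue once a request finishes */
static void example_ast(void *astarg)
{
	struct example_lock *el = astarg;

	/* dlm_callback_work() copied sb_status/sb_flags into the lksb first */
	pr_info("lkid %x completed, status %d\n",
		el->lksb.sb_lkid, el->lksb.sb_status);
	complete(&el->granted);
}

/* blocking AST: another node requested a mode that conflicts with ours */
static void example_bast(void *astarg, int mode)
{
	pr_info("blocked request wants mode %d, please release\n", mode);
}

static int example_take_lock(dlm_lockspace_t *ls, struct example_lock *el)
{
	int error;

	init_completion(&el->granted);
	error = dlm_lock(ls, DLM_LOCK_EX, &el->lksb, 0,
			 "example-resource", strlen("example-resource"), 0,
			 example_ast, el, example_bast);
	if (error)
		return error;
	wait_for_completion(&el->granted);
	/* later: dlm_unlock(ls, el->lksb.sb_lkid, 0, &el->lksb, el) */
	return el->lksb.sb_status;
}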
// SPDX-License-Identifier: GPL-2.0-only /****************************************************************************** ******************************************************************************* ** ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. ** Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved. ** ** ******************************************************************************* ******************************************************************************/ #include "dlm_internal.h" #include "midcomms.h" #include "lowcomms.h" #include "config.h" #include "memory.h" #include "ast.h" static struct kmem_cache *writequeue_cache; static struct kmem_cache *mhandle_cache; static struct kmem_cache *msg_cache; static struct kmem_cache *lkb_cache; static struct kmem_cache *rsb_cache; static struct kmem_cache *cb_cache; int __init dlm_memory_init(void) { writequeue_cache = dlm_lowcomms_writequeue_cache_create(); if (!writequeue_cache) goto out; mhandle_cache = dlm_midcomms_cache_create(); if (!mhandle_cache) goto mhandle; lkb_cache = kmem_cache_create("dlm_lkb", sizeof(struct dlm_lkb), __alignof__(struct dlm_lkb), 0, NULL); if (!lkb_cache) goto lkb; msg_cache = dlm_lowcomms_msg_cache_create(); if (!msg_cache) goto msg; rsb_cache = kmem_cache_create("dlm_rsb", sizeof(struct dlm_rsb), __alignof__(struct dlm_rsb), 0, NULL); if (!rsb_cache) goto rsb; cb_cache = kmem_cache_create("dlm_cb", sizeof(struct dlm_callback), __alignof__(struct dlm_callback), 0, NULL); if (!cb_cache) goto cb; return 0; cb: kmem_cache_destroy(rsb_cache); rsb: kmem_cache_destroy(msg_cache); msg: kmem_cache_destroy(lkb_cache); lkb: kmem_cache_destroy(mhandle_cache); mhandle: kmem_cache_destroy(writequeue_cache); out: return -ENOMEM; } void dlm_memory_exit(void) { kmem_cache_destroy(writequeue_cache); kmem_cache_destroy(mhandle_cache); kmem_cache_destroy(msg_cache); kmem_cache_destroy(lkb_cache); kmem_cache_destroy(rsb_cache); kmem_cache_destroy(cb_cache); } char *dlm_allocate_lvb(struct dlm_ls *ls) { char *p; p = kzalloc(ls->ls_lvblen, GFP_NOFS); return p; } void dlm_free_lvb(char *p) { kfree(p); } struct dlm_rsb *dlm_allocate_rsb(struct dlm_ls *ls) { struct dlm_rsb *r; r = kmem_cache_zalloc(rsb_cache, GFP_NOFS); return r; } void dlm_free_rsb(struct dlm_rsb *r) { if (r->res_lvbptr) dlm_free_lvb(r->res_lvbptr); kmem_cache_free(rsb_cache, r); } struct dlm_lkb *dlm_allocate_lkb(struct dlm_ls *ls) { struct dlm_lkb *lkb; lkb = kmem_cache_zalloc(lkb_cache, GFP_NOFS); return lkb; } void dlm_free_lkb(struct dlm_lkb *lkb) { if (test_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags)) { struct dlm_user_args *ua; ua = lkb->lkb_ua; if (ua) { kfree(ua->lksb.sb_lvbptr); kfree(ua); } } /* drop references if they are set */ dlm_callback_set_last_ptr(&lkb->lkb_last_cast, NULL); dlm_callback_set_last_ptr(&lkb->lkb_last_cb, NULL); kmem_cache_free(lkb_cache, lkb); } struct dlm_mhandle *dlm_allocate_mhandle(gfp_t allocation) { return kmem_cache_alloc(mhandle_cache, allocation); } void dlm_free_mhandle(struct dlm_mhandle *mhandle) { kmem_cache_free(mhandle_cache, mhandle); } struct writequeue_entry *dlm_allocate_writequeue(void) { return kmem_cache_alloc(writequeue_cache, GFP_ATOMIC); } void dlm_free_writequeue(struct writequeue_entry *writequeue) { kmem_cache_free(writequeue_cache, writequeue); } struct dlm_msg *dlm_allocate_msg(gfp_t allocation) { return kmem_cache_alloc(msg_cache, allocation); } void dlm_free_msg(struct dlm_msg *msg) { kmem_cache_free(msg_cache, msg); } struct dlm_callback *dlm_allocate_cb(void) { return 
kmem_cache_alloc(cb_cache, GFP_ATOMIC); } void dlm_free_cb(struct dlm_callback *cb) { kmem_cache_free(cb_cache, cb); }
linux-master
fs/dlm/memory.c
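dlm_memory_init() above creates its kmem caches with a chain of goto labels so that a failure part-way through frees exactly what was already created. A stripped-down sketch of that pattern follows; the cache names and the example_obj type are hypothetical.

/* Generic sketch of the create-or-unwind pattern used by dlm_memory_init(). */
#include <linux/slab.h>
#include <linux/errno.h>

struct example_obj { int a; };

static struct kmem_cache *cache_a;
static struct kmem_cache *cache_b;

static int example_caches_init(void)
{
	cache_a = kmem_cache_create("example_a", sizeof(struct example_obj),
				    __alignof__(struct example_obj), 0, NULL);
	if (!cache_a)
		goto out;

	cache_b = kmem_cache_create("example_b", sizeof(struct example_obj),
				    __alignof__(struct example_obj), 0, NULL);
	if (!cache_b)
		goto free_a;

	return 0;

free_a:
	kmem_cache_destroy(cache_a);
out:
	return -ENOMEM;
}

static void example_caches_exit(void)
{
	/* kmem_cache_destroy() tolerates NULL, so teardown stays simple */
	kmem_cache_destroy(cache_b);
	kmem_cache_destroy(cache_a);
}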
// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) 2005-2008 Red Hat, Inc.  All rights reserved.
**
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "rcom.h"
#include "util.h"

#define DLM_ERRNO_EDEADLK	 35
#define DLM_ERRNO_EBADR		 53
#define DLM_ERRNO_EBADSLT	 57
#define DLM_ERRNO_EPROTO	 71
#define DLM_ERRNO_EOPNOTSUPP	 95
#define DLM_ERRNO_ETIMEDOUT	110
#define DLM_ERRNO_EINPROGRESS	115

/* higher errno values are inconsistent across architectures, so select
   one set of values for on the wire */

int to_dlm_errno(int err)
{
	switch (err) {
	case -EDEADLK:
		return -DLM_ERRNO_EDEADLK;
	case -EBADR:
		return -DLM_ERRNO_EBADR;
	case -EBADSLT:
		return -DLM_ERRNO_EBADSLT;
	case -EPROTO:
		return -DLM_ERRNO_EPROTO;
	case -EOPNOTSUPP:
		return -DLM_ERRNO_EOPNOTSUPP;
	case -ETIMEDOUT:
		return -DLM_ERRNO_ETIMEDOUT;
	case -EINPROGRESS:
		return -DLM_ERRNO_EINPROGRESS;
	}
	return err;
}

int from_dlm_errno(int err)
{
	switch (err) {
	case -DLM_ERRNO_EDEADLK:
		return -EDEADLK;
	case -DLM_ERRNO_EBADR:
		return -EBADR;
	case -DLM_ERRNO_EBADSLT:
		return -EBADSLT;
	case -DLM_ERRNO_EPROTO:
		return -EPROTO;
	case -DLM_ERRNO_EOPNOTSUPP:
		return -EOPNOTSUPP;
	case -DLM_ERRNO_ETIMEDOUT:
		return -ETIMEDOUT;
	case -DLM_ERRNO_EINPROGRESS:
		return -EINPROGRESS;
	}
	return err;
}
linux-master
fs/dlm/util.c
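to_dlm_errno()/from_dlm_errno() pin a handful of architecture-dependent errno values to fixed numbers before they cross the wire. A hypothetical round-trip sketch follows; pack_result()/unpack_result() are made-up helpers standing in for the real message code, and the function declarations are assumed to come from util.h.

/* Hypothetical illustration of the on-wire errno round trip. */
#include <linux/types.h>
#include <asm/byteorder.h>
#include "util.h"

static __le32 pack_result(int local_err)
{
	/* e.g. -ETIMEDOUT differs between architectures; the wire value
	   is always -DLM_ERRNO_ETIMEDOUT (-110) */
	return cpu_to_le32(to_dlm_errno(local_err));
}

static int unpack_result(__le32 wire)
{
	return from_dlm_errno((int)le32_to_cpu(wire));
}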
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2006-2010 Red Hat, Inc. All rights reserved. */ #include <linux/miscdevice.h> #include <linux/init.h> #include <linux/wait.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/poll.h> #include <linux/signal.h> #include <linux/spinlock.h> #include <linux/dlm.h> #include <linux/dlm_device.h> #include <linux/slab.h> #include <linux/sched/signal.h> #include <trace/events/dlm.h> #include "dlm_internal.h" #include "lockspace.h" #include "lock.h" #include "lvb_table.h" #include "user.h" #include "ast.h" #include "config.h" #include "memory.h" static const char name_prefix[] = "dlm"; static const struct file_operations device_fops; static atomic_t dlm_monitor_opened; static int dlm_monitor_unused = 1; #ifdef CONFIG_COMPAT struct dlm_lock_params32 { __u8 mode; __u8 namelen; __u16 unused; __u32 flags; __u32 lkid; __u32 parent; __u64 xid; __u64 timeout; __u32 castparam; __u32 castaddr; __u32 bastparam; __u32 bastaddr; __u32 lksb; char lvb[DLM_USER_LVB_LEN]; char name[]; }; struct dlm_write_request32 { __u32 version[3]; __u8 cmd; __u8 is64bit; __u8 unused[2]; union { struct dlm_lock_params32 lock; struct dlm_lspace_params lspace; struct dlm_purge_params purge; } i; }; struct dlm_lksb32 { __u32 sb_status; __u32 sb_lkid; __u8 sb_flags; __u32 sb_lvbptr; }; struct dlm_lock_result32 { __u32 version[3]; __u32 length; __u32 user_astaddr; __u32 user_astparam; __u32 user_lksb; struct dlm_lksb32 lksb; __u8 bast_mode; __u8 unused[3]; /* Offsets may be zero if no data is present */ __u32 lvb_offset; }; static void compat_input(struct dlm_write_request *kb, struct dlm_write_request32 *kb32, int namelen) { kb->version[0] = kb32->version[0]; kb->version[1] = kb32->version[1]; kb->version[2] = kb32->version[2]; kb->cmd = kb32->cmd; kb->is64bit = kb32->is64bit; if (kb->cmd == DLM_USER_CREATE_LOCKSPACE || kb->cmd == DLM_USER_REMOVE_LOCKSPACE) { kb->i.lspace.flags = kb32->i.lspace.flags; kb->i.lspace.minor = kb32->i.lspace.minor; memcpy(kb->i.lspace.name, kb32->i.lspace.name, namelen); } else if (kb->cmd == DLM_USER_PURGE) { kb->i.purge.nodeid = kb32->i.purge.nodeid; kb->i.purge.pid = kb32->i.purge.pid; } else { kb->i.lock.mode = kb32->i.lock.mode; kb->i.lock.namelen = kb32->i.lock.namelen; kb->i.lock.flags = kb32->i.lock.flags; kb->i.lock.lkid = kb32->i.lock.lkid; kb->i.lock.parent = kb32->i.lock.parent; kb->i.lock.xid = kb32->i.lock.xid; kb->i.lock.timeout = kb32->i.lock.timeout; kb->i.lock.castparam = (__user void *)(long)kb32->i.lock.castparam; kb->i.lock.castaddr = (__user void *)(long)kb32->i.lock.castaddr; kb->i.lock.bastparam = (__user void *)(long)kb32->i.lock.bastparam; kb->i.lock.bastaddr = (__user void *)(long)kb32->i.lock.bastaddr; kb->i.lock.lksb = (__user void *)(long)kb32->i.lock.lksb; memcpy(kb->i.lock.lvb, kb32->i.lock.lvb, DLM_USER_LVB_LEN); memcpy(kb->i.lock.name, kb32->i.lock.name, namelen); } } static void compat_output(struct dlm_lock_result *res, struct dlm_lock_result32 *res32) { memset(res32, 0, sizeof(*res32)); res32->version[0] = res->version[0]; res32->version[1] = res->version[1]; res32->version[2] = res->version[2]; res32->user_astaddr = (__u32)(__force long)res->user_astaddr; res32->user_astparam = (__u32)(__force long)res->user_astparam; res32->user_lksb = (__u32)(__force long)res->user_lksb; res32->bast_mode = res->bast_mode; res32->lvb_offset = res->lvb_offset; res32->length = res->length; res32->lksb.sb_status = res->lksb.sb_status; res32->lksb.sb_flags = res->lksb.sb_flags; res32->lksb.sb_lkid = 
res->lksb.sb_lkid; res32->lksb.sb_lvbptr = (__u32)(long)res->lksb.sb_lvbptr; } #endif /* should held proc->asts_spin lock */ void dlm_purge_lkb_callbacks(struct dlm_lkb *lkb) { struct dlm_callback *cb, *safe; list_for_each_entry_safe(cb, safe, &lkb->lkb_callbacks, list) { list_del(&cb->list); kref_put(&cb->ref, dlm_release_callback); } clear_bit(DLM_IFL_CB_PENDING_BIT, &lkb->lkb_iflags); /* invalidate */ dlm_callback_set_last_ptr(&lkb->lkb_last_cast, NULL); dlm_callback_set_last_ptr(&lkb->lkb_last_cb, NULL); lkb->lkb_last_bast_mode = -1; } /* Figure out if this lock is at the end of its life and no longer available for the application to use. The lkb still exists until the final ast is read. A lock becomes EOL in three situations: 1. a noqueue request fails with EAGAIN 2. an unlock completes with EUNLOCK 3. a cancel of a waiting request completes with ECANCEL/EDEADLK An EOL lock needs to be removed from the process's list of locks. And we can't allow any new operation on an EOL lock. This is not related to the lifetime of the lkb struct which is managed entirely by refcount. */ static int lkb_is_endoflife(int mode, int status) { switch (status) { case -DLM_EUNLOCK: return 1; case -DLM_ECANCEL: case -ETIMEDOUT: case -EDEADLK: case -EAGAIN: if (mode == DLM_LOCK_IV) return 1; break; } return 0; } /* we could possibly check if the cancel of an orphan has resulted in the lkb being removed and then remove that lkb from the orphans list and free it */ void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode, int status, uint32_t sbflags) { struct dlm_ls *ls; struct dlm_user_args *ua; struct dlm_user_proc *proc; int rv; if (test_bit(DLM_DFL_ORPHAN_BIT, &lkb->lkb_dflags) || test_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags)) return; ls = lkb->lkb_resource->res_ls; spin_lock(&ls->ls_clear_proc_locks); /* If ORPHAN/DEAD flag is set, it means the process is dead so an ast can't be delivered. For ORPHAN's, dlm_clear_proc_locks() freed lkb->ua so we can't try to use it. This second check is necessary for cases where a completion ast is received for an operation that began before clear_proc_locks did its cancel/unlock. */ if (test_bit(DLM_DFL_ORPHAN_BIT, &lkb->lkb_dflags) || test_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags)) goto out; DLM_ASSERT(lkb->lkb_ua, dlm_print_lkb(lkb);); ua = lkb->lkb_ua; proc = ua->proc; if ((flags & DLM_CB_BAST) && ua->bastaddr == NULL) goto out; if ((flags & DLM_CB_CAST) && lkb_is_endoflife(mode, status)) set_bit(DLM_IFL_ENDOFLIFE_BIT, &lkb->lkb_iflags); spin_lock(&proc->asts_spin); rv = dlm_enqueue_lkb_callback(lkb, flags, mode, status, sbflags); switch (rv) { case DLM_ENQUEUE_CALLBACK_FAILURE: spin_unlock(&proc->asts_spin); WARN_ON_ONCE(1); goto out; case DLM_ENQUEUE_CALLBACK_NEED_SCHED: kref_get(&lkb->lkb_ref); list_add_tail(&lkb->lkb_cb_list, &proc->asts); wake_up_interruptible(&proc->wait); break; case DLM_ENQUEUE_CALLBACK_SUCCESS: break; default: WARN_ON_ONCE(1); break; } spin_unlock(&proc->asts_spin); if (test_bit(DLM_IFL_ENDOFLIFE_BIT, &lkb->lkb_iflags)) { /* N.B. 
spin_lock locks_spin, not asts_spin */ spin_lock(&proc->locks_spin); if (!list_empty(&lkb->lkb_ownqueue)) { list_del_init(&lkb->lkb_ownqueue); dlm_put_lkb(lkb); } spin_unlock(&proc->locks_spin); } out: spin_unlock(&ls->ls_clear_proc_locks); } static int device_user_lock(struct dlm_user_proc *proc, struct dlm_lock_params *params) { struct dlm_ls *ls; struct dlm_user_args *ua; uint32_t lkid; int error = -ENOMEM; ls = dlm_find_lockspace_local(proc->lockspace); if (!ls) return -ENOENT; if (!params->castaddr || !params->lksb) { error = -EINVAL; goto out; } ua = kzalloc(sizeof(struct dlm_user_args), GFP_NOFS); if (!ua) goto out; ua->proc = proc; ua->user_lksb = params->lksb; ua->castparam = params->castparam; ua->castaddr = params->castaddr; ua->bastparam = params->bastparam; ua->bastaddr = params->bastaddr; ua->xid = params->xid; if (params->flags & DLM_LKF_CONVERT) { error = dlm_user_convert(ls, ua, params->mode, params->flags, params->lkid, params->lvb); } else if (params->flags & DLM_LKF_ORPHAN) { error = dlm_user_adopt_orphan(ls, ua, params->mode, params->flags, params->name, params->namelen, &lkid); if (!error) error = lkid; } else { error = dlm_user_request(ls, ua, params->mode, params->flags, params->name, params->namelen); if (!error) error = ua->lksb.sb_lkid; } out: dlm_put_lockspace(ls); return error; } static int device_user_unlock(struct dlm_user_proc *proc, struct dlm_lock_params *params) { struct dlm_ls *ls; struct dlm_user_args *ua; int error = -ENOMEM; ls = dlm_find_lockspace_local(proc->lockspace); if (!ls) return -ENOENT; ua = kzalloc(sizeof(struct dlm_user_args), GFP_NOFS); if (!ua) goto out; ua->proc = proc; ua->user_lksb = params->lksb; ua->castparam = params->castparam; ua->castaddr = params->castaddr; if (params->flags & DLM_LKF_CANCEL) error = dlm_user_cancel(ls, ua, params->flags, params->lkid); else error = dlm_user_unlock(ls, ua, params->flags, params->lkid, params->lvb); out: dlm_put_lockspace(ls); return error; } static int device_user_deadlock(struct dlm_user_proc *proc, struct dlm_lock_params *params) { struct dlm_ls *ls; int error; ls = dlm_find_lockspace_local(proc->lockspace); if (!ls) return -ENOENT; error = dlm_user_deadlock(ls, params->flags, params->lkid); dlm_put_lockspace(ls); return error; } static int dlm_device_register(struct dlm_ls *ls, char *name) { int error, len; /* The device is already registered. This happens when the lockspace is created multiple times from userspace. */ if (ls->ls_device.name) return 0; error = -ENOMEM; len = strlen(name) + strlen(name_prefix) + 2; ls->ls_device.name = kzalloc(len, GFP_NOFS); if (!ls->ls_device.name) goto fail; snprintf((char *)ls->ls_device.name, len, "%s_%s", name_prefix, name); ls->ls_device.fops = &device_fops; ls->ls_device.minor = MISC_DYNAMIC_MINOR; error = misc_register(&ls->ls_device); if (error) { kfree(ls->ls_device.name); /* this has to be set to NULL * to avoid a double-free in dlm_device_deregister */ ls->ls_device.name = NULL; } fail: return error; } int dlm_device_deregister(struct dlm_ls *ls) { /* The device is not registered. This happens when the lockspace was never used from userspace, or when device_create_lockspace() calls dlm_release_lockspace() after the register fails. 
*/ if (!ls->ls_device.name) return 0; misc_deregister(&ls->ls_device); kfree(ls->ls_device.name); return 0; } static int device_user_purge(struct dlm_user_proc *proc, struct dlm_purge_params *params) { struct dlm_ls *ls; int error; ls = dlm_find_lockspace_local(proc->lockspace); if (!ls) return -ENOENT; error = dlm_user_purge(ls, proc, params->nodeid, params->pid); dlm_put_lockspace(ls); return error; } static int device_create_lockspace(struct dlm_lspace_params *params) { dlm_lockspace_t *lockspace; struct dlm_ls *ls; int error; if (!capable(CAP_SYS_ADMIN)) return -EPERM; error = dlm_new_user_lockspace(params->name, dlm_config.ci_cluster_name, params->flags, DLM_USER_LVB_LEN, NULL, NULL, NULL, &lockspace); if (error) return error; ls = dlm_find_lockspace_local(lockspace); if (!ls) return -ENOENT; error = dlm_device_register(ls, params->name); dlm_put_lockspace(ls); if (error) dlm_release_lockspace(lockspace, 0); else error = ls->ls_device.minor; return error; } static int device_remove_lockspace(struct dlm_lspace_params *params) { dlm_lockspace_t *lockspace; struct dlm_ls *ls; int error, force = 0; if (!capable(CAP_SYS_ADMIN)) return -EPERM; ls = dlm_find_lockspace_device(params->minor); if (!ls) return -ENOENT; if (params->flags & DLM_USER_LSFLG_FORCEFREE) force = 2; lockspace = ls->ls_local_handle; dlm_put_lockspace(ls); /* The final dlm_release_lockspace waits for references to go to zero, so all processes will need to close their device for the ls before the release will proceed. release also calls the device_deregister above. Converting a positive return value from release to zero means that userspace won't know when its release was the final one, but it shouldn't need to know. */ error = dlm_release_lockspace(lockspace, force); if (error > 0) error = 0; return error; } /* Check the user's version matches ours */ static int check_version(struct dlm_write_request *req) { if (req->version[0] != DLM_DEVICE_VERSION_MAJOR || (req->version[0] == DLM_DEVICE_VERSION_MAJOR && req->version[1] > DLM_DEVICE_VERSION_MINOR)) { printk(KERN_DEBUG "dlm: process %s (%d) version mismatch " "user (%d.%d.%d) kernel (%d.%d.%d)\n", current->comm, task_pid_nr(current), req->version[0], req->version[1], req->version[2], DLM_DEVICE_VERSION_MAJOR, DLM_DEVICE_VERSION_MINOR, DLM_DEVICE_VERSION_PATCH); return -EINVAL; } return 0; } /* * device_write * * device_user_lock * dlm_user_request -> request_lock * dlm_user_convert -> convert_lock * * device_user_unlock * dlm_user_unlock -> unlock_lock * dlm_user_cancel -> cancel_lock * * device_create_lockspace * dlm_new_lockspace * * device_remove_lockspace * dlm_release_lockspace */ /* a write to a lockspace device is a lock or unlock request, a write to the control device is to create/remove a lockspace */ static ssize_t device_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct dlm_user_proc *proc = file->private_data; struct dlm_write_request *kbuf; int error; #ifdef CONFIG_COMPAT if (count < sizeof(struct dlm_write_request32)) #else if (count < sizeof(struct dlm_write_request)) #endif return -EINVAL; /* * can't compare against COMPAT/dlm_write_request32 because * we don't yet know if is64bit is zero */ if (count > sizeof(struct dlm_write_request) + DLM_RESNAME_MAXLEN) return -EINVAL; kbuf = memdup_user_nul(buf, count); if (IS_ERR(kbuf)) return PTR_ERR(kbuf); if (check_version(kbuf)) { error = -EBADE; goto out_free; } #ifdef CONFIG_COMPAT if (!kbuf->is64bit) { struct dlm_write_request32 *k32buf; int namelen = 0; if (count > 
sizeof(struct dlm_write_request32)) namelen = count - sizeof(struct dlm_write_request32); k32buf = (struct dlm_write_request32 *)kbuf; /* add 1 after namelen so that the name string is terminated */ kbuf = kzalloc(sizeof(struct dlm_write_request) + namelen + 1, GFP_NOFS); if (!kbuf) { kfree(k32buf); return -ENOMEM; } if (proc) set_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags); compat_input(kbuf, k32buf, namelen); kfree(k32buf); } #endif /* do we really need this? can a write happen after a close? */ if ((kbuf->cmd == DLM_USER_LOCK || kbuf->cmd == DLM_USER_UNLOCK) && (proc && test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))) { error = -EINVAL; goto out_free; } error = -EINVAL; switch (kbuf->cmd) { case DLM_USER_LOCK: if (!proc) { log_print("no locking on control device"); goto out_free; } error = device_user_lock(proc, &kbuf->i.lock); break; case DLM_USER_UNLOCK: if (!proc) { log_print("no locking on control device"); goto out_free; } error = device_user_unlock(proc, &kbuf->i.lock); break; case DLM_USER_DEADLOCK: if (!proc) { log_print("no locking on control device"); goto out_free; } error = device_user_deadlock(proc, &kbuf->i.lock); break; case DLM_USER_CREATE_LOCKSPACE: if (proc) { log_print("create/remove only on control device"); goto out_free; } error = device_create_lockspace(&kbuf->i.lspace); break; case DLM_USER_REMOVE_LOCKSPACE: if (proc) { log_print("create/remove only on control device"); goto out_free; } error = device_remove_lockspace(&kbuf->i.lspace); break; case DLM_USER_PURGE: if (!proc) { log_print("no locking on control device"); goto out_free; } error = device_user_purge(proc, &kbuf->i.purge); break; default: log_print("Unknown command passed to DLM device : %d\n", kbuf->cmd); } out_free: kfree(kbuf); return error; } /* Every process that opens the lockspace device has its own "proc" structure hanging off the open file that's used to keep track of locks owned by the process and asts that need to be delivered to the process. 
*/ static int device_open(struct inode *inode, struct file *file) { struct dlm_user_proc *proc; struct dlm_ls *ls; ls = dlm_find_lockspace_device(iminor(inode)); if (!ls) return -ENOENT; proc = kzalloc(sizeof(struct dlm_user_proc), GFP_NOFS); if (!proc) { dlm_put_lockspace(ls); return -ENOMEM; } proc->lockspace = ls->ls_local_handle; INIT_LIST_HEAD(&proc->asts); INIT_LIST_HEAD(&proc->locks); INIT_LIST_HEAD(&proc->unlocking); spin_lock_init(&proc->asts_spin); spin_lock_init(&proc->locks_spin); init_waitqueue_head(&proc->wait); file->private_data = proc; return 0; } static int device_close(struct inode *inode, struct file *file) { struct dlm_user_proc *proc = file->private_data; struct dlm_ls *ls; ls = dlm_find_lockspace_local(proc->lockspace); if (!ls) return -ENOENT; set_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags); dlm_clear_proc_locks(ls, proc); /* at this point no more lkb's should exist for this lockspace, so there's no chance of dlm_user_add_ast() being called and looking for lkb->ua->proc */ kfree(proc); file->private_data = NULL; dlm_put_lockspace(ls); dlm_put_lockspace(ls); /* for the find in device_open() */ /* FIXME: AUTOFREE: if this ls is no longer used do device_remove_lockspace() */ return 0; } static int copy_result_to_user(struct dlm_user_args *ua, int compat, uint32_t flags, int mode, int copy_lvb, char __user *buf, size_t count) { #ifdef CONFIG_COMPAT struct dlm_lock_result32 result32; #endif struct dlm_lock_result result; void *resultptr; int error=0; int len; int struct_len; memset(&result, 0, sizeof(struct dlm_lock_result)); result.version[0] = DLM_DEVICE_VERSION_MAJOR; result.version[1] = DLM_DEVICE_VERSION_MINOR; result.version[2] = DLM_DEVICE_VERSION_PATCH; memcpy(&result.lksb, &ua->lksb, offsetof(struct dlm_lksb, sb_lvbptr)); result.user_lksb = ua->user_lksb; /* FIXME: dlm1 provides for the user's bastparam/addr to not be updated in a conversion unless the conversion is successful. See code in dlm_user_convert() for updating ua from ua_tmp. OpenVMS, though, notes that a new blocking AST address and parameter are set even if the conversion fails, so maybe we should just do that. 
*/ if (flags & DLM_CB_BAST) { result.user_astaddr = ua->bastaddr; result.user_astparam = ua->bastparam; result.bast_mode = mode; } else { result.user_astaddr = ua->castaddr; result.user_astparam = ua->castparam; } #ifdef CONFIG_COMPAT if (compat) len = sizeof(struct dlm_lock_result32); else #endif len = sizeof(struct dlm_lock_result); struct_len = len; /* copy lvb to userspace if there is one, it's been updated, and the user buffer has space for it */ if (copy_lvb && ua->lksb.sb_lvbptr && count >= len + DLM_USER_LVB_LEN) { if (copy_to_user(buf+len, ua->lksb.sb_lvbptr, DLM_USER_LVB_LEN)) { error = -EFAULT; goto out; } result.lvb_offset = len; len += DLM_USER_LVB_LEN; } result.length = len; resultptr = &result; #ifdef CONFIG_COMPAT if (compat) { compat_output(&result, &result32); resultptr = &result32; } #endif if (copy_to_user(buf, resultptr, struct_len)) error = -EFAULT; else error = len; out: return error; } static int copy_version_to_user(char __user *buf, size_t count) { struct dlm_device_version ver; memset(&ver, 0, sizeof(struct dlm_device_version)); ver.version[0] = DLM_DEVICE_VERSION_MAJOR; ver.version[1] = DLM_DEVICE_VERSION_MINOR; ver.version[2] = DLM_DEVICE_VERSION_PATCH; if (copy_to_user(buf, &ver, sizeof(struct dlm_device_version))) return -EFAULT; return sizeof(struct dlm_device_version); } /* a read returns a single ast described in a struct dlm_lock_result */ static ssize_t device_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct dlm_user_proc *proc = file->private_data; struct dlm_lkb *lkb; DECLARE_WAITQUEUE(wait, current); struct dlm_callback *cb; int rv, copy_lvb = 0; int old_mode, new_mode; if (count == sizeof(struct dlm_device_version)) { rv = copy_version_to_user(buf, count); return rv; } if (!proc) { log_print("non-version read from control device %zu", count); return -EINVAL; } #ifdef CONFIG_COMPAT if (count < sizeof(struct dlm_lock_result32)) #else if (count < sizeof(struct dlm_lock_result)) #endif return -EINVAL; try_another: /* do we really need this? can a read happen after a close? 
*/ if (test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags)) return -EINVAL; spin_lock(&proc->asts_spin); if (list_empty(&proc->asts)) { if (file->f_flags & O_NONBLOCK) { spin_unlock(&proc->asts_spin); return -EAGAIN; } add_wait_queue(&proc->wait, &wait); repeat: set_current_state(TASK_INTERRUPTIBLE); if (list_empty(&proc->asts) && !signal_pending(current)) { spin_unlock(&proc->asts_spin); schedule(); spin_lock(&proc->asts_spin); goto repeat; } set_current_state(TASK_RUNNING); remove_wait_queue(&proc->wait, &wait); if (signal_pending(current)) { spin_unlock(&proc->asts_spin); return -ERESTARTSYS; } } /* if we empty lkb_callbacks, we don't want to unlock the spinlock without removing lkb_cb_list; so empty lkb_cb_list is always consistent with empty lkb_callbacks */ lkb = list_first_entry(&proc->asts, struct dlm_lkb, lkb_cb_list); /* rem_lkb_callback sets a new lkb_last_cast */ old_mode = lkb->lkb_last_cast->mode; rv = dlm_dequeue_lkb_callback(lkb, &cb); switch (rv) { case DLM_DEQUEUE_CALLBACK_EMPTY: /* this shouldn't happen; lkb should have been removed from * list when last item was dequeued */ log_print("dlm_rem_lkb_callback empty %x", lkb->lkb_id); list_del_init(&lkb->lkb_cb_list); spin_unlock(&proc->asts_spin); /* removes ref for proc->asts, may cause lkb to be freed */ dlm_put_lkb(lkb); WARN_ON_ONCE(1); goto try_another; case DLM_DEQUEUE_CALLBACK_LAST: list_del_init(&lkb->lkb_cb_list); clear_bit(DLM_IFL_CB_PENDING_BIT, &lkb->lkb_iflags); break; case DLM_DEQUEUE_CALLBACK_SUCCESS: break; default: WARN_ON_ONCE(1); break; } spin_unlock(&proc->asts_spin); if (cb->flags & DLM_CB_BAST) { trace_dlm_bast(lkb->lkb_resource->res_ls, lkb, cb->mode); } else if (cb->flags & DLM_CB_CAST) { new_mode = cb->mode; if (!cb->sb_status && lkb->lkb_lksb->sb_lvbptr && dlm_lvb_operations[old_mode + 1][new_mode + 1]) copy_lvb = 1; lkb->lkb_lksb->sb_status = cb->sb_status; lkb->lkb_lksb->sb_flags = cb->sb_flags; trace_dlm_ast(lkb->lkb_resource->res_ls, lkb); } rv = copy_result_to_user(lkb->lkb_ua, test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags), cb->flags, cb->mode, copy_lvb, buf, count); kref_put(&cb->ref, dlm_release_callback); /* removes ref for proc->asts, may cause lkb to be freed */ if (rv == DLM_DEQUEUE_CALLBACK_LAST) dlm_put_lkb(lkb); return rv; } static __poll_t device_poll(struct file *file, poll_table *wait) { struct dlm_user_proc *proc = file->private_data; poll_wait(file, &proc->wait, wait); spin_lock(&proc->asts_spin); if (!list_empty(&proc->asts)) { spin_unlock(&proc->asts_spin); return EPOLLIN | EPOLLRDNORM; } spin_unlock(&proc->asts_spin); return 0; } int dlm_user_daemon_available(void) { /* dlm_controld hasn't started (or, has started, but not properly populated configfs) */ if (!dlm_our_nodeid()) return 0; /* This is to deal with versions of dlm_controld that don't know about the monitor device. We assume that if the dlm_controld was started (above), but the monitor device was never opened, that it's an old version. dlm_controld should open the monitor device before populating configfs. */ if (dlm_monitor_unused) return 1; return atomic_read(&dlm_monitor_opened) ? 
1 : 0; } static int ctl_device_open(struct inode *inode, struct file *file) { file->private_data = NULL; return 0; } static int ctl_device_close(struct inode *inode, struct file *file) { return 0; } static int monitor_device_open(struct inode *inode, struct file *file) { atomic_inc(&dlm_monitor_opened); dlm_monitor_unused = 0; return 0; } static int monitor_device_close(struct inode *inode, struct file *file) { if (atomic_dec_and_test(&dlm_monitor_opened)) dlm_stop_lockspaces(); return 0; } static const struct file_operations device_fops = { .open = device_open, .release = device_close, .read = device_read, .write = device_write, .poll = device_poll, .owner = THIS_MODULE, .llseek = noop_llseek, }; static const struct file_operations ctl_device_fops = { .open = ctl_device_open, .release = ctl_device_close, .read = device_read, .write = device_write, .owner = THIS_MODULE, .llseek = noop_llseek, }; static struct miscdevice ctl_device = { .name = "dlm-control", .fops = &ctl_device_fops, .minor = MISC_DYNAMIC_MINOR, }; static const struct file_operations monitor_device_fops = { .open = monitor_device_open, .release = monitor_device_close, .owner = THIS_MODULE, .llseek = noop_llseek, }; static struct miscdevice monitor_device = { .name = "dlm-monitor", .fops = &monitor_device_fops, .minor = MISC_DYNAMIC_MINOR, }; int __init dlm_user_init(void) { int error; atomic_set(&dlm_monitor_opened, 0); error = misc_register(&ctl_device); if (error) { log_print("misc_register failed for control device"); goto out; } error = misc_register(&monitor_device); if (error) { log_print("misc_register failed for monitor device"); misc_deregister(&ctl_device); } out: return error; } void dlm_user_exit(void) { misc_deregister(&ctl_device); misc_deregister(&monitor_device); }
linux-master
fs/dlm/user.c
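device_write() above is normally driven by libdlm, but the raw protocol is visible in the code: userspace writes a struct dlm_write_request to the dlm-control misc device, and for DLM_USER_CREATE_LOCKSPACE the write's return value is the minor number of the newly registered per-lockspace device (see device_create_lockspace()). The userspace sketch below is hypothetical: it assumes CAP_SYS_ADMIN, a running dlm_controld, and a udev-provided device node path, and the lockspace name "example-ls" is made up.

/* Hypothetical userspace sketch; real applications should use libdlm. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <linux/dlm_device.h>

int main(void)
{
	const char *name = "example-ls";
	size_t len = sizeof(struct dlm_write_request) + strlen(name) + 1;
	struct dlm_write_request *req = calloc(1, len);
	ssize_t minor;
	int fd;

	if (!req)
		return 1;

	req->version[0] = DLM_DEVICE_VERSION_MAJOR;
	req->version[1] = DLM_DEVICE_VERSION_MINOR;
	req->version[2] = DLM_DEVICE_VERSION_PATCH;
	req->cmd = DLM_USER_CREATE_LOCKSPACE;
	req->is64bit = (sizeof(long) == 8);
	strcpy(req->i.lspace.name, name);

	/* node path depends on udev rules; the misc device is named "dlm-control" */
	fd = open("/dev/dlm-control", O_RDWR);
	if (fd < 0) {
		perror("open dlm-control");
		free(req);
		return 1;
	}

	/* device_write() returns the misc minor of the new "dlm_<name>" device */
	minor = write(fd, req, len);
	if (minor < 0)
		perror("create lockspace");
	else
		printf("lockspace '%s' registered as minor %zd\n", name, minor);

	close(fd);
	free(req);
	return 0;
}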
// SPDX-License-Identifier: GPL-2.0-only /****************************************************************************** ******************************************************************************* ** ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. ** Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved. ** ** ******************************************************************************* ******************************************************************************/ #include "dlm_internal.h" #include "lockspace.h" #include "member.h" #include "lowcomms.h" #include "midcomms.h" #include "rcom.h" #include "recover.h" #include "dir.h" #include "config.h" #include "memory.h" #include "lock.h" #include "util.h" static int rcom_response(struct dlm_ls *ls) { return test_bit(LSFL_RCOM_READY, &ls->ls_flags); } static void _create_rcom(struct dlm_ls *ls, int to_nodeid, int type, int len, struct dlm_rcom **rc_ret, char *mb, int mb_len, uint64_t seq) { struct dlm_rcom *rc; rc = (struct dlm_rcom *) mb; rc->rc_header.h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR); rc->rc_header.u.h_lockspace = cpu_to_le32(ls->ls_global_id); rc->rc_header.h_nodeid = cpu_to_le32(dlm_our_nodeid()); rc->rc_header.h_length = cpu_to_le16(mb_len); rc->rc_header.h_cmd = DLM_RCOM; rc->rc_type = cpu_to_le32(type); rc->rc_seq = cpu_to_le64(seq); *rc_ret = rc; } static int create_rcom(struct dlm_ls *ls, int to_nodeid, int type, int len, struct dlm_rcom **rc_ret, struct dlm_mhandle **mh_ret, uint64_t seq) { int mb_len = sizeof(struct dlm_rcom) + len; struct dlm_mhandle *mh; char *mb; mh = dlm_midcomms_get_mhandle(to_nodeid, mb_len, GFP_NOFS, &mb); if (!mh) { log_print("%s to %d type %d len %d ENOBUFS", __func__, to_nodeid, type, len); return -ENOBUFS; } _create_rcom(ls, to_nodeid, type, len, rc_ret, mb, mb_len, seq); *mh_ret = mh; return 0; } static int create_rcom_stateless(struct dlm_ls *ls, int to_nodeid, int type, int len, struct dlm_rcom **rc_ret, struct dlm_msg **msg_ret, uint64_t seq) { int mb_len = sizeof(struct dlm_rcom) + len; struct dlm_msg *msg; char *mb; msg = dlm_lowcomms_new_msg(to_nodeid, mb_len, GFP_NOFS, &mb, NULL, NULL); if (!msg) { log_print("create_rcom to %d type %d len %d ENOBUFS", to_nodeid, type, len); return -ENOBUFS; } _create_rcom(ls, to_nodeid, type, len, rc_ret, mb, mb_len, seq); *msg_ret = msg; return 0; } static void send_rcom(struct dlm_mhandle *mh, struct dlm_rcom *rc) { dlm_midcomms_commit_mhandle(mh, NULL, 0); } static void send_rcom_stateless(struct dlm_msg *msg, struct dlm_rcom *rc) { dlm_lowcomms_commit_msg(msg); dlm_lowcomms_put_msg(msg); } static void set_rcom_status(struct dlm_ls *ls, struct rcom_status *rs, uint32_t flags) { rs->rs_flags = cpu_to_le32(flags); } /* When replying to a status request, a node also sends back its configuration values. The requesting node then checks that the remote node is configured the same way as itself. 
*/ static void set_rcom_config(struct dlm_ls *ls, struct rcom_config *rf, uint32_t num_slots) { rf->rf_lvblen = cpu_to_le32(ls->ls_lvblen); rf->rf_lsflags = cpu_to_le32(ls->ls_exflags); rf->rf_our_slot = cpu_to_le16(ls->ls_slot); rf->rf_num_slots = cpu_to_le16(num_slots); rf->rf_generation = cpu_to_le32(ls->ls_generation); } static int check_rcom_config(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid) { struct rcom_config *rf = (struct rcom_config *) rc->rc_buf; if ((le32_to_cpu(rc->rc_header.h_version) & 0xFFFF0000) != DLM_HEADER_MAJOR) { log_error(ls, "version mismatch: %x nodeid %d: %x", DLM_HEADER_MAJOR | DLM_HEADER_MINOR, nodeid, le32_to_cpu(rc->rc_header.h_version)); return -EPROTO; } if (le32_to_cpu(rf->rf_lvblen) != ls->ls_lvblen || le32_to_cpu(rf->rf_lsflags) != ls->ls_exflags) { log_error(ls, "config mismatch: %d,%x nodeid %d: %d,%x", ls->ls_lvblen, ls->ls_exflags, nodeid, le32_to_cpu(rf->rf_lvblen), le32_to_cpu(rf->rf_lsflags)); return -EPROTO; } return 0; } static void allow_sync_reply(struct dlm_ls *ls, __le64 *new_seq) { spin_lock(&ls->ls_rcom_spin); *new_seq = cpu_to_le64(++ls->ls_rcom_seq); set_bit(LSFL_RCOM_WAIT, &ls->ls_flags); spin_unlock(&ls->ls_rcom_spin); } static void disallow_sync_reply(struct dlm_ls *ls) { spin_lock(&ls->ls_rcom_spin); clear_bit(LSFL_RCOM_WAIT, &ls->ls_flags); clear_bit(LSFL_RCOM_READY, &ls->ls_flags); spin_unlock(&ls->ls_rcom_spin); } /* * low nodeid gathers one slot value at a time from each node. * it sets need_slots=0, and saves rf_our_slot returned from each * rcom_config. * * other nodes gather all slot values at once from the low nodeid. * they set need_slots=1, and ignore the rf_our_slot returned from each * rcom_config. they use the rf_num_slots returned from the low * node's rcom_config. */ int dlm_rcom_status(struct dlm_ls *ls, int nodeid, uint32_t status_flags, uint64_t seq) { struct dlm_rcom *rc; struct dlm_msg *msg; int error = 0; ls->ls_recover_nodeid = nodeid; if (nodeid == dlm_our_nodeid()) { rc = ls->ls_recover_buf; rc->rc_result = cpu_to_le32(dlm_recover_status(ls)); goto out; } retry: error = create_rcom_stateless(ls, nodeid, DLM_RCOM_STATUS, sizeof(struct rcom_status), &rc, &msg, seq); if (error) goto out; set_rcom_status(ls, (struct rcom_status *)rc->rc_buf, status_flags); allow_sync_reply(ls, &rc->rc_id); memset(ls->ls_recover_buf, 0, DLM_MAX_SOCKET_BUFSIZE); send_rcom_stateless(msg, rc); error = dlm_wait_function(ls, &rcom_response); disallow_sync_reply(ls); if (error == -ETIMEDOUT) goto retry; if (error) goto out; rc = ls->ls_recover_buf; if (rc->rc_result == cpu_to_le32(-ESRCH)) { /* we pretend the remote lockspace exists with 0 status */ log_debug(ls, "remote node %d not ready", nodeid); rc->rc_result = 0; error = 0; } else { error = check_rcom_config(ls, rc, nodeid); } /* the caller looks at rc_result for the remote recovery status */ out: return error; } static void receive_rcom_status(struct dlm_ls *ls, const struct dlm_rcom *rc_in, uint64_t seq) { struct dlm_rcom *rc; struct rcom_status *rs; uint32_t status; int nodeid = le32_to_cpu(rc_in->rc_header.h_nodeid); int len = sizeof(struct rcom_config); struct dlm_msg *msg; int num_slots = 0; int error; if (!dlm_slots_version(&rc_in->rc_header)) { status = dlm_recover_status(ls); goto do_create; } rs = (struct rcom_status *)rc_in->rc_buf; if (!(le32_to_cpu(rs->rs_flags) & DLM_RSF_NEED_SLOTS)) { status = dlm_recover_status(ls); goto do_create; } spin_lock(&ls->ls_recover_lock); status = ls->ls_recover_status; num_slots = ls->ls_num_slots; 
spin_unlock(&ls->ls_recover_lock); len += num_slots * sizeof(struct rcom_slot); do_create: error = create_rcom_stateless(ls, nodeid, DLM_RCOM_STATUS_REPLY, len, &rc, &msg, seq); if (error) return; rc->rc_id = rc_in->rc_id; rc->rc_seq_reply = rc_in->rc_seq; rc->rc_result = cpu_to_le32(status); set_rcom_config(ls, (struct rcom_config *)rc->rc_buf, num_slots); if (!num_slots) goto do_send; spin_lock(&ls->ls_recover_lock); if (ls->ls_num_slots != num_slots) { spin_unlock(&ls->ls_recover_lock); log_debug(ls, "receive_rcom_status num_slots %d to %d", num_slots, ls->ls_num_slots); rc->rc_result = 0; set_rcom_config(ls, (struct rcom_config *)rc->rc_buf, 0); goto do_send; } dlm_slots_copy_out(ls, rc); spin_unlock(&ls->ls_recover_lock); do_send: send_rcom_stateless(msg, rc); } static void receive_sync_reply(struct dlm_ls *ls, const struct dlm_rcom *rc_in) { spin_lock(&ls->ls_rcom_spin); if (!test_bit(LSFL_RCOM_WAIT, &ls->ls_flags) || le64_to_cpu(rc_in->rc_id) != ls->ls_rcom_seq) { log_debug(ls, "reject reply %d from %d seq %llx expect %llx", le32_to_cpu(rc_in->rc_type), le32_to_cpu(rc_in->rc_header.h_nodeid), (unsigned long long)le64_to_cpu(rc_in->rc_id), (unsigned long long)ls->ls_rcom_seq); goto out; } memcpy(ls->ls_recover_buf, rc_in, le16_to_cpu(rc_in->rc_header.h_length)); set_bit(LSFL_RCOM_READY, &ls->ls_flags); clear_bit(LSFL_RCOM_WAIT, &ls->ls_flags); wake_up(&ls->ls_wait_general); out: spin_unlock(&ls->ls_rcom_spin); } int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name, int last_len, uint64_t seq) { struct dlm_mhandle *mh; struct dlm_rcom *rc; int error = 0; ls->ls_recover_nodeid = nodeid; retry: error = create_rcom(ls, nodeid, DLM_RCOM_NAMES, last_len, &rc, &mh, seq); if (error) goto out; memcpy(rc->rc_buf, last_name, last_len); allow_sync_reply(ls, &rc->rc_id); memset(ls->ls_recover_buf, 0, DLM_MAX_SOCKET_BUFSIZE); send_rcom(mh, rc); error = dlm_wait_function(ls, &rcom_response); disallow_sync_reply(ls); if (error == -ETIMEDOUT) goto retry; out: return error; } static void receive_rcom_names(struct dlm_ls *ls, const struct dlm_rcom *rc_in, uint64_t seq) { struct dlm_mhandle *mh; struct dlm_rcom *rc; int error, inlen, outlen, nodeid; nodeid = le32_to_cpu(rc_in->rc_header.h_nodeid); inlen = le16_to_cpu(rc_in->rc_header.h_length) - sizeof(struct dlm_rcom); outlen = DLM_MAX_APP_BUFSIZE - sizeof(struct dlm_rcom); error = create_rcom(ls, nodeid, DLM_RCOM_NAMES_REPLY, outlen, &rc, &mh, seq); if (error) return; rc->rc_id = rc_in->rc_id; rc->rc_seq_reply = rc_in->rc_seq; dlm_copy_master_names(ls, rc_in->rc_buf, inlen, rc->rc_buf, outlen, nodeid); send_rcom(mh, rc); } int dlm_send_rcom_lookup(struct dlm_rsb *r, int dir_nodeid, uint64_t seq) { struct dlm_rcom *rc; struct dlm_mhandle *mh; struct dlm_ls *ls = r->res_ls; int error; error = create_rcom(ls, dir_nodeid, DLM_RCOM_LOOKUP, r->res_length, &rc, &mh, seq); if (error) goto out; memcpy(rc->rc_buf, r->res_name, r->res_length); rc->rc_id = cpu_to_le64(r->res_id); send_rcom(mh, rc); out: return error; } static void receive_rcom_lookup(struct dlm_ls *ls, const struct dlm_rcom *rc_in, uint64_t seq) { struct dlm_rcom *rc; struct dlm_mhandle *mh; int error, ret_nodeid, nodeid = le32_to_cpu(rc_in->rc_header.h_nodeid); int len = le16_to_cpu(rc_in->rc_header.h_length) - sizeof(struct dlm_rcom); /* Old code would send this special id to trigger a debug dump. 
*/ if (rc_in->rc_id == cpu_to_le64(0xFFFFFFFF)) { log_error(ls, "receive_rcom_lookup dump from %d", nodeid); dlm_dump_rsb_name(ls, rc_in->rc_buf, len); return; } error = create_rcom(ls, nodeid, DLM_RCOM_LOOKUP_REPLY, 0, &rc, &mh, seq); if (error) return; error = dlm_master_lookup(ls, nodeid, rc_in->rc_buf, len, DLM_LU_RECOVER_MASTER, &ret_nodeid, NULL); if (error) ret_nodeid = error; rc->rc_result = cpu_to_le32(ret_nodeid); rc->rc_id = rc_in->rc_id; rc->rc_seq_reply = rc_in->rc_seq; send_rcom(mh, rc); } static void receive_rcom_lookup_reply(struct dlm_ls *ls, const struct dlm_rcom *rc_in) { dlm_recover_master_reply(ls, rc_in); } static void pack_rcom_lock(struct dlm_rsb *r, struct dlm_lkb *lkb, struct rcom_lock *rl) { memset(rl, 0, sizeof(*rl)); rl->rl_ownpid = cpu_to_le32(lkb->lkb_ownpid); rl->rl_lkid = cpu_to_le32(lkb->lkb_id); rl->rl_exflags = cpu_to_le32(lkb->lkb_exflags); rl->rl_flags = cpu_to_le32(dlm_dflags_val(lkb)); rl->rl_lvbseq = cpu_to_le32(lkb->lkb_lvbseq); rl->rl_rqmode = lkb->lkb_rqmode; rl->rl_grmode = lkb->lkb_grmode; rl->rl_status = lkb->lkb_status; rl->rl_wait_type = cpu_to_le16(lkb->lkb_wait_type); if (lkb->lkb_bastfn) rl->rl_asts |= DLM_CB_BAST; if (lkb->lkb_astfn) rl->rl_asts |= DLM_CB_CAST; rl->rl_namelen = cpu_to_le16(r->res_length); memcpy(rl->rl_name, r->res_name, r->res_length); /* FIXME: might we have an lvb without DLM_LKF_VALBLK set ? If so, receive_rcom_lock_args() won't take this copy. */ if (lkb->lkb_lvbptr) memcpy(rl->rl_lvb, lkb->lkb_lvbptr, r->res_ls->ls_lvblen); } int dlm_send_rcom_lock(struct dlm_rsb *r, struct dlm_lkb *lkb, uint64_t seq) { struct dlm_ls *ls = r->res_ls; struct dlm_rcom *rc; struct dlm_mhandle *mh; struct rcom_lock *rl; int error, len = sizeof(struct rcom_lock); if (lkb->lkb_lvbptr) len += ls->ls_lvblen; error = create_rcom(ls, r->res_nodeid, DLM_RCOM_LOCK, len, &rc, &mh, seq); if (error) goto out; rl = (struct rcom_lock *) rc->rc_buf; pack_rcom_lock(r, lkb, rl); rc->rc_id = cpu_to_le64((uintptr_t)r); send_rcom(mh, rc); out: return error; } /* needs at least dlm_rcom + rcom_lock */ static void receive_rcom_lock(struct dlm_ls *ls, const struct dlm_rcom *rc_in, uint64_t seq) { __le32 rl_remid, rl_result; struct rcom_lock *rl; struct dlm_rcom *rc; struct dlm_mhandle *mh; int error, nodeid = le32_to_cpu(rc_in->rc_header.h_nodeid); dlm_recover_master_copy(ls, rc_in, &rl_remid, &rl_result); error = create_rcom(ls, nodeid, DLM_RCOM_LOCK_REPLY, sizeof(struct rcom_lock), &rc, &mh, seq); if (error) return; memcpy(rc->rc_buf, rc_in->rc_buf, sizeof(struct rcom_lock)); rl = (struct rcom_lock *)rc->rc_buf; /* set rl_remid and rl_result from dlm_recover_master_copy() */ rl->rl_remid = rl_remid; rl->rl_result = rl_result; rc->rc_id = rc_in->rc_id; rc->rc_seq_reply = rc_in->rc_seq; send_rcom(mh, rc); } /* If the lockspace doesn't exist then still send a status message back; it's possible that it just doesn't have its global_id yet. 
*/ int dlm_send_ls_not_ready(int nodeid, const struct dlm_rcom *rc_in) { struct dlm_rcom *rc; struct rcom_config *rf; struct dlm_mhandle *mh; char *mb; int mb_len = sizeof(struct dlm_rcom) + sizeof(struct rcom_config); mh = dlm_midcomms_get_mhandle(nodeid, mb_len, GFP_NOFS, &mb); if (!mh) return -ENOBUFS; rc = (struct dlm_rcom *) mb; rc->rc_header.h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR); rc->rc_header.u.h_lockspace = rc_in->rc_header.u.h_lockspace; rc->rc_header.h_nodeid = cpu_to_le32(dlm_our_nodeid()); rc->rc_header.h_length = cpu_to_le16(mb_len); rc->rc_header.h_cmd = DLM_RCOM; rc->rc_type = cpu_to_le32(DLM_RCOM_STATUS_REPLY); rc->rc_id = rc_in->rc_id; rc->rc_seq_reply = rc_in->rc_seq; rc->rc_result = cpu_to_le32(-ESRCH); rf = (struct rcom_config *) rc->rc_buf; rf->rf_lvblen = cpu_to_le32(~0U); dlm_midcomms_commit_mhandle(mh, NULL, 0); return 0; } /* * Ignore messages for stage Y before we set * recover_status bit for stage X: * * recover_status = 0 * * dlm_recover_members() * - send nothing * - recv nothing * - ignore NAMES, NAMES_REPLY * - ignore LOOKUP, LOOKUP_REPLY * - ignore LOCK, LOCK_REPLY * * recover_status |= NODES * * dlm_recover_members_wait() * * dlm_recover_directory() * - send NAMES * - recv NAMES_REPLY * - ignore LOOKUP, LOOKUP_REPLY * - ignore LOCK, LOCK_REPLY * * recover_status |= DIR * * dlm_recover_directory_wait() * * dlm_recover_masters() * - send LOOKUP * - recv LOOKUP_REPLY * * dlm_recover_locks() * - send LOCKS * - recv LOCKS_REPLY * * recover_status |= LOCKS * * dlm_recover_locks_wait() * * recover_status |= DONE */ /* Called by dlm_recv; corresponds to dlm_receive_message() but special recovery-only comms are sent through here. */ void dlm_receive_rcom(struct dlm_ls *ls, const struct dlm_rcom *rc, int nodeid) { int lock_size = sizeof(struct dlm_rcom) + sizeof(struct rcom_lock); int stop, reply = 0, names = 0, lookup = 0, lock = 0; uint32_t status; uint64_t seq; switch (rc->rc_type) { case cpu_to_le32(DLM_RCOM_STATUS_REPLY): reply = 1; break; case cpu_to_le32(DLM_RCOM_NAMES): names = 1; break; case cpu_to_le32(DLM_RCOM_NAMES_REPLY): names = 1; reply = 1; break; case cpu_to_le32(DLM_RCOM_LOOKUP): lookup = 1; break; case cpu_to_le32(DLM_RCOM_LOOKUP_REPLY): lookup = 1; reply = 1; break; case cpu_to_le32(DLM_RCOM_LOCK): lock = 1; break; case cpu_to_le32(DLM_RCOM_LOCK_REPLY): lock = 1; reply = 1; break; } spin_lock(&ls->ls_recover_lock); status = ls->ls_recover_status; stop = dlm_recovery_stopped(ls); seq = ls->ls_recover_seq; spin_unlock(&ls->ls_recover_lock); if (stop && (rc->rc_type != cpu_to_le32(DLM_RCOM_STATUS))) goto ignore; if (reply && (le64_to_cpu(rc->rc_seq_reply) != seq)) goto ignore; if (!(status & DLM_RS_NODES) && (names || lookup || lock)) goto ignore; if (!(status & DLM_RS_DIR) && (lookup || lock)) goto ignore; switch (rc->rc_type) { case cpu_to_le32(DLM_RCOM_STATUS): receive_rcom_status(ls, rc, seq); break; case cpu_to_le32(DLM_RCOM_NAMES): receive_rcom_names(ls, rc, seq); break; case cpu_to_le32(DLM_RCOM_LOOKUP): receive_rcom_lookup(ls, rc, seq); break; case cpu_to_le32(DLM_RCOM_LOCK): if (le16_to_cpu(rc->rc_header.h_length) < lock_size) goto Eshort; receive_rcom_lock(ls, rc, seq); break; case cpu_to_le32(DLM_RCOM_STATUS_REPLY): receive_sync_reply(ls, rc); break; case cpu_to_le32(DLM_RCOM_NAMES_REPLY): receive_sync_reply(ls, rc); break; case cpu_to_le32(DLM_RCOM_LOOKUP_REPLY): receive_rcom_lookup_reply(ls, rc); break; case cpu_to_le32(DLM_RCOM_LOCK_REPLY): if (le16_to_cpu(rc->rc_header.h_length) < lock_size) goto Eshort; 
dlm_recover_process_copy(ls, rc, seq); break; default: log_error(ls, "receive_rcom bad type %d", le32_to_cpu(rc->rc_type)); } return; ignore: log_limit(ls, "dlm_receive_rcom ignore msg %d " "from %d %llu %llu recover seq %llu sts %x gen %u", le32_to_cpu(rc->rc_type), nodeid, (unsigned long long)le64_to_cpu(rc->rc_seq), (unsigned long long)le64_to_cpu(rc->rc_seq_reply), (unsigned long long)seq, status, ls->ls_generation); return; Eshort: log_error(ls, "recovery message %d from %d is too short", le32_to_cpu(rc->rc_type), nodeid); }
linux-master
fs/dlm/rcom.c
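The stage-gating comment in rcom.c above describes when recovery messages are ignored relative to the recover_status bits (NODES before DIR before LOCKS). The following is a minimal userspace sketch of that gating decision only; RS_NODES, RS_DIR and the rcom_kind enum are stand-in values, not the kernel's real definitions, and the function merely mirrors the ordering checks in dlm_receive_rcom() rather than reproducing it.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the recovery status bits and message kinds; the real
 * values live in the kernel's dlm headers. */
#define RS_NODES 0x01
#define RS_DIR   0x02

enum rcom_kind { RCOM_STATUS, RCOM_NAMES, RCOM_LOOKUP, RCOM_LOCK };

/* Mirror of the gating rules: a stage-Y message is ignored until the
 * recover_status bit for the preceding stage X has been set. */
static bool should_ignore(enum rcom_kind kind, bool is_reply,
                          uint32_t status, bool stopped,
                          uint64_t seq, uint64_t seq_reply)
{
	if (stopped && kind != RCOM_STATUS)
		return true;		/* recovery aborted: only STATUS gets through */
	if (is_reply && seq_reply != seq)
		return true;		/* stale reply from an older recovery */
	if (!(status & RS_NODES) &&
	    (kind == RCOM_NAMES || kind == RCOM_LOOKUP || kind == RCOM_LOCK))
		return true;		/* membership not settled yet */
	if (!(status & RS_DIR) &&
	    (kind == RCOM_LOOKUP || kind == RCOM_LOCK))
		return true;		/* directory not rebuilt yet */
	return false;
}

int main(void)
{
	/* A LOOKUP arriving before the DIR stage completes is dropped. */
	printf("lookup before DIR: ignore=%d\n",
	       should_ignore(RCOM_LOOKUP, false, RS_NODES, false, 5, 0));
	/* The same LOOKUP after NODES and DIR are both set is processed. */
	printf("lookup after DIR:  ignore=%d\n",
	       should_ignore(RCOM_LOOKUP, false, RS_NODES | RS_DIR, false, 5, 0));
	return 0;
}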
// SPDX-License-Identifier: GPL-2.0-only /****************************************************************************** ******************************************************************************* ** ** Copyright (C) 2005-2007 Red Hat, Inc. All rights reserved. ** ** ******************************************************************************* ******************************************************************************/ #include "dlm_internal.h" #include "member.h" #include "lock.h" #include "dir.h" #include "config.h" #include "requestqueue.h" #include "util.h" struct rq_entry { struct list_head list; uint32_t recover_seq; int nodeid; struct dlm_message request; }; /* * Requests received while the lockspace is in recovery get added to the * request queue and processed when recovery is complete. This happens when * the lockspace is suspended on some nodes before it is on others, or the * lockspace is enabled on some while still suspended on others. */ void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, const struct dlm_message *ms) { struct rq_entry *e; int length = le16_to_cpu(ms->m_header.h_length) - sizeof(struct dlm_message); e = kmalloc(sizeof(struct rq_entry) + length, GFP_NOFS); if (!e) { log_print("dlm_add_requestqueue: out of memory len %d", length); return; } e->recover_seq = ls->ls_recover_seq & 0xFFFFFFFF; e->nodeid = nodeid; memcpy(&e->request, ms, sizeof(*ms)); memcpy(&e->request.m_extra, ms->m_extra, length); atomic_inc(&ls->ls_requestqueue_cnt); mutex_lock(&ls->ls_requestqueue_mutex); list_add_tail(&e->list, &ls->ls_requestqueue); mutex_unlock(&ls->ls_requestqueue_mutex); } /* * Called by dlm_recoverd to process normal messages saved while recovery was * happening. Normal locking has been enabled before this is called. dlm_recv * upon receiving a message, will wait for all saved messages to be drained * here before processing the message it got. If a new dlm_ls_stop() arrives * while we're processing these saved messages, it may block trying to suspend * dlm_recv if dlm_recv is waiting for us in dlm_wait_requestqueue. In that * case, we don't abort since locking_stopped is still 0. If dlm_recv is not * waiting for us, then this processing may be aborted due to locking_stopped. */ int dlm_process_requestqueue(struct dlm_ls *ls) { struct rq_entry *e; struct dlm_message *ms; int error = 0; mutex_lock(&ls->ls_requestqueue_mutex); for (;;) { if (list_empty(&ls->ls_requestqueue)) { mutex_unlock(&ls->ls_requestqueue_mutex); error = 0; break; } e = list_entry(ls->ls_requestqueue.next, struct rq_entry, list); mutex_unlock(&ls->ls_requestqueue_mutex); ms = &e->request; log_limit(ls, "dlm_process_requestqueue msg %d from %d " "lkid %x remid %x result %d seq %u", le32_to_cpu(ms->m_type), le32_to_cpu(ms->m_header.h_nodeid), le32_to_cpu(ms->m_lkid), le32_to_cpu(ms->m_remid), from_dlm_errno(le32_to_cpu(ms->m_result)), e->recover_seq); dlm_receive_message_saved(ls, &e->request, e->recover_seq); mutex_lock(&ls->ls_requestqueue_mutex); list_del(&e->list); if (atomic_dec_and_test(&ls->ls_requestqueue_cnt)) wake_up(&ls->ls_requestqueue_wait); kfree(e); if (dlm_locking_stopped(ls)) { log_debug(ls, "process_requestqueue abort running"); mutex_unlock(&ls->ls_requestqueue_mutex); error = -EINTR; break; } schedule(); } return error; } /* * After recovery is done, locking is resumed and dlm_recoverd takes all the * saved requests and processes them as they would have been by dlm_recv. At * the same time, dlm_recv will start receiving new requests from remote nodes. 
* We want to delay dlm_recv processing new requests until dlm_recoverd has * finished processing the old saved requests. We don't check for locking * stopped here because dlm_ls_stop won't stop locking until it's suspended us * (dlm_recv). */ void dlm_wait_requestqueue(struct dlm_ls *ls) { wait_event(ls->ls_requestqueue_wait, atomic_read(&ls->ls_requestqueue_cnt) == 0); } static int purge_request(struct dlm_ls *ls, struct dlm_message *ms, int nodeid) { __le32 type = ms->m_type; /* the ls is being cleaned up and freed by release_lockspace */ if (!atomic_read(&ls->ls_count)) return 1; if (dlm_is_removed(ls, nodeid)) return 1; /* directory operations are always purged because the directory is always rebuilt during recovery and the lookups resent */ if (type == cpu_to_le32(DLM_MSG_REMOVE) || type == cpu_to_le32(DLM_MSG_LOOKUP) || type == cpu_to_le32(DLM_MSG_LOOKUP_REPLY)) return 1; if (!dlm_no_directory(ls)) return 0; return 1; } void dlm_purge_requestqueue(struct dlm_ls *ls) { struct dlm_message *ms; struct rq_entry *e, *safe; mutex_lock(&ls->ls_requestqueue_mutex); list_for_each_entry_safe(e, safe, &ls->ls_requestqueue, list) { ms = &e->request; if (purge_request(ls, ms, e->nodeid)) { list_del(&e->list); if (atomic_dec_and_test(&ls->ls_requestqueue_cnt)) wake_up(&ls->ls_requestqueue_wait); kfree(e); } } mutex_unlock(&ls->ls_requestqueue_mutex); }
linux-master
fs/dlm/requestqueue.c
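requestqueue.c above saves messages that arrive during recovery and replays them in arrival order once locking resumes. Below is a small single-threaded userspace model of that save-then-drain behaviour; the struct queue is hypothetical and stands in for the lockspace's list, mutex and counter, so it only illustrates the FIFO semantics, not the kernel's locking, wake-up or purge handling.

#include <stdio.h>
#include <stdlib.h>

/* Minimal model of the save-then-drain queue (cf. dlm_add_requestqueue()
 * and dlm_process_requestqueue()). */
struct entry {
	struct entry *next;
	unsigned int recover_seq;
	int nodeid;
};

struct queue { struct entry *head, **tail; int count; };

static void queue_init(struct queue *q)
{
	q->head = NULL;
	q->tail = &q->head;
	q->count = 0;
}

static void queue_add(struct queue *q, unsigned int seq, int nodeid)
{
	struct entry *e = calloc(1, sizeof(*e));

	if (!e)
		return;			/* the kernel also logs and drops on -ENOMEM */
	e->recover_seq = seq;
	e->nodeid = nodeid;
	*q->tail = e;
	q->tail = &e->next;
	q->count++;
}

static void queue_drain(struct queue *q)
{
	while (q->head) {
		struct entry *e = q->head;

		q->head = e->next;
		printf("replay seq=%u from node %d\n", e->recover_seq, e->nodeid);
		free(e);
		q->count--;
	}
	q->tail = &q->head;
}

int main(void)
{
	struct queue q;

	queue_init(&q);
	queue_add(&q, 7, 2);
	queue_add(&q, 7, 3);
	queue_drain(&q);	/* replayed in the order they were saved */
	return 0;
}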
// SPDX-License-Identifier: GPL-2.0-only /****************************************************************************** ******************************************************************************* ** ** Copyright (C) 2005-2011 Red Hat, Inc. All rights reserved. ** ** ******************************************************************************* ******************************************************************************/ #include "dlm_internal.h" #include "lockspace.h" #include "member.h" #include "recoverd.h" #include "recover.h" #include "rcom.h" #include "config.h" #include "midcomms.h" #include "lowcomms.h" int dlm_slots_version(const struct dlm_header *h) { if ((le32_to_cpu(h->h_version) & 0x0000FFFF) < DLM_HEADER_SLOTS) return 0; return 1; } void dlm_slot_save(struct dlm_ls *ls, struct dlm_rcom *rc, struct dlm_member *memb) { struct rcom_config *rf = (struct rcom_config *)rc->rc_buf; if (!dlm_slots_version(&rc->rc_header)) return; memb->slot = le16_to_cpu(rf->rf_our_slot); memb->generation = le32_to_cpu(rf->rf_generation); } void dlm_slots_copy_out(struct dlm_ls *ls, struct dlm_rcom *rc) { struct dlm_slot *slot; struct rcom_slot *ro; int i; ro = (struct rcom_slot *)(rc->rc_buf + sizeof(struct rcom_config)); /* ls_slots array is sparse, but not rcom_slots */ for (i = 0; i < ls->ls_slots_size; i++) { slot = &ls->ls_slots[i]; if (!slot->nodeid) continue; ro->ro_nodeid = cpu_to_le32(slot->nodeid); ro->ro_slot = cpu_to_le16(slot->slot); ro++; } } #define SLOT_DEBUG_LINE 128 static void log_slots(struct dlm_ls *ls, uint32_t gen, int num_slots, struct rcom_slot *ro0, struct dlm_slot *array, int array_size) { char line[SLOT_DEBUG_LINE]; int len = SLOT_DEBUG_LINE - 1; int pos = 0; int ret, i; memset(line, 0, sizeof(line)); if (array) { for (i = 0; i < array_size; i++) { if (!array[i].nodeid) continue; ret = snprintf(line + pos, len - pos, " %d:%d", array[i].slot, array[i].nodeid); if (ret >= len - pos) break; pos += ret; } } else if (ro0) { for (i = 0; i < num_slots; i++) { ret = snprintf(line + pos, len - pos, " %d:%d", ro0[i].ro_slot, ro0[i].ro_nodeid); if (ret >= len - pos) break; pos += ret; } } log_rinfo(ls, "generation %u slots %d%s", gen, num_slots, line); } int dlm_slots_copy_in(struct dlm_ls *ls) { struct dlm_member *memb; struct dlm_rcom *rc = ls->ls_recover_buf; struct rcom_config *rf = (struct rcom_config *)rc->rc_buf; struct rcom_slot *ro0, *ro; int our_nodeid = dlm_our_nodeid(); int i, num_slots; uint32_t gen; if (!dlm_slots_version(&rc->rc_header)) return -1; gen = le32_to_cpu(rf->rf_generation); if (gen <= ls->ls_generation) { log_error(ls, "dlm_slots_copy_in gen %u old %u", gen, ls->ls_generation); } ls->ls_generation = gen; num_slots = le16_to_cpu(rf->rf_num_slots); if (!num_slots) return -1; ro0 = (struct rcom_slot *)(rc->rc_buf + sizeof(struct rcom_config)); log_slots(ls, gen, num_slots, ro0, NULL, 0); list_for_each_entry(memb, &ls->ls_nodes, list) { for (i = 0, ro = ro0; i < num_slots; i++, ro++) { if (le32_to_cpu(ro->ro_nodeid) != memb->nodeid) continue; memb->slot = le16_to_cpu(ro->ro_slot); memb->slot_prev = memb->slot; break; } if (memb->nodeid == our_nodeid) { if (ls->ls_slot && ls->ls_slot != memb->slot) { log_error(ls, "dlm_slots_copy_in our slot " "changed %d %d", ls->ls_slot, memb->slot); return -1; } if (!ls->ls_slot) ls->ls_slot = memb->slot; } if (!memb->slot) { log_error(ls, "dlm_slots_copy_in nodeid %d no slot", memb->nodeid); return -1; } } return 0; } /* for any nodes that do not support slots, we will not have set memb->slot in wait_status_all(), 
so memb->slot will remain -1, and we will not assign slots or set ls_num_slots here */ int dlm_slots_assign(struct dlm_ls *ls, int *num_slots, int *slots_size, struct dlm_slot **slots_out, uint32_t *gen_out) { struct dlm_member *memb; struct dlm_slot *array; int our_nodeid = dlm_our_nodeid(); int array_size, max_slots, i; int need = 0; int max = 0; int num = 0; uint32_t gen = 0; /* our own memb struct will have slot -1 gen 0 */ list_for_each_entry(memb, &ls->ls_nodes, list) { if (memb->nodeid == our_nodeid) { memb->slot = ls->ls_slot; memb->generation = ls->ls_generation; break; } } list_for_each_entry(memb, &ls->ls_nodes, list) { if (memb->generation > gen) gen = memb->generation; /* node doesn't support slots */ if (memb->slot == -1) return -1; /* node needs a slot assigned */ if (!memb->slot) need++; /* node has a slot assigned */ num++; if (!max || max < memb->slot) max = memb->slot; /* sanity check, once slot is assigned it shouldn't change */ if (memb->slot_prev && memb->slot && memb->slot_prev != memb->slot) { log_error(ls, "nodeid %d slot changed %d %d", memb->nodeid, memb->slot_prev, memb->slot); return -1; } memb->slot_prev = memb->slot; } array_size = max + need; array = kcalloc(array_size, sizeof(*array), GFP_NOFS); if (!array) return -ENOMEM; num = 0; /* fill in slots (offsets) that are used */ list_for_each_entry(memb, &ls->ls_nodes, list) { if (!memb->slot) continue; if (memb->slot > array_size) { log_error(ls, "invalid slot number %d", memb->slot); kfree(array); return -1; } array[memb->slot - 1].nodeid = memb->nodeid; array[memb->slot - 1].slot = memb->slot; num++; } /* assign new slots from unused offsets */ list_for_each_entry(memb, &ls->ls_nodes, list) { if (memb->slot) continue; for (i = 0; i < array_size; i++) { if (array[i].nodeid) continue; memb->slot = i + 1; memb->slot_prev = memb->slot; array[i].nodeid = memb->nodeid; array[i].slot = memb->slot; num++; if (!ls->ls_slot && memb->nodeid == our_nodeid) ls->ls_slot = memb->slot; break; } if (!memb->slot) { log_error(ls, "no free slot found"); kfree(array); return -1; } } gen++; log_slots(ls, gen, num, NULL, array, array_size); max_slots = (DLM_MAX_APP_BUFSIZE - sizeof(struct dlm_rcom) - sizeof(struct rcom_config)) / sizeof(struct rcom_slot); if (num > max_slots) { log_error(ls, "num_slots %d exceeds max_slots %d", num, max_slots); kfree(array); return -1; } *gen_out = gen; *slots_out = array; *slots_size = array_size; *num_slots = num; return 0; } static void add_ordered_member(struct dlm_ls *ls, struct dlm_member *new) { struct dlm_member *memb = NULL; struct list_head *tmp; struct list_head *newlist = &new->list; struct list_head *head = &ls->ls_nodes; list_for_each(tmp, head) { memb = list_entry(tmp, struct dlm_member, list); if (new->nodeid < memb->nodeid) break; } if (!memb) list_add_tail(newlist, head); else { /* FIXME: can use list macro here */ newlist->prev = tmp->prev; newlist->next = tmp; tmp->prev->next = newlist; tmp->prev = newlist; } } static int add_remote_member(int nodeid) { int error; if (nodeid == dlm_our_nodeid()) return 0; error = dlm_lowcomms_connect_node(nodeid); if (error < 0) return error; dlm_midcomms_add_member(nodeid); return 0; } static int dlm_add_member(struct dlm_ls *ls, struct dlm_config_node *node) { struct dlm_member *memb; int error; memb = kzalloc(sizeof(*memb), GFP_NOFS); if (!memb) return -ENOMEM; memb->nodeid = node->nodeid; memb->weight = node->weight; memb->comm_seq = node->comm_seq; error = add_remote_member(node->nodeid); if (error < 0) { kfree(memb); return error; } 
add_ordered_member(ls, memb); ls->ls_num_nodes++; return 0; } static struct dlm_member *find_memb(struct list_head *head, int nodeid) { struct dlm_member *memb; list_for_each_entry(memb, head, list) { if (memb->nodeid == nodeid) return memb; } return NULL; } int dlm_is_member(struct dlm_ls *ls, int nodeid) { if (find_memb(&ls->ls_nodes, nodeid)) return 1; return 0; } int dlm_is_removed(struct dlm_ls *ls, int nodeid) { if (find_memb(&ls->ls_nodes_gone, nodeid)) return 1; return 0; } static void clear_memb_list(struct list_head *head, void (*after_del)(int nodeid)) { struct dlm_member *memb; while (!list_empty(head)) { memb = list_entry(head->next, struct dlm_member, list); list_del(&memb->list); if (after_del) after_del(memb->nodeid); kfree(memb); } } static void remove_remote_member(int nodeid) { if (nodeid == dlm_our_nodeid()) return; dlm_midcomms_remove_member(nodeid); } void dlm_clear_members(struct dlm_ls *ls) { clear_memb_list(&ls->ls_nodes, remove_remote_member); ls->ls_num_nodes = 0; } void dlm_clear_members_gone(struct dlm_ls *ls) { clear_memb_list(&ls->ls_nodes_gone, NULL); } static void make_member_array(struct dlm_ls *ls) { struct dlm_member *memb; int i, w, x = 0, total = 0, all_zero = 0, *array; kfree(ls->ls_node_array); ls->ls_node_array = NULL; list_for_each_entry(memb, &ls->ls_nodes, list) { if (memb->weight) total += memb->weight; } /* all nodes revert to weight of 1 if all have weight 0 */ if (!total) { total = ls->ls_num_nodes; all_zero = 1; } ls->ls_total_weight = total; array = kmalloc_array(total, sizeof(*array), GFP_NOFS); if (!array) return; list_for_each_entry(memb, &ls->ls_nodes, list) { if (!all_zero && !memb->weight) continue; if (all_zero) w = 1; else w = memb->weight; DLM_ASSERT(x < total, printk("total %d x %d\n", total, x);); for (i = 0; i < w; i++) array[x++] = memb->nodeid; } ls->ls_node_array = array; } /* send a status request to all members just to establish comms connections */ static int ping_members(struct dlm_ls *ls, uint64_t seq) { struct dlm_member *memb; int error = 0; list_for_each_entry(memb, &ls->ls_nodes, list) { if (dlm_recovery_stopped(ls)) { error = -EINTR; break; } error = dlm_rcom_status(ls, memb->nodeid, 0, seq); if (error) break; } if (error) log_rinfo(ls, "ping_members aborted %d last nodeid %d", error, ls->ls_recover_nodeid); return error; } static void dlm_lsop_recover_prep(struct dlm_ls *ls) { if (!ls->ls_ops || !ls->ls_ops->recover_prep) return; ls->ls_ops->recover_prep(ls->ls_ops_arg); } static void dlm_lsop_recover_slot(struct dlm_ls *ls, struct dlm_member *memb) { struct dlm_slot slot; uint32_t seq; int error; if (!ls->ls_ops || !ls->ls_ops->recover_slot) return; /* if there is no comms connection with this node or the present comms connection is newer than the one when this member was added, then we consider the node to have failed (versus being removed due to dlm_release_lockspace) */ error = dlm_comm_seq(memb->nodeid, &seq); if (!error && seq == memb->comm_seq) return; slot.nodeid = memb->nodeid; slot.slot = memb->slot; ls->ls_ops->recover_slot(ls->ls_ops_arg, &slot); } void dlm_lsop_recover_done(struct dlm_ls *ls) { struct dlm_member *memb; struct dlm_slot *slots; int i, num; if (!ls->ls_ops || !ls->ls_ops->recover_done) return; num = ls->ls_num_nodes; slots = kcalloc(num, sizeof(*slots), GFP_KERNEL); if (!slots) return; i = 0; list_for_each_entry(memb, &ls->ls_nodes, list) { if (i == num) { log_error(ls, "dlm_lsop_recover_done bad num %d", num); goto out; } slots[i].nodeid = memb->nodeid; slots[i].slot = memb->slot; i++; } 
ls->ls_ops->recover_done(ls->ls_ops_arg, slots, num, ls->ls_slot, ls->ls_generation); out: kfree(slots); } static struct dlm_config_node *find_config_node(struct dlm_recover *rv, int nodeid) { int i; for (i = 0; i < rv->nodes_count; i++) { if (rv->nodes[i].nodeid == nodeid) return &rv->nodes[i]; } return NULL; } int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv, int *neg_out) { struct dlm_member *memb, *safe; struct dlm_config_node *node; int i, error, neg = 0, low = -1; /* previously removed members that we've not finished removing need to * count as a negative change so the "neg" recovery steps will happen * * This functionality must report all member changes to lsops or * midcomms layer and must never return before. */ list_for_each_entry(memb, &ls->ls_nodes_gone, list) { log_rinfo(ls, "prev removed member %d", memb->nodeid); neg++; } /* move departed members from ls_nodes to ls_nodes_gone */ list_for_each_entry_safe(memb, safe, &ls->ls_nodes, list) { node = find_config_node(rv, memb->nodeid); if (node && !node->new) continue; if (!node) { log_rinfo(ls, "remove member %d", memb->nodeid); } else { /* removed and re-added */ log_rinfo(ls, "remove member %d comm_seq %u %u", memb->nodeid, memb->comm_seq, node->comm_seq); } neg++; list_move(&memb->list, &ls->ls_nodes_gone); remove_remote_member(memb->nodeid); ls->ls_num_nodes--; dlm_lsop_recover_slot(ls, memb); } /* add new members to ls_nodes */ for (i = 0; i < rv->nodes_count; i++) { node = &rv->nodes[i]; if (dlm_is_member(ls, node->nodeid)) continue; error = dlm_add_member(ls, node); if (error) return error; log_rinfo(ls, "add member %d", node->nodeid); } list_for_each_entry(memb, &ls->ls_nodes, list) { if (low == -1 || memb->nodeid < low) low = memb->nodeid; } ls->ls_low_nodeid = low; make_member_array(ls); *neg_out = neg; error = ping_members(ls, rv->seq); log_rinfo(ls, "dlm_recover_members %d nodes", ls->ls_num_nodes); return error; } /* Userspace guarantees that dlm_ls_stop() has completed on all nodes before dlm_ls_start() is called on any of them to start the new recovery. */ int dlm_ls_stop(struct dlm_ls *ls) { int new; /* * Prevent dlm_recv from being in the middle of something when we do * the stop. This includes ensuring dlm_recv isn't processing a * recovery message (rcom), while dlm_recoverd is aborting and * resetting things from an in-progress recovery. i.e. we want * dlm_recoverd to abort its recovery without worrying about dlm_recv * processing an rcom at the same time. Stopping dlm_recv also makes * it easy for dlm_receive_message() to check locking stopped and add a * message to the requestqueue without races. */ down_write(&ls->ls_recv_active); /* * Abort any recovery that's in progress (see RECOVER_STOP, * dlm_recovery_stopped()) and tell any other threads running in the * dlm to quit any processing (see RUNNING, dlm_locking_stopped()). */ spin_lock(&ls->ls_recover_lock); set_bit(LSFL_RECOVER_STOP, &ls->ls_flags); new = test_and_clear_bit(LSFL_RUNNING, &ls->ls_flags); ls->ls_recover_seq++; spin_unlock(&ls->ls_recover_lock); /* * Let dlm_recv run again, now any normal messages will be saved on the * requestqueue for later. */ up_write(&ls->ls_recv_active); /* * This in_recovery lock does two things: * 1) Keeps this function from returning until all threads are out * of locking routines and locking is truly stopped. * 2) Keeps any new requests from being processed until it's unlocked * when recovery is complete. 
*/ if (new) { set_bit(LSFL_RECOVER_DOWN, &ls->ls_flags); wake_up_process(ls->ls_recoverd_task); wait_event(ls->ls_recover_lock_wait, test_bit(LSFL_RECOVER_LOCK, &ls->ls_flags)); } /* * The recoverd suspend/resume makes sure that dlm_recoverd (if * running) has noticed RECOVER_STOP above and quit processing the * previous recovery. */ dlm_recoverd_suspend(ls); spin_lock(&ls->ls_recover_lock); kfree(ls->ls_slots); ls->ls_slots = NULL; ls->ls_num_slots = 0; ls->ls_slots_size = 0; ls->ls_recover_status = 0; spin_unlock(&ls->ls_recover_lock); dlm_recoverd_resume(ls); if (!ls->ls_recover_begin) ls->ls_recover_begin = jiffies; /* call recover_prep ops only once and not multiple times * for each possible dlm_ls_stop() when recovery is already * stopped. * * If we successful was able to clear LSFL_RUNNING bit and * it was set we know it is the first dlm_ls_stop() call. */ if (new) dlm_lsop_recover_prep(ls); return 0; } int dlm_ls_start(struct dlm_ls *ls) { struct dlm_recover *rv, *rv_old; struct dlm_config_node *nodes = NULL; int error, count; rv = kzalloc(sizeof(*rv), GFP_NOFS); if (!rv) return -ENOMEM; error = dlm_config_nodes(ls->ls_name, &nodes, &count); if (error < 0) goto fail_rv; spin_lock(&ls->ls_recover_lock); /* the lockspace needs to be stopped before it can be started */ if (!dlm_locking_stopped(ls)) { spin_unlock(&ls->ls_recover_lock); log_error(ls, "start ignored: lockspace running"); error = -EINVAL; goto fail; } rv->nodes = nodes; rv->nodes_count = count; rv->seq = ++ls->ls_recover_seq; rv_old = ls->ls_recover_args; ls->ls_recover_args = rv; spin_unlock(&ls->ls_recover_lock); if (rv_old) { log_error(ls, "unused recovery %llx %d", (unsigned long long)rv_old->seq, rv_old->nodes_count); kfree(rv_old->nodes); kfree(rv_old); } set_bit(LSFL_RECOVER_WORK, &ls->ls_flags); wake_up_process(ls->ls_recoverd_task); return 0; fail: kfree(nodes); fail_rv: kfree(rv); return error; }
linux-master
fs/dlm/member.c
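dlm_slots_assign() in member.c keeps existing slot numbers stable and places new members into the lowest unused 1-based offsets before bumping the generation. The sketch below reimplements just that placement step in userspace with a simplified, hypothetical struct member; the array sizing follows the same max + need rule, but the slot_prev sanity checks, the rcom size limit and most error handling are deliberately omitted.

#include <stdio.h>
#include <stdlib.h>

struct member { int nodeid; int slot; };	/* slot == 0 means "needs one" */

static int assign_slots(struct member *m, int n, unsigned int *gen)
{
	int max = 0, need = 0, i, j;

	for (i = 0; i < n; i++) {
		if (m[i].slot)
			max = m[i].slot > max ? m[i].slot : max;
		else
			need++;
	}

	int size = max + need;			/* same sizing rule as the kernel */
	int *used = calloc(size + 1, sizeof(*used));

	if (!used)
		return -1;
	for (i = 0; i < n; i++)
		if (m[i].slot)
			used[m[i].slot] = m[i].nodeid;

	/* fill new members into the lowest free 1-based offsets */
	for (i = 0; i < n; i++) {
		if (m[i].slot)
			continue;
		for (j = 1; j <= size; j++) {
			if (!used[j]) {
				used[j] = m[i].nodeid;
				m[i].slot = j;
				break;
			}
		}
	}
	free(used);
	(*gen)++;
	return 0;
}

int main(void)
{
	struct member nodes[] = { { 10, 1 }, { 20, 3 }, { 30, 0 }, { 40, 0 } };
	unsigned int gen = 4;

	if (assign_slots(nodes, 4, &gen))
		return 1;
	for (int i = 0; i < 4; i++)
		printf("node %d -> slot %d (gen %u)\n",
		       nodes[i].nodeid, nodes[i].slot, gen);
	/* node 30 takes slot 2 (the hole), node 40 takes slot 4 */
	return 0;
}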
// SPDX-License-Identifier: GPL-2.0-only /****************************************************************************** ******************************************************************************* ** ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. ** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved. ** ** ******************************************************************************* ******************************************************************************/ #include "dlm_internal.h" #include "lockspace.h" #include "member.h" #include "lowcomms.h" #include "rcom.h" #include "config.h" #include "memory.h" #include "recover.h" #include "util.h" #include "lock.h" #include "dir.h" /* * We use the upper 16 bits of the hash value to select the directory node. * Low bits are used for distribution of rsb's among hash buckets on each node. * * To give the exact range wanted (0 to num_nodes-1), we apply a modulus of * num_nodes to the hash value. This value in the desired range is used as an * offset into the sorted list of nodeid's to give the particular nodeid. */ int dlm_hash2nodeid(struct dlm_ls *ls, uint32_t hash) { uint32_t node; if (ls->ls_num_nodes == 1) return dlm_our_nodeid(); else { node = (hash >> 16) % ls->ls_total_weight; return ls->ls_node_array[node]; } } int dlm_dir_nodeid(struct dlm_rsb *r) { return r->res_dir_nodeid; } void dlm_recover_dir_nodeid(struct dlm_ls *ls) { struct dlm_rsb *r; down_read(&ls->ls_root_sem); list_for_each_entry(r, &ls->ls_root_list, res_root_list) { r->res_dir_nodeid = dlm_hash2nodeid(ls, r->res_hash); } up_read(&ls->ls_root_sem); } int dlm_recover_directory(struct dlm_ls *ls, uint64_t seq) { struct dlm_member *memb; char *b, *last_name = NULL; int error = -ENOMEM, last_len, nodeid, result; uint16_t namelen; unsigned int count = 0, count_match = 0, count_bad = 0, count_add = 0; log_rinfo(ls, "dlm_recover_directory"); if (dlm_no_directory(ls)) goto out_status; last_name = kmalloc(DLM_RESNAME_MAXLEN, GFP_NOFS); if (!last_name) goto out; list_for_each_entry(memb, &ls->ls_nodes, list) { if (memb->nodeid == dlm_our_nodeid()) continue; memset(last_name, 0, DLM_RESNAME_MAXLEN); last_len = 0; for (;;) { int left; if (dlm_recovery_stopped(ls)) { error = -EINTR; goto out_free; } error = dlm_rcom_names(ls, memb->nodeid, last_name, last_len, seq); if (error) goto out_free; cond_resched(); /* * pick namelen/name pairs out of received buffer */ b = ls->ls_recover_buf->rc_buf; left = le16_to_cpu(ls->ls_recover_buf->rc_header.h_length); left -= sizeof(struct dlm_rcom); for (;;) { __be16 v; error = -EINVAL; if (left < sizeof(__be16)) goto out_free; memcpy(&v, b, sizeof(__be16)); namelen = be16_to_cpu(v); b += sizeof(__be16); left -= sizeof(__be16); /* namelen of 0xFFFFF marks end of names for this node; namelen of 0 marks end of the buffer */ if (namelen == 0xFFFF) goto done; if (!namelen) break; if (namelen > left) goto out_free; if (namelen > DLM_RESNAME_MAXLEN) goto out_free; error = dlm_master_lookup(ls, memb->nodeid, b, namelen, DLM_LU_RECOVER_DIR, &nodeid, &result); if (error) { log_error(ls, "recover_dir lookup %d", error); goto out_free; } /* The name was found in rsbtbl, but the * master nodeid is different from * memb->nodeid which says it is the master. * This should not happen. 
*/ if (result == DLM_LU_MATCH && nodeid != memb->nodeid) { count_bad++; log_error(ls, "recover_dir lookup %d " "nodeid %d memb %d bad %u", result, nodeid, memb->nodeid, count_bad); print_hex_dump_bytes("dlm_recover_dir ", DUMP_PREFIX_NONE, b, namelen); } /* The name was found in rsbtbl, and the * master nodeid matches memb->nodeid. */ if (result == DLM_LU_MATCH && nodeid == memb->nodeid) { count_match++; } /* The name was not found in rsbtbl and was * added with memb->nodeid as the master. */ if (result == DLM_LU_ADD) { count_add++; } last_len = namelen; memcpy(last_name, b, namelen); b += namelen; left -= namelen; count++; } } done: ; } out_status: error = 0; dlm_set_recover_status(ls, DLM_RS_DIR); log_rinfo(ls, "dlm_recover_directory %u in %u new", count, count_add); out_free: kfree(last_name); out: return error; } static struct dlm_rsb *find_rsb_root(struct dlm_ls *ls, const char *name, int len) { struct dlm_rsb *r; uint32_t hash, bucket; int rv; hash = jhash(name, len, 0); bucket = hash & (ls->ls_rsbtbl_size - 1); spin_lock(&ls->ls_rsbtbl[bucket].lock); rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[bucket].keep, name, len, &r); if (rv) rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[bucket].toss, name, len, &r); spin_unlock(&ls->ls_rsbtbl[bucket].lock); if (!rv) return r; down_read(&ls->ls_root_sem); list_for_each_entry(r, &ls->ls_root_list, res_root_list) { if (len == r->res_length && !memcmp(name, r->res_name, len)) { up_read(&ls->ls_root_sem); log_debug(ls, "find_rsb_root revert to root_list %s", r->res_name); return r; } } up_read(&ls->ls_root_sem); return NULL; } /* Find the rsb where we left off (or start again), then send rsb names for rsb's we're master of and whose directory node matches the requesting node. inbuf is the rsb name last sent, inlen is the name's length */ void dlm_copy_master_names(struct dlm_ls *ls, const char *inbuf, int inlen, char *outbuf, int outlen, int nodeid) { struct list_head *list; struct dlm_rsb *r; int offset = 0, dir_nodeid; __be16 be_namelen; down_read(&ls->ls_root_sem); if (inlen > 1) { r = find_rsb_root(ls, inbuf, inlen); if (!r) { log_error(ls, "copy_master_names from %d start %d %.*s", nodeid, inlen, inlen, inbuf); goto out; } list = r->res_root_list.next; } else { list = ls->ls_root_list.next; } for (offset = 0; list != &ls->ls_root_list; list = list->next) { r = list_entry(list, struct dlm_rsb, res_root_list); if (r->res_nodeid) continue; dir_nodeid = dlm_dir_nodeid(r); if (dir_nodeid != nodeid) continue; /* * The block ends when we can't fit the following in the * remaining buffer space: * namelen (uint16_t) + * name (r->res_length) + * end-of-block record 0x0000 (uint16_t) */ if (offset + sizeof(uint16_t)*2 + r->res_length > outlen) { /* Write end-of-block record */ be_namelen = cpu_to_be16(0); memcpy(outbuf + offset, &be_namelen, sizeof(__be16)); offset += sizeof(__be16); ls->ls_recover_dir_sent_msg++; goto out; } be_namelen = cpu_to_be16(r->res_length); memcpy(outbuf + offset, &be_namelen, sizeof(__be16)); offset += sizeof(__be16); memcpy(outbuf + offset, r->res_name, r->res_length); offset += r->res_length; ls->ls_recover_dir_sent_res++; } /* * If we've reached the end of the list (and there's room) write a * terminating record. */ if ((list == &ls->ls_root_list) && (offset + sizeof(uint16_t) <= outlen)) { be_namelen = cpu_to_be16(0xFFFF); memcpy(outbuf + offset, &be_namelen, sizeof(__be16)); offset += sizeof(__be16); ls->ls_recover_dir_sent_msg++; } out: up_read(&ls->ls_root_sem); }
linux-master
fs/dlm/dir.c
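dlm_copy_master_names() and dlm_recover_directory() above exchange resource names as big-endian 16-bit length-prefixed records, where a zero length ends the current buffer and 0xFFFF ends the listing for a node. Here is a self-contained userspace sketch of that framing with made-up resource names; it assumes POSIX htons/ntohs and skips the kernel's paging through multiple rcom buffers.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>	/* htons/ntohs for the big-endian length prefixes */

/* Append one length-prefixed name; returns the new offset, or -1 if the
 * length, the name and a trailing end marker would not fit. */
static int pack_name(char *buf, int offset, int buflen, const char *name)
{
	uint16_t len = (uint16_t)strlen(name);
	uint16_t be;

	if (offset + 2 * (int)sizeof(uint16_t) + len > buflen)
		return -1;
	be = htons(len);
	memcpy(buf + offset, &be, sizeof(be));
	offset += sizeof(be);
	memcpy(buf + offset, name, len);
	return offset + len;
}

static void unpack_names(const char *buf, int buflen)
{
	int off = 0;

	while (off + (int)sizeof(uint16_t) <= buflen) {
		uint16_t be, len;

		memcpy(&be, buf + off, sizeof(be));
		off += sizeof(be);
		len = ntohs(be);
		if (len == 0xFFFF) {		/* no more names from this node */
			printf("end of names\n");
			return;
		}
		if (len == 0)			/* end of this buffer, ask for more */
			return;
		printf("name: %.*s\n", len, buf + off);
		off += len;
	}
}

int main(void)
{
	char buf[64];
	int off = 0;
	uint16_t end = htons(0xFFFF);

	/* buffer is large enough for this demo, so returns are not checked */
	off = pack_name(buf, off, sizeof(buf), "resourceA");
	off = pack_name(buf, off, sizeof(buf), "resourceB");
	memcpy(buf + off, &end, sizeof(end));
	off += sizeof(end);
	unpack_names(buf, off);
	return 0;
}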
// SPDX-License-Identifier: GPL-2.0-only /****************************************************************************** ******************************************************************************* ** ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. ** Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved. ** ** ******************************************************************************* ******************************************************************************/ /* * lowcomms.c * * This is the "low-level" comms layer. * * It is responsible for sending/receiving messages * from other nodes in the cluster. * * Cluster nodes are referred to by their nodeids. nodeids are * simply 32 bit numbers to the locking module - if they need to * be expanded for the cluster infrastructure then that is its * responsibility. It is this layer's * responsibility to resolve these into IP address or * whatever it needs for inter-node communication. * * The comms level is two kernel threads that deal mainly with * the receiving of messages from other nodes and passing them * up to the mid-level comms layer (which understands the * message format) for execution by the locking core, and * a send thread which does all the setting up of connections * to remote nodes and the sending of data. Threads are not allowed * to send their own data because it may cause them to wait in times * of high load. Also, this way, the sending thread can collect together * messages bound for one node and send them in one block. * * lowcomms will choose to use either TCP or SCTP as its transport layer * depending on the configuration variable 'protocol'. This should be set * to 0 (default) for TCP or 1 for SCTP. It should be configured using a * cluster-wide mechanism as it must be the same on all nodes of the cluster * for the DLM to function. * */ #include <asm/ioctls.h> #include <net/sock.h> #include <net/tcp.h> #include <linux/pagemap.h> #include <linux/file.h> #include <linux/mutex.h> #include <linux/sctp.h> #include <linux/slab.h> #include <net/sctp/sctp.h> #include <net/ipv6.h> #include <trace/events/dlm.h> #include <trace/events/sock.h> #include "dlm_internal.h" #include "lowcomms.h" #include "midcomms.h" #include "memory.h" #include "config.h" #define DLM_SHUTDOWN_WAIT_TIMEOUT msecs_to_jiffies(5000) #define NEEDED_RMEM (4*1024*1024) struct connection { struct socket *sock; /* NULL if not connected */ uint32_t nodeid; /* So we know who we are in the list */ /* this semaphore is used to allow parallel recv/send in read * lock mode. When we release a sock we need to held the write lock. * * However this is locking code and not nice. When we remove the * othercon handling we can look into other mechanism to synchronize * io handling to call sock_release() at the right time. */ struct rw_semaphore sock_lock; unsigned long flags; #define CF_APP_LIMITED 0 #define CF_RECV_PENDING 1 #define CF_SEND_PENDING 2 #define CF_RECV_INTR 3 #define CF_IO_STOP 4 #define CF_IS_OTHERCON 5 struct list_head writequeue; /* List of outgoing writequeue_entries */ spinlock_t writequeue_lock; int retries; struct hlist_node list; /* due some connect()/accept() races we currently have this cross over * connection attempt second connection for one node. * * There is a solution to avoid the race by introducing a connect * rule as e.g. our_nodeid > nodeid_to_connect who is allowed to * connect. Otherside can connect but will only be considered that * the other side wants to have a reconnect. 
* * However changing to this behaviour will break backwards compatible. * In a DLM protocol major version upgrade we should remove this! */ struct connection *othercon; struct work_struct rwork; /* receive worker */ struct work_struct swork; /* send worker */ wait_queue_head_t shutdown_wait; unsigned char rx_leftover_buf[DLM_MAX_SOCKET_BUFSIZE]; int rx_leftover; int mark; int addr_count; int curr_addr_index; struct sockaddr_storage addr[DLM_MAX_ADDR_COUNT]; spinlock_t addrs_lock; struct rcu_head rcu; }; #define sock2con(x) ((struct connection *)(x)->sk_user_data) struct listen_connection { struct socket *sock; struct work_struct rwork; }; #define DLM_WQ_REMAIN_BYTES(e) (PAGE_SIZE - e->end) #define DLM_WQ_LENGTH_BYTES(e) (e->end - e->offset) /* An entry waiting to be sent */ struct writequeue_entry { struct list_head list; struct page *page; int offset; int len; int end; int users; bool dirty; struct connection *con; struct list_head msgs; struct kref ref; }; struct dlm_msg { struct writequeue_entry *entry; struct dlm_msg *orig_msg; bool retransmit; void *ppc; int len; int idx; /* new()/commit() idx exchange */ struct list_head list; struct kref ref; }; struct processqueue_entry { unsigned char *buf; int nodeid; int buflen; struct list_head list; }; struct dlm_proto_ops { bool try_new_addr; const char *name; int proto; int (*connect)(struct connection *con, struct socket *sock, struct sockaddr *addr, int addr_len); void (*sockopts)(struct socket *sock); int (*bind)(struct socket *sock); int (*listen_validate)(void); void (*listen_sockopts)(struct socket *sock); int (*listen_bind)(struct socket *sock); }; static struct listen_sock_callbacks { void (*sk_error_report)(struct sock *); void (*sk_data_ready)(struct sock *); void (*sk_state_change)(struct sock *); void (*sk_write_space)(struct sock *); } listen_sock; static struct listen_connection listen_con; static struct sockaddr_storage dlm_local_addr[DLM_MAX_ADDR_COUNT]; static int dlm_local_count; /* Work queues */ static struct workqueue_struct *io_workqueue; static struct workqueue_struct *process_workqueue; static struct hlist_head connection_hash[CONN_HASH_SIZE]; static DEFINE_SPINLOCK(connections_lock); DEFINE_STATIC_SRCU(connections_srcu); static const struct dlm_proto_ops *dlm_proto_ops; #define DLM_IO_SUCCESS 0 #define DLM_IO_END 1 #define DLM_IO_EOF 2 #define DLM_IO_RESCHED 3 static void process_recv_sockets(struct work_struct *work); static void process_send_sockets(struct work_struct *work); static void process_dlm_messages(struct work_struct *work); static DECLARE_WORK(process_work, process_dlm_messages); static DEFINE_SPINLOCK(processqueue_lock); static bool process_dlm_messages_pending; static LIST_HEAD(processqueue); bool dlm_lowcomms_is_running(void) { return !!listen_con.sock; } static void lowcomms_queue_swork(struct connection *con) { assert_spin_locked(&con->writequeue_lock); if (!test_bit(CF_IO_STOP, &con->flags) && !test_bit(CF_APP_LIMITED, &con->flags) && !test_and_set_bit(CF_SEND_PENDING, &con->flags)) queue_work(io_workqueue, &con->swork); } static void lowcomms_queue_rwork(struct connection *con) { #ifdef CONFIG_LOCKDEP WARN_ON_ONCE(!lockdep_sock_is_held(con->sock->sk)); #endif if (!test_bit(CF_IO_STOP, &con->flags) && !test_and_set_bit(CF_RECV_PENDING, &con->flags)) queue_work(io_workqueue, &con->rwork); } static void writequeue_entry_ctor(void *data) { struct writequeue_entry *entry = data; INIT_LIST_HEAD(&entry->msgs); } struct kmem_cache *dlm_lowcomms_writequeue_cache_create(void) { return 
kmem_cache_create("dlm_writequeue", sizeof(struct writequeue_entry), 0, 0, writequeue_entry_ctor); } struct kmem_cache *dlm_lowcomms_msg_cache_create(void) { return kmem_cache_create("dlm_msg", sizeof(struct dlm_msg), 0, 0, NULL); } /* need to held writequeue_lock */ static struct writequeue_entry *con_next_wq(struct connection *con) { struct writequeue_entry *e; e = list_first_entry_or_null(&con->writequeue, struct writequeue_entry, list); /* if len is zero nothing is to send, if there are users filling * buffers we wait until the users are done so we can send more. */ if (!e || e->users || e->len == 0) return NULL; return e; } static struct connection *__find_con(int nodeid, int r) { struct connection *con; hlist_for_each_entry_rcu(con, &connection_hash[r], list) { if (con->nodeid == nodeid) return con; } return NULL; } static void dlm_con_init(struct connection *con, int nodeid) { con->nodeid = nodeid; init_rwsem(&con->sock_lock); INIT_LIST_HEAD(&con->writequeue); spin_lock_init(&con->writequeue_lock); INIT_WORK(&con->swork, process_send_sockets); INIT_WORK(&con->rwork, process_recv_sockets); spin_lock_init(&con->addrs_lock); init_waitqueue_head(&con->shutdown_wait); } /* * If 'allocation' is zero then we don't attempt to create a new * connection structure for this node. */ static struct connection *nodeid2con(int nodeid, gfp_t alloc) { struct connection *con, *tmp; int r; r = nodeid_hash(nodeid); con = __find_con(nodeid, r); if (con || !alloc) return con; con = kzalloc(sizeof(*con), alloc); if (!con) return NULL; dlm_con_init(con, nodeid); spin_lock(&connections_lock); /* Because multiple workqueues/threads calls this function it can * race on multiple cpu's. Instead of locking hot path __find_con() * we just check in rare cases of recently added nodes again * under protection of connections_lock. If this is the case we * abort our connection creation and return the existing connection. 
*/ tmp = __find_con(nodeid, r); if (tmp) { spin_unlock(&connections_lock); kfree(con); return tmp; } hlist_add_head_rcu(&con->list, &connection_hash[r]); spin_unlock(&connections_lock); return con; } static int addr_compare(const struct sockaddr_storage *x, const struct sockaddr_storage *y) { switch (x->ss_family) { case AF_INET: { struct sockaddr_in *sinx = (struct sockaddr_in *)x; struct sockaddr_in *siny = (struct sockaddr_in *)y; if (sinx->sin_addr.s_addr != siny->sin_addr.s_addr) return 0; if (sinx->sin_port != siny->sin_port) return 0; break; } case AF_INET6: { struct sockaddr_in6 *sinx = (struct sockaddr_in6 *)x; struct sockaddr_in6 *siny = (struct sockaddr_in6 *)y; if (!ipv6_addr_equal(&sinx->sin6_addr, &siny->sin6_addr)) return 0; if (sinx->sin6_port != siny->sin6_port) return 0; break; } default: return 0; } return 1; } static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out, struct sockaddr *sa_out, bool try_new_addr, unsigned int *mark) { struct sockaddr_storage sas; struct connection *con; int idx; if (!dlm_local_count) return -1; idx = srcu_read_lock(&connections_srcu); con = nodeid2con(nodeid, 0); if (!con) { srcu_read_unlock(&connections_srcu, idx); return -ENOENT; } spin_lock(&con->addrs_lock); if (!con->addr_count) { spin_unlock(&con->addrs_lock); srcu_read_unlock(&connections_srcu, idx); return -ENOENT; } memcpy(&sas, &con->addr[con->curr_addr_index], sizeof(struct sockaddr_storage)); if (try_new_addr) { con->curr_addr_index++; if (con->curr_addr_index == con->addr_count) con->curr_addr_index = 0; } *mark = con->mark; spin_unlock(&con->addrs_lock); if (sas_out) memcpy(sas_out, &sas, sizeof(struct sockaddr_storage)); if (!sa_out) { srcu_read_unlock(&connections_srcu, idx); return 0; } if (dlm_local_addr[0].ss_family == AF_INET) { struct sockaddr_in *in4 = (struct sockaddr_in *) &sas; struct sockaddr_in *ret4 = (struct sockaddr_in *) sa_out; ret4->sin_addr.s_addr = in4->sin_addr.s_addr; } else { struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) &sas; struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) sa_out; ret6->sin6_addr = in6->sin6_addr; } srcu_read_unlock(&connections_srcu, idx); return 0; } static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid, unsigned int *mark) { struct connection *con; int i, idx, addr_i; idx = srcu_read_lock(&connections_srcu); for (i = 0; i < CONN_HASH_SIZE; i++) { hlist_for_each_entry_rcu(con, &connection_hash[i], list) { WARN_ON_ONCE(!con->addr_count); spin_lock(&con->addrs_lock); for (addr_i = 0; addr_i < con->addr_count; addr_i++) { if (addr_compare(&con->addr[addr_i], addr)) { *nodeid = con->nodeid; *mark = con->mark; spin_unlock(&con->addrs_lock); srcu_read_unlock(&connections_srcu, idx); return 0; } } spin_unlock(&con->addrs_lock); } } srcu_read_unlock(&connections_srcu, idx); return -ENOENT; } static bool dlm_lowcomms_con_has_addr(const struct connection *con, const struct sockaddr_storage *addr) { int i; for (i = 0; i < con->addr_count; i++) { if (addr_compare(&con->addr[i], addr)) return true; } return false; } int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len) { struct connection *con; bool ret, idx; idx = srcu_read_lock(&connections_srcu); con = nodeid2con(nodeid, GFP_NOFS); if (!con) { srcu_read_unlock(&connections_srcu, idx); return -ENOMEM; } spin_lock(&con->addrs_lock); if (!con->addr_count) { memcpy(&con->addr[0], addr, sizeof(*addr)); con->addr_count = 1; con->mark = dlm_config.ci_mark; spin_unlock(&con->addrs_lock); srcu_read_unlock(&connections_srcu, idx); return 0; } 
ret = dlm_lowcomms_con_has_addr(con, addr); if (ret) { spin_unlock(&con->addrs_lock); srcu_read_unlock(&connections_srcu, idx); return -EEXIST; } if (con->addr_count >= DLM_MAX_ADDR_COUNT) { spin_unlock(&con->addrs_lock); srcu_read_unlock(&connections_srcu, idx); return -ENOSPC; } memcpy(&con->addr[con->addr_count++], addr, sizeof(*addr)); srcu_read_unlock(&connections_srcu, idx); spin_unlock(&con->addrs_lock); return 0; } /* Data available on socket or listen socket received a connect */ static void lowcomms_data_ready(struct sock *sk) { struct connection *con = sock2con(sk); trace_sk_data_ready(sk); set_bit(CF_RECV_INTR, &con->flags); lowcomms_queue_rwork(con); } static void lowcomms_write_space(struct sock *sk) { struct connection *con = sock2con(sk); clear_bit(SOCK_NOSPACE, &con->sock->flags); spin_lock_bh(&con->writequeue_lock); if (test_and_clear_bit(CF_APP_LIMITED, &con->flags)) { con->sock->sk->sk_write_pending--; clear_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags); } lowcomms_queue_swork(con); spin_unlock_bh(&con->writequeue_lock); } static void lowcomms_state_change(struct sock *sk) { /* SCTP layer is not calling sk_data_ready when the connection * is done, so we catch the signal through here. */ if (sk->sk_shutdown == RCV_SHUTDOWN) lowcomms_data_ready(sk); } static void lowcomms_listen_data_ready(struct sock *sk) { trace_sk_data_ready(sk); queue_work(io_workqueue, &listen_con.rwork); } int dlm_lowcomms_connect_node(int nodeid) { struct connection *con; int idx; idx = srcu_read_lock(&connections_srcu); con = nodeid2con(nodeid, 0); if (WARN_ON_ONCE(!con)) { srcu_read_unlock(&connections_srcu, idx); return -ENOENT; } down_read(&con->sock_lock); if (!con->sock) { spin_lock_bh(&con->writequeue_lock); lowcomms_queue_swork(con); spin_unlock_bh(&con->writequeue_lock); } up_read(&con->sock_lock); srcu_read_unlock(&connections_srcu, idx); cond_resched(); return 0; } int dlm_lowcomms_nodes_set_mark(int nodeid, unsigned int mark) { struct connection *con; int idx; idx = srcu_read_lock(&connections_srcu); con = nodeid2con(nodeid, 0); if (!con) { srcu_read_unlock(&connections_srcu, idx); return -ENOENT; } spin_lock(&con->addrs_lock); con->mark = mark; spin_unlock(&con->addrs_lock); srcu_read_unlock(&connections_srcu, idx); return 0; } static void lowcomms_error_report(struct sock *sk) { struct connection *con = sock2con(sk); struct inet_sock *inet; inet = inet_sk(sk); switch (sk->sk_family) { case AF_INET: printk_ratelimited(KERN_ERR "dlm: node %d: socket error " "sending to node %d at %pI4, dport %d, " "sk_err=%d/%d\n", dlm_our_nodeid(), con->nodeid, &inet->inet_daddr, ntohs(inet->inet_dport), sk->sk_err, READ_ONCE(sk->sk_err_soft)); break; #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: printk_ratelimited(KERN_ERR "dlm: node %d: socket error " "sending to node %d at %pI6c, " "dport %d, sk_err=%d/%d\n", dlm_our_nodeid(), con->nodeid, &sk->sk_v6_daddr, ntohs(inet->inet_dport), sk->sk_err, READ_ONCE(sk->sk_err_soft)); break; #endif default: printk_ratelimited(KERN_ERR "dlm: node %d: socket error " "invalid socket family %d set, " "sk_err=%d/%d\n", dlm_our_nodeid(), sk->sk_family, sk->sk_err, READ_ONCE(sk->sk_err_soft)); break; } dlm_midcomms_unack_msg_resend(con->nodeid); listen_sock.sk_error_report(sk); } static void restore_callbacks(struct sock *sk) { #ifdef CONFIG_LOCKDEP WARN_ON_ONCE(!lockdep_sock_is_held(sk)); #endif sk->sk_user_data = NULL; sk->sk_data_ready = listen_sock.sk_data_ready; sk->sk_state_change = listen_sock.sk_state_change; sk->sk_write_space = listen_sock.sk_write_space; 
sk->sk_error_report = listen_sock.sk_error_report; } /* Make a socket active */ static void add_sock(struct socket *sock, struct connection *con) { struct sock *sk = sock->sk; lock_sock(sk); con->sock = sock; sk->sk_user_data = con; sk->sk_data_ready = lowcomms_data_ready; sk->sk_write_space = lowcomms_write_space; if (dlm_config.ci_protocol == DLM_PROTO_SCTP) sk->sk_state_change = lowcomms_state_change; sk->sk_allocation = GFP_NOFS; sk->sk_use_task_frag = false; sk->sk_error_report = lowcomms_error_report; release_sock(sk); } /* Add the port number to an IPv6 or 4 sockaddr and return the address length */ static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port, int *addr_len) { saddr->ss_family = dlm_local_addr[0].ss_family; if (saddr->ss_family == AF_INET) { struct sockaddr_in *in4_addr = (struct sockaddr_in *)saddr; in4_addr->sin_port = cpu_to_be16(port); *addr_len = sizeof(struct sockaddr_in); memset(&in4_addr->sin_zero, 0, sizeof(in4_addr->sin_zero)); } else { struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr; in6_addr->sin6_port = cpu_to_be16(port); *addr_len = sizeof(struct sockaddr_in6); } memset((char *)saddr + *addr_len, 0, sizeof(struct sockaddr_storage) - *addr_len); } static void dlm_page_release(struct kref *kref) { struct writequeue_entry *e = container_of(kref, struct writequeue_entry, ref); __free_page(e->page); dlm_free_writequeue(e); } static void dlm_msg_release(struct kref *kref) { struct dlm_msg *msg = container_of(kref, struct dlm_msg, ref); kref_put(&msg->entry->ref, dlm_page_release); dlm_free_msg(msg); } static void free_entry(struct writequeue_entry *e) { struct dlm_msg *msg, *tmp; list_for_each_entry_safe(msg, tmp, &e->msgs, list) { if (msg->orig_msg) { msg->orig_msg->retransmit = false; kref_put(&msg->orig_msg->ref, dlm_msg_release); } list_del(&msg->list); kref_put(&msg->ref, dlm_msg_release); } list_del(&e->list); kref_put(&e->ref, dlm_page_release); } static void dlm_close_sock(struct socket **sock) { lock_sock((*sock)->sk); restore_callbacks((*sock)->sk); release_sock((*sock)->sk); sock_release(*sock); *sock = NULL; } static void allow_connection_io(struct connection *con) { if (con->othercon) clear_bit(CF_IO_STOP, &con->othercon->flags); clear_bit(CF_IO_STOP, &con->flags); } static void stop_connection_io(struct connection *con) { if (con->othercon) stop_connection_io(con->othercon); spin_lock_bh(&con->writequeue_lock); set_bit(CF_IO_STOP, &con->flags); spin_unlock_bh(&con->writequeue_lock); down_write(&con->sock_lock); if (con->sock) { lock_sock(con->sock->sk); restore_callbacks(con->sock->sk); release_sock(con->sock->sk); } up_write(&con->sock_lock); cancel_work_sync(&con->swork); cancel_work_sync(&con->rwork); } /* Close a remote connection and tidy up */ static void close_connection(struct connection *con, bool and_other) { struct writequeue_entry *e; if (con->othercon && and_other) close_connection(con->othercon, false); down_write(&con->sock_lock); if (!con->sock) { up_write(&con->sock_lock); return; } dlm_close_sock(&con->sock); /* if we send a writequeue entry only a half way, we drop the * whole entry because reconnection and that we not start of the * middle of a msg which will confuse the other end. * * we can always drop messages because retransmits, but what we * cannot allow is to transmit half messages which may be processed * at the other side. * * our policy is to start on a clean state when disconnects, we don't * know what's send/received on transport layer in this case. 
*/ spin_lock_bh(&con->writequeue_lock); if (!list_empty(&con->writequeue)) { e = list_first_entry(&con->writequeue, struct writequeue_entry, list); if (e->dirty) free_entry(e); } spin_unlock_bh(&con->writequeue_lock); con->rx_leftover = 0; con->retries = 0; clear_bit(CF_APP_LIMITED, &con->flags); clear_bit(CF_RECV_PENDING, &con->flags); clear_bit(CF_SEND_PENDING, &con->flags); up_write(&con->sock_lock); } static void shutdown_connection(struct connection *con, bool and_other) { int ret; if (con->othercon && and_other) shutdown_connection(con->othercon, false); flush_workqueue(io_workqueue); down_read(&con->sock_lock); /* nothing to shutdown */ if (!con->sock) { up_read(&con->sock_lock); return; } ret = kernel_sock_shutdown(con->sock, SHUT_WR); up_read(&con->sock_lock); if (ret) { log_print("Connection %p failed to shutdown: %d will force close", con, ret); goto force_close; } else { ret = wait_event_timeout(con->shutdown_wait, !con->sock, DLM_SHUTDOWN_WAIT_TIMEOUT); if (ret == 0) { log_print("Connection %p shutdown timed out, will force close", con); goto force_close; } } return; force_close: close_connection(con, false); } static struct processqueue_entry *new_processqueue_entry(int nodeid, int buflen) { struct processqueue_entry *pentry; pentry = kmalloc(sizeof(*pentry), GFP_NOFS); if (!pentry) return NULL; pentry->buf = kmalloc(buflen, GFP_NOFS); if (!pentry->buf) { kfree(pentry); return NULL; } pentry->nodeid = nodeid; return pentry; } static void free_processqueue_entry(struct processqueue_entry *pentry) { kfree(pentry->buf); kfree(pentry); } struct dlm_processed_nodes { int nodeid; struct list_head list; }; static void process_dlm_messages(struct work_struct *work) { struct processqueue_entry *pentry; spin_lock(&processqueue_lock); pentry = list_first_entry_or_null(&processqueue, struct processqueue_entry, list); if (WARN_ON_ONCE(!pentry)) { process_dlm_messages_pending = false; spin_unlock(&processqueue_lock); return; } list_del(&pentry->list); spin_unlock(&processqueue_lock); for (;;) { dlm_process_incoming_buffer(pentry->nodeid, pentry->buf, pentry->buflen); free_processqueue_entry(pentry); spin_lock(&processqueue_lock); pentry = list_first_entry_or_null(&processqueue, struct processqueue_entry, list); if (!pentry) { process_dlm_messages_pending = false; spin_unlock(&processqueue_lock); break; } list_del(&pentry->list); spin_unlock(&processqueue_lock); } } /* Data received from remote end */ static int receive_from_sock(struct connection *con, int buflen) { struct processqueue_entry *pentry; int ret, buflen_real; struct msghdr msg; struct kvec iov; pentry = new_processqueue_entry(con->nodeid, buflen); if (!pentry) return DLM_IO_RESCHED; memcpy(pentry->buf, con->rx_leftover_buf, con->rx_leftover); /* calculate new buffer parameter regarding last receive and * possible leftover bytes */ iov.iov_base = pentry->buf + con->rx_leftover; iov.iov_len = buflen - con->rx_leftover; memset(&msg, 0, sizeof(msg)); msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL; clear_bit(CF_RECV_INTR, &con->flags); again: ret = kernel_recvmsg(con->sock, &msg, &iov, 1, iov.iov_len, msg.msg_flags); trace_dlm_recv(con->nodeid, ret); if (ret == -EAGAIN) { lock_sock(con->sock->sk); if (test_and_clear_bit(CF_RECV_INTR, &con->flags)) { release_sock(con->sock->sk); goto again; } clear_bit(CF_RECV_PENDING, &con->flags); release_sock(con->sock->sk); free_processqueue_entry(pentry); return DLM_IO_END; } else if (ret == 0) { /* close will clear CF_RECV_PENDING */ free_processqueue_entry(pentry); return DLM_IO_EOF; } else if 
(ret < 0) { free_processqueue_entry(pentry); return ret; } /* new buflen according readed bytes and leftover from last receive */ buflen_real = ret + con->rx_leftover; ret = dlm_validate_incoming_buffer(con->nodeid, pentry->buf, buflen_real); if (ret < 0) { free_processqueue_entry(pentry); return ret; } pentry->buflen = ret; /* calculate leftover bytes from process and put it into begin of * the receive buffer, so next receive we have the full message * at the start address of the receive buffer. */ con->rx_leftover = buflen_real - ret; memmove(con->rx_leftover_buf, pentry->buf + ret, con->rx_leftover); spin_lock(&processqueue_lock); list_add_tail(&pentry->list, &processqueue); if (!process_dlm_messages_pending) { process_dlm_messages_pending = true; queue_work(process_workqueue, &process_work); } spin_unlock(&processqueue_lock); return DLM_IO_SUCCESS; } /* Listening socket is busy, accept a connection */ static int accept_from_sock(void) { struct sockaddr_storage peeraddr; int len, idx, result, nodeid; struct connection *newcon; struct socket *newsock; unsigned int mark; result = kernel_accept(listen_con.sock, &newsock, O_NONBLOCK); if (result == -EAGAIN) return DLM_IO_END; else if (result < 0) goto accept_err; /* Get the connected socket's peer */ memset(&peeraddr, 0, sizeof(peeraddr)); len = newsock->ops->getname(newsock, (struct sockaddr *)&peeraddr, 2); if (len < 0) { result = -ECONNABORTED; goto accept_err; } /* Get the new node's NODEID */ make_sockaddr(&peeraddr, 0, &len); if (addr_to_nodeid(&peeraddr, &nodeid, &mark)) { switch (peeraddr.ss_family) { case AF_INET: { struct sockaddr_in *sin = (struct sockaddr_in *)&peeraddr; log_print("connect from non cluster IPv4 node %pI4", &sin->sin_addr); break; } #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: { struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&peeraddr; log_print("connect from non cluster IPv6 node %pI6c", &sin6->sin6_addr); break; } #endif default: log_print("invalid family from non cluster node"); break; } sock_release(newsock); return -1; } log_print("got connection from %d", nodeid); /* Check to see if we already have a connection to this node. This * could happen if the two nodes initiate a connection at roughly * the same time and the connections cross on the wire. * In this case we store the incoming one in "othercon" */ idx = srcu_read_lock(&connections_srcu); newcon = nodeid2con(nodeid, 0); if (WARN_ON_ONCE(!newcon)) { srcu_read_unlock(&connections_srcu, idx); result = -ENOENT; goto accept_err; } sock_set_mark(newsock->sk, mark); down_write(&newcon->sock_lock); if (newcon->sock) { struct connection *othercon = newcon->othercon; if (!othercon) { othercon = kzalloc(sizeof(*othercon), GFP_NOFS); if (!othercon) { log_print("failed to allocate incoming socket"); up_write(&newcon->sock_lock); srcu_read_unlock(&connections_srcu, idx); result = -ENOMEM; goto accept_err; } dlm_con_init(othercon, nodeid); lockdep_set_subclass(&othercon->sock_lock, 1); newcon->othercon = othercon; set_bit(CF_IS_OTHERCON, &othercon->flags); } else { /* close other sock con if we have something new */ close_connection(othercon, false); } down_write(&othercon->sock_lock); add_sock(newsock, othercon); /* check if we receved something while adding */ lock_sock(othercon->sock->sk); lowcomms_queue_rwork(othercon); release_sock(othercon->sock->sk); up_write(&othercon->sock_lock); } else { /* accept copies the sk after we've saved the callbacks, so we don't want to save them a second time or comm errors will result in calling sk_error_report recursively. 
*/ add_sock(newsock, newcon); /* check if we receved something while adding */ lock_sock(newcon->sock->sk); lowcomms_queue_rwork(newcon); release_sock(newcon->sock->sk); } up_write(&newcon->sock_lock); srcu_read_unlock(&connections_srcu, idx); return DLM_IO_SUCCESS; accept_err: if (newsock) sock_release(newsock); return result; } /* * writequeue_entry_complete - try to delete and free write queue entry * @e: write queue entry to try to delete * @completed: bytes completed * * writequeue_lock must be held. */ static void writequeue_entry_complete(struct writequeue_entry *e, int completed) { e->offset += completed; e->len -= completed; /* signal that page was half way transmitted */ e->dirty = true; if (e->len == 0 && e->users == 0) free_entry(e); } /* * sctp_bind_addrs - bind a SCTP socket to all our addresses */ static int sctp_bind_addrs(struct socket *sock, uint16_t port) { struct sockaddr_storage localaddr; struct sockaddr *addr = (struct sockaddr *)&localaddr; int i, addr_len, result = 0; for (i = 0; i < dlm_local_count; i++) { memcpy(&localaddr, &dlm_local_addr[i], sizeof(localaddr)); make_sockaddr(&localaddr, port, &addr_len); if (!i) result = kernel_bind(sock, addr, addr_len); else result = sock_bind_add(sock->sk, addr, addr_len); if (result < 0) { log_print("Can't bind to %d addr number %d, %d.\n", port, i + 1, result); break; } } return result; } /* Get local addresses */ static void init_local(void) { struct sockaddr_storage sas; int i; dlm_local_count = 0; for (i = 0; i < DLM_MAX_ADDR_COUNT; i++) { if (dlm_our_addr(&sas, i)) break; memcpy(&dlm_local_addr[dlm_local_count++], &sas, sizeof(sas)); } } static struct writequeue_entry *new_writequeue_entry(struct connection *con) { struct writequeue_entry *entry; entry = dlm_allocate_writequeue(); if (!entry) return NULL; entry->page = alloc_page(GFP_ATOMIC | __GFP_ZERO); if (!entry->page) { dlm_free_writequeue(entry); return NULL; } entry->offset = 0; entry->len = 0; entry->end = 0; entry->dirty = false; entry->con = con; entry->users = 1; kref_init(&entry->ref); return entry; } static struct writequeue_entry *new_wq_entry(struct connection *con, int len, char **ppc, void (*cb)(void *data), void *data) { struct writequeue_entry *e; spin_lock_bh(&con->writequeue_lock); if (!list_empty(&con->writequeue)) { e = list_last_entry(&con->writequeue, struct writequeue_entry, list); if (DLM_WQ_REMAIN_BYTES(e) >= len) { kref_get(&e->ref); *ppc = page_address(e->page) + e->end; if (cb) cb(data); e->end += len; e->users++; goto out; } } e = new_writequeue_entry(con); if (!e) goto out; kref_get(&e->ref); *ppc = page_address(e->page); e->end += len; if (cb) cb(data); list_add_tail(&e->list, &con->writequeue); out: spin_unlock_bh(&con->writequeue_lock); return e; }; static struct dlm_msg *dlm_lowcomms_new_msg_con(struct connection *con, int len, gfp_t allocation, char **ppc, void (*cb)(void *data), void *data) { struct writequeue_entry *e; struct dlm_msg *msg; msg = dlm_allocate_msg(allocation); if (!msg) return NULL; kref_init(&msg->ref); e = new_wq_entry(con, len, ppc, cb, data); if (!e) { dlm_free_msg(msg); return NULL; } msg->retransmit = false; msg->orig_msg = NULL; msg->ppc = *ppc; msg->len = len; msg->entry = e; return msg; } /* avoid false positive for nodes_srcu, unlock happens in * dlm_lowcomms_commit_msg which is a must call if success */ #ifndef __CHECKER__ struct dlm_msg *dlm_lowcomms_new_msg(int nodeid, int len, gfp_t allocation, char **ppc, void (*cb)(void *data), void *data) { struct connection *con; struct dlm_msg *msg; int idx; if 
(len > DLM_MAX_SOCKET_BUFSIZE || len < sizeof(struct dlm_header)) { BUILD_BUG_ON(PAGE_SIZE < DLM_MAX_SOCKET_BUFSIZE); log_print("failed to allocate a buffer of size %d", len); WARN_ON_ONCE(1); return NULL; } idx = srcu_read_lock(&connections_srcu); con = nodeid2con(nodeid, 0); if (WARN_ON_ONCE(!con)) { srcu_read_unlock(&connections_srcu, idx); return NULL; } msg = dlm_lowcomms_new_msg_con(con, len, allocation, ppc, cb, data); if (!msg) { srcu_read_unlock(&connections_srcu, idx); return NULL; } /* for dlm_lowcomms_commit_msg() */ kref_get(&msg->ref); /* we assume if successful commit must called */ msg->idx = idx; return msg; } #endif static void _dlm_lowcomms_commit_msg(struct dlm_msg *msg) { struct writequeue_entry *e = msg->entry; struct connection *con = e->con; int users; spin_lock_bh(&con->writequeue_lock); kref_get(&msg->ref); list_add(&msg->list, &e->msgs); users = --e->users; if (users) goto out; e->len = DLM_WQ_LENGTH_BYTES(e); lowcomms_queue_swork(con); out: spin_unlock_bh(&con->writequeue_lock); return; } /* avoid false positive for nodes_srcu, lock was happen in * dlm_lowcomms_new_msg */ #ifndef __CHECKER__ void dlm_lowcomms_commit_msg(struct dlm_msg *msg) { _dlm_lowcomms_commit_msg(msg); srcu_read_unlock(&connections_srcu, msg->idx); /* because dlm_lowcomms_new_msg() */ kref_put(&msg->ref, dlm_msg_release); } #endif void dlm_lowcomms_put_msg(struct dlm_msg *msg) { kref_put(&msg->ref, dlm_msg_release); } /* does not held connections_srcu, usage lowcomms_error_report only */ int dlm_lowcomms_resend_msg(struct dlm_msg *msg) { struct dlm_msg *msg_resend; char *ppc; if (msg->retransmit) return 1; msg_resend = dlm_lowcomms_new_msg_con(msg->entry->con, msg->len, GFP_ATOMIC, &ppc, NULL, NULL); if (!msg_resend) return -ENOMEM; msg->retransmit = true; kref_get(&msg->ref); msg_resend->orig_msg = msg; memcpy(ppc, msg->ppc, msg->len); _dlm_lowcomms_commit_msg(msg_resend); dlm_lowcomms_put_msg(msg_resend); return 0; } /* Send a message */ static int send_to_sock(struct connection *con) { struct writequeue_entry *e; struct bio_vec bvec; struct msghdr msg = { .msg_flags = MSG_SPLICE_PAGES | MSG_DONTWAIT | MSG_NOSIGNAL, }; int len, offset, ret; spin_lock_bh(&con->writequeue_lock); e = con_next_wq(con); if (!e) { clear_bit(CF_SEND_PENDING, &con->flags); spin_unlock_bh(&con->writequeue_lock); return DLM_IO_END; } len = e->len; offset = e->offset; WARN_ON_ONCE(len == 0 && e->users == 0); spin_unlock_bh(&con->writequeue_lock); bvec_set_page(&bvec, e->page, len, offset); iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len); ret = sock_sendmsg(con->sock, &msg); trace_dlm_send(con->nodeid, ret); if (ret == -EAGAIN || ret == 0) { lock_sock(con->sock->sk); spin_lock_bh(&con->writequeue_lock); if (test_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags) && !test_and_set_bit(CF_APP_LIMITED, &con->flags)) { /* Notify TCP that we're limited by the * application window size. 
*/ set_bit(SOCK_NOSPACE, &con->sock->sk->sk_socket->flags); con->sock->sk->sk_write_pending++; clear_bit(CF_SEND_PENDING, &con->flags); spin_unlock_bh(&con->writequeue_lock); release_sock(con->sock->sk); /* wait for write_space() event */ return DLM_IO_END; } spin_unlock_bh(&con->writequeue_lock); release_sock(con->sock->sk); return DLM_IO_RESCHED; } else if (ret < 0) { return ret; } spin_lock_bh(&con->writequeue_lock); writequeue_entry_complete(e, ret); spin_unlock_bh(&con->writequeue_lock); return DLM_IO_SUCCESS; } static void clean_one_writequeue(struct connection *con) { struct writequeue_entry *e, *safe; spin_lock_bh(&con->writequeue_lock); list_for_each_entry_safe(e, safe, &con->writequeue, list) { free_entry(e); } spin_unlock_bh(&con->writequeue_lock); } static void connection_release(struct rcu_head *rcu) { struct connection *con = container_of(rcu, struct connection, rcu); WARN_ON_ONCE(!list_empty(&con->writequeue)); WARN_ON_ONCE(con->sock); kfree(con); } /* Called from recovery when it knows that a node has left the cluster */ int dlm_lowcomms_close(int nodeid) { struct connection *con; int idx; log_print("closing connection to node %d", nodeid); idx = srcu_read_lock(&connections_srcu); con = nodeid2con(nodeid, 0); if (WARN_ON_ONCE(!con)) { srcu_read_unlock(&connections_srcu, idx); return -ENOENT; } stop_connection_io(con); log_print("io handling for node: %d stopped", nodeid); close_connection(con, true); spin_lock(&connections_lock); hlist_del_rcu(&con->list); spin_unlock(&connections_lock); clean_one_writequeue(con); call_srcu(&connections_srcu, &con->rcu, connection_release); if (con->othercon) { clean_one_writequeue(con->othercon); call_srcu(&connections_srcu, &con->othercon->rcu, connection_release); } srcu_read_unlock(&connections_srcu, idx); /* for debugging we print when we are done to compare with other * messages in between. 
This function need to be correctly synchronized * with io handling */ log_print("closing connection to node %d done", nodeid); return 0; } /* Receive worker function */ static void process_recv_sockets(struct work_struct *work) { struct connection *con = container_of(work, struct connection, rwork); int ret, buflen; down_read(&con->sock_lock); if (!con->sock) { up_read(&con->sock_lock); return; } buflen = READ_ONCE(dlm_config.ci_buffer_size); do { ret = receive_from_sock(con, buflen); } while (ret == DLM_IO_SUCCESS); up_read(&con->sock_lock); switch (ret) { case DLM_IO_END: /* CF_RECV_PENDING cleared */ break; case DLM_IO_EOF: close_connection(con, false); wake_up(&con->shutdown_wait); /* CF_RECV_PENDING cleared */ break; case DLM_IO_RESCHED: cond_resched(); queue_work(io_workqueue, &con->rwork); /* CF_RECV_PENDING not cleared */ break; default: if (ret < 0) { if (test_bit(CF_IS_OTHERCON, &con->flags)) { close_connection(con, false); } else { spin_lock_bh(&con->writequeue_lock); lowcomms_queue_swork(con); spin_unlock_bh(&con->writequeue_lock); } /* CF_RECV_PENDING cleared for othercon * we trigger send queue if not already done * and process_send_sockets will handle it */ break; } WARN_ON_ONCE(1); break; } } static void process_listen_recv_socket(struct work_struct *work) { int ret; if (WARN_ON_ONCE(!listen_con.sock)) return; do { ret = accept_from_sock(); } while (ret == DLM_IO_SUCCESS); if (ret < 0) log_print("critical error accepting connection: %d", ret); } static int dlm_connect(struct connection *con) { struct sockaddr_storage addr; int result, addr_len; struct socket *sock; unsigned int mark; memset(&addr, 0, sizeof(addr)); result = nodeid_to_addr(con->nodeid, &addr, NULL, dlm_proto_ops->try_new_addr, &mark); if (result < 0) { log_print("no address for nodeid %d", con->nodeid); return result; } /* Create a socket to communicate with */ result = sock_create_kern(&init_net, dlm_local_addr[0].ss_family, SOCK_STREAM, dlm_proto_ops->proto, &sock); if (result < 0) return result; sock_set_mark(sock->sk, mark); dlm_proto_ops->sockopts(sock); result = dlm_proto_ops->bind(sock); if (result < 0) { sock_release(sock); return result; } add_sock(sock, con); log_print_ratelimited("connecting to %d", con->nodeid); make_sockaddr(&addr, dlm_config.ci_tcp_port, &addr_len); result = dlm_proto_ops->connect(con, sock, (struct sockaddr *)&addr, addr_len); switch (result) { case -EINPROGRESS: /* not an error */ fallthrough; case 0: break; default: if (result < 0) dlm_close_sock(&con->sock); break; } return result; } /* Send worker function */ static void process_send_sockets(struct work_struct *work) { struct connection *con = container_of(work, struct connection, swork); int ret; WARN_ON_ONCE(test_bit(CF_IS_OTHERCON, &con->flags)); down_read(&con->sock_lock); if (!con->sock) { up_read(&con->sock_lock); down_write(&con->sock_lock); if (!con->sock) { ret = dlm_connect(con); switch (ret) { case 0: break; case -EINPROGRESS: /* avoid spamming resched on connection * we might can switch to a state_change * event based mechanism if established */ msleep(100); break; default: /* CF_SEND_PENDING not cleared */ up_write(&con->sock_lock); log_print("connect to node %d try %d error %d", con->nodeid, con->retries++, ret); msleep(1000); /* For now we try forever to reconnect. In * future we should send a event to cluster * manager to fence itself after certain amount * of retries. 
*/ queue_work(io_workqueue, &con->swork); return; } } downgrade_write(&con->sock_lock); } do { ret = send_to_sock(con); } while (ret == DLM_IO_SUCCESS); up_read(&con->sock_lock); switch (ret) { case DLM_IO_END: /* CF_SEND_PENDING cleared */ break; case DLM_IO_RESCHED: /* CF_SEND_PENDING not cleared */ cond_resched(); queue_work(io_workqueue, &con->swork); break; default: if (ret < 0) { close_connection(con, false); /* CF_SEND_PENDING cleared */ spin_lock_bh(&con->writequeue_lock); lowcomms_queue_swork(con); spin_unlock_bh(&con->writequeue_lock); break; } WARN_ON_ONCE(1); break; } } static void work_stop(void) { if (io_workqueue) { destroy_workqueue(io_workqueue); io_workqueue = NULL; } if (process_workqueue) { destroy_workqueue(process_workqueue); process_workqueue = NULL; } } static int work_start(void) { io_workqueue = alloc_workqueue("dlm_io", WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 0); if (!io_workqueue) { log_print("can't start dlm_io"); return -ENOMEM; } /* ordered dlm message process queue, * should be converted to a tasklet */ process_workqueue = alloc_ordered_workqueue("dlm_process", WQ_HIGHPRI | WQ_MEM_RECLAIM); if (!process_workqueue) { log_print("can't start dlm_process"); destroy_workqueue(io_workqueue); io_workqueue = NULL; return -ENOMEM; } return 0; } void dlm_lowcomms_shutdown(void) { struct connection *con; int i, idx; /* stop lowcomms_listen_data_ready calls */ lock_sock(listen_con.sock->sk); listen_con.sock->sk->sk_data_ready = listen_sock.sk_data_ready; release_sock(listen_con.sock->sk); cancel_work_sync(&listen_con.rwork); dlm_close_sock(&listen_con.sock); idx = srcu_read_lock(&connections_srcu); for (i = 0; i < CONN_HASH_SIZE; i++) { hlist_for_each_entry_rcu(con, &connection_hash[i], list) { shutdown_connection(con, true); stop_connection_io(con); flush_workqueue(process_workqueue); close_connection(con, true); clean_one_writequeue(con); if (con->othercon) clean_one_writequeue(con->othercon); allow_connection_io(con); } } srcu_read_unlock(&connections_srcu, idx); } void dlm_lowcomms_stop(void) { work_stop(); dlm_proto_ops = NULL; } static int dlm_listen_for_all(void) { struct socket *sock; int result; log_print("Using %s for communications", dlm_proto_ops->name); result = dlm_proto_ops->listen_validate(); if (result < 0) return result; result = sock_create_kern(&init_net, dlm_local_addr[0].ss_family, SOCK_STREAM, dlm_proto_ops->proto, &sock); if (result < 0) { log_print("Can't create comms socket: %d", result); return result; } sock_set_mark(sock->sk, dlm_config.ci_mark); dlm_proto_ops->listen_sockopts(sock); result = dlm_proto_ops->listen_bind(sock); if (result < 0) goto out; lock_sock(sock->sk); listen_sock.sk_data_ready = sock->sk->sk_data_ready; listen_sock.sk_write_space = sock->sk->sk_write_space; listen_sock.sk_error_report = sock->sk->sk_error_report; listen_sock.sk_state_change = sock->sk->sk_state_change; listen_con.sock = sock; sock->sk->sk_allocation = GFP_NOFS; sock->sk->sk_use_task_frag = false; sock->sk->sk_data_ready = lowcomms_listen_data_ready; release_sock(sock->sk); result = sock->ops->listen(sock, 128); if (result < 0) { dlm_close_sock(&listen_con.sock); return result; } return 0; out: sock_release(sock); return result; } static int dlm_tcp_bind(struct socket *sock) { struct sockaddr_storage src_addr; int result, addr_len; /* Bind to our cluster-known address connecting to avoid * routing problems. 
*/ memcpy(&src_addr, &dlm_local_addr[0], sizeof(src_addr)); make_sockaddr(&src_addr, 0, &addr_len); result = sock->ops->bind(sock, (struct sockaddr *)&src_addr, addr_len); if (result < 0) { /* This *may* not indicate a critical error */ log_print("could not bind for connect: %d", result); } return 0; } static int dlm_tcp_connect(struct connection *con, struct socket *sock, struct sockaddr *addr, int addr_len) { return sock->ops->connect(sock, addr, addr_len, O_NONBLOCK); } static int dlm_tcp_listen_validate(void) { /* We don't support multi-homed hosts */ if (dlm_local_count > 1) { log_print("TCP protocol can't handle multi-homed hosts, try SCTP"); return -EINVAL; } return 0; } static void dlm_tcp_sockopts(struct socket *sock) { /* Turn off Nagle's algorithm */ tcp_sock_set_nodelay(sock->sk); } static void dlm_tcp_listen_sockopts(struct socket *sock) { dlm_tcp_sockopts(sock); sock_set_reuseaddr(sock->sk); } static int dlm_tcp_listen_bind(struct socket *sock) { int addr_len; /* Bind to our port */ make_sockaddr(&dlm_local_addr[0], dlm_config.ci_tcp_port, &addr_len); return sock->ops->bind(sock, (struct sockaddr *)&dlm_local_addr[0], addr_len); } static const struct dlm_proto_ops dlm_tcp_ops = { .name = "TCP", .proto = IPPROTO_TCP, .connect = dlm_tcp_connect, .sockopts = dlm_tcp_sockopts, .bind = dlm_tcp_bind, .listen_validate = dlm_tcp_listen_validate, .listen_sockopts = dlm_tcp_listen_sockopts, .listen_bind = dlm_tcp_listen_bind, }; static int dlm_sctp_bind(struct socket *sock) { return sctp_bind_addrs(sock, 0); } static int dlm_sctp_connect(struct connection *con, struct socket *sock, struct sockaddr *addr, int addr_len) { int ret; /* * Make sock->ops->connect() function return in specified time, * since O_NONBLOCK argument in connect() function does not work here, * then, we should restore the default value of this attribute. 
*/ sock_set_sndtimeo(sock->sk, 5); ret = sock->ops->connect(sock, addr, addr_len, 0); sock_set_sndtimeo(sock->sk, 0); return ret; } static int dlm_sctp_listen_validate(void) { if (!IS_ENABLED(CONFIG_IP_SCTP)) { log_print("SCTP is not enabled by this kernel"); return -EOPNOTSUPP; } request_module("sctp"); return 0; } static int dlm_sctp_bind_listen(struct socket *sock) { return sctp_bind_addrs(sock, dlm_config.ci_tcp_port); } static void dlm_sctp_sockopts(struct socket *sock) { /* Turn off Nagle's algorithm */ sctp_sock_set_nodelay(sock->sk); sock_set_rcvbuf(sock->sk, NEEDED_RMEM); } static const struct dlm_proto_ops dlm_sctp_ops = { .name = "SCTP", .proto = IPPROTO_SCTP, .try_new_addr = true, .connect = dlm_sctp_connect, .sockopts = dlm_sctp_sockopts, .bind = dlm_sctp_bind, .listen_validate = dlm_sctp_listen_validate, .listen_sockopts = dlm_sctp_sockopts, .listen_bind = dlm_sctp_bind_listen, }; int dlm_lowcomms_start(void) { int error; init_local(); if (!dlm_local_count) { error = -ENOTCONN; log_print("no local IP address has been set"); goto fail; } error = work_start(); if (error) goto fail; /* Start listening */ switch (dlm_config.ci_protocol) { case DLM_PROTO_TCP: dlm_proto_ops = &dlm_tcp_ops; break; case DLM_PROTO_SCTP: dlm_proto_ops = &dlm_sctp_ops; break; default: log_print("Invalid protocol identifier %d set", dlm_config.ci_protocol); error = -EINVAL; goto fail_proto_ops; } error = dlm_listen_for_all(); if (error) goto fail_listen; return 0; fail_listen: dlm_proto_ops = NULL; fail_proto_ops: work_stop(); fail: return error; } void dlm_lowcomms_init(void) { int i; for (i = 0; i < CONN_HASH_SIZE; i++) INIT_HLIST_HEAD(&connection_hash[i]); INIT_WORK(&listen_con.rwork, process_listen_recv_socket); } void dlm_lowcomms_exit(void) { struct connection *con; int i, idx; idx = srcu_read_lock(&connections_srcu); for (i = 0; i < CONN_HASH_SIZE; i++) { hlist_for_each_entry_rcu(con, &connection_hash[i], list) { spin_lock(&connections_lock); hlist_del_rcu(&con->list); spin_unlock(&connections_lock); if (con->othercon) call_srcu(&connections_srcu, &con->othercon->rcu, connection_release); call_srcu(&connections_srcu, &con->rcu, connection_release); } } srcu_read_unlock(&connections_srcu, idx); }
linux-master
fs/dlm/lowcomms.c
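The receive and send workers in lowcomms.c above drain their socket in a loop while the step function keeps returning DLM_IO_SUCCESS, then act once on the final status (end, reschedule, or error). The following standalone userspace sketch shows that drain-and-dispatch pattern in isolation; step_fn, the IO_* enum and the fake chunk counter are made-up illustrative names, not DLM code.

/* Toy userspace sketch (not kernel code) of the drain loop used by
 * process_recv_sockets()/process_send_sockets(): keep stepping while
 * progress is reported, then dispatch once on the final status. */
#include <stdio.h>

enum io_status {
	IO_SUCCESS,	/* progress was made, call again */
	IO_END,		/* nothing left to do */
	IO_RESCHED,	/* back off and requeue the work */
	IO_ERROR,	/* tear the connection down */
};

/* pretend "socket" that yields three chunks of data and then runs dry */
static int fake_chunks_left = 3;

static enum io_status step_fn(void)
{
	if (fake_chunks_left > 0) {
		fake_chunks_left--;
		return IO_SUCCESS;
	}
	return IO_END;
}

int main(void)
{
	enum io_status ret;

	do {
		ret = step_fn();
	} while (ret == IO_SUCCESS);

	switch (ret) {
	case IO_END:
		puts("drained, worker goes idle");
		break;
	case IO_RESCHED:
		puts("would requeue the work item");
		break;
	default:
		puts("would close the connection");
		break;
	}
	return 0;
}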
// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
**
**
*******************************************************************************
******************************************************************************/

#include <linux/module.h>
#include "dlm_internal.h"
#include "lockspace.h"
#include "lock.h"
#include "user.h"
#include "memory.h"
#include "config.h"
#include "midcomms.h"

#define CREATE_TRACE_POINTS
#include <trace/events/dlm.h>

static int __init init_dlm(void)
{
	int error;

	error = dlm_memory_init();
	if (error)
		goto out;

	dlm_midcomms_init();

	error = dlm_lockspace_init();
	if (error)
		goto out_mem;

	error = dlm_config_init();
	if (error)
		goto out_lockspace;

	dlm_register_debugfs();

	error = dlm_user_init();
	if (error)
		goto out_debug;

	error = dlm_plock_init();
	if (error)
		goto out_user;

	printk("DLM installed\n");

	return 0;

 out_user:
	dlm_user_exit();
 out_debug:
	dlm_unregister_debugfs();
	dlm_config_exit();
 out_lockspace:
	dlm_lockspace_exit();
 out_mem:
	dlm_midcomms_exit();
	dlm_memory_exit();
 out:
	return error;
}

static void __exit exit_dlm(void)
{
	dlm_plock_exit();
	dlm_user_exit();
	dlm_config_exit();
	dlm_lockspace_exit();
	dlm_midcomms_exit();
	dlm_unregister_debugfs();
	dlm_memory_exit();
}

module_init(init_dlm);
module_exit(exit_dlm);

MODULE_DESCRIPTION("Distributed Lock Manager");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");

EXPORT_SYMBOL_GPL(dlm_new_lockspace);
EXPORT_SYMBOL_GPL(dlm_release_lockspace);
EXPORT_SYMBOL_GPL(dlm_lock);
EXPORT_SYMBOL_GPL(dlm_unlock);
linux-master
fs/dlm/main.c
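init_dlm() above unwinds partial initialization with a chain of goto labels, one per successfully brought-up subsystem, so a failure at step N tears down exactly steps N-1..1 in reverse order. The sketch below shows the same pattern reduced to three layers; it is plain userspace C, and the step_*/undo_* helpers are placeholders, not DLM functions.

/* Minimal sketch of the goto-unwind init/teardown style used by init_dlm().
 * step_c() is wired to fail so the unwind path is exercised. */
#include <stdio.h>

static int step_a(void)  { puts("a up");   return 0; }
static int step_b(void)  { puts("b up");   return 0; }
static int step_c(void)  { puts("c up");   return -1; }	/* simulate failure */
static void undo_a(void) { puts("a down"); }
static void undo_b(void) { puts("b down"); }

static int init_all(void)
{
	int error;

	error = step_a();
	if (error)
		goto out;
	error = step_b();
	if (error)
		goto out_a;
	error = step_c();
	if (error)
		goto out_b;
	return 0;

 out_b:
	undo_b();
 out_a:
	undo_a();
 out:
	return error;
}

int main(void)
{
	/* prints "a up", "b up", "c up", then "b down", "a down" */
	return init_all() ? 1 : 0;
}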
// SPDX-License-Identifier: GPL-2.0-only /****************************************************************************** ******************************************************************************* ** ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. ** Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved. ** ** ******************************************************************************* ******************************************************************************/ #include <linux/kernel.h> #include <linux/init.h> #include <linux/configfs.h> #include <linux/slab.h> #include <linux/in.h> #include <linux/in6.h> #include <linux/dlmconstants.h> #include <net/ipv6.h> #include <net/sock.h> #include "config.h" #include "midcomms.h" #include "lowcomms.h" /* * /config/dlm/<cluster>/spaces/<space>/nodes/<node>/nodeid * /config/dlm/<cluster>/spaces/<space>/nodes/<node>/weight * /config/dlm/<cluster>/comms/<comm>/nodeid * /config/dlm/<cluster>/comms/<comm>/local * /config/dlm/<cluster>/comms/<comm>/addr (write only) * /config/dlm/<cluster>/comms/<comm>/addr_list (read only) * The <cluster> level is useless, but I haven't figured out how to avoid it. */ static struct config_group *space_list; static struct config_group *comm_list; static struct dlm_comm *local_comm; static uint32_t dlm_comm_count; struct dlm_clusters; struct dlm_cluster; struct dlm_spaces; struct dlm_space; struct dlm_comms; struct dlm_comm; struct dlm_nodes; struct dlm_node; static struct config_group *make_cluster(struct config_group *, const char *); static void drop_cluster(struct config_group *, struct config_item *); static void release_cluster(struct config_item *); static struct config_group *make_space(struct config_group *, const char *); static void drop_space(struct config_group *, struct config_item *); static void release_space(struct config_item *); static struct config_item *make_comm(struct config_group *, const char *); static void drop_comm(struct config_group *, struct config_item *); static void release_comm(struct config_item *); static struct config_item *make_node(struct config_group *, const char *); static void drop_node(struct config_group *, struct config_item *); static void release_node(struct config_item *); static struct configfs_attribute *comm_attrs[]; static struct configfs_attribute *node_attrs[]; struct dlm_cluster { struct config_group group; unsigned int cl_tcp_port; unsigned int cl_buffer_size; unsigned int cl_rsbtbl_size; unsigned int cl_recover_timer; unsigned int cl_toss_secs; unsigned int cl_scan_secs; unsigned int cl_log_debug; unsigned int cl_log_info; unsigned int cl_protocol; unsigned int cl_mark; unsigned int cl_new_rsb_count; unsigned int cl_recover_callbacks; char cl_cluster_name[DLM_LOCKSPACE_LEN]; struct dlm_spaces *sps; struct dlm_comms *cms; }; static struct dlm_cluster *config_item_to_cluster(struct config_item *i) { return i ? 
container_of(to_config_group(i), struct dlm_cluster, group) : NULL; } enum { CLUSTER_ATTR_TCP_PORT = 0, CLUSTER_ATTR_BUFFER_SIZE, CLUSTER_ATTR_RSBTBL_SIZE, CLUSTER_ATTR_RECOVER_TIMER, CLUSTER_ATTR_TOSS_SECS, CLUSTER_ATTR_SCAN_SECS, CLUSTER_ATTR_LOG_DEBUG, CLUSTER_ATTR_LOG_INFO, CLUSTER_ATTR_PROTOCOL, CLUSTER_ATTR_MARK, CLUSTER_ATTR_NEW_RSB_COUNT, CLUSTER_ATTR_RECOVER_CALLBACKS, CLUSTER_ATTR_CLUSTER_NAME, }; static ssize_t cluster_cluster_name_show(struct config_item *item, char *buf) { struct dlm_cluster *cl = config_item_to_cluster(item); return sprintf(buf, "%s\n", cl->cl_cluster_name); } static ssize_t cluster_cluster_name_store(struct config_item *item, const char *buf, size_t len) { struct dlm_cluster *cl = config_item_to_cluster(item); strscpy(dlm_config.ci_cluster_name, buf, sizeof(dlm_config.ci_cluster_name)); strscpy(cl->cl_cluster_name, buf, sizeof(cl->cl_cluster_name)); return len; } CONFIGFS_ATTR(cluster_, cluster_name); static ssize_t cluster_set(struct dlm_cluster *cl, unsigned int *cl_field, int *info_field, int (*check_cb)(unsigned int x), const char *buf, size_t len) { unsigned int x; int rc; if (!capable(CAP_SYS_ADMIN)) return -EPERM; rc = kstrtouint(buf, 0, &x); if (rc) return rc; if (check_cb) { rc = check_cb(x); if (rc) return rc; } *cl_field = x; *info_field = x; return len; } #define CLUSTER_ATTR(name, check_cb) \ static ssize_t cluster_##name##_store(struct config_item *item, \ const char *buf, size_t len) \ { \ struct dlm_cluster *cl = config_item_to_cluster(item); \ return cluster_set(cl, &cl->cl_##name, &dlm_config.ci_##name, \ check_cb, buf, len); \ } \ static ssize_t cluster_##name##_show(struct config_item *item, char *buf) \ { \ struct dlm_cluster *cl = config_item_to_cluster(item); \ return snprintf(buf, PAGE_SIZE, "%u\n", cl->cl_##name); \ } \ CONFIGFS_ATTR(cluster_, name); static int dlm_check_protocol_and_dlm_running(unsigned int x) { switch (x) { case 0: /* TCP */ break; case 1: /* SCTP */ break; default: return -EINVAL; } if (dlm_lowcomms_is_running()) return -EBUSY; return 0; } static int dlm_check_zero_and_dlm_running(unsigned int x) { if (!x) return -EINVAL; if (dlm_lowcomms_is_running()) return -EBUSY; return 0; } static int dlm_check_zero(unsigned int x) { if (!x) return -EINVAL; return 0; } static int dlm_check_buffer_size(unsigned int x) { if (x < DLM_MAX_SOCKET_BUFSIZE) return -EINVAL; return 0; } CLUSTER_ATTR(tcp_port, dlm_check_zero_and_dlm_running); CLUSTER_ATTR(buffer_size, dlm_check_buffer_size); CLUSTER_ATTR(rsbtbl_size, dlm_check_zero); CLUSTER_ATTR(recover_timer, dlm_check_zero); CLUSTER_ATTR(toss_secs, dlm_check_zero); CLUSTER_ATTR(scan_secs, dlm_check_zero); CLUSTER_ATTR(log_debug, NULL); CLUSTER_ATTR(log_info, NULL); CLUSTER_ATTR(protocol, dlm_check_protocol_and_dlm_running); CLUSTER_ATTR(mark, NULL); CLUSTER_ATTR(new_rsb_count, NULL); CLUSTER_ATTR(recover_callbacks, NULL); static struct configfs_attribute *cluster_attrs[] = { [CLUSTER_ATTR_TCP_PORT] = &cluster_attr_tcp_port, [CLUSTER_ATTR_BUFFER_SIZE] = &cluster_attr_buffer_size, [CLUSTER_ATTR_RSBTBL_SIZE] = &cluster_attr_rsbtbl_size, [CLUSTER_ATTR_RECOVER_TIMER] = &cluster_attr_recover_timer, [CLUSTER_ATTR_TOSS_SECS] = &cluster_attr_toss_secs, [CLUSTER_ATTR_SCAN_SECS] = &cluster_attr_scan_secs, [CLUSTER_ATTR_LOG_DEBUG] = &cluster_attr_log_debug, [CLUSTER_ATTR_LOG_INFO] = &cluster_attr_log_info, [CLUSTER_ATTR_PROTOCOL] = &cluster_attr_protocol, [CLUSTER_ATTR_MARK] = &cluster_attr_mark, [CLUSTER_ATTR_NEW_RSB_COUNT] = &cluster_attr_new_rsb_count, [CLUSTER_ATTR_RECOVER_CALLBACKS] = 
&cluster_attr_recover_callbacks, [CLUSTER_ATTR_CLUSTER_NAME] = &cluster_attr_cluster_name, NULL, }; enum { COMM_ATTR_NODEID = 0, COMM_ATTR_LOCAL, COMM_ATTR_ADDR, COMM_ATTR_ADDR_LIST, COMM_ATTR_MARK, }; enum { NODE_ATTR_NODEID = 0, NODE_ATTR_WEIGHT, }; struct dlm_clusters { struct configfs_subsystem subsys; }; struct dlm_spaces { struct config_group ss_group; }; struct dlm_space { struct config_group group; struct list_head members; struct mutex members_lock; int members_count; struct dlm_nodes *nds; }; struct dlm_comms { struct config_group cs_group; }; struct dlm_comm { struct config_item item; int seq; int nodeid; int local; int addr_count; unsigned int mark; struct sockaddr_storage *addr[DLM_MAX_ADDR_COUNT]; }; struct dlm_nodes { struct config_group ns_group; }; struct dlm_node { struct config_item item; struct list_head list; /* space->members */ int nodeid; int weight; int new; int comm_seq; /* copy of cm->seq when nd->nodeid is set */ }; static struct configfs_group_operations clusters_ops = { .make_group = make_cluster, .drop_item = drop_cluster, }; static struct configfs_item_operations cluster_ops = { .release = release_cluster, }; static struct configfs_group_operations spaces_ops = { .make_group = make_space, .drop_item = drop_space, }; static struct configfs_item_operations space_ops = { .release = release_space, }; static struct configfs_group_operations comms_ops = { .make_item = make_comm, .drop_item = drop_comm, }; static struct configfs_item_operations comm_ops = { .release = release_comm, }; static struct configfs_group_operations nodes_ops = { .make_item = make_node, .drop_item = drop_node, }; static struct configfs_item_operations node_ops = { .release = release_node, }; static const struct config_item_type clusters_type = { .ct_group_ops = &clusters_ops, .ct_owner = THIS_MODULE, }; static const struct config_item_type cluster_type = { .ct_item_ops = &cluster_ops, .ct_attrs = cluster_attrs, .ct_owner = THIS_MODULE, }; static const struct config_item_type spaces_type = { .ct_group_ops = &spaces_ops, .ct_owner = THIS_MODULE, }; static const struct config_item_type space_type = { .ct_item_ops = &space_ops, .ct_owner = THIS_MODULE, }; static const struct config_item_type comms_type = { .ct_group_ops = &comms_ops, .ct_owner = THIS_MODULE, }; static const struct config_item_type comm_type = { .ct_item_ops = &comm_ops, .ct_attrs = comm_attrs, .ct_owner = THIS_MODULE, }; static const struct config_item_type nodes_type = { .ct_group_ops = &nodes_ops, .ct_owner = THIS_MODULE, }; static const struct config_item_type node_type = { .ct_item_ops = &node_ops, .ct_attrs = node_attrs, .ct_owner = THIS_MODULE, }; static struct dlm_space *config_item_to_space(struct config_item *i) { return i ? container_of(to_config_group(i), struct dlm_space, group) : NULL; } static struct dlm_comm *config_item_to_comm(struct config_item *i) { return i ? container_of(i, struct dlm_comm, item) : NULL; } static struct dlm_node *config_item_to_node(struct config_item *i) { return i ? 
container_of(i, struct dlm_node, item) : NULL; } static struct config_group *make_cluster(struct config_group *g, const char *name) { struct dlm_cluster *cl = NULL; struct dlm_spaces *sps = NULL; struct dlm_comms *cms = NULL; cl = kzalloc(sizeof(struct dlm_cluster), GFP_NOFS); sps = kzalloc(sizeof(struct dlm_spaces), GFP_NOFS); cms = kzalloc(sizeof(struct dlm_comms), GFP_NOFS); if (!cl || !sps || !cms) goto fail; cl->sps = sps; cl->cms = cms; config_group_init_type_name(&cl->group, name, &cluster_type); config_group_init_type_name(&sps->ss_group, "spaces", &spaces_type); config_group_init_type_name(&cms->cs_group, "comms", &comms_type); configfs_add_default_group(&sps->ss_group, &cl->group); configfs_add_default_group(&cms->cs_group, &cl->group); cl->cl_tcp_port = dlm_config.ci_tcp_port; cl->cl_buffer_size = dlm_config.ci_buffer_size; cl->cl_rsbtbl_size = dlm_config.ci_rsbtbl_size; cl->cl_recover_timer = dlm_config.ci_recover_timer; cl->cl_toss_secs = dlm_config.ci_toss_secs; cl->cl_scan_secs = dlm_config.ci_scan_secs; cl->cl_log_debug = dlm_config.ci_log_debug; cl->cl_log_info = dlm_config.ci_log_info; cl->cl_protocol = dlm_config.ci_protocol; cl->cl_new_rsb_count = dlm_config.ci_new_rsb_count; cl->cl_recover_callbacks = dlm_config.ci_recover_callbacks; memcpy(cl->cl_cluster_name, dlm_config.ci_cluster_name, DLM_LOCKSPACE_LEN); space_list = &sps->ss_group; comm_list = &cms->cs_group; return &cl->group; fail: kfree(cl); kfree(sps); kfree(cms); return ERR_PTR(-ENOMEM); } static void drop_cluster(struct config_group *g, struct config_item *i) { struct dlm_cluster *cl = config_item_to_cluster(i); configfs_remove_default_groups(&cl->group); space_list = NULL; comm_list = NULL; config_item_put(i); } static void release_cluster(struct config_item *i) { struct dlm_cluster *cl = config_item_to_cluster(i); kfree(cl->sps); kfree(cl->cms); kfree(cl); } static struct config_group *make_space(struct config_group *g, const char *name) { struct dlm_space *sp = NULL; struct dlm_nodes *nds = NULL; sp = kzalloc(sizeof(struct dlm_space), GFP_NOFS); nds = kzalloc(sizeof(struct dlm_nodes), GFP_NOFS); if (!sp || !nds) goto fail; config_group_init_type_name(&sp->group, name, &space_type); config_group_init_type_name(&nds->ns_group, "nodes", &nodes_type); configfs_add_default_group(&nds->ns_group, &sp->group); INIT_LIST_HEAD(&sp->members); mutex_init(&sp->members_lock); sp->members_count = 0; sp->nds = nds; return &sp->group; fail: kfree(sp); kfree(nds); return ERR_PTR(-ENOMEM); } static void drop_space(struct config_group *g, struct config_item *i) { struct dlm_space *sp = config_item_to_space(i); /* assert list_empty(&sp->members) */ configfs_remove_default_groups(&sp->group); config_item_put(i); } static void release_space(struct config_item *i) { struct dlm_space *sp = config_item_to_space(i); kfree(sp->nds); kfree(sp); } static struct config_item *make_comm(struct config_group *g, const char *name) { struct dlm_comm *cm; cm = kzalloc(sizeof(struct dlm_comm), GFP_NOFS); if (!cm) return ERR_PTR(-ENOMEM); config_item_init_type_name(&cm->item, name, &comm_type); cm->seq = dlm_comm_count++; if (!cm->seq) cm->seq = dlm_comm_count++; cm->nodeid = -1; cm->local = 0; cm->addr_count = 0; cm->mark = 0; return &cm->item; } static void drop_comm(struct config_group *g, struct config_item *i) { struct dlm_comm *cm = config_item_to_comm(i); if (local_comm == cm) local_comm = NULL; dlm_midcomms_close(cm->nodeid); while (cm->addr_count--) kfree(cm->addr[cm->addr_count]); config_item_put(i); } static void release_comm(struct 
config_item *i) { struct dlm_comm *cm = config_item_to_comm(i); kfree(cm); } static struct config_item *make_node(struct config_group *g, const char *name) { struct dlm_space *sp = config_item_to_space(g->cg_item.ci_parent); struct dlm_node *nd; nd = kzalloc(sizeof(struct dlm_node), GFP_NOFS); if (!nd) return ERR_PTR(-ENOMEM); config_item_init_type_name(&nd->item, name, &node_type); nd->nodeid = -1; nd->weight = 1; /* default weight of 1 if none is set */ nd->new = 1; /* set to 0 once it's been read by dlm_nodeid_list() */ mutex_lock(&sp->members_lock); list_add(&nd->list, &sp->members); sp->members_count++; mutex_unlock(&sp->members_lock); return &nd->item; } static void drop_node(struct config_group *g, struct config_item *i) { struct dlm_space *sp = config_item_to_space(g->cg_item.ci_parent); struct dlm_node *nd = config_item_to_node(i); mutex_lock(&sp->members_lock); list_del(&nd->list); sp->members_count--; mutex_unlock(&sp->members_lock); config_item_put(i); } static void release_node(struct config_item *i) { struct dlm_node *nd = config_item_to_node(i); kfree(nd); } static struct dlm_clusters clusters_root = { .subsys = { .su_group = { .cg_item = { .ci_namebuf = "dlm", .ci_type = &clusters_type, }, }, }, }; int __init dlm_config_init(void) { config_group_init(&clusters_root.subsys.su_group); mutex_init(&clusters_root.subsys.su_mutex); return configfs_register_subsystem(&clusters_root.subsys); } void dlm_config_exit(void) { configfs_unregister_subsystem(&clusters_root.subsys); } /* * Functions for user space to read/write attributes */ static ssize_t comm_nodeid_show(struct config_item *item, char *buf) { return sprintf(buf, "%d\n", config_item_to_comm(item)->nodeid); } static ssize_t comm_nodeid_store(struct config_item *item, const char *buf, size_t len) { int rc = kstrtoint(buf, 0, &config_item_to_comm(item)->nodeid); if (rc) return rc; return len; } static ssize_t comm_local_show(struct config_item *item, char *buf) { return sprintf(buf, "%d\n", config_item_to_comm(item)->local); } static ssize_t comm_local_store(struct config_item *item, const char *buf, size_t len) { struct dlm_comm *cm = config_item_to_comm(item); int rc = kstrtoint(buf, 0, &cm->local); if (rc) return rc; if (cm->local && !local_comm) local_comm = cm; return len; } static ssize_t comm_addr_store(struct config_item *item, const char *buf, size_t len) { struct dlm_comm *cm = config_item_to_comm(item); struct sockaddr_storage *addr; int rv; if (len != sizeof(struct sockaddr_storage)) return -EINVAL; if (cm->addr_count >= DLM_MAX_ADDR_COUNT) return -ENOSPC; addr = kzalloc(sizeof(*addr), GFP_NOFS); if (!addr) return -ENOMEM; memcpy(addr, buf, len); rv = dlm_midcomms_addr(cm->nodeid, addr, len); if (rv) { kfree(addr); return rv; } cm->addr[cm->addr_count++] = addr; return len; } static ssize_t comm_addr_list_show(struct config_item *item, char *buf) { struct dlm_comm *cm = config_item_to_comm(item); ssize_t s; ssize_t allowance; int i; struct sockaddr_storage *addr; struct sockaddr_in *addr_in; struct sockaddr_in6 *addr_in6; /* Taken from ip6_addr_string() defined in lib/vsprintf.c */ char buf0[sizeof("AF_INET6 xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255\n")]; /* Derived from SIMPLE_ATTR_SIZE of fs/configfs/file.c */ allowance = 4096; buf[0] = '\0'; for (i = 0; i < cm->addr_count; i++) { addr = cm->addr[i]; switch(addr->ss_family) { case AF_INET: addr_in = (struct sockaddr_in *)addr; s = sprintf(buf0, "AF_INET %pI4\n", &addr_in->sin_addr.s_addr); break; case AF_INET6: addr_in6 = (struct sockaddr_in6 *)addr; s = 
sprintf(buf0, "AF_INET6 %pI6\n", &addr_in6->sin6_addr); break; default: s = sprintf(buf0, "%s\n", "<UNKNOWN>"); break; } allowance -= s; if (allowance >= 0) strcat(buf, buf0); else { allowance += s; break; } } return 4096 - allowance; } static ssize_t comm_mark_show(struct config_item *item, char *buf) { return sprintf(buf, "%u\n", config_item_to_comm(item)->mark); } static ssize_t comm_mark_store(struct config_item *item, const char *buf, size_t len) { struct dlm_comm *comm; unsigned int mark; int rc; rc = kstrtouint(buf, 0, &mark); if (rc) return rc; if (mark == 0) mark = dlm_config.ci_mark; comm = config_item_to_comm(item); rc = dlm_lowcomms_nodes_set_mark(comm->nodeid, mark); if (rc) return rc; comm->mark = mark; return len; } CONFIGFS_ATTR(comm_, nodeid); CONFIGFS_ATTR(comm_, local); CONFIGFS_ATTR(comm_, mark); CONFIGFS_ATTR_WO(comm_, addr); CONFIGFS_ATTR_RO(comm_, addr_list); static struct configfs_attribute *comm_attrs[] = { [COMM_ATTR_NODEID] = &comm_attr_nodeid, [COMM_ATTR_LOCAL] = &comm_attr_local, [COMM_ATTR_ADDR] = &comm_attr_addr, [COMM_ATTR_ADDR_LIST] = &comm_attr_addr_list, [COMM_ATTR_MARK] = &comm_attr_mark, NULL, }; static ssize_t node_nodeid_show(struct config_item *item, char *buf) { return sprintf(buf, "%d\n", config_item_to_node(item)->nodeid); } static ssize_t node_nodeid_store(struct config_item *item, const char *buf, size_t len) { struct dlm_node *nd = config_item_to_node(item); uint32_t seq = 0; int rc = kstrtoint(buf, 0, &nd->nodeid); if (rc) return rc; dlm_comm_seq(nd->nodeid, &seq); nd->comm_seq = seq; return len; } static ssize_t node_weight_show(struct config_item *item, char *buf) { return sprintf(buf, "%d\n", config_item_to_node(item)->weight); } static ssize_t node_weight_store(struct config_item *item, const char *buf, size_t len) { int rc = kstrtoint(buf, 0, &config_item_to_node(item)->weight); if (rc) return rc; return len; } CONFIGFS_ATTR(node_, nodeid); CONFIGFS_ATTR(node_, weight); static struct configfs_attribute *node_attrs[] = { [NODE_ATTR_NODEID] = &node_attr_nodeid, [NODE_ATTR_WEIGHT] = &node_attr_weight, NULL, }; /* * Functions for the dlm to get the info that's been configured */ static struct dlm_space *get_space(char *name) { struct config_item *i; if (!space_list) return NULL; mutex_lock(&space_list->cg_subsys->su_mutex); i = config_group_find_item(space_list, name); mutex_unlock(&space_list->cg_subsys->su_mutex); return config_item_to_space(i); } static void put_space(struct dlm_space *sp) { config_item_put(&sp->group.cg_item); } static struct dlm_comm *get_comm(int nodeid) { struct config_item *i; struct dlm_comm *cm = NULL; int found = 0; if (!comm_list) return NULL; mutex_lock(&clusters_root.subsys.su_mutex); list_for_each_entry(i, &comm_list->cg_children, ci_entry) { cm = config_item_to_comm(i); if (cm->nodeid != nodeid) continue; found = 1; config_item_get(i); break; } mutex_unlock(&clusters_root.subsys.su_mutex); if (!found) cm = NULL; return cm; } static void put_comm(struct dlm_comm *cm) { config_item_put(&cm->item); } /* caller must free mem */ int dlm_config_nodes(char *lsname, struct dlm_config_node **nodes_out, int *count_out) { struct dlm_space *sp; struct dlm_node *nd; struct dlm_config_node *nodes, *node; int rv, count; sp = get_space(lsname); if (!sp) return -EEXIST; mutex_lock(&sp->members_lock); if (!sp->members_count) { rv = -EINVAL; printk(KERN_ERR "dlm: zero members_count\n"); goto out; } count = sp->members_count; nodes = kcalloc(count, sizeof(struct dlm_config_node), GFP_NOFS); if (!nodes) { rv = -ENOMEM; goto out; } 
node = nodes; list_for_each_entry(nd, &sp->members, list) { node->nodeid = nd->nodeid; node->weight = nd->weight; node->new = nd->new; node->comm_seq = nd->comm_seq; node++; nd->new = 0; } *count_out = count; *nodes_out = nodes; rv = 0; out: mutex_unlock(&sp->members_lock); put_space(sp); return rv; } int dlm_comm_seq(int nodeid, uint32_t *seq) { struct dlm_comm *cm = get_comm(nodeid); if (!cm) return -EEXIST; *seq = cm->seq; put_comm(cm); return 0; } int dlm_our_nodeid(void) { return local_comm ? local_comm->nodeid : 0; } /* num 0 is first addr, num 1 is second addr */ int dlm_our_addr(struct sockaddr_storage *addr, int num) { if (!local_comm) return -1; if (num + 1 > local_comm->addr_count) return -1; memcpy(addr, local_comm->addr[num], sizeof(*addr)); return 0; } /* Config file defaults */ #define DEFAULT_TCP_PORT 21064 #define DEFAULT_RSBTBL_SIZE 1024 #define DEFAULT_RECOVER_TIMER 5 #define DEFAULT_TOSS_SECS 10 #define DEFAULT_SCAN_SECS 5 #define DEFAULT_LOG_DEBUG 0 #define DEFAULT_LOG_INFO 1 #define DEFAULT_PROTOCOL DLM_PROTO_TCP #define DEFAULT_MARK 0 #define DEFAULT_NEW_RSB_COUNT 128 #define DEFAULT_RECOVER_CALLBACKS 0 #define DEFAULT_CLUSTER_NAME "" struct dlm_config_info dlm_config = { .ci_tcp_port = DEFAULT_TCP_PORT, .ci_buffer_size = DLM_MAX_SOCKET_BUFSIZE, .ci_rsbtbl_size = DEFAULT_RSBTBL_SIZE, .ci_recover_timer = DEFAULT_RECOVER_TIMER, .ci_toss_secs = DEFAULT_TOSS_SECS, .ci_scan_secs = DEFAULT_SCAN_SECS, .ci_log_debug = DEFAULT_LOG_DEBUG, .ci_log_info = DEFAULT_LOG_INFO, .ci_protocol = DEFAULT_PROTOCOL, .ci_mark = DEFAULT_MARK, .ci_new_rsb_count = DEFAULT_NEW_RSB_COUNT, .ci_recover_callbacks = DEFAULT_RECOVER_CALLBACKS, .ci_cluster_name = DEFAULT_CLUSTER_NAME };
linux-master
fs/dlm/config.c
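The configfs tree implemented by config.c is normally populated from userspace by a cluster manager such as dlm_controld. The hedged sketch below assumes the conventional /sys/kernel/config mount point and uses made-up names ("mycluster", comm directory "1"); it relies only on plain POSIX mkdir/open/write, and the attribute writes land in comm_nodeid_store() and comm_local_store() above.

/* Illustrative userspace sketch (assumed paths and names, minimal error
 * handling): create a comm item and set its nodeid/local attributes. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

static int write_attr(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0 || write(fd, val, strlen(val)) < 0) {
		perror(path);
		if (fd >= 0)
			close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	/* mkdir of the cluster and comm items triggers make_cluster() and
	 * make_comm() in config.c (errors ignored here for brevity) */
	mkdir("/sys/kernel/config/dlm/mycluster", 0755);
	mkdir("/sys/kernel/config/dlm/mycluster/comms/1", 0755);

	/* parsed by kstrtoint() in comm_nodeid_store()/comm_local_store() */
	write_attr("/sys/kernel/config/dlm/mycluster/comms/1/nodeid", "1");
	write_attr("/sys/kernel/config/dlm/mycluster/comms/1/local", "1");
	return 0;
}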
// SPDX-License-Identifier: GPL-2.0-only /****************************************************************************** ******************************************************************************* ** ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. ** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved. ** ** ******************************************************************************* ******************************************************************************/ #include "dlm_internal.h" #include "lockspace.h" #include "dir.h" #include "config.h" #include "ast.h" #include "memory.h" #include "rcom.h" #include "lock.h" #include "lowcomms.h" #include "member.h" #include "recover.h" /* * Recovery waiting routines: these functions wait for a particular reply from * a remote node, or for the remote node to report a certain status. They need * to abort if the lockspace is stopped indicating a node has failed (perhaps * the one being waited for). */ /* * Wait until given function returns non-zero or lockspace is stopped * (LS_RECOVERY_STOP set due to failure of a node in ls_nodes). When another * function thinks it could have completed the waited-on task, they should wake * up ls_wait_general to get an immediate response rather than waiting for the * timeout. This uses a timeout so it can check periodically if the wait * should abort due to node failure (which doesn't cause a wake_up). * This should only be called by the dlm_recoverd thread. */ int dlm_wait_function(struct dlm_ls *ls, int (*testfn) (struct dlm_ls *ls)) { int error = 0; int rv; while (1) { rv = wait_event_timeout(ls->ls_wait_general, testfn(ls) || dlm_recovery_stopped(ls), dlm_config.ci_recover_timer * HZ); if (rv) break; if (test_bit(LSFL_RCOM_WAIT, &ls->ls_flags)) { log_debug(ls, "dlm_wait_function timed out"); return -ETIMEDOUT; } } if (dlm_recovery_stopped(ls)) { log_debug(ls, "dlm_wait_function aborted"); error = -EINTR; } return error; } /* * An efficient way for all nodes to wait for all others to have a certain * status. The node with the lowest nodeid polls all the others for their * status (wait_status_all) and all the others poll the node with the low id * for its accumulated result (wait_status_low). When all nodes have set * status flag X, then status flag X_ALL will be set on the low nodeid. 
*/ uint32_t dlm_recover_status(struct dlm_ls *ls) { uint32_t status; spin_lock(&ls->ls_recover_lock); status = ls->ls_recover_status; spin_unlock(&ls->ls_recover_lock); return status; } static void _set_recover_status(struct dlm_ls *ls, uint32_t status) { ls->ls_recover_status |= status; } void dlm_set_recover_status(struct dlm_ls *ls, uint32_t status) { spin_lock(&ls->ls_recover_lock); _set_recover_status(ls, status); spin_unlock(&ls->ls_recover_lock); } static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status, int save_slots, uint64_t seq) { struct dlm_rcom *rc = ls->ls_recover_buf; struct dlm_member *memb; int error = 0, delay; list_for_each_entry(memb, &ls->ls_nodes, list) { delay = 0; for (;;) { if (dlm_recovery_stopped(ls)) { error = -EINTR; goto out; } error = dlm_rcom_status(ls, memb->nodeid, 0, seq); if (error) goto out; if (save_slots) dlm_slot_save(ls, rc, memb); if (le32_to_cpu(rc->rc_result) & wait_status) break; if (delay < 1000) delay += 20; msleep(delay); } } out: return error; } static int wait_status_low(struct dlm_ls *ls, uint32_t wait_status, uint32_t status_flags, uint64_t seq) { struct dlm_rcom *rc = ls->ls_recover_buf; int error = 0, delay = 0, nodeid = ls->ls_low_nodeid; for (;;) { if (dlm_recovery_stopped(ls)) { error = -EINTR; goto out; } error = dlm_rcom_status(ls, nodeid, status_flags, seq); if (error) break; if (le32_to_cpu(rc->rc_result) & wait_status) break; if (delay < 1000) delay += 20; msleep(delay); } out: return error; } static int wait_status(struct dlm_ls *ls, uint32_t status, uint64_t seq) { uint32_t status_all = status << 1; int error; if (ls->ls_low_nodeid == dlm_our_nodeid()) { error = wait_status_all(ls, status, 0, seq); if (!error) dlm_set_recover_status(ls, status_all); } else error = wait_status_low(ls, status_all, 0, seq); return error; } int dlm_recover_members_wait(struct dlm_ls *ls, uint64_t seq) { struct dlm_member *memb; struct dlm_slot *slots; int num_slots, slots_size; int error, rv; uint32_t gen; list_for_each_entry(memb, &ls->ls_nodes, list) { memb->slot = -1; memb->generation = 0; } if (ls->ls_low_nodeid == dlm_our_nodeid()) { error = wait_status_all(ls, DLM_RS_NODES, 1, seq); if (error) goto out; /* slots array is sparse, slots_size may be > num_slots */ rv = dlm_slots_assign(ls, &num_slots, &slots_size, &slots, &gen); if (!rv) { spin_lock(&ls->ls_recover_lock); _set_recover_status(ls, DLM_RS_NODES_ALL); ls->ls_num_slots = num_slots; ls->ls_slots_size = slots_size; ls->ls_slots = slots; ls->ls_generation = gen; spin_unlock(&ls->ls_recover_lock); } else { dlm_set_recover_status(ls, DLM_RS_NODES_ALL); } } else { error = wait_status_low(ls, DLM_RS_NODES_ALL, DLM_RSF_NEED_SLOTS, seq); if (error) goto out; dlm_slots_copy_in(ls); } out: return error; } int dlm_recover_directory_wait(struct dlm_ls *ls, uint64_t seq) { return wait_status(ls, DLM_RS_DIR, seq); } int dlm_recover_locks_wait(struct dlm_ls *ls, uint64_t seq) { return wait_status(ls, DLM_RS_LOCKS, seq); } int dlm_recover_done_wait(struct dlm_ls *ls, uint64_t seq) { return wait_status(ls, DLM_RS_DONE, seq); } /* * The recover_list contains all the rsb's for which we've requested the new * master nodeid. As replies are returned from the resource directories the * rsb's are removed from the list. When the list is empty we're done. * * The recover_list is later similarly used for all rsb's for which we've sent * new lkb's and need to receive new corresponding lkid's. 
* * We use the address of the rsb struct as a simple local identifier for the * rsb so we can match an rcom reply with the rsb it was sent for. */ static int recover_list_empty(struct dlm_ls *ls) { int empty; spin_lock(&ls->ls_recover_list_lock); empty = list_empty(&ls->ls_recover_list); spin_unlock(&ls->ls_recover_list_lock); return empty; } static void recover_list_add(struct dlm_rsb *r) { struct dlm_ls *ls = r->res_ls; spin_lock(&ls->ls_recover_list_lock); if (list_empty(&r->res_recover_list)) { list_add_tail(&r->res_recover_list, &ls->ls_recover_list); ls->ls_recover_list_count++; dlm_hold_rsb(r); } spin_unlock(&ls->ls_recover_list_lock); } static void recover_list_del(struct dlm_rsb *r) { struct dlm_ls *ls = r->res_ls; spin_lock(&ls->ls_recover_list_lock); list_del_init(&r->res_recover_list); ls->ls_recover_list_count--; spin_unlock(&ls->ls_recover_list_lock); dlm_put_rsb(r); } static void recover_list_clear(struct dlm_ls *ls) { struct dlm_rsb *r, *s; spin_lock(&ls->ls_recover_list_lock); list_for_each_entry_safe(r, s, &ls->ls_recover_list, res_recover_list) { list_del_init(&r->res_recover_list); r->res_recover_locks_count = 0; dlm_put_rsb(r); ls->ls_recover_list_count--; } if (ls->ls_recover_list_count != 0) { log_error(ls, "warning: recover_list_count %d", ls->ls_recover_list_count); ls->ls_recover_list_count = 0; } spin_unlock(&ls->ls_recover_list_lock); } static int recover_idr_empty(struct dlm_ls *ls) { int empty = 1; spin_lock(&ls->ls_recover_idr_lock); if (ls->ls_recover_list_count) empty = 0; spin_unlock(&ls->ls_recover_idr_lock); return empty; } static int recover_idr_add(struct dlm_rsb *r) { struct dlm_ls *ls = r->res_ls; int rv; idr_preload(GFP_NOFS); spin_lock(&ls->ls_recover_idr_lock); if (r->res_id) { rv = -1; goto out_unlock; } rv = idr_alloc(&ls->ls_recover_idr, r, 1, 0, GFP_NOWAIT); if (rv < 0) goto out_unlock; r->res_id = rv; ls->ls_recover_list_count++; dlm_hold_rsb(r); rv = 0; out_unlock: spin_unlock(&ls->ls_recover_idr_lock); idr_preload_end(); return rv; } static void recover_idr_del(struct dlm_rsb *r) { struct dlm_ls *ls = r->res_ls; spin_lock(&ls->ls_recover_idr_lock); idr_remove(&ls->ls_recover_idr, r->res_id); r->res_id = 0; ls->ls_recover_list_count--; spin_unlock(&ls->ls_recover_idr_lock); dlm_put_rsb(r); } static struct dlm_rsb *recover_idr_find(struct dlm_ls *ls, uint64_t id) { struct dlm_rsb *r; spin_lock(&ls->ls_recover_idr_lock); r = idr_find(&ls->ls_recover_idr, (int)id); spin_unlock(&ls->ls_recover_idr_lock); return r; } static void recover_idr_clear(struct dlm_ls *ls) { struct dlm_rsb *r; int id; spin_lock(&ls->ls_recover_idr_lock); idr_for_each_entry(&ls->ls_recover_idr, r, id) { idr_remove(&ls->ls_recover_idr, id); r->res_id = 0; r->res_recover_locks_count = 0; ls->ls_recover_list_count--; dlm_put_rsb(r); } if (ls->ls_recover_list_count != 0) { log_error(ls, "warning: recover_list_count %d", ls->ls_recover_list_count); ls->ls_recover_list_count = 0; } spin_unlock(&ls->ls_recover_idr_lock); } /* Master recovery: find new master node for rsb's that were mastered on nodes that have been removed. dlm_recover_masters recover_master dlm_send_rcom_lookup -> receive_rcom_lookup dlm_dir_lookup receive_rcom_lookup_reply <- dlm_recover_master_reply set_new_master set_master_lkbs set_lock_master */ /* * Set the lock master for all LKBs in a lock queue * If we are the new master of the rsb, we may have received new * MSTCPY locks from other nodes already which we need to ignore * when setting the new nodeid. 
*/ static void set_lock_master(struct list_head *queue, int nodeid) { struct dlm_lkb *lkb; list_for_each_entry(lkb, queue, lkb_statequeue) { if (!test_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags)) { lkb->lkb_nodeid = nodeid; lkb->lkb_remid = 0; } } } static void set_master_lkbs(struct dlm_rsb *r) { set_lock_master(&r->res_grantqueue, r->res_nodeid); set_lock_master(&r->res_convertqueue, r->res_nodeid); set_lock_master(&r->res_waitqueue, r->res_nodeid); } /* * Propagate the new master nodeid to locks * The NEW_MASTER flag tells dlm_recover_locks() which rsb's to consider. * The NEW_MASTER2 flag tells recover_lvb() and recover_grant() which * rsb's to consider. */ static void set_new_master(struct dlm_rsb *r) { set_master_lkbs(r); rsb_set_flag(r, RSB_NEW_MASTER); rsb_set_flag(r, RSB_NEW_MASTER2); } /* * We do async lookups on rsb's that need new masters. The rsb's * waiting for a lookup reply are kept on the recover_list. * * Another node recovering the master may have sent us a rcom lookup, * and our dlm_master_lookup() set it as the new master, along with * NEW_MASTER so that we'll recover it here (this implies dir_nodeid * equals our_nodeid below). */ static int recover_master(struct dlm_rsb *r, unsigned int *count, uint64_t seq) { struct dlm_ls *ls = r->res_ls; int our_nodeid, dir_nodeid; int is_removed = 0; int error; if (is_master(r)) return 0; is_removed = dlm_is_removed(ls, r->res_nodeid); if (!is_removed && !rsb_flag(r, RSB_NEW_MASTER)) return 0; our_nodeid = dlm_our_nodeid(); dir_nodeid = dlm_dir_nodeid(r); if (dir_nodeid == our_nodeid) { if (is_removed) { r->res_master_nodeid = our_nodeid; r->res_nodeid = 0; } /* set master of lkbs to ourself when is_removed, or to another new master which we set along with NEW_MASTER in dlm_master_lookup */ set_new_master(r); error = 0; } else { recover_idr_add(r); error = dlm_send_rcom_lookup(r, dir_nodeid, seq); } (*count)++; return error; } /* * All MSTCPY locks are purged and rebuilt, even if the master stayed the same. * This is necessary because recovery can be started, aborted and restarted, * causing the master nodeid to briefly change during the aborted recovery, and * change back to the original value in the second recovery. The MSTCPY locks * may or may not have been purged during the aborted recovery. Another node * with an outstanding request in waiters list and a request reply saved in the * requestqueue, cannot know whether it should ignore the reply and resend the * request, or accept the reply and complete the request. It must do the * former if the remote node purged MSTCPY locks, and it must do the later if * the remote node did not. This is solved by always purging MSTCPY locks, in * which case, the request reply would always be ignored and the request * resent. */ static int recover_master_static(struct dlm_rsb *r, unsigned int *count) { int dir_nodeid = dlm_dir_nodeid(r); int new_master = dir_nodeid; if (dir_nodeid == dlm_our_nodeid()) new_master = 0; dlm_purge_mstcpy_locks(r); r->res_master_nodeid = dir_nodeid; r->res_nodeid = new_master; set_new_master(r); (*count)++; return 0; } /* * Go through local root resources and for each rsb which has a master which * has departed, get the new master nodeid from the directory. The dir will * assign mastery to the first node to look up the new master. That means * we'll discover in this lookup if we're the new master of any rsb's. * * We fire off all the dir lookup requests individually and asynchronously to * the correct dir node. 
*/ int dlm_recover_masters(struct dlm_ls *ls, uint64_t seq) { struct dlm_rsb *r; unsigned int total = 0; unsigned int count = 0; int nodir = dlm_no_directory(ls); int error; log_rinfo(ls, "dlm_recover_masters"); down_read(&ls->ls_root_sem); list_for_each_entry(r, &ls->ls_root_list, res_root_list) { if (dlm_recovery_stopped(ls)) { up_read(&ls->ls_root_sem); error = -EINTR; goto out; } lock_rsb(r); if (nodir) error = recover_master_static(r, &count); else error = recover_master(r, &count, seq); unlock_rsb(r); cond_resched(); total++; if (error) { up_read(&ls->ls_root_sem); goto out; } } up_read(&ls->ls_root_sem); log_rinfo(ls, "dlm_recover_masters %u of %u", count, total); error = dlm_wait_function(ls, &recover_idr_empty); out: if (error) recover_idr_clear(ls); return error; } int dlm_recover_master_reply(struct dlm_ls *ls, const struct dlm_rcom *rc) { struct dlm_rsb *r; int ret_nodeid, new_master; r = recover_idr_find(ls, le64_to_cpu(rc->rc_id)); if (!r) { log_error(ls, "dlm_recover_master_reply no id %llx", (unsigned long long)le64_to_cpu(rc->rc_id)); goto out; } ret_nodeid = le32_to_cpu(rc->rc_result); if (ret_nodeid == dlm_our_nodeid()) new_master = 0; else new_master = ret_nodeid; lock_rsb(r); r->res_master_nodeid = ret_nodeid; r->res_nodeid = new_master; set_new_master(r); unlock_rsb(r); recover_idr_del(r); if (recover_idr_empty(ls)) wake_up(&ls->ls_wait_general); out: return 0; } /* Lock recovery: rebuild the process-copy locks we hold on a remastered rsb on the new rsb master. dlm_recover_locks recover_locks recover_locks_queue dlm_send_rcom_lock -> receive_rcom_lock dlm_recover_master_copy receive_rcom_lock_reply <- dlm_recover_process_copy */ /* * keep a count of the number of lkb's we send to the new master; when we get * an equal number of replies then recovery for the rsb is done */ static int recover_locks_queue(struct dlm_rsb *r, struct list_head *head, uint64_t seq) { struct dlm_lkb *lkb; int error = 0; list_for_each_entry(lkb, head, lkb_statequeue) { error = dlm_send_rcom_lock(r, lkb, seq); if (error) break; r->res_recover_locks_count++; } return error; } static int recover_locks(struct dlm_rsb *r, uint64_t seq) { int error = 0; lock_rsb(r); DLM_ASSERT(!r->res_recover_locks_count, dlm_dump_rsb(r);); error = recover_locks_queue(r, &r->res_grantqueue, seq); if (error) goto out; error = recover_locks_queue(r, &r->res_convertqueue, seq); if (error) goto out; error = recover_locks_queue(r, &r->res_waitqueue, seq); if (error) goto out; if (r->res_recover_locks_count) recover_list_add(r); else rsb_clear_flag(r, RSB_NEW_MASTER); out: unlock_rsb(r); return error; } int dlm_recover_locks(struct dlm_ls *ls, uint64_t seq) { struct dlm_rsb *r; int error, count = 0; down_read(&ls->ls_root_sem); list_for_each_entry(r, &ls->ls_root_list, res_root_list) { if (is_master(r)) { rsb_clear_flag(r, RSB_NEW_MASTER); continue; } if (!rsb_flag(r, RSB_NEW_MASTER)) continue; if (dlm_recovery_stopped(ls)) { error = -EINTR; up_read(&ls->ls_root_sem); goto out; } error = recover_locks(r, seq); if (error) { up_read(&ls->ls_root_sem); goto out; } count += r->res_recover_locks_count; } up_read(&ls->ls_root_sem); log_rinfo(ls, "dlm_recover_locks %d out", count); error = dlm_wait_function(ls, &recover_list_empty); out: if (error) recover_list_clear(ls); return error; } void dlm_recovered_lock(struct dlm_rsb *r) { DLM_ASSERT(rsb_flag(r, RSB_NEW_MASTER), dlm_dump_rsb(r);); r->res_recover_locks_count--; if (!r->res_recover_locks_count) { rsb_clear_flag(r, RSB_NEW_MASTER); recover_list_del(r); } if 
(recover_list_empty(r->res_ls)) wake_up(&r->res_ls->ls_wait_general); } /* * The lvb needs to be recovered on all master rsb's. This includes setting * the VALNOTVALID flag if necessary, and determining the correct lvb contents * based on the lvb's of the locks held on the rsb. * * RSB_VALNOTVALID is set in two cases: * * 1. we are master, but not new, and we purged an EX/PW lock held by a * failed node (in dlm_recover_purge which set RSB_RECOVER_LVB_INVAL) * * 2. we are a new master, and there are only NL/CR locks left. * (We could probably improve this by only invaliding in this way when * the previous master left uncleanly. VMS docs mention that.) * * The LVB contents are only considered for changing when this is a new master * of the rsb (NEW_MASTER2). Then, the rsb's lvb is taken from any lkb with * mode > CR. If no lkb's exist with mode above CR, the lvb contents are taken * from the lkb with the largest lvb sequence number. */ static void recover_lvb(struct dlm_rsb *r) { struct dlm_lkb *big_lkb = NULL, *iter, *high_lkb = NULL; uint32_t high_seq = 0; int lock_lvb_exists = 0; int lvblen = r->res_ls->ls_lvblen; if (!rsb_flag(r, RSB_NEW_MASTER2) && rsb_flag(r, RSB_RECOVER_LVB_INVAL)) { /* case 1 above */ rsb_set_flag(r, RSB_VALNOTVALID); return; } if (!rsb_flag(r, RSB_NEW_MASTER2)) return; /* we are the new master, so figure out if VALNOTVALID should be set, and set the rsb lvb from the best lkb available. */ list_for_each_entry(iter, &r->res_grantqueue, lkb_statequeue) { if (!(iter->lkb_exflags & DLM_LKF_VALBLK)) continue; lock_lvb_exists = 1; if (iter->lkb_grmode > DLM_LOCK_CR) { big_lkb = iter; goto setflag; } if (((int)iter->lkb_lvbseq - (int)high_seq) >= 0) { high_lkb = iter; high_seq = iter->lkb_lvbseq; } } list_for_each_entry(iter, &r->res_convertqueue, lkb_statequeue) { if (!(iter->lkb_exflags & DLM_LKF_VALBLK)) continue; lock_lvb_exists = 1; if (iter->lkb_grmode > DLM_LOCK_CR) { big_lkb = iter; goto setflag; } if (((int)iter->lkb_lvbseq - (int)high_seq) >= 0) { high_lkb = iter; high_seq = iter->lkb_lvbseq; } } setflag: if (!lock_lvb_exists) goto out; /* lvb is invalidated if only NL/CR locks remain */ if (!big_lkb) rsb_set_flag(r, RSB_VALNOTVALID); if (!r->res_lvbptr) { r->res_lvbptr = dlm_allocate_lvb(r->res_ls); if (!r->res_lvbptr) goto out; } if (big_lkb) { r->res_lvbseq = big_lkb->lkb_lvbseq; memcpy(r->res_lvbptr, big_lkb->lkb_lvbptr, lvblen); } else if (high_lkb) { r->res_lvbseq = high_lkb->lkb_lvbseq; memcpy(r->res_lvbptr, high_lkb->lkb_lvbptr, lvblen); } else { r->res_lvbseq = 0; memset(r->res_lvbptr, 0, lvblen); } out: return; } /* All master rsb's flagged RECOVER_CONVERT need to be looked at. The locks converting PR->CW or CW->PR need to have their lkb_grmode set. 
*/ static void recover_conversion(struct dlm_rsb *r) { struct dlm_ls *ls = r->res_ls; struct dlm_lkb *lkb; int grmode = -1; list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) { if (lkb->lkb_grmode == DLM_LOCK_PR || lkb->lkb_grmode == DLM_LOCK_CW) { grmode = lkb->lkb_grmode; break; } } list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) { if (lkb->lkb_grmode != DLM_LOCK_IV) continue; if (grmode == -1) { log_debug(ls, "recover_conversion %x set gr to rq %d", lkb->lkb_id, lkb->lkb_rqmode); lkb->lkb_grmode = lkb->lkb_rqmode; } else { log_debug(ls, "recover_conversion %x set gr %d", lkb->lkb_id, grmode); lkb->lkb_grmode = grmode; } } } /* We've become the new master for this rsb and waiting/converting locks may need to be granted in dlm_recover_grant() due to locks that may have existed from a removed node. */ static void recover_grant(struct dlm_rsb *r) { if (!list_empty(&r->res_waitqueue) || !list_empty(&r->res_convertqueue)) rsb_set_flag(r, RSB_RECOVER_GRANT); } void dlm_recover_rsbs(struct dlm_ls *ls) { struct dlm_rsb *r; unsigned int count = 0; down_read(&ls->ls_root_sem); list_for_each_entry(r, &ls->ls_root_list, res_root_list) { lock_rsb(r); if (is_master(r)) { if (rsb_flag(r, RSB_RECOVER_CONVERT)) recover_conversion(r); /* recover lvb before granting locks so the updated lvb/VALNOTVALID is presented in the completion */ recover_lvb(r); if (rsb_flag(r, RSB_NEW_MASTER2)) recover_grant(r); count++; } else { rsb_clear_flag(r, RSB_VALNOTVALID); } rsb_clear_flag(r, RSB_RECOVER_CONVERT); rsb_clear_flag(r, RSB_RECOVER_LVB_INVAL); rsb_clear_flag(r, RSB_NEW_MASTER2); unlock_rsb(r); } up_read(&ls->ls_root_sem); if (count) log_rinfo(ls, "dlm_recover_rsbs %d done", count); } /* Create a single list of all root rsb's to be used during recovery */ int dlm_create_root_list(struct dlm_ls *ls) { struct rb_node *n; struct dlm_rsb *r; int i, error = 0; down_write(&ls->ls_root_sem); if (!list_empty(&ls->ls_root_list)) { log_error(ls, "root list not empty"); error = -EINVAL; goto out; } for (i = 0; i < ls->ls_rsbtbl_size; i++) { spin_lock(&ls->ls_rsbtbl[i].lock); for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) { r = rb_entry(n, struct dlm_rsb, res_hashnode); list_add(&r->res_root_list, &ls->ls_root_list); dlm_hold_rsb(r); } if (!RB_EMPTY_ROOT(&ls->ls_rsbtbl[i].toss)) log_error(ls, "dlm_create_root_list toss not empty"); spin_unlock(&ls->ls_rsbtbl[i].lock); } out: up_write(&ls->ls_root_sem); return error; } void dlm_release_root_list(struct dlm_ls *ls) { struct dlm_rsb *r, *safe; down_write(&ls->ls_root_sem); list_for_each_entry_safe(r, safe, &ls->ls_root_list, res_root_list) { list_del_init(&r->res_root_list); dlm_put_rsb(r); } up_write(&ls->ls_root_sem); } void dlm_clear_toss(struct dlm_ls *ls) { struct rb_node *n, *next; struct dlm_rsb *r; unsigned int count = 0; int i; for (i = 0; i < ls->ls_rsbtbl_size; i++) { spin_lock(&ls->ls_rsbtbl[i].lock); for (n = rb_first(&ls->ls_rsbtbl[i].toss); n; n = next) { next = rb_next(n); r = rb_entry(n, struct dlm_rsb, res_hashnode); rb_erase(n, &ls->ls_rsbtbl[i].toss); dlm_free_rsb(r); count++; } spin_unlock(&ls->ls_rsbtbl[i].lock); } if (count) log_rinfo(ls, "dlm_clear_toss %u done", count); }
linux-master
fs/dlm/recover.c
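The recover_lvb() logic in fs/dlm/recover.c above takes the resource LVB from a granted/converting lkb with mode above CR when one exists, and otherwise falls back to the lkb whose lkb_lvbseq compares newest under a signed-difference test, so that sequence-number wraparound is tolerated. Below is a minimal user-space sketch of that comparison idiom only; the helper name and the toy data are invented, and this is not kernel code.

#include <stdint.h>
#include <stdio.h>

/* Wraparound-tolerant "a is at least as new as b" for 32-bit sequence
 * numbers, mirroring the signed-difference test recover_lvb() applies
 * when it picks the lkb with the newest lvbseq. */
static int seq_ge(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) >= 0;
}

int main(void)
{
	/* Toy sequence numbers straddling the 32-bit wrap. */
	uint32_t seqs[] = { 0xfffffffeu, 0xffffffffu, 1, 2 };
	uint32_t best = seqs[0];
	unsigned int i;

	for (i = 1; i < sizeof(seqs) / sizeof(seqs[0]); i++)
		if (seq_ge(seqs[i], best))
			best = seqs[i];

	/* Prints 2: the post-wrap value beats 0xffffffff. */
	printf("newest seq: %u\n", best);
	return 0;
}

Treating the difference as signed is what keeps the ordering usable across the wrap; a plain unsigned comparison would have selected 0xffffffff here.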
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved. */ #include <linux/fs.h> #include <linux/filelock.h> #include <linux/miscdevice.h> #include <linux/poll.h> #include <linux/dlm.h> #include <linux/dlm_plock.h> #include <linux/slab.h> #include <trace/events/dlm.h> #include "dlm_internal.h" #include "lockspace.h" static DEFINE_SPINLOCK(ops_lock); static LIST_HEAD(send_list); static LIST_HEAD(recv_list); static DECLARE_WAIT_QUEUE_HEAD(send_wq); static DECLARE_WAIT_QUEUE_HEAD(recv_wq); struct plock_async_data { void *fl; void *file; struct file_lock flc; int (*callback)(struct file_lock *fl, int result); }; struct plock_op { struct list_head list; int done; struct dlm_plock_info info; /* if set indicates async handling */ struct plock_async_data *data; }; static inline void set_version(struct dlm_plock_info *info) { info->version[0] = DLM_PLOCK_VERSION_MAJOR; info->version[1] = DLM_PLOCK_VERSION_MINOR; info->version[2] = DLM_PLOCK_VERSION_PATCH; } static struct plock_op *plock_lookup_waiter(const struct dlm_plock_info *info) { struct plock_op *op = NULL, *iter; list_for_each_entry(iter, &recv_list, list) { if (iter->info.fsid == info->fsid && iter->info.number == info->number && iter->info.owner == info->owner && iter->info.pid == info->pid && iter->info.start == info->start && iter->info.end == info->end && iter->info.ex == info->ex && iter->info.wait) { op = iter; break; } } return op; } static int check_version(struct dlm_plock_info *info) { if ((DLM_PLOCK_VERSION_MAJOR != info->version[0]) || (DLM_PLOCK_VERSION_MINOR < info->version[1])) { log_print("plock device version mismatch: " "kernel (%u.%u.%u), user (%u.%u.%u)", DLM_PLOCK_VERSION_MAJOR, DLM_PLOCK_VERSION_MINOR, DLM_PLOCK_VERSION_PATCH, info->version[0], info->version[1], info->version[2]); return -EINVAL; } return 0; } static void dlm_release_plock_op(struct plock_op *op) { kfree(op->data); kfree(op); } static void send_op(struct plock_op *op) { set_version(&op->info); spin_lock(&ops_lock); list_add_tail(&op->list, &send_list); spin_unlock(&ops_lock); wake_up(&send_wq); } static int do_lock_cancel(const struct dlm_plock_info *orig_info) { struct plock_op *op; int rv; op = kzalloc(sizeof(*op), GFP_NOFS); if (!op) return -ENOMEM; op->info = *orig_info; op->info.optype = DLM_PLOCK_OP_CANCEL; op->info.wait = 0; send_op(op); wait_event(recv_wq, (op->done != 0)); rv = op->info.rv; dlm_release_plock_op(op); return rv; } int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file, int cmd, struct file_lock *fl) { struct plock_async_data *op_data; struct dlm_ls *ls; struct plock_op *op; int rv; ls = dlm_find_lockspace_local(lockspace); if (!ls) return -EINVAL; op = kzalloc(sizeof(*op), GFP_NOFS); if (!op) { rv = -ENOMEM; goto out; } op->info.optype = DLM_PLOCK_OP_LOCK; op->info.pid = fl->fl_pid; op->info.ex = (fl->fl_type == F_WRLCK); op->info.wait = IS_SETLKW(cmd); op->info.fsid = ls->ls_global_id; op->info.number = number; op->info.start = fl->fl_start; op->info.end = fl->fl_end; /* async handling */ if (fl->fl_lmops && fl->fl_lmops->lm_grant) { op_data = kzalloc(sizeof(*op_data), GFP_NOFS); if (!op_data) { dlm_release_plock_op(op); rv = -ENOMEM; goto out; } /* fl_owner is lockd which doesn't distinguish processes on the nfs client */ op->info.owner = (__u64) fl->fl_pid; op_data->callback = fl->fl_lmops->lm_grant; locks_init_lock(&op_data->flc); locks_copy_lock(&op_data->flc, fl); op_data->fl = fl; op_data->file = file; op->data = op_data; send_op(op); rv = 
FILE_LOCK_DEFERRED; goto out; } else { op->info.owner = (__u64)(long) fl->fl_owner; } send_op(op); if (op->info.wait) { rv = wait_event_interruptible(recv_wq, (op->done != 0)); if (rv == -ERESTARTSYS) { spin_lock(&ops_lock); /* recheck under ops_lock if we got a done != 0, * if so this interrupt case should be ignored */ if (op->done != 0) { spin_unlock(&ops_lock); goto do_lock_wait; } spin_unlock(&ops_lock); rv = do_lock_cancel(&op->info); switch (rv) { case 0: /* waiter was deleted in user space, answer will never come * remove original request. The original request must be * on recv_list because the answer of do_lock_cancel() * synchronized it. */ spin_lock(&ops_lock); list_del(&op->list); spin_unlock(&ops_lock); rv = -EINTR; break; case -ENOENT: /* cancellation wasn't successful but op should be done */ fallthrough; default: /* internal error doing cancel we need to wait */ goto wait; } log_debug(ls, "%s: wait interrupted %x %llx pid %d", __func__, ls->ls_global_id, (unsigned long long)number, op->info.pid); dlm_release_plock_op(op); goto out; } } else { wait: wait_event(recv_wq, (op->done != 0)); } do_lock_wait: WARN_ON(!list_empty(&op->list)); rv = op->info.rv; if (!rv) { if (locks_lock_file_wait(file, fl) < 0) log_error(ls, "dlm_posix_lock: vfs lock error %llx", (unsigned long long)number); } dlm_release_plock_op(op); out: dlm_put_lockspace(ls); return rv; } EXPORT_SYMBOL_GPL(dlm_posix_lock); /* Returns failure iff a successful lock operation should be canceled */ static int dlm_plock_callback(struct plock_op *op) { struct plock_async_data *op_data = op->data; struct file *file; struct file_lock *fl; struct file_lock *flc; int (*notify)(struct file_lock *fl, int result) = NULL; int rv = 0; WARN_ON(!list_empty(&op->list)); /* check if the following 2 are still valid or make a copy */ file = op_data->file; flc = &op_data->flc; fl = op_data->fl; notify = op_data->callback; if (op->info.rv) { notify(fl, op->info.rv); goto out; } /* got fs lock; bookkeep locally as well: */ flc->fl_flags &= ~FL_SLEEP; if (posix_lock_file(file, flc, NULL)) { /* * This can only happen in the case of kmalloc() failure. * The filesystem's own lock is the authoritative lock, * so a failure to get the lock locally is not a disaster. * As long as the fs cannot reliably cancel locks (especially * in a low-memory situation), we're better off ignoring * this failure than trying to recover. 
*/ log_print("dlm_plock_callback: vfs lock error %llx file %p fl %p", (unsigned long long)op->info.number, file, fl); } rv = notify(fl, 0); if (rv) { /* XXX: We need to cancel the fs lock here: */ log_print("%s: lock granted after lock request failed; dangling lock!", __func__); goto out; } out: dlm_release_plock_op(op); return rv; } int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file, struct file_lock *fl) { struct dlm_ls *ls; struct plock_op *op; int rv; unsigned char fl_flags = fl->fl_flags; ls = dlm_find_lockspace_local(lockspace); if (!ls) return -EINVAL; op = kzalloc(sizeof(*op), GFP_NOFS); if (!op) { rv = -ENOMEM; goto out; } /* cause the vfs unlock to return ENOENT if lock is not found */ fl->fl_flags |= FL_EXISTS; rv = locks_lock_file_wait(file, fl); if (rv == -ENOENT) { rv = 0; goto out_free; } if (rv < 0) { log_error(ls, "dlm_posix_unlock: vfs unlock error %d %llx", rv, (unsigned long long)number); } op->info.optype = DLM_PLOCK_OP_UNLOCK; op->info.pid = fl->fl_pid; op->info.fsid = ls->ls_global_id; op->info.number = number; op->info.start = fl->fl_start; op->info.end = fl->fl_end; if (fl->fl_lmops && fl->fl_lmops->lm_grant) op->info.owner = (__u64) fl->fl_pid; else op->info.owner = (__u64)(long) fl->fl_owner; if (fl->fl_flags & FL_CLOSE) { op->info.flags |= DLM_PLOCK_FL_CLOSE; send_op(op); rv = 0; goto out; } send_op(op); wait_event(recv_wq, (op->done != 0)); WARN_ON(!list_empty(&op->list)); rv = op->info.rv; if (rv == -ENOENT) rv = 0; out_free: dlm_release_plock_op(op); out: dlm_put_lockspace(ls); fl->fl_flags = fl_flags; return rv; } EXPORT_SYMBOL_GPL(dlm_posix_unlock); /* * NOTE: This implementation can only handle async lock requests as nfs * do it. It cannot handle cancellation of a pending lock request sitting * in wait_event(), but for now only nfs is the only user local kernel * user. */ int dlm_posix_cancel(dlm_lockspace_t *lockspace, u64 number, struct file *file, struct file_lock *fl) { struct dlm_plock_info info; struct plock_op *op; struct dlm_ls *ls; int rv; /* this only works for async request for now and nfs is the only * kernel user right now. */ if (WARN_ON_ONCE(!fl->fl_lmops || !fl->fl_lmops->lm_grant)) return -EOPNOTSUPP; ls = dlm_find_lockspace_local(lockspace); if (!ls) return -EINVAL; memset(&info, 0, sizeof(info)); info.pid = fl->fl_pid; info.ex = (fl->fl_type == F_WRLCK); info.fsid = ls->ls_global_id; dlm_put_lockspace(ls); info.number = number; info.start = fl->fl_start; info.end = fl->fl_end; info.owner = (__u64)fl->fl_pid; rv = do_lock_cancel(&info); switch (rv) { case 0: spin_lock(&ops_lock); /* lock request to cancel must be on recv_list because * do_lock_cancel() synchronizes it. */ op = plock_lookup_waiter(&info); if (WARN_ON_ONCE(!op)) { spin_unlock(&ops_lock); rv = -ENOLCK; break; } list_del(&op->list); spin_unlock(&ops_lock); WARN_ON(op->info.optype != DLM_PLOCK_OP_LOCK); op->data->callback(op->data->fl, -EINTR); dlm_release_plock_op(op); rv = -EINTR; break; case -ENOENT: /* if cancel wasn't successful we probably were to late * or it was a non-blocking lock request, so just unlock it. 
*/ rv = dlm_posix_unlock(lockspace, number, file, fl); break; default: break; } return rv; } EXPORT_SYMBOL_GPL(dlm_posix_cancel); int dlm_posix_get(dlm_lockspace_t *lockspace, u64 number, struct file *file, struct file_lock *fl) { struct dlm_ls *ls; struct plock_op *op; int rv; ls = dlm_find_lockspace_local(lockspace); if (!ls) return -EINVAL; op = kzalloc(sizeof(*op), GFP_NOFS); if (!op) { rv = -ENOMEM; goto out; } op->info.optype = DLM_PLOCK_OP_GET; op->info.pid = fl->fl_pid; op->info.ex = (fl->fl_type == F_WRLCK); op->info.fsid = ls->ls_global_id; op->info.number = number; op->info.start = fl->fl_start; op->info.end = fl->fl_end; if (fl->fl_lmops && fl->fl_lmops->lm_grant) op->info.owner = (__u64) fl->fl_pid; else op->info.owner = (__u64)(long) fl->fl_owner; send_op(op); wait_event(recv_wq, (op->done != 0)); WARN_ON(!list_empty(&op->list)); /* info.rv from userspace is 1 for conflict, 0 for no-conflict, -ENOENT if there are no locks on the file */ rv = op->info.rv; fl->fl_type = F_UNLCK; if (rv == -ENOENT) rv = 0; else if (rv > 0) { locks_init_lock(fl); fl->fl_type = (op->info.ex) ? F_WRLCK : F_RDLCK; fl->fl_flags = FL_POSIX; fl->fl_pid = op->info.pid; if (op->info.nodeid != dlm_our_nodeid()) fl->fl_pid = -fl->fl_pid; fl->fl_start = op->info.start; fl->fl_end = op->info.end; rv = 0; } dlm_release_plock_op(op); out: dlm_put_lockspace(ls); return rv; } EXPORT_SYMBOL_GPL(dlm_posix_get); /* a read copies out one plock request from the send list */ static ssize_t dev_read(struct file *file, char __user *u, size_t count, loff_t *ppos) { struct dlm_plock_info info; struct plock_op *op = NULL; if (count < sizeof(info)) return -EINVAL; spin_lock(&ops_lock); if (!list_empty(&send_list)) { op = list_first_entry(&send_list, struct plock_op, list); if (op->info.flags & DLM_PLOCK_FL_CLOSE) list_del(&op->list); else list_move_tail(&op->list, &recv_list); memcpy(&info, &op->info, sizeof(info)); } spin_unlock(&ops_lock); if (!op) return -EAGAIN; trace_dlm_plock_read(&info); /* there is no need to get a reply from userspace for unlocks that were generated by the vfs cleaning up for a close (the process did not make an unlock call). */ if (op->info.flags & DLM_PLOCK_FL_CLOSE) dlm_release_plock_op(op); if (copy_to_user(u, &info, sizeof(info))) return -EFAULT; return sizeof(info); } /* a write copies in one plock result that should match a plock_op on the recv list */ static ssize_t dev_write(struct file *file, const char __user *u, size_t count, loff_t *ppos) { struct plock_op *op = NULL, *iter; struct dlm_plock_info info; int do_callback = 0; if (count != sizeof(info)) return -EINVAL; if (copy_from_user(&info, u, sizeof(info))) return -EFAULT; trace_dlm_plock_write(&info); if (check_version(&info)) return -EINVAL; /* * The results for waiting ops (SETLKW) can be returned in any * order, so match all fields to find the op. The results for * non-waiting ops are returned in the order that they were sent * to userspace, so match the result with the first non-waiting op. */ spin_lock(&ops_lock); if (info.wait) { op = plock_lookup_waiter(&info); } else { list_for_each_entry(iter, &recv_list, list) { if (!iter->info.wait && iter->info.fsid == info.fsid) { op = iter; break; } } } if (op) { /* Sanity check that op and info match. 
*/ if (info.wait) WARN_ON(op->info.optype != DLM_PLOCK_OP_LOCK); else WARN_ON(op->info.number != info.number || op->info.owner != info.owner || op->info.optype != info.optype); list_del_init(&op->list); memcpy(&op->info, &info, sizeof(info)); if (op->data) do_callback = 1; else op->done = 1; } spin_unlock(&ops_lock); if (op) { if (do_callback) dlm_plock_callback(op); else wake_up(&recv_wq); } else pr_debug("%s: no op %x %llx", __func__, info.fsid, (unsigned long long)info.number); return count; } static __poll_t dev_poll(struct file *file, poll_table *wait) { __poll_t mask = 0; poll_wait(file, &send_wq, wait); spin_lock(&ops_lock); if (!list_empty(&send_list)) mask = EPOLLIN | EPOLLRDNORM; spin_unlock(&ops_lock); return mask; } static const struct file_operations dev_fops = { .read = dev_read, .write = dev_write, .poll = dev_poll, .owner = THIS_MODULE, .llseek = noop_llseek, }; static struct miscdevice plock_dev_misc = { .minor = MISC_DYNAMIC_MINOR, .name = DLM_PLOCK_MISC_NAME, .fops = &dev_fops }; int dlm_plock_init(void) { int rv; rv = misc_register(&plock_dev_misc); if (rv) log_print("dlm_plock_init: misc_register failed %d", rv); return rv; } void dlm_plock_exit(void) { misc_deregister(&plock_dev_misc); WARN_ON(!list_empty(&send_list)); WARN_ON(!list_empty(&recv_list)); }
linux-master
fs/dlm/plock.c
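dlm_posix_lock()/dlm_posix_unlock()/dlm_posix_get() in fs/dlm/plock.c above queue a struct dlm_plock_info on send_list and then block until a user-space daemon (normally dlm_controld) reads the request from the plock misc device and writes a result back through dev_write(). The following is only a rough sketch of that user-space side, assuming the device node is /dev/dlm_plock; it uses the real uapi header <linux/dlm_plock.h>, but the "grant everything" reply policy is purely illustrative and not what dlm_controld actually does.

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/dlm_plock.h>	/* struct dlm_plock_info, DLM_PLOCK_FL_CLOSE */

int main(void)
{
	struct dlm_plock_info info;
	struct pollfd pfd;

	pfd.fd = open("/dev/dlm_plock", O_RDWR);	/* device path assumed */
	if (pfd.fd < 0) {
		perror("open");
		return 1;
	}
	pfd.events = POLLIN;

	for (;;) {
		if (poll(&pfd, 1, -1) < 0)
			break;
		if (read(pfd.fd, &info, sizeof(info)) != sizeof(info))
			continue;	/* lost the race; dev_read() gave -EAGAIN */

		/* Close-path unlocks are fire-and-forget: dev_read() already
		 * dropped the op, so no reply is expected for them. */
		if (info.flags & DLM_PLOCK_FL_CLOSE)
			continue;

		/* Toy policy: report success / no conflict for every op.
		 * The version[] fields the kernel filled in are echoed back
		 * unchanged, which satisfies check_version() in dev_write(). */
		info.rv = 0;
		if (write(pfd.fd, &info, sizeof(info)) != sizeof(info))
			perror("write");
	}
	close(pfd.fd);
	return 0;
}

A real daemon would inspect info.optype and the range fields, consult its cluster-wide plock state, and put the proper result (or -ENOENT, or a conflicting-lock description for GET requests) into rv before writing the record back.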
// SPDX-License-Identifier: GPL-2.0-only /****************************************************************************** ******************************************************************************* ** ** Copyright (C) 2005-2009 Red Hat, Inc. All rights reserved. ** ** ******************************************************************************* ******************************************************************************/ #include <linux/pagemap.h> #include <linux/seq_file.h> #include <linux/init.h> #include <linux/ctype.h> #include <linux/debugfs.h> #include <linux/slab.h> #include "dlm_internal.h" #include "midcomms.h" #include "lock.h" #include "ast.h" #define DLM_DEBUG_BUF_LEN 4096 static char debug_buf[DLM_DEBUG_BUF_LEN]; static struct mutex debug_buf_lock; static struct dentry *dlm_root; static struct dentry *dlm_comms; static char *print_lockmode(int mode) { switch (mode) { case DLM_LOCK_IV: return "--"; case DLM_LOCK_NL: return "NL"; case DLM_LOCK_CR: return "CR"; case DLM_LOCK_CW: return "CW"; case DLM_LOCK_PR: return "PR"; case DLM_LOCK_PW: return "PW"; case DLM_LOCK_EX: return "EX"; default: return "??"; } } static void print_format1_lock(struct seq_file *s, struct dlm_lkb *lkb, struct dlm_rsb *res) { seq_printf(s, "%08x %s", lkb->lkb_id, print_lockmode(lkb->lkb_grmode)); if (lkb->lkb_status == DLM_LKSTS_CONVERT || lkb->lkb_status == DLM_LKSTS_WAITING) seq_printf(s, " (%s)", print_lockmode(lkb->lkb_rqmode)); if (lkb->lkb_nodeid) { if (lkb->lkb_nodeid != res->res_nodeid) seq_printf(s, " Remote: %3d %08x", lkb->lkb_nodeid, lkb->lkb_remid); else seq_printf(s, " Master: %08x", lkb->lkb_remid); } if (lkb->lkb_wait_type) seq_printf(s, " wait_type: %d", lkb->lkb_wait_type); seq_putc(s, '\n'); } static void print_format1(struct dlm_rsb *res, struct seq_file *s) { struct dlm_lkb *lkb; int i, lvblen = res->res_ls->ls_lvblen, recover_list, root_list; lock_rsb(res); seq_printf(s, "\nResource %p Name (len=%d) \"", res, res->res_length); for (i = 0; i < res->res_length; i++) { if (isprint(res->res_name[i])) seq_printf(s, "%c", res->res_name[i]); else seq_printf(s, "%c", '.'); } if (res->res_nodeid > 0) seq_printf(s, "\"\nLocal Copy, Master is node %d\n", res->res_nodeid); else if (res->res_nodeid == 0) seq_puts(s, "\"\nMaster Copy\n"); else if (res->res_nodeid == -1) seq_printf(s, "\"\nLooking up master (lkid %x)\n", res->res_first_lkid); else seq_printf(s, "\"\nInvalid master %d\n", res->res_nodeid); if (seq_has_overflowed(s)) goto out; /* Print the LVB: */ if (res->res_lvbptr) { seq_puts(s, "LVB: "); for (i = 0; i < lvblen; i++) { if (i == lvblen / 2) seq_puts(s, "\n "); seq_printf(s, "%02x ", (unsigned char) res->res_lvbptr[i]); } if (rsb_flag(res, RSB_VALNOTVALID)) seq_puts(s, " (INVALID)"); seq_putc(s, '\n'); if (seq_has_overflowed(s)) goto out; } root_list = !list_empty(&res->res_root_list); recover_list = !list_empty(&res->res_recover_list); if (root_list || recover_list) { seq_printf(s, "Recovery: root %d recover %d flags %lx count %d\n", root_list, recover_list, res->res_flags, res->res_recover_locks_count); } /* Print the locks attached to this resource */ seq_puts(s, "Granted Queue\n"); list_for_each_entry(lkb, &res->res_grantqueue, lkb_statequeue) { print_format1_lock(s, lkb, res); if (seq_has_overflowed(s)) goto out; } seq_puts(s, "Conversion Queue\n"); list_for_each_entry(lkb, &res->res_convertqueue, lkb_statequeue) { print_format1_lock(s, lkb, res); if (seq_has_overflowed(s)) goto out; } seq_puts(s, "Waiting Queue\n"); list_for_each_entry(lkb, &res->res_waitqueue, 
lkb_statequeue) { print_format1_lock(s, lkb, res); if (seq_has_overflowed(s)) goto out; } if (list_empty(&res->res_lookup)) goto out; seq_puts(s, "Lookup Queue\n"); list_for_each_entry(lkb, &res->res_lookup, lkb_rsb_lookup) { seq_printf(s, "%08x %s", lkb->lkb_id, print_lockmode(lkb->lkb_rqmode)); if (lkb->lkb_wait_type) seq_printf(s, " wait_type: %d", lkb->lkb_wait_type); seq_putc(s, '\n'); if (seq_has_overflowed(s)) goto out; } out: unlock_rsb(res); } static void print_format2_lock(struct seq_file *s, struct dlm_lkb *lkb, struct dlm_rsb *r) { u64 xid = 0; u64 us; if (test_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags)) { if (lkb->lkb_ua) xid = lkb->lkb_ua->xid; } /* microseconds since lkb was added to current queue */ us = ktime_to_us(ktime_sub(ktime_get(), lkb->lkb_timestamp)); /* id nodeid remid pid xid exflags flags sts grmode rqmode time_us r_nodeid r_len r_name */ seq_printf(s, "%x %d %x %u %llu %x %x %d %d %d %llu %u %d \"%s\"\n", lkb->lkb_id, lkb->lkb_nodeid, lkb->lkb_remid, lkb->lkb_ownpid, (unsigned long long)xid, lkb->lkb_exflags, dlm_iflags_val(lkb), lkb->lkb_status, lkb->lkb_grmode, lkb->lkb_rqmode, (unsigned long long)us, r->res_nodeid, r->res_length, r->res_name); } static void print_format2(struct dlm_rsb *r, struct seq_file *s) { struct dlm_lkb *lkb; lock_rsb(r); list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) { print_format2_lock(s, lkb, r); if (seq_has_overflowed(s)) goto out; } list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) { print_format2_lock(s, lkb, r); if (seq_has_overflowed(s)) goto out; } list_for_each_entry(lkb, &r->res_waitqueue, lkb_statequeue) { print_format2_lock(s, lkb, r); if (seq_has_overflowed(s)) goto out; } out: unlock_rsb(r); } static void print_format3_lock(struct seq_file *s, struct dlm_lkb *lkb, int rsb_lookup) { u64 xid = 0; if (test_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags)) { if (lkb->lkb_ua) xid = lkb->lkb_ua->xid; } seq_printf(s, "lkb %x %d %x %u %llu %x %x %d %d %d %d %d %d %u %llu %llu\n", lkb->lkb_id, lkb->lkb_nodeid, lkb->lkb_remid, lkb->lkb_ownpid, (unsigned long long)xid, lkb->lkb_exflags, dlm_iflags_val(lkb), lkb->lkb_status, lkb->lkb_grmode, lkb->lkb_rqmode, lkb->lkb_last_bast_mode, rsb_lookup, lkb->lkb_wait_type, lkb->lkb_lvbseq, (unsigned long long)ktime_to_ns(lkb->lkb_timestamp), (unsigned long long)ktime_to_ns(lkb->lkb_last_bast_time)); } static void print_format3(struct dlm_rsb *r, struct seq_file *s) { struct dlm_lkb *lkb; int i, lvblen = r->res_ls->ls_lvblen; int print_name = 1; lock_rsb(r); seq_printf(s, "rsb %p %d %x %lx %d %d %u %d ", r, r->res_nodeid, r->res_first_lkid, r->res_flags, !list_empty(&r->res_root_list), !list_empty(&r->res_recover_list), r->res_recover_locks_count, r->res_length); if (seq_has_overflowed(s)) goto out; for (i = 0; i < r->res_length; i++) { if (!isascii(r->res_name[i]) || !isprint(r->res_name[i])) print_name = 0; } seq_puts(s, print_name ? 
"str " : "hex"); for (i = 0; i < r->res_length; i++) { if (print_name) seq_printf(s, "%c", r->res_name[i]); else seq_printf(s, " %02x", (unsigned char)r->res_name[i]); } seq_putc(s, '\n'); if (seq_has_overflowed(s)) goto out; if (!r->res_lvbptr) goto do_locks; seq_printf(s, "lvb %u %d", r->res_lvbseq, lvblen); for (i = 0; i < lvblen; i++) seq_printf(s, " %02x", (unsigned char)r->res_lvbptr[i]); seq_putc(s, '\n'); if (seq_has_overflowed(s)) goto out; do_locks: list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) { print_format3_lock(s, lkb, 0); if (seq_has_overflowed(s)) goto out; } list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) { print_format3_lock(s, lkb, 0); if (seq_has_overflowed(s)) goto out; } list_for_each_entry(lkb, &r->res_waitqueue, lkb_statequeue) { print_format3_lock(s, lkb, 0); if (seq_has_overflowed(s)) goto out; } list_for_each_entry(lkb, &r->res_lookup, lkb_rsb_lookup) { print_format3_lock(s, lkb, 1); if (seq_has_overflowed(s)) goto out; } out: unlock_rsb(r); } static void print_format4(struct dlm_rsb *r, struct seq_file *s) { int our_nodeid = dlm_our_nodeid(); int print_name = 1; int i; lock_rsb(r); seq_printf(s, "rsb %p %d %d %d %d %lu %lx %d ", r, r->res_nodeid, r->res_master_nodeid, r->res_dir_nodeid, our_nodeid, r->res_toss_time, r->res_flags, r->res_length); for (i = 0; i < r->res_length; i++) { if (!isascii(r->res_name[i]) || !isprint(r->res_name[i])) print_name = 0; } seq_puts(s, print_name ? "str " : "hex"); for (i = 0; i < r->res_length; i++) { if (print_name) seq_printf(s, "%c", r->res_name[i]); else seq_printf(s, " %02x", (unsigned char)r->res_name[i]); } seq_putc(s, '\n'); unlock_rsb(r); } static void print_format5_lock(struct seq_file *s, struct dlm_lkb *lkb) { struct dlm_callback *cb; /* lkb_id lkb_flags mode flags sb_status sb_flags */ spin_lock(&lkb->lkb_cb_lock); list_for_each_entry(cb, &lkb->lkb_callbacks, list) { seq_printf(s, "%x %x %d %x %d %x\n", lkb->lkb_id, dlm_iflags_val(lkb), cb->mode, cb->flags, cb->sb_status, cb->sb_flags); } spin_unlock(&lkb->lkb_cb_lock); } static void print_format5(struct dlm_rsb *r, struct seq_file *s) { struct dlm_lkb *lkb; lock_rsb(r); list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) { print_format5_lock(s, lkb); if (seq_has_overflowed(s)) goto out; } list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) { print_format5_lock(s, lkb); if (seq_has_overflowed(s)) goto out; } list_for_each_entry(lkb, &r->res_waitqueue, lkb_statequeue) { print_format5_lock(s, lkb); if (seq_has_overflowed(s)) goto out; } out: unlock_rsb(r); } struct rsbtbl_iter { struct dlm_rsb *rsb; unsigned bucket; int format; int header; }; /* * If the buffer is full, seq_printf can be called again, but it * does nothing. So, the these printing routines periodically check * seq_has_overflowed to avoid wasting too much time trying to print to * a full buffer. 
*/ static int table_seq_show(struct seq_file *seq, void *iter_ptr) { struct rsbtbl_iter *ri = iter_ptr; switch (ri->format) { case 1: print_format1(ri->rsb, seq); break; case 2: if (ri->header) { seq_puts(seq, "id nodeid remid pid xid exflags flags sts grmode rqmode time_ms r_nodeid r_len r_name\n"); ri->header = 0; } print_format2(ri->rsb, seq); break; case 3: if (ri->header) { seq_puts(seq, "version rsb 1.1 lvb 1.1 lkb 1.1\n"); ri->header = 0; } print_format3(ri->rsb, seq); break; case 4: if (ri->header) { seq_puts(seq, "version 4 rsb 2\n"); ri->header = 0; } print_format4(ri->rsb, seq); break; case 5: if (ri->header) { seq_puts(seq, "lkb_id lkb_flags mode flags sb_status sb_flags\n"); ri->header = 0; } print_format5(ri->rsb, seq); break; } return 0; } static const struct seq_operations format1_seq_ops; static const struct seq_operations format2_seq_ops; static const struct seq_operations format3_seq_ops; static const struct seq_operations format4_seq_ops; static const struct seq_operations format5_seq_ops; static void *table_seq_start(struct seq_file *seq, loff_t *pos) { struct rb_root *tree; struct rb_node *node; struct dlm_ls *ls = seq->private; struct rsbtbl_iter *ri; struct dlm_rsb *r; loff_t n = *pos; unsigned bucket, entry; int toss = (seq->op == &format4_seq_ops); bucket = n >> 32; entry = n & ((1LL << 32) - 1); if (bucket >= ls->ls_rsbtbl_size) return NULL; ri = kzalloc(sizeof(*ri), GFP_NOFS); if (!ri) return NULL; if (n == 0) ri->header = 1; if (seq->op == &format1_seq_ops) ri->format = 1; if (seq->op == &format2_seq_ops) ri->format = 2; if (seq->op == &format3_seq_ops) ri->format = 3; if (seq->op == &format4_seq_ops) ri->format = 4; if (seq->op == &format5_seq_ops) ri->format = 5; tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep; spin_lock(&ls->ls_rsbtbl[bucket].lock); if (!RB_EMPTY_ROOT(tree)) { for (node = rb_first(tree); node; node = rb_next(node)) { r = rb_entry(node, struct dlm_rsb, res_hashnode); if (!entry--) { dlm_hold_rsb(r); ri->rsb = r; ri->bucket = bucket; spin_unlock(&ls->ls_rsbtbl[bucket].lock); return ri; } } } spin_unlock(&ls->ls_rsbtbl[bucket].lock); /* * move to the first rsb in the next non-empty bucket */ /* zero the entry */ n &= ~((1LL << 32) - 1); while (1) { bucket++; n += 1LL << 32; if (bucket >= ls->ls_rsbtbl_size) { kfree(ri); return NULL; } tree = toss ? 
&ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep; spin_lock(&ls->ls_rsbtbl[bucket].lock); if (!RB_EMPTY_ROOT(tree)) { node = rb_first(tree); r = rb_entry(node, struct dlm_rsb, res_hashnode); dlm_hold_rsb(r); ri->rsb = r; ri->bucket = bucket; spin_unlock(&ls->ls_rsbtbl[bucket].lock); *pos = n; return ri; } spin_unlock(&ls->ls_rsbtbl[bucket].lock); } } static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos) { struct dlm_ls *ls = seq->private; struct rsbtbl_iter *ri = iter_ptr; struct rb_root *tree; struct rb_node *next; struct dlm_rsb *r, *rp; loff_t n = *pos; unsigned bucket; int toss = (seq->op == &format4_seq_ops); bucket = n >> 32; /* * move to the next rsb in the same bucket */ spin_lock(&ls->ls_rsbtbl[bucket].lock); rp = ri->rsb; next = rb_next(&rp->res_hashnode); if (next) { r = rb_entry(next, struct dlm_rsb, res_hashnode); dlm_hold_rsb(r); ri->rsb = r; spin_unlock(&ls->ls_rsbtbl[bucket].lock); dlm_put_rsb(rp); ++*pos; return ri; } spin_unlock(&ls->ls_rsbtbl[bucket].lock); dlm_put_rsb(rp); /* * move to the first rsb in the next non-empty bucket */ /* zero the entry */ n &= ~((1LL << 32) - 1); while (1) { bucket++; n += 1LL << 32; if (bucket >= ls->ls_rsbtbl_size) { kfree(ri); ++*pos; return NULL; } tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep; spin_lock(&ls->ls_rsbtbl[bucket].lock); if (!RB_EMPTY_ROOT(tree)) { next = rb_first(tree); r = rb_entry(next, struct dlm_rsb, res_hashnode); dlm_hold_rsb(r); ri->rsb = r; ri->bucket = bucket; spin_unlock(&ls->ls_rsbtbl[bucket].lock); *pos = n; return ri; } spin_unlock(&ls->ls_rsbtbl[bucket].lock); } } static void table_seq_stop(struct seq_file *seq, void *iter_ptr) { struct rsbtbl_iter *ri = iter_ptr; if (ri) { dlm_put_rsb(ri->rsb); kfree(ri); } } static const struct seq_operations format1_seq_ops = { .start = table_seq_start, .next = table_seq_next, .stop = table_seq_stop, .show = table_seq_show, }; static const struct seq_operations format2_seq_ops = { .start = table_seq_start, .next = table_seq_next, .stop = table_seq_stop, .show = table_seq_show, }; static const struct seq_operations format3_seq_ops = { .start = table_seq_start, .next = table_seq_next, .stop = table_seq_stop, .show = table_seq_show, }; static const struct seq_operations format4_seq_ops = { .start = table_seq_start, .next = table_seq_next, .stop = table_seq_stop, .show = table_seq_show, }; static const struct seq_operations format5_seq_ops = { .start = table_seq_start, .next = table_seq_next, .stop = table_seq_stop, .show = table_seq_show, }; static const struct file_operations format1_fops; static const struct file_operations format2_fops; static const struct file_operations format3_fops; static const struct file_operations format4_fops; static const struct file_operations format5_fops; static int table_open1(struct inode *inode, struct file *file) { struct seq_file *seq; int ret; ret = seq_open(file, &format1_seq_ops); if (ret) return ret; seq = file->private_data; seq->private = inode->i_private; /* the dlm_ls */ return 0; } static int table_open2(struct inode *inode, struct file *file) { struct seq_file *seq; int ret; ret = seq_open(file, &format2_seq_ops); if (ret) return ret; seq = file->private_data; seq->private = inode->i_private; /* the dlm_ls */ return 0; } static ssize_t table_write2(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct seq_file *seq = file->private_data; int n, len, lkb_nodeid, lkb_status, error; char name[DLM_RESNAME_MAXLEN + 1] = {}; struct dlm_ls *ls = 
seq->private; unsigned int lkb_flags; char buf[256] = {}; uint32_t lkb_id; if (copy_from_user(buf, user_buf, min_t(size_t, sizeof(buf) - 1, count))) return -EFAULT; n = sscanf(buf, "%x %" __stringify(DLM_RESNAME_MAXLEN) "s %x %d %d", &lkb_id, name, &lkb_flags, &lkb_nodeid, &lkb_status); if (n != 5) return -EINVAL; len = strnlen(name, DLM_RESNAME_MAXLEN); error = dlm_debug_add_lkb(ls, lkb_id, name, len, lkb_flags, lkb_nodeid, lkb_status); if (error) return error; return count; } static int table_open3(struct inode *inode, struct file *file) { struct seq_file *seq; int ret; ret = seq_open(file, &format3_seq_ops); if (ret) return ret; seq = file->private_data; seq->private = inode->i_private; /* the dlm_ls */ return 0; } static int table_open4(struct inode *inode, struct file *file) { struct seq_file *seq; int ret; ret = seq_open(file, &format4_seq_ops); if (ret) return ret; seq = file->private_data; seq->private = inode->i_private; /* the dlm_ls */ return 0; } static int table_open5(struct inode *inode, struct file *file) { struct seq_file *seq; int ret; ret = seq_open(file, &format5_seq_ops); if (ret) return ret; seq = file->private_data; seq->private = inode->i_private; /* the dlm_ls */ return 0; } static const struct file_operations format1_fops = { .owner = THIS_MODULE, .open = table_open1, .read = seq_read, .llseek = seq_lseek, .release = seq_release }; static const struct file_operations format2_fops = { .owner = THIS_MODULE, .open = table_open2, .read = seq_read, .write = table_write2, .llseek = seq_lseek, .release = seq_release }; static const struct file_operations format3_fops = { .owner = THIS_MODULE, .open = table_open3, .read = seq_read, .llseek = seq_lseek, .release = seq_release }; static const struct file_operations format4_fops = { .owner = THIS_MODULE, .open = table_open4, .read = seq_read, .llseek = seq_lseek, .release = seq_release }; static const struct file_operations format5_fops = { .owner = THIS_MODULE, .open = table_open5, .read = seq_read, .llseek = seq_lseek, .release = seq_release }; /* * dump lkb's on the ls_waiters list */ static ssize_t waiters_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct dlm_ls *ls = file->private_data; struct dlm_lkb *lkb; size_t len = DLM_DEBUG_BUF_LEN, pos = 0, ret, rv; mutex_lock(&debug_buf_lock); mutex_lock(&ls->ls_waiters_mutex); memset(debug_buf, 0, sizeof(debug_buf)); list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) { ret = snprintf(debug_buf + pos, len - pos, "%x %d %d %s\n", lkb->lkb_id, lkb->lkb_wait_type, lkb->lkb_nodeid, lkb->lkb_resource->res_name); if (ret >= len - pos) break; pos += ret; } mutex_unlock(&ls->ls_waiters_mutex); rv = simple_read_from_buffer(userbuf, count, ppos, debug_buf, pos); mutex_unlock(&debug_buf_lock); return rv; } static ssize_t waiters_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct dlm_ls *ls = file->private_data; int mstype, to_nodeid; char buf[128] = {}; uint32_t lkb_id; int n, error; if (copy_from_user(buf, user_buf, min_t(size_t, sizeof(buf) - 1, count))) return -EFAULT; n = sscanf(buf, "%x %d %d", &lkb_id, &mstype, &to_nodeid); if (n != 3) return -EINVAL; error = dlm_debug_add_lkb_to_waiters(ls, lkb_id, mstype, to_nodeid); if (error) return error; return count; } static const struct file_operations waiters_fops = { .owner = THIS_MODULE, .open = simple_open, .read = waiters_read, .write = waiters_write, .llseek = default_llseek, }; void dlm_delete_debug_file(struct dlm_ls *ls) {
debugfs_remove(ls->ls_debug_rsb_dentry); debugfs_remove(ls->ls_debug_waiters_dentry); debugfs_remove(ls->ls_debug_locks_dentry); debugfs_remove(ls->ls_debug_all_dentry); debugfs_remove(ls->ls_debug_toss_dentry); debugfs_remove(ls->ls_debug_queued_asts_dentry); } static int dlm_state_show(struct seq_file *file, void *offset) { seq_printf(file, "%s\n", dlm_midcomms_state(file->private)); return 0; } DEFINE_SHOW_ATTRIBUTE(dlm_state); static int dlm_flags_show(struct seq_file *file, void *offset) { seq_printf(file, "%lu\n", dlm_midcomms_flags(file->private)); return 0; } DEFINE_SHOW_ATTRIBUTE(dlm_flags); static int dlm_send_queue_cnt_show(struct seq_file *file, void *offset) { seq_printf(file, "%d\n", dlm_midcomms_send_queue_cnt(file->private)); return 0; } DEFINE_SHOW_ATTRIBUTE(dlm_send_queue_cnt); static int dlm_version_show(struct seq_file *file, void *offset) { seq_printf(file, "0x%08x\n", dlm_midcomms_version(file->private)); return 0; } DEFINE_SHOW_ATTRIBUTE(dlm_version); static ssize_t dlm_rawmsg_write(struct file *fp, const char __user *user_buf, size_t count, loff_t *ppos) { void *buf; int ret; if (count > PAGE_SIZE || count < sizeof(struct dlm_header)) return -EINVAL; buf = kmalloc(PAGE_SIZE, GFP_NOFS); if (!buf) return -ENOMEM; if (copy_from_user(buf, user_buf, count)) { ret = -EFAULT; goto out; } ret = dlm_midcomms_rawmsg_send(fp->private_data, buf, count); if (ret) goto out; kfree(buf); return count; out: kfree(buf); return ret; } static const struct file_operations dlm_rawmsg_fops = { .open = simple_open, .write = dlm_rawmsg_write, .llseek = no_llseek, }; void *dlm_create_debug_comms_file(int nodeid, void *data) { struct dentry *d_node; char name[256]; memset(name, 0, sizeof(name)); snprintf(name, 256, "%d", nodeid); d_node = debugfs_create_dir(name, dlm_comms); debugfs_create_file("state", 0444, d_node, data, &dlm_state_fops); debugfs_create_file("flags", 0444, d_node, data, &dlm_flags_fops); debugfs_create_file("send_queue_count", 0444, d_node, data, &dlm_send_queue_cnt_fops); debugfs_create_file("version", 0444, d_node, data, &dlm_version_fops); debugfs_create_file("rawmsg", 0200, d_node, data, &dlm_rawmsg_fops); return d_node; } void dlm_delete_debug_comms_file(void *ctx) { debugfs_remove(ctx); } void dlm_create_debug_file(struct dlm_ls *ls) { char name[DLM_LOCKSPACE_LEN + 8]; /* format 1 */ ls->ls_debug_rsb_dentry = debugfs_create_file(ls->ls_name, S_IFREG | S_IRUGO, dlm_root, ls, &format1_fops); /* format 2 */ memset(name, 0, sizeof(name)); snprintf(name, DLM_LOCKSPACE_LEN + 8, "%s_locks", ls->ls_name); ls->ls_debug_locks_dentry = debugfs_create_file(name, 0644, dlm_root, ls, &format2_fops); /* format 3 */ memset(name, 0, sizeof(name)); snprintf(name, DLM_LOCKSPACE_LEN + 8, "%s_all", ls->ls_name); ls->ls_debug_all_dentry = debugfs_create_file(name, S_IFREG | S_IRUGO, dlm_root, ls, &format3_fops); /* format 4 */ memset(name, 0, sizeof(name)); snprintf(name, DLM_LOCKSPACE_LEN + 8, "%s_toss", ls->ls_name); ls->ls_debug_toss_dentry = debugfs_create_file(name, S_IFREG | S_IRUGO, dlm_root, ls, &format4_fops); memset(name, 0, sizeof(name)); snprintf(name, DLM_LOCKSPACE_LEN + 8, "%s_waiters", ls->ls_name); ls->ls_debug_waiters_dentry = debugfs_create_file(name, 0644, dlm_root, ls, &waiters_fops); /* format 5 */ memset(name, 0, sizeof(name)); snprintf(name, DLM_LOCKSPACE_LEN + 8, "%s_queued_asts", ls->ls_name); ls->ls_debug_queued_asts_dentry = debugfs_create_file(name, 0644, dlm_root, ls, &format5_fops); } void __init dlm_register_debugfs(void) { mutex_init(&debug_buf_lock); 
dlm_root = debugfs_create_dir("dlm", NULL); dlm_comms = debugfs_create_dir("comms", dlm_root); } void dlm_unregister_debugfs(void) { debugfs_remove(dlm_root); }
linux-master
fs/dlm/debug_fs.c
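table_seq_start() and table_seq_next() in fs/dlm/debug_fs.c above fold the iterator position into the 64-bit seq_file offset as (bucket << 32) | entry, and step to the next hash bucket by clearing the low 32 bits and adding 1 << 32. A tiny stand-alone check of that encoding follows; the helper name is invented and the numbers are arbitrary.

#include <stdint.h>
#include <stdio.h>

static uint64_t pack_pos(uint32_t bucket, uint32_t entry)
{
	return ((uint64_t)bucket << 32) | entry;
}

int main(void)
{
	uint64_t pos = pack_pos(7, 42);
	uint32_t bucket = (uint32_t)(pos >> 32);
	uint32_t entry = (uint32_t)(pos & ((1ULL << 32) - 1));

	printf("pos=%#llx bucket=%u entry=%u\n",
	       (unsigned long long)pos, bucket, entry);

	/* Advance to the next bucket the same way table_seq_next() does:
	 * zero the entry half, then add one to the bucket half. */
	pos &= ~(((uint64_t)1 << 32) - 1);
	pos += (uint64_t)1 << 32;
	printf("next bucket starts at pos=%#llx (bucket %u, entry 0)\n",
	       (unsigned long long)pos, (uint32_t)(pos >> 32));
	return 0;
}

Keeping the bucket index in the high half lets a reopened read resume part-way through a bucket by skipping "entry" items, which is exactly what table_seq_start() does.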
// SPDX-License-Identifier: GPL-2.0-only /****************************************************************************** ******************************************************************************* ** ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. ** Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved. ** ** ******************************************************************************* ******************************************************************************/ #include "dlm_internal.h" #include "lockspace.h" #include "member.h" #include "dir.h" #include "ast.h" #include "recover.h" #include "lowcomms.h" #include "lock.h" #include "requestqueue.h" #include "recoverd.h" /* If the start for which we're re-enabling locking (seq) has been superseded by a newer stop (ls_recover_seq), we need to leave locking disabled. We suspend dlm_recv threads here to avoid the race where dlm_recv a) sees locking stopped and b) adds a message to the requestqueue, but dlm_recoverd enables locking and clears the requestqueue between a and b. */ static int enable_locking(struct dlm_ls *ls, uint64_t seq) { int error = -EINTR; down_write(&ls->ls_recv_active); spin_lock(&ls->ls_recover_lock); if (ls->ls_recover_seq == seq) { set_bit(LSFL_RUNNING, &ls->ls_flags); /* unblocks processes waiting to enter the dlm */ up_write(&ls->ls_in_recovery); clear_bit(LSFL_RECOVER_LOCK, &ls->ls_flags); error = 0; } spin_unlock(&ls->ls_recover_lock); up_write(&ls->ls_recv_active); return error; } static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv) { unsigned long start; int error, neg = 0; log_rinfo(ls, "dlm_recover %llu", (unsigned long long)rv->seq); mutex_lock(&ls->ls_recoverd_active); dlm_callback_suspend(ls); dlm_clear_toss(ls); /* * This list of root rsb's will be the basis of most of the recovery * routines. */ dlm_create_root_list(ls); /* * Add or remove nodes from the lockspace's ls_nodes list. * * Due to the fact that we must report all membership changes to lsops * or midcomms layer, it is not permitted to abort ls_recover() until * this is done. */ error = dlm_recover_members(ls, rv, &neg); if (error) { log_rinfo(ls, "dlm_recover_members error %d", error); goto fail; } dlm_recover_dir_nodeid(ls); ls->ls_recover_dir_sent_res = 0; ls->ls_recover_dir_sent_msg = 0; ls->ls_recover_locks_in = 0; dlm_set_recover_status(ls, DLM_RS_NODES); error = dlm_recover_members_wait(ls, rv->seq); if (error) { log_rinfo(ls, "dlm_recover_members_wait error %d", error); goto fail; } start = jiffies; /* * Rebuild our own share of the directory by collecting from all other * nodes their master rsb names that hash to us. */ error = dlm_recover_directory(ls, rv->seq); if (error) { log_rinfo(ls, "dlm_recover_directory error %d", error); goto fail; } dlm_set_recover_status(ls, DLM_RS_DIR); error = dlm_recover_directory_wait(ls, rv->seq); if (error) { log_rinfo(ls, "dlm_recover_directory_wait error %d", error); goto fail; } log_rinfo(ls, "dlm_recover_directory %u out %u messages", ls->ls_recover_dir_sent_res, ls->ls_recover_dir_sent_msg); /* * We may have outstanding operations that are waiting for a reply from * a failed node. Mark these to be resent after recovery. Unlock and * cancel ops can just be completed. */ dlm_recover_waiters_pre(ls); if (dlm_recovery_stopped(ls)) { error = -EINTR; goto fail; } if (neg || dlm_no_directory(ls)) { /* * Clear lkb's for departed nodes. */ dlm_recover_purge(ls); /* * Get new master nodeid's for rsb's that were mastered on * departed nodes. 
*/ error = dlm_recover_masters(ls, rv->seq); if (error) { log_rinfo(ls, "dlm_recover_masters error %d", error); goto fail; } /* * Send our locks on remastered rsb's to the new masters. */ error = dlm_recover_locks(ls, rv->seq); if (error) { log_rinfo(ls, "dlm_recover_locks error %d", error); goto fail; } dlm_set_recover_status(ls, DLM_RS_LOCKS); error = dlm_recover_locks_wait(ls, rv->seq); if (error) { log_rinfo(ls, "dlm_recover_locks_wait error %d", error); goto fail; } log_rinfo(ls, "dlm_recover_locks %u in", ls->ls_recover_locks_in); /* * Finalize state in master rsb's now that all locks can be * checked. This includes conversion resolution and lvb * settings. */ dlm_recover_rsbs(ls); } else { /* * Other lockspace members may be going through the "neg" steps * while also adding us to the lockspace, in which case they'll * be doing the recover_locks (RS_LOCKS) barrier. */ dlm_set_recover_status(ls, DLM_RS_LOCKS); error = dlm_recover_locks_wait(ls, rv->seq); if (error) { log_rinfo(ls, "dlm_recover_locks_wait error %d", error); goto fail; } } dlm_release_root_list(ls); /* * Purge directory-related requests that are saved in requestqueue. * All dir requests from before recovery are invalid now due to the dir * rebuild and will be resent by the requesting nodes. */ dlm_purge_requestqueue(ls); dlm_set_recover_status(ls, DLM_RS_DONE); error = dlm_recover_done_wait(ls, rv->seq); if (error) { log_rinfo(ls, "dlm_recover_done_wait error %d", error); goto fail; } dlm_clear_members_gone(ls); dlm_callback_resume(ls); error = enable_locking(ls, rv->seq); if (error) { log_rinfo(ls, "enable_locking error %d", error); goto fail; } error = dlm_process_requestqueue(ls); if (error) { log_rinfo(ls, "dlm_process_requestqueue error %d", error); goto fail; } error = dlm_recover_waiters_post(ls); if (error) { log_rinfo(ls, "dlm_recover_waiters_post error %d", error); goto fail; } dlm_recover_grant(ls); log_rinfo(ls, "dlm_recover %llu generation %u done: %u ms", (unsigned long long)rv->seq, ls->ls_generation, jiffies_to_msecs(jiffies - start)); mutex_unlock(&ls->ls_recoverd_active); return 0; fail: dlm_release_root_list(ls); mutex_unlock(&ls->ls_recoverd_active); return error; } /* The dlm_ls_start() that created the rv we take here may already have been stopped via dlm_ls_stop(); in that case we need to leave the RECOVERY_STOP flag set. */ static void do_ls_recovery(struct dlm_ls *ls) { struct dlm_recover *rv = NULL; int error; spin_lock(&ls->ls_recover_lock); rv = ls->ls_recover_args; ls->ls_recover_args = NULL; if (rv && ls->ls_recover_seq == rv->seq) clear_bit(LSFL_RECOVER_STOP, &ls->ls_flags); spin_unlock(&ls->ls_recover_lock); if (rv) { error = ls_recover(ls, rv); switch (error) { case 0: ls->ls_recovery_result = 0; complete(&ls->ls_recovery_done); dlm_lsop_recover_done(ls); break; case -EINTR: /* if recovery was interrupted -EINTR we wait for the next * ls_recover() iteration until it hopefully succeeds. 
*/ log_rinfo(ls, "%s %llu interrupted and should be queued to run again", __func__, (unsigned long long)rv->seq); break; default: log_rinfo(ls, "%s %llu error %d", __func__, (unsigned long long)rv->seq, error); /* let new_lockspace() get aware of critical error */ ls->ls_recovery_result = error; complete(&ls->ls_recovery_done); break; } kfree(rv->nodes); kfree(rv); } } static int dlm_recoverd(void *arg) { struct dlm_ls *ls; ls = dlm_find_lockspace_local(arg); if (!ls) { log_print("dlm_recoverd: no lockspace %p", arg); return -1; } down_write(&ls->ls_in_recovery); set_bit(LSFL_RECOVER_LOCK, &ls->ls_flags); wake_up(&ls->ls_recover_lock_wait); while (1) { /* * We call kthread_should_stop() after set_current_state(). * This is because it works correctly if kthread_stop() is * called just before set_current_state(). */ set_current_state(TASK_INTERRUPTIBLE); if (kthread_should_stop()) { set_current_state(TASK_RUNNING); break; } if (!test_bit(LSFL_RECOVER_WORK, &ls->ls_flags) && !test_bit(LSFL_RECOVER_DOWN, &ls->ls_flags)) { if (kthread_should_stop()) break; schedule(); } set_current_state(TASK_RUNNING); if (test_and_clear_bit(LSFL_RECOVER_DOWN, &ls->ls_flags)) { down_write(&ls->ls_in_recovery); set_bit(LSFL_RECOVER_LOCK, &ls->ls_flags); wake_up(&ls->ls_recover_lock_wait); } if (test_and_clear_bit(LSFL_RECOVER_WORK, &ls->ls_flags)) do_ls_recovery(ls); } if (test_bit(LSFL_RECOVER_LOCK, &ls->ls_flags)) up_write(&ls->ls_in_recovery); dlm_put_lockspace(ls); return 0; } int dlm_recoverd_start(struct dlm_ls *ls) { struct task_struct *p; int error = 0; p = kthread_run(dlm_recoverd, ls, "dlm_recoverd"); if (IS_ERR(p)) error = PTR_ERR(p); else ls->ls_recoverd_task = p; return error; } void dlm_recoverd_stop(struct dlm_ls *ls) { kthread_stop(ls->ls_recoverd_task); } void dlm_recoverd_suspend(struct dlm_ls *ls) { wake_up(&ls->ls_wait_general); mutex_lock(&ls->ls_recoverd_active); } void dlm_recoverd_resume(struct dlm_ls *ls) { mutex_unlock(&ls->ls_recoverd_active); }
linux-master
fs/dlm/recoverd.c
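ls_recover() in fs/dlm/recoverd.c above drives recovery as a fixed sequence of stages (members, directory, masters/locks, rsb finalization), where each stage publishes a status bit with dlm_set_recover_status() and then sits in a dlm_recover_*_wait() barrier until every member has reported the same point; any error aborts the pass and recovery is re-queued. The sketch below shows only that control shape in stand-alone C, with invented names in place of the real per-stage work and barriers.

#include <stdio.h>

enum stage { RS_NODES, RS_DIR, RS_LOCKS, RS_DONE, RS_MAX };

static const char *stage_name[RS_MAX] = { "nodes", "dir", "locks", "done" };

/* Stand-ins for pairs like dlm_recover_directory() and
 * dlm_recover_directory_wait(); here they only print. */
static int run_local_step(int s)
{
	printf("local step: %s\n", stage_name[s]);
	return 0;
}

static int wait_for_all_members(int s)
{
	printf("barrier:    %s\n", stage_name[s]);
	return 0;
}

static int ls_recover_sketch(void)
{
	int error;
	int s;

	for (s = RS_NODES; s < RS_MAX; s++) {
		error = run_local_step(s);
		if (error)
			return error;	/* abort the pass; it will be rerun */
		error = wait_for_all_members(s);
		if (error)
			return error;
	}
	return 0;	/* only now is locking re-enabled */
}

int main(void)
{
	return ls_recover_sketch();
}

The property the barriers give the real code is that no node starts acting on stage N+1 state (for example, sending its locks to new masters) before every node has finished producing its stage N state.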
// SPDX-License-Identifier: GPL-2.0-only /****************************************************************************** ******************************************************************************* ** ** Copyright (C) 2005-2010 Red Hat, Inc. All rights reserved. ** ** ******************************************************************************* ******************************************************************************/ /* Central locking logic has four stages: dlm_lock() dlm_unlock() request_lock(ls, lkb) convert_lock(ls, lkb) unlock_lock(ls, lkb) cancel_lock(ls, lkb) _request_lock(r, lkb) _convert_lock(r, lkb) _unlock_lock(r, lkb) _cancel_lock(r, lkb) do_request(r, lkb) do_convert(r, lkb) do_unlock(r, lkb) do_cancel(r, lkb) Stage 1 (lock, unlock) is mainly about checking input args and splitting into one of the four main operations: dlm_lock = request_lock dlm_lock+CONVERT = convert_lock dlm_unlock = unlock_lock dlm_unlock+CANCEL = cancel_lock Stage 2, xxxx_lock(), just finds and locks the relevant rsb which is provided to the next stage. Stage 3, _xxxx_lock(), determines if the operation is local or remote. When remote, it calls send_xxxx(), when local it calls do_xxxx(). Stage 4, do_xxxx(), is the guts of the operation. It manipulates the given rsb and lkb and queues callbacks. For remote operations, send_xxxx() results in the corresponding do_xxxx() function being executed on the remote node. The connecting send/receive calls on local (L) and remote (R) nodes: L: send_xxxx() -> R: receive_xxxx() R: do_xxxx() L: receive_xxxx_reply() <- R: send_xxxx_reply() */ #include <trace/events/dlm.h> #include <linux/types.h> #include <linux/rbtree.h> #include <linux/slab.h> #include "dlm_internal.h" #include <linux/dlm_device.h> #include "memory.h" #include "midcomms.h" #include "requestqueue.h" #include "util.h" #include "dir.h" #include "member.h" #include "lockspace.h" #include "ast.h" #include "lock.h" #include "rcom.h" #include "recover.h" #include "lvb_table.h" #include "user.h" #include "config.h" static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb); static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb); static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb); static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb); static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb); static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode); static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb); static int send_remove(struct dlm_rsb *r); static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb); static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb); static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, const struct dlm_message *ms, bool local); static int receive_extralen(const struct dlm_message *ms); static void do_purge(struct dlm_ls *ls, int nodeid, int pid); static void toss_rsb(struct kref *kref); /* * Lock compatibilty matrix - thanks Steve * UN = Unlocked state. Not really a state, used as a flag * PD = Padding. Used to make the matrix a nice power of two in size * Other states are the same as the VMS DLM. 
* Usage: matrix[grmode+1][rqmode+1] (although m[rq+1][gr+1] is the same) */ static const int __dlm_compat_matrix[8][8] = { /* UN NL CR CW PR PW EX PD */ {1, 1, 1, 1, 1, 1, 1, 0}, /* UN */ {1, 1, 1, 1, 1, 1, 1, 0}, /* NL */ {1, 1, 1, 1, 1, 1, 0, 0}, /* CR */ {1, 1, 1, 1, 0, 0, 0, 0}, /* CW */ {1, 1, 1, 0, 1, 0, 0, 0}, /* PR */ {1, 1, 1, 0, 0, 0, 0, 0}, /* PW */ {1, 1, 0, 0, 0, 0, 0, 0}, /* EX */ {0, 0, 0, 0, 0, 0, 0, 0} /* PD */ }; /* * This defines the direction of transfer of LVB data. * Granted mode is the row; requested mode is the column. * Usage: matrix[grmode+1][rqmode+1] * 1 = LVB is returned to the caller * 0 = LVB is written to the resource * -1 = nothing happens to the LVB */ const int dlm_lvb_operations[8][8] = { /* UN NL CR CW PR PW EX PD*/ { -1, 1, 1, 1, 1, 1, 1, -1 }, /* UN */ { -1, 1, 1, 1, 1, 1, 1, 0 }, /* NL */ { -1, -1, 1, 1, 1, 1, 1, 0 }, /* CR */ { -1, -1, -1, 1, 1, 1, 1, 0 }, /* CW */ { -1, -1, -1, -1, 1, 1, 1, 0 }, /* PR */ { -1, 0, 0, 0, 0, 0, 1, 0 }, /* PW */ { -1, 0, 0, 0, 0, 0, 0, 0 }, /* EX */ { -1, 0, 0, 0, 0, 0, 0, 0 } /* PD */ }; #define modes_compat(gr, rq) \ __dlm_compat_matrix[(gr)->lkb_grmode + 1][(rq)->lkb_rqmode + 1] int dlm_modes_compat(int mode1, int mode2) { return __dlm_compat_matrix[mode1 + 1][mode2 + 1]; } /* * Compatibility matrix for conversions with QUECVT set. * Granted mode is the row; requested mode is the column. * Usage: matrix[grmode+1][rqmode+1] */ static const int __quecvt_compat_matrix[8][8] = { /* UN NL CR CW PR PW EX PD */ {0, 0, 0, 0, 0, 0, 0, 0}, /* UN */ {0, 0, 1, 1, 1, 1, 1, 0}, /* NL */ {0, 0, 0, 1, 1, 1, 1, 0}, /* CR */ {0, 0, 0, 0, 1, 1, 1, 0}, /* CW */ {0, 0, 0, 1, 0, 1, 1, 0}, /* PR */ {0, 0, 0, 0, 0, 0, 1, 0}, /* PW */ {0, 0, 0, 0, 0, 0, 0, 0}, /* EX */ {0, 0, 0, 0, 0, 0, 0, 0} /* PD */ }; void dlm_print_lkb(struct dlm_lkb *lkb) { printk(KERN_ERR "lkb: nodeid %d id %x remid %x exflags %x flags %x " "sts %d rq %d gr %d wait_type %d wait_nodeid %d seq %llu\n", lkb->lkb_nodeid, lkb->lkb_id, lkb->lkb_remid, lkb->lkb_exflags, dlm_iflags_val(lkb), lkb->lkb_status, lkb->lkb_rqmode, lkb->lkb_grmode, lkb->lkb_wait_type, lkb->lkb_wait_nodeid, (unsigned long long)lkb->lkb_recover_seq); } static void dlm_print_rsb(struct dlm_rsb *r) { printk(KERN_ERR "rsb: nodeid %d master %d dir %d flags %lx first %x " "rlc %d name %s\n", r->res_nodeid, r->res_master_nodeid, r->res_dir_nodeid, r->res_flags, r->res_first_lkid, r->res_recover_locks_count, r->res_name); } void dlm_dump_rsb(struct dlm_rsb *r) { struct dlm_lkb *lkb; dlm_print_rsb(r); printk(KERN_ERR "rsb: root_list empty %d recover_list empty %d\n", list_empty(&r->res_root_list), list_empty(&r->res_recover_list)); printk(KERN_ERR "rsb lookup list\n"); list_for_each_entry(lkb, &r->res_lookup, lkb_rsb_lookup) dlm_print_lkb(lkb); printk(KERN_ERR "rsb grant queue:\n"); list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) dlm_print_lkb(lkb); printk(KERN_ERR "rsb convert queue:\n"); list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) dlm_print_lkb(lkb); printk(KERN_ERR "rsb wait queue:\n"); list_for_each_entry(lkb, &r->res_waitqueue, lkb_statequeue) dlm_print_lkb(lkb); } /* Threads cannot use the lockspace while it's being recovered */ static inline void dlm_lock_recovery(struct dlm_ls *ls) { down_read(&ls->ls_in_recovery); } void dlm_unlock_recovery(struct dlm_ls *ls) { up_read(&ls->ls_in_recovery); } int dlm_lock_recovery_try(struct dlm_ls *ls) { return down_read_trylock(&ls->ls_in_recovery); } static inline int can_be_queued(struct dlm_lkb *lkb) { return !(lkb->lkb_exflags & 
DLM_LKF_NOQUEUE); } static inline int force_blocking_asts(struct dlm_lkb *lkb) { return (lkb->lkb_exflags & DLM_LKF_NOQUEUEBAST); } static inline int is_demoted(struct dlm_lkb *lkb) { return test_bit(DLM_SBF_DEMOTED_BIT, &lkb->lkb_sbflags); } static inline int is_altmode(struct dlm_lkb *lkb) { return test_bit(DLM_SBF_ALTMODE_BIT, &lkb->lkb_sbflags); } static inline int is_granted(struct dlm_lkb *lkb) { return (lkb->lkb_status == DLM_LKSTS_GRANTED); } static inline int is_remote(struct dlm_rsb *r) { DLM_ASSERT(r->res_nodeid >= 0, dlm_print_rsb(r);); return !!r->res_nodeid; } static inline int is_process_copy(struct dlm_lkb *lkb) { return lkb->lkb_nodeid && !test_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags); } static inline int is_master_copy(struct dlm_lkb *lkb) { return test_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags); } static inline int middle_conversion(struct dlm_lkb *lkb) { if ((lkb->lkb_grmode==DLM_LOCK_PR && lkb->lkb_rqmode==DLM_LOCK_CW) || (lkb->lkb_rqmode==DLM_LOCK_PR && lkb->lkb_grmode==DLM_LOCK_CW)) return 1; return 0; } static inline int down_conversion(struct dlm_lkb *lkb) { return (!middle_conversion(lkb) && lkb->lkb_rqmode < lkb->lkb_grmode); } static inline int is_overlap_unlock(struct dlm_lkb *lkb) { return test_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags); } static inline int is_overlap_cancel(struct dlm_lkb *lkb) { return test_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags); } static inline int is_overlap(struct dlm_lkb *lkb) { return test_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags) || test_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags); } static void queue_cast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv) { if (is_master_copy(lkb)) return; DLM_ASSERT(lkb->lkb_lksb, dlm_print_lkb(lkb);); if (rv == -DLM_ECANCEL && test_and_clear_bit(DLM_IFL_DEADLOCK_CANCEL_BIT, &lkb->lkb_iflags)) rv = -EDEADLK; dlm_add_cb(lkb, DLM_CB_CAST, lkb->lkb_grmode, rv, dlm_sbflags_val(lkb)); } static inline void queue_cast_overlap(struct dlm_rsb *r, struct dlm_lkb *lkb) { queue_cast(r, lkb, is_overlap_unlock(lkb) ? -DLM_EUNLOCK : -DLM_ECANCEL); } static void queue_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rqmode) { if (is_master_copy(lkb)) { send_bast(r, lkb, rqmode); } else { dlm_add_cb(lkb, DLM_CB_BAST, rqmode, 0, 0); } } /* * Basic operations on rsb's and lkb's */ /* This is only called to add a reference when the code already holds a valid reference to the rsb, so there's no need for locking. */ static inline void hold_rsb(struct dlm_rsb *r) { kref_get(&r->res_ref); } void dlm_hold_rsb(struct dlm_rsb *r) { hold_rsb(r); } /* When all references to the rsb are gone it's transferred to the tossed list for later disposal. 
*/ static void put_rsb(struct dlm_rsb *r) { struct dlm_ls *ls = r->res_ls; uint32_t bucket = r->res_bucket; int rv; rv = kref_put_lock(&r->res_ref, toss_rsb, &ls->ls_rsbtbl[bucket].lock); if (rv) spin_unlock(&ls->ls_rsbtbl[bucket].lock); } void dlm_put_rsb(struct dlm_rsb *r) { put_rsb(r); } static int pre_rsb_struct(struct dlm_ls *ls) { struct dlm_rsb *r1, *r2; int count = 0; spin_lock(&ls->ls_new_rsb_spin); if (ls->ls_new_rsb_count > dlm_config.ci_new_rsb_count / 2) { spin_unlock(&ls->ls_new_rsb_spin); return 0; } spin_unlock(&ls->ls_new_rsb_spin); r1 = dlm_allocate_rsb(ls); r2 = dlm_allocate_rsb(ls); spin_lock(&ls->ls_new_rsb_spin); if (r1) { list_add(&r1->res_hashchain, &ls->ls_new_rsb); ls->ls_new_rsb_count++; } if (r2) { list_add(&r2->res_hashchain, &ls->ls_new_rsb); ls->ls_new_rsb_count++; } count = ls->ls_new_rsb_count; spin_unlock(&ls->ls_new_rsb_spin); if (!count) return -ENOMEM; return 0; } /* If ls->ls_new_rsb is empty, return -EAGAIN, so the caller can unlock any spinlocks, go back and call pre_rsb_struct again. Otherwise, take an rsb off the list and return it. */ static int get_rsb_struct(struct dlm_ls *ls, const void *name, int len, struct dlm_rsb **r_ret) { struct dlm_rsb *r; int count; spin_lock(&ls->ls_new_rsb_spin); if (list_empty(&ls->ls_new_rsb)) { count = ls->ls_new_rsb_count; spin_unlock(&ls->ls_new_rsb_spin); log_debug(ls, "find_rsb retry %d %d %s", count, dlm_config.ci_new_rsb_count, (const char *)name); return -EAGAIN; } r = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb, res_hashchain); list_del(&r->res_hashchain); /* Convert the empty list_head to a NULL rb_node for tree usage: */ memset(&r->res_hashnode, 0, sizeof(struct rb_node)); ls->ls_new_rsb_count--; spin_unlock(&ls->ls_new_rsb_spin); r->res_ls = ls; r->res_length = len; memcpy(r->res_name, name, len); mutex_init(&r->res_mutex); INIT_LIST_HEAD(&r->res_lookup); INIT_LIST_HEAD(&r->res_grantqueue); INIT_LIST_HEAD(&r->res_convertqueue); INIT_LIST_HEAD(&r->res_waitqueue); INIT_LIST_HEAD(&r->res_root_list); INIT_LIST_HEAD(&r->res_recover_list); *r_ret = r; return 0; } static int rsb_cmp(struct dlm_rsb *r, const char *name, int nlen) { char maxname[DLM_RESNAME_MAXLEN]; memset(maxname, 0, DLM_RESNAME_MAXLEN); memcpy(maxname, name, nlen); return memcmp(r->res_name, maxname, DLM_RESNAME_MAXLEN); } int dlm_search_rsb_tree(struct rb_root *tree, const void *name, int len, struct dlm_rsb **r_ret) { struct rb_node *node = tree->rb_node; struct dlm_rsb *r; int rc; while (node) { r = rb_entry(node, struct dlm_rsb, res_hashnode); rc = rsb_cmp(r, name, len); if (rc < 0) node = node->rb_left; else if (rc > 0) node = node->rb_right; else goto found; } *r_ret = NULL; return -EBADR; found: *r_ret = r; return 0; } static int rsb_insert(struct dlm_rsb *rsb, struct rb_root *tree) { struct rb_node **newn = &tree->rb_node; struct rb_node *parent = NULL; int rc; while (*newn) { struct dlm_rsb *cur = rb_entry(*newn, struct dlm_rsb, res_hashnode); parent = *newn; rc = rsb_cmp(cur, rsb->res_name, rsb->res_length); if (rc < 0) newn = &parent->rb_left; else if (rc > 0) newn = &parent->rb_right; else { log_print("rsb_insert match"); dlm_dump_rsb(rsb); dlm_dump_rsb(cur); return -EEXIST; } } rb_link_node(&rsb->res_hashnode, parent, newn); rb_insert_color(&rsb->res_hashnode, tree); return 0; } /* * Find rsb in rsbtbl and potentially create/add one * * Delaying the release of rsb's has a similar benefit to applications keeping * NL locks on an rsb, but without the guarantee that the cached master value * will still be valid when the rsb is 
reused. Apps aren't always smart enough * to keep NL locks on an rsb that they may lock again shortly; this can lead * to excessive master lookups and removals if we don't delay the release. * * Searching for an rsb means looking through both the normal list and toss * list. When found on the toss list the rsb is moved to the normal list with * ref count of 1; when found on normal list the ref count is incremented. * * rsb's on the keep list are being used locally and refcounted. * rsb's on the toss list are not being used locally, and are not refcounted. * * The toss list rsb's were either * - previously used locally but not any more (were on keep list, then * moved to toss list when last refcount dropped) * - created and put on toss list as a directory record for a lookup * (we are the dir node for the res, but are not using the res right now, * but some other node is) * * The purpose of find_rsb() is to return a refcounted rsb for local use. * So, if the given rsb is on the toss list, it is moved to the keep list * before being returned. * * toss_rsb() happens when all local usage of the rsb is done, i.e. no * more refcounts exist, so the rsb is moved from the keep list to the * toss list. * * rsb's on both keep and toss lists are used for doing a name to master * lookups. rsb's that are in use locally (and being refcounted) are on * the keep list, rsb's that are not in use locally (not refcounted) and * only exist for name/master lookups are on the toss list. * * rsb's on the toss list who's dir_nodeid is not local can have stale * name/master mappings. So, remote requests on such rsb's can potentially * return with an error, which means the mapping is stale and needs to * be updated with a new lookup. (The idea behind MASTER UNCERTAIN and * first_lkid is to keep only a single outstanding request on an rsb * while that rsb has a potentially stale master.) */ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len, uint32_t hash, uint32_t b, int dir_nodeid, int from_nodeid, unsigned int flags, struct dlm_rsb **r_ret) { struct dlm_rsb *r = NULL; int our_nodeid = dlm_our_nodeid(); int from_local = 0; int from_other = 0; int from_dir = 0; int create = 0; int error; if (flags & R_RECEIVE_REQUEST) { if (from_nodeid == dir_nodeid) from_dir = 1; else from_other = 1; } else if (flags & R_REQUEST) { from_local = 1; } /* * flags & R_RECEIVE_RECOVER is from dlm_recover_master_copy, so * from_nodeid has sent us a lock in dlm_recover_locks, believing * we're the new master. Our local recovery may not have set * res_master_nodeid to our_nodeid yet, so allow either. Don't * create the rsb; dlm_recover_process_copy() will handle EBADR * by resending. * * If someone sends us a request, we are the dir node, and we do * not find the rsb anywhere, then recreate it. This happens if * someone sends us a request after we have removed/freed an rsb * from our toss list. (They sent a request instead of lookup * because they are using an rsb from their toss list.) */ if (from_local || from_dir || (from_other && (dir_nodeid == our_nodeid))) { create = 1; } retry: if (create) { error = pre_rsb_struct(ls); if (error < 0) goto out; } spin_lock(&ls->ls_rsbtbl[b].lock); error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r); if (error) goto do_toss; /* * rsb is active, so we can't check master_nodeid without lock_rsb. 
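 * Just take a reference here and hand the active rsb back to the caller
 * as-is.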
*/ kref_get(&r->res_ref); goto out_unlock; do_toss: error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r); if (error) goto do_new; /* * rsb found inactive (master_nodeid may be out of date unless * we are the dir_nodeid or were the master) No other thread * is using this rsb because it's on the toss list, so we can * look at or update res_master_nodeid without lock_rsb. */ if ((r->res_master_nodeid != our_nodeid) && from_other) { /* our rsb was not master, and another node (not the dir node) has sent us a request */ log_debug(ls, "find_rsb toss from_other %d master %d dir %d %s", from_nodeid, r->res_master_nodeid, dir_nodeid, r->res_name); error = -ENOTBLK; goto out_unlock; } if ((r->res_master_nodeid != our_nodeid) && from_dir) { /* don't think this should ever happen */ log_error(ls, "find_rsb toss from_dir %d master %d", from_nodeid, r->res_master_nodeid); dlm_print_rsb(r); /* fix it and go on */ r->res_master_nodeid = our_nodeid; r->res_nodeid = 0; rsb_clear_flag(r, RSB_MASTER_UNCERTAIN); r->res_first_lkid = 0; } if (from_local && (r->res_master_nodeid != our_nodeid)) { /* Because we have held no locks on this rsb, res_master_nodeid could have become stale. */ rsb_set_flag(r, RSB_MASTER_UNCERTAIN); r->res_first_lkid = 0; } rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss); error = rsb_insert(r, &ls->ls_rsbtbl[b].keep); goto out_unlock; do_new: /* * rsb not found */ if (error == -EBADR && !create) goto out_unlock; error = get_rsb_struct(ls, name, len, &r); if (error == -EAGAIN) { spin_unlock(&ls->ls_rsbtbl[b].lock); goto retry; } if (error) goto out_unlock; r->res_hash = hash; r->res_bucket = b; r->res_dir_nodeid = dir_nodeid; kref_init(&r->res_ref); if (from_dir) { /* want to see how often this happens */ log_debug(ls, "find_rsb new from_dir %d recreate %s", from_nodeid, r->res_name); r->res_master_nodeid = our_nodeid; r->res_nodeid = 0; goto out_add; } if (from_other && (dir_nodeid != our_nodeid)) { /* should never happen */ log_error(ls, "find_rsb new from_other %d dir %d our %d %s", from_nodeid, dir_nodeid, our_nodeid, r->res_name); dlm_free_rsb(r); r = NULL; error = -ENOTBLK; goto out_unlock; } if (from_other) { log_debug(ls, "find_rsb new from_other %d dir %d %s", from_nodeid, dir_nodeid, r->res_name); } if (dir_nodeid == our_nodeid) { /* When we are the dir nodeid, we can set the master node immediately */ r->res_master_nodeid = our_nodeid; r->res_nodeid = 0; } else { /* set_master will send_lookup to dir_nodeid */ r->res_master_nodeid = 0; r->res_nodeid = -1; } out_add: error = rsb_insert(r, &ls->ls_rsbtbl[b].keep); out_unlock: spin_unlock(&ls->ls_rsbtbl[b].lock); out: *r_ret = r; return error; } /* During recovery, other nodes can send us new MSTCPY locks (from dlm_recover_locks) before we've made ourself master (in dlm_recover_masters). */ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len, uint32_t hash, uint32_t b, int dir_nodeid, int from_nodeid, unsigned int flags, struct dlm_rsb **r_ret) { struct dlm_rsb *r = NULL; int our_nodeid = dlm_our_nodeid(); int recover = (flags & R_RECEIVE_RECOVER); int error; retry: error = pre_rsb_struct(ls); if (error < 0) goto out; spin_lock(&ls->ls_rsbtbl[b].lock); error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r); if (error) goto do_toss; /* * rsb is active, so we can't check master_nodeid without lock_rsb. */ kref_get(&r->res_ref); goto out_unlock; do_toss: error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r); if (error) goto do_new; /* * rsb found inactive. 
No other thread is using this rsb because * it's on the toss list, so we can look at or update * res_master_nodeid without lock_rsb. */ if (!recover && (r->res_master_nodeid != our_nodeid) && from_nodeid) { /* our rsb is not master, and another node has sent us a request; this should never happen */ log_error(ls, "find_rsb toss from_nodeid %d master %d dir %d", from_nodeid, r->res_master_nodeid, dir_nodeid); dlm_print_rsb(r); error = -ENOTBLK; goto out_unlock; } if (!recover && (r->res_master_nodeid != our_nodeid) && (dir_nodeid == our_nodeid)) { /* our rsb is not master, and we are dir; may as well fix it; this should never happen */ log_error(ls, "find_rsb toss our %d master %d dir %d", our_nodeid, r->res_master_nodeid, dir_nodeid); dlm_print_rsb(r); r->res_master_nodeid = our_nodeid; r->res_nodeid = 0; } rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss); error = rsb_insert(r, &ls->ls_rsbtbl[b].keep); goto out_unlock; do_new: /* * rsb not found */ error = get_rsb_struct(ls, name, len, &r); if (error == -EAGAIN) { spin_unlock(&ls->ls_rsbtbl[b].lock); goto retry; } if (error) goto out_unlock; r->res_hash = hash; r->res_bucket = b; r->res_dir_nodeid = dir_nodeid; r->res_master_nodeid = dir_nodeid; r->res_nodeid = (dir_nodeid == our_nodeid) ? 0 : dir_nodeid; kref_init(&r->res_ref); error = rsb_insert(r, &ls->ls_rsbtbl[b].keep); out_unlock: spin_unlock(&ls->ls_rsbtbl[b].lock); out: *r_ret = r; return error; } static int find_rsb(struct dlm_ls *ls, const void *name, int len, int from_nodeid, unsigned int flags, struct dlm_rsb **r_ret) { uint32_t hash, b; int dir_nodeid; if (len > DLM_RESNAME_MAXLEN) return -EINVAL; hash = jhash(name, len, 0); b = hash & (ls->ls_rsbtbl_size - 1); dir_nodeid = dlm_hash2nodeid(ls, hash); if (dlm_no_directory(ls)) return find_rsb_nodir(ls, name, len, hash, b, dir_nodeid, from_nodeid, flags, r_ret); else return find_rsb_dir(ls, name, len, hash, b, dir_nodeid, from_nodeid, flags, r_ret); } /* we have received a request and found that res_master_nodeid != our_nodeid, so we need to return an error or make ourself the master */ static int validate_master_nodeid(struct dlm_ls *ls, struct dlm_rsb *r, int from_nodeid) { if (dlm_no_directory(ls)) { log_error(ls, "find_rsb keep from_nodeid %d master %d dir %d", from_nodeid, r->res_master_nodeid, r->res_dir_nodeid); dlm_print_rsb(r); return -ENOTBLK; } if (from_nodeid != r->res_dir_nodeid) { /* our rsb is not master, and another node (not the dir node) has sent us a request. this is much more common when our master_nodeid is zero, so limit debug to non-zero. 
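 * In short: a request arriving from a node other than the directory node
 * is refused with -ENOTBLK (the sender's cached master is stale and it
 * must repeat the lookup), while a request arriving from the directory
 * node itself lets us simply take over as master in the else branch below.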
*/ if (r->res_master_nodeid) { log_debug(ls, "validate master from_other %d master %d " "dir %d first %x %s", from_nodeid, r->res_master_nodeid, r->res_dir_nodeid, r->res_first_lkid, r->res_name); } return -ENOTBLK; } else { /* our rsb is not master, but the dir nodeid has sent us a request; this could happen with master 0 / res_nodeid -1 */ if (r->res_master_nodeid) { log_error(ls, "validate master from_dir %d master %d " "first %x %s", from_nodeid, r->res_master_nodeid, r->res_first_lkid, r->res_name); } r->res_master_nodeid = dlm_our_nodeid(); r->res_nodeid = 0; return 0; } } static void __dlm_master_lookup(struct dlm_ls *ls, struct dlm_rsb *r, int our_nodeid, int from_nodeid, bool toss_list, unsigned int flags, int *r_nodeid, int *result) { int fix_master = (flags & DLM_LU_RECOVER_MASTER); int from_master = (flags & DLM_LU_RECOVER_DIR); if (r->res_dir_nodeid != our_nodeid) { /* should not happen, but may as well fix it and carry on */ log_error(ls, "%s res_dir %d our %d %s", __func__, r->res_dir_nodeid, our_nodeid, r->res_name); r->res_dir_nodeid = our_nodeid; } if (fix_master && dlm_is_removed(ls, r->res_master_nodeid)) { /* Recovery uses this function to set a new master when * the previous master failed. Setting NEW_MASTER will * force dlm_recover_masters to call recover_master on this * rsb even though the res_nodeid is no longer removed. */ r->res_master_nodeid = from_nodeid; r->res_nodeid = from_nodeid; rsb_set_flag(r, RSB_NEW_MASTER); if (toss_list) { /* I don't think we should ever find it on toss list. */ log_error(ls, "%s fix_master on toss", __func__); dlm_dump_rsb(r); } } if (from_master && (r->res_master_nodeid != from_nodeid)) { /* this will happen if from_nodeid became master during * a previous recovery cycle, and we aborted the previous * cycle before recovering this master value */ log_limit(ls, "%s from_master %d master_nodeid %d res_nodeid %d first %x %s", __func__, from_nodeid, r->res_master_nodeid, r->res_nodeid, r->res_first_lkid, r->res_name); if (r->res_master_nodeid == our_nodeid) { log_error(ls, "from_master %d our_master", from_nodeid); dlm_dump_rsb(r); goto ret_assign; } r->res_master_nodeid = from_nodeid; r->res_nodeid = from_nodeid; rsb_set_flag(r, RSB_NEW_MASTER); } if (!r->res_master_nodeid) { /* this will happen if recovery happens while we're looking * up the master for this rsb */ log_debug(ls, "%s master 0 to %d first %x %s", __func__, from_nodeid, r->res_first_lkid, r->res_name); r->res_master_nodeid = from_nodeid; r->res_nodeid = from_nodeid; } if (!from_master && !fix_master && (r->res_master_nodeid == from_nodeid)) { /* this can happen when the master sends remove, the dir node * finds the rsb on the keep list and ignores the remove, * and the former master sends a lookup */ log_limit(ls, "%s from master %d flags %x first %x %s", __func__, from_nodeid, flags, r->res_first_lkid, r->res_name); } ret_assign: *r_nodeid = r->res_master_nodeid; if (result) *result = DLM_LU_MATCH; } /* * We're the dir node for this res and another node wants to know the * master nodeid. During normal operation (non recovery) this is only * called from receive_lookup(); master lookups when the local node is * the dir node are done by find_rsb(). * * normal operation, we are the dir node for a resource * . _request_lock * . set_master * . send_lookup * . receive_lookup * . dlm_master_lookup flags 0 * * recover directory, we are rebuilding dir for all resources * . dlm_recover_directory * . 
dlm_rcom_names * remote node sends back the rsb names it is master of and we are dir of * . dlm_master_lookup RECOVER_DIR (fix_master 0, from_master 1) * we either create new rsb setting remote node as master, or find existing * rsb and set master to be the remote node. * * recover masters, we are finding the new master for resources * . dlm_recover_masters * . recover_master * . dlm_send_rcom_lookup * . receive_rcom_lookup * . dlm_master_lookup RECOVER_MASTER (fix_master 1, from_master 0) */ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name, int len, unsigned int flags, int *r_nodeid, int *result) { struct dlm_rsb *r = NULL; uint32_t hash, b; int our_nodeid = dlm_our_nodeid(); int dir_nodeid, error; if (len > DLM_RESNAME_MAXLEN) return -EINVAL; if (from_nodeid == our_nodeid) { log_error(ls, "dlm_master_lookup from our_nodeid %d flags %x", our_nodeid, flags); return -EINVAL; } hash = jhash(name, len, 0); b = hash & (ls->ls_rsbtbl_size - 1); dir_nodeid = dlm_hash2nodeid(ls, hash); if (dir_nodeid != our_nodeid) { log_error(ls, "dlm_master_lookup from %d dir %d our %d h %x %d", from_nodeid, dir_nodeid, our_nodeid, hash, ls->ls_num_nodes); *r_nodeid = -1; return -EINVAL; } retry: error = pre_rsb_struct(ls); if (error < 0) return error; spin_lock(&ls->ls_rsbtbl[b].lock); error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r); if (!error) { /* because the rsb is active, we need to lock_rsb before * checking/changing re_master_nodeid */ hold_rsb(r); spin_unlock(&ls->ls_rsbtbl[b].lock); lock_rsb(r); __dlm_master_lookup(ls, r, our_nodeid, from_nodeid, false, flags, r_nodeid, result); /* the rsb was active */ unlock_rsb(r); put_rsb(r); return 0; } error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r); if (error) goto not_found; /* because the rsb is inactive (on toss list), it's not refcounted * and lock_rsb is not used, but is protected by the rsbtbl lock */ __dlm_master_lookup(ls, r, our_nodeid, from_nodeid, true, flags, r_nodeid, result); r->res_toss_time = jiffies; /* the rsb was inactive (on toss list) */ spin_unlock(&ls->ls_rsbtbl[b].lock); return 0; not_found: error = get_rsb_struct(ls, name, len, &r); if (error == -EAGAIN) { spin_unlock(&ls->ls_rsbtbl[b].lock); goto retry; } if (error) goto out_unlock; r->res_hash = hash; r->res_bucket = b; r->res_dir_nodeid = our_nodeid; r->res_master_nodeid = from_nodeid; r->res_nodeid = from_nodeid; kref_init(&r->res_ref); r->res_toss_time = jiffies; error = rsb_insert(r, &ls->ls_rsbtbl[b].toss); if (error) { /* should never happen */ dlm_free_rsb(r); spin_unlock(&ls->ls_rsbtbl[b].lock); goto retry; } if (result) *result = DLM_LU_ADD; *r_nodeid = from_nodeid; out_unlock: spin_unlock(&ls->ls_rsbtbl[b].lock); return error; } static void dlm_dump_rsb_hash(struct dlm_ls *ls, uint32_t hash) { struct rb_node *n; struct dlm_rsb *r; int i; for (i = 0; i < ls->ls_rsbtbl_size; i++) { spin_lock(&ls->ls_rsbtbl[i].lock); for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) { r = rb_entry(n, struct dlm_rsb, res_hashnode); if (r->res_hash == hash) dlm_dump_rsb(r); } spin_unlock(&ls->ls_rsbtbl[i].lock); } } void dlm_dump_rsb_name(struct dlm_ls *ls, const char *name, int len) { struct dlm_rsb *r = NULL; uint32_t hash, b; int error; hash = jhash(name, len, 0); b = hash & (ls->ls_rsbtbl_size - 1); spin_lock(&ls->ls_rsbtbl[b].lock); error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r); if (!error) goto out_dump; error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r); if (error) goto out; 
out_dump: dlm_dump_rsb(r); out: spin_unlock(&ls->ls_rsbtbl[b].lock); } static void toss_rsb(struct kref *kref) { struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref); struct dlm_ls *ls = r->res_ls; DLM_ASSERT(list_empty(&r->res_root_list), dlm_print_rsb(r);); kref_init(&r->res_ref); rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[r->res_bucket].keep); rsb_insert(r, &ls->ls_rsbtbl[r->res_bucket].toss); r->res_toss_time = jiffies; set_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl[r->res_bucket].flags); if (r->res_lvbptr) { dlm_free_lvb(r->res_lvbptr); r->res_lvbptr = NULL; } } /* See comment for unhold_lkb */ static void unhold_rsb(struct dlm_rsb *r) { int rv; rv = kref_put(&r->res_ref, toss_rsb); DLM_ASSERT(!rv, dlm_dump_rsb(r);); } static void kill_rsb(struct kref *kref) { struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref); /* All work is done after the return from kref_put() so we can release the write_lock before the remove and free. */ DLM_ASSERT(list_empty(&r->res_lookup), dlm_dump_rsb(r);); DLM_ASSERT(list_empty(&r->res_grantqueue), dlm_dump_rsb(r);); DLM_ASSERT(list_empty(&r->res_convertqueue), dlm_dump_rsb(r);); DLM_ASSERT(list_empty(&r->res_waitqueue), dlm_dump_rsb(r);); DLM_ASSERT(list_empty(&r->res_root_list), dlm_dump_rsb(r);); DLM_ASSERT(list_empty(&r->res_recover_list), dlm_dump_rsb(r);); } /* Attaching/detaching lkb's from rsb's is for rsb reference counting. The rsb must exist as long as any lkb's for it do. */ static void attach_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb) { hold_rsb(r); lkb->lkb_resource = r; } static void detach_lkb(struct dlm_lkb *lkb) { if (lkb->lkb_resource) { put_rsb(lkb->lkb_resource); lkb->lkb_resource = NULL; } } static int _create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret, int start, int end) { struct dlm_lkb *lkb; int rv; lkb = dlm_allocate_lkb(ls); if (!lkb) return -ENOMEM; lkb->lkb_last_bast_mode = -1; lkb->lkb_nodeid = -1; lkb->lkb_grmode = DLM_LOCK_IV; kref_init(&lkb->lkb_ref); INIT_LIST_HEAD(&lkb->lkb_ownqueue); INIT_LIST_HEAD(&lkb->lkb_rsb_lookup); INIT_LIST_HEAD(&lkb->lkb_cb_list); INIT_LIST_HEAD(&lkb->lkb_callbacks); spin_lock_init(&lkb->lkb_cb_lock); INIT_WORK(&lkb->lkb_cb_work, dlm_callback_work); idr_preload(GFP_NOFS); spin_lock(&ls->ls_lkbidr_spin); rv = idr_alloc(&ls->ls_lkbidr, lkb, start, end, GFP_NOWAIT); if (rv >= 0) lkb->lkb_id = rv; spin_unlock(&ls->ls_lkbidr_spin); idr_preload_end(); if (rv < 0) { log_error(ls, "create_lkb idr error %d", rv); dlm_free_lkb(lkb); return rv; } *lkb_ret = lkb; return 0; } static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret) { return _create_lkb(ls, lkb_ret, 1, 0); } static int find_lkb(struct dlm_ls *ls, uint32_t lkid, struct dlm_lkb **lkb_ret) { struct dlm_lkb *lkb; spin_lock(&ls->ls_lkbidr_spin); lkb = idr_find(&ls->ls_lkbidr, lkid); if (lkb) kref_get(&lkb->lkb_ref); spin_unlock(&ls->ls_lkbidr_spin); *lkb_ret = lkb; return lkb ? 
0 : -ENOENT; } static void kill_lkb(struct kref *kref) { struct dlm_lkb *lkb = container_of(kref, struct dlm_lkb, lkb_ref); /* All work is done after the return from kref_put() so we can release the write_lock before the detach_lkb */ DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb);); } /* __put_lkb() is used when an lkb may not have an rsb attached to it so we need to provide the lockspace explicitly */ static int __put_lkb(struct dlm_ls *ls, struct dlm_lkb *lkb) { uint32_t lkid = lkb->lkb_id; int rv; rv = kref_put_lock(&lkb->lkb_ref, kill_lkb, &ls->ls_lkbidr_spin); if (rv) { idr_remove(&ls->ls_lkbidr, lkid); spin_unlock(&ls->ls_lkbidr_spin); detach_lkb(lkb); /* for local/process lkbs, lvbptr points to caller's lksb */ if (lkb->lkb_lvbptr && is_master_copy(lkb)) dlm_free_lvb(lkb->lkb_lvbptr); dlm_free_lkb(lkb); } return rv; } int dlm_put_lkb(struct dlm_lkb *lkb) { struct dlm_ls *ls; DLM_ASSERT(lkb->lkb_resource, dlm_print_lkb(lkb);); DLM_ASSERT(lkb->lkb_resource->res_ls, dlm_print_lkb(lkb);); ls = lkb->lkb_resource->res_ls; return __put_lkb(ls, lkb); } /* This is only called to add a reference when the code already holds a valid reference to the lkb, so there's no need for locking. */ static inline void hold_lkb(struct dlm_lkb *lkb) { kref_get(&lkb->lkb_ref); } static void unhold_lkb_assert(struct kref *kref) { struct dlm_lkb *lkb = container_of(kref, struct dlm_lkb, lkb_ref); DLM_ASSERT(false, dlm_print_lkb(lkb);); } /* This is called when we need to remove a reference and are certain it's not the last ref. e.g. del_lkb is always called between a find_lkb/put_lkb and is always the inverse of a previous add_lkb. put_lkb would work fine, but would involve unnecessary locking */ static inline void unhold_lkb(struct dlm_lkb *lkb) { kref_put(&lkb->lkb_ref, unhold_lkb_assert); } static void lkb_add_ordered(struct list_head *new, struct list_head *head, int mode) { struct dlm_lkb *lkb = NULL, *iter; list_for_each_entry(iter, head, lkb_statequeue) if (iter->lkb_rqmode < mode) { lkb = iter; list_add_tail(new, &iter->lkb_statequeue); break; } if (!lkb) list_add_tail(new, head); } /* add/remove lkb to rsb's grant/convert/wait queue */ static void add_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int status) { kref_get(&lkb->lkb_ref); DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb);); lkb->lkb_timestamp = ktime_get(); lkb->lkb_status = status; switch (status) { case DLM_LKSTS_WAITING: if (lkb->lkb_exflags & DLM_LKF_HEADQUE) list_add(&lkb->lkb_statequeue, &r->res_waitqueue); else list_add_tail(&lkb->lkb_statequeue, &r->res_waitqueue); break; case DLM_LKSTS_GRANTED: /* convention says granted locks kept in order of grmode */ lkb_add_ordered(&lkb->lkb_statequeue, &r->res_grantqueue, lkb->lkb_grmode); break; case DLM_LKSTS_CONVERT: if (lkb->lkb_exflags & DLM_LKF_HEADQUE) list_add(&lkb->lkb_statequeue, &r->res_convertqueue); else list_add_tail(&lkb->lkb_statequeue, &r->res_convertqueue); break; default: DLM_ASSERT(0, dlm_print_lkb(lkb); printk("sts=%d\n", status);); } } static void del_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb) { lkb->lkb_status = 0; list_del(&lkb->lkb_statequeue); unhold_lkb(lkb); } static void move_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int sts) { hold_lkb(lkb); del_lkb(r, lkb); add_lkb(r, lkb, sts); unhold_lkb(lkb); } static int msg_reply_type(int mstype) { switch (mstype) { case DLM_MSG_REQUEST: return DLM_MSG_REQUEST_REPLY; case DLM_MSG_CONVERT: return DLM_MSG_CONVERT_REPLY; case DLM_MSG_UNLOCK: return DLM_MSG_UNLOCK_REPLY; case DLM_MSG_CANCEL: return DLM_MSG_CANCEL_REPLY; case 
DLM_MSG_LOOKUP: return DLM_MSG_LOOKUP_REPLY; } return -1; } /* add/remove lkb from global waiters list of lkb's waiting for a reply from a remote node */ static int add_to_waiters(struct dlm_lkb *lkb, int mstype, int to_nodeid) { struct dlm_ls *ls = lkb->lkb_resource->res_ls; int error = 0; int wc; mutex_lock(&ls->ls_waiters_mutex); if (is_overlap_unlock(lkb) || (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL))) { error = -EINVAL; goto out; } if (lkb->lkb_wait_type || is_overlap_cancel(lkb)) { switch (mstype) { case DLM_MSG_UNLOCK: set_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags); break; case DLM_MSG_CANCEL: set_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags); break; default: error = -EBUSY; goto out; } wc = atomic_inc_return(&lkb->lkb_wait_count); hold_lkb(lkb); log_debug(ls, "addwait %x cur %d overlap %d count %d f %x", lkb->lkb_id, lkb->lkb_wait_type, mstype, wc, dlm_iflags_val(lkb)); goto out; } wc = atomic_fetch_inc(&lkb->lkb_wait_count); DLM_ASSERT(!wc, dlm_print_lkb(lkb); printk("wait_count %d\n", wc);); lkb->lkb_wait_type = mstype; lkb->lkb_wait_nodeid = to_nodeid; /* for debugging */ hold_lkb(lkb); list_add(&lkb->lkb_wait_reply, &ls->ls_waiters); out: if (error) log_error(ls, "addwait error %x %d flags %x %d %d %s", lkb->lkb_id, error, dlm_iflags_val(lkb), mstype, lkb->lkb_wait_type, lkb->lkb_resource->res_name); mutex_unlock(&ls->ls_waiters_mutex); return error; } /* We clear the RESEND flag because we might be taking an lkb off the waiters list as part of process_requestqueue (e.g. a lookup that has an optimized request reply on the requestqueue) between dlm_recover_waiters_pre() which set RESEND and dlm_recover_waiters_post() */ static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype, const struct dlm_message *ms) { struct dlm_ls *ls = lkb->lkb_resource->res_ls; int overlap_done = 0; if (mstype == DLM_MSG_UNLOCK_REPLY && test_and_clear_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags)) { log_debug(ls, "remwait %x unlock_reply overlap", lkb->lkb_id); overlap_done = 1; goto out_del; } if (mstype == DLM_MSG_CANCEL_REPLY && test_and_clear_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags)) { log_debug(ls, "remwait %x cancel_reply overlap", lkb->lkb_id); overlap_done = 1; goto out_del; } /* Cancel state was preemptively cleared by a successful convert, see next comment, nothing to do. */ if ((mstype == DLM_MSG_CANCEL_REPLY) && (lkb->lkb_wait_type != DLM_MSG_CANCEL)) { log_debug(ls, "remwait %x cancel_reply wait_type %d", lkb->lkb_id, lkb->lkb_wait_type); return -1; } /* Remove for the convert reply, and premptively remove for the cancel reply. A convert has been granted while there's still an outstanding cancel on it (the cancel is moot and the result in the cancel reply should be 0). We preempt the cancel reply because the app gets the convert result and then can follow up with another op, like convert. This subsequent op would see the lingering state of the cancel and fail with -EBUSY. */ if ((mstype == DLM_MSG_CONVERT_REPLY) && (lkb->lkb_wait_type == DLM_MSG_CONVERT) && ms && !ms->m_result && test_and_clear_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags)) { log_debug(ls, "remwait %x convert_reply zap overlap_cancel", lkb->lkb_id); lkb->lkb_wait_type = 0; atomic_dec(&lkb->lkb_wait_count); unhold_lkb(lkb); goto out_del; } /* N.B. type of reply may not always correspond to type of original msg due to lookup->request optimization, verify others? 
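 * For example, a remote lookup that the other node optimizes into a
 * request can come back as a DLM_MSG_REQUEST_REPLY while lkb_wait_type
 * is still DLM_MSG_LOOKUP; the catch-all below therefore clears
 * wait_type for any reply instead of matching message types exactly.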
*/ if (lkb->lkb_wait_type) { lkb->lkb_wait_type = 0; goto out_del; } log_error(ls, "remwait error %x remote %d %x msg %d flags %x no wait", lkb->lkb_id, ms ? le32_to_cpu(ms->m_header.h_nodeid) : 0, lkb->lkb_remid, mstype, dlm_iflags_val(lkb)); return -1; out_del: /* the force-unlock/cancel has completed and we haven't recvd a reply to the op that was in progress prior to the unlock/cancel; we give up on any reply to the earlier op. FIXME: not sure when/how this would happen */ if (overlap_done && lkb->lkb_wait_type) { log_error(ls, "remwait error %x reply %d wait_type %d overlap", lkb->lkb_id, mstype, lkb->lkb_wait_type); atomic_dec(&lkb->lkb_wait_count); unhold_lkb(lkb); lkb->lkb_wait_type = 0; } DLM_ASSERT(atomic_read(&lkb->lkb_wait_count), dlm_print_lkb(lkb);); clear_bit(DLM_IFL_RESEND_BIT, &lkb->lkb_iflags); if (atomic_dec_and_test(&lkb->lkb_wait_count)) list_del_init(&lkb->lkb_wait_reply); unhold_lkb(lkb); return 0; } static int remove_from_waiters(struct dlm_lkb *lkb, int mstype) { struct dlm_ls *ls = lkb->lkb_resource->res_ls; int error; mutex_lock(&ls->ls_waiters_mutex); error = _remove_from_waiters(lkb, mstype, NULL); mutex_unlock(&ls->ls_waiters_mutex); return error; } /* Handles situations where we might be processing a "fake" or "local" reply in which we can't try to take waiters_mutex again. */ static int remove_from_waiters_ms(struct dlm_lkb *lkb, const struct dlm_message *ms, bool local) { struct dlm_ls *ls = lkb->lkb_resource->res_ls; int error; if (!local) mutex_lock(&ls->ls_waiters_mutex); error = _remove_from_waiters(lkb, le32_to_cpu(ms->m_type), ms); if (!local) mutex_unlock(&ls->ls_waiters_mutex); return error; } static void shrink_bucket(struct dlm_ls *ls, int b) { struct rb_node *n, *next; struct dlm_rsb *r; char *name; int our_nodeid = dlm_our_nodeid(); int remote_count = 0; int need_shrink = 0; int i, len, rv; memset(&ls->ls_remove_lens, 0, sizeof(int) * DLM_REMOVE_NAMES_MAX); spin_lock(&ls->ls_rsbtbl[b].lock); if (!test_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl[b].flags)) { spin_unlock(&ls->ls_rsbtbl[b].lock); return; } for (n = rb_first(&ls->ls_rsbtbl[b].toss); n; n = next) { next = rb_next(n); r = rb_entry(n, struct dlm_rsb, res_hashnode); /* If we're the directory record for this rsb, and we're not the master of it, then we need to wait for the master node to send us a dir remove for before removing the dir record. */ if (!dlm_no_directory(ls) && (r->res_master_nodeid != our_nodeid) && (dlm_dir_nodeid(r) == our_nodeid)) { continue; } need_shrink = 1; if (!time_after_eq(jiffies, r->res_toss_time + dlm_config.ci_toss_secs * HZ)) { continue; } if (!dlm_no_directory(ls) && (r->res_master_nodeid == our_nodeid) && (dlm_dir_nodeid(r) != our_nodeid)) { /* We're the master of this rsb but we're not the directory record, so we need to tell the dir node to remove the dir record. */ ls->ls_remove_lens[remote_count] = r->res_length; memcpy(ls->ls_remove_names[remote_count], r->res_name, DLM_RESNAME_MAXLEN); remote_count++; if (remote_count >= DLM_REMOVE_NAMES_MAX) break; continue; } if (!kref_put(&r->res_ref, kill_rsb)) { log_error(ls, "tossed rsb in use %s", r->res_name); continue; } rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss); dlm_free_rsb(r); } if (need_shrink) set_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl[b].flags); else clear_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl[b].flags); spin_unlock(&ls->ls_rsbtbl[b].lock); /* * While searching for rsb's to free, we found some that require * remote removal. 
We leave them in place and find them again here * so there is a very small gap between removing them from the toss * list and sending the removal. Keeping this gap small is * important to keep us (the master node) from being out of sync * with the remote dir node for very long. */ for (i = 0; i < remote_count; i++) { name = ls->ls_remove_names[i]; len = ls->ls_remove_lens[i]; spin_lock(&ls->ls_rsbtbl[b].lock); rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r); if (rv) { spin_unlock(&ls->ls_rsbtbl[b].lock); log_debug(ls, "remove_name not toss %s", name); continue; } if (r->res_master_nodeid != our_nodeid) { spin_unlock(&ls->ls_rsbtbl[b].lock); log_debug(ls, "remove_name master %d dir %d our %d %s", r->res_master_nodeid, r->res_dir_nodeid, our_nodeid, name); continue; } if (r->res_dir_nodeid == our_nodeid) { /* should never happen */ spin_unlock(&ls->ls_rsbtbl[b].lock); log_error(ls, "remove_name dir %d master %d our %d %s", r->res_dir_nodeid, r->res_master_nodeid, our_nodeid, name); continue; } if (!time_after_eq(jiffies, r->res_toss_time + dlm_config.ci_toss_secs * HZ)) { spin_unlock(&ls->ls_rsbtbl[b].lock); log_debug(ls, "remove_name toss_time %lu now %lu %s", r->res_toss_time, jiffies, name); continue; } if (!kref_put(&r->res_ref, kill_rsb)) { spin_unlock(&ls->ls_rsbtbl[b].lock); log_error(ls, "remove_name in use %s", name); continue; } rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss); send_remove(r); spin_unlock(&ls->ls_rsbtbl[b].lock); dlm_free_rsb(r); } } void dlm_scan_rsbs(struct dlm_ls *ls) { int i; for (i = 0; i < ls->ls_rsbtbl_size; i++) { shrink_bucket(ls, i); if (dlm_locking_stopped(ls)) break; cond_resched(); } } /* lkb is master or local copy */ static void set_lvb_lock(struct dlm_rsb *r, struct dlm_lkb *lkb) { int b, len = r->res_ls->ls_lvblen; /* b=1 lvb returned to caller b=0 lvb written to rsb or invalidated b=-1 do nothing */ b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1]; if (b == 1) { if (!lkb->lkb_lvbptr) return; if (!(lkb->lkb_exflags & DLM_LKF_VALBLK)) return; if (!r->res_lvbptr) return; memcpy(lkb->lkb_lvbptr, r->res_lvbptr, len); lkb->lkb_lvbseq = r->res_lvbseq; } else if (b == 0) { if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) { rsb_set_flag(r, RSB_VALNOTVALID); return; } if (!lkb->lkb_lvbptr) return; if (!(lkb->lkb_exflags & DLM_LKF_VALBLK)) return; if (!r->res_lvbptr) r->res_lvbptr = dlm_allocate_lvb(r->res_ls); if (!r->res_lvbptr) return; memcpy(r->res_lvbptr, lkb->lkb_lvbptr, len); r->res_lvbseq++; lkb->lkb_lvbseq = r->res_lvbseq; rsb_clear_flag(r, RSB_VALNOTVALID); } if (rsb_flag(r, RSB_VALNOTVALID)) set_bit(DLM_SBF_VALNOTVALID_BIT, &lkb->lkb_sbflags); } static void set_lvb_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb) { if (lkb->lkb_grmode < DLM_LOCK_PW) return; if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) { rsb_set_flag(r, RSB_VALNOTVALID); return; } if (!lkb->lkb_lvbptr) return; if (!(lkb->lkb_exflags & DLM_LKF_VALBLK)) return; if (!r->res_lvbptr) r->res_lvbptr = dlm_allocate_lvb(r->res_ls); if (!r->res_lvbptr) return; memcpy(r->res_lvbptr, lkb->lkb_lvbptr, r->res_ls->ls_lvblen); r->res_lvbseq++; rsb_clear_flag(r, RSB_VALNOTVALID); } /* lkb is process copy (pc) */ static void set_lvb_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb, const struct dlm_message *ms) { int b; if (!lkb->lkb_lvbptr) return; if (!(lkb->lkb_exflags & DLM_LKF_VALBLK)) return; b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1]; if (b == 1) { int len = receive_extralen(ms); if (len > r->res_ls->ls_lvblen) len = r->res_ls->ls_lvblen; 
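/* Copy the (length-clamped) LVB data carried in the message into the
   process copy's buffer, along with its sequence number. */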
memcpy(lkb->lkb_lvbptr, ms->m_extra, len); lkb->lkb_lvbseq = le32_to_cpu(ms->m_lvbseq); } } /* Manipulate lkb's on rsb's convert/granted/waiting queues remove_lock -- used for unlock, removes lkb from granted revert_lock -- used for cancel, moves lkb from convert to granted grant_lock -- used for request and convert, adds lkb to granted or moves lkb from convert or waiting to granted Each of these is used for master or local copy lkb's. There is also a _pc() variation used to make the corresponding change on a process copy (pc) lkb. */ static void _remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb) { del_lkb(r, lkb); lkb->lkb_grmode = DLM_LOCK_IV; /* this unhold undoes the original ref from create_lkb() so this leads to the lkb being freed */ unhold_lkb(lkb); } static void remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb) { set_lvb_unlock(r, lkb); _remove_lock(r, lkb); } static void remove_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb) { _remove_lock(r, lkb); } /* returns: 0 did nothing 1 moved lock to granted -1 removed lock */ static int revert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb) { int rv = 0; lkb->lkb_rqmode = DLM_LOCK_IV; switch (lkb->lkb_status) { case DLM_LKSTS_GRANTED: break; case DLM_LKSTS_CONVERT: move_lkb(r, lkb, DLM_LKSTS_GRANTED); rv = 1; break; case DLM_LKSTS_WAITING: del_lkb(r, lkb); lkb->lkb_grmode = DLM_LOCK_IV; /* this unhold undoes the original ref from create_lkb() so this leads to the lkb being freed */ unhold_lkb(lkb); rv = -1; break; default: log_print("invalid status for revert %d", lkb->lkb_status); } return rv; } static int revert_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb) { return revert_lock(r, lkb); } static void _grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb) { if (lkb->lkb_grmode != lkb->lkb_rqmode) { lkb->lkb_grmode = lkb->lkb_rqmode; if (lkb->lkb_status) move_lkb(r, lkb, DLM_LKSTS_GRANTED); else add_lkb(r, lkb, DLM_LKSTS_GRANTED); } lkb->lkb_rqmode = DLM_LOCK_IV; lkb->lkb_highbast = 0; } static void grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb) { set_lvb_lock(r, lkb); _grant_lock(r, lkb); } static void grant_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb, const struct dlm_message *ms) { set_lvb_lock_pc(r, lkb, ms); _grant_lock(r, lkb); } /* called by grant_pending_locks() which means an async grant message must be sent to the requesting node in addition to granting the lock if the lkb belongs to a remote node. */ static void grant_lock_pending(struct dlm_rsb *r, struct dlm_lkb *lkb) { grant_lock(r, lkb); if (is_master_copy(lkb)) send_grant(r, lkb); else queue_cast(r, lkb, 0); } /* The special CONVDEADLK, ALTPR and ALTCW flags allow the master to change the granted/requested modes. We're munging things accordingly in the process copy. 
CONVDEADLK: our grmode may have been forced down to NL to resolve a conversion deadlock ALTPR/ALTCW: our rqmode may have been changed to PR or CW to become compatible with other granted locks */ static void munge_demoted(struct dlm_lkb *lkb) { if (lkb->lkb_rqmode == DLM_LOCK_IV || lkb->lkb_grmode == DLM_LOCK_IV) { log_print("munge_demoted %x invalid modes gr %d rq %d", lkb->lkb_id, lkb->lkb_grmode, lkb->lkb_rqmode); return; } lkb->lkb_grmode = DLM_LOCK_NL; } static void munge_altmode(struct dlm_lkb *lkb, const struct dlm_message *ms) { if (ms->m_type != cpu_to_le32(DLM_MSG_REQUEST_REPLY) && ms->m_type != cpu_to_le32(DLM_MSG_GRANT)) { log_print("munge_altmode %x invalid reply type %d", lkb->lkb_id, le32_to_cpu(ms->m_type)); return; } if (lkb->lkb_exflags & DLM_LKF_ALTPR) lkb->lkb_rqmode = DLM_LOCK_PR; else if (lkb->lkb_exflags & DLM_LKF_ALTCW) lkb->lkb_rqmode = DLM_LOCK_CW; else { log_print("munge_altmode invalid exflags %x", lkb->lkb_exflags); dlm_print_lkb(lkb); } } static inline int first_in_list(struct dlm_lkb *lkb, struct list_head *head) { struct dlm_lkb *first = list_entry(head->next, struct dlm_lkb, lkb_statequeue); if (lkb->lkb_id == first->lkb_id) return 1; return 0; } /* Check if the given lkb conflicts with another lkb on the queue. */ static int queue_conflict(struct list_head *head, struct dlm_lkb *lkb) { struct dlm_lkb *this; list_for_each_entry(this, head, lkb_statequeue) { if (this == lkb) continue; if (!modes_compat(this, lkb)) return 1; } return 0; } /* * "A conversion deadlock arises with a pair of lock requests in the converting * queue for one resource. The granted mode of each lock blocks the requested * mode of the other lock." * * Part 2: if the granted mode of lkb is preventing an earlier lkb in the * convert queue from being granted, then deadlk/demote lkb. * * Example: * Granted Queue: empty * Convert Queue: NL->EX (first lock) * PR->EX (second lock) * * The first lock can't be granted because of the granted mode of the second * lock and the second lock can't be granted because it's not first in the * list. We either cancel lkb's conversion (PR->EX) and return EDEADLK, or we * demote the granted mode of lkb (from PR to NL) if it has the CONVDEADLK * flag set and return DEMOTED in the lksb flags. * * Originally, this function detected conv-deadlk in a more limited scope: * - if !modes_compat(lkb1, lkb2) && !modes_compat(lkb2, lkb1), or * - if lkb1 was the first entry in the queue (not just earlier), and was * blocked by the granted mode of lkb2, and there was nothing on the * granted queue preventing lkb1 from being granted immediately, i.e. * lkb2 was the only thing preventing lkb1 from being granted. * * That second condition meant we'd only say there was conv-deadlk if * resolving it (by demotion) would lead to the first lock on the convert * queue being granted right away. It allowed conversion deadlocks to exist * between locks on the convert queue while they couldn't be granted anyway. * * Now, we detect and take action on conversion deadlocks immediately when * they're created, even if they may not be immediately consequential. If * lkb1 exists anywhere in the convert queue and lkb2 comes in with a granted * mode that would prevent lkb1's conversion from being granted, we do a * deadlk/demote on lkb2 right away and don't let it onto the convert queue. * I think this means that the lkb_is_ahead condition below should always * be zero, i.e. there will never be conv-deadlk between two locks that are * both already on the convert queue. 
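 *
 * A rough reading of the loop below: for every lkb1 queued ahead of lkb2,
 * deadlock is reported as soon as lkb2's granted mode blocks lkb1's
 * requested mode (!modes_compat(lkb2, lkb1)); for entries queued behind
 * lkb2 (lkb_is_ahead set), both directions must block before it counts,
 * which per the note above should not occur in practice.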
*/ static int conversion_deadlock_detect(struct dlm_rsb *r, struct dlm_lkb *lkb2) { struct dlm_lkb *lkb1; int lkb_is_ahead = 0; list_for_each_entry(lkb1, &r->res_convertqueue, lkb_statequeue) { if (lkb1 == lkb2) { lkb_is_ahead = 1; continue; } if (!lkb_is_ahead) { if (!modes_compat(lkb2, lkb1)) return 1; } else { if (!modes_compat(lkb2, lkb1) && !modes_compat(lkb1, lkb2)) return 1; } } return 0; } /* * Return 1 if the lock can be granted, 0 otherwise. * Also detect and resolve conversion deadlocks. * * lkb is the lock to be granted * * now is 1 if the function is being called in the context of the * immediate request, it is 0 if called later, after the lock has been * queued. * * recover is 1 if dlm_recover_grant() is trying to grant conversions * after recovery. * * References are from chapter 6 of "VAXcluster Principles" by Roy Davis */ static int _can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now, int recover) { int8_t conv = (lkb->lkb_grmode != DLM_LOCK_IV); /* * 6-10: Version 5.4 introduced an option to address the phenomenon of * a new request for a NL mode lock being blocked. * * 6-11: If the optional EXPEDITE flag is used with the new NL mode * request, then it would be granted. In essence, the use of this flag * tells the Lock Manager to expedite theis request by not considering * what may be in the CONVERTING or WAITING queues... As of this * writing, the EXPEDITE flag can be used only with new requests for NL * mode locks. This flag is not valid for conversion requests. * * A shortcut. Earlier checks return an error if EXPEDITE is used in a * conversion or used with a non-NL requested mode. We also know an * EXPEDITE request is always granted immediately, so now must always * be 1. The full condition to grant an expedite request: (now && * !conv && lkb->rqmode == DLM_LOCK_NL && (flags & EXPEDITE)) can * therefore be shortened to just checking the flag. */ if (lkb->lkb_exflags & DLM_LKF_EXPEDITE) return 1; /* * A shortcut. Without this, !queue_conflict(grantqueue, lkb) would be * added to the remaining conditions. */ if (queue_conflict(&r->res_grantqueue, lkb)) return 0; /* * 6-3: By default, a conversion request is immediately granted if the * requested mode is compatible with the modes of all other granted * locks */ if (queue_conflict(&r->res_convertqueue, lkb)) return 0; /* * The RECOVER_GRANT flag means dlm_recover_grant() is granting * locks for a recovered rsb, on which lkb's have been rebuilt. * The lkb's may have been rebuilt on the queues in a different * order than they were in on the previous master. So, granting * queued conversions in order after recovery doesn't make sense * since the order hasn't been preserved anyway. The new order * could also have created a new "in place" conversion deadlock. * (e.g. old, failed master held granted EX, with PR->EX, NL->EX. * After recovery, there would be no granted locks, and possibly * NL->EX, PR->EX, an in-place conversion deadlock.) So, after * recovery, grant conversions without considering order. */ if (conv && recover) return 1; /* * 6-5: But the default algorithm for deciding whether to grant or * queue conversion requests does not by itself guarantee that such * requests are serviced on a "first come first serve" basis. This, in * turn, can lead to a phenomenon known as "indefinate postponement". * * 6-7: This issue is dealt with by using the optional QUECVT flag with * the system service employed to request a lock conversion. 
This flag * forces certain conversion requests to be queued, even if they are * compatible with the granted modes of other locks on the same * resource. Thus, the use of this flag results in conversion requests * being ordered on a "first come first servce" basis. * * DCT: This condition is all about new conversions being able to occur * "in place" while the lock remains on the granted queue (assuming * nothing else conflicts.) IOW if QUECVT isn't set, a conversion * doesn't _have_ to go onto the convert queue where it's processed in * order. The "now" variable is necessary to distinguish converts * being received and processed for the first time now, because once a * convert is moved to the conversion queue the condition below applies * requiring fifo granting. */ if (now && conv && !(lkb->lkb_exflags & DLM_LKF_QUECVT)) return 1; /* * Even if the convert is compat with all granted locks, * QUECVT forces it behind other locks on the convert queue. */ if (now && conv && (lkb->lkb_exflags & DLM_LKF_QUECVT)) { if (list_empty(&r->res_convertqueue)) return 1; else return 0; } /* * The NOORDER flag is set to avoid the standard vms rules on grant * order. */ if (lkb->lkb_exflags & DLM_LKF_NOORDER) return 1; /* * 6-3: Once in that queue [CONVERTING], a conversion request cannot be * granted until all other conversion requests ahead of it are granted * and/or canceled. */ if (!now && conv && first_in_list(lkb, &r->res_convertqueue)) return 1; /* * 6-4: By default, a new request is immediately granted only if all * three of the following conditions are satisfied when the request is * issued: * - The queue of ungranted conversion requests for the resource is * empty. * - The queue of ungranted new requests for the resource is empty. * - The mode of the new request is compatible with the most * restrictive mode of all granted locks on the resource. */ if (now && !conv && list_empty(&r->res_convertqueue) && list_empty(&r->res_waitqueue)) return 1; /* * 6-4: Once a lock request is in the queue of ungranted new requests, * it cannot be granted until the queue of ungranted conversion * requests is empty, all ungranted new requests ahead of it are * granted and/or canceled, and it is compatible with the granted mode * of the most restrictive lock granted on the resource. */ if (!now && !conv && list_empty(&r->res_convertqueue) && first_in_list(lkb, &r->res_waitqueue)) return 1; return 0; } static int can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now, int recover, int *err) { int rv; int8_t alt = 0, rqmode = lkb->lkb_rqmode; int8_t is_convert = (lkb->lkb_grmode != DLM_LOCK_IV); if (err) *err = 0; rv = _can_be_granted(r, lkb, now, recover); if (rv) goto out; /* * The CONVDEADLK flag is non-standard and tells the dlm to resolve * conversion deadlocks by demoting grmode to NL, otherwise the dlm * cancels one of the locks. */ if (is_convert && can_be_queued(lkb) && conversion_deadlock_detect(r, lkb)) { if (lkb->lkb_exflags & DLM_LKF_CONVDEADLK) { lkb->lkb_grmode = DLM_LOCK_NL; set_bit(DLM_SBF_DEMOTED_BIT, &lkb->lkb_sbflags); } else if (err) { *err = -EDEADLK; } else { log_print("can_be_granted deadlock %x now %d", lkb->lkb_id, now); dlm_dump_rsb(r); } goto out; } /* * The ALTPR and ALTCW flags are non-standard and tell the dlm to try * to grant a request in a mode other than the normal rqmode. It's a * simple way to provide a big optimization to applications that can * use them. 
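 *
 * For example (illustrative): a request made with DLM_LKF_ALTPR for a
 * mode other than PR that cannot be granted as asked is retried below
 * with rqmode temporarily set to PR; if that succeeds, the lock is
 * granted in PR and DLM_SBF_ALTMODE is set so the caller can see the
 * substitution, otherwise rqmode is restored and the normal path applies.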
*/ if (rqmode != DLM_LOCK_PR && (lkb->lkb_exflags & DLM_LKF_ALTPR)) alt = DLM_LOCK_PR; else if (rqmode != DLM_LOCK_CW && (lkb->lkb_exflags & DLM_LKF_ALTCW)) alt = DLM_LOCK_CW; if (alt) { lkb->lkb_rqmode = alt; rv = _can_be_granted(r, lkb, now, 0); if (rv) set_bit(DLM_SBF_ALTMODE_BIT, &lkb->lkb_sbflags); else lkb->lkb_rqmode = rqmode; } out: return rv; } /* Returns the highest requested mode of all blocked conversions; sets cw if there's a blocked conversion to DLM_LOCK_CW. */ static int grant_pending_convert(struct dlm_rsb *r, int high, int *cw, unsigned int *count) { struct dlm_lkb *lkb, *s; int recover = rsb_flag(r, RSB_RECOVER_GRANT); int hi, demoted, quit, grant_restart, demote_restart; int deadlk; quit = 0; restart: grant_restart = 0; demote_restart = 0; hi = DLM_LOCK_IV; list_for_each_entry_safe(lkb, s, &r->res_convertqueue, lkb_statequeue) { demoted = is_demoted(lkb); deadlk = 0; if (can_be_granted(r, lkb, 0, recover, &deadlk)) { grant_lock_pending(r, lkb); grant_restart = 1; if (count) (*count)++; continue; } if (!demoted && is_demoted(lkb)) { log_print("WARN: pending demoted %x node %d %s", lkb->lkb_id, lkb->lkb_nodeid, r->res_name); demote_restart = 1; continue; } if (deadlk) { /* * If DLM_LKB_NODLKWT flag is set and conversion * deadlock is detected, we request blocking AST and * down (or cancel) conversion. */ if (lkb->lkb_exflags & DLM_LKF_NODLCKWT) { if (lkb->lkb_highbast < lkb->lkb_rqmode) { queue_bast(r, lkb, lkb->lkb_rqmode); lkb->lkb_highbast = lkb->lkb_rqmode; } } else { log_print("WARN: pending deadlock %x node %d %s", lkb->lkb_id, lkb->lkb_nodeid, r->res_name); dlm_dump_rsb(r); } continue; } hi = max_t(int, lkb->lkb_rqmode, hi); if (cw && lkb->lkb_rqmode == DLM_LOCK_CW) *cw = 1; } if (grant_restart) goto restart; if (demote_restart && !quit) { quit = 1; goto restart; } return max_t(int, high, hi); } static int grant_pending_wait(struct dlm_rsb *r, int high, int *cw, unsigned int *count) { struct dlm_lkb *lkb, *s; list_for_each_entry_safe(lkb, s, &r->res_waitqueue, lkb_statequeue) { if (can_be_granted(r, lkb, 0, 0, NULL)) { grant_lock_pending(r, lkb); if (count) (*count)++; } else { high = max_t(int, lkb->lkb_rqmode, high); if (lkb->lkb_rqmode == DLM_LOCK_CW) *cw = 1; } } return high; } /* cw of 1 means there's a lock with a rqmode of DLM_LOCK_CW that's blocked on either the convert or waiting queue. high is the largest rqmode of all locks blocked on the convert or waiting queue. */ static int lock_requires_bast(struct dlm_lkb *gr, int high, int cw) { if (gr->lkb_grmode == DLM_LOCK_PR && cw) { if (gr->lkb_highbast < DLM_LOCK_EX) return 1; return 0; } if (gr->lkb_highbast < high && !__dlm_compat_matrix[gr->lkb_grmode+1][high+1]) return 1; return 0; } static void grant_pending_locks(struct dlm_rsb *r, unsigned int *count) { struct dlm_lkb *lkb, *s; int high = DLM_LOCK_IV; int cw = 0; if (!is_master(r)) { log_print("grant_pending_locks r nodeid %d", r->res_nodeid); dlm_dump_rsb(r); return; } high = grant_pending_convert(r, high, &cw, count); high = grant_pending_wait(r, high, &cw, count); if (high == DLM_LOCK_IV) return; /* * If there are locks left on the wait/convert queue then send blocking * ASTs to granted locks based on the largest requested mode (high) * found above. 
*/ list_for_each_entry_safe(lkb, s, &r->res_grantqueue, lkb_statequeue) { if (lkb->lkb_bastfn && lock_requires_bast(lkb, high, cw)) { if (cw && high == DLM_LOCK_PR && lkb->lkb_grmode == DLM_LOCK_PR) queue_bast(r, lkb, DLM_LOCK_CW); else queue_bast(r, lkb, high); lkb->lkb_highbast = high; } } } static int modes_require_bast(struct dlm_lkb *gr, struct dlm_lkb *rq) { if ((gr->lkb_grmode == DLM_LOCK_PR && rq->lkb_rqmode == DLM_LOCK_CW) || (gr->lkb_grmode == DLM_LOCK_CW && rq->lkb_rqmode == DLM_LOCK_PR)) { if (gr->lkb_highbast < DLM_LOCK_EX) return 1; return 0; } if (gr->lkb_highbast < rq->lkb_rqmode && !modes_compat(gr, rq)) return 1; return 0; } static void send_bast_queue(struct dlm_rsb *r, struct list_head *head, struct dlm_lkb *lkb) { struct dlm_lkb *gr; list_for_each_entry(gr, head, lkb_statequeue) { /* skip self when sending basts to convertqueue */ if (gr == lkb) continue; if (gr->lkb_bastfn && modes_require_bast(gr, lkb)) { queue_bast(r, gr, lkb->lkb_rqmode); gr->lkb_highbast = lkb->lkb_rqmode; } } } static void send_blocking_asts(struct dlm_rsb *r, struct dlm_lkb *lkb) { send_bast_queue(r, &r->res_grantqueue, lkb); } static void send_blocking_asts_all(struct dlm_rsb *r, struct dlm_lkb *lkb) { send_bast_queue(r, &r->res_grantqueue, lkb); send_bast_queue(r, &r->res_convertqueue, lkb); } /* set_master(r, lkb) -- set the master nodeid of a resource The purpose of this function is to set the nodeid field in the given lkb using the nodeid field in the given rsb. If the rsb's nodeid is known, it can just be copied to the lkb and the function will return 0. If the rsb's nodeid is _not_ known, it needs to be looked up before it can be copied to the lkb. When the rsb nodeid is being looked up remotely, the initial lkb causing the lookup is kept on the ls_waiters list waiting for the lookup reply. Other lkb's waiting for the same rsb lookup are kept on the rsb's res_lookup list until the master is verified. Return values: 0: nodeid is set in rsb/lkb and the caller should go ahead and use it 1: the rsb master is not available and the lkb has been placed on a wait queue */ static int set_master(struct dlm_rsb *r, struct dlm_lkb *lkb) { int our_nodeid = dlm_our_nodeid(); if (rsb_flag(r, RSB_MASTER_UNCERTAIN)) { rsb_clear_flag(r, RSB_MASTER_UNCERTAIN); r->res_first_lkid = lkb->lkb_id; lkb->lkb_nodeid = r->res_nodeid; return 0; } if (r->res_first_lkid && r->res_first_lkid != lkb->lkb_id) { list_add_tail(&lkb->lkb_rsb_lookup, &r->res_lookup); return 1; } if (r->res_master_nodeid == our_nodeid) { lkb->lkb_nodeid = 0; return 0; } if (r->res_master_nodeid) { lkb->lkb_nodeid = r->res_master_nodeid; return 0; } if (dlm_dir_nodeid(r) == our_nodeid) { /* This is a somewhat unusual case; find_rsb will usually have set res_master_nodeid when dir nodeid is local, but there are cases where we become the dir node after we've past find_rsb and go through _request_lock again. confirm_master() or process_lookup_list() needs to be called after this. 
*/ log_debug(r->res_ls, "set_master %x self master %d dir %d %s", lkb->lkb_id, r->res_master_nodeid, r->res_dir_nodeid, r->res_name); r->res_master_nodeid = our_nodeid; r->res_nodeid = 0; lkb->lkb_nodeid = 0; return 0; } r->res_first_lkid = lkb->lkb_id; send_lookup(r, lkb); return 1; } static void process_lookup_list(struct dlm_rsb *r) { struct dlm_lkb *lkb, *safe; list_for_each_entry_safe(lkb, safe, &r->res_lookup, lkb_rsb_lookup) { list_del_init(&lkb->lkb_rsb_lookup); _request_lock(r, lkb); schedule(); } } /* confirm_master -- confirm (or deny) an rsb's master nodeid */ static void confirm_master(struct dlm_rsb *r, int error) { struct dlm_lkb *lkb; if (!r->res_first_lkid) return; switch (error) { case 0: case -EINPROGRESS: r->res_first_lkid = 0; process_lookup_list(r); break; case -EAGAIN: case -EBADR: case -ENOTBLK: /* the remote request failed and won't be retried (it was a NOQUEUE, or has been canceled/unlocked); make a waiting lkb the first_lkid */ r->res_first_lkid = 0; if (!list_empty(&r->res_lookup)) { lkb = list_entry(r->res_lookup.next, struct dlm_lkb, lkb_rsb_lookup); list_del_init(&lkb->lkb_rsb_lookup); r->res_first_lkid = lkb->lkb_id; _request_lock(r, lkb); } break; default: log_error(r->res_ls, "confirm_master unknown error %d", error); } } static int set_lock_args(int mode, struct dlm_lksb *lksb, uint32_t flags, int namelen, void (*ast)(void *astparam), void *astparam, void (*bast)(void *astparam, int mode), struct dlm_args *args) { int rv = -EINVAL; /* check for invalid arg usage */ if (mode < 0 || mode > DLM_LOCK_EX) goto out; if (!(flags & DLM_LKF_CONVERT) && (namelen > DLM_RESNAME_MAXLEN)) goto out; if (flags & DLM_LKF_CANCEL) goto out; if (flags & DLM_LKF_QUECVT && !(flags & DLM_LKF_CONVERT)) goto out; if (flags & DLM_LKF_CONVDEADLK && !(flags & DLM_LKF_CONVERT)) goto out; if (flags & DLM_LKF_CONVDEADLK && flags & DLM_LKF_NOQUEUE) goto out; if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_CONVERT) goto out; if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_QUECVT) goto out; if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_NOQUEUE) goto out; if (flags & DLM_LKF_EXPEDITE && mode != DLM_LOCK_NL) goto out; if (!ast || !lksb) goto out; if (flags & DLM_LKF_VALBLK && !lksb->sb_lvbptr) goto out; if (flags & DLM_LKF_CONVERT && !lksb->sb_lkid) goto out; /* these args will be copied to the lkb in validate_lock_args, it cannot be done now because when converting locks, fields in an active lkb cannot be modified before locking the rsb */ args->flags = flags; args->astfn = ast; args->astparam = astparam; args->bastfn = bast; args->mode = mode; args->lksb = lksb; rv = 0; out: return rv; } static int set_unlock_args(uint32_t flags, void *astarg, struct dlm_args *args) { if (flags & ~(DLM_LKF_CANCEL | DLM_LKF_VALBLK | DLM_LKF_IVVALBLK | DLM_LKF_FORCEUNLOCK)) return -EINVAL; if (flags & DLM_LKF_CANCEL && flags & DLM_LKF_FORCEUNLOCK) return -EINVAL; args->flags = flags; args->astparam = astarg; return 0; } static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb, struct dlm_args *args) { int rv = -EBUSY; if (args->flags & DLM_LKF_CONVERT) { if (lkb->lkb_status != DLM_LKSTS_GRANTED) goto out; /* lock not allowed if there's any op in progress */ if (lkb->lkb_wait_type || atomic_read(&lkb->lkb_wait_count)) goto out; if (is_overlap(lkb)) goto out; rv = -EINVAL; if (test_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags)) goto out; if (args->flags & DLM_LKF_QUECVT && !__quecvt_compat_matrix[lkb->lkb_grmode+1][args->mode+1]) goto out; } lkb->lkb_exflags = args->flags; dlm_set_sbflags_val(lkb, 
0); lkb->lkb_astfn = args->astfn; lkb->lkb_astparam = args->astparam; lkb->lkb_bastfn = args->bastfn; lkb->lkb_rqmode = args->mode; lkb->lkb_lksb = args->lksb; lkb->lkb_lvbptr = args->lksb->sb_lvbptr; lkb->lkb_ownpid = (int) current->pid; rv = 0; out: switch (rv) { case 0: break; case -EINVAL: /* annoy the user because dlm usage is wrong */ WARN_ON(1); log_error(ls, "%s %d %x %x %x %d %d %s", __func__, rv, lkb->lkb_id, dlm_iflags_val(lkb), args->flags, lkb->lkb_status, lkb->lkb_wait_type, lkb->lkb_resource->res_name); break; default: log_debug(ls, "%s %d %x %x %x %d %d %s", __func__, rv, lkb->lkb_id, dlm_iflags_val(lkb), args->flags, lkb->lkb_status, lkb->lkb_wait_type, lkb->lkb_resource->res_name); break; } return rv; } /* when dlm_unlock() sees -EBUSY with CANCEL/FORCEUNLOCK it returns 0 for success */ /* note: it's valid for lkb_nodeid/res_nodeid to be -1 when we get here because there may be a lookup in progress and it's valid to do cancel/unlockf on it */ static int validate_unlock_args(struct dlm_lkb *lkb, struct dlm_args *args) { struct dlm_ls *ls = lkb->lkb_resource->res_ls; int rv = -EBUSY; /* normal unlock not allowed if there's any op in progress */ if (!(args->flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)) && (lkb->lkb_wait_type || atomic_read(&lkb->lkb_wait_count))) goto out; /* an lkb may be waiting for an rsb lookup to complete where the lookup was initiated by another lock */ if (!list_empty(&lkb->lkb_rsb_lookup)) { if (args->flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)) { log_debug(ls, "unlock on rsb_lookup %x", lkb->lkb_id); list_del_init(&lkb->lkb_rsb_lookup); queue_cast(lkb->lkb_resource, lkb, args->flags & DLM_LKF_CANCEL ? -DLM_ECANCEL : -DLM_EUNLOCK); unhold_lkb(lkb); /* undoes create_lkb() */ } /* caller changes -EBUSY to 0 for CANCEL and FORCEUNLOCK */ goto out; } rv = -EINVAL; if (test_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags)) { log_error(ls, "unlock on MSTCPY %x", lkb->lkb_id); dlm_print_lkb(lkb); goto out; } /* an lkb may still exist even though the lock is EOL'ed due to a * cancel, unlock or failed noqueue request; an app can't use these * locks; return same error as if the lkid had not been found at all */ if (test_bit(DLM_IFL_ENDOFLIFE_BIT, &lkb->lkb_iflags)) { log_debug(ls, "unlock on ENDOFLIFE %x", lkb->lkb_id); rv = -ENOENT; goto out; } /* cancel not allowed with another cancel/unlock in progress */ if (args->flags & DLM_LKF_CANCEL) { if (lkb->lkb_exflags & DLM_LKF_CANCEL) goto out; if (is_overlap(lkb)) goto out; if (test_bit(DLM_IFL_RESEND_BIT, &lkb->lkb_iflags)) { set_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags); rv = -EBUSY; goto out; } /* there's nothing to cancel */ if (lkb->lkb_status == DLM_LKSTS_GRANTED && !lkb->lkb_wait_type) { rv = -EBUSY; goto out; } switch (lkb->lkb_wait_type) { case DLM_MSG_LOOKUP: case DLM_MSG_REQUEST: set_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags); rv = -EBUSY; goto out; case DLM_MSG_UNLOCK: case DLM_MSG_CANCEL: goto out; } /* add_to_waiters() will set OVERLAP_CANCEL */ goto out_ok; } /* do we need to allow a force-unlock if there's a normal unlock already in progress? in what conditions could the normal unlock fail such that we'd want to send a force-unlock to be sure? 
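
   For orientation, a hedged sketch of how the two overlap flavours
   reach this function from dlm_unlock() below; the lockspace handle,
   lkid, lksb and astarg names here are illustrative only:

	dlm_unlock(ls, lkid, DLM_LKF_CANCEL, &lksb, astarg);
	    cancel an in-flight request or convert; if the cancel takes
	    effect, the completion ast reports -DLM_ECANCEL

	dlm_unlock(ls, lkid, DLM_LKF_FORCEUNLOCK, &lksb, astarg);
	    unlock even while another operation is outstanding; the
	    completion ast reports -DLM_EUNLOCK

   In both cases dlm_unlock() itself turns an -EBUSY from here into 0,
   as the comment above validate_unlock_args() notes.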
*/ if (args->flags & DLM_LKF_FORCEUNLOCK) { if (lkb->lkb_exflags & DLM_LKF_FORCEUNLOCK) goto out; if (is_overlap_unlock(lkb)) goto out; if (test_bit(DLM_IFL_RESEND_BIT, &lkb->lkb_iflags)) { set_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags); rv = -EBUSY; goto out; } switch (lkb->lkb_wait_type) { case DLM_MSG_LOOKUP: case DLM_MSG_REQUEST: set_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags); rv = -EBUSY; goto out; case DLM_MSG_UNLOCK: goto out; } /* add_to_waiters() will set OVERLAP_UNLOCK */ } out_ok: /* an overlapping op shouldn't blow away exflags from other op */ lkb->lkb_exflags |= args->flags; dlm_set_sbflags_val(lkb, 0); lkb->lkb_astparam = args->astparam; rv = 0; out: switch (rv) { case 0: break; case -EINVAL: /* annoy the user because dlm usage is wrong */ WARN_ON(1); log_error(ls, "%s %d %x %x %x %x %d %s", __func__, rv, lkb->lkb_id, dlm_iflags_val(lkb), lkb->lkb_exflags, args->flags, lkb->lkb_wait_type, lkb->lkb_resource->res_name); break; default: log_debug(ls, "%s %d %x %x %x %x %d %s", __func__, rv, lkb->lkb_id, dlm_iflags_val(lkb), lkb->lkb_exflags, args->flags, lkb->lkb_wait_type, lkb->lkb_resource->res_name); break; } return rv; } /* * Four stage 4 varieties: * do_request(), do_convert(), do_unlock(), do_cancel() * These are called on the master node for the given lock and * from the central locking logic. */ static int do_request(struct dlm_rsb *r, struct dlm_lkb *lkb) { int error = 0; if (can_be_granted(r, lkb, 1, 0, NULL)) { grant_lock(r, lkb); queue_cast(r, lkb, 0); goto out; } if (can_be_queued(lkb)) { error = -EINPROGRESS; add_lkb(r, lkb, DLM_LKSTS_WAITING); goto out; } error = -EAGAIN; queue_cast(r, lkb, -EAGAIN); out: return error; } static void do_request_effects(struct dlm_rsb *r, struct dlm_lkb *lkb, int error) { switch (error) { case -EAGAIN: if (force_blocking_asts(lkb)) send_blocking_asts_all(r, lkb); break; case -EINPROGRESS: send_blocking_asts(r, lkb); break; } } static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb) { int error = 0; int deadlk = 0; /* changing an existing lock may allow others to be granted */ if (can_be_granted(r, lkb, 1, 0, &deadlk)) { grant_lock(r, lkb); queue_cast(r, lkb, 0); goto out; } /* can_be_granted() detected that this lock would block in a conversion deadlock, so we leave it on the granted queue and return EDEADLK in the ast for the convert. */ if (deadlk && !(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) { /* it's left on the granted queue */ revert_lock(r, lkb); queue_cast(r, lkb, -EDEADLK); error = -EDEADLK; goto out; } /* is_demoted() means the can_be_granted() above set the grmode to NL, and left us on the granted queue. This auto-demotion (due to CONVDEADLK) might mean other locks, and/or this lock, are now grantable. We have to try to grant other converting locks before we try again to grant this one. 
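
   An illustrative scenario (not taken from a real trace): two lkbs each
   hold PR on the same rsb and each asks to convert to EX.  Neither
   convert is compatible with the other's granted PR, so can_be_granted()
   flags a conversion deadlock.  If the convert was issued with
   DLM_LKF_CONVDEADLK, e.g.

	dlm_lock(ls, DLM_LOCK_EX, &lksb,
		 DLM_LKF_CONVERT | DLM_LKF_CONVDEADLK,
		 NULL, 0, 0, my_ast, my_astarg, my_bast);

   with lksb.sb_lkid still holding the id from the original request and
   my_ast/my_astarg/my_bast standing in for the caller's callbacks, then
   its granted mode is auto-demoted to NL as described above and the
   other converts are retried.  Without the flag (and without
   DLM_LKF_NODLCKWT) the lock stays granted at PR and its completion ast
   reports -EDEADLK, per the deadlk branch earlier in do_convert().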
*/ if (is_demoted(lkb)) { grant_pending_convert(r, DLM_LOCK_IV, NULL, NULL); if (_can_be_granted(r, lkb, 1, 0)) { grant_lock(r, lkb); queue_cast(r, lkb, 0); goto out; } /* else fall through and move to convert queue */ } if (can_be_queued(lkb)) { error = -EINPROGRESS; del_lkb(r, lkb); add_lkb(r, lkb, DLM_LKSTS_CONVERT); goto out; } error = -EAGAIN; queue_cast(r, lkb, -EAGAIN); out: return error; } static void do_convert_effects(struct dlm_rsb *r, struct dlm_lkb *lkb, int error) { switch (error) { case 0: grant_pending_locks(r, NULL); /* grant_pending_locks also sends basts */ break; case -EAGAIN: if (force_blocking_asts(lkb)) send_blocking_asts_all(r, lkb); break; case -EINPROGRESS: send_blocking_asts(r, lkb); break; } } static int do_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb) { remove_lock(r, lkb); queue_cast(r, lkb, -DLM_EUNLOCK); return -DLM_EUNLOCK; } static void do_unlock_effects(struct dlm_rsb *r, struct dlm_lkb *lkb, int error) { grant_pending_locks(r, NULL); } /* returns: 0 did nothing, -DLM_ECANCEL canceled lock */ static int do_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb) { int error; error = revert_lock(r, lkb); if (error) { queue_cast(r, lkb, -DLM_ECANCEL); return -DLM_ECANCEL; } return 0; } static void do_cancel_effects(struct dlm_rsb *r, struct dlm_lkb *lkb, int error) { if (error) grant_pending_locks(r, NULL); } /* * Four stage 3 varieties: * _request_lock(), _convert_lock(), _unlock_lock(), _cancel_lock() */ /* add a new lkb to a possibly new rsb, called by requesting process */ static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb) { int error; /* set_master: sets lkb nodeid from r */ error = set_master(r, lkb); if (error < 0) goto out; if (error) { error = 0; goto out; } if (is_remote(r)) { /* receive_request() calls do_request() on remote node */ error = send_request(r, lkb); } else { error = do_request(r, lkb); /* for remote locks the request_reply is sent between do_request and do_request_effects */ do_request_effects(r, lkb, error); } out: return error; } /* change some property of an existing lkb, e.g. 
mode */ static int _convert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb) { int error; if (is_remote(r)) { /* receive_convert() calls do_convert() on remote node */ error = send_convert(r, lkb); } else { error = do_convert(r, lkb); /* for remote locks the convert_reply is sent between do_convert and do_convert_effects */ do_convert_effects(r, lkb, error); } return error; } /* remove an existing lkb from the granted queue */ static int _unlock_lock(struct dlm_rsb *r, struct dlm_lkb *lkb) { int error; if (is_remote(r)) { /* receive_unlock() calls do_unlock() on remote node */ error = send_unlock(r, lkb); } else { error = do_unlock(r, lkb); /* for remote locks the unlock_reply is sent between do_unlock and do_unlock_effects */ do_unlock_effects(r, lkb, error); } return error; } /* remove an existing lkb from the convert or wait queue */ static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb) { int error; if (is_remote(r)) { /* receive_cancel() calls do_cancel() on remote node */ error = send_cancel(r, lkb); } else { error = do_cancel(r, lkb); /* for remote locks the cancel_reply is sent between do_cancel and do_cancel_effects */ do_cancel_effects(r, lkb, error); } return error; } /* * Four stage 2 varieties: * request_lock(), convert_lock(), unlock_lock(), cancel_lock() */ static int request_lock(struct dlm_ls *ls, struct dlm_lkb *lkb, const void *name, int len, struct dlm_args *args) { struct dlm_rsb *r; int error; error = validate_lock_args(ls, lkb, args); if (error) return error; error = find_rsb(ls, name, len, 0, R_REQUEST, &r); if (error) return error; lock_rsb(r); attach_lkb(r, lkb); lkb->lkb_lksb->sb_lkid = lkb->lkb_id; error = _request_lock(r, lkb); unlock_rsb(r); put_rsb(r); return error; } static int convert_lock(struct dlm_ls *ls, struct dlm_lkb *lkb, struct dlm_args *args) { struct dlm_rsb *r; int error; r = lkb->lkb_resource; hold_rsb(r); lock_rsb(r); error = validate_lock_args(ls, lkb, args); if (error) goto out; error = _convert_lock(r, lkb); out: unlock_rsb(r); put_rsb(r); return error; } static int unlock_lock(struct dlm_ls *ls, struct dlm_lkb *lkb, struct dlm_args *args) { struct dlm_rsb *r; int error; r = lkb->lkb_resource; hold_rsb(r); lock_rsb(r); error = validate_unlock_args(lkb, args); if (error) goto out; error = _unlock_lock(r, lkb); out: unlock_rsb(r); put_rsb(r); return error; } static int cancel_lock(struct dlm_ls *ls, struct dlm_lkb *lkb, struct dlm_args *args) { struct dlm_rsb *r; int error; r = lkb->lkb_resource; hold_rsb(r); lock_rsb(r); error = validate_unlock_args(lkb, args); if (error) goto out; error = _cancel_lock(r, lkb); out: unlock_rsb(r); put_rsb(r); return error; } /* * Two stage 1 varieties: dlm_lock() and dlm_unlock() */ int dlm_lock(dlm_lockspace_t *lockspace, int mode, struct dlm_lksb *lksb, uint32_t flags, const void *name, unsigned int namelen, uint32_t parent_lkid, void (*ast) (void *astarg), void *astarg, void (*bast) (void *astarg, int mode)) { struct dlm_ls *ls; struct dlm_lkb *lkb; struct dlm_args args; int error, convert = flags & DLM_LKF_CONVERT; ls = dlm_find_lockspace_local(lockspace); if (!ls) return -EINVAL; dlm_lock_recovery(ls); if (convert) error = find_lkb(ls, lksb->sb_lkid, &lkb); else error = create_lkb(ls, &lkb); if (error) goto out; trace_dlm_lock_start(ls, lkb, name, namelen, mode, flags); error = set_lock_args(mode, lksb, flags, namelen, ast, astarg, bast, &args); if (error) goto out_put; if (convert) error = convert_lock(ls, lkb, &args); else error = request_lock(ls, lkb, name, namelen, &args); if (error == 
-EINPROGRESS) error = 0; out_put: trace_dlm_lock_end(ls, lkb, name, namelen, mode, flags, error, true); if (convert || error) __put_lkb(ls, lkb); if (error == -EAGAIN || error == -EDEADLK) error = 0; out: dlm_unlock_recovery(ls); dlm_put_lockspace(ls); return error; } int dlm_unlock(dlm_lockspace_t *lockspace, uint32_t lkid, uint32_t flags, struct dlm_lksb *lksb, void *astarg) { struct dlm_ls *ls; struct dlm_lkb *lkb; struct dlm_args args; int error; ls = dlm_find_lockspace_local(lockspace); if (!ls) return -EINVAL; dlm_lock_recovery(ls); error = find_lkb(ls, lkid, &lkb); if (error) goto out; trace_dlm_unlock_start(ls, lkb, flags); error = set_unlock_args(flags, astarg, &args); if (error) goto out_put; if (flags & DLM_LKF_CANCEL) error = cancel_lock(ls, lkb, &args); else error = unlock_lock(ls, lkb, &args); if (error == -DLM_EUNLOCK || error == -DLM_ECANCEL) error = 0; if (error == -EBUSY && (flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK))) error = 0; out_put: trace_dlm_unlock_end(ls, lkb, flags, error); dlm_put_lkb(lkb); out: dlm_unlock_recovery(ls); dlm_put_lockspace(ls); return error; } /* * send/receive routines for remote operations and replies * * send_args * send_common * send_request receive_request * send_convert receive_convert * send_unlock receive_unlock * send_cancel receive_cancel * send_grant receive_grant * send_bast receive_bast * send_lookup receive_lookup * send_remove receive_remove * * send_common_reply * receive_request_reply send_request_reply * receive_convert_reply send_convert_reply * receive_unlock_reply send_unlock_reply * receive_cancel_reply send_cancel_reply * receive_lookup_reply send_lookup_reply */ static int _create_message(struct dlm_ls *ls, int mb_len, int to_nodeid, int mstype, struct dlm_message **ms_ret, struct dlm_mhandle **mh_ret, gfp_t allocation) { struct dlm_message *ms; struct dlm_mhandle *mh; char *mb; /* get_buffer gives us a message handle (mh) that we need to pass into midcomms_commit and a message buffer (mb) that we write our data into */ mh = dlm_midcomms_get_mhandle(to_nodeid, mb_len, allocation, &mb); if (!mh) return -ENOBUFS; ms = (struct dlm_message *) mb; ms->m_header.h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR); ms->m_header.u.h_lockspace = cpu_to_le32(ls->ls_global_id); ms->m_header.h_nodeid = cpu_to_le32(dlm_our_nodeid()); ms->m_header.h_length = cpu_to_le16(mb_len); ms->m_header.h_cmd = DLM_MSG; ms->m_type = cpu_to_le32(mstype); *mh_ret = mh; *ms_ret = ms; return 0; } static int create_message(struct dlm_rsb *r, struct dlm_lkb *lkb, int to_nodeid, int mstype, struct dlm_message **ms_ret, struct dlm_mhandle **mh_ret, gfp_t allocation) { int mb_len = sizeof(struct dlm_message); switch (mstype) { case DLM_MSG_REQUEST: case DLM_MSG_LOOKUP: case DLM_MSG_REMOVE: mb_len += r->res_length; break; case DLM_MSG_CONVERT: case DLM_MSG_UNLOCK: case DLM_MSG_REQUEST_REPLY: case DLM_MSG_CONVERT_REPLY: case DLM_MSG_GRANT: if (lkb && lkb->lkb_lvbptr && (lkb->lkb_exflags & DLM_LKF_VALBLK)) mb_len += r->res_ls->ls_lvblen; break; } return _create_message(r->res_ls, mb_len, to_nodeid, mstype, ms_ret, mh_ret, allocation); } /* further lowcomms enhancements or alternate implementations may make the return value from this function useful at some point */ static int send_message(struct dlm_mhandle *mh, struct dlm_message *ms, const void *name, int namelen) { dlm_midcomms_commit_mhandle(mh, name, namelen); return 0; } static void send_args(struct dlm_rsb *r, struct dlm_lkb *lkb, struct dlm_message *ms) { ms->m_nodeid = 
cpu_to_le32(lkb->lkb_nodeid); ms->m_pid = cpu_to_le32(lkb->lkb_ownpid); ms->m_lkid = cpu_to_le32(lkb->lkb_id); ms->m_remid = cpu_to_le32(lkb->lkb_remid); ms->m_exflags = cpu_to_le32(lkb->lkb_exflags); ms->m_sbflags = cpu_to_le32(dlm_sbflags_val(lkb)); ms->m_flags = cpu_to_le32(dlm_dflags_val(lkb)); ms->m_lvbseq = cpu_to_le32(lkb->lkb_lvbseq); ms->m_status = cpu_to_le32(lkb->lkb_status); ms->m_grmode = cpu_to_le32(lkb->lkb_grmode); ms->m_rqmode = cpu_to_le32(lkb->lkb_rqmode); ms->m_hash = cpu_to_le32(r->res_hash); /* m_result and m_bastmode are set from function args, not from lkb fields */ if (lkb->lkb_bastfn) ms->m_asts |= cpu_to_le32(DLM_CB_BAST); if (lkb->lkb_astfn) ms->m_asts |= cpu_to_le32(DLM_CB_CAST); /* compare with switch in create_message; send_remove() doesn't use send_args() */ switch (ms->m_type) { case cpu_to_le32(DLM_MSG_REQUEST): case cpu_to_le32(DLM_MSG_LOOKUP): memcpy(ms->m_extra, r->res_name, r->res_length); break; case cpu_to_le32(DLM_MSG_CONVERT): case cpu_to_le32(DLM_MSG_UNLOCK): case cpu_to_le32(DLM_MSG_REQUEST_REPLY): case cpu_to_le32(DLM_MSG_CONVERT_REPLY): case cpu_to_le32(DLM_MSG_GRANT): if (!lkb->lkb_lvbptr || !(lkb->lkb_exflags & DLM_LKF_VALBLK)) break; memcpy(ms->m_extra, lkb->lkb_lvbptr, r->res_ls->ls_lvblen); break; } } static int send_common(struct dlm_rsb *r, struct dlm_lkb *lkb, int mstype) { struct dlm_message *ms; struct dlm_mhandle *mh; int to_nodeid, error; to_nodeid = r->res_nodeid; error = add_to_waiters(lkb, mstype, to_nodeid); if (error) return error; error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh, GFP_NOFS); if (error) goto fail; send_args(r, lkb, ms); error = send_message(mh, ms, r->res_name, r->res_length); if (error) goto fail; return 0; fail: remove_from_waiters(lkb, msg_reply_type(mstype)); return error; } static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb) { return send_common(r, lkb, DLM_MSG_REQUEST); } static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb) { int error; error = send_common(r, lkb, DLM_MSG_CONVERT); /* down conversions go without a reply from the master */ if (!error && down_conversion(lkb)) { remove_from_waiters(lkb, DLM_MSG_CONVERT_REPLY); r->res_ls->ls_local_ms.m_type = cpu_to_le32(DLM_MSG_CONVERT_REPLY); r->res_ls->ls_local_ms.m_result = 0; __receive_convert_reply(r, lkb, &r->res_ls->ls_local_ms, true); } return error; } /* FIXME: if this lkb is the only lock we hold on the rsb, then set MASTER_UNCERTAIN to force the next request on the rsb to confirm that the master is still correct. 
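
   (Reader's note, a best-effort reading of the surrounding code rather
   than a statement that this FIXME has been addressed: the flag referred
   to here, RSB_MASTER_UNCERTAIN, is the one consumed at the top of
   set_master() above; when it is set, the next request on the rsb
   becomes res_first_lkid, which is what later gives confirm_master(),
   or the -ENOTBLK/-EBADR handling in receive_request_reply() below, a
   chance to correct a stale master.)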
*/ static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb) { return send_common(r, lkb, DLM_MSG_UNLOCK); } static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb) { return send_common(r, lkb, DLM_MSG_CANCEL); } static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb) { struct dlm_message *ms; struct dlm_mhandle *mh; int to_nodeid, error; to_nodeid = lkb->lkb_nodeid; error = create_message(r, lkb, to_nodeid, DLM_MSG_GRANT, &ms, &mh, GFP_NOFS); if (error) goto out; send_args(r, lkb, ms); ms->m_result = 0; error = send_message(mh, ms, r->res_name, r->res_length); out: return error; } static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode) { struct dlm_message *ms; struct dlm_mhandle *mh; int to_nodeid, error; to_nodeid = lkb->lkb_nodeid; error = create_message(r, NULL, to_nodeid, DLM_MSG_BAST, &ms, &mh, GFP_NOFS); if (error) goto out; send_args(r, lkb, ms); ms->m_bastmode = cpu_to_le32(mode); error = send_message(mh, ms, r->res_name, r->res_length); out: return error; } static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb) { struct dlm_message *ms; struct dlm_mhandle *mh; int to_nodeid, error; to_nodeid = dlm_dir_nodeid(r); error = add_to_waiters(lkb, DLM_MSG_LOOKUP, to_nodeid); if (error) return error; error = create_message(r, NULL, to_nodeid, DLM_MSG_LOOKUP, &ms, &mh, GFP_NOFS); if (error) goto fail; send_args(r, lkb, ms); error = send_message(mh, ms, r->res_name, r->res_length); if (error) goto fail; return 0; fail: remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY); return error; } static int send_remove(struct dlm_rsb *r) { struct dlm_message *ms; struct dlm_mhandle *mh; int to_nodeid, error; to_nodeid = dlm_dir_nodeid(r); error = create_message(r, NULL, to_nodeid, DLM_MSG_REMOVE, &ms, &mh, GFP_ATOMIC); if (error) goto out; memcpy(ms->m_extra, r->res_name, r->res_length); ms->m_hash = cpu_to_le32(r->res_hash); error = send_message(mh, ms, r->res_name, r->res_length); out: return error; } static int send_common_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int mstype, int rv) { struct dlm_message *ms; struct dlm_mhandle *mh; int to_nodeid, error; to_nodeid = lkb->lkb_nodeid; error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh, GFP_NOFS); if (error) goto out; send_args(r, lkb, ms); ms->m_result = cpu_to_le32(to_dlm_errno(rv)); error = send_message(mh, ms, r->res_name, r->res_length); out: return error; } static int send_request_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv) { return send_common_reply(r, lkb, DLM_MSG_REQUEST_REPLY, rv); } static int send_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv) { return send_common_reply(r, lkb, DLM_MSG_CONVERT_REPLY, rv); } static int send_unlock_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv) { return send_common_reply(r, lkb, DLM_MSG_UNLOCK_REPLY, rv); } static int send_cancel_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv) { return send_common_reply(r, lkb, DLM_MSG_CANCEL_REPLY, rv); } static int send_lookup_reply(struct dlm_ls *ls, const struct dlm_message *ms_in, int ret_nodeid, int rv) { struct dlm_rsb *r = &ls->ls_local_rsb; struct dlm_message *ms; struct dlm_mhandle *mh; int error, nodeid = le32_to_cpu(ms_in->m_header.h_nodeid); error = create_message(r, NULL, nodeid, DLM_MSG_LOOKUP_REPLY, &ms, &mh, GFP_NOFS); if (error) goto out; ms->m_lkid = ms_in->m_lkid; ms->m_result = cpu_to_le32(to_dlm_errno(rv)); ms->m_nodeid = cpu_to_le32(ret_nodeid); error = send_message(mh, ms, ms_in->m_extra, receive_extralen(ms_in)); out: return error; } /* which args we save from a 
received message depends heavily on the type of message, unlike the send side where we can safely send everything about the lkb for any type of message */ static void receive_flags(struct dlm_lkb *lkb, const struct dlm_message *ms) { lkb->lkb_exflags = le32_to_cpu(ms->m_exflags); dlm_set_sbflags_val(lkb, le32_to_cpu(ms->m_sbflags)); dlm_set_dflags_val(lkb, le32_to_cpu(ms->m_flags)); } static void receive_flags_reply(struct dlm_lkb *lkb, const struct dlm_message *ms, bool local) { if (local) return; dlm_set_sbflags_val(lkb, le32_to_cpu(ms->m_sbflags)); dlm_set_dflags_val(lkb, le32_to_cpu(ms->m_flags)); } static int receive_extralen(const struct dlm_message *ms) { return (le16_to_cpu(ms->m_header.h_length) - sizeof(struct dlm_message)); } static int receive_lvb(struct dlm_ls *ls, struct dlm_lkb *lkb, const struct dlm_message *ms) { int len; if (lkb->lkb_exflags & DLM_LKF_VALBLK) { if (!lkb->lkb_lvbptr) lkb->lkb_lvbptr = dlm_allocate_lvb(ls); if (!lkb->lkb_lvbptr) return -ENOMEM; len = receive_extralen(ms); if (len > ls->ls_lvblen) len = ls->ls_lvblen; memcpy(lkb->lkb_lvbptr, ms->m_extra, len); } return 0; } static void fake_bastfn(void *astparam, int mode) { log_print("fake_bastfn should not be called"); } static void fake_astfn(void *astparam) { log_print("fake_astfn should not be called"); } static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb, const struct dlm_message *ms) { lkb->lkb_nodeid = le32_to_cpu(ms->m_header.h_nodeid); lkb->lkb_ownpid = le32_to_cpu(ms->m_pid); lkb->lkb_remid = le32_to_cpu(ms->m_lkid); lkb->lkb_grmode = DLM_LOCK_IV; lkb->lkb_rqmode = le32_to_cpu(ms->m_rqmode); lkb->lkb_bastfn = (ms->m_asts & cpu_to_le32(DLM_CB_BAST)) ? &fake_bastfn : NULL; lkb->lkb_astfn = (ms->m_asts & cpu_to_le32(DLM_CB_CAST)) ? &fake_astfn : NULL; if (lkb->lkb_exflags & DLM_LKF_VALBLK) { /* lkb was just created so there won't be an lvb yet */ lkb->lkb_lvbptr = dlm_allocate_lvb(ls); if (!lkb->lkb_lvbptr) return -ENOMEM; } return 0; } static int receive_convert_args(struct dlm_ls *ls, struct dlm_lkb *lkb, const struct dlm_message *ms) { if (lkb->lkb_status != DLM_LKSTS_GRANTED) return -EBUSY; if (receive_lvb(ls, lkb, ms)) return -ENOMEM; lkb->lkb_rqmode = le32_to_cpu(ms->m_rqmode); lkb->lkb_lvbseq = le32_to_cpu(ms->m_lvbseq); return 0; } static int receive_unlock_args(struct dlm_ls *ls, struct dlm_lkb *lkb, const struct dlm_message *ms) { if (receive_lvb(ls, lkb, ms)) return -ENOMEM; return 0; } /* We fill in the local-lkb fields with the info that send_xxxx_reply() uses to send a reply and that the remote end uses to process the reply. */ static void setup_local_lkb(struct dlm_ls *ls, const struct dlm_message *ms) { struct dlm_lkb *lkb = &ls->ls_local_lkb; lkb->lkb_nodeid = le32_to_cpu(ms->m_header.h_nodeid); lkb->lkb_remid = le32_to_cpu(ms->m_lkid); } /* This is called after the rsb is locked so that we can safely inspect fields in the lkb. 
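
   In sketch form, the checks below boil down to (not exhaustive):

	CONVERT / UNLOCK / CANCEL    must land on the master copy of the
	                             lock (is_master_copy) and come from
	                             the node holding the process copy
	replies, GRANT, BAST         must land on the process copy
	                             (is_process_copy) and come from the
	                             master node
	REQUEST_REPLY                as above, except lkb_nodeid may still
	                             be -1 while a lookup is being resolved

   Anything else is logged and ignored with -EINVAL, as is a user-space
   message arriving for a kernel-side lock.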
*/ static int validate_message(struct dlm_lkb *lkb, const struct dlm_message *ms) { int from = le32_to_cpu(ms->m_header.h_nodeid); int error = 0; /* currently mixing of user/kernel locks are not supported */ if (ms->m_flags & cpu_to_le32(BIT(DLM_DFL_USER_BIT)) && !test_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags)) { log_error(lkb->lkb_resource->res_ls, "got user dlm message for a kernel lock"); error = -EINVAL; goto out; } switch (ms->m_type) { case cpu_to_le32(DLM_MSG_CONVERT): case cpu_to_le32(DLM_MSG_UNLOCK): case cpu_to_le32(DLM_MSG_CANCEL): if (!is_master_copy(lkb) || lkb->lkb_nodeid != from) error = -EINVAL; break; case cpu_to_le32(DLM_MSG_CONVERT_REPLY): case cpu_to_le32(DLM_MSG_UNLOCK_REPLY): case cpu_to_le32(DLM_MSG_CANCEL_REPLY): case cpu_to_le32(DLM_MSG_GRANT): case cpu_to_le32(DLM_MSG_BAST): if (!is_process_copy(lkb) || lkb->lkb_nodeid != from) error = -EINVAL; break; case cpu_to_le32(DLM_MSG_REQUEST_REPLY): if (!is_process_copy(lkb)) error = -EINVAL; else if (lkb->lkb_nodeid != -1 && lkb->lkb_nodeid != from) error = -EINVAL; break; default: error = -EINVAL; } out: if (error) log_error(lkb->lkb_resource->res_ls, "ignore invalid message %d from %d %x %x %x %d", le32_to_cpu(ms->m_type), from, lkb->lkb_id, lkb->lkb_remid, dlm_iflags_val(lkb), lkb->lkb_nodeid); return error; } static int receive_request(struct dlm_ls *ls, const struct dlm_message *ms) { struct dlm_lkb *lkb; struct dlm_rsb *r; int from_nodeid; int error, namelen = 0; from_nodeid = le32_to_cpu(ms->m_header.h_nodeid); error = create_lkb(ls, &lkb); if (error) goto fail; receive_flags(lkb, ms); set_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags); error = receive_request_args(ls, lkb, ms); if (error) { __put_lkb(ls, lkb); goto fail; } /* The dir node is the authority on whether we are the master for this rsb or not, so if the master sends us a request, we should recreate the rsb if we've destroyed it. This race happens when we send a remove message to the dir node at the same time that the dir node sends us a request for the rsb. */ namelen = receive_extralen(ms); error = find_rsb(ls, ms->m_extra, namelen, from_nodeid, R_RECEIVE_REQUEST, &r); if (error) { __put_lkb(ls, lkb); goto fail; } lock_rsb(r); if (r->res_master_nodeid != dlm_our_nodeid()) { error = validate_master_nodeid(ls, r, from_nodeid); if (error) { unlock_rsb(r); put_rsb(r); __put_lkb(ls, lkb); goto fail; } } attach_lkb(r, lkb); error = do_request(r, lkb); send_request_reply(r, lkb, error); do_request_effects(r, lkb, error); unlock_rsb(r); put_rsb(r); if (error == -EINPROGRESS) error = 0; if (error) dlm_put_lkb(lkb); return 0; fail: /* TODO: instead of returning ENOTBLK, add the lkb to res_lookup and do this receive_request again from process_lookup_list once we get the lookup reply. This would avoid a many repeated ENOTBLK request failures when the lookup reply designating us as master is delayed. 
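
   Roughly, the race being described (a sketch, not a captured trace;
   A is another requester, B is this node, D is the directory node):

	B -> D : LOOKUP              D decides B will be the master
	A -> D : LOOKUP              D answers A: "the master is B"
	A -> B : REQUEST             arrives before D's LOOKUP_REPLY has
	                             reached B, so B is not yet master
	                             here and answers -ENOTBLK
	A      : receive_request_reply() sees -ENOTBLK, clears the master
	         and goes back through lookup + request, possibly several
	         times, until B has processed its own LOOKUP_REPLY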
*/ if (error != -ENOTBLK) { log_limit(ls, "receive_request %x from %d %d", le32_to_cpu(ms->m_lkid), from_nodeid, error); } setup_local_lkb(ls, ms); send_request_reply(&ls->ls_local_rsb, &ls->ls_local_lkb, error); return error; } static int receive_convert(struct dlm_ls *ls, const struct dlm_message *ms) { struct dlm_lkb *lkb; struct dlm_rsb *r; int error, reply = 1; error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb); if (error) goto fail; if (lkb->lkb_remid != le32_to_cpu(ms->m_lkid)) { log_error(ls, "receive_convert %x remid %x recover_seq %llu " "remote %d %x", lkb->lkb_id, lkb->lkb_remid, (unsigned long long)lkb->lkb_recover_seq, le32_to_cpu(ms->m_header.h_nodeid), le32_to_cpu(ms->m_lkid)); error = -ENOENT; dlm_put_lkb(lkb); goto fail; } r = lkb->lkb_resource; hold_rsb(r); lock_rsb(r); error = validate_message(lkb, ms); if (error) goto out; receive_flags(lkb, ms); error = receive_convert_args(ls, lkb, ms); if (error) { send_convert_reply(r, lkb, error); goto out; } reply = !down_conversion(lkb); error = do_convert(r, lkb); if (reply) send_convert_reply(r, lkb, error); do_convert_effects(r, lkb, error); out: unlock_rsb(r); put_rsb(r); dlm_put_lkb(lkb); return 0; fail: setup_local_lkb(ls, ms); send_convert_reply(&ls->ls_local_rsb, &ls->ls_local_lkb, error); return error; } static int receive_unlock(struct dlm_ls *ls, const struct dlm_message *ms) { struct dlm_lkb *lkb; struct dlm_rsb *r; int error; error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb); if (error) goto fail; if (lkb->lkb_remid != le32_to_cpu(ms->m_lkid)) { log_error(ls, "receive_unlock %x remid %x remote %d %x", lkb->lkb_id, lkb->lkb_remid, le32_to_cpu(ms->m_header.h_nodeid), le32_to_cpu(ms->m_lkid)); error = -ENOENT; dlm_put_lkb(lkb); goto fail; } r = lkb->lkb_resource; hold_rsb(r); lock_rsb(r); error = validate_message(lkb, ms); if (error) goto out; receive_flags(lkb, ms); error = receive_unlock_args(ls, lkb, ms); if (error) { send_unlock_reply(r, lkb, error); goto out; } error = do_unlock(r, lkb); send_unlock_reply(r, lkb, error); do_unlock_effects(r, lkb, error); out: unlock_rsb(r); put_rsb(r); dlm_put_lkb(lkb); return 0; fail: setup_local_lkb(ls, ms); send_unlock_reply(&ls->ls_local_rsb, &ls->ls_local_lkb, error); return error; } static int receive_cancel(struct dlm_ls *ls, const struct dlm_message *ms) { struct dlm_lkb *lkb; struct dlm_rsb *r; int error; error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb); if (error) goto fail; receive_flags(lkb, ms); r = lkb->lkb_resource; hold_rsb(r); lock_rsb(r); error = validate_message(lkb, ms); if (error) goto out; error = do_cancel(r, lkb); send_cancel_reply(r, lkb, error); do_cancel_effects(r, lkb, error); out: unlock_rsb(r); put_rsb(r); dlm_put_lkb(lkb); return 0; fail: setup_local_lkb(ls, ms); send_cancel_reply(&ls->ls_local_rsb, &ls->ls_local_lkb, error); return error; } static int receive_grant(struct dlm_ls *ls, const struct dlm_message *ms) { struct dlm_lkb *lkb; struct dlm_rsb *r; int error; error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb); if (error) return error; r = lkb->lkb_resource; hold_rsb(r); lock_rsb(r); error = validate_message(lkb, ms); if (error) goto out; receive_flags_reply(lkb, ms, false); if (is_altmode(lkb)) munge_altmode(lkb, ms); grant_lock_pc(r, lkb, ms); queue_cast(r, lkb, 0); out: unlock_rsb(r); put_rsb(r); dlm_put_lkb(lkb); return 0; } static int receive_bast(struct dlm_ls *ls, const struct dlm_message *ms) { struct dlm_lkb *lkb; struct dlm_rsb *r; int error; error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb); if (error) return error; r = 
lkb->lkb_resource; hold_rsb(r); lock_rsb(r); error = validate_message(lkb, ms); if (error) goto out; queue_bast(r, lkb, le32_to_cpu(ms->m_bastmode)); lkb->lkb_highbast = le32_to_cpu(ms->m_bastmode); out: unlock_rsb(r); put_rsb(r); dlm_put_lkb(lkb); return 0; } static void receive_lookup(struct dlm_ls *ls, const struct dlm_message *ms) { int len, error, ret_nodeid, from_nodeid, our_nodeid; from_nodeid = le32_to_cpu(ms->m_header.h_nodeid); our_nodeid = dlm_our_nodeid(); len = receive_extralen(ms); error = dlm_master_lookup(ls, from_nodeid, ms->m_extra, len, 0, &ret_nodeid, NULL); /* Optimization: we're master so treat lookup as a request */ if (!error && ret_nodeid == our_nodeid) { receive_request(ls, ms); return; } send_lookup_reply(ls, ms, ret_nodeid, error); } static void receive_remove(struct dlm_ls *ls, const struct dlm_message *ms) { char name[DLM_RESNAME_MAXLEN+1]; struct dlm_rsb *r; uint32_t hash, b; int rv, len, dir_nodeid, from_nodeid; from_nodeid = le32_to_cpu(ms->m_header.h_nodeid); len = receive_extralen(ms); if (len > DLM_RESNAME_MAXLEN) { log_error(ls, "receive_remove from %d bad len %d", from_nodeid, len); return; } dir_nodeid = dlm_hash2nodeid(ls, le32_to_cpu(ms->m_hash)); if (dir_nodeid != dlm_our_nodeid()) { log_error(ls, "receive_remove from %d bad nodeid %d", from_nodeid, dir_nodeid); return; } /* Look for name on rsbtbl.toss, if it's there, kill it. If it's on rsbtbl.keep, it's being used, and we should ignore this message. This is an expected race between the dir node sending a request to the master node at the same time as the master node sends a remove to the dir node. The resolution to that race is for the dir node to ignore the remove message, and the master node to recreate the master rsb when it gets a request from the dir node for an rsb it doesn't have. 
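
   A concrete (illustrative) interleaving: the last local user on master
   M drops the rsb, so M sends REMOVE to directory node D; at the same
   moment D, which also wants a lock on that resource, sends M a REQUEST.
   When the REMOVE reaches D the rsb is on D's keep list, so D ignores it
   (the keep-list branch below); when the REQUEST reaches M, find_rsb()
   recreates the rsb, per the comment in receive_request above.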
*/ memset(name, 0, sizeof(name)); memcpy(name, ms->m_extra, len); hash = jhash(name, len, 0); b = hash & (ls->ls_rsbtbl_size - 1); spin_lock(&ls->ls_rsbtbl[b].lock); rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r); if (rv) { /* verify the rsb is on keep list per comment above */ rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r); if (rv) { /* should not happen */ log_error(ls, "receive_remove from %d not found %s", from_nodeid, name); spin_unlock(&ls->ls_rsbtbl[b].lock); return; } if (r->res_master_nodeid != from_nodeid) { /* should not happen */ log_error(ls, "receive_remove keep from %d master %d", from_nodeid, r->res_master_nodeid); dlm_print_rsb(r); spin_unlock(&ls->ls_rsbtbl[b].lock); return; } log_debug(ls, "receive_remove from %d master %d first %x %s", from_nodeid, r->res_master_nodeid, r->res_first_lkid, name); spin_unlock(&ls->ls_rsbtbl[b].lock); return; } if (r->res_master_nodeid != from_nodeid) { log_error(ls, "receive_remove toss from %d master %d", from_nodeid, r->res_master_nodeid); dlm_print_rsb(r); spin_unlock(&ls->ls_rsbtbl[b].lock); return; } if (kref_put(&r->res_ref, kill_rsb)) { rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss); spin_unlock(&ls->ls_rsbtbl[b].lock); dlm_free_rsb(r); } else { log_error(ls, "receive_remove from %d rsb ref error", from_nodeid); dlm_print_rsb(r); spin_unlock(&ls->ls_rsbtbl[b].lock); } } static void receive_purge(struct dlm_ls *ls, const struct dlm_message *ms) { do_purge(ls, le32_to_cpu(ms->m_nodeid), le32_to_cpu(ms->m_pid)); } static int receive_request_reply(struct dlm_ls *ls, const struct dlm_message *ms) { struct dlm_lkb *lkb; struct dlm_rsb *r; int error, mstype, result; int from_nodeid = le32_to_cpu(ms->m_header.h_nodeid); error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb); if (error) return error; r = lkb->lkb_resource; hold_rsb(r); lock_rsb(r); error = validate_message(lkb, ms); if (error) goto out; mstype = lkb->lkb_wait_type; error = remove_from_waiters(lkb, DLM_MSG_REQUEST_REPLY); if (error) { log_error(ls, "receive_request_reply %x remote %d %x result %d", lkb->lkb_id, from_nodeid, le32_to_cpu(ms->m_lkid), from_dlm_errno(le32_to_cpu(ms->m_result))); dlm_dump_rsb(r); goto out; } /* Optimization: the dir node was also the master, so it took our lookup as a request and sent request reply instead of lookup reply */ if (mstype == DLM_MSG_LOOKUP) { r->res_master_nodeid = from_nodeid; r->res_nodeid = from_nodeid; lkb->lkb_nodeid = from_nodeid; } /* this is the value returned from do_request() on the master */ result = from_dlm_errno(le32_to_cpu(ms->m_result)); switch (result) { case -EAGAIN: /* request would block (be queued) on remote master */ queue_cast(r, lkb, -EAGAIN); confirm_master(r, -EAGAIN); unhold_lkb(lkb); /* undoes create_lkb() */ break; case -EINPROGRESS: case 0: /* request was queued or granted on remote master */ receive_flags_reply(lkb, ms, false); lkb->lkb_remid = le32_to_cpu(ms->m_lkid); if (is_altmode(lkb)) munge_altmode(lkb, ms); if (result) { add_lkb(r, lkb, DLM_LKSTS_WAITING); } else { grant_lock_pc(r, lkb, ms); queue_cast(r, lkb, 0); } confirm_master(r, result); break; case -EBADR: case -ENOTBLK: /* find_rsb failed to find rsb or rsb wasn't master */ log_limit(ls, "receive_request_reply %x from %d %d " "master %d dir %d first %x %s", lkb->lkb_id, from_nodeid, result, r->res_master_nodeid, r->res_dir_nodeid, r->res_first_lkid, r->res_name); if (r->res_dir_nodeid != dlm_our_nodeid() && r->res_master_nodeid != dlm_our_nodeid()) { /* cause _request_lock->set_master->send_lookup */ 
r->res_master_nodeid = 0; r->res_nodeid = -1; lkb->lkb_nodeid = -1; } if (is_overlap(lkb)) { /* we'll ignore error in cancel/unlock reply */ queue_cast_overlap(r, lkb); confirm_master(r, result); unhold_lkb(lkb); /* undoes create_lkb() */ } else { _request_lock(r, lkb); if (r->res_master_nodeid == dlm_our_nodeid()) confirm_master(r, 0); } break; default: log_error(ls, "receive_request_reply %x error %d", lkb->lkb_id, result); } if ((result == 0 || result == -EINPROGRESS) && test_and_clear_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags)) { log_debug(ls, "receive_request_reply %x result %d unlock", lkb->lkb_id, result); clear_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags); send_unlock(r, lkb); } else if ((result == -EINPROGRESS) && test_and_clear_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags)) { log_debug(ls, "receive_request_reply %x cancel", lkb->lkb_id); clear_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags); send_cancel(r, lkb); } else { clear_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags); clear_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags); } out: unlock_rsb(r); put_rsb(r); dlm_put_lkb(lkb); return 0; } static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, const struct dlm_message *ms, bool local) { /* this is the value returned from do_convert() on the master */ switch (from_dlm_errno(le32_to_cpu(ms->m_result))) { case -EAGAIN: /* convert would block (be queued) on remote master */ queue_cast(r, lkb, -EAGAIN); break; case -EDEADLK: receive_flags_reply(lkb, ms, local); revert_lock_pc(r, lkb); queue_cast(r, lkb, -EDEADLK); break; case -EINPROGRESS: /* convert was queued on remote master */ receive_flags_reply(lkb, ms, local); if (is_demoted(lkb)) munge_demoted(lkb); del_lkb(r, lkb); add_lkb(r, lkb, DLM_LKSTS_CONVERT); break; case 0: /* convert was granted on remote master */ receive_flags_reply(lkb, ms, local); if (is_demoted(lkb)) munge_demoted(lkb); grant_lock_pc(r, lkb, ms); queue_cast(r, lkb, 0); break; default: log_error(r->res_ls, "receive_convert_reply %x remote %d %x %d", lkb->lkb_id, le32_to_cpu(ms->m_header.h_nodeid), le32_to_cpu(ms->m_lkid), from_dlm_errno(le32_to_cpu(ms->m_result))); dlm_print_rsb(r); dlm_print_lkb(lkb); } } static void _receive_convert_reply(struct dlm_lkb *lkb, const struct dlm_message *ms, bool local) { struct dlm_rsb *r = lkb->lkb_resource; int error; hold_rsb(r); lock_rsb(r); error = validate_message(lkb, ms); if (error) goto out; /* local reply can happen with waiters_mutex held */ error = remove_from_waiters_ms(lkb, ms, local); if (error) goto out; __receive_convert_reply(r, lkb, ms, local); out: unlock_rsb(r); put_rsb(r); } static int receive_convert_reply(struct dlm_ls *ls, const struct dlm_message *ms) { struct dlm_lkb *lkb; int error; error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb); if (error) return error; _receive_convert_reply(lkb, ms, false); dlm_put_lkb(lkb); return 0; } static void _receive_unlock_reply(struct dlm_lkb *lkb, const struct dlm_message *ms, bool local) { struct dlm_rsb *r = lkb->lkb_resource; int error; hold_rsb(r); lock_rsb(r); error = validate_message(lkb, ms); if (error) goto out; /* local reply can happen with waiters_mutex held */ error = remove_from_waiters_ms(lkb, ms, local); if (error) goto out; /* this is the value returned from do_unlock() on the master */ switch (from_dlm_errno(le32_to_cpu(ms->m_result))) { case -DLM_EUNLOCK: receive_flags_reply(lkb, ms, local); remove_lock_pc(r, lkb); queue_cast(r, lkb, -DLM_EUNLOCK); break; case -ENOENT: break; default: log_error(r->res_ls, 
"receive_unlock_reply %x error %d", lkb->lkb_id, from_dlm_errno(le32_to_cpu(ms->m_result))); } out: unlock_rsb(r); put_rsb(r); } static int receive_unlock_reply(struct dlm_ls *ls, const struct dlm_message *ms) { struct dlm_lkb *lkb; int error; error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb); if (error) return error; _receive_unlock_reply(lkb, ms, false); dlm_put_lkb(lkb); return 0; } static void _receive_cancel_reply(struct dlm_lkb *lkb, const struct dlm_message *ms, bool local) { struct dlm_rsb *r = lkb->lkb_resource; int error; hold_rsb(r); lock_rsb(r); error = validate_message(lkb, ms); if (error) goto out; /* local reply can happen with waiters_mutex held */ error = remove_from_waiters_ms(lkb, ms, local); if (error) goto out; /* this is the value returned from do_cancel() on the master */ switch (from_dlm_errno(le32_to_cpu(ms->m_result))) { case -DLM_ECANCEL: receive_flags_reply(lkb, ms, local); revert_lock_pc(r, lkb); queue_cast(r, lkb, -DLM_ECANCEL); break; case 0: break; default: log_error(r->res_ls, "receive_cancel_reply %x error %d", lkb->lkb_id, from_dlm_errno(le32_to_cpu(ms->m_result))); } out: unlock_rsb(r); put_rsb(r); } static int receive_cancel_reply(struct dlm_ls *ls, const struct dlm_message *ms) { struct dlm_lkb *lkb; int error; error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb); if (error) return error; _receive_cancel_reply(lkb, ms, false); dlm_put_lkb(lkb); return 0; } static void receive_lookup_reply(struct dlm_ls *ls, const struct dlm_message *ms) { struct dlm_lkb *lkb; struct dlm_rsb *r; int error, ret_nodeid; int do_lookup_list = 0; error = find_lkb(ls, le32_to_cpu(ms->m_lkid), &lkb); if (error) { log_error(ls, "%s no lkid %x", __func__, le32_to_cpu(ms->m_lkid)); return; } /* ms->m_result is the value returned by dlm_master_lookup on dir node FIXME: will a non-zero error ever be returned? */ r = lkb->lkb_resource; hold_rsb(r); lock_rsb(r); error = remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY); if (error) goto out; ret_nodeid = le32_to_cpu(ms->m_nodeid); /* We sometimes receive a request from the dir node for this rsb before we've received the dir node's loookup_reply for it. The request from the dir node implies we're the master, so we set ourself as master in receive_request_reply, and verify here that we are indeed the master. 
*/ if (r->res_master_nodeid && (r->res_master_nodeid != ret_nodeid)) { /* This should never happen */ log_error(ls, "receive_lookup_reply %x from %d ret %d " "master %d dir %d our %d first %x %s", lkb->lkb_id, le32_to_cpu(ms->m_header.h_nodeid), ret_nodeid, r->res_master_nodeid, r->res_dir_nodeid, dlm_our_nodeid(), r->res_first_lkid, r->res_name); } if (ret_nodeid == dlm_our_nodeid()) { r->res_master_nodeid = ret_nodeid; r->res_nodeid = 0; do_lookup_list = 1; r->res_first_lkid = 0; } else if (ret_nodeid == -1) { /* the remote node doesn't believe it's the dir node */ log_error(ls, "receive_lookup_reply %x from %d bad ret_nodeid", lkb->lkb_id, le32_to_cpu(ms->m_header.h_nodeid)); r->res_master_nodeid = 0; r->res_nodeid = -1; lkb->lkb_nodeid = -1; } else { /* set_master() will set lkb_nodeid from r */ r->res_master_nodeid = ret_nodeid; r->res_nodeid = ret_nodeid; } if (is_overlap(lkb)) { log_debug(ls, "receive_lookup_reply %x unlock %x", lkb->lkb_id, dlm_iflags_val(lkb)); queue_cast_overlap(r, lkb); unhold_lkb(lkb); /* undoes create_lkb() */ goto out_list; } _request_lock(r, lkb); out_list: if (do_lookup_list) process_lookup_list(r); out: unlock_rsb(r); put_rsb(r); dlm_put_lkb(lkb); } static void _receive_message(struct dlm_ls *ls, const struct dlm_message *ms, uint32_t saved_seq) { int error = 0, noent = 0; if (WARN_ON_ONCE(!dlm_is_member(ls, le32_to_cpu(ms->m_header.h_nodeid)))) { log_limit(ls, "receive %d from non-member %d %x %x %d", le32_to_cpu(ms->m_type), le32_to_cpu(ms->m_header.h_nodeid), le32_to_cpu(ms->m_lkid), le32_to_cpu(ms->m_remid), from_dlm_errno(le32_to_cpu(ms->m_result))); return; } switch (ms->m_type) { /* messages sent to a master node */ case cpu_to_le32(DLM_MSG_REQUEST): error = receive_request(ls, ms); break; case cpu_to_le32(DLM_MSG_CONVERT): error = receive_convert(ls, ms); break; case cpu_to_le32(DLM_MSG_UNLOCK): error = receive_unlock(ls, ms); break; case cpu_to_le32(DLM_MSG_CANCEL): noent = 1; error = receive_cancel(ls, ms); break; /* messages sent from a master node (replies to above) */ case cpu_to_le32(DLM_MSG_REQUEST_REPLY): error = receive_request_reply(ls, ms); break; case cpu_to_le32(DLM_MSG_CONVERT_REPLY): error = receive_convert_reply(ls, ms); break; case cpu_to_le32(DLM_MSG_UNLOCK_REPLY): error = receive_unlock_reply(ls, ms); break; case cpu_to_le32(DLM_MSG_CANCEL_REPLY): error = receive_cancel_reply(ls, ms); break; /* messages sent from a master node (only two types of async msg) */ case cpu_to_le32(DLM_MSG_GRANT): noent = 1; error = receive_grant(ls, ms); break; case cpu_to_le32(DLM_MSG_BAST): noent = 1; error = receive_bast(ls, ms); break; /* messages sent to a dir node */ case cpu_to_le32(DLM_MSG_LOOKUP): receive_lookup(ls, ms); break; case cpu_to_le32(DLM_MSG_REMOVE): receive_remove(ls, ms); break; /* messages sent from a dir node (remove has no reply) */ case cpu_to_le32(DLM_MSG_LOOKUP_REPLY): receive_lookup_reply(ls, ms); break; /* other messages */ case cpu_to_le32(DLM_MSG_PURGE): receive_purge(ls, ms); break; default: log_error(ls, "unknown message type %d", le32_to_cpu(ms->m_type)); } /* * When checking for ENOENT, we're checking the result of * find_lkb(m_remid): * * The lock id referenced in the message wasn't found. This may * happen in normal usage for the async messages and cancel, so * only use log_debug for them. * * Some errors are expected and normal. 
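 *
 * For example (illustrative): this node unlocks a lock and frees the
 * lkb as soon as the UNLOCK_REPLY arrives, while the master had already
 * queued a GRANT or BAST for it.  When that late message arrives,
 * find_lkb() returns -ENOENT; since grant, bast and cancel set 'noent'
 * in the switch above, the miss is logged at debug level rather than
 * as an error.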
*/ if (error == -ENOENT && noent) { log_debug(ls, "receive %d no %x remote %d %x saved_seq %u", le32_to_cpu(ms->m_type), le32_to_cpu(ms->m_remid), le32_to_cpu(ms->m_header.h_nodeid), le32_to_cpu(ms->m_lkid), saved_seq); } else if (error == -ENOENT) { log_error(ls, "receive %d no %x remote %d %x saved_seq %u", le32_to_cpu(ms->m_type), le32_to_cpu(ms->m_remid), le32_to_cpu(ms->m_header.h_nodeid), le32_to_cpu(ms->m_lkid), saved_seq); if (ms->m_type == cpu_to_le32(DLM_MSG_CONVERT)) dlm_dump_rsb_hash(ls, le32_to_cpu(ms->m_hash)); } if (error == -EINVAL) { log_error(ls, "receive %d inval from %d lkid %x remid %x " "saved_seq %u", le32_to_cpu(ms->m_type), le32_to_cpu(ms->m_header.h_nodeid), le32_to_cpu(ms->m_lkid), le32_to_cpu(ms->m_remid), saved_seq); } } /* If the lockspace is in recovery mode (locking stopped), then normal messages are saved on the requestqueue for processing after recovery is done. When not in recovery mode, we wait for dlm_recoverd to drain saved messages off the requestqueue before we process new ones. This occurs right after recovery completes when we transition from saving all messages on requestqueue, to processing all the saved messages, to processing new messages as they arrive. */ static void dlm_receive_message(struct dlm_ls *ls, const struct dlm_message *ms, int nodeid) { if (dlm_locking_stopped(ls)) { /* If we were a member of this lockspace, left, and rejoined, other nodes may still be sending us messages from the lockspace generation before we left. */ if (WARN_ON_ONCE(!ls->ls_generation)) { log_limit(ls, "receive %d from %d ignore old gen", le32_to_cpu(ms->m_type), nodeid); return; } dlm_add_requestqueue(ls, nodeid, ms); } else { dlm_wait_requestqueue(ls); _receive_message(ls, ms, 0); } } /* This is called by dlm_recoverd to process messages that were saved on the requestqueue. */ void dlm_receive_message_saved(struct dlm_ls *ls, const struct dlm_message *ms, uint32_t saved_seq) { _receive_message(ls, ms, saved_seq); } /* This is called by the midcomms layer when something is received for the lockspace. It could be either a MSG (normal message sent as part of standard locking activity) or an RCOM (recovery message sent as part of lockspace recovery). 
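
   In outline (a sketch of the function below): the packet header's
   h_cmd selects the path - DLM_MSG goes to dlm_receive_message(), and
   from there either straight into _receive_message() or onto the
   requestqueue while locking is stopped, while DLM_RCOM goes to
   dlm_receive_rcom(); ls_recv_active is read-held across either so
   dlm_ls_stop() can wait out in-flight receives before recovery starts.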
*/ void dlm_receive_buffer(const union dlm_packet *p, int nodeid) { const struct dlm_header *hd = &p->header; struct dlm_ls *ls; int type = 0; switch (hd->h_cmd) { case DLM_MSG: type = le32_to_cpu(p->message.m_type); break; case DLM_RCOM: type = le32_to_cpu(p->rcom.rc_type); break; default: log_print("invalid h_cmd %d from %u", hd->h_cmd, nodeid); return; } if (le32_to_cpu(hd->h_nodeid) != nodeid) { log_print("invalid h_nodeid %d from %d lockspace %x", le32_to_cpu(hd->h_nodeid), nodeid, le32_to_cpu(hd->u.h_lockspace)); return; } ls = dlm_find_lockspace_global(le32_to_cpu(hd->u.h_lockspace)); if (!ls) { if (dlm_config.ci_log_debug) { printk_ratelimited(KERN_DEBUG "dlm: invalid lockspace " "%u from %d cmd %d type %d\n", le32_to_cpu(hd->u.h_lockspace), nodeid, hd->h_cmd, type); } if (hd->h_cmd == DLM_RCOM && type == DLM_RCOM_STATUS) dlm_send_ls_not_ready(nodeid, &p->rcom); return; } /* this rwsem allows dlm_ls_stop() to wait for all dlm_recv threads to be inactive (in this ls) before transitioning to recovery mode */ down_read(&ls->ls_recv_active); if (hd->h_cmd == DLM_MSG) dlm_receive_message(ls, &p->message, nodeid); else if (hd->h_cmd == DLM_RCOM) dlm_receive_rcom(ls, &p->rcom, nodeid); else log_error(ls, "invalid h_cmd %d from %d lockspace %x", hd->h_cmd, nodeid, le32_to_cpu(hd->u.h_lockspace)); up_read(&ls->ls_recv_active); dlm_put_lockspace(ls); } static void recover_convert_waiter(struct dlm_ls *ls, struct dlm_lkb *lkb, struct dlm_message *ms_local) { if (middle_conversion(lkb)) { hold_lkb(lkb); memset(ms_local, 0, sizeof(struct dlm_message)); ms_local->m_type = cpu_to_le32(DLM_MSG_CONVERT_REPLY); ms_local->m_result = cpu_to_le32(to_dlm_errno(-EINPROGRESS)); ms_local->m_header.h_nodeid = cpu_to_le32(lkb->lkb_nodeid); _receive_convert_reply(lkb, ms_local, true); /* Same special case as in receive_rcom_lock_args() */ lkb->lkb_grmode = DLM_LOCK_IV; rsb_set_flag(lkb->lkb_resource, RSB_RECOVER_CONVERT); unhold_lkb(lkb); } else if (lkb->lkb_rqmode >= lkb->lkb_grmode) { set_bit(DLM_IFL_RESEND_BIT, &lkb->lkb_iflags); } /* lkb->lkb_rqmode < lkb->lkb_grmode shouldn't happen since down conversions are async; there's no reply from the remote master */ } /* A waiting lkb needs recovery if the master node has failed, or the master node is changing (only when no directory is used) */ static int waiter_needs_recovery(struct dlm_ls *ls, struct dlm_lkb *lkb, int dir_nodeid) { if (dlm_no_directory(ls)) return 1; if (dlm_is_removed(ls, lkb->lkb_wait_nodeid)) return 1; return 0; } /* Recovery for locks that are waiting for replies from nodes that are now gone. We can just complete unlocks and cancels by faking a reply from the dead node. Requests and up-conversions we flag to be resent after recovery. Down-conversions can just be completed with a fake reply like unlocks. Conversions between PR and CW need special attention. 
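
   By wait type, the handling below amounts to (sketch):

	LOOKUP            mark RESEND, regardless of destination; it is
	                  re-sent by dlm_recover_waiters_post()
	REQUEST           mark RESEND
	CONVERT, PR<->CW  fake a -EINPROGRESS CONVERT_REPLY, reset grmode
	                  to IV and set RSB_RECOVER_CONVERT so the real
	                  granted mode is settled during recovery
	CONVERT, upward   mark RESEND
	UNLOCK            fake an UNLOCK_REPLY carrying -DLM_EUNLOCK
	CANCEL            fake a CANCEL_REPLY carrying -DLM_ECANCEL

   (Overlapped unlock/cancel can adjust the faked result, see the code;
   recover_convert_waiter() above notes that a down-conversion never
   waits for a reply, so it does not normally show up here at all.)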
*/ void dlm_recover_waiters_pre(struct dlm_ls *ls) { struct dlm_lkb *lkb, *safe; struct dlm_message *ms_local; int wait_type, local_unlock_result, local_cancel_result; int dir_nodeid; ms_local = kmalloc(sizeof(*ms_local), GFP_KERNEL); if (!ms_local) return; mutex_lock(&ls->ls_waiters_mutex); list_for_each_entry_safe(lkb, safe, &ls->ls_waiters, lkb_wait_reply) { dir_nodeid = dlm_dir_nodeid(lkb->lkb_resource); /* exclude debug messages about unlocks because there can be so many and they aren't very interesting */ if (lkb->lkb_wait_type != DLM_MSG_UNLOCK) { log_debug(ls, "waiter %x remote %x msg %d r_nodeid %d " "lkb_nodeid %d wait_nodeid %d dir_nodeid %d", lkb->lkb_id, lkb->lkb_remid, lkb->lkb_wait_type, lkb->lkb_resource->res_nodeid, lkb->lkb_nodeid, lkb->lkb_wait_nodeid, dir_nodeid); } /* all outstanding lookups, regardless of destination will be resent after recovery is done */ if (lkb->lkb_wait_type == DLM_MSG_LOOKUP) { set_bit(DLM_IFL_RESEND_BIT, &lkb->lkb_iflags); continue; } if (!waiter_needs_recovery(ls, lkb, dir_nodeid)) continue; wait_type = lkb->lkb_wait_type; local_unlock_result = -DLM_EUNLOCK; local_cancel_result = -DLM_ECANCEL; /* Main reply may have been received leaving a zero wait_type, but a reply for the overlapping op may not have been received. In that case we need to fake the appropriate reply for the overlap op. */ if (!wait_type) { if (is_overlap_cancel(lkb)) { wait_type = DLM_MSG_CANCEL; if (lkb->lkb_grmode == DLM_LOCK_IV) local_cancel_result = 0; } if (is_overlap_unlock(lkb)) { wait_type = DLM_MSG_UNLOCK; if (lkb->lkb_grmode == DLM_LOCK_IV) local_unlock_result = -ENOENT; } log_debug(ls, "rwpre overlap %x %x %d %d %d", lkb->lkb_id, dlm_iflags_val(lkb), wait_type, local_cancel_result, local_unlock_result); } switch (wait_type) { case DLM_MSG_REQUEST: set_bit(DLM_IFL_RESEND_BIT, &lkb->lkb_iflags); break; case DLM_MSG_CONVERT: recover_convert_waiter(ls, lkb, ms_local); break; case DLM_MSG_UNLOCK: hold_lkb(lkb); memset(ms_local, 0, sizeof(struct dlm_message)); ms_local->m_type = cpu_to_le32(DLM_MSG_UNLOCK_REPLY); ms_local->m_result = cpu_to_le32(to_dlm_errno(local_unlock_result)); ms_local->m_header.h_nodeid = cpu_to_le32(lkb->lkb_nodeid); _receive_unlock_reply(lkb, ms_local, true); dlm_put_lkb(lkb); break; case DLM_MSG_CANCEL: hold_lkb(lkb); memset(ms_local, 0, sizeof(struct dlm_message)); ms_local->m_type = cpu_to_le32(DLM_MSG_CANCEL_REPLY); ms_local->m_result = cpu_to_le32(to_dlm_errno(local_cancel_result)); ms_local->m_header.h_nodeid = cpu_to_le32(lkb->lkb_nodeid); _receive_cancel_reply(lkb, ms_local, true); dlm_put_lkb(lkb); break; default: log_error(ls, "invalid lkb wait_type %d %d", lkb->lkb_wait_type, wait_type); } schedule(); } mutex_unlock(&ls->ls_waiters_mutex); kfree(ms_local); } static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls) { struct dlm_lkb *lkb = NULL, *iter; mutex_lock(&ls->ls_waiters_mutex); list_for_each_entry(iter, &ls->ls_waiters, lkb_wait_reply) { if (test_bit(DLM_IFL_RESEND_BIT, &iter->lkb_iflags)) { hold_lkb(iter); lkb = iter; break; } } mutex_unlock(&ls->ls_waiters_mutex); return lkb; } /* Deal with lookups and lkb's marked RESEND from _pre. We may now be the master or dir-node for r. Processing the lkb may result in it being placed back on waiters. */ /* We do this after normal locking has been enabled and any saved messages (in requestqueue) have been processed. We should be confident that at this point we won't get or process a reply to any of these waiting operations. 
But, new ops may be coming in on the rsbs/locks here from userspace or remotely. */ /* there may have been an overlap unlock/cancel prior to recovery or after recovery. if before, the lkb may still have a pos wait_count; if after, the overlap flag would just have been set and nothing new sent. we can be confident here than any replies to either the initial op or overlap ops prior to recovery have been received. */ int dlm_recover_waiters_post(struct dlm_ls *ls) { struct dlm_lkb *lkb; struct dlm_rsb *r; int error = 0, mstype, err, oc, ou; while (1) { if (dlm_locking_stopped(ls)) { log_debug(ls, "recover_waiters_post aborted"); error = -EINTR; break; } lkb = find_resend_waiter(ls); if (!lkb) break; r = lkb->lkb_resource; hold_rsb(r); lock_rsb(r); mstype = lkb->lkb_wait_type; oc = test_and_clear_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags); ou = test_and_clear_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags); err = 0; log_debug(ls, "waiter %x remote %x msg %d r_nodeid %d " "lkb_nodeid %d wait_nodeid %d dir_nodeid %d " "overlap %d %d", lkb->lkb_id, lkb->lkb_remid, mstype, r->res_nodeid, lkb->lkb_nodeid, lkb->lkb_wait_nodeid, dlm_dir_nodeid(r), oc, ou); /* At this point we assume that we won't get a reply to any previous op or overlap op on this lock. First, do a big remove_from_waiters() for all previous ops. */ clear_bit(DLM_IFL_RESEND_BIT, &lkb->lkb_iflags); lkb->lkb_wait_type = 0; /* drop all wait_count references we still * hold a reference for this iteration. */ while (!atomic_dec_and_test(&lkb->lkb_wait_count)) unhold_lkb(lkb); mutex_lock(&ls->ls_waiters_mutex); list_del_init(&lkb->lkb_wait_reply); mutex_unlock(&ls->ls_waiters_mutex); if (oc || ou) { /* do an unlock or cancel instead of resending */ switch (mstype) { case DLM_MSG_LOOKUP: case DLM_MSG_REQUEST: queue_cast(r, lkb, ou ? 
-DLM_EUNLOCK : -DLM_ECANCEL); unhold_lkb(lkb); /* undoes create_lkb() */ break; case DLM_MSG_CONVERT: if (oc) { queue_cast(r, lkb, -DLM_ECANCEL); } else { lkb->lkb_exflags |= DLM_LKF_FORCEUNLOCK; _unlock_lock(r, lkb); } break; default: err = 1; } } else { switch (mstype) { case DLM_MSG_LOOKUP: case DLM_MSG_REQUEST: _request_lock(r, lkb); if (is_master(r)) confirm_master(r, 0); break; case DLM_MSG_CONVERT: _convert_lock(r, lkb); break; default: err = 1; } } if (err) { log_error(ls, "waiter %x msg %d r_nodeid %d " "dir_nodeid %d overlap %d %d", lkb->lkb_id, mstype, r->res_nodeid, dlm_dir_nodeid(r), oc, ou); } unlock_rsb(r); put_rsb(r); dlm_put_lkb(lkb); } return error; } static void purge_mstcpy_list(struct dlm_ls *ls, struct dlm_rsb *r, struct list_head *list) { struct dlm_lkb *lkb, *safe; list_for_each_entry_safe(lkb, safe, list, lkb_statequeue) { if (!is_master_copy(lkb)) continue; /* don't purge lkbs we've added in recover_master_copy for the current recovery seq */ if (lkb->lkb_recover_seq == ls->ls_recover_seq) continue; del_lkb(r, lkb); /* this put should free the lkb */ if (!dlm_put_lkb(lkb)) log_error(ls, "purged mstcpy lkb not released"); } } void dlm_purge_mstcpy_locks(struct dlm_rsb *r) { struct dlm_ls *ls = r->res_ls; purge_mstcpy_list(ls, r, &r->res_grantqueue); purge_mstcpy_list(ls, r, &r->res_convertqueue); purge_mstcpy_list(ls, r, &r->res_waitqueue); } static void purge_dead_list(struct dlm_ls *ls, struct dlm_rsb *r, struct list_head *list, int nodeid_gone, unsigned int *count) { struct dlm_lkb *lkb, *safe; list_for_each_entry_safe(lkb, safe, list, lkb_statequeue) { if (!is_master_copy(lkb)) continue; if ((lkb->lkb_nodeid == nodeid_gone) || dlm_is_removed(ls, lkb->lkb_nodeid)) { /* tell recover_lvb to invalidate the lvb because a node holding EX/PW failed */ if ((lkb->lkb_exflags & DLM_LKF_VALBLK) && (lkb->lkb_grmode >= DLM_LOCK_PW)) { rsb_set_flag(r, RSB_RECOVER_LVB_INVAL); } del_lkb(r, lkb); /* this put should free the lkb */ if (!dlm_put_lkb(lkb)) log_error(ls, "purged dead lkb not released"); rsb_set_flag(r, RSB_RECOVER_GRANT); (*count)++; } } } /* Get rid of locks held by nodes that are gone. 
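
   Example (illustrative): nodes 1, 2 and 3 share a lockspace and node 3
   dies while holding EX with a value block on an rsb mastered here.
   purge_dead_list() above drops node 3's master-copy lkb, sets
   RSB_RECOVER_LVB_INVAL because an EX/PW holder died and the lvb can no
   longer be trusted, and sets RSB_RECOVER_GRANT so dlm_recover_grant()
   below can grant whatever node 3 had been blocking.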
*/ void dlm_recover_purge(struct dlm_ls *ls) { struct dlm_rsb *r; struct dlm_member *memb; int nodes_count = 0; int nodeid_gone = 0; unsigned int lkb_count = 0; /* cache one removed nodeid to optimize the common case of a single node removed */ list_for_each_entry(memb, &ls->ls_nodes_gone, list) { nodes_count++; nodeid_gone = memb->nodeid; } if (!nodes_count) return; down_write(&ls->ls_root_sem); list_for_each_entry(r, &ls->ls_root_list, res_root_list) { hold_rsb(r); lock_rsb(r); if (is_master(r)) { purge_dead_list(ls, r, &r->res_grantqueue, nodeid_gone, &lkb_count); purge_dead_list(ls, r, &r->res_convertqueue, nodeid_gone, &lkb_count); purge_dead_list(ls, r, &r->res_waitqueue, nodeid_gone, &lkb_count); } unlock_rsb(r); unhold_rsb(r); cond_resched(); } up_write(&ls->ls_root_sem); if (lkb_count) log_rinfo(ls, "dlm_recover_purge %u locks for %u nodes", lkb_count, nodes_count); } static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls, int bucket) { struct rb_node *n; struct dlm_rsb *r; spin_lock(&ls->ls_rsbtbl[bucket].lock); for (n = rb_first(&ls->ls_rsbtbl[bucket].keep); n; n = rb_next(n)) { r = rb_entry(n, struct dlm_rsb, res_hashnode); if (!rsb_flag(r, RSB_RECOVER_GRANT)) continue; if (!is_master(r)) { rsb_clear_flag(r, RSB_RECOVER_GRANT); continue; } hold_rsb(r); spin_unlock(&ls->ls_rsbtbl[bucket].lock); return r; } spin_unlock(&ls->ls_rsbtbl[bucket].lock); return NULL; } /* * Attempt to grant locks on resources that we are the master of. * Locks may have become grantable during recovery because locks * from departed nodes have been purged (or not rebuilt), allowing * previously blocked locks to now be granted. The subset of rsb's * we are interested in are those with lkb's on either the convert or * waiting queues. * * Simplest would be to go through each master rsb and check for non-empty * convert or waiting queues, and attempt to grant on those rsbs. * Checking the queues requires lock_rsb, though, for which we'd need * to release the rsbtbl lock. This would make iterating through all * rsb's very inefficient. So, we rely on earlier recovery routines * to set RECOVER_GRANT on any rsb's that we should attempt to grant * locks for. 
*/ void dlm_recover_grant(struct dlm_ls *ls) { struct dlm_rsb *r; int bucket = 0; unsigned int count = 0; unsigned int rsb_count = 0; unsigned int lkb_count = 0; while (1) { r = find_grant_rsb(ls, bucket); if (!r) { if (bucket == ls->ls_rsbtbl_size - 1) break; bucket++; continue; } rsb_count++; count = 0; lock_rsb(r); /* the RECOVER_GRANT flag is checked in the grant path */ grant_pending_locks(r, &count); rsb_clear_flag(r, RSB_RECOVER_GRANT); lkb_count += count; confirm_master(r, 0); unlock_rsb(r); put_rsb(r); cond_resched(); } if (lkb_count) log_rinfo(ls, "dlm_recover_grant %u locks on %u resources", lkb_count, rsb_count); } static struct dlm_lkb *search_remid_list(struct list_head *head, int nodeid, uint32_t remid) { struct dlm_lkb *lkb; list_for_each_entry(lkb, head, lkb_statequeue) { if (lkb->lkb_nodeid == nodeid && lkb->lkb_remid == remid) return lkb; } return NULL; } static struct dlm_lkb *search_remid(struct dlm_rsb *r, int nodeid, uint32_t remid) { struct dlm_lkb *lkb; lkb = search_remid_list(&r->res_grantqueue, nodeid, remid); if (lkb) return lkb; lkb = search_remid_list(&r->res_convertqueue, nodeid, remid); if (lkb) return lkb; lkb = search_remid_list(&r->res_waitqueue, nodeid, remid); if (lkb) return lkb; return NULL; } /* needs at least dlm_rcom + rcom_lock */ static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb, struct dlm_rsb *r, const struct dlm_rcom *rc) { struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf; lkb->lkb_nodeid = le32_to_cpu(rc->rc_header.h_nodeid); lkb->lkb_ownpid = le32_to_cpu(rl->rl_ownpid); lkb->lkb_remid = le32_to_cpu(rl->rl_lkid); lkb->lkb_exflags = le32_to_cpu(rl->rl_exflags); dlm_set_dflags_val(lkb, le32_to_cpu(rl->rl_flags)); set_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags); lkb->lkb_lvbseq = le32_to_cpu(rl->rl_lvbseq); lkb->lkb_rqmode = rl->rl_rqmode; lkb->lkb_grmode = rl->rl_grmode; /* don't set lkb_status because add_lkb wants to itself */ lkb->lkb_bastfn = (rl->rl_asts & DLM_CB_BAST) ? &fake_bastfn : NULL; lkb->lkb_astfn = (rl->rl_asts & DLM_CB_CAST) ? &fake_astfn : NULL; if (lkb->lkb_exflags & DLM_LKF_VALBLK) { int lvblen = le16_to_cpu(rc->rc_header.h_length) - sizeof(struct dlm_rcom) - sizeof(struct rcom_lock); if (lvblen > ls->ls_lvblen) return -EINVAL; lkb->lkb_lvbptr = dlm_allocate_lvb(ls); if (!lkb->lkb_lvbptr) return -ENOMEM; memcpy(lkb->lkb_lvbptr, rl->rl_lvb, lvblen); } /* Conversions between PR and CW (middle modes) need special handling. The real granted mode of these converting locks cannot be determined until all locks have been rebuilt on the rsb (recover_conversion) */ if (rl->rl_wait_type == cpu_to_le16(DLM_MSG_CONVERT) && middle_conversion(lkb)) { rl->rl_status = DLM_LKSTS_CONVERT; lkb->lkb_grmode = DLM_LOCK_IV; rsb_set_flag(r, RSB_RECOVER_CONVERT); } return 0; } /* This lkb may have been recovered in a previous aborted recovery so we need to check if the rsb already has an lkb with the given remote nodeid/lkid. If so we just send back a standard reply. If not, we create a new lkb with the given values and send back our lkid. We send back our lkid by sending back the rcom_lock struct we got but with the remid field filled in. 
*/ /* needs at least dlm_rcom + rcom_lock */ int dlm_recover_master_copy(struct dlm_ls *ls, const struct dlm_rcom *rc, __le32 *rl_remid, __le32 *rl_result) { struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf; struct dlm_rsb *r; struct dlm_lkb *lkb; uint32_t remid = 0; int from_nodeid = le32_to_cpu(rc->rc_header.h_nodeid); int error; /* init rl_remid with rcom lock rl_remid */ *rl_remid = rl->rl_remid; if (rl->rl_parent_lkid) { error = -EOPNOTSUPP; goto out; } remid = le32_to_cpu(rl->rl_lkid); /* In general we expect the rsb returned to be R_MASTER, but we don't have to require it. Recovery of masters on one node can overlap recovery of locks on another node, so one node can send us MSTCPY locks before we've made ourselves master of this rsb. We can still add new MSTCPY locks that we receive here without any harm; when we make ourselves master, dlm_recover_masters() won't touch the MSTCPY locks we've received early. */ error = find_rsb(ls, rl->rl_name, le16_to_cpu(rl->rl_namelen), from_nodeid, R_RECEIVE_RECOVER, &r); if (error) goto out; lock_rsb(r); if (dlm_no_directory(ls) && (dlm_dir_nodeid(r) != dlm_our_nodeid())) { log_error(ls, "dlm_recover_master_copy remote %d %x not dir", from_nodeid, remid); error = -EBADR; goto out_unlock; } lkb = search_remid(r, from_nodeid, remid); if (lkb) { error = -EEXIST; goto out_remid; } error = create_lkb(ls, &lkb); if (error) goto out_unlock; error = receive_rcom_lock_args(ls, lkb, r, rc); if (error) { __put_lkb(ls, lkb); goto out_unlock; } attach_lkb(r, lkb); add_lkb(r, lkb, rl->rl_status); ls->ls_recover_locks_in++; if (!list_empty(&r->res_waitqueue) || !list_empty(&r->res_convertqueue)) rsb_set_flag(r, RSB_RECOVER_GRANT); out_remid: /* this is the new value returned to the lock holder for saving in its process-copy lkb */ *rl_remid = cpu_to_le32(lkb->lkb_id); lkb->lkb_recover_seq = ls->ls_recover_seq; out_unlock: unlock_rsb(r); put_rsb(r); out: if (error && error != -EEXIST) log_rinfo(ls, "dlm_recover_master_copy remote %d %x error %d", from_nodeid, remid, error); *rl_result = cpu_to_le32(error); return error; } /* needs at least dlm_rcom + rcom_lock */ int dlm_recover_process_copy(struct dlm_ls *ls, const struct dlm_rcom *rc, uint64_t seq) { struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf; struct dlm_rsb *r; struct dlm_lkb *lkb; uint32_t lkid, remid; int error, result; lkid = le32_to_cpu(rl->rl_lkid); remid = le32_to_cpu(rl->rl_remid); result = le32_to_cpu(rl->rl_result); error = find_lkb(ls, lkid, &lkb); if (error) { log_error(ls, "dlm_recover_process_copy no %x remote %d %x %d", lkid, le32_to_cpu(rc->rc_header.h_nodeid), remid, result); return error; } r = lkb->lkb_resource; hold_rsb(r); lock_rsb(r); if (!is_process_copy(lkb)) { log_error(ls, "dlm_recover_process_copy bad %x remote %d %x %d", lkid, le32_to_cpu(rc->rc_header.h_nodeid), remid, result); dlm_dump_rsb(r); unlock_rsb(r); put_rsb(r); dlm_put_lkb(lkb); return -EINVAL; } switch (result) { case -EBADR: /* There's a chance the new master received our lock before dlm_recover_master_reply(), this wouldn't happen if we did a barrier between recover_masters and recover_locks. 
*/ log_debug(ls, "dlm_recover_process_copy %x remote %d %x %d", lkid, le32_to_cpu(rc->rc_header.h_nodeid), remid, result); dlm_send_rcom_lock(r, lkb, seq); goto out; case -EEXIST: case 0: lkb->lkb_remid = remid; break; default: log_error(ls, "dlm_recover_process_copy %x remote %d %x %d unk", lkid, le32_to_cpu(rc->rc_header.h_nodeid), remid, result); } /* an ack for dlm_recover_locks() which waits for replies from all the locks it sends to new masters */ dlm_recovered_lock(r); out: unlock_rsb(r); put_rsb(r); dlm_put_lkb(lkb); return 0; } int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua, int mode, uint32_t flags, void *name, unsigned int namelen) { struct dlm_lkb *lkb; struct dlm_args args; bool do_put = true; int error; dlm_lock_recovery(ls); error = create_lkb(ls, &lkb); if (error) { kfree(ua); goto out; } trace_dlm_lock_start(ls, lkb, name, namelen, mode, flags); if (flags & DLM_LKF_VALBLK) { ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS); if (!ua->lksb.sb_lvbptr) { kfree(ua); error = -ENOMEM; goto out_put; } } error = set_lock_args(mode, &ua->lksb, flags, namelen, fake_astfn, ua, fake_bastfn, &args); if (error) { kfree(ua->lksb.sb_lvbptr); ua->lksb.sb_lvbptr = NULL; kfree(ua); goto out_put; } /* After ua is attached to lkb it will be freed by dlm_free_lkb(). When DLM_DFL_USER_BIT is set, the dlm knows that this is a userspace lock and that lkb_astparam is the dlm_user_args structure. */ set_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags); error = request_lock(ls, lkb, name, namelen, &args); switch (error) { case 0: break; case -EINPROGRESS: error = 0; break; case -EAGAIN: error = 0; fallthrough; default: goto out_put; } /* add this new lkb to the per-process list of locks */ spin_lock(&ua->proc->locks_spin); hold_lkb(lkb); list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks); spin_unlock(&ua->proc->locks_spin); do_put = false; out_put: trace_dlm_lock_end(ls, lkb, name, namelen, mode, flags, error, false); if (do_put) __put_lkb(ls, lkb); out: dlm_unlock_recovery(ls); return error; } int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp, int mode, uint32_t flags, uint32_t lkid, char *lvb_in) { struct dlm_lkb *lkb; struct dlm_args args; struct dlm_user_args *ua; int error; dlm_lock_recovery(ls); error = find_lkb(ls, lkid, &lkb); if (error) goto out; trace_dlm_lock_start(ls, lkb, NULL, 0, mode, flags); /* user can change the params on its lock when it converts it, or add an lvb that didn't exist before */ ua = lkb->lkb_ua; if (flags & DLM_LKF_VALBLK && !ua->lksb.sb_lvbptr) { ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS); if (!ua->lksb.sb_lvbptr) { error = -ENOMEM; goto out_put; } } if (lvb_in && ua->lksb.sb_lvbptr) memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN); ua->xid = ua_tmp->xid; ua->castparam = ua_tmp->castparam; ua->castaddr = ua_tmp->castaddr; ua->bastparam = ua_tmp->bastparam; ua->bastaddr = ua_tmp->bastaddr; ua->user_lksb = ua_tmp->user_lksb; error = set_lock_args(mode, &ua->lksb, flags, 0, fake_astfn, ua, fake_bastfn, &args); if (error) goto out_put; error = convert_lock(ls, lkb, &args); if (error == -EINPROGRESS || error == -EAGAIN || error == -EDEADLK) error = 0; out_put: trace_dlm_lock_end(ls, lkb, NULL, 0, mode, flags, error, false); dlm_put_lkb(lkb); out: dlm_unlock_recovery(ls); kfree(ua_tmp); return error; } /* * The caller asks for an orphan lock on a given resource with a given mode. * If a matching lock exists, it's moved to the owner's list of locks and * the lkid is returned. 
*/ int dlm_user_adopt_orphan(struct dlm_ls *ls, struct dlm_user_args *ua_tmp, int mode, uint32_t flags, void *name, unsigned int namelen, uint32_t *lkid) { struct dlm_lkb *lkb = NULL, *iter; struct dlm_user_args *ua; int found_other_mode = 0; int rv = 0; mutex_lock(&ls->ls_orphans_mutex); list_for_each_entry(iter, &ls->ls_orphans, lkb_ownqueue) { if (iter->lkb_resource->res_length != namelen) continue; if (memcmp(iter->lkb_resource->res_name, name, namelen)) continue; if (iter->lkb_grmode != mode) { found_other_mode = 1; continue; } lkb = iter; list_del_init(&iter->lkb_ownqueue); clear_bit(DLM_DFL_ORPHAN_BIT, &iter->lkb_dflags); *lkid = iter->lkb_id; break; } mutex_unlock(&ls->ls_orphans_mutex); if (!lkb && found_other_mode) { rv = -EAGAIN; goto out; } if (!lkb) { rv = -ENOENT; goto out; } lkb->lkb_exflags = flags; lkb->lkb_ownpid = (int) current->pid; ua = lkb->lkb_ua; ua->proc = ua_tmp->proc; ua->xid = ua_tmp->xid; ua->castparam = ua_tmp->castparam; ua->castaddr = ua_tmp->castaddr; ua->bastparam = ua_tmp->bastparam; ua->bastaddr = ua_tmp->bastaddr; ua->user_lksb = ua_tmp->user_lksb; /* * The lkb reference from the ls_orphans list was not * removed above, and is now considered the reference * for the proc locks list. */ spin_lock(&ua->proc->locks_spin); list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks); spin_unlock(&ua->proc->locks_spin); out: kfree(ua_tmp); return rv; } int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp, uint32_t flags, uint32_t lkid, char *lvb_in) { struct dlm_lkb *lkb; struct dlm_args args; struct dlm_user_args *ua; int error; dlm_lock_recovery(ls); error = find_lkb(ls, lkid, &lkb); if (error) goto out; trace_dlm_unlock_start(ls, lkb, flags); ua = lkb->lkb_ua; if (lvb_in && ua->lksb.sb_lvbptr) memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN); if (ua_tmp->castparam) ua->castparam = ua_tmp->castparam; ua->user_lksb = ua_tmp->user_lksb; error = set_unlock_args(flags, ua, &args); if (error) goto out_put; error = unlock_lock(ls, lkb, &args); if (error == -DLM_EUNLOCK) error = 0; /* from validate_unlock_args() */ if (error == -EBUSY && (flags & DLM_LKF_FORCEUNLOCK)) error = 0; if (error) goto out_put; spin_lock(&ua->proc->locks_spin); /* dlm_user_add_cb() may have already taken lkb off the proc list */ if (!list_empty(&lkb->lkb_ownqueue)) list_move(&lkb->lkb_ownqueue, &ua->proc->unlocking); spin_unlock(&ua->proc->locks_spin); out_put: trace_dlm_unlock_end(ls, lkb, flags, error); dlm_put_lkb(lkb); out: dlm_unlock_recovery(ls); kfree(ua_tmp); return error; } int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp, uint32_t flags, uint32_t lkid) { struct dlm_lkb *lkb; struct dlm_args args; struct dlm_user_args *ua; int error; dlm_lock_recovery(ls); error = find_lkb(ls, lkid, &lkb); if (error) goto out; trace_dlm_unlock_start(ls, lkb, flags); ua = lkb->lkb_ua; if (ua_tmp->castparam) ua->castparam = ua_tmp->castparam; ua->user_lksb = ua_tmp->user_lksb; error = set_unlock_args(flags, ua, &args); if (error) goto out_put; error = cancel_lock(ls, lkb, &args); if (error == -DLM_ECANCEL) error = 0; /* from validate_unlock_args() */ if (error == -EBUSY) error = 0; out_put: trace_dlm_unlock_end(ls, lkb, flags, error); dlm_put_lkb(lkb); out: dlm_unlock_recovery(ls); kfree(ua_tmp); return error; } int dlm_user_deadlock(struct dlm_ls *ls, uint32_t flags, uint32_t lkid) { struct dlm_lkb *lkb; struct dlm_args args; struct dlm_user_args *ua; struct dlm_rsb *r; int error; dlm_lock_recovery(ls); error = find_lkb(ls, lkid, &lkb); if (error) goto out; 
trace_dlm_unlock_start(ls, lkb, flags); ua = lkb->lkb_ua; error = set_unlock_args(flags, ua, &args); if (error) goto out_put; /* same as cancel_lock(), but set DEADLOCK_CANCEL after lock_rsb */ r = lkb->lkb_resource; hold_rsb(r); lock_rsb(r); error = validate_unlock_args(lkb, &args); if (error) goto out_r; set_bit(DLM_IFL_DEADLOCK_CANCEL_BIT, &lkb->lkb_iflags); error = _cancel_lock(r, lkb); out_r: unlock_rsb(r); put_rsb(r); if (error == -DLM_ECANCEL) error = 0; /* from validate_unlock_args() */ if (error == -EBUSY) error = 0; out_put: trace_dlm_unlock_end(ls, lkb, flags, error); dlm_put_lkb(lkb); out: dlm_unlock_recovery(ls); return error; } /* lkb's that are removed from the waiters list by revert are just left on the orphans list with the granted orphan locks, to be freed by purge */ static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb) { struct dlm_args args; int error; hold_lkb(lkb); /* reference for the ls_orphans list */ mutex_lock(&ls->ls_orphans_mutex); list_add_tail(&lkb->lkb_ownqueue, &ls->ls_orphans); mutex_unlock(&ls->ls_orphans_mutex); set_unlock_args(0, lkb->lkb_ua, &args); error = cancel_lock(ls, lkb, &args); if (error == -DLM_ECANCEL) error = 0; return error; } /* The FORCEUNLOCK flag allows the unlock to go ahead even if the lkb isn't granted. Regardless of what rsb queue the lock is on, it's removed and freed. The IVVALBLK flag causes the lvb on the resource to be invalidated if our lock is PW/EX (it's ignored if our granted mode is smaller.) */ static int unlock_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb) { struct dlm_args args; int error; set_unlock_args(DLM_LKF_FORCEUNLOCK | DLM_LKF_IVVALBLK, lkb->lkb_ua, &args); error = unlock_lock(ls, lkb, &args); if (error == -DLM_EUNLOCK) error = 0; return error; } /* We have to release clear_proc_locks mutex before calling unlock_proc_lock() (which does lock_rsb) due to deadlock with receiving a message that does lock_rsb followed by dlm_user_add_cb() */ static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls, struct dlm_user_proc *proc) { struct dlm_lkb *lkb = NULL; spin_lock(&ls->ls_clear_proc_locks); if (list_empty(&proc->locks)) goto out; lkb = list_entry(proc->locks.next, struct dlm_lkb, lkb_ownqueue); list_del_init(&lkb->lkb_ownqueue); if (lkb->lkb_exflags & DLM_LKF_PERSISTENT) set_bit(DLM_DFL_ORPHAN_BIT, &lkb->lkb_dflags); else set_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags); out: spin_unlock(&ls->ls_clear_proc_locks); return lkb; } /* The ls_clear_proc_locks mutex protects against dlm_user_add_cb() which 1) references lkb->ua which we free here and 2) adds lkbs to proc->asts, which we clear here. */ /* proc CLOSING flag is set so no more device_reads should look at proc->asts list, and no more device_writes should add lkb's to proc->locks list; so we shouldn't need to take asts_spin or locks_spin here. this assumes that device reads/writes/closes are serialized -- FIXME: we may need to serialize them ourself. 
*/ void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc) { struct dlm_lkb *lkb, *safe; dlm_lock_recovery(ls); while (1) { lkb = del_proc_lock(ls, proc); if (!lkb) break; if (lkb->lkb_exflags & DLM_LKF_PERSISTENT) orphan_proc_lock(ls, lkb); else unlock_proc_lock(ls, lkb); /* this removes the reference for the proc->locks list added by dlm_user_request, it may result in the lkb being freed */ dlm_put_lkb(lkb); } spin_lock(&ls->ls_clear_proc_locks); /* in-progress unlocks */ list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) { list_del_init(&lkb->lkb_ownqueue); set_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags); dlm_put_lkb(lkb); } list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) { dlm_purge_lkb_callbacks(lkb); list_del_init(&lkb->lkb_cb_list); dlm_put_lkb(lkb); } spin_unlock(&ls->ls_clear_proc_locks); dlm_unlock_recovery(ls); } static void purge_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc) { struct dlm_lkb *lkb, *safe; while (1) { lkb = NULL; spin_lock(&proc->locks_spin); if (!list_empty(&proc->locks)) { lkb = list_entry(proc->locks.next, struct dlm_lkb, lkb_ownqueue); list_del_init(&lkb->lkb_ownqueue); } spin_unlock(&proc->locks_spin); if (!lkb) break; set_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags); unlock_proc_lock(ls, lkb); dlm_put_lkb(lkb); /* ref from proc->locks list */ } spin_lock(&proc->locks_spin); list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) { list_del_init(&lkb->lkb_ownqueue); set_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags); dlm_put_lkb(lkb); } spin_unlock(&proc->locks_spin); spin_lock(&proc->asts_spin); list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) { dlm_purge_lkb_callbacks(lkb); list_del_init(&lkb->lkb_cb_list); dlm_put_lkb(lkb); } spin_unlock(&proc->asts_spin); } /* pid of 0 means purge all orphans */ static void do_purge(struct dlm_ls *ls, int nodeid, int pid) { struct dlm_lkb *lkb, *safe; mutex_lock(&ls->ls_orphans_mutex); list_for_each_entry_safe(lkb, safe, &ls->ls_orphans, lkb_ownqueue) { if (pid && lkb->lkb_ownpid != pid) continue; unlock_proc_lock(ls, lkb); list_del_init(&lkb->lkb_ownqueue); dlm_put_lkb(lkb); } mutex_unlock(&ls->ls_orphans_mutex); } static int send_purge(struct dlm_ls *ls, int nodeid, int pid) { struct dlm_message *ms; struct dlm_mhandle *mh; int error; error = _create_message(ls, sizeof(struct dlm_message), nodeid, DLM_MSG_PURGE, &ms, &mh, GFP_NOFS); if (error) return error; ms->m_nodeid = cpu_to_le32(nodeid); ms->m_pid = cpu_to_le32(pid); return send_message(mh, ms, NULL, 0); } int dlm_user_purge(struct dlm_ls *ls, struct dlm_user_proc *proc, int nodeid, int pid) { int error = 0; if (nodeid && (nodeid != dlm_our_nodeid())) { error = send_purge(ls, nodeid, pid); } else { dlm_lock_recovery(ls); if (pid == current->pid) purge_proc_locks(ls, proc); else do_purge(ls, nodeid, pid); dlm_unlock_recovery(ls); } return error; } /* debug functionality */ int dlm_debug_add_lkb(struct dlm_ls *ls, uint32_t lkb_id, char *name, int len, int lkb_nodeid, unsigned int lkb_dflags, int lkb_status) { struct dlm_lksb *lksb; struct dlm_lkb *lkb; struct dlm_rsb *r; int error; /* we currently can't set a valid user lock */ if (lkb_dflags & BIT(DLM_DFL_USER_BIT)) return -EOPNOTSUPP; lksb = kzalloc(sizeof(*lksb), GFP_NOFS); if (!lksb) return -ENOMEM; error = _create_lkb(ls, &lkb, lkb_id, lkb_id + 1); if (error) { kfree(lksb); return error; } dlm_set_dflags_val(lkb, lkb_dflags); lkb->lkb_nodeid = lkb_nodeid; lkb->lkb_lksb = lksb; /* user specific pointer, just don't have it NULL for kernel locks 
*/ if (~lkb_dflags & BIT(DLM_DFL_USER_BIT)) lkb->lkb_astparam = (void *)0xDEADBEEF; error = find_rsb(ls, name, len, 0, R_REQUEST, &r); if (error) { kfree(lksb); __put_lkb(ls, lkb); return error; } lock_rsb(r); attach_lkb(r, lkb); add_lkb(r, lkb, lkb_status); unlock_rsb(r); put_rsb(r); return 0; } int dlm_debug_add_lkb_to_waiters(struct dlm_ls *ls, uint32_t lkb_id, int mstype, int to_nodeid) { struct dlm_lkb *lkb; int error; error = find_lkb(ls, lkb_id, &lkb); if (error) return error; error = add_to_waiters(lkb, mstype, to_nodeid); dlm_put_lkb(lkb); return error; }
/* source file: fs/dlm/lock.c (repo: linux-master) */
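The recovery path above (fs/dlm/lock.c) relies on a two-pass idea spelled out in the dlm_recover_grant() comment: earlier recovery routines merely set RSB_RECOVER_GRANT on candidate resources while the cheap hash-table lock is held, and dlm_recover_grant() later revisits only the flagged resources, taking the per-resource lock outside the table lock. The following standalone sketch models that idea in plain userspace C; the toy_* names and helpers are invented for illustration and are not the kernel's API.

/* Illustrative userspace model only -- not kernel code. */
#include <stdbool.h>
#include <stdio.h>

#define TOY_NUM_RSB 4

struct toy_rsb {
	int id;
	int waiters;		/* locks blocked on this resource */
	bool recover_grant;	/* stands in for RSB_RECOVER_GRANT */
};

static struct toy_rsb toy_table[TOY_NUM_RSB] = {
	{ .id = 0, .waiters = 0 },
	{ .id = 1, .waiters = 2 },
	{ .id = 2, .waiters = 0 },
	{ .id = 3, .waiters = 1 },
};

/* Pass 1: stands in for the earlier recovery routines, which only set a
 * flag while they already hold the (cheap) table lock. */
static void toy_mark_grantable(void)
{
	for (int i = 0; i < TOY_NUM_RSB; i++)
		if (toy_table[i].waiters)
			toy_table[i].recover_grant = true;
}

/* Pass 2: visit only the flagged resources and do the expensive work
 * (in the kernel: lock_rsb() plus grant_pending_locks()). */
static void toy_grant_marked(void)
{
	for (int i = 0; i < TOY_NUM_RSB; i++) {
		if (!toy_table[i].recover_grant)
			continue;
		toy_table[i].recover_grant = false;
		printf("granting %d waiter(s) on rsb %d\n",
		       toy_table[i].waiters, toy_table[i].id);
		toy_table[i].waiters = 0;
	}
}

int main(void)
{
	toy_mark_grantable();
	toy_grant_marked();
	return 0;
}

The point mirrored here is the one the kernel comment makes: inspecting a resource's queues needs the per-resource lock, so the scan pass only sets a flag and the expensive work is deferred to a second pass that can take that lock safely.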
// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2021 Red Hat, Inc.  All rights reserved.
**
**
*******************************************************************************
******************************************************************************/
/*
 * midcomms.c
 *
 * This is the appallingly named "mid-level" comms layer. It provides
 * "reliable", application-layer delivery of DLM messages on top of the
 * lowcomms transport layer.
 *
 * How it works:
 *
 * Each node keeps track of all sent DLM messages in send_queue, tagged with
 * a sequence number. The receiver sends a DLM_ACK message back for every DLM
 * message it receives. If a reconnect happens in lowcomms, we resend all
 * unacknowledged DLM messages; the receiving side drops any message it has
 * already seen by comparing sequence numbers.
 *
 * How version detection works:
 *
 * Because DLM has pre-configured node addresses on every side, it is in its
 * nature that both sides connect at startup and transmit DLM messages, which
 * ends in a race. However, DLM_RCOM_NAMES, DLM_RCOM_STATUS and their replies
 * are the first messages exchanged. For backwards compatibility these
 * messages are not covered by the midcomms retransmission layer; they have
 * their own retransmission handling in the DLM application layer. The
 * version field of a node is set as soon as one of these RCOM messages
 * arrives and the node is not yet part of the node hash. There is also logic
 * to detect a version mismatch if something odd is going on or the first
 * message is not the expected one.
 *
 * Termination:
 *
 * The midcomms layer does a 4-way handshake for termination on the DLM
 * protocol, like TCP does with its half-closed socket support. SCTP does not
 * support half-closed sockets, so we do it at the DLM layer. A socket
 * shutdown() can also be interrupted, e.g. by a TCP reset itself. In
 * addition, there is the othercon paradigm in lowcomms which cannot easily
 * be changed without breaking backwards compatibility. A node must not send
 * anything to another node once a DLM_FIN message has been sent; there is
 * additional logic to print a warning if DLM tries to do so. The state
 * handling is like RFC 793, but reduced to termination only. The "member
 * removal event" means the cluster manager removed the node from its
 * internal lists; at this point DLM does not send any message to the other
 * node. There are two cases:
 *
 * 1. The cluster member was removed and we received a FIN
 * OR
 * 2. We received a FIN but the member was not removed yet
 *
 * One of these cases will do the CLOSE_WAIT to LAST_ACK change.
* * * +---------+ * | CLOSED | * +---------+ * | add member/receive RCOM version * | detection msg * V * +---------+ * | ESTAB | * +---------+ * CLOSE | | rcv FIN * ------- | | ------- * +---------+ snd FIN / \ snd ACK +---------+ * | FIN |<----------------- ------------------>| CLOSE | * | WAIT-1 |------------------ | WAIT | * +---------+ rcv FIN \ +---------+ * | rcv ACK of FIN ------- | CLOSE | member * | -------------- snd ACK | ------- | removal * V x V snd FIN V event * +---------+ +---------+ +---------+ * |FINWAIT-2| | CLOSING | | LAST-ACK| * +---------+ +---------+ +---------+ * | rcv ACK of FIN | rcv ACK of FIN | * | rcv FIN -------------- | -------------- | * | ------- x V x V * \ snd ACK +---------+ +---------+ * ------------------------>| CLOSED | | CLOSED | * +---------+ +---------+ * * NOTE: any state can interrupted by midcomms_close() and state will be * switched to CLOSED in case of fencing. There exists also some timeout * handling when we receive the version detection RCOM messages which is * made by observation. * * Future improvements: * * There exists some known issues/improvements of the dlm handling. Some * of them should be done in a next major dlm version bump which makes * it incompatible with previous versions. * * Unaligned memory access: * * There exists cases when the dlm message buffer length is not aligned * to 8 byte. However seems nobody detected any problem with it. This * can be fixed in the next major version bump of dlm. * * Version detection: * * The version detection and how it's done is related to backwards * compatibility. There exists better ways to make a better handling. * However this should be changed in the next major version bump of dlm. * * Tail Size checking: * * There exists a message tail payload in e.g. DLM_MSG however we don't * check it against the message length yet regarding to the receive buffer * length. That need to be validated. * * Fencing bad nodes: * * At timeout places or weird sequence number behaviours we should send * a fencing request to the cluster manager. */ /* Debug switch to enable a 5 seconds sleep waiting of a termination. * This can be useful to test fencing while termination is running. * This requires a setup with only gfs2 as dlm user, so that the * last umount will terminate the connection. * * However it became useful to test, while the 5 seconds block in umount * just press the reset button. In a lot of dropping the termination * process can could take several seconds. */ #define DLM_DEBUG_FENCE_TERMINATION 0 #include <trace/events/dlm.h> #include <net/tcp.h> #include "dlm_internal.h" #include "lowcomms.h" #include "config.h" #include "memory.h" #include "lock.h" #include "util.h" #include "midcomms.h" /* init value for sequence numbers for testing purpose only e.g. overflows */ #define DLM_SEQ_INIT 0 /* 5 seconds wait to sync ending of dlm */ #define DLM_SHUTDOWN_TIMEOUT msecs_to_jiffies(5000) #define DLM_VERSION_NOT_SET 0 #define DLM_SEND_ACK_BACK_MSG_THRESHOLD 32 #define DLM_RECV_ACK_BACK_MSG_THRESHOLD (DLM_SEND_ACK_BACK_MSG_THRESHOLD * 8) struct midcomms_node { int nodeid; uint32_t version; atomic_t seq_send; atomic_t seq_next; /* These queues are unbound because we cannot drop any message in dlm. * We could send a fence signal for a specific node to the cluster * manager if queues hits some maximum value, however this handling * not supported yet. 
*/ struct list_head send_queue; spinlock_t send_queue_lock; atomic_t send_queue_cnt; #define DLM_NODE_FLAG_CLOSE 1 #define DLM_NODE_FLAG_STOP_TX 2 #define DLM_NODE_FLAG_STOP_RX 3 atomic_t ulp_delivered; unsigned long flags; wait_queue_head_t shutdown_wait; /* dlm tcp termination state */ #define DLM_CLOSED 1 #define DLM_ESTABLISHED 2 #define DLM_FIN_WAIT1 3 #define DLM_FIN_WAIT2 4 #define DLM_CLOSE_WAIT 5 #define DLM_LAST_ACK 6 #define DLM_CLOSING 7 int state; spinlock_t state_lock; /* counts how many lockspaces are using this node * this refcount is necessary to determine if the * node wants to disconnect. */ int users; /* not protected by srcu, node_hash lifetime */ void *debugfs; struct hlist_node hlist; struct rcu_head rcu; }; struct dlm_mhandle { const union dlm_packet *inner_p; struct midcomms_node *node; struct dlm_opts *opts; struct dlm_msg *msg; bool committed; uint32_t seq; void (*ack_rcv)(struct midcomms_node *node); /* get_mhandle/commit srcu idx exchange */ int idx; struct list_head list; struct rcu_head rcu; }; static struct hlist_head node_hash[CONN_HASH_SIZE]; static DEFINE_SPINLOCK(nodes_lock); DEFINE_STATIC_SRCU(nodes_srcu); /* This mutex prevents that midcomms_close() is running while * stop() or remove(). As I experienced invalid memory access * behaviours when DLM_DEBUG_FENCE_TERMINATION is enabled and * resetting machines. I will end in some double deletion in nodes * datastructure. */ static DEFINE_MUTEX(close_lock); struct kmem_cache *dlm_midcomms_cache_create(void) { return kmem_cache_create("dlm_mhandle", sizeof(struct dlm_mhandle), 0, 0, NULL); } static inline const char *dlm_state_str(int state) { switch (state) { case DLM_CLOSED: return "CLOSED"; case DLM_ESTABLISHED: return "ESTABLISHED"; case DLM_FIN_WAIT1: return "FIN_WAIT1"; case DLM_FIN_WAIT2: return "FIN_WAIT2"; case DLM_CLOSE_WAIT: return "CLOSE_WAIT"; case DLM_LAST_ACK: return "LAST_ACK"; case DLM_CLOSING: return "CLOSING"; default: return "UNKNOWN"; } } const char *dlm_midcomms_state(struct midcomms_node *node) { return dlm_state_str(node->state); } unsigned long dlm_midcomms_flags(struct midcomms_node *node) { return node->flags; } int dlm_midcomms_send_queue_cnt(struct midcomms_node *node) { return atomic_read(&node->send_queue_cnt); } uint32_t dlm_midcomms_version(struct midcomms_node *node) { return node->version; } static struct midcomms_node *__find_node(int nodeid, int r) { struct midcomms_node *node; hlist_for_each_entry_rcu(node, &node_hash[r], hlist) { if (node->nodeid == nodeid) return node; } return NULL; } static void dlm_mhandle_release(struct rcu_head *rcu) { struct dlm_mhandle *mh = container_of(rcu, struct dlm_mhandle, rcu); dlm_lowcomms_put_msg(mh->msg); dlm_free_mhandle(mh); } static void dlm_mhandle_delete(struct midcomms_node *node, struct dlm_mhandle *mh) { list_del_rcu(&mh->list); atomic_dec(&node->send_queue_cnt); call_rcu(&mh->rcu, dlm_mhandle_release); } static void dlm_send_queue_flush(struct midcomms_node *node) { struct dlm_mhandle *mh; pr_debug("flush midcomms send queue of node %d\n", node->nodeid); rcu_read_lock(); spin_lock_bh(&node->send_queue_lock); list_for_each_entry_rcu(mh, &node->send_queue, list) { dlm_mhandle_delete(node, mh); } spin_unlock_bh(&node->send_queue_lock); rcu_read_unlock(); } static void midcomms_node_reset(struct midcomms_node *node) { pr_debug("reset node %d\n", node->nodeid); atomic_set(&node->seq_next, DLM_SEQ_INIT); atomic_set(&node->seq_send, DLM_SEQ_INIT); atomic_set(&node->ulp_delivered, 0); node->version = DLM_VERSION_NOT_SET; node->flags = 
0; dlm_send_queue_flush(node); node->state = DLM_CLOSED; wake_up(&node->shutdown_wait); } static struct midcomms_node *nodeid2node(int nodeid) { return __find_node(nodeid, nodeid_hash(nodeid)); } int dlm_midcomms_addr(int nodeid, struct sockaddr_storage *addr, int len) { int ret, r = nodeid_hash(nodeid); struct midcomms_node *node; ret = dlm_lowcomms_addr(nodeid, addr, len); if (ret) return ret; node = kmalloc(sizeof(*node), GFP_NOFS); if (!node) return -ENOMEM; node->nodeid = nodeid; spin_lock_init(&node->state_lock); spin_lock_init(&node->send_queue_lock); atomic_set(&node->send_queue_cnt, 0); INIT_LIST_HEAD(&node->send_queue); init_waitqueue_head(&node->shutdown_wait); node->users = 0; midcomms_node_reset(node); spin_lock(&nodes_lock); hlist_add_head_rcu(&node->hlist, &node_hash[r]); spin_unlock(&nodes_lock); node->debugfs = dlm_create_debug_comms_file(nodeid, node); return 0; } static int dlm_send_ack(int nodeid, uint32_t seq) { int mb_len = sizeof(struct dlm_header); struct dlm_header *m_header; struct dlm_msg *msg; char *ppc; msg = dlm_lowcomms_new_msg(nodeid, mb_len, GFP_ATOMIC, &ppc, NULL, NULL); if (!msg) return -ENOMEM; m_header = (struct dlm_header *)ppc; m_header->h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR); m_header->h_nodeid = cpu_to_le32(dlm_our_nodeid()); m_header->h_length = cpu_to_le16(mb_len); m_header->h_cmd = DLM_ACK; m_header->u.h_seq = cpu_to_le32(seq); dlm_lowcomms_commit_msg(msg); dlm_lowcomms_put_msg(msg); return 0; } static void dlm_send_ack_threshold(struct midcomms_node *node, uint32_t threshold) { uint32_t oval, nval; bool send_ack; /* let only send one user trigger threshold to send ack back */ do { oval = atomic_read(&node->ulp_delivered); send_ack = (oval > threshold); /* abort if threshold is not reached */ if (!send_ack) break; nval = 0; /* try to reset ulp_delivered counter */ } while (atomic_cmpxchg(&node->ulp_delivered, oval, nval) != oval); if (send_ack) dlm_send_ack(node->nodeid, atomic_read(&node->seq_next)); } static int dlm_send_fin(struct midcomms_node *node, void (*ack_rcv)(struct midcomms_node *node)) { int mb_len = sizeof(struct dlm_header); struct dlm_header *m_header; struct dlm_mhandle *mh; char *ppc; mh = dlm_midcomms_get_mhandle(node->nodeid, mb_len, GFP_ATOMIC, &ppc); if (!mh) return -ENOMEM; set_bit(DLM_NODE_FLAG_STOP_TX, &node->flags); mh->ack_rcv = ack_rcv; m_header = (struct dlm_header *)ppc; m_header->h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR); m_header->h_nodeid = cpu_to_le32(dlm_our_nodeid()); m_header->h_length = cpu_to_le16(mb_len); m_header->h_cmd = DLM_FIN; pr_debug("sending fin msg to node %d\n", node->nodeid); dlm_midcomms_commit_mhandle(mh, NULL, 0); return 0; } static void dlm_receive_ack(struct midcomms_node *node, uint32_t seq) { struct dlm_mhandle *mh; rcu_read_lock(); list_for_each_entry_rcu(mh, &node->send_queue, list) { if (before(mh->seq, seq)) { if (mh->ack_rcv) mh->ack_rcv(node); } else { /* send queue should be ordered */ break; } } spin_lock_bh(&node->send_queue_lock); list_for_each_entry_rcu(mh, &node->send_queue, list) { if (before(mh->seq, seq)) { dlm_mhandle_delete(node, mh); } else { /* send queue should be ordered */ break; } } spin_unlock_bh(&node->send_queue_lock); rcu_read_unlock(); } static void dlm_pas_fin_ack_rcv(struct midcomms_node *node) { spin_lock(&node->state_lock); pr_debug("receive passive fin ack from node %d with state %s\n", node->nodeid, dlm_state_str(node->state)); switch (node->state) { case DLM_LAST_ACK: /* DLM_CLOSED */ midcomms_node_reset(node); 
break; case DLM_CLOSED: /* not valid but somehow we got what we want */ wake_up(&node->shutdown_wait); break; default: spin_unlock(&node->state_lock); log_print("%s: unexpected state: %d", __func__, node->state); WARN_ON_ONCE(1); return; } spin_unlock(&node->state_lock); } static void dlm_receive_buffer_3_2_trace(uint32_t seq, const union dlm_packet *p) { switch (p->header.h_cmd) { case DLM_MSG: trace_dlm_recv_message(dlm_our_nodeid(), seq, &p->message); break; case DLM_RCOM: trace_dlm_recv_rcom(dlm_our_nodeid(), seq, &p->rcom); break; default: break; } } static void dlm_midcomms_receive_buffer(const union dlm_packet *p, struct midcomms_node *node, uint32_t seq) { bool is_expected_seq; uint32_t oval, nval; do { oval = atomic_read(&node->seq_next); is_expected_seq = (oval == seq); if (!is_expected_seq) break; nval = oval + 1; } while (atomic_cmpxchg(&node->seq_next, oval, nval) != oval); if (is_expected_seq) { switch (p->header.h_cmd) { case DLM_FIN: spin_lock(&node->state_lock); pr_debug("receive fin msg from node %d with state %s\n", node->nodeid, dlm_state_str(node->state)); switch (node->state) { case DLM_ESTABLISHED: dlm_send_ack(node->nodeid, nval); /* passive shutdown DLM_LAST_ACK case 1 * additional we check if the node is used by * cluster manager events at all. */ if (node->users == 0) { node->state = DLM_LAST_ACK; pr_debug("switch node %d to state %s case 1\n", node->nodeid, dlm_state_str(node->state)); set_bit(DLM_NODE_FLAG_STOP_RX, &node->flags); dlm_send_fin(node, dlm_pas_fin_ack_rcv); } else { node->state = DLM_CLOSE_WAIT; pr_debug("switch node %d to state %s\n", node->nodeid, dlm_state_str(node->state)); } break; case DLM_FIN_WAIT1: dlm_send_ack(node->nodeid, nval); node->state = DLM_CLOSING; set_bit(DLM_NODE_FLAG_STOP_RX, &node->flags); pr_debug("switch node %d to state %s\n", node->nodeid, dlm_state_str(node->state)); break; case DLM_FIN_WAIT2: dlm_send_ack(node->nodeid, nval); midcomms_node_reset(node); pr_debug("switch node %d to state %s\n", node->nodeid, dlm_state_str(node->state)); break; case DLM_LAST_ACK: /* probably remove_member caught it, do nothing */ break; default: spin_unlock(&node->state_lock); log_print("%s: unexpected state: %d", __func__, node->state); WARN_ON_ONCE(1); return; } spin_unlock(&node->state_lock); break; default: WARN_ON_ONCE(test_bit(DLM_NODE_FLAG_STOP_RX, &node->flags)); dlm_receive_buffer_3_2_trace(seq, p); dlm_receive_buffer(p, node->nodeid); atomic_inc(&node->ulp_delivered); /* unlikely case to send ack back when we don't transmit */ dlm_send_ack_threshold(node, DLM_RECV_ACK_BACK_MSG_THRESHOLD); break; } } else { /* retry to ack message which we already have by sending back * current node->seq_next number as ack. */ if (seq < oval) dlm_send_ack(node->nodeid, oval); log_print_ratelimited("ignore dlm msg because seq mismatch, seq: %u, expected: %u, nodeid: %d", seq, oval, node->nodeid); } } static int dlm_opts_check_msglen(const union dlm_packet *p, uint16_t msglen, int nodeid) { int len = msglen; /* we only trust outer header msglen because * it's checked against receive buffer length. 
*/ if (len < sizeof(struct dlm_opts)) return -1; len -= sizeof(struct dlm_opts); if (len < le16_to_cpu(p->opts.o_optlen)) return -1; len -= le16_to_cpu(p->opts.o_optlen); switch (p->opts.o_nextcmd) { case DLM_FIN: if (len < sizeof(struct dlm_header)) { log_print("fin too small: %d, will skip this message from node %d", len, nodeid); return -1; } break; case DLM_MSG: if (len < sizeof(struct dlm_message)) { log_print("msg too small: %d, will skip this message from node %d", msglen, nodeid); return -1; } break; case DLM_RCOM: if (len < sizeof(struct dlm_rcom)) { log_print("rcom msg too small: %d, will skip this message from node %d", len, nodeid); return -1; } break; default: log_print("unsupported o_nextcmd received: %u, will skip this message from node %d", p->opts.o_nextcmd, nodeid); return -1; } return 0; } static void dlm_midcomms_receive_buffer_3_2(const union dlm_packet *p, int nodeid) { uint16_t msglen = le16_to_cpu(p->header.h_length); struct midcomms_node *node; uint32_t seq; int ret, idx; idx = srcu_read_lock(&nodes_srcu); node = nodeid2node(nodeid); if (WARN_ON_ONCE(!node)) goto out; switch (node->version) { case DLM_VERSION_NOT_SET: node->version = DLM_VERSION_3_2; wake_up(&node->shutdown_wait); log_print("version 0x%08x for node %d detected", DLM_VERSION_3_2, node->nodeid); spin_lock(&node->state_lock); switch (node->state) { case DLM_CLOSED: node->state = DLM_ESTABLISHED; pr_debug("switch node %d to state %s\n", node->nodeid, dlm_state_str(node->state)); break; default: break; } spin_unlock(&node->state_lock); break; case DLM_VERSION_3_2: break; default: log_print_ratelimited("version mismatch detected, assumed 0x%08x but node %d has 0x%08x", DLM_VERSION_3_2, node->nodeid, node->version); goto out; } switch (p->header.h_cmd) { case DLM_RCOM: /* these rcom message we use to determine version. * they have their own retransmission handling and * are the first messages of dlm. * * length already checked. 
*/ switch (p->rcom.rc_type) { case cpu_to_le32(DLM_RCOM_NAMES): fallthrough; case cpu_to_le32(DLM_RCOM_NAMES_REPLY): fallthrough; case cpu_to_le32(DLM_RCOM_STATUS): fallthrough; case cpu_to_le32(DLM_RCOM_STATUS_REPLY): break; default: log_print("unsupported rcom type received: %u, will skip this message from node %d", le32_to_cpu(p->rcom.rc_type), nodeid); goto out; } WARN_ON_ONCE(test_bit(DLM_NODE_FLAG_STOP_RX, &node->flags)); dlm_receive_buffer(p, nodeid); break; case DLM_OPTS: seq = le32_to_cpu(p->header.u.h_seq); ret = dlm_opts_check_msglen(p, msglen, nodeid); if (ret < 0) { log_print("opts msg too small: %u, will skip this message from node %d", msglen, nodeid); goto out; } p = (union dlm_packet *)((unsigned char *)p->opts.o_opts + le16_to_cpu(p->opts.o_optlen)); /* recheck inner msglen just if it's not garbage */ msglen = le16_to_cpu(p->header.h_length); switch (p->header.h_cmd) { case DLM_RCOM: if (msglen < sizeof(struct dlm_rcom)) { log_print("inner rcom msg too small: %u, will skip this message from node %d", msglen, nodeid); goto out; } break; case DLM_MSG: if (msglen < sizeof(struct dlm_message)) { log_print("inner msg too small: %u, will skip this message from node %d", msglen, nodeid); goto out; } break; case DLM_FIN: if (msglen < sizeof(struct dlm_header)) { log_print("inner fin too small: %u, will skip this message from node %d", msglen, nodeid); goto out; } break; default: log_print("unsupported inner h_cmd received: %u, will skip this message from node %d", msglen, nodeid); goto out; } dlm_midcomms_receive_buffer(p, node, seq); break; case DLM_ACK: seq = le32_to_cpu(p->header.u.h_seq); dlm_receive_ack(node, seq); break; default: log_print("unsupported h_cmd received: %u, will skip this message from node %d", p->header.h_cmd, nodeid); break; } out: srcu_read_unlock(&nodes_srcu, idx); } static void dlm_midcomms_receive_buffer_3_1(const union dlm_packet *p, int nodeid) { uint16_t msglen = le16_to_cpu(p->header.h_length); struct midcomms_node *node; int idx; idx = srcu_read_lock(&nodes_srcu); node = nodeid2node(nodeid); if (WARN_ON_ONCE(!node)) { srcu_read_unlock(&nodes_srcu, idx); return; } switch (node->version) { case DLM_VERSION_NOT_SET: node->version = DLM_VERSION_3_1; wake_up(&node->shutdown_wait); log_print("version 0x%08x for node %d detected", DLM_VERSION_3_1, node->nodeid); break; case DLM_VERSION_3_1: break; default: log_print_ratelimited("version mismatch detected, assumed 0x%08x but node %d has 0x%08x", DLM_VERSION_3_1, node->nodeid, node->version); srcu_read_unlock(&nodes_srcu, idx); return; } srcu_read_unlock(&nodes_srcu, idx); switch (p->header.h_cmd) { case DLM_RCOM: /* length already checked */ break; case DLM_MSG: if (msglen < sizeof(struct dlm_message)) { log_print("msg too small: %u, will skip this message from node %d", msglen, nodeid); return; } break; default: log_print("unsupported h_cmd received: %u, will skip this message from node %d", p->header.h_cmd, nodeid); return; } dlm_receive_buffer(p, nodeid); } int dlm_validate_incoming_buffer(int nodeid, unsigned char *buf, int len) { const unsigned char *ptr = buf; const struct dlm_header *hd; uint16_t msglen; int ret = 0; while (len >= sizeof(struct dlm_header)) { hd = (struct dlm_header *)ptr; /* no message should be more than DLM_MAX_SOCKET_BUFSIZE or * less than dlm_header size. * * Some messages does not have a 8 byte length boundary yet * which can occur in a unaligned memory access of some dlm * messages. 
However this problem need to be fixed at the * sending side, for now it seems nobody run into architecture * related issues yet but it slows down some processing. * Fixing this issue should be scheduled in future by doing * the next major version bump. */ msglen = le16_to_cpu(hd->h_length); if (msglen > DLM_MAX_SOCKET_BUFSIZE || msglen < sizeof(struct dlm_header)) { log_print("received invalid length header: %u from node %d, will abort message parsing", msglen, nodeid); return -EBADMSG; } /* caller will take care that leftover * will be parsed next call with more data */ if (msglen > len) break; ret += msglen; len -= msglen; ptr += msglen; } return ret; } /* * Called from the low-level comms layer to process a buffer of * commands. */ int dlm_process_incoming_buffer(int nodeid, unsigned char *buf, int len) { const unsigned char *ptr = buf; const struct dlm_header *hd; uint16_t msglen; int ret = 0; while (len >= sizeof(struct dlm_header)) { hd = (struct dlm_header *)ptr; msglen = le16_to_cpu(hd->h_length); if (msglen > len) break; switch (hd->h_version) { case cpu_to_le32(DLM_VERSION_3_1): dlm_midcomms_receive_buffer_3_1((const union dlm_packet *)ptr, nodeid); break; case cpu_to_le32(DLM_VERSION_3_2): dlm_midcomms_receive_buffer_3_2((const union dlm_packet *)ptr, nodeid); break; default: log_print("received invalid version header: %u from node %d, will skip this message", le32_to_cpu(hd->h_version), nodeid); break; } ret += msglen; len -= msglen; ptr += msglen; } return ret; } void dlm_midcomms_unack_msg_resend(int nodeid) { struct midcomms_node *node; struct dlm_mhandle *mh; int idx, ret; idx = srcu_read_lock(&nodes_srcu); node = nodeid2node(nodeid); if (WARN_ON_ONCE(!node)) { srcu_read_unlock(&nodes_srcu, idx); return; } /* old protocol, we don't support to retransmit on failure */ switch (node->version) { case DLM_VERSION_3_2: break; default: srcu_read_unlock(&nodes_srcu, idx); return; } rcu_read_lock(); list_for_each_entry_rcu(mh, &node->send_queue, list) { if (!mh->committed) continue; ret = dlm_lowcomms_resend_msg(mh->msg); if (!ret) log_print_ratelimited("retransmit dlm msg, seq %u, nodeid %d", mh->seq, node->nodeid); } rcu_read_unlock(); srcu_read_unlock(&nodes_srcu, idx); } static void dlm_fill_opts_header(struct dlm_opts *opts, uint16_t inner_len, uint32_t seq) { opts->o_header.h_cmd = DLM_OPTS; opts->o_header.h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR); opts->o_header.h_nodeid = cpu_to_le32(dlm_our_nodeid()); opts->o_header.h_length = cpu_to_le16(DLM_MIDCOMMS_OPT_LEN + inner_len); opts->o_header.u.h_seq = cpu_to_le32(seq); } static void midcomms_new_msg_cb(void *data) { struct dlm_mhandle *mh = data; atomic_inc(&mh->node->send_queue_cnt); spin_lock_bh(&mh->node->send_queue_lock); list_add_tail_rcu(&mh->list, &mh->node->send_queue); spin_unlock_bh(&mh->node->send_queue_lock); mh->seq = atomic_fetch_inc(&mh->node->seq_send); } static struct dlm_msg *dlm_midcomms_get_msg_3_2(struct dlm_mhandle *mh, int nodeid, int len, gfp_t allocation, char **ppc) { struct dlm_opts *opts; struct dlm_msg *msg; msg = dlm_lowcomms_new_msg(nodeid, len + DLM_MIDCOMMS_OPT_LEN, allocation, ppc, midcomms_new_msg_cb, mh); if (!msg) return NULL; opts = (struct dlm_opts *)*ppc; mh->opts = opts; /* add possible options here */ dlm_fill_opts_header(opts, len, mh->seq); *ppc += sizeof(*opts); mh->inner_p = (const union dlm_packet *)*ppc; return msg; } /* avoid false positive for nodes_srcu, unlock happens in * dlm_midcomms_commit_mhandle which is a must call if success */ #ifndef __CHECKER__ 
struct dlm_mhandle *dlm_midcomms_get_mhandle(int nodeid, int len, gfp_t allocation, char **ppc) { struct midcomms_node *node; struct dlm_mhandle *mh; struct dlm_msg *msg; int idx; idx = srcu_read_lock(&nodes_srcu); node = nodeid2node(nodeid); if (WARN_ON_ONCE(!node)) goto err; /* this is a bug, however we going on and hope it will be resolved */ WARN_ON_ONCE(test_bit(DLM_NODE_FLAG_STOP_TX, &node->flags)); mh = dlm_allocate_mhandle(allocation); if (!mh) goto err; mh->committed = false; mh->ack_rcv = NULL; mh->idx = idx; mh->node = node; switch (node->version) { case DLM_VERSION_3_1: msg = dlm_lowcomms_new_msg(nodeid, len, allocation, ppc, NULL, NULL); if (!msg) { dlm_free_mhandle(mh); goto err; } break; case DLM_VERSION_3_2: msg = dlm_midcomms_get_msg_3_2(mh, nodeid, len, allocation, ppc); if (!msg) { dlm_free_mhandle(mh); goto err; } /* send ack back if necessary */ dlm_send_ack_threshold(node, DLM_SEND_ACK_BACK_MSG_THRESHOLD); break; default: dlm_free_mhandle(mh); WARN_ON_ONCE(1); goto err; } mh->msg = msg; /* keep in mind that is a must to call * dlm_midcomms_commit_msg() which releases * nodes_srcu using mh->idx which is assumed * here that the application will call it. */ return mh; err: srcu_read_unlock(&nodes_srcu, idx); return NULL; } #endif static void dlm_midcomms_commit_msg_3_2_trace(const struct dlm_mhandle *mh, const void *name, int namelen) { switch (mh->inner_p->header.h_cmd) { case DLM_MSG: trace_dlm_send_message(mh->node->nodeid, mh->seq, &mh->inner_p->message, name, namelen); break; case DLM_RCOM: trace_dlm_send_rcom(mh->node->nodeid, mh->seq, &mh->inner_p->rcom); break; default: /* nothing to trace */ break; } } static void dlm_midcomms_commit_msg_3_2(struct dlm_mhandle *mh, const void *name, int namelen) { /* nexthdr chain for fast lookup */ mh->opts->o_nextcmd = mh->inner_p->header.h_cmd; mh->committed = true; dlm_midcomms_commit_msg_3_2_trace(mh, name, namelen); dlm_lowcomms_commit_msg(mh->msg); } /* avoid false positive for nodes_srcu, lock was happen in * dlm_midcomms_get_mhandle */ #ifndef __CHECKER__ void dlm_midcomms_commit_mhandle(struct dlm_mhandle *mh, const void *name, int namelen) { switch (mh->node->version) { case DLM_VERSION_3_1: srcu_read_unlock(&nodes_srcu, mh->idx); dlm_lowcomms_commit_msg(mh->msg); dlm_lowcomms_put_msg(mh->msg); /* mh is not part of rcu list in this case */ dlm_free_mhandle(mh); break; case DLM_VERSION_3_2: /* held rcu read lock here, because we sending the * dlm message out, when we do that we could receive * an ack back which releases the mhandle and we * get a use after free. 
*/ rcu_read_lock(); dlm_midcomms_commit_msg_3_2(mh, name, namelen); srcu_read_unlock(&nodes_srcu, mh->idx); rcu_read_unlock(); break; default: srcu_read_unlock(&nodes_srcu, mh->idx); WARN_ON_ONCE(1); break; } } #endif int dlm_midcomms_start(void) { return dlm_lowcomms_start(); } void dlm_midcomms_stop(void) { dlm_lowcomms_stop(); } void dlm_midcomms_init(void) { int i; for (i = 0; i < CONN_HASH_SIZE; i++) INIT_HLIST_HEAD(&node_hash[i]); dlm_lowcomms_init(); } static void midcomms_node_release(struct rcu_head *rcu) { struct midcomms_node *node = container_of(rcu, struct midcomms_node, rcu); WARN_ON_ONCE(atomic_read(&node->send_queue_cnt)); dlm_send_queue_flush(node); kfree(node); } void dlm_midcomms_exit(void) { struct midcomms_node *node; int i, idx; idx = srcu_read_lock(&nodes_srcu); for (i = 0; i < CONN_HASH_SIZE; i++) { hlist_for_each_entry_rcu(node, &node_hash[i], hlist) { dlm_delete_debug_comms_file(node->debugfs); spin_lock(&nodes_lock); hlist_del_rcu(&node->hlist); spin_unlock(&nodes_lock); call_srcu(&nodes_srcu, &node->rcu, midcomms_node_release); } } srcu_read_unlock(&nodes_srcu, idx); dlm_lowcomms_exit(); } static void dlm_act_fin_ack_rcv(struct midcomms_node *node) { spin_lock(&node->state_lock); pr_debug("receive active fin ack from node %d with state %s\n", node->nodeid, dlm_state_str(node->state)); switch (node->state) { case DLM_FIN_WAIT1: node->state = DLM_FIN_WAIT2; pr_debug("switch node %d to state %s\n", node->nodeid, dlm_state_str(node->state)); break; case DLM_CLOSING: midcomms_node_reset(node); pr_debug("switch node %d to state %s\n", node->nodeid, dlm_state_str(node->state)); break; case DLM_CLOSED: /* not valid but somehow we got what we want */ wake_up(&node->shutdown_wait); break; default: spin_unlock(&node->state_lock); log_print("%s: unexpected state: %d", __func__, node->state); WARN_ON_ONCE(1); return; } spin_unlock(&node->state_lock); } void dlm_midcomms_add_member(int nodeid) { struct midcomms_node *node; int idx; idx = srcu_read_lock(&nodes_srcu); node = nodeid2node(nodeid); if (WARN_ON_ONCE(!node)) { srcu_read_unlock(&nodes_srcu, idx); return; } spin_lock(&node->state_lock); if (!node->users) { pr_debug("receive add member from node %d with state %s\n", node->nodeid, dlm_state_str(node->state)); switch (node->state) { case DLM_ESTABLISHED: break; case DLM_CLOSED: node->state = DLM_ESTABLISHED; pr_debug("switch node %d to state %s\n", node->nodeid, dlm_state_str(node->state)); break; default: /* some invalid state passive shutdown * was failed, we try to reset and * hope it will go on. */ log_print("reset node %d because shutdown stuck", node->nodeid); midcomms_node_reset(node); node->state = DLM_ESTABLISHED; break; } } node->users++; pr_debug("node %d users inc count %d\n", nodeid, node->users); spin_unlock(&node->state_lock); srcu_read_unlock(&nodes_srcu, idx); } void dlm_midcomms_remove_member(int nodeid) { struct midcomms_node *node; int idx; idx = srcu_read_lock(&nodes_srcu); node = nodeid2node(nodeid); if (WARN_ON_ONCE(!node)) { srcu_read_unlock(&nodes_srcu, idx); return; } spin_lock(&node->state_lock); node->users--; pr_debug("node %d users dec count %d\n", nodeid, node->users); /* hitting users count to zero means the * other side is running dlm_midcomms_stop() * we meet us to have a clean disconnect. 
*/ if (node->users == 0) { pr_debug("receive remove member from node %d with state %s\n", node->nodeid, dlm_state_str(node->state)); switch (node->state) { case DLM_ESTABLISHED: break; case DLM_CLOSE_WAIT: /* passive shutdown DLM_LAST_ACK case 2 */ node->state = DLM_LAST_ACK; pr_debug("switch node %d to state %s case 2\n", node->nodeid, dlm_state_str(node->state)); set_bit(DLM_NODE_FLAG_STOP_RX, &node->flags); dlm_send_fin(node, dlm_pas_fin_ack_rcv); break; case DLM_LAST_ACK: /* probably receive fin caught it, do nothing */ break; case DLM_CLOSED: /* already gone, do nothing */ break; default: log_print("%s: unexpected state: %d", __func__, node->state); break; } } spin_unlock(&node->state_lock); srcu_read_unlock(&nodes_srcu, idx); } void dlm_midcomms_version_wait(void) { struct midcomms_node *node; int i, idx, ret; idx = srcu_read_lock(&nodes_srcu); for (i = 0; i < CONN_HASH_SIZE; i++) { hlist_for_each_entry_rcu(node, &node_hash[i], hlist) { ret = wait_event_timeout(node->shutdown_wait, node->version != DLM_VERSION_NOT_SET || node->state == DLM_CLOSED || test_bit(DLM_NODE_FLAG_CLOSE, &node->flags), DLM_SHUTDOWN_TIMEOUT); if (!ret || test_bit(DLM_NODE_FLAG_CLOSE, &node->flags)) pr_debug("version wait timed out for node %d with state %s\n", node->nodeid, dlm_state_str(node->state)); } } srcu_read_unlock(&nodes_srcu, idx); } static void midcomms_shutdown(struct midcomms_node *node) { int ret; /* old protocol, we don't wait for pending operations */ switch (node->version) { case DLM_VERSION_3_2: break; default: return; } spin_lock(&node->state_lock); pr_debug("receive active shutdown for node %d with state %s\n", node->nodeid, dlm_state_str(node->state)); switch (node->state) { case DLM_ESTABLISHED: node->state = DLM_FIN_WAIT1; pr_debug("switch node %d to state %s case 2\n", node->nodeid, dlm_state_str(node->state)); dlm_send_fin(node, dlm_act_fin_ack_rcv); break; case DLM_CLOSED: /* we have what we want */ break; default: /* busy to enter DLM_FIN_WAIT1, wait until passive * done in shutdown_wait to enter DLM_CLOSED. 
*/ break; } spin_unlock(&node->state_lock); if (DLM_DEBUG_FENCE_TERMINATION) msleep(5000); /* wait for other side dlm + fin */ ret = wait_event_timeout(node->shutdown_wait, node->state == DLM_CLOSED || test_bit(DLM_NODE_FLAG_CLOSE, &node->flags), DLM_SHUTDOWN_TIMEOUT); if (!ret) pr_debug("active shutdown timed out for node %d with state %s\n", node->nodeid, dlm_state_str(node->state)); else pr_debug("active shutdown done for node %d with state %s\n", node->nodeid, dlm_state_str(node->state)); } void dlm_midcomms_shutdown(void) { struct midcomms_node *node; int i, idx; mutex_lock(&close_lock); idx = srcu_read_lock(&nodes_srcu); for (i = 0; i < CONN_HASH_SIZE; i++) { hlist_for_each_entry_rcu(node, &node_hash[i], hlist) { midcomms_shutdown(node); } } srcu_read_unlock(&nodes_srcu, idx); mutex_unlock(&close_lock); dlm_lowcomms_shutdown(); } int dlm_midcomms_close(int nodeid) { struct midcomms_node *node; int idx, ret; idx = srcu_read_lock(&nodes_srcu); /* Abort pending close/remove operation */ node = nodeid2node(nodeid); if (node) { /* let shutdown waiters leave */ set_bit(DLM_NODE_FLAG_CLOSE, &node->flags); wake_up(&node->shutdown_wait); } srcu_read_unlock(&nodes_srcu, idx); synchronize_srcu(&nodes_srcu); mutex_lock(&close_lock); idx = srcu_read_lock(&nodes_srcu); node = nodeid2node(nodeid); if (!node) { srcu_read_unlock(&nodes_srcu, idx); mutex_unlock(&close_lock); return dlm_lowcomms_close(nodeid); } ret = dlm_lowcomms_close(nodeid); dlm_delete_debug_comms_file(node->debugfs); spin_lock(&nodes_lock); hlist_del_rcu(&node->hlist); spin_unlock(&nodes_lock); srcu_read_unlock(&nodes_srcu, idx); /* wait that all readers left until flush send queue */ synchronize_srcu(&nodes_srcu); /* drop all pending dlm messages, this is fine as * this function get called when the node is fenced */ dlm_send_queue_flush(node); call_srcu(&nodes_srcu, &node->rcu, midcomms_node_release); mutex_unlock(&close_lock); return ret; } /* debug functionality to send raw dlm msg from user space */ struct dlm_rawmsg_data { struct midcomms_node *node; void *buf; }; static void midcomms_new_rawmsg_cb(void *data) { struct dlm_rawmsg_data *rd = data; struct dlm_header *h = rd->buf; switch (h->h_version) { case cpu_to_le32(DLM_VERSION_3_1): break; default: switch (h->h_cmd) { case DLM_OPTS: if (!h->u.h_seq) h->u.h_seq = cpu_to_le32(atomic_fetch_inc(&rd->node->seq_send)); break; default: break; } break; } } int dlm_midcomms_rawmsg_send(struct midcomms_node *node, void *buf, int buflen) { struct dlm_rawmsg_data rd; struct dlm_msg *msg; char *msgbuf; rd.node = node; rd.buf = buf; msg = dlm_lowcomms_new_msg(node->nodeid, buflen, GFP_NOFS, &msgbuf, midcomms_new_rawmsg_cb, &rd); if (!msg) return -ENOMEM; memcpy(msgbuf, buf, buflen); dlm_lowcomms_commit_msg(msg); return 0; }
linux-master
fs/dlm/midcomms.c
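/*
 * Illustrative sketch, not part of fs/dlm/midcomms.c: a minimal user-space
 * model of the TCP-like termination handshake that the midcomms code above
 * drives for DLM 3.2 peers.  The state names mirror the kernel enum, but
 * the two transition helpers are assumptions distilled from the switch
 * statements in the remove-member path and in midcomms_shutdown(); they
 * are not the kernel's state machine and elide locking, FIN acks and
 * timeouts.
 */
#include <stdio.h>

enum model_state {
        MODEL_ESTABLISHED,
        MODEL_FIN_WAIT1,
        MODEL_CLOSE_WAIT,
        MODEL_LAST_ACK,
        MODEL_CLOSED,
};

static const char *model_state_str(enum model_state st)
{
        switch (st) {
        case MODEL_ESTABLISHED: return "ESTABLISHED";
        case MODEL_FIN_WAIT1:   return "FIN_WAIT1";
        case MODEL_CLOSE_WAIT:  return "CLOSE_WAIT";
        case MODEL_LAST_ACK:    return "LAST_ACK";
        case MODEL_CLOSED:      return "CLOSED";
        }
        return "?";
}

/* Active side: we initiate shutdown, send our FIN and wait for its ack. */
static enum model_state model_active_shutdown(enum model_state st)
{
        if (st == MODEL_ESTABLISHED) {
                printf("send FIN, %s -> %s\n", model_state_str(st),
                       model_state_str(MODEL_FIN_WAIT1));
                return MODEL_FIN_WAIT1;
        }
        return st;
}

/* Passive side: the last local user dropped after the peer's FIN arrived. */
static enum model_state model_remove_member(enum model_state st)
{
        if (st == MODEL_CLOSE_WAIT) {
                printf("send FIN, %s -> %s\n", model_state_str(st),
                       model_state_str(MODEL_LAST_ACK));
                return MODEL_LAST_ACK;
        }
        return st;
}

int main(void)
{
        enum model_state active = MODEL_ESTABLISHED;
        enum model_state passive = MODEL_CLOSE_WAIT;

        active = model_active_shutdown(active);
        passive = model_remove_member(passive);

        printf("active=%s passive=%s\n",
               model_state_str(active), model_state_str(passive));
        return 0;
}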
// SPDX-License-Identifier: GPL-2.0-only /* * * Copyright (C) 2011 Novell Inc. */ #include <uapi/linux/magic.h> #include <linux/fs.h> #include <linux/namei.h> #include <linux/xattr.h> #include <linux/mount.h> #include <linux/parser.h> #include <linux/module.h> #include <linux/statfs.h> #include <linux/seq_file.h> #include <linux/posix_acl_xattr.h> #include <linux/exportfs.h> #include <linux/file.h> #include <linux/fs_context.h> #include <linux/fs_parser.h> #include "overlayfs.h" #include "params.h" MODULE_AUTHOR("Miklos Szeredi <[email protected]>"); MODULE_DESCRIPTION("Overlay filesystem"); MODULE_LICENSE("GPL"); struct ovl_dir_cache; static struct dentry *ovl_d_real(struct dentry *dentry, const struct inode *inode) { struct dentry *real = NULL, *lower; int err; /* It's an overlay file */ if (inode && d_inode(dentry) == inode) return dentry; if (!d_is_reg(dentry)) { if (!inode || inode == d_inode(dentry)) return dentry; goto bug; } real = ovl_dentry_upper(dentry); if (real && (inode == d_inode(real))) return real; if (real && !inode && ovl_has_upperdata(d_inode(dentry))) return real; /* * Best effort lazy lookup of lowerdata for !inode case to return * the real lowerdata dentry. The only current caller of d_real() with * NULL inode is d_real_inode() from trace_uprobe and this caller is * likely going to be followed reading from the file, before placing * uprobes on offset within the file, so lowerdata should be available * when setting the uprobe. */ err = ovl_verify_lowerdata(dentry); if (err) goto bug; lower = ovl_dentry_lowerdata(dentry); if (!lower) goto bug; real = lower; /* Handle recursion */ real = d_real(real, inode); if (!inode || inode == d_inode(real)) return real; bug: WARN(1, "%s(%pd4, %s:%lu): real dentry (%p/%lu) not found\n", __func__, dentry, inode ? inode->i_sb->s_id : "NULL", inode ? inode->i_ino : 0, real, real && d_inode(real) ? 
d_inode(real)->i_ino : 0); return dentry; } static int ovl_revalidate_real(struct dentry *d, unsigned int flags, bool weak) { int ret = 1; if (!d) return 1; if (weak) { if (d->d_flags & DCACHE_OP_WEAK_REVALIDATE) ret = d->d_op->d_weak_revalidate(d, flags); } else if (d->d_flags & DCACHE_OP_REVALIDATE) { ret = d->d_op->d_revalidate(d, flags); if (!ret) { if (!(flags & LOOKUP_RCU)) d_invalidate(d); ret = -ESTALE; } } return ret; } static int ovl_dentry_revalidate_common(struct dentry *dentry, unsigned int flags, bool weak) { struct ovl_entry *oe = OVL_E(dentry); struct ovl_path *lowerstack = ovl_lowerstack(oe); struct inode *inode = d_inode_rcu(dentry); struct dentry *upper; unsigned int i; int ret = 1; /* Careful in RCU mode */ if (!inode) return -ECHILD; upper = ovl_i_dentry_upper(inode); if (upper) ret = ovl_revalidate_real(upper, flags, weak); for (i = 0; ret > 0 && i < ovl_numlower(oe); i++) ret = ovl_revalidate_real(lowerstack[i].dentry, flags, weak); return ret; } static int ovl_dentry_revalidate(struct dentry *dentry, unsigned int flags) { return ovl_dentry_revalidate_common(dentry, flags, false); } static int ovl_dentry_weak_revalidate(struct dentry *dentry, unsigned int flags) { return ovl_dentry_revalidate_common(dentry, flags, true); } static const struct dentry_operations ovl_dentry_operations = { .d_real = ovl_d_real, .d_revalidate = ovl_dentry_revalidate, .d_weak_revalidate = ovl_dentry_weak_revalidate, }; static struct kmem_cache *ovl_inode_cachep; static struct inode *ovl_alloc_inode(struct super_block *sb) { struct ovl_inode *oi = alloc_inode_sb(sb, ovl_inode_cachep, GFP_KERNEL); if (!oi) return NULL; oi->cache = NULL; oi->redirect = NULL; oi->version = 0; oi->flags = 0; oi->__upperdentry = NULL; oi->lowerdata_redirect = NULL; oi->oe = NULL; mutex_init(&oi->lock); return &oi->vfs_inode; } static void ovl_free_inode(struct inode *inode) { struct ovl_inode *oi = OVL_I(inode); kfree(oi->redirect); mutex_destroy(&oi->lock); kmem_cache_free(ovl_inode_cachep, oi); } static void ovl_destroy_inode(struct inode *inode) { struct ovl_inode *oi = OVL_I(inode); dput(oi->__upperdentry); ovl_free_entry(oi->oe); if (S_ISDIR(inode->i_mode)) ovl_dir_cache_free(inode); else kfree(oi->lowerdata_redirect); } static void ovl_put_super(struct super_block *sb) { struct ovl_fs *ofs = OVL_FS(sb); if (ofs) ovl_free_fs(ofs); } /* Sync real dirty inodes in upper filesystem (if it exists) */ static int ovl_sync_fs(struct super_block *sb, int wait) { struct ovl_fs *ofs = OVL_FS(sb); struct super_block *upper_sb; int ret; ret = ovl_sync_status(ofs); /* * We have to always set the err, because the return value isn't * checked in syncfs, and instead indirectly return an error via * the sb's writeback errseq, which VFS inspects after this call. */ if (ret < 0) { errseq_set(&sb->s_wb_err, -EIO); return -EIO; } if (!ret) return ret; /* * Not called for sync(2) call or an emergency sync (SB_I_SKIP_SYNC). * All the super blocks will be iterated, including upper_sb. * * If this is a syncfs(2) call, then we do need to call * sync_filesystem() on upper_sb, but enough if we do it when being * called with wait == 1. */ if (!wait) return 0; upper_sb = ovl_upper_mnt(ofs)->mnt_sb; down_read(&upper_sb->s_umount); ret = sync_filesystem(upper_sb); up_read(&upper_sb->s_umount); return ret; } /** * ovl_statfs * @dentry: The dentry to query * @buf: The struct kstatfs to fill in with stats * * Get the filesystem statistics. 
As writes always target the upper layer * filesystem pass the statfs to the upper filesystem (if it exists) */ static int ovl_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; struct ovl_fs *ofs = OVL_FS(sb); struct dentry *root_dentry = sb->s_root; struct path path; int err; ovl_path_real(root_dentry, &path); err = vfs_statfs(&path, buf); if (!err) { buf->f_namelen = ofs->namelen; buf->f_type = OVERLAYFS_SUPER_MAGIC; if (ovl_has_fsid(ofs)) buf->f_fsid = uuid_to_fsid(sb->s_uuid.b); } return err; } static const struct super_operations ovl_super_operations = { .alloc_inode = ovl_alloc_inode, .free_inode = ovl_free_inode, .destroy_inode = ovl_destroy_inode, .drop_inode = generic_delete_inode, .put_super = ovl_put_super, .sync_fs = ovl_sync_fs, .statfs = ovl_statfs, .show_options = ovl_show_options, }; #define OVL_WORKDIR_NAME "work" #define OVL_INDEXDIR_NAME "index" static struct dentry *ovl_workdir_create(struct ovl_fs *ofs, const char *name, bool persist) { struct inode *dir = ofs->workbasedir->d_inode; struct vfsmount *mnt = ovl_upper_mnt(ofs); struct dentry *work; int err; bool retried = false; inode_lock_nested(dir, I_MUTEX_PARENT); retry: work = ovl_lookup_upper(ofs, name, ofs->workbasedir, strlen(name)); if (!IS_ERR(work)) { struct iattr attr = { .ia_valid = ATTR_MODE, .ia_mode = S_IFDIR | 0, }; if (work->d_inode) { err = -EEXIST; if (retried) goto out_dput; if (persist) goto out_unlock; retried = true; err = ovl_workdir_cleanup(ofs, dir, mnt, work, 0); dput(work); if (err == -EINVAL) { work = ERR_PTR(err); goto out_unlock; } goto retry; } err = ovl_mkdir_real(ofs, dir, &work, attr.ia_mode); if (err) goto out_dput; /* Weird filesystem returning with hashed negative (kernfs)? */ err = -EINVAL; if (d_really_is_negative(work)) goto out_dput; /* * Try to remove POSIX ACL xattrs from workdir. We are good if: * * a) success (there was a POSIX ACL xattr and was removed) * b) -ENODATA (there was no POSIX ACL xattr) * c) -EOPNOTSUPP (POSIX ACL xattrs are not supported) * * There are various other error values that could effectively * mean that the xattr doesn't exist (e.g. -ERANGE is returned * if the xattr name is too long), but the set of filesystems * allowed as upper are limited to "normal" ones, where checking * for the above two errors is sufficient. 
*/ err = ovl_do_remove_acl(ofs, work, XATTR_NAME_POSIX_ACL_DEFAULT); if (err && err != -ENODATA && err != -EOPNOTSUPP) goto out_dput; err = ovl_do_remove_acl(ofs, work, XATTR_NAME_POSIX_ACL_ACCESS); if (err && err != -ENODATA && err != -EOPNOTSUPP) goto out_dput; /* Clear any inherited mode bits */ inode_lock(work->d_inode); err = ovl_do_notify_change(ofs, work, &attr); inode_unlock(work->d_inode); if (err) goto out_dput; } else { err = PTR_ERR(work); goto out_err; } out_unlock: inode_unlock(dir); return work; out_dput: dput(work); out_err: pr_warn("failed to create directory %s/%s (errno: %i); mounting read-only\n", ofs->config.workdir, name, -err); work = NULL; goto out_unlock; } static int ovl_check_namelen(const struct path *path, struct ovl_fs *ofs, const char *name) { struct kstatfs statfs; int err = vfs_statfs(path, &statfs); if (err) pr_err("statfs failed on '%s'\n", name); else ofs->namelen = max(ofs->namelen, statfs.f_namelen); return err; } static int ovl_lower_dir(const char *name, struct path *path, struct ovl_fs *ofs, int *stack_depth) { int fh_type; int err; err = ovl_check_namelen(path, ofs, name); if (err) return err; *stack_depth = max(*stack_depth, path->mnt->mnt_sb->s_stack_depth); /* * The inodes index feature and NFS export need to encode and decode * file handles, so they require that all layers support them. */ fh_type = ovl_can_decode_fh(path->dentry->d_sb); if ((ofs->config.nfs_export || (ofs->config.index && ofs->config.upperdir)) && !fh_type) { ofs->config.index = false; ofs->config.nfs_export = false; pr_warn("fs on '%s' does not support file handles, falling back to index=off,nfs_export=off.\n", name); } ofs->nofh |= !fh_type; /* * Decoding origin file handle is required for persistent st_ino. * Without persistent st_ino, xino=auto falls back to xino=off. 
*/ if (ofs->config.xino == OVL_XINO_AUTO && ofs->config.upperdir && !fh_type) { ofs->config.xino = OVL_XINO_OFF; pr_warn("fs on '%s' does not support file handles, falling back to xino=off.\n", name); } /* Check if lower fs has 32bit inode numbers */ if (fh_type != FILEID_INO32_GEN) ofs->xino_mode = -1; return 0; } /* Workdir should not be subdir of upperdir and vice versa */ static bool ovl_workdir_ok(struct dentry *workdir, struct dentry *upperdir) { bool ok = false; if (workdir != upperdir) { ok = (lock_rename(workdir, upperdir) == NULL); unlock_rename(workdir, upperdir); } return ok; } static int ovl_own_xattr_get(const struct xattr_handler *handler, struct dentry *dentry, struct inode *inode, const char *name, void *buffer, size_t size) { return -EOPNOTSUPP; } static int ovl_own_xattr_set(const struct xattr_handler *handler, struct mnt_idmap *idmap, struct dentry *dentry, struct inode *inode, const char *name, const void *value, size_t size, int flags) { return -EOPNOTSUPP; } static int ovl_other_xattr_get(const struct xattr_handler *handler, struct dentry *dentry, struct inode *inode, const char *name, void *buffer, size_t size) { return ovl_xattr_get(dentry, inode, name, buffer, size); } static int ovl_other_xattr_set(const struct xattr_handler *handler, struct mnt_idmap *idmap, struct dentry *dentry, struct inode *inode, const char *name, const void *value, size_t size, int flags) { return ovl_xattr_set(dentry, inode, name, value, size, flags); } static const struct xattr_handler ovl_own_trusted_xattr_handler = { .prefix = OVL_XATTR_TRUSTED_PREFIX, .get = ovl_own_xattr_get, .set = ovl_own_xattr_set, }; static const struct xattr_handler ovl_own_user_xattr_handler = { .prefix = OVL_XATTR_USER_PREFIX, .get = ovl_own_xattr_get, .set = ovl_own_xattr_set, }; static const struct xattr_handler ovl_other_xattr_handler = { .prefix = "", /* catch all */ .get = ovl_other_xattr_get, .set = ovl_other_xattr_set, }; static const struct xattr_handler *ovl_trusted_xattr_handlers[] = { &ovl_own_trusted_xattr_handler, &ovl_other_xattr_handler, NULL }; static const struct xattr_handler *ovl_user_xattr_handlers[] = { &ovl_own_user_xattr_handler, &ovl_other_xattr_handler, NULL }; static int ovl_setup_trap(struct super_block *sb, struct dentry *dir, struct inode **ptrap, const char *name) { struct inode *trap; int err; trap = ovl_get_trap_inode(sb, dir); err = PTR_ERR_OR_ZERO(trap); if (err) { if (err == -ELOOP) pr_err("conflicting %s path\n", name); return err; } *ptrap = trap; return 0; } /* * Determine how we treat concurrent use of upperdir/workdir based on the * index feature. This is papering over mount leaks of container runtimes, * for example, an old overlay mount is leaked and now its upperdir is * attempted to be used as a lower layer in a new overlay mount. 
*/ static int ovl_report_in_use(struct ovl_fs *ofs, const char *name) { if (ofs->config.index) { pr_err("%s is in-use as upperdir/workdir of another mount, mount with '-o index=off' to override exclusive upperdir protection.\n", name); return -EBUSY; } else { pr_warn("%s is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.\n", name); return 0; } } static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs, struct ovl_layer *upper_layer, const struct path *upperpath) { struct vfsmount *upper_mnt; int err; /* Upperdir path should not be r/o */ if (__mnt_is_readonly(upperpath->mnt)) { pr_err("upper fs is r/o, try multi-lower layers mount\n"); err = -EINVAL; goto out; } err = ovl_check_namelen(upperpath, ofs, ofs->config.upperdir); if (err) goto out; err = ovl_setup_trap(sb, upperpath->dentry, &upper_layer->trap, "upperdir"); if (err) goto out; upper_mnt = clone_private_mount(upperpath); err = PTR_ERR(upper_mnt); if (IS_ERR(upper_mnt)) { pr_err("failed to clone upperpath\n"); goto out; } /* Don't inherit atime flags */ upper_mnt->mnt_flags &= ~(MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME); upper_layer->mnt = upper_mnt; upper_layer->idx = 0; upper_layer->fsid = 0; err = -ENOMEM; upper_layer->name = kstrdup(ofs->config.upperdir, GFP_KERNEL); if (!upper_layer->name) goto out; /* * Inherit SB_NOSEC flag from upperdir. * * This optimization changes behavior when a security related attribute * (suid/sgid/security.*) is changed on an underlying layer. This is * okay because we don't yet have guarantees in that case, but it will * need careful treatment once we want to honour changes to underlying * filesystems. */ if (upper_mnt->mnt_sb->s_flags & SB_NOSEC) sb->s_flags |= SB_NOSEC; if (ovl_inuse_trylock(ovl_upper_mnt(ofs)->mnt_root)) { ofs->upperdir_locked = true; } else { err = ovl_report_in_use(ofs, "upperdir"); if (err) goto out; } err = 0; out: return err; } /* * Returns 1 if RENAME_WHITEOUT is supported, 0 if not supported and * negative values if error is encountered. 
*/ static int ovl_check_rename_whiteout(struct ovl_fs *ofs) { struct dentry *workdir = ofs->workdir; struct inode *dir = d_inode(workdir); struct dentry *temp; struct dentry *dest; struct dentry *whiteout; struct name_snapshot name; int err; inode_lock_nested(dir, I_MUTEX_PARENT); temp = ovl_create_temp(ofs, workdir, OVL_CATTR(S_IFREG | 0)); err = PTR_ERR(temp); if (IS_ERR(temp)) goto out_unlock; dest = ovl_lookup_temp(ofs, workdir); err = PTR_ERR(dest); if (IS_ERR(dest)) { dput(temp); goto out_unlock; } /* Name is inline and stable - using snapshot as a copy helper */ take_dentry_name_snapshot(&name, temp); err = ovl_do_rename(ofs, dir, temp, dir, dest, RENAME_WHITEOUT); if (err) { if (err == -EINVAL) err = 0; goto cleanup_temp; } whiteout = ovl_lookup_upper(ofs, name.name.name, workdir, name.name.len); err = PTR_ERR(whiteout); if (IS_ERR(whiteout)) goto cleanup_temp; err = ovl_is_whiteout(whiteout); /* Best effort cleanup of whiteout and temp file */ if (err) ovl_cleanup(ofs, dir, whiteout); dput(whiteout); cleanup_temp: ovl_cleanup(ofs, dir, temp); release_dentry_name_snapshot(&name); dput(temp); dput(dest); out_unlock: inode_unlock(dir); return err; } static struct dentry *ovl_lookup_or_create(struct ovl_fs *ofs, struct dentry *parent, const char *name, umode_t mode) { size_t len = strlen(name); struct dentry *child; inode_lock_nested(parent->d_inode, I_MUTEX_PARENT); child = ovl_lookup_upper(ofs, name, parent, len); if (!IS_ERR(child) && !child->d_inode) child = ovl_create_real(ofs, parent->d_inode, child, OVL_CATTR(mode)); inode_unlock(parent->d_inode); dput(parent); return child; } /* * Creates $workdir/work/incompat/volatile/dirty file if it is not already * present. */ static int ovl_create_volatile_dirty(struct ovl_fs *ofs) { unsigned int ctr; struct dentry *d = dget(ofs->workbasedir); static const char *const volatile_path[] = { OVL_WORKDIR_NAME, "incompat", "volatile", "dirty" }; const char *const *name = volatile_path; for (ctr = ARRAY_SIZE(volatile_path); ctr; ctr--, name++) { d = ovl_lookup_or_create(ofs, d, *name, ctr > 1 ? S_IFDIR : S_IFREG); if (IS_ERR(d)) return PTR_ERR(d); } dput(d); return 0; } static int ovl_make_workdir(struct super_block *sb, struct ovl_fs *ofs, const struct path *workpath) { struct vfsmount *mnt = ovl_upper_mnt(ofs); struct dentry *workdir; struct file *tmpfile; bool rename_whiteout; bool d_type; int fh_type; int err; err = mnt_want_write(mnt); if (err) return err; workdir = ovl_workdir_create(ofs, OVL_WORKDIR_NAME, false); err = PTR_ERR(workdir); if (IS_ERR_OR_NULL(workdir)) goto out; ofs->workdir = workdir; err = ovl_setup_trap(sb, ofs->workdir, &ofs->workdir_trap, "workdir"); if (err) goto out; /* * Upper should support d_type, else whiteouts are visible. Given * workdir and upper are on same fs, we can do iterate_dir() on * workdir. This check requires successful creation of workdir in * previous step. 
*/ err = ovl_check_d_type_supported(workpath); if (err < 0) goto out; d_type = err; if (!d_type) pr_warn("upper fs needs to support d_type.\n"); /* Check if upper/work fs supports O_TMPFILE */ tmpfile = ovl_do_tmpfile(ofs, ofs->workdir, S_IFREG | 0); ofs->tmpfile = !IS_ERR(tmpfile); if (ofs->tmpfile) fput(tmpfile); else pr_warn("upper fs does not support tmpfile.\n"); /* Check if upper/work fs supports RENAME_WHITEOUT */ err = ovl_check_rename_whiteout(ofs); if (err < 0) goto out; rename_whiteout = err; if (!rename_whiteout) pr_warn("upper fs does not support RENAME_WHITEOUT.\n"); /* * Check if upper/work fs supports (trusted|user).overlay.* xattr */ err = ovl_setxattr(ofs, ofs->workdir, OVL_XATTR_OPAQUE, "0", 1); if (err) { pr_warn("failed to set xattr on upper\n"); ofs->noxattr = true; if (ovl_redirect_follow(ofs)) { ofs->config.redirect_mode = OVL_REDIRECT_NOFOLLOW; pr_warn("...falling back to redirect_dir=nofollow.\n"); } if (ofs->config.metacopy) { ofs->config.metacopy = false; pr_warn("...falling back to metacopy=off.\n"); } if (ofs->config.index) { ofs->config.index = false; pr_warn("...falling back to index=off.\n"); } if (ovl_has_fsid(ofs)) { ofs->config.uuid = OVL_UUID_NULL; pr_warn("...falling back to uuid=null.\n"); } /* * xattr support is required for persistent st_ino. * Without persistent st_ino, xino=auto falls back to xino=off. */ if (ofs->config.xino == OVL_XINO_AUTO) { ofs->config.xino = OVL_XINO_OFF; pr_warn("...falling back to xino=off.\n"); } if (err == -EPERM && !ofs->config.userxattr) pr_info("try mounting with 'userxattr' option\n"); err = 0; } else { ovl_removexattr(ofs, ofs->workdir, OVL_XATTR_OPAQUE); } /* * We allowed sub-optimal upper fs configuration and don't want to break * users over kernel upgrade, but we never allowed remote upper fs, so * we can enforce strict requirements for remote upper fs. */ if (ovl_dentry_remote(ofs->workdir) && (!d_type || !rename_whiteout || ofs->noxattr)) { pr_err("upper fs missing required features.\n"); err = -EINVAL; goto out; } /* * For volatile mount, create a incompat/volatile/dirty file to keep * track of it. 
*/ if (ofs->config.ovl_volatile) { err = ovl_create_volatile_dirty(ofs); if (err < 0) { pr_err("Failed to create volatile/dirty file.\n"); goto out; } } /* Check if upper/work fs supports file handles */ fh_type = ovl_can_decode_fh(ofs->workdir->d_sb); if (ofs->config.index && !fh_type) { ofs->config.index = false; pr_warn("upper fs does not support file handles, falling back to index=off.\n"); } ofs->nofh |= !fh_type; /* Check if upper fs has 32bit inode numbers */ if (fh_type != FILEID_INO32_GEN) ofs->xino_mode = -1; /* NFS export of r/w mount depends on index */ if (ofs->config.nfs_export && !ofs->config.index) { pr_warn("NFS export requires \"index=on\", falling back to nfs_export=off.\n"); ofs->config.nfs_export = false; } out: mnt_drop_write(mnt); return err; } static int ovl_get_workdir(struct super_block *sb, struct ovl_fs *ofs, const struct path *upperpath, const struct path *workpath) { int err; err = -EINVAL; if (upperpath->mnt != workpath->mnt) { pr_err("workdir and upperdir must reside under the same mount\n"); return err; } if (!ovl_workdir_ok(workpath->dentry, upperpath->dentry)) { pr_err("workdir and upperdir must be separate subtrees\n"); return err; } ofs->workbasedir = dget(workpath->dentry); if (ovl_inuse_trylock(ofs->workbasedir)) { ofs->workdir_locked = true; } else { err = ovl_report_in_use(ofs, "workdir"); if (err) return err; } err = ovl_setup_trap(sb, ofs->workbasedir, &ofs->workbasedir_trap, "workdir"); if (err) return err; return ovl_make_workdir(sb, ofs, workpath); } static int ovl_get_indexdir(struct super_block *sb, struct ovl_fs *ofs, struct ovl_entry *oe, const struct path *upperpath) { struct vfsmount *mnt = ovl_upper_mnt(ofs); struct dentry *indexdir; int err; err = mnt_want_write(mnt); if (err) return err; /* Verify lower root is upper root origin */ err = ovl_verify_origin(ofs, upperpath->dentry, ovl_lowerstack(oe)->dentry, true); if (err) { pr_err("failed to verify upper root origin\n"); goto out; } /* index dir will act also as workdir */ iput(ofs->workdir_trap); ofs->workdir_trap = NULL; dput(ofs->workdir); ofs->workdir = NULL; indexdir = ovl_workdir_create(ofs, OVL_INDEXDIR_NAME, true); if (IS_ERR(indexdir)) { err = PTR_ERR(indexdir); } else if (indexdir) { ofs->indexdir = indexdir; ofs->workdir = dget(indexdir); err = ovl_setup_trap(sb, ofs->indexdir, &ofs->indexdir_trap, "indexdir"); if (err) goto out; /* * Verify upper root is exclusively associated with index dir. * Older kernels stored upper fh in ".overlay.origin" * xattr. If that xattr exists, verify that it is a match to * upper dir file handle. In any case, verify or set xattr * ".overlay.upper" to indicate that index may have * directory entries. 
*/ if (ovl_check_origin_xattr(ofs, ofs->indexdir)) { err = ovl_verify_set_fh(ofs, ofs->indexdir, OVL_XATTR_ORIGIN, upperpath->dentry, true, false); if (err) pr_err("failed to verify index dir 'origin' xattr\n"); } err = ovl_verify_upper(ofs, ofs->indexdir, upperpath->dentry, true); if (err) pr_err("failed to verify index dir 'upper' xattr\n"); /* Cleanup bad/stale/orphan index entries */ if (!err) err = ovl_indexdir_cleanup(ofs); } if (err || !ofs->indexdir) pr_warn("try deleting index dir or mounting with '-o index=off' to disable inodes index.\n"); out: mnt_drop_write(mnt); return err; } static bool ovl_lower_uuid_ok(struct ovl_fs *ofs, const uuid_t *uuid) { unsigned int i; if (!ofs->config.nfs_export && !ovl_upper_mnt(ofs)) return true; /* * We allow using single lower with null uuid for index and nfs_export * for example to support those features with single lower squashfs. * To avoid regressions in setups of overlay with re-formatted lower * squashfs, do not allow decoding origin with lower null uuid unless * user opted-in to one of the new features that require following the * lower inode of non-dir upper. */ if (ovl_allow_offline_changes(ofs) && uuid_is_null(uuid)) return false; for (i = 0; i < ofs->numfs; i++) { /* * We use uuid to associate an overlay lower file handle with a * lower layer, so we can accept lower fs with null uuid as long * as all lower layers with null uuid are on the same fs. * if we detect multiple lower fs with the same uuid, we * disable lower file handle decoding on all of them. */ if (ofs->fs[i].is_lower && uuid_equal(&ofs->fs[i].sb->s_uuid, uuid)) { ofs->fs[i].bad_uuid = true; return false; } } return true; } /* Get a unique fsid for the layer */ static int ovl_get_fsid(struct ovl_fs *ofs, const struct path *path) { struct super_block *sb = path->mnt->mnt_sb; unsigned int i; dev_t dev; int err; bool bad_uuid = false; bool warn = false; for (i = 0; i < ofs->numfs; i++) { if (ofs->fs[i].sb == sb) return i; } if (!ovl_lower_uuid_ok(ofs, &sb->s_uuid)) { bad_uuid = true; if (ofs->config.xino == OVL_XINO_AUTO) { ofs->config.xino = OVL_XINO_OFF; warn = true; } if (ofs->config.index || ofs->config.nfs_export) { ofs->config.index = false; ofs->config.nfs_export = false; warn = true; } if (warn) { pr_warn("%s uuid detected in lower fs '%pd2', falling back to xino=%s,index=off,nfs_export=off.\n", uuid_is_null(&sb->s_uuid) ? "null" : "conflicting", path->dentry, ovl_xino_mode(&ofs->config)); } } err = get_anon_bdev(&dev); if (err) { pr_err("failed to get anonymous bdev for lowerpath\n"); return err; } ofs->fs[ofs->numfs].sb = sb; ofs->fs[ofs->numfs].pseudo_dev = dev; ofs->fs[ofs->numfs].bad_uuid = bad_uuid; return ofs->numfs++; } /* * The fsid after the last lower fsid is used for the data layers. * It is a "null fs" with a null sb, null uuid, and no pseudo dev. */ static int ovl_get_data_fsid(struct ovl_fs *ofs) { return ofs->numfs; } static int ovl_get_layers(struct super_block *sb, struct ovl_fs *ofs, struct ovl_fs_context *ctx, struct ovl_layer *layers) { int err; unsigned int i; size_t nr_merged_lower; ofs->fs = kcalloc(ctx->nr + 2, sizeof(struct ovl_sb), GFP_KERNEL); if (ofs->fs == NULL) return -ENOMEM; /* * idx/fsid 0 are reserved for upper fs even with lower only overlay * and the last fsid is reserved for "null fs" of the data layers. */ ofs->numfs++; /* * All lower layers that share the same fs as upper layer, use the same * pseudo_dev as upper layer. Allocate fs[0].pseudo_dev even for lower * only overlay to simplify ovl_fs_free(). 
* is_lower will be set if upper fs is shared with a lower layer. */ err = get_anon_bdev(&ofs->fs[0].pseudo_dev); if (err) { pr_err("failed to get anonymous bdev for upper fs\n"); return err; } if (ovl_upper_mnt(ofs)) { ofs->fs[0].sb = ovl_upper_mnt(ofs)->mnt_sb; ofs->fs[0].is_lower = false; } nr_merged_lower = ctx->nr - ctx->nr_data; for (i = 0; i < ctx->nr; i++) { struct ovl_fs_context_layer *l = &ctx->lower[i]; struct vfsmount *mnt; struct inode *trap; int fsid; if (i < nr_merged_lower) fsid = ovl_get_fsid(ofs, &l->path); else fsid = ovl_get_data_fsid(ofs); if (fsid < 0) return fsid; /* * Check if lower root conflicts with this overlay layers before * checking if it is in-use as upperdir/workdir of "another" * mount, because we do not bother to check in ovl_is_inuse() if * the upperdir/workdir is in fact in-use by our * upperdir/workdir. */ err = ovl_setup_trap(sb, l->path.dentry, &trap, "lowerdir"); if (err) return err; if (ovl_is_inuse(l->path.dentry)) { err = ovl_report_in_use(ofs, "lowerdir"); if (err) { iput(trap); return err; } } mnt = clone_private_mount(&l->path); err = PTR_ERR(mnt); if (IS_ERR(mnt)) { pr_err("failed to clone lowerpath\n"); iput(trap); return err; } /* * Make lower layers R/O. That way fchmod/fchown on lower file * will fail instead of modifying lower fs. */ mnt->mnt_flags |= MNT_READONLY | MNT_NOATIME; layers[ofs->numlayer].trap = trap; layers[ofs->numlayer].mnt = mnt; layers[ofs->numlayer].idx = ofs->numlayer; layers[ofs->numlayer].fsid = fsid; layers[ofs->numlayer].fs = &ofs->fs[fsid]; layers[ofs->numlayer].name = l->name; l->name = NULL; ofs->numlayer++; ofs->fs[fsid].is_lower = true; } /* * When all layers on same fs, overlay can use real inode numbers. * With mount option "xino=<on|auto>", mounter declares that there are * enough free high bits in underlying fs to hold the unique fsid. * If overlayfs does encounter underlying inodes using the high xino * bits reserved for fsid, it emits a warning and uses the original * inode number or a non persistent inode number allocated from a * dedicated range. */ if (ofs->numfs - !ovl_upper_mnt(ofs) == 1) { if (ofs->config.xino == OVL_XINO_ON) pr_info("\"xino=on\" is useless with all layers on same fs, ignore.\n"); ofs->xino_mode = 0; } else if (ofs->config.xino == OVL_XINO_OFF) { ofs->xino_mode = -1; } else if (ofs->xino_mode < 0) { /* * This is a roundup of number of bits needed for encoding * fsid, where fsid 0 is reserved for upper fs (even with * lower only overlay) +1 extra bit is reserved for the non * persistent inode number range that is used for resolving * xino lower bits overflow. 
*/ BUILD_BUG_ON(ilog2(OVL_MAX_STACK) > 30); ofs->xino_mode = ilog2(ofs->numfs - 1) + 2; } if (ofs->xino_mode > 0) { pr_info("\"xino\" feature enabled using %d upper inode bits.\n", ofs->xino_mode); } return 0; } static struct ovl_entry *ovl_get_lowerstack(struct super_block *sb, struct ovl_fs_context *ctx, struct ovl_fs *ofs, struct ovl_layer *layers) { int err; unsigned int i; size_t nr_merged_lower; struct ovl_entry *oe; struct ovl_path *lowerstack; struct ovl_fs_context_layer *l; if (!ofs->config.upperdir && ctx->nr == 1) { pr_err("at least 2 lowerdir are needed while upperdir nonexistent\n"); return ERR_PTR(-EINVAL); } err = -EINVAL; for (i = 0; i < ctx->nr; i++) { l = &ctx->lower[i]; err = ovl_lower_dir(l->name, &l->path, ofs, &sb->s_stack_depth); if (err) return ERR_PTR(err); } err = -EINVAL; sb->s_stack_depth++; if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) { pr_err("maximum fs stacking depth exceeded\n"); return ERR_PTR(err); } err = ovl_get_layers(sb, ofs, ctx, layers); if (err) return ERR_PTR(err); err = -ENOMEM; /* Data-only layers are not merged in root directory */ nr_merged_lower = ctx->nr - ctx->nr_data; oe = ovl_alloc_entry(nr_merged_lower); if (!oe) return ERR_PTR(err); lowerstack = ovl_lowerstack(oe); for (i = 0; i < nr_merged_lower; i++) { l = &ctx->lower[i]; lowerstack[i].dentry = dget(l->path.dentry); lowerstack[i].layer = &ofs->layers[i + 1]; } ofs->numdatalayer = ctx->nr_data; return oe; } /* * Check if this layer root is a descendant of: * - another layer of this overlayfs instance * - upper/work dir of any overlayfs instance */ static int ovl_check_layer(struct super_block *sb, struct ovl_fs *ofs, struct dentry *dentry, const char *name, bool is_lower) { struct dentry *next = dentry, *parent; int err = 0; if (!dentry) return 0; parent = dget_parent(next); /* Walk back ancestors to root (inclusive) looking for traps */ while (!err && parent != next) { if (is_lower && ovl_lookup_trap_inode(sb, parent)) { err = -ELOOP; pr_err("overlapping %s path\n", name); } else if (ovl_is_inuse(parent)) { err = ovl_report_in_use(ofs, name); } next = parent; parent = dget_parent(next); dput(next); } dput(parent); return err; } /* * Check if any of the layers or work dirs overlap. */ static int ovl_check_overlapping_layers(struct super_block *sb, struct ovl_fs *ofs) { int i, err; if (ovl_upper_mnt(ofs)) { err = ovl_check_layer(sb, ofs, ovl_upper_mnt(ofs)->mnt_root, "upperdir", false); if (err) return err; /* * Checking workbasedir avoids hitting ovl_is_inuse(parent) of * this instance and covers overlapping work and index dirs, * unless work or index dir have been moved since created inside * workbasedir. In that case, we already have their traps in * inode cache and we will catch that case on lookup. 
*/ err = ovl_check_layer(sb, ofs, ofs->workbasedir, "workdir", false); if (err) return err; } for (i = 1; i < ofs->numlayer; i++) { err = ovl_check_layer(sb, ofs, ofs->layers[i].mnt->mnt_root, "lowerdir", true); if (err) return err; } return 0; } static struct dentry *ovl_get_root(struct super_block *sb, struct dentry *upperdentry, struct ovl_entry *oe) { struct dentry *root; struct ovl_path *lowerpath = ovl_lowerstack(oe); unsigned long ino = d_inode(lowerpath->dentry)->i_ino; int fsid = lowerpath->layer->fsid; struct ovl_inode_params oip = { .upperdentry = upperdentry, .oe = oe, }; root = d_make_root(ovl_new_inode(sb, S_IFDIR, 0)); if (!root) return NULL; if (upperdentry) { /* Root inode uses upper st_ino/i_ino */ ino = d_inode(upperdentry)->i_ino; fsid = 0; ovl_dentry_set_upper_alias(root); if (ovl_is_impuredir(sb, upperdentry)) ovl_set_flag(OVL_IMPURE, d_inode(root)); } /* Root is always merge -> can have whiteouts */ ovl_set_flag(OVL_WHITEOUTS, d_inode(root)); ovl_dentry_set_flag(OVL_E_CONNECTED, root); ovl_set_upperdata(d_inode(root)); ovl_inode_init(d_inode(root), &oip, ino, fsid); ovl_dentry_init_flags(root, upperdentry, oe, DCACHE_OP_WEAK_REVALIDATE); /* root keeps a reference of upperdentry */ dget(upperdentry); return root; } int ovl_fill_super(struct super_block *sb, struct fs_context *fc) { struct ovl_fs *ofs = sb->s_fs_info; struct ovl_fs_context *ctx = fc->fs_private; struct dentry *root_dentry; struct ovl_entry *oe; struct ovl_layer *layers; struct cred *cred; int err; err = -EIO; if (WARN_ON(fc->user_ns != current_user_ns())) goto out_err; sb->s_d_op = &ovl_dentry_operations; err = -ENOMEM; ofs->creator_cred = cred = prepare_creds(); if (!cred) goto out_err; err = ovl_fs_params_verify(ctx, &ofs->config); if (err) goto out_err; err = -EINVAL; if (ctx->nr == 0) { if (!(fc->sb_flags & SB_SILENT)) pr_err("missing 'lowerdir'\n"); goto out_err; } err = -ENOMEM; layers = kcalloc(ctx->nr + 1, sizeof(struct ovl_layer), GFP_KERNEL); if (!layers) goto out_err; ofs->layers = layers; /* Layer 0 is reserved for upper even if there's no upper */ ofs->numlayer = 1; sb->s_stack_depth = 0; sb->s_maxbytes = MAX_LFS_FILESIZE; atomic_long_set(&ofs->last_ino, 1); /* Assume underlying fs uses 32bit inodes unless proven otherwise */ if (ofs->config.xino != OVL_XINO_OFF) { ofs->xino_mode = BITS_PER_LONG - 32; if (!ofs->xino_mode) { pr_warn("xino not supported on 32bit kernel, falling back to xino=off.\n"); ofs->config.xino = OVL_XINO_OFF; } } /* alloc/destroy_inode needed for setting up traps in inode cache */ sb->s_op = &ovl_super_operations; if (ofs->config.upperdir) { struct super_block *upper_sb; err = -EINVAL; if (!ofs->config.workdir) { pr_err("missing 'workdir'\n"); goto out_err; } err = ovl_get_upper(sb, ofs, &layers[0], &ctx->upper); if (err) goto out_err; upper_sb = ovl_upper_mnt(ofs)->mnt_sb; if (!ovl_should_sync(ofs)) { ofs->errseq = errseq_sample(&upper_sb->s_wb_err); if (errseq_check(&upper_sb->s_wb_err, ofs->errseq)) { err = -EIO; pr_err("Cannot mount volatile when upperdir has an unseen error. 
Sync upperdir fs to clear state.\n"); goto out_err; } } err = ovl_get_workdir(sb, ofs, &ctx->upper, &ctx->work); if (err) goto out_err; if (!ofs->workdir) sb->s_flags |= SB_RDONLY; sb->s_stack_depth = upper_sb->s_stack_depth; sb->s_time_gran = upper_sb->s_time_gran; } oe = ovl_get_lowerstack(sb, ctx, ofs, layers); err = PTR_ERR(oe); if (IS_ERR(oe)) goto out_err; /* If the upper fs is nonexistent, we mark overlayfs r/o too */ if (!ovl_upper_mnt(ofs)) sb->s_flags |= SB_RDONLY; if (!ovl_origin_uuid(ofs) && ofs->numfs > 1) { pr_warn("The uuid=off requires a single fs for lower and upper, falling back to uuid=null.\n"); ofs->config.uuid = OVL_UUID_NULL; } else if (ovl_has_fsid(ofs) && ovl_upper_mnt(ofs)) { /* Use per instance persistent uuid/fsid */ ovl_init_uuid_xattr(sb, ofs, &ctx->upper); } if (!ovl_force_readonly(ofs) && ofs->config.index) { err = ovl_get_indexdir(sb, ofs, oe, &ctx->upper); if (err) goto out_free_oe; /* Force r/o mount with no index dir */ if (!ofs->indexdir) sb->s_flags |= SB_RDONLY; } err = ovl_check_overlapping_layers(sb, ofs); if (err) goto out_free_oe; /* Show index=off in /proc/mounts for forced r/o mount */ if (!ofs->indexdir) { ofs->config.index = false; if (ovl_upper_mnt(ofs) && ofs->config.nfs_export) { pr_warn("NFS export requires an index dir, falling back to nfs_export=off.\n"); ofs->config.nfs_export = false; } } if (ofs->config.metacopy && ofs->config.nfs_export) { pr_warn("NFS export is not supported with metadata only copy up, falling back to nfs_export=off.\n"); ofs->config.nfs_export = false; } /* * Support encoding decodable file handles with nfs_export=on * and encoding non-decodable file handles with nfs_export=off * if all layers support file handles. */ if (ofs->config.nfs_export) sb->s_export_op = &ovl_export_operations; else if (!ofs->nofh) sb->s_export_op = &ovl_export_fid_operations; /* Never override disk quota limits or use reserved space */ cap_lower(cred->cap_effective, CAP_SYS_RESOURCE); sb->s_magic = OVERLAYFS_SUPER_MAGIC; sb->s_xattr = ofs->config.userxattr ? ovl_user_xattr_handlers : ovl_trusted_xattr_handlers; sb->s_fs_info = ofs; sb->s_flags |= SB_POSIXACL; sb->s_iflags |= SB_I_SKIP_SYNC | SB_I_IMA_UNVERIFIABLE_SIGNATURE; err = -ENOMEM; root_dentry = ovl_get_root(sb, ctx->upper.dentry, oe); if (!root_dentry) goto out_free_oe; sb->s_root = root_dentry; return 0; out_free_oe: ovl_free_entry(oe); out_err: ovl_free_fs(ofs); sb->s_fs_info = NULL; return err; } struct file_system_type ovl_fs_type = { .owner = THIS_MODULE, .name = "overlay", .init_fs_context = ovl_init_fs_context, .parameters = ovl_parameter_spec, .fs_flags = FS_USERNS_MOUNT, .kill_sb = kill_anon_super, }; MODULE_ALIAS_FS("overlay"); static void ovl_inode_init_once(void *foo) { struct ovl_inode *oi = foo; inode_init_once(&oi->vfs_inode); } static int __init ovl_init(void) { int err; ovl_inode_cachep = kmem_cache_create("ovl_inode", sizeof(struct ovl_inode), 0, (SLAB_RECLAIM_ACCOUNT| SLAB_MEM_SPREAD|SLAB_ACCOUNT), ovl_inode_init_once); if (ovl_inode_cachep == NULL) return -ENOMEM; err = ovl_aio_request_cache_init(); if (!err) { err = register_filesystem(&ovl_fs_type); if (!err) return 0; ovl_aio_request_cache_destroy(); } kmem_cache_destroy(ovl_inode_cachep); return err; } static void __exit ovl_exit(void) { unregister_filesystem(&ovl_fs_type); /* * Make sure all delayed rcu free inodes are flushed before we * destroy cache. */ rcu_barrier(); kmem_cache_destroy(ovl_inode_cachep); ovl_aio_request_cache_destroy(); } module_init(ovl_init); module_exit(ovl_exit);
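/*
 * Illustrative sketch, not part of fs/overlayfs/super.c: the xino_mode
 * calculation from ovl_get_layers() above, replayed in user space so the
 * bit budget is easy to see.  ilog2_u() is a local helper standing in for
 * the kernel's ilog2(); the sample numfs values are made up.
 */
#include <stdio.h>

/* floor(log2(n)) for n >= 1, mirroring the kernel's ilog2() */
static unsigned int ilog2_u(unsigned long n)
{
        unsigned int bits = 0;

        while (n >>= 1)
                bits++;
        return bits;
}

int main(void)
{
        /* numfs counts unique underlying filesystems; slot 0 is the upper fs */
        unsigned int samples[] = { 2, 3, 5, 9 };

        for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                unsigned int numfs = samples[i];
                /* fsid bits rounded up, plus one bit for the overflow range */
                unsigned int xino_mode = ilog2_u(numfs - 1) + 2;

                printf("numfs=%u -> %u high inode bits reserved\n",
                       numfs, xino_mode);
        }
        return 0;
}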
linux-master
fs/overlayfs/super.c
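/*
 * Illustrative usage sketch, not part of fs/overlayfs/super.c: mounting an
 * overlay from user space with the lowerdir/upperdir/workdir triple that
 * ovl_fill_super() above parses and validates.  The paths are hypothetical
 * and must already exist; workdir has to be an empty directory on the same
 * filesystem as upperdir, and the caller needs CAP_SYS_ADMIN.
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/mount.h>

int main(void)
{
        const char *opts = "lowerdir=/lower,upperdir=/upper,workdir=/work";

        if (mount("overlay", "/merged", "overlay", 0, opts) < 0) {
                fprintf(stderr, "mount overlay: %s\n", strerror(errno));
                return 1;
        }

        printf("overlay mounted on /merged with '%s'\n", opts);
        return 0;
}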