/* * JFFS2 -- Journalling Flash File System, Version 2. * * Copyright © 2006 NEC Corporation * * Created by KaiGai Kohei <[email protected]> * * For licensing information, see the file 'LICENCE' in this directory. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define JFFS2_XATTR_IS_CORRUPTED 1 #include <linux/kernel.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/time.h> #include <linux/pagemap.h> #include <linux/highmem.h> #include <linux/crc32.h> #include <linux/jffs2.h> #include <linux/xattr.h> #include <linux/posix_acl_xattr.h> #include <linux/mtd/mtd.h> #include "nodelist.h" /* -------- xdatum related functions ---------------- * xattr_datum_hashkey(xprefix, xname, xvalue, xsize) * is used to calculate the xdatum hashkey. The remainder of the hashkey modulo XATTRINDEX_HASHSIZE is * the index of the xattr name/value pair cache (c->xattrindex). * is_xattr_datum_unchecked(c, xd) * returns 1 if the xdatum contains any unchecked raw nodes. If no raw node is * unchecked, it returns 0. * unload_xattr_datum(c, xd) * is used to release the xattr name/value pair and detach it from c->xattrindex. * reclaim_xattr_datum(c) * is used to reclaim xattr name/value pairs from the xattr name/value pair cache when * the cache's memory usage exceeds c->xdatum_mem_threshold. Currently, this threshold * is hard-coded as 32KiB. * do_verify_xattr_datum(c, xd) * is used to load the xdatum information, without the name/value pair, from the medium. * It's necessary once, because that information is not collected during the mounting * process when EBS is enabled. * 0 is returned on success. A negative return value means a recoverable error, and a * positive return value means an unrecoverable error. Thus, the caller must remove this xdatum * and its xref when a positive value is returned. * do_load_xattr_datum(c, xd) * is used to load the name/value pair from the medium. * The meaning of the return value is the same as for do_verify_xattr_datum(). * load_xattr_datum(c, xd) * is a wrapper around do_verify_xattr_datum() and do_load_xattr_datum(). * If xd needs do_verify_xattr_datum() first, it is called before * do_load_xattr_datum(). The meaning of the return value is the same as for do_verify_xattr_datum(). * save_xattr_datum(c, xd) * is used to write the xdatum to the medium. xd->version will be incremented. * create_xattr_datum(c, xprefix, xname, xvalue, xsize) * is used to create a new xdatum and write it to the medium. * unrefer_xattr_datum(c, xd) * is used to delete an xdatum. When nothing refers to this xdatum any longer, JFFS2_XFLAGS_DEAD * is set on xd->flags and it is either chained onto xattr_dead_list or released immediately. * In the first case, the garbage collector releases it later.
* -------------------------------------------------- */ static uint32_t xattr_datum_hashkey(int xprefix, const char *xname, const char *xvalue, int xsize) { int name_len = strlen(xname); return crc32(xprefix, xname, name_len) ^ crc32(xprefix, xvalue, xsize); } static int is_xattr_datum_unchecked(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) { struct jffs2_raw_node_ref *raw; int rc = 0; spin_lock(&c->erase_completion_lock); for (raw=xd->node; raw != (void *)xd; raw=raw->next_in_ino) { if (ref_flags(raw) == REF_UNCHECKED) { rc = 1; break; } } spin_unlock(&c->erase_completion_lock); return rc; } static void unload_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) { /* must be called under down_write(xattr_sem) */ D1(dbg_xattr("%s: xid=%u, version=%u\n", __func__, xd->xid, xd->version)); if (xd->xname) { c->xdatum_mem_usage -= (xd->name_len + 1 + xd->value_len); kfree(xd->xname); } list_del_init(&xd->xindex); xd->hashkey = 0; xd->xname = NULL; xd->xvalue = NULL; } static void reclaim_xattr_datum(struct jffs2_sb_info *c) { /* must be called under down_write(xattr_sem) */ struct jffs2_xattr_datum *xd, *_xd; uint32_t target, before; static int index = 0; int count; if (c->xdatum_mem_threshold > c->xdatum_mem_usage) return; before = c->xdatum_mem_usage; target = c->xdatum_mem_usage * 4 / 5; /* 20% reduction */ for (count = 0; count < XATTRINDEX_HASHSIZE; count++) { list_for_each_entry_safe(xd, _xd, &c->xattrindex[index], xindex) { if (xd->flags & JFFS2_XFLAGS_HOT) { xd->flags &= ~JFFS2_XFLAGS_HOT; } else if (!(xd->flags & JFFS2_XFLAGS_BIND)) { unload_xattr_datum(c, xd); } if (c->xdatum_mem_usage <= target) goto out; } index = (index+1) % XATTRINDEX_HASHSIZE; } out: JFFS2_NOTICE("xdatum_mem_usage from %u byte to %u byte (%u byte reclaimed)\n", before, c->xdatum_mem_usage, before - c->xdatum_mem_usage); } static int do_verify_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) { /* must be called under down_write(xattr_sem) */ struct jffs2_eraseblock *jeb; struct jffs2_raw_node_ref *raw; struct jffs2_raw_xattr rx; size_t readlen; uint32_t crc, offset, totlen; int rc; spin_lock(&c->erase_completion_lock); offset = ref_offset(xd->node); if (ref_flags(xd->node) == REF_PRISTINE) goto complete; spin_unlock(&c->erase_completion_lock); rc = jffs2_flash_read(c, offset, sizeof(rx), &readlen, (char *)&rx); if (rc || readlen != sizeof(rx)) { JFFS2_WARNING("jffs2_flash_read()=%d, req=%zu, read=%zu at %#08x\n", rc, sizeof(rx), readlen, offset); return rc ? 
rc : -EIO; } crc = crc32(0, &rx, sizeof(rx) - 4); if (crc != je32_to_cpu(rx.node_crc)) { JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n", offset, je32_to_cpu(rx.hdr_crc), crc); xd->flags |= JFFS2_XFLAGS_INVALID; return JFFS2_XATTR_IS_CORRUPTED; } totlen = PAD(sizeof(rx) + rx.name_len + 1 + je16_to_cpu(rx.value_len)); if (je16_to_cpu(rx.magic) != JFFS2_MAGIC_BITMASK || je16_to_cpu(rx.nodetype) != JFFS2_NODETYPE_XATTR || je32_to_cpu(rx.totlen) != totlen || je32_to_cpu(rx.xid) != xd->xid || je32_to_cpu(rx.version) != xd->version) { JFFS2_ERROR("inconsistent xdatum at %#08x, magic=%#04x/%#04x, " "nodetype=%#04x/%#04x, totlen=%u/%u, xid=%u/%u, version=%u/%u\n", offset, je16_to_cpu(rx.magic), JFFS2_MAGIC_BITMASK, je16_to_cpu(rx.nodetype), JFFS2_NODETYPE_XATTR, je32_to_cpu(rx.totlen), totlen, je32_to_cpu(rx.xid), xd->xid, je32_to_cpu(rx.version), xd->version); xd->flags |= JFFS2_XFLAGS_INVALID; return JFFS2_XATTR_IS_CORRUPTED; } xd->xprefix = rx.xprefix; xd->name_len = rx.name_len; xd->value_len = je16_to_cpu(rx.value_len); xd->data_crc = je32_to_cpu(rx.data_crc); spin_lock(&c->erase_completion_lock); complete: for (raw=xd->node; raw != (void *)xd; raw=raw->next_in_ino) { jeb = &c->blocks[ref_offset(raw) / c->sector_size]; totlen = PAD(ref_totlen(c, jeb, raw)); if (ref_flags(raw) == REF_UNCHECKED) { c->unchecked_size -= totlen; c->used_size += totlen; jeb->unchecked_size -= totlen; jeb->used_size += totlen; } raw->flash_offset = ref_offset(raw) | ((xd->node==raw) ? REF_PRISTINE : REF_NORMAL); } spin_unlock(&c->erase_completion_lock); /* unchecked xdatum is chained with c->xattr_unchecked */ list_del_init(&xd->xindex); dbg_xattr("success on verifying xdatum (xid=%u, version=%u)\n", xd->xid, xd->version); return 0; } static int do_load_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) { /* must be called under down_write(xattr_sem) */ char *data; size_t readlen; uint32_t crc, length; int i, ret, retry = 0; BUG_ON(ref_flags(xd->node) != REF_PRISTINE); BUG_ON(!list_empty(&xd->xindex)); retry: length = xd->name_len + 1 + xd->value_len; data = kmalloc(length, GFP_KERNEL); if (!data) return -ENOMEM; ret = jffs2_flash_read(c, ref_offset(xd->node)+sizeof(struct jffs2_raw_xattr), length, &readlen, data); if (ret || length!=readlen) { JFFS2_WARNING("jffs2_flash_read() returned %d, request=%d, readlen=%zu, at %#08x\n", ret, length, readlen, ref_offset(xd->node)); kfree(data); return ret ? ret : -EIO; } data[xd->name_len] = '\0'; crc = crc32(0, data, length); if (crc != xd->data_crc) { JFFS2_WARNING("node CRC failed (JFFS2_NODETYPE_XATTR)" " at %#08x, read: 0x%08x calculated: 0x%08x\n", ref_offset(xd->node), xd->data_crc, crc); kfree(data); xd->flags |= JFFS2_XFLAGS_INVALID; return JFFS2_XATTR_IS_CORRUPTED; } xd->flags |= JFFS2_XFLAGS_HOT; xd->xname = data; xd->xvalue = data + xd->name_len+1; c->xdatum_mem_usage += length; xd->hashkey = xattr_datum_hashkey(xd->xprefix, xd->xname, xd->xvalue, xd->value_len); i = xd->hashkey % XATTRINDEX_HASHSIZE; list_add(&xd->xindex, &c->xattrindex[i]); if (!retry) { retry = 1; reclaim_xattr_datum(c); if (!xd->xname) goto retry; } dbg_xattr("success on loading xdatum (xid=%u, xprefix=%u, xname='%s')\n", xd->xid, xd->xprefix, xd->xname); return 0; } static int load_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) { /* must be called under down_write(xattr_sem); * rc < 0 : recoverable error, try again * rc = 0 : success * rc > 0 : Unrecoverable error, this node should be deleted. 
*/ int rc = 0; BUG_ON(xd->flags & JFFS2_XFLAGS_DEAD); if (xd->xname) return 0; if (xd->flags & JFFS2_XFLAGS_INVALID) return JFFS2_XATTR_IS_CORRUPTED; if (unlikely(is_xattr_datum_unchecked(c, xd))) rc = do_verify_xattr_datum(c, xd); if (!rc) rc = do_load_xattr_datum(c, xd); return rc; } static int save_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) { /* must be called under down_write(xattr_sem) */ struct jffs2_raw_xattr rx; struct kvec vecs[2]; size_t length; int rc, totlen; uint32_t phys_ofs = write_ofs(c); BUG_ON(!xd->xname); BUG_ON(xd->flags & (JFFS2_XFLAGS_DEAD|JFFS2_XFLAGS_INVALID)); vecs[0].iov_base = &rx; vecs[0].iov_len = sizeof(rx); vecs[1].iov_base = xd->xname; vecs[1].iov_len = xd->name_len + 1 + xd->value_len; totlen = vecs[0].iov_len + vecs[1].iov_len; /* Setup raw-xattr */ memset(&rx, 0, sizeof(rx)); rx.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); rx.nodetype = cpu_to_je16(JFFS2_NODETYPE_XATTR); rx.totlen = cpu_to_je32(PAD(totlen)); rx.hdr_crc = cpu_to_je32(crc32(0, &rx, sizeof(struct jffs2_unknown_node) - 4)); rx.xid = cpu_to_je32(xd->xid); rx.version = cpu_to_je32(++xd->version); rx.xprefix = xd->xprefix; rx.name_len = xd->name_len; rx.value_len = cpu_to_je16(xd->value_len); rx.data_crc = cpu_to_je32(crc32(0, vecs[1].iov_base, vecs[1].iov_len)); rx.node_crc = cpu_to_je32(crc32(0, &rx, sizeof(struct jffs2_raw_xattr) - 4)); rc = jffs2_flash_writev(c, vecs, 2, phys_ofs, &length, 0); if (rc || totlen != length) { JFFS2_WARNING("jffs2_flash_writev()=%d, req=%u, wrote=%zu, at %#08x\n", rc, totlen, length, phys_ofs); rc = rc ? rc : -EIO; if (length) jffs2_add_physical_node_ref(c, phys_ofs | REF_OBSOLETE, PAD(totlen), NULL); return rc; } /* success */ jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, PAD(totlen), (void *)xd); dbg_xattr("success on saving xdatum (xid=%u, version=%u, xprefix=%u, xname='%s')\n", xd->xid, xd->version, xd->xprefix, xd->xname); return 0; } static struct jffs2_xattr_datum *create_xattr_datum(struct jffs2_sb_info *c, int xprefix, const char *xname, const char *xvalue, int xsize) { /* must be called under down_write(xattr_sem) */ struct jffs2_xattr_datum *xd; uint32_t hashkey, name_len; char *data; int i, rc; /* Search xattr_datum has same xname/xvalue by index */ hashkey = xattr_datum_hashkey(xprefix, xname, xvalue, xsize); i = hashkey % XATTRINDEX_HASHSIZE; list_for_each_entry(xd, &c->xattrindex[i], xindex) { if (xd->hashkey==hashkey && xd->xprefix==xprefix && xd->value_len==xsize && !strcmp(xd->xname, xname) && !memcmp(xd->xvalue, xvalue, xsize)) { atomic_inc(&xd->refcnt); return xd; } } /* Not found, Create NEW XATTR-Cache */ name_len = strlen(xname); xd = jffs2_alloc_xattr_datum(); if (!xd) return ERR_PTR(-ENOMEM); data = kmalloc(name_len + 1 + xsize, GFP_KERNEL); if (!data) { jffs2_free_xattr_datum(xd); return ERR_PTR(-ENOMEM); } strcpy(data, xname); memcpy(data + name_len + 1, xvalue, xsize); atomic_set(&xd->refcnt, 1); xd->xid = ++c->highest_xid; xd->flags |= JFFS2_XFLAGS_HOT; xd->xprefix = xprefix; xd->hashkey = hashkey; xd->xname = data; xd->xvalue = data + name_len + 1; xd->name_len = name_len; xd->value_len = xsize; xd->data_crc = crc32(0, data, xd->name_len + 1 + xd->value_len); rc = save_xattr_datum(c, xd); if (rc) { kfree(xd->xname); jffs2_free_xattr_datum(xd); return ERR_PTR(rc); } /* Insert Hash Index */ i = hashkey % XATTRINDEX_HASHSIZE; list_add(&xd->xindex, &c->xattrindex[i]); c->xdatum_mem_usage += (xd->name_len + 1 + xd->value_len); reclaim_xattr_datum(c); return xd; } static void unrefer_xattr_datum(struct 
jffs2_sb_info *c, struct jffs2_xattr_datum *xd) { /* must be called under down_write(xattr_sem) */ if (atomic_dec_and_lock(&xd->refcnt, &c->erase_completion_lock)) { unload_xattr_datum(c, xd); xd->flags |= JFFS2_XFLAGS_DEAD; if (xd->node == (void *)xd) { BUG_ON(!(xd->flags & JFFS2_XFLAGS_INVALID)); jffs2_free_xattr_datum(xd); } else { list_add(&xd->xindex, &c->xattr_dead_list); } spin_unlock(&c->erase_completion_lock); dbg_xattr("xdatum(xid=%u, version=%u) was removed.\n", xd->xid, xd->version); } } /* -------- xref related functions ------------------ * verify_xattr_ref(c, ref) * is used to load xref information from medium. Because summary data does not * contain xid/ino, it's necessary to verify once while mounting process. * save_xattr_ref(c, ref) * is used to write xref to medium. If delete marker is marked, it write * a delete marker of xref into medium. * create_xattr_ref(c, ic, xd) * is used to create a new xref and write to medium. * delete_xattr_ref(c, ref) * is used to delete jffs2_xattr_ref. It marks xref XREF_DELETE_MARKER, * and allows GC to reclaim those physical nodes. * jffs2_xattr_delete_inode(c, ic) * is called to remove xrefs related to obsolete inode when inode is unlinked. * jffs2_xattr_free_inode(c, ic) * is called to release xattr related objects when unmounting. * check_xattr_ref_inode(c, ic) * is used to confirm inode does not have duplicate xattr name/value pair. * jffs2_xattr_do_crccheck_inode(c, ic) * is used to force xattr data integrity check during the initial gc scan. * -------------------------------------------------- */ static int verify_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref) { struct jffs2_eraseblock *jeb; struct jffs2_raw_node_ref *raw; struct jffs2_raw_xref rr; size_t readlen; uint32_t crc, offset, totlen; int rc; spin_lock(&c->erase_completion_lock); if (ref_flags(ref->node) != REF_UNCHECKED) goto complete; offset = ref_offset(ref->node); spin_unlock(&c->erase_completion_lock); rc = jffs2_flash_read(c, offset, sizeof(rr), &readlen, (char *)&rr); if (rc || sizeof(rr) != readlen) { JFFS2_WARNING("jffs2_flash_read()=%d, req=%zu, read=%zu, at %#08x\n", rc, sizeof(rr), readlen, offset); return rc ? rc : -EIO; } /* obsolete node */ crc = crc32(0, &rr, sizeof(rr) - 4); if (crc != je32_to_cpu(rr.node_crc)) { JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n", offset, je32_to_cpu(rr.node_crc), crc); return JFFS2_XATTR_IS_CORRUPTED; } if (je16_to_cpu(rr.magic) != JFFS2_MAGIC_BITMASK || je16_to_cpu(rr.nodetype) != JFFS2_NODETYPE_XREF || je32_to_cpu(rr.totlen) != PAD(sizeof(rr))) { JFFS2_ERROR("inconsistent xref at %#08x, magic=%#04x/%#04x, " "nodetype=%#04x/%#04x, totlen=%u/%zu\n", offset, je16_to_cpu(rr.magic), JFFS2_MAGIC_BITMASK, je16_to_cpu(rr.nodetype), JFFS2_NODETYPE_XREF, je32_to_cpu(rr.totlen), PAD(sizeof(rr))); return JFFS2_XATTR_IS_CORRUPTED; } ref->ino = je32_to_cpu(rr.ino); ref->xid = je32_to_cpu(rr.xid); ref->xseqno = je32_to_cpu(rr.xseqno); if (ref->xseqno > c->highest_xseqno) c->highest_xseqno = (ref->xseqno & ~XREF_DELETE_MARKER); spin_lock(&c->erase_completion_lock); complete: for (raw=ref->node; raw != (void *)ref; raw=raw->next_in_ino) { jeb = &c->blocks[ref_offset(raw) / c->sector_size]; totlen = PAD(ref_totlen(c, jeb, raw)); if (ref_flags(raw) == REF_UNCHECKED) { c->unchecked_size -= totlen; c->used_size += totlen; jeb->unchecked_size -= totlen; jeb->used_size += totlen; } raw->flash_offset = ref_offset(raw) | ((ref->node==raw) ? 
REF_PRISTINE : REF_NORMAL); } spin_unlock(&c->erase_completion_lock); dbg_xattr("success on verifying xref (ino=%u, xid=%u) at %#08x\n", ref->ino, ref->xid, ref_offset(ref->node)); return 0; } static int save_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref) { /* must be called under down_write(xattr_sem) */ struct jffs2_raw_xref rr; size_t length; uint32_t xseqno, phys_ofs = write_ofs(c); int ret; rr.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); rr.nodetype = cpu_to_je16(JFFS2_NODETYPE_XREF); rr.totlen = cpu_to_je32(PAD(sizeof(rr))); rr.hdr_crc = cpu_to_je32(crc32(0, &rr, sizeof(struct jffs2_unknown_node) - 4)); xseqno = (c->highest_xseqno += 2); if (is_xattr_ref_dead(ref)) { xseqno |= XREF_DELETE_MARKER; rr.ino = cpu_to_je32(ref->ino); rr.xid = cpu_to_je32(ref->xid); } else { rr.ino = cpu_to_je32(ref->ic->ino); rr.xid = cpu_to_je32(ref->xd->xid); } rr.xseqno = cpu_to_je32(xseqno); rr.node_crc = cpu_to_je32(crc32(0, &rr, sizeof(rr) - 4)); ret = jffs2_flash_write(c, phys_ofs, sizeof(rr), &length, (char *)&rr); if (ret || sizeof(rr) != length) { JFFS2_WARNING("jffs2_flash_write() returned %d, request=%zu, retlen=%zu, at %#08x\n", ret, sizeof(rr), length, phys_ofs); ret = ret ? ret : -EIO; if (length) jffs2_add_physical_node_ref(c, phys_ofs | REF_OBSOLETE, PAD(sizeof(rr)), NULL); return ret; } /* success */ ref->xseqno = xseqno; jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, PAD(sizeof(rr)), (void *)ref); dbg_xattr("success on saving xref (ino=%u, xid=%u)\n", ref->ic->ino, ref->xd->xid); return 0; } static struct jffs2_xattr_ref *create_xattr_ref(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, struct jffs2_xattr_datum *xd) { /* must be called under down_write(xattr_sem) */ struct jffs2_xattr_ref *ref; int ret; ref = jffs2_alloc_xattr_ref(); if (!ref) return ERR_PTR(-ENOMEM); ref->ic = ic; ref->xd = xd; ret = save_xattr_ref(c, ref); if (ret) { jffs2_free_xattr_ref(ref); return ERR_PTR(ret); } /* Chain to inode */ ref->next = ic->xref; ic->xref = ref; return ref; /* success */ } static void delete_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref) { /* must be called under down_write(xattr_sem) */ struct jffs2_xattr_datum *xd; xd = ref->xd; ref->xseqno |= XREF_DELETE_MARKER; ref->ino = ref->ic->ino; ref->xid = ref->xd->xid; spin_lock(&c->erase_completion_lock); ref->next = c->xref_dead_list; c->xref_dead_list = ref; spin_unlock(&c->erase_completion_lock); dbg_xattr("xref(ino=%u, xid=%u, xseqno=%u) was removed.\n", ref->ino, ref->xid, ref->xseqno); unrefer_xattr_datum(c, xd); } void jffs2_xattr_delete_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic) { /* It's called from jffs2_evict_inode() on inode removing. When an inode with XATTR is removed, those XATTRs must be removed. */ struct jffs2_xattr_ref *ref, *_ref; if (!ic || ic->pino_nlink > 0) return; down_write(&c->xattr_sem); for (ref = ic->xref; ref; ref = _ref) { _ref = ref->next; delete_xattr_ref(c, ref); } ic->xref = NULL; up_write(&c->xattr_sem); } void jffs2_xattr_free_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic) { /* It's called from jffs2_free_ino_caches() until unmounting FS. 
*/ struct jffs2_xattr_datum *xd; struct jffs2_xattr_ref *ref, *_ref; down_write(&c->xattr_sem); for (ref = ic->xref; ref; ref = _ref) { _ref = ref->next; xd = ref->xd; if (atomic_dec_and_test(&xd->refcnt)) { unload_xattr_datum(c, xd); jffs2_free_xattr_datum(xd); } jffs2_free_xattr_ref(ref); } ic->xref = NULL; up_write(&c->xattr_sem); } static int check_xattr_ref_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic) { /* success of check_xattr_ref_inode() means that inode (ic) dose not have * duplicate name/value pairs. If duplicate name/value pair would be found, * one will be removed. */ struct jffs2_xattr_ref *ref, *cmp, **pref, **pcmp; int rc = 0; if (likely(ic->flags & INO_FLAGS_XATTR_CHECKED)) return 0; down_write(&c->xattr_sem); retry: rc = 0; for (ref=ic->xref, pref=&ic->xref; ref; pref=&ref->next, ref=ref->next) { if (!ref->xd->xname) { rc = load_xattr_datum(c, ref->xd); if (unlikely(rc > 0)) { *pref = ref->next; delete_xattr_ref(c, ref); goto retry; } else if (unlikely(rc < 0)) goto out; } for (cmp=ref->next, pcmp=&ref->next; cmp; pcmp=&cmp->next, cmp=cmp->next) { if (!cmp->xd->xname) { ref->xd->flags |= JFFS2_XFLAGS_BIND; rc = load_xattr_datum(c, cmp->xd); ref->xd->flags &= ~JFFS2_XFLAGS_BIND; if (unlikely(rc > 0)) { *pcmp = cmp->next; delete_xattr_ref(c, cmp); goto retry; } else if (unlikely(rc < 0)) goto out; } if (ref->xd->xprefix == cmp->xd->xprefix && !strcmp(ref->xd->xname, cmp->xd->xname)) { if (ref->xseqno > cmp->xseqno) { *pcmp = cmp->next; delete_xattr_ref(c, cmp); } else { *pref = ref->next; delete_xattr_ref(c, ref); } goto retry; } } } ic->flags |= INO_FLAGS_XATTR_CHECKED; out: up_write(&c->xattr_sem); return rc; } void jffs2_xattr_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic) { check_xattr_ref_inode(c, ic); } /* -------- xattr subsystem functions --------------- * jffs2_init_xattr_subsystem(c) * is used to initialize semaphore and list_head, and some variables. * jffs2_find_xattr_datum(c, xid) * is used to lookup xdatum while scanning process. * jffs2_clear_xattr_subsystem(c) * is used to release any xattr related objects. * jffs2_build_xattr_subsystem(c) * is used to associate xdatum and xref while super block building process. * jffs2_setup_xattr_datum(c, xid, version) * is used to insert xdatum while scanning process. * -------------------------------------------------- */ void jffs2_init_xattr_subsystem(struct jffs2_sb_info *c) { int i; for (i=0; i < XATTRINDEX_HASHSIZE; i++) INIT_LIST_HEAD(&c->xattrindex[i]); INIT_LIST_HEAD(&c->xattr_unchecked); INIT_LIST_HEAD(&c->xattr_dead_list); c->xref_dead_list = NULL; c->xref_temp = NULL; init_rwsem(&c->xattr_sem); c->highest_xid = 0; c->highest_xseqno = 0; c->xdatum_mem_usage = 0; c->xdatum_mem_threshold = 32 * 1024; /* Default 32KB */ } static struct jffs2_xattr_datum *jffs2_find_xattr_datum(struct jffs2_sb_info *c, uint32_t xid) { struct jffs2_xattr_datum *xd; int i = xid % XATTRINDEX_HASHSIZE; /* It's only used in scanning/building process. 
*/ BUG_ON(!(c->flags & (JFFS2_SB_FLAG_SCANNING|JFFS2_SB_FLAG_BUILDING))); list_for_each_entry(xd, &c->xattrindex[i], xindex) { if (xd->xid==xid) return xd; } return NULL; } void jffs2_clear_xattr_subsystem(struct jffs2_sb_info *c) { struct jffs2_xattr_datum *xd, *_xd; struct jffs2_xattr_ref *ref, *_ref; int i; for (ref=c->xref_temp; ref; ref = _ref) { _ref = ref->next; jffs2_free_xattr_ref(ref); } for (ref=c->xref_dead_list; ref; ref = _ref) { _ref = ref->next; jffs2_free_xattr_ref(ref); } for (i=0; i < XATTRINDEX_HASHSIZE; i++) { list_for_each_entry_safe(xd, _xd, &c->xattrindex[i], xindex) { list_del(&xd->xindex); kfree(xd->xname); jffs2_free_xattr_datum(xd); } } list_for_each_entry_safe(xd, _xd, &c->xattr_dead_list, xindex) { list_del(&xd->xindex); jffs2_free_xattr_datum(xd); } list_for_each_entry_safe(xd, _xd, &c->xattr_unchecked, xindex) { list_del(&xd->xindex); jffs2_free_xattr_datum(xd); } } #define XREF_TMPHASH_SIZE (128) int jffs2_build_xattr_subsystem(struct jffs2_sb_info *c) { struct jffs2_xattr_ref *ref, *_ref; struct jffs2_xattr_ref **xref_tmphash; struct jffs2_xattr_datum *xd, *_xd; struct jffs2_inode_cache *ic; struct jffs2_raw_node_ref *raw; int i, xdatum_count = 0, xdatum_unchecked_count = 0, xref_count = 0; int xdatum_orphan_count = 0, xref_orphan_count = 0, xref_dead_count = 0; BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING)); xref_tmphash = kcalloc(XREF_TMPHASH_SIZE, sizeof(struct jffs2_xattr_ref *), GFP_KERNEL); if (!xref_tmphash) return -ENOMEM; /* Phase.1 : Merge same xref */ for (ref=c->xref_temp; ref; ref=_ref) { struct jffs2_xattr_ref *tmp; _ref = ref->next; if (ref_flags(ref->node) != REF_PRISTINE) { if (verify_xattr_ref(c, ref)) { BUG_ON(ref->node->next_in_ino != (void *)ref); ref->node->next_in_ino = NULL; jffs2_mark_node_obsolete(c, ref->node); jffs2_free_xattr_ref(ref); continue; } } i = (ref->ino ^ ref->xid) % XREF_TMPHASH_SIZE; for (tmp=xref_tmphash[i]; tmp; tmp=tmp->next) { if (tmp->ino == ref->ino && tmp->xid == ref->xid) break; } if (tmp) { raw = ref->node; if (ref->xseqno > tmp->xseqno) { tmp->xseqno = ref->xseqno; raw->next_in_ino = tmp->node; tmp->node = raw; } else { raw->next_in_ino = tmp->node->next_in_ino; tmp->node->next_in_ino = raw; } jffs2_free_xattr_ref(ref); continue; } else { ref->next = xref_tmphash[i]; xref_tmphash[i] = ref; } } c->xref_temp = NULL; /* Phase.2 : Bind xref with inode_cache and xattr_datum */ for (i=0; i < XREF_TMPHASH_SIZE; i++) { for (ref=xref_tmphash[i]; ref; ref=_ref) { xref_count++; _ref = ref->next; if (is_xattr_ref_dead(ref)) { ref->next = c->xref_dead_list; c->xref_dead_list = ref; xref_dead_count++; continue; } /* At this point, ref->xid and ref->ino contain XID and inode number. ref->xd and ref->ic are not valid yet. 
*/ xd = jffs2_find_xattr_datum(c, ref->xid); ic = jffs2_get_ino_cache(c, ref->ino); if (!xd || !ic || !ic->pino_nlink) { dbg_xattr("xref(ino=%u, xid=%u, xseqno=%u) is orphan.\n", ref->ino, ref->xid, ref->xseqno); ref->xseqno |= XREF_DELETE_MARKER; ref->next = c->xref_dead_list; c->xref_dead_list = ref; xref_orphan_count++; continue; } ref->xd = xd; ref->ic = ic; atomic_inc(&xd->refcnt); ref->next = ic->xref; ic->xref = ref; } } /* Phase.3 : Link unchecked xdatum to xattr_unchecked list */ for (i=0; i < XATTRINDEX_HASHSIZE; i++) { list_for_each_entry_safe(xd, _xd, &c->xattrindex[i], xindex) { xdatum_count++; list_del_init(&xd->xindex); if (!atomic_read(&xd->refcnt)) { dbg_xattr("xdatum(xid=%u, version=%u) is orphan.\n", xd->xid, xd->version); xd->flags |= JFFS2_XFLAGS_DEAD; list_add(&xd->xindex, &c->xattr_unchecked); xdatum_orphan_count++; continue; } if (is_xattr_datum_unchecked(c, xd)) { dbg_xattr("unchecked xdatum(xid=%u, version=%u)\n", xd->xid, xd->version); list_add(&xd->xindex, &c->xattr_unchecked); xdatum_unchecked_count++; } } } /* build complete */ JFFS2_NOTICE("complete building xattr subsystem, %u of xdatum" " (%u unchecked, %u orphan) and " "%u of xref (%u dead, %u orphan) found.\n", xdatum_count, xdatum_unchecked_count, xdatum_orphan_count, xref_count, xref_dead_count, xref_orphan_count); kfree(xref_tmphash); return 0; } struct jffs2_xattr_datum *jffs2_setup_xattr_datum(struct jffs2_sb_info *c, uint32_t xid, uint32_t version) { struct jffs2_xattr_datum *xd; xd = jffs2_find_xattr_datum(c, xid); if (!xd) { xd = jffs2_alloc_xattr_datum(); if (!xd) return ERR_PTR(-ENOMEM); xd->xid = xid; xd->version = version; if (xd->xid > c->highest_xid) c->highest_xid = xd->xid; list_add_tail(&xd->xindex, &c->xattrindex[xid % XATTRINDEX_HASHSIZE]); } return xd; } /* -------- xattr subsystem functions --------------- * xprefix_to_handler(xprefix) * is used to translate xprefix into xattr_handler. * jffs2_listxattr(dentry, buffer, size) * is an implementation of listxattr handler on jffs2. * do_jffs2_getxattr(inode, xprefix, xname, buffer, size) * is an implementation of getxattr handler on jffs2. * do_jffs2_setxattr(inode, xprefix, xname, buffer, size, flags) * is an implementation of setxattr handler on jffs2. 
* -------------------------------------------------- */ const struct xattr_handler *jffs2_xattr_handlers[] = { &jffs2_user_xattr_handler, #ifdef CONFIG_JFFS2_FS_SECURITY &jffs2_security_xattr_handler, #endif &jffs2_trusted_xattr_handler, NULL }; static const char *jffs2_xattr_prefix(int xprefix, struct dentry *dentry) { const struct xattr_handler *ret = NULL; switch (xprefix) { case JFFS2_XPREFIX_USER: ret = &jffs2_user_xattr_handler; break; #ifdef CONFIG_JFFS2_FS_SECURITY case JFFS2_XPREFIX_SECURITY: ret = &jffs2_security_xattr_handler; break; #endif #ifdef CONFIG_JFFS2_FS_POSIX_ACL case JFFS2_XPREFIX_ACL_ACCESS: ret = &nop_posix_acl_access; break; case JFFS2_XPREFIX_ACL_DEFAULT: ret = &nop_posix_acl_default; break; #endif case JFFS2_XPREFIX_TRUSTED: ret = &jffs2_trusted_xattr_handler; break; default: return NULL; } if (!xattr_handler_can_list(ret, dentry)) return NULL; return xattr_prefix(ret); } ssize_t jffs2_listxattr(struct dentry *dentry, char *buffer, size_t size) { struct inode *inode = d_inode(dentry); struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); struct jffs2_inode_cache *ic = f->inocache; struct jffs2_xattr_ref *ref, **pref; struct jffs2_xattr_datum *xd; const char *prefix; ssize_t prefix_len, len, rc; int retry = 0; rc = check_xattr_ref_inode(c, ic); if (unlikely(rc)) return rc; down_read(&c->xattr_sem); retry: len = 0; for (ref=ic->xref, pref=&ic->xref; ref; pref=&ref->next, ref=ref->next) { BUG_ON(ref->ic != ic); xd = ref->xd; if (!xd->xname) { /* xdatum is unchached */ if (!retry) { retry = 1; up_read(&c->xattr_sem); down_write(&c->xattr_sem); goto retry; } else { rc = load_xattr_datum(c, xd); if (unlikely(rc > 0)) { *pref = ref->next; delete_xattr_ref(c, ref); goto retry; } else if (unlikely(rc < 0)) goto out; } } prefix = jffs2_xattr_prefix(xd->xprefix, dentry); if (!prefix) continue; prefix_len = strlen(prefix); rc = prefix_len + xd->name_len + 1; if (buffer) { if (rc > size - len) { rc = -ERANGE; goto out; } memcpy(buffer, prefix, prefix_len); buffer += prefix_len; memcpy(buffer, xd->xname, xd->name_len); buffer += xd->name_len; *buffer++ = 0; } len += rc; } rc = len; out: if (!retry) { up_read(&c->xattr_sem); } else { up_write(&c->xattr_sem); } return rc; } int do_jffs2_getxattr(struct inode *inode, int xprefix, const char *xname, char *buffer, size_t size) { struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); struct jffs2_inode_cache *ic = f->inocache; struct jffs2_xattr_datum *xd; struct jffs2_xattr_ref *ref, **pref; int rc, retry = 0; rc = check_xattr_ref_inode(c, ic); if (unlikely(rc)) return rc; down_read(&c->xattr_sem); retry: for (ref=ic->xref, pref=&ic->xref; ref; pref=&ref->next, ref=ref->next) { BUG_ON(ref->ic!=ic); xd = ref->xd; if (xd->xprefix != xprefix) continue; if (!xd->xname) { /* xdatum is unchached */ if (!retry) { retry = 1; up_read(&c->xattr_sem); down_write(&c->xattr_sem); goto retry; } else { rc = load_xattr_datum(c, xd); if (unlikely(rc > 0)) { *pref = ref->next; delete_xattr_ref(c, ref); goto retry; } else if (unlikely(rc < 0)) { goto out; } } } if (!strcmp(xname, xd->xname)) { rc = xd->value_len; if (buffer) { if (size < rc) { rc = -ERANGE; } else { memcpy(buffer, xd->xvalue, rc); } } goto out; } } rc = -ENODATA; out: if (!retry) { up_read(&c->xattr_sem); } else { up_write(&c->xattr_sem); } return rc; } int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname, const char *buffer, size_t size, int flags) { struct 
jffs2_inode_info *f = JFFS2_INODE_INFO(inode); struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); struct jffs2_inode_cache *ic = f->inocache; struct jffs2_xattr_datum *xd; struct jffs2_xattr_ref *ref, *newref, **pref; uint32_t length, request; int rc; rc = check_xattr_ref_inode(c, ic); if (unlikely(rc)) return rc; request = PAD(sizeof(struct jffs2_raw_xattr) + strlen(xname) + 1 + size); rc = jffs2_reserve_space(c, request, &length, ALLOC_NORMAL, JFFS2_SUMMARY_XATTR_SIZE); if (rc) { JFFS2_WARNING("jffs2_reserve_space()=%d, request=%u\n", rc, request); return rc; } /* Find existing xattr */ down_write(&c->xattr_sem); retry: for (ref=ic->xref, pref=&ic->xref; ref; pref=&ref->next, ref=ref->next) { xd = ref->xd; if (xd->xprefix != xprefix) continue; if (!xd->xname) { rc = load_xattr_datum(c, xd); if (unlikely(rc > 0)) { *pref = ref->next; delete_xattr_ref(c, ref); goto retry; } else if (unlikely(rc < 0)) goto out; } if (!strcmp(xd->xname, xname)) { if (flags & XATTR_CREATE) { rc = -EEXIST; goto out; } if (!buffer) { ref->ino = ic->ino; ref->xid = xd->xid; ref->xseqno |= XREF_DELETE_MARKER; rc = save_xattr_ref(c, ref); if (!rc) { *pref = ref->next; spin_lock(&c->erase_completion_lock); ref->next = c->xref_dead_list; c->xref_dead_list = ref; spin_unlock(&c->erase_completion_lock); unrefer_xattr_datum(c, xd); } else { ref->ic = ic; ref->xd = xd; ref->xseqno &= ~XREF_DELETE_MARKER; } goto out; } goto found; } } /* not found */ if (flags & XATTR_REPLACE) { rc = -ENODATA; goto out; } if (!buffer) { rc = -ENODATA; goto out; } found: xd = create_xattr_datum(c, xprefix, xname, buffer, size); if (IS_ERR(xd)) { rc = PTR_ERR(xd); goto out; } up_write(&c->xattr_sem); jffs2_complete_reservation(c); /* create xattr_ref */ request = PAD(sizeof(struct jffs2_raw_xref)); rc = jffs2_reserve_space(c, request, &length, ALLOC_NORMAL, JFFS2_SUMMARY_XREF_SIZE); down_write(&c->xattr_sem); if (rc) { JFFS2_WARNING("jffs2_reserve_space()=%d, request=%u\n", rc, request); unrefer_xattr_datum(c, xd); up_write(&c->xattr_sem); return rc; } if (ref) *pref = ref->next; newref = create_xattr_ref(c, ic, xd); if (IS_ERR(newref)) { if (ref) { ref->next = ic->xref; ic->xref = ref; } rc = PTR_ERR(newref); unrefer_xattr_datum(c, xd); } else if (ref) { delete_xattr_ref(c, ref); } out: up_write(&c->xattr_sem); jffs2_complete_reservation(c); return rc; } /* -------- garbage collector functions ------------- * jffs2_garbage_collect_xattr_datum(c, xd, raw) * is used to move xdatum into new node. * jffs2_garbage_collect_xattr_ref(c, ref, raw) * is used to move xref into new node. * jffs2_verify_xattr(c) * is used to call do_verify_xattr_datum() before garbage collecting. * jffs2_release_xattr_datum(c, xd) * is used to release an in-memory object of xdatum. * jffs2_release_xattr_ref(c, ref) * is used to release an in-memory object of xref. * -------------------------------------------------- */ int jffs2_garbage_collect_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd, struct jffs2_raw_node_ref *raw) { uint32_t totlen, length, old_ofs; int rc = 0; down_write(&c->xattr_sem); if (xd->node != raw) goto out; if (xd->flags & (JFFS2_XFLAGS_DEAD|JFFS2_XFLAGS_INVALID)) goto out; rc = load_xattr_datum(c, xd); if (unlikely(rc)) { rc = (rc > 0) ? 
0 : rc; goto out; } old_ofs = ref_offset(xd->node); totlen = PAD(sizeof(struct jffs2_raw_xattr) + xd->name_len + 1 + xd->value_len); rc = jffs2_reserve_space_gc(c, totlen, &length, JFFS2_SUMMARY_XATTR_SIZE); if (rc) { JFFS2_WARNING("jffs2_reserve_space_gc()=%d, request=%u\n", rc, totlen); goto out; } rc = save_xattr_datum(c, xd); if (!rc) dbg_xattr("xdatum (xid=%u, version=%u) GC'ed from %#08x to %08x\n", xd->xid, xd->version, old_ofs, ref_offset(xd->node)); out: if (!rc) jffs2_mark_node_obsolete(c, raw); up_write(&c->xattr_sem); return rc; } int jffs2_garbage_collect_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref, struct jffs2_raw_node_ref *raw) { uint32_t totlen, length, old_ofs; int rc = 0; down_write(&c->xattr_sem); BUG_ON(!ref->node); if (ref->node != raw) goto out; if (is_xattr_ref_dead(ref) && (raw->next_in_ino == (void *)ref)) goto out; old_ofs = ref_offset(ref->node); totlen = ref_totlen(c, c->gcblock, ref->node); rc = jffs2_reserve_space_gc(c, totlen, &length, JFFS2_SUMMARY_XREF_SIZE); if (rc) { JFFS2_WARNING("%s: jffs2_reserve_space_gc() = %d, request = %u\n", __func__, rc, totlen); goto out; } rc = save_xattr_ref(c, ref); if (!rc) dbg_xattr("xref (ino=%u, xid=%u) GC'ed from %#08x to %08x\n", ref->ic->ino, ref->xd->xid, old_ofs, ref_offset(ref->node)); out: if (!rc) jffs2_mark_node_obsolete(c, raw); up_write(&c->xattr_sem); return rc; } int jffs2_verify_xattr(struct jffs2_sb_info *c) { struct jffs2_xattr_datum *xd, *_xd; struct jffs2_eraseblock *jeb; struct jffs2_raw_node_ref *raw; uint32_t totlen; int rc; down_write(&c->xattr_sem); list_for_each_entry_safe(xd, _xd, &c->xattr_unchecked, xindex) { rc = do_verify_xattr_datum(c, xd); if (rc < 0) continue; list_del_init(&xd->xindex); spin_lock(&c->erase_completion_lock); for (raw=xd->node; raw != (void *)xd; raw=raw->next_in_ino) { if (ref_flags(raw) != REF_UNCHECKED) continue; jeb = &c->blocks[ref_offset(raw) / c->sector_size]; totlen = PAD(ref_totlen(c, jeb, raw)); c->unchecked_size -= totlen; c->used_size += totlen; jeb->unchecked_size -= totlen; jeb->used_size += totlen; raw->flash_offset = ref_offset(raw) | ((xd->node == (void *)raw) ? REF_PRISTINE : REF_NORMAL); } if (xd->flags & JFFS2_XFLAGS_DEAD) list_add(&xd->xindex, &c->xattr_dead_list); spin_unlock(&c->erase_completion_lock); } up_write(&c->xattr_sem); return list_empty(&c->xattr_unchecked) ? 1 : 0; } void jffs2_release_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) { /* must be called under spin_lock(&c->erase_completion_lock) */ if (atomic_read(&xd->refcnt) || xd->node != (void *)xd) return; list_del(&xd->xindex); jffs2_free_xattr_datum(xd); } void jffs2_release_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref) { /* must be called under spin_lock(&c->erase_completion_lock) */ struct jffs2_xattr_ref *tmp, **ptmp; if (ref->node != (void *)ref) return; for (tmp=c->xref_dead_list, ptmp=&c->xref_dead_list; tmp; ptmp=&tmp->next, tmp=tmp->next) { if (ref == tmp) { *ptmp = tmp->next; break; } } jffs2_free_xattr_ref(ref); }
linux-master
fs/jffs2/xattr.c
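The xattr_datum_hashkey() helper above derives a cache key by XOR-ing the CRC32 of the attribute name (seeded with the prefix id) with the CRC32 of the value; create_xattr_datum() then uses that key modulo XATTRINDEX_HASHSIZE as the bucket index when looking for an existing name/value pair. A minimal userspace sketch of that scheme follows. It is a sketch only: zlib's crc32() stands in for the kernel's crc32() (same polynomial, different seeding/finalisation, so the actual key values differ), HASHSIZE is a placeholder for XATTRINDEX_HASHSIZE, and the prefix id is a stand-in rather than a real JFFS2_XPREFIX_* constant.

/* Hedged userspace sketch of the xdatum hashing scheme; build with -lz. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <zlib.h>

#define HASHSIZE 57  /* placeholder for XATTRINDEX_HASHSIZE */

static uint32_t xdatum_hashkey(int xprefix, const char *xname,
                               const char *xvalue, int xsize)
{
	/* CRC of the name seeded with the prefix, XORed with the CRC of the value */
	uint32_t name_crc  = crc32(xprefix, (const Bytef *)xname, strlen(xname));
	uint32_t value_crc = crc32(xprefix, (const Bytef *)xvalue, xsize);
	return name_crc ^ value_crc;
}

int main(void)
{
	const char *name = "selinux", *value = "system_u:object_r:etc_t:s0";
	uint32_t key = xdatum_hashkey(2 /* stand-in prefix id */, name,
	                              value, (int)strlen(value));
	printf("hashkey=%#x, bucket=%u\n", (unsigned)key, (unsigned)(key % HASHSIZE));
	return 0;
}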
/* * JFFS2 -- Journalling Flash File System, Version 2. * * Copyright © 2001-2007 Red Hat, Inc. * Copyright © 2004-2010 David Woodhouse <[email protected]> * * Created by David Woodhouse <[email protected]> * * For licensing information, see the file 'LICENCE' in this directory. * */ #if !defined(__KERNEL__) && !defined(__ECOS) #error "The userspace support got too messy and was removed. Update your mkfs.jffs2" #endif #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/zlib.h> #include <linux/zutil.h> #include "nodelist.h" #include "compr.h" /* Plan: call deflate() with avail_in == *sourcelen, avail_out = *dstlen - 12 and flush == Z_FINISH. If it doesn't manage to finish, call it again with avail_in == 0 and avail_out set to the remaining 12 bytes for it to clean up. Q: Is 12 bytes sufficient? */ #define STREAM_END_SPACE 12 static DEFINE_MUTEX(deflate_mutex); static DEFINE_MUTEX(inflate_mutex); static z_stream inf_strm, def_strm; #ifdef __KERNEL__ /* Linux-only */ #include <linux/vmalloc.h> #include <linux/init.h> #include <linux/mutex.h> static int __init alloc_workspaces(void) { def_strm.workspace = vmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL)); if (!def_strm.workspace) return -ENOMEM; jffs2_dbg(1, "Allocated %d bytes for deflate workspace\n", zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL)); inf_strm.workspace = vmalloc(zlib_inflate_workspacesize()); if (!inf_strm.workspace) { vfree(def_strm.workspace); return -ENOMEM; } jffs2_dbg(1, "Allocated %d bytes for inflate workspace\n", zlib_inflate_workspacesize()); return 0; } static void free_workspaces(void) { vfree(def_strm.workspace); vfree(inf_strm.workspace); } #else #define alloc_workspaces() (0) #define free_workspaces() do { } while(0) #endif /* __KERNEL__ */ static int jffs2_zlib_compress(unsigned char *data_in, unsigned char *cpage_out, uint32_t *sourcelen, uint32_t *dstlen) { int ret; if (*dstlen <= STREAM_END_SPACE) return -1; mutex_lock(&deflate_mutex); if (Z_OK != zlib_deflateInit(&def_strm, 3)) { pr_warn("deflateInit failed\n"); mutex_unlock(&deflate_mutex); return -1; } def_strm.next_in = data_in; def_strm.total_in = 0; def_strm.next_out = cpage_out; def_strm.total_out = 0; while (def_strm.total_out < *dstlen - STREAM_END_SPACE && def_strm.total_in < *sourcelen) { def_strm.avail_out = *dstlen - (def_strm.total_out + STREAM_END_SPACE); def_strm.avail_in = min_t(unsigned long, (*sourcelen-def_strm.total_in), def_strm.avail_out); jffs2_dbg(1, "calling deflate with avail_in %ld, avail_out %ld\n", def_strm.avail_in, def_strm.avail_out); ret = zlib_deflate(&def_strm, Z_PARTIAL_FLUSH); jffs2_dbg(1, "deflate returned with avail_in %ld, avail_out %ld, total_in %ld, total_out %ld\n", def_strm.avail_in, def_strm.avail_out, def_strm.total_in, def_strm.total_out); if (ret != Z_OK) { jffs2_dbg(1, "deflate in loop returned %d\n", ret); zlib_deflateEnd(&def_strm); mutex_unlock(&deflate_mutex); return -1; } } def_strm.avail_out += STREAM_END_SPACE; def_strm.avail_in = 0; ret = zlib_deflate(&def_strm, Z_FINISH); zlib_deflateEnd(&def_strm); if (ret != Z_STREAM_END) { jffs2_dbg(1, "final deflate returned %d\n", ret); ret = -1; goto out; } if (def_strm.total_out >= def_strm.total_in) { jffs2_dbg(1, "zlib compressed %ld bytes into %ld; failing\n", def_strm.total_in, def_strm.total_out); ret = -1; goto out; } jffs2_dbg(1, "zlib compressed %ld bytes into %ld\n", def_strm.total_in, def_strm.total_out); *dstlen = def_strm.total_out; *sourcelen = def_strm.total_in; ret = 0; out: 
mutex_unlock(&deflate_mutex); return ret; } static int jffs2_zlib_decompress(unsigned char *data_in, unsigned char *cpage_out, uint32_t srclen, uint32_t destlen) { int ret; int wbits = MAX_WBITS; mutex_lock(&inflate_mutex); inf_strm.next_in = data_in; inf_strm.avail_in = srclen; inf_strm.total_in = 0; inf_strm.next_out = cpage_out; inf_strm.avail_out = destlen; inf_strm.total_out = 0; /* If it's deflate, and it's got no preset dictionary, then we can tell zlib to skip the adler32 check. */ if (srclen > 2 && !(data_in[1] & PRESET_DICT) && ((data_in[0] & 0x0f) == Z_DEFLATED) && !(((data_in[0]<<8) + data_in[1]) % 31)) { jffs2_dbg(2, "inflate skipping adler32\n"); wbits = -((data_in[0] >> 4) + 8); inf_strm.next_in += 2; inf_strm.avail_in -= 2; } else { /* Let this remain D1 for now -- it should never happen */ jffs2_dbg(1, "inflate not skipping adler32\n"); } if (Z_OK != zlib_inflateInit2(&inf_strm, wbits)) { pr_warn("inflateInit failed\n"); mutex_unlock(&inflate_mutex); return 1; } while((ret = zlib_inflate(&inf_strm, Z_FINISH)) == Z_OK) ; if (ret != Z_STREAM_END) { pr_notice("inflate returned %d\n", ret); } zlib_inflateEnd(&inf_strm); mutex_unlock(&inflate_mutex); return 0; } static struct jffs2_compressor jffs2_zlib_comp = { .priority = JFFS2_ZLIB_PRIORITY, .name = "zlib", .compr = JFFS2_COMPR_ZLIB, .compress = &jffs2_zlib_compress, .decompress = &jffs2_zlib_decompress, #ifdef JFFS2_ZLIB_DISABLED .disabled = 1, #else .disabled = 0, #endif }; int __init jffs2_zlib_init(void) { int ret; ret = alloc_workspaces(); if (ret) return ret; ret = jffs2_register_compressor(&jffs2_zlib_comp); if (ret) free_workspaces(); return ret; } void jffs2_zlib_exit(void) { jffs2_unregister_compressor(&jffs2_zlib_comp); free_workspaces(); }
linux-master
fs/jffs2/compr_zlib.c
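The "Plan" comment above reserves STREAM_END_SPACE (12 bytes) of the output buffer so that the final Z_FINISH call always has room to emit the stream trailer, and the result is only kept if it is actually smaller than the input. Below is a loose userspace sketch of that idea. It is an assumption-laden simplification: it uses stock zlib (deflateInit/deflate/deflateEnd) rather than the kernel's zlib_* wrappers, and a single Z_FINISH pass instead of the partial-flush loop.

/* Hedged sketch of the "reserve 12 bytes for Z_FINISH" plan; build with -lz. */
#include <stdio.h>
#include <string.h>
#include <zlib.h>

#define STREAM_END_SPACE 12

/* Returns the compressed length, or -1 if compression would not save space. */
static long compress_page(unsigned char *in, unsigned long inlen,
                          unsigned char *out, unsigned long outlen)
{
	z_stream s;
	memset(&s, 0, sizeof(s));
	if (outlen <= STREAM_END_SPACE || deflateInit(&s, 3) != Z_OK)
		return -1;

	s.next_in = in;
	s.avail_in = inlen;
	s.next_out = out;
	/* hold STREAM_END_SPACE bytes back so the trailer always fits */
	s.avail_out = outlen - STREAM_END_SPACE;

	int ret = deflate(&s, Z_FINISH);
	if (ret == Z_OK) {
		/* ran out of the reduced budget: hand back the reserved tail */
		s.avail_out += STREAM_END_SPACE;
		ret = deflate(&s, Z_FINISH);
	}
	long total = (long)s.total_out;
	deflateEnd(&s);

	if (ret != Z_STREAM_END || (unsigned long)total >= inlen)
		return -1;  /* failed or expanded: caller stores the data uncompressed */
	return total;
}

int main(void)
{
	static unsigned char in[4096], out[4096];
	memset(in, 'A', sizeof(in));  /* highly compressible "page" */
	printf("compressed 4096 -> %ld bytes\n",
	       compress_page(in, sizeof(in), out, sizeof(out)));
	return 0;
}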
/* * JFFS2 -- Journalling Flash File System, Version 2. * * Copyright © 2007 Nokia Corporation. All rights reserved. * Copyright © 2004-2010 David Woodhouse <[email protected]> * * Created by Richard Purdie <[email protected]> * * For licensing information, see the file 'LICENCE' in this directory. * */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/vmalloc.h> #include <linux/init.h> #include <linux/lzo.h> #include "compr.h" static void *lzo_mem; static void *lzo_compress_buf; static DEFINE_MUTEX(deflate_mutex); /* for lzo_mem and lzo_compress_buf */ static void free_workspace(void) { vfree(lzo_mem); vfree(lzo_compress_buf); } static int __init alloc_workspace(void) { lzo_mem = vmalloc(LZO1X_MEM_COMPRESS); lzo_compress_buf = vmalloc(lzo1x_worst_compress(PAGE_SIZE)); if (!lzo_mem || !lzo_compress_buf) { free_workspace(); return -ENOMEM; } return 0; } static int jffs2_lzo_compress(unsigned char *data_in, unsigned char *cpage_out, uint32_t *sourcelen, uint32_t *dstlen) { size_t compress_size; int ret; mutex_lock(&deflate_mutex); ret = lzo1x_1_compress(data_in, *sourcelen, lzo_compress_buf, &compress_size, lzo_mem); if (ret != LZO_E_OK) goto fail; if (compress_size > *dstlen) goto fail; memcpy(cpage_out, lzo_compress_buf, compress_size); mutex_unlock(&deflate_mutex); *dstlen = compress_size; return 0; fail: mutex_unlock(&deflate_mutex); return -1; } static int jffs2_lzo_decompress(unsigned char *data_in, unsigned char *cpage_out, uint32_t srclen, uint32_t destlen) { size_t dl = destlen; int ret; ret = lzo1x_decompress_safe(data_in, srclen, cpage_out, &dl); if (ret != LZO_E_OK || dl != destlen) return -1; return 0; } static struct jffs2_compressor jffs2_lzo_comp = { .priority = JFFS2_LZO_PRIORITY, .name = "lzo", .compr = JFFS2_COMPR_LZO, .compress = &jffs2_lzo_compress, .decompress = &jffs2_lzo_decompress, .disabled = 0, }; int __init jffs2_lzo_init(void) { int ret; ret = alloc_workspace(); if (ret < 0) return ret; ret = jffs2_register_compressor(&jffs2_lzo_comp); if (ret) free_workspace(); return ret; } void jffs2_lzo_exit(void) { jffs2_unregister_compressor(&jffs2_lzo_comp); free_workspace(); }
linux-master
fs/jffs2/compr_lzo.c
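jffs2_lzo_compress() above compresses into a bounce buffer sized by lzo1x_worst_compress(), because LZO output can be larger than its input, and it only copies the result out if it fits the destination. A rough userspace sketch with liblzo2 follows. Treat it as an assumption-level illustration: the worst-case bound x + x/16 + 64 + 3 is an assumption about what lzo1x_worst_compress() expands to, not a quote from the kernel headers.

/* Hedged sketch of the LZO bounce-buffer pattern; build with -llzo2. */
#include <stdio.h>
#include <string.h>
#include <lzo/lzo1x.h>

#define WORST_COMPRESS(x) ((x) + (x) / 16 + 64 + 3)  /* assumed worst-case bound */

int main(void)
{
	unsigned char in[4096];
	unsigned char bounce[WORST_COMPRESS(sizeof(in))];  /* cannot overflow */
	unsigned char wrkmem[LZO1X_1_MEM_COMPRESS];
	lzo_uint outlen = sizeof(bounce);

	memset(in, 'A', sizeof(in));
	if (lzo_init() != LZO_E_OK)
		return 1;
	if (lzo1x_1_compress(in, sizeof(in), bounce, &outlen, wrkmem) != LZO_E_OK)
		return 1;

	/* like jffs2_lzo_compress(): only keep the result if it is small enough
	 * to fit (and, in JFFS2's case, smaller than the destination page) */
	printf("compressed %zu -> %lu bytes\n", sizeof(in), (unsigned long)outlen);
	return 0;
}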
/* * JFFS2 -- Journalling Flash File System, Version 2. * * Copyright © 2001-2007 Red Hat, Inc. * * Created by David Woodhouse <[email protected]> * * For licensing information, see the file 'LICENCE' in this directory. * */ #include <linux/kernel.h> #include <linux/mtd/mtd.h> #include "nodelist.h" int jffs2_flash_direct_writev(struct jffs2_sb_info *c, const struct kvec *vecs, unsigned long count, loff_t to, size_t *retlen) { if (!jffs2_is_writebuffered(c)) { if (jffs2_sum_active()) { int res; res = jffs2_sum_add_kvec(c, vecs, count, (uint32_t) to); if (res) { return res; } } } return mtd_writev(c->mtd, vecs, count, to, retlen); } int jffs2_flash_direct_write(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, const u_char *buf) { int ret; ret = mtd_write(c->mtd, ofs, len, retlen, buf); if (jffs2_sum_active()) { struct kvec vecs[1]; int res; vecs[0].iov_base = (unsigned char *) buf; vecs[0].iov_len = len; res = jffs2_sum_add_kvec(c, vecs, 1, (uint32_t) ofs); if (res) { return res; } } return ret; }
linux-master
fs/jffs2/writev.c
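jffs2_flash_direct_writev() above hands an array of kvecs to mtd_writev() so that a node header and its payload reach the flash in one gather write. A loose userspace analogue using POSIX writev(2) is sketched below; the file path and header bytes are placeholders, not real JFFS2 on-flash data.

/* Hedged userspace analogue of the kvec gather write. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/uio.h>

int main(void)
{
	unsigned char header[12] = { 0x19, 0x85 };   /* stand-in node header */
	const char *payload = "user.test\0hello";    /* stand-in name/value, 16 bytes */
	struct iovec vecs[2] = {
		{ .iov_base = header,          .iov_len = sizeof(header) },
		{ .iov_base = (void *)payload, .iov_len = 16 },
	};

	int fd = open("/tmp/jffs2-gather-demo", O_CREAT | O_WRONLY | O_TRUNC, 0600);
	if (fd < 0)
		return 1;
	ssize_t written = writev(fd, vecs, 2);  /* both buffers, one call */
	printf("wrote %zd bytes\n", written);
	close(fd);
	return 0;
}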
/* * JFFS2 -- Journalling Flash File System, Version 2. * * Copyright © 2001-2007 Red Hat, Inc. * Copyright © 2004-2010 David Woodhouse <[email protected]> * * Created by David Woodhouse <[email protected]> * * For licensing information, see the file 'LICENCE' in this directory. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/slab.h> #include <linux/mtd/mtd.h> #include <linux/compiler.h> #include <linux/crc32.h> #include <linux/sched.h> #include <linux/pagemap.h> #include "nodelist.h" static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset); static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); static void jffs2_erase_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) { int ret; uint32_t bad_offset; #ifdef __ECOS ret = jffs2_flash_erase(c, jeb); if (!ret) { jffs2_erase_succeeded(c, jeb); return; } bad_offset = jeb->offset; #else /* Linux */ struct erase_info *instr; jffs2_dbg(1, "%s(): erase block %#08x (range %#08x-%#08x)\n", __func__, jeb->offset, jeb->offset, jeb->offset + c->sector_size); instr = kzalloc(sizeof(struct erase_info), GFP_KERNEL); if (!instr) { pr_warn("kzalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n"); mutex_lock(&c->erase_free_sem); spin_lock(&c->erase_completion_lock); list_move(&jeb->list, &c->erase_pending_list); c->erasing_size -= c->sector_size; c->dirty_size += c->sector_size; jeb->dirty_size = c->sector_size; spin_unlock(&c->erase_completion_lock); mutex_unlock(&c->erase_free_sem); return; } instr->addr = jeb->offset; instr->len = c->sector_size; ret = mtd_erase(c->mtd, instr); if (!ret) { jffs2_erase_succeeded(c, jeb); kfree(instr); return; } bad_offset = instr->fail_addr; kfree(instr); #endif /* __ECOS */ if (ret == -ENOMEM || ret == -EAGAIN) { /* Erase failed immediately. Refile it on the list */ jffs2_dbg(1, "Erase at 0x%08x failed: %d. Refiling on erase_pending_list\n", jeb->offset, ret); mutex_lock(&c->erase_free_sem); spin_lock(&c->erase_completion_lock); list_move(&jeb->list, &c->erase_pending_list); c->erasing_size -= c->sector_size; c->dirty_size += c->sector_size; jeb->dirty_size = c->sector_size; spin_unlock(&c->erase_completion_lock); mutex_unlock(&c->erase_free_sem); return; } if (ret == -EROFS) pr_warn("Erase at 0x%08x failed immediately: -EROFS. Is the sector locked?\n", jeb->offset); else pr_warn("Erase at 0x%08x failed immediately: errno %d\n", jeb->offset, ret); jffs2_erase_failed(c, jeb, bad_offset); } int jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count) { struct jffs2_eraseblock *jeb; int work_done = 0; mutex_lock(&c->erase_free_sem); spin_lock(&c->erase_completion_lock); while (!list_empty(&c->erase_complete_list) || !list_empty(&c->erase_pending_list)) { if (!list_empty(&c->erase_complete_list)) { jeb = list_entry(c->erase_complete_list.next, struct jffs2_eraseblock, list); list_move(&jeb->list, &c->erase_checking_list); spin_unlock(&c->erase_completion_lock); mutex_unlock(&c->erase_free_sem); jffs2_mark_erased_block(c, jeb); work_done++; if (!--count) { jffs2_dbg(1, "Count reached. 
jffs2_erase_pending_blocks leaving\n"); goto done; } } else if (!list_empty(&c->erase_pending_list)) { jeb = list_entry(c->erase_pending_list.next, struct jffs2_eraseblock, list); jffs2_dbg(1, "Starting erase of pending block 0x%08x\n", jeb->offset); list_del(&jeb->list); c->erasing_size += c->sector_size; c->wasted_size -= jeb->wasted_size; c->free_size -= jeb->free_size; c->used_size -= jeb->used_size; c->dirty_size -= jeb->dirty_size; jeb->wasted_size = jeb->used_size = jeb->dirty_size = jeb->free_size = 0; jffs2_free_jeb_node_refs(c, jeb); list_add(&jeb->list, &c->erasing_list); spin_unlock(&c->erase_completion_lock); mutex_unlock(&c->erase_free_sem); jffs2_erase_block(c, jeb); } else { BUG(); } /* Be nice */ cond_resched(); mutex_lock(&c->erase_free_sem); spin_lock(&c->erase_completion_lock); } spin_unlock(&c->erase_completion_lock); mutex_unlock(&c->erase_free_sem); done: jffs2_dbg(1, "jffs2_erase_pending_blocks completed\n"); return work_done; } static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) { jffs2_dbg(1, "Erase completed successfully at 0x%08x\n", jeb->offset); mutex_lock(&c->erase_free_sem); spin_lock(&c->erase_completion_lock); list_move_tail(&jeb->list, &c->erase_complete_list); /* Wake the GC thread to mark them clean */ jffs2_garbage_collect_trigger(c); spin_unlock(&c->erase_completion_lock); mutex_unlock(&c->erase_free_sem); wake_up(&c->erase_wait); } static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset) { /* For NAND, if the failure did not occur at the device level for a specific physical page, don't bother updating the bad block table. */ if (jffs2_cleanmarker_oob(c) && (bad_offset != (uint32_t)MTD_FAIL_ADDR_UNKNOWN)) { /* We had a device-level failure to erase. Let's see if we've failed too many times. */ if (!jffs2_write_nand_badblock(c, jeb, bad_offset)) { /* We'd like to give this block another try. */ mutex_lock(&c->erase_free_sem); spin_lock(&c->erase_completion_lock); list_move(&jeb->list, &c->erase_pending_list); c->erasing_size -= c->sector_size; c->dirty_size += c->sector_size; jeb->dirty_size = c->sector_size; spin_unlock(&c->erase_completion_lock); mutex_unlock(&c->erase_free_sem); return; } } mutex_lock(&c->erase_free_sem); spin_lock(&c->erase_completion_lock); c->erasing_size -= c->sector_size; c->bad_size += c->sector_size; list_move(&jeb->list, &c->bad_list); c->nr_erasing_blocks--; spin_unlock(&c->erase_completion_lock); mutex_unlock(&c->erase_free_sem); wake_up(&c->erase_wait); } /* Hmmm. Maybe we should accept the extra space it takes and make this a standard doubly-linked list? */ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, struct jffs2_eraseblock *jeb) { struct jffs2_inode_cache *ic = NULL; struct jffs2_raw_node_ref **prev; prev = &ref->next_in_ino; /* Walk the inode's list once, removing any nodes from this eraseblock */ while (1) { if (!(*prev)->next_in_ino) { /* We're looking at the jffs2_inode_cache, which is at the end of the linked list. Stash it and continue from the beginning of the list */ ic = (struct jffs2_inode_cache *)(*prev); prev = &ic->nodes; continue; } if (SECTOR_ADDR((*prev)->flash_offset) == jeb->offset) { /* It's in the block we're erasing */ struct jffs2_raw_node_ref *this; this = *prev; *prev = this->next_in_ino; this->next_in_ino = NULL; if (this == ref) break; continue; } /* Not to be deleted. 
Skip */ prev = &((*prev)->next_in_ino); } /* PARANOIA */ if (!ic) { JFFS2_WARNING("inode_cache/xattr_datum/xattr_ref" " not found in remove_node_refs()!!\n"); return; } jffs2_dbg(1, "Removed nodes in range 0x%08x-0x%08x from ino #%u\n", jeb->offset, jeb->offset + c->sector_size, ic->ino); D2({ int i=0; struct jffs2_raw_node_ref *this; printk(KERN_DEBUG "After remove_node_refs_from_ino_list: \n"); this = ic->nodes; printk(KERN_DEBUG); while(this) { pr_cont("0x%08x(%d)->", ref_offset(this), ref_flags(this)); if (++i == 5) { printk(KERN_DEBUG); i=0; } this = this->next_in_ino; } pr_cont("\n"); }); switch (ic->class) { #ifdef CONFIG_JFFS2_FS_XATTR case RAWNODE_CLASS_XATTR_DATUM: jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic); break; case RAWNODE_CLASS_XATTR_REF: jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic); break; #endif default: if (ic->nodes == (void *)ic && ic->pino_nlink == 0) jffs2_del_ino_cache(c, ic); } } void jffs2_free_jeb_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) { struct jffs2_raw_node_ref *block, *ref; jffs2_dbg(1, "Freeing all node refs for eraseblock offset 0x%08x\n", jeb->offset); block = ref = jeb->first_node; while (ref) { if (ref->flash_offset == REF_LINK_NODE) { ref = ref->next_in_ino; jffs2_free_refblock(block); block = ref; continue; } if (ref->flash_offset != REF_EMPTY_NODE && ref->next_in_ino) jffs2_remove_node_refs_from_ino_list(c, ref, jeb); /* else it was a non-inode node or already removed, so don't bother */ ref++; } jeb->first_node = jeb->last_node = NULL; } static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t *bad_offset) { void *ebuf; uint32_t ofs; size_t retlen; int ret; unsigned long *wordebuf; ret = mtd_point(c->mtd, jeb->offset, c->sector_size, &retlen, &ebuf, NULL); if (ret != -EOPNOTSUPP) { if (ret) { jffs2_dbg(1, "MTD point failed %d\n", ret); goto do_flash_read; } if (retlen < c->sector_size) { /* Don't muck about if it won't let us point to the whole erase sector */ jffs2_dbg(1, "MTD point returned len too short: 0x%zx\n", retlen); mtd_unpoint(c->mtd, jeb->offset, retlen); goto do_flash_read; } wordebuf = ebuf-sizeof(*wordebuf); retlen /= sizeof(*wordebuf); do { if (*++wordebuf != ~0) break; } while(--retlen); mtd_unpoint(c->mtd, jeb->offset, c->sector_size); if (retlen) { pr_warn("Newly-erased block contained word 0x%lx at offset 0x%08tx\n", *wordebuf, jeb->offset + c->sector_size-retlen * sizeof(*wordebuf)); return -EIO; } return 0; } do_flash_read: ebuf = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!ebuf) { pr_warn("Failed to allocate page buffer for verifying erase at 0x%08x. Refiling\n", jeb->offset); return -EAGAIN; } jffs2_dbg(1, "Verifying erase at 0x%08x\n", jeb->offset); for (ofs = jeb->offset; ofs < jeb->offset + c->sector_size; ) { uint32_t readlen = min((uint32_t)PAGE_SIZE, jeb->offset + c->sector_size - ofs); int i; *bad_offset = ofs; ret = mtd_read(c->mtd, ofs, readlen, &retlen, ebuf); if (ret) { pr_warn("Read of newly-erased block at 0x%08x failed: %d. Putting on bad_list\n", ofs, ret); ret = -EIO; goto fail; } if (retlen != readlen) { pr_warn("Short read from newly-erased block at 0x%08x. Wanted %d, got %zd\n", ofs, readlen, retlen); ret = -EIO; goto fail; } for (i=0; i<readlen; i += sizeof(unsigned long)) { /* It's OK. 
We know it's properly aligned */ unsigned long *datum = ebuf + i; if (*datum + 1) { *bad_offset += i; pr_warn("Newly-erased block contained word 0x%lx at offset 0x%08x\n", *datum, *bad_offset); ret = -EIO; goto fail; } } ofs += readlen; cond_resched(); } ret = 0; fail: kfree(ebuf); return ret; } static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) { size_t retlen; int ret; uint32_t bad_offset; switch (jffs2_block_check_erase(c, jeb, &bad_offset)) { case -EAGAIN: goto refile; case -EIO: goto filebad; } /* Write the erase complete marker */ jffs2_dbg(1, "Writing erased marker to block at 0x%08x\n", jeb->offset); bad_offset = jeb->offset; /* Cleanmarker in oob area or no cleanmarker at all ? */ if (jffs2_cleanmarker_oob(c) || c->cleanmarker_size == 0) { if (jffs2_cleanmarker_oob(c)) { if (jffs2_write_nand_cleanmarker(c, jeb)) goto filebad; } } else { struct kvec vecs[1]; struct jffs2_unknown_node marker = { .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK), .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER), .totlen = cpu_to_je32(c->cleanmarker_size) }; jffs2_prealloc_raw_node_refs(c, jeb, 1); marker.hdr_crc = cpu_to_je32(crc32(0, &marker, sizeof(struct jffs2_unknown_node)-4)); vecs[0].iov_base = (unsigned char *) &marker; vecs[0].iov_len = sizeof(marker); ret = jffs2_flash_direct_writev(c, vecs, 1, jeb->offset, &retlen); if (ret || retlen != sizeof(marker)) { if (ret) pr_warn("Write clean marker to block at 0x%08x failed: %d\n", jeb->offset, ret); else pr_warn("Short write to newly-erased block at 0x%08x: Wanted %zd, got %zd\n", jeb->offset, sizeof(marker), retlen); goto filebad; } } /* Everything else got zeroed before the erase */ jeb->free_size = c->sector_size; mutex_lock(&c->erase_free_sem); spin_lock(&c->erase_completion_lock); c->erasing_size -= c->sector_size; c->free_size += c->sector_size; /* Account for cleanmarker now, if it's in-band */ if (c->cleanmarker_size && !jffs2_cleanmarker_oob(c)) jffs2_link_node_ref(c, jeb, jeb->offset | REF_NORMAL, c->cleanmarker_size, NULL); list_move_tail(&jeb->list, &c->free_list); c->nr_erasing_blocks--; c->nr_free_blocks++; jffs2_dbg_acct_sanity_check_nolock(c, jeb); jffs2_dbg_acct_paranoia_check_nolock(c, jeb); spin_unlock(&c->erase_completion_lock); mutex_unlock(&c->erase_free_sem); wake_up(&c->erase_wait); return; filebad: jffs2_erase_failed(c, jeb, bad_offset); return; refile: /* Stick it back on the list from whence it came and come back later */ mutex_lock(&c->erase_free_sem); spin_lock(&c->erase_completion_lock); jffs2_garbage_collect_trigger(c); list_move(&jeb->list, &c->erase_complete_list); spin_unlock(&c->erase_completion_lock); mutex_unlock(&c->erase_free_sem); return; }
linux-master
fs/jffs2/erase.c
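A small standalone note (illustrative only, not part of the kernel tree) on the `*datum + 1` idiom used in jffs2_block_check_erase() above: an all-ones word wraps to zero when incremented, so the sum is non-zero exactly when the word is not fully erased.

/* Illustrative sketch of the erased-word test; assumes nothing beyond plain C. */
#include <stdbool.h>

static bool word_not_erased(unsigned long word)
{
	/* 0xFF...FF + 1 wraps to 0, so a non-zero sum means "not erased". */
	return word + 1 != 0;
}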
/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2006 NEC Corporation
 *
 * Created by KaiGai Kohei <[email protected]>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/jffs2.h>
#include <linux/xattr.h>
#include <linux/mtd/mtd.h>
#include "nodelist.h"

static int jffs2_user_getxattr(const struct xattr_handler *handler,
			       struct dentry *unused, struct inode *inode,
			       const char *name, void *buffer, size_t size)
{
	return do_jffs2_getxattr(inode, JFFS2_XPREFIX_USER, name, buffer, size);
}

static int jffs2_user_setxattr(const struct xattr_handler *handler,
			       struct mnt_idmap *idmap,
			       struct dentry *unused, struct inode *inode,
			       const char *name, const void *buffer,
			       size_t size, int flags)
{
	return do_jffs2_setxattr(inode, JFFS2_XPREFIX_USER, name, buffer, size, flags);
}

const struct xattr_handler jffs2_user_xattr_handler = {
	.prefix = XATTR_USER_PREFIX,
	.set = jffs2_user_setxattr,
	.get = jffs2_user_getxattr
};
linux-master
fs/jffs2/xattr_user.c
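A minimal user-space sketch (not part of the kernel sources; the mount point, file name and attribute name are illustrative assumptions) showing how the "user." prefix handled by jffs2_user_setxattr()/jffs2_user_getxattr() above is exercised through the standard setxattr(2)/getxattr(2) system calls.

/* Illustrative user-space sketch; assumes a JFFS2 filesystem mounted at /mnt/jffs2. */
#include <stdio.h>
#include <sys/xattr.h>

int main(void)
{
	const char *path = "/mnt/jffs2/file";   /* hypothetical file on the mount */
	char buf[64];
	ssize_t len;

	/* Stored through the "user." handler, i.e. jffs2_user_setxattr() */
	if (setxattr(path, "user.comment", "demo", 4, 0) != 0)
		perror("setxattr");

	/* Read back through jffs2_user_getxattr() */
	len = getxattr(path, "user.comment", buf, sizeof(buf));
	if (len > 0)
		printf("user.comment = %.*s\n", (int)len, buf);
	return 0;
}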
/* * JFFS2 -- Journalling Flash File System, Version 2. * * Copyright © 2001-2007 Red Hat, Inc. * * Created by David Woodhouse <[email protected]> * * For licensing information, see the file 'LICENCE' in this directory. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/mtd/mtd.h> #include <linux/pagemap.h> #include <linux/crc32.h> #include <linux/compiler.h> #include "nodelist.h" #include "summary.h" #include "debug.h" #define DEFAULT_EMPTY_SCAN_SIZE 256 #define noisy_printk(noise, fmt, ...) \ do { \ if (*(noise)) { \ pr_notice(fmt, ##__VA_ARGS__); \ (*(noise))--; \ if (!(*(noise))) \ pr_notice("Further such events for this erase block will not be printed\n"); \ } \ } while (0) static uint32_t pseudo_random; static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, unsigned char *buf, uint32_t buf_size, struct jffs2_summary *s); /* These helper functions _must_ increase ofs and also do the dirty/used space accounting. * Returning an error will abort the mount - bad checksums etc. should just mark the space * as dirty. */ static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, struct jffs2_raw_inode *ri, uint32_t ofs, struct jffs2_summary *s); static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, struct jffs2_raw_dirent *rd, uint32_t ofs, struct jffs2_summary *s); static inline int min_free(struct jffs2_sb_info *c) { uint32_t min = 2 * sizeof(struct jffs2_raw_inode); #ifdef CONFIG_JFFS2_FS_WRITEBUFFER if (!jffs2_can_mark_obsolete(c) && min < c->wbuf_pagesize) return c->wbuf_pagesize; #endif return min; } static inline uint32_t EMPTY_SCAN_SIZE(uint32_t sector_size) { if (sector_size < DEFAULT_EMPTY_SCAN_SIZE) return sector_size; else return DEFAULT_EMPTY_SCAN_SIZE; } static int file_dirty(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) { int ret; if ((ret = jffs2_prealloc_raw_node_refs(c, jeb, 1))) return ret; if ((ret = jffs2_scan_dirty_space(c, jeb, jeb->free_size))) return ret; /* Turned wasted size into dirty, since we apparently think it's recoverable now. 
*/ jeb->dirty_size += jeb->wasted_size; c->dirty_size += jeb->wasted_size; c->wasted_size -= jeb->wasted_size; jeb->wasted_size = 0; if (VERYDIRTY(c, jeb->dirty_size)) { list_add(&jeb->list, &c->very_dirty_list); } else { list_add(&jeb->list, &c->dirty_list); } return 0; } int jffs2_scan_medium(struct jffs2_sb_info *c) { int i, ret; uint32_t empty_blocks = 0, bad_blocks = 0; unsigned char *flashbuf = NULL; uint32_t buf_size = 0; struct jffs2_summary *s = NULL; /* summary info collected by the scan process */ #ifndef __ECOS size_t pointlen, try_size; ret = mtd_point(c->mtd, 0, c->mtd->size, &pointlen, (void **)&flashbuf, NULL); if (!ret && pointlen < c->mtd->size) { /* Don't muck about if it won't let us point to the whole flash */ jffs2_dbg(1, "MTD point returned len too short: 0x%zx\n", pointlen); mtd_unpoint(c->mtd, 0, pointlen); flashbuf = NULL; } if (ret && ret != -EOPNOTSUPP) jffs2_dbg(1, "MTD point failed %d\n", ret); #endif if (!flashbuf) { /* For NAND it's quicker to read a whole eraseblock at a time, apparently */ if (jffs2_cleanmarker_oob(c)) try_size = c->sector_size; else try_size = PAGE_SIZE; jffs2_dbg(1, "Trying to allocate readbuf of %zu " "bytes\n", try_size); flashbuf = mtd_kmalloc_up_to(c->mtd, &try_size); if (!flashbuf) return -ENOMEM; jffs2_dbg(1, "Allocated readbuf of %zu bytes\n", try_size); buf_size = (uint32_t)try_size; } if (jffs2_sum_active()) { s = kzalloc(sizeof(struct jffs2_summary), GFP_KERNEL); if (!s) { JFFS2_WARNING("Can't allocate memory for summary\n"); ret = -ENOMEM; goto out_buf; } } for (i=0; i<c->nr_blocks; i++) { struct jffs2_eraseblock *jeb = &c->blocks[i]; cond_resched(); /* reset summary info for next eraseblock scan */ jffs2_sum_reset_collected(s); ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset), buf_size, s); if (ret < 0) goto out; jffs2_dbg_acct_paranoia_check_nolock(c, jeb); /* Now decide which list to put it on */ switch(ret) { case BLK_STATE_ALLFF: /* * Empty block. Since we can't be sure it * was entirely erased, we just queue it for erase * again. It will be marked as such when the erase * is complete. Meanwhile we still count it as empty * for later checks. */ empty_blocks++; list_add(&jeb->list, &c->erase_pending_list); c->nr_erasing_blocks++; break; case BLK_STATE_CLEANMARKER: /* Only a CLEANMARKER node is valid */ if (!jeb->dirty_size) { /* It's actually free */ list_add(&jeb->list, &c->free_list); c->nr_free_blocks++; } else { /* Dirt */ jffs2_dbg(1, "Adding all-dirty block at 0x%08x to erase_pending_list\n", jeb->offset); list_add(&jeb->list, &c->erase_pending_list); c->nr_erasing_blocks++; } break; case BLK_STATE_CLEAN: /* Full (or almost full) of clean data. Clean list */ list_add(&jeb->list, &c->clean_list); break; case BLK_STATE_PARTDIRTY: /* Some data, but not full. Dirty list. */ /* We want to remember the block with most free space and stick it in the 'nextblock' position to start writing to it. 
*/ if (jeb->free_size > min_free(c) && (!c->nextblock || c->nextblock->free_size < jeb->free_size)) { /* Better candidate for the next writes to go to */ if (c->nextblock) { ret = file_dirty(c, c->nextblock); if (ret) goto out; /* deleting summary information of the old nextblock */ jffs2_sum_reset_collected(c->summary); } /* update collected summary information for the current nextblock */ jffs2_sum_move_collected(c, s); jffs2_dbg(1, "%s(): new nextblock = 0x%08x\n", __func__, jeb->offset); c->nextblock = jeb; } else { ret = file_dirty(c, jeb); if (ret) goto out; } break; case BLK_STATE_ALLDIRTY: /* Nothing valid - not even a clean marker. Needs erasing. */ /* For now we just put it on the erasing list. We'll start the erases later */ jffs2_dbg(1, "Erase block at 0x%08x is not formatted. It will be erased\n", jeb->offset); list_add(&jeb->list, &c->erase_pending_list); c->nr_erasing_blocks++; break; case BLK_STATE_BADBLOCK: jffs2_dbg(1, "Block at 0x%08x is bad\n", jeb->offset); list_add(&jeb->list, &c->bad_list); c->bad_size += c->sector_size; c->free_size -= c->sector_size; bad_blocks++; break; default: pr_warn("%s(): unknown block state\n", __func__); BUG(); } } /* Nextblock dirty is always seen as wasted, because we cannot recycle it now */ if (c->nextblock && (c->nextblock->dirty_size)) { c->nextblock->wasted_size += c->nextblock->dirty_size; c->wasted_size += c->nextblock->dirty_size; c->dirty_size -= c->nextblock->dirty_size; c->nextblock->dirty_size = 0; } #ifdef CONFIG_JFFS2_FS_WRITEBUFFER if (!jffs2_can_mark_obsolete(c) && c->wbuf_pagesize && c->nextblock && (c->nextblock->free_size % c->wbuf_pagesize)) { /* If we're going to start writing into a block which already contains data, and the end of the data isn't page-aligned, skip a little and align it. 
*/ uint32_t skip = c->nextblock->free_size % c->wbuf_pagesize; jffs2_dbg(1, "%s(): Skipping %d bytes in nextblock to ensure page alignment\n", __func__, skip); jffs2_prealloc_raw_node_refs(c, c->nextblock, 1); jffs2_scan_dirty_space(c, c->nextblock, skip); } #endif if (c->nr_erasing_blocks) { if (!c->used_size && !c->unchecked_size && ((c->nr_free_blocks+empty_blocks+bad_blocks) != c->nr_blocks || bad_blocks == c->nr_blocks)) { pr_notice("Cowardly refusing to erase blocks on filesystem with no valid JFFS2 nodes\n"); pr_notice("empty_blocks %d, bad_blocks %d, c->nr_blocks %d\n", empty_blocks, bad_blocks, c->nr_blocks); ret = -EIO; goto out; } spin_lock(&c->erase_completion_lock); jffs2_garbage_collect_trigger(c); spin_unlock(&c->erase_completion_lock); } ret = 0; out: jffs2_sum_reset_collected(s); kfree(s); out_buf: if (buf_size) kfree(flashbuf); #ifndef __ECOS else mtd_unpoint(c->mtd, 0, c->mtd->size); #endif return ret; } static int jffs2_fill_scan_buf(struct jffs2_sb_info *c, void *buf, uint32_t ofs, uint32_t len) { int ret; size_t retlen; ret = jffs2_flash_read(c, ofs, len, &retlen, buf); if (ret) { jffs2_dbg(1, "mtd->read(0x%x bytes from 0x%x) returned %d\n", len, ofs, ret); return ret; } if (retlen < len) { jffs2_dbg(1, "Read at 0x%x gave only 0x%zx bytes\n", ofs, retlen); return -EIO; } return 0; } int jffs2_scan_classify_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) { if ((jeb->used_size + jeb->unchecked_size) == PAD(c->cleanmarker_size) && !jeb->dirty_size && (!jeb->first_node || !ref_next(jeb->first_node)) ) return BLK_STATE_CLEANMARKER; /* move blocks with max 4 byte dirty space to cleanlist */ else if (!ISDIRTY(c->sector_size - (jeb->used_size + jeb->unchecked_size))) { c->dirty_size -= jeb->dirty_size; c->wasted_size += jeb->dirty_size; jeb->wasted_size += jeb->dirty_size; jeb->dirty_size = 0; return BLK_STATE_CLEAN; } else if (jeb->used_size || jeb->unchecked_size) return BLK_STATE_PARTDIRTY; else return BLK_STATE_ALLDIRTY; } #ifdef CONFIG_JFFS2_FS_XATTR static int jffs2_scan_xattr_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, struct jffs2_raw_xattr *rx, uint32_t ofs, struct jffs2_summary *s) { struct jffs2_xattr_datum *xd; uint32_t xid, version, totlen, crc; int err; crc = crc32(0, rx, sizeof(struct jffs2_raw_xattr) - 4); if (crc != je32_to_cpu(rx->node_crc)) { JFFS2_WARNING("node CRC failed at %#08x, read=%#08x, calc=%#08x\n", ofs, je32_to_cpu(rx->node_crc), crc); if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rx->totlen)))) return err; return 0; } xid = je32_to_cpu(rx->xid); version = je32_to_cpu(rx->version); totlen = PAD(sizeof(struct jffs2_raw_xattr) + rx->name_len + 1 + je16_to_cpu(rx->value_len)); if (totlen != je32_to_cpu(rx->totlen)) { JFFS2_WARNING("node length mismatch at %#08x, read=%u, calc=%u\n", ofs, je32_to_cpu(rx->totlen), totlen); if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rx->totlen)))) return err; return 0; } xd = jffs2_setup_xattr_datum(c, xid, version); if (IS_ERR(xd)) return PTR_ERR(xd); if (xd->version > version) { struct jffs2_raw_node_ref *raw = jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, totlen, NULL); raw->next_in_ino = xd->node->next_in_ino; xd->node->next_in_ino = raw; } else { xd->version = version; xd->xprefix = rx->xprefix; xd->name_len = rx->name_len; xd->value_len = je16_to_cpu(rx->value_len); xd->data_crc = je32_to_cpu(rx->data_crc); jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, totlen, (void *)xd); } if (jffs2_sum_active()) jffs2_sum_add_xattr_mem(s, rx, ofs - jeb->offset); 
dbg_xattr("scanning xdatum at %#08x (xid=%u, version=%u)\n", ofs, xd->xid, xd->version); return 0; } static int jffs2_scan_xref_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, struct jffs2_raw_xref *rr, uint32_t ofs, struct jffs2_summary *s) { struct jffs2_xattr_ref *ref; uint32_t crc; int err; crc = crc32(0, rr, sizeof(*rr) - 4); if (crc != je32_to_cpu(rr->node_crc)) { JFFS2_WARNING("node CRC failed at %#08x, read=%#08x, calc=%#08x\n", ofs, je32_to_cpu(rr->node_crc), crc); if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rr->totlen))))) return err; return 0; } if (PAD(sizeof(struct jffs2_raw_xref)) != je32_to_cpu(rr->totlen)) { JFFS2_WARNING("node length mismatch at %#08x, read=%u, calc=%zd\n", ofs, je32_to_cpu(rr->totlen), PAD(sizeof(struct jffs2_raw_xref))); if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rr->totlen)))) return err; return 0; } ref = jffs2_alloc_xattr_ref(); if (!ref) return -ENOMEM; /* BEFORE jffs2_build_xattr_subsystem() called, * and AFTER xattr_ref is marked as a dead xref, * ref->xid is used to store 32bit xid, xd is not used * ref->ino is used to store 32bit inode-number, ic is not used * Thoes variables are declared as union, thus using those * are exclusive. In a similar way, ref->next is temporarily * used to chain all xattr_ref object. It's re-chained to * jffs2_inode_cache in jffs2_build_xattr_subsystem() correctly. */ ref->ino = je32_to_cpu(rr->ino); ref->xid = je32_to_cpu(rr->xid); ref->xseqno = je32_to_cpu(rr->xseqno); if (ref->xseqno > c->highest_xseqno) c->highest_xseqno = (ref->xseqno & ~XREF_DELETE_MARKER); ref->next = c->xref_temp; c->xref_temp = ref; jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(rr->totlen)), (void *)ref); if (jffs2_sum_active()) jffs2_sum_add_xref_mem(s, rr, ofs - jeb->offset); dbg_xattr("scan xref at %#08x (xid=%u, ino=%u)\n", ofs, ref->xid, ref->ino); return 0; } #endif /* Called with 'buf_size == 0' if buf is in fact a pointer _directly_ into the flash, XIP-style */ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, unsigned char *buf, uint32_t buf_size, struct jffs2_summary *s) { struct jffs2_unknown_node *node; struct jffs2_unknown_node crcnode; uint32_t ofs, prevofs, max_ofs; uint32_t hdr_crc, buf_ofs, buf_len; int err; int noise = 0; #ifdef CONFIG_JFFS2_FS_WRITEBUFFER int cleanmarkerfound = 0; #endif ofs = jeb->offset; prevofs = jeb->offset - 1; jffs2_dbg(1, "%s(): Scanning block at 0x%x\n", __func__, ofs); #ifdef CONFIG_JFFS2_FS_WRITEBUFFER if (jffs2_cleanmarker_oob(c)) { int ret; if (mtd_block_isbad(c->mtd, jeb->offset)) return BLK_STATE_BADBLOCK; ret = jffs2_check_nand_cleanmarker(c, jeb); jffs2_dbg(2, "jffs_check_nand_cleanmarker returned %d\n", ret); /* Even if it's not found, we still scan to see if the block is empty. We use this information to decide whether to erase it or not. */ switch (ret) { case 0: cleanmarkerfound = 1; break; case 1: break; default: return ret; } } #endif if (jffs2_sum_active()) { struct jffs2_sum_marker *sm; void *sumptr = NULL; uint32_t sumlen; if (!buf_size) { /* XIP case. Just look, point at the summary if it's there */ sm = (void *)buf + c->sector_size - sizeof(*sm); if (je32_to_cpu(sm->magic) == JFFS2_SUM_MAGIC) { sumptr = buf + je32_to_cpu(sm->offset); sumlen = c->sector_size - je32_to_cpu(sm->offset); } } else { /* If NAND flash, read a whole page of it. 
Else just the end */ if (c->wbuf_pagesize) buf_len = c->wbuf_pagesize; else buf_len = sizeof(*sm); /* Read as much as we want into the _end_ of the preallocated buffer */ err = jffs2_fill_scan_buf(c, buf + buf_size - buf_len, jeb->offset + c->sector_size - buf_len, buf_len); if (err) return err; sm = (void *)buf + buf_size - sizeof(*sm); if (je32_to_cpu(sm->magic) == JFFS2_SUM_MAGIC) { sumlen = c->sector_size - je32_to_cpu(sm->offset); sumptr = buf + buf_size - sumlen; /* sm->offset maybe wrong but MAGIC maybe right */ if (sumlen > c->sector_size) goto full_scan; /* Now, make sure the summary itself is available */ if (sumlen > buf_size) { /* Need to kmalloc for this. */ sumptr = kmalloc(sumlen, GFP_KERNEL); if (!sumptr) return -ENOMEM; memcpy(sumptr + sumlen - buf_len, buf + buf_size - buf_len, buf_len); } if (buf_len < sumlen) { /* Need to read more so that the entire summary node is present */ err = jffs2_fill_scan_buf(c, sumptr, jeb->offset + c->sector_size - sumlen, sumlen - buf_len); if (err) { if (sumlen > buf_size) kfree(sumptr); return err; } } } } if (sumptr) { err = jffs2_sum_scan_sumnode(c, jeb, sumptr, sumlen, &pseudo_random); if (buf_size && sumlen > buf_size) kfree(sumptr); /* If it returns with a real error, bail. If it returns positive, that's a block classification (i.e. BLK_STATE_xxx) so return that too. If it returns zero, fall through to full scan. */ if (err) return err; } } full_scan: buf_ofs = jeb->offset; if (!buf_size) { /* This is the XIP case -- we're reading _directly_ from the flash chip */ buf_len = c->sector_size; } else { buf_len = EMPTY_SCAN_SIZE(c->sector_size); err = jffs2_fill_scan_buf(c, buf, buf_ofs, buf_len); if (err) return err; } /* We temporarily use 'ofs' as a pointer into the buffer/jeb */ ofs = 0; max_ofs = EMPTY_SCAN_SIZE(c->sector_size); /* Scan only EMPTY_SCAN_SIZE of 0xFF before declaring it's empty */ while(ofs < max_ofs && *(uint32_t *)(&buf[ofs]) == 0xFFFFFFFF) ofs += 4; if (ofs == max_ofs) { #ifdef CONFIG_JFFS2_FS_WRITEBUFFER if (jffs2_cleanmarker_oob(c)) { /* scan oob, take care of cleanmarker */ int ret = jffs2_check_oob_empty(c, jeb, cleanmarkerfound); jffs2_dbg(2, "jffs2_check_oob_empty returned %d\n", ret); switch (ret) { case 0: return cleanmarkerfound ? BLK_STATE_CLEANMARKER : BLK_STATE_ALLFF; case 1: return BLK_STATE_ALLDIRTY; default: return ret; } } #endif jffs2_dbg(1, "Block at 0x%08x is empty (erased)\n", jeb->offset); if (c->cleanmarker_size == 0) return BLK_STATE_CLEANMARKER; /* don't bother with re-erase */ else return BLK_STATE_ALLFF; /* OK to erase if all blocks are like this */ } if (ofs) { jffs2_dbg(1, "Free space at %08x ends at %08x\n", jeb->offset, jeb->offset + ofs); if ((err = jffs2_prealloc_raw_node_refs(c, jeb, 1))) return err; if ((err = jffs2_scan_dirty_space(c, jeb, ofs))) return err; } /* Now ofs is a complete physical flash offset as it always was... */ ofs += jeb->offset; noise = 10; dbg_summary("no summary found in jeb 0x%08x. Apply original scan.\n",jeb->offset); scan_more: while(ofs < jeb->offset + c->sector_size) { jffs2_dbg_acct_paranoia_check_nolock(c, jeb); /* Make sure there are node refs available for use */ err = jffs2_prealloc_raw_node_refs(c, jeb, 2); if (err) return err; cond_resched(); if (ofs & 3) { pr_warn("Eep. ofs 0x%08x not word-aligned!\n", ofs); ofs = PAD(ofs); continue; } if (ofs == prevofs) { pr_warn("ofs 0x%08x has already been seen. 
Skipping\n", ofs); if ((err = jffs2_scan_dirty_space(c, jeb, 4))) return err; ofs += 4; continue; } prevofs = ofs; if (jeb->offset + c->sector_size < ofs + sizeof(*node)) { jffs2_dbg(1, "Fewer than %zd bytes left to end of block. (%x+%x<%x+%zx) Not reading\n", sizeof(struct jffs2_unknown_node), jeb->offset, c->sector_size, ofs, sizeof(*node)); if ((err = jffs2_scan_dirty_space(c, jeb, (jeb->offset + c->sector_size)-ofs))) return err; break; } if (buf_ofs + buf_len < ofs + sizeof(*node)) { buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); jffs2_dbg(1, "Fewer than %zd bytes (node header) left to end of buf. Reading 0x%x at 0x%08x\n", sizeof(struct jffs2_unknown_node), buf_len, ofs); err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); if (err) return err; buf_ofs = ofs; } node = (struct jffs2_unknown_node *)&buf[ofs-buf_ofs]; if (*(uint32_t *)(&buf[ofs-buf_ofs]) == 0xffffffff) { uint32_t inbuf_ofs; uint32_t empty_start, scan_end; empty_start = ofs; ofs += 4; scan_end = min_t(uint32_t, EMPTY_SCAN_SIZE(c->sector_size)/8, buf_len); jffs2_dbg(1, "Found empty flash at 0x%08x\n", ofs); more_empty: inbuf_ofs = ofs - buf_ofs; while (inbuf_ofs < scan_end) { if (unlikely(*(uint32_t *)(&buf[inbuf_ofs]) != 0xffffffff)) { pr_warn("Empty flash at 0x%08x ends at 0x%08x\n", empty_start, ofs); if ((err = jffs2_scan_dirty_space(c, jeb, ofs-empty_start))) return err; goto scan_more; } inbuf_ofs+=4; ofs += 4; } /* Ran off end. */ jffs2_dbg(1, "Empty flash to end of buffer at 0x%08x\n", ofs); /* If we're only checking the beginning of a block with a cleanmarker, bail now */ if (buf_ofs == jeb->offset && jeb->used_size == PAD(c->cleanmarker_size) && c->cleanmarker_size && !jeb->dirty_size && !ref_next(jeb->first_node)) { jffs2_dbg(1, "%d bytes at start of block seems clean... assuming all clean\n", EMPTY_SCAN_SIZE(c->sector_size)); return BLK_STATE_CLEANMARKER; } if (!buf_size && (scan_end != buf_len)) {/* XIP/point case */ scan_end = buf_len; goto more_empty; } /* See how much more there is to read in this eraseblock... */ buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); if (!buf_len) { /* No more to read. Break out of main loop without marking this range of empty space as dirty (because it's not) */ jffs2_dbg(1, "Empty flash at %08x runs to end of block. Treating as free_space\n", empty_start); break; } /* point never reaches here */ scan_end = buf_len; jffs2_dbg(1, "Reading another 0x%x at 0x%08x\n", buf_len, ofs); err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); if (err) return err; buf_ofs = ofs; goto more_empty; } if (ofs == jeb->offset && je16_to_cpu(node->magic) == KSAMTIB_CIGAM_2SFFJ) { pr_warn("Magic bitmask is backwards at offset 0x%08x. Wrong endian filesystem?\n", ofs); if ((err = jffs2_scan_dirty_space(c, jeb, 4))) return err; ofs += 4; continue; } if (je16_to_cpu(node->magic) == JFFS2_DIRTY_BITMASK) { jffs2_dbg(1, "Dirty bitmask at 0x%08x\n", ofs); if ((err = jffs2_scan_dirty_space(c, jeb, 4))) return err; ofs += 4; continue; } if (je16_to_cpu(node->magic) == JFFS2_OLD_MAGIC_BITMASK) { pr_warn("Old JFFS2 bitmask found at 0x%08x\n", ofs); pr_warn("You cannot use older JFFS2 filesystems with newer kernels\n"); if ((err = jffs2_scan_dirty_space(c, jeb, 4))) return err; ofs += 4; continue; } if (je16_to_cpu(node->magic) != JFFS2_MAGIC_BITMASK) { /* OK. We're out of possibilities. 
Whinge and move on */ noisy_printk(&noise, "%s(): Magic bitmask 0x%04x not found at 0x%08x: 0x%04x instead\n", __func__, JFFS2_MAGIC_BITMASK, ofs, je16_to_cpu(node->magic)); if ((err = jffs2_scan_dirty_space(c, jeb, 4))) return err; ofs += 4; continue; } /* We seem to have a node of sorts. Check the CRC */ crcnode.magic = node->magic; crcnode.nodetype = cpu_to_je16( je16_to_cpu(node->nodetype) | JFFS2_NODE_ACCURATE); crcnode.totlen = node->totlen; hdr_crc = crc32(0, &crcnode, sizeof(crcnode)-4); if (hdr_crc != je32_to_cpu(node->hdr_crc)) { noisy_printk(&noise, "%s(): Node at 0x%08x {0x%04x, 0x%04x, 0x%08x) has invalid CRC 0x%08x (calculated 0x%08x)\n", __func__, ofs, je16_to_cpu(node->magic), je16_to_cpu(node->nodetype), je32_to_cpu(node->totlen), je32_to_cpu(node->hdr_crc), hdr_crc); if ((err = jffs2_scan_dirty_space(c, jeb, 4))) return err; ofs += 4; continue; } if (ofs + je32_to_cpu(node->totlen) > jeb->offset + c->sector_size) { /* Eep. Node goes over the end of the erase block. */ pr_warn("Node at 0x%08x with length 0x%08x would run over the end of the erase block\n", ofs, je32_to_cpu(node->totlen)); pr_warn("Perhaps the file system was created with the wrong erase size?\n"); if ((err = jffs2_scan_dirty_space(c, jeb, 4))) return err; ofs += 4; continue; } if (!(je16_to_cpu(node->nodetype) & JFFS2_NODE_ACCURATE)) { /* Wheee. This is an obsoleted node */ jffs2_dbg(2, "Node at 0x%08x is obsolete. Skipping\n", ofs); if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen))))) return err; ofs += PAD(je32_to_cpu(node->totlen)); continue; } switch(je16_to_cpu(node->nodetype)) { case JFFS2_NODETYPE_INODE: if (buf_ofs + buf_len < ofs + sizeof(struct jffs2_raw_inode)) { buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); jffs2_dbg(1, "Fewer than %zd bytes (inode node) left to end of buf. Reading 0x%x at 0x%08x\n", sizeof(struct jffs2_raw_inode), buf_len, ofs); err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); if (err) return err; buf_ofs = ofs; node = (void *)buf; } err = jffs2_scan_inode_node(c, jeb, (void *)node, ofs, s); if (err) return err; ofs += PAD(je32_to_cpu(node->totlen)); break; case JFFS2_NODETYPE_DIRENT: if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) { buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); jffs2_dbg(1, "Fewer than %d bytes (dirent node) left to end of buf. Reading 0x%x at 0x%08x\n", je32_to_cpu(node->totlen), buf_len, ofs); err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); if (err) return err; buf_ofs = ofs; node = (void *)buf; } err = jffs2_scan_dirent_node(c, jeb, (void *)node, ofs, s); if (err) return err; ofs += PAD(je32_to_cpu(node->totlen)); break; #ifdef CONFIG_JFFS2_FS_XATTR case JFFS2_NODETYPE_XATTR: if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) { buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); jffs2_dbg(1, "Fewer than %d bytes (xattr node) left to end of buf. Reading 0x%x at 0x%08x\n", je32_to_cpu(node->totlen), buf_len, ofs); err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); if (err) return err; buf_ofs = ofs; node = (void *)buf; } err = jffs2_scan_xattr_node(c, jeb, (void *)node, ofs, s); if (err) return err; ofs += PAD(je32_to_cpu(node->totlen)); break; case JFFS2_NODETYPE_XREF: if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) { buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); jffs2_dbg(1, "Fewer than %d bytes (xref node) left to end of buf. 
Reading 0x%x at 0x%08x\n", je32_to_cpu(node->totlen), buf_len, ofs); err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); if (err) return err; buf_ofs = ofs; node = (void *)buf; } err = jffs2_scan_xref_node(c, jeb, (void *)node, ofs, s); if (err) return err; ofs += PAD(je32_to_cpu(node->totlen)); break; #endif /* CONFIG_JFFS2_FS_XATTR */ case JFFS2_NODETYPE_CLEANMARKER: jffs2_dbg(1, "CLEANMARKER node found at 0x%08x\n", ofs); if (je32_to_cpu(node->totlen) != c->cleanmarker_size) { pr_notice("CLEANMARKER node found at 0x%08x has totlen 0x%x != normal 0x%x\n", ofs, je32_to_cpu(node->totlen), c->cleanmarker_size); if ((err = jffs2_scan_dirty_space(c, jeb, PAD(sizeof(struct jffs2_unknown_node))))) return err; ofs += PAD(sizeof(struct jffs2_unknown_node)); } else if (jeb->first_node) { pr_notice("CLEANMARKER node found at 0x%08x, not first node in block (0x%08x)\n", ofs, jeb->offset); if ((err = jffs2_scan_dirty_space(c, jeb, PAD(sizeof(struct jffs2_unknown_node))))) return err; ofs += PAD(sizeof(struct jffs2_unknown_node)); } else { jffs2_link_node_ref(c, jeb, ofs | REF_NORMAL, c->cleanmarker_size, NULL); ofs += PAD(c->cleanmarker_size); } break; case JFFS2_NODETYPE_PADDING: if (jffs2_sum_active()) jffs2_sum_add_padding_mem(s, je32_to_cpu(node->totlen)); if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen))))) return err; ofs += PAD(je32_to_cpu(node->totlen)); break; default: switch (je16_to_cpu(node->nodetype) & JFFS2_COMPAT_MASK) { case JFFS2_FEATURE_ROCOMPAT: pr_notice("Read-only compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs); c->flags |= JFFS2_SB_FLAG_RO; if (!(jffs2_is_readonly(c))) return -EROFS; if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen))))) return err; ofs += PAD(je32_to_cpu(node->totlen)); break; case JFFS2_FEATURE_INCOMPAT: pr_notice("Incompatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs); return -EINVAL; case JFFS2_FEATURE_RWCOMPAT_DELETE: jffs2_dbg(1, "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs); if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen))))) return err; ofs += PAD(je32_to_cpu(node->totlen)); break; case JFFS2_FEATURE_RWCOMPAT_COPY: { jffs2_dbg(1, "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs); jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(node->totlen)), NULL); /* We can't summarise nodes we don't grok */ jffs2_sum_disable_collecting(s); ofs += PAD(je32_to_cpu(node->totlen)); break; } } } } if (jffs2_sum_active()) { if (PAD(s->sum_size + JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size) { dbg_summary("There is not enough space for " "summary information, disabling for this jeb!\n"); jffs2_sum_disable_collecting(s); } } jffs2_dbg(1, "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x, wasted 0x%08x\n", jeb->offset, jeb->free_size, jeb->dirty_size, jeb->unchecked_size, jeb->used_size, jeb->wasted_size); /* mark_node_obsolete can add to wasted !! 
*/ if (jeb->wasted_size) { jeb->dirty_size += jeb->wasted_size; c->dirty_size += jeb->wasted_size; c->wasted_size -= jeb->wasted_size; jeb->wasted_size = 0; } return jffs2_scan_classify_jeb(c, jeb); } struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uint32_t ino) { struct jffs2_inode_cache *ic; ic = jffs2_get_ino_cache(c, ino); if (ic) return ic; if (ino > c->highest_ino) c->highest_ino = ino; ic = jffs2_alloc_inode_cache(); if (!ic) { pr_notice("%s(): allocation of inode cache failed\n", __func__); return NULL; } memset(ic, 0, sizeof(*ic)); ic->ino = ino; ic->nodes = (void *)ic; jffs2_add_ino_cache(c, ic); if (ino == 1) ic->pino_nlink = 1; return ic; } static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, struct jffs2_raw_inode *ri, uint32_t ofs, struct jffs2_summary *s) { struct jffs2_inode_cache *ic; uint32_t crc, ino = je32_to_cpu(ri->ino); jffs2_dbg(1, "%s(): Node at 0x%08x\n", __func__, ofs); /* We do very little here now. Just check the ino# to which we should attribute this node; we can do all the CRC checking etc. later. There's a tradeoff here -- we used to scan the flash once only, reading everything we want from it into memory, then building all our in-core data structures and freeing the extra information. Now we allow the first part of the mount to complete a lot quicker, but we have to go _back_ to the flash in order to finish the CRC checking, etc. Which means that the _full_ amount of time to get to proper write mode with GC operational may actually be _longer_ than before. Sucks to be me. */ /* Check the node CRC in any case. */ crc = crc32(0, ri, sizeof(*ri)-8); if (crc != je32_to_cpu(ri->node_crc)) { pr_notice("%s(): CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", __func__, ofs, je32_to_cpu(ri->node_crc), crc); /* * We believe totlen because the CRC on the node * _header_ was OK, just the node itself failed. */ return jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(ri->totlen))); } ic = jffs2_get_ino_cache(c, ino); if (!ic) { ic = jffs2_scan_make_ino_cache(c, ino); if (!ic) return -ENOMEM; } /* Wheee. It worked */ jffs2_link_node_ref(c, jeb, ofs | REF_UNCHECKED, PAD(je32_to_cpu(ri->totlen)), ic); jffs2_dbg(1, "Node is ino #%u, version %d. Range 0x%x-0x%x\n", je32_to_cpu(ri->ino), je32_to_cpu(ri->version), je32_to_cpu(ri->offset), je32_to_cpu(ri->offset)+je32_to_cpu(ri->dsize)); pseudo_random += je32_to_cpu(ri->version); if (jffs2_sum_active()) { jffs2_sum_add_inode_mem(s, ri, ofs - jeb->offset); } return 0; } static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, struct jffs2_raw_dirent *rd, uint32_t ofs, struct jffs2_summary *s) { struct jffs2_full_dirent *fd; struct jffs2_inode_cache *ic; uint32_t checkedlen; uint32_t crc; int err; jffs2_dbg(1, "%s(): Node at 0x%08x\n", __func__, ofs); /* We don't get here unless the node is still valid, so we don't have to mask in the ACCURATE bit any more. */ crc = crc32(0, rd, sizeof(*rd)-8); if (crc != je32_to_cpu(rd->node_crc)) { pr_notice("%s(): Node CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", __func__, ofs, je32_to_cpu(rd->node_crc), crc); /* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */ if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rd->totlen))))) return err; return 0; } pseudo_random += je32_to_cpu(rd->version); /* Should never happen. Did. 
(OLPC trac #4184)*/ checkedlen = strnlen(rd->name, rd->nsize); if (checkedlen < rd->nsize) { pr_err("Dirent at %08x has zeroes in name. Truncating to %d chars\n", ofs, checkedlen); } fd = jffs2_alloc_full_dirent(checkedlen+1); if (!fd) { return -ENOMEM; } memcpy(&fd->name, rd->name, checkedlen); fd->name[checkedlen] = 0; crc = crc32(0, fd->name, checkedlen); if (crc != je32_to_cpu(rd->name_crc)) { pr_notice("%s(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", __func__, ofs, je32_to_cpu(rd->name_crc), crc); jffs2_dbg(1, "Name for which CRC failed is (now) '%s', ino #%d\n", fd->name, je32_to_cpu(rd->ino)); jffs2_free_full_dirent(fd); /* FIXME: Why do we believe totlen? */ /* We believe totlen because the CRC on the node _header_ was OK, just the name failed. */ if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rd->totlen))))) return err; return 0; } ic = jffs2_scan_make_ino_cache(c, je32_to_cpu(rd->pino)); if (!ic) { jffs2_free_full_dirent(fd); return -ENOMEM; } fd->raw = jffs2_link_node_ref(c, jeb, ofs | dirent_node_state(rd), PAD(je32_to_cpu(rd->totlen)), ic); fd->next = NULL; fd->version = je32_to_cpu(rd->version); fd->ino = je32_to_cpu(rd->ino); fd->nhash = full_name_hash(NULL, fd->name, checkedlen); fd->type = rd->type; jffs2_add_fd_to_list(c, fd, &ic->scan_dents); if (jffs2_sum_active()) { jffs2_sum_add_dirent_mem(s, rd, ofs - jeb->offset); } return 0; } static int count_list(struct list_head *l) { uint32_t count = 0; struct list_head *tmp; list_for_each(tmp, l) { count++; } return count; } /* Note: This breaks if list_empty(head). I don't care. You might, if you copy this code and use it elsewhere :) */ static void rotate_list(struct list_head *head, uint32_t count) { struct list_head *n = head->next; list_del(head); while(count--) { n = n->next; } list_add(head, n); } void jffs2_rotate_lists(struct jffs2_sb_info *c) { uint32_t x; uint32_t rotateby; x = count_list(&c->clean_list); if (x) { rotateby = pseudo_random % x; rotate_list((&c->clean_list), rotateby); } x = count_list(&c->very_dirty_list); if (x) { rotateby = pseudo_random % x; rotate_list((&c->very_dirty_list), rotateby); } x = count_list(&c->dirty_list); if (x) { rotateby = pseudo_random % x; rotate_list((&c->dirty_list), rotateby); } x = count_list(&c->erasable_list); if (x) { rotateby = pseudo_random % x; rotate_list((&c->erasable_list), rotateby); } if (c->nr_erasing_blocks) { rotateby = pseudo_random % c->nr_erasing_blocks; rotate_list((&c->erase_pending_list), rotateby); } if (c->nr_free_blocks) { rotateby = pseudo_random % c->nr_free_blocks; rotate_list((&c->free_list), rotateby); } }
linux-master
fs/jffs2/scan.c
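A standalone sketch (plain C, illustrative only; assumes a word-aligned buffer) of the word-wise empty-flash check jffs2_scan_eraseblock() performs above before declaring a block erased: walk up to scan_len bytes and report where the first word that is not 0xFFFFFFFF appears.

/* Illustrative sketch of the word-wise erased-prefix scan; not kernel code. */
#include <stdint.h>
#include <stddef.h>

/* Returns the byte offset of the first word that is not all-0xFF,
 * or scan_len if the whole scanned prefix reads as erased flash. */
static size_t first_dirty_word(const unsigned char *buf, size_t scan_len)
{
	size_t ofs = 0;

	while (ofs < scan_len && *(const uint32_t *)(buf + ofs) == 0xFFFFFFFF)
		ofs += 4;
	return ofs;
}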
/* * JFFS2 -- Journalling Flash File System, Version 2. * * Copyright © 2001-2007 Red Hat, Inc. * Copyright © 2004-2010 David Woodhouse <[email protected]> * * Created by David Woodhouse <[email protected]> * * For licensing information, see the file 'LICENCE' in this directory. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/capability.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/cred.h> #include <linux/fs.h> #include <linux/fs_context.h> #include <linux/list.h> #include <linux/mtd/mtd.h> #include <linux/pagemap.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/vfs.h> #include <linux/crc32.h> #include "nodelist.h" static int jffs2_flash_setup(struct jffs2_sb_info *c); int jffs2_do_setattr (struct inode *inode, struct iattr *iattr) { struct jffs2_full_dnode *old_metadata, *new_metadata; struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); struct jffs2_raw_inode *ri; union jffs2_device_node dev; unsigned char *mdata = NULL; int mdatalen = 0; unsigned int ivalid; uint32_t alloclen; int ret; int alloc_type = ALLOC_NORMAL; jffs2_dbg(1, "%s(): ino #%lu\n", __func__, inode->i_ino); /* Special cases - we don't want more than one data node for these types on the medium at any time. So setattr must read the original data associated with the node (i.e. the device numbers or the target name) and write it out again with the appropriate data attached */ if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) { /* For these, we don't actually need to read the old node */ mdatalen = jffs2_encode_dev(&dev, inode->i_rdev); mdata = (char *)&dev; jffs2_dbg(1, "%s(): Writing %d bytes of kdev_t\n", __func__, mdatalen); } else if (S_ISLNK(inode->i_mode)) { mutex_lock(&f->sem); mdatalen = f->metadata->size; mdata = kmalloc(f->metadata->size, GFP_USER); if (!mdata) { mutex_unlock(&f->sem); return -ENOMEM; } ret = jffs2_read_dnode(c, f, f->metadata, mdata, 0, mdatalen); if (ret) { mutex_unlock(&f->sem); kfree(mdata); return ret; } mutex_unlock(&f->sem); jffs2_dbg(1, "%s(): Writing %d bytes of symlink target\n", __func__, mdatalen); } ri = jffs2_alloc_raw_inode(); if (!ri) { if (S_ISLNK(inode->i_mode)) kfree(mdata); return -ENOMEM; } ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &alloclen, ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); if (ret) { jffs2_free_raw_inode(ri); if (S_ISLNK(inode->i_mode)) kfree(mdata); return ret; } mutex_lock(&f->sem); ivalid = iattr->ia_valid; ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE); ri->totlen = cpu_to_je32(sizeof(*ri) + mdatalen); ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4)); ri->ino = cpu_to_je32(inode->i_ino); ri->version = cpu_to_je32(++f->highest_version); ri->uid = cpu_to_je16((ivalid & ATTR_UID)? from_kuid(&init_user_ns, iattr->ia_uid):i_uid_read(inode)); ri->gid = cpu_to_je16((ivalid & ATTR_GID)? 
from_kgid(&init_user_ns, iattr->ia_gid):i_gid_read(inode)); if (ivalid & ATTR_MODE) ri->mode = cpu_to_jemode(iattr->ia_mode); else ri->mode = cpu_to_jemode(inode->i_mode); ri->isize = cpu_to_je32((ivalid & ATTR_SIZE)?iattr->ia_size:inode->i_size); ri->atime = cpu_to_je32(I_SEC((ivalid & ATTR_ATIME)?iattr->ia_atime:inode->i_atime)); ri->mtime = cpu_to_je32(I_SEC((ivalid & ATTR_MTIME)?iattr->ia_mtime:inode->i_mtime)); ri->ctime = cpu_to_je32(I_SEC((ivalid & ATTR_CTIME)?iattr->ia_ctime:inode_get_ctime(inode))); ri->offset = cpu_to_je32(0); ri->csize = ri->dsize = cpu_to_je32(mdatalen); ri->compr = JFFS2_COMPR_NONE; if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) { /* It's an extension. Make it a hole node */ ri->compr = JFFS2_COMPR_ZERO; ri->dsize = cpu_to_je32(iattr->ia_size - inode->i_size); ri->offset = cpu_to_je32(inode->i_size); } else if (ivalid & ATTR_SIZE && !iattr->ia_size) { /* For truncate-to-zero, treat it as deletion because it'll always be obsoleting all previous nodes */ alloc_type = ALLOC_DELETION; } ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); if (mdatalen) ri->data_crc = cpu_to_je32(crc32(0, mdata, mdatalen)); else ri->data_crc = cpu_to_je32(0); new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, alloc_type); if (S_ISLNK(inode->i_mode)) kfree(mdata); if (IS_ERR(new_metadata)) { jffs2_complete_reservation(c); jffs2_free_raw_inode(ri); mutex_unlock(&f->sem); return PTR_ERR(new_metadata); } /* It worked. Update the inode */ inode->i_atime = ITIME(je32_to_cpu(ri->atime)); inode_set_ctime_to_ts(inode, ITIME(je32_to_cpu(ri->ctime))); inode->i_mtime = ITIME(je32_to_cpu(ri->mtime)); inode->i_mode = jemode_to_cpu(ri->mode); i_uid_write(inode, je16_to_cpu(ri->uid)); i_gid_write(inode, je16_to_cpu(ri->gid)); old_metadata = f->metadata; if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) jffs2_truncate_fragtree (c, &f->fragtree, iattr->ia_size); if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) { jffs2_add_full_dnode_to_inode(c, f, new_metadata); inode->i_size = iattr->ia_size; inode->i_blocks = (inode->i_size + 511) >> 9; f->metadata = NULL; } else { f->metadata = new_metadata; } if (old_metadata) { jffs2_mark_node_obsolete(c, old_metadata->raw); jffs2_free_full_dnode(old_metadata); } jffs2_free_raw_inode(ri); mutex_unlock(&f->sem); jffs2_complete_reservation(c); /* We have to do the truncate_setsize() without f->sem held, since some pages may be locked and waiting for it in read_folio(). We are protected from a simultaneous write() extending i_size back past iattr->ia_size, because do_truncate() holds the generic inode semaphore. 
*/ if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) { truncate_setsize(inode, iattr->ia_size); inode->i_blocks = (inode->i_size + 511) >> 9; } return 0; } int jffs2_setattr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *iattr) { struct inode *inode = d_inode(dentry); int rc; rc = setattr_prepare(&nop_mnt_idmap, dentry, iattr); if (rc) return rc; rc = jffs2_do_setattr(inode, iattr); if (!rc && (iattr->ia_valid & ATTR_MODE)) rc = posix_acl_chmod(&nop_mnt_idmap, dentry, inode->i_mode); return rc; } int jffs2_statfs(struct dentry *dentry, struct kstatfs *buf) { struct jffs2_sb_info *c = JFFS2_SB_INFO(dentry->d_sb); unsigned long avail; buf->f_type = JFFS2_SUPER_MAGIC; buf->f_bsize = 1 << PAGE_SHIFT; buf->f_blocks = c->flash_size >> PAGE_SHIFT; buf->f_files = 0; buf->f_ffree = 0; buf->f_namelen = JFFS2_MAX_NAME_LEN; buf->f_fsid.val[0] = JFFS2_SUPER_MAGIC; buf->f_fsid.val[1] = c->mtd->index; spin_lock(&c->erase_completion_lock); avail = c->dirty_size + c->free_size; if (avail > c->sector_size * c->resv_blocks_write) avail -= c->sector_size * c->resv_blocks_write; else avail = 0; spin_unlock(&c->erase_completion_lock); buf->f_bavail = buf->f_bfree = avail >> PAGE_SHIFT; return 0; } void jffs2_evict_inode (struct inode *inode) { /* We can forget about this inode for now - drop all * the nodelists associated with it, etc. */ struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); jffs2_dbg(1, "%s(): ino #%lu mode %o\n", __func__, inode->i_ino, inode->i_mode); truncate_inode_pages_final(&inode->i_data); clear_inode(inode); jffs2_do_clear_inode(c, f); } struct inode *jffs2_iget(struct super_block *sb, unsigned long ino) { struct jffs2_inode_info *f; struct jffs2_sb_info *c; struct jffs2_raw_inode latest_node; union jffs2_device_node jdev; struct inode *inode; dev_t rdev = 0; int ret; jffs2_dbg(1, "%s(): ino == %lu\n", __func__, ino); inode = iget_locked(sb, ino); if (!inode) return ERR_PTR(-ENOMEM); if (!(inode->i_state & I_NEW)) return inode; f = JFFS2_INODE_INFO(inode); c = JFFS2_SB_INFO(inode->i_sb); jffs2_init_inode_info(f); mutex_lock(&f->sem); ret = jffs2_do_read_inode(c, f, inode->i_ino, &latest_node); if (ret) goto error; inode->i_mode = jemode_to_cpu(latest_node.mode); i_uid_write(inode, je16_to_cpu(latest_node.uid)); i_gid_write(inode, je16_to_cpu(latest_node.gid)); inode->i_size = je32_to_cpu(latest_node.isize); inode->i_atime = ITIME(je32_to_cpu(latest_node.atime)); inode->i_mtime = ITIME(je32_to_cpu(latest_node.mtime)); inode_set_ctime_to_ts(inode, ITIME(je32_to_cpu(latest_node.ctime))); set_nlink(inode, f->inocache->pino_nlink); inode->i_blocks = (inode->i_size + 511) >> 9; switch (inode->i_mode & S_IFMT) { case S_IFLNK: inode->i_op = &jffs2_symlink_inode_operations; inode->i_link = f->target; break; case S_IFDIR: { struct jffs2_full_dirent *fd; set_nlink(inode, 2); /* parent and '.' 
*/ for (fd=f->dents; fd; fd = fd->next) { if (fd->type == DT_DIR && fd->ino) inc_nlink(inode); } /* Root dir gets i_nlink 3 for some reason */ if (inode->i_ino == 1) inc_nlink(inode); inode->i_op = &jffs2_dir_inode_operations; inode->i_fop = &jffs2_dir_operations; break; } case S_IFREG: inode->i_op = &jffs2_file_inode_operations; inode->i_fop = &jffs2_file_operations; inode->i_mapping->a_ops = &jffs2_file_address_operations; inode->i_mapping->nrpages = 0; break; case S_IFBLK: case S_IFCHR: /* Read the device numbers from the media */ if (f->metadata->size != sizeof(jdev.old_id) && f->metadata->size != sizeof(jdev.new_id)) { pr_notice("Device node has strange size %d\n", f->metadata->size); goto error_io; } jffs2_dbg(1, "Reading device numbers from flash\n"); ret = jffs2_read_dnode(c, f, f->metadata, (char *)&jdev, 0, f->metadata->size); if (ret < 0) { /* Eep */ pr_notice("Read device numbers for inode %lu failed\n", (unsigned long)inode->i_ino); goto error; } if (f->metadata->size == sizeof(jdev.old_id)) rdev = old_decode_dev(je16_to_cpu(jdev.old_id)); else rdev = new_decode_dev(je32_to_cpu(jdev.new_id)); fallthrough; case S_IFSOCK: case S_IFIFO: inode->i_op = &jffs2_file_inode_operations; init_special_inode(inode, inode->i_mode, rdev); break; default: pr_warn("%s(): Bogus i_mode %o for ino %lu\n", __func__, inode->i_mode, (unsigned long)inode->i_ino); } mutex_unlock(&f->sem); jffs2_dbg(1, "jffs2_read_inode() returning\n"); unlock_new_inode(inode); return inode; error_io: ret = -EIO; error: mutex_unlock(&f->sem); iget_failed(inode); return ERR_PTR(ret); } void jffs2_dirty_inode(struct inode *inode, int flags) { struct iattr iattr; if (!(inode->i_state & I_DIRTY_DATASYNC)) { jffs2_dbg(2, "%s(): not calling setattr() for ino #%lu\n", __func__, inode->i_ino); return; } jffs2_dbg(1, "%s(): calling setattr() for ino #%lu\n", __func__, inode->i_ino); iattr.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_MTIME|ATTR_CTIME; iattr.ia_mode = inode->i_mode; iattr.ia_uid = inode->i_uid; iattr.ia_gid = inode->i_gid; iattr.ia_atime = inode->i_atime; iattr.ia_mtime = inode->i_mtime; iattr.ia_ctime = inode_get_ctime(inode); jffs2_do_setattr(inode, &iattr); } int jffs2_do_remount_fs(struct super_block *sb, struct fs_context *fc) { struct jffs2_sb_info *c = JFFS2_SB_INFO(sb); if (c->flags & JFFS2_SB_FLAG_RO && !sb_rdonly(sb)) return -EROFS; /* We stop if it was running, then restart if it needs to. This also catches the case where it was stopped and this is just a remount to restart it. Flush the writebuffer, if necessary, else we loose it */ if (!sb_rdonly(sb)) { jffs2_stop_garbage_collect_thread(c); mutex_lock(&c->alloc_sem); jffs2_flush_wbuf_pad(c); mutex_unlock(&c->alloc_sem); } if (!(fc->sb_flags & SB_RDONLY)) jffs2_start_garbage_collect_thread(c); fc->sb_flags |= SB_NOATIME; return 0; } /* jffs2_new_inode: allocate a new inode and inocache, add it to the hash, fill in the raw_inode while you're at it. 
*/ struct inode *jffs2_new_inode (struct inode *dir_i, umode_t mode, struct jffs2_raw_inode *ri) { struct inode *inode; struct super_block *sb = dir_i->i_sb; struct jffs2_sb_info *c; struct jffs2_inode_info *f; int ret; jffs2_dbg(1, "%s(): dir_i %ld, mode 0x%x\n", __func__, dir_i->i_ino, mode); c = JFFS2_SB_INFO(sb); inode = new_inode(sb); if (!inode) return ERR_PTR(-ENOMEM); f = JFFS2_INODE_INFO(inode); jffs2_init_inode_info(f); mutex_lock(&f->sem); memset(ri, 0, sizeof(*ri)); /* Set OS-specific defaults for new inodes */ ri->uid = cpu_to_je16(from_kuid(&init_user_ns, current_fsuid())); if (dir_i->i_mode & S_ISGID) { ri->gid = cpu_to_je16(i_gid_read(dir_i)); if (S_ISDIR(mode)) mode |= S_ISGID; } else { ri->gid = cpu_to_je16(from_kgid(&init_user_ns, current_fsgid())); } /* POSIX ACLs have to be processed now, at least partly. The umask is only applied if there's no default ACL */ ret = jffs2_init_acl_pre(dir_i, inode, &mode); if (ret) { mutex_unlock(&f->sem); make_bad_inode(inode); iput(inode); return ERR_PTR(ret); } ret = jffs2_do_new_inode (c, f, mode, ri); if (ret) { mutex_unlock(&f->sem); make_bad_inode(inode); iput(inode); return ERR_PTR(ret); } set_nlink(inode, 1); inode->i_ino = je32_to_cpu(ri->ino); inode->i_mode = jemode_to_cpu(ri->mode); i_gid_write(inode, je16_to_cpu(ri->gid)); i_uid_write(inode, je16_to_cpu(ri->uid)); inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode); ri->atime = ri->mtime = ri->ctime = cpu_to_je32(I_SEC(inode->i_mtime)); inode->i_blocks = 0; inode->i_size = 0; if (insert_inode_locked(inode) < 0) { mutex_unlock(&f->sem); make_bad_inode(inode); iput(inode); return ERR_PTR(-EINVAL); } return inode; } static int calculate_inocache_hashsize(uint32_t flash_size) { /* * Pick a inocache hash size based on the size of the medium. * Count how many megabytes we're dealing with, apply a hashsize twice * that size, but rounding down to the usual big powers of 2. And keep * to sensible bounds. */ int size_mb = flash_size / 1024 / 1024; int hashsize = (size_mb * 2) & ~0x3f; if (hashsize < INOCACHE_HASHSIZE_MIN) return INOCACHE_HASHSIZE_MIN; if (hashsize > INOCACHE_HASHSIZE_MAX) return INOCACHE_HASHSIZE_MAX; return hashsize; } int jffs2_do_fill_super(struct super_block *sb, struct fs_context *fc) { struct jffs2_sb_info *c; struct inode *root_i; int ret; size_t blocks; c = JFFS2_SB_INFO(sb); /* Do not support the MLC nand */ if (c->mtd->type == MTD_MLCNANDFLASH) return -EINVAL; #ifndef CONFIG_JFFS2_FS_WRITEBUFFER if (c->mtd->type == MTD_NANDFLASH) { errorf(fc, "Cannot operate on NAND flash unless jffs2 NAND support is compiled in"); return -EINVAL; } if (c->mtd->type == MTD_DATAFLASH) { errorf(fc, "Cannot operate on DataFlash unless jffs2 DataFlash support is compiled in"); return -EINVAL; } #endif c->flash_size = c->mtd->size; c->sector_size = c->mtd->erasesize; blocks = c->flash_size / c->sector_size; /* * Size alignment check */ if ((c->sector_size * blocks) != c->flash_size) { c->flash_size = c->sector_size * blocks; infof(fc, "Flash size not aligned to erasesize, reducing to %dKiB", c->flash_size / 1024); } if (c->flash_size < 5*c->sector_size) { errorf(fc, "Too few erase blocks (%d)", c->flash_size / c->sector_size); return -EINVAL; } c->cleanmarker_size = sizeof(struct jffs2_unknown_node); /* NAND (or other bizarre) flash... 
do setup accordingly */ ret = jffs2_flash_setup(c); if (ret) return ret; c->inocache_hashsize = calculate_inocache_hashsize(c->flash_size); c->inocache_list = kcalloc(c->inocache_hashsize, sizeof(struct jffs2_inode_cache *), GFP_KERNEL); if (!c->inocache_list) { ret = -ENOMEM; goto out_wbuf; } jffs2_init_xattr_subsystem(c); if ((ret = jffs2_do_mount_fs(c))) goto out_inohash; jffs2_dbg(1, "%s(): Getting root inode\n", __func__); root_i = jffs2_iget(sb, 1); if (IS_ERR(root_i)) { jffs2_dbg(1, "get root inode failed\n"); ret = PTR_ERR(root_i); goto out_root; } ret = -ENOMEM; jffs2_dbg(1, "%s(): d_make_root()\n", __func__); sb->s_root = d_make_root(root_i); if (!sb->s_root) goto out_root; sb->s_maxbytes = 0xFFFFFFFF; sb->s_blocksize = PAGE_SIZE; sb->s_blocksize_bits = PAGE_SHIFT; sb->s_magic = JFFS2_SUPER_MAGIC; sb->s_time_min = 0; sb->s_time_max = U32_MAX; if (!sb_rdonly(sb)) jffs2_start_garbage_collect_thread(c); return 0; out_root: jffs2_free_ino_caches(c); jffs2_free_raw_node_refs(c); kvfree(c->blocks); jffs2_clear_xattr_subsystem(c); jffs2_sum_exit(c); out_inohash: kfree(c->inocache_list); out_wbuf: jffs2_flash_cleanup(c); return ret; } void jffs2_gc_release_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f) { iput(OFNI_EDONI_2SFFJ(f)); } struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c, int inum, int unlinked) { struct inode *inode; struct jffs2_inode_cache *ic; if (unlinked) { /* The inode has zero nlink but its nodes weren't yet marked obsolete. This has to be because we're still waiting for the final (close() and) iput() to happen. There's a possibility that the final iput() could have happened while we were contemplating. In order to ensure that we don't cause a new read_inode() (which would fail) for the inode in question, we use ilookup() in this case instead of iget(). The nlink can't _become_ zero at this point because we're holding the alloc_sem, and jffs2_do_unlink() would also need that while decrementing nlink on any inode. */ inode = ilookup(OFNI_BS_2SFFJ(c), inum); if (!inode) { jffs2_dbg(1, "ilookup() failed for ino #%u; inode is probably deleted.\n", inum); spin_lock(&c->inocache_lock); ic = jffs2_get_ino_cache(c, inum); if (!ic) { jffs2_dbg(1, "Inode cache for ino #%u is gone\n", inum); spin_unlock(&c->inocache_lock); return NULL; } if (ic->state != INO_STATE_CHECKEDABSENT) { /* Wait for progress. Don't just loop */ jffs2_dbg(1, "Waiting for ino #%u in state %d\n", ic->ino, ic->state); sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock); } else { spin_unlock(&c->inocache_lock); } return NULL; } } else { /* Inode has links to it still; they're not going away because jffs2_do_unlink() would need the alloc_sem and we have it. Just iget() it, and if read_inode() is necessary that's OK. */ inode = jffs2_iget(OFNI_BS_2SFFJ(c), inum); if (IS_ERR(inode)) return ERR_CAST(inode); } if (is_bad_inode(inode)) { pr_notice("Eep. read_inode() failed for ino #%u. unlinked %d\n", inum, unlinked); /* NB. This will happen again. We need to do something appropriate here. */ iput(inode); return ERR_PTR(-EIO); } return JFFS2_INODE_INFO(inode); } static int jffs2_flash_setup(struct jffs2_sb_info *c) { int ret = 0; if (jffs2_cleanmarker_oob(c)) { /* NAND flash... 
do setup accordingly */ ret = jffs2_nand_flash_setup(c); if (ret) return ret; } /* and Dataflash */ if (jffs2_dataflash(c)) { ret = jffs2_dataflash_setup(c); if (ret) return ret; } /* and Intel "Sibley" flash */ if (jffs2_nor_wbuf_flash(c)) { ret = jffs2_nor_wbuf_flash_setup(c); if (ret) return ret; } /* and an UBI volume */ if (jffs2_ubivol(c)) { ret = jffs2_ubivol_setup(c); if (ret) return ret; } return ret; } void jffs2_flash_cleanup(struct jffs2_sb_info *c) { if (jffs2_cleanmarker_oob(c)) { jffs2_nand_flash_cleanup(c); } /* and DataFlash */ if (jffs2_dataflash(c)) { jffs2_dataflash_cleanup(c); } /* and Intel "Sibley" flash */ if (jffs2_nor_wbuf_flash(c)) { jffs2_nor_wbuf_flash_cleanup(c); } /* and an UBI volume */ if (jffs2_ubivol(c)) { jffs2_ubivol_cleanup(c); } }
linux-master
fs/jffs2/fs.c
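A hedged arithmetic sketch of the availability figure jffs2_statfs() reports above: dirty plus free space, minus the write reservation, floored at zero (the caller then shifts the result down by PAGE_SHIFT); the parameter names below are illustrative.

/* Illustrative sketch of the jffs2_statfs() free-space arithmetic; not kernel code. */
#include <stdint.h>

static uint64_t jffs2_avail_bytes(uint64_t dirty_size, uint64_t free_size,
				  uint32_t sector_size, uint32_t resv_blocks_write)
{
	uint64_t avail = dirty_size + free_size;
	uint64_t resv = (uint64_t)sector_size * resv_blocks_write;

	/* Keep the blocks reserved for writes out of the reported free space. */
	return avail > resv ? avail - resv : 0;
}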
/* * JFFS2 -- Journalling Flash File System, Version 2. * * Copyright © 2001-2007 Red Hat, Inc. * Copyright © 2004-2010 David Woodhouse <[email protected]> * * Created by David Woodhouse <[email protected]> * * For licensing information, see the file 'LICENCE' in this directory. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/crc32.h> #include <linux/jffs2.h> #include "jffs2_fs_i.h" #include "jffs2_fs_sb.h" #include <linux/time.h> #include "nodelist.h" static int jffs2_readdir (struct file *, struct dir_context *); static int jffs2_create (struct mnt_idmap *, struct inode *, struct dentry *, umode_t, bool); static struct dentry *jffs2_lookup (struct inode *,struct dentry *, unsigned int); static int jffs2_link (struct dentry *,struct inode *,struct dentry *); static int jffs2_unlink (struct inode *,struct dentry *); static int jffs2_symlink (struct mnt_idmap *, struct inode *, struct dentry *, const char *); static int jffs2_mkdir (struct mnt_idmap *, struct inode *,struct dentry *, umode_t); static int jffs2_rmdir (struct inode *,struct dentry *); static int jffs2_mknod (struct mnt_idmap *, struct inode *,struct dentry *, umode_t,dev_t); static int jffs2_rename (struct mnt_idmap *, struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); const struct file_operations jffs2_dir_operations = { .read = generic_read_dir, .iterate_shared=jffs2_readdir, .unlocked_ioctl=jffs2_ioctl, .fsync = jffs2_fsync, .llseek = generic_file_llseek, }; const struct inode_operations jffs2_dir_inode_operations = { .create = jffs2_create, .lookup = jffs2_lookup, .link = jffs2_link, .unlink = jffs2_unlink, .symlink = jffs2_symlink, .mkdir = jffs2_mkdir, .rmdir = jffs2_rmdir, .mknod = jffs2_mknod, .rename = jffs2_rename, .get_inode_acl = jffs2_get_acl, .set_acl = jffs2_set_acl, .setattr = jffs2_setattr, .listxattr = jffs2_listxattr, }; /***********************************************************************/ /* We keep the dirent list sorted in increasing order of name hash, and we use the same hash function as the dentries. Makes this nice and simple */ static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target, unsigned int flags) { struct jffs2_inode_info *dir_f; struct jffs2_full_dirent *fd = NULL, *fd_list; uint32_t ino = 0; struct inode *inode = NULL; unsigned int nhash; jffs2_dbg(1, "jffs2_lookup()\n"); if (target->d_name.len > JFFS2_MAX_NAME_LEN) return ERR_PTR(-ENAMETOOLONG); dir_f = JFFS2_INODE_INFO(dir_i); /* The 'nhash' on the fd_list is not the same as the dentry hash */ nhash = full_name_hash(NULL, target->d_name.name, target->d_name.len); mutex_lock(&dir_f->sem); /* NB: The 2.2 backport will need to explicitly check for '.' and '..' 
here */ for (fd_list = dir_f->dents; fd_list && fd_list->nhash <= nhash; fd_list = fd_list->next) { if (fd_list->nhash == nhash && (!fd || fd_list->version > fd->version) && strlen(fd_list->name) == target->d_name.len && !strncmp(fd_list->name, target->d_name.name, target->d_name.len)) { fd = fd_list; } } if (fd) ino = fd->ino; mutex_unlock(&dir_f->sem); if (ino) { inode = jffs2_iget(dir_i->i_sb, ino); if (IS_ERR(inode)) pr_warn("iget() failed for ino #%u\n", ino); } return d_splice_alias(inode, target); } /***********************************************************************/ static int jffs2_readdir(struct file *file, struct dir_context *ctx) { struct inode *inode = file_inode(file); struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); struct jffs2_full_dirent *fd; unsigned long curofs = 1; jffs2_dbg(1, "jffs2_readdir() for dir_i #%lu\n", inode->i_ino); if (!dir_emit_dots(file, ctx)) return 0; mutex_lock(&f->sem); for (fd = f->dents; fd; fd = fd->next) { curofs++; /* First loop: curofs = 2; pos = 2 */ if (curofs < ctx->pos) { jffs2_dbg(2, "Skipping dirent: \"%s\", ino #%u, type %d, because curofs %ld < offset %ld\n", fd->name, fd->ino, fd->type, curofs, (unsigned long)ctx->pos); continue; } if (!fd->ino) { jffs2_dbg(2, "Skipping deletion dirent \"%s\"\n", fd->name); ctx->pos++; continue; } jffs2_dbg(2, "Dirent %ld: \"%s\", ino #%u, type %d\n", (unsigned long)ctx->pos, fd->name, fd->ino, fd->type); if (!dir_emit(ctx, fd->name, strlen(fd->name), fd->ino, fd->type)) break; ctx->pos++; } mutex_unlock(&f->sem); return 0; } /***********************************************************************/ static int jffs2_create(struct mnt_idmap *idmap, struct inode *dir_i, struct dentry *dentry, umode_t mode, bool excl) { struct jffs2_raw_inode *ri; struct jffs2_inode_info *f, *dir_f; struct jffs2_sb_info *c; struct inode *inode; int ret; ri = jffs2_alloc_raw_inode(); if (!ri) return -ENOMEM; c = JFFS2_SB_INFO(dir_i->i_sb); jffs2_dbg(1, "%s()\n", __func__); inode = jffs2_new_inode(dir_i, mode, ri); if (IS_ERR(inode)) { jffs2_dbg(1, "jffs2_new_inode() failed\n"); jffs2_free_raw_inode(ri); return PTR_ERR(inode); } inode->i_op = &jffs2_file_inode_operations; inode->i_fop = &jffs2_file_operations; inode->i_mapping->a_ops = &jffs2_file_address_operations; inode->i_mapping->nrpages = 0; f = JFFS2_INODE_INFO(inode); dir_f = JFFS2_INODE_INFO(dir_i); /* jffs2_do_create() will want to lock it, _after_ reserving space and taking c-alloc_sem. If we keep it locked here, lockdep gets unhappy (although it's a false positive; nothing else will be looking at this inode yet so there's no chance of AB-BA deadlock involving its f->sem). */ mutex_unlock(&f->sem); ret = jffs2_do_create(c, dir_f, f, ri, &dentry->d_name); if (ret) goto fail; dir_i->i_mtime = inode_set_ctime_to_ts(dir_i, ITIME(je32_to_cpu(ri->ctime))); jffs2_free_raw_inode(ri); jffs2_dbg(1, "%s(): Created ino #%lu with mode %o, nlink %d(%d). 
nrpages %ld\n", __func__, inode->i_ino, inode->i_mode, inode->i_nlink, f->inocache->pino_nlink, inode->i_mapping->nrpages); d_instantiate_new(dentry, inode); return 0; fail: iget_failed(inode); jffs2_free_raw_inode(ri); return ret; } /***********************************************************************/ static int jffs2_unlink(struct inode *dir_i, struct dentry *dentry) { struct jffs2_sb_info *c = JFFS2_SB_INFO(dir_i->i_sb); struct jffs2_inode_info *dir_f = JFFS2_INODE_INFO(dir_i); struct jffs2_inode_info *dead_f = JFFS2_INODE_INFO(d_inode(dentry)); int ret; uint32_t now = JFFS2_NOW(); ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name, dentry->d_name.len, dead_f, now); if (dead_f->inocache) set_nlink(d_inode(dentry), dead_f->inocache->pino_nlink); if (!ret) dir_i->i_mtime = inode_set_ctime_to_ts(dir_i, ITIME(now)); return ret; } /***********************************************************************/ static int jffs2_link (struct dentry *old_dentry, struct inode *dir_i, struct dentry *dentry) { struct jffs2_sb_info *c = JFFS2_SB_INFO(old_dentry->d_sb); struct jffs2_inode_info *f = JFFS2_INODE_INFO(d_inode(old_dentry)); struct jffs2_inode_info *dir_f = JFFS2_INODE_INFO(dir_i); int ret; uint8_t type; uint32_t now; /* Don't let people make hard links to bad inodes. */ if (!f->inocache) return -EIO; if (d_is_dir(old_dentry)) return -EPERM; /* XXX: This is ugly */ type = (d_inode(old_dentry)->i_mode & S_IFMT) >> 12; if (!type) type = DT_REG; now = JFFS2_NOW(); ret = jffs2_do_link(c, dir_f, f->inocache->ino, type, dentry->d_name.name, dentry->d_name.len, now); if (!ret) { mutex_lock(&f->sem); set_nlink(d_inode(old_dentry), ++f->inocache->pino_nlink); mutex_unlock(&f->sem); d_instantiate(dentry, d_inode(old_dentry)); dir_i->i_mtime = inode_set_ctime_to_ts(dir_i, ITIME(now)); ihold(d_inode(old_dentry)); } return ret; } /***********************************************************************/ static int jffs2_symlink (struct mnt_idmap *idmap, struct inode *dir_i, struct dentry *dentry, const char *target) { struct jffs2_inode_info *f, *dir_f; struct jffs2_sb_info *c; struct inode *inode; struct jffs2_raw_inode *ri; struct jffs2_raw_dirent *rd; struct jffs2_full_dnode *fn; struct jffs2_full_dirent *fd; int namelen; uint32_t alloclen; int ret, targetlen = strlen(target); /* FIXME: If you care. We'd need to use frags for the target if it grows much more than this */ if (targetlen > 254) return -ENAMETOOLONG; ri = jffs2_alloc_raw_inode(); if (!ri) return -ENOMEM; c = JFFS2_SB_INFO(dir_i->i_sb); /* Try to reserve enough space for both node and dirent. * Just the node will do for now, though */ namelen = dentry->d_name.len; ret = jffs2_reserve_space(c, sizeof(*ri) + targetlen, &alloclen, ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); if (ret) { jffs2_free_raw_inode(ri); return ret; } inode = jffs2_new_inode(dir_i, S_IFLNK | S_IRWXUGO, ri); if (IS_ERR(inode)) { jffs2_free_raw_inode(ri); jffs2_complete_reservation(c); return PTR_ERR(inode); } inode->i_op = &jffs2_symlink_inode_operations; f = JFFS2_INODE_INFO(inode); inode->i_size = targetlen; ri->isize = ri->dsize = ri->csize = cpu_to_je32(inode->i_size); ri->totlen = cpu_to_je32(sizeof(*ri) + inode->i_size); ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4)); ri->compr = JFFS2_COMPR_NONE; ri->data_crc = cpu_to_je32(crc32(0, target, targetlen)); ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); fn = jffs2_write_dnode(c, f, ri, target, targetlen, ALLOC_NORMAL); jffs2_free_raw_inode(ri); if (IS_ERR(fn)) { /* Eeek. 
Wave bye bye */ mutex_unlock(&f->sem); jffs2_complete_reservation(c); ret = PTR_ERR(fn); goto fail; } /* We use f->target field to store the target path. */ f->target = kmemdup(target, targetlen + 1, GFP_KERNEL); if (!f->target) { pr_warn("Can't allocate %d bytes of memory\n", targetlen + 1); mutex_unlock(&f->sem); jffs2_complete_reservation(c); ret = -ENOMEM; goto fail; } inode->i_link = f->target; jffs2_dbg(1, "%s(): symlink's target '%s' cached\n", __func__, (char *)f->target); /* No data here. Only a metadata node, which will be obsoleted by the first data write */ f->metadata = fn; mutex_unlock(&f->sem); jffs2_complete_reservation(c); ret = jffs2_init_security(inode, dir_i, &dentry->d_name); if (ret) goto fail; ret = jffs2_init_acl_post(inode); if (ret) goto fail; ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen, ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); if (ret) goto fail; rd = jffs2_alloc_raw_dirent(); if (!rd) { /* Argh. Now we treat it like a normal delete */ jffs2_complete_reservation(c); ret = -ENOMEM; goto fail; } dir_f = JFFS2_INODE_INFO(dir_i); mutex_lock(&dir_f->sem); rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT); rd->totlen = cpu_to_je32(sizeof(*rd) + namelen); rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4)); rd->pino = cpu_to_je32(dir_i->i_ino); rd->version = cpu_to_je32(++dir_f->highest_version); rd->ino = cpu_to_je32(inode->i_ino); rd->mctime = cpu_to_je32(JFFS2_NOW()); rd->nsize = namelen; rd->type = DT_LNK; rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8)); rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen)); fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, ALLOC_NORMAL); if (IS_ERR(fd)) { /* dirent failed to write. Delete the inode normally as if it were the final unlink() */ jffs2_complete_reservation(c); jffs2_free_raw_dirent(rd); mutex_unlock(&dir_f->sem); ret = PTR_ERR(fd); goto fail; } dir_i->i_mtime = inode_set_ctime_to_ts(dir_i, ITIME(je32_to_cpu(rd->mctime))); jffs2_free_raw_dirent(rd); /* Link the fd into the inode's list, obsoleting an old one if necessary. */ jffs2_add_fd_to_list(c, fd, &dir_f->dents); mutex_unlock(&dir_f->sem); jffs2_complete_reservation(c); d_instantiate_new(dentry, inode); return 0; fail: iget_failed(inode); return ret; } static int jffs2_mkdir (struct mnt_idmap *idmap, struct inode *dir_i, struct dentry *dentry, umode_t mode) { struct jffs2_inode_info *f, *dir_f; struct jffs2_sb_info *c; struct inode *inode; struct jffs2_raw_inode *ri; struct jffs2_raw_dirent *rd; struct jffs2_full_dnode *fn; struct jffs2_full_dirent *fd; int namelen; uint32_t alloclen; int ret; mode |= S_IFDIR; ri = jffs2_alloc_raw_inode(); if (!ri) return -ENOMEM; c = JFFS2_SB_INFO(dir_i->i_sb); /* Try to reserve enough space for both node and dirent. 
* Just the node will do for now, though */ namelen = dentry->d_name.len; ret = jffs2_reserve_space(c, sizeof(*ri), &alloclen, ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); if (ret) { jffs2_free_raw_inode(ri); return ret; } inode = jffs2_new_inode(dir_i, mode, ri); if (IS_ERR(inode)) { jffs2_free_raw_inode(ri); jffs2_complete_reservation(c); return PTR_ERR(inode); } inode->i_op = &jffs2_dir_inode_operations; inode->i_fop = &jffs2_dir_operations; f = JFFS2_INODE_INFO(inode); /* Directories get nlink 2 at start */ set_nlink(inode, 2); /* but ic->pino_nlink is the parent ino# */ f->inocache->pino_nlink = dir_i->i_ino; ri->data_crc = cpu_to_je32(0); ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); fn = jffs2_write_dnode(c, f, ri, NULL, 0, ALLOC_NORMAL); jffs2_free_raw_inode(ri); if (IS_ERR(fn)) { /* Eeek. Wave bye bye */ mutex_unlock(&f->sem); jffs2_complete_reservation(c); ret = PTR_ERR(fn); goto fail; } /* No data here. Only a metadata node, which will be obsoleted by the first data write */ f->metadata = fn; mutex_unlock(&f->sem); jffs2_complete_reservation(c); ret = jffs2_init_security(inode, dir_i, &dentry->d_name); if (ret) goto fail; ret = jffs2_init_acl_post(inode); if (ret) goto fail; ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen, ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); if (ret) goto fail; rd = jffs2_alloc_raw_dirent(); if (!rd) { /* Argh. Now we treat it like a normal delete */ jffs2_complete_reservation(c); ret = -ENOMEM; goto fail; } dir_f = JFFS2_INODE_INFO(dir_i); mutex_lock(&dir_f->sem); rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT); rd->totlen = cpu_to_je32(sizeof(*rd) + namelen); rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4)); rd->pino = cpu_to_je32(dir_i->i_ino); rd->version = cpu_to_je32(++dir_f->highest_version); rd->ino = cpu_to_je32(inode->i_ino); rd->mctime = cpu_to_je32(JFFS2_NOW()); rd->nsize = namelen; rd->type = DT_DIR; rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8)); rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen)); fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, ALLOC_NORMAL); if (IS_ERR(fd)) { /* dirent failed to write. Delete the inode normally as if it were the final unlink() */ jffs2_complete_reservation(c); jffs2_free_raw_dirent(rd); mutex_unlock(&dir_f->sem); ret = PTR_ERR(fd); goto fail; } dir_i->i_mtime = inode_set_ctime_to_ts(dir_i, ITIME(je32_to_cpu(rd->mctime))); inc_nlink(dir_i); jffs2_free_raw_dirent(rd); /* Link the fd into the inode's list, obsoleting an old one if necessary. 
*/ jffs2_add_fd_to_list(c, fd, &dir_f->dents); mutex_unlock(&dir_f->sem); jffs2_complete_reservation(c); d_instantiate_new(dentry, inode); return 0; fail: iget_failed(inode); return ret; } static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry) { struct jffs2_sb_info *c = JFFS2_SB_INFO(dir_i->i_sb); struct jffs2_inode_info *dir_f = JFFS2_INODE_INFO(dir_i); struct jffs2_inode_info *f = JFFS2_INODE_INFO(d_inode(dentry)); struct jffs2_full_dirent *fd; int ret; uint32_t now = JFFS2_NOW(); mutex_lock(&f->sem); for (fd = f->dents ; fd; fd = fd->next) { if (fd->ino) { mutex_unlock(&f->sem); return -ENOTEMPTY; } } mutex_unlock(&f->sem); ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name, dentry->d_name.len, f, now); if (!ret) { dir_i->i_mtime = inode_set_ctime_to_ts(dir_i, ITIME(now)); clear_nlink(d_inode(dentry)); drop_nlink(dir_i); } return ret; } static int jffs2_mknod (struct mnt_idmap *idmap, struct inode *dir_i, struct dentry *dentry, umode_t mode, dev_t rdev) { struct jffs2_inode_info *f, *dir_f; struct jffs2_sb_info *c; struct inode *inode; struct jffs2_raw_inode *ri; struct jffs2_raw_dirent *rd; struct jffs2_full_dnode *fn; struct jffs2_full_dirent *fd; int namelen; union jffs2_device_node dev; int devlen = 0; uint32_t alloclen; int ret; ri = jffs2_alloc_raw_inode(); if (!ri) return -ENOMEM; c = JFFS2_SB_INFO(dir_i->i_sb); if (S_ISBLK(mode) || S_ISCHR(mode)) devlen = jffs2_encode_dev(&dev, rdev); /* Try to reserve enough space for both node and dirent. * Just the node will do for now, though */ namelen = dentry->d_name.len; ret = jffs2_reserve_space(c, sizeof(*ri) + devlen, &alloclen, ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); if (ret) { jffs2_free_raw_inode(ri); return ret; } inode = jffs2_new_inode(dir_i, mode, ri); if (IS_ERR(inode)) { jffs2_free_raw_inode(ri); jffs2_complete_reservation(c); return PTR_ERR(inode); } inode->i_op = &jffs2_file_inode_operations; init_special_inode(inode, inode->i_mode, rdev); f = JFFS2_INODE_INFO(inode); ri->dsize = ri->csize = cpu_to_je32(devlen); ri->totlen = cpu_to_je32(sizeof(*ri) + devlen); ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4)); ri->compr = JFFS2_COMPR_NONE; ri->data_crc = cpu_to_je32(crc32(0, &dev, devlen)); ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); fn = jffs2_write_dnode(c, f, ri, (char *)&dev, devlen, ALLOC_NORMAL); jffs2_free_raw_inode(ri); if (IS_ERR(fn)) { /* Eeek. Wave bye bye */ mutex_unlock(&f->sem); jffs2_complete_reservation(c); ret = PTR_ERR(fn); goto fail; } /* No data here. Only a metadata node, which will be obsoleted by the first data write */ f->metadata = fn; mutex_unlock(&f->sem); jffs2_complete_reservation(c); ret = jffs2_init_security(inode, dir_i, &dentry->d_name); if (ret) goto fail; ret = jffs2_init_acl_post(inode); if (ret) goto fail; ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen, ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); if (ret) goto fail; rd = jffs2_alloc_raw_dirent(); if (!rd) { /* Argh. 
Now we treat it like a normal delete */ jffs2_complete_reservation(c); ret = -ENOMEM; goto fail; } dir_f = JFFS2_INODE_INFO(dir_i); mutex_lock(&dir_f->sem); rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT); rd->totlen = cpu_to_je32(sizeof(*rd) + namelen); rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4)); rd->pino = cpu_to_je32(dir_i->i_ino); rd->version = cpu_to_je32(++dir_f->highest_version); rd->ino = cpu_to_je32(inode->i_ino); rd->mctime = cpu_to_je32(JFFS2_NOW()); rd->nsize = namelen; /* XXX: This is ugly. */ rd->type = (mode & S_IFMT) >> 12; rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8)); rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen)); fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, ALLOC_NORMAL); if (IS_ERR(fd)) { /* dirent failed to write. Delete the inode normally as if it were the final unlink() */ jffs2_complete_reservation(c); jffs2_free_raw_dirent(rd); mutex_unlock(&dir_f->sem); ret = PTR_ERR(fd); goto fail; } dir_i->i_mtime = inode_set_ctime_to_ts(dir_i, ITIME(je32_to_cpu(rd->mctime))); jffs2_free_raw_dirent(rd); /* Link the fd into the inode's list, obsoleting an old one if necessary. */ jffs2_add_fd_to_list(c, fd, &dir_f->dents); mutex_unlock(&dir_f->sem); jffs2_complete_reservation(c); d_instantiate_new(dentry, inode); return 0; fail: iget_failed(inode); return ret; } static int jffs2_rename (struct mnt_idmap *idmap, struct inode *old_dir_i, struct dentry *old_dentry, struct inode *new_dir_i, struct dentry *new_dentry, unsigned int flags) { int ret; struct jffs2_sb_info *c = JFFS2_SB_INFO(old_dir_i->i_sb); struct jffs2_inode_info *victim_f = NULL; uint8_t type; uint32_t now; if (flags & ~RENAME_NOREPLACE) return -EINVAL; /* The VFS will check for us and prevent trying to rename a * file over a directory and vice versa, but if it's a directory, * the VFS can't check whether the victim is empty. The filesystem * needs to do that for itself. */ if (d_really_is_positive(new_dentry)) { victim_f = JFFS2_INODE_INFO(d_inode(new_dentry)); if (d_is_dir(new_dentry)) { struct jffs2_full_dirent *fd; mutex_lock(&victim_f->sem); for (fd = victim_f->dents; fd; fd = fd->next) { if (fd->ino) { mutex_unlock(&victim_f->sem); return -ENOTEMPTY; } } mutex_unlock(&victim_f->sem); } } /* XXX: We probably ought to alloc enough space for both nodes at the same time. Writing the new link, then getting -ENOSPC, is quite bad :) */ /* Make a hard link */ /* XXX: This is ugly */ type = (d_inode(old_dentry)->i_mode & S_IFMT) >> 12; if (!type) type = DT_REG; now = JFFS2_NOW(); ret = jffs2_do_link(c, JFFS2_INODE_INFO(new_dir_i), d_inode(old_dentry)->i_ino, type, new_dentry->d_name.name, new_dentry->d_name.len, now); if (ret) return ret; if (victim_f) { /* There was a victim. Kill it off nicely */ if (d_is_dir(new_dentry)) clear_nlink(d_inode(new_dentry)); else drop_nlink(d_inode(new_dentry)); /* Don't oops if the victim was a dirent pointing to an inode which didn't exist. 
*/ if (victim_f->inocache) { mutex_lock(&victim_f->sem); if (d_is_dir(new_dentry)) victim_f->inocache->pino_nlink = 0; else victim_f->inocache->pino_nlink--; mutex_unlock(&victim_f->sem); } } /* If it was a directory we moved, and there was no victim, increase i_nlink on its new parent */ if (d_is_dir(old_dentry) && !victim_f) inc_nlink(new_dir_i); /* Unlink the original */ ret = jffs2_do_unlink(c, JFFS2_INODE_INFO(old_dir_i), old_dentry->d_name.name, old_dentry->d_name.len, NULL, now); /* We don't touch inode->i_nlink */ if (ret) { /* Oh shit. We really ought to make a single node which can do both atomically */ struct jffs2_inode_info *f = JFFS2_INODE_INFO(d_inode(old_dentry)); mutex_lock(&f->sem); inc_nlink(d_inode(old_dentry)); if (f->inocache && !d_is_dir(old_dentry)) f->inocache->pino_nlink++; mutex_unlock(&f->sem); pr_notice("%s(): Link succeeded, unlink failed (err %d). You now have a hard link\n", __func__, ret); /* * We can't keep the target in dcache after that. * For one thing, we can't afford dentry aliases for directories. * For another, if there was a victim, we _can't_ set new inode * for that sucker and we have to trigger mount eviction - the * caller won't do it on its own since we are returning an error. */ d_invalidate(new_dentry); new_dir_i->i_mtime = inode_set_ctime_to_ts(new_dir_i, ITIME(now)); return ret; } if (d_is_dir(old_dentry)) drop_nlink(old_dir_i); old_dir_i->i_mtime = inode_set_ctime_to_ts(old_dir_i, ITIME(now)); new_dir_i->i_mtime = inode_set_ctime_to_ts(new_dir_i, ITIME(now)); return 0; }
linux-master
fs/jffs2/dir.c
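The lookup path in dir.c above relies on the dirent list being kept sorted by name hash, and on hash collisions being resolved by preferring the entry with the highest node version (the newest dirent, which may also be a deletion dirent with ino 0). Below is a minimal userspace sketch of that walk under stated assumptions: the djb2-style name_hash() is only a stand-in for the kernel's full_name_hash() (jffs2 just needs both writer and reader to agree on the function), and the struct and main() are illustrative, not kernel code.

/* Sketch of the jffs2_lookup() walk over a name-hash-sorted dirent list:
 * stop once the list hash exceeds the target hash, and on a hash match
 * prefer the entry with the highest version. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct dirent_node {
    uint32_t nhash;
    uint32_t version;
    uint32_t ino;               /* 0 means "deleted" */
    const char *name;
    struct dirent_node *next;   /* list kept sorted by nhash, ascending */
};

static uint32_t name_hash(const char *name, size_t len)
{
    uint32_t h = 5381;          /* stand-in for full_name_hash() */
    for (size_t i = 0; i < len; i++)
        h = h * 33 + (unsigned char)name[i];
    return h;
}

static uint32_t lookup_ino(const struct dirent_node *dents, const char *name)
{
    size_t len = strlen(name);
    uint32_t nhash = name_hash(name, len);
    const struct dirent_node *fd = NULL;

    for (const struct dirent_node *p = dents; p && p->nhash <= nhash; p = p->next) {
        if (p->nhash == nhash &&
            (!fd || p->version > fd->version) &&
            strlen(p->name) == len &&
            !strncmp(p->name, name, len))
            fd = p;             /* newest matching dirent wins */
    }
    return fd ? fd->ino : 0;    /* 0: not found or deleted */
}

int main(void)
{
    /* "b" written twice: version 2 (ino 12) obsoletes version 1 (ino 11) */
    struct dirent_node b1 = { 0, 1, 11, "b", NULL };
    struct dirent_node b2 = { 0, 2, 12, "b", NULL };
    struct dirent_node a  = { 0, 1, 10, "a", NULL };
    b1.nhash = b2.nhash = name_hash("b", 1);
    a.nhash = name_hash("a", 1);

    /* Build the list sorted by nhash (order of "a" vs "b" depends on the hash) */
    struct dirent_node *head;
    if (a.nhash <= b1.nhash) { head = &a; a.next = &b1; b1.next = &b2; }
    else                     { head = &b1; b1.next = &b2; b2.next = &a; }

    printf("lookup(a) -> ino %u\n", (unsigned)lookup_ino(head, "a"));
    printf("lookup(b) -> ino %u\n", (unsigned)lookup_ino(head, "b"));
    printf("lookup(c) -> ino %u\n", (unsigned)lookup_ino(head, "c"));
    return 0;
}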
/* * JFFS2 -- Journalling Flash File System, Version 2. * * Copyright © 2001-2007 Red Hat, Inc. * * Created by David Woodhouse <[email protected]> * * For licensing information, see the file 'LICENCE' in this directory. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/slab.h> #include <linux/crc32.h> #include <linux/pagemap.h> #include <linux/mtd/mtd.h> #include <linux/compiler.h> #include "nodelist.h" #include "compr.h" int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fd, unsigned char *buf, int ofs, int len) { struct jffs2_raw_inode *ri; size_t readlen; uint32_t crc; unsigned char *decomprbuf = NULL; unsigned char *readbuf = NULL; int ret = 0; ri = jffs2_alloc_raw_inode(); if (!ri) return -ENOMEM; ret = jffs2_flash_read(c, ref_offset(fd->raw), sizeof(*ri), &readlen, (char *)ri); if (ret) { jffs2_free_raw_inode(ri); pr_warn("Error reading node from 0x%08x: %d\n", ref_offset(fd->raw), ret); return ret; } if (readlen != sizeof(*ri)) { jffs2_free_raw_inode(ri); pr_warn("Short read from 0x%08x: wanted 0x%zx bytes, got 0x%zx\n", ref_offset(fd->raw), sizeof(*ri), readlen); return -EIO; } crc = crc32(0, ri, sizeof(*ri)-8); jffs2_dbg(1, "Node read from %08x: node_crc %08x, calculated CRC %08x. dsize %x, csize %x, offset %x, buf %p\n", ref_offset(fd->raw), je32_to_cpu(ri->node_crc), crc, je32_to_cpu(ri->dsize), je32_to_cpu(ri->csize), je32_to_cpu(ri->offset), buf); if (crc != je32_to_cpu(ri->node_crc)) { pr_warn("Node CRC %08x != calculated CRC %08x for node at %08x\n", je32_to_cpu(ri->node_crc), crc, ref_offset(fd->raw)); ret = -EIO; goto out_ri; } /* There was a bug where we wrote hole nodes out with csize/dsize swapped. Deal with it */ if (ri->compr == JFFS2_COMPR_ZERO && !je32_to_cpu(ri->dsize) && je32_to_cpu(ri->csize)) { ri->dsize = ri->csize; ri->csize = cpu_to_je32(0); } D1(if(ofs + len > je32_to_cpu(ri->dsize)) { pr_warn("jffs2_read_dnode() asked for %d bytes at %d from %d-byte node\n", len, ofs, je32_to_cpu(ri->dsize)); ret = -EINVAL; goto out_ri; }); if (ri->compr == JFFS2_COMPR_ZERO) { memset(buf, 0, len); goto out_ri; } /* Cases: Reading whole node and it's uncompressed - read directly to buffer provided, check CRC. 
Reading whole node and it's compressed - read into comprbuf, check CRC and decompress to buffer provided Reading partial node and it's uncompressed - read into readbuf, check CRC, and copy Reading partial node and it's compressed - read into readbuf, check checksum, decompress to decomprbuf and copy */ if (ri->compr == JFFS2_COMPR_NONE && len == je32_to_cpu(ri->dsize)) { readbuf = buf; } else { readbuf = kmalloc(je32_to_cpu(ri->csize), GFP_KERNEL); if (!readbuf) { ret = -ENOMEM; goto out_ri; } } if (ri->compr != JFFS2_COMPR_NONE) { if (len < je32_to_cpu(ri->dsize)) { decomprbuf = kmalloc(je32_to_cpu(ri->dsize), GFP_KERNEL); if (!decomprbuf) { ret = -ENOMEM; goto out_readbuf; } } else { decomprbuf = buf; } } else { decomprbuf = readbuf; } jffs2_dbg(2, "Read %d bytes to %p\n", je32_to_cpu(ri->csize), readbuf); ret = jffs2_flash_read(c, (ref_offset(fd->raw)) + sizeof(*ri), je32_to_cpu(ri->csize), &readlen, readbuf); if (!ret && readlen != je32_to_cpu(ri->csize)) ret = -EIO; if (ret) goto out_decomprbuf; crc = crc32(0, readbuf, je32_to_cpu(ri->csize)); if (crc != je32_to_cpu(ri->data_crc)) { pr_warn("Data CRC %08x != calculated CRC %08x for node at %08x\n", je32_to_cpu(ri->data_crc), crc, ref_offset(fd->raw)); ret = -EIO; goto out_decomprbuf; } jffs2_dbg(2, "Data CRC matches calculated CRC %08x\n", crc); if (ri->compr != JFFS2_COMPR_NONE) { jffs2_dbg(2, "Decompress %d bytes from %p to %d bytes at %p\n", je32_to_cpu(ri->csize), readbuf, je32_to_cpu(ri->dsize), decomprbuf); ret = jffs2_decompress(c, f, ri->compr | (ri->usercompr << 8), readbuf, decomprbuf, je32_to_cpu(ri->csize), je32_to_cpu(ri->dsize)); if (ret) { pr_warn("Error: jffs2_decompress returned %d\n", ret); goto out_decomprbuf; } } if (len < je32_to_cpu(ri->dsize)) { memcpy(buf, decomprbuf+ofs, len); } out_decomprbuf: if(decomprbuf != buf && decomprbuf != readbuf) kfree(decomprbuf); out_readbuf: if(readbuf != buf) kfree(readbuf); out_ri: jffs2_free_raw_inode(ri); return ret; } int jffs2_read_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, unsigned char *buf, uint32_t offset, uint32_t len) { uint32_t end = offset + len; struct jffs2_node_frag *frag; int ret; jffs2_dbg(1, "%s(): ino #%u, range 0x%08x-0x%08x\n", __func__, f->inocache->ino, offset, offset + len); frag = jffs2_lookup_node_frag(&f->fragtree, offset); /* XXX FIXME: Where a single physical node actually shows up in two frags, we read it twice. Don't do that. */ /* Now we're pointing at the first frag which overlaps our page * (or perhaps is before it, if we've been asked to read off the * end of the file). */ while(offset < end) { jffs2_dbg(2, "%s(): offset %d, end %d\n", __func__, offset, end); if (unlikely(!frag || frag->ofs > offset || frag->ofs + frag->size <= offset)) { uint32_t holesize = end - offset; if (frag && frag->ofs > offset) { jffs2_dbg(1, "Eep. Hole in ino #%u fraglist. 
frag->ofs = 0x%08x, offset = 0x%08x\n", f->inocache->ino, frag->ofs, offset); holesize = min(holesize, frag->ofs - offset); } jffs2_dbg(1, "Filling non-frag hole from %d-%d\n", offset, offset + holesize); memset(buf, 0, holesize); buf += holesize; offset += holesize; continue; } else if (unlikely(!frag->node)) { uint32_t holeend = min(end, frag->ofs + frag->size); jffs2_dbg(1, "Filling frag hole from %d-%d (frag 0x%x 0x%x)\n", offset, holeend, frag->ofs, frag->ofs + frag->size); memset(buf, 0, holeend - offset); buf += holeend - offset; offset = holeend; frag = frag_next(frag); continue; } else { uint32_t readlen; uint32_t fragofs; /* offset within the frag to start reading */ fragofs = offset - frag->ofs; readlen = min(frag->size - fragofs, end - offset); jffs2_dbg(1, "Reading %d-%d from node at 0x%08x (%d)\n", frag->ofs+fragofs, frag->ofs + fragofs+readlen, ref_offset(frag->node->raw), ref_flags(frag->node->raw)); ret = jffs2_read_dnode(c, f, frag->node, buf, fragofs + frag->ofs - frag->node->ofs, readlen); jffs2_dbg(2, "node read done\n"); if (ret) { jffs2_dbg(1, "%s(): error %d\n", __func__, ret); memset(buf, 0, readlen); return ret; } buf += readlen; offset += readlen; frag = frag_next(frag); jffs2_dbg(2, "node read was OK. Looping\n"); } } return 0; }
linux-master
fs/jffs2/read.c
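jffs2_read_inode_range() above fills any byte that is not covered by a data-bearing fragment with zeroes, whether the gap is a hole in the fragment list or a fragment whose node carries no data (a hole node). The sketch below reproduces that walk over a toy fragment array under stated assumptions: struct frag, the flat array standing in for the red-black fragtree, and the direct memcpy standing in for jffs2_read_dnode() are all simplifications for the sake of a runnable example.

/* Sketch of the hole-filling walk in jffs2_read_inode_range(). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct frag {
    uint32_t ofs, size;
    const char *data;           /* NULL models a hole node */
};

static void read_range(const struct frag *frags, int nfrags,
                       unsigned char *buf, uint32_t offset, uint32_t len)
{
    uint32_t end = offset + len;
    int i = 0;

    /* skip fragments that end before the requested range */
    while (i < nfrags && frags[i].ofs + frags[i].size <= offset)
        i++;

    while (offset < end) {
        if (i >= nfrags || frags[i].ofs > offset) {
            /* gap in the fragment list: fill with zeroes */
            uint32_t hole = end - offset;
            if (i < nfrags && frags[i].ofs - offset < hole)
                hole = frags[i].ofs - offset;
            memset(buf, 0, hole);
            buf += hole; offset += hole;
        } else if (!frags[i].data) {
            /* hole node: zero up to the end of the frag or the request */
            uint32_t holeend = frags[i].ofs + frags[i].size;
            if (holeend > end) holeend = end;
            memset(buf, 0, holeend - offset);
            buf += holeend - offset; offset = holeend; i++;
        } else {
            /* real data: copy the overlap of the frag and the request */
            uint32_t fragofs = offset - frags[i].ofs;
            uint32_t n = frags[i].size - fragofs;
            if (n > end - offset) n = end - offset;
            memcpy(buf, frags[i].data + fragofs, n);
            buf += n; offset += n; i++;
        }
    }
}

int main(void)
{
    /* bytes 0-3 = "ABCD", 4-7 = hole node, 8-11 missing, 12-15 = "WXYZ" */
    struct frag frags[] = {
        { 0, 4, "ABCD" }, { 4, 4, NULL }, { 12, 4, "WXYZ" },
    };
    unsigned char buf[16];
    read_range(frags, 3, buf, 0, sizeof(buf));
    for (int i = 0; i < 16; i++)
        printf("%c", buf[i] ? buf[i] : '.');
    printf("\n");               /* prints "ABCD........WXYZ" */
    return 0;
}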
/* * JFFS2 -- Journalling Flash File System, Version 2. * * Copyright © 2004 Ferenc Havasi <[email protected]>, * Zoltan Sogor <[email protected]>, * Patrik Kluba <[email protected]>, * University of Szeged, Hungary * 2006 KaiGai Kohei <[email protected]> * * For licensing information, see the file 'LICENCE' in this directory. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/slab.h> #include <linux/mtd/mtd.h> #include <linux/pagemap.h> #include <linux/crc32.h> #include <linux/compiler.h> #include <linux/vmalloc.h> #include "nodelist.h" #include "debug.h" int jffs2_sum_init(struct jffs2_sb_info *c) { uint32_t sum_size = min_t(uint32_t, c->sector_size, MAX_SUMMARY_SIZE); c->summary = kzalloc(sizeof(struct jffs2_summary), GFP_KERNEL); if (!c->summary) { JFFS2_WARNING("Can't allocate memory for summary information!\n"); return -ENOMEM; } c->summary->sum_buf = kmalloc(sum_size, GFP_KERNEL); if (!c->summary->sum_buf) { JFFS2_WARNING("Can't allocate buffer for writing out summary information!\n"); kfree(c->summary); return -ENOMEM; } dbg_summary("returned successfully\n"); return 0; } void jffs2_sum_exit(struct jffs2_sb_info *c) { dbg_summary("called\n"); jffs2_sum_disable_collecting(c->summary); kfree(c->summary->sum_buf); c->summary->sum_buf = NULL; kfree(c->summary); c->summary = NULL; } static int jffs2_sum_add_mem(struct jffs2_summary *s, union jffs2_sum_mem *item) { if (!s->sum_list_head) s->sum_list_head = (union jffs2_sum_mem *) item; if (s->sum_list_tail) s->sum_list_tail->u.next = (union jffs2_sum_mem *) item; s->sum_list_tail = (union jffs2_sum_mem *) item; switch (je16_to_cpu(item->u.nodetype)) { case JFFS2_NODETYPE_INODE: s->sum_size += JFFS2_SUMMARY_INODE_SIZE; s->sum_num++; dbg_summary("inode (%u) added to summary\n", je32_to_cpu(item->i.inode)); break; case JFFS2_NODETYPE_DIRENT: s->sum_size += JFFS2_SUMMARY_DIRENT_SIZE(item->d.nsize); s->sum_num++; dbg_summary("dirent (%u) added to summary\n", je32_to_cpu(item->d.ino)); break; #ifdef CONFIG_JFFS2_FS_XATTR case JFFS2_NODETYPE_XATTR: s->sum_size += JFFS2_SUMMARY_XATTR_SIZE; s->sum_num++; dbg_summary("xattr (xid=%u, version=%u) added to summary\n", je32_to_cpu(item->x.xid), je32_to_cpu(item->x.version)); break; case JFFS2_NODETYPE_XREF: s->sum_size += JFFS2_SUMMARY_XREF_SIZE; s->sum_num++; dbg_summary("xref added to summary\n"); break; #endif default: JFFS2_WARNING("UNKNOWN node type %u\n", je16_to_cpu(item->u.nodetype)); return 1; } return 0; } /* The following 3 functions are called from scan.c to collect summary info for not closed jeb */ int jffs2_sum_add_padding_mem(struct jffs2_summary *s, uint32_t size) { dbg_summary("called with %u\n", size); s->sum_padded += size; return 0; } int jffs2_sum_add_inode_mem(struct jffs2_summary *s, struct jffs2_raw_inode *ri, uint32_t ofs) { struct jffs2_sum_inode_mem *temp = kmalloc(sizeof(struct jffs2_sum_inode_mem), GFP_KERNEL); if (!temp) return -ENOMEM; temp->nodetype = ri->nodetype; temp->inode = ri->ino; temp->version = ri->version; temp->offset = cpu_to_je32(ofs); /* relative offset from the beginning of the jeb */ temp->totlen = ri->totlen; temp->next = NULL; return jffs2_sum_add_mem(s, (union jffs2_sum_mem *)temp); } int jffs2_sum_add_dirent_mem(struct jffs2_summary *s, struct jffs2_raw_dirent *rd, uint32_t ofs) { struct jffs2_sum_dirent_mem *temp = kmalloc(sizeof(struct jffs2_sum_dirent_mem) + rd->nsize, GFP_KERNEL); if (!temp) return -ENOMEM; temp->nodetype = rd->nodetype; temp->totlen = rd->totlen; temp->offset = cpu_to_je32(ofs); /* 
relative from the beginning of the jeb */ temp->pino = rd->pino; temp->version = rd->version; temp->ino = rd->ino; temp->nsize = rd->nsize; temp->type = rd->type; temp->next = NULL; memcpy(temp->name, rd->name, rd->nsize); return jffs2_sum_add_mem(s, (union jffs2_sum_mem *)temp); } #ifdef CONFIG_JFFS2_FS_XATTR int jffs2_sum_add_xattr_mem(struct jffs2_summary *s, struct jffs2_raw_xattr *rx, uint32_t ofs) { struct jffs2_sum_xattr_mem *temp; temp = kmalloc(sizeof(struct jffs2_sum_xattr_mem), GFP_KERNEL); if (!temp) return -ENOMEM; temp->nodetype = rx->nodetype; temp->xid = rx->xid; temp->version = rx->version; temp->offset = cpu_to_je32(ofs); temp->totlen = rx->totlen; temp->next = NULL; return jffs2_sum_add_mem(s, (union jffs2_sum_mem *)temp); } int jffs2_sum_add_xref_mem(struct jffs2_summary *s, struct jffs2_raw_xref *rr, uint32_t ofs) { struct jffs2_sum_xref_mem *temp; temp = kmalloc(sizeof(struct jffs2_sum_xref_mem), GFP_KERNEL); if (!temp) return -ENOMEM; temp->nodetype = rr->nodetype; temp->offset = cpu_to_je32(ofs); temp->next = NULL; return jffs2_sum_add_mem(s, (union jffs2_sum_mem *)temp); } #endif /* Cleanup every collected summary information */ static void jffs2_sum_clean_collected(struct jffs2_summary *s) { union jffs2_sum_mem *temp; if (!s->sum_list_head) { dbg_summary("already empty\n"); } while (s->sum_list_head) { temp = s->sum_list_head; s->sum_list_head = s->sum_list_head->u.next; kfree(temp); } s->sum_list_tail = NULL; s->sum_padded = 0; s->sum_num = 0; } void jffs2_sum_reset_collected(struct jffs2_summary *s) { dbg_summary("called\n"); jffs2_sum_clean_collected(s); s->sum_size = 0; } void jffs2_sum_disable_collecting(struct jffs2_summary *s) { dbg_summary("called\n"); jffs2_sum_clean_collected(s); s->sum_size = JFFS2_SUMMARY_NOSUM_SIZE; } int jffs2_sum_is_disabled(struct jffs2_summary *s) { return (s->sum_size == JFFS2_SUMMARY_NOSUM_SIZE); } /* Move the collected summary information into sb (called from scan.c) */ void jffs2_sum_move_collected(struct jffs2_sb_info *c, struct jffs2_summary *s) { dbg_summary("oldsize=0x%x oldnum=%u => newsize=0x%x newnum=%u\n", c->summary->sum_size, c->summary->sum_num, s->sum_size, s->sum_num); c->summary->sum_size = s->sum_size; c->summary->sum_num = s->sum_num; c->summary->sum_padded = s->sum_padded; c->summary->sum_list_head = s->sum_list_head; c->summary->sum_list_tail = s->sum_list_tail; s->sum_list_head = s->sum_list_tail = NULL; } /* Called from wbuf.c to collect writed node info */ int jffs2_sum_add_kvec(struct jffs2_sb_info *c, const struct kvec *invecs, unsigned long count, uint32_t ofs) { union jffs2_node_union *node; struct jffs2_eraseblock *jeb; if (c->summary->sum_size == JFFS2_SUMMARY_NOSUM_SIZE) { dbg_summary("Summary is disabled for this jeb! 
Skipping summary info!\n"); return 0; } node = invecs[0].iov_base; jeb = &c->blocks[ofs / c->sector_size]; ofs -= jeb->offset; switch (je16_to_cpu(node->u.nodetype)) { case JFFS2_NODETYPE_INODE: { struct jffs2_sum_inode_mem *temp = kmalloc(sizeof(struct jffs2_sum_inode_mem), GFP_KERNEL); if (!temp) goto no_mem; temp->nodetype = node->i.nodetype; temp->inode = node->i.ino; temp->version = node->i.version; temp->offset = cpu_to_je32(ofs); temp->totlen = node->i.totlen; temp->next = NULL; return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp); } case JFFS2_NODETYPE_DIRENT: { struct jffs2_sum_dirent_mem *temp = kmalloc(sizeof(struct jffs2_sum_dirent_mem) + node->d.nsize, GFP_KERNEL); if (!temp) goto no_mem; temp->nodetype = node->d.nodetype; temp->totlen = node->d.totlen; temp->offset = cpu_to_je32(ofs); temp->pino = node->d.pino; temp->version = node->d.version; temp->ino = node->d.ino; temp->nsize = node->d.nsize; temp->type = node->d.type; temp->next = NULL; switch (count) { case 1: memcpy(temp->name,node->d.name,node->d.nsize); break; case 2: memcpy(temp->name,invecs[1].iov_base,node->d.nsize); break; default: BUG(); /* impossible count value */ break; } return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp); } #ifdef CONFIG_JFFS2_FS_XATTR case JFFS2_NODETYPE_XATTR: { struct jffs2_sum_xattr_mem *temp; temp = kmalloc(sizeof(struct jffs2_sum_xattr_mem), GFP_KERNEL); if (!temp) goto no_mem; temp->nodetype = node->x.nodetype; temp->xid = node->x.xid; temp->version = node->x.version; temp->totlen = node->x.totlen; temp->offset = cpu_to_je32(ofs); temp->next = NULL; return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp); } case JFFS2_NODETYPE_XREF: { struct jffs2_sum_xref_mem *temp; temp = kmalloc(sizeof(struct jffs2_sum_xref_mem), GFP_KERNEL); if (!temp) goto no_mem; temp->nodetype = node->r.nodetype; temp->offset = cpu_to_je32(ofs); temp->next = NULL; return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp); } #endif case JFFS2_NODETYPE_PADDING: dbg_summary("node PADDING\n"); c->summary->sum_padded += je32_to_cpu(node->u.totlen); break; case JFFS2_NODETYPE_CLEANMARKER: dbg_summary("node CLEANMARKER\n"); break; case JFFS2_NODETYPE_SUMMARY: dbg_summary("node SUMMARY\n"); break; default: /* If you implement a new node type you should also implement summary support for it or disable summary. */ BUG(); break; } return 0; no_mem: JFFS2_WARNING("MEMORY ALLOCATION ERROR!"); return -ENOMEM; } static struct jffs2_raw_node_ref *sum_link_node_ref(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t ofs, uint32_t len, struct jffs2_inode_cache *ic) { /* If there was a gap, mark it dirty */ if ((ofs & ~3) > c->sector_size - jeb->free_size) { /* Ew. 
Summary doesn't actually tell us explicitly about dirty space */ jffs2_scan_dirty_space(c, jeb, (ofs & ~3) - (c->sector_size - jeb->free_size)); } return jffs2_link_node_ref(c, jeb, jeb->offset + ofs, len, ic); } /* Process the stored summary information - helper function for jffs2_sum_scan_sumnode() */ static int jffs2_sum_process_sum_data(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, struct jffs2_raw_summary *summary, uint32_t *pseudo_random) { struct jffs2_inode_cache *ic; struct jffs2_full_dirent *fd; void *sp; int i, ino; int err; sp = summary->sum; for (i=0; i<je32_to_cpu(summary->sum_num); i++) { dbg_summary("processing summary index %d\n", i); cond_resched(); /* Make sure there's a spare ref for dirty space */ err = jffs2_prealloc_raw_node_refs(c, jeb, 2); if (err) return err; switch (je16_to_cpu(((struct jffs2_sum_unknown_flash *)sp)->nodetype)) { case JFFS2_NODETYPE_INODE: { struct jffs2_sum_inode_flash *spi; spi = sp; ino = je32_to_cpu(spi->inode); dbg_summary("Inode at 0x%08x-0x%08x\n", jeb->offset + je32_to_cpu(spi->offset), jeb->offset + je32_to_cpu(spi->offset) + je32_to_cpu(spi->totlen)); ic = jffs2_scan_make_ino_cache(c, ino); if (!ic) { JFFS2_NOTICE("scan_make_ino_cache failed\n"); return -ENOMEM; } sum_link_node_ref(c, jeb, je32_to_cpu(spi->offset) | REF_UNCHECKED, PAD(je32_to_cpu(spi->totlen)), ic); *pseudo_random += je32_to_cpu(spi->version); sp += JFFS2_SUMMARY_INODE_SIZE; break; } case JFFS2_NODETYPE_DIRENT: { struct jffs2_sum_dirent_flash *spd; int checkedlen; spd = sp; dbg_summary("Dirent at 0x%08x-0x%08x\n", jeb->offset + je32_to_cpu(spd->offset), jeb->offset + je32_to_cpu(spd->offset) + je32_to_cpu(spd->totlen)); /* This should never happen, but https://dev.laptop.org/ticket/4184 */ checkedlen = strnlen(spd->name, spd->nsize); if (!checkedlen) { pr_err("Dirent at %08x has zero at start of name. Aborting mount.\n", jeb->offset + je32_to_cpu(spd->offset)); return -EIO; } if (checkedlen < spd->nsize) { pr_err("Dirent at %08x has zeroes in name. 
Truncating to %d chars\n", jeb->offset + je32_to_cpu(spd->offset), checkedlen); } fd = jffs2_alloc_full_dirent(checkedlen+1); if (!fd) return -ENOMEM; memcpy(&fd->name, spd->name, checkedlen); fd->name[checkedlen] = 0; ic = jffs2_scan_make_ino_cache(c, je32_to_cpu(spd->pino)); if (!ic) { jffs2_free_full_dirent(fd); return -ENOMEM; } fd->raw = sum_link_node_ref(c, jeb, je32_to_cpu(spd->offset) | REF_UNCHECKED, PAD(je32_to_cpu(spd->totlen)), ic); fd->next = NULL; fd->version = je32_to_cpu(spd->version); fd->ino = je32_to_cpu(spd->ino); fd->nhash = full_name_hash(NULL, fd->name, checkedlen); fd->type = spd->type; jffs2_add_fd_to_list(c, fd, &ic->scan_dents); *pseudo_random += je32_to_cpu(spd->version); sp += JFFS2_SUMMARY_DIRENT_SIZE(spd->nsize); break; } #ifdef CONFIG_JFFS2_FS_XATTR case JFFS2_NODETYPE_XATTR: { struct jffs2_xattr_datum *xd; struct jffs2_sum_xattr_flash *spx; spx = (struct jffs2_sum_xattr_flash *)sp; dbg_summary("xattr at %#08x-%#08x (xid=%u, version=%u)\n", jeb->offset + je32_to_cpu(spx->offset), jeb->offset + je32_to_cpu(spx->offset) + je32_to_cpu(spx->totlen), je32_to_cpu(spx->xid), je32_to_cpu(spx->version)); xd = jffs2_setup_xattr_datum(c, je32_to_cpu(spx->xid), je32_to_cpu(spx->version)); if (IS_ERR(xd)) return PTR_ERR(xd); if (xd->version > je32_to_cpu(spx->version)) { /* node is not the newest one */ struct jffs2_raw_node_ref *raw = sum_link_node_ref(c, jeb, je32_to_cpu(spx->offset) | REF_UNCHECKED, PAD(je32_to_cpu(spx->totlen)), NULL); raw->next_in_ino = xd->node->next_in_ino; xd->node->next_in_ino = raw; } else { xd->version = je32_to_cpu(spx->version); sum_link_node_ref(c, jeb, je32_to_cpu(spx->offset) | REF_UNCHECKED, PAD(je32_to_cpu(spx->totlen)), (void *)xd); } *pseudo_random += je32_to_cpu(spx->xid); sp += JFFS2_SUMMARY_XATTR_SIZE; break; } case JFFS2_NODETYPE_XREF: { struct jffs2_xattr_ref *ref; struct jffs2_sum_xref_flash *spr; spr = (struct jffs2_sum_xref_flash *)sp; dbg_summary("xref at %#08x-%#08x\n", jeb->offset + je32_to_cpu(spr->offset), jeb->offset + je32_to_cpu(spr->offset) + (uint32_t)PAD(sizeof(struct jffs2_raw_xref))); ref = jffs2_alloc_xattr_ref(); if (!ref) { JFFS2_NOTICE("allocation of xattr_datum failed\n"); return -ENOMEM; } ref->next = c->xref_temp; c->xref_temp = ref; sum_link_node_ref(c, jeb, je32_to_cpu(spr->offset) | REF_UNCHECKED, PAD(sizeof(struct jffs2_raw_xref)), (void *)ref); *pseudo_random += ref->node->flash_offset; sp += JFFS2_SUMMARY_XREF_SIZE; break; } #endif default : { uint16_t nodetype = je16_to_cpu(((struct jffs2_sum_unknown_flash *)sp)->nodetype); JFFS2_WARNING("Unsupported node type %x found in summary! 
Exiting...\n", nodetype); if ((nodetype & JFFS2_COMPAT_MASK) == JFFS2_FEATURE_INCOMPAT) return -EIO; /* For compatible node types, just fall back to the full scan */ c->wasted_size -= jeb->wasted_size; c->free_size += c->sector_size - jeb->free_size; c->used_size -= jeb->used_size; c->dirty_size -= jeb->dirty_size; jeb->wasted_size = jeb->used_size = jeb->dirty_size = 0; jeb->free_size = c->sector_size; jffs2_free_jeb_node_refs(c, jeb); return -ENOTRECOVERABLE; } } } return 0; } /* Process the summary node - called from jffs2_scan_eraseblock() */ int jffs2_sum_scan_sumnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, struct jffs2_raw_summary *summary, uint32_t sumsize, uint32_t *pseudo_random) { struct jffs2_unknown_node crcnode; int ret, ofs; uint32_t crc; ofs = c->sector_size - sumsize; dbg_summary("summary found for 0x%08x at 0x%08x (0x%x bytes)\n", jeb->offset, jeb->offset + ofs, sumsize); /* OK, now check for node validity and CRC */ crcnode.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); crcnode.nodetype = cpu_to_je16(JFFS2_NODETYPE_SUMMARY); crcnode.totlen = summary->totlen; crc = crc32(0, &crcnode, sizeof(crcnode)-4); if (je32_to_cpu(summary->hdr_crc) != crc) { dbg_summary("Summary node header is corrupt (bad CRC or " "no summary at all)\n"); goto crc_err; } if (je32_to_cpu(summary->totlen) != sumsize) { dbg_summary("Summary node is corrupt (wrong erasesize?)\n"); goto crc_err; } crc = crc32(0, summary, sizeof(struct jffs2_raw_summary)-8); if (je32_to_cpu(summary->node_crc) != crc) { dbg_summary("Summary node is corrupt (bad CRC)\n"); goto crc_err; } crc = crc32(0, summary->sum, sumsize - sizeof(struct jffs2_raw_summary)); if (je32_to_cpu(summary->sum_crc) != crc) { dbg_summary("Summary node data is corrupt (bad CRC)\n"); goto crc_err; } if ( je32_to_cpu(summary->cln_mkr) ) { dbg_summary("Summary : CLEANMARKER node \n"); ret = jffs2_prealloc_raw_node_refs(c, jeb, 1); if (ret) return ret; if (je32_to_cpu(summary->cln_mkr) != c->cleanmarker_size) { dbg_summary("CLEANMARKER node has totlen 0x%x != normal 0x%x\n", je32_to_cpu(summary->cln_mkr), c->cleanmarker_size); if ((ret = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(summary->cln_mkr))))) return ret; } else if (jeb->first_node) { dbg_summary("CLEANMARKER node not first node in block " "(0x%08x)\n", jeb->offset); if ((ret = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(summary->cln_mkr))))) return ret; } else { jffs2_link_node_ref(c, jeb, jeb->offset | REF_NORMAL, je32_to_cpu(summary->cln_mkr), NULL); } } ret = jffs2_sum_process_sum_data(c, jeb, summary, pseudo_random); /* -ENOTRECOVERABLE isn't a fatal error -- it means we should do a full scan of this eraseblock. 
So return zero */ if (ret == -ENOTRECOVERABLE) return 0; if (ret) return ret; /* real error */ /* for PARANOIA_CHECK */ ret = jffs2_prealloc_raw_node_refs(c, jeb, 2); if (ret) return ret; sum_link_node_ref(c, jeb, ofs | REF_NORMAL, sumsize, NULL); if (unlikely(jeb->free_size)) { JFFS2_WARNING("Free size 0x%x bytes in eraseblock @0x%08x with summary?\n", jeb->free_size, jeb->offset); jeb->wasted_size += jeb->free_size; c->wasted_size += jeb->free_size; c->free_size -= jeb->free_size; jeb->free_size = 0; } return jffs2_scan_classify_jeb(c, jeb); crc_err: JFFS2_WARNING("Summary node crc error, skipping summary information.\n"); return 0; } /* Write summary data to flash - helper function for jffs2_sum_write_sumnode() */ static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t infosize, uint32_t datasize, int padsize) { struct jffs2_raw_summary isum; union jffs2_sum_mem *temp; struct jffs2_sum_marker *sm; struct kvec vecs[2]; uint32_t sum_ofs; void *wpage; int ret; size_t retlen; if (padsize + datasize > MAX_SUMMARY_SIZE) { /* It won't fit in the buffer. Abort summary for this jeb */ jffs2_sum_disable_collecting(c->summary); JFFS2_WARNING("Summary too big (%d data, %d pad) in eraseblock at %08x\n", datasize, padsize, jeb->offset); /* Non-fatal */ return 0; } /* Is there enough space for summary? */ if (padsize < 0) { /* don't try to write out summary for this jeb */ jffs2_sum_disable_collecting(c->summary); JFFS2_WARNING("Not enough space for summary, padsize = %d\n", padsize); /* Non-fatal */ return 0; } memset(c->summary->sum_buf, 0xff, datasize); memset(&isum, 0, sizeof(isum)); isum.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); isum.nodetype = cpu_to_je16(JFFS2_NODETYPE_SUMMARY); isum.totlen = cpu_to_je32(infosize); isum.hdr_crc = cpu_to_je32(crc32(0, &isum, sizeof(struct jffs2_unknown_node) - 4)); isum.padded = cpu_to_je32(c->summary->sum_padded); isum.cln_mkr = cpu_to_je32(c->cleanmarker_size); isum.sum_num = cpu_to_je32(c->summary->sum_num); wpage = c->summary->sum_buf; while (c->summary->sum_num) { temp = c->summary->sum_list_head; switch (je16_to_cpu(temp->u.nodetype)) { case JFFS2_NODETYPE_INODE: { struct jffs2_sum_inode_flash *sino_ptr = wpage; sino_ptr->nodetype = temp->i.nodetype; sino_ptr->inode = temp->i.inode; sino_ptr->version = temp->i.version; sino_ptr->offset = temp->i.offset; sino_ptr->totlen = temp->i.totlen; wpage += JFFS2_SUMMARY_INODE_SIZE; break; } case JFFS2_NODETYPE_DIRENT: { struct jffs2_sum_dirent_flash *sdrnt_ptr = wpage; sdrnt_ptr->nodetype = temp->d.nodetype; sdrnt_ptr->totlen = temp->d.totlen; sdrnt_ptr->offset = temp->d.offset; sdrnt_ptr->pino = temp->d.pino; sdrnt_ptr->version = temp->d.version; sdrnt_ptr->ino = temp->d.ino; sdrnt_ptr->nsize = temp->d.nsize; sdrnt_ptr->type = temp->d.type; memcpy(sdrnt_ptr->name, temp->d.name, temp->d.nsize); wpage += JFFS2_SUMMARY_DIRENT_SIZE(temp->d.nsize); break; } #ifdef CONFIG_JFFS2_FS_XATTR case JFFS2_NODETYPE_XATTR: { struct jffs2_sum_xattr_flash *sxattr_ptr = wpage; temp = c->summary->sum_list_head; sxattr_ptr->nodetype = temp->x.nodetype; sxattr_ptr->xid = temp->x.xid; sxattr_ptr->version = temp->x.version; sxattr_ptr->offset = temp->x.offset; sxattr_ptr->totlen = temp->x.totlen; wpage += JFFS2_SUMMARY_XATTR_SIZE; break; } case JFFS2_NODETYPE_XREF: { struct jffs2_sum_xref_flash *sxref_ptr = wpage; temp = c->summary->sum_list_head; sxref_ptr->nodetype = temp->r.nodetype; sxref_ptr->offset = temp->r.offset; wpage += JFFS2_SUMMARY_XREF_SIZE; break; } #endif default : { if 
((je16_to_cpu(temp->u.nodetype) & JFFS2_COMPAT_MASK) == JFFS2_FEATURE_RWCOMPAT_COPY) { dbg_summary("Writing unknown RWCOMPAT_COPY node type %x\n", je16_to_cpu(temp->u.nodetype)); jffs2_sum_disable_collecting(c->summary); /* The above call removes the list, nothing more to do */ goto bail_rwcompat; } else { BUG(); /* unknown node in summary information */ } } } c->summary->sum_list_head = temp->u.next; kfree(temp); c->summary->sum_num--; } bail_rwcompat: jffs2_sum_reset_collected(c->summary); wpage += padsize; sm = wpage; sm->offset = cpu_to_je32(c->sector_size - jeb->free_size); sm->magic = cpu_to_je32(JFFS2_SUM_MAGIC); isum.sum_crc = cpu_to_je32(crc32(0, c->summary->sum_buf, datasize)); isum.node_crc = cpu_to_je32(crc32(0, &isum, sizeof(isum) - 8)); vecs[0].iov_base = &isum; vecs[0].iov_len = sizeof(isum); vecs[1].iov_base = c->summary->sum_buf; vecs[1].iov_len = datasize; sum_ofs = jeb->offset + c->sector_size - jeb->free_size; dbg_summary("writing out data to flash to pos : 0x%08x\n", sum_ofs); ret = jffs2_flash_writev(c, vecs, 2, sum_ofs, &retlen, 0); if (ret || (retlen != infosize)) { JFFS2_WARNING("Write of %u bytes at 0x%08x failed. returned %d, retlen %zd\n", infosize, sum_ofs, ret, retlen); if (retlen) { /* Waste remaining space */ spin_lock(&c->erase_completion_lock); jffs2_link_node_ref(c, jeb, sum_ofs | REF_OBSOLETE, infosize, NULL); spin_unlock(&c->erase_completion_lock); } c->summary->sum_size = JFFS2_SUMMARY_NOSUM_SIZE; return 0; } spin_lock(&c->erase_completion_lock); jffs2_link_node_ref(c, jeb, sum_ofs | REF_NORMAL, infosize, NULL); spin_unlock(&c->erase_completion_lock); return 0; } /* Write out summary information - called from jffs2_do_reserve_space */ int jffs2_sum_write_sumnode(struct jffs2_sb_info *c) __must_hold(&c->erase_completion_block) { int datasize, infosize, padsize; struct jffs2_eraseblock *jeb; int ret = 0; dbg_summary("called\n"); spin_unlock(&c->erase_completion_lock); jeb = c->nextblock; jffs2_prealloc_raw_node_refs(c, jeb, 1); if (!c->summary->sum_num || !c->summary->sum_list_head) { JFFS2_WARNING("Empty summary info!!!\n"); BUG(); } datasize = c->summary->sum_size + sizeof(struct jffs2_sum_marker); infosize = sizeof(struct jffs2_raw_summary) + datasize; padsize = jeb->free_size - infosize; infosize += padsize; datasize += padsize; ret = jffs2_sum_write_data(c, jeb, infosize, datasize, padsize); spin_lock(&c->erase_completion_lock); return ret; }
linux-master
fs/jffs2/summary.c
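The summary code above does two pieces of bookkeeping: jffs2_sum_add_mem() grows sum_size by a fixed per-record size for every node written into the current eraseblock, and jffs2_sum_write_sumnode()/jffs2_sum_write_data() later check whether header plus records plus marker plus padding still fit in the block's remaining free space, disabling the summary for that block when they do not. The sketch below walks through that arithmetic only; the record and header sizes are made-up constants, not the real JFFS2_SUMMARY_*_SIZE macros or struct sizes from jffs2.h.

/* Sketch of the summary size bookkeeping, with illustrative constants. */
#include <stdint.h>
#include <stdio.h>

#define SUM_INODE_REC           20u            /* stand-in for JFFS2_SUMMARY_INODE_SIZE */
#define SUM_DIRENT_REC(nsize)  (21u + (nsize)) /* stand-in for ..._DIRENT_SIZE(x) */
#define RAW_SUMMARY_HDR         32u            /* stand-in for sizeof(struct jffs2_raw_summary) */
#define SUM_MARKER               8u            /* stand-in for sizeof(struct jffs2_sum_marker) */

struct sum_state {
    uint32_t sum_size;   /* bytes the collected records will need on flash */
    uint32_t sum_num;    /* number of records collected */
};

static void add_inode(struct sum_state *s)               { s->sum_size += SUM_INODE_REC; s->sum_num++; }
static void add_dirent(struct sum_state *s, uint8_t nsz) { s->sum_size += SUM_DIRENT_REC(nsz); s->sum_num++; }

/* Returns the summary node's total on-flash length, or 0 if it can't fit
 * (the real code then calls jffs2_sum_disable_collecting()). */
static uint32_t plan_summary(const struct sum_state *s, uint32_t free_size)
{
    uint32_t datasize = s->sum_size + SUM_MARKER;
    uint32_t infosize = RAW_SUMMARY_HDR + datasize;
    int32_t  padsize  = (int32_t)free_size - (int32_t)infosize;

    if (padsize < 0)
        return 0;
    /* padding makes the summary end exactly at the end of the block */
    return infosize + (uint32_t)padsize;
}

int main(void)
{
    struct sum_state s = { 0, 0 };
    add_inode(&s);
    add_dirent(&s, 5);           /* e.g. a 5-character name */
    add_inode(&s);

    printf("records=%u sum_size=%u\n", (unsigned)s.sum_num, (unsigned)s.sum_size);
    printf("fits in 4096 free bytes: totlen=%u\n", (unsigned)plan_summary(&s, 4096));
    printf("fits in 64 free bytes:   totlen=%u\n", (unsigned)plan_summary(&s, 64));
    return 0;
}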
/* * JFFS2 -- Journalling Flash File System, Version 2. * * Copyright © 2001-2007 Red Hat, Inc. * Copyright © 2004-2010 David Woodhouse <[email protected]> * * Created by Arjan van de Ven <[email protected]> * * For licensing information, see the file 'LICENCE' in this directory. * * * * Very simple lz77-ish encoder. * * Theory of operation: Both encoder and decoder have a list of "last * occurrences" for every possible source-value; after sending the * first source-byte, the second byte indicated the "run" length of * matches * * The algorithm is intended to only send "whole bytes", no bit-messing. * */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/jffs2.h> #include "compr.h" /* _compress returns the compressed size, -1 if bigger */ static int jffs2_rtime_compress(unsigned char *data_in, unsigned char *cpage_out, uint32_t *sourcelen, uint32_t *dstlen) { unsigned short positions[256]; int outpos = 0; int pos=0; if (*dstlen <= 3) return -1; memset(positions,0,sizeof(positions)); while (pos < (*sourcelen) && outpos <= (*dstlen)-2) { int backpos, runlen=0; unsigned char value; value = data_in[pos]; cpage_out[outpos++] = data_in[pos++]; backpos = positions[value]; positions[value]=pos; while ((backpos < pos) && (pos < (*sourcelen)) && (data_in[pos]==data_in[backpos++]) && (runlen<255)) { pos++; runlen++; } cpage_out[outpos++] = runlen; } if (outpos >= pos) { /* We failed */ return -1; } /* Tell the caller how much we managed to compress, and how much space it took */ *sourcelen = pos; *dstlen = outpos; return 0; } static int jffs2_rtime_decompress(unsigned char *data_in, unsigned char *cpage_out, uint32_t srclen, uint32_t destlen) { unsigned short positions[256]; int outpos = 0; int pos=0; memset(positions,0,sizeof(positions)); while (outpos<destlen) { unsigned char value; int backoffs; int repeat; value = data_in[pos++]; cpage_out[outpos++] = value; /* first the verbatim copied byte */ repeat = data_in[pos++]; backoffs = positions[value]; positions[value]=outpos; if (repeat) { if (backoffs + repeat >= outpos) { while(repeat) { cpage_out[outpos++] = cpage_out[backoffs++]; repeat--; } } else { memcpy(&cpage_out[outpos],&cpage_out[backoffs],repeat); outpos+=repeat; } } } return 0; } static struct jffs2_compressor jffs2_rtime_comp = { .priority = JFFS2_RTIME_PRIORITY, .name = "rtime", .compr = JFFS2_COMPR_RTIME, .compress = &jffs2_rtime_compress, .decompress = &jffs2_rtime_decompress, #ifdef JFFS2_RTIME_DISABLED .disabled = 1, #else .disabled = 0, #endif }; int jffs2_rtime_init(void) { return jffs2_register_compressor(&jffs2_rtime_comp); } void jffs2_rtime_exit(void) { jffs2_unregister_compressor(&jffs2_rtime_comp); }
linux-master
fs/jffs2/compr_rtime.c
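Because the rtime encoder and decoder are self-contained, the scheme is easy to exercise outside the kernel: every output pair is (literal byte, run length), where the run length counts how many following input bytes match the bytes that followed the previous occurrence of that literal. The round-trip below adapts the two functions above to userspace; main(), the sample buffer, and the always byte-by-byte copy in the decompressor (in place of the kernel's memcpy fast path, which is only valid when the regions don't overlap) are the only additions.

/* Userspace round-trip of the rtime scheme from the file above. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int rtime_compress(const unsigned char *in, unsigned char *out,
                          uint32_t *srclen, uint32_t *dstlen)
{
    unsigned short positions[256] = { 0 };
    uint32_t outpos = 0, pos = 0;

    if (*dstlen <= 3)
        return -1;
    while (pos < *srclen && outpos <= *dstlen - 2) {
        unsigned char value = in[pos];
        int backpos, runlen = 0;

        out[outpos++] = in[pos++];          /* literal byte */
        backpos = positions[value];
        positions[value] = pos;
        while (backpos < (int)pos && pos < *srclen &&
               in[pos] == in[backpos++] && runlen < 255) {
            pos++;
            runlen++;
        }
        out[outpos++] = (unsigned char)runlen;  /* run length */
    }
    if (outpos >= pos)
        return -1;                          /* did not actually shrink */
    *srclen = pos;
    *dstlen = outpos;
    return 0;
}

static void rtime_decompress(const unsigned char *in, unsigned char *out,
                             uint32_t destlen)
{
    unsigned short positions[256] = { 0 };
    uint32_t outpos = 0, pos = 0;

    while (outpos < destlen) {
        unsigned char value = in[pos++];
        int repeat = in[pos++];
        int backoffs = positions[value];

        out[outpos++] = value;              /* the verbatim copied byte */
        positions[value] = outpos;
        while (repeat--)                    /* replay the recorded run */
            out[outpos++] = out[backoffs++];
    }
}

int main(void)
{
    unsigned char src[] = "aaaaaaaabcbcbcbcbcbc";
    unsigned char comp[64], back[64];
    uint32_t srclen = sizeof(src) - 1, dstlen = sizeof(comp);

    if (rtime_compress(src, comp, &srclen, &dstlen) == 0) {
        rtime_decompress(comp, back, srclen);
        printf("in=%u out=%u roundtrip=%s\n", (unsigned)srclen, (unsigned)dstlen,
               memcmp(src, back, srclen) ? "FAIL" : "ok");
    } else {
        printf("incompressible\n");
    }
    return 0;
}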
/* * JFFS2 -- Journalling Flash File System, Version 2. * * Copyright © 2001-2007 Red Hat, Inc. * Copyright © 2004-2010 David Woodhouse <[email protected]> * * Created by David Woodhouse <[email protected]> * * For licensing information, see the file 'LICENCE' in this directory. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/types.h> #include <linux/pagemap.h> #include <linux/crc32.h> #include <linux/jffs2.h> #include <linux/mtd/mtd.h> #include <linux/slab.h> #include "nodelist.h" #include "debug.h" #ifdef JFFS2_DBG_SANITY_CHECKS void __jffs2_dbg_acct_sanity_check_nolock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) { if (unlikely(jeb && jeb->used_size + jeb->dirty_size + jeb->free_size + jeb->wasted_size + jeb->unchecked_size != c->sector_size)) { JFFS2_ERROR("eeep, space accounting for block at 0x%08x is screwed.\n", jeb->offset); JFFS2_ERROR("free %#08x + dirty %#08x + used %#08x + wasted %#08x + unchecked %#08x != total %#08x.\n", jeb->free_size, jeb->dirty_size, jeb->used_size, jeb->wasted_size, jeb->unchecked_size, c->sector_size); BUG(); } if (unlikely(c->used_size + c->dirty_size + c->free_size + c->erasing_size + c->bad_size + c->wasted_size + c->unchecked_size != c->flash_size)) { JFFS2_ERROR("eeep, space accounting superblock info is screwed.\n"); JFFS2_ERROR("free %#08x + dirty %#08x + used %#08x + erasing %#08x + bad %#08x + wasted %#08x + unchecked %#08x != total %#08x.\n", c->free_size, c->dirty_size, c->used_size, c->erasing_size, c->bad_size, c->wasted_size, c->unchecked_size, c->flash_size); BUG(); } } void __jffs2_dbg_acct_sanity_check(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) { spin_lock(&c->erase_completion_lock); jffs2_dbg_acct_sanity_check_nolock(c, jeb); spin_unlock(&c->erase_completion_lock); } #endif /* JFFS2_DBG_SANITY_CHECKS */ #ifdef JFFS2_DBG_PARANOIA_CHECKS /* * Check the fragtree. */ void __jffs2_dbg_fragtree_paranoia_check(struct jffs2_inode_info *f) { mutex_lock(&f->sem); __jffs2_dbg_fragtree_paranoia_check_nolock(f); mutex_unlock(&f->sem); } void __jffs2_dbg_fragtree_paranoia_check_nolock(struct jffs2_inode_info *f) { struct jffs2_node_frag *frag; int bitched = 0; for (frag = frag_first(&f->fragtree); frag; frag = frag_next(frag)) { struct jffs2_full_dnode *fn = frag->node; if (!fn || !fn->raw) continue; if (ref_flags(fn->raw) == REF_PRISTINE) { if (fn->frags > 1) { JFFS2_ERROR("REF_PRISTINE node at 0x%08x had %d frags. Tell dwmw2.\n", ref_offset(fn->raw), fn->frags); bitched = 1; } /* A hole node which isn't multi-page should be garbage-collected and merged anyway, so we just check for the frag size here, rather than mucking around with actually reading the node and checking the compression type, which is the real way to tell a hole node. */ if (frag->ofs & (PAGE_SIZE-1) && frag_prev(frag) && frag_prev(frag)->size < PAGE_SIZE && frag_prev(frag)->node) { JFFS2_ERROR("REF_PRISTINE node at 0x%08x had a previous non-hole frag in the same page. Tell dwmw2.\n", ref_offset(fn->raw)); bitched = 1; } if ((frag->ofs+frag->size) & (PAGE_SIZE-1) && frag_next(frag) && frag_next(frag)->size < PAGE_SIZE && frag_next(frag)->node) { JFFS2_ERROR("REF_PRISTINE node at 0x%08x (%08x-%08x) had a following non-hole frag in the same page. Tell dwmw2.\n", ref_offset(fn->raw), frag->ofs, frag->ofs+frag->size); bitched = 1; } } } if (bitched) { JFFS2_ERROR("fragtree is corrupted.\n"); __jffs2_dbg_dump_fragtree_nolock(f); BUG(); } } /* * Check if the flash contains all 0xFF before we start writing. 
*/ void __jffs2_dbg_prewrite_paranoia_check(struct jffs2_sb_info *c, uint32_t ofs, int len) { size_t retlen; int ret, i; unsigned char *buf; buf = kmalloc(len, GFP_KERNEL); if (!buf) return; ret = jffs2_flash_read(c, ofs, len, &retlen, buf); if (ret || (retlen != len)) { JFFS2_WARNING("read %d bytes failed or short. ret %d, retlen %zd.\n", len, ret, retlen); kfree(buf); return; } ret = 0; for (i = 0; i < len; i++) if (buf[i] != 0xff) ret = 1; if (ret) { JFFS2_ERROR("argh, about to write node to %#08x on flash, but there are data already there. The first corrupted byte is at %#08x offset.\n", ofs, ofs + i); __jffs2_dbg_dump_buffer(buf, len, ofs); kfree(buf); BUG(); } kfree(buf); } void __jffs2_dbg_superblock_counts(struct jffs2_sb_info *c) { struct jffs2_eraseblock *jeb; uint32_t free = 0, dirty = 0, used = 0, wasted = 0, erasing = 0, bad = 0, unchecked = 0; int nr_counted = 0; int dump = 0; if (c->gcblock) { nr_counted++; free += c->gcblock->free_size; dirty += c->gcblock->dirty_size; used += c->gcblock->used_size; wasted += c->gcblock->wasted_size; unchecked += c->gcblock->unchecked_size; } if (c->nextblock) { nr_counted++; free += c->nextblock->free_size; dirty += c->nextblock->dirty_size; used += c->nextblock->used_size; wasted += c->nextblock->wasted_size; unchecked += c->nextblock->unchecked_size; } list_for_each_entry(jeb, &c->clean_list, list) { nr_counted++; free += jeb->free_size; dirty += jeb->dirty_size; used += jeb->used_size; wasted += jeb->wasted_size; unchecked += jeb->unchecked_size; } list_for_each_entry(jeb, &c->very_dirty_list, list) { nr_counted++; free += jeb->free_size; dirty += jeb->dirty_size; used += jeb->used_size; wasted += jeb->wasted_size; unchecked += jeb->unchecked_size; } list_for_each_entry(jeb, &c->dirty_list, list) { nr_counted++; free += jeb->free_size; dirty += jeb->dirty_size; used += jeb->used_size; wasted += jeb->wasted_size; unchecked += jeb->unchecked_size; } list_for_each_entry(jeb, &c->erasable_list, list) { nr_counted++; free += jeb->free_size; dirty += jeb->dirty_size; used += jeb->used_size; wasted += jeb->wasted_size; unchecked += jeb->unchecked_size; } list_for_each_entry(jeb, &c->erasable_pending_wbuf_list, list) { nr_counted++; free += jeb->free_size; dirty += jeb->dirty_size; used += jeb->used_size; wasted += jeb->wasted_size; unchecked += jeb->unchecked_size; } list_for_each_entry(jeb, &c->erase_pending_list, list) { nr_counted++; free += jeb->free_size; dirty += jeb->dirty_size; used += jeb->used_size; wasted += jeb->wasted_size; unchecked += jeb->unchecked_size; } list_for_each_entry(jeb, &c->free_list, list) { nr_counted++; free += jeb->free_size; dirty += jeb->dirty_size; used += jeb->used_size; wasted += jeb->wasted_size; unchecked += jeb->unchecked_size; } list_for_each_entry(jeb, &c->bad_used_list, list) { nr_counted++; free += jeb->free_size; dirty += jeb->dirty_size; used += jeb->used_size; wasted += jeb->wasted_size; unchecked += jeb->unchecked_size; } list_for_each_entry(jeb, &c->erasing_list, list) { nr_counted++; erasing += c->sector_size; } list_for_each_entry(jeb, &c->erase_checking_list, list) { nr_counted++; erasing += c->sector_size; } list_for_each_entry(jeb, &c->erase_complete_list, list) { nr_counted++; erasing += c->sector_size; } list_for_each_entry(jeb, &c->bad_list, list) { nr_counted++; bad += c->sector_size; } #define check(sz) \ do { \ if (sz != c->sz##_size) { \ pr_warn("%s_size mismatch counted 0x%x, c->%s_size 0x%x\n", \ #sz, sz, #sz, c->sz##_size); \ dump = 1; \ } \ } while (0) check(free); check(dirty); 
check(used); check(wasted); check(unchecked); check(bad); check(erasing); #undef check if (nr_counted != c->nr_blocks) { pr_warn("%s counted only 0x%x blocks of 0x%x. Where are the others?\n", __func__, nr_counted, c->nr_blocks); dump = 1; } if (dump) { __jffs2_dbg_dump_block_lists_nolock(c); BUG(); } } /* * Check the space accounting and node_ref list correctness for the JFFS2 erasable block 'jeb'. */ void __jffs2_dbg_acct_paranoia_check(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) { spin_lock(&c->erase_completion_lock); __jffs2_dbg_acct_paranoia_check_nolock(c, jeb); spin_unlock(&c->erase_completion_lock); } void __jffs2_dbg_acct_paranoia_check_nolock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) { uint32_t my_used_size = 0; uint32_t my_unchecked_size = 0; uint32_t my_dirty_size = 0; struct jffs2_raw_node_ref *ref2 = jeb->first_node; while (ref2) { uint32_t totlen = ref_totlen(c, jeb, ref2); if (ref_offset(ref2) < jeb->offset || ref_offset(ref2) > jeb->offset + c->sector_size) { JFFS2_ERROR("node_ref %#08x shouldn't be in block at %#08x.\n", ref_offset(ref2), jeb->offset); goto error; } if (ref_flags(ref2) == REF_UNCHECKED) my_unchecked_size += totlen; else if (!ref_obsolete(ref2)) my_used_size += totlen; else my_dirty_size += totlen; if ((!ref_next(ref2)) != (ref2 == jeb->last_node)) { JFFS2_ERROR("node_ref for node at %#08x (mem %p) has next at %#08x (mem %p), last_node is at %#08x (mem %p).\n", ref_offset(ref2), ref2, ref_offset(ref_next(ref2)), ref_next(ref2), ref_offset(jeb->last_node), jeb->last_node); goto error; } ref2 = ref_next(ref2); } if (my_used_size != jeb->used_size) { JFFS2_ERROR("Calculated used size %#08x != stored used size %#08x.\n", my_used_size, jeb->used_size); goto error; } if (my_unchecked_size != jeb->unchecked_size) { JFFS2_ERROR("Calculated unchecked size %#08x != stored unchecked size %#08x.\n", my_unchecked_size, jeb->unchecked_size); goto error; } #if 0 /* This should work when we implement ref->__totlen elemination */ if (my_dirty_size != jeb->dirty_size + jeb->wasted_size) { JFFS2_ERROR("Calculated dirty+wasted size %#08x != stored dirty + wasted size %#08x\n", my_dirty_size, jeb->dirty_size + jeb->wasted_size); goto error; } if (jeb->free_size == 0 && my_used_size + my_unchecked_size + my_dirty_size != c->sector_size) { JFFS2_ERROR("The sum of all nodes in block (%#x) != size of block (%#x)\n", my_used_size + my_unchecked_size + my_dirty_size, c->sector_size); goto error; } #endif if (!(c->flags & (JFFS2_SB_FLAG_BUILDING|JFFS2_SB_FLAG_SCANNING))) __jffs2_dbg_superblock_counts(c); return; error: __jffs2_dbg_dump_node_refs_nolock(c, jeb); __jffs2_dbg_dump_jeb_nolock(jeb); __jffs2_dbg_dump_block_lists_nolock(c); BUG(); } #endif /* JFFS2_DBG_PARANOIA_CHECKS */ #if defined(JFFS2_DBG_DUMPS) || defined(JFFS2_DBG_PARANOIA_CHECKS) /* * Dump the node_refs of the 'jeb' JFFS2 eraseblock. 
*/ void __jffs2_dbg_dump_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) { spin_lock(&c->erase_completion_lock); __jffs2_dbg_dump_node_refs_nolock(c, jeb); spin_unlock(&c->erase_completion_lock); } void __jffs2_dbg_dump_node_refs_nolock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) { struct jffs2_raw_node_ref *ref; int i = 0; printk(JFFS2_DBG_MSG_PREFIX " Dump node_refs of the eraseblock %#08x\n", jeb->offset); if (!jeb->first_node) { printk(JFFS2_DBG_MSG_PREFIX " no nodes in the eraseblock %#08x\n", jeb->offset); return; } printk(JFFS2_DBG); for (ref = jeb->first_node; ; ref = ref_next(ref)) { printk("%#08x", ref_offset(ref)); #ifdef TEST_TOTLEN printk("(%x)", ref->__totlen); #endif if (ref_next(ref)) printk("->"); else break; if (++i == 4) { i = 0; printk("\n" JFFS2_DBG); } } printk("\n"); } /* * Dump an eraseblock's space accounting. */ void __jffs2_dbg_dump_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) { spin_lock(&c->erase_completion_lock); __jffs2_dbg_dump_jeb_nolock(jeb); spin_unlock(&c->erase_completion_lock); } void __jffs2_dbg_dump_jeb_nolock(struct jffs2_eraseblock *jeb) { if (!jeb) return; printk(JFFS2_DBG_MSG_PREFIX " dump space accounting for the eraseblock at %#08x:\n", jeb->offset); printk(JFFS2_DBG "used_size: %#08x\n", jeb->used_size); printk(JFFS2_DBG "dirty_size: %#08x\n", jeb->dirty_size); printk(JFFS2_DBG "wasted_size: %#08x\n", jeb->wasted_size); printk(JFFS2_DBG "unchecked_size: %#08x\n", jeb->unchecked_size); printk(JFFS2_DBG "free_size: %#08x\n", jeb->free_size); } void __jffs2_dbg_dump_block_lists(struct jffs2_sb_info *c) { spin_lock(&c->erase_completion_lock); __jffs2_dbg_dump_block_lists_nolock(c); spin_unlock(&c->erase_completion_lock); } void __jffs2_dbg_dump_block_lists_nolock(struct jffs2_sb_info *c) { printk(JFFS2_DBG_MSG_PREFIX " dump JFFS2 blocks lists:\n"); printk(JFFS2_DBG "flash_size: %#08x\n", c->flash_size); printk(JFFS2_DBG "used_size: %#08x\n", c->used_size); printk(JFFS2_DBG "dirty_size: %#08x\n", c->dirty_size); printk(JFFS2_DBG "wasted_size: %#08x\n", c->wasted_size); printk(JFFS2_DBG "unchecked_size: %#08x\n", c->unchecked_size); printk(JFFS2_DBG "free_size: %#08x\n", c->free_size); printk(JFFS2_DBG "erasing_size: %#08x\n", c->erasing_size); printk(JFFS2_DBG "bad_size: %#08x\n", c->bad_size); printk(JFFS2_DBG "sector_size: %#08x\n", c->sector_size); printk(JFFS2_DBG "jffs2_reserved_blocks size: %#08x\n", c->sector_size * c->resv_blocks_write); if (c->nextblock) printk(JFFS2_DBG "nextblock: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", c->nextblock->offset, c->nextblock->used_size, c->nextblock->dirty_size, c->nextblock->wasted_size, c->nextblock->unchecked_size, c->nextblock->free_size); else printk(JFFS2_DBG "nextblock: NULL\n"); if (c->gcblock) printk(JFFS2_DBG "gcblock: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", c->gcblock->offset, c->gcblock->used_size, c->gcblock->dirty_size, c->gcblock->wasted_size, c->gcblock->unchecked_size, c->gcblock->free_size); else printk(JFFS2_DBG "gcblock: NULL\n"); if (list_empty(&c->clean_list)) { printk(JFFS2_DBG "clean_list: empty\n"); } else { struct list_head *this; int numblocks = 0; uint32_t dirty = 0; list_for_each(this, &c->clean_list) { struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); numblocks ++; dirty += jeb->wasted_size; if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { printk(JFFS2_DBG "clean_list: %#08x (used %#08x, dirty %#08x, 
wasted %#08x, unchecked %#08x, free %#08x)\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); } } printk (JFFS2_DBG "Contains %d blocks with total wasted size %u, average wasted size: %u\n", numblocks, dirty, dirty / numblocks); } if (list_empty(&c->very_dirty_list)) { printk(JFFS2_DBG "very_dirty_list: empty\n"); } else { struct list_head *this; int numblocks = 0; uint32_t dirty = 0; list_for_each(this, &c->very_dirty_list) { struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); numblocks ++; dirty += jeb->dirty_size; if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { printk(JFFS2_DBG "very_dirty_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); } } printk (JFFS2_DBG "Contains %d blocks with total dirty size %u, average dirty size: %u\n", numblocks, dirty, dirty / numblocks); } if (list_empty(&c->dirty_list)) { printk(JFFS2_DBG "dirty_list: empty\n"); } else { struct list_head *this; int numblocks = 0; uint32_t dirty = 0; list_for_each(this, &c->dirty_list) { struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); numblocks ++; dirty += jeb->dirty_size; if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { printk(JFFS2_DBG "dirty_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); } } printk (JFFS2_DBG "contains %d blocks with total dirty size %u, average dirty size: %u\n", numblocks, dirty, dirty / numblocks); } if (list_empty(&c->erasable_list)) { printk(JFFS2_DBG "erasable_list: empty\n"); } else { struct list_head *this; list_for_each(this, &c->erasable_list) { struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { printk(JFFS2_DBG "erasable_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); } } } if (list_empty(&c->erasing_list)) { printk(JFFS2_DBG "erasing_list: empty\n"); } else { struct list_head *this; list_for_each(this, &c->erasing_list) { struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { printk(JFFS2_DBG "erasing_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); } } } if (list_empty(&c->erase_checking_list)) { printk(JFFS2_DBG "erase_checking_list: empty\n"); } else { struct list_head *this; list_for_each(this, &c->erase_checking_list) { struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { printk(JFFS2_DBG "erase_checking_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); } } } if (list_empty(&c->erase_pending_list)) { printk(JFFS2_DBG "erase_pending_list: empty\n"); } else { struct list_head *this; list_for_each(this, 
&c->erase_pending_list) { struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { printk(JFFS2_DBG "erase_pending_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); } } } if (list_empty(&c->erasable_pending_wbuf_list)) { printk(JFFS2_DBG "erasable_pending_wbuf_list: empty\n"); } else { struct list_head *this; list_for_each(this, &c->erasable_pending_wbuf_list) { struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { printk(JFFS2_DBG "erasable_pending_wbuf_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); } } } if (list_empty(&c->free_list)) { printk(JFFS2_DBG "free_list: empty\n"); } else { struct list_head *this; list_for_each(this, &c->free_list) { struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { printk(JFFS2_DBG "free_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); } } } if (list_empty(&c->bad_list)) { printk(JFFS2_DBG "bad_list: empty\n"); } else { struct list_head *this; list_for_each(this, &c->bad_list) { struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { printk(JFFS2_DBG "bad_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); } } } if (list_empty(&c->bad_used_list)) { printk(JFFS2_DBG "bad_used_list: empty\n"); } else { struct list_head *this; list_for_each(this, &c->bad_used_list) { struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { printk(JFFS2_DBG "bad_used_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); } } } } void __jffs2_dbg_dump_fragtree(struct jffs2_inode_info *f) { mutex_lock(&f->sem); jffs2_dbg_dump_fragtree_nolock(f); mutex_unlock(&f->sem); } void __jffs2_dbg_dump_fragtree_nolock(struct jffs2_inode_info *f) { struct jffs2_node_frag *this = frag_first(&f->fragtree); uint32_t lastofs = 0; int buggy = 0; printk(JFFS2_DBG_MSG_PREFIX " dump fragtree of ino #%u\n", f->inocache->ino); while(this) { if (this->node) printk(JFFS2_DBG "frag %#04x-%#04x: %#08x(%d) on flash (*%p), left (%p), right (%p), parent (%p)\n", this->ofs, this->ofs+this->size, ref_offset(this->node->raw), ref_flags(this->node->raw), this, frag_left(this), frag_right(this), frag_parent(this)); else printk(JFFS2_DBG "frag %#04x-%#04x: hole (*%p). 
left (%p), right (%p), parent (%p)\n", this->ofs, this->ofs+this->size, this, frag_left(this), frag_right(this), frag_parent(this)); if (this->ofs != lastofs) buggy = 1; lastofs = this->ofs + this->size; this = frag_next(this); } if (f->metadata) printk(JFFS2_DBG "metadata at 0x%08x\n", ref_offset(f->metadata->raw)); if (buggy) { JFFS2_ERROR("frag tree got a hole in it.\n"); BUG(); } } #define JFFS2_BUFDUMP_BYTES_PER_LINE 32 void __jffs2_dbg_dump_buffer(unsigned char *buf, int len, uint32_t offs) { int skip; int i; printk(JFFS2_DBG_MSG_PREFIX " dump from offset %#08x to offset %#08x (%x bytes).\n", offs, offs + len, len); i = skip = offs % JFFS2_BUFDUMP_BYTES_PER_LINE; offs = offs & ~(JFFS2_BUFDUMP_BYTES_PER_LINE - 1); if (skip != 0) printk(JFFS2_DBG "%#08x: ", offs); while (skip--) printk(" "); while (i < len) { if ((i % JFFS2_BUFDUMP_BYTES_PER_LINE) == 0 && i != len -1) { if (i != 0) printk("\n"); offs += JFFS2_BUFDUMP_BYTES_PER_LINE; printk(JFFS2_DBG "%0#8x: ", offs); } printk("%02x ", buf[i]); i += 1; } printk("\n"); } /* * Dump a JFFS2 node. */ void __jffs2_dbg_dump_node(struct jffs2_sb_info *c, uint32_t ofs) { union jffs2_node_union node; int len = sizeof(union jffs2_node_union); size_t retlen; uint32_t crc; int ret; printk(JFFS2_DBG_MSG_PREFIX " dump node at offset %#08x.\n", ofs); ret = jffs2_flash_read(c, ofs, len, &retlen, (unsigned char *)&node); if (ret || (retlen != len)) { JFFS2_ERROR("read %d bytes failed or short. ret %d, retlen %zd.\n", len, ret, retlen); return; } printk(JFFS2_DBG "magic:\t%#04x\n", je16_to_cpu(node.u.magic)); printk(JFFS2_DBG "nodetype:\t%#04x\n", je16_to_cpu(node.u.nodetype)); printk(JFFS2_DBG "totlen:\t%#08x\n", je32_to_cpu(node.u.totlen)); printk(JFFS2_DBG "hdr_crc:\t%#08x\n", je32_to_cpu(node.u.hdr_crc)); crc = crc32(0, &node.u, sizeof(node.u) - 4); if (crc != je32_to_cpu(node.u.hdr_crc)) { JFFS2_ERROR("wrong common header CRC.\n"); return; } if (je16_to_cpu(node.u.magic) != JFFS2_MAGIC_BITMASK && je16_to_cpu(node.u.magic) != JFFS2_OLD_MAGIC_BITMASK) { JFFS2_ERROR("wrong node magic: %#04x instead of %#04x.\n", je16_to_cpu(node.u.magic), JFFS2_MAGIC_BITMASK); return; } switch(je16_to_cpu(node.u.nodetype)) { case JFFS2_NODETYPE_INODE: printk(JFFS2_DBG "the node is inode node\n"); printk(JFFS2_DBG "ino:\t%#08x\n", je32_to_cpu(node.i.ino)); printk(JFFS2_DBG "version:\t%#08x\n", je32_to_cpu(node.i.version)); printk(JFFS2_DBG "mode:\t%#08x\n", node.i.mode.m); printk(JFFS2_DBG "uid:\t%#04x\n", je16_to_cpu(node.i.uid)); printk(JFFS2_DBG "gid:\t%#04x\n", je16_to_cpu(node.i.gid)); printk(JFFS2_DBG "isize:\t%#08x\n", je32_to_cpu(node.i.isize)); printk(JFFS2_DBG "atime:\t%#08x\n", je32_to_cpu(node.i.atime)); printk(JFFS2_DBG "mtime:\t%#08x\n", je32_to_cpu(node.i.mtime)); printk(JFFS2_DBG "ctime:\t%#08x\n", je32_to_cpu(node.i.ctime)); printk(JFFS2_DBG "offset:\t%#08x\n", je32_to_cpu(node.i.offset)); printk(JFFS2_DBG "csize:\t%#08x\n", je32_to_cpu(node.i.csize)); printk(JFFS2_DBG "dsize:\t%#08x\n", je32_to_cpu(node.i.dsize)); printk(JFFS2_DBG "compr:\t%#02x\n", node.i.compr); printk(JFFS2_DBG "usercompr:\t%#02x\n", node.i.usercompr); printk(JFFS2_DBG "flags:\t%#04x\n", je16_to_cpu(node.i.flags)); printk(JFFS2_DBG "data_crc:\t%#08x\n", je32_to_cpu(node.i.data_crc)); printk(JFFS2_DBG "node_crc:\t%#08x\n", je32_to_cpu(node.i.node_crc)); crc = crc32(0, &node.i, sizeof(node.i) - 8); if (crc != je32_to_cpu(node.i.node_crc)) { JFFS2_ERROR("wrong node header CRC.\n"); return; } break; case JFFS2_NODETYPE_DIRENT: printk(JFFS2_DBG "the node is dirent node\n"); 
printk(JFFS2_DBG "pino:\t%#08x\n", je32_to_cpu(node.d.pino)); printk(JFFS2_DBG "version:\t%#08x\n", je32_to_cpu(node.d.version)); printk(JFFS2_DBG "ino:\t%#08x\n", je32_to_cpu(node.d.ino)); printk(JFFS2_DBG "mctime:\t%#08x\n", je32_to_cpu(node.d.mctime)); printk(JFFS2_DBG "nsize:\t%#02x\n", node.d.nsize); printk(JFFS2_DBG "type:\t%#02x\n", node.d.type); printk(JFFS2_DBG "node_crc:\t%#08x\n", je32_to_cpu(node.d.node_crc)); printk(JFFS2_DBG "name_crc:\t%#08x\n", je32_to_cpu(node.d.name_crc)); node.d.name[node.d.nsize] = '\0'; printk(JFFS2_DBG "name:\t\"%s\"\n", node.d.name); crc = crc32(0, &node.d, sizeof(node.d) - 8); if (crc != je32_to_cpu(node.d.node_crc)) { JFFS2_ERROR("wrong node header CRC.\n"); return; } break; default: printk(JFFS2_DBG "node type is unknown\n"); break; } } #endif /* JFFS2_DBG_DUMPS || JFFS2_DBG_PARANOIA_CHECKS */
linux-master
fs/jffs2/debug.c
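Editor's aside, not part of the kernel sources above: a minimal standalone userspace sketch of the 32-bytes-per-line hex-dump layout produced by __jffs2_dbg_dump_buffer() in debug.c, where the first output line is padded so that byte columns stay aligned with the dump offset. Only the layout idea is taken from the code above; the function and constant names here are made up for illustration and the kernel's printk plumbing is replaced by printf.

/* Illustrative sketch only; assumes nothing beyond standard C. */
#include <stdio.h>

#define BYTES_PER_LINE 32	/* stands in for JFFS2_BUFDUMP_BYTES_PER_LINE */

static void dump_buffer(const unsigned char *buf, int len, unsigned int offs)
{
	int skip = offs % BYTES_PER_LINE;	/* column of the first byte */
	unsigned int line = offs & ~(unsigned int)(BYTES_PER_LINE - 1);
	int i;

	printf("dump from %#08x to %#08x (%d bytes)\n", offs, offs + len, len);

	printf("%08x: ", line);
	for (i = 0; i < skip; i++)
		printf("   ");		/* pad so columns line up with the offset */

	for (i = 0; i < len; i++) {
		if ((i + skip) % BYTES_PER_LINE == 0 && i + skip != 0) {
			line += BYTES_PER_LINE;
			printf("\n%08x: ", line);
		}
		printf("%02x ", buf[i]);
	}
	printf("\n");
}

int main(void)
{
	unsigned char data[40];
	int i;

	for (i = 0; i < 40; i++)
		data[i] = (unsigned char)i;

	dump_buffer(data, (int)sizeof(data), 0x1c);	/* deliberately unaligned start */
	return 0;
}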
/* * JFFS2 -- Journalling Flash File System, Version 2. * * Copyright © 2001-2007 Red Hat, Inc. * * Created by David Woodhouse <[email protected]> * * For licensing information, see the file 'LICENCE' in this directory. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/sched.h> #include <linux/fs.h> #include <linux/mtd/mtd.h> #include <linux/rbtree.h> #include <linux/crc32.h> #include <linux/pagemap.h> #include "nodelist.h" static void jffs2_obsolete_node_frag(struct jffs2_sb_info *c, struct jffs2_node_frag *this); void jffs2_add_fd_to_list(struct jffs2_sb_info *c, struct jffs2_full_dirent *new, struct jffs2_full_dirent **list) { struct jffs2_full_dirent **prev = list; dbg_dentlist("add dirent \"%s\", ino #%u\n", new->name, new->ino); while ((*prev) && (*prev)->nhash <= new->nhash) { if ((*prev)->nhash == new->nhash && !strcmp((*prev)->name, new->name)) { /* Duplicate. Free one */ if (new->version < (*prev)->version) { dbg_dentlist("Eep! Marking new dirent node obsolete, old is \"%s\", ino #%u\n", (*prev)->name, (*prev)->ino); jffs2_mark_node_obsolete(c, new->raw); jffs2_free_full_dirent(new); } else { dbg_dentlist("marking old dirent \"%s\", ino #%u obsolete\n", (*prev)->name, (*prev)->ino); new->next = (*prev)->next; /* It may have been a 'placeholder' deletion dirent, if jffs2_can_mark_obsolete() (see jffs2_do_unlink()) */ if ((*prev)->raw) jffs2_mark_node_obsolete(c, ((*prev)->raw)); jffs2_free_full_dirent(*prev); *prev = new; } return; } prev = &((*prev)->next); } new->next = *prev; *prev = new; } uint32_t jffs2_truncate_fragtree(struct jffs2_sb_info *c, struct rb_root *list, uint32_t size) { struct jffs2_node_frag *frag = jffs2_lookup_node_frag(list, size); dbg_fragtree("truncating fragtree to 0x%08x bytes\n", size); /* We know frag->ofs <= size. That's what lookup does for us */ if (frag && frag->ofs != size) { if (frag->ofs+frag->size > size) { frag->size = size - frag->ofs; } frag = frag_next(frag); } while (frag && frag->ofs >= size) { struct jffs2_node_frag *next = frag_next(frag); frag_erase(frag, list); jffs2_obsolete_node_frag(c, frag); frag = next; } if (size == 0) return 0; frag = frag_last(list); /* Sanity check for truncation to longer than we started with... */ if (!frag) return 0; if (frag->ofs + frag->size < size) return frag->ofs + frag->size; /* If the last fragment starts at the RAM page boundary, it is * REF_PRISTINE irrespective of its size. */ if (frag->node && (frag->ofs & (PAGE_SIZE - 1)) == 0) { dbg_fragtree2("marking the last fragment 0x%08x-0x%08x REF_PRISTINE.\n", frag->ofs, frag->ofs + frag->size); frag->node->raw->flash_offset = ref_offset(frag->node->raw) | REF_PRISTINE; } return size; } static void jffs2_obsolete_node_frag(struct jffs2_sb_info *c, struct jffs2_node_frag *this) { if (this->node) { this->node->frags--; if (!this->node->frags) { /* The node has no valid frags left. It's totally obsoleted */ dbg_fragtree2("marking old node @0x%08x (0x%04x-0x%04x) obsolete\n", ref_offset(this->node->raw), this->node->ofs, this->node->ofs+this->node->size); jffs2_mark_node_obsolete(c, this->node->raw); jffs2_free_full_dnode(this->node); } else { dbg_fragtree2("marking old node @0x%08x (0x%04x-0x%04x) REF_NORMAL. 
frags is %d\n", ref_offset(this->node->raw), this->node->ofs, this->node->ofs+this->node->size, this->node->frags); mark_ref_normal(this->node->raw); } } jffs2_free_node_frag(this); } static void jffs2_fragtree_insert(struct jffs2_node_frag *newfrag, struct jffs2_node_frag *base) { struct rb_node *parent = &base->rb; struct rb_node **link = &parent; dbg_fragtree2("insert frag (0x%04x-0x%04x)\n", newfrag->ofs, newfrag->ofs + newfrag->size); while (*link) { parent = *link; base = rb_entry(parent, struct jffs2_node_frag, rb); if (newfrag->ofs > base->ofs) link = &base->rb.rb_right; else if (newfrag->ofs < base->ofs) link = &base->rb.rb_left; else { JFFS2_ERROR("duplicate frag at %08x (%p,%p)\n", newfrag->ofs, newfrag, base); BUG(); } } rb_link_node(&newfrag->rb, &base->rb, link); } /* * Allocate and initializes a new fragment. */ static struct jffs2_node_frag * new_fragment(struct jffs2_full_dnode *fn, uint32_t ofs, uint32_t size) { struct jffs2_node_frag *newfrag; newfrag = jffs2_alloc_node_frag(); if (likely(newfrag)) { newfrag->ofs = ofs; newfrag->size = size; newfrag->node = fn; } else { JFFS2_ERROR("cannot allocate a jffs2_node_frag object\n"); } return newfrag; } /* * Called when there is no overlapping fragment exist. Inserts a hole before the new * fragment and inserts the new fragment to the fragtree. */ static int no_overlapping_node(struct jffs2_sb_info *c, struct rb_root *root, struct jffs2_node_frag *newfrag, struct jffs2_node_frag *this, uint32_t lastend) { if (lastend < newfrag->node->ofs) { /* put a hole in before the new fragment */ struct jffs2_node_frag *holefrag; holefrag= new_fragment(NULL, lastend, newfrag->node->ofs - lastend); if (unlikely(!holefrag)) { jffs2_free_node_frag(newfrag); return -ENOMEM; } if (this) { /* By definition, the 'this' node has no right-hand child, because there are no frags with offset greater than it. So that's where we want to put the hole */ dbg_fragtree2("add hole frag %#04x-%#04x on the right of the new frag.\n", holefrag->ofs, holefrag->ofs + holefrag->size); rb_link_node(&holefrag->rb, &this->rb, &this->rb.rb_right); } else { dbg_fragtree2("Add hole frag %#04x-%#04x to the root of the tree.\n", holefrag->ofs, holefrag->ofs + holefrag->size); rb_link_node(&holefrag->rb, NULL, &root->rb_node); } rb_insert_color(&holefrag->rb, root); this = holefrag; } if (this) { /* By definition, the 'this' node has no right-hand child, because there are no frags with offset greater than it. So that's where we want to put new fragment */ dbg_fragtree2("add the new node at the right\n"); rb_link_node(&newfrag->rb, &this->rb, &this->rb.rb_right); } else { dbg_fragtree2("insert the new node at the root of the tree\n"); rb_link_node(&newfrag->rb, NULL, &root->rb_node); } rb_insert_color(&newfrag->rb, root); return 0; } /* Doesn't set inode->i_size */ static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *root, struct jffs2_node_frag *newfrag) { struct jffs2_node_frag *this; uint32_t lastend; /* Skip all the nodes which are completed before this one starts */ this = jffs2_lookup_node_frag(root, newfrag->node->ofs); if (this) { dbg_fragtree2("lookup gave frag 0x%04x-0x%04x; phys 0x%08x (*%p)\n", this->ofs, this->ofs+this->size, this->node?(ref_offset(this->node->raw)):0xffffffff, this); lastend = this->ofs + this->size; } else { dbg_fragtree2("lookup gave no frag\n"); lastend = 0; } /* See if we ran off the end of the fragtree */ if (lastend <= newfrag->ofs) { /* We did */ /* Check if 'this' node was on the same page as the new node. 
If so, both 'this' and the new node get marked REF_NORMAL so the GC can take a look. */ if (lastend && (lastend-1) >> PAGE_SHIFT == newfrag->ofs >> PAGE_SHIFT) { if (this->node) mark_ref_normal(this->node->raw); mark_ref_normal(newfrag->node->raw); } return no_overlapping_node(c, root, newfrag, this, lastend); } if (this->node) dbg_fragtree2("dealing with frag %u-%u, phys %#08x(%d).\n", this->ofs, this->ofs + this->size, ref_offset(this->node->raw), ref_flags(this->node->raw)); else dbg_fragtree2("dealing with hole frag %u-%u.\n", this->ofs, this->ofs + this->size); /* OK. 'this' is pointing at the first frag that newfrag->ofs at least partially obsoletes, * - i.e. newfrag->ofs < this->ofs+this->size && newfrag->ofs >= this->ofs */ if (newfrag->ofs > this->ofs) { /* This node isn't completely obsoleted. The start of it remains valid */ /* Mark the new node and the partially covered node REF_NORMAL -- let the GC take a look at them */ mark_ref_normal(newfrag->node->raw); if (this->node) mark_ref_normal(this->node->raw); if (this->ofs + this->size > newfrag->ofs + newfrag->size) { /* The new node splits 'this' frag into two */ struct jffs2_node_frag *newfrag2; if (this->node) dbg_fragtree2("split old frag 0x%04x-0x%04x, phys 0x%08x\n", this->ofs, this->ofs+this->size, ref_offset(this->node->raw)); else dbg_fragtree2("split old hole frag 0x%04x-0x%04x\n", this->ofs, this->ofs+this->size); /* New second frag pointing to this's node */ newfrag2 = new_fragment(this->node, newfrag->ofs + newfrag->size, this->ofs + this->size - newfrag->ofs - newfrag->size); if (unlikely(!newfrag2)) return -ENOMEM; if (this->node) this->node->frags++; /* Adjust size of original 'this' */ this->size = newfrag->ofs - this->ofs; /* Now, we know there's no node with offset greater than this->ofs but smaller than newfrag2->ofs or newfrag->ofs, for obvious reasons. So we can do a tree insert from 'this' to insert newfrag, and a tree insert from newfrag to insert newfrag2. */ jffs2_fragtree_insert(newfrag, this); rb_insert_color(&newfrag->rb, root); jffs2_fragtree_insert(newfrag2, newfrag); rb_insert_color(&newfrag2->rb, root); return 0; } /* New node just reduces 'this' frag in size, doesn't split it */ this->size = newfrag->ofs - this->ofs; /* Again, we know it lives down here in the tree */ jffs2_fragtree_insert(newfrag, this); rb_insert_color(&newfrag->rb, root); } else { /* New frag starts at the same point as 'this' used to. Replace it in the tree without doing a delete and insertion */ dbg_fragtree2("inserting newfrag (*%p),%d-%d in before 'this' (*%p),%d-%d\n", newfrag, newfrag->ofs, newfrag->ofs+newfrag->size, this, this->ofs, this->ofs+this->size); rb_replace_node(&this->rb, &newfrag->rb, root); if (newfrag->ofs + newfrag->size >= this->ofs+this->size) { dbg_fragtree2("obsoleting node frag %p (%x-%x)\n", this, this->ofs, this->ofs+this->size); jffs2_obsolete_node_frag(c, this); } else { this->ofs += newfrag->size; this->size -= newfrag->size; jffs2_fragtree_insert(this, newfrag); rb_insert_color(&this->rb, root); return 0; } } /* OK, now we have newfrag added in the correct place in the tree, but frag_next(newfrag) may be a fragment which is overlapped by it */ while ((this = frag_next(newfrag)) && newfrag->ofs + newfrag->size >= this->ofs + this->size) { /* 'this' frag is obsoleted completely. 
*/ dbg_fragtree2("obsoleting node frag %p (%x-%x) and removing from tree\n", this, this->ofs, this->ofs+this->size); rb_erase(&this->rb, root); jffs2_obsolete_node_frag(c, this); } /* Now we're pointing at the first frag which isn't totally obsoleted by the new frag */ if (!this || newfrag->ofs + newfrag->size == this->ofs) return 0; /* Still some overlap but we don't need to move it in the tree */ this->size = (this->ofs + this->size) - (newfrag->ofs + newfrag->size); this->ofs = newfrag->ofs + newfrag->size; /* And mark them REF_NORMAL so the GC takes a look at them */ if (this->node) mark_ref_normal(this->node->raw); mark_ref_normal(newfrag->node->raw); return 0; } /* * Given an inode, probably with existing tree of fragments, add the new node * to the fragment tree. */ int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn) { int ret; struct jffs2_node_frag *newfrag; if (unlikely(!fn->size)) return 0; newfrag = new_fragment(fn, fn->ofs, fn->size); if (unlikely(!newfrag)) return -ENOMEM; newfrag->node->frags = 1; dbg_fragtree("adding node %#04x-%#04x @0x%08x on flash, newfrag *%p\n", fn->ofs, fn->ofs+fn->size, ref_offset(fn->raw), newfrag); ret = jffs2_add_frag_to_fragtree(c, &f->fragtree, newfrag); if (unlikely(ret)) return ret; /* If we now share a page with other nodes, mark either previous or next node REF_NORMAL, as appropriate. */ if (newfrag->ofs & (PAGE_SIZE-1)) { struct jffs2_node_frag *prev = frag_prev(newfrag); mark_ref_normal(fn->raw); /* If we don't start at zero there's _always_ a previous */ if (prev->node) mark_ref_normal(prev->node->raw); } if ((newfrag->ofs+newfrag->size) & (PAGE_SIZE-1)) { struct jffs2_node_frag *next = frag_next(newfrag); if (next) { mark_ref_normal(fn->raw); if (next->node) mark_ref_normal(next->node->raw); } } jffs2_dbg_fragtree_paranoia_check_nolock(f); return 0; } void jffs2_set_inocache_state(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, int state) { spin_lock(&c->inocache_lock); ic->state = state; wake_up(&c->inocache_wq); spin_unlock(&c->inocache_lock); } /* During mount, this needs no locking. During normal operation, its callers want to do other stuff while still holding the inocache_lock. Rather than introducing special case get_ino_cache functions or callbacks, we just let the caller do the locking itself. 
*/ struct jffs2_inode_cache *jffs2_get_ino_cache(struct jffs2_sb_info *c, uint32_t ino) { struct jffs2_inode_cache *ret; ret = c->inocache_list[ino % c->inocache_hashsize]; while (ret && ret->ino < ino) { ret = ret->next; } if (ret && ret->ino != ino) ret = NULL; return ret; } void jffs2_add_ino_cache (struct jffs2_sb_info *c, struct jffs2_inode_cache *new) { struct jffs2_inode_cache **prev; spin_lock(&c->inocache_lock); if (!new->ino) new->ino = ++c->highest_ino; dbg_inocache("add %p (ino #%u)\n", new, new->ino); prev = &c->inocache_list[new->ino % c->inocache_hashsize]; while ((*prev) && (*prev)->ino < new->ino) { prev = &(*prev)->next; } new->next = *prev; *prev = new; spin_unlock(&c->inocache_lock); } void jffs2_del_ino_cache(struct jffs2_sb_info *c, struct jffs2_inode_cache *old) { struct jffs2_inode_cache **prev; #ifdef CONFIG_JFFS2_FS_XATTR BUG_ON(old->xref); #endif dbg_inocache("del %p (ino #%u)\n", old, old->ino); spin_lock(&c->inocache_lock); prev = &c->inocache_list[old->ino % c->inocache_hashsize]; while ((*prev) && (*prev)->ino < old->ino) { prev = &(*prev)->next; } if ((*prev) == old) { *prev = old->next; } /* Free it now unless it's in READING or CLEARING state, which are the transitions upon read_inode() and clear_inode(). The rest of the time we know nobody else is looking at it, and if it's held by read_inode() or clear_inode() they'll free it for themselves. */ if (old->state != INO_STATE_READING && old->state != INO_STATE_CLEARING) jffs2_free_inode_cache(old); spin_unlock(&c->inocache_lock); } void jffs2_free_ino_caches(struct jffs2_sb_info *c) { int i; struct jffs2_inode_cache *this, *next; for (i=0; i < c->inocache_hashsize; i++) { this = c->inocache_list[i]; while (this) { next = this->next; jffs2_xattr_free_inode(c, this); jffs2_free_inode_cache(this); this = next; } c->inocache_list[i] = NULL; } } void jffs2_free_raw_node_refs(struct jffs2_sb_info *c) { int i; struct jffs2_raw_node_ref *this, *next; for (i=0; i<c->nr_blocks; i++) { this = c->blocks[i].first_node; while (this) { if (this[REFS_PER_BLOCK].flash_offset == REF_LINK_NODE) next = this[REFS_PER_BLOCK].next_in_ino; else next = NULL; jffs2_free_refblock(this); this = next; } c->blocks[i].first_node = c->blocks[i].last_node = NULL; } } struct jffs2_node_frag *jffs2_lookup_node_frag(struct rb_root *fragtree, uint32_t offset) { /* The common case in lookup is that there will be a node which precisely matches. So we go looking for that first */ struct rb_node *next; struct jffs2_node_frag *prev = NULL; struct jffs2_node_frag *frag = NULL; dbg_fragtree2("root %p, offset %d\n", fragtree, offset); next = fragtree->rb_node; while(next) { frag = rb_entry(next, struct jffs2_node_frag, rb); if (frag->ofs + frag->size <= offset) { /* Remember the closest smaller match on the way down */ if (!prev || frag->ofs > prev->ofs) prev = frag; next = frag->rb.rb_right; } else if (frag->ofs > offset) { next = frag->rb.rb_left; } else { return frag; } } /* Exact match not found. Go back up looking at each parent, and return the closest smaller one */ if (prev) dbg_fragtree2("no match. Returning frag %#04x-%#04x, closest previous\n", prev->ofs, prev->ofs+prev->size); else dbg_fragtree2("returning NULL, empty fragtree\n"); return prev; } /* Pass 'c' argument to indicate that nodes should be marked obsolete as they're killed. 
*/ void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c) { struct jffs2_node_frag *frag, *next; dbg_fragtree("killing\n"); rbtree_postorder_for_each_entry_safe(frag, next, root, rb) { if (frag->node && !(--frag->node->frags)) { /* Not a hole, and it's the final remaining frag of this node. Free the node */ if (c) jffs2_mark_node_obsolete(c, frag->node->raw); jffs2_free_full_dnode(frag->node); } jffs2_free_node_frag(frag); cond_resched(); } } struct jffs2_raw_node_ref *jffs2_link_node_ref(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t ofs, uint32_t len, struct jffs2_inode_cache *ic) { struct jffs2_raw_node_ref *ref; BUG_ON(!jeb->allocated_refs); jeb->allocated_refs--; ref = jeb->last_node; dbg_noderef("Last node at %p is (%08x,%p)\n", ref, ref->flash_offset, ref->next_in_ino); while (ref->flash_offset != REF_EMPTY_NODE) { if (ref->flash_offset == REF_LINK_NODE) ref = ref->next_in_ino; else ref++; } dbg_noderef("New ref is %p (%08x becomes %08x,%p) len 0x%x\n", ref, ref->flash_offset, ofs, ref->next_in_ino, len); ref->flash_offset = ofs; if (!jeb->first_node) { jeb->first_node = ref; BUG_ON(ref_offset(ref) != jeb->offset); } else if (unlikely(ref_offset(ref) != jeb->offset + c->sector_size - jeb->free_size)) { uint32_t last_len = ref_totlen(c, jeb, jeb->last_node); JFFS2_ERROR("Adding new ref %p at (0x%08x-0x%08x) not immediately after previous (0x%08x-0x%08x)\n", ref, ref_offset(ref), ref_offset(ref)+len, ref_offset(jeb->last_node), ref_offset(jeb->last_node)+last_len); BUG(); } jeb->last_node = ref; if (ic) { ref->next_in_ino = ic->nodes; ic->nodes = ref; } else { ref->next_in_ino = NULL; } switch(ref_flags(ref)) { case REF_UNCHECKED: c->unchecked_size += len; jeb->unchecked_size += len; break; case REF_NORMAL: case REF_PRISTINE: c->used_size += len; jeb->used_size += len; break; case REF_OBSOLETE: c->dirty_size += len; jeb->dirty_size += len; break; } c->free_size -= len; jeb->free_size -= len; #ifdef TEST_TOTLEN /* Set (and test) __totlen field... for now */ ref->__totlen = len; ref_totlen(c, jeb, ref); #endif return ref; } /* No locking, no reservation of 'ref'. Do not use on a live file system */ int jffs2_scan_dirty_space(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t size) { if (!size) return 0; if (unlikely(size > jeb->free_size)) { pr_crit("Dirty space 0x%x larger then free_size 0x%x (wasted 0x%x)\n", size, jeb->free_size, jeb->wasted_size); BUG(); } /* REF_EMPTY_NODE is !obsolete, so that works OK */ if (jeb->last_node && ref_obsolete(jeb->last_node)) { #ifdef TEST_TOTLEN jeb->last_node->__totlen += size; #endif c->dirty_size += size; c->free_size -= size; jeb->dirty_size += size; jeb->free_size -= size; } else { uint32_t ofs = jeb->offset + c->sector_size - jeb->free_size; ofs |= REF_OBSOLETE; jffs2_link_node_ref(c, jeb, ofs, size, NULL); } return 0; } /* Calculate totlen from surrounding nodes or eraseblock */ static inline uint32_t __ref_totlen(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, struct jffs2_raw_node_ref *ref) { uint32_t ref_end; struct jffs2_raw_node_ref *next_ref = ref_next(ref); if (next_ref) ref_end = ref_offset(next_ref); else { if (!jeb) jeb = &c->blocks[ref->flash_offset / c->sector_size]; /* Last node in block. Use free_space */ if (unlikely(ref != jeb->last_node)) { pr_crit("ref %p @0x%08x is not jeb->last_node (%p @0x%08x)\n", ref, ref_offset(ref), jeb->last_node, jeb->last_node ? 
ref_offset(jeb->last_node) : 0); BUG(); } ref_end = jeb->offset + c->sector_size - jeb->free_size; } return ref_end - ref_offset(ref); } uint32_t __jffs2_ref_totlen(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, struct jffs2_raw_node_ref *ref) { uint32_t ret; ret = __ref_totlen(c, jeb, ref); #ifdef TEST_TOTLEN if (unlikely(ret != ref->__totlen)) { if (!jeb) jeb = &c->blocks[ref->flash_offset / c->sector_size]; pr_crit("Totlen for ref at %p (0x%08x-0x%08x) miscalculated as 0x%x instead of %x\n", ref, ref_offset(ref), ref_offset(ref) + ref->__totlen, ret, ref->__totlen); if (ref_next(ref)) { pr_crit("next %p (0x%08x-0x%08x)\n", ref_next(ref), ref_offset(ref_next(ref)), ref_offset(ref_next(ref)) + ref->__totlen); } else pr_crit("No next ref. jeb->last_node is %p\n", jeb->last_node); pr_crit("jeb->wasted_size %x, dirty_size %x, used_size %x, free_size %x\n", jeb->wasted_size, jeb->dirty_size, jeb->used_size, jeb->free_size); #if defined(JFFS2_DBG_DUMPS) || defined(JFFS2_DBG_PARANOIA_CHECKS) __jffs2_dbg_dump_node_refs_nolock(c, jeb); #endif WARN_ON(1); ret = ref->__totlen; } #endif /* TEST_TOTLEN */ return ret; }
linux-master
fs/jffs2/nodelist.c
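Editor's aside, not taken from the kernel tree: a minimal userspace model of the inocache hash table used by jffs2_get_ino_cache() and jffs2_add_ino_cache() in nodelist.c above, an array of singly linked chains indexed by ino % hashsize, with each chain kept sorted by inode number so a lookup can stop as soon as it walks past the requested ino. The structure and names are simplified stand-ins; locking, inode state and the rest of jffs2_inode_cache are deliberately omitted.

/* Illustrative sketch only; sorted hash buckets as in the code above. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define HASHSIZE 128	/* stand-in for c->inocache_hashsize */

struct ino_cache {
	uint32_t ino;
	struct ino_cache *next;
};

static struct ino_cache *buckets[HASHSIZE];

static void add_ino_cache(struct ino_cache *new)
{
	struct ino_cache **prev = &buckets[new->ino % HASHSIZE];

	/* Walk the chain until the first entry with a larger or equal ino */
	while (*prev && (*prev)->ino < new->ino)
		prev = &(*prev)->next;

	new->next = *prev;
	*prev = new;
}

static struct ino_cache *get_ino_cache(uint32_t ino)
{
	struct ino_cache *ret = buckets[ino % HASHSIZE];

	/* Sorted chain: stop as soon as we pass the requested ino */
	while (ret && ret->ino < ino)
		ret = ret->next;

	return (ret && ret->ino == ino) ? ret : NULL;
}

int main(void)
{
	uint32_t inos[] = { 5, 133, 7, 261 };	/* 5, 133 and 261 share a bucket */
	unsigned i;

	for (i = 0; i < sizeof(inos) / sizeof(inos[0]); i++) {
		struct ino_cache *ic = calloc(1, sizeof(*ic));
		if (!ic)
			return 1;
		ic->ino = inos[i];
		add_ino_cache(ic);
	}

	printf("lookup 133 -> %s\n", get_ino_cache(133) ? "found" : "missing");
	printf("lookup 999 -> %s\n", get_ino_cache(999) ? "found" : "missing");
	return 0;
}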
/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 * Copyright © 2004-2010 David Woodhouse <[email protected]>
 *
 * Created by David Woodhouse <[email protected]>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#include <linux/fs.h>
#include "nodelist.h"

long jffs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	/* Later, this will provide for lsattr.jffs2 and chattr.jffs2, which
	   will include compression support etc. */
	return -ENOTTY;
}
linux-master
fs/jffs2/ioctl.c
/* * JFFS2 -- Journalling Flash File System, Version 2. * * Copyright © 2001-2007 Red Hat, Inc. * * Created by David Woodhouse <[email protected]> * * For licensing information, see the file 'LICENCE' in this directory. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/mtd/mtd.h> #include <linux/compiler.h> #include <linux/sched/signal.h> #include "nodelist.h" #include "debug.h" /* * Check whether the user is allowed to write. */ static int jffs2_rp_can_write(struct jffs2_sb_info *c) { uint32_t avail; struct jffs2_mount_opts *opts = &c->mount_opts; avail = c->dirty_size + c->free_size + c->unchecked_size + c->erasing_size - c->resv_blocks_write * c->sector_size - c->nospc_dirty_size; if (avail < 2 * opts->rp_size) jffs2_dbg(1, "rpsize %u, dirty_size %u, free_size %u, " "erasing_size %u, unchecked_size %u, " "nr_erasing_blocks %u, avail %u, resrv %u\n", opts->rp_size, c->dirty_size, c->free_size, c->erasing_size, c->unchecked_size, c->nr_erasing_blocks, avail, c->nospc_dirty_size); if (avail > opts->rp_size) return 1; /* Always allow root */ if (capable(CAP_SYS_RESOURCE)) return 1; jffs2_dbg(1, "forbid writing\n"); return 0; } /** * jffs2_reserve_space - request physical space to write nodes to flash * @c: superblock info * @minsize: Minimum acceptable size of allocation * @len: Returned value of allocation length * @prio: Allocation type - ALLOC_{NORMAL,DELETION} * * Requests a block of physical space on the flash. Returns zero for success * and puts 'len' into the appropriate place, or returns -ENOSPC or other * error if appropriate. Doesn't return len since that's * * If it returns zero, jffs2_reserve_space() also downs the per-filesystem * allocation semaphore, to prevent more than one allocation from being * active at any time. The semaphore is later released by jffs2_commit_allocation() * * jffs2_reserve_space() may trigger garbage collection in order to make room * for the requested allocation. */ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *len, uint32_t sumsize); int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *len, int prio, uint32_t sumsize) { int ret = -EAGAIN; int blocksneeded = c->resv_blocks_write; /* align it */ minsize = PAD(minsize); jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize); mutex_lock(&c->alloc_sem); jffs2_dbg(1, "%s(): alloc sem got\n", __func__); spin_lock(&c->erase_completion_lock); /* * Check if the free space is greater then size of the reserved pool. * If not, only allow root to proceed with writing. */ if (prio != ALLOC_DELETION && !jffs2_rp_can_write(c)) { ret = -ENOSPC; goto out; } /* this needs a little more thought (true <tglx> :)) */ while(ret == -EAGAIN) { while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) { uint32_t dirty, avail; /* calculate real dirty size * dirty_size contains blocks on erase_pending_list * those blocks are counted in c->nr_erasing_blocks. * If one block is actually erased, it is not longer counted as dirty_space * but it is counted in c->nr_erasing_blocks, so we add it and subtract it * with c->nr_erasing_blocks * c->sector_size again. * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks * This helps us to force gc and pick eventually a clean block to spread the load. * We add unchecked_size here, as we hopefully will find some space to use. * This will affect the sum only once, as gc first finishes checking * of nodes. 
*/ dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size; if (dirty < c->nospc_dirty_size) { if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) { jffs2_dbg(1, "%s(): Low on dirty space to GC, but it's a deletion. Allowing...\n", __func__); break; } jffs2_dbg(1, "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n", dirty, c->unchecked_size, c->sector_size); spin_unlock(&c->erase_completion_lock); mutex_unlock(&c->alloc_sem); return -ENOSPC; } /* Calc possibly available space. Possibly available means that we * don't know, if unchecked size contains obsoleted nodes, which could give us some * more usable space. This will affect the sum only once, as gc first finishes checking * of nodes. + Return -ENOSPC, if the maximum possibly available space is less or equal than * blocksneeded * sector_size. * This blocks endless gc looping on a filesystem, which is nearly full, even if * the check above passes. */ avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size; if ( (avail / c->sector_size) <= blocksneeded) { if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) { jffs2_dbg(1, "%s(): Low on possibly available space, but it's a deletion. Allowing...\n", __func__); break; } jffs2_dbg(1, "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n", avail, blocksneeded * c->sector_size); spin_unlock(&c->erase_completion_lock); mutex_unlock(&c->alloc_sem); return -ENOSPC; } mutex_unlock(&c->alloc_sem); jffs2_dbg(1, "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n", c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size, c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size); spin_unlock(&c->erase_completion_lock); ret = jffs2_garbage_collect_pass(c); if (ret == -EAGAIN) { spin_lock(&c->erase_completion_lock); if (c->nr_erasing_blocks && list_empty(&c->erase_pending_list) && list_empty(&c->erase_complete_list)) { DECLARE_WAITQUEUE(wait, current); set_current_state(TASK_UNINTERRUPTIBLE); add_wait_queue(&c->erase_wait, &wait); jffs2_dbg(1, "%s waiting for erase to complete\n", __func__); spin_unlock(&c->erase_completion_lock); schedule(); remove_wait_queue(&c->erase_wait, &wait); } else spin_unlock(&c->erase_completion_lock); } else if (ret) return ret; cond_resched(); if (signal_pending(current)) return -EINTR; mutex_lock(&c->alloc_sem); spin_lock(&c->erase_completion_lock); } ret = jffs2_do_reserve_space(c, minsize, len, sumsize); if (ret) { jffs2_dbg(1, "%s(): ret is %d\n", __func__, ret); } } out: spin_unlock(&c->erase_completion_lock); if (!ret) ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1); if (ret) mutex_unlock(&c->alloc_sem); return ret; } int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *len, uint32_t sumsize) { int ret; minsize = PAD(minsize); jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize); while (true) { spin_lock(&c->erase_completion_lock); ret = jffs2_do_reserve_space(c, minsize, len, sumsize); if (ret) { jffs2_dbg(1, "%s(): looping, ret is %d\n", __func__, ret); } spin_unlock(&c->erase_completion_lock); if (ret == -EAGAIN) cond_resched(); 
else break; } if (!ret) ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1); return ret; } /* Classify nextblock (clean, dirty of verydirty) and force to select an other one */ static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) { if (c->nextblock == NULL) { jffs2_dbg(1, "%s(): Erase block at 0x%08x has already been placed in a list\n", __func__, jeb->offset); return; } /* Check, if we have a dirty block now, or if it was dirty already */ if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) { c->dirty_size += jeb->wasted_size; c->wasted_size -= jeb->wasted_size; jeb->dirty_size += jeb->wasted_size; jeb->wasted_size = 0; if (VERYDIRTY(c, jeb->dirty_size)) { jffs2_dbg(1, "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size); list_add_tail(&jeb->list, &c->very_dirty_list); } else { jffs2_dbg(1, "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size); list_add_tail(&jeb->list, &c->dirty_list); } } else { jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size); list_add_tail(&jeb->list, &c->clean_list); } c->nextblock = NULL; } /* Select a new jeb for nextblock */ static int jffs2_find_nextblock(struct jffs2_sb_info *c) { struct list_head *next; /* Take the next block off the 'free' list */ if (list_empty(&c->free_list)) { if (!c->nr_erasing_blocks && !list_empty(&c->erasable_list)) { struct jffs2_eraseblock *ejeb; ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list); list_move_tail(&ejeb->list, &c->erase_pending_list); c->nr_erasing_blocks++; jffs2_garbage_collect_trigger(c); jffs2_dbg(1, "%s(): Triggering erase of erasable block at 0x%08x\n", __func__, ejeb->offset); } if (!c->nr_erasing_blocks && !list_empty(&c->erasable_pending_wbuf_list)) { jffs2_dbg(1, "%s(): Flushing write buffer\n", __func__); /* c->nextblock is NULL, no update to c->nextblock allowed */ spin_unlock(&c->erase_completion_lock); jffs2_flush_wbuf_pad(c); spin_lock(&c->erase_completion_lock); /* Have another go. It'll be on the erasable_list now */ return -EAGAIN; } if (!c->nr_erasing_blocks) { /* Ouch. We're in GC, or we wouldn't have got here. And there's no space left. At all. */ pr_crit("Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n", c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list) ? "yes" : "no", list_empty(&c->erasing_list) ? "yes" : "no", list_empty(&c->erase_pending_list) ? "yes" : "no"); return -ENOSPC; } spin_unlock(&c->erase_completion_lock); /* Don't wait for it; just erase one right now */ jffs2_erase_pending_blocks(c, 1); spin_lock(&c->erase_completion_lock); /* An erase may have failed, decreasing the amount of free space available. 
So we must restart from the beginning */ return -EAGAIN; } next = c->free_list.next; list_del(next); c->nextblock = list_entry(next, struct jffs2_eraseblock, list); c->nr_free_blocks--; jffs2_sum_reset_collected(c->summary); /* reset collected summary */ #ifdef CONFIG_JFFS2_FS_WRITEBUFFER /* adjust write buffer offset, else we get a non contiguous write bug */ if (!(c->wbuf_ofs % c->sector_size) && !c->wbuf_len) c->wbuf_ofs = 0xffffffff; #endif jffs2_dbg(1, "%s(): new nextblock = 0x%08x\n", __func__, c->nextblock->offset); return 0; } /* Called with alloc sem _and_ erase_completion_lock */ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *len, uint32_t sumsize) { struct jffs2_eraseblock *jeb = c->nextblock; uint32_t reserved_size; /* for summary information at the end of the jeb */ int ret; restart: reserved_size = 0; if (jffs2_sum_active() && (sumsize != JFFS2_SUMMARY_NOSUM_SIZE)) { /* NOSUM_SIZE means not to generate summary */ if (jeb) { reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE); dbg_summary("minsize=%d , jeb->free=%d ," "summary->size=%d , sumsize=%d\n", minsize, jeb->free_size, c->summary->sum_size, sumsize); } /* Is there enough space for writing out the current node, or we have to write out summary information now, close this jeb and select new nextblock? */ if (jeb && (PAD(minsize) + PAD(c->summary->sum_size + sumsize + JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size)) { /* Has summary been disabled for this jeb? */ if (jffs2_sum_is_disabled(c->summary)) { sumsize = JFFS2_SUMMARY_NOSUM_SIZE; goto restart; } /* Writing out the collected summary information */ dbg_summary("generating summary for 0x%08x.\n", jeb->offset); ret = jffs2_sum_write_sumnode(c); if (ret) return ret; if (jffs2_sum_is_disabled(c->summary)) { /* jffs2_write_sumnode() couldn't write out the summary information diabling summary for this jeb and free the collected information */ sumsize = JFFS2_SUMMARY_NOSUM_SIZE; goto restart; } jffs2_close_nextblock(c, jeb); jeb = NULL; /* keep always valid value in reserved_size */ reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE); } } else { if (jeb && minsize > jeb->free_size) { uint32_t waste; /* Skip the end of this block and file it as having some dirty space */ /* If there's a pending write to it, flush now */ if (jffs2_wbuf_dirty(c)) { spin_unlock(&c->erase_completion_lock); jffs2_dbg(1, "%s(): Flushing write buffer\n", __func__); jffs2_flush_wbuf_pad(c); spin_lock(&c->erase_completion_lock); jeb = c->nextblock; goto restart; } spin_unlock(&c->erase_completion_lock); ret = jffs2_prealloc_raw_node_refs(c, jeb, 1); /* Just lock it again and continue. Nothing much can change because we hold c->alloc_sem anyway. In fact, it's not entirely clear why we hold c->erase_completion_lock in the majority of this function... but that's a question for another (more caffeine-rich) day. */ spin_lock(&c->erase_completion_lock); if (ret) return ret; waste = jeb->free_size; jffs2_link_node_ref(c, jeb, (jeb->offset + c->sector_size - waste) | REF_OBSOLETE, waste, NULL); /* FIXME: that made it count as dirty. Convert to wasted */ jeb->dirty_size -= waste; c->dirty_size -= waste; jeb->wasted_size += waste; c->wasted_size += waste; jffs2_close_nextblock(c, jeb); jeb = NULL; } } if (!jeb) { ret = jffs2_find_nextblock(c); if (ret) return ret; jeb = c->nextblock; if (jeb->free_size != c->sector_size - c->cleanmarker_size) { pr_warn("Eep. 
Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size); goto restart; } } /* OK, jeb (==c->nextblock) is now pointing at a block which definitely has enough space */ *len = jeb->free_size - reserved_size; if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size && !jeb->first_node->next_in_ino) { /* Only node in it beforehand was a CLEANMARKER node (we think). So mark it obsolete now that there's going to be another node in the block. This will reduce used_size to zero but We've already set c->nextblock so that jffs2_mark_node_obsolete() won't try to refile it to the dirty_list. */ spin_unlock(&c->erase_completion_lock); jffs2_mark_node_obsolete(c, jeb->first_node); spin_lock(&c->erase_completion_lock); } jffs2_dbg(1, "%s(): Giving 0x%x bytes at 0x%x\n", __func__, *len, jeb->offset + (c->sector_size - jeb->free_size)); return 0; } /** * jffs2_add_physical_node_ref - add a physical node reference to the list * @c: superblock info * @new: new node reference to add * @len: length of this physical node * * Should only be used to report nodes for which space has been allocated * by jffs2_reserve_space. * * Must be called with the alloc_sem held. */ struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c, uint32_t ofs, uint32_t len, struct jffs2_inode_cache *ic) { struct jffs2_eraseblock *jeb; struct jffs2_raw_node_ref *new; jeb = &c->blocks[ofs / c->sector_size]; jffs2_dbg(1, "%s(): Node at 0x%x(%d), size 0x%x\n", __func__, ofs & ~3, ofs & 3, len); #if 1 /* Allow non-obsolete nodes only to be added at the end of c->nextblock, if c->nextblock is set. Note that wbuf.c will file obsolete nodes even after refiling c->nextblock */ if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE)) && (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) { pr_warn("argh. 
node added in wrong place at 0x%08x(%d)\n", ofs & ~3, ofs & 3); if (c->nextblock) pr_warn("nextblock 0x%08x", c->nextblock->offset); else pr_warn("No nextblock"); pr_cont(", expected at %08x\n", jeb->offset + (c->sector_size - jeb->free_size)); return ERR_PTR(-EINVAL); } #endif spin_lock(&c->erase_completion_lock); new = jffs2_link_node_ref(c, jeb, ofs, len, ic); if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) { /* If it lives on the dirty_list, jffs2_reserve_space will put it there */ jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size); if (jffs2_wbuf_dirty(c)) { /* Flush the last write in the block if it's outstanding */ spin_unlock(&c->erase_completion_lock); jffs2_flush_wbuf_pad(c); spin_lock(&c->erase_completion_lock); } list_add_tail(&jeb->list, &c->clean_list); c->nextblock = NULL; } jffs2_dbg_acct_sanity_check_nolock(c,jeb); jffs2_dbg_acct_paranoia_check_nolock(c, jeb); spin_unlock(&c->erase_completion_lock); return new; } void jffs2_complete_reservation(struct jffs2_sb_info *c) { jffs2_dbg(1, "jffs2_complete_reservation()\n"); spin_lock(&c->erase_completion_lock); jffs2_garbage_collect_trigger(c); spin_unlock(&c->erase_completion_lock); mutex_unlock(&c->alloc_sem); } static inline int on_list(struct list_head *obj, struct list_head *head) { struct list_head *this; list_for_each(this, head) { if (this == obj) { jffs2_dbg(1, "%p is on list at %p\n", obj, head); return 1; } } return 0; } void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref) { struct jffs2_eraseblock *jeb; int blocknr; struct jffs2_unknown_node n; int ret, addedsize; size_t retlen; uint32_t freed_len; if(unlikely(!ref)) { pr_notice("EEEEEK. jffs2_mark_node_obsolete called with NULL node\n"); return; } if (ref_obsolete(ref)) { jffs2_dbg(1, "%s(): called with already obsolete node at 0x%08x\n", __func__, ref_offset(ref)); return; } blocknr = ref->flash_offset / c->sector_size; if (blocknr >= c->nr_blocks) { pr_notice("raw node at 0x%08x is off the end of device!\n", ref->flash_offset); BUG(); } jeb = &c->blocks[blocknr]; if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) && !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) { /* Hm. This may confuse static lock analysis. If any of the above three conditions is false, we're going to return from this function without actually obliterating any nodes or freeing any jffs2_raw_node_refs. So we don't need to stop erases from happening, or protect against people holding an obsolete jffs2_raw_node_ref without the erase_completion_lock. 
*/ mutex_lock(&c->erase_free_sem); } spin_lock(&c->erase_completion_lock); freed_len = ref_totlen(c, jeb, ref); if (ref_flags(ref) == REF_UNCHECKED) { D1(if (unlikely(jeb->unchecked_size < freed_len)) { pr_notice("raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n", freed_len, blocknr, ref->flash_offset, jeb->used_size); BUG(); }) jffs2_dbg(1, "Obsoleting previously unchecked node at 0x%08x of len %x\n", ref_offset(ref), freed_len); jeb->unchecked_size -= freed_len; c->unchecked_size -= freed_len; } else { D1(if (unlikely(jeb->used_size < freed_len)) { pr_notice("raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n", freed_len, blocknr, ref->flash_offset, jeb->used_size); BUG(); }) jffs2_dbg(1, "Obsoleting node at 0x%08x of len %#x: ", ref_offset(ref), freed_len); jeb->used_size -= freed_len; c->used_size -= freed_len; } // Take care, that wasted size is taken into concern if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) { jffs2_dbg(1, "Dirtying\n"); addedsize = freed_len; jeb->dirty_size += freed_len; c->dirty_size += freed_len; /* Convert wasted space to dirty, if not a bad block */ if (jeb->wasted_size) { if (on_list(&jeb->list, &c->bad_used_list)) { jffs2_dbg(1, "Leaving block at %08x on the bad_used_list\n", jeb->offset); addedsize = 0; /* To fool the refiling code later */ } else { jffs2_dbg(1, "Converting %d bytes of wasted space to dirty in block at %08x\n", jeb->wasted_size, jeb->offset); addedsize += jeb->wasted_size; jeb->dirty_size += jeb->wasted_size; c->dirty_size += jeb->wasted_size; c->wasted_size -= jeb->wasted_size; jeb->wasted_size = 0; } } } else { jffs2_dbg(1, "Wasting\n"); addedsize = 0; jeb->wasted_size += freed_len; c->wasted_size += freed_len; } ref->flash_offset = ref_offset(ref) | REF_OBSOLETE; jffs2_dbg_acct_sanity_check_nolock(c, jeb); jffs2_dbg_acct_paranoia_check_nolock(c, jeb); if (c->flags & JFFS2_SB_FLAG_SCANNING) { /* Flash scanning is in progress. Don't muck about with the block lists because they're not ready yet, and don't actually obliterate nodes that look obsolete. If they weren't marked obsolete on the flash at the time they _became_ obsolete, there was probably a reason for that. */ spin_unlock(&c->erase_completion_lock); /* We didn't lock the erase_free_sem */ return; } if (jeb == c->nextblock) { jffs2_dbg(2, "Not moving nextblock 0x%08x to dirty/erase_pending list\n", jeb->offset); } else if (!jeb->used_size && !jeb->unchecked_size) { if (jeb == c->gcblock) { jffs2_dbg(1, "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", jeb->offset); c->gcblock = NULL; } else { jffs2_dbg(1, "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset); list_del(&jeb->list); } if (jffs2_wbuf_dirty(c)) { jffs2_dbg(1, "...and adding to erasable_pending_wbuf_list\n"); list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list); } else { if (jiffies & 127) { /* Most of the time, we just erase it immediately. Otherwise we spend ages scanning it on mount, etc. */ jffs2_dbg(1, "...and adding to erase_pending_list\n"); list_add_tail(&jeb->list, &c->erase_pending_list); c->nr_erasing_blocks++; jffs2_garbage_collect_trigger(c); } else { /* Sometimes, however, we leave it elsewhere so it doesn't get immediately reused, and we spread the load a bit. 
*/ jffs2_dbg(1, "...and adding to erasable_list\n"); list_add_tail(&jeb->list, &c->erasable_list); } } jffs2_dbg(1, "Done OK\n"); } else if (jeb == c->gcblock) { jffs2_dbg(2, "Not moving gcblock 0x%08x to dirty_list\n", jeb->offset); } else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) { jffs2_dbg(1, "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", jeb->offset); list_del(&jeb->list); jffs2_dbg(1, "...and adding to dirty_list\n"); list_add_tail(&jeb->list, &c->dirty_list); } else if (VERYDIRTY(c, jeb->dirty_size) && !VERYDIRTY(c, jeb->dirty_size - addedsize)) { jffs2_dbg(1, "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", jeb->offset); list_del(&jeb->list); jffs2_dbg(1, "...and adding to very_dirty_list\n"); list_add_tail(&jeb->list, &c->very_dirty_list); } else { jffs2_dbg(1, "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n", jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size); } spin_unlock(&c->erase_completion_lock); if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c) || (c->flags & JFFS2_SB_FLAG_BUILDING)) { /* We didn't lock the erase_free_sem */ return; } /* The erase_free_sem is locked, and has been since before we marked the node obsolete and potentially put its eraseblock onto the erase_pending_list. Thus, we know that the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet by jffs2_free_jeb_node_refs() in erase.c. Which is nice. */ jffs2_dbg(1, "obliterating obsoleted node at 0x%08x\n", ref_offset(ref)); ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n); if (ret) { pr_warn("Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret); goto out_erase_sem; } if (retlen != sizeof(n)) { pr_warn("Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen); goto out_erase_sem; } if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) { pr_warn("Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), freed_len); goto out_erase_sem; } if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) { jffs2_dbg(1, "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n", ref_offset(ref), je16_to_cpu(n.nodetype)); goto out_erase_sem; } /* XXX FIXME: This is ugly now */ n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE); ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n); if (ret) { pr_warn("Write error in obliterating obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret); goto out_erase_sem; } if (retlen != sizeof(n)) { pr_warn("Short write in obliterating obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen); goto out_erase_sem; } /* Nodes which have been marked obsolete no longer need to be associated with any inode. Remove them from the per-inode list. Note we can't do this for NAND at the moment because we need obsolete dirent nodes to stay on the lists, because of the horridness in jffs2_garbage_collect_deletion_dirent(). Also because we delete the inocache, and on NAND we need that to stay around until all the nodes are actually erased, in order to stop us from giving the same inode number to another newly created inode. 
*/ if (ref->next_in_ino) { struct jffs2_inode_cache *ic; struct jffs2_raw_node_ref **p; spin_lock(&c->erase_completion_lock); ic = jffs2_raw_ref_to_ic(ref); for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino)) ; *p = ref->next_in_ino; ref->next_in_ino = NULL; switch (ic->class) { #ifdef CONFIG_JFFS2_FS_XATTR case RAWNODE_CLASS_XATTR_DATUM: jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic); break; case RAWNODE_CLASS_XATTR_REF: jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic); break; #endif default: if (ic->nodes == (void *)ic && ic->pino_nlink == 0) jffs2_del_ino_cache(c, ic); break; } spin_unlock(&c->erase_completion_lock); } out_erase_sem: mutex_unlock(&c->erase_free_sem); } int jffs2_thread_should_wake(struct jffs2_sb_info *c) { int ret = 0; uint32_t dirty; int nr_very_dirty = 0; struct jffs2_eraseblock *jeb; if (!list_empty(&c->erase_complete_list) || !list_empty(&c->erase_pending_list)) return 1; if (c->unchecked_size) { jffs2_dbg(1, "jffs2_thread_should_wake(): unchecked_size %d, check_ino #%d\n", c->unchecked_size, c->check_ino); return 1; } /* dirty_size contains blocks on erase_pending_list * those blocks are counted in c->nr_erasing_blocks. * If one block is actually erased, it is not longer counted as dirty_space * but it is counted in c->nr_erasing_blocks, so we add it and subtract it * with c->nr_erasing_blocks * c->sector_size again. * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks * This helps us to force gc and pick eventually a clean block to spread the load. */ dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size; if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger && (dirty > c->nospc_dirty_size)) ret = 1; list_for_each_entry(jeb, &c->very_dirty_list, list) { nr_very_dirty++; if (nr_very_dirty == c->vdirty_blocks_gctrigger) { ret = 1; /* In debug mode, actually go through and count them all */ D1(continue); break; } } jffs2_dbg(1, "%s(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x, vdirty_blocks %d: %s\n", __func__, c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, nr_very_dirty, ret ? "yes" : "no"); return ret; }
linux-master
fs/jffs2/nodemgmt.c
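The nodemgmt.c record above ends with jffs2_thread_should_wake(), whose dirty-space accounting is easy to misread: blocks already queued for erase are counted in erasing_size, so nr_erasing_blocks * sector_size is subtracted again. Below is a minimal, userspace-runnable sketch of just that check. The struct, its field values, and the sample numbers are assumptions for illustration; only the arithmetic mirrors the kernel function.

/*
 * Illustrative sketch (not kernel code): the dirty-space accounting used by
 * jffs2_thread_should_wake(). All numbers below are made-up examples.
 */
#include <stdint.h>
#include <stdio.h>

struct fs_counters {                 /* hypothetical, mirrors a few jffs2_sb_info fields */
	uint32_t sector_size;
	uint32_t dirty_size;
	uint32_t erasing_size;
	uint32_t nr_erasing_blocks;
	uint32_t nr_free_blocks;
	uint32_t resv_blocks_gctrigger;
	uint32_t nospc_dirty_size;
};

static int should_wake_gc(const struct fs_counters *c)
{
	/* Space that is dirty or being erased, minus the blocks already
	 * accounted for as "erasing" so they are not counted twice. */
	uint32_t dirty = c->dirty_size + c->erasing_size -
			 c->nr_erasing_blocks * c->sector_size;

	return (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger) &&
	       (dirty > c->nospc_dirty_size);
}

int main(void)
{
	struct fs_counters c = {
		.sector_size = 65536, .dirty_size = 900000, .erasing_size = 131072,
		.nr_erasing_blocks = 2, .nr_free_blocks = 5,
		.resv_blocks_gctrigger = 9, .nospc_dirty_size = 233308,
	};
	printf("wake GC thread: %s\n", should_wake_gc(&c) ? "yes" : "no");
	return 0;
}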
/* * JFFS2 -- Journalling Flash File System, Version 2. * * Copyright © 2001-2007 Red Hat, Inc. * Copyright © 2004-2010 David Woodhouse <[email protected]> * * Created by David Woodhouse <[email protected]> * * For licensing information, see the file 'LICENCE' in this directory. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/mtd/mtd.h> #include <linux/mm.h> /* kvfree() */ #include "nodelist.h" static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *, struct jffs2_inode_cache *, struct jffs2_full_dirent **); static inline struct jffs2_inode_cache * first_inode_chain(int *i, struct jffs2_sb_info *c) { for (; *i < c->inocache_hashsize; (*i)++) { if (c->inocache_list[*i]) return c->inocache_list[*i]; } return NULL; } static inline struct jffs2_inode_cache * next_inode(int *i, struct jffs2_inode_cache *ic, struct jffs2_sb_info *c) { /* More in this chain? */ if (ic->next) return ic->next; (*i)++; return first_inode_chain(i, c); } #define for_each_inode(i, c, ic) \ for (i = 0, ic = first_inode_chain(&i, (c)); \ ic; \ ic = next_inode(&i, ic, (c))) static void jffs2_build_inode_pass1(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, int *dir_hardlinks) { struct jffs2_full_dirent *fd; dbg_fsbuild("building directory inode #%u\n", ic->ino); /* For each child, increase nlink */ for(fd = ic->scan_dents; fd; fd = fd->next) { struct jffs2_inode_cache *child_ic; if (!fd->ino) continue; /* we can get high latency here with huge directories */ child_ic = jffs2_get_ino_cache(c, fd->ino); if (!child_ic) { dbg_fsbuild("child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n", fd->name, fd->ino, ic->ino); jffs2_mark_node_obsolete(c, fd->raw); /* Clear the ic/raw union so it doesn't cause problems later. */ fd->ic = NULL; continue; } /* From this point, fd->raw is no longer used so we can set fd->ic */ fd->ic = child_ic; child_ic->pino_nlink++; /* If we appear (at this stage) to have hard-linked directories, * set a flag to trigger a scan later */ if (fd->type == DT_DIR) { child_ic->flags |= INO_FLAGS_IS_DIR; if (child_ic->pino_nlink > 1) *dir_hardlinks = 1; } dbg_fsbuild("increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino); /* Can't free scan_dents so far. We might need them in pass 2 */ } } /* Scan plan: - Scan physical nodes. Build map of inodes/dirents. Allocate inocaches as we go - Scan directory tree from top down, setting nlink in inocaches - Scan inocaches for inodes with nlink==0 */ static int jffs2_build_filesystem(struct jffs2_sb_info *c) { int ret, i, dir_hardlinks = 0; struct jffs2_inode_cache *ic; struct jffs2_full_dirent *fd; struct jffs2_full_dirent *dead_fds = NULL; dbg_fsbuild("build FS data structures\n"); /* First, scan the medium and build all the inode caches with lists of physical nodes */ c->flags |= JFFS2_SB_FLAG_SCANNING; ret = jffs2_scan_medium(c); c->flags &= ~JFFS2_SB_FLAG_SCANNING; if (ret) goto exit; dbg_fsbuild("scanned flash completely\n"); jffs2_dbg_dump_block_lists_nolock(c); dbg_fsbuild("pass 1 starting\n"); c->flags |= JFFS2_SB_FLAG_BUILDING; /* Now scan the directory tree, increasing nlink according to every dirent found. */ for_each_inode(i, c, ic) { if (ic->scan_dents) { jffs2_build_inode_pass1(c, ic, &dir_hardlinks); cond_resched(); } } dbg_fsbuild("pass 1 complete\n"); /* Next, scan for inodes with nlink == 0 and remove them. If they were directories, then decrement the nlink of their children too, and repeat the scan. 
As that's going to be a fairly uncommon occurrence, it's not so evil to do it this way. Recursion bad. */ dbg_fsbuild("pass 2 starting\n"); for_each_inode(i, c, ic) { if (ic->pino_nlink) continue; jffs2_build_remove_unlinked_inode(c, ic, &dead_fds); cond_resched(); } dbg_fsbuild("pass 2a starting\n"); while (dead_fds) { fd = dead_fds; dead_fds = fd->next; ic = jffs2_get_ino_cache(c, fd->ino); if (ic) jffs2_build_remove_unlinked_inode(c, ic, &dead_fds); jffs2_free_full_dirent(fd); } dbg_fsbuild("pass 2a complete\n"); if (dir_hardlinks) { /* If we detected directory hardlinks earlier, *hopefully* * they are gone now because some of the links were from * dead directories which still had some old dirents lying * around and not yet garbage-collected, but which have * been discarded above. So clear the pino_nlink field * in each directory, so that the final scan below can * print appropriate warnings. */ for_each_inode(i, c, ic) { if (ic->flags & INO_FLAGS_IS_DIR) ic->pino_nlink = 0; } } dbg_fsbuild("freeing temporary data structures\n"); /* Finally, we can scan again and free the dirent structs */ for_each_inode(i, c, ic) { while(ic->scan_dents) { fd = ic->scan_dents; ic->scan_dents = fd->next; /* We do use the pino_nlink field to count nlink of * directories during fs build, so set it to the * parent ino# now. Now that there's hopefully only * one. */ if (fd->type == DT_DIR) { if (!fd->ic) { /* We'll have complained about it and marked the coresponding raw node obsolete already. Just skip it. */ continue; } /* We *have* to have set this in jffs2_build_inode_pass1() */ BUG_ON(!(fd->ic->flags & INO_FLAGS_IS_DIR)); /* We clear ic->pino_nlink ∀ directories' ic *only* if dir_hardlinks * is set. Otherwise, we know this should never trigger anyway, so * we don't do the check. And ic->pino_nlink still contains the nlink * value (which is 1). */ if (dir_hardlinks && fd->ic->pino_nlink) { JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u is also hard linked from dir ino #%u\n", fd->name, fd->ino, ic->ino, fd->ic->pino_nlink); /* Should we unlink it from its previous parent? */ } /* For directories, ic->pino_nlink holds that parent inode # */ fd->ic->pino_nlink = ic->ino; } jffs2_free_full_dirent(fd); } ic->scan_dents = NULL; cond_resched(); } ret = jffs2_build_xattr_subsystem(c); if (ret) goto exit; c->flags &= ~JFFS2_SB_FLAG_BUILDING; dbg_fsbuild("FS build complete\n"); /* Rotate the lists by some number to ensure wear levelling */ jffs2_rotate_lists(c); ret = 0; exit: if (ret) { for_each_inode(i, c, ic) { while(ic->scan_dents) { fd = ic->scan_dents; ic->scan_dents = fd->next; jffs2_free_full_dirent(fd); } } jffs2_clear_xattr_subsystem(c); } return ret; } static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, struct jffs2_full_dirent **dead_fds) { struct jffs2_raw_node_ref *raw; struct jffs2_full_dirent *fd; dbg_fsbuild("removing ino #%u with nlink == zero.\n", ic->ino); raw = ic->nodes; while (raw != (void *)ic) { struct jffs2_raw_node_ref *next = raw->next_in_ino; dbg_fsbuild("obsoleting node at 0x%08x\n", ref_offset(raw)); jffs2_mark_node_obsolete(c, raw); raw = next; } if (ic->scan_dents) { int whinged = 0; dbg_fsbuild("inode #%u was a directory which may have children...\n", ic->ino); while(ic->scan_dents) { struct jffs2_inode_cache *child_ic; fd = ic->scan_dents; ic->scan_dents = fd->next; if (!fd->ino) { /* It's a deletion dirent. 
Ignore it */ dbg_fsbuild("child \"%s\" is a deletion dirent, skipping...\n", fd->name); jffs2_free_full_dirent(fd); continue; } if (!whinged) whinged = 1; dbg_fsbuild("removing child \"%s\", ino #%u\n", fd->name, fd->ino); child_ic = jffs2_get_ino_cache(c, fd->ino); if (!child_ic) { dbg_fsbuild("cannot remove child \"%s\", ino #%u, because it doesn't exist\n", fd->name, fd->ino); jffs2_free_full_dirent(fd); continue; } /* Reduce nlink of the child. If it's now zero, stick it on the dead_fds list to be cleaned up later. Else just free the fd */ child_ic->pino_nlink--; if (!child_ic->pino_nlink) { dbg_fsbuild("inode #%u (\"%s\") now has no links; adding to dead_fds list.\n", fd->ino, fd->name); fd->next = *dead_fds; *dead_fds = fd; } else { dbg_fsbuild("inode #%u (\"%s\") has now got nlink %d. Ignoring.\n", fd->ino, fd->name, child_ic->pino_nlink); jffs2_free_full_dirent(fd); } } } /* We don't delete the inocache from the hash list and free it yet. The erase code will do that, when all the nodes are completely gone. */ } static void jffs2_calc_trigger_levels(struct jffs2_sb_info *c) { uint32_t size; /* Deletion should almost _always_ be allowed. We're fairly buggered once we stop allowing people to delete stuff because there's not enough free space... */ c->resv_blocks_deletion = 2; /* Be conservative about how much space we need before we allow writes. On top of that which is required for deletia, require an extra 2% of the medium to be available, for overhead caused by nodes being split across blocks, etc. */ size = c->flash_size / 50; /* 2% of flash size */ size += c->nr_blocks * 100; /* And 100 bytes per eraseblock */ size += c->sector_size - 1; /* ... and round up */ c->resv_blocks_write = c->resv_blocks_deletion + (size / c->sector_size); /* When do we let the GC thread run in the background */ c->resv_blocks_gctrigger = c->resv_blocks_write + 1; /* When do we allow garbage collection to merge nodes to make long-term progress at the expense of short-term space exhaustion? */ c->resv_blocks_gcmerge = c->resv_blocks_deletion + 1; /* When do we allow garbage collection to eat from bad blocks rather than actually making progress? */ c->resv_blocks_gcbad = 0;//c->resv_blocks_deletion + 2; /* What number of 'very dirty' eraseblocks do we allow before we trigger the GC thread even if we don't _need_ the space. When we can't mark nodes obsolete on the medium, the old dirty nodes cause performance problems because we have to inspect and discard them. */ c->vdirty_blocks_gctrigger = c->resv_blocks_gctrigger; if (jffs2_can_mark_obsolete(c)) c->vdirty_blocks_gctrigger *= 10; /* If there's less than this amount of dirty space, don't bother trying to GC to make more space. 
It'll be a fruitless task */ c->nospc_dirty_size = c->sector_size + (c->flash_size / 100); dbg_fsbuild("trigger levels (size %d KiB, block size %d KiB, %d blocks)\n", c->flash_size / 1024, c->sector_size / 1024, c->nr_blocks); dbg_fsbuild("Blocks required to allow deletion: %d (%d KiB)\n", c->resv_blocks_deletion, c->resv_blocks_deletion*c->sector_size/1024); dbg_fsbuild("Blocks required to allow writes: %d (%d KiB)\n", c->resv_blocks_write, c->resv_blocks_write*c->sector_size/1024); dbg_fsbuild("Blocks required to quiesce GC thread: %d (%d KiB)\n", c->resv_blocks_gctrigger, c->resv_blocks_gctrigger*c->sector_size/1024); dbg_fsbuild("Blocks required to allow GC merges: %d (%d KiB)\n", c->resv_blocks_gcmerge, c->resv_blocks_gcmerge*c->sector_size/1024); dbg_fsbuild("Blocks required to GC bad blocks: %d (%d KiB)\n", c->resv_blocks_gcbad, c->resv_blocks_gcbad*c->sector_size/1024); dbg_fsbuild("Amount of dirty space required to GC: %d bytes\n", c->nospc_dirty_size); dbg_fsbuild("Very dirty blocks before GC triggered: %d\n", c->vdirty_blocks_gctrigger); } int jffs2_do_mount_fs(struct jffs2_sb_info *c) { int ret; int i; int size; c->free_size = c->flash_size; c->nr_blocks = c->flash_size / c->sector_size; size = sizeof(struct jffs2_eraseblock) * c->nr_blocks; #ifndef __ECOS if (jffs2_blocks_use_vmalloc(c)) c->blocks = vzalloc(size); else #endif c->blocks = kzalloc(size, GFP_KERNEL); if (!c->blocks) return -ENOMEM; for (i=0; i<c->nr_blocks; i++) { INIT_LIST_HEAD(&c->blocks[i].list); c->blocks[i].offset = i * c->sector_size; c->blocks[i].free_size = c->sector_size; } INIT_LIST_HEAD(&c->clean_list); INIT_LIST_HEAD(&c->very_dirty_list); INIT_LIST_HEAD(&c->dirty_list); INIT_LIST_HEAD(&c->erasable_list); INIT_LIST_HEAD(&c->erasing_list); INIT_LIST_HEAD(&c->erase_checking_list); INIT_LIST_HEAD(&c->erase_pending_list); INIT_LIST_HEAD(&c->erasable_pending_wbuf_list); INIT_LIST_HEAD(&c->erase_complete_list); INIT_LIST_HEAD(&c->free_list); INIT_LIST_HEAD(&c->bad_list); INIT_LIST_HEAD(&c->bad_used_list); c->highest_ino = 1; c->summary = NULL; ret = jffs2_sum_init(c); if (ret) goto out_free; if (jffs2_build_filesystem(c)) { dbg_fsbuild("build_fs failed\n"); jffs2_free_ino_caches(c); jffs2_free_raw_node_refs(c); ret = -EIO; goto out_sum_exit; } jffs2_calc_trigger_levels(c); return 0; out_sum_exit: jffs2_sum_exit(c); out_free: kvfree(c->blocks); return ret; }
linux-master
fs/jffs2/build.c
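jffs2_calc_trigger_levels() in the build.c record derives every reserved-block threshold from the flash geometry. The following sketch reproduces that arithmetic for a hypothetical 16 MiB medium with 64 KiB eraseblocks; the geometry and the local variable names are assumptions, the formulas follow the function above.

/*
 * Illustrative sketch (not kernel code): the reserved-block arithmetic from
 * jffs2_calc_trigger_levels(), evaluated for a made-up 16 MiB / 64 KiB device.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t flash_size  = 16 * 1024 * 1024;    /* hypothetical geometry */
	uint32_t sector_size = 64 * 1024;
	uint32_t nr_blocks   = flash_size / sector_size;
	uint32_t size;

	uint32_t resv_deletion = 2;                 /* always keep room to delete */

	size  = flash_size / 50;                    /* 2% of the medium for overhead */
	size += nr_blocks * 100;                    /* plus 100 bytes per eraseblock */
	size += sector_size - 1;                    /* round up to whole blocks */
	uint32_t resv_write     = resv_deletion + size / sector_size;
	uint32_t resv_gctrigger = resv_write + 1;   /* background GC kicks in earlier */
	uint32_t resv_gcmerge   = resv_deletion + 1;
	uint32_t nospc_dirty    = sector_size + flash_size / 100;

	printf("deletion=%u write=%u gctrigger=%u gcmerge=%u nospc_dirty=%u\n",
	       resv_deletion, resv_write, resv_gctrigger, resv_gcmerge, nospc_dirty);
	/* Prints: deletion=2 write=8 gctrigger=9 gcmerge=3 nospc_dirty=233308 */
	return 0;
}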
/* * JFFS2 -- Journalling Flash File System, Version 2. * * Copyright © 2001-2007 Red Hat, Inc. * Copyright © 2004-2010 David Woodhouse <[email protected]> * * Created by Arjan van de Ven <[email protected]> * * For licensing information, see the file 'LICENCE' in this directory. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/string.h> #include <linux/types.h> #include <linux/jffs2.h> #include <linux/errno.h> #include "compr.h" #define RUBIN_REG_SIZE 16 #define UPPER_BIT_RUBIN (((long) 1)<<(RUBIN_REG_SIZE-1)) #define LOWER_BITS_RUBIN ((((long) 1)<<(RUBIN_REG_SIZE-1))-1) #define BIT_DIVIDER_MIPS 1043 static int bits_mips[8] = { 277, 249, 290, 267, 229, 341, 212, 241}; struct pushpull { unsigned char *buf; unsigned int buflen; unsigned int ofs; unsigned int reserve; }; struct rubin_state { unsigned long p; unsigned long q; unsigned long rec_q; long bit_number; struct pushpull pp; int bit_divider; int bits[8]; }; static inline void init_pushpull(struct pushpull *pp, char *buf, unsigned buflen, unsigned ofs, unsigned reserve) { pp->buf = buf; pp->buflen = buflen; pp->ofs = ofs; pp->reserve = reserve; } static inline int pushbit(struct pushpull *pp, int bit, int use_reserved) { if (pp->ofs >= pp->buflen - (use_reserved?0:pp->reserve)) return -ENOSPC; if (bit) pp->buf[pp->ofs >> 3] |= (1<<(7-(pp->ofs & 7))); else pp->buf[pp->ofs >> 3] &= ~(1<<(7-(pp->ofs & 7))); pp->ofs++; return 0; } static inline int pushedbits(struct pushpull *pp) { return pp->ofs; } static inline int pullbit(struct pushpull *pp) { int bit; bit = (pp->buf[pp->ofs >> 3] >> (7-(pp->ofs & 7))) & 1; pp->ofs++; return bit; } static void init_rubin(struct rubin_state *rs, int div, int *bits) { int c; rs->q = 0; rs->p = (long) (2 * UPPER_BIT_RUBIN); rs->bit_number = (long) 0; rs->bit_divider = div; for (c=0; c<8; c++) rs->bits[c] = bits[c]; } static int encode(struct rubin_state *rs, long A, long B, int symbol) { long i0, i1; int ret; while ((rs->q >= UPPER_BIT_RUBIN) || ((rs->p + rs->q) <= UPPER_BIT_RUBIN)) { rs->bit_number++; ret = pushbit(&rs->pp, (rs->q & UPPER_BIT_RUBIN) ? 1 : 0, 0); if (ret) return ret; rs->q &= LOWER_BITS_RUBIN; rs->q <<= 1; rs->p <<= 1; } i0 = A * rs->p / (A + B); if (i0 <= 0) i0 = 1; if (i0 >= rs->p) i0 = rs->p - 1; i1 = rs->p - i0; if (symbol == 0) rs->p = i0; else { rs->p = i1; rs->q += i0; } return 0; } static void end_rubin(struct rubin_state *rs) { int i; for (i = 0; i < RUBIN_REG_SIZE; i++) { pushbit(&rs->pp, (UPPER_BIT_RUBIN & rs->q) ? 1 : 0, 1); rs->q &= LOWER_BITS_RUBIN; rs->q <<= 1; } } static void init_decode(struct rubin_state *rs, int div, int *bits) { init_rubin(rs, div, bits); /* behalve lower */ rs->rec_q = 0; for (rs->bit_number = 0; rs->bit_number++ < RUBIN_REG_SIZE; rs->rec_q = rs->rec_q * 2 + (long) (pullbit(&rs->pp))) ; } static void __do_decode(struct rubin_state *rs, unsigned long p, unsigned long q) { register unsigned long lower_bits_rubin = LOWER_BITS_RUBIN; unsigned long rec_q; int c, bits = 0; /* * First, work out how many bits we need from the input stream. * Note that we have already done the initial check on this * loop prior to calling this function. */ do { bits++; q &= lower_bits_rubin; q <<= 1; p <<= 1; } while ((q >= UPPER_BIT_RUBIN) || ((p + q) <= UPPER_BIT_RUBIN)); rs->p = p; rs->q = q; rs->bit_number += bits; /* * Now get the bits. We really want this to be "get n bits". 
*/ rec_q = rs->rec_q; do { c = pullbit(&rs->pp); rec_q &= lower_bits_rubin; rec_q <<= 1; rec_q += c; } while (--bits); rs->rec_q = rec_q; } static int decode(struct rubin_state *rs, long A, long B) { unsigned long p = rs->p, q = rs->q; long i0, threshold; int symbol; if (q >= UPPER_BIT_RUBIN || ((p + q) <= UPPER_BIT_RUBIN)) __do_decode(rs, p, q); i0 = A * rs->p / (A + B); if (i0 <= 0) i0 = 1; if (i0 >= rs->p) i0 = rs->p - 1; threshold = rs->q + i0; symbol = rs->rec_q >= threshold; if (rs->rec_q >= threshold) { rs->q += i0; i0 = rs->p - i0; } rs->p = i0; return symbol; } static int out_byte(struct rubin_state *rs, unsigned char byte) { int i, ret; struct rubin_state rs_copy; rs_copy = *rs; for (i=0; i<8; i++) { ret = encode(rs, rs->bit_divider-rs->bits[i], rs->bits[i], byte & 1); if (ret) { /* Failed. Restore old state */ *rs = rs_copy; return ret; } byte >>= 1 ; } return 0; } static int in_byte(struct rubin_state *rs) { int i, result = 0, bit_divider = rs->bit_divider; for (i = 0; i < 8; i++) result |= decode(rs, bit_divider - rs->bits[i], rs->bits[i]) << i; return result; } static int rubin_do_compress(int bit_divider, int *bits, unsigned char *data_in, unsigned char *cpage_out, uint32_t *sourcelen, uint32_t *dstlen) { int outpos = 0; int pos=0; struct rubin_state rs; init_pushpull(&rs.pp, cpage_out, *dstlen * 8, 0, 32); init_rubin(&rs, bit_divider, bits); while (pos < (*sourcelen) && !out_byte(&rs, data_in[pos])) pos++; end_rubin(&rs); if (outpos > pos) { /* We failed */ return -1; } /* Tell the caller how much we managed to compress, * and how much space it took */ outpos = (pushedbits(&rs.pp)+7)/8; if (outpos >= pos) return -1; /* We didn't actually compress */ *sourcelen = pos; *dstlen = outpos; return 0; } #if 0 /* _compress returns the compressed size, -1 if bigger */ int jffs2_rubinmips_compress(unsigned char *data_in, unsigned char *cpage_out, uint32_t *sourcelen, uint32_t *dstlen) { return rubin_do_compress(BIT_DIVIDER_MIPS, bits_mips, data_in, cpage_out, sourcelen, dstlen); } #endif static int jffs2_dynrubin_compress(unsigned char *data_in, unsigned char *cpage_out, uint32_t *sourcelen, uint32_t *dstlen) { int bits[8]; unsigned char histo[256]; int i; int ret; uint32_t mysrclen, mydstlen; mysrclen = *sourcelen; mydstlen = *dstlen - 8; if (*dstlen <= 12) return -1; memset(histo, 0, 256); for (i=0; i<mysrclen; i++) histo[data_in[i]]++; memset(bits, 0, sizeof(int)*8); for (i=0; i<256; i++) { if (i&128) bits[7] += histo[i]; if (i&64) bits[6] += histo[i]; if (i&32) bits[5] += histo[i]; if (i&16) bits[4] += histo[i]; if (i&8) bits[3] += histo[i]; if (i&4) bits[2] += histo[i]; if (i&2) bits[1] += histo[i]; if (i&1) bits[0] += histo[i]; } for (i=0; i<8; i++) { bits[i] = (bits[i] * 256) / mysrclen; if (!bits[i]) bits[i] = 1; if (bits[i] > 255) bits[i] = 255; cpage_out[i] = bits[i]; } ret = rubin_do_compress(256, bits, data_in, cpage_out+8, &mysrclen, &mydstlen); if (ret) return ret; /* Add back the 8 bytes we took for the probabilities */ mydstlen += 8; if (mysrclen <= mydstlen) { /* We compressed */ return -1; } *sourcelen = mysrclen; *dstlen = mydstlen; return 0; } static void rubin_do_decompress(int bit_divider, int *bits, unsigned char *cdata_in, unsigned char *page_out, uint32_t srclen, uint32_t destlen) { int outpos = 0; struct rubin_state rs; init_pushpull(&rs.pp, cdata_in, srclen, 0, 0); init_decode(&rs, bit_divider, bits); while (outpos < destlen) page_out[outpos++] = in_byte(&rs); } static int jffs2_rubinmips_decompress(unsigned char *data_in, unsigned char *cpage_out, uint32_t 
sourcelen, uint32_t dstlen) { rubin_do_decompress(BIT_DIVIDER_MIPS, bits_mips, data_in, cpage_out, sourcelen, dstlen); return 0; } static int jffs2_dynrubin_decompress(unsigned char *data_in, unsigned char *cpage_out, uint32_t sourcelen, uint32_t dstlen) { int bits[8]; int c; for (c=0; c<8; c++) bits[c] = data_in[c]; rubin_do_decompress(256, bits, data_in+8, cpage_out, sourcelen-8, dstlen); return 0; } static struct jffs2_compressor jffs2_rubinmips_comp = { .priority = JFFS2_RUBINMIPS_PRIORITY, .name = "rubinmips", .compr = JFFS2_COMPR_DYNRUBIN, .compress = NULL, /*&jffs2_rubinmips_compress,*/ .decompress = &jffs2_rubinmips_decompress, #ifdef JFFS2_RUBINMIPS_DISABLED .disabled = 1, #else .disabled = 0, #endif }; int jffs2_rubinmips_init(void) { return jffs2_register_compressor(&jffs2_rubinmips_comp); } void jffs2_rubinmips_exit(void) { jffs2_unregister_compressor(&jffs2_rubinmips_comp); } static struct jffs2_compressor jffs2_dynrubin_comp = { .priority = JFFS2_DYNRUBIN_PRIORITY, .name = "dynrubin", .compr = JFFS2_COMPR_RUBINMIPS, .compress = jffs2_dynrubin_compress, .decompress = &jffs2_dynrubin_decompress, #ifdef JFFS2_DYNRUBIN_DISABLED .disabled = 1, #else .disabled = 0, #endif }; int jffs2_dynrubin_init(void) { return jffs2_register_compressor(&jffs2_dynrubin_comp); } void jffs2_dynrubin_exit(void) { jffs2_unregister_compressor(&jffs2_dynrubin_comp); }
linux-master
fs/jffs2/compr_rubin.c
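The Rubin coder in the record above serialises its state one bit at a time through the pushpull helpers, MSB-first within each byte. Here is a small standalone sketch of that bit-stream convention; the function names, buffer size, and test pattern are local choices for the demonstration, only the packing rule matches pushbit()/pullbit().

/*
 * Illustrative sketch (not kernel code): the MSB-first bit packing used by
 * pushbit()/pullbit() in compr_rubin.c. Bit n of the stream lives in
 * buf[n >> 3], at bit position 7 - (n & 7).
 */
#include <assert.h>
#include <stdio.h>
#include <string.h>

static void put_bit(unsigned char *buf, unsigned ofs, int bit)
{
	if (bit)
		buf[ofs >> 3] |= 1u << (7 - (ofs & 7));
	else
		buf[ofs >> 3] &= ~(1u << (7 - (ofs & 7)));
}

static int get_bit(const unsigned char *buf, unsigned ofs)
{
	return (buf[ofs >> 3] >> (7 - (ofs & 7))) & 1;
}

int main(void)
{
	unsigned char buf[4];
	const int pattern[12] = { 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0 };
	unsigned i;

	memset(buf, 0, sizeof(buf));
	for (i = 0; i < 12; i++)
		put_bit(buf, i, pattern[i]);
	for (i = 0; i < 12; i++)
		assert(get_bit(buf, i) == pattern[i]);

	printf("first byte = 0x%02x\n", buf[0]);   /* 10110010 -> 0xb2 */
	return 0;
}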
/* * JFFS2 -- Journalling Flash File System, Version 2. * * Copyright © 2001-2007 Red Hat, Inc. * Copyright © 2004-2010 David Woodhouse <[email protected]> * * Created by David Woodhouse <[email protected]> * * For licensing information, see the file 'LICENCE' in this directory. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/jffs2.h> #include <linux/mtd/mtd.h> #include <linux/completion.h> #include <linux/sched/signal.h> #include <linux/freezer.h> #include <linux/kthread.h> #include "nodelist.h" static int jffs2_garbage_collect_thread(void *); void jffs2_garbage_collect_trigger(struct jffs2_sb_info *c) { assert_spin_locked(&c->erase_completion_lock); if (c->gc_task && jffs2_thread_should_wake(c)) send_sig(SIGHUP, c->gc_task, 1); } /* This must only ever be called when no GC thread is currently running */ int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c) { struct task_struct *tsk; int ret = 0; BUG_ON(c->gc_task); init_completion(&c->gc_thread_start); init_completion(&c->gc_thread_exit); tsk = kthread_run(jffs2_garbage_collect_thread, c, "jffs2_gcd_mtd%d", c->mtd->index); if (IS_ERR(tsk)) { pr_warn("fork failed for JFFS2 garbage collect thread: %ld\n", -PTR_ERR(tsk)); complete(&c->gc_thread_exit); ret = PTR_ERR(tsk); } else { /* Wait for it... */ jffs2_dbg(1, "Garbage collect thread is pid %d\n", tsk->pid); wait_for_completion(&c->gc_thread_start); ret = tsk->pid; } return ret; } void jffs2_stop_garbage_collect_thread(struct jffs2_sb_info *c) { int wait = 0; spin_lock(&c->erase_completion_lock); if (c->gc_task) { jffs2_dbg(1, "Killing GC task %d\n", c->gc_task->pid); send_sig(SIGKILL, c->gc_task, 1); wait = 1; } spin_unlock(&c->erase_completion_lock); if (wait) wait_for_completion(&c->gc_thread_exit); } static int jffs2_garbage_collect_thread(void *_c) { struct jffs2_sb_info *c = _c; sigset_t hupmask; siginitset(&hupmask, sigmask(SIGHUP)); allow_signal(SIGKILL); allow_signal(SIGSTOP); allow_signal(SIGHUP); c->gc_task = current; complete(&c->gc_thread_start); set_user_nice(current, 10); set_freezable(); for (;;) { sigprocmask(SIG_UNBLOCK, &hupmask, NULL); again: spin_lock(&c->erase_completion_lock); if (!jffs2_thread_should_wake(c)) { set_current_state (TASK_INTERRUPTIBLE); spin_unlock(&c->erase_completion_lock); jffs2_dbg(1, "%s(): sleeping...\n", __func__); schedule(); } else { spin_unlock(&c->erase_completion_lock); } /* Problem - immediately after bootup, the GCD spends a lot * of time in places like jffs2_kill_fragtree(); so much so * that userspace processes (like gdm and X) are starved * despite plenty of cond_resched()s and renicing. Yield() * doesn't help, either (presumably because userspace and GCD * are generally competing for a higher latency resource - * disk). * This forces the GCD to slow the hell down. Pulling an * inode in with read_inode() is much preferable to having * the GC thread get there first. */ schedule_timeout_interruptible(msecs_to_jiffies(50)); if (kthread_should_stop()) { jffs2_dbg(1, "%s(): kthread_stop() called\n", __func__); goto die; } /* Put_super will send a SIGKILL and then wait on the sem. 
*/ while (signal_pending(current) || freezing(current)) { unsigned long signr; if (try_to_freeze()) goto again; signr = kernel_dequeue_signal(); switch(signr) { case SIGSTOP: jffs2_dbg(1, "%s(): SIGSTOP received\n", __func__); kernel_signal_stop(); break; case SIGKILL: jffs2_dbg(1, "%s(): SIGKILL received\n", __func__); goto die; case SIGHUP: jffs2_dbg(1, "%s(): SIGHUP received\n", __func__); break; default: jffs2_dbg(1, "%s(): signal %ld received\n", __func__, signr); } } /* We don't want SIGHUP to interrupt us. STOP and KILL are OK though. */ sigprocmask(SIG_BLOCK, &hupmask, NULL); jffs2_dbg(1, "%s(): pass\n", __func__); if (jffs2_garbage_collect_pass(c) == -ENOSPC) { pr_notice("No space for garbage collection. Aborting GC thread\n"); goto die; } } die: spin_lock(&c->erase_completion_lock); c->gc_task = NULL; spin_unlock(&c->erase_completion_lock); kthread_complete_and_exit(&c->gc_thread_exit, 0); }
linux-master
fs/jffs2/background.c
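The GC thread in background.c is started with kthread_run(), announces readiness through the gc_thread_start completion, and is torn down by a signal followed by a wait on gc_thread_exit. The sketch below mirrors only that start/ready/stop handshake using POSIX threads so it can be compiled and run in userspace (gcc -pthread); it is an analogue of the lifecycle, not the kernel API.

/*
 * Illustrative sketch (not kernel code): the "start worker, wait until it is
 * ready, later ask it to stop and wait for it to exit" lifecycle from
 * background.c, re-expressed with pthreads as a userspace analogue.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  started = PTHREAD_COND_INITIALIZER;
static int thread_ready;
static atomic_int stop_requested;

static void *gc_worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	thread_ready = 1;                     /* analogue of complete(&gc_thread_start) */
	pthread_cond_signal(&started);
	pthread_mutex_unlock(&lock);

	while (!atomic_load(&stop_requested))
		usleep(1000);                 /* placeholder for one GC pass */
	return NULL;                          /* analogue of completing gc_thread_exit */
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, gc_worker, NULL);

	pthread_mutex_lock(&lock);            /* analogue of wait_for_completion(&gc_thread_start) */
	while (!thread_ready)
		pthread_cond_wait(&started, &lock);
	pthread_mutex_unlock(&lock);
	printf("worker is running\n");

	atomic_store(&stop_requested, 1);     /* analogue of signalling the GC task to die */
	pthread_join(tid, NULL);              /* analogue of wait_for_completion(&gc_thread_exit) */
	printf("worker has exited\n");
	return 0;
}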
/* * JFFS2 -- Journalling Flash File System, Version 2. * * Copyright © 2001-2007 Red Hat, Inc. * * Created by David Woodhouse <[email protected]> * * For licensing information, see the file 'LICENCE' in this directory. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/jffs2.h> #include "nodelist.h" /* These are initialised to NULL in the kernel startup code. If you're porting to other operating systems, beware */ static struct kmem_cache *full_dnode_slab; static struct kmem_cache *raw_dirent_slab; static struct kmem_cache *raw_inode_slab; static struct kmem_cache *tmp_dnode_info_slab; static struct kmem_cache *raw_node_ref_slab; static struct kmem_cache *node_frag_slab; static struct kmem_cache *inode_cache_slab; #ifdef CONFIG_JFFS2_FS_XATTR static struct kmem_cache *xattr_datum_cache; static struct kmem_cache *xattr_ref_cache; #endif int __init jffs2_create_slab_caches(void) { full_dnode_slab = kmem_cache_create("jffs2_full_dnode", sizeof(struct jffs2_full_dnode), 0, 0, NULL); if (!full_dnode_slab) goto err; raw_dirent_slab = kmem_cache_create("jffs2_raw_dirent", sizeof(struct jffs2_raw_dirent), 0, SLAB_HWCACHE_ALIGN, NULL); if (!raw_dirent_slab) goto err; raw_inode_slab = kmem_cache_create("jffs2_raw_inode", sizeof(struct jffs2_raw_inode), 0, SLAB_HWCACHE_ALIGN, NULL); if (!raw_inode_slab) goto err; tmp_dnode_info_slab = kmem_cache_create("jffs2_tmp_dnode", sizeof(struct jffs2_tmp_dnode_info), 0, 0, NULL); if (!tmp_dnode_info_slab) goto err; raw_node_ref_slab = kmem_cache_create("jffs2_refblock", sizeof(struct jffs2_raw_node_ref) * (REFS_PER_BLOCK + 1), 0, 0, NULL); if (!raw_node_ref_slab) goto err; node_frag_slab = kmem_cache_create("jffs2_node_frag", sizeof(struct jffs2_node_frag), 0, 0, NULL); if (!node_frag_slab) goto err; inode_cache_slab = kmem_cache_create("jffs2_inode_cache", sizeof(struct jffs2_inode_cache), 0, 0, NULL); if (!inode_cache_slab) goto err; #ifdef CONFIG_JFFS2_FS_XATTR xattr_datum_cache = kmem_cache_create("jffs2_xattr_datum", sizeof(struct jffs2_xattr_datum), 0, 0, NULL); if (!xattr_datum_cache) goto err; xattr_ref_cache = kmem_cache_create("jffs2_xattr_ref", sizeof(struct jffs2_xattr_ref), 0, 0, NULL); if (!xattr_ref_cache) goto err; #endif return 0; err: jffs2_destroy_slab_caches(); return -ENOMEM; } void jffs2_destroy_slab_caches(void) { kmem_cache_destroy(full_dnode_slab); kmem_cache_destroy(raw_dirent_slab); kmem_cache_destroy(raw_inode_slab); kmem_cache_destroy(tmp_dnode_info_slab); kmem_cache_destroy(raw_node_ref_slab); kmem_cache_destroy(node_frag_slab); kmem_cache_destroy(inode_cache_slab); #ifdef CONFIG_JFFS2_FS_XATTR kmem_cache_destroy(xattr_datum_cache); kmem_cache_destroy(xattr_ref_cache); #endif } struct jffs2_full_dirent *jffs2_alloc_full_dirent(int namesize) { struct jffs2_full_dirent *ret; ret = kmalloc(sizeof(struct jffs2_full_dirent) + namesize, GFP_KERNEL); dbg_memalloc("%p\n", ret); return ret; } void jffs2_free_full_dirent(struct jffs2_full_dirent *x) { dbg_memalloc("%p\n", x); kfree(x); } struct jffs2_full_dnode *jffs2_alloc_full_dnode(void) { struct jffs2_full_dnode *ret; ret = kmem_cache_alloc(full_dnode_slab, GFP_KERNEL); dbg_memalloc("%p\n", ret); return ret; } void jffs2_free_full_dnode(struct jffs2_full_dnode *x) { dbg_memalloc("%p\n", x); kmem_cache_free(full_dnode_slab, x); } struct jffs2_raw_dirent *jffs2_alloc_raw_dirent(void) { struct jffs2_raw_dirent *ret; ret = kmem_cache_alloc(raw_dirent_slab, GFP_KERNEL); dbg_memalloc("%p\n", ret); return 
ret; } void jffs2_free_raw_dirent(struct jffs2_raw_dirent *x) { dbg_memalloc("%p\n", x); kmem_cache_free(raw_dirent_slab, x); } struct jffs2_raw_inode *jffs2_alloc_raw_inode(void) { struct jffs2_raw_inode *ret; ret = kmem_cache_alloc(raw_inode_slab, GFP_KERNEL); dbg_memalloc("%p\n", ret); return ret; } void jffs2_free_raw_inode(struct jffs2_raw_inode *x) { dbg_memalloc("%p\n", x); kmem_cache_free(raw_inode_slab, x); } struct jffs2_tmp_dnode_info *jffs2_alloc_tmp_dnode_info(void) { struct jffs2_tmp_dnode_info *ret; ret = kmem_cache_alloc(tmp_dnode_info_slab, GFP_KERNEL); dbg_memalloc("%p\n", ret); return ret; } void jffs2_free_tmp_dnode_info(struct jffs2_tmp_dnode_info *x) { dbg_memalloc("%p\n", x); kmem_cache_free(tmp_dnode_info_slab, x); } static struct jffs2_raw_node_ref *jffs2_alloc_refblock(void) { struct jffs2_raw_node_ref *ret; ret = kmem_cache_alloc(raw_node_ref_slab, GFP_KERNEL); if (ret) { int i = 0; for (i=0; i < REFS_PER_BLOCK; i++) { ret[i].flash_offset = REF_EMPTY_NODE; ret[i].next_in_ino = NULL; } ret[i].flash_offset = REF_LINK_NODE; ret[i].next_in_ino = NULL; } return ret; } int jffs2_prealloc_raw_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int nr) { struct jffs2_raw_node_ref **p, *ref; int i = nr; dbg_memalloc("%d\n", nr); p = &jeb->last_node; ref = *p; dbg_memalloc("Reserving %d refs for block @0x%08x\n", nr, jeb->offset); /* If jeb->last_node is really a valid node then skip over it */ if (ref && ref->flash_offset != REF_EMPTY_NODE) ref++; while (i) { if (!ref) { dbg_memalloc("Allocating new refblock linked from %p\n", p); ref = *p = jffs2_alloc_refblock(); if (!ref) return -ENOMEM; } if (ref->flash_offset == REF_LINK_NODE) { p = &ref->next_in_ino; ref = *p; continue; } i--; ref++; } jeb->allocated_refs = nr; dbg_memalloc("Reserved %d refs for block @0x%08x, last_node is %p (%08x,%p)\n", nr, jeb->offset, jeb->last_node, jeb->last_node->flash_offset, jeb->last_node->next_in_ino); return 0; } void jffs2_free_refblock(struct jffs2_raw_node_ref *x) { dbg_memalloc("%p\n", x); kmem_cache_free(raw_node_ref_slab, x); } struct jffs2_node_frag *jffs2_alloc_node_frag(void) { struct jffs2_node_frag *ret; ret = kmem_cache_alloc(node_frag_slab, GFP_KERNEL); dbg_memalloc("%p\n", ret); return ret; } void jffs2_free_node_frag(struct jffs2_node_frag *x) { dbg_memalloc("%p\n", x); kmem_cache_free(node_frag_slab, x); } struct jffs2_inode_cache *jffs2_alloc_inode_cache(void) { struct jffs2_inode_cache *ret; ret = kmem_cache_alloc(inode_cache_slab, GFP_KERNEL); dbg_memalloc("%p\n", ret); return ret; } void jffs2_free_inode_cache(struct jffs2_inode_cache *x) { dbg_memalloc("%p\n", x); kmem_cache_free(inode_cache_slab, x); } #ifdef CONFIG_JFFS2_FS_XATTR struct jffs2_xattr_datum *jffs2_alloc_xattr_datum(void) { struct jffs2_xattr_datum *xd; xd = kmem_cache_zalloc(xattr_datum_cache, GFP_KERNEL); dbg_memalloc("%p\n", xd); if (!xd) return NULL; xd->class = RAWNODE_CLASS_XATTR_DATUM; xd->node = (void *)xd; INIT_LIST_HEAD(&xd->xindex); return xd; } void jffs2_free_xattr_datum(struct jffs2_xattr_datum *xd) { dbg_memalloc("%p\n", xd); kmem_cache_free(xattr_datum_cache, xd); } struct jffs2_xattr_ref *jffs2_alloc_xattr_ref(void) { struct jffs2_xattr_ref *ref; ref = kmem_cache_zalloc(xattr_ref_cache, GFP_KERNEL); dbg_memalloc("%p\n", ref); if (!ref) return NULL; ref->class = RAWNODE_CLASS_XATTR_REF; ref->node = (void *)ref; return ref; } void jffs2_free_xattr_ref(struct jffs2_xattr_ref *ref) { dbg_memalloc("%p\n", ref); kmem_cache_free(xattr_ref_cache, ref); } #endif
linux-master
fs/jffs2/malloc.c
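jffs2_alloc_refblock() in the malloc.c record allocates raw node refs in arrays of REFS_PER_BLOCK entries plus one trailing slot marked REF_LINK_NODE, whose pointer chains to the next block; jffs2_prealloc_raw_node_refs() walks those chains. The userspace sketch below shows the same "array block with a link slot" idea; the names, sentinel values, and block size are assumptions chosen only to mirror the convention.

/*
 * Illustrative sketch (not kernel code): ref blocks allocated as arrays, with
 * the last slot acting purely as a link to the next block.
 */
#include <stdio.h>
#include <stdlib.h>

#define REFS_PER_BLOCK 4                   /* small on purpose; the kernel uses more */
#define SLOT_EMPTY     ((void *)0)
#define SLOT_LINK      ((void *)-1)        /* hypothetical sentinel for the link slot */

struct ref {
	void *payload;                     /* SLOT_EMPTY, SLOT_LINK, or real data */
	struct ref *next;                  /* used only by the link slot */
};

static struct ref *alloc_refblock(void)
{
	/* one extra slot at the end serves as the link to the next block */
	struct ref *blk = calloc(REFS_PER_BLOCK + 1, sizeof(*blk));
	int i;

	if (!blk)
		return NULL;
	for (i = 0; i < REFS_PER_BLOCK; i++)
		blk[i].payload = SLOT_EMPTY;
	blk[REFS_PER_BLOCK].payload = SLOT_LINK;
	return blk;
}

int main(void)
{
	struct ref *first = alloc_refblock();
	struct ref *second = alloc_refblock();

	if (!first || !second)
		return 1;
	first[REFS_PER_BLOCK].next = second;   /* chain the blocks through the link slot */

	printf("block 1 has %d usable slots, then links to %p\n",
	       REFS_PER_BLOCK, (void *)first[REFS_PER_BLOCK].next);
	free(second);
	free(first);
	return 0;
}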
/* * JFFS2 -- Journalling Flash File System, Version 2. * * Copyright © 2006 NEC Corporation * * Created by KaiGai Kohei <[email protected]> * * For licensing information, see the file 'LICENCE' in this directory. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/sched.h> #include <linux/time.h> #include <linux/crc32.h> #include <linux/jffs2.h> #include <linux/xattr.h> #include <linux/posix_acl_xattr.h> #include <linux/mtd/mtd.h> #include "nodelist.h" static size_t jffs2_acl_size(int count) { if (count <= 4) { return sizeof(struct jffs2_acl_header) + count * sizeof(struct jffs2_acl_entry_short); } else { return sizeof(struct jffs2_acl_header) + 4 * sizeof(struct jffs2_acl_entry_short) + (count - 4) * sizeof(struct jffs2_acl_entry); } } static int jffs2_acl_count(size_t size) { size_t s; size -= sizeof(struct jffs2_acl_header); if (size < 4 * sizeof(struct jffs2_acl_entry_short)) { if (size % sizeof(struct jffs2_acl_entry_short)) return -1; return size / sizeof(struct jffs2_acl_entry_short); } else { s = size - 4 * sizeof(struct jffs2_acl_entry_short); if (s % sizeof(struct jffs2_acl_entry)) return -1; return s / sizeof(struct jffs2_acl_entry) + 4; } } static struct posix_acl *jffs2_acl_from_medium(void *value, size_t size) { void *end = value + size; struct jffs2_acl_header *header = value; struct jffs2_acl_entry *entry; struct posix_acl *acl; uint32_t ver; int i, count; if (!value) return NULL; if (size < sizeof(struct jffs2_acl_header)) return ERR_PTR(-EINVAL); ver = je32_to_cpu(header->a_version); if (ver != JFFS2_ACL_VERSION) { JFFS2_WARNING("Invalid ACL version. (=%u)\n", ver); return ERR_PTR(-EINVAL); } value += sizeof(struct jffs2_acl_header); count = jffs2_acl_count(size); if (count < 0) return ERR_PTR(-EINVAL); if (count == 0) return NULL; acl = posix_acl_alloc(count, GFP_KERNEL); if (!acl) return ERR_PTR(-ENOMEM); for (i=0; i < count; i++) { entry = value; if (value + sizeof(struct jffs2_acl_entry_short) > end) goto fail; acl->a_entries[i].e_tag = je16_to_cpu(entry->e_tag); acl->a_entries[i].e_perm = je16_to_cpu(entry->e_perm); switch (acl->a_entries[i].e_tag) { case ACL_USER_OBJ: case ACL_GROUP_OBJ: case ACL_MASK: case ACL_OTHER: value += sizeof(struct jffs2_acl_entry_short); break; case ACL_USER: value += sizeof(struct jffs2_acl_entry); if (value > end) goto fail; acl->a_entries[i].e_uid = make_kuid(&init_user_ns, je32_to_cpu(entry->e_id)); break; case ACL_GROUP: value += sizeof(struct jffs2_acl_entry); if (value > end) goto fail; acl->a_entries[i].e_gid = make_kgid(&init_user_ns, je32_to_cpu(entry->e_id)); break; default: goto fail; } } if (value != end) goto fail; return acl; fail: posix_acl_release(acl); return ERR_PTR(-EINVAL); } static void *jffs2_acl_to_medium(const struct posix_acl *acl, size_t *size) { struct jffs2_acl_header *header; struct jffs2_acl_entry *entry; void *e; size_t i; *size = jffs2_acl_size(acl->a_count); header = kmalloc(struct_size(header, a_entries, acl->a_count), GFP_KERNEL); if (!header) return ERR_PTR(-ENOMEM); header->a_version = cpu_to_je32(JFFS2_ACL_VERSION); e = header + 1; for (i=0; i < acl->a_count; i++) { const struct posix_acl_entry *acl_e = &acl->a_entries[i]; entry = e; entry->e_tag = cpu_to_je16(acl_e->e_tag); entry->e_perm = cpu_to_je16(acl_e->e_perm); switch(acl_e->e_tag) { case ACL_USER: entry->e_id = cpu_to_je32( from_kuid(&init_user_ns, acl_e->e_uid)); e += sizeof(struct jffs2_acl_entry); break; case ACL_GROUP: entry->e_id = cpu_to_je32( 
from_kgid(&init_user_ns, acl_e->e_gid)); e += sizeof(struct jffs2_acl_entry); break; case ACL_USER_OBJ: case ACL_GROUP_OBJ: case ACL_MASK: case ACL_OTHER: e += sizeof(struct jffs2_acl_entry_short); break; default: goto fail; } } return header; fail: kfree(header); return ERR_PTR(-EINVAL); } struct posix_acl *jffs2_get_acl(struct inode *inode, int type, bool rcu) { struct posix_acl *acl; char *value = NULL; int rc, xprefix; if (rcu) return ERR_PTR(-ECHILD); switch (type) { case ACL_TYPE_ACCESS: xprefix = JFFS2_XPREFIX_ACL_ACCESS; break; case ACL_TYPE_DEFAULT: xprefix = JFFS2_XPREFIX_ACL_DEFAULT; break; default: BUG(); } rc = do_jffs2_getxattr(inode, xprefix, "", NULL, 0); if (rc > 0) { value = kmalloc(rc, GFP_KERNEL); if (!value) return ERR_PTR(-ENOMEM); rc = do_jffs2_getxattr(inode, xprefix, "", value, rc); } if (rc > 0) { acl = jffs2_acl_from_medium(value, rc); } else if (rc == -ENODATA || rc == -ENOSYS) { acl = NULL; } else { acl = ERR_PTR(rc); } kfree(value); return acl; } static int __jffs2_set_acl(struct inode *inode, int xprefix, struct posix_acl *acl) { char *value = NULL; size_t size = 0; int rc; if (acl) { value = jffs2_acl_to_medium(acl, &size); if (IS_ERR(value)) return PTR_ERR(value); } rc = do_jffs2_setxattr(inode, xprefix, "", value, size, 0); if (!value && rc == -ENODATA) rc = 0; kfree(value); return rc; } int jffs2_set_acl(struct mnt_idmap *idmap, struct dentry *dentry, struct posix_acl *acl, int type) { int rc, xprefix; struct inode *inode = d_inode(dentry); switch (type) { case ACL_TYPE_ACCESS: xprefix = JFFS2_XPREFIX_ACL_ACCESS; if (acl) { umode_t mode; rc = posix_acl_update_mode(&nop_mnt_idmap, inode, &mode, &acl); if (rc) return rc; if (inode->i_mode != mode) { struct iattr attr; attr.ia_valid = ATTR_MODE | ATTR_CTIME; attr.ia_mode = mode; attr.ia_ctime = current_time(inode); rc = jffs2_do_setattr(inode, &attr); if (rc < 0) return rc; } } break; case ACL_TYPE_DEFAULT: xprefix = JFFS2_XPREFIX_ACL_DEFAULT; if (!S_ISDIR(inode->i_mode)) return acl ? -EACCES : 0; break; default: return -EINVAL; } rc = __jffs2_set_acl(inode, xprefix, acl); if (!rc) set_cached_acl(inode, type, acl); return rc; } int jffs2_init_acl_pre(struct inode *dir_i, struct inode *inode, umode_t *i_mode) { struct posix_acl *default_acl, *acl; int rc; cache_no_acl(inode); rc = posix_acl_create(dir_i, i_mode, &default_acl, &acl); if (rc) return rc; if (default_acl) { set_cached_acl(inode, ACL_TYPE_DEFAULT, default_acl); posix_acl_release(default_acl); } if (acl) { set_cached_acl(inode, ACL_TYPE_ACCESS, acl); posix_acl_release(acl); } return 0; } int jffs2_init_acl_post(struct inode *inode) { int rc; if (inode->i_default_acl) { rc = __jffs2_set_acl(inode, JFFS2_XPREFIX_ACL_DEFAULT, inode->i_default_acl); if (rc) return rc; } if (inode->i_acl) { rc = __jffs2_set_acl(inode, JFFS2_XPREFIX_ACL_ACCESS, inode->i_acl); if (rc) return rc; } return 0; }
linux-master
fs/jffs2/acl.c
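jffs2_acl_size() and jffs2_acl_count() in the acl.c record are inverses: the first four entries are stored in the short on-medium form and any further entries in the long form. The runnable sketch below reproduces that arithmetic with locally defined stand-in structs so the round trip can be checked; the real on-medium layouts come from the jffs2 headers, not from these definitions.

/*
 * Illustrative sketch (not kernel code): the size/count round trip used by
 * jffs2_acl_size() and jffs2_acl_count(), with stand-in struct layouts.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct acl_header      { uint32_t a_version; };
struct acl_entry_short { uint16_t e_tag, e_perm; };
struct acl_entry       { uint16_t e_tag, e_perm; uint32_t e_id; };

static size_t acl_size(int count)
{
	if (count <= 4)
		return sizeof(struct acl_header) +
		       count * sizeof(struct acl_entry_short);
	return sizeof(struct acl_header) +
	       4 * sizeof(struct acl_entry_short) +
	       (count - 4) * sizeof(struct acl_entry);
}

static int acl_count(size_t size)
{
	size_t s;

	size -= sizeof(struct acl_header);
	if (size < 4 * sizeof(struct acl_entry_short)) {
		if (size % sizeof(struct acl_entry_short))
			return -1;
		return size / sizeof(struct acl_entry_short);
	}
	s = size - 4 * sizeof(struct acl_entry_short);
	if (s % sizeof(struct acl_entry))
		return -1;
	return s / sizeof(struct acl_entry) + 4;
}

int main(void)
{
	int count;

	for (count = 0; count <= 8; count++)
		assert(acl_count(acl_size(count)) == count);
	printf("size for 6 entries: %zu bytes\n", acl_size(6));
	return 0;
}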
/* * JFFS2 -- Journalling Flash File System, Version 2. * * Copyright © 2001-2007 Red Hat, Inc. * * Created by David Woodhouse <[email protected]> * * For licensing information, see the file 'LICENCE' in this directory. * */ #include "nodelist.h" const struct inode_operations jffs2_symlink_inode_operations = { .get_link = simple_get_link, .setattr = jffs2_setattr, .listxattr = jffs2_listxattr, };
linux-master
fs/jffs2/symlink.c
/* * JFFS2 -- Journalling Flash File System, Version 2. * * Copyright © 2001-2007 Red Hat, Inc. * Copyright © 2004-2010 David Woodhouse <[email protected]> * * Created by David Woodhouse <[email protected]> * * For licensing information, see the file 'LICENCE' in this directory. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/fs.h> #include <linux/time.h> #include <linux/pagemap.h> #include <linux/highmem.h> #include <linux/crc32.h> #include <linux/jffs2.h> #include "nodelist.h" static int jffs2_write_end(struct file *filp, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *pg, void *fsdata); static int jffs2_write_begin(struct file *filp, struct address_space *mapping, loff_t pos, unsigned len, struct page **pagep, void **fsdata); static int jffs2_read_folio(struct file *filp, struct folio *folio); int jffs2_fsync(struct file *filp, loff_t start, loff_t end, int datasync) { struct inode *inode = filp->f_mapping->host; struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); int ret; ret = file_write_and_wait_range(filp, start, end); if (ret) return ret; inode_lock(inode); /* Trigger GC to flush any pending writes for this inode */ jffs2_flush_wbuf_gc(c, inode->i_ino); inode_unlock(inode); return 0; } const struct file_operations jffs2_file_operations = { .llseek = generic_file_llseek, .open = generic_file_open, .read_iter = generic_file_read_iter, .write_iter = generic_file_write_iter, .unlocked_ioctl=jffs2_ioctl, .mmap = generic_file_readonly_mmap, .fsync = jffs2_fsync, .splice_read = filemap_splice_read, .splice_write = iter_file_splice_write, }; /* jffs2_file_inode_operations */ const struct inode_operations jffs2_file_inode_operations = { .get_inode_acl = jffs2_get_acl, .set_acl = jffs2_set_acl, .setattr = jffs2_setattr, .listxattr = jffs2_listxattr, }; const struct address_space_operations jffs2_file_address_operations = { .read_folio = jffs2_read_folio, .write_begin = jffs2_write_begin, .write_end = jffs2_write_end, }; static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg) { struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); unsigned char *pg_buf; int ret; jffs2_dbg(2, "%s(): ino #%lu, page at offset 0x%lx\n", __func__, inode->i_ino, pg->index << PAGE_SHIFT); BUG_ON(!PageLocked(pg)); pg_buf = kmap(pg); /* FIXME: Can kmap fail? 
*/ ret = jffs2_read_inode_range(c, f, pg_buf, pg->index << PAGE_SHIFT, PAGE_SIZE); if (ret) { ClearPageUptodate(pg); SetPageError(pg); } else { SetPageUptodate(pg); ClearPageError(pg); } flush_dcache_page(pg); kunmap(pg); jffs2_dbg(2, "readpage finished\n"); return ret; } int __jffs2_read_folio(struct file *file, struct folio *folio) { int ret = jffs2_do_readpage_nolock(folio->mapping->host, &folio->page); folio_unlock(folio); return ret; } static int jffs2_read_folio(struct file *file, struct folio *folio) { struct jffs2_inode_info *f = JFFS2_INODE_INFO(folio->mapping->host); int ret; mutex_lock(&f->sem); ret = __jffs2_read_folio(file, folio); mutex_unlock(&f->sem); return ret; } static int jffs2_write_begin(struct file *filp, struct address_space *mapping, loff_t pos, unsigned len, struct page **pagep, void **fsdata) { struct page *pg; struct inode *inode = mapping->host; struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); pgoff_t index = pos >> PAGE_SHIFT; int ret = 0; jffs2_dbg(1, "%s()\n", __func__); if (pos > inode->i_size) { /* Make new hole frag from old EOF to new position */ struct jffs2_raw_inode ri; struct jffs2_full_dnode *fn; uint32_t alloc_len; jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new position\n", (unsigned int)inode->i_size, (uint32_t)pos); ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len, ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); if (ret) goto out_err; mutex_lock(&f->sem); memset(&ri, 0, sizeof(ri)); ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE); ri.totlen = cpu_to_je32(sizeof(ri)); ri.hdr_crc = cpu_to_je32(crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4)); ri.ino = cpu_to_je32(f->inocache->ino); ri.version = cpu_to_je32(++f->highest_version); ri.mode = cpu_to_jemode(inode->i_mode); ri.uid = cpu_to_je16(i_uid_read(inode)); ri.gid = cpu_to_je16(i_gid_read(inode)); ri.isize = cpu_to_je32((uint32_t)pos); ri.atime = ri.ctime = ri.mtime = cpu_to_je32(JFFS2_NOW()); ri.offset = cpu_to_je32(inode->i_size); ri.dsize = cpu_to_je32((uint32_t)pos - inode->i_size); ri.csize = cpu_to_je32(0); ri.compr = JFFS2_COMPR_ZERO; ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8)); ri.data_crc = cpu_to_je32(0); fn = jffs2_write_dnode(c, f, &ri, NULL, 0, ALLOC_NORMAL); if (IS_ERR(fn)) { ret = PTR_ERR(fn); jffs2_complete_reservation(c); mutex_unlock(&f->sem); goto out_err; } ret = jffs2_add_full_dnode_to_inode(c, f, fn); if (f->metadata) { jffs2_mark_node_obsolete(c, f->metadata->raw); jffs2_free_full_dnode(f->metadata); f->metadata = NULL; } if (ret) { jffs2_dbg(1, "Eep. add_full_dnode_to_inode() failed in write_begin, returned %d\n", ret); jffs2_mark_node_obsolete(c, fn->raw); jffs2_free_full_dnode(fn); jffs2_complete_reservation(c); mutex_unlock(&f->sem); goto out_err; } jffs2_complete_reservation(c); inode->i_size = pos; mutex_unlock(&f->sem); } /* * While getting a page and reading data in, lock c->alloc_sem until * the page is Uptodate. Otherwise GC task may attempt to read the same * page in read_cache_page(), which causes a deadlock. */ mutex_lock(&c->alloc_sem); pg = grab_cache_page_write_begin(mapping, index); if (!pg) { ret = -ENOMEM; goto release_sem; } *pagep = pg; /* * Read in the page if it wasn't already present. Cannot optimize away * the whole page write case until jffs2_write_end can handle the * case of a short-copy. 
*/ if (!PageUptodate(pg)) { mutex_lock(&f->sem); ret = jffs2_do_readpage_nolock(inode, pg); mutex_unlock(&f->sem); if (ret) { unlock_page(pg); put_page(pg); goto release_sem; } } jffs2_dbg(1, "end write_begin(). pg->flags %lx\n", pg->flags); release_sem: mutex_unlock(&c->alloc_sem); out_err: return ret; } static int jffs2_write_end(struct file *filp, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *pg, void *fsdata) { /* Actually commit the write from the page cache page we're looking at. * For now, we write the full page out each time. It sucks, but it's simple */ struct inode *inode = mapping->host; struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); struct jffs2_raw_inode *ri; unsigned start = pos & (PAGE_SIZE - 1); unsigned end = start + copied; unsigned aligned_start = start & ~3; int ret = 0; uint32_t writtenlen = 0; jffs2_dbg(1, "%s(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n", __func__, inode->i_ino, pg->index << PAGE_SHIFT, start, end, pg->flags); /* We need to avoid deadlock with page_cache_read() in jffs2_garbage_collect_pass(). So the page must be up to date to prevent page_cache_read() from trying to re-lock it. */ BUG_ON(!PageUptodate(pg)); if (end == PAGE_SIZE) { /* When writing out the end of a page, write out the _whole_ page. This helps to reduce the number of nodes in files which have many short writes, like syslog files. */ aligned_start = 0; } ri = jffs2_alloc_raw_inode(); if (!ri) { jffs2_dbg(1, "%s(): Allocation of raw inode failed\n", __func__); unlock_page(pg); put_page(pg); return -ENOMEM; } /* Set the fields that the generic jffs2_write_inode_range() code can't find */ ri->ino = cpu_to_je32(inode->i_ino); ri->mode = cpu_to_jemode(inode->i_mode); ri->uid = cpu_to_je16(i_uid_read(inode)); ri->gid = cpu_to_je16(i_gid_read(inode)); ri->isize = cpu_to_je32((uint32_t)inode->i_size); ri->atime = ri->ctime = ri->mtime = cpu_to_je32(JFFS2_NOW()); /* In 2.4, it was already kmapped by generic_file_write(). Doesn't hurt to do it again. The alternative is ifdefs, which are ugly. */ kmap(pg); ret = jffs2_write_inode_range(c, f, ri, page_address(pg) + aligned_start, (pg->index << PAGE_SHIFT) + aligned_start, end - aligned_start, &writtenlen); kunmap(pg); if (ret) { /* There was an error writing. */ SetPageError(pg); } /* Adjust writtenlen for the padding we did, so we don't confuse our caller */ writtenlen -= min(writtenlen, (start - aligned_start)); if (writtenlen) { if (inode->i_size < pos + writtenlen) { inode->i_size = pos + writtenlen; inode->i_blocks = (inode->i_size + 511) >> 9; inode->i_mtime = inode_set_ctime_to_ts(inode, ITIME(je32_to_cpu(ri->ctime))); } } jffs2_free_raw_inode(ri); if (start+writtenlen < end) { /* generic_file_write has written more to the page cache than we've actually written to the medium. Mark the page !Uptodate so that it gets reread */ jffs2_dbg(1, "%s(): Not all bytes written. Marking page !uptodate\n", __func__); SetPageError(pg); ClearPageUptodate(pg); } jffs2_dbg(1, "%s() returning %d\n", __func__, writtenlen > 0 ? writtenlen : ret); unlock_page(pg); put_page(pg); return writtenlen > 0 ? writtenlen : ret; }
linux-master
fs/jffs2/file.c
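jffs2_write_end() in the file.c record writes from a 4-byte-aligned offset within the page (aligned_start = start & ~3) and afterwards subtracts the padding from writtenlen so the caller only sees the bytes it asked to write. The sketch below walks that arithmetic with made-up numbers; the assumed PAGE_SIZE, the sample position, and the pretend flash write are illustrative assumptions only.

/*
 * Illustrative sketch (not kernel code): the start-alignment and writtenlen
 * adjustment performed by jffs2_write_end(). All sample values are made up.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u                            /* assumed page size for the example */

static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

int main(void)
{
	uint64_t pos = 8195;                       /* file position of this write */
	uint32_t copied = 100;                     /* bytes the caller copied into the page */

	uint32_t start = pos & (PAGE_SIZE - 1);    /* offset within the page: 3 */
	uint32_t end = start + copied;             /* 103 */
	uint32_t aligned_start = start & ~3u;      /* flash writes begin 4-byte aligned: 0 */

	if (end == PAGE_SIZE)
		aligned_start = 0;                 /* at page end, write out the whole page */

	/* Pretend the medium accepted everything from aligned_start to end. */
	uint32_t writtenlen = end - aligned_start; /* 103 */

	/* Hide the padding bytes from the caller again. */
	writtenlen -= min_u32(writtenlen, start - aligned_start);

	printf("start=%u aligned_start=%u reported writtenlen=%u\n",
	       start, aligned_start, writtenlen);  /* 3, 0, 100 */
	return 0;
}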
/* * JFFS2 -- Journalling Flash File System, Version 2. * * Copyright © 2001-2007 Red Hat, Inc. * * Created by David Woodhouse <[email protected]> * * For licensing information, see the file 'LICENCE' in this directory. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/crc32.h> #include <linux/pagemap.h> #include <linux/mtd/mtd.h> #include <linux/compiler.h> #include "nodelist.h" /* * Check the data CRC of the node. * * Returns: 0 if the data CRC is correct; * 1 - if incorrect; * error code if an error occurred. */ static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn) { struct jffs2_raw_node_ref *ref = tn->fn->raw; int err = 0, pointed = 0; struct jffs2_eraseblock *jeb; unsigned char *buffer; uint32_t crc, ofs, len; size_t retlen; BUG_ON(tn->csize == 0); /* Calculate how many bytes were already checked */ ofs = ref_offset(ref) + sizeof(struct jffs2_raw_inode); len = tn->csize; if (jffs2_is_writebuffered(c)) { int adj = ofs % c->wbuf_pagesize; if (likely(adj)) adj = c->wbuf_pagesize - adj; if (adj >= tn->csize) { dbg_readinode("no need to check node at %#08x, data length %u, data starts at %#08x - it has already been checked.\n", ref_offset(ref), tn->csize, ofs); goto adj_acc; } ofs += adj; len -= adj; } dbg_readinode("check node at %#08x, data length %u, partial CRC %#08x, correct CRC %#08x, data starts at %#08x, start checking from %#08x - %u bytes.\n", ref_offset(ref), tn->csize, tn->partial_crc, tn->data_crc, ofs - len, ofs, len); #ifndef __ECOS /* TODO: instead, incapsulate point() stuff to jffs2_flash_read(), * adding and jffs2_flash_read_end() interface. */ err = mtd_point(c->mtd, ofs, len, &retlen, (void **)&buffer, NULL); if (!err && retlen < len) { JFFS2_WARNING("MTD point returned len too short: %zu instead of %u.\n", retlen, tn->csize); mtd_unpoint(c->mtd, ofs, retlen); } else if (err) { if (err != -EOPNOTSUPP) JFFS2_WARNING("MTD point failed: error code %d.\n", err); } else pointed = 1; /* succefully pointed to device */ #endif if (!pointed) { buffer = kmalloc(len, GFP_KERNEL); if (unlikely(!buffer)) return -ENOMEM; /* TODO: this is very frequent pattern, make it a separate * routine */ err = jffs2_flash_read(c, ofs, len, &retlen, buffer); if (err) { JFFS2_ERROR("can not read %d bytes from 0x%08x, error code: %d.\n", len, ofs, err); goto free_out; } if (retlen != len) { JFFS2_ERROR("short read at %#08x: %zd instead of %d.\n", ofs, retlen, len); err = -EIO; goto free_out; } } /* Continue calculating CRC */ crc = crc32(tn->partial_crc, buffer, len); if(!pointed) kfree(buffer); #ifndef __ECOS else mtd_unpoint(c->mtd, ofs, len); #endif if (crc != tn->data_crc) { JFFS2_NOTICE("wrong data CRC in data node at 0x%08x: read %#08x, calculated %#08x.\n", ref_offset(ref), tn->data_crc, crc); return 1; } adj_acc: jeb = &c->blocks[ref->flash_offset / c->sector_size]; len = ref_totlen(c, jeb, ref); /* If it should be REF_NORMAL, it'll get marked as such when we build the fragtree, shortly. No need to worry about GC moving it while it's marked REF_PRISTINE -- GC won't happen till we've finished checking every inode anyway. */ ref->flash_offset |= REF_PRISTINE; /* * Mark the node as having been checked and fix the * accounting accordingly. 
*/ spin_lock(&c->erase_completion_lock); jeb->used_size += len; jeb->unchecked_size -= len; c->used_size += len; c->unchecked_size -= len; jffs2_dbg_acct_paranoia_check_nolock(c, jeb); spin_unlock(&c->erase_completion_lock); return 0; free_out: if(!pointed) kfree(buffer); #ifndef __ECOS else mtd_unpoint(c->mtd, ofs, len); #endif return err; } /* * Helper function for jffs2_add_older_frag_to_fragtree(). * * Checks the node if we are in the checking stage. */ static int check_tn_node(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn) { int ret; BUG_ON(ref_obsolete(tn->fn->raw)); /* We only check the data CRC of unchecked nodes */ if (ref_flags(tn->fn->raw) != REF_UNCHECKED) return 0; dbg_readinode("check node %#04x-%#04x, phys offs %#08x\n", tn->fn->ofs, tn->fn->ofs + tn->fn->size, ref_offset(tn->fn->raw)); ret = check_node_data(c, tn); if (unlikely(ret < 0)) { JFFS2_ERROR("check_node_data() returned error: %d.\n", ret); } else if (unlikely(ret > 0)) { dbg_readinode("CRC error, mark it obsolete.\n"); jffs2_mark_node_obsolete(c, tn->fn->raw); } return ret; } static struct jffs2_tmp_dnode_info *jffs2_lookup_tn(struct rb_root *tn_root, uint32_t offset) { struct rb_node *next; struct jffs2_tmp_dnode_info *tn = NULL; dbg_readinode("root %p, offset %d\n", tn_root, offset); next = tn_root->rb_node; while (next) { tn = rb_entry(next, struct jffs2_tmp_dnode_info, rb); if (tn->fn->ofs < offset) next = tn->rb.rb_right; else if (tn->fn->ofs >= offset) next = tn->rb.rb_left; else break; } return tn; } static void jffs2_kill_tn(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn) { jffs2_mark_node_obsolete(c, tn->fn->raw); jffs2_free_full_dnode(tn->fn); jffs2_free_tmp_dnode_info(tn); } /* * This function is used when we read an inode. Data nodes arrive in * arbitrary order -- they may be older or newer than the nodes which * are already in the tree. Where overlaps occur, the older node can * be discarded as long as the newer passes the CRC check. We don't * bother to keep track of holes in this rbtree, and neither do we deal * with frags -- we can have multiple entries starting at the same * offset, and the one with the smallest length will come first in the * ordering. * * Returns 0 if the node was handled (including marking it obsolete) * < 0 an if error occurred */ static int jffs2_add_tn_to_tree(struct jffs2_sb_info *c, struct jffs2_readinode_info *rii, struct jffs2_tmp_dnode_info *tn) { uint32_t fn_end = tn->fn->ofs + tn->fn->size; struct jffs2_tmp_dnode_info *this, *ptn; dbg_readinode("insert fragment %#04x-%#04x, ver %u at %08x\n", tn->fn->ofs, fn_end, tn->version, ref_offset(tn->fn->raw)); /* If a node has zero dsize, we only have to keep it if it might be the node with highest version -- i.e. the one which will end up as f->metadata. Note that such nodes won't be REF_UNCHECKED since there are no data to check anyway. 
*/ if (!tn->fn->size) { if (rii->mdata_tn) { if (rii->mdata_tn->version < tn->version) { /* We had a candidate mdata node already */ dbg_readinode("kill old mdata with ver %d\n", rii->mdata_tn->version); jffs2_kill_tn(c, rii->mdata_tn); } else { dbg_readinode("kill new mdata with ver %d (older than existing %d\n", tn->version, rii->mdata_tn->version); jffs2_kill_tn(c, tn); return 0; } } rii->mdata_tn = tn; dbg_readinode("keep new mdata with ver %d\n", tn->version); return 0; } /* Find the earliest node which _may_ be relevant to this one */ this = jffs2_lookup_tn(&rii->tn_root, tn->fn->ofs); if (this) { /* If the node is coincident with another at a lower address, back up until the other node is found. It may be relevant */ while (this->overlapped) { ptn = tn_prev(this); if (!ptn) { /* * We killed a node which set the overlapped * flags during the scan. Fix it up. */ this->overlapped = 0; break; } this = ptn; } dbg_readinode("'this' found %#04x-%#04x (%s)\n", this->fn->ofs, this->fn->ofs + this->fn->size, this->fn ? "data" : "hole"); } while (this) { if (this->fn->ofs > fn_end) break; dbg_readinode("Ponder this ver %d, 0x%x-0x%x\n", this->version, this->fn->ofs, this->fn->size); if (this->version == tn->version) { /* Version number collision means REF_PRISTINE GC. Accept either of them as long as the CRC is correct. Check the one we have already... */ if (!check_tn_node(c, this)) { /* The one we already had was OK. Keep it and throw away the new one */ dbg_readinode("Like old node. Throw away new\n"); jffs2_kill_tn(c, tn); return 0; } else { /* Who cares if the new one is good; keep it for now anyway. */ dbg_readinode("Like new node. Throw away old\n"); rb_replace_node(&this->rb, &tn->rb, &rii->tn_root); jffs2_kill_tn(c, this); /* Same overlapping from in front and behind */ return 0; } } if (this->version < tn->version && this->fn->ofs >= tn->fn->ofs && this->fn->ofs + this->fn->size <= fn_end) { /* New node entirely overlaps 'this' */ if (check_tn_node(c, tn)) { dbg_readinode("new node bad CRC\n"); jffs2_kill_tn(c, tn); return 0; } /* ... and is good. Kill 'this' and any subsequent nodes which are also overlapped */ while (this && this->fn->ofs + this->fn->size <= fn_end) { struct jffs2_tmp_dnode_info *next = tn_next(this); if (this->version < tn->version) { tn_erase(this, &rii->tn_root); dbg_readinode("Kill overlapped ver %d, 0x%x-0x%x\n", this->version, this->fn->ofs, this->fn->ofs+this->fn->size); jffs2_kill_tn(c, this); } this = next; } dbg_readinode("Done killing overlapped nodes\n"); continue; } if (this->version > tn->version && this->fn->ofs <= tn->fn->ofs && this->fn->ofs+this->fn->size >= fn_end) { /* New node entirely overlapped by 'this' */ if (!check_tn_node(c, this)) { dbg_readinode("Good CRC on old node. Kill new\n"); jffs2_kill_tn(c, tn); return 0; } /* ... but 'this' was bad. Replace it... */ dbg_readinode("Bad CRC on old overlapping node. Kill it\n"); tn_erase(this, &rii->tn_root); jffs2_kill_tn(c, this); break; } this = tn_next(this); } /* We neither completely obsoleted nor were completely obsoleted by an earlier node. 
Insert into the tree */ { struct rb_node *parent; struct rb_node **link = &rii->tn_root.rb_node; struct jffs2_tmp_dnode_info *insert_point = NULL; while (*link) { parent = *link; insert_point = rb_entry(parent, struct jffs2_tmp_dnode_info, rb); if (tn->fn->ofs > insert_point->fn->ofs) link = &insert_point->rb.rb_right; else if (tn->fn->ofs < insert_point->fn->ofs || tn->fn->size < insert_point->fn->size) link = &insert_point->rb.rb_left; else link = &insert_point->rb.rb_right; } rb_link_node(&tn->rb, &insert_point->rb, link); rb_insert_color(&tn->rb, &rii->tn_root); } /* If there's anything behind that overlaps us, note it */ this = tn_prev(tn); if (this) { while (1) { if (this->fn->ofs + this->fn->size > tn->fn->ofs) { dbg_readinode("Node is overlapped by %p (v %d, 0x%x-0x%x)\n", this, this->version, this->fn->ofs, this->fn->ofs+this->fn->size); tn->overlapped = 1; break; } if (!this->overlapped) break; ptn = tn_prev(this); if (!ptn) { /* * We killed a node which set the overlapped * flags during the scan. Fix it up. */ this->overlapped = 0; break; } this = ptn; } } /* If the new node overlaps anything ahead, note it */ this = tn_next(tn); while (this && this->fn->ofs < fn_end) { this->overlapped = 1; dbg_readinode("Node ver %d, 0x%x-0x%x is overlapped\n", this->version, this->fn->ofs, this->fn->ofs+this->fn->size); this = tn_next(this); } return 0; } /* Trivial function to remove the last node in the tree. Which by definition has no right-hand child — so can be removed just by making its left-hand child (if any) take its place under its parent. Since this is only done when we're consuming the whole tree, there's no need to use rb_erase() and let it worry about adjusting colours and balancing the tree. That would just be a waste of time. */ static void eat_last(struct rb_root *root, struct rb_node *node) { struct rb_node *parent = rb_parent(node); struct rb_node **link; /* LAST! */ BUG_ON(node->rb_right); if (!parent) link = &root->rb_node; else if (node == parent->rb_left) link = &parent->rb_left; else link = &parent->rb_right; *link = node->rb_left; if (node->rb_left) node->rb_left->__rb_parent_color = node->__rb_parent_color; } /* We put the version tree in reverse order, so we can use the same eat_last() function that we use to consume the tmpnode tree (tn_root). */ static void ver_insert(struct rb_root *ver_root, struct jffs2_tmp_dnode_info *tn) { struct rb_node **link = &ver_root->rb_node; struct rb_node *parent = NULL; struct jffs2_tmp_dnode_info *this_tn; while (*link) { parent = *link; this_tn = rb_entry(parent, struct jffs2_tmp_dnode_info, rb); if (tn->version > this_tn->version) link = &parent->rb_left; else link = &parent->rb_right; } dbg_readinode("Link new node at %p (root is %p)\n", link, ver_root); rb_link_node(&tn->rb, parent, link); rb_insert_color(&tn->rb, ver_root); } /* Build final, normal fragtree from tn tree. It doesn't matter which order we add nodes to the real fragtree, as long as they don't overlap. And having thrown away the majority of overlapped nodes as we went, there really shouldn't be many sets of nodes which do overlap. If we start at the end, we can use the overlap markers -- we can just eat nodes which aren't overlapped, and when we encounter nodes which _do_ overlap we sort them all into a temporary tree in version order before replaying them. 
*/ static int jffs2_build_inode_fragtree(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_readinode_info *rii) { struct jffs2_tmp_dnode_info *pen, *last, *this; struct rb_root ver_root = RB_ROOT; uint32_t high_ver = 0; if (rii->mdata_tn) { dbg_readinode("potential mdata is ver %d at %p\n", rii->mdata_tn->version, rii->mdata_tn); high_ver = rii->mdata_tn->version; rii->latest_ref = rii->mdata_tn->fn->raw; } #ifdef JFFS2_DBG_READINODE_MESSAGES this = tn_last(&rii->tn_root); while (this) { dbg_readinode("tn %p ver %d range 0x%x-0x%x ov %d\n", this, this->version, this->fn->ofs, this->fn->ofs+this->fn->size, this->overlapped); this = tn_prev(this); } #endif pen = tn_last(&rii->tn_root); while ((last = pen)) { pen = tn_prev(last); eat_last(&rii->tn_root, &last->rb); ver_insert(&ver_root, last); if (unlikely(last->overlapped)) { if (pen) continue; /* * We killed a node which set the overlapped * flags during the scan. Fix it up. */ last->overlapped = 0; } /* Now we have a bunch of nodes in reverse version order, in the tree at ver_root. Most of the time, there'll actually be only one node in the 'tree', in fact. */ this = tn_last(&ver_root); while (this) { struct jffs2_tmp_dnode_info *vers_next; int ret; vers_next = tn_prev(this); eat_last(&ver_root, &this->rb); if (check_tn_node(c, this)) { dbg_readinode("node ver %d, 0x%x-0x%x failed CRC\n", this->version, this->fn->ofs, this->fn->ofs+this->fn->size); jffs2_kill_tn(c, this); } else { if (this->version > high_ver) { /* Note that this is different from the other highest_version, because this one is only counting _valid_ nodes which could give the latest inode metadata */ high_ver = this->version; rii->latest_ref = this->fn->raw; } dbg_readinode("Add %p (v %d, 0x%x-0x%x, ov %d) to fragtree\n", this, this->version, this->fn->ofs, this->fn->ofs+this->fn->size, this->overlapped); ret = jffs2_add_full_dnode_to_inode(c, f, this->fn); if (ret) { /* Free the nodes in vers_root; let the caller deal with the rest */ JFFS2_ERROR("Add node to tree failed %d\n", ret); while (1) { vers_next = tn_prev(this); if (check_tn_node(c, this)) jffs2_mark_node_obsolete(c, this->fn->raw); jffs2_free_full_dnode(this->fn); jffs2_free_tmp_dnode_info(this); this = vers_next; if (!this) break; eat_last(&ver_root, &vers_next->rb); } return ret; } jffs2_free_tmp_dnode_info(this); } this = vers_next; } } return 0; } static void jffs2_free_tmp_dnode_info_list(struct rb_root *list) { struct jffs2_tmp_dnode_info *tn, *next; rbtree_postorder_for_each_entry_safe(tn, next, list, rb) { jffs2_free_full_dnode(tn->fn); jffs2_free_tmp_dnode_info(tn); } *list = RB_ROOT; } static void jffs2_free_full_dirent_list(struct jffs2_full_dirent *fd) { struct jffs2_full_dirent *next; while (fd) { next = fd->next; jffs2_free_full_dirent(fd); fd = next; } } /* Returns first valid node after 'ref'. May return 'ref' */ static struct jffs2_raw_node_ref *jffs2_first_valid_node(struct jffs2_raw_node_ref *ref) { while (ref && ref->next_in_ino) { if (!ref_obsolete(ref)) return ref; dbg_noderef("node at 0x%08x is obsoleted. Ignoring.\n", ref_offset(ref)); ref = ref->next_in_ino; } return NULL; } /* * Helper function for jffs2_get_inode_nodes(). * It is called every time an directory entry node is found. * * Returns: 0 on success; * negative error code on failure. */ static inline int read_direntry(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, struct jffs2_raw_dirent *rd, size_t read, struct jffs2_readinode_info *rii) { struct jffs2_full_dirent *fd; uint32_t crc; /* Obsoleted. 
This cannot happen, surely? dwmw2 20020308 */ BUG_ON(ref_obsolete(ref)); crc = crc32(0, rd, sizeof(*rd) - 8); if (unlikely(crc != je32_to_cpu(rd->node_crc))) { JFFS2_NOTICE("header CRC failed on dirent node at %#08x: read %#08x, calculated %#08x\n", ref_offset(ref), je32_to_cpu(rd->node_crc), crc); jffs2_mark_node_obsolete(c, ref); return 0; } /* If we've never checked the CRCs on this node, check them now */ if (ref_flags(ref) == REF_UNCHECKED) { struct jffs2_eraseblock *jeb; int len; /* Sanity check */ if (unlikely(PAD((rd->nsize + sizeof(*rd))) != PAD(je32_to_cpu(rd->totlen)))) { JFFS2_ERROR("illegal nsize in node at %#08x: nsize %#02x, totlen %#04x\n", ref_offset(ref), rd->nsize, je32_to_cpu(rd->totlen)); jffs2_mark_node_obsolete(c, ref); return 0; } jeb = &c->blocks[ref->flash_offset / c->sector_size]; len = ref_totlen(c, jeb, ref); spin_lock(&c->erase_completion_lock); jeb->used_size += len; jeb->unchecked_size -= len; c->used_size += len; c->unchecked_size -= len; ref->flash_offset = ref_offset(ref) | dirent_node_state(rd); spin_unlock(&c->erase_completion_lock); } fd = jffs2_alloc_full_dirent(rd->nsize + 1); if (unlikely(!fd)) return -ENOMEM; fd->raw = ref; fd->version = je32_to_cpu(rd->version); fd->ino = je32_to_cpu(rd->ino); fd->type = rd->type; if (fd->version > rii->highest_version) rii->highest_version = fd->version; /* Pick out the mctime of the latest dirent */ if(fd->version > rii->mctime_ver && je32_to_cpu(rd->mctime)) { rii->mctime_ver = fd->version; rii->latest_mctime = je32_to_cpu(rd->mctime); } /* * Copy as much of the name as possible from the raw * dirent we've already read from the flash. */ if (read > sizeof(*rd)) memcpy(&fd->name[0], &rd->name[0], min_t(uint32_t, rd->nsize, (read - sizeof(*rd)) )); /* Do we need to copy any more of the name directly from the flash? */ if (rd->nsize + sizeof(*rd) > read) { /* FIXME: point() */ int err; int already = read - sizeof(*rd); err = jffs2_flash_read(c, (ref_offset(ref)) + read, rd->nsize - already, &read, &fd->name[already]); if (unlikely(read != rd->nsize - already) && likely(!err)) { jffs2_free_full_dirent(fd); JFFS2_ERROR("short read: wanted %d bytes, got %zd\n", rd->nsize - already, read); return -EIO; } if (unlikely(err)) { JFFS2_ERROR("read remainder of name: error %d\n", err); jffs2_free_full_dirent(fd); return -EIO; } #ifdef CONFIG_JFFS2_SUMMARY /* * we use CONFIG_JFFS2_SUMMARY because without it, we * have checked it while mounting */ crc = crc32(0, fd->name, rd->nsize); if (unlikely(crc != je32_to_cpu(rd->name_crc))) { JFFS2_NOTICE("name CRC failed on dirent node at" "%#08x: read %#08x,calculated %#08x\n", ref_offset(ref), je32_to_cpu(rd->node_crc), crc); jffs2_mark_node_obsolete(c, ref); jffs2_free_full_dirent(fd); return 0; } #endif } fd->nhash = full_name_hash(NULL, fd->name, rd->nsize); fd->next = NULL; fd->name[rd->nsize] = '\0'; /* * Wheee. We now have a complete jffs2_full_dirent structure, with * the name in it and everything. Link it into the list */ jffs2_add_fd_to_list(c, fd, &rii->fds); return 0; } /* * Helper function for jffs2_get_inode_nodes(). * It is called every time an inode node is found. * * Returns: 0 on success (possibly after marking a bad node obsolete); * negative error code on failure. */ static inline int read_dnode(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, struct jffs2_raw_inode *rd, int rdlen, struct jffs2_readinode_info *rii) { struct jffs2_tmp_dnode_info *tn; uint32_t len, csize; int ret = 0; uint32_t crc; /* Obsoleted. This cannot happen, surely? 
dwmw2 20020308 */ BUG_ON(ref_obsolete(ref)); crc = crc32(0, rd, sizeof(*rd) - 8); if (unlikely(crc != je32_to_cpu(rd->node_crc))) { JFFS2_NOTICE("node CRC failed on dnode at %#08x: read %#08x, calculated %#08x\n", ref_offset(ref), je32_to_cpu(rd->node_crc), crc); jffs2_mark_node_obsolete(c, ref); return 0; } tn = jffs2_alloc_tmp_dnode_info(); if (!tn) { JFFS2_ERROR("failed to allocate tn (%zu bytes).\n", sizeof(*tn)); return -ENOMEM; } tn->partial_crc = 0; csize = je32_to_cpu(rd->csize); /* If we've never checked the CRCs on this node, check them now */ if (ref_flags(ref) == REF_UNCHECKED) { /* Sanity checks */ if (unlikely(je32_to_cpu(rd->offset) > je32_to_cpu(rd->isize)) || unlikely(PAD(je32_to_cpu(rd->csize) + sizeof(*rd)) != PAD(je32_to_cpu(rd->totlen)))) { JFFS2_WARNING("inode node header CRC is corrupted at %#08x\n", ref_offset(ref)); jffs2_dbg_dump_node(c, ref_offset(ref)); jffs2_mark_node_obsolete(c, ref); goto free_out; } if (jffs2_is_writebuffered(c) && csize != 0) { /* At this point we are supposed to check the data CRC * of our unchecked node. But thus far, we do not * know whether the node is valid or obsolete. To * figure this out, we need to walk all the nodes of * the inode and build the inode fragtree. We don't * want to spend time checking data of nodes which may * later be found to be obsolete. So we put off the full * data CRC checking until we have read all the inode * nodes and have started building the fragtree. * * The fragtree is being built starting with nodes * having the highest version number, so we'll be able * to detect whether a node is valid (i.e., it is not * overlapped by a node with higher version) or not. * And we'll be able to check only those nodes, which * are not obsolete. * * Of course, this optimization only makes sense in case * of NAND flashes (or other flashes with * !jffs2_can_mark_obsolete()), since on NOR flashes * nodes are marked obsolete physically. * * Since NAND flashes (or other flashes with * jffs2_is_writebuffered(c)) are anyway read by * fractions of c->wbuf_pagesize, and we have just read * the node header, it is likely that the starting part * of the node data is also read when we read the * header. So we don't mind to check the CRC of the * starting part of the data of the node now, and check * the second part later (in jffs2_check_node_data()). * Of course, we will not need to re-read and re-check * the NAND page which we have just read. This is why we * read the whole NAND page at jffs2_get_inode_nodes(), * while we needed only the node header. */ unsigned char *buf; /* 'buf' will point to the start of data */ buf = (unsigned char *)rd + sizeof(*rd); /* len will be the read data length */ len = min_t(uint32_t, rdlen - sizeof(*rd), csize); tn->partial_crc = crc32(0, buf, len); dbg_readinode("Calculates CRC (%#08x) for %d bytes, csize %d\n", tn->partial_crc, len, csize); /* If we actually calculated the whole data CRC * and it is wrong, drop the node. */ if (len >= csize && unlikely(tn->partial_crc != je32_to_cpu(rd->data_crc))) { JFFS2_NOTICE("wrong data CRC in data node at 0x%08x: read %#08x, calculated %#08x.\n", ref_offset(ref), tn->partial_crc, je32_to_cpu(rd->data_crc)); jffs2_mark_node_obsolete(c, ref); goto free_out; } } else if (csize == 0) { /* * We checked the header CRC. If the node has no data, adjust * the space accounting now. For other nodes this will be done * later either when the node is marked obsolete or when its * data is checked. 
*/ struct jffs2_eraseblock *jeb; dbg_readinode("the node has no data.\n"); jeb = &c->blocks[ref->flash_offset / c->sector_size]; len = ref_totlen(c, jeb, ref); spin_lock(&c->erase_completion_lock); jeb->used_size += len; jeb->unchecked_size -= len; c->used_size += len; c->unchecked_size -= len; ref->flash_offset = ref_offset(ref) | REF_NORMAL; spin_unlock(&c->erase_completion_lock); } } tn->fn = jffs2_alloc_full_dnode(); if (!tn->fn) { JFFS2_ERROR("alloc fn failed\n"); ret = -ENOMEM; goto free_out; } tn->version = je32_to_cpu(rd->version); tn->fn->ofs = je32_to_cpu(rd->offset); tn->data_crc = je32_to_cpu(rd->data_crc); tn->csize = csize; tn->fn->raw = ref; tn->overlapped = 0; if (tn->version > rii->highest_version) rii->highest_version = tn->version; /* There was a bug where we wrote hole nodes out with csize/dsize swapped. Deal with it */ if (rd->compr == JFFS2_COMPR_ZERO && !je32_to_cpu(rd->dsize) && csize) tn->fn->size = csize; else // normal case... tn->fn->size = je32_to_cpu(rd->dsize); dbg_readinode2("dnode @%08x: ver %u, offset %#04x, dsize %#04x, csize %#04x\n", ref_offset(ref), je32_to_cpu(rd->version), je32_to_cpu(rd->offset), je32_to_cpu(rd->dsize), csize); ret = jffs2_add_tn_to_tree(c, rii, tn); if (ret) { jffs2_free_full_dnode(tn->fn); free_out: jffs2_free_tmp_dnode_info(tn); return ret; } #ifdef JFFS2_DBG_READINODE2_MESSAGES dbg_readinode2("After adding ver %d:\n", je32_to_cpu(rd->version)); tn = tn_first(&rii->tn_root); while (tn) { dbg_readinode2("%p: v %d r 0x%x-0x%x ov %d\n", tn, tn->version, tn->fn->ofs, tn->fn->ofs+tn->fn->size, tn->overlapped); tn = tn_next(tn); } #endif return 0; } /* * Helper function for jffs2_get_inode_nodes(). * It is called every time an unknown node is found. * * Returns: 0 on success; * negative error code on failure. */ static inline int read_unknown(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, struct jffs2_unknown_node *un) { /* We don't mark unknown nodes as REF_UNCHECKED */ if (ref_flags(ref) == REF_UNCHECKED) { JFFS2_ERROR("REF_UNCHECKED but unknown node at %#08x\n", ref_offset(ref)); JFFS2_ERROR("Node is {%04x,%04x,%08x,%08x}. Please report this error.\n", je16_to_cpu(un->magic), je16_to_cpu(un->nodetype), je32_to_cpu(un->totlen), je32_to_cpu(un->hdr_crc)); jffs2_mark_node_obsolete(c, ref); return 0; } un->nodetype = cpu_to_je16(JFFS2_NODE_ACCURATE | je16_to_cpu(un->nodetype)); switch(je16_to_cpu(un->nodetype) & JFFS2_COMPAT_MASK) { case JFFS2_FEATURE_INCOMPAT: JFFS2_ERROR("unknown INCOMPAT nodetype %#04X at %#08x\n", je16_to_cpu(un->nodetype), ref_offset(ref)); /* EEP */ BUG(); break; case JFFS2_FEATURE_ROCOMPAT: JFFS2_ERROR("unknown ROCOMPAT nodetype %#04X at %#08x\n", je16_to_cpu(un->nodetype), ref_offset(ref)); BUG_ON(!(c->flags & JFFS2_SB_FLAG_RO)); break; case JFFS2_FEATURE_RWCOMPAT_COPY: JFFS2_NOTICE("unknown RWCOMPAT_COPY nodetype %#04X at %#08x\n", je16_to_cpu(un->nodetype), ref_offset(ref)); break; case JFFS2_FEATURE_RWCOMPAT_DELETE: JFFS2_NOTICE("unknown RWCOMPAT_DELETE nodetype %#04X at %#08x\n", je16_to_cpu(un->nodetype), ref_offset(ref)); jffs2_mark_node_obsolete(c, ref); return 0; } return 0; } /* * Helper function for jffs2_get_inode_nodes(). * The function detects whether more data should be read and reads it if yes. * * Returns: 0 on success; * negative error code on failure. 
*/ static int read_more(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, int needed_len, int *rdlen, unsigned char *buf) { int err, to_read = needed_len - *rdlen; size_t retlen; uint32_t offs; if (jffs2_is_writebuffered(c)) { int rem = to_read % c->wbuf_pagesize; if (rem) to_read += c->wbuf_pagesize - rem; } /* We need to read more data */ offs = ref_offset(ref) + *rdlen; dbg_readinode("read more %d bytes\n", to_read); err = jffs2_flash_read(c, offs, to_read, &retlen, buf + *rdlen); if (err) { JFFS2_ERROR("can not read %d bytes from 0x%08x, " "error code: %d.\n", to_read, offs, err); return err; } if (retlen < to_read) { JFFS2_ERROR("short read at %#08x: %zu instead of %d.\n", offs, retlen, to_read); return -EIO; } *rdlen += to_read; return 0; } /* Get tmp_dnode_info and full_dirent for all non-obsolete nodes associated with this ino. Perform a preliminary ordering on data nodes, throwing away those which are completely obsoleted by newer ones. The naïve approach we use to take of just returning them _all_ in version order will cause us to run out of memory in certain degenerate cases. */ static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_readinode_info *rii) { struct jffs2_raw_node_ref *ref, *valid_ref; unsigned char *buf = NULL; union jffs2_node_union *node; size_t retlen; int len, err; rii->mctime_ver = 0; dbg_readinode("ino #%u\n", f->inocache->ino); /* FIXME: in case of NOR and available ->point() this * needs to be fixed. */ len = sizeof(union jffs2_node_union) + c->wbuf_pagesize; buf = kmalloc(len, GFP_KERNEL); if (!buf) return -ENOMEM; spin_lock(&c->erase_completion_lock); valid_ref = jffs2_first_valid_node(f->inocache->nodes); if (!valid_ref && f->inocache->ino != 1) JFFS2_WARNING("Eep. No valid nodes for ino #%u.\n", f->inocache->ino); while (valid_ref) { /* We can hold a pointer to a non-obsolete node without the spinlock, but _obsolete_ nodes may disappear at any time, if the block they're in gets erased. So if we mark 'ref' obsolete while we're not holding the lock, it can go away immediately. For that reason, we find the next valid node first, before processing 'ref'. */ ref = valid_ref; valid_ref = jffs2_first_valid_node(ref->next_in_ino); spin_unlock(&c->erase_completion_lock); cond_resched(); /* * At this point we don't know the type of the node we're going * to read, so we do not know the size of its header. In order * to minimize the amount of flash IO we assume the header is * of size = JFFS2_MIN_NODE_HEADER. */ len = JFFS2_MIN_NODE_HEADER; if (jffs2_is_writebuffered(c)) { int end, rem; /* * We are about to read JFFS2_MIN_NODE_HEADER bytes, * but this flash has some minimal I/O unit. It is * possible that we'll need to read more soon, so read * up to the next min. I/O unit, in order not to * re-read the same min. I/O unit twice. 
*/ end = ref_offset(ref) + len; rem = end % c->wbuf_pagesize; if (rem) end += c->wbuf_pagesize - rem; len = end - ref_offset(ref); } dbg_readinode("read %d bytes at %#08x(%d).\n", len, ref_offset(ref), ref_flags(ref)); /* FIXME: point() */ err = jffs2_flash_read(c, ref_offset(ref), len, &retlen, buf); if (err) { JFFS2_ERROR("can not read %d bytes from 0x%08x, error code: %d.\n", len, ref_offset(ref), err); goto free_out; } if (retlen < len) { JFFS2_ERROR("short read at %#08x: %zu instead of %d.\n", ref_offset(ref), retlen, len); err = -EIO; goto free_out; } node = (union jffs2_node_union *)buf; /* No need to mask in the valid bit; it shouldn't be invalid */ if (je32_to_cpu(node->u.hdr_crc) != crc32(0, node, sizeof(node->u)-4)) { JFFS2_NOTICE("Node header CRC failed at %#08x. {%04x,%04x,%08x,%08x}\n", ref_offset(ref), je16_to_cpu(node->u.magic), je16_to_cpu(node->u.nodetype), je32_to_cpu(node->u.totlen), je32_to_cpu(node->u.hdr_crc)); jffs2_dbg_dump_node(c, ref_offset(ref)); jffs2_mark_node_obsolete(c, ref); goto cont; } if (je16_to_cpu(node->u.magic) != JFFS2_MAGIC_BITMASK) { /* Not a JFFS2 node, whinge and move on */ JFFS2_NOTICE("Wrong magic bitmask 0x%04x in node header at %#08x.\n", je16_to_cpu(node->u.magic), ref_offset(ref)); jffs2_mark_node_obsolete(c, ref); goto cont; } switch (je16_to_cpu(node->u.nodetype)) { case JFFS2_NODETYPE_DIRENT: if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_dirent) && len < sizeof(struct jffs2_raw_dirent)) { err = read_more(c, ref, sizeof(struct jffs2_raw_dirent), &len, buf); if (unlikely(err)) goto free_out; } err = read_direntry(c, ref, &node->d, retlen, rii); if (unlikely(err)) goto free_out; break; case JFFS2_NODETYPE_INODE: if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_inode) && len < sizeof(struct jffs2_raw_inode)) { err = read_more(c, ref, sizeof(struct jffs2_raw_inode), &len, buf); if (unlikely(err)) goto free_out; } err = read_dnode(c, ref, &node->i, len, rii); if (unlikely(err)) goto free_out; break; default: if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_unknown_node) && len < sizeof(struct jffs2_unknown_node)) { err = read_more(c, ref, sizeof(struct jffs2_unknown_node), &len, buf); if (unlikely(err)) goto free_out; } err = read_unknown(c, ref, &node->u); if (unlikely(err)) goto free_out; } cont: spin_lock(&c->erase_completion_lock); } spin_unlock(&c->erase_completion_lock); kfree(buf); f->highest_version = rii->highest_version; dbg_readinode("nodes of inode #%u were read, the highest version is %u, latest_mctime %u, mctime_ver %u.\n", f->inocache->ino, rii->highest_version, rii->latest_mctime, rii->mctime_ver); return 0; free_out: jffs2_free_tmp_dnode_info_list(&rii->tn_root); jffs2_free_full_dirent_list(rii->fds); rii->fds = NULL; kfree(buf); return err; } static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_inode *latest_node) { struct jffs2_readinode_info rii; uint32_t crc, new_size; size_t retlen; int ret; dbg_readinode("ino #%u pino/nlink is %d\n", f->inocache->ino, f->inocache->pino_nlink); memset(&rii, 0, sizeof(rii)); /* Grab all nodes relevant to this ino */ ret = jffs2_get_inode_nodes(c, f, &rii); if (ret) { JFFS2_ERROR("cannot read nodes for ino %u, returned error is %d\n", f->inocache->ino, ret); if (f->inocache->state == INO_STATE_READING) jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT); return ret; } ret = jffs2_build_inode_fragtree(c, f, &rii); if (ret) { JFFS2_ERROR("Failed to build final fragtree for inode #%u: error %d\n", f->inocache->ino, 
ret); if (f->inocache->state == INO_STATE_READING) jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT); jffs2_free_tmp_dnode_info_list(&rii.tn_root); /* FIXME: We could at least crc-check them all */ if (rii.mdata_tn) { jffs2_free_full_dnode(rii.mdata_tn->fn); jffs2_free_tmp_dnode_info(rii.mdata_tn); rii.mdata_tn = NULL; } return ret; } if (rii.mdata_tn) { if (rii.mdata_tn->fn->raw == rii.latest_ref) { f->metadata = rii.mdata_tn->fn; jffs2_free_tmp_dnode_info(rii.mdata_tn); } else { jffs2_kill_tn(c, rii.mdata_tn); } rii.mdata_tn = NULL; } f->dents = rii.fds; jffs2_dbg_fragtree_paranoia_check_nolock(f); if (unlikely(!rii.latest_ref)) { /* No data nodes for this inode. */ if (f->inocache->ino != 1) { JFFS2_WARNING("no data nodes found for ino #%u\n", f->inocache->ino); if (!rii.fds) { if (f->inocache->state == INO_STATE_READING) jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT); return -EIO; } JFFS2_NOTICE("but it has children so we fake some modes for it\n"); } latest_node->mode = cpu_to_jemode(S_IFDIR|S_IRUGO|S_IWUSR|S_IXUGO); latest_node->version = cpu_to_je32(0); latest_node->atime = latest_node->ctime = latest_node->mtime = cpu_to_je32(0); latest_node->isize = cpu_to_je32(0); latest_node->gid = cpu_to_je16(0); latest_node->uid = cpu_to_je16(0); if (f->inocache->state == INO_STATE_READING) jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT); return 0; } ret = jffs2_flash_read(c, ref_offset(rii.latest_ref), sizeof(*latest_node), &retlen, (void *)latest_node); if (ret || retlen != sizeof(*latest_node)) { JFFS2_ERROR("failed to read from flash: error %d, %zd of %zd bytes read\n", ret, retlen, sizeof(*latest_node)); /* FIXME: If this fails, there seems to be a memory leak. Find it. */ return ret ? ret : -EIO; } crc = crc32(0, latest_node, sizeof(*latest_node)-8); if (crc != je32_to_cpu(latest_node->node_crc)) { JFFS2_ERROR("CRC failed for read_inode of inode %u at physical location 0x%x\n", f->inocache->ino, ref_offset(rii.latest_ref)); return -EIO; } switch(jemode_to_cpu(latest_node->mode) & S_IFMT) { case S_IFDIR: if (rii.mctime_ver > je32_to_cpu(latest_node->version)) { /* The times in the latest_node are actually older than mctime in the latest dirent. Cheat. */ latest_node->ctime = latest_node->mtime = cpu_to_je32(rii.latest_mctime); } break; case S_IFREG: /* If it was a regular file, truncate it to the latest node's isize */ new_size = jffs2_truncate_fragtree(c, &f->fragtree, je32_to_cpu(latest_node->isize)); if (new_size != je32_to_cpu(latest_node->isize)) { JFFS2_WARNING("Truncating ino #%u to %d bytes failed because it only had %d bytes to start with!\n", f->inocache->ino, je32_to_cpu(latest_node->isize), new_size); latest_node->isize = cpu_to_je32(new_size); } break; case S_IFLNK: /* Hack to work around broken isize in old symlink code. Remove this when dwmw2 comes to his senses and stops symlinks from being an entirely gratuitous special case. */ if (!je32_to_cpu(latest_node->isize)) latest_node->isize = latest_node->dsize; if (f->inocache->state != INO_STATE_CHECKING) { /* Symlink's inode data is the target path. Read it and * keep in RAM to facilitate quick follow symlink * operation. 
*/ uint32_t csize = je32_to_cpu(latest_node->csize); if (csize > JFFS2_MAX_NAME_LEN) return -ENAMETOOLONG; f->target = kmalloc(csize + 1, GFP_KERNEL); if (!f->target) { JFFS2_ERROR("can't allocate %u bytes of memory for the symlink target path cache\n", csize); return -ENOMEM; } ret = jffs2_flash_read(c, ref_offset(rii.latest_ref) + sizeof(*latest_node), csize, &retlen, (char *)f->target); if (ret || retlen != csize) { if (retlen != csize) ret = -EIO; kfree(f->target); f->target = NULL; return ret; } f->target[csize] = '\0'; dbg_readinode("symlink's target '%s' cached\n", f->target); } fallthrough; case S_IFBLK: case S_IFCHR: /* Certain inode types should have only one data node, and it's kept as the metadata node */ if (f->metadata) { JFFS2_ERROR("Argh. Special inode #%u with mode 0%o had metadata node\n", f->inocache->ino, jemode_to_cpu(latest_node->mode)); return -EIO; } if (!frag_first(&f->fragtree)) { JFFS2_ERROR("Argh. Special inode #%u with mode 0%o has no fragments\n", f->inocache->ino, jemode_to_cpu(latest_node->mode)); return -EIO; } /* ASSERT: f->fraglist != NULL */ if (frag_next(frag_first(&f->fragtree))) { JFFS2_ERROR("Argh. Special inode #%u with mode 0x%x had more than one node\n", f->inocache->ino, jemode_to_cpu(latest_node->mode)); /* FIXME: Deal with it - check crc32, check for duplicate node, check times and discard the older one */ return -EIO; } /* OK. We're happy */ f->metadata = frag_first(&f->fragtree)->node; jffs2_free_node_frag(frag_first(&f->fragtree)); f->fragtree = RB_ROOT; break; } if (f->inocache->state == INO_STATE_READING) jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT); return 0; } /* Scan the list of all nodes present for this ino, build map of versions, etc. */ int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, uint32_t ino, struct jffs2_raw_inode *latest_node) { dbg_readinode("read inode #%u\n", ino); retry_inocache: spin_lock(&c->inocache_lock); f->inocache = jffs2_get_ino_cache(c, ino); if (f->inocache) { /* Check its state. We may need to wait before we can use it */ switch(f->inocache->state) { case INO_STATE_UNCHECKED: case INO_STATE_CHECKEDABSENT: f->inocache->state = INO_STATE_READING; break; case INO_STATE_CHECKING: case INO_STATE_GC: /* If it's in either of these states, we need to wait for whoever's got it to finish and put it back. */ dbg_readinode("waiting for ino #%u in state %d\n", ino, f->inocache->state); sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock); goto retry_inocache; case INO_STATE_READING: case INO_STATE_PRESENT: /* Eep. This should never happen. It can happen if Linux calls read_inode() again before clear_inode() has finished though. */ JFFS2_ERROR("Eep. Trying to read_inode #%u when it's already in state %d!\n", ino, f->inocache->state); /* Fail. 
That's probably better than allowing it to succeed */ f->inocache = NULL; break; default: BUG(); } } spin_unlock(&c->inocache_lock); if (!f->inocache && ino == 1) { /* Special case - no root inode on medium */ f->inocache = jffs2_alloc_inode_cache(); if (!f->inocache) { JFFS2_ERROR("cannot allocate inocache for root inode\n"); return -ENOMEM; } dbg_readinode("creating inocache for root inode\n"); memset(f->inocache, 0, sizeof(struct jffs2_inode_cache)); f->inocache->ino = f->inocache->pino_nlink = 1; f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache; f->inocache->state = INO_STATE_READING; jffs2_add_ino_cache(c, f->inocache); } if (!f->inocache) { JFFS2_ERROR("requested to read a nonexistent ino %u\n", ino); return -ENOENT; } return jffs2_do_read_inode_internal(c, f, latest_node); } int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic) { struct jffs2_raw_inode n; struct jffs2_inode_info *f = kzalloc(sizeof(*f), GFP_KERNEL); int ret; if (!f) return -ENOMEM; mutex_init(&f->sem); mutex_lock(&f->sem); f->inocache = ic; ret = jffs2_do_read_inode_internal(c, f, &n); mutex_unlock(&f->sem); jffs2_do_clear_inode(c, f); jffs2_xattr_do_crccheck_inode(c, ic); kfree (f); return ret; } void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f) { struct jffs2_full_dirent *fd, *fds; int deleted; jffs2_xattr_delete_inode(c, f->inocache); mutex_lock(&f->sem); deleted = f->inocache && !f->inocache->pino_nlink; if (f->inocache && f->inocache->state != INO_STATE_CHECKING) jffs2_set_inocache_state(c, f->inocache, INO_STATE_CLEARING); if (f->metadata) { if (deleted) jffs2_mark_node_obsolete(c, f->metadata->raw); jffs2_free_full_dnode(f->metadata); } jffs2_kill_fragtree(&f->fragtree, deleted?c:NULL); fds = f->dents; while(fds) { fd = fds; fds = fd->next; jffs2_free_full_dirent(fd); } if (f->inocache && f->inocache->state != INO_STATE_CHECKING) { jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT); if (f->inocache->nodes == (void *)f->inocache) jffs2_del_ino_cache(c, f->inocache); } mutex_unlock(&f->sem); }
linux-master
fs/jffs2/readinode.c
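A minimal user-space sketch of the tn-tree ordering rule described in the comments of jffs2_add_tn_to_tree() above: candidate data nodes are kept sorted by file offset, and when offsets collide the node with the smaller length comes first. The struct and field names below are hypothetical stand-ins for jffs2_tmp_dnode_info, and a plain qsort() comparator replaces the rbtree purely for illustration; this is not kernel code.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct tn_key {                 /* stand-in for fn->ofs / fn->size */
	uint32_t ofs;
	uint32_t size;
};

static int tn_cmp(const void *a, const void *b)
{
	const struct tn_key *x = a, *y = b;

	if (x->ofs != y->ofs)
		return x->ofs < y->ofs ? -1 : 1;   /* lower file offset first */
	if (x->size != y->size)
		return x->size < y->size ? -1 : 1; /* same offset: smaller node first */
	return 0;
}

int main(void)
{
	struct tn_key nodes[] = {
		{ 0x1000, 0x800 }, { 0x0000, 0x1000 }, { 0x1000, 0x200 },
	};

	qsort(nodes, 3, sizeof(nodes[0]), tn_cmp);
	for (int i = 0; i < 3; i++)
		printf("ofs=%#x size=%#x\n",
		       (unsigned)nodes[i].ofs, (unsigned)nodes[i].size);
	return 0;
}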
/* * JFFS2 -- Journalling Flash File System, Version 2. * * Copyright © 2006 NEC Corporation * * Created by KaiGai Kohei <[email protected]> * * For licensing information, see the file 'LICENCE' in this directory. * */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/time.h> #include <linux/pagemap.h> #include <linux/highmem.h> #include <linux/crc32.h> #include <linux/jffs2.h> #include <linux/xattr.h> #include <linux/mtd/mtd.h> #include <linux/security.h> #include "nodelist.h" /* ---- Initial Security Label(s) Attachment callback --- */ static int jffs2_initxattrs(struct inode *inode, const struct xattr *xattr_array, void *fs_info) { const struct xattr *xattr; int err = 0; for (xattr = xattr_array; xattr->name != NULL; xattr++) { err = do_jffs2_setxattr(inode, JFFS2_XPREFIX_SECURITY, xattr->name, xattr->value, xattr->value_len, 0); if (err < 0) break; } return err; } /* ---- Initial Security Label(s) Attachment ----------- */ int jffs2_init_security(struct inode *inode, struct inode *dir, const struct qstr *qstr) { return security_inode_init_security(inode, dir, qstr, &jffs2_initxattrs, NULL); } /* ---- XATTR Handler for "security.*" ----------------- */ static int jffs2_security_getxattr(const struct xattr_handler *handler, struct dentry *unused, struct inode *inode, const char *name, void *buffer, size_t size) { return do_jffs2_getxattr(inode, JFFS2_XPREFIX_SECURITY, name, buffer, size); } static int jffs2_security_setxattr(const struct xattr_handler *handler, struct mnt_idmap *idmap, struct dentry *unused, struct inode *inode, const char *name, const void *buffer, size_t size, int flags) { return do_jffs2_setxattr(inode, JFFS2_XPREFIX_SECURITY, name, buffer, size, flags); } const struct xattr_handler jffs2_security_xattr_handler = { .prefix = XATTR_SECURITY_PREFIX, .set = jffs2_security_setxattr, .get = jffs2_security_getxattr };
linux-master
fs/jffs2/security.c
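For context, a small user-space sketch of how the "security.*" handler above is reached: a getxattr(2) call in the security namespace is routed by the VFS xattr layer to the registered handler, i.e. jffs2_security_getxattr() on a JFFS2 mount. The mount point and the presence of a security.selinux value are assumptions here; whether anything is stored depends on the LSM in use.

#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <sys/xattr.h>

int main(void)
{
	char buf[256];
	/* hypothetical path on a JFFS2 mount */
	ssize_t len = getxattr("/mnt/jffs2/file", "security.selinux",
			       buf, sizeof(buf) - 1);

	if (len < 0) {
		fprintf(stderr, "getxattr: %s\n", strerror(errno));
		return 1;
	}
	buf[len] = '\0';
	printf("security.selinux = %s\n", buf);
	return 0;
}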
/* * JFFS2 -- Journalling Flash File System, Version 2. * * Copyright © 2001-2007 Red Hat, Inc. * * Created by David Woodhouse <[email protected]> * * For licensing information, see the file 'LICENCE' in this directory. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/fs.h> #include <linux/crc32.h> #include <linux/pagemap.h> #include <linux/mtd/mtd.h> #include "nodelist.h" #include "compr.h" int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, uint32_t mode, struct jffs2_raw_inode *ri) { struct jffs2_inode_cache *ic; ic = jffs2_alloc_inode_cache(); if (!ic) { return -ENOMEM; } memset(ic, 0, sizeof(*ic)); f->inocache = ic; f->inocache->pino_nlink = 1; /* Will be overwritten shortly for directories */ f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache; f->inocache->state = INO_STATE_PRESENT; jffs2_add_ino_cache(c, f->inocache); jffs2_dbg(1, "%s(): Assigned ino# %d\n", __func__, f->inocache->ino); ri->ino = cpu_to_je32(f->inocache->ino); ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE); ri->totlen = cpu_to_je32(PAD(sizeof(*ri))); ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4)); ri->mode = cpu_to_jemode(mode); f->highest_version = 1; ri->version = cpu_to_je32(f->highest_version); return 0; } /* jffs2_write_dnode - given a raw_inode, allocate a full_dnode for it, write it to the flash, link it into the existing inode/fragment list */ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const unsigned char *data, uint32_t datalen, int alloc_mode) { struct jffs2_full_dnode *fn; size_t retlen; uint32_t flash_ofs; struct kvec vecs[2]; int ret; int retried = 0; unsigned long cnt = 2; D1(if(je32_to_cpu(ri->hdr_crc) != crc32(0, ri, sizeof(struct jffs2_unknown_node)-4)) { pr_crit("Eep. CRC not correct in jffs2_write_dnode()\n"); BUG(); } ); vecs[0].iov_base = ri; vecs[0].iov_len = sizeof(*ri); vecs[1].iov_base = (unsigned char *)data; vecs[1].iov_len = datalen; if (je32_to_cpu(ri->totlen) != sizeof(*ri) + datalen) { pr_warn("%s(): ri->totlen (0x%08x) != sizeof(*ri) (0x%08zx) + datalen (0x%08x)\n", __func__, je32_to_cpu(ri->totlen), sizeof(*ri), datalen); } fn = jffs2_alloc_full_dnode(); if (!fn) return ERR_PTR(-ENOMEM); /* check number of valid vecs */ if (!datalen || !data) cnt = 1; retry: flash_ofs = write_ofs(c); jffs2_dbg_prewrite_paranoia_check(c, flash_ofs, vecs[0].iov_len + vecs[1].iov_len); if ((alloc_mode!=ALLOC_GC) && (je32_to_cpu(ri->version) < f->highest_version)) { BUG_ON(!retried); jffs2_dbg(1, "%s(): dnode_version %d, highest version %d -> updating dnode\n", __func__, je32_to_cpu(ri->version), f->highest_version); ri->version = cpu_to_je32(++f->highest_version); ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); } ret = jffs2_flash_writev(c, vecs, cnt, flash_ofs, &retlen, (alloc_mode==ALLOC_GC)?0:f->inocache->ino); if (ret || (retlen != sizeof(*ri) + datalen)) { pr_notice("Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n", sizeof(*ri) + datalen, flash_ofs, ret, retlen); /* Mark the space as dirtied */ if (retlen) { /* Don't change raw->size to match retlen. 
We may have written the node header already, and only the data will seem corrupted, in which case the scan would skip over any node we write before the original intended end of this node */ jffs2_add_physical_node_ref(c, flash_ofs | REF_OBSOLETE, PAD(sizeof(*ri)+datalen), NULL); } else { pr_notice("Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", flash_ofs); } if (!retried && alloc_mode != ALLOC_NORETRY) { /* Try to reallocate space and retry */ uint32_t dummy; struct jffs2_eraseblock *jeb = &c->blocks[flash_ofs / c->sector_size]; retried = 1; jffs2_dbg(1, "Retrying failed write.\n"); jffs2_dbg_acct_sanity_check(c,jeb); jffs2_dbg_acct_paranoia_check(c, jeb); if (alloc_mode == ALLOC_GC) { ret = jffs2_reserve_space_gc(c, sizeof(*ri) + datalen, &dummy, JFFS2_SUMMARY_INODE_SIZE); } else { /* Locking pain */ mutex_unlock(&f->sem); jffs2_complete_reservation(c); ret = jffs2_reserve_space(c, sizeof(*ri) + datalen, &dummy, alloc_mode, JFFS2_SUMMARY_INODE_SIZE); mutex_lock(&f->sem); } if (!ret) { flash_ofs = write_ofs(c); jffs2_dbg(1, "Allocated space at 0x%08x to retry failed write.\n", flash_ofs); jffs2_dbg_acct_sanity_check(c,jeb); jffs2_dbg_acct_paranoia_check(c, jeb); goto retry; } jffs2_dbg(1, "Failed to allocate space to retry failed write: %d!\n", ret); } /* Release the full_dnode which is now useless, and return */ jffs2_free_full_dnode(fn); return ERR_PTR(ret?ret:-EIO); } /* Mark the space used */ /* If node covers at least a whole page, or if it starts at the beginning of a page and runs to the end of the file, or if it's a hole node, mark it REF_PRISTINE, else REF_NORMAL. */ if ((je32_to_cpu(ri->dsize) >= PAGE_SIZE) || ( ((je32_to_cpu(ri->offset)&(PAGE_SIZE-1))==0) && (je32_to_cpu(ri->dsize)+je32_to_cpu(ri->offset) == je32_to_cpu(ri->isize)))) { flash_ofs |= REF_PRISTINE; } else { flash_ofs |= REF_NORMAL; } fn->raw = jffs2_add_physical_node_ref(c, flash_ofs, PAD(sizeof(*ri)+datalen), f->inocache); if (IS_ERR(fn->raw)) { void *hold_err = fn->raw; /* Release the full_dnode which is now useless, and return */ jffs2_free_full_dnode(fn); return ERR_CAST(hold_err); } fn->ofs = je32_to_cpu(ri->offset); fn->size = je32_to_cpu(ri->dsize); fn->frags = 0; jffs2_dbg(1, "jffs2_write_dnode wrote node at 0x%08x(%d) with dsize 0x%x, csize 0x%x, node_crc 0x%08x, data_crc 0x%08x, totlen 0x%08x\n", flash_ofs & ~3, flash_ofs & 3, je32_to_cpu(ri->dsize), je32_to_cpu(ri->csize), je32_to_cpu(ri->node_crc), je32_to_cpu(ri->data_crc), je32_to_cpu(ri->totlen)); if (retried) { jffs2_dbg_acct_sanity_check(c,NULL); } return fn; } struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_dirent *rd, const unsigned char *name, uint32_t namelen, int alloc_mode) { struct jffs2_full_dirent *fd; size_t retlen; struct kvec vecs[2]; uint32_t flash_ofs; int retried = 0; int ret; jffs2_dbg(1, "%s(ino #%u, name at *0x%p \"%s\"->ino #%u, name_crc 0x%08x)\n", __func__, je32_to_cpu(rd->pino), name, name, je32_to_cpu(rd->ino), je32_to_cpu(rd->name_crc)); D1(if(je32_to_cpu(rd->hdr_crc) != crc32(0, rd, sizeof(struct jffs2_unknown_node)-4)) { pr_crit("Eep. 
CRC not correct in jffs2_write_dirent()\n"); BUG(); }); if (strnlen(name, namelen) != namelen) { /* This should never happen, but seems to have done on at least one occasion: https://dev.laptop.org/ticket/4184 */ pr_crit("Error in jffs2_write_dirent() -- name contains zero bytes!\n"); pr_crit("Directory inode #%u, name at *0x%p \"%s\"->ino #%u, name_crc 0x%08x\n", je32_to_cpu(rd->pino), name, name, je32_to_cpu(rd->ino), je32_to_cpu(rd->name_crc)); WARN_ON(1); return ERR_PTR(-EIO); } vecs[0].iov_base = rd; vecs[0].iov_len = sizeof(*rd); vecs[1].iov_base = (unsigned char *)name; vecs[1].iov_len = namelen; fd = jffs2_alloc_full_dirent(namelen+1); if (!fd) return ERR_PTR(-ENOMEM); fd->version = je32_to_cpu(rd->version); fd->ino = je32_to_cpu(rd->ino); fd->nhash = full_name_hash(NULL, name, namelen); fd->type = rd->type; memcpy(fd->name, name, namelen); fd->name[namelen]=0; retry: flash_ofs = write_ofs(c); jffs2_dbg_prewrite_paranoia_check(c, flash_ofs, vecs[0].iov_len + vecs[1].iov_len); if ((alloc_mode!=ALLOC_GC) && (je32_to_cpu(rd->version) < f->highest_version)) { BUG_ON(!retried); jffs2_dbg(1, "%s(): dirent_version %d, highest version %d -> updating dirent\n", __func__, je32_to_cpu(rd->version), f->highest_version); rd->version = cpu_to_je32(++f->highest_version); fd->version = je32_to_cpu(rd->version); rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8)); } ret = jffs2_flash_writev(c, vecs, 2, flash_ofs, &retlen, (alloc_mode==ALLOC_GC)?0:je32_to_cpu(rd->pino)); if (ret || (retlen != sizeof(*rd) + namelen)) { pr_notice("Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n", sizeof(*rd) + namelen, flash_ofs, ret, retlen); /* Mark the space as dirtied */ if (retlen) { jffs2_add_physical_node_ref(c, flash_ofs | REF_OBSOLETE, PAD(sizeof(*rd)+namelen), NULL); } else { pr_notice("Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", flash_ofs); } if (!retried) { /* Try to reallocate space and retry */ uint32_t dummy; struct jffs2_eraseblock *jeb = &c->blocks[flash_ofs / c->sector_size]; retried = 1; jffs2_dbg(1, "Retrying failed write.\n"); jffs2_dbg_acct_sanity_check(c,jeb); jffs2_dbg_acct_paranoia_check(c, jeb); if (alloc_mode == ALLOC_GC) { ret = jffs2_reserve_space_gc(c, sizeof(*rd) + namelen, &dummy, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); } else { /* Locking pain */ mutex_unlock(&f->sem); jffs2_complete_reservation(c); ret = jffs2_reserve_space(c, sizeof(*rd) + namelen, &dummy, alloc_mode, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); mutex_lock(&f->sem); } if (!ret) { flash_ofs = write_ofs(c); jffs2_dbg(1, "Allocated space at 0x%08x to retry failed write\n", flash_ofs); jffs2_dbg_acct_sanity_check(c,jeb); jffs2_dbg_acct_paranoia_check(c, jeb); goto retry; } jffs2_dbg(1, "Failed to allocate space to retry failed write: %d!\n", ret); } /* Release the full_dnode which is now useless, and return */ jffs2_free_full_dirent(fd); return ERR_PTR(ret?ret:-EIO); } /* Mark the space used */ fd->raw = jffs2_add_physical_node_ref(c, flash_ofs | dirent_node_state(rd), PAD(sizeof(*rd)+namelen), f->inocache); if (IS_ERR(fd->raw)) { void *hold_err = fd->raw; /* Release the full_dirent which is now useless, and return */ jffs2_free_full_dirent(fd); return ERR_CAST(hold_err); } if (retried) { jffs2_dbg_acct_sanity_check(c,NULL); } return fd; } /* The OS-specific code fills in the metadata in the jffs2_raw_inode for us, so that we don't have to go digging in struct inode or its equivalent. 
It should set: mode, uid, gid, (starting)isize, atime, ctime, mtime */ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, unsigned char *buf, uint32_t offset, uint32_t writelen, uint32_t *retlen) { int ret = 0; uint32_t writtenlen = 0; jffs2_dbg(1, "%s(): Ino #%u, ofs 0x%x, len 0x%x\n", __func__, f->inocache->ino, offset, writelen); while(writelen) { struct jffs2_full_dnode *fn; unsigned char *comprbuf = NULL; uint16_t comprtype = JFFS2_COMPR_NONE; uint32_t alloclen; uint32_t datalen, cdatalen; int retried = 0; retry: jffs2_dbg(2, "jffs2_commit_write() loop: 0x%x to write to 0x%x\n", writelen, offset); ret = jffs2_reserve_space(c, sizeof(*ri) + JFFS2_MIN_DATA_LEN, &alloclen, ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); if (ret) { jffs2_dbg(1, "jffs2_reserve_space returned %d\n", ret); break; } mutex_lock(&f->sem); datalen = min_t(uint32_t, writelen, PAGE_SIZE - (offset & (PAGE_SIZE-1))); cdatalen = min_t(uint32_t, alloclen - sizeof(*ri), datalen); comprtype = jffs2_compress(c, f, buf, &comprbuf, &datalen, &cdatalen); ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE); ri->totlen = cpu_to_je32(sizeof(*ri) + cdatalen); ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4)); ri->ino = cpu_to_je32(f->inocache->ino); ri->version = cpu_to_je32(++f->highest_version); ri->isize = cpu_to_je32(max(je32_to_cpu(ri->isize), offset + datalen)); ri->offset = cpu_to_je32(offset); ri->csize = cpu_to_je32(cdatalen); ri->dsize = cpu_to_je32(datalen); ri->compr = comprtype & 0xff; ri->usercompr = (comprtype >> 8 ) & 0xff; ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); ri->data_crc = cpu_to_je32(crc32(0, comprbuf, cdatalen)); fn = jffs2_write_dnode(c, f, ri, comprbuf, cdatalen, ALLOC_NORETRY); jffs2_free_comprbuf(comprbuf, buf); if (IS_ERR(fn)) { ret = PTR_ERR(fn); mutex_unlock(&f->sem); jffs2_complete_reservation(c); if (!retried) { /* Write error to be retried */ retried = 1; jffs2_dbg(1, "Retrying node write in jffs2_write_inode_range()\n"); goto retry; } break; } ret = jffs2_add_full_dnode_to_inode(c, f, fn); if (f->metadata) { jffs2_mark_node_obsolete(c, f->metadata->raw); jffs2_free_full_dnode(f->metadata); f->metadata = NULL; } if (ret) { /* Eep */ jffs2_dbg(1, "Eep. add_full_dnode_to_inode() failed in commit_write, returned %d\n", ret); jffs2_mark_node_obsolete(c, fn->raw); jffs2_free_full_dnode(fn); mutex_unlock(&f->sem); jffs2_complete_reservation(c); break; } mutex_unlock(&f->sem); jffs2_complete_reservation(c); if (!datalen) { pr_warn("Eep. We didn't actually write any data in jffs2_write_inode_range()\n"); ret = -EIO; break; } jffs2_dbg(1, "increasing writtenlen by %d\n", datalen); writtenlen += datalen; offset += datalen; writelen -= datalen; buf += datalen; } *retlen = writtenlen; return ret; } int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const struct qstr *qstr) { struct jffs2_raw_dirent *rd; struct jffs2_full_dnode *fn; struct jffs2_full_dirent *fd; uint32_t alloclen; int ret; /* Try to reserve enough space for both node and dirent. 
* Just the node will do for now, though */ ret = jffs2_reserve_space(c, sizeof(*ri), &alloclen, ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); jffs2_dbg(1, "%s(): reserved 0x%x bytes\n", __func__, alloclen); if (ret) return ret; mutex_lock(&f->sem); ri->data_crc = cpu_to_je32(0); ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); fn = jffs2_write_dnode(c, f, ri, NULL, 0, ALLOC_NORMAL); jffs2_dbg(1, "jffs2_do_create created file with mode 0x%x\n", jemode_to_cpu(ri->mode)); if (IS_ERR(fn)) { jffs2_dbg(1, "jffs2_write_dnode() failed\n"); /* Eeek. Wave bye bye */ mutex_unlock(&f->sem); jffs2_complete_reservation(c); return PTR_ERR(fn); } /* No data here. Only a metadata node, which will be obsoleted by the first data write */ f->metadata = fn; mutex_unlock(&f->sem); jffs2_complete_reservation(c); ret = jffs2_init_security(&f->vfs_inode, &dir_f->vfs_inode, qstr); if (ret) return ret; ret = jffs2_init_acl_post(&f->vfs_inode); if (ret) return ret; ret = jffs2_reserve_space(c, sizeof(*rd)+qstr->len, &alloclen, ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(qstr->len)); if (ret) { /* Eep. */ jffs2_dbg(1, "jffs2_reserve_space() for dirent failed\n"); return ret; } rd = jffs2_alloc_raw_dirent(); if (!rd) { /* Argh. Now we treat it like a normal delete */ jffs2_complete_reservation(c); return -ENOMEM; } mutex_lock(&dir_f->sem); rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT); rd->totlen = cpu_to_je32(sizeof(*rd) + qstr->len); rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4)); rd->pino = cpu_to_je32(dir_f->inocache->ino); rd->version = cpu_to_je32(++dir_f->highest_version); rd->ino = ri->ino; rd->mctime = ri->ctime; rd->nsize = qstr->len; rd->type = DT_REG; rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8)); rd->name_crc = cpu_to_je32(crc32(0, qstr->name, qstr->len)); fd = jffs2_write_dirent(c, dir_f, rd, qstr->name, qstr->len, ALLOC_NORMAL); jffs2_free_raw_dirent(rd); if (IS_ERR(fd)) { /* dirent failed to write. Delete the inode normally as if it were the final unlink() */ jffs2_complete_reservation(c); mutex_unlock(&dir_f->sem); return PTR_ERR(fd); } /* Link the fd into the inode's list, obsoleting an old one if necessary. */ jffs2_add_fd_to_list(c, fd, &dir_f->dents); jffs2_complete_reservation(c); mutex_unlock(&dir_f->sem); return 0; } int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, const char *name, int namelen, struct jffs2_inode_info *dead_f, uint32_t time) { struct jffs2_raw_dirent *rd; struct jffs2_full_dirent *fd; uint32_t alloclen; int ret; if (!jffs2_can_mark_obsolete(c)) { /* We can't mark stuff obsolete on the medium. 
We need to write a deletion dirent */ rd = jffs2_alloc_raw_dirent(); if (!rd) return -ENOMEM; ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen, ALLOC_DELETION, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); if (ret) { jffs2_free_raw_dirent(rd); return ret; } mutex_lock(&dir_f->sem); /* Build a deletion node */ rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT); rd->totlen = cpu_to_je32(sizeof(*rd) + namelen); rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4)); rd->pino = cpu_to_je32(dir_f->inocache->ino); rd->version = cpu_to_je32(++dir_f->highest_version); rd->ino = cpu_to_je32(0); rd->mctime = cpu_to_je32(time); rd->nsize = namelen; rd->type = DT_UNKNOWN; rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8)); rd->name_crc = cpu_to_je32(crc32(0, name, namelen)); fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, ALLOC_DELETION); jffs2_free_raw_dirent(rd); if (IS_ERR(fd)) { jffs2_complete_reservation(c); mutex_unlock(&dir_f->sem); return PTR_ERR(fd); } /* File it. This will mark the old one obsolete. */ jffs2_add_fd_to_list(c, fd, &dir_f->dents); mutex_unlock(&dir_f->sem); } else { uint32_t nhash = full_name_hash(NULL, name, namelen); fd = dir_f->dents; /* We don't actually want to reserve any space, but we do want to be holding the alloc_sem when we write to flash */ mutex_lock(&c->alloc_sem); mutex_lock(&dir_f->sem); for (fd = dir_f->dents; fd; fd = fd->next) { if (fd->nhash == nhash && !memcmp(fd->name, name, namelen) && !fd->name[namelen]) { jffs2_dbg(1, "Marking old dirent node (ino #%u) @%08x obsolete\n", fd->ino, ref_offset(fd->raw)); jffs2_mark_node_obsolete(c, fd->raw); /* We don't want to remove it from the list immediately, because that screws up getdents()/seek() semantics even more than they're screwed already. Turn it into a node-less deletion dirent instead -- a placeholder */ fd->raw = NULL; fd->ino = 0; break; } } mutex_unlock(&dir_f->sem); } /* dead_f is NULL if this was a rename not a real unlink */ /* Also catch the !f->inocache case, where there was a dirent pointing to an inode which didn't exist. 
*/ if (dead_f && dead_f->inocache) { mutex_lock(&dead_f->sem); if (S_ISDIR(OFNI_EDONI_2SFFJ(dead_f)->i_mode)) { while (dead_f->dents) { /* There can be only deleted ones */ fd = dead_f->dents; dead_f->dents = fd->next; if (fd->ino) { pr_warn("Deleting inode #%u with active dentry \"%s\"->ino #%u\n", dead_f->inocache->ino, fd->name, fd->ino); } else { jffs2_dbg(1, "Removing deletion dirent for \"%s\" from dir ino #%u\n", fd->name, dead_f->inocache->ino); } if (fd->raw) jffs2_mark_node_obsolete(c, fd->raw); jffs2_free_full_dirent(fd); } dead_f->inocache->pino_nlink = 0; } else dead_f->inocache->pino_nlink--; /* NB: Caller must set inode nlink if appropriate */ mutex_unlock(&dead_f->sem); } jffs2_complete_reservation(c); return 0; } int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint32_t ino, uint8_t type, const char *name, int namelen, uint32_t time) { struct jffs2_raw_dirent *rd; struct jffs2_full_dirent *fd; uint32_t alloclen; int ret; rd = jffs2_alloc_raw_dirent(); if (!rd) return -ENOMEM; ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen, ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); if (ret) { jffs2_free_raw_dirent(rd); return ret; } mutex_lock(&dir_f->sem); /* Build a deletion node */ rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT); rd->totlen = cpu_to_je32(sizeof(*rd) + namelen); rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4)); rd->pino = cpu_to_je32(dir_f->inocache->ino); rd->version = cpu_to_je32(++dir_f->highest_version); rd->ino = cpu_to_je32(ino); rd->mctime = cpu_to_je32(time); rd->nsize = namelen; rd->type = type; rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8)); rd->name_crc = cpu_to_je32(crc32(0, name, namelen)); fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, ALLOC_NORMAL); jffs2_free_raw_dirent(rd); if (IS_ERR(fd)) { jffs2_complete_reservation(c); mutex_unlock(&dir_f->sem); return PTR_ERR(fd); } /* File it. This will mark the old one obsolete. */ jffs2_add_fd_to_list(c, fd, &dir_f->dents); jffs2_complete_reservation(c); mutex_unlock(&dir_f->sem); return 0; }
linux-master
fs/jffs2/write.c
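A user-space sketch of the CRC layout that write.c follows when it builds nodes: hdr_crc covers the common 12-byte node header minus its own 4-byte field, node_crc covers the raw inode/dirent minus the trailing node_crc and data_crc words, and data_crc covers the (compressed) payload. crc32_le_sketch() is intended to mirror the kernel's crc32() with a zero seed (no pre/post inversion); treat that equivalence as an assumption, and the buffer here is dummy data rather than a real on-flash node.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

static uint32_t crc32_le_sketch(uint32_t crc, const void *buf, size_t len)
{
	const uint8_t *p = buf;

	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320u : 0);
	}
	return crc;
}

int main(void)
{
	/* Pretend 'node' is a serialized raw inode: header first, payload after.
	 * write.c computes
	 *   hdr_crc  = crc32(0, ri, sizeof(struct jffs2_unknown_node) - 4)
	 *   node_crc = crc32(0, ri, sizeof(*ri) - 8)
	 *   data_crc = crc32(0, payload, csize)
	 * i.e. each CRC deliberately excludes the CRC fields it protects. */
	uint8_t node[64];
	memset(node, 0xAA, sizeof(node));

	uint32_t hdr_crc  = crc32_le_sketch(0, node, 12 - 4);
	uint32_t node_crc = crc32_le_sketch(0, node, sizeof(node) - 8);

	printf("hdr_crc=%#010x node_crc=%#010x\n",
	       (unsigned)hdr_crc, (unsigned)node_crc);
	return 0;
}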
/* * JFFS2 -- Journalling Flash File System, Version 2. * * Copyright © 2001-2007 Red Hat, Inc. * Copyright © 2004 Thomas Gleixner <[email protected]> * * Created by David Woodhouse <[email protected]> * Modified debugged and enhanced by Thomas Gleixner <[email protected]> * * For licensing information, see the file 'LICENCE' in this directory. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/slab.h> #include <linux/mtd/mtd.h> #include <linux/crc32.h> #include <linux/mtd/rawnand.h> #include <linux/jiffies.h> #include <linux/sched.h> #include <linux/writeback.h> #include "nodelist.h" /* For testing write failures */ #undef BREAKME #undef BREAKMEHEADER #ifdef BREAKME static unsigned char *brokenbuf; #endif #define PAGE_DIV(x) ( ((unsigned long)(x) / (unsigned long)(c->wbuf_pagesize)) * (unsigned long)(c->wbuf_pagesize) ) #define PAGE_MOD(x) ( (unsigned long)(x) % (unsigned long)(c->wbuf_pagesize) ) /* max. erase failures before we mark a block bad */ #define MAX_ERASE_FAILURES 2 struct jffs2_inodirty { uint32_t ino; struct jffs2_inodirty *next; }; static struct jffs2_inodirty inodirty_nomem; static int jffs2_wbuf_pending_for_ino(struct jffs2_sb_info *c, uint32_t ino) { struct jffs2_inodirty *this = c->wbuf_inodes; /* If a malloc failed, consider _everything_ dirty */ if (this == &inodirty_nomem) return 1; /* If ino == 0, _any_ non-GC writes mean 'yes' */ if (this && !ino) return 1; /* Look to see if the inode in question is pending in the wbuf */ while (this) { if (this->ino == ino) return 1; this = this->next; } return 0; } static void jffs2_clear_wbuf_ino_list(struct jffs2_sb_info *c) { struct jffs2_inodirty *this; this = c->wbuf_inodes; if (this != &inodirty_nomem) { while (this) { struct jffs2_inodirty *next = this->next; kfree(this); this = next; } } c->wbuf_inodes = NULL; } static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino) { struct jffs2_inodirty *new; /* Schedule delayed write-buffer write-out */ jffs2_dirty_trigger(c); if (jffs2_wbuf_pending_for_ino(c, ino)) return; new = kmalloc(sizeof(*new), GFP_KERNEL); if (!new) { jffs2_dbg(1, "No memory to allocate inodirty. Fallback to all considered dirty\n"); jffs2_clear_wbuf_ino_list(c); c->wbuf_inodes = &inodirty_nomem; return; } new->ino = ino; new->next = c->wbuf_inodes; c->wbuf_inodes = new; return; } static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c) { struct list_head *this, *next; static int n; if (list_empty(&c->erasable_pending_wbuf_list)) return; list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) { struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); jffs2_dbg(1, "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n", jeb->offset); list_del(this); if ((jiffies + (n++)) & 127) { /* Most of the time, we just erase it immediately. Otherwise we spend ages scanning it on mount, etc. */ jffs2_dbg(1, "...and adding to erase_pending_list\n"); list_add_tail(&jeb->list, &c->erase_pending_list); c->nr_erasing_blocks++; jffs2_garbage_collect_trigger(c); } else { /* Sometimes, however, we leave it elsewhere so it doesn't get immediately reused, and we spread the load a bit. 
*/ jffs2_dbg(1, "...and adding to erasable_list\n"); list_add_tail(&jeb->list, &c->erasable_list); } } } #define REFILE_NOTEMPTY 0 #define REFILE_ANYWAY 1 static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int allow_empty) { jffs2_dbg(1, "About to refile bad block at %08x\n", jeb->offset); /* File the existing block on the bad_used_list.... */ if (c->nextblock == jeb) c->nextblock = NULL; else /* Not sure this should ever happen... need more coffee */ list_del(&jeb->list); if (jeb->first_node) { jffs2_dbg(1, "Refiling block at %08x to bad_used_list\n", jeb->offset); list_add(&jeb->list, &c->bad_used_list); } else { BUG_ON(allow_empty == REFILE_NOTEMPTY); /* It has to have had some nodes or we couldn't be here */ jffs2_dbg(1, "Refiling block at %08x to erase_pending_list\n", jeb->offset); list_add(&jeb->list, &c->erase_pending_list); c->nr_erasing_blocks++; jffs2_garbage_collect_trigger(c); } if (!jffs2_prealloc_raw_node_refs(c, jeb, 1)) { uint32_t oldfree = jeb->free_size; jffs2_link_node_ref(c, jeb, (jeb->offset+c->sector_size-oldfree) | REF_OBSOLETE, oldfree, NULL); /* convert to wasted */ c->wasted_size += oldfree; jeb->wasted_size += oldfree; c->dirty_size -= oldfree; jeb->dirty_size -= oldfree; } jffs2_dbg_dump_block_lists_nolock(c); jffs2_dbg_acct_sanity_check_nolock(c,jeb); jffs2_dbg_acct_paranoia_check_nolock(c, jeb); } static struct jffs2_raw_node_ref **jffs2_incore_replace_raw(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_node_ref *raw, union jffs2_node_union *node) { struct jffs2_node_frag *frag; struct jffs2_full_dirent *fd; dbg_noderef("incore_replace_raw: node at %p is {%04x,%04x}\n", node, je16_to_cpu(node->u.magic), je16_to_cpu(node->u.nodetype)); BUG_ON(je16_to_cpu(node->u.magic) != 0x1985 && je16_to_cpu(node->u.magic) != 0); switch (je16_to_cpu(node->u.nodetype)) { case JFFS2_NODETYPE_INODE: if (f->metadata && f->metadata->raw == raw) { dbg_noderef("Will replace ->raw in f->metadata at %p\n", f->metadata); return &f->metadata->raw; } frag = jffs2_lookup_node_frag(&f->fragtree, je32_to_cpu(node->i.offset)); BUG_ON(!frag); /* Find a frag which refers to the full_dnode we want to modify */ while (!frag->node || frag->node->raw != raw) { frag = frag_next(frag); BUG_ON(!frag); } dbg_noderef("Will replace ->raw in full_dnode at %p\n", frag->node); return &frag->node->raw; case JFFS2_NODETYPE_DIRENT: for (fd = f->dents; fd; fd = fd->next) { if (fd->raw == raw) { dbg_noderef("Will replace ->raw in full_dirent at %p\n", fd); return &fd->raw; } } BUG(); default: dbg_noderef("Don't care about replacing raw for nodetype %x\n", je16_to_cpu(node->u.nodetype)); break; } return NULL; } #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY static int jffs2_verify_write(struct jffs2_sb_info *c, unsigned char *buf, uint32_t ofs) { int ret; size_t retlen; char *eccstr; ret = mtd_read(c->mtd, ofs, c->wbuf_pagesize, &retlen, c->wbuf_verify); if (ret && ret != -EUCLEAN && ret != -EBADMSG) { pr_warn("%s(): Read back of page at %08x failed: %d\n", __func__, c->wbuf_ofs, ret); return ret; } else if (retlen != c->wbuf_pagesize) { pr_warn("%s(): Read back of page at %08x gave short read: %zd not %d\n", __func__, ofs, retlen, c->wbuf_pagesize); return -EIO; } if (!memcmp(buf, c->wbuf_verify, c->wbuf_pagesize)) return 0; if (ret == -EUCLEAN) eccstr = "corrected"; else if (ret == -EBADMSG) eccstr = "correction failed"; else eccstr = "OK or unused"; pr_warn("Write verify error (ECC %s) at %08x. 
Wrote:\n", eccstr, c->wbuf_ofs); print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, c->wbuf, c->wbuf_pagesize, 0); pr_warn("Read back:\n"); print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, c->wbuf_verify, c->wbuf_pagesize, 0); return -EIO; } #else #define jffs2_verify_write(c,b,o) (0) #endif /* Recover from failure to write wbuf. Recover the nodes up to the * wbuf, not the one which we were starting to try to write. */ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) { struct jffs2_eraseblock *jeb, *new_jeb; struct jffs2_raw_node_ref *raw, *next, *first_raw = NULL; size_t retlen; int ret; int nr_refile = 0; unsigned char *buf; uint32_t start, end, ofs, len; jeb = &c->blocks[c->wbuf_ofs / c->sector_size]; spin_lock(&c->erase_completion_lock); if (c->wbuf_ofs % c->mtd->erasesize) jffs2_block_refile(c, jeb, REFILE_NOTEMPTY); else jffs2_block_refile(c, jeb, REFILE_ANYWAY); spin_unlock(&c->erase_completion_lock); BUG_ON(!ref_obsolete(jeb->last_node)); /* Find the first node to be recovered, by skipping over every node which ends before the wbuf starts, or which is obsolete. */ for (next = raw = jeb->first_node; next; raw = next) { next = ref_next(raw); if (ref_obsolete(raw) || (next && ref_offset(next) <= c->wbuf_ofs)) { dbg_noderef("Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n", ref_offset(raw), ref_flags(raw), (ref_offset(raw) + ref_totlen(c, jeb, raw)), c->wbuf_ofs); continue; } dbg_noderef("First node to be recovered is at 0x%08x(%d)-0x%08x\n", ref_offset(raw), ref_flags(raw), (ref_offset(raw) + ref_totlen(c, jeb, raw))); first_raw = raw; break; } if (!first_raw) { /* All nodes were obsolete. Nothing to recover. */ jffs2_dbg(1, "No non-obsolete nodes to be recovered. Just filing block bad\n"); c->wbuf_len = 0; return; } start = ref_offset(first_raw); end = ref_offset(jeb->last_node); nr_refile = 1; /* Count the number of refs which need to be copied */ while ((raw = ref_next(raw)) != jeb->last_node) nr_refile++; dbg_noderef("wbuf recover %08x-%08x (%d bytes in %d nodes)\n", start, end, end - start, nr_refile); buf = NULL; if (start < c->wbuf_ofs) { /* First affected node was already partially written. * Attempt to reread the old data into our buffer. */ buf = kmalloc(end - start, GFP_KERNEL); if (!buf) { pr_crit("Malloc failure in wbuf recovery. Data loss ensues.\n"); goto read_failed; } /* Do the read... */ ret = mtd_read(c->mtd, start, c->wbuf_ofs - start, &retlen, buf); /* ECC recovered ? */ if ((ret == -EUCLEAN || ret == -EBADMSG) && (retlen == c->wbuf_ofs - start)) ret = 0; if (ret || retlen != c->wbuf_ofs - start) { pr_crit("Old data are already lost in wbuf recovery. Data loss ensues.\n"); kfree(buf); buf = NULL; read_failed: first_raw = ref_next(first_raw); nr_refile--; while (first_raw && ref_obsolete(first_raw)) { first_raw = ref_next(first_raw); nr_refile--; } /* If this was the only node to be recovered, give up */ if (!first_raw) { c->wbuf_len = 0; return; } /* It wasn't. Go on and try to recover nodes complete in the wbuf */ start = ref_offset(first_raw); dbg_noderef("wbuf now recover %08x-%08x (%d bytes in %d nodes)\n", start, end, end - start, nr_refile); } else { /* Read succeeded. Copy the remaining data from the wbuf */ memcpy(buf + (c->wbuf_ofs - start), c->wbuf, end - c->wbuf_ofs); } } /* OK... we're to rewrite (end-start) bytes of data from first_raw onwards. Either 'buf' contains the data, or we find it in the wbuf */ /* ... 
and get an allocation of space from a shiny new block instead */ ret = jffs2_reserve_space_gc(c, end-start, &len, JFFS2_SUMMARY_NOSUM_SIZE); if (ret) { pr_warn("Failed to allocate space for wbuf recovery. Data loss ensues.\n"); kfree(buf); return; } /* The summary is not recovered, so it must be disabled for this erase block */ jffs2_sum_disable_collecting(c->summary); ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, nr_refile); if (ret) { pr_warn("Failed to allocate node refs for wbuf recovery. Data loss ensues.\n"); kfree(buf); return; } ofs = write_ofs(c); if (end-start >= c->wbuf_pagesize) { /* Need to do another write immediately, but it's possible that this is just because the wbuf itself is completely full, and there's nothing earlier read back from the flash. Hence 'buf' isn't necessarily what we're writing from. */ unsigned char *rewrite_buf = buf?:c->wbuf; uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize); jffs2_dbg(1, "Write 0x%x bytes at 0x%08x in wbuf recover\n", towrite, ofs); #ifdef BREAKMEHEADER static int breakme; if (breakme++ == 20) { pr_notice("Faking write error at 0x%08x\n", ofs); breakme = 0; mtd_write(c->mtd, ofs, towrite, &retlen, brokenbuf); ret = -EIO; } else #endif ret = mtd_write(c->mtd, ofs, towrite, &retlen, rewrite_buf); if (ret || retlen != towrite || jffs2_verify_write(c, rewrite_buf, ofs)) { /* Argh. We tried. Really we did. */ pr_crit("Recovery of wbuf failed due to a second write error\n"); kfree(buf); if (retlen) jffs2_add_physical_node_ref(c, ofs | REF_OBSOLETE, ref_totlen(c, jeb, first_raw), NULL); return; } pr_notice("Recovery of wbuf succeeded to %08x\n", ofs); c->wbuf_len = (end - start) - towrite; c->wbuf_ofs = ofs + towrite; memmove(c->wbuf, rewrite_buf + towrite, c->wbuf_len); /* Don't muck about with c->wbuf_inodes. False positives are harmless. */ } else { /* OK, now we're left with the dregs in whichever buffer we're using */ if (buf) { memcpy(c->wbuf, buf, end-start); } else { memmove(c->wbuf, c->wbuf + (start - c->wbuf_ofs), end - start); } c->wbuf_ofs = ofs; c->wbuf_len = end - start; } /* Now sort out the jffs2_raw_node_refs, moving them from the old to the next block */ new_jeb = &c->blocks[ofs / c->sector_size]; spin_lock(&c->erase_completion_lock); for (raw = first_raw; raw != jeb->last_node; raw = ref_next(raw)) { uint32_t rawlen = ref_totlen(c, jeb, raw); struct jffs2_inode_cache *ic; struct jffs2_raw_node_ref *new_ref; struct jffs2_raw_node_ref **adjust_ref = NULL; struct jffs2_inode_info *f = NULL; jffs2_dbg(1, "Refiling block of %08x at %08x(%d) to %08x\n", rawlen, ref_offset(raw), ref_flags(raw), ofs); ic = jffs2_raw_ref_to_ic(raw); /* Ick. This XATTR mess should be fixed shortly... 
*/ if (ic && ic->class == RAWNODE_CLASS_XATTR_DATUM) { struct jffs2_xattr_datum *xd = (void *)ic; BUG_ON(xd->node != raw); adjust_ref = &xd->node; raw->next_in_ino = NULL; ic = NULL; } else if (ic && ic->class == RAWNODE_CLASS_XATTR_REF) { struct jffs2_xattr_datum *xr = (void *)ic; BUG_ON(xr->node != raw); adjust_ref = &xr->node; raw->next_in_ino = NULL; ic = NULL; } else if (ic && ic->class == RAWNODE_CLASS_INODE_CACHE) { struct jffs2_raw_node_ref **p = &ic->nodes; /* Remove the old node from the per-inode list */ while (*p && *p != (void *)ic) { if (*p == raw) { (*p) = (raw->next_in_ino); raw->next_in_ino = NULL; break; } p = &((*p)->next_in_ino); } if (ic->state == INO_STATE_PRESENT && !ref_obsolete(raw)) { /* If it's an in-core inode, then we have to adjust any full_dirent or full_dnode structure to point to the new version instead of the old */ f = jffs2_gc_fetch_inode(c, ic->ino, !ic->pino_nlink); if (IS_ERR(f)) { /* Should never happen; it _must_ be present */ JFFS2_ERROR("Failed to iget() ino #%u, err %ld\n", ic->ino, PTR_ERR(f)); BUG(); } /* We don't lock f->sem. There's a number of ways we could end up in here with it already being locked, and nobody's going to modify it on us anyway because we hold the alloc_sem. We're only changing one ->raw pointer too, which we can get away with without upsetting readers. */ adjust_ref = jffs2_incore_replace_raw(c, f, raw, (void *)(buf?:c->wbuf) + (ref_offset(raw) - start)); } else if (unlikely(ic->state != INO_STATE_PRESENT && ic->state != INO_STATE_CHECKEDABSENT && ic->state != INO_STATE_GC)) { JFFS2_ERROR("Inode #%u is in strange state %d!\n", ic->ino, ic->state); BUG(); } } new_ref = jffs2_link_node_ref(c, new_jeb, ofs | ref_flags(raw), rawlen, ic); if (adjust_ref) { BUG_ON(*adjust_ref != raw); *adjust_ref = new_ref; } if (f) jffs2_gc_release_inode(c, f); if (!ref_obsolete(raw)) { jeb->dirty_size += rawlen; jeb->used_size -= rawlen; c->dirty_size += rawlen; c->used_size -= rawlen; raw->flash_offset = ref_offset(raw) | REF_OBSOLETE; BUG_ON(raw->next_in_ino); } ofs += rawlen; } kfree(buf); /* Fix up the original jeb now it's on the bad_list */ if (first_raw == jeb->first_node) { jffs2_dbg(1, "Failing block at %08x is now empty. Moving to erase_pending_list\n", jeb->offset); list_move(&jeb->list, &c->erase_pending_list); c->nr_erasing_blocks++; jffs2_garbage_collect_trigger(c); } jffs2_dbg_acct_sanity_check_nolock(c, jeb); jffs2_dbg_acct_paranoia_check_nolock(c, jeb); jffs2_dbg_acct_sanity_check_nolock(c, new_jeb); jffs2_dbg_acct_paranoia_check_nolock(c, new_jeb); spin_unlock(&c->erase_completion_lock); jffs2_dbg(1, "wbuf recovery completed OK. wbuf_ofs 0x%08x, len 0x%x\n", c->wbuf_ofs, c->wbuf_len); } /* Meaning of pad argument: 0: Do not pad. Probably pointless - we only ever use this when we can't pad anyway. 1: Pad, do not adjust nextblock free_size 2: Pad, adjust nextblock free_size */ #define NOPAD 0 #define PAD_NOACCOUNT 1 #define PAD_ACCOUNTING 2 static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad) { struct jffs2_eraseblock *wbuf_jeb; int ret; size_t retlen; /* Nothing to do if not write-buffering the flash. In particular, we shouldn't del_timer() the timer we never initialised. 
*/ if (!jffs2_is_writebuffered(c)) return 0; if (!mutex_is_locked(&c->alloc_sem)) { pr_crit("jffs2_flush_wbuf() called with alloc_sem not locked!\n"); BUG(); } if (!c->wbuf_len) /* already checked c->wbuf above */ return 0; wbuf_jeb = &c->blocks[c->wbuf_ofs / c->sector_size]; if (jffs2_prealloc_raw_node_refs(c, wbuf_jeb, c->nextblock->allocated_refs + 1)) return -ENOMEM; /* claim remaining space on the page this happens, if we have a change to a new block, or if fsync forces us to flush the writebuffer. if we have a switch to next page, we will not have enough remaining space for this. */ if (pad ) { c->wbuf_len = PAD(c->wbuf_len); /* Pad with JFFS2_DIRTY_BITMASK initially. this helps out ECC'd NOR with 8 byte page size */ memset(c->wbuf + c->wbuf_len, 0, c->wbuf_pagesize - c->wbuf_len); if ( c->wbuf_len + sizeof(struct jffs2_unknown_node) < c->wbuf_pagesize) { struct jffs2_unknown_node *padnode = (void *)(c->wbuf + c->wbuf_len); padnode->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); padnode->nodetype = cpu_to_je16(JFFS2_NODETYPE_PADDING); padnode->totlen = cpu_to_je32(c->wbuf_pagesize - c->wbuf_len); padnode->hdr_crc = cpu_to_je32(crc32(0, padnode, sizeof(*padnode)-4)); } } /* else jffs2_flash_writev has actually filled in the rest of the buffer for us, and will deal with the node refs etc. later. */ #ifdef BREAKME static int breakme; if (breakme++ == 20) { pr_notice("Faking write error at 0x%08x\n", c->wbuf_ofs); breakme = 0; mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, brokenbuf); ret = -EIO; } else #endif ret = mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf); if (ret) { pr_warn("jffs2_flush_wbuf(): Write failed with %d\n", ret); goto wfail; } else if (retlen != c->wbuf_pagesize) { pr_warn("jffs2_flush_wbuf(): Write was short: %zd instead of %d\n", retlen, c->wbuf_pagesize); ret = -EIO; goto wfail; } else if ((ret = jffs2_verify_write(c, c->wbuf, c->wbuf_ofs))) { wfail: jffs2_wbuf_recover(c); return ret; } /* Adjust free size of the block if we padded. */ if (pad) { uint32_t waste = c->wbuf_pagesize - c->wbuf_len; jffs2_dbg(1, "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n", (wbuf_jeb == c->nextblock) ? "next" : "", wbuf_jeb->offset); /* wbuf_pagesize - wbuf_len is the amount of space that's to be padded. If there is less free space in the block than that, something screwed up */ if (wbuf_jeb->free_size < waste) { pr_crit("jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n", c->wbuf_ofs, c->wbuf_len, waste); pr_crit("jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n", wbuf_jeb->offset, wbuf_jeb->free_size); BUG(); } spin_lock(&c->erase_completion_lock); jffs2_link_node_ref(c, wbuf_jeb, (c->wbuf_ofs + c->wbuf_len) | REF_OBSOLETE, waste, NULL); /* FIXME: that made it count as dirty. Convert to wasted */ wbuf_jeb->dirty_size -= waste; c->dirty_size -= waste; wbuf_jeb->wasted_size += waste; c->wasted_size += waste; } else spin_lock(&c->erase_completion_lock); /* Stick any now-obsoleted blocks on the erase_pending_list */ jffs2_refile_wbuf_blocks(c); jffs2_clear_wbuf_ino_list(c); spin_unlock(&c->erase_completion_lock); memset(c->wbuf,0xff,c->wbuf_pagesize); /* adjust write buffer offset, else we get a non contiguous write bug */ c->wbuf_ofs += c->wbuf_pagesize; c->wbuf_len = 0; return 0; } /* Trigger garbage collection to flush the write-buffer. If ino arg is zero, do it if _any_ real (i.e. not GC) writes are outstanding. 
If ino arg non-zero, do it only if a write for the given inode is outstanding. */ int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino) { uint32_t old_wbuf_ofs; uint32_t old_wbuf_len; int ret = 0; jffs2_dbg(1, "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino); if (!c->wbuf) return 0; mutex_lock(&c->alloc_sem); if (!jffs2_wbuf_pending_for_ino(c, ino)) { jffs2_dbg(1, "Ino #%d not pending in wbuf. Returning\n", ino); mutex_unlock(&c->alloc_sem); return 0; } old_wbuf_ofs = c->wbuf_ofs; old_wbuf_len = c->wbuf_len; if (c->unchecked_size) { /* GC won't make any progress for a while */ jffs2_dbg(1, "%s(): padding. Not finished checking\n", __func__); down_write(&c->wbuf_sem); ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING); /* retry flushing wbuf in case jffs2_wbuf_recover left some data in the wbuf */ if (ret) ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING); up_write(&c->wbuf_sem); } else while (old_wbuf_len && old_wbuf_ofs == c->wbuf_ofs) { mutex_unlock(&c->alloc_sem); jffs2_dbg(1, "%s(): calls gc pass\n", __func__); ret = jffs2_garbage_collect_pass(c); if (ret) { /* GC failed. Flush it with padding instead */ mutex_lock(&c->alloc_sem); down_write(&c->wbuf_sem); ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING); /* retry flushing wbuf in case jffs2_wbuf_recover left some data in the wbuf */ if (ret) ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING); up_write(&c->wbuf_sem); break; } mutex_lock(&c->alloc_sem); } jffs2_dbg(1, "%s(): ends...\n", __func__); mutex_unlock(&c->alloc_sem); return ret; } /* Pad write-buffer to end and write it, wasting space. */ int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c) { int ret; if (!c->wbuf) return 0; down_write(&c->wbuf_sem); ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT); /* retry - maybe wbuf recover left some data in wbuf. */ if (ret) ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT); up_write(&c->wbuf_sem); return ret; } static size_t jffs2_fill_wbuf(struct jffs2_sb_info *c, const uint8_t *buf, size_t len) { if (len && !c->wbuf_len && (len >= c->wbuf_pagesize)) return 0; if (len > (c->wbuf_pagesize - c->wbuf_len)) len = c->wbuf_pagesize - c->wbuf_len; memcpy(c->wbuf + c->wbuf_len, buf, len); c->wbuf_len += (uint32_t) len; return len; } int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsigned long count, loff_t to, size_t *retlen, uint32_t ino) { struct jffs2_eraseblock *jeb; size_t wbuf_retlen, donelen = 0; uint32_t outvec_to = to; int ret, invec; /* If not writebuffered flash, don't bother */ if (!jffs2_is_writebuffered(c)) return jffs2_flash_direct_writev(c, invecs, count, to, retlen); down_write(&c->wbuf_sem); /* If wbuf_ofs is not initialized, set it to target address */ if (c->wbuf_ofs == 0xFFFFFFFF) { c->wbuf_ofs = PAGE_DIV(to); c->wbuf_len = PAGE_MOD(to); memset(c->wbuf,0xff,c->wbuf_pagesize); } /* * Sanity checks on target address. It's permitted to write * at PAD(c->wbuf_len+c->wbuf_ofs), and it's permitted to * write at the beginning of a new erase block. Anything else, * and you die. New block starts at xxx000c (0-b = block * header) */ if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) { /* It's a write to a new block */ if (c->wbuf_len) { jffs2_dbg(1, "%s(): to 0x%lx causes flush of wbuf at 0x%08x\n", __func__, (unsigned long)to, c->wbuf_ofs); ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT); if (ret) goto outerr; } /* set pointer to new block */ c->wbuf_ofs = PAGE_DIV(to); c->wbuf_len = PAGE_MOD(to); } if (to != PAD(c->wbuf_ofs + c->wbuf_len)) { /* We're not writing immediately after the writebuffer. Bad. 
*/ pr_crit("%s(): Non-contiguous write to %08lx\n", __func__, (unsigned long)to); if (c->wbuf_len) pr_crit("wbuf was previously %08x-%08x\n", c->wbuf_ofs, c->wbuf_ofs + c->wbuf_len); BUG(); } /* adjust alignment offset */ if (c->wbuf_len != PAGE_MOD(to)) { c->wbuf_len = PAGE_MOD(to); /* take care of alignment to next page */ if (!c->wbuf_len) { c->wbuf_len = c->wbuf_pagesize; ret = __jffs2_flush_wbuf(c, NOPAD); if (ret) goto outerr; } } for (invec = 0; invec < count; invec++) { int vlen = invecs[invec].iov_len; uint8_t *v = invecs[invec].iov_base; wbuf_retlen = jffs2_fill_wbuf(c, v, vlen); if (c->wbuf_len == c->wbuf_pagesize) { ret = __jffs2_flush_wbuf(c, NOPAD); if (ret) goto outerr; } vlen -= wbuf_retlen; outvec_to += wbuf_retlen; donelen += wbuf_retlen; v += wbuf_retlen; if (vlen >= c->wbuf_pagesize) { ret = mtd_write(c->mtd, outvec_to, PAGE_DIV(vlen), &wbuf_retlen, v); if (ret < 0 || wbuf_retlen != PAGE_DIV(vlen)) goto outfile; vlen -= wbuf_retlen; outvec_to += wbuf_retlen; c->wbuf_ofs = outvec_to; donelen += wbuf_retlen; v += wbuf_retlen; } wbuf_retlen = jffs2_fill_wbuf(c, v, vlen); if (c->wbuf_len == c->wbuf_pagesize) { ret = __jffs2_flush_wbuf(c, NOPAD); if (ret) goto outerr; } outvec_to += wbuf_retlen; donelen += wbuf_retlen; } /* * If there's a remainder in the wbuf and it's a non-GC write, * remember that the wbuf affects this ino */ *retlen = donelen; if (jffs2_sum_active()) { int res = jffs2_sum_add_kvec(c, invecs, count, (uint32_t) to); if (res) return res; } if (c->wbuf_len && ino) jffs2_wbuf_dirties_inode(c, ino); ret = 0; up_write(&c->wbuf_sem); return ret; outfile: /* * At this point we have no problem, c->wbuf is empty. However * refile nextblock to avoid writing again to same address. */ spin_lock(&c->erase_completion_lock); jeb = &c->blocks[outvec_to / c->sector_size]; jffs2_block_refile(c, jeb, REFILE_ANYWAY); spin_unlock(&c->erase_completion_lock); outerr: *retlen = 0; up_write(&c->wbuf_sem); return ret; } /* * This is the entry for flash write. * Check, if we work on NAND FLASH, if so build an kvec and write it via vritev */ int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, const u_char *buf) { struct kvec vecs[1]; if (!jffs2_is_writebuffered(c)) return jffs2_flash_direct_write(c, ofs, len, retlen, buf); vecs[0].iov_base = (unsigned char *) buf; vecs[0].iov_len = len; return jffs2_flash_writev(c, vecs, 1, ofs, retlen, 0); } /* Handle readback from writebuffer and ECC failure return */ int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, u_char *buf) { loff_t orbf = 0, owbf = 0, lwbf = 0; int ret; if (!jffs2_is_writebuffered(c)) return mtd_read(c->mtd, ofs, len, retlen, buf); /* Read flash */ down_read(&c->wbuf_sem); ret = mtd_read(c->mtd, ofs, len, retlen, buf); if ( (ret == -EBADMSG || ret == -EUCLEAN) && (*retlen == len) ) { if (ret == -EBADMSG) pr_warn("mtd->read(0x%zx bytes from 0x%llx) returned ECC error\n", len, ofs); /* * We have the raw data without ECC correction in the buffer, * maybe we are lucky and all data or parts are correct. We * check the node. If data are corrupted node check will sort * it out. We keep this block, it will fail on write or erase * and the we mark it bad. Or should we do that now? But we * should give him a chance. Maybe we had a system crash or * power loss before the ecc write or a erase was completed. * So we return success. 
:) */ ret = 0; } /* if no writebuffer available or write buffer empty, return */ if (!c->wbuf_pagesize || !c->wbuf_len) goto exit; /* if we read in a different block, return */ if (SECTOR_ADDR(ofs) != SECTOR_ADDR(c->wbuf_ofs)) goto exit; if (ofs >= c->wbuf_ofs) { owbf = (ofs - c->wbuf_ofs); /* offset in write buffer */ if (owbf > c->wbuf_len) /* is read beyond write buffer ? */ goto exit; lwbf = c->wbuf_len - owbf; /* number of bytes to copy */ if (lwbf > len) lwbf = len; } else { orbf = (c->wbuf_ofs - ofs); /* offset in read buffer */ if (orbf > len) /* is write beyond write buffer ? */ goto exit; lwbf = len - orbf; /* number of bytes to copy */ if (lwbf > c->wbuf_len) lwbf = c->wbuf_len; } if (lwbf > 0) memcpy(buf+orbf,c->wbuf+owbf,lwbf); exit: up_read(&c->wbuf_sem); return ret; } #define NR_OOB_SCAN_PAGES 4 /* For historical reasons we use only 8 bytes for OOB clean marker */ #define OOB_CM_SIZE 8 static const struct jffs2_unknown_node oob_cleanmarker = { .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK), .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER), .totlen = constant_cpu_to_je32(8) }; /* * Check, if the out of band area is empty. This function knows about the clean * marker and if it is present in OOB, treats the OOB as empty anyway. */ int jffs2_check_oob_empty(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int mode) { int i, ret; int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE); struct mtd_oob_ops ops = { }; ops.mode = MTD_OPS_AUTO_OOB; ops.ooblen = NR_OOB_SCAN_PAGES * c->oobavail; ops.oobbuf = c->oobbuf; ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0; ops.datbuf = NULL; ret = mtd_read_oob(c->mtd, jeb->offset, &ops); if ((ret && !mtd_is_bitflip(ret)) || ops.oobretlen != ops.ooblen) { pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n", jeb->offset, ops.ooblen, ops.oobretlen, ret); if (!ret || mtd_is_bitflip(ret)) ret = -EIO; return ret; } for(i = 0; i < ops.ooblen; i++) { if (mode && i < cmlen) /* Yeah, we know about the cleanmarker */ continue; if (ops.oobbuf[i] != 0xFF) { jffs2_dbg(2, "Found %02x at %x in OOB for " "%08x\n", ops.oobbuf[i], i, jeb->offset); return 1; } } return 0; } /* * Check for a valid cleanmarker. 
* Returns: 0 if a valid cleanmarker was found * 1 if no cleanmarker was found * negative error code if an error occurred */ int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) { struct mtd_oob_ops ops = { }; int ret, cmlen = min_t(int, c->oobavail, OOB_CM_SIZE); ops.mode = MTD_OPS_AUTO_OOB; ops.ooblen = cmlen; ops.oobbuf = c->oobbuf; ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0; ops.datbuf = NULL; ret = mtd_read_oob(c->mtd, jeb->offset, &ops); if ((ret && !mtd_is_bitflip(ret)) || ops.oobretlen != ops.ooblen) { pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n", jeb->offset, ops.ooblen, ops.oobretlen, ret); if (!ret || mtd_is_bitflip(ret)) ret = -EIO; return ret; } return !!memcmp(&oob_cleanmarker, c->oobbuf, cmlen); } int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) { int ret; struct mtd_oob_ops ops = { }; int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE); ops.mode = MTD_OPS_AUTO_OOB; ops.ooblen = cmlen; ops.oobbuf = (uint8_t *)&oob_cleanmarker; ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0; ops.datbuf = NULL; ret = mtd_write_oob(c->mtd, jeb->offset, &ops); if (ret || ops.oobretlen != ops.ooblen) { pr_err("cannot write OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n", jeb->offset, ops.ooblen, ops.oobretlen, ret); if (!ret) ret = -EIO; return ret; } return 0; } /* * On NAND we try to mark this block bad. If the block was erased more * than MAX_ERASE_FAILURES we mark it finally bad. * Don't care about failures. This block remains on the erase-pending * or badblock list as long as nobody manipulates the flash with * a bootloader or something like that. */ int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset) { int ret; /* if the count is < max, we try to write the counter to the 2nd page oob area */ if( ++jeb->bad_count < MAX_ERASE_FAILURES) return 0; pr_warn("marking eraseblock at %08x as bad\n", bad_offset); ret = mtd_block_markbad(c->mtd, bad_offset); if (ret) { jffs2_dbg(1, "%s(): Write failed for block at %08x: error %d\n", __func__, jeb->offset, ret); return ret; } return 1; } static struct jffs2_sb_info *work_to_sb(struct work_struct *work) { struct delayed_work *dwork; dwork = to_delayed_work(work); return container_of(dwork, struct jffs2_sb_info, wbuf_dwork); } static void delayed_wbuf_sync(struct work_struct *work) { struct jffs2_sb_info *c = work_to_sb(work); struct super_block *sb = OFNI_BS_2SFFJ(c); if (!sb_rdonly(sb)) { jffs2_dbg(1, "%s()\n", __func__); jffs2_flush_wbuf_gc(c, 0); } } void jffs2_dirty_trigger(struct jffs2_sb_info *c) { struct super_block *sb = OFNI_BS_2SFFJ(c); unsigned long delay; if (sb_rdonly(sb)) return; delay = msecs_to_jiffies(dirty_writeback_interval * 10); if (queue_delayed_work(system_long_wq, &c->wbuf_dwork, delay)) jffs2_dbg(1, "%s()\n", __func__); } int jffs2_nand_flash_setup(struct jffs2_sb_info *c) { if (!c->mtd->oobsize) return 0; /* Cleanmarker is out-of-band, so inline size zero */ c->cleanmarker_size = 0; if (c->mtd->oobavail == 0) { pr_err("inconsistent device description\n"); return -EINVAL; } jffs2_dbg(1, "using OOB on NAND\n"); c->oobavail = c->mtd->oobavail; /* Initialise write buffer */ init_rwsem(&c->wbuf_sem); INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync); c->wbuf_pagesize = c->mtd->writesize; c->wbuf_ofs = 0xFFFFFFFF; c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL); if (!c->wbuf) return -ENOMEM; c->oobbuf = 
kmalloc_array(NR_OOB_SCAN_PAGES, c->oobavail, GFP_KERNEL); if (!c->oobbuf) { kfree(c->wbuf); return -ENOMEM; } #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL); if (!c->wbuf_verify) { kfree(c->oobbuf); kfree(c->wbuf); return -ENOMEM; } #endif return 0; } void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c) { #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY kfree(c->wbuf_verify); #endif kfree(c->wbuf); kfree(c->oobbuf); } int jffs2_dataflash_setup(struct jffs2_sb_info *c) { c->cleanmarker_size = 0; /* No cleanmarkers needed */ /* Initialize write buffer */ init_rwsem(&c->wbuf_sem); INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync); c->wbuf_pagesize = c->mtd->erasesize; /* Find a suitable c->sector_size * - Not too much sectors * - Sectors have to be at least 4 K + some bytes * - All known dataflashes have erase sizes of 528 or 1056 * - we take at least 8 eraseblocks and want to have at least 8K size * - The concatenation should be a power of 2 */ c->sector_size = 8 * c->mtd->erasesize; while (c->sector_size < 8192) { c->sector_size *= 2; } /* It may be necessary to adjust the flash size */ c->flash_size = c->mtd->size; if ((c->flash_size % c->sector_size) != 0) { c->flash_size = (c->flash_size / c->sector_size) * c->sector_size; pr_warn("flash size adjusted to %dKiB\n", c->flash_size); } c->wbuf_ofs = 0xFFFFFFFF; c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL); if (!c->wbuf) return -ENOMEM; #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL); if (!c->wbuf_verify) { kfree(c->wbuf); return -ENOMEM; } #endif pr_info("write-buffering enabled buffer (%d) erasesize (%d)\n", c->wbuf_pagesize, c->sector_size); return 0; } void jffs2_dataflash_cleanup(struct jffs2_sb_info *c) { #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY kfree(c->wbuf_verify); #endif kfree(c->wbuf); } int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) { /* Cleanmarker currently occupies whole programming regions, * either one or 2 for 8Byte STMicro flashes. */ c->cleanmarker_size = max(16u, c->mtd->writesize); /* Initialize write buffer */ init_rwsem(&c->wbuf_sem); INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync); c->wbuf_pagesize = c->mtd->writesize; c->wbuf_ofs = 0xFFFFFFFF; c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL); if (!c->wbuf) return -ENOMEM; #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL); if (!c->wbuf_verify) { kfree(c->wbuf); return -ENOMEM; } #endif return 0; } void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c) { #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY kfree(c->wbuf_verify); #endif kfree(c->wbuf); } int jffs2_ubivol_setup(struct jffs2_sb_info *c) { c->cleanmarker_size = 0; if (c->mtd->writesize == 1) /* We do not need write-buffer */ return 0; init_rwsem(&c->wbuf_sem); INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync); c->wbuf_pagesize = c->mtd->writesize; c->wbuf_ofs = 0xFFFFFFFF; c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL); if (!c->wbuf) return -ENOMEM; pr_info("write-buffering enabled buffer (%d) erasesize (%d)\n", c->wbuf_pagesize, c->sector_size); return 0; } void jffs2_ubivol_cleanup(struct jffs2_sb_info *c) { kfree(c->wbuf); }
linux-master
fs/jffs2/wbuf.c
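To make the write-buffer arithmetic in wbuf.c above easier to follow, here is a small illustrative userspace model (an assumption-laden sketch, not kernel code). It reproduces the PAGE_DIV/PAGE_MOD macros and the fill-then-flush pattern of jffs2_fill_wbuf() and __jffs2_flush_wbuf(), but uses a tiny hypothetical page size, a printf in place of mtd_write(), folds the caller's flush check into the fill helper, and omits the direct-write fast path for already-aligned full pages.

/* Userspace model of staging writes in a page-sized buffer and flushing
 * whenever exactly one flash page has accumulated. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define WBUF_PAGESIZE 16u   /* tiny page size so the trace is easy to read */
#define PAGE_DIV(x) (((uint32_t)(x) / WBUF_PAGESIZE) * WBUF_PAGESIZE)
#define PAGE_MOD(x) ((uint32_t)(x) % WBUF_PAGESIZE)

static uint8_t wbuf[WBUF_PAGESIZE];
static uint32_t wbuf_ofs, wbuf_len;

static void flush_wbuf(void)
{
	/* Stand-in for mtd_write() of one full page at wbuf_ofs. */
	printf("flush %u bytes at 0x%02x\n", WBUF_PAGESIZE, (unsigned)wbuf_ofs);
	memset(wbuf, 0xff, sizeof(wbuf));
	wbuf_ofs += WBUF_PAGESIZE;
	wbuf_len = 0;
}

/* Mirrors jffs2_fill_wbuf(): copy as much of 'buf' as fits in the page buffer. */
static size_t fill_wbuf(const uint8_t *buf, size_t len)
{
	if (len > WBUF_PAGESIZE - wbuf_len)
		len = WBUF_PAGESIZE - wbuf_len;
	memcpy(wbuf + wbuf_len, buf, len);
	wbuf_len += (uint32_t)len;
	if (wbuf_len == WBUF_PAGESIZE)
		flush_wbuf();
	return len;
}

int main(void)
{
	uint8_t node[40];
	size_t done = 0;

	memset(node, 0xab, sizeof(node));
	memset(wbuf, 0xff, sizeof(wbuf));
	wbuf_ofs = PAGE_DIV(0x20);        /* pretend the write starts at 0x20 */
	wbuf_len = PAGE_MOD(0x20);

	while (done < sizeof(node))
		done += fill_wbuf(node + done, sizeof(node) - done);

	printf("left in wbuf: %u bytes at 0x%02x\n",
	       (unsigned)wbuf_len, (unsigned)wbuf_ofs);
	return 0;
}

Running this with a 40-byte "node" starting at offset 0x20 prints two page flushes and leaves 8 bytes staged for the next write, which is the same accumulate-and-carry-over pattern jffs2_flash_writev() relies on.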
/* * JFFS2 -- Journalling Flash File System, Version 2. * * Copyright © 2001-2007 Red Hat, Inc. * Copyright © 2004-2010 David Woodhouse <[email protected]> * * Created by David Woodhouse <[email protected]> * * For licensing information, see the file 'LICENCE' in this directory. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/mtd/mtd.h> #include <linux/slab.h> #include <linux/pagemap.h> #include <linux/crc32.h> #include <linux/compiler.h> #include <linux/stat.h> #include "nodelist.h" #include "compr.h" static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, struct jffs2_raw_node_ref *raw); static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, struct jffs2_inode_info *f, struct jffs2_full_dnode *fd); static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, struct jffs2_inode_info *f, struct jffs2_full_dirent *fd); static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, struct jffs2_inode_info *f, struct jffs2_full_dirent *fd); static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn, uint32_t start, uint32_t end); static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn, uint32_t start, uint32_t end); static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, struct jffs2_raw_node_ref *raw, struct jffs2_inode_info *f); /* Called with erase_completion_lock held */ static struct jffs2_eraseblock *jffs2_find_gc_block(struct jffs2_sb_info *c) { struct jffs2_eraseblock *ret; struct list_head *nextlist = NULL; int n = jiffies % 128; /* Pick an eraseblock to garbage collect next. This is where we'll put the clever wear-levelling algorithms. Eventually. */ /* We possibly want to favour the dirtier blocks more when the number of free blocks is low. */ again: if (!list_empty(&c->bad_used_list) && c->nr_free_blocks > c->resv_blocks_gcbad) { jffs2_dbg(1, "Picking block from bad_used_list to GC next\n"); nextlist = &c->bad_used_list; } else if (n < 50 && !list_empty(&c->erasable_list)) { /* Note that most of them will have gone directly to be erased. So don't favour the erasable_list _too_ much. 
*/ jffs2_dbg(1, "Picking block from erasable_list to GC next\n"); nextlist = &c->erasable_list; } else if (n < 110 && !list_empty(&c->very_dirty_list)) { /* Most of the time, pick one off the very_dirty list */ jffs2_dbg(1, "Picking block from very_dirty_list to GC next\n"); nextlist = &c->very_dirty_list; } else if (n < 126 && !list_empty(&c->dirty_list)) { jffs2_dbg(1, "Picking block from dirty_list to GC next\n"); nextlist = &c->dirty_list; } else if (!list_empty(&c->clean_list)) { jffs2_dbg(1, "Picking block from clean_list to GC next\n"); nextlist = &c->clean_list; } else if (!list_empty(&c->dirty_list)) { jffs2_dbg(1, "Picking block from dirty_list to GC next (clean_list was empty)\n"); nextlist = &c->dirty_list; } else if (!list_empty(&c->very_dirty_list)) { jffs2_dbg(1, "Picking block from very_dirty_list to GC next (clean_list and dirty_list were empty)\n"); nextlist = &c->very_dirty_list; } else if (!list_empty(&c->erasable_list)) { jffs2_dbg(1, "Picking block from erasable_list to GC next (clean_list and {very_,}dirty_list were empty)\n"); nextlist = &c->erasable_list; } else if (!list_empty(&c->erasable_pending_wbuf_list)) { /* There are blocks are wating for the wbuf sync */ jffs2_dbg(1, "Synching wbuf in order to reuse erasable_pending_wbuf_list blocks\n"); spin_unlock(&c->erase_completion_lock); jffs2_flush_wbuf_pad(c); spin_lock(&c->erase_completion_lock); goto again; } else { /* Eep. All were empty */ jffs2_dbg(1, "No clean, dirty _or_ erasable blocks to GC from! Where are they all?\n"); return NULL; } ret = list_entry(nextlist->next, struct jffs2_eraseblock, list); list_del(&ret->list); c->gcblock = ret; ret->gc_node = ret->first_node; if (!ret->gc_node) { pr_warn("Eep. ret->gc_node for block at 0x%08x is NULL\n", ret->offset); BUG(); } /* Have we accidentally picked a clean block with wasted space ? */ if (ret->wasted_size) { jffs2_dbg(1, "Converting wasted_size %08x to dirty_size\n", ret->wasted_size); ret->dirty_size += ret->wasted_size; c->wasted_size -= ret->wasted_size; c->dirty_size += ret->wasted_size; ret->wasted_size = 0; } return ret; } /* jffs2_garbage_collect_pass * Make a single attempt to progress GC. Move one node, and possibly * start erasing one eraseblock. */ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) { struct jffs2_inode_info *f; struct jffs2_inode_cache *ic; struct jffs2_eraseblock *jeb; struct jffs2_raw_node_ref *raw; uint32_t gcblock_dirty; int ret = 0, inum, nlink; int xattr = 0; if (mutex_lock_interruptible(&c->alloc_sem)) return -EINTR; for (;;) { /* We can't start doing GC until we've finished checking the node CRCs etc. */ int bucket, want_ino; spin_lock(&c->erase_completion_lock); if (!c->unchecked_size) break; spin_unlock(&c->erase_completion_lock); if (!xattr) xattr = jffs2_verify_xattr(c); spin_lock(&c->inocache_lock); /* Instead of doing the inodes in numeric order, doing a lookup * in the hash for each possible number, just walk the hash * buckets of *existing* inodes. This means that we process * them out-of-order, but it can be a lot faster if there's * a sparse inode# space. Which there often is. 
*/ want_ino = c->check_ino; for (bucket = c->check_ino % c->inocache_hashsize ; bucket < c->inocache_hashsize; bucket++) { for (ic = c->inocache_list[bucket]; ic; ic = ic->next) { if (ic->ino < want_ino) continue; if (ic->state != INO_STATE_CHECKEDABSENT && ic->state != INO_STATE_PRESENT) goto got_next; /* with inocache_lock held */ jffs2_dbg(1, "Skipping ino #%u already checked\n", ic->ino); } want_ino = 0; } /* Point c->check_ino past the end of the last bucket. */ c->check_ino = ((c->highest_ino + c->inocache_hashsize + 1) & ~c->inocache_hashsize) - 1; spin_unlock(&c->inocache_lock); pr_crit("Checked all inodes but still 0x%x bytes of unchecked space?\n", c->unchecked_size); jffs2_dbg_dump_block_lists_nolock(c); mutex_unlock(&c->alloc_sem); return -ENOSPC; got_next: /* For next time round the loop, we want c->checked_ino to indicate * the *next* one we want to check. And since we're walking the * buckets rather than doing it sequentially, it's: */ c->check_ino = ic->ino + c->inocache_hashsize; if (!ic->pino_nlink) { jffs2_dbg(1, "Skipping check of ino #%d with nlink/pino zero\n", ic->ino); spin_unlock(&c->inocache_lock); jffs2_xattr_delete_inode(c, ic); continue; } switch(ic->state) { case INO_STATE_CHECKEDABSENT: case INO_STATE_PRESENT: spin_unlock(&c->inocache_lock); continue; case INO_STATE_GC: case INO_STATE_CHECKING: pr_warn("Inode #%u is in state %d during CRC check phase!\n", ic->ino, ic->state); spin_unlock(&c->inocache_lock); BUG(); case INO_STATE_READING: /* We need to wait for it to finish, lest we move on and trigger the BUG() above while we haven't yet finished checking all its nodes */ jffs2_dbg(1, "Waiting for ino #%u to finish reading\n", ic->ino); /* We need to come back again for the _same_ inode. We've made no progress in this case, but that should be OK */ c->check_ino = ic->ino; mutex_unlock(&c->alloc_sem); sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock); return 0; default: BUG(); case INO_STATE_UNCHECKED: ; } ic->state = INO_STATE_CHECKING; spin_unlock(&c->inocache_lock); jffs2_dbg(1, "%s(): triggering inode scan of ino#%u\n", __func__, ic->ino); ret = jffs2_do_crccheck_inode(c, ic); if (ret) pr_warn("Returned error for crccheck of ino #%u. Expect badness...\n", ic->ino); jffs2_set_inocache_state(c, ic, INO_STATE_CHECKEDABSENT); mutex_unlock(&c->alloc_sem); return ret; } /* If there are any blocks which need erasing, erase them now */ if (!list_empty(&c->erase_complete_list) || !list_empty(&c->erase_pending_list)) { spin_unlock(&c->erase_completion_lock); mutex_unlock(&c->alloc_sem); jffs2_dbg(1, "%s(): erasing pending blocks\n", __func__); if (jffs2_erase_pending_blocks(c, 1)) return 0; jffs2_dbg(1, "No progress from erasing block; doing GC anyway\n"); mutex_lock(&c->alloc_sem); spin_lock(&c->erase_completion_lock); } /* First, work out which block we're garbage-collecting */ jeb = c->gcblock; if (!jeb) jeb = jffs2_find_gc_block(c); if (!jeb) { /* Couldn't find a free block. But maybe we can just erase one and make 'progress'? 
*/ if (c->nr_erasing_blocks) { spin_unlock(&c->erase_completion_lock); mutex_unlock(&c->alloc_sem); return -EAGAIN; } jffs2_dbg(1, "Couldn't find erase block to garbage collect!\n"); spin_unlock(&c->erase_completion_lock); mutex_unlock(&c->alloc_sem); return -EIO; } jffs2_dbg(1, "GC from block %08x, used_size %08x, dirty_size %08x, free_size %08x\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->free_size); D1(if (c->nextblock) printk(KERN_DEBUG "Nextblock at %08x, used_size %08x, dirty_size %08x, wasted_size %08x, free_size %08x\n", c->nextblock->offset, c->nextblock->used_size, c->nextblock->dirty_size, c->nextblock->wasted_size, c->nextblock->free_size)); if (!jeb->used_size) { mutex_unlock(&c->alloc_sem); goto eraseit; } raw = jeb->gc_node; gcblock_dirty = jeb->dirty_size; while(ref_obsolete(raw)) { jffs2_dbg(1, "Node at 0x%08x is obsolete... skipping\n", ref_offset(raw)); raw = ref_next(raw); if (unlikely(!raw)) { pr_warn("eep. End of raw list while still supposedly nodes to GC\n"); pr_warn("erase block at 0x%08x. free_size 0x%08x, dirty_size 0x%08x, used_size 0x%08x\n", jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size); jeb->gc_node = raw; spin_unlock(&c->erase_completion_lock); mutex_unlock(&c->alloc_sem); BUG(); } } jeb->gc_node = raw; jffs2_dbg(1, "Going to garbage collect node at 0x%08x\n", ref_offset(raw)); if (!raw->next_in_ino) { /* Inode-less node. Clean marker, snapshot or something like that */ spin_unlock(&c->erase_completion_lock); if (ref_flags(raw) == REF_PRISTINE) { /* It's an unknown node with JFFS2_FEATURE_RWCOMPAT_COPY */ jffs2_garbage_collect_pristine(c, NULL, raw); } else { /* Just mark it obsolete */ jffs2_mark_node_obsolete(c, raw); } mutex_unlock(&c->alloc_sem); goto eraseit_lock; } ic = jffs2_raw_ref_to_ic(raw); #ifdef CONFIG_JFFS2_FS_XATTR /* When 'ic' refers xattr_datum/xattr_ref, this node is GCed as xattr. * We can decide whether this node is inode or xattr by ic->class. */ if (ic->class == RAWNODE_CLASS_XATTR_DATUM || ic->class == RAWNODE_CLASS_XATTR_REF) { spin_unlock(&c->erase_completion_lock); if (ic->class == RAWNODE_CLASS_XATTR_DATUM) { ret = jffs2_garbage_collect_xattr_datum(c, (struct jffs2_xattr_datum *)ic, raw); } else { ret = jffs2_garbage_collect_xattr_ref(c, (struct jffs2_xattr_ref *)ic, raw); } goto test_gcnode; } #endif /* We need to hold the inocache. Either the erase_completion_lock or the inocache_lock are sufficient; we trade down since the inocache_lock causes less contention. */ spin_lock(&c->inocache_lock); spin_unlock(&c->erase_completion_lock); jffs2_dbg(1, "%s(): collecting from block @0x%08x. Node @0x%08x(%d), ino #%u\n", __func__, jeb->offset, ref_offset(raw), ref_flags(raw), ic->ino); /* Three possibilities: 1. Inode is already in-core. We must iget it and do proper updating to its fragtree, etc. 2. Inode is not in-core, node is REF_PRISTINE. We lock the inocache to prevent a read_inode(), copy the node intact. 3. Inode is not in-core, node is not pristine. We must iget() and take the slow path. */ switch(ic->state) { case INO_STATE_CHECKEDABSENT: /* It's been checked, but it's not currently in-core. We can just copy any pristine nodes, but have to prevent anyone else from doing read_inode() while we're at it, so we set the state accordingly */ if (ref_flags(raw) == REF_PRISTINE) ic->state = INO_STATE_GC; else { jffs2_dbg(1, "Ino #%u is absent but node not REF_PRISTINE. Reading.\n", ic->ino); } break; case INO_STATE_PRESENT: /* It's in-core. GC must iget() it. 
*/ break; case INO_STATE_UNCHECKED: case INO_STATE_CHECKING: case INO_STATE_GC: /* Should never happen. We should have finished checking by the time we actually start doing any GC, and since we're holding the alloc_sem, no other garbage collection can happen. */ pr_crit("Inode #%u already in state %d in jffs2_garbage_collect_pass()!\n", ic->ino, ic->state); mutex_unlock(&c->alloc_sem); spin_unlock(&c->inocache_lock); BUG(); case INO_STATE_READING: /* Someone's currently trying to read it. We must wait for them to finish and then go through the full iget() route to do the GC. However, sometimes read_inode() needs to get the alloc_sem() (for marking nodes invalid) so we must drop the alloc_sem before sleeping. */ mutex_unlock(&c->alloc_sem); jffs2_dbg(1, "%s(): waiting for ino #%u in state %d\n", __func__, ic->ino, ic->state); sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock); /* And because we dropped the alloc_sem we must start again from the beginning. Ponder chance of livelock here -- we're returning success without actually making any progress. Q: What are the chances that the inode is back in INO_STATE_READING again by the time we next enter this function? And that this happens enough times to cause a real delay? A: Small enough that I don't care :) */ return 0; } /* OK. Now if the inode is in state INO_STATE_GC, we are going to copy the node intact, and we don't have to muck about with the fragtree etc. because we know it's not in-core. If it _was_ in-core, we go through all the iget() crap anyway */ if (ic->state == INO_STATE_GC) { spin_unlock(&c->inocache_lock); ret = jffs2_garbage_collect_pristine(c, ic, raw); spin_lock(&c->inocache_lock); ic->state = INO_STATE_CHECKEDABSENT; wake_up(&c->inocache_wq); if (ret != -EBADFD) { spin_unlock(&c->inocache_lock); goto test_gcnode; } /* Fall through if it wanted us to, with inocache_lock held */ } /* Prevent the fairly unlikely race where the gcblock is entirely obsoleted by the final close of a file which had the only valid nodes in the block, followed by erasure, followed by freeing of the ic because the erased block(s) held _all_ the nodes of that inode.... never been seen but it's vaguely possible. */ inum = ic->ino; nlink = ic->pino_nlink; spin_unlock(&c->inocache_lock); f = jffs2_gc_fetch_inode(c, inum, !nlink); if (IS_ERR(f)) { ret = PTR_ERR(f); goto release_sem; } if (!f) { ret = 0; goto release_sem; } ret = jffs2_garbage_collect_live(c, jeb, raw, f); jffs2_gc_release_inode(c, f); test_gcnode: if (jeb->dirty_size == gcblock_dirty && !ref_obsolete(jeb->gc_node)) { /* Eep. This really should never happen. GC is broken */ pr_err("Error garbage collecting node at %08x!\n", ref_offset(jeb->gc_node)); ret = -ENOSPC; } release_sem: mutex_unlock(&c->alloc_sem); eraseit_lock: /* If we've finished this block, start it erasing */ spin_lock(&c->erase_completion_lock); eraseit: if (c->gcblock && !c->gcblock->used_size) { jffs2_dbg(1, "Block at 0x%08x completely obsoleted by GC. Moving to erase_pending_list\n", c->gcblock->offset); /* We're GC'ing an empty block? 
*/ list_add_tail(&c->gcblock->list, &c->erase_pending_list); c->gcblock = NULL; c->nr_erasing_blocks++; jffs2_garbage_collect_trigger(c); } spin_unlock(&c->erase_completion_lock); return ret; } static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, struct jffs2_raw_node_ref *raw, struct jffs2_inode_info *f) { struct jffs2_node_frag *frag; struct jffs2_full_dnode *fn = NULL; struct jffs2_full_dirent *fd; uint32_t start = 0, end = 0, nrfrags = 0; int ret = 0; mutex_lock(&f->sem); /* Now we have the lock for this inode. Check that it's still the one at the head of the list. */ spin_lock(&c->erase_completion_lock); if (c->gcblock != jeb) { spin_unlock(&c->erase_completion_lock); jffs2_dbg(1, "GC block is no longer gcblock. Restart\n"); goto upnout; } if (ref_obsolete(raw)) { spin_unlock(&c->erase_completion_lock); jffs2_dbg(1, "node to be GC'd was obsoleted in the meantime.\n"); /* They'll call again */ goto upnout; } spin_unlock(&c->erase_completion_lock); /* OK. Looks safe. And nobody can get us now because we have the semaphore. Move the block */ if (f->metadata && f->metadata->raw == raw) { fn = f->metadata; ret = jffs2_garbage_collect_metadata(c, jeb, f, fn); goto upnout; } /* FIXME. Read node and do lookup? */ for (frag = frag_first(&f->fragtree); frag; frag = frag_next(frag)) { if (frag->node && frag->node->raw == raw) { fn = frag->node; end = frag->ofs + frag->size; if (!nrfrags++) start = frag->ofs; if (nrfrags == frag->node->frags) break; /* We've found them all */ } } if (fn) { if (ref_flags(raw) == REF_PRISTINE) { ret = jffs2_garbage_collect_pristine(c, f->inocache, raw); if (!ret) { /* Urgh. Return it sensibly. */ frag->node->raw = f->inocache->nodes; } if (ret != -EBADFD) goto upnout; } /* We found a datanode. Do the GC */ if((start >> PAGE_SHIFT) < ((end-1) >> PAGE_SHIFT)) { /* It crosses a page boundary. Therefore, it must be a hole. */ ret = jffs2_garbage_collect_hole(c, jeb, f, fn, start, end); } else { /* It could still be a hole. But we GC the page this way anyway */ ret = jffs2_garbage_collect_dnode(c, jeb, f, fn, start, end); } goto upnout; } /* Wasn't a dnode. Try dirent */ for (fd = f->dents; fd; fd=fd->next) { if (fd->raw == raw) break; } if (fd && fd->ino) { ret = jffs2_garbage_collect_dirent(c, jeb, f, fd); } else if (fd) { ret = jffs2_garbage_collect_deletion_dirent(c, jeb, f, fd); } else { pr_warn("Raw node at 0x%08x wasn't in node lists for ino #%u\n", ref_offset(raw), f->inocache->ino); if (ref_obsolete(raw)) { pr_warn("But it's obsolete so we don't mind too much\n"); } else { jffs2_dbg_dump_node(c, ref_offset(raw)); BUG(); } } upnout: mutex_unlock(&f->sem); return ret; } static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, struct jffs2_raw_node_ref *raw) { union jffs2_node_union *node; size_t retlen; int ret; uint32_t phys_ofs, alloclen; uint32_t crc, rawlen; int retried = 0; jffs2_dbg(1, "Going to GC REF_PRISTINE node at 0x%08x\n", ref_offset(raw)); alloclen = rawlen = ref_totlen(c, c->gcblock, raw); /* Ask for a small amount of space (or the totlen if smaller) because we don't want to force wastage of the end of a block if splitting would work. 
*/ if (ic && alloclen > sizeof(struct jffs2_raw_inode) + JFFS2_MIN_DATA_LEN) alloclen = sizeof(struct jffs2_raw_inode) + JFFS2_MIN_DATA_LEN; ret = jffs2_reserve_space_gc(c, alloclen, &alloclen, rawlen); /* 'rawlen' is not the exact summary size; it is only an upper estimation */ if (ret) return ret; if (alloclen < rawlen) { /* Doesn't fit untouched. We'll go the old route and split it */ return -EBADFD; } node = kmalloc(rawlen, GFP_KERNEL); if (!node) return -ENOMEM; ret = jffs2_flash_read(c, ref_offset(raw), rawlen, &retlen, (char *)node); if (!ret && retlen != rawlen) ret = -EIO; if (ret) goto out_node; crc = crc32(0, node, sizeof(struct jffs2_unknown_node)-4); if (je32_to_cpu(node->u.hdr_crc) != crc) { pr_warn("Header CRC failed on REF_PRISTINE node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", ref_offset(raw), je32_to_cpu(node->u.hdr_crc), crc); goto bail; } switch(je16_to_cpu(node->u.nodetype)) { case JFFS2_NODETYPE_INODE: crc = crc32(0, node, sizeof(node->i)-8); if (je32_to_cpu(node->i.node_crc) != crc) { pr_warn("Node CRC failed on REF_PRISTINE data node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", ref_offset(raw), je32_to_cpu(node->i.node_crc), crc); goto bail; } if (je32_to_cpu(node->i.dsize)) { crc = crc32(0, node->i.data, je32_to_cpu(node->i.csize)); if (je32_to_cpu(node->i.data_crc) != crc) { pr_warn("Data CRC failed on REF_PRISTINE data node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", ref_offset(raw), je32_to_cpu(node->i.data_crc), crc); goto bail; } } break; case JFFS2_NODETYPE_DIRENT: crc = crc32(0, node, sizeof(node->d)-8); if (je32_to_cpu(node->d.node_crc) != crc) { pr_warn("Node CRC failed on REF_PRISTINE dirent node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", ref_offset(raw), je32_to_cpu(node->d.node_crc), crc); goto bail; } if (strnlen(node->d.name, node->d.nsize) != node->d.nsize) { pr_warn("Name in dirent node at 0x%08x contains zeroes\n", ref_offset(raw)); goto bail; } if (node->d.nsize) { crc = crc32(0, node->d.name, node->d.nsize); if (je32_to_cpu(node->d.name_crc) != crc) { pr_warn("Name CRC failed on REF_PRISTINE dirent node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", ref_offset(raw), je32_to_cpu(node->d.name_crc), crc); goto bail; } } break; default: /* If it's inode-less, we don't _know_ what it is. Just copy it intact */ if (ic) { pr_warn("Unknown node type for REF_PRISTINE node at 0x%08x: 0x%04x\n", ref_offset(raw), je16_to_cpu(node->u.nodetype)); goto bail; } } /* OK, all the CRCs are good; this node can just be copied as-is. */ retry: phys_ofs = write_ofs(c); ret = jffs2_flash_write(c, phys_ofs, rawlen, &retlen, (char *)node); if (ret || (retlen != rawlen)) { pr_notice("Write of %d bytes at 0x%08x failed. 
returned %d, retlen %zd\n", rawlen, phys_ofs, ret, retlen); if (retlen) { jffs2_add_physical_node_ref(c, phys_ofs | REF_OBSOLETE, rawlen, NULL); } else { pr_notice("Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", phys_ofs); } if (!retried) { /* Try to reallocate space and retry */ uint32_t dummy; struct jffs2_eraseblock *jeb = &c->blocks[phys_ofs / c->sector_size]; retried = 1; jffs2_dbg(1, "Retrying failed write of REF_PRISTINE node.\n"); jffs2_dbg_acct_sanity_check(c,jeb); jffs2_dbg_acct_paranoia_check(c, jeb); ret = jffs2_reserve_space_gc(c, rawlen, &dummy, rawlen); /* this is not the exact summary size of it, it is only an upper estimation */ if (!ret) { jffs2_dbg(1, "Allocated space at 0x%08x to retry failed write.\n", phys_ofs); jffs2_dbg_acct_sanity_check(c,jeb); jffs2_dbg_acct_paranoia_check(c, jeb); goto retry; } jffs2_dbg(1, "Failed to allocate space to retry failed write: %d!\n", ret); } if (!ret) ret = -EIO; goto out_node; } jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, rawlen, ic); jffs2_mark_node_obsolete(c, raw); jffs2_dbg(1, "WHEEE! GC REF_PRISTINE node at 0x%08x succeeded\n", ref_offset(raw)); out_node: kfree(node); return ret; bail: ret = -EBADFD; goto out_node; } static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn) { struct jffs2_full_dnode *new_fn; struct jffs2_raw_inode ri; struct jffs2_node_frag *last_frag; union jffs2_device_node dev; char *mdata = NULL; int mdatalen = 0; uint32_t alloclen, ilen; int ret; if (S_ISBLK(JFFS2_F_I_MODE(f)) || S_ISCHR(JFFS2_F_I_MODE(f)) ) { /* For these, we don't actually need to read the old node */ mdatalen = jffs2_encode_dev(&dev, JFFS2_F_I_RDEV(f)); mdata = (char *)&dev; jffs2_dbg(1, "%s(): Writing %d bytes of kdev_t\n", __func__, mdatalen); } else if (S_ISLNK(JFFS2_F_I_MODE(f))) { mdatalen = fn->size; mdata = kmalloc(fn->size, GFP_KERNEL); if (!mdata) { pr_warn("kmalloc of mdata failed in jffs2_garbage_collect_metadata()\n"); return -ENOMEM; } ret = jffs2_read_dnode(c, f, fn, mdata, 0, mdatalen); if (ret) { pr_warn("read of old metadata failed in jffs2_garbage_collect_metadata(): %d\n", ret); kfree(mdata); return ret; } jffs2_dbg(1, "%s(): Writing %d bites of symlink target\n", __func__, mdatalen); } ret = jffs2_reserve_space_gc(c, sizeof(ri) + mdatalen, &alloclen, JFFS2_SUMMARY_INODE_SIZE); if (ret) { pr_warn("jffs2_reserve_space_gc of %zd bytes for garbage_collect_metadata failed: %d\n", sizeof(ri) + mdatalen, ret); goto out; } last_frag = frag_last(&f->fragtree); if (last_frag) /* Fetch the inode length from the fragtree rather then * from i_size since i_size may have not been updated yet */ ilen = last_frag->ofs + last_frag->size; else ilen = JFFS2_F_I_SIZE(f); memset(&ri, 0, sizeof(ri)); ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE); ri.totlen = cpu_to_je32(sizeof(ri) + mdatalen); ri.hdr_crc = cpu_to_je32(crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4)); ri.ino = cpu_to_je32(f->inocache->ino); ri.version = cpu_to_je32(++f->highest_version); ri.mode = cpu_to_jemode(JFFS2_F_I_MODE(f)); ri.uid = cpu_to_je16(JFFS2_F_I_UID(f)); ri.gid = cpu_to_je16(JFFS2_F_I_GID(f)); ri.isize = cpu_to_je32(ilen); ri.atime = cpu_to_je32(JFFS2_F_I_ATIME(f)); ri.ctime = cpu_to_je32(JFFS2_F_I_CTIME(f)); ri.mtime = cpu_to_je32(JFFS2_F_I_MTIME(f)); ri.offset = cpu_to_je32(0); ri.csize = cpu_to_je32(mdatalen); ri.dsize = cpu_to_je32(mdatalen); ri.compr = 
JFFS2_COMPR_NONE; ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8)); ri.data_crc = cpu_to_je32(crc32(0, mdata, mdatalen)); new_fn = jffs2_write_dnode(c, f, &ri, mdata, mdatalen, ALLOC_GC); if (IS_ERR(new_fn)) { pr_warn("Error writing new dnode: %ld\n", PTR_ERR(new_fn)); ret = PTR_ERR(new_fn); goto out; } jffs2_mark_node_obsolete(c, fn->raw); jffs2_free_full_dnode(fn); f->metadata = new_fn; out: if (S_ISLNK(JFFS2_F_I_MODE(f))) kfree(mdata); return ret; } static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, struct jffs2_inode_info *f, struct jffs2_full_dirent *fd) { struct jffs2_full_dirent *new_fd; struct jffs2_raw_dirent rd; uint32_t alloclen; int ret; rd.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); rd.nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT); rd.nsize = strlen(fd->name); rd.totlen = cpu_to_je32(sizeof(rd) + rd.nsize); rd.hdr_crc = cpu_to_je32(crc32(0, &rd, sizeof(struct jffs2_unknown_node)-4)); rd.pino = cpu_to_je32(f->inocache->ino); rd.version = cpu_to_je32(++f->highest_version); rd.ino = cpu_to_je32(fd->ino); /* If the times on this inode were set by explicit utime() they can be different, so refrain from splatting them. */ if (JFFS2_F_I_MTIME(f) == JFFS2_F_I_CTIME(f)) rd.mctime = cpu_to_je32(JFFS2_F_I_MTIME(f)); else rd.mctime = cpu_to_je32(0); rd.type = fd->type; rd.node_crc = cpu_to_je32(crc32(0, &rd, sizeof(rd)-8)); rd.name_crc = cpu_to_je32(crc32(0, fd->name, rd.nsize)); ret = jffs2_reserve_space_gc(c, sizeof(rd)+rd.nsize, &alloclen, JFFS2_SUMMARY_DIRENT_SIZE(rd.nsize)); if (ret) { pr_warn("jffs2_reserve_space_gc of %zd bytes for garbage_collect_dirent failed: %d\n", sizeof(rd)+rd.nsize, ret); return ret; } new_fd = jffs2_write_dirent(c, f, &rd, fd->name, rd.nsize, ALLOC_GC); if (IS_ERR(new_fd)) { pr_warn("jffs2_write_dirent in garbage_collect_dirent failed: %ld\n", PTR_ERR(new_fd)); return PTR_ERR(new_fd); } jffs2_add_fd_to_list(c, new_fd, &f->dents); return 0; } static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, struct jffs2_inode_info *f, struct jffs2_full_dirent *fd) { struct jffs2_full_dirent **fdp = &f->dents; int found = 0; /* On a medium where we can't actually mark nodes obsolete pernamently, such as NAND flash, we need to work out whether this deletion dirent is still needed to actively delete a 'real' dirent with the same name that's still somewhere else on the flash. */ if (!jffs2_can_mark_obsolete(c)) { struct jffs2_raw_dirent *rd; struct jffs2_raw_node_ref *raw; int ret; size_t retlen; int name_len = strlen(fd->name); uint32_t name_crc = crc32(0, fd->name, name_len); uint32_t rawlen = ref_totlen(c, jeb, fd->raw); rd = kmalloc(rawlen, GFP_KERNEL); if (!rd) return -ENOMEM; /* Prevent the erase code from nicking the obsolete node refs while we're looking at them. I really don't like this extra lock but can't see any alternative. Suggestions on a postcard to... */ mutex_lock(&c->erase_free_sem); for (raw = f->inocache->nodes; raw != (void *)f->inocache; raw = raw->next_in_ino) { cond_resched(); /* We only care about obsolete ones */ if (!(ref_obsolete(raw))) continue; /* Any dirent with the same name is going to have the same length... */ if (ref_totlen(c, NULL, raw) != rawlen) continue; /* Doesn't matter if there's one in the same erase block. We're going to delete it too at the same time. 
*/ if (SECTOR_ADDR(raw->flash_offset) == SECTOR_ADDR(fd->raw->flash_offset)) continue; jffs2_dbg(1, "Check potential deletion dirent at %08x\n", ref_offset(raw)); /* This is an obsolete node belonging to the same directory, and it's of the right length. We need to take a closer look...*/ ret = jffs2_flash_read(c, ref_offset(raw), rawlen, &retlen, (char *)rd); if (ret) { pr_warn("%s(): Read error (%d) reading obsolete node at %08x\n", __func__, ret, ref_offset(raw)); /* If we can't read it, we don't need to continue to obsolete it. Continue */ continue; } if (retlen != rawlen) { pr_warn("%s(): Short read (%zd not %u) reading header from obsolete node at %08x\n", __func__, retlen, rawlen, ref_offset(raw)); continue; } if (je16_to_cpu(rd->nodetype) != JFFS2_NODETYPE_DIRENT) continue; /* If the name CRC doesn't match, skip */ if (je32_to_cpu(rd->name_crc) != name_crc) continue; /* If the name length doesn't match, or it's another deletion dirent, skip */ if (rd->nsize != name_len || !je32_to_cpu(rd->ino)) continue; /* OK, check the actual name now */ if (memcmp(rd->name, fd->name, name_len)) continue; /* OK. The name really does match. There really is still an older node on the flash which our deletion dirent obsoletes. So we have to write out a new deletion dirent to replace it */ mutex_unlock(&c->erase_free_sem); jffs2_dbg(1, "Deletion dirent at %08x still obsoletes real dirent \"%s\" at %08x for ino #%u\n", ref_offset(fd->raw), fd->name, ref_offset(raw), je32_to_cpu(rd->ino)); kfree(rd); return jffs2_garbage_collect_dirent(c, jeb, f, fd); } mutex_unlock(&c->erase_free_sem); kfree(rd); } /* FIXME: If we're deleting a dirent which contains the current mtime and ctime, we should update the metadata node with those times accordingly */ /* No need for it any more. Just mark it obsolete and remove it from the list */ while (*fdp) { if ((*fdp) == fd) { found = 1; *fdp = fd->next; break; } fdp = &(*fdp)->next; } if (!found) { pr_warn("Deletion dirent \"%s\" not found in list for ino #%u\n", fd->name, f->inocache->ino); } jffs2_mark_node_obsolete(c, fd->raw); jffs2_free_full_dirent(fd); return 0; } static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn, uint32_t start, uint32_t end) { struct jffs2_raw_inode ri; struct jffs2_node_frag *frag; struct jffs2_full_dnode *new_fn; uint32_t alloclen, ilen; int ret; jffs2_dbg(1, "Writing replacement hole node for ino #%u from offset 0x%x to 0x%x\n", f->inocache->ino, start, end); memset(&ri, 0, sizeof(ri)); if(fn->frags > 1) { size_t readlen; uint32_t crc; /* It's partially obsoleted by a later write. So we have to write it out again with the _same_ version as before */ ret = jffs2_flash_read(c, ref_offset(fn->raw), sizeof(ri), &readlen, (char *)&ri); if (readlen != sizeof(ri) || ret) { pr_warn("Node read failed in jffs2_garbage_collect_hole. Ret %d, retlen %zd. 
Data will be lost by writing new hole node\n", ret, readlen); goto fill; } if (je16_to_cpu(ri.nodetype) != JFFS2_NODETYPE_INODE) { pr_warn("%s(): Node at 0x%08x had node type 0x%04x instead of JFFS2_NODETYPE_INODE(0x%04x)\n", __func__, ref_offset(fn->raw), je16_to_cpu(ri.nodetype), JFFS2_NODETYPE_INODE); return -EIO; } if (je32_to_cpu(ri.totlen) != sizeof(ri)) { pr_warn("%s(): Node at 0x%08x had totlen 0x%x instead of expected 0x%zx\n", __func__, ref_offset(fn->raw), je32_to_cpu(ri.totlen), sizeof(ri)); return -EIO; } crc = crc32(0, &ri, sizeof(ri)-8); if (crc != je32_to_cpu(ri.node_crc)) { pr_warn("%s: Node at 0x%08x had CRC 0x%08x which doesn't match calculated CRC 0x%08x\n", __func__, ref_offset(fn->raw), je32_to_cpu(ri.node_crc), crc); /* FIXME: We could possibly deal with this by writing new holes for each frag */ pr_warn("Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n", start, end, f->inocache->ino); goto fill; } if (ri.compr != JFFS2_COMPR_ZERO) { pr_warn("%s(): Node 0x%08x wasn't a hole node!\n", __func__, ref_offset(fn->raw)); pr_warn("Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n", start, end, f->inocache->ino); goto fill; } } else { fill: ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE); ri.totlen = cpu_to_je32(sizeof(ri)); ri.hdr_crc = cpu_to_je32(crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4)); ri.ino = cpu_to_je32(f->inocache->ino); ri.version = cpu_to_je32(++f->highest_version); ri.offset = cpu_to_je32(start); ri.dsize = cpu_to_je32(end - start); ri.csize = cpu_to_je32(0); ri.compr = JFFS2_COMPR_ZERO; } frag = frag_last(&f->fragtree); if (frag) /* Fetch the inode length from the fragtree rather then * from i_size since i_size may have not been updated yet */ ilen = frag->ofs + frag->size; else ilen = JFFS2_F_I_SIZE(f); ri.mode = cpu_to_jemode(JFFS2_F_I_MODE(f)); ri.uid = cpu_to_je16(JFFS2_F_I_UID(f)); ri.gid = cpu_to_je16(JFFS2_F_I_GID(f)); ri.isize = cpu_to_je32(ilen); ri.atime = cpu_to_je32(JFFS2_F_I_ATIME(f)); ri.ctime = cpu_to_je32(JFFS2_F_I_CTIME(f)); ri.mtime = cpu_to_je32(JFFS2_F_I_MTIME(f)); ri.data_crc = cpu_to_je32(0); ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8)); ret = jffs2_reserve_space_gc(c, sizeof(ri), &alloclen, JFFS2_SUMMARY_INODE_SIZE); if (ret) { pr_warn("jffs2_reserve_space_gc of %zd bytes for garbage_collect_hole failed: %d\n", sizeof(ri), ret); return ret; } new_fn = jffs2_write_dnode(c, f, &ri, NULL, 0, ALLOC_GC); if (IS_ERR(new_fn)) { pr_warn("Error writing new hole node: %ld\n", PTR_ERR(new_fn)); return PTR_ERR(new_fn); } if (je32_to_cpu(ri.version) == f->highest_version) { jffs2_add_full_dnode_to_inode(c, f, new_fn); if (f->metadata) { jffs2_mark_node_obsolete(c, f->metadata->raw); jffs2_free_full_dnode(f->metadata); f->metadata = NULL; } return 0; } /* * We should only get here in the case where the node we are * replacing had more than one frag, so we kept the same version * number as before. (Except in case of error -- see 'goto fill;' * above.) */ D1(if(unlikely(fn->frags <= 1)) { pr_warn("%s(): Replacing fn with %d frag(s) but new ver %d != highest_version %d of ino #%d\n", __func__, fn->frags, je32_to_cpu(ri.version), f->highest_version, je32_to_cpu(ri.ino)); }); /* This is a partially-overlapped hole node. 
Mark it REF_NORMAL not REF_PRISTINE */ mark_ref_normal(new_fn->raw); for (frag = jffs2_lookup_node_frag(&f->fragtree, fn->ofs); frag; frag = frag_next(frag)) { if (frag->ofs > fn->size + fn->ofs) break; if (frag->node == fn) { frag->node = new_fn; new_fn->frags++; fn->frags--; } } if (fn->frags) { pr_warn("%s(): Old node still has frags!\n", __func__); BUG(); } if (!new_fn->frags) { pr_warn("%s(): New node has no frags!\n", __func__); BUG(); } jffs2_mark_node_obsolete(c, fn->raw); jffs2_free_full_dnode(fn); return 0; } static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *orig_jeb, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn, uint32_t start, uint32_t end) { struct inode *inode = OFNI_EDONI_2SFFJ(f); struct jffs2_full_dnode *new_fn; struct jffs2_raw_inode ri; uint32_t alloclen, offset, orig_end, orig_start; int ret = 0; unsigned char *comprbuf = NULL, *writebuf; struct page *page; unsigned char *pg_ptr; memset(&ri, 0, sizeof(ri)); jffs2_dbg(1, "Writing replacement dnode for ino #%u from offset 0x%x to 0x%x\n", f->inocache->ino, start, end); orig_end = end; orig_start = start; if (c->nr_free_blocks + c->nr_erasing_blocks > c->resv_blocks_gcmerge) { /* Attempt to do some merging. But only expand to cover logically adjacent frags if the block containing them is already considered to be dirty. Otherwise we end up with GC just going round in circles dirtying the nodes it already wrote out, especially on NAND where we have small eraseblocks and hence a much higher chance of nodes having to be split to cross boundaries. */ struct jffs2_node_frag *frag; uint32_t min, max; min = start & ~(PAGE_SIZE-1); max = min + PAGE_SIZE; frag = jffs2_lookup_node_frag(&f->fragtree, start); /* BUG_ON(!frag) but that'll happen anyway... */ BUG_ON(frag->ofs != start); /* First grow down... */ while((frag = frag_prev(frag)) && frag->ofs >= min) { /* If the previous frag doesn't even reach the beginning, there's excessive fragmentation. Just merge. */ if (frag->ofs > min) { jffs2_dbg(1, "Expanding down to cover partial frag (0x%x-0x%x)\n", frag->ofs, frag->ofs+frag->size); start = frag->ofs; continue; } /* OK. This frag holds the first byte of the page. */ if (!frag->node || !frag->node->raw) { jffs2_dbg(1, "First frag in page is hole (0x%x-0x%x). Not expanding down.\n", frag->ofs, frag->ofs+frag->size); break; } else { /* OK, it's a frag which extends to the beginning of the page. Does it live in a block which is still considered clean? If so, don't obsolete it. If not, cover it anyway. */ struct jffs2_raw_node_ref *raw = frag->node->raw; struct jffs2_eraseblock *jeb; jeb = &c->blocks[raw->flash_offset / c->sector_size]; if (jeb == c->gcblock) { jffs2_dbg(1, "Expanding down to cover frag (0x%x-0x%x) in gcblock at %08x\n", frag->ofs, frag->ofs + frag->size, ref_offset(raw)); start = frag->ofs; break; } if (!ISDIRTY(jeb->dirty_size + jeb->wasted_size)) { jffs2_dbg(1, "Not expanding down to cover frag (0x%x-0x%x) in clean block %08x\n", frag->ofs, frag->ofs + frag->size, jeb->offset); break; } jffs2_dbg(1, "Expanding down to cover frag (0x%x-0x%x) in dirty block %08x\n", frag->ofs, frag->ofs + frag->size, jeb->offset); start = frag->ofs; break; } } /* ... then up */ /* Find last frag which is actually part of the node we're to GC. */ frag = jffs2_lookup_node_frag(&f->fragtree, end-1); while((frag = frag_next(frag)) && frag->ofs+frag->size <= max) { /* If the previous frag doesn't even reach the beginning, there's lots of fragmentation. Just merge. 
*/ if (frag->ofs+frag->size < max) { jffs2_dbg(1, "Expanding up to cover partial frag (0x%x-0x%x)\n", frag->ofs, frag->ofs+frag->size); end = frag->ofs + frag->size; continue; } if (!frag->node || !frag->node->raw) { jffs2_dbg(1, "Last frag in page is hole (0x%x-0x%x). Not expanding up.\n", frag->ofs, frag->ofs+frag->size); break; } else { /* OK, it's a frag which extends to the beginning of the page. Does it live in a block which is still considered clean? If so, don't obsolete it. If not, cover it anyway. */ struct jffs2_raw_node_ref *raw = frag->node->raw; struct jffs2_eraseblock *jeb; jeb = &c->blocks[raw->flash_offset / c->sector_size]; if (jeb == c->gcblock) { jffs2_dbg(1, "Expanding up to cover frag (0x%x-0x%x) in gcblock at %08x\n", frag->ofs, frag->ofs + frag->size, ref_offset(raw)); end = frag->ofs + frag->size; break; } if (!ISDIRTY(jeb->dirty_size + jeb->wasted_size)) { jffs2_dbg(1, "Not expanding up to cover frag (0x%x-0x%x) in clean block %08x\n", frag->ofs, frag->ofs + frag->size, jeb->offset); break; } jffs2_dbg(1, "Expanding up to cover frag (0x%x-0x%x) in dirty block %08x\n", frag->ofs, frag->ofs + frag->size, jeb->offset); end = frag->ofs + frag->size; break; } } jffs2_dbg(1, "Expanded dnode to write from (0x%x-0x%x) to (0x%x-0x%x)\n", orig_start, orig_end, start, end); D1(BUG_ON(end > frag_last(&f->fragtree)->ofs + frag_last(&f->fragtree)->size)); BUG_ON(end < orig_end); BUG_ON(start > orig_start); } /* The rules state that we must obtain the page lock *before* f->sem, so * drop f->sem temporarily. Since we also hold c->alloc_sem, nothing's * actually going to *change* so we're safe; we only allow reading. * * It is important to note that jffs2_write_begin() will ensure that its * page is marked Uptodate before allocating space. That means that if we * end up here trying to GC the *same* page that jffs2_write_begin() is * trying to write out, read_cache_page() will not deadlock. 
*/ mutex_unlock(&f->sem); page = read_cache_page(inode->i_mapping, start >> PAGE_SHIFT, __jffs2_read_folio, NULL); if (IS_ERR(page)) { pr_warn("read_cache_page() returned error: %ld\n", PTR_ERR(page)); mutex_lock(&f->sem); return PTR_ERR(page); } pg_ptr = kmap(page); mutex_lock(&f->sem); offset = start; while(offset < orig_end) { uint32_t datalen; uint32_t cdatalen; uint16_t comprtype = JFFS2_COMPR_NONE; ret = jffs2_reserve_space_gc(c, sizeof(ri) + JFFS2_MIN_DATA_LEN, &alloclen, JFFS2_SUMMARY_INODE_SIZE); if (ret) { pr_warn("jffs2_reserve_space_gc of %zd bytes for garbage_collect_dnode failed: %d\n", sizeof(ri) + JFFS2_MIN_DATA_LEN, ret); break; } cdatalen = min_t(uint32_t, alloclen - sizeof(ri), end - offset); datalen = end - offset; writebuf = pg_ptr + (offset & (PAGE_SIZE -1)); comprtype = jffs2_compress(c, f, writebuf, &comprbuf, &datalen, &cdatalen); ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE); ri.totlen = cpu_to_je32(sizeof(ri) + cdatalen); ri.hdr_crc = cpu_to_je32(crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4)); ri.ino = cpu_to_je32(f->inocache->ino); ri.version = cpu_to_je32(++f->highest_version); ri.mode = cpu_to_jemode(JFFS2_F_I_MODE(f)); ri.uid = cpu_to_je16(JFFS2_F_I_UID(f)); ri.gid = cpu_to_je16(JFFS2_F_I_GID(f)); ri.isize = cpu_to_je32(JFFS2_F_I_SIZE(f)); ri.atime = cpu_to_je32(JFFS2_F_I_ATIME(f)); ri.ctime = cpu_to_je32(JFFS2_F_I_CTIME(f)); ri.mtime = cpu_to_je32(JFFS2_F_I_MTIME(f)); ri.offset = cpu_to_je32(offset); ri.csize = cpu_to_je32(cdatalen); ri.dsize = cpu_to_je32(datalen); ri.compr = comprtype & 0xff; ri.usercompr = (comprtype >> 8) & 0xff; ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8)); ri.data_crc = cpu_to_je32(crc32(0, comprbuf, cdatalen)); new_fn = jffs2_write_dnode(c, f, &ri, comprbuf, cdatalen, ALLOC_GC); jffs2_free_comprbuf(comprbuf, writebuf); if (IS_ERR(new_fn)) { pr_warn("Error writing new dnode: %ld\n", PTR_ERR(new_fn)); ret = PTR_ERR(new_fn); break; } ret = jffs2_add_full_dnode_to_inode(c, f, new_fn); offset += datalen; if (f->metadata) { jffs2_mark_node_obsolete(c, f->metadata->raw); jffs2_free_full_dnode(f->metadata); f->metadata = NULL; } } kunmap(page); put_page(page); return ret; }
linux-master
fs/jffs2/gc.c
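The dnode GC path in the file above only merges logically adjacent frags inside the page that contains `start`, computing `min = start & ~(PAGE_SIZE-1)` and `max = min + PAGE_SIZE` before growing the range down and then up. Below is a minimal standalone sketch of just that windowing arithmetic, in userspace C; `SKETCH_PAGE_SIZE` and `gc_merge_window` are illustrative names, not kernel symbols, and a 4 KiB page is assumed.

#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096u   /* stand-in for the kernel's PAGE_SIZE */

/* Clamp a byte range starting at 'start' to the page that contains it,
 * the window jffs2_garbage_collect_dnode() is allowed to expand into. */
static void gc_merge_window(uint32_t start, uint32_t *min, uint32_t *max)
{
	*min = start & ~(SKETCH_PAGE_SIZE - 1);   /* round down to the page start */
	*max = *min + SKETCH_PAGE_SIZE;           /* one full page above that     */
}

int main(void)
{
	uint32_t min, max;

	gc_merge_window(0x1234, &min, &max);
	printf("window: 0x%x-0x%x\n", min, max);  /* prints 0x1000-0x2000 */
	return 0;
}

Frags outside this window are never pulled into the rewritten node, which is what keeps GC from endlessly re-dirtying data it has just written.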
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/minix/bitmap.c * * Copyright (C) 1991, 1992 Linus Torvalds */ /* * Modified for 680x0 by Hamish Macdonald * Fixed for 680x0 by Andreas Schwab */ /* bitmap.c contains the code that handles the inode and block bitmaps */ #include "minix.h" #include <linux/buffer_head.h> #include <linux/bitops.h> #include <linux/sched.h> static DEFINE_SPINLOCK(bitmap_lock); /* * bitmap consists of blocks filled with 16bit words * bit set == busy, bit clear == free * endianness is a mess, but for counting zero bits it really doesn't matter... */ static __u32 count_free(struct buffer_head *map[], unsigned blocksize, __u32 numbits) { __u32 sum = 0; unsigned blocks = DIV_ROUND_UP(numbits, blocksize * 8); while (blocks--) { unsigned words = blocksize / 2; __u16 *p = (__u16 *)(*map++)->b_data; while (words--) sum += 16 - hweight16(*p++); } return sum; } void minix_free_block(struct inode *inode, unsigned long block) { struct super_block *sb = inode->i_sb; struct minix_sb_info *sbi = minix_sb(sb); struct buffer_head *bh; int k = sb->s_blocksize_bits + 3; unsigned long bit, zone; if (block < sbi->s_firstdatazone || block >= sbi->s_nzones) { printk("Trying to free block not in datazone\n"); return; } zone = block - sbi->s_firstdatazone + 1; bit = zone & ((1<<k) - 1); zone >>= k; if (zone >= sbi->s_zmap_blocks) { printk("minix_free_block: nonexistent bitmap buffer\n"); return; } bh = sbi->s_zmap[zone]; spin_lock(&bitmap_lock); if (!minix_test_and_clear_bit(bit, bh->b_data)) printk("minix_free_block (%s:%lu): bit already cleared\n", sb->s_id, block); spin_unlock(&bitmap_lock); mark_buffer_dirty(bh); return; } int minix_new_block(struct inode * inode) { struct minix_sb_info *sbi = minix_sb(inode->i_sb); int bits_per_zone = 8 * inode->i_sb->s_blocksize; int i; for (i = 0; i < sbi->s_zmap_blocks; i++) { struct buffer_head *bh = sbi->s_zmap[i]; int j; spin_lock(&bitmap_lock); j = minix_find_first_zero_bit(bh->b_data, bits_per_zone); if (j < bits_per_zone) { minix_set_bit(j, bh->b_data); spin_unlock(&bitmap_lock); mark_buffer_dirty(bh); j += i * bits_per_zone + sbi->s_firstdatazone-1; if (j < sbi->s_firstdatazone || j >= sbi->s_nzones) break; return j; } spin_unlock(&bitmap_lock); } return 0; } unsigned long minix_count_free_blocks(struct super_block *sb) { struct minix_sb_info *sbi = minix_sb(sb); u32 bits = sbi->s_nzones - sbi->s_firstdatazone + 1; return (count_free(sbi->s_zmap, sb->s_blocksize, bits) << sbi->s_log_zone_size); } struct minix_inode * minix_V1_raw_inode(struct super_block *sb, ino_t ino, struct buffer_head **bh) { int block; struct minix_sb_info *sbi = minix_sb(sb); struct minix_inode *p; if (!ino || ino > sbi->s_ninodes) { printk("Bad inode number on dev %s: %ld is out of range\n", sb->s_id, (long)ino); return NULL; } ino--; block = 2 + sbi->s_imap_blocks + sbi->s_zmap_blocks + ino / MINIX_INODES_PER_BLOCK; *bh = sb_bread(sb, block); if (!*bh) { printk("Unable to read inode block\n"); return NULL; } p = (void *)(*bh)->b_data; return p + ino % MINIX_INODES_PER_BLOCK; } struct minix2_inode * minix_V2_raw_inode(struct super_block *sb, ino_t ino, struct buffer_head **bh) { int block; struct minix_sb_info *sbi = minix_sb(sb); struct minix2_inode *p; int minix2_inodes_per_block = sb->s_blocksize / sizeof(struct minix2_inode); *bh = NULL; if (!ino || ino > sbi->s_ninodes) { printk("Bad inode number on dev %s: %ld is out of range\n", sb->s_id, (long)ino); return NULL; } ino--; block = 2 + sbi->s_imap_blocks + sbi->s_zmap_blocks + ino / minix2_inodes_per_block; *bh 
= sb_bread(sb, block); if (!*bh) { printk("Unable to read inode block\n"); return NULL; } p = (void *)(*bh)->b_data; return p + ino % minix2_inodes_per_block; } /* Clear the link count and mode of a deleted inode on disk. */ static void minix_clear_inode(struct inode *inode) { struct buffer_head *bh = NULL; if (INODE_VERSION(inode) == MINIX_V1) { struct minix_inode *raw_inode; raw_inode = minix_V1_raw_inode(inode->i_sb, inode->i_ino, &bh); if (raw_inode) { raw_inode->i_nlinks = 0; raw_inode->i_mode = 0; } } else { struct minix2_inode *raw_inode; raw_inode = minix_V2_raw_inode(inode->i_sb, inode->i_ino, &bh); if (raw_inode) { raw_inode->i_nlinks = 0; raw_inode->i_mode = 0; } } if (bh) { mark_buffer_dirty(bh); brelse (bh); } } void minix_free_inode(struct inode * inode) { struct super_block *sb = inode->i_sb; struct minix_sb_info *sbi = minix_sb(inode->i_sb); struct buffer_head *bh; int k = sb->s_blocksize_bits + 3; unsigned long ino, bit; ino = inode->i_ino; if (ino < 1 || ino > sbi->s_ninodes) { printk("minix_free_inode: inode 0 or nonexistent inode\n"); return; } bit = ino & ((1<<k) - 1); ino >>= k; if (ino >= sbi->s_imap_blocks) { printk("minix_free_inode: nonexistent imap in superblock\n"); return; } minix_clear_inode(inode); /* clear on-disk copy */ bh = sbi->s_imap[ino]; spin_lock(&bitmap_lock); if (!minix_test_and_clear_bit(bit, bh->b_data)) printk("minix_free_inode: bit %lu already cleared\n", bit); spin_unlock(&bitmap_lock); mark_buffer_dirty(bh); } struct inode *minix_new_inode(const struct inode *dir, umode_t mode) { struct super_block *sb = dir->i_sb; struct minix_sb_info *sbi = minix_sb(sb); struct inode *inode = new_inode(sb); struct buffer_head * bh; int bits_per_zone = 8 * sb->s_blocksize; unsigned long j; int i; if (!inode) return ERR_PTR(-ENOMEM); j = bits_per_zone; bh = NULL; spin_lock(&bitmap_lock); for (i = 0; i < sbi->s_imap_blocks; i++) { bh = sbi->s_imap[i]; j = minix_find_first_zero_bit(bh->b_data, bits_per_zone); if (j < bits_per_zone) break; } if (!bh || j >= bits_per_zone) { spin_unlock(&bitmap_lock); iput(inode); return ERR_PTR(-ENOSPC); } if (minix_test_and_set_bit(j, bh->b_data)) { /* shouldn't happen */ spin_unlock(&bitmap_lock); printk("minix_new_inode: bit already set\n"); iput(inode); return ERR_PTR(-ENOSPC); } spin_unlock(&bitmap_lock); mark_buffer_dirty(bh); j += i * bits_per_zone; if (!j || j > sbi->s_ninodes) { iput(inode); return ERR_PTR(-ENOSPC); } inode_init_owner(&nop_mnt_idmap, inode, dir, mode); inode->i_ino = j; inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode); inode->i_blocks = 0; memset(&minix_i(inode)->u, 0, sizeof(minix_i(inode)->u)); insert_inode_hash(inode); mark_inode_dirty(inode); return inode; } unsigned long minix_count_free_inodes(struct super_block *sb) { struct minix_sb_info *sbi = minix_sb(sb); u32 bits = sbi->s_ninodes + 1; return count_free(sbi->s_imap, sb->s_blocksize, bits); }
linux-master
fs/minix/bitmap.c
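Both minix_free_block() and minix_free_inode() above split an object number into a (bitmap buffer, bit) pair using `k = s_blocksize_bits + 3`, i.e. the number of bits one bitmap block can hold, and count_free() counts clear bits a 16-bit word at a time. The following is a standalone userspace sketch of that arithmetic, assuming a 1 KiB block size; `zone_to_bitmap_pos` and `count_free_bits` are illustrative names, and `__builtin_popcount` stands in for the kernel's hweight16().

#include <stdint.h>
#include <stdio.h>

#define BLOCKSIZE_BITS 10u                           /* 1 KiB blocks assumed */
#define BITS_PER_ZMAP_BLOCK (1u << (BLOCKSIZE_BITS + 3))

/* Mirror of: zone = block - firstdatazone + 1; bit = zone & ((1<<k)-1); zone >>= k; */
static void zone_to_bitmap_pos(unsigned long block, unsigned long firstdatazone,
			       unsigned long *map_block, unsigned long *bit)
{
	unsigned long zone = block - firstdatazone + 1;

	*bit = zone & (BITS_PER_ZMAP_BLOCK - 1);
	*map_block = zone >> (BLOCKSIZE_BITS + 3);
}

/* count_free() idea: free bits per 16-bit word = 16 - number of set bits. */
static unsigned count_free_bits(const uint16_t *map, unsigned nwords)
{
	unsigned sum = 0;

	while (nwords--)
		sum += 16 - __builtin_popcount(*map++);
	return sum;
}

int main(void)
{
	unsigned long map_block, bit;
	uint16_t map[4] = { 0xffff, 0x0001, 0, 0 };

	zone_to_bitmap_pos(9000, 100, &map_block, &bit);
	printf("zone bitmap block %lu, bit %lu\n", map_block, bit);     /* 1, 709 */
	printf("free bits: %u\n", count_free_bits(map, 4));             /* 47     */
	return 0;
}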
// SPDX-License-Identifier: GPL-2.0 /* Generic part */ typedef struct { block_t *p; block_t key; struct buffer_head *bh; } Indirect; static DEFINE_RWLOCK(pointers_lock); static inline void add_chain(Indirect *p, struct buffer_head *bh, block_t *v) { p->key = *(p->p = v); p->bh = bh; } static inline int verify_chain(Indirect *from, Indirect *to) { while (from <= to && from->key == *from->p) from++; return (from > to); } static inline block_t *block_end(struct buffer_head *bh) { return (block_t *)((char*)bh->b_data + bh->b_size); } static inline Indirect *get_branch(struct inode *inode, int depth, int *offsets, Indirect chain[DEPTH], int *err) { struct super_block *sb = inode->i_sb; Indirect *p = chain; struct buffer_head *bh; *err = 0; /* i_data is not going away, no lock needed */ add_chain (chain, NULL, i_data(inode) + *offsets); if (!p->key) goto no_block; while (--depth) { bh = sb_bread(sb, block_to_cpu(p->key)); if (!bh) goto failure; read_lock(&pointers_lock); if (!verify_chain(chain, p)) goto changed; add_chain(++p, bh, (block_t *)bh->b_data + *++offsets); read_unlock(&pointers_lock); if (!p->key) goto no_block; } return NULL; changed: read_unlock(&pointers_lock); brelse(bh); *err = -EAGAIN; goto no_block; failure: *err = -EIO; no_block: return p; } static int alloc_branch(struct inode *inode, int num, int *offsets, Indirect *branch) { int n = 0; int i; int parent = minix_new_block(inode); int err = -ENOSPC; branch[0].key = cpu_to_block(parent); if (parent) for (n = 1; n < num; n++) { struct buffer_head *bh; /* Allocate the next block */ int nr = minix_new_block(inode); if (!nr) break; branch[n].key = cpu_to_block(nr); bh = sb_getblk(inode->i_sb, parent); if (!bh) { minix_free_block(inode, nr); err = -ENOMEM; break; } lock_buffer(bh); memset(bh->b_data, 0, bh->b_size); branch[n].bh = bh; branch[n].p = (block_t*) bh->b_data + offsets[n]; *branch[n].p = branch[n].key; set_buffer_uptodate(bh); unlock_buffer(bh); mark_buffer_dirty_inode(bh, inode); parent = nr; } if (n == num) return 0; /* Allocation failed, free what we already allocated */ for (i = 1; i < n; i++) bforget(branch[i].bh); for (i = 0; i < n; i++) minix_free_block(inode, block_to_cpu(branch[i].key)); return err; } static inline int splice_branch(struct inode *inode, Indirect chain[DEPTH], Indirect *where, int num) { int i; write_lock(&pointers_lock); /* Verify that place we are splicing to is still there and vacant */ if (!verify_chain(chain, where-1) || *where->p) goto changed; *where->p = where->key; write_unlock(&pointers_lock); /* We are done with atomic stuff, now do the rest of housekeeping */ inode_set_ctime_current(inode); /* had we spliced it onto indirect block? 
*/ if (where->bh) mark_buffer_dirty_inode(where->bh, inode); mark_inode_dirty(inode); return 0; changed: write_unlock(&pointers_lock); for (i = 1; i < num; i++) bforget(where[i].bh); for (i = 0; i < num; i++) minix_free_block(inode, block_to_cpu(where[i].key)); return -EAGAIN; } static int get_block(struct inode * inode, sector_t block, struct buffer_head *bh, int create) { int err = -EIO; int offsets[DEPTH]; Indirect chain[DEPTH]; Indirect *partial; int left; int depth = block_to_path(inode, block, offsets); if (depth == 0) goto out; reread: partial = get_branch(inode, depth, offsets, chain, &err); /* Simplest case - block found, no allocation needed */ if (!partial) { got_it: map_bh(bh, inode->i_sb, block_to_cpu(chain[depth-1].key)); /* Clean up and exit */ partial = chain+depth-1; /* the whole chain */ goto cleanup; } /* Next simple case - plain lookup or failed read of indirect block */ if (!create || err == -EIO) { cleanup: while (partial > chain) { brelse(partial->bh); partial--; } out: return err; } /* * Indirect block might be removed by truncate while we were * reading it. Handling of that case (forget what we've got and * reread) is taken out of the main path. */ if (err == -EAGAIN) goto changed; left = (chain + depth) - partial; err = alloc_branch(inode, left, offsets+(partial-chain), partial); if (err) goto cleanup; if (splice_branch(inode, chain, partial, left) < 0) goto changed; set_buffer_new(bh); goto got_it; changed: while (partial > chain) { brelse(partial->bh); partial--; } goto reread; } static inline int all_zeroes(block_t *p, block_t *q) { while (p < q) if (*p++) return 0; return 1; } static Indirect *find_shared(struct inode *inode, int depth, int offsets[DEPTH], Indirect chain[DEPTH], block_t *top) { Indirect *partial, *p; int k, err; *top = 0; for (k = depth; k > 1 && !offsets[k-1]; k--) ; partial = get_branch(inode, k, offsets, chain, &err); write_lock(&pointers_lock); if (!partial) partial = chain + k-1; if (!partial->key && *partial->p) { write_unlock(&pointers_lock); goto no_top; } for (p=partial;p>chain && all_zeroes((block_t*)p->bh->b_data,p->p);p--) ; if (p == chain + k - 1 && p > chain) { p->p--; } else { *top = *p->p; *p->p = 0; } write_unlock(&pointers_lock); while(partial > p) { brelse(partial->bh); partial--; } no_top: return partial; } static inline void free_data(struct inode *inode, block_t *p, block_t *q) { unsigned long nr; for ( ; p < q ; p++) { nr = block_to_cpu(*p); if (nr) { *p = 0; minix_free_block(inode, nr); } } } static void free_branches(struct inode *inode, block_t *p, block_t *q, int depth) { struct buffer_head * bh; unsigned long nr; if (depth--) { for ( ; p < q ; p++) { nr = block_to_cpu(*p); if (!nr) continue; *p = 0; bh = sb_bread(inode->i_sb, nr); if (!bh) continue; free_branches(inode, (block_t*)bh->b_data, block_end(bh), depth); bforget(bh); minix_free_block(inode, nr); mark_inode_dirty(inode); } } else free_data(inode, p, q); } static inline void truncate (struct inode * inode) { struct super_block *sb = inode->i_sb; block_t *idata = i_data(inode); int offsets[DEPTH]; Indirect chain[DEPTH]; Indirect *partial; block_t nr = 0; int n; int first_whole; long iblock; iblock = (inode->i_size + sb->s_blocksize -1) >> sb->s_blocksize_bits; block_truncate_page(inode->i_mapping, inode->i_size, get_block); n = block_to_path(inode, iblock, offsets); if (!n) return; if (n == 1) { free_data(inode, idata+offsets[0], idata + DIRECT); first_whole = 0; goto do_indirects; } first_whole = offsets[0] + 1 - DIRECT; partial = find_shared(inode, n, 
offsets, chain, &nr); if (nr) { if (partial == chain) mark_inode_dirty(inode); else mark_buffer_dirty_inode(partial->bh, inode); free_branches(inode, &nr, &nr+1, (chain+n-1) - partial); } /* Clear the ends of indirect blocks on the shared branch */ while (partial > chain) { free_branches(inode, partial->p + 1, block_end(partial->bh), (chain+n-1) - partial); mark_buffer_dirty_inode(partial->bh, inode); brelse (partial->bh); partial--; } do_indirects: /* Kill the remaining (whole) subtrees */ while (first_whole < DEPTH-1) { nr = idata[DIRECT+first_whole]; if (nr) { idata[DIRECT+first_whole] = 0; mark_inode_dirty(inode); free_branches(inode, &nr, &nr+1, first_whole+1); } first_whole++; } inode->i_mtime = inode_set_ctime_current(inode); mark_inode_dirty(inode); } static inline unsigned nblocks(loff_t size, struct super_block *sb) { int k = sb->s_blocksize_bits - 10; unsigned blocks, res, direct = DIRECT, i = DEPTH; blocks = (size + sb->s_blocksize - 1) >> (BLOCK_SIZE_BITS + k); res = blocks; while (--i && blocks > direct) { blocks -= direct; blocks += sb->s_blocksize/sizeof(block_t) - 1; blocks /= sb->s_blocksize/sizeof(block_t); res += blocks; direct = 1; } return res; }
linux-master
fs/minix/itree_common.c
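get_branch() and verify_chain() in the file above detect a concurrent truncate by recording, for each step of the indirect lookup, both the slot address (`p`) and the value read from it (`key`), then re-checking `*p == key` under pointers_lock before extending the chain. A minimal userspace sketch of that idea with plain uint32_t block numbers follows; `struct indirect` and the driver in main() are illustrative, not the kernel types.

#include <stdint.h>
#include <stdio.h>

/* One step of an indirect-block lookup: where we read from, and what we read. */
struct indirect {
	uint32_t *p;    /* slot inside the parent (inode or indirect block) */
	uint32_t  key;  /* block number that slot held when we walked it    */
};

static void add_chain(struct indirect *step, uint32_t *slot)
{
	step->p = slot;
	step->key = *slot;
}

/* Returns 1 if no slot changed underneath us since the walk (cf. verify_chain). */
static int verify_chain(const struct indirect *from, const struct indirect *to)
{
	while (from <= to && from->key == *from->p)
		from++;
	return from > to;
}

int main(void)
{
	uint32_t inode_slot = 42, indirect_slot = 99;
	struct indirect chain[2];

	add_chain(&chain[0], &inode_slot);
	add_chain(&chain[1], &indirect_slot);
	printf("chain valid: %d\n", verify_chain(chain, &chain[1]));   /* 1 */

	indirect_slot = 0;   /* simulate a concurrent truncate clearing the slot */
	printf("chain valid: %d\n", verify_chain(chain, &chain[1]));   /* 0 */
	return 0;
}

When the check fails, get_block() above brelse()s what it read and simply re-walks the chain, which is why the -EAGAIN path loops back to `reread`.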
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/minix/dir.c * * Copyright (C) 1991, 1992 Linus Torvalds * * minix directory handling functions * * Updated to filesystem version 3 by Daniel Aragones */ #include "minix.h" #include <linux/buffer_head.h> #include <linux/highmem.h> #include <linux/swap.h> typedef struct minix_dir_entry minix_dirent; typedef struct minix3_dir_entry minix3_dirent; static int minix_readdir(struct file *, struct dir_context *); const struct file_operations minix_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, .iterate_shared = minix_readdir, .fsync = generic_file_fsync, }; static inline void dir_put_page(struct page *page) { kunmap(page); put_page(page); } /* * Return the offset into page `page_nr' of the last valid * byte in that page, plus one. */ static unsigned minix_last_byte(struct inode *inode, unsigned long page_nr) { unsigned last_byte = PAGE_SIZE; if (page_nr == (inode->i_size >> PAGE_SHIFT)) last_byte = inode->i_size & (PAGE_SIZE - 1); return last_byte; } static void dir_commit_chunk(struct page *page, loff_t pos, unsigned len) { struct address_space *mapping = page->mapping; struct inode *dir = mapping->host; block_write_end(NULL, mapping, pos, len, len, page, NULL); if (pos+len > dir->i_size) { i_size_write(dir, pos+len); mark_inode_dirty(dir); } unlock_page(page); } static int minix_handle_dirsync(struct inode *dir) { int err; err = filemap_write_and_wait(dir->i_mapping); if (!err) err = sync_inode_metadata(dir, 1); return err; } static struct page * dir_get_page(struct inode *dir, unsigned long n) { struct address_space *mapping = dir->i_mapping; struct page *page = read_mapping_page(mapping, n, NULL); if (!IS_ERR(page)) kmap(page); return page; } static inline void *minix_next_entry(void *de, struct minix_sb_info *sbi) { return (void*)((char*)de + sbi->s_dirsize); } static int minix_readdir(struct file *file, struct dir_context *ctx) { struct inode *inode = file_inode(file); struct super_block *sb = inode->i_sb; struct minix_sb_info *sbi = minix_sb(sb); unsigned chunk_size = sbi->s_dirsize; unsigned long npages = dir_pages(inode); unsigned long pos = ctx->pos; unsigned offset; unsigned long n; ctx->pos = pos = ALIGN(pos, chunk_size); if (pos >= inode->i_size) return 0; offset = pos & ~PAGE_MASK; n = pos >> PAGE_SHIFT; for ( ; n < npages; n++, offset = 0) { char *p, *kaddr, *limit; struct page *page = dir_get_page(inode, n); if (IS_ERR(page)) continue; kaddr = (char *)page_address(page); p = kaddr+offset; limit = kaddr + minix_last_byte(inode, n) - chunk_size; for ( ; p <= limit; p = minix_next_entry(p, sbi)) { const char *name; __u32 inumber; if (sbi->s_version == MINIX_V3) { minix3_dirent *de3 = (minix3_dirent *)p; name = de3->name; inumber = de3->inode; } else { minix_dirent *de = (minix_dirent *)p; name = de->name; inumber = de->inode; } if (inumber) { unsigned l = strnlen(name, sbi->s_namelen); if (!dir_emit(ctx, name, l, inumber, DT_UNKNOWN)) { dir_put_page(page); return 0; } } ctx->pos += chunk_size; } dir_put_page(page); } return 0; } static inline int namecompare(int len, int maxlen, const char * name, const char * buffer) { if (len < maxlen && buffer[len]) return 0; return !memcmp(name, buffer, len); } /* * minix_find_entry() * * finds an entry in the specified directory with the wanted name. It * returns the cache buffer in which the entry was found, and the entry * itself (as a parameter - res_dir). It does NOT read the inode of the * entry - you'll have to do that yourself if you want to. 
*/ minix_dirent *minix_find_entry(struct dentry *dentry, struct page **res_page) { const char * name = dentry->d_name.name; int namelen = dentry->d_name.len; struct inode * dir = d_inode(dentry->d_parent); struct super_block * sb = dir->i_sb; struct minix_sb_info * sbi = minix_sb(sb); unsigned long n; unsigned long npages = dir_pages(dir); struct page *page = NULL; char *p; char *namx; __u32 inumber; *res_page = NULL; for (n = 0; n < npages; n++) { char *kaddr, *limit; page = dir_get_page(dir, n); if (IS_ERR(page)) continue; kaddr = (char*)page_address(page); limit = kaddr + minix_last_byte(dir, n) - sbi->s_dirsize; for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) { if (sbi->s_version == MINIX_V3) { minix3_dirent *de3 = (minix3_dirent *)p; namx = de3->name; inumber = de3->inode; } else { minix_dirent *de = (minix_dirent *)p; namx = de->name; inumber = de->inode; } if (!inumber) continue; if (namecompare(namelen, sbi->s_namelen, name, namx)) goto found; } dir_put_page(page); } return NULL; found: *res_page = page; return (minix_dirent *)p; } int minix_add_link(struct dentry *dentry, struct inode *inode) { struct inode *dir = d_inode(dentry->d_parent); const char * name = dentry->d_name.name; int namelen = dentry->d_name.len; struct super_block * sb = dir->i_sb; struct minix_sb_info * sbi = minix_sb(sb); struct page *page = NULL; unsigned long npages = dir_pages(dir); unsigned long n; char *kaddr, *p; minix_dirent *de; minix3_dirent *de3; loff_t pos; int err; char *namx = NULL; __u32 inumber; /* * We take care of directory expansion in the same loop * This code plays outside i_size, so it locks the page * to protect that region. */ for (n = 0; n <= npages; n++) { char *limit, *dir_end; page = dir_get_page(dir, n); err = PTR_ERR(page); if (IS_ERR(page)) goto out; lock_page(page); kaddr = (char*)page_address(page); dir_end = kaddr + minix_last_byte(dir, n); limit = kaddr + PAGE_SIZE - sbi->s_dirsize; for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) { de = (minix_dirent *)p; de3 = (minix3_dirent *)p; if (sbi->s_version == MINIX_V3) { namx = de3->name; inumber = de3->inode; } else { namx = de->name; inumber = de->inode; } if (p == dir_end) { /* We hit i_size */ if (sbi->s_version == MINIX_V3) de3->inode = 0; else de->inode = 0; goto got_it; } if (!inumber) goto got_it; err = -EEXIST; if (namecompare(namelen, sbi->s_namelen, name, namx)) goto out_unlock; } unlock_page(page); dir_put_page(page); } BUG(); return -EINVAL; got_it: pos = page_offset(page) + p - (char *)page_address(page); err = minix_prepare_chunk(page, pos, sbi->s_dirsize); if (err) goto out_unlock; memcpy (namx, name, namelen); if (sbi->s_version == MINIX_V3) { memset (namx + namelen, 0, sbi->s_dirsize - namelen - 4); de3->inode = inode->i_ino; } else { memset (namx + namelen, 0, sbi->s_dirsize - namelen - 2); de->inode = inode->i_ino; } dir_commit_chunk(page, pos, sbi->s_dirsize); dir->i_mtime = inode_set_ctime_current(dir); mark_inode_dirty(dir); err = minix_handle_dirsync(dir); out_put: dir_put_page(page); out: return err; out_unlock: unlock_page(page); goto out_put; } int minix_delete_entry(struct minix_dir_entry *de, struct page *page) { struct inode *inode = page->mapping->host; char *kaddr = page_address(page); loff_t pos = page_offset(page) + (char*)de - kaddr; struct minix_sb_info *sbi = minix_sb(inode->i_sb); unsigned len = sbi->s_dirsize; int err; lock_page(page); err = minix_prepare_chunk(page, pos, len); if (err) { unlock_page(page); return err; } if (sbi->s_version == MINIX_V3) ((minix3_dirent 
*)de)->inode = 0; else de->inode = 0; dir_commit_chunk(page, pos, len); inode->i_mtime = inode_set_ctime_current(inode); mark_inode_dirty(inode); return minix_handle_dirsync(inode); } int minix_make_empty(struct inode *inode, struct inode *dir) { struct page *page = grab_cache_page(inode->i_mapping, 0); struct minix_sb_info *sbi = minix_sb(inode->i_sb); char *kaddr; int err; if (!page) return -ENOMEM; err = minix_prepare_chunk(page, 0, 2 * sbi->s_dirsize); if (err) { unlock_page(page); goto fail; } kaddr = kmap_atomic(page); memset(kaddr, 0, PAGE_SIZE); if (sbi->s_version == MINIX_V3) { minix3_dirent *de3 = (minix3_dirent *)kaddr; de3->inode = inode->i_ino; strcpy(de3->name, "."); de3 = minix_next_entry(de3, sbi); de3->inode = dir->i_ino; strcpy(de3->name, ".."); } else { minix_dirent *de = (minix_dirent *)kaddr; de->inode = inode->i_ino; strcpy(de->name, "."); de = minix_next_entry(de, sbi); de->inode = dir->i_ino; strcpy(de->name, ".."); } kunmap_atomic(kaddr); dir_commit_chunk(page, 0, 2 * sbi->s_dirsize); err = minix_handle_dirsync(inode); fail: put_page(page); return err; } /* * routine to check that the specified directory is empty (for rmdir) */ int minix_empty_dir(struct inode * inode) { struct page *page = NULL; unsigned long i, npages = dir_pages(inode); struct minix_sb_info *sbi = minix_sb(inode->i_sb); char *name; __u32 inumber; for (i = 0; i < npages; i++) { char *p, *kaddr, *limit; page = dir_get_page(inode, i); if (IS_ERR(page)) continue; kaddr = (char *)page_address(page); limit = kaddr + minix_last_byte(inode, i) - sbi->s_dirsize; for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) { if (sbi->s_version == MINIX_V3) { minix3_dirent *de3 = (minix3_dirent *)p; name = de3->name; inumber = de3->inode; } else { minix_dirent *de = (minix_dirent *)p; name = de->name; inumber = de->inode; } if (inumber != 0) { /* check for . and .. */ if (name[0] != '.') goto not_empty; if (!name[1]) { if (inumber != inode->i_ino) goto not_empty; } else if (name[1] != '.') goto not_empty; else if (name[2]) goto not_empty; } } dir_put_page(page); } return 1; not_empty: dir_put_page(page); return 0; } /* Releases the page */ int minix_set_link(struct minix_dir_entry *de, struct page *page, struct inode *inode) { struct inode *dir = page->mapping->host; struct minix_sb_info *sbi = minix_sb(dir->i_sb); loff_t pos = page_offset(page) + (char *)de-(char*)page_address(page); int err; lock_page(page); err = minix_prepare_chunk(page, pos, sbi->s_dirsize); if (err) { unlock_page(page); return err; } if (sbi->s_version == MINIX_V3) ((minix3_dirent *)de)->inode = inode->i_ino; else de->inode = inode->i_ino; dir_commit_chunk(page, pos, sbi->s_dirsize); dir->i_mtime = inode_set_ctime_current(dir); mark_inode_dirty(dir); return minix_handle_dirsync(dir); } struct minix_dir_entry * minix_dotdot (struct inode *dir, struct page **p) { struct page *page = dir_get_page(dir, 0); struct minix_sb_info *sbi = minix_sb(dir->i_sb); struct minix_dir_entry *de = NULL; if (!IS_ERR(page)) { de = minix_next_entry(page_address(page), sbi); *p = page; } return de; } ino_t minix_inode_by_name(struct dentry *dentry) { struct page *page; struct minix_dir_entry *de = minix_find_entry(dentry, &page); ino_t res = 0; if (de) { struct address_space *mapping = page->mapping; struct inode *inode = mapping->host; struct minix_sb_info *sbi = minix_sb(inode->i_sb); if (sbi->s_version == MINIX_V3) res = ((minix3_dirent *) de)->inode; else res = de->inode; dir_put_page(page); } return res; }
linux-master
fs/minix/dir.c
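Every directory routine in the file above walks a page in fixed s_dirsize strides via minix_next_entry() and matches names with namecompare(), which also requires a name shorter than the field to be NUL-terminated on disk. Here is a standalone sketch of that scan over a raw buffer, assuming the 32-byte entry layout (2-byte inode number followed by a 30-byte name); the constants and the `find_entry` helper are illustrative only.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DIRSIZE 32u   /* bytes per entry: 2-byte ino + 30-byte name (assumed layout) */
#define NAMELEN 30u

/* Same rule as namecompare(): a shorter name must be NUL-terminated on disk. */
static int namecompare(size_t len, size_t maxlen, const char *name, const char *buf)
{
	if (len < maxlen && buf[len])
		return 0;
	return !memcmp(name, buf, len);
}

/* Scan 'nbytes' of raw directory data for 'name'; return its inode number or 0. */
static uint16_t find_entry(const uint8_t *dir, size_t nbytes, const char *name)
{
	size_t len = strlen(name);

	for (size_t off = 0; off + DIRSIZE <= nbytes; off += DIRSIZE) {
		uint16_t ino;

		memcpy(&ino, dir + off, sizeof(ino));
		if (!ino)
			continue;   /* unused slot, like the inumber == 0 checks above */
		if (namecompare(len, NAMELEN, name, (const char *)dir + off + 2))
			return ino;
	}
	return 0;
}

int main(void)
{
	uint8_t dir[2 * DIRSIZE] = { 0 };
	uint16_t ino = 7;

	memcpy(dir, &ino, sizeof(ino));          /* entry 0: ino 7, name "hello" */
	memcpy(dir + 2, "hello", 6);
	printf("ino of \"hello\": %u\n", find_entry(dir, sizeof(dir), "hello"));
	return 0;
}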
// SPDX-License-Identifier: GPL-2.0-only /* * linux/fs/minix/inode.c * * Copyright (C) 1991, 1992 Linus Torvalds * * Copyright (C) 1996 Gertjan van Wingerde * Minix V2 fs support. * * Modified for 680x0 by Andreas Schwab * Updated to filesystem version 3 by Daniel Aragones */ #include <linux/module.h> #include "minix.h" #include <linux/buffer_head.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/highuid.h> #include <linux/vfs.h> #include <linux/writeback.h> static int minix_write_inode(struct inode *inode, struct writeback_control *wbc); static int minix_statfs(struct dentry *dentry, struct kstatfs *buf); static int minix_remount (struct super_block * sb, int * flags, char * data); static void minix_evict_inode(struct inode *inode) { truncate_inode_pages_final(&inode->i_data); if (!inode->i_nlink) { inode->i_size = 0; minix_truncate(inode); } invalidate_inode_buffers(inode); clear_inode(inode); if (!inode->i_nlink) minix_free_inode(inode); } static void minix_put_super(struct super_block *sb) { int i; struct minix_sb_info *sbi = minix_sb(sb); if (!sb_rdonly(sb)) { if (sbi->s_version != MINIX_V3) /* s_state is now out from V3 sb */ sbi->s_ms->s_state = sbi->s_mount_state; mark_buffer_dirty(sbi->s_sbh); } for (i = 0; i < sbi->s_imap_blocks; i++) brelse(sbi->s_imap[i]); for (i = 0; i < sbi->s_zmap_blocks; i++) brelse(sbi->s_zmap[i]); brelse (sbi->s_sbh); kfree(sbi->s_imap); sb->s_fs_info = NULL; kfree(sbi); } static struct kmem_cache * minix_inode_cachep; static struct inode *minix_alloc_inode(struct super_block *sb) { struct minix_inode_info *ei; ei = alloc_inode_sb(sb, minix_inode_cachep, GFP_KERNEL); if (!ei) return NULL; return &ei->vfs_inode; } static void minix_free_in_core_inode(struct inode *inode) { kmem_cache_free(minix_inode_cachep, minix_i(inode)); } static void init_once(void *foo) { struct minix_inode_info *ei = (struct minix_inode_info *) foo; inode_init_once(&ei->vfs_inode); } static int __init init_inodecache(void) { minix_inode_cachep = kmem_cache_create("minix_inode_cache", sizeof(struct minix_inode_info), 0, (SLAB_RECLAIM_ACCOUNT| SLAB_MEM_SPREAD|SLAB_ACCOUNT), init_once); if (minix_inode_cachep == NULL) return -ENOMEM; return 0; } static void destroy_inodecache(void) { /* * Make sure all delayed rcu free inodes are flushed before we * destroy cache. */ rcu_barrier(); kmem_cache_destroy(minix_inode_cachep); } static const struct super_operations minix_sops = { .alloc_inode = minix_alloc_inode, .free_inode = minix_free_in_core_inode, .write_inode = minix_write_inode, .evict_inode = minix_evict_inode, .put_super = minix_put_super, .statfs = minix_statfs, .remount_fs = minix_remount, }; static int minix_remount (struct super_block * sb, int * flags, char * data) { struct minix_sb_info * sbi = minix_sb(sb); struct minix_super_block * ms; sync_filesystem(sb); ms = sbi->s_ms; if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb)) return 0; if (*flags & SB_RDONLY) { if (ms->s_state & MINIX_VALID_FS || !(sbi->s_mount_state & MINIX_VALID_FS)) return 0; /* Mounting a rw partition read-only. */ if (sbi->s_version != MINIX_V3) ms->s_state = sbi->s_mount_state; mark_buffer_dirty(sbi->s_sbh); } else { /* Mount a partition which is read-only, read-write. 
*/ if (sbi->s_version != MINIX_V3) { sbi->s_mount_state = ms->s_state; ms->s_state &= ~MINIX_VALID_FS; } else { sbi->s_mount_state = MINIX_VALID_FS; } mark_buffer_dirty(sbi->s_sbh); if (!(sbi->s_mount_state & MINIX_VALID_FS)) printk("MINIX-fs warning: remounting unchecked fs, " "running fsck is recommended\n"); else if ((sbi->s_mount_state & MINIX_ERROR_FS)) printk("MINIX-fs warning: remounting fs with errors, " "running fsck is recommended\n"); } return 0; } static bool minix_check_superblock(struct super_block *sb) { struct minix_sb_info *sbi = minix_sb(sb); if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0) return false; /* * s_max_size must not exceed the block mapping limitation. This check * is only needed for V1 filesystems, since V2/V3 support an extra level * of indirect blocks which places the limit well above U32_MAX. */ if (sbi->s_version == MINIX_V1 && sb->s_maxbytes > (7 + 512 + 512*512) * BLOCK_SIZE) return false; return true; } static int minix_fill_super(struct super_block *s, void *data, int silent) { struct buffer_head *bh; struct buffer_head **map; struct minix_super_block *ms; struct minix3_super_block *m3s = NULL; unsigned long i, block; struct inode *root_inode; struct minix_sb_info *sbi; int ret = -EINVAL; sbi = kzalloc(sizeof(struct minix_sb_info), GFP_KERNEL); if (!sbi) return -ENOMEM; s->s_fs_info = sbi; BUILD_BUG_ON(32 != sizeof (struct minix_inode)); BUILD_BUG_ON(64 != sizeof(struct minix2_inode)); if (!sb_set_blocksize(s, BLOCK_SIZE)) goto out_bad_hblock; if (!(bh = sb_bread(s, 1))) goto out_bad_sb; ms = (struct minix_super_block *) bh->b_data; sbi->s_ms = ms; sbi->s_sbh = bh; sbi->s_mount_state = ms->s_state; sbi->s_ninodes = ms->s_ninodes; sbi->s_nzones = ms->s_nzones; sbi->s_imap_blocks = ms->s_imap_blocks; sbi->s_zmap_blocks = ms->s_zmap_blocks; sbi->s_firstdatazone = ms->s_firstdatazone; sbi->s_log_zone_size = ms->s_log_zone_size; s->s_maxbytes = ms->s_max_size; s->s_magic = ms->s_magic; if (s->s_magic == MINIX_SUPER_MAGIC) { sbi->s_version = MINIX_V1; sbi->s_dirsize = 16; sbi->s_namelen = 14; s->s_max_links = MINIX_LINK_MAX; } else if (s->s_magic == MINIX_SUPER_MAGIC2) { sbi->s_version = MINIX_V1; sbi->s_dirsize = 32; sbi->s_namelen = 30; s->s_max_links = MINIX_LINK_MAX; } else if (s->s_magic == MINIX2_SUPER_MAGIC) { sbi->s_version = MINIX_V2; sbi->s_nzones = ms->s_zones; sbi->s_dirsize = 16; sbi->s_namelen = 14; s->s_max_links = MINIX2_LINK_MAX; } else if (s->s_magic == MINIX2_SUPER_MAGIC2) { sbi->s_version = MINIX_V2; sbi->s_nzones = ms->s_zones; sbi->s_dirsize = 32; sbi->s_namelen = 30; s->s_max_links = MINIX2_LINK_MAX; } else if ( *(__u16 *)(bh->b_data + 24) == MINIX3_SUPER_MAGIC) { m3s = (struct minix3_super_block *) bh->b_data; s->s_magic = m3s->s_magic; sbi->s_imap_blocks = m3s->s_imap_blocks; sbi->s_zmap_blocks = m3s->s_zmap_blocks; sbi->s_firstdatazone = m3s->s_firstdatazone; sbi->s_log_zone_size = m3s->s_log_zone_size; s->s_maxbytes = m3s->s_max_size; sbi->s_ninodes = m3s->s_ninodes; sbi->s_nzones = m3s->s_zones; sbi->s_dirsize = 64; sbi->s_namelen = 60; sbi->s_version = MINIX_V3; sbi->s_mount_state = MINIX_VALID_FS; sb_set_blocksize(s, m3s->s_blocksize); s->s_max_links = MINIX2_LINK_MAX; } else goto out_no_fs; if (!minix_check_superblock(s)) goto out_illegal_sb; /* * Allocate the buffer map to keep the superblock small. 
*/ i = (sbi->s_imap_blocks + sbi->s_zmap_blocks) * sizeof(bh); map = kzalloc(i, GFP_KERNEL); if (!map) goto out_no_map; sbi->s_imap = &map[0]; sbi->s_zmap = &map[sbi->s_imap_blocks]; block=2; for (i=0 ; i < sbi->s_imap_blocks ; i++) { if (!(sbi->s_imap[i]=sb_bread(s, block))) goto out_no_bitmap; block++; } for (i=0 ; i < sbi->s_zmap_blocks ; i++) { if (!(sbi->s_zmap[i]=sb_bread(s, block))) goto out_no_bitmap; block++; } minix_set_bit(0,sbi->s_imap[0]->b_data); minix_set_bit(0,sbi->s_zmap[0]->b_data); /* Apparently minix can create filesystems that allocate more blocks for * the bitmaps than needed. We simply ignore that, but verify it didn't * create one with not enough blocks and bail out if so. */ block = minix_blocks_needed(sbi->s_ninodes, s->s_blocksize); if (sbi->s_imap_blocks < block) { printk("MINIX-fs: file system does not have enough " "imap blocks allocated. Refusing to mount.\n"); goto out_no_bitmap; } block = minix_blocks_needed( (sbi->s_nzones - sbi->s_firstdatazone + 1), s->s_blocksize); if (sbi->s_zmap_blocks < block) { printk("MINIX-fs: file system does not have enough " "zmap blocks allocated. Refusing to mount.\n"); goto out_no_bitmap; } /* set up enough so that it can read an inode */ s->s_op = &minix_sops; s->s_time_min = 0; s->s_time_max = U32_MAX; root_inode = minix_iget(s, MINIX_ROOT_INO); if (IS_ERR(root_inode)) { ret = PTR_ERR(root_inode); goto out_no_root; } ret = -ENOMEM; s->s_root = d_make_root(root_inode); if (!s->s_root) goto out_no_root; if (!sb_rdonly(s)) { if (sbi->s_version != MINIX_V3) /* s_state is now out from V3 sb */ ms->s_state &= ~MINIX_VALID_FS; mark_buffer_dirty(bh); } if (!(sbi->s_mount_state & MINIX_VALID_FS)) printk("MINIX-fs: mounting unchecked file system, " "running fsck is recommended\n"); else if (sbi->s_mount_state & MINIX_ERROR_FS) printk("MINIX-fs: mounting file system with errors, " "running fsck is recommended\n"); return 0; out_no_root: if (!silent) printk("MINIX-fs: get root inode failed\n"); goto out_freemap; out_no_bitmap: printk("MINIX-fs: bad superblock or unable to read bitmaps\n"); out_freemap: for (i = 0; i < sbi->s_imap_blocks; i++) brelse(sbi->s_imap[i]); for (i = 0; i < sbi->s_zmap_blocks; i++) brelse(sbi->s_zmap[i]); kfree(sbi->s_imap); goto out_release; out_no_map: ret = -ENOMEM; if (!silent) printk("MINIX-fs: can't allocate map\n"); goto out_release; out_illegal_sb: if (!silent) printk("MINIX-fs: bad superblock\n"); goto out_release; out_no_fs: if (!silent) printk("VFS: Can't find a Minix filesystem V1 | V2 | V3 " "on device %s.\n", s->s_id); out_release: brelse(bh); goto out; out_bad_hblock: printk("MINIX-fs: blocksize too small for device\n"); goto out; out_bad_sb: printk("MINIX-fs: unable to read superblock\n"); out: s->s_fs_info = NULL; kfree(sbi); return ret; } static int minix_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; struct minix_sb_info *sbi = minix_sb(sb); u64 id = huge_encode_dev(sb->s_bdev->bd_dev); buf->f_type = sb->s_magic; buf->f_bsize = sb->s_blocksize; buf->f_blocks = (sbi->s_nzones - sbi->s_firstdatazone) << sbi->s_log_zone_size; buf->f_bfree = minix_count_free_blocks(sb); buf->f_bavail = buf->f_bfree; buf->f_files = sbi->s_ninodes; buf->f_ffree = minix_count_free_inodes(sb); buf->f_namelen = sbi->s_namelen; buf->f_fsid = u64_to_fsid(id); return 0; } static int minix_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create) { if (INODE_VERSION(inode) == MINIX_V1) return V1_minix_get_block(inode, block, bh_result, create); 
else return V2_minix_get_block(inode, block, bh_result, create); } static int minix_writepage(struct page *page, struct writeback_control *wbc) { return block_write_full_page(page, minix_get_block, wbc); } static int minix_read_folio(struct file *file, struct folio *folio) { return block_read_full_folio(folio, minix_get_block); } int minix_prepare_chunk(struct page *page, loff_t pos, unsigned len) { return __block_write_begin(page, pos, len, minix_get_block); } static void minix_write_failed(struct address_space *mapping, loff_t to) { struct inode *inode = mapping->host; if (to > inode->i_size) { truncate_pagecache(inode, inode->i_size); minix_truncate(inode); } } static int minix_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, struct page **pagep, void **fsdata) { int ret; ret = block_write_begin(mapping, pos, len, pagep, minix_get_block); if (unlikely(ret)) minix_write_failed(mapping, pos + len); return ret; } static sector_t minix_bmap(struct address_space *mapping, sector_t block) { return generic_block_bmap(mapping,block,minix_get_block); } static const struct address_space_operations minix_aops = { .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .read_folio = minix_read_folio, .writepage = minix_writepage, .write_begin = minix_write_begin, .write_end = generic_write_end, .bmap = minix_bmap, .direct_IO = noop_direct_IO }; static const struct inode_operations minix_symlink_inode_operations = { .get_link = page_get_link, .getattr = minix_getattr, }; void minix_set_inode(struct inode *inode, dev_t rdev) { if (S_ISREG(inode->i_mode)) { inode->i_op = &minix_file_inode_operations; inode->i_fop = &minix_file_operations; inode->i_mapping->a_ops = &minix_aops; } else if (S_ISDIR(inode->i_mode)) { inode->i_op = &minix_dir_inode_operations; inode->i_fop = &minix_dir_operations; inode->i_mapping->a_ops = &minix_aops; } else if (S_ISLNK(inode->i_mode)) { inode->i_op = &minix_symlink_inode_operations; inode_nohighmem(inode); inode->i_mapping->a_ops = &minix_aops; } else init_special_inode(inode, inode->i_mode, rdev); } /* * The minix V1 function to read an inode. */ static struct inode *V1_minix_iget(struct inode *inode) { struct buffer_head * bh; struct minix_inode * raw_inode; struct minix_inode_info *minix_inode = minix_i(inode); int i; raw_inode = minix_V1_raw_inode(inode->i_sb, inode->i_ino, &bh); if (!raw_inode) { iget_failed(inode); return ERR_PTR(-EIO); } if (raw_inode->i_nlinks == 0) { printk("MINIX-fs: deleted inode referenced: %lu\n", inode->i_ino); brelse(bh); iget_failed(inode); return ERR_PTR(-ESTALE); } inode->i_mode = raw_inode->i_mode; i_uid_write(inode, raw_inode->i_uid); i_gid_write(inode, raw_inode->i_gid); set_nlink(inode, raw_inode->i_nlinks); inode->i_size = raw_inode->i_size; inode->i_mtime = inode->i_atime = inode_set_ctime(inode, raw_inode->i_time, 0); inode->i_blocks = 0; for (i = 0; i < 9; i++) minix_inode->u.i1_data[i] = raw_inode->i_zone[i]; minix_set_inode(inode, old_decode_dev(raw_inode->i_zone[0])); brelse(bh); unlock_new_inode(inode); return inode; } /* * The minix V2 function to read an inode. 
*/ static struct inode *V2_minix_iget(struct inode *inode) { struct buffer_head * bh; struct minix2_inode * raw_inode; struct minix_inode_info *minix_inode = minix_i(inode); int i; raw_inode = minix_V2_raw_inode(inode->i_sb, inode->i_ino, &bh); if (!raw_inode) { iget_failed(inode); return ERR_PTR(-EIO); } if (raw_inode->i_nlinks == 0) { printk("MINIX-fs: deleted inode referenced: %lu\n", inode->i_ino); brelse(bh); iget_failed(inode); return ERR_PTR(-ESTALE); } inode->i_mode = raw_inode->i_mode; i_uid_write(inode, raw_inode->i_uid); i_gid_write(inode, raw_inode->i_gid); set_nlink(inode, raw_inode->i_nlinks); inode->i_size = raw_inode->i_size; inode->i_mtime.tv_sec = raw_inode->i_mtime; inode->i_atime.tv_sec = raw_inode->i_atime; inode_set_ctime(inode, raw_inode->i_ctime, 0); inode->i_mtime.tv_nsec = 0; inode->i_atime.tv_nsec = 0; inode->i_blocks = 0; for (i = 0; i < 10; i++) minix_inode->u.i2_data[i] = raw_inode->i_zone[i]; minix_set_inode(inode, old_decode_dev(raw_inode->i_zone[0])); brelse(bh); unlock_new_inode(inode); return inode; } /* * The global function to read an inode. */ struct inode *minix_iget(struct super_block *sb, unsigned long ino) { struct inode *inode; inode = iget_locked(sb, ino); if (!inode) return ERR_PTR(-ENOMEM); if (!(inode->i_state & I_NEW)) return inode; if (INODE_VERSION(inode) == MINIX_V1) return V1_minix_iget(inode); else return V2_minix_iget(inode); } /* * The minix V1 function to synchronize an inode. */ static struct buffer_head * V1_minix_update_inode(struct inode * inode) { struct buffer_head * bh; struct minix_inode * raw_inode; struct minix_inode_info *minix_inode = minix_i(inode); int i; raw_inode = minix_V1_raw_inode(inode->i_sb, inode->i_ino, &bh); if (!raw_inode) return NULL; raw_inode->i_mode = inode->i_mode; raw_inode->i_uid = fs_high2lowuid(i_uid_read(inode)); raw_inode->i_gid = fs_high2lowgid(i_gid_read(inode)); raw_inode->i_nlinks = inode->i_nlink; raw_inode->i_size = inode->i_size; raw_inode->i_time = inode->i_mtime.tv_sec; if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) raw_inode->i_zone[0] = old_encode_dev(inode->i_rdev); else for (i = 0; i < 9; i++) raw_inode->i_zone[i] = minix_inode->u.i1_data[i]; mark_buffer_dirty(bh); return bh; } /* * The minix V2 function to synchronize an inode. 
*/ static struct buffer_head * V2_minix_update_inode(struct inode * inode) { struct buffer_head * bh; struct minix2_inode * raw_inode; struct minix_inode_info *minix_inode = minix_i(inode); int i; raw_inode = minix_V2_raw_inode(inode->i_sb, inode->i_ino, &bh); if (!raw_inode) return NULL; raw_inode->i_mode = inode->i_mode; raw_inode->i_uid = fs_high2lowuid(i_uid_read(inode)); raw_inode->i_gid = fs_high2lowgid(i_gid_read(inode)); raw_inode->i_nlinks = inode->i_nlink; raw_inode->i_size = inode->i_size; raw_inode->i_mtime = inode->i_mtime.tv_sec; raw_inode->i_atime = inode->i_atime.tv_sec; raw_inode->i_ctime = inode_get_ctime(inode).tv_sec; if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) raw_inode->i_zone[0] = old_encode_dev(inode->i_rdev); else for (i = 0; i < 10; i++) raw_inode->i_zone[i] = minix_inode->u.i2_data[i]; mark_buffer_dirty(bh); return bh; } static int minix_write_inode(struct inode *inode, struct writeback_control *wbc) { int err = 0; struct buffer_head *bh; if (INODE_VERSION(inode) == MINIX_V1) bh = V1_minix_update_inode(inode); else bh = V2_minix_update_inode(inode); if (!bh) return -EIO; if (wbc->sync_mode == WB_SYNC_ALL && buffer_dirty(bh)) { sync_dirty_buffer(bh); if (buffer_req(bh) && !buffer_uptodate(bh)) { printk("IO error syncing minix inode [%s:%08lx]\n", inode->i_sb->s_id, inode->i_ino); err = -EIO; } } brelse (bh); return err; } int minix_getattr(struct mnt_idmap *idmap, const struct path *path, struct kstat *stat, u32 request_mask, unsigned int flags) { struct super_block *sb = path->dentry->d_sb; struct inode *inode = d_inode(path->dentry); generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat); if (INODE_VERSION(inode) == MINIX_V1) stat->blocks = (BLOCK_SIZE / 512) * V1_minix_blocks(stat->size, sb); else stat->blocks = (sb->s_blocksize / 512) * V2_minix_blocks(stat->size, sb); stat->blksize = sb->s_blocksize; return 0; } /* * The function that is called for file truncation. */ void minix_truncate(struct inode * inode) { if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))) return; if (INODE_VERSION(inode) == MINIX_V1) V1_minix_truncate(inode); else V2_minix_truncate(inode); } static struct dentry *minix_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_bdev(fs_type, flags, dev_name, data, minix_fill_super); } static struct file_system_type minix_fs_type = { .owner = THIS_MODULE, .name = "minix", .mount = minix_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; MODULE_ALIAS_FS("minix"); static int __init init_minix_fs(void) { int err = init_inodecache(); if (err) goto out1; err = register_filesystem(&minix_fs_type); if (err) goto out; return 0; out: destroy_inodecache(); out1: return err; } static void __exit exit_minix_fs(void) { unregister_filesystem(&minix_fs_type); destroy_inodecache(); } module_init(init_minix_fs) module_exit(exit_minix_fs) MODULE_LICENSE("GPL");
linux-master
fs/minix/inode.c
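The V1/V2 update paths in fs/minix/inode.c stash a character or block device's number directly in i_zone[0] with old_encode_dev(), and the read paths recover it with old_decode_dev(). Below is a minimal userspace sketch of that legacy 16-bit packing (major in the high byte, minor in the low byte); the helper names mirror the kernel's, but this standalone version is an illustration, not the kernel implementation.

/* Sketch of the legacy 16-bit device-number packing used in minix i_zone[0].
 * Assumes the classic encoding: major in bits 15..8, minor in bits 7..0. */
#include <stdint.h>
#include <stdio.h>

static uint16_t old_encode_dev(unsigned int major, unsigned int minor)
{
	return (uint16_t)((major << 8) | (minor & 0xff));
}

static void old_decode_dev(uint16_t zone0, unsigned int *major, unsigned int *minor)
{
	*major = zone0 >> 8;
	*minor = zone0 & 0xff;
}

int main(void)
{
	unsigned int major, minor;
	uint16_t packed = old_encode_dev(8, 1);	/* e.g. a block device 8:1 */

	old_decode_dev(packed, &major, &minor);
	printf("i_zone[0]=%#06x -> %u:%u\n", (unsigned int)packed, major, minor);
	return 0;
}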
// SPDX-License-Identifier: GPL-2.0 #include <linux/buffer_head.h> #include "minix.h" enum {DIRECT = 7, DEPTH = 4}; /* Have triple indirect */ typedef u32 block_t; /* 32 bit, host order */ static inline unsigned long block_to_cpu(block_t n) { return n; } static inline block_t cpu_to_block(unsigned long n) { return n; } static inline block_t *i_data(struct inode *inode) { return (block_t *)minix_i(inode)->u.i2_data; } #define DIRCOUNT 7 #define INDIRCOUNT(sb) (1 << ((sb)->s_blocksize_bits - 2)) static int block_to_path(struct inode * inode, long block, int offsets[DEPTH]) { int n = 0; struct super_block *sb = inode->i_sb; if (block < 0) { printk("MINIX-fs: block_to_path: block %ld < 0 on dev %pg\n", block, sb->s_bdev); return 0; } if ((u64)block * (u64)sb->s_blocksize >= sb->s_maxbytes) return 0; if (block < DIRCOUNT) { offsets[n++] = block; } else if ((block -= DIRCOUNT) < INDIRCOUNT(sb)) { offsets[n++] = DIRCOUNT; offsets[n++] = block; } else if ((block -= INDIRCOUNT(sb)) < INDIRCOUNT(sb) * INDIRCOUNT(sb)) { offsets[n++] = DIRCOUNT + 1; offsets[n++] = block / INDIRCOUNT(sb); offsets[n++] = block % INDIRCOUNT(sb); } else { block -= INDIRCOUNT(sb) * INDIRCOUNT(sb); offsets[n++] = DIRCOUNT + 2; offsets[n++] = (block / INDIRCOUNT(sb)) / INDIRCOUNT(sb); offsets[n++] = (block / INDIRCOUNT(sb)) % INDIRCOUNT(sb); offsets[n++] = block % INDIRCOUNT(sb); } return n; } #include "itree_common.c" int V2_minix_get_block(struct inode * inode, long block, struct buffer_head *bh_result, int create) { return get_block(inode, block, bh_result, create); } void V2_minix_truncate(struct inode * inode) { truncate(inode); } unsigned V2_minix_blocks(loff_t size, struct super_block *sb) { return nblocks(size, sb); }
linux-master
fs/minix/itree_v2.c
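block_to_path() in fs/minix/itree_v2.c turns a logical block number into up to DEPTH offsets (direct, single, double, triple indirect). The arithmetic is easy to check in isolation; the sketch below re-derives it in userspace for an assumed 1 KiB block size (so INDIRCOUNT is 256 four-byte pointers) and prints the path for a few sample blocks. It omits the negative-block and s_maxbytes guards that the kernel version performs.

/* Userspace re-derivation of the minix V2 block_to_path() arithmetic.
 * Assumes a 1 KiB block size, i.e. 256 32-bit pointers per indirect block. */
#include <stdio.h>

#define DIRCOUNT   7
#define INDIRCOUNT 256		/* blocksize / sizeof(u32) for 1 KiB blocks */
#define DEPTH      4

static int block_to_path(long block, int offsets[DEPTH])
{
	int n = 0;

	if (block < DIRCOUNT) {
		offsets[n++] = block;
	} else if ((block -= DIRCOUNT) < INDIRCOUNT) {
		offsets[n++] = DIRCOUNT;		/* single indirect */
		offsets[n++] = block;
	} else if ((block -= INDIRCOUNT) < (long)INDIRCOUNT * INDIRCOUNT) {
		offsets[n++] = DIRCOUNT + 1;		/* double indirect */
		offsets[n++] = block / INDIRCOUNT;
		offsets[n++] = block % INDIRCOUNT;
	} else {
		block -= (long)INDIRCOUNT * INDIRCOUNT;
		offsets[n++] = DIRCOUNT + 2;		/* triple indirect */
		offsets[n++] = (block / INDIRCOUNT) / INDIRCOUNT;
		offsets[n++] = (block / INDIRCOUNT) % INDIRCOUNT;
		offsets[n++] = block % INDIRCOUNT;
	}
	return n;
}

int main(void)
{
	long samples[] = { 0, 6, 7, 300, 70000 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		int off[DEPTH], depth = block_to_path(samples[i], off);

		printf("block %6ld -> depth %d:", samples[i], depth);
		for (int j = 0; j < depth; j++)
			printf(" %d", off[j]);
		printf("\n");
	}
	return 0;
}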
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/minix/namei.c * * Copyright (C) 1991, 1992 Linus Torvalds */ #include "minix.h" static int add_nondir(struct dentry *dentry, struct inode *inode) { int err = minix_add_link(dentry, inode); if (!err) { d_instantiate(dentry, inode); return 0; } inode_dec_link_count(inode); iput(inode); return err; } static struct dentry *minix_lookup(struct inode * dir, struct dentry *dentry, unsigned int flags) { struct inode * inode = NULL; ino_t ino; if (dentry->d_name.len > minix_sb(dir->i_sb)->s_namelen) return ERR_PTR(-ENAMETOOLONG); ino = minix_inode_by_name(dentry); if (ino) inode = minix_iget(dir->i_sb, ino); return d_splice_alias(inode, dentry); } static int minix_mknod(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev) { struct inode *inode; if (!old_valid_dev(rdev)) return -EINVAL; inode = minix_new_inode(dir, mode); if (IS_ERR(inode)) return PTR_ERR(inode); minix_set_inode(inode, rdev); mark_inode_dirty(inode); return add_nondir(dentry, inode); } static int minix_tmpfile(struct mnt_idmap *idmap, struct inode *dir, struct file *file, umode_t mode) { struct inode *inode = minix_new_inode(dir, mode); if (IS_ERR(inode)) return finish_open_simple(file, PTR_ERR(inode)); minix_set_inode(inode, 0); mark_inode_dirty(inode); d_tmpfile(file, inode); return finish_open_simple(file, 0); } static int minix_create(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) { return minix_mknod(&nop_mnt_idmap, dir, dentry, mode, 0); } static int minix_symlink(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, const char *symname) { int i = strlen(symname)+1; struct inode * inode; int err; if (i > dir->i_sb->s_blocksize) return -ENAMETOOLONG; inode = minix_new_inode(dir, S_IFLNK | 0777); if (IS_ERR(inode)) return PTR_ERR(inode); minix_set_inode(inode, 0); err = page_symlink(inode, symname, i); if (unlikely(err)) { inode_dec_link_count(inode); iput(inode); return err; } return add_nondir(dentry, inode); } static int minix_link(struct dentry * old_dentry, struct inode * dir, struct dentry *dentry) { struct inode *inode = d_inode(old_dentry); inode_set_ctime_current(inode); inode_inc_link_count(inode); ihold(inode); return add_nondir(dentry, inode); } static int minix_mkdir(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode) { struct inode * inode; int err; inode = minix_new_inode(dir, S_IFDIR | mode); if (IS_ERR(inode)) return PTR_ERR(inode); inode_inc_link_count(dir); minix_set_inode(inode, 0); inode_inc_link_count(inode); err = minix_make_empty(inode, dir); if (err) goto out_fail; err = minix_add_link(dentry, inode); if (err) goto out_fail; d_instantiate(dentry, inode); out: return err; out_fail: inode_dec_link_count(inode); inode_dec_link_count(inode); iput(inode); inode_dec_link_count(dir); goto out; } static int minix_unlink(struct inode * dir, struct dentry *dentry) { struct inode * inode = d_inode(dentry); struct page * page; struct minix_dir_entry * de; int err; de = minix_find_entry(dentry, &page); if (!de) return -ENOENT; err = minix_delete_entry(de, page); kunmap(page); put_page(page); if (err) return err; inode_set_ctime_to_ts(inode, inode_get_ctime(dir)); inode_dec_link_count(inode); return 0; } static int minix_rmdir(struct inode * dir, struct dentry *dentry) { struct inode * inode = d_inode(dentry); int err = -ENOTEMPTY; if (minix_empty_dir(inode)) { err = minix_unlink(dir, dentry); if (!err) { inode_dec_link_count(dir); 
inode_dec_link_count(inode); } } return err; } static int minix_rename(struct mnt_idmap *idmap, struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) { struct inode * old_inode = d_inode(old_dentry); struct inode * new_inode = d_inode(new_dentry); struct page * dir_page = NULL; struct minix_dir_entry * dir_de = NULL; struct page * old_page; struct minix_dir_entry * old_de; int err = -ENOENT; if (flags & ~RENAME_NOREPLACE) return -EINVAL; old_de = minix_find_entry(old_dentry, &old_page); if (!old_de) goto out; if (S_ISDIR(old_inode->i_mode)) { err = -EIO; dir_de = minix_dotdot(old_inode, &dir_page); if (!dir_de) goto out_old; } if (new_inode) { struct page * new_page; struct minix_dir_entry * new_de; err = -ENOTEMPTY; if (dir_de && !minix_empty_dir(new_inode)) goto out_dir; err = -ENOENT; new_de = minix_find_entry(new_dentry, &new_page); if (!new_de) goto out_dir; err = minix_set_link(new_de, new_page, old_inode); kunmap(new_page); put_page(new_page); if (err) goto out_dir; inode_set_ctime_current(new_inode); if (dir_de) drop_nlink(new_inode); inode_dec_link_count(new_inode); } else { err = minix_add_link(new_dentry, old_inode); if (err) goto out_dir; if (dir_de) inode_inc_link_count(new_dir); } err = minix_delete_entry(old_de, old_page); if (err) goto out_dir; mark_inode_dirty(old_inode); if (dir_de) { err = minix_set_link(dir_de, dir_page, new_dir); if (!err) inode_dec_link_count(old_dir); } out_dir: if (dir_de) { kunmap(dir_page); put_page(dir_page); } out_old: kunmap(old_page); put_page(old_page); out: return err; } /* * directories can handle most operations... */ const struct inode_operations minix_dir_inode_operations = { .create = minix_create, .lookup = minix_lookup, .link = minix_link, .unlink = minix_unlink, .symlink = minix_symlink, .mkdir = minix_mkdir, .rmdir = minix_rmdir, .mknod = minix_mknod, .rename = minix_rename, .getattr = minix_getattr, .tmpfile = minix_tmpfile, };
linux-master
fs/minix/namei.c
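The mkdir/rmdir paths in fs/minix/namei.c juggle link counts in a fixed pattern: a new directory ends up with nlink == 2 (its own "." plus its name in the parent), and the parent gains one link for the child's "..". The toy model below replays those increments and decrements on plain integers so the invariant is visible; it assumes a freshly allocated inode starts at nlink == 1 and models bookkeeping only, not any on-disk state.

/* Toy model of the nlink bookkeeping performed by minix_mkdir()/minix_rmdir().
 * Integers stand in for inode->i_nlink; no real filesystem state is involved. */
#include <assert.h>
#include <stdio.h>

struct toy_inode { int nlink; };

static void mkdir_counts(struct toy_inode *parent, struct toy_inode *child)
{
	child->nlink = 1;	/* freshly allocated inode (assumed) */
	parent->nlink++;	/* child's ".." points back at the parent */
	child->nlink++;		/* child's own "." entry */
	/* minix_add_link() then gives the child its name in the parent */
}

static void rmdir_counts(struct toy_inode *parent, struct toy_inode *child)
{
	child->nlink--;		/* unlink: name removed from the parent */
	parent->nlink--;	/* ".." no longer references the parent */
	child->nlink--;		/* "." goes away with the directory */
}

int main(void)
{
	struct toy_inode parent = { .nlink = 2 }, child;

	mkdir_counts(&parent, &child);
	printf("after mkdir: parent=%d child=%d\n", parent.nlink, child.nlink);
	assert(parent.nlink == 3 && child.nlink == 2);

	rmdir_counts(&parent, &child);
	printf("after rmdir: parent=%d child=%d\n", parent.nlink, child.nlink);
	assert(parent.nlink == 2 && child.nlink == 0);
	return 0;
}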
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/minix/file.c * * Copyright (C) 1991, 1992 Linus Torvalds * * minix regular file handling primitives */ #include "minix.h" /* * We have mostly NULLs here: the current defaults are OK for * the minix filesystem. */ const struct file_operations minix_file_operations = { .llseek = generic_file_llseek, .read_iter = generic_file_read_iter, .write_iter = generic_file_write_iter, .mmap = generic_file_mmap, .fsync = generic_file_fsync, .splice_read = filemap_splice_read, }; static int minix_setattr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *attr) { struct inode *inode = d_inode(dentry); int error; error = setattr_prepare(&nop_mnt_idmap, dentry, attr); if (error) return error; if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size != i_size_read(inode)) { error = inode_newsize_ok(inode, attr->ia_size); if (error) return error; truncate_setsize(inode, attr->ia_size); minix_truncate(inode); } setattr_copy(&nop_mnt_idmap, inode, attr); mark_inode_dirty(inode); return 0; } const struct inode_operations minix_file_inode_operations = { .setattr = minix_setattr, .getattr = minix_getattr, };
linux-master
fs/minix/file.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/buffer_head.h> #include <linux/slab.h> #include "minix.h" enum {DEPTH = 3, DIRECT = 7}; /* Only double indirect */ typedef u16 block_t; /* 16 bit, host order */ static inline unsigned long block_to_cpu(block_t n) { return n; } static inline block_t cpu_to_block(unsigned long n) { return n; } static inline block_t *i_data(struct inode *inode) { return (block_t *)minix_i(inode)->u.i1_data; } static int block_to_path(struct inode * inode, long block, int offsets[DEPTH]) { int n = 0; if (block < 0) { printk("MINIX-fs: block_to_path: block %ld < 0 on dev %pg\n", block, inode->i_sb->s_bdev); return 0; } if ((u64)block * BLOCK_SIZE >= inode->i_sb->s_maxbytes) return 0; if (block < 7) { offsets[n++] = block; } else if ((block -= 7) < 512) { offsets[n++] = 7; offsets[n++] = block; } else { block -= 512; offsets[n++] = 8; offsets[n++] = block>>9; offsets[n++] = block & 511; } return n; } #include "itree_common.c" int V1_minix_get_block(struct inode * inode, long block, struct buffer_head *bh_result, int create) { return get_block(inode, block, bh_result, create); } void V1_minix_truncate(struct inode * inode) { truncate(inode); } unsigned V1_minix_blocks(loff_t size, struct super_block *sb) { return nblocks(size, sb); }
linux-master
fs/minix/itree_v1.c
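With 7 direct slots, one single-indirect block of 512 16-bit pointers, and one double-indirect block, the V1 tree in fs/minix/itree_v1.c can address at most 7 + 512 + 512*512 data blocks. A quick sketch of that capacity calculation, assuming the classic 1 KiB minix block size; note that the 16-bit block_t also caps the device itself at 64 Ki blocks, so the tree limit is not the only constraint in practice.

/* Capacity reachable through the minix V1 block tree (assumes 1 KiB blocks and
 * 512 16-bit block pointers per indirect block, as in block_to_path() above). */
#include <stdio.h>

int main(void)
{
	const long block_size = 1024;
	const long direct = 7;
	const long per_indirect = 512;

	long blocks = direct + per_indirect + per_indirect * per_indirect;

	printf("addressable blocks: %ld\n", blocks);
	printf("max file size:      %ld bytes (~%ld MiB)\n",
	       blocks * block_size, (blocks * block_size) >> 20);
	return 0;
}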
// SPDX-License-Identifier: GPL-2.0-only /* * fs/nfs_common/nfsacl.c * * Copyright (C) 2002-2003 Andreas Gruenbacher <[email protected]> */ /* * The Solaris nfsacl protocol represents some ACLs slightly differently * than POSIX 1003.1e draft 17 does (and we do): * * - Minimal ACLs always have an ACL_MASK entry, so they have * four instead of three entries. * - The ACL_MASK entry in such minimal ACLs always has the same * permissions as the ACL_GROUP_OBJ entry. (In extended ACLs * the ACL_MASK and ACL_GROUP_OBJ entries may differ.) * - The identifier fields of the ACL_USER_OBJ and ACL_GROUP_OBJ * entries contain the identifiers of the owner and owning group. * (In POSIX ACLs we always set them to ACL_UNDEFINED_ID). * - ACL entries in the kernel are kept sorted in ascending order * of (e_tag, e_id). Solaris ACLs are unsorted. */ #include <linux/module.h> #include <linux/fs.h> #include <linux/gfp.h> #include <linux/sunrpc/xdr.h> #include <linux/nfsacl.h> #include <linux/nfs3.h> #include <linux/sort.h> MODULE_LICENSE("GPL"); struct nfsacl_encode_desc { struct xdr_array2_desc desc; unsigned int count; struct posix_acl *acl; int typeflag; kuid_t uid; kgid_t gid; }; struct nfsacl_simple_acl { struct posix_acl acl; struct posix_acl_entry ace[4]; }; static int xdr_nfsace_encode(struct xdr_array2_desc *desc, void *elem) { struct nfsacl_encode_desc *nfsacl_desc = (struct nfsacl_encode_desc *) desc; __be32 *p = elem; struct posix_acl_entry *entry = &nfsacl_desc->acl->a_entries[nfsacl_desc->count++]; *p++ = htonl(entry->e_tag | nfsacl_desc->typeflag); switch(entry->e_tag) { case ACL_USER_OBJ: *p++ = htonl(from_kuid(&init_user_ns, nfsacl_desc->uid)); break; case ACL_GROUP_OBJ: *p++ = htonl(from_kgid(&init_user_ns, nfsacl_desc->gid)); break; case ACL_USER: *p++ = htonl(from_kuid(&init_user_ns, entry->e_uid)); break; case ACL_GROUP: *p++ = htonl(from_kgid(&init_user_ns, entry->e_gid)); break; default: /* Solaris depends on that! */ *p++ = 0; break; } *p++ = htonl(entry->e_perm & S_IRWXO); return 0; } /** * nfsacl_encode - Encode an NFSv3 ACL * * @buf: destination xdr_buf to contain XDR encoded ACL * @base: byte offset in xdr_buf where XDR'd ACL begins * @inode: inode of file whose ACL this is * @acl: posix_acl to encode * @encode_entries: whether to encode ACEs as well * @typeflag: ACL type: NFS_ACL_DEFAULT or zero * * Returns size of encoded ACL in bytes or a negative errno value. */ int nfsacl_encode(struct xdr_buf *buf, unsigned int base, struct inode *inode, struct posix_acl *acl, int encode_entries, int typeflag) { int entries = (acl && acl->a_count) ? max_t(int, acl->a_count, 4) : 0; struct nfsacl_encode_desc nfsacl_desc = { .desc = { .elem_size = 12, .array_len = encode_entries ? entries : 0, .xcode = xdr_nfsace_encode, }, .acl = acl, .typeflag = typeflag, .uid = inode->i_uid, .gid = inode->i_gid, }; struct nfsacl_simple_acl aclbuf; int err; if (entries > NFS_ACL_MAX_ENTRIES || xdr_encode_word(buf, base, entries)) return -EINVAL; if (encode_entries && acl && acl->a_count == 3) { struct posix_acl *acl2 = &aclbuf.acl; /* Avoid the use of posix_acl_alloc(). nfsacl_encode() is * invoked in contexts where a memory allocation failure is * fatal. Fortunately this fake ACL is small enough to * construct on the stack. */ posix_acl_init(acl2, 4); /* Insert entries in canonical order: other orders seem to confuse Solaris VxFS. 
*/ acl2->a_entries[0] = acl->a_entries[0]; /* ACL_USER_OBJ */ acl2->a_entries[1] = acl->a_entries[1]; /* ACL_GROUP_OBJ */ acl2->a_entries[2] = acl->a_entries[1]; /* ACL_MASK */ acl2->a_entries[2].e_tag = ACL_MASK; acl2->a_entries[3] = acl->a_entries[2]; /* ACL_OTHER */ nfsacl_desc.acl = acl2; } err = xdr_encode_array2(buf, base + 4, &nfsacl_desc.desc); if (!err) err = 8 + nfsacl_desc.desc.elem_size * nfsacl_desc.desc.array_len; return err; } EXPORT_SYMBOL_GPL(nfsacl_encode); /** * nfs_stream_encode_acl - Encode an NFSv3 ACL * * @xdr: an xdr_stream positioned to receive an encoded ACL * @inode: inode of file whose ACL this is * @acl: posix_acl to encode * @encode_entries: whether to encode ACEs as well * @typeflag: ACL type: NFS_ACL_DEFAULT or zero * * Return values: * %false: The ACL could not be encoded * %true: @xdr is advanced to the next available position */ bool nfs_stream_encode_acl(struct xdr_stream *xdr, struct inode *inode, struct posix_acl *acl, int encode_entries, int typeflag) { const size_t elem_size = XDR_UNIT * 3; u32 entries = (acl && acl->a_count) ? max_t(int, acl->a_count, 4) : 0; struct nfsacl_encode_desc nfsacl_desc = { .desc = { .elem_size = elem_size, .array_len = encode_entries ? entries : 0, .xcode = xdr_nfsace_encode, }, .acl = acl, .typeflag = typeflag, .uid = inode->i_uid, .gid = inode->i_gid, }; struct nfsacl_simple_acl aclbuf; unsigned int base; int err; if (entries > NFS_ACL_MAX_ENTRIES) return false; if (xdr_stream_encode_u32(xdr, entries) < 0) return false; if (encode_entries && acl && acl->a_count == 3) { struct posix_acl *acl2 = &aclbuf.acl; /* Avoid the use of posix_acl_alloc(). nfsacl_encode() is * invoked in contexts where a memory allocation failure is * fatal. Fortunately this fake ACL is small enough to * construct on the stack. */ posix_acl_init(acl2, 4); /* Insert entries in canonical order: other orders seem to confuse Solaris VxFS. 
*/ acl2->a_entries[0] = acl->a_entries[0]; /* ACL_USER_OBJ */ acl2->a_entries[1] = acl->a_entries[1]; /* ACL_GROUP_OBJ */ acl2->a_entries[2] = acl->a_entries[1]; /* ACL_MASK */ acl2->a_entries[2].e_tag = ACL_MASK; acl2->a_entries[3] = acl->a_entries[2]; /* ACL_OTHER */ nfsacl_desc.acl = acl2; } base = xdr_stream_pos(xdr); if (!xdr_reserve_space(xdr, XDR_UNIT + elem_size * nfsacl_desc.desc.array_len)) return false; err = xdr_encode_array2(xdr->buf, base, &nfsacl_desc.desc); if (err) return false; return true; } EXPORT_SYMBOL_GPL(nfs_stream_encode_acl); struct nfsacl_decode_desc { struct xdr_array2_desc desc; unsigned int count; struct posix_acl *acl; }; static int xdr_nfsace_decode(struct xdr_array2_desc *desc, void *elem) { struct nfsacl_decode_desc *nfsacl_desc = (struct nfsacl_decode_desc *) desc; __be32 *p = elem; struct posix_acl_entry *entry; unsigned int id; if (!nfsacl_desc->acl) { if (desc->array_len > NFS_ACL_MAX_ENTRIES) return -EINVAL; nfsacl_desc->acl = posix_acl_alloc(desc->array_len, GFP_KERNEL); if (!nfsacl_desc->acl) return -ENOMEM; nfsacl_desc->count = 0; } entry = &nfsacl_desc->acl->a_entries[nfsacl_desc->count++]; entry->e_tag = ntohl(*p++) & ~NFS_ACL_DEFAULT; id = ntohl(*p++); entry->e_perm = ntohl(*p++); switch(entry->e_tag) { case ACL_USER: entry->e_uid = make_kuid(&init_user_ns, id); if (!uid_valid(entry->e_uid)) return -EINVAL; break; case ACL_GROUP: entry->e_gid = make_kgid(&init_user_ns, id); if (!gid_valid(entry->e_gid)) return -EINVAL; break; case ACL_USER_OBJ: case ACL_GROUP_OBJ: case ACL_OTHER: if (entry->e_perm & ~S_IRWXO) return -EINVAL; break; case ACL_MASK: /* Solaris sometimes sets additional bits in the mask */ entry->e_perm &= S_IRWXO; break; default: return -EINVAL; } return 0; } static int cmp_acl_entry(const void *x, const void *y) { const struct posix_acl_entry *a = x, *b = y; if (a->e_tag != b->e_tag) return a->e_tag - b->e_tag; else if ((a->e_tag == ACL_USER) && uid_gt(a->e_uid, b->e_uid)) return 1; else if ((a->e_tag == ACL_USER) && uid_lt(a->e_uid, b->e_uid)) return -1; else if ((a->e_tag == ACL_GROUP) && gid_gt(a->e_gid, b->e_gid)) return 1; else if ((a->e_tag == ACL_GROUP) && gid_lt(a->e_gid, b->e_gid)) return -1; else return 0; } /* * Convert from a Solaris ACL to a POSIX 1003.1e draft 17 ACL. */ static int posix_acl_from_nfsacl(struct posix_acl *acl) { struct posix_acl_entry *pa, *pe, *group_obj = NULL, *mask = NULL; if (!acl) return 0; sort(acl->a_entries, acl->a_count, sizeof(struct posix_acl_entry), cmp_acl_entry, NULL); /* Find the ACL_GROUP_OBJ and ACL_MASK entries. */ FOREACH_ACL_ENTRY(pa, acl, pe) { switch(pa->e_tag) { case ACL_USER_OBJ: break; case ACL_GROUP_OBJ: group_obj = pa; break; case ACL_MASK: mask = pa; fallthrough; case ACL_OTHER: break; } } if (acl->a_count == 4 && group_obj && mask && mask->e_perm == group_obj->e_perm) { /* remove bogus ACL_MASK entry */ memmove(mask, mask+1, (3 - (mask - acl->a_entries)) * sizeof(struct posix_acl_entry)); acl->a_count = 3; } return 0; } /** * nfsacl_decode - Decode an NFSv3 ACL * * @buf: xdr_buf containing XDR'd ACL data to decode * @base: byte offset in xdr_buf where XDR'd ACL begins * @aclcnt: count of ACEs in decoded posix_acl * @pacl: buffer in which to place decoded posix_acl * * Returns the length of the decoded ACL in bytes, or a negative errno value. */ int nfsacl_decode(struct xdr_buf *buf, unsigned int base, unsigned int *aclcnt, struct posix_acl **pacl) { struct nfsacl_decode_desc nfsacl_desc = { .desc = { .elem_size = 12, .xcode = pacl ? 
xdr_nfsace_decode : NULL, }, }; u32 entries; int err; if (xdr_decode_word(buf, base, &entries) || entries > NFS_ACL_MAX_ENTRIES) return -EINVAL; nfsacl_desc.desc.array_maxlen = entries; err = xdr_decode_array2(buf, base + 4, &nfsacl_desc.desc); if (err) return err; if (pacl) { if (entries != nfsacl_desc.desc.array_len || posix_acl_from_nfsacl(nfsacl_desc.acl) != 0) { posix_acl_release(nfsacl_desc.acl); return -EINVAL; } *pacl = nfsacl_desc.acl; } if (aclcnt) *aclcnt = entries; return 8 + nfsacl_desc.desc.elem_size * nfsacl_desc.desc.array_len; } EXPORT_SYMBOL_GPL(nfsacl_decode); /** * nfs_stream_decode_acl - Decode an NFSv3 ACL * * @xdr: an xdr_stream positioned at an encoded ACL * @aclcnt: OUT: count of ACEs in decoded posix_acl * @pacl: OUT: a dynamically-allocated buffer containing the decoded posix_acl * * Return values: * %false: The encoded ACL is not valid * %true: @pacl contains a decoded ACL, and @xdr is advanced * * On a successful return, caller must release *pacl using posix_acl_release(). */ bool nfs_stream_decode_acl(struct xdr_stream *xdr, unsigned int *aclcnt, struct posix_acl **pacl) { const size_t elem_size = XDR_UNIT * 3; struct nfsacl_decode_desc nfsacl_desc = { .desc = { .elem_size = elem_size, .xcode = pacl ? xdr_nfsace_decode : NULL, }, }; unsigned int base; u32 entries; if (xdr_stream_decode_u32(xdr, &entries) < 0) return false; if (entries > NFS_ACL_MAX_ENTRIES) return false; base = xdr_stream_pos(xdr); if (!xdr_inline_decode(xdr, XDR_UNIT + elem_size * entries)) return false; nfsacl_desc.desc.array_maxlen = entries; if (xdr_decode_array2(xdr->buf, base, &nfsacl_desc.desc)) return false; if (pacl) { if (entries != nfsacl_desc.desc.array_len || posix_acl_from_nfsacl(nfsacl_desc.acl) != 0) { posix_acl_release(nfsacl_desc.acl); return false; } *pacl = nfsacl_desc.acl; } if (aclcnt) *aclcnt = entries; return true; } EXPORT_SYMBOL_GPL(nfs_stream_decode_acl);
linux-master
fs/nfs_common/nfsacl.c
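Both encoders in fs/nfs_common/nfsacl.c expand a minimal three-entry POSIX ACL into the four-entry form Solaris expects: an ACL_MASK copied from ACL_GROUP_OBJ is inserted in canonical order. posix_acl_from_nfsacl() later drops that entry again when the mask merely mirrors the group permissions. The sketch below replays that round trip on a small standalone entry type; the tag constants are illustrative stand-ins, not the kernel's numeric values.

/* Round trip of the "minimal ACL" handling in nfsacl.c: expand 3 entries to 4
 * by inserting an ACL_MASK that copies ACL_GROUP_OBJ, then elide it again when
 * the mask is redundant.  Tag values below are illustrative stand-ins. */
#include <stdio.h>
#include <string.h>

enum { TAG_USER_OBJ, TAG_GROUP_OBJ, TAG_MASK, TAG_OTHER };

struct ace { int tag; unsigned int perm; };

/* 3-entry POSIX ACL -> 4-entry Solaris-style ACL, canonical order. */
static int expand(const struct ace in[3], struct ace out[4])
{
	out[0] = in[0];			/* ACL_USER_OBJ  */
	out[1] = in[1];			/* ACL_GROUP_OBJ */
	out[2] = in[1];			/* ACL_MASK == group perms */
	out[2].tag = TAG_MASK;
	out[3] = in[2];			/* ACL_OTHER */
	return 4;
}

/* Drop a mask entry that only repeats the group permissions. */
static int shrink(struct ace *acl, int count)
{
	if (count == 4 && acl[2].tag == TAG_MASK &&
	    acl[2].perm == acl[1].perm) {
		memmove(&acl[2], &acl[3], sizeof(acl[0]));
		return 3;
	}
	return count;
}

int main(void)
{
	struct ace minimal[3] = {
		{ TAG_USER_OBJ, 06 }, { TAG_GROUP_OBJ, 04 }, { TAG_OTHER, 04 },
	};
	struct ace wire[4];
	int n = expand(minimal, wire);

	printf("expanded to %d entries (mask perm %o)\n", n, wire[2].perm);
	n = shrink(wire, n);
	printf("shrunk back to %d entries\n", n);
	return 0;
}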
// SPDX-License-Identifier: GPL-2.0-only /* * Common code for control of lockd and nfsv4 grace periods. * * Transplanted from lockd code */ #include <linux/module.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include <linux/fs.h> #include <linux/filelock.h> static unsigned int grace_net_id; static DEFINE_SPINLOCK(grace_lock); /** * locks_start_grace * @net: net namespace that this lock manager belongs to * @lm: who this grace period is for * * A grace period is a period during which locks should not be given * out. Currently grace periods are only enforced by the two lock * managers (lockd and nfsd), using the locks_in_grace() function to * check when they are in a grace period. * * This function is called to start a grace period. */ void locks_start_grace(struct net *net, struct lock_manager *lm) { struct list_head *grace_list = net_generic(net, grace_net_id); spin_lock(&grace_lock); if (list_empty(&lm->list)) list_add(&lm->list, grace_list); else WARN(1, "double list_add attempt detected in net %x %s\n", net->ns.inum, (net == &init_net) ? "(init_net)" : ""); spin_unlock(&grace_lock); } EXPORT_SYMBOL_GPL(locks_start_grace); /** * locks_end_grace * @lm: who this grace period is for * * Call this function to state that the given lock manager is ready to * resume regular locking. The grace period will not end until all lock * managers that called locks_start_grace() also call locks_end_grace(). * Note that callers count on it being safe to call this more than once, * and the second call should be a no-op. */ void locks_end_grace(struct lock_manager *lm) { spin_lock(&grace_lock); list_del_init(&lm->list); spin_unlock(&grace_lock); } EXPORT_SYMBOL_GPL(locks_end_grace); static bool __state_in_grace(struct net *net, bool open) { struct list_head *grace_list = net_generic(net, grace_net_id); struct lock_manager *lm; if (!open) return !list_empty(grace_list); spin_lock(&grace_lock); list_for_each_entry(lm, grace_list, list) { if (lm->block_opens) { spin_unlock(&grace_lock); return true; } } spin_unlock(&grace_lock); return false; } /** * locks_in_grace * @net: network namespace * * Lock managers call this function to determine when it is OK for them * to answer ordinary lock requests, and when they should accept only * lock reclaims. */ bool locks_in_grace(struct net *net) { return __state_in_grace(net, false); } EXPORT_SYMBOL_GPL(locks_in_grace); bool opens_in_grace(struct net *net) { return __state_in_grace(net, true); } EXPORT_SYMBOL_GPL(opens_in_grace); static int __net_init grace_init_net(struct net *net) { struct list_head *grace_list = net_generic(net, grace_net_id); INIT_LIST_HEAD(grace_list); return 0; } static void __net_exit grace_exit_net(struct net *net) { struct list_head *grace_list = net_generic(net, grace_net_id); WARN_ONCE(!list_empty(grace_list), "net %x %s: grace_list is not empty\n", net->ns.inum, __func__); } static struct pernet_operations grace_net_ops = { .init = grace_init_net, .exit = grace_exit_net, .id = &grace_net_id, .size = sizeof(struct list_head), }; static int __init init_grace(void) { return register_pernet_subsys(&grace_net_ops); } static void __exit exit_grace(void) { unregister_pernet_subsys(&grace_net_ops); } MODULE_AUTHOR("Jeff Layton <[email protected]>"); MODULE_LICENSE("GPL"); module_init(init_grace) module_exit(exit_grace)
linux-master
fs/nfs_common/grace.c
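The grace-period tracking in fs/nfs_common/grace.c is essentially a per-namespace list: starting a grace period adds a lock manager to the list, ending it removes the entry, and locks_in_grace() reports whether anything is still listed (opens_in_grace() additionally requires a manager with block_opens set). Below is a minimal single-namespace userspace sketch of the same bookkeeping, with a pthread mutex standing in for the kernel spinlock and no net-namespace handling.

/* Minimal, single-namespace sketch of the grace-period list in grace.c.
 * A pthread mutex stands in for grace_lock; there is no net namespace here. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct lock_manager {
	struct lock_manager *next;
	bool block_opens;
};

static struct lock_manager *grace_list;
static pthread_mutex_t grace_lock = PTHREAD_MUTEX_INITIALIZER;

static void locks_start_grace(struct lock_manager *lm)
{
	pthread_mutex_lock(&grace_lock);
	lm->next = grace_list;
	grace_list = lm;
	pthread_mutex_unlock(&grace_lock);
}

static void locks_end_grace(struct lock_manager *lm)
{
	pthread_mutex_lock(&grace_lock);
	for (struct lock_manager **p = &grace_list; *p; p = &(*p)->next) {
		if (*p == lm) {
			*p = lm->next;
			break;
		}
	}
	pthread_mutex_unlock(&grace_lock);
}

static bool locks_in_grace(void)
{
	bool ret;

	pthread_mutex_lock(&grace_lock);
	ret = grace_list != NULL;
	pthread_mutex_unlock(&grace_lock);
	return ret;
}

int main(void)
{
	struct lock_manager lockd = { 0 }, nfsd = { 0 };

	locks_start_grace(&lockd);
	locks_start_grace(&nfsd);
	locks_end_grace(&lockd);
	printf("in grace after one manager ends: %d\n", locks_in_grace());
	locks_end_grace(&nfsd);
	printf("in grace after both end:         %d\n", locks_in_grace());
	return 0;
}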
// SPDX-License-Identifier: GPL-2.0-only /* * Helper for knfsd's SSC to access ops in NFS client modules * * Author: Dai Ngo <[email protected]> * * Copyright (c) 2020, Oracle and/or its affiliates. */ #include <linux/module.h> #include <linux/fs.h> #include <linux/nfs_ssc.h> #include "../nfs/nfs4_fs.h" struct nfs_ssc_client_ops_tbl nfs_ssc_client_tbl; EXPORT_SYMBOL_GPL(nfs_ssc_client_tbl); #ifdef CONFIG_NFS_V4_2 /** * nfs42_ssc_register - install the NFS_V4 client ops in the nfs_ssc_client_tbl * @ops: NFS_V4 ops to be installed * * Return values: * None */ void nfs42_ssc_register(const struct nfs4_ssc_client_ops *ops) { nfs_ssc_client_tbl.ssc_nfs4_ops = ops; } EXPORT_SYMBOL_GPL(nfs42_ssc_register); /** * nfs42_ssc_unregister - uninstall the NFS_V4 client ops from * the nfs_ssc_client_tbl * @ops: ops to be uninstalled * * Return values: * None */ void nfs42_ssc_unregister(const struct nfs4_ssc_client_ops *ops) { if (nfs_ssc_client_tbl.ssc_nfs4_ops != ops) return; nfs_ssc_client_tbl.ssc_nfs4_ops = NULL; } EXPORT_SYMBOL_GPL(nfs42_ssc_unregister); #endif /* CONFIG_NFS_V4_2 */ #ifdef CONFIG_NFS_V4_2 /** * nfs_ssc_register - install the NFS_FS client ops in the nfs_ssc_client_tbl * @ops: NFS_FS ops to be installed * * Return values: * None */ void nfs_ssc_register(const struct nfs_ssc_client_ops *ops) { nfs_ssc_client_tbl.ssc_nfs_ops = ops; } EXPORT_SYMBOL_GPL(nfs_ssc_register); /** * nfs_ssc_unregister - uninstall the NFS_FS client ops from * the nfs_ssc_client_tbl * @ops: ops to be uninstalled * * Return values: * None */ void nfs_ssc_unregister(const struct nfs_ssc_client_ops *ops) { if (nfs_ssc_client_tbl.ssc_nfs_ops != ops) return; nfs_ssc_client_tbl.ssc_nfs_ops = NULL; } EXPORT_SYMBOL_GPL(nfs_ssc_unregister); #else void nfs_ssc_register(const struct nfs_ssc_client_ops *ops) { } EXPORT_SYMBOL_GPL(nfs_ssc_register); void nfs_ssc_unregister(const struct nfs_ssc_client_ops *ops) { } EXPORT_SYMBOL_GPL(nfs_ssc_unregister); #endif /* CONFIG_NFS_V4_2 */
linux-master
fs/nfs_common/nfs_ssc.c
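fs/nfs_common/nfs_ssc.c is a small indirection table: register stores an ops pointer, and unregister clears the slot only if the caller passes the pointer that was actually installed, so a stale caller cannot knock out a newer registrant. A compact userspace sketch of that guard, using made-up struct and function names for illustration:

/* Sketch of the guarded register/unregister pattern used by nfs_ssc.c:
 * only the ops pointer that was installed may clear the slot. */
#include <assert.h>
#include <stddef.h>

struct ssc_ops { int (*copy)(void); };

static const struct ssc_ops *installed_ops;

static void ssc_register(const struct ssc_ops *ops)
{
	installed_ops = ops;
}

static void ssc_unregister(const struct ssc_ops *ops)
{
	if (installed_ops != ops)
		return;		/* someone else registered meanwhile */
	installed_ops = NULL;
}

int main(void)
{
	static const struct ssc_ops a, b;

	ssc_register(&a);
	ssc_unregister(&b);	/* ignored: b never owned the slot */
	assert(installed_ops == &a);
	ssc_unregister(&a);
	assert(installed_ops == NULL);
	return 0;
}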
/* file-mmu.c: ramfs MMU-based file operations * * Resizable simple ram filesystem for Linux. * * Copyright (C) 2000 Linus Torvalds. * 2000 Transmeta Corp. * * Usage limits added by David Gibson, Linuxcare Australia. * This file is released under the GPL. */ /* * NOTE! This filesystem is probably most useful * not as a real filesystem, but as an example of * how virtual filesystems can be written. * * It doesn't get much simpler than this. Consider * that this file implements the full semantics of * a POSIX-compliant read-write filesystem. * * Note in particular how the filesystem does not * need to implement any data structures of its own * to keep track of the virtual data: using the VFS * caches is sufficient. */ #include <linux/fs.h> #include <linux/mm.h> #include <linux/ramfs.h> #include <linux/sched.h> #include "internal.h" static unsigned long ramfs_mmu_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { return current->mm->get_unmapped_area(file, addr, len, pgoff, flags); } const struct file_operations ramfs_file_operations = { .read_iter = generic_file_read_iter, .write_iter = generic_file_write_iter, .mmap = generic_file_mmap, .fsync = noop_fsync, .splice_read = filemap_splice_read, .splice_write = iter_file_splice_write, .llseek = generic_file_llseek, .get_unmapped_area = ramfs_mmu_get_unmapped_area, }; const struct inode_operations ramfs_file_inode_operations = { .setattr = simple_setattr, .getattr = simple_getattr, };
linux-master
fs/ramfs/file-mmu.c
/* * Resizable simple ram filesystem for Linux. * * Copyright (C) 2000 Linus Torvalds. * 2000 Transmeta Corp. * * Usage limits added by David Gibson, Linuxcare Australia. * This file is released under the GPL. */ /* * NOTE! This filesystem is probably most useful * not as a real filesystem, but as an example of * how virtual filesystems can be written. * * It doesn't get much simpler than this. Consider * that this file implements the full semantics of * a POSIX-compliant read-write filesystem. * * Note in particular how the filesystem does not * need to implement any data structures of its own * to keep track of the virtual data: using the VFS * caches is sufficient. */ #include <linux/fs.h> #include <linux/pagemap.h> #include <linux/highmem.h> #include <linux/time.h> #include <linux/init.h> #include <linux/string.h> #include <linux/backing-dev.h> #include <linux/ramfs.h> #include <linux/sched.h> #include <linux/parser.h> #include <linux/magic.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/fs_context.h> #include <linux/fs_parser.h> #include <linux/seq_file.h> #include "internal.h" struct ramfs_mount_opts { umode_t mode; }; struct ramfs_fs_info { struct ramfs_mount_opts mount_opts; }; #define RAMFS_DEFAULT_MODE 0755 static const struct super_operations ramfs_ops; static const struct inode_operations ramfs_dir_inode_operations; struct inode *ramfs_get_inode(struct super_block *sb, const struct inode *dir, umode_t mode, dev_t dev) { struct inode * inode = new_inode(sb); if (inode) { inode->i_ino = get_next_ino(); inode_init_owner(&nop_mnt_idmap, inode, dir, mode); inode->i_mapping->a_ops = &ram_aops; mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER); mapping_set_unevictable(inode->i_mapping); inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode); switch (mode & S_IFMT) { default: init_special_inode(inode, mode, dev); break; case S_IFREG: inode->i_op = &ramfs_file_inode_operations; inode->i_fop = &ramfs_file_operations; break; case S_IFDIR: inode->i_op = &ramfs_dir_inode_operations; inode->i_fop = &simple_dir_operations; /* directory inodes start off with i_nlink == 2 (for "." entry) */ inc_nlink(inode); break; case S_IFLNK: inode->i_op = &page_symlink_inode_operations; inode_nohighmem(inode); break; } } return inode; } /* * File creation. Allocate an inode, and we're done.. 
*/ /* SMP-safe */ static int ramfs_mknod(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev) { struct inode * inode = ramfs_get_inode(dir->i_sb, dir, mode, dev); int error = -ENOSPC; if (inode) { d_instantiate(dentry, inode); dget(dentry); /* Extra count - pin the dentry in core */ error = 0; dir->i_mtime = inode_set_ctime_current(dir); } return error; } static int ramfs_mkdir(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode) { int retval = ramfs_mknod(&nop_mnt_idmap, dir, dentry, mode | S_IFDIR, 0); if (!retval) inc_nlink(dir); return retval; } static int ramfs_create(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) { return ramfs_mknod(&nop_mnt_idmap, dir, dentry, mode | S_IFREG, 0); } static int ramfs_symlink(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, const char *symname) { struct inode *inode; int error = -ENOSPC; inode = ramfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0); if (inode) { int l = strlen(symname)+1; error = page_symlink(inode, symname, l); if (!error) { d_instantiate(dentry, inode); dget(dentry); dir->i_mtime = inode_set_ctime_current(dir); } else iput(inode); } return error; } static int ramfs_tmpfile(struct mnt_idmap *idmap, struct inode *dir, struct file *file, umode_t mode) { struct inode *inode; inode = ramfs_get_inode(dir->i_sb, dir, mode, 0); if (!inode) return -ENOSPC; d_tmpfile(file, inode); return finish_open_simple(file, 0); } static const struct inode_operations ramfs_dir_inode_operations = { .create = ramfs_create, .lookup = simple_lookup, .link = simple_link, .unlink = simple_unlink, .symlink = ramfs_symlink, .mkdir = ramfs_mkdir, .rmdir = simple_rmdir, .mknod = ramfs_mknod, .rename = simple_rename, .tmpfile = ramfs_tmpfile, }; /* * Display the mount options in /proc/mounts. */ static int ramfs_show_options(struct seq_file *m, struct dentry *root) { struct ramfs_fs_info *fsi = root->d_sb->s_fs_info; if (fsi->mount_opts.mode != RAMFS_DEFAULT_MODE) seq_printf(m, ",mode=%o", fsi->mount_opts.mode); return 0; } static const struct super_operations ramfs_ops = { .statfs = simple_statfs, .drop_inode = generic_delete_inode, .show_options = ramfs_show_options, }; enum ramfs_param { Opt_mode, }; const struct fs_parameter_spec ramfs_fs_parameters[] = { fsparam_u32oct("mode", Opt_mode), {} }; static int ramfs_parse_param(struct fs_context *fc, struct fs_parameter *param) { struct fs_parse_result result; struct ramfs_fs_info *fsi = fc->s_fs_info; int opt; opt = fs_parse(fc, ramfs_fs_parameters, param, &result); if (opt == -ENOPARAM) { opt = vfs_parse_fs_param_source(fc, param); if (opt != -ENOPARAM) return opt; /* * We might like to report bad mount options here; * but traditionally ramfs has ignored all mount options, * and as it is used as a !CONFIG_SHMEM simple substitute * for tmpfs, better continue to ignore other mount options. 
*/ return 0; } if (opt < 0) return opt; switch (opt) { case Opt_mode: fsi->mount_opts.mode = result.uint_32 & S_IALLUGO; break; } return 0; } static int ramfs_fill_super(struct super_block *sb, struct fs_context *fc) { struct ramfs_fs_info *fsi = sb->s_fs_info; struct inode *inode; sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_blocksize = PAGE_SIZE; sb->s_blocksize_bits = PAGE_SHIFT; sb->s_magic = RAMFS_MAGIC; sb->s_op = &ramfs_ops; sb->s_time_gran = 1; inode = ramfs_get_inode(sb, NULL, S_IFDIR | fsi->mount_opts.mode, 0); sb->s_root = d_make_root(inode); if (!sb->s_root) return -ENOMEM; return 0; } static int ramfs_get_tree(struct fs_context *fc) { return get_tree_nodev(fc, ramfs_fill_super); } static void ramfs_free_fc(struct fs_context *fc) { kfree(fc->s_fs_info); } static const struct fs_context_operations ramfs_context_ops = { .free = ramfs_free_fc, .parse_param = ramfs_parse_param, .get_tree = ramfs_get_tree, }; int ramfs_init_fs_context(struct fs_context *fc) { struct ramfs_fs_info *fsi; fsi = kzalloc(sizeof(*fsi), GFP_KERNEL); if (!fsi) return -ENOMEM; fsi->mount_opts.mode = RAMFS_DEFAULT_MODE; fc->s_fs_info = fsi; fc->ops = &ramfs_context_ops; return 0; } void ramfs_kill_sb(struct super_block *sb) { kfree(sb->s_fs_info); kill_litter_super(sb); } static struct file_system_type ramfs_fs_type = { .name = "ramfs", .init_fs_context = ramfs_init_fs_context, .parameters = ramfs_fs_parameters, .kill_sb = ramfs_kill_sb, .fs_flags = FS_USERNS_MOUNT, }; static int __init init_ramfs_fs(void) { return register_filesystem(&ramfs_fs_type); } fs_initcall(init_ramfs_fs);
linux-master
fs/ramfs/inode.c
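ramfs_parse_param() in fs/ramfs/inode.c accepts a single "mode=" option, parsed as octal and masked with S_IALLUGO so that only permission, setuid/setgid, and sticky bits survive. A small sketch of that parse-and-mask step, using strtoul in place of the fs_parser helper; 07777 is the standard S_IALLUGO value.

/* Sketch of ramfs' "mode=" option handling: parse octal, keep only the
 * permission plus setuid/setgid/sticky bits (S_IALLUGO == 07777). */
#include <stdio.h>
#include <stdlib.h>

#define S_IALLUGO 07777

static unsigned int parse_mode_opt(const char *arg)
{
	unsigned long v = strtoul(arg, NULL, 8);	/* fsparam_u32oct analogue */

	return (unsigned int)v & S_IALLUGO;
}

int main(void)
{
	/* Stray high bits (e.g. a file-type nibble) are masked away. */
	printf("mode=1777  -> %04o\n", parse_mode_opt("1777"));
	printf("mode=47777 -> %04o\n", parse_mode_opt("47777"));
	return 0;
}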
// SPDX-License-Identifier: GPL-2.0-or-later /* file-nommu.c: no-MMU version of ramfs * * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved. * Written by David Howells ([email protected]) */ #include <linux/module.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/pagemap.h> #include <linux/highmem.h> #include <linux/init.h> #include <linux/string.h> #include <linux/backing-dev.h> #include <linux/ramfs.h> #include <linux/pagevec.h> #include <linux/mman.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/uaccess.h> #include "internal.h" static int ramfs_nommu_setattr(struct mnt_idmap *, struct dentry *, struct iattr *); static unsigned long ramfs_nommu_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); static int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma); static unsigned ramfs_mmap_capabilities(struct file *file) { return NOMMU_MAP_DIRECT | NOMMU_MAP_COPY | NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC; } const struct file_operations ramfs_file_operations = { .mmap_capabilities = ramfs_mmap_capabilities, .mmap = ramfs_nommu_mmap, .get_unmapped_area = ramfs_nommu_get_unmapped_area, .read_iter = generic_file_read_iter, .write_iter = generic_file_write_iter, .fsync = noop_fsync, .splice_read = filemap_splice_read, .splice_write = iter_file_splice_write, .llseek = generic_file_llseek, }; const struct inode_operations ramfs_file_inode_operations = { .setattr = ramfs_nommu_setattr, .getattr = simple_getattr, }; /*****************************************************************************/ /* * add a contiguous set of pages into a ramfs inode when it's truncated from * size 0 on the assumption that it's going to be used for an mmap of shared * memory */ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize) { unsigned long npages, xpages, loop; struct page *pages; unsigned order; void *data; int ret; gfp_t gfp = mapping_gfp_mask(inode->i_mapping); /* make various checks */ order = get_order(newsize); if (unlikely(order > MAX_ORDER)) return -EFBIG; ret = inode_newsize_ok(inode, newsize); if (ret) return ret; i_size_write(inode, newsize); /* allocate enough contiguous pages to be able to satisfy the * request */ pages = alloc_pages(gfp, order); if (!pages) return -ENOMEM; /* split the high-order page into an array of single pages */ xpages = 1UL << order; npages = (newsize + PAGE_SIZE - 1) >> PAGE_SHIFT; split_page(pages, order); /* trim off any pages we don't actually require */ for (loop = npages; loop < xpages; loop++) __free_page(pages + loop); /* clear the memory we allocated */ newsize = PAGE_SIZE * npages; data = page_address(pages); memset(data, 0, newsize); /* attach all the pages to the inode's address space */ for (loop = 0; loop < npages; loop++) { struct page *page = pages + loop; ret = add_to_page_cache_lru(page, inode->i_mapping, loop, gfp); if (ret < 0) goto add_error; /* prevent the page from being discarded on memory pressure */ SetPageDirty(page); SetPageUptodate(page); unlock_page(page); put_page(page); } return 0; add_error: while (loop < npages) __free_page(pages + loop++); return ret; } /*****************************************************************************/ /* * */ static int ramfs_nommu_resize(struct inode *inode, loff_t newsize, loff_t size) { int ret; /* assume a truncate from zero size is going to be for the purposes of * shared mmap */ if (size == 0) { if (unlikely(newsize >> 32)) return -EFBIG; return 
ramfs_nommu_expand_for_mapping(inode, newsize); } /* check that a decrease in size doesn't cut off any shared mappings */ if (newsize < size) { ret = nommu_shrink_inode_mappings(inode, size, newsize); if (ret < 0) return ret; } truncate_setsize(inode, newsize); return 0; } /*****************************************************************************/ /* * handle a change of attributes * - we're specifically interested in a change of size */ static int ramfs_nommu_setattr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *ia) { struct inode *inode = d_inode(dentry); unsigned int old_ia_valid = ia->ia_valid; int ret = 0; /* POSIX UID/GID verification for setting inode attributes */ ret = setattr_prepare(&nop_mnt_idmap, dentry, ia); if (ret) return ret; /* pick out size-changing events */ if (ia->ia_valid & ATTR_SIZE) { loff_t size = inode->i_size; if (ia->ia_size != size) { ret = ramfs_nommu_resize(inode, ia->ia_size, size); if (ret < 0 || ia->ia_valid == ATTR_SIZE) goto out; } else { /* we skipped the truncate but must still update * timestamps */ ia->ia_valid |= ATTR_MTIME|ATTR_CTIME; } } setattr_copy(&nop_mnt_idmap, inode, ia); out: ia->ia_valid = old_ia_valid; return ret; } /*****************************************************************************/ /* * try to determine where a shared mapping can be made * - we require that: * - the pages to be mapped must exist * - the pages be physically contiguous in sequence */ static unsigned long ramfs_nommu_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { unsigned long maxpages, lpages, nr_folios, loop, ret, nr_pages, pfn; struct inode *inode = file_inode(file); struct folio_batch fbatch; loff_t isize; /* the mapping mustn't extend beyond the EOF */ lpages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; isize = i_size_read(inode); ret = -ENOSYS; maxpages = (isize + PAGE_SIZE - 1) >> PAGE_SHIFT; if (pgoff >= maxpages) goto out; if (maxpages - pgoff < lpages) goto out; /* gang-find the pages */ folio_batch_init(&fbatch); nr_pages = 0; repeat: nr_folios = filemap_get_folios_contig(inode->i_mapping, &pgoff, ULONG_MAX, &fbatch); if (!nr_folios) { ret = -ENOSYS; return ret; } if (ret == -ENOSYS) { ret = (unsigned long) folio_address(fbatch.folios[0]); pfn = folio_pfn(fbatch.folios[0]); } /* check the pages for physical adjacency */ for (loop = 0; loop < nr_folios; loop++) { if (pfn + nr_pages != folio_pfn(fbatch.folios[loop])) { ret = -ENOSYS; goto out_free; /* leave if not physical adjacent */ } nr_pages += folio_nr_pages(fbatch.folios[loop]); if (nr_pages >= lpages) goto out_free; /* successfully found desired pages*/ } if (nr_pages < lpages) { folio_batch_release(&fbatch); goto repeat; /* loop if pages are missing */ } /* okay - all conditions fulfilled */ out_free: folio_batch_release(&fbatch); out: return ret; } /*****************************************************************************/ /* * set up a mapping for shared memory segments */ static int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma) { if (!is_nommu_shared_mapping(vma->vm_flags)) return -ENOSYS; file_accessed(file); vma->vm_ops = &generic_file_vm_ops; return 0; }
linux-master
fs/ramfs/file-nommu.c
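ramfs_nommu_expand_for_mapping() in fs/ramfs/file-nommu.c rounds the requested size up to whole pages, allocates one contiguous high-order block, splits it, and frees the tail pages it does not need. The page-count arithmetic is easy to mis-read, so here is a standalone sketch of just that part; PAGE_SIZE is assumed to be 4 KiB and get_order() is re-derived locally rather than taken from the kernel.

/* Arithmetic from ramfs_nommu_expand_for_mapping(): how many pages the
 * allocation order provides (xpages) versus how many the file needs (npages).
 * Assumes 4 KiB pages; get_order() is re-derived rather than the kernel's. */
#include <stdio.h>

#define PAGE_SIZE  4096UL
#define PAGE_SHIFT 12

static unsigned int get_order(unsigned long size)
{
	unsigned int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	unsigned long newsize = 5 * PAGE_SIZE + 123;	/* arbitrary example */
	unsigned int order = get_order(newsize);
	unsigned long xpages = 1UL << order;
	unsigned long npages = (newsize + PAGE_SIZE - 1) >> PAGE_SHIFT;

	printf("size %lu -> order %u, allocated %lu pages, kept %lu, freed %lu\n",
	       newsize, order, xpages, npages, xpages - npages);
	return 0;
}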
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2000-2001 Christoph Hellwig. */ /* * Veritas filesystem driver - filesystem to disk block mapping. */ #include <linux/fs.h> #include <linux/buffer_head.h> #include <linux/kernel.h> #include "vxfs.h" #include "vxfs_inode.h" #include "vxfs_extern.h" #ifdef DIAGNOSTIC static void vxfs_typdump(struct vxfs_typed *typ) { printk(KERN_DEBUG "type=%Lu ", typ->vt_hdr >> VXFS_TYPED_TYPESHIFT); printk("offset=%Lx ", typ->vt_hdr & VXFS_TYPED_OFFSETMASK); printk("block=%x ", typ->vt_block); printk("size=%x\n", typ->vt_size); } #endif /** * vxfs_bmap_ext4 - do bmap for ext4 extents * @ip: pointer to the inode we do bmap for * @iblock: logical block. * * Description: * vxfs_bmap_ext4 performs the bmap operation for inodes with * ext4-style extents (which are much like the traditional UNIX * inode organisation). * * Returns: * The physical block number on success, else Zero. */ static daddr_t vxfs_bmap_ext4(struct inode *ip, long bn) { struct super_block *sb = ip->i_sb; struct vxfs_inode_info *vip = VXFS_INO(ip); struct vxfs_sb_info *sbi = VXFS_SBI(sb); unsigned long bsize = sb->s_blocksize; u32 indsize = fs32_to_cpu(sbi, vip->vii_ext4.ve4_indsize); int i; if (indsize > sb->s_blocksize) goto fail_size; for (i = 0; i < VXFS_NDADDR; i++) { struct direct *d = vip->vii_ext4.ve4_direct + i; if (bn >= 0 && bn < fs32_to_cpu(sbi, d->size)) return (bn + fs32_to_cpu(sbi, d->extent)); bn -= fs32_to_cpu(sbi, d->size); } if ((bn / (indsize * indsize * bsize / 4)) == 0) { struct buffer_head *buf; daddr_t bno; __fs32 *indir; buf = sb_bread(sb, fs32_to_cpu(sbi, vip->vii_ext4.ve4_indir[0])); if (!buf || !buffer_mapped(buf)) goto fail_buf; indir = (__fs32 *)buf->b_data; bno = fs32_to_cpu(sbi, indir[(bn / indsize) % (indsize * bn)]) + (bn % indsize); brelse(buf); return bno; } else printk(KERN_WARNING "no matching indir?"); return 0; fail_size: printk("vxfs: indirect extent too big!\n"); fail_buf: return 0; } /** * vxfs_bmap_indir - recursion for vxfs_bmap_typed * @ip: pointer to the inode we do bmap for * @indir: indirect block we start reading at * @size: size of the typed area to search * @block: partially result from further searches * * Description: * vxfs_bmap_indir reads a &struct vxfs_typed at @indir * and performs the type-defined action. * * Return Value: * The physical block number on success, else Zero. * * Note: * Kernelstack is rare. Unrecurse? 
*/ static daddr_t vxfs_bmap_indir(struct inode *ip, long indir, int size, long block) { struct vxfs_sb_info *sbi = VXFS_SBI(ip->i_sb); struct buffer_head *bp = NULL; daddr_t pblock = 0; int i; for (i = 0; i < size * VXFS_TYPED_PER_BLOCK(ip->i_sb); i++) { struct vxfs_typed *typ; int64_t off; bp = sb_bread(ip->i_sb, indir + (i / VXFS_TYPED_PER_BLOCK(ip->i_sb))); if (!bp || !buffer_mapped(bp)) return 0; typ = ((struct vxfs_typed *)bp->b_data) + (i % VXFS_TYPED_PER_BLOCK(ip->i_sb)); off = fs64_to_cpu(sbi, typ->vt_hdr) & VXFS_TYPED_OFFSETMASK; if (block < off) { brelse(bp); continue; } switch ((u_int32_t)(fs64_to_cpu(sbi, typ->vt_hdr) >> VXFS_TYPED_TYPESHIFT)) { case VXFS_TYPED_INDIRECT: pblock = vxfs_bmap_indir(ip, fs32_to_cpu(sbi, typ->vt_block), fs32_to_cpu(sbi, typ->vt_size), block - off); if (pblock == -2) break; goto out; case VXFS_TYPED_DATA: if ((block - off) >= fs32_to_cpu(sbi, typ->vt_size)) break; pblock = fs32_to_cpu(sbi, typ->vt_block) + block - off; goto out; case VXFS_TYPED_INDIRECT_DEV4: case VXFS_TYPED_DATA_DEV4: { struct vxfs_typed_dev4 *typ4 = (struct vxfs_typed_dev4 *)typ; printk(KERN_INFO "\n\nTYPED_DEV4 detected!\n"); printk(KERN_INFO "block: %llu\tsize: %lld\tdev: %d\n", fs64_to_cpu(sbi, typ4->vd4_block), fs64_to_cpu(sbi, typ4->vd4_size), fs32_to_cpu(sbi, typ4->vd4_dev)); goto fail; } default: printk(KERN_ERR "%s:%d vt_hdr %llu\n", __func__, __LINE__, fs64_to_cpu(sbi, typ->vt_hdr)); BUG(); } brelse(bp); } fail: pblock = 0; out: brelse(bp); return (pblock); } /** * vxfs_bmap_typed - bmap for typed extents * @ip: pointer to the inode we do bmap for * @iblock: logical block * * Description: * Performs the bmap operation for typed extents. * * Return Value: * The physical block number on success, else Zero. */ static daddr_t vxfs_bmap_typed(struct inode *ip, long iblock) { struct vxfs_inode_info *vip = VXFS_INO(ip); struct vxfs_sb_info *sbi = VXFS_SBI(ip->i_sb); daddr_t pblock = 0; int i; for (i = 0; i < VXFS_NTYPED; i++) { struct vxfs_typed *typ = vip->vii_org.typed + i; u64 hdr = fs64_to_cpu(sbi, typ->vt_hdr); int64_t off = (hdr & VXFS_TYPED_OFFSETMASK); #ifdef DIAGNOSTIC vxfs_typdump(typ); #endif if (iblock < off) continue; switch ((u32)(hdr >> VXFS_TYPED_TYPESHIFT)) { case VXFS_TYPED_INDIRECT: pblock = vxfs_bmap_indir(ip, fs32_to_cpu(sbi, typ->vt_block), fs32_to_cpu(sbi, typ->vt_size), iblock - off); if (pblock == -2) break; return (pblock); case VXFS_TYPED_DATA: if ((iblock - off) < fs32_to_cpu(sbi, typ->vt_size)) return (fs32_to_cpu(sbi, typ->vt_block) + iblock - off); break; case VXFS_TYPED_INDIRECT_DEV4: case VXFS_TYPED_DATA_DEV4: { struct vxfs_typed_dev4 *typ4 = (struct vxfs_typed_dev4 *)typ; printk(KERN_INFO "\n\nTYPED_DEV4 detected!\n"); printk(KERN_INFO "block: %llu\tsize: %lld\tdev: %d\n", fs64_to_cpu(sbi, typ4->vd4_block), fs64_to_cpu(sbi, typ4->vd4_size), fs32_to_cpu(sbi, typ4->vd4_dev)); return 0; } default: BUG(); } } return 0; } /** * vxfs_bmap1 - vxfs-internal bmap operation * @ip: pointer to the inode we do bmap for * @iblock: logical block * * Description: * vxfs_bmap1 perfoms a logical to physical block mapping * for vxfs-internal purposes. * * Return Value: * The physical block number on success, else Zero. 
*/ daddr_t vxfs_bmap1(struct inode *ip, long iblock) { struct vxfs_inode_info *vip = VXFS_INO(ip); if (VXFS_ISEXT4(vip)) return vxfs_bmap_ext4(ip, iblock); if (VXFS_ISTYPED(vip)) return vxfs_bmap_typed(ip, iblock); if (VXFS_ISNONE(vip)) goto unsupp; if (VXFS_ISIMMED(vip)) goto unsupp; printk(KERN_WARNING "vxfs: inode %ld has no valid orgtype (%x)\n", ip->i_ino, vip->vii_orgtype); BUG(); unsupp: printk(KERN_WARNING "vxfs: inode %ld has an unsupported orgtype (%x)\n", ip->i_ino, vip->vii_orgtype); return 0; }
linux-master
fs/freevxfs/vxfs_bmap.c
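The typed-extent walkers in fs/freevxfs/vxfs_bmap.c repeatedly split vt_hdr into a record type (top bits, via VXFS_TYPED_TYPESHIFT) and a starting file offset (low bits, via VXFS_TYPED_OFFSETMASK). The sketch below shows that packing with an assumed 56-bit offset field and 8-bit type field; the real constants live in the vxfs headers and may differ, so treat the shift, mask, and type codes here as placeholders.

/* Illustration of the vt_hdr split used by vxfs_bmap_indir()/vxfs_bmap_typed():
 * type in the top bits, starting file offset in the low bits.  The 56/8 split
 * is an assumption standing in for VXFS_TYPED_TYPESHIFT / _OFFSETMASK. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define TYPESHIFT  56
#define OFFSETMASK ((UINT64_C(1) << TYPESHIFT) - 1)

#define TYPED_DATA     1	/* placeholder type codes */
#define TYPED_INDIRECT 2

static uint64_t pack_hdr(uint32_t type, uint64_t offset)
{
	return ((uint64_t)type << TYPESHIFT) | (offset & OFFSETMASK);
}

int main(void)
{
	uint64_t hdr = pack_hdr(TYPED_INDIRECT, 12345);

	printf("type   = %" PRIu32 "\n", (uint32_t)(hdr >> TYPESHIFT));
	printf("offset = %" PRIu64 "\n", hdr & OFFSETMASK);
	return 0;
}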
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2000-2001 Christoph Hellwig. * Copyright (c) 2016 Krzysztof Blaszkowski */ /* * Veritas filesystem driver - superblock related routines. */ #include <linux/init.h> #include <linux/module.h> #include <linux/blkdev.h> #include <linux/fs.h> #include <linux/buffer_head.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/stat.h> #include <linux/vfs.h> #include <linux/mount.h> #include "vxfs.h" #include "vxfs_extern.h" #include "vxfs_dir.h" #include "vxfs_inode.h" MODULE_AUTHOR("Christoph Hellwig, Krzysztof Blaszkowski"); MODULE_DESCRIPTION("Veritas Filesystem (VxFS) driver"); MODULE_LICENSE("Dual BSD/GPL"); static struct kmem_cache *vxfs_inode_cachep; /** * vxfs_put_super - free superblock resources * @sbp: VFS superblock. * * Description: * vxfs_put_super frees all resources allocated for @sbp * after the last instance of the filesystem is unmounted. */ static void vxfs_put_super(struct super_block *sbp) { struct vxfs_sb_info *infp = VXFS_SBI(sbp); iput(infp->vsi_fship); iput(infp->vsi_ilist); iput(infp->vsi_stilist); brelse(infp->vsi_bp); kfree(infp); } /** * vxfs_statfs - get filesystem information * @dentry: VFS dentry to locate superblock * @bufp: output buffer * * Description: * vxfs_statfs fills the statfs buffer @bufp with information * about the filesystem described by @dentry. * * Returns: * Zero. * * Locking: * No locks held. * * Notes: * This is everything but complete... */ static int vxfs_statfs(struct dentry *dentry, struct kstatfs *bufp) { struct vxfs_sb_info *infp = VXFS_SBI(dentry->d_sb); struct vxfs_sb *raw_sb = infp->vsi_raw; bufp->f_type = VXFS_SUPER_MAGIC; bufp->f_bsize = dentry->d_sb->s_blocksize; bufp->f_blocks = fs32_to_cpu(infp, raw_sb->vs_dsize); bufp->f_bfree = fs32_to_cpu(infp, raw_sb->vs_free); bufp->f_bavail = 0; bufp->f_files = 0; bufp->f_ffree = fs32_to_cpu(infp, raw_sb->vs_ifree); bufp->f_namelen = VXFS_NAMELEN; return 0; } static int vxfs_remount(struct super_block *sb, int *flags, char *data) { sync_filesystem(sb); *flags |= SB_RDONLY; return 0; } static struct inode *vxfs_alloc_inode(struct super_block *sb) { struct vxfs_inode_info *vi; vi = alloc_inode_sb(sb, vxfs_inode_cachep, GFP_KERNEL); if (!vi) return NULL; inode_init_once(&vi->vfs_inode); return &vi->vfs_inode; } static void vxfs_free_inode(struct inode *inode) { kmem_cache_free(vxfs_inode_cachep, VXFS_INO(inode)); } static const struct super_operations vxfs_super_ops = { .alloc_inode = vxfs_alloc_inode, .free_inode = vxfs_free_inode, .evict_inode = vxfs_evict_inode, .put_super = vxfs_put_super, .statfs = vxfs_statfs, .remount_fs = vxfs_remount, }; static int vxfs_try_sb_magic(struct super_block *sbp, int silent, unsigned blk, __fs32 magic) { struct buffer_head *bp; struct vxfs_sb *rsbp; struct vxfs_sb_info *infp = VXFS_SBI(sbp); int rc = -ENOMEM; bp = sb_bread(sbp, blk); do { if (!bp || !buffer_mapped(bp)) { if (!silent) { printk(KERN_WARNING "vxfs: unable to read disk superblock at %u\n", blk); } break; } rc = -EINVAL; rsbp = (struct vxfs_sb *)bp->b_data; if (rsbp->vs_magic != magic) { if (!silent) printk(KERN_NOTICE "vxfs: WRONG superblock magic %08x at %u\n", rsbp->vs_magic, blk); break; } rc = 0; infp->vsi_raw = rsbp; infp->vsi_bp = bp; } while (0); if (rc) { infp->vsi_raw = NULL; infp->vsi_bp = NULL; brelse(bp); } return rc; } /** * vxfs_fill_super - read superblock into memory and initialize filesystem * @sbp: VFS superblock (to fill) * @dp: fs private mount data * @silent: do not complain loudly when sth is wrong * * 
Description: * We are called on the first mount of a filesystem to read the * superblock into memory and do some basic setup. * * Returns: * The superblock on success, else %NULL. * * Locking: * We are under @sbp->s_lock. */ static int vxfs_fill_super(struct super_block *sbp, void *dp, int silent) { struct vxfs_sb_info *infp; struct vxfs_sb *rsbp; u_long bsize; struct inode *root; int ret = -EINVAL; u32 j; sbp->s_flags |= SB_RDONLY; infp = kzalloc(sizeof(*infp), GFP_KERNEL); if (!infp) { printk(KERN_WARNING "vxfs: unable to allocate incore superblock\n"); return -ENOMEM; } bsize = sb_min_blocksize(sbp, BLOCK_SIZE); if (!bsize) { printk(KERN_WARNING "vxfs: unable to set blocksize\n"); goto out; } sbp->s_op = &vxfs_super_ops; sbp->s_fs_info = infp; sbp->s_time_min = 0; sbp->s_time_max = U32_MAX; if (!vxfs_try_sb_magic(sbp, silent, 1, (__force __fs32)cpu_to_le32(VXFS_SUPER_MAGIC))) { /* Unixware, x86 */ infp->byte_order = VXFS_BO_LE; } else if (!vxfs_try_sb_magic(sbp, silent, 8, (__force __fs32)cpu_to_be32(VXFS_SUPER_MAGIC))) { /* HP-UX, parisc */ infp->byte_order = VXFS_BO_BE; } else { if (!silent) printk(KERN_NOTICE "vxfs: can't find superblock.\n"); goto out; } rsbp = infp->vsi_raw; j = fs32_to_cpu(infp, rsbp->vs_version); if ((j < 2 || j > 4) && !silent) { printk(KERN_NOTICE "vxfs: unsupported VxFS version (%d)\n", j); goto out; } #ifdef DIAGNOSTIC printk(KERN_DEBUG "vxfs: supported VxFS version (%d)\n", j); printk(KERN_DEBUG "vxfs: blocksize: %d\n", fs32_to_cpu(infp, rsbp->vs_bsize)); #endif sbp->s_magic = fs32_to_cpu(infp, rsbp->vs_magic); infp->vsi_oltext = fs32_to_cpu(infp, rsbp->vs_oltext[0]); infp->vsi_oltsize = fs32_to_cpu(infp, rsbp->vs_oltsize); j = fs32_to_cpu(infp, rsbp->vs_bsize); if (!sb_set_blocksize(sbp, j)) { printk(KERN_WARNING "vxfs: unable to set final block size\n"); goto out; } if (vxfs_read_olt(sbp, bsize)) { printk(KERN_WARNING "vxfs: unable to read olt\n"); goto out; } if (vxfs_read_fshead(sbp)) { printk(KERN_WARNING "vxfs: unable to read fshead\n"); goto out; } root = vxfs_iget(sbp, VXFS_ROOT_INO); if (IS_ERR(root)) { ret = PTR_ERR(root); goto out; } sbp->s_root = d_make_root(root); if (!sbp->s_root) { printk(KERN_WARNING "vxfs: unable to get root dentry.\n"); goto out_free_ilist; } return 0; out_free_ilist: iput(infp->vsi_fship); iput(infp->vsi_ilist); iput(infp->vsi_stilist); out: brelse(infp->vsi_bp); kfree(infp); return ret; } /* * The usual module blurb. */ static struct dentry *vxfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_bdev(fs_type, flags, dev_name, data, vxfs_fill_super); } static struct file_system_type vxfs_fs_type = { .owner = THIS_MODULE, .name = "vxfs", .mount = vxfs_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; MODULE_ALIAS_FS("vxfs"); /* makes mount -t vxfs autoload the module */ MODULE_ALIAS("vxfs"); static int __init vxfs_init(void) { int rv; vxfs_inode_cachep = kmem_cache_create_usercopy("vxfs_inode", sizeof(struct vxfs_inode_info), 0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, offsetof(struct vxfs_inode_info, vii_immed.vi_immed), sizeof_field(struct vxfs_inode_info, vii_immed.vi_immed), NULL); if (!vxfs_inode_cachep) return -ENOMEM; rv = register_filesystem(&vxfs_fs_type); if (rv < 0) kmem_cache_destroy(vxfs_inode_cachep); return rv; } static void __exit vxfs_cleanup(void) { unregister_filesystem(&vxfs_fs_type); /* * Make sure all delayed rcu free inodes are flushed before we * destroy cache. 
*/ rcu_barrier(); kmem_cache_destroy(vxfs_inode_cachep); } module_init(vxfs_init); module_exit(vxfs_cleanup);
linux-master
fs/freevxfs/vxfs_super.c
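vxfs_fill_super() in fs/freevxfs/vxfs_super.c probes two well-known block numbers and accepts whichever copy of the superblock magic matches in little- or big-endian form, then remembers the byte order for every later fs32_to_cpu()/fs64_to_cpu() conversion. A userspace sketch of that detection step on a raw 4-byte magic field; the block numbers come from the code above and the magic value 0xa501FCF5 is the VXFS_SUPER_MAGIC constant as published in the kernel's magic header, so verify both against the headers before relying on the sketch.

/* Byte-order probe as in vxfs_fill_super(): accept the superblock magic in
 * either endianness and remember which one matched. */
#include <stdio.h>
#include <string.h>

enum byte_order { BO_UNKNOWN, BO_LE, BO_BE };

static enum byte_order probe_magic(const unsigned char raw[4])
{
	static const unsigned char le[4] = { 0xf5, 0xfc, 0x01, 0xa5 };	/* 0xa501FCF5, LE */
	static const unsigned char be[4] = { 0xa5, 0x01, 0xfc, 0xf5 };	/* 0xa501FCF5, BE */

	if (!memcmp(raw, le, sizeof(le)))
		return BO_LE;	/* Unixware / x86 layout, superblock at block 1 */
	if (!memcmp(raw, be, sizeof(be)))
		return BO_BE;	/* HP-UX / parisc layout, superblock at block 8 */
	return BO_UNKNOWN;
}

int main(void)
{
	const unsigned char le_disk[4] = { 0xf5, 0xfc, 0x01, 0xa5 };
	const unsigned char be_disk[4] = { 0xa5, 0x01, 0xfc, 0xf5 };

	printf("LE image -> %d, BE image -> %d\n",
	       probe_magic(le_disk), probe_magic(be_disk));
	return 0;
}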
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2000-2001 Christoph Hellwig. */ /* * Veritas filesystem driver - shared subroutines. */ #include <linux/fs.h> #include <linux/buffer_head.h> #include <linux/kernel.h> #include <linux/pagemap.h> #include "vxfs_extern.h" static int vxfs_read_folio(struct file *, struct folio *); static sector_t vxfs_bmap(struct address_space *, sector_t); const struct address_space_operations vxfs_aops = { .read_folio = vxfs_read_folio, .bmap = vxfs_bmap, }; inline void vxfs_put_page(struct page *pp) { kunmap(pp); put_page(pp); } /** * vxfs_get_page - read a page into memory. * @mapping: mapping to read from * @n: page number * * Description: * vxfs_get_page reads the @n th page of @ip into the pagecache. * * Returns: * The wanted page on success, else a NULL pointer. */ struct page * vxfs_get_page(struct address_space *mapping, u_long n) { struct page * pp; pp = read_mapping_page(mapping, n, NULL); if (!IS_ERR(pp)) { kmap(pp); /** if (!PageChecked(pp)) **/ /** vxfs_check_page(pp); **/ } return (pp); } /** * vxfs_bread - read buffer for a give inode,block tuple * @ip: inode * @block: logical block * * Description: * The vxfs_bread function reads block no @block of * @ip into the buffercache. * * Returns: * The resulting &struct buffer_head. */ struct buffer_head * vxfs_bread(struct inode *ip, int block) { struct buffer_head *bp; daddr_t pblock; pblock = vxfs_bmap1(ip, block); bp = sb_bread(ip->i_sb, pblock); return (bp); } /** * vxfs_getblk - locate buffer for given inode,block tuple * @ip: inode * @iblock: logical block * @bp: buffer skeleton * @create: %TRUE if blocks may be newly allocated. * * Description: * The vxfs_getblk function fills @bp with the right physical * block and device number to perform a lowlevel read/write on * it. * * Returns: * Zero on success, else a negativ error code (-EIO). */ static int vxfs_getblk(struct inode *ip, sector_t iblock, struct buffer_head *bp, int create) { daddr_t pblock; pblock = vxfs_bmap1(ip, iblock); if (pblock != 0) { map_bh(bp, ip->i_sb, pblock); return 0; } return -EIO; } /** * vxfs_read_folio - read one page synchronously into the pagecache * @file: file context (unused) * @folio: folio to fill in. * * Description: * The vxfs_read_folio routine reads @folio synchronously into the * pagecache. * * Returns: * Zero on success, else a negative error code. * * Locking status: * @folio is locked and will be unlocked. */ static int vxfs_read_folio(struct file *file, struct folio *folio) { return block_read_full_folio(folio, vxfs_getblk); } /** * vxfs_bmap - perform logical to physical block mapping * @mapping: logical to physical mapping to use * @block: logical block (relative to @mapping). * * Description: * Vxfs_bmap find out the corresponding phsical block to the * @mapping, @block pair. * * Returns: * Physical block number on success, else Zero. * * Locking status: * We are under the bkl. */ static sector_t vxfs_bmap(struct address_space *mapping, sector_t block) { return generic_block_bmap(mapping, block, vxfs_getblk); }
linux-master
fs/freevxfs/vxfs_subr.c
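vxfs_getblk() and vxfs_bmap() above only translate a logical file block into a physical block (via vxfs_bmap1()) and let the generic buffer/folio code do the I/O. A minimal userspace sketch of that translation step, against a made-up in-memory extent table:

/*
 * Illustrative sketch (userspace, hypothetical data layout): resolving a
 * logical block to a physical block through a tiny extent table, the kind
 * of translation vxfs_getblk()/vxfs_bmap() delegate to vxfs_bmap1().
 */
#include <stdio.h>

struct extent {
	unsigned long logical;	/* first logical block covered */
	unsigned long physical;	/* first physical block */
	unsigned long len;	/* number of blocks */
};

/* Returns the physical block, or 0 if the logical block is a hole. */
static unsigned long bmap_lookup(const struct extent *map, int n,
				 unsigned long lblock)
{
	for (int i = 0; i < n; i++) {
		if (lblock >= map[i].logical &&
		    lblock < map[i].logical + map[i].len)
			return map[i].physical + (lblock - map[i].logical);
	}
	return 0;
}

int main(void)
{
	static const struct extent map[] = {
		{ 0, 100, 8 },	/* blocks 0..7  -> 100..107 */
		{ 8, 400, 4 },	/* blocks 8..11 -> 400..403 */
	};

	printf("%lu\n", bmap_lookup(map, 2, 3));	/* 103 */
	printf("%lu\n", bmap_lookup(map, 2, 9));	/* 401 */
	printf("%lu\n", bmap_lookup(map, 2, 20));	/* 0 = hole */
	return 0;
}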
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2000-2001 Christoph Hellwig. */ /* * Veritas filesystem driver - object location table support. */ #include <linux/fs.h> #include <linux/buffer_head.h> #include <linux/kernel.h> #include "vxfs.h" #include "vxfs_olt.h" #include "vxfs_extern.h" static inline void vxfs_get_fshead(struct vxfs_oltfshead *fshp, struct vxfs_sb_info *infp) { BUG_ON(infp->vsi_fshino); infp->vsi_fshino = fs32_to_cpu(infp, fshp->olt_fsino[0]); } static inline void vxfs_get_ilist(struct vxfs_oltilist *ilistp, struct vxfs_sb_info *infp) { BUG_ON(infp->vsi_iext); infp->vsi_iext = fs32_to_cpu(infp, ilistp->olt_iext[0]); } static inline u_long vxfs_oblock(struct super_block *sbp, daddr_t block, u_long bsize) { BUG_ON(sbp->s_blocksize % bsize); return (block * (sbp->s_blocksize / bsize)); } /** * vxfs_read_olt - read olt * @sbp: superblock of the filesystem * @bsize: blocksize of the filesystem * * Description: * vxfs_read_olt reads the olt of the filesystem described by @sbp * into main memory and does some basic setup. * * Returns: * Zero on success, else a negative error code. */ int vxfs_read_olt(struct super_block *sbp, u_long bsize) { struct vxfs_sb_info *infp = VXFS_SBI(sbp); struct buffer_head *bp; struct vxfs_olt *op; char *oaddr, *eaddr; bp = sb_bread(sbp, vxfs_oblock(sbp, infp->vsi_oltext, bsize)); if (!bp || !bp->b_data) goto fail; op = (struct vxfs_olt *)bp->b_data; if (fs32_to_cpu(infp, op->olt_magic) != VXFS_OLT_MAGIC) { printk(KERN_NOTICE "vxfs: ivalid olt magic number\n"); goto fail; } /* * It is in theory possible that vsi_oltsize is > 1. * I've not seen any such filesystem yet and I'm lazy.. --hch */ if (infp->vsi_oltsize > 1) { printk(KERN_NOTICE "vxfs: oltsize > 1 detected.\n"); printk(KERN_NOTICE "vxfs: please notify [email protected]\n"); goto fail; } oaddr = bp->b_data + fs32_to_cpu(infp, op->olt_size); eaddr = bp->b_data + (infp->vsi_oltsize * sbp->s_blocksize); while (oaddr < eaddr) { struct vxfs_oltcommon *ocp = (struct vxfs_oltcommon *)oaddr; switch (fs32_to_cpu(infp, ocp->olt_type)) { case VXFS_OLT_FSHEAD: vxfs_get_fshead((struct vxfs_oltfshead *)oaddr, infp); break; case VXFS_OLT_ILIST: vxfs_get_ilist((struct vxfs_oltilist *)oaddr, infp); break; } oaddr += fs32_to_cpu(infp, ocp->olt_size); } brelse(bp); return (infp->vsi_fshino && infp->vsi_iext) ? 0 : -EINVAL; fail: brelse(bp); return -EINVAL; }
linux-master
fs/freevxfs/vxfs_olt.c
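vxfs_read_olt() above walks the object location table as a sequence of variable-length typed records, dispatching on olt_type and advancing by olt_size. A small userspace sketch of that walk, with a hypothetical record layout (not the on-disk OLT format):

/*
 * Illustrative sketch (userspace, made-up record layout): walking a buffer
 * of variable-length, typed records the way vxfs_read_olt() walks OLT
 * entries, advancing by each record's own size field.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct rec_hdr {
	uint32_t type;
	uint32_t size;	/* total record size in bytes, including header */
};

static void walk_records(const uint8_t *buf, size_t len)
{
	const uint8_t *p = buf, *end = buf + len;

	while (p + sizeof(struct rec_hdr) <= end) {
		struct rec_hdr hdr;

		memcpy(&hdr, p, sizeof(hdr));
		if (hdr.size < sizeof(hdr) || p + hdr.size > end)
			break;	/* malformed record, stop */
		printf("record type %u, %u bytes\n", hdr.type, hdr.size);
		p += hdr.size;	/* advance by the record's own size */
	}
}

int main(void)
{
	uint8_t buf[32];
	struct rec_hdr a = { 1, 12 }, b = { 2, 8 };

	memset(buf, 0, sizeof(buf));
	memcpy(buf, &a, sizeof(a));
	memcpy(buf + 12, &b, sizeof(b));
	walk_records(buf, 20);
	return 0;
}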
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2000-2001 Christoph Hellwig. * Copyright (c) 2016 Krzysztof Blaszkowski */ /* * Veritas filesystem driver - fileset header routines. */ #include <linux/fs.h> #include <linux/buffer_head.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/string.h> #include "vxfs.h" #include "vxfs_inode.h" #include "vxfs_extern.h" #include "vxfs_fshead.h" #ifdef DIAGNOSTIC static void vxfs_dumpfsh(struct vxfs_fsh *fhp) { printk("\n\ndumping fileset header:\n"); printk("----------------------------\n"); printk("version: %u\n", fhp->fsh_version); printk("fsindex: %u\n", fhp->fsh_fsindex); printk("iauino: %u\tninodes:%u\n", fhp->fsh_iauino, fhp->fsh_ninodes); printk("maxinode: %u\tlctino: %u\n", fhp->fsh_maxinode, fhp->fsh_lctino); printk("nau: %u\n", fhp->fsh_nau); printk("ilistino[0]: %u\tilistino[1]: %u\n", fhp->fsh_ilistino[0], fhp->fsh_ilistino[1]); } #endif /** * vxfs_getfsh - read fileset header into memory * @ip: the (fake) fileset header inode * @which: 0 for the structural, 1 for the primary fsh. * * Description: * vxfs_getfsh reads either the structural or primary fileset header * described by @ip into memory. * * Returns: * The fileset header structure on success, else Zero. */ static struct vxfs_fsh * vxfs_getfsh(struct inode *ip, int which) { struct buffer_head *bp; bp = vxfs_bread(ip, which); if (bp) { struct vxfs_fsh *fhp; if (!(fhp = kmalloc(sizeof(*fhp), GFP_KERNEL))) goto out; memcpy(fhp, bp->b_data, sizeof(*fhp)); put_bh(bp); return (fhp); } out: brelse(bp); return NULL; } /** * vxfs_read_fshead - read the fileset headers * @sbp: superblock to which the fileset belongs * * Description: * vxfs_read_fshead will fill the inode and structural inode list in @sb. * * Returns: * Zero on success, else a negative error code (-EINVAL). 
*/ int vxfs_read_fshead(struct super_block *sbp) { struct vxfs_sb_info *infp = VXFS_SBI(sbp); struct vxfs_fsh *pfp, *sfp; struct vxfs_inode_info *vip; infp->vsi_fship = vxfs_blkiget(sbp, infp->vsi_iext, infp->vsi_fshino); if (!infp->vsi_fship) { printk(KERN_ERR "vxfs: unable to read fsh inode\n"); return -EINVAL; } vip = VXFS_INO(infp->vsi_fship); if (!VXFS_ISFSH(vip)) { printk(KERN_ERR "vxfs: fsh list inode is of wrong type (%x)\n", vip->vii_mode & VXFS_TYPE_MASK); goto out_iput_fship; } #ifdef DIAGNOSTIC printk("vxfs: fsh inode dump:\n"); vxfs_dumpi(vip, infp->vsi_fshino); #endif sfp = vxfs_getfsh(infp->vsi_fship, 0); if (!sfp) { printk(KERN_ERR "vxfs: unable to get structural fsh\n"); goto out_iput_fship; } #ifdef DIAGNOSTIC vxfs_dumpfsh(sfp); #endif pfp = vxfs_getfsh(infp->vsi_fship, 1); if (!pfp) { printk(KERN_ERR "vxfs: unable to get primary fsh\n"); goto out_free_sfp; } #ifdef DIAGNOSTIC vxfs_dumpfsh(pfp); #endif infp->vsi_stilist = vxfs_blkiget(sbp, infp->vsi_iext, fs32_to_cpu(infp, sfp->fsh_ilistino[0])); if (!infp->vsi_stilist) { printk(KERN_ERR "vxfs: unable to get structural list inode\n"); goto out_free_pfp; } if (!VXFS_ISILT(VXFS_INO(infp->vsi_stilist))) { printk(KERN_ERR "vxfs: structural list inode is of wrong type (%x)\n", VXFS_INO(infp->vsi_stilist)->vii_mode & VXFS_TYPE_MASK); goto out_iput_stilist; } infp->vsi_ilist = vxfs_stiget(sbp, fs32_to_cpu(infp, pfp->fsh_ilistino[0])); if (!infp->vsi_ilist) { printk(KERN_ERR "vxfs: unable to get inode list inode\n"); goto out_iput_stilist; } if (!VXFS_ISILT(VXFS_INO(infp->vsi_ilist))) { printk(KERN_ERR "vxfs: inode list inode is of wrong type (%x)\n", VXFS_INO(infp->vsi_ilist)->vii_mode & VXFS_TYPE_MASK); goto out_iput_ilist; } kfree(pfp); kfree(sfp); return 0; out_iput_ilist: iput(infp->vsi_ilist); out_iput_stilist: iput(infp->vsi_stilist); out_free_pfp: kfree(pfp); out_free_sfp: kfree(sfp); out_iput_fship: iput(infp->vsi_fship); return -EINVAL; }
linux-master
fs/freevxfs/vxfs_fshead.c
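vxfs_read_fshead() above is a textbook example of the reverse-order goto unwind: each inode it pins gets a cleanup label, and a failure partway through jumps to the label that releases everything acquired so far, in reverse. A minimal userspace sketch of the same idiom, using malloc/free as stand-in resources:

/*
 * Illustrative sketch (plain userspace C): the reverse-order goto unwind
 * used by vxfs_read_fshead() -- each acquisition gets a matching cleanup
 * label, and a failure at step N jumps to the label that releases steps
 * N-1..1 in reverse order.
 */
#include <stdio.h>
#include <stdlib.h>

static int setup_three(void)
{
	char *a, *b, *c;

	a = malloc(16);
	if (!a)
		return -1;
	b = malloc(16);
	if (!b)
		goto out_free_a;
	c = malloc(16);
	if (!c)
		goto out_free_b;

	puts("all three resources acquired");
	free(c);
	free(b);
	free(a);
	return 0;

out_free_b:
	free(b);
out_free_a:
	free(a);
	return -1;
}

int main(void)
{
	return setup_three() ? 1 : 0;
}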
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2000-2001 Christoph Hellwig. */ /* * Veritas filesystem driver - support for 'immed' inodes. */ #include <linux/fs.h> #include <linux/pagemap.h> #include "vxfs.h" #include "vxfs_extern.h" #include "vxfs_inode.h" /** * vxfs_immed_read_folio - read part of an immed inode into pagecache * @file: file context (unused) * @folio: folio to fill in. * * Description: * vxfs_immed_read_folio reads a part of the immed area of the * file that hosts @folio into the pagecache. * * Returns: * Zero on success, else a negative error code. * * Locking status: * @folio is locked and will be unlocked. */ static int vxfs_immed_read_folio(struct file *fp, struct folio *folio) { struct vxfs_inode_info *vip = VXFS_INO(folio->mapping->host); void *src = vip->vii_immed.vi_immed + folio_pos(folio); unsigned long i; for (i = 0; i < folio_nr_pages(folio); i++) { memcpy_to_page(folio_page(folio, i), 0, src, PAGE_SIZE); src += PAGE_SIZE; } folio_mark_uptodate(folio); folio_unlock(folio); return 0; } /* * Address space operations for immed files and directories. */ const struct address_space_operations vxfs_immed_aops = { .read_folio = vxfs_immed_read_folio, };
linux-master
fs/freevxfs/vxfs_immed.c
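vxfs_immed_read_folio() above serves an "immed" file by copying the inline data kept in the inode straight into the folio, one page-sized chunk at a time. A small userspace sketch of chunked copying, with CHUNK standing in for PAGE_SIZE (and, unlike the kernel code, trimming the final partial chunk):

/*
 * Illustrative sketch (userspace): copying an inline ("immed") buffer out
 * in fixed-size chunks, as vxfs_immed_read_folio() does when it fills each
 * page of a folio from the in-inode immediate area.
 */
#include <stdio.h>
#include <string.h>

#define CHUNK 8	/* stand-in for PAGE_SIZE */

static void copy_in_chunks(char *dst, const char *src, size_t len)
{
	for (size_t off = 0; off < len; off += CHUNK) {
		size_t n = len - off < CHUNK ? len - off : CHUNK;

		memcpy(dst + off, src + off, n);
	}
}

int main(void)
{
	const char src[] = "immediate file data stored in the inode";
	char dst[sizeof(src)];

	copy_in_chunks(dst, src, sizeof(src));
	puts(dst);
	return 0;
}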
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2000-2001 Christoph Hellwig. * Copyright (c) 2016 Krzysztof Blaszkowski */ /* * Veritas filesystem driver - inode routines. */ #include <linux/fs.h> #include <linux/buffer_head.h> #include <linux/pagemap.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/namei.h> #include "vxfs.h" #include "vxfs_inode.h" #include "vxfs_extern.h" #ifdef DIAGNOSTIC /* * Dump inode contents (partially). */ void vxfs_dumpi(struct vxfs_inode_info *vip, ino_t ino) { printk(KERN_DEBUG "\n\n"); if (ino) printk(KERN_DEBUG "dumping vxfs inode %ld\n", ino); else printk(KERN_DEBUG "dumping unknown vxfs inode\n"); printk(KERN_DEBUG "---------------------------\n"); printk(KERN_DEBUG "mode is %x\n", vip->vii_mode); printk(KERN_DEBUG "nlink:%u, uid:%u, gid:%u\n", vip->vii_nlink, vip->vii_uid, vip->vii_gid); printk(KERN_DEBUG "size:%Lx, blocks:%u\n", vip->vii_size, vip->vii_blocks); printk(KERN_DEBUG "orgtype:%u\n", vip->vii_orgtype); } #endif /** * vxfs_transmod - mode for a VxFS inode * @vip: VxFS inode * * Description: * vxfs_transmod returns a Linux mode_t for a given * VxFS inode structure. */ static __inline__ umode_t vxfs_transmod(struct vxfs_inode_info *vip) { umode_t ret = vip->vii_mode & ~VXFS_TYPE_MASK; if (VXFS_ISFIFO(vip)) ret |= S_IFIFO; if (VXFS_ISCHR(vip)) ret |= S_IFCHR; if (VXFS_ISDIR(vip)) ret |= S_IFDIR; if (VXFS_ISBLK(vip)) ret |= S_IFBLK; if (VXFS_ISLNK(vip)) ret |= S_IFLNK; if (VXFS_ISREG(vip)) ret |= S_IFREG; if (VXFS_ISSOC(vip)) ret |= S_IFSOCK; return (ret); } static inline void dip2vip_cpy(struct vxfs_sb_info *sbi, struct vxfs_inode_info *vip, struct vxfs_dinode *dip) { struct inode *inode = &vip->vfs_inode; vip->vii_mode = fs32_to_cpu(sbi, dip->vdi_mode); vip->vii_nlink = fs32_to_cpu(sbi, dip->vdi_nlink); vip->vii_uid = fs32_to_cpu(sbi, dip->vdi_uid); vip->vii_gid = fs32_to_cpu(sbi, dip->vdi_gid); vip->vii_size = fs64_to_cpu(sbi, dip->vdi_size); vip->vii_atime = fs32_to_cpu(sbi, dip->vdi_atime); vip->vii_autime = fs32_to_cpu(sbi, dip->vdi_autime); vip->vii_mtime = fs32_to_cpu(sbi, dip->vdi_mtime); vip->vii_mutime = fs32_to_cpu(sbi, dip->vdi_mutime); vip->vii_ctime = fs32_to_cpu(sbi, dip->vdi_ctime); vip->vii_cutime = fs32_to_cpu(sbi, dip->vdi_cutime); vip->vii_orgtype = dip->vdi_orgtype; vip->vii_blocks = fs32_to_cpu(sbi, dip->vdi_blocks); vip->vii_gen = fs32_to_cpu(sbi, dip->vdi_gen); if (VXFS_ISDIR(vip)) vip->vii_dotdot = fs32_to_cpu(sbi, dip->vdi_dotdot); else if (!VXFS_ISREG(vip) && !VXFS_ISLNK(vip)) vip->vii_rdev = fs32_to_cpu(sbi, dip->vdi_rdev); /* don't endian swap the fields that differ by orgtype */ memcpy(&vip->vii_org, &dip->vdi_org, sizeof(vip->vii_org)); inode->i_mode = vxfs_transmod(vip); i_uid_write(inode, (uid_t)vip->vii_uid); i_gid_write(inode, (gid_t)vip->vii_gid); set_nlink(inode, vip->vii_nlink); inode->i_size = vip->vii_size; inode->i_atime.tv_sec = vip->vii_atime; inode_set_ctime(inode, vip->vii_ctime, 0); inode->i_mtime.tv_sec = vip->vii_mtime; inode->i_atime.tv_nsec = 0; inode->i_mtime.tv_nsec = 0; inode->i_blocks = vip->vii_blocks; inode->i_generation = vip->vii_gen; } /** * vxfs_blkiget - find inode based on extent # * @sbp: superblock of the filesystem we search in * @extent: number of the extent to search * @ino: inode number to search * * Description: * vxfs_blkiget searches inode @ino in the filesystem described by * @sbp in the extent @extent. * Returns the matching VxFS inode on success, else a NULL pointer. 
* * NOTE: * While __vxfs_iget uses the pagecache vxfs_blkiget uses the * buffercache. This function should not be used outside the * read_super() method, otherwise the data may be incoherent. */ struct inode * vxfs_blkiget(struct super_block *sbp, u_long extent, ino_t ino) { struct buffer_head *bp; struct inode *inode; u_long block, offset; inode = new_inode(sbp); if (!inode) return NULL; inode->i_ino = get_next_ino(); block = extent + ((ino * VXFS_ISIZE) / sbp->s_blocksize); offset = ((ino % (sbp->s_blocksize / VXFS_ISIZE)) * VXFS_ISIZE); bp = sb_bread(sbp, block); if (bp && buffer_mapped(bp)) { struct vxfs_inode_info *vip = VXFS_INO(inode); struct vxfs_dinode *dip; dip = (struct vxfs_dinode *)(bp->b_data + offset); dip2vip_cpy(VXFS_SBI(sbp), vip, dip); vip->vfs_inode.i_mapping->a_ops = &vxfs_aops; #ifdef DIAGNOSTIC vxfs_dumpi(vip, ino); #endif brelse(bp); return inode; } printk(KERN_WARNING "vxfs: unable to read block %ld\n", block); brelse(bp); iput(inode); return NULL; } /** * __vxfs_iget - generic find inode facility * @ilistp: inode list * @vip: VxFS inode to fill in * @ino: inode number * * Description: * Search the for inode number @ino in the filesystem * described by @sbp. Use the specified inode table (@ilistp). * Returns the matching inode on success, else an error code. */ static int __vxfs_iget(struct inode *ilistp, struct vxfs_inode_info *vip, ino_t ino) { struct page *pp; u_long offset; offset = (ino % (PAGE_SIZE / VXFS_ISIZE)) * VXFS_ISIZE; pp = vxfs_get_page(ilistp->i_mapping, ino * VXFS_ISIZE / PAGE_SIZE); if (!IS_ERR(pp)) { struct vxfs_dinode *dip; caddr_t kaddr = (char *)page_address(pp); dip = (struct vxfs_dinode *)(kaddr + offset); dip2vip_cpy(VXFS_SBI(ilistp->i_sb), vip, dip); vip->vfs_inode.i_mapping->a_ops = &vxfs_aops; #ifdef DIAGNOSTIC vxfs_dumpi(vip, ino); #endif vxfs_put_page(pp); return 0; } printk(KERN_WARNING "vxfs: error on page 0x%p for inode %ld\n", pp, (unsigned long)ino); return PTR_ERR(pp); } /** * vxfs_stiget - find inode using the structural inode list * @sbp: VFS superblock * @ino: inode # * * Description: * Find inode @ino in the filesystem described by @sbp using * the structural inode list. * Returns the matching inode on success, else a NULL pointer. */ struct inode * vxfs_stiget(struct super_block *sbp, ino_t ino) { struct inode *inode; int error; inode = new_inode(sbp); if (!inode) return NULL; inode->i_ino = get_next_ino(); error = __vxfs_iget(VXFS_SBI(sbp)->vsi_stilist, VXFS_INO(inode), ino); if (error) { iput(inode); return NULL; } return inode; } /** * vxfs_iget - get an inode * @sbp: the superblock to get the inode for * @ino: the number of the inode to get * * Description: * vxfs_read_inode creates an inode, reads the disk inode for @ino and fills * in all relevant fields in the new inode. 
*/ struct inode * vxfs_iget(struct super_block *sbp, ino_t ino) { struct vxfs_inode_info *vip; const struct address_space_operations *aops; struct inode *ip; int error; ip = iget_locked(sbp, ino); if (!ip) return ERR_PTR(-ENOMEM); if (!(ip->i_state & I_NEW)) return ip; vip = VXFS_INO(ip); error = __vxfs_iget(VXFS_SBI(sbp)->vsi_ilist, vip, ino); if (error) { iget_failed(ip); return ERR_PTR(error); } if (VXFS_ISIMMED(vip)) aops = &vxfs_immed_aops; else aops = &vxfs_aops; if (S_ISREG(ip->i_mode)) { ip->i_fop = &generic_ro_fops; ip->i_mapping->a_ops = aops; } else if (S_ISDIR(ip->i_mode)) { ip->i_op = &vxfs_dir_inode_ops; ip->i_fop = &vxfs_dir_operations; ip->i_mapping->a_ops = aops; } else if (S_ISLNK(ip->i_mode)) { if (!VXFS_ISIMMED(vip)) { ip->i_op = &page_symlink_inode_operations; inode_nohighmem(ip); ip->i_mapping->a_ops = &vxfs_aops; } else { ip->i_op = &simple_symlink_inode_operations; ip->i_link = vip->vii_immed.vi_immed; nd_terminate_link(ip->i_link, ip->i_size, sizeof(vip->vii_immed.vi_immed) - 1); } } else init_special_inode(ip, ip->i_mode, old_decode_dev(vip->vii_rdev)); unlock_new_inode(ip); return ip; } /** * vxfs_evict_inode - remove inode from main memory * @ip: inode to discard. * * Description: * vxfs_evict_inode() is called on the final iput and frees the private * inode area. */ void vxfs_evict_inode(struct inode *ip) { truncate_inode_pages_final(&ip->i_data); clear_inode(ip); }
linux-master
fs/freevxfs/vxfs_inode.c
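vxfs_blkiget() above locates an on-disk inode with two pieces of arithmetic: which block of the inode-list extent holds inode ino, and at what byte offset within that block it sits. The userspace sketch below reproduces just that arithmetic; the 256-byte inode size and 1024-byte block size are example values, not necessarily the real VXFS_ISIZE or filesystem block size.

/*
 * Illustrative sketch (userspace): locating an on-disk inode from its
 * number, mirroring the arithmetic in vxfs_blkiget().  ISIZE and BSIZE
 * are example values.
 */
#include <stdio.h>

#define ISIZE 256UL	/* bytes per on-disk inode (example) */
#define BSIZE 1024UL	/* filesystem block size (example) */

int main(void)
{
	unsigned long extent = 5000;	/* first block of the inode list */
	unsigned long ino = 11;

	unsigned long block  = extent + (ino * ISIZE) / BSIZE;
	unsigned long offset = (ino % (BSIZE / ISIZE)) * ISIZE;

	/* 11 * 256 = 2816 -> block 5000 + 2; offset (11 % 4) * 256 = 768 */
	printf("inode %lu: block %lu, offset %lu\n", ino, block, offset);
	return 0;
}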
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2000-2001 Christoph Hellwig. * Copyright (c) 2016 Krzysztof Blaszkowski */ /* * Veritas filesystem driver - lookup and other directory related code. */ #include <linux/fs.h> #include <linux/time.h> #include <linux/mm.h> #include <linux/highmem.h> #include <linux/kernel.h> #include <linux/pagemap.h> #include "vxfs.h" #include "vxfs_dir.h" #include "vxfs_inode.h" #include "vxfs_extern.h" /* * Number of VxFS blocks per page. */ #define VXFS_BLOCK_PER_PAGE(sbp) ((PAGE_SIZE / (sbp)->s_blocksize)) static struct dentry * vxfs_lookup(struct inode *, struct dentry *, unsigned int); static int vxfs_readdir(struct file *, struct dir_context *); const struct inode_operations vxfs_dir_inode_ops = { .lookup = vxfs_lookup, }; const struct file_operations vxfs_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, .iterate_shared = vxfs_readdir, }; /** * vxfs_find_entry - find a mathing directory entry for a dentry * @ip: directory inode * @dp: dentry for which we want to find a direct * @ppp: gets filled with the page the return value sits in * * Description: * vxfs_find_entry finds a &struct vxfs_direct for the VFS directory * cache entry @dp. @ppp will be filled with the page the return * value resides in. * * Returns: * The wanted direct on success, else a NULL pointer. */ static struct vxfs_direct * vxfs_find_entry(struct inode *ip, struct dentry *dp, struct page **ppp) { u_long bsize = ip->i_sb->s_blocksize; const char *name = dp->d_name.name; int namelen = dp->d_name.len; loff_t limit = VXFS_DIRROUND(ip->i_size); struct vxfs_direct *de_exit = NULL; loff_t pos = 0; struct vxfs_sb_info *sbi = VXFS_SBI(ip->i_sb); while (pos < limit) { struct page *pp; char *kaddr; int pg_ofs = pos & ~PAGE_MASK; pp = vxfs_get_page(ip->i_mapping, pos >> PAGE_SHIFT); if (IS_ERR(pp)) return NULL; kaddr = (char *)page_address(pp); while (pg_ofs < PAGE_SIZE && pos < limit) { struct vxfs_direct *de; if ((pos & (bsize - 1)) < 4) { struct vxfs_dirblk *dbp = (struct vxfs_dirblk *) (kaddr + (pos & ~PAGE_MASK)); int overhead = VXFS_DIRBLKOV(sbi, dbp); pos += overhead; pg_ofs += overhead; } de = (struct vxfs_direct *)(kaddr + pg_ofs); if (!de->d_reclen) { pos += bsize - 1; pos &= ~(bsize - 1); break; } pg_ofs += fs16_to_cpu(sbi, de->d_reclen); pos += fs16_to_cpu(sbi, de->d_reclen); if (!de->d_ino) continue; if (namelen != fs16_to_cpu(sbi, de->d_namelen)) continue; if (!memcmp(name, de->d_name, namelen)) { *ppp = pp; de_exit = de; break; } } if (!de_exit) vxfs_put_page(pp); else break; } return de_exit; } /** * vxfs_inode_by_name - find inode number for dentry * @dip: directory to search in * @dp: dentry we search for * * Description: * vxfs_inode_by_name finds out the inode number of * the path component described by @dp in @dip. * * Returns: * The wanted inode number on success, else Zero. */ static ino_t vxfs_inode_by_name(struct inode *dip, struct dentry *dp) { struct vxfs_direct *de; struct page *pp; ino_t ino = 0; de = vxfs_find_entry(dip, dp, &pp); if (de) { ino = fs32_to_cpu(VXFS_SBI(dip->i_sb), de->d_ino); kunmap(pp); put_page(pp); } return (ino); } /** * vxfs_lookup - lookup pathname component * @dip: dir in which we lookup * @dp: dentry we lookup * @flags: lookup flags * * Description: * vxfs_lookup tries to lookup the pathname component described * by @dp in @dip. * * Returns: * A NULL-pointer on success, else a negative error code encoded * in the return pointer. 
*/ static struct dentry * vxfs_lookup(struct inode *dip, struct dentry *dp, unsigned int flags) { struct inode *ip = NULL; ino_t ino; if (dp->d_name.len > VXFS_NAMELEN) return ERR_PTR(-ENAMETOOLONG); ino = vxfs_inode_by_name(dip, dp); if (ino) ip = vxfs_iget(dip->i_sb, ino); return d_splice_alias(ip, dp); } /** * vxfs_readdir - read a directory * @fp: the directory to read * @retp: return buffer * @filler: filldir callback * * Description: * vxfs_readdir fills @retp with directory entries from @fp * using the VFS supplied callback @filler. * * Returns: * Zero. */ static int vxfs_readdir(struct file *fp, struct dir_context *ctx) { struct inode *ip = file_inode(fp); struct super_block *sbp = ip->i_sb; u_long bsize = sbp->s_blocksize; loff_t pos, limit; struct vxfs_sb_info *sbi = VXFS_SBI(sbp); if (ctx->pos == 0) { if (!dir_emit_dot(fp, ctx)) goto out; ctx->pos++; } if (ctx->pos == 1) { if (!dir_emit(ctx, "..", 2, VXFS_INO(ip)->vii_dotdot, DT_DIR)) goto out; ctx->pos++; } limit = VXFS_DIRROUND(ip->i_size); if (ctx->pos > limit) goto out; pos = ctx->pos & ~3L; while (pos < limit) { struct page *pp; char *kaddr; int pg_ofs = pos & ~PAGE_MASK; int rc = 0; pp = vxfs_get_page(ip->i_mapping, pos >> PAGE_SHIFT); if (IS_ERR(pp)) return -ENOMEM; kaddr = (char *)page_address(pp); while (pg_ofs < PAGE_SIZE && pos < limit) { struct vxfs_direct *de; if ((pos & (bsize - 1)) < 4) { struct vxfs_dirblk *dbp = (struct vxfs_dirblk *) (kaddr + (pos & ~PAGE_MASK)); int overhead = VXFS_DIRBLKOV(sbi, dbp); pos += overhead; pg_ofs += overhead; } de = (struct vxfs_direct *)(kaddr + pg_ofs); if (!de->d_reclen) { pos += bsize - 1; pos &= ~(bsize - 1); break; } pg_ofs += fs16_to_cpu(sbi, de->d_reclen); pos += fs16_to_cpu(sbi, de->d_reclen); if (!de->d_ino) continue; rc = dir_emit(ctx, de->d_name, fs16_to_cpu(sbi, de->d_namelen), fs32_to_cpu(sbi, de->d_ino), DT_UNKNOWN); if (!rc) { /* the dir entry was not read, fix pos. */ pos -= fs16_to_cpu(sbi, de->d_reclen); break; } } vxfs_put_page(pp); if (!rc) break; } ctx->pos = pos | 2; out: return 0; }
linux-master
fs/freevxfs/vxfs_lookup.c
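Both vxfs_find_entry() and vxfs_readdir() above scan directories the same way: skip the small per-block header, then advance through variable-length entries by d_reclen, where a zero reclen means the rest of the block is empty and a zero inode number marks an unused slot. A simplified userspace sketch of one block's scan (the struct layout is illustrative, not the on-disk format):

/*
 * Illustrative sketch (userspace, simplified layout): walking
 * variable-length directory entries chained by a record length, the same
 * pattern vxfs_find_entry() and vxfs_readdir() use.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct dirent_disk {
	uint32_t d_ino;
	uint16_t d_reclen;
	uint16_t d_namelen;
	char d_name[24];
};

static void list_block(const uint8_t *block, size_t bsize)
{
	size_t pos = 0;

	while (pos + sizeof(struct dirent_disk) <= bsize) {
		struct dirent_disk de;

		memcpy(&de, block + pos, sizeof(de));
		if (!de.d_reclen)
			break;			/* rest of block is empty */
		if (de.d_ino)			/* d_ino == 0 means unused */
			printf("ino %u  %.*s\n", de.d_ino,
			       de.d_namelen, de.d_name);
		pos += de.d_reclen;
	}
}

int main(void)
{
	uint8_t block[128] = { 0 };
	struct dirent_disk a = { 2, sizeof(struct dirent_disk), 4, "file" };
	struct dirent_disk b = { 0, sizeof(struct dirent_disk), 0, "" };
	struct dirent_disk c = { 7, sizeof(struct dirent_disk), 3, "dir" };

	memcpy(block, &a, sizeof(a));
	memcpy(block + sizeof(a), &b, sizeof(b));
	memcpy(block + 2 * sizeof(a), &c, sizeof(c));
	list_block(block, sizeof(block));
	return 0;
}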
/* AFS superblock handling * * Copyright (c) 2002, 2007, 2018 Red Hat, Inc. All rights reserved. * * This software may be freely redistributed under the terms of the * GNU General Public License. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Authors: David Howells <[email protected]> * David Woodhouse <[email protected]> * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/mount.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/pagemap.h> #include <linux/fs_parser.h> #include <linux/statfs.h> #include <linux/sched.h> #include <linux/nsproxy.h> #include <linux/magic.h> #include <net/net_namespace.h> #include "internal.h" static void afs_i_init_once(void *foo); static void afs_kill_super(struct super_block *sb); static struct inode *afs_alloc_inode(struct super_block *sb); static void afs_destroy_inode(struct inode *inode); static void afs_free_inode(struct inode *inode); static int afs_statfs(struct dentry *dentry, struct kstatfs *buf); static int afs_show_devname(struct seq_file *m, struct dentry *root); static int afs_show_options(struct seq_file *m, struct dentry *root); static int afs_init_fs_context(struct fs_context *fc); static const struct fs_parameter_spec afs_fs_parameters[]; struct file_system_type afs_fs_type = { .owner = THIS_MODULE, .name = "afs", .init_fs_context = afs_init_fs_context, .parameters = afs_fs_parameters, .kill_sb = afs_kill_super, .fs_flags = FS_RENAME_DOES_D_MOVE, }; MODULE_ALIAS_FS("afs"); int afs_net_id; static const struct super_operations afs_super_ops = { .statfs = afs_statfs, .alloc_inode = afs_alloc_inode, .write_inode = afs_write_inode, .drop_inode = afs_drop_inode, .destroy_inode = afs_destroy_inode, .free_inode = afs_free_inode, .evict_inode = afs_evict_inode, .show_devname = afs_show_devname, .show_options = afs_show_options, }; static struct kmem_cache *afs_inode_cachep; static atomic_t afs_count_active_inodes; enum afs_param { Opt_autocell, Opt_dyn, Opt_flock, Opt_source, }; static const struct constant_table afs_param_flock[] = { {"local", afs_flock_mode_local }, {"openafs", afs_flock_mode_openafs }, {"strict", afs_flock_mode_strict }, {"write", afs_flock_mode_write }, {} }; static const struct fs_parameter_spec afs_fs_parameters[] = { fsparam_flag ("autocell", Opt_autocell), fsparam_flag ("dyn", Opt_dyn), fsparam_enum ("flock", Opt_flock, afs_param_flock), fsparam_string("source", Opt_source), {} }; /* * initialise the filesystem */ int __init afs_fs_init(void) { int ret; _enter(""); /* create ourselves an inode cache */ atomic_set(&afs_count_active_inodes, 0); ret = -ENOMEM; afs_inode_cachep = kmem_cache_create("afs_inode_cache", sizeof(struct afs_vnode), 0, SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, afs_i_init_once); if (!afs_inode_cachep) { printk(KERN_NOTICE "kAFS: Failed to allocate inode cache\n"); return ret; } /* now export our filesystem to lesser mortals */ ret = register_filesystem(&afs_fs_type); if (ret < 0) { kmem_cache_destroy(afs_inode_cachep); _leave(" = %d", ret); return ret; } _leave(" = 0"); return 0; } /* * clean up the filesystem */ void afs_fs_exit(void) { _enter(""); afs_mntpt_kill_timer(); unregister_filesystem(&afs_fs_type); if (atomic_read(&afs_count_active_inodes) != 0) { printk("kAFS: %d active inode objects still present\n", atomic_read(&afs_count_active_inodes)); BUG(); } /* * Make sure all delayed rcu free inodes are 
flushed before we * destroy cache. */ rcu_barrier(); kmem_cache_destroy(afs_inode_cachep); _leave(""); } /* * Display the mount device name in /proc/mounts. */ static int afs_show_devname(struct seq_file *m, struct dentry *root) { struct afs_super_info *as = AFS_FS_S(root->d_sb); struct afs_volume *volume = as->volume; struct afs_cell *cell = as->cell; const char *suf = ""; char pref = '%'; if (as->dyn_root) { seq_puts(m, "none"); return 0; } switch (volume->type) { case AFSVL_RWVOL: break; case AFSVL_ROVOL: pref = '#'; if (volume->type_force) suf = ".readonly"; break; case AFSVL_BACKVOL: pref = '#'; suf = ".backup"; break; } seq_printf(m, "%c%s:%s%s", pref, cell->name, volume->name, suf); return 0; } /* * Display the mount options in /proc/mounts. */ static int afs_show_options(struct seq_file *m, struct dentry *root) { struct afs_super_info *as = AFS_FS_S(root->d_sb); const char *p = NULL; if (as->dyn_root) seq_puts(m, ",dyn"); if (test_bit(AFS_VNODE_AUTOCELL, &AFS_FS_I(d_inode(root))->flags)) seq_puts(m, ",autocell"); switch (as->flock_mode) { case afs_flock_mode_unset: break; case afs_flock_mode_local: p = "local"; break; case afs_flock_mode_openafs: p = "openafs"; break; case afs_flock_mode_strict: p = "strict"; break; case afs_flock_mode_write: p = "write"; break; } if (p) seq_printf(m, ",flock=%s", p); return 0; } /* * Parse the source name to get cell name, volume name, volume type and R/W * selector. * * This can be one of the following: * "%[cell:]volume[.]" R/W volume * "#[cell:]volume[.]" R/O or R/W volume (R/O parent), * or R/W (R/W parent) volume * "%[cell:]volume.readonly" R/O volume * "#[cell:]volume.readonly" R/O volume * "%[cell:]volume.backup" Backup volume * "#[cell:]volume.backup" Backup volume */ static int afs_parse_source(struct fs_context *fc, struct fs_parameter *param) { struct afs_fs_context *ctx = fc->fs_private; struct afs_cell *cell; const char *cellname, *suffix, *name = param->string; int cellnamesz; _enter(",%s", name); if (fc->source) return invalf(fc, "kAFS: Multiple sources not supported"); if (!name) { printk(KERN_ERR "kAFS: no volume name specified\n"); return -EINVAL; } if ((name[0] != '%' && name[0] != '#') || !name[1]) { /* To use dynroot, we don't want to have to provide a source */ if (strcmp(name, "none") == 0) { ctx->no_cell = true; return 0; } printk(KERN_ERR "kAFS: unparsable volume name\n"); return -EINVAL; } /* determine the type of volume we're looking for */ if (name[0] == '%') { ctx->type = AFSVL_RWVOL; ctx->force = true; } name++; /* split the cell name out if there is one */ ctx->volname = strchr(name, ':'); if (ctx->volname) { cellname = name; cellnamesz = ctx->volname - name; ctx->volname++; } else { ctx->volname = name; cellname = NULL; cellnamesz = 0; } /* the volume type is further affected by a possible suffix */ suffix = strrchr(ctx->volname, '.'); if (suffix) { if (strcmp(suffix, ".readonly") == 0) { ctx->type = AFSVL_ROVOL; ctx->force = true; } else if (strcmp(suffix, ".backup") == 0) { ctx->type = AFSVL_BACKVOL; ctx->force = true; } else if (suffix[1] == 0) { } else { suffix = NULL; } } ctx->volnamesz = suffix ? 
suffix - ctx->volname : strlen(ctx->volname); _debug("cell %*.*s [%p]", cellnamesz, cellnamesz, cellname ?: "", ctx->cell); /* lookup the cell record */ if (cellname) { cell = afs_lookup_cell(ctx->net, cellname, cellnamesz, NULL, false); if (IS_ERR(cell)) { pr_err("kAFS: unable to lookup cell '%*.*s'\n", cellnamesz, cellnamesz, cellname ?: ""); return PTR_ERR(cell); } afs_unuse_cell(ctx->net, ctx->cell, afs_cell_trace_unuse_parse); afs_see_cell(cell, afs_cell_trace_see_source); ctx->cell = cell; } _debug("CELL:%s [%p] VOLUME:%*.*s SUFFIX:%s TYPE:%d%s", ctx->cell->name, ctx->cell, ctx->volnamesz, ctx->volnamesz, ctx->volname, suffix ?: "-", ctx->type, ctx->force ? " FORCE" : ""); fc->source = param->string; param->string = NULL; return 0; } /* * Parse a single mount parameter. */ static int afs_parse_param(struct fs_context *fc, struct fs_parameter *param) { struct fs_parse_result result; struct afs_fs_context *ctx = fc->fs_private; int opt; opt = fs_parse(fc, afs_fs_parameters, param, &result); if (opt < 0) return opt; switch (opt) { case Opt_source: return afs_parse_source(fc, param); case Opt_autocell: ctx->autocell = true; break; case Opt_dyn: ctx->dyn_root = true; break; case Opt_flock: ctx->flock_mode = result.uint_32; break; default: return -EINVAL; } _leave(" = 0"); return 0; } /* * Validate the options, get the cell key and look up the volume. */ static int afs_validate_fc(struct fs_context *fc) { struct afs_fs_context *ctx = fc->fs_private; struct afs_volume *volume; struct afs_cell *cell; struct key *key; int ret; if (!ctx->dyn_root) { if (ctx->no_cell) { pr_warn("kAFS: Can only specify source 'none' with -o dyn\n"); return -EINVAL; } if (!ctx->cell) { pr_warn("kAFS: No cell specified\n"); return -EDESTADDRREQ; } reget_key: /* We try to do the mount securely. 
*/ key = afs_request_key(ctx->cell); if (IS_ERR(key)) return PTR_ERR(key); ctx->key = key; if (ctx->volume) { afs_put_volume(ctx->net, ctx->volume, afs_volume_trace_put_validate_fc); ctx->volume = NULL; } if (test_bit(AFS_CELL_FL_CHECK_ALIAS, &ctx->cell->flags)) { ret = afs_cell_detect_alias(ctx->cell, key); if (ret < 0) return ret; if (ret == 1) { _debug("switch to alias"); key_put(ctx->key); ctx->key = NULL; cell = afs_use_cell(ctx->cell->alias_of, afs_cell_trace_use_fc_alias); afs_unuse_cell(ctx->net, ctx->cell, afs_cell_trace_unuse_fc); ctx->cell = cell; goto reget_key; } } volume = afs_create_volume(ctx); if (IS_ERR(volume)) return PTR_ERR(volume); ctx->volume = volume; } return 0; } /* * check a superblock to see if it's the one we're looking for */ static int afs_test_super(struct super_block *sb, struct fs_context *fc) { struct afs_fs_context *ctx = fc->fs_private; struct afs_super_info *as = AFS_FS_S(sb); return (as->net_ns == fc->net_ns && as->volume && as->volume->vid == ctx->volume->vid && as->cell == ctx->cell && !as->dyn_root); } static int afs_dynroot_test_super(struct super_block *sb, struct fs_context *fc) { struct afs_super_info *as = AFS_FS_S(sb); return (as->net_ns == fc->net_ns && as->dyn_root); } static int afs_set_super(struct super_block *sb, struct fs_context *fc) { return set_anon_super(sb, NULL); } /* * fill in the superblock */ static int afs_fill_super(struct super_block *sb, struct afs_fs_context *ctx) { struct afs_super_info *as = AFS_FS_S(sb); struct inode *inode = NULL; int ret; _enter(""); /* fill in the superblock */ sb->s_blocksize = PAGE_SIZE; sb->s_blocksize_bits = PAGE_SHIFT; sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_magic = AFS_FS_MAGIC; sb->s_op = &afs_super_ops; if (!as->dyn_root) sb->s_xattr = afs_xattr_handlers; ret = super_setup_bdi(sb); if (ret) return ret; /* allocate the root inode and dentry */ if (as->dyn_root) { inode = afs_iget_pseudo_dir(sb, true); } else { sprintf(sb->s_id, "%llu", as->volume->vid); afs_activate_volume(as->volume); inode = afs_root_iget(sb, ctx->key); } if (IS_ERR(inode)) return PTR_ERR(inode); if (ctx->autocell || as->dyn_root) set_bit(AFS_VNODE_AUTOCELL, &AFS_FS_I(inode)->flags); ret = -ENOMEM; sb->s_root = d_make_root(inode); if (!sb->s_root) goto error; if (as->dyn_root) { sb->s_d_op = &afs_dynroot_dentry_operations; ret = afs_dynroot_populate(sb); if (ret < 0) goto error; } else { sb->s_d_op = &afs_fs_dentry_operations; rcu_assign_pointer(as->volume->sb, sb); } _leave(" = 0"); return 0; error: _leave(" = %d", ret); return ret; } static struct afs_super_info *afs_alloc_sbi(struct fs_context *fc) { struct afs_fs_context *ctx = fc->fs_private; struct afs_super_info *as; as = kzalloc(sizeof(struct afs_super_info), GFP_KERNEL); if (as) { as->net_ns = get_net(fc->net_ns); as->flock_mode = ctx->flock_mode; if (ctx->dyn_root) { as->dyn_root = true; } else { as->cell = afs_use_cell(ctx->cell, afs_cell_trace_use_sbi); as->volume = afs_get_volume(ctx->volume, afs_volume_trace_get_alloc_sbi); } } return as; } static void afs_destroy_sbi(struct afs_super_info *as) { if (as) { struct afs_net *net = afs_net(as->net_ns); afs_put_volume(net, as->volume, afs_volume_trace_put_destroy_sbi); afs_unuse_cell(net, as->cell, afs_cell_trace_unuse_sbi); put_net(as->net_ns); kfree(as); } } static void afs_kill_super(struct super_block *sb) { struct afs_super_info *as = AFS_FS_S(sb); if (as->dyn_root) afs_dynroot_depopulate(sb); /* Clear the callback interests (which will do ilookup5) before * deactivating the superblock. 
*/ if (as->volume) rcu_assign_pointer(as->volume->sb, NULL); kill_anon_super(sb); if (as->volume) afs_deactivate_volume(as->volume); afs_destroy_sbi(as); } /* * Get an AFS superblock and root directory. */ static int afs_get_tree(struct fs_context *fc) { struct afs_fs_context *ctx = fc->fs_private; struct super_block *sb; struct afs_super_info *as; int ret; ret = afs_validate_fc(fc); if (ret) goto error; _enter(""); /* allocate a superblock info record */ ret = -ENOMEM; as = afs_alloc_sbi(fc); if (!as) goto error; fc->s_fs_info = as; /* allocate a deviceless superblock */ sb = sget_fc(fc, as->dyn_root ? afs_dynroot_test_super : afs_test_super, afs_set_super); if (IS_ERR(sb)) { ret = PTR_ERR(sb); goto error; } if (!sb->s_root) { /* initial superblock/root creation */ _debug("create"); ret = afs_fill_super(sb, ctx); if (ret < 0) goto error_sb; sb->s_flags |= SB_ACTIVE; } else { _debug("reuse"); ASSERTCMP(sb->s_flags, &, SB_ACTIVE); } fc->root = dget(sb->s_root); trace_afs_get_tree(as->cell, as->volume); _leave(" = 0 [%p]", sb); return 0; error_sb: deactivate_locked_super(sb); error: _leave(" = %d", ret); return ret; } static void afs_free_fc(struct fs_context *fc) { struct afs_fs_context *ctx = fc->fs_private; afs_destroy_sbi(fc->s_fs_info); afs_put_volume(ctx->net, ctx->volume, afs_volume_trace_put_free_fc); afs_unuse_cell(ctx->net, ctx->cell, afs_cell_trace_unuse_fc); key_put(ctx->key); kfree(ctx); } static const struct fs_context_operations afs_context_ops = { .free = afs_free_fc, .parse_param = afs_parse_param, .get_tree = afs_get_tree, }; /* * Set up the filesystem mount context. */ static int afs_init_fs_context(struct fs_context *fc) { struct afs_fs_context *ctx; struct afs_cell *cell; ctx = kzalloc(sizeof(struct afs_fs_context), GFP_KERNEL); if (!ctx) return -ENOMEM; ctx->type = AFSVL_ROVOL; ctx->net = afs_net(fc->net_ns); /* Default to the workstation cell. */ cell = afs_find_cell(ctx->net, NULL, 0, afs_cell_trace_use_fc); if (IS_ERR(cell)) cell = NULL; ctx->cell = cell; fc->fs_private = ctx; fc->ops = &afs_context_ops; return 0; } /* * Initialise an inode cache slab element prior to any use. Note that * afs_alloc_inode() *must* reset anything that could incorrectly leak from one * inode to another. */ static void afs_i_init_once(void *_vnode) { struct afs_vnode *vnode = _vnode; memset(vnode, 0, sizeof(*vnode)); inode_init_once(&vnode->netfs.inode); mutex_init(&vnode->io_lock); init_rwsem(&vnode->validate_lock); spin_lock_init(&vnode->wb_lock); spin_lock_init(&vnode->lock); INIT_LIST_HEAD(&vnode->wb_keys); INIT_LIST_HEAD(&vnode->pending_locks); INIT_LIST_HEAD(&vnode->granted_locks); INIT_DELAYED_WORK(&vnode->lock_work, afs_lock_work); INIT_LIST_HEAD(&vnode->cb_mmap_link); seqlock_init(&vnode->cb_lock); } /* * allocate an AFS inode struct from our slab cache */ static struct inode *afs_alloc_inode(struct super_block *sb) { struct afs_vnode *vnode; vnode = alloc_inode_sb(sb, afs_inode_cachep, GFP_KERNEL); if (!vnode) return NULL; atomic_inc(&afs_count_active_inodes); /* Reset anything that shouldn't leak from one inode to the next. 
*/ memset(&vnode->fid, 0, sizeof(vnode->fid)); memset(&vnode->status, 0, sizeof(vnode->status)); afs_vnode_set_cache(vnode, NULL); vnode->volume = NULL; vnode->lock_key = NULL; vnode->permit_cache = NULL; vnode->flags = 1 << AFS_VNODE_UNSET; vnode->lock_state = AFS_VNODE_LOCK_NONE; init_rwsem(&vnode->rmdir_lock); INIT_WORK(&vnode->cb_work, afs_invalidate_mmap_work); _leave(" = %p", &vnode->netfs.inode); return &vnode->netfs.inode; } static void afs_free_inode(struct inode *inode) { kmem_cache_free(afs_inode_cachep, AFS_FS_I(inode)); } /* * destroy an AFS inode struct */ static void afs_destroy_inode(struct inode *inode) { struct afs_vnode *vnode = AFS_FS_I(inode); _enter("%p{%llx:%llu}", inode, vnode->fid.vid, vnode->fid.vnode); _debug("DESTROY INODE %p", inode); atomic_dec(&afs_count_active_inodes); } static void afs_get_volume_status_success(struct afs_operation *op) { struct afs_volume_status *vs = &op->volstatus.vs; struct kstatfs *buf = op->volstatus.buf; if (vs->max_quota == 0) buf->f_blocks = vs->part_max_blocks; else buf->f_blocks = vs->max_quota; if (buf->f_blocks > vs->blocks_in_use) buf->f_bavail = buf->f_bfree = buf->f_blocks - vs->blocks_in_use; } static const struct afs_operation_ops afs_get_volume_status_operation = { .issue_afs_rpc = afs_fs_get_volume_status, .issue_yfs_rpc = yfs_fs_get_volume_status, .success = afs_get_volume_status_success, }; /* * return information about an AFS volume */ static int afs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct afs_super_info *as = AFS_FS_S(dentry->d_sb); struct afs_operation *op; struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry)); buf->f_type = dentry->d_sb->s_magic; buf->f_bsize = AFS_BLOCK_SIZE; buf->f_namelen = AFSNAMEMAX - 1; if (as->dyn_root) { buf->f_blocks = 1; buf->f_bavail = 0; buf->f_bfree = 0; return 0; } op = afs_alloc_operation(NULL, as->volume); if (IS_ERR(op)) return PTR_ERR(op); afs_op_set_vnode(op, 0, vnode); op->nr_files = 1; op->volstatus.buf = buf; op->ops = &afs_get_volume_status_operation; return afs_do_sync_operation(op); }
linux-master
fs/afs/super.c
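afs_parse_source() above splits a mount source of the form "%[cell:]volume[.suffix]" or "#[cell:]volume[.suffix]": the prefix character selects the preferred volume type, an optional cell name precedes the colon, and a trailing ".readonly" or ".backup" suffix (found with strrchr, so volume names may contain dots) refines the type. A simplified userspace sketch of that split, without the cell lookup or type-forcing rules:

/*
 * Illustrative sketch (userspace): splitting an AFS-style mount source
 * into prefix, cell, volume and suffix, the same shape afs_parse_source()
 * handles.  Error handling and the volume-type rules are simplified.
 */
#include <stdio.h>
#include <string.h>

static int parse_source(const char *src)
{
	const char *name, *colon, *dot, *vol;

	if ((src[0] != '%' && src[0] != '#') || !src[1])
		return -1;		/* unparsable */

	name = src + 1;
	colon = strchr(name, ':');
	vol = colon ? colon + 1 : name;
	dot = strrchr(vol, '.');	/* last dot only, like the kernel */

	printf("prefix '%c' (%s)\n", src[0],
	       src[0] == '%' ? "prefer R/W" : "prefer R/O");
	if (colon)
		printf("cell   '%.*s'\n", (int)(colon - name), name);
	printf("volume '%.*s'\n",
	       (int)((dot ? dot : vol + strlen(vol)) - vol), vol);
	if (dot)
		printf("suffix '%s'\n", dot);
	return 0;
}

int main(void)
{
	parse_source("#example.com:root.cell.readonly");
	parse_source("%myvol");
	return 0;
}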
// SPDX-License-Identifier: GPL-2.0-or-later /* AFS vlserver list management. * * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved. * Written by David Howells ([email protected]) */ #include <linux/kernel.h> #include <linux/slab.h> #include "internal.h" struct afs_vlserver *afs_alloc_vlserver(const char *name, size_t name_len, unsigned short port) { struct afs_vlserver *vlserver; vlserver = kzalloc(struct_size(vlserver, name, name_len + 1), GFP_KERNEL); if (vlserver) { refcount_set(&vlserver->ref, 1); rwlock_init(&vlserver->lock); init_waitqueue_head(&vlserver->probe_wq); spin_lock_init(&vlserver->probe_lock); vlserver->rtt = UINT_MAX; vlserver->name_len = name_len; vlserver->port = port; memcpy(vlserver->name, name, name_len); } return vlserver; } static void afs_vlserver_rcu(struct rcu_head *rcu) { struct afs_vlserver *vlserver = container_of(rcu, struct afs_vlserver, rcu); afs_put_addrlist(rcu_access_pointer(vlserver->addresses)); kfree_rcu(vlserver, rcu); } void afs_put_vlserver(struct afs_net *net, struct afs_vlserver *vlserver) { if (vlserver && refcount_dec_and_test(&vlserver->ref)) call_rcu(&vlserver->rcu, afs_vlserver_rcu); } struct afs_vlserver_list *afs_alloc_vlserver_list(unsigned int nr_servers) { struct afs_vlserver_list *vllist; vllist = kzalloc(struct_size(vllist, servers, nr_servers), GFP_KERNEL); if (vllist) { refcount_set(&vllist->ref, 1); rwlock_init(&vllist->lock); } return vllist; } void afs_put_vlserverlist(struct afs_net *net, struct afs_vlserver_list *vllist) { if (vllist) { if (refcount_dec_and_test(&vllist->ref)) { int i; for (i = 0; i < vllist->nr_servers; i++) { afs_put_vlserver(net, vllist->servers[i].server); } kfree_rcu(vllist, rcu); } } } static u16 afs_extract_le16(const u8 **_b) { u16 val; val = (u16)*(*_b)++ << 0; val |= (u16)*(*_b)++ << 8; return val; } /* * Build a VL server address list from a DNS queried server list. */ static struct afs_addr_list *afs_extract_vl_addrs(const u8 **_b, const u8 *end, u8 nr_addrs, u16 port) { struct afs_addr_list *alist; const u8 *b = *_b; int ret = -EINVAL; alist = afs_alloc_addrlist(nr_addrs, VL_SERVICE, port); if (!alist) return ERR_PTR(-ENOMEM); if (nr_addrs == 0) return alist; for (; nr_addrs > 0 && end - b >= nr_addrs; nr_addrs--) { struct dns_server_list_v1_address hdr; __be32 x[4]; hdr.address_type = *b++; switch (hdr.address_type) { case DNS_ADDRESS_IS_IPV4: if (end - b < 4) { _leave(" = -EINVAL [short inet]"); goto error; } memcpy(x, b, 4); afs_merge_fs_addr4(alist, x[0], port); b += 4; break; case DNS_ADDRESS_IS_IPV6: if (end - b < 16) { _leave(" = -EINVAL [short inet6]"); goto error; } memcpy(x, b, 16); afs_merge_fs_addr6(alist, x, port); b += 16; break; default: _leave(" = -EADDRNOTAVAIL [unknown af %u]", hdr.address_type); ret = -EADDRNOTAVAIL; goto error; } } /* Start with IPv6 if available. */ if (alist->nr_ipv4 < alist->nr_addrs) alist->preferred = alist->nr_ipv4; *_b = b; return alist; error: *_b = b; afs_put_addrlist(alist); return ERR_PTR(ret); } /* * Build a VL server list from a DNS queried server list. 
*/ struct afs_vlserver_list *afs_extract_vlserver_list(struct afs_cell *cell, const void *buffer, size_t buffer_size) { const struct dns_server_list_v1_header *hdr = buffer; struct dns_server_list_v1_server bs; struct afs_vlserver_list *vllist, *previous; struct afs_addr_list *addrs; struct afs_vlserver *server; const u8 *b = buffer, *end = buffer + buffer_size; int ret = -ENOMEM, nr_servers, i, j; _enter(""); /* Check that it's a server list, v1 */ if (end - b < sizeof(*hdr) || hdr->hdr.content != DNS_PAYLOAD_IS_SERVER_LIST || hdr->hdr.version != 1) { pr_notice("kAFS: Got DNS record [%u,%u] len %zu\n", hdr->hdr.content, hdr->hdr.version, end - b); ret = -EDESTADDRREQ; goto dump; } nr_servers = hdr->nr_servers; vllist = afs_alloc_vlserver_list(nr_servers); if (!vllist) return ERR_PTR(-ENOMEM); vllist->source = (hdr->source < NR__dns_record_source) ? hdr->source : NR__dns_record_source; vllist->status = (hdr->status < NR__dns_lookup_status) ? hdr->status : NR__dns_lookup_status; read_lock(&cell->vl_servers_lock); previous = afs_get_vlserverlist( rcu_dereference_protected(cell->vl_servers, lockdep_is_held(&cell->vl_servers_lock))); read_unlock(&cell->vl_servers_lock); b += sizeof(*hdr); while (end - b >= sizeof(bs)) { bs.name_len = afs_extract_le16(&b); bs.priority = afs_extract_le16(&b); bs.weight = afs_extract_le16(&b); bs.port = afs_extract_le16(&b); bs.source = *b++; bs.status = *b++; bs.protocol = *b++; bs.nr_addrs = *b++; _debug("extract %u %u %u %u %u %u %*.*s", bs.name_len, bs.priority, bs.weight, bs.port, bs.protocol, bs.nr_addrs, bs.name_len, bs.name_len, b); if (end - b < bs.name_len) break; ret = -EPROTONOSUPPORT; if (bs.protocol == DNS_SERVER_PROTOCOL_UNSPECIFIED) { bs.protocol = DNS_SERVER_PROTOCOL_UDP; } else if (bs.protocol != DNS_SERVER_PROTOCOL_UDP) { _leave(" = [proto %u]", bs.protocol); goto error; } if (bs.port == 0) bs.port = AFS_VL_PORT; if (bs.source > NR__dns_record_source) bs.source = NR__dns_record_source; if (bs.status > NR__dns_lookup_status) bs.status = NR__dns_lookup_status; /* See if we can update an old server record */ server = NULL; for (i = 0; i < previous->nr_servers; i++) { struct afs_vlserver *p = previous->servers[i].server; if (p->name_len == bs.name_len && p->port == bs.port && strncasecmp(b, p->name, bs.name_len) == 0) { server = afs_get_vlserver(p); break; } } if (!server) { ret = -ENOMEM; server = afs_alloc_vlserver(b, bs.name_len, bs.port); if (!server) goto error; } b += bs.name_len; /* Extract the addresses - note that we can't skip this as we * have to advance the payload pointer. 
*/ addrs = afs_extract_vl_addrs(&b, end, bs.nr_addrs, bs.port); if (IS_ERR(addrs)) { ret = PTR_ERR(addrs); goto error_2; } if (vllist->nr_servers >= nr_servers) { _debug("skip %u >= %u", vllist->nr_servers, nr_servers); afs_put_addrlist(addrs); afs_put_vlserver(cell->net, server); continue; } addrs->source = bs.source; addrs->status = bs.status; if (addrs->nr_addrs == 0) { afs_put_addrlist(addrs); if (!rcu_access_pointer(server->addresses)) { afs_put_vlserver(cell->net, server); continue; } } else { struct afs_addr_list *old = addrs; write_lock(&server->lock); old = rcu_replace_pointer(server->addresses, old, lockdep_is_held(&server->lock)); write_unlock(&server->lock); afs_put_addrlist(old); } /* TODO: Might want to check for duplicates */ /* Insertion-sort by priority and weight */ for (j = 0; j < vllist->nr_servers; j++) { if (bs.priority < vllist->servers[j].priority) break; /* Lower preferable */ if (bs.priority == vllist->servers[j].priority && bs.weight > vllist->servers[j].weight) break; /* Higher preferable */ } if (j < vllist->nr_servers) { memmove(vllist->servers + j + 1, vllist->servers + j, (vllist->nr_servers - j) * sizeof(struct afs_vlserver_entry)); } clear_bit(AFS_VLSERVER_FL_PROBED, &server->flags); vllist->servers[j].priority = bs.priority; vllist->servers[j].weight = bs.weight; vllist->servers[j].server = server; vllist->nr_servers++; } if (b != end) { _debug("parse error %zd", b - end); goto error; } afs_put_vlserverlist(cell->net, previous); _leave(" = ok [%u]", vllist->nr_servers); return vllist; error_2: afs_put_vlserver(cell->net, server); error: afs_put_vlserverlist(cell->net, vllist); afs_put_vlserverlist(cell->net, previous); dump: if (ret != -ENOMEM) { printk(KERN_DEBUG "DNS: at %zu\n", (const void *)b - buffer); print_hex_dump_bytes("DNS: ", DUMP_PREFIX_NONE, buffer, buffer_size); } return ERR_PTR(ret); }
linux-master
fs/afs/vl_list.c
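afs_extract_le16() above pulls little-endian 16-bit fields out of the DNS-supplied byte stream while advancing the cursor; afs_extract_vlserver_list() then calls it repeatedly to decode each server record. A minimal userspace sketch of that helper pattern:

/*
 * Illustrative sketch (userspace): extracting little-endian 16-bit fields
 * from a raw byte stream while advancing the cursor, the same helper
 * pattern as afs_extract_le16().
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t extract_le16(const uint8_t **pp)
{
	const uint8_t *p = *pp;
	uint16_t val = (uint16_t)p[0] | ((uint16_t)p[1] << 8);

	*pp = p + 2;	/* advance the caller's cursor */
	return val;
}

int main(void)
{
	const uint8_t buf[] = { 0x34, 0x12, 0xff, 0x00 };
	const uint8_t *p = buf;

	printf("0x%04x\n", extract_le16(&p));	/* 0x1234 */
	printf("0x%04x\n", extract_le16(&p));	/* 0x00ff */
	return 0;
}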
// SPDX-License-Identifier: GPL-2.0-or-later /* AFS File Server client stubs * * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells ([email protected]) */ #include <linux/init.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/circ_buf.h> #include <linux/iversion.h> #include <linux/netfs.h> #include "internal.h" #include "afs_fs.h" #include "xdr_fs.h" /* * decode an AFSFid block */ static void xdr_decode_AFSFid(const __be32 **_bp, struct afs_fid *fid) { const __be32 *bp = *_bp; fid->vid = ntohl(*bp++); fid->vnode = ntohl(*bp++); fid->unique = ntohl(*bp++); *_bp = bp; } /* * Dump a bad file status record. */ static void xdr_dump_bad(const __be32 *bp) { __be32 x[4]; int i; pr_notice("AFS XDR: Bad status record\n"); for (i = 0; i < 5 * 4 * 4; i += 16) { memcpy(x, bp, 16); bp += 4; pr_notice("%03x: %08x %08x %08x %08x\n", i, ntohl(x[0]), ntohl(x[1]), ntohl(x[2]), ntohl(x[3])); } memcpy(x, bp, 4); pr_notice("0x50: %08x\n", ntohl(x[0])); } /* * decode an AFSFetchStatus block */ static void xdr_decode_AFSFetchStatus(const __be32 **_bp, struct afs_call *call, struct afs_status_cb *scb) { const struct afs_xdr_AFSFetchStatus *xdr = (const void *)*_bp; struct afs_file_status *status = &scb->status; bool inline_error = (call->operation_ID == afs_FS_InlineBulkStatus); u64 data_version, size; u32 type, abort_code; abort_code = ntohl(xdr->abort_code); if (xdr->if_version != htonl(AFS_FSTATUS_VERSION)) { if (xdr->if_version == htonl(0) && abort_code != 0 && inline_error) { /* The OpenAFS fileserver has a bug in FS.InlineBulkStatus * whereby it doesn't set the interface version in the error * case. */ status->abort_code = abort_code; scb->have_error = true; goto advance; } pr_warn("Unknown AFSFetchStatus version %u\n", ntohl(xdr->if_version)); goto bad; } if (abort_code != 0 && inline_error) { status->abort_code = abort_code; scb->have_error = true; goto advance; } type = ntohl(xdr->type); switch (type) { case AFS_FTYPE_FILE: case AFS_FTYPE_DIR: case AFS_FTYPE_SYMLINK: status->type = type; break; default: goto bad; } status->nlink = ntohl(xdr->nlink); status->author = ntohl(xdr->author); status->owner = ntohl(xdr->owner); status->caller_access = ntohl(xdr->caller_access); /* Ticket dependent */ status->anon_access = ntohl(xdr->anon_access); status->mode = ntohl(xdr->mode) & S_IALLUGO; status->group = ntohl(xdr->group); status->lock_count = ntohl(xdr->lock_count); status->mtime_client.tv_sec = ntohl(xdr->mtime_client); status->mtime_client.tv_nsec = 0; status->mtime_server.tv_sec = ntohl(xdr->mtime_server); status->mtime_server.tv_nsec = 0; size = (u64)ntohl(xdr->size_lo); size |= (u64)ntohl(xdr->size_hi) << 32; status->size = size; data_version = (u64)ntohl(xdr->data_version_lo); data_version |= (u64)ntohl(xdr->data_version_hi) << 32; status->data_version = data_version; scb->have_status = true; advance: *_bp = (const void *)*_bp + sizeof(*xdr); return; bad: xdr_dump_bad(*_bp); afs_protocol_error(call, afs_eproto_bad_status); goto advance; } static time64_t xdr_decode_expiry(struct afs_call *call, u32 expiry) { return ktime_divns(call->issue_time, NSEC_PER_SEC) + expiry; } static void xdr_decode_AFSCallBack(const __be32 **_bp, struct afs_call *call, struct afs_status_cb *scb) { struct afs_callback *cb = &scb->callback; const __be32 *bp = *_bp; bp++; /* version */ cb->expires_at = xdr_decode_expiry(call, ntohl(*bp++)); bp++; /* type */ scb->have_cb = true; *_bp = bp; } /* * decode an AFSVolSync block */ static void xdr_decode_AFSVolSync(const __be32 
**_bp, struct afs_volsync *volsync) { const __be32 *bp = *_bp; u32 creation; creation = ntohl(*bp++); bp++; /* spare2 */ bp++; /* spare3 */ bp++; /* spare4 */ bp++; /* spare5 */ bp++; /* spare6 */ *_bp = bp; if (volsync) volsync->creation = creation; } /* * encode the requested attributes into an AFSStoreStatus block */ static void xdr_encode_AFS_StoreStatus(__be32 **_bp, struct iattr *attr) { __be32 *bp = *_bp; u32 mask = 0, mtime = 0, owner = 0, group = 0, mode = 0; mask = 0; if (attr->ia_valid & ATTR_MTIME) { mask |= AFS_SET_MTIME; mtime = attr->ia_mtime.tv_sec; } if (attr->ia_valid & ATTR_UID) { mask |= AFS_SET_OWNER; owner = from_kuid(&init_user_ns, attr->ia_uid); } if (attr->ia_valid & ATTR_GID) { mask |= AFS_SET_GROUP; group = from_kgid(&init_user_ns, attr->ia_gid); } if (attr->ia_valid & ATTR_MODE) { mask |= AFS_SET_MODE; mode = attr->ia_mode & S_IALLUGO; } *bp++ = htonl(mask); *bp++ = htonl(mtime); *bp++ = htonl(owner); *bp++ = htonl(group); *bp++ = htonl(mode); *bp++ = 0; /* segment size */ *_bp = bp; } /* * decode an AFSFetchVolumeStatus block */ static void xdr_decode_AFSFetchVolumeStatus(const __be32 **_bp, struct afs_volume_status *vs) { const __be32 *bp = *_bp; vs->vid = ntohl(*bp++); vs->parent_id = ntohl(*bp++); vs->online = ntohl(*bp++); vs->in_service = ntohl(*bp++); vs->blessed = ntohl(*bp++); vs->needs_salvage = ntohl(*bp++); vs->type = ntohl(*bp++); vs->min_quota = ntohl(*bp++); vs->max_quota = ntohl(*bp++); vs->blocks_in_use = ntohl(*bp++); vs->part_blocks_avail = ntohl(*bp++); vs->part_max_blocks = ntohl(*bp++); vs->vol_copy_date = 0; vs->vol_backup_date = 0; *_bp = bp; } /* * deliver reply data to an FS.FetchStatus */ static int afs_deliver_fs_fetch_status(struct afs_call *call) { struct afs_operation *op = call->op; struct afs_vnode_param *vp = &op->file[op->fetch_status.which]; const __be32 *bp; int ret; ret = afs_transfer_reply(call); if (ret < 0) return ret; /* unmarshall the reply once we've received all of it */ bp = call->buffer; xdr_decode_AFSFetchStatus(&bp, call, &vp->scb); xdr_decode_AFSCallBack(&bp, call, &vp->scb); xdr_decode_AFSVolSync(&bp, &op->volsync); _leave(" = 0 [done]"); return 0; } /* * FS.FetchStatus operation type */ static const struct afs_call_type afs_RXFSFetchStatus = { .name = "FS.FetchStatus", .op = afs_FS_FetchStatus, .deliver = afs_deliver_fs_fetch_status, .destructor = afs_flat_call_destructor, }; /* * fetch the status information for a file */ void afs_fs_fetch_status(struct afs_operation *op) { struct afs_vnode_param *vp = &op->file[op->fetch_status.which]; struct afs_call *call; __be32 *bp; _enter(",%x,{%llx:%llu},,", key_serial(op->key), vp->fid.vid, vp->fid.vnode); call = afs_alloc_flat_call(op->net, &afs_RXFSFetchStatus, 16, (21 + 3 + 6) * 4); if (!call) return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp[0] = htonl(FSFETCHSTATUS); bp[1] = htonl(vp->fid.vid); bp[2] = htonl(vp->fid.vnode); bp[3] = htonl(vp->fid.unique); trace_afs_make_fs_call(call, &vp->fid); afs_make_op_call(op, call, GFP_NOFS); } /* * deliver reply data to an FS.FetchData */ static int afs_deliver_fs_fetch_data(struct afs_call *call) { struct afs_operation *op = call->op; struct afs_vnode_param *vp = &op->file[0]; struct afs_read *req = op->fetch.req; const __be32 *bp; int ret; _enter("{%u,%zu,%zu/%llu}", call->unmarshall, call->iov_len, iov_iter_count(call->iter), req->actual_len); switch (call->unmarshall) { case 0: req->actual_len = 0; call->unmarshall++; if (call->operation_ID == FSFETCHDATA64) { afs_extract_to_tmp64(call); } 
else { call->tmp_u = htonl(0); afs_extract_to_tmp(call); } fallthrough; /* Extract the returned data length into * ->actual_len. This may indicate more or less data than was * requested will be returned. */ case 1: _debug("extract data length"); ret = afs_extract_data(call, true); if (ret < 0) return ret; req->actual_len = be64_to_cpu(call->tmp64); _debug("DATA length: %llu", req->actual_len); if (req->actual_len == 0) goto no_more_data; call->iter = req->iter; call->iov_len = min(req->actual_len, req->len); call->unmarshall++; fallthrough; /* extract the returned data */ case 2: _debug("extract data %zu/%llu", iov_iter_count(call->iter), req->actual_len); ret = afs_extract_data(call, true); if (ret < 0) return ret; call->iter = &call->def_iter; if (req->actual_len <= req->len) goto no_more_data; /* Discard any excess data the server gave us */ afs_extract_discard(call, req->actual_len - req->len); call->unmarshall = 3; fallthrough; case 3: _debug("extract discard %zu/%llu", iov_iter_count(call->iter), req->actual_len - req->len); ret = afs_extract_data(call, true); if (ret < 0) return ret; no_more_data: call->unmarshall = 4; afs_extract_to_buf(call, (21 + 3 + 6) * 4); fallthrough; /* extract the metadata */ case 4: ret = afs_extract_data(call, false); if (ret < 0) return ret; bp = call->buffer; xdr_decode_AFSFetchStatus(&bp, call, &vp->scb); xdr_decode_AFSCallBack(&bp, call, &vp->scb); xdr_decode_AFSVolSync(&bp, &op->volsync); req->data_version = vp->scb.status.data_version; req->file_size = vp->scb.status.size; call->unmarshall++; fallthrough; case 5: break; } _leave(" = 0 [done]"); return 0; } /* * FS.FetchData operation type */ static const struct afs_call_type afs_RXFSFetchData = { .name = "FS.FetchData", .op = afs_FS_FetchData, .deliver = afs_deliver_fs_fetch_data, .destructor = afs_flat_call_destructor, }; static const struct afs_call_type afs_RXFSFetchData64 = { .name = "FS.FetchData64", .op = afs_FS_FetchData64, .deliver = afs_deliver_fs_fetch_data, .destructor = afs_flat_call_destructor, }; /* * fetch data from a very large file */ static void afs_fs_fetch_data64(struct afs_operation *op) { struct afs_vnode_param *vp = &op->file[0]; struct afs_read *req = op->fetch.req; struct afs_call *call; __be32 *bp; _enter(""); call = afs_alloc_flat_call(op->net, &afs_RXFSFetchData64, 32, (21 + 3 + 6) * 4); if (!call) return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp[0] = htonl(FSFETCHDATA64); bp[1] = htonl(vp->fid.vid); bp[2] = htonl(vp->fid.vnode); bp[3] = htonl(vp->fid.unique); bp[4] = htonl(upper_32_bits(req->pos)); bp[5] = htonl(lower_32_bits(req->pos)); bp[6] = 0; bp[7] = htonl(lower_32_bits(req->len)); trace_afs_make_fs_call(call, &vp->fid); afs_make_op_call(op, call, GFP_NOFS); } /* * fetch data from a file */ void afs_fs_fetch_data(struct afs_operation *op) { struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; struct afs_read *req = op->fetch.req; __be32 *bp; if (test_bit(AFS_SERVER_FL_HAS_FS64, &op->server->flags)) return afs_fs_fetch_data64(op); _enter(""); call = afs_alloc_flat_call(op->net, &afs_RXFSFetchData, 24, (21 + 3 + 6) * 4); if (!call) return afs_op_nomem(op); req->call_debug_id = call->debug_id; /* marshall the parameters */ bp = call->request; bp[0] = htonl(FSFETCHDATA); bp[1] = htonl(vp->fid.vid); bp[2] = htonl(vp->fid.vnode); bp[3] = htonl(vp->fid.unique); bp[4] = htonl(lower_32_bits(req->pos)); bp[5] = htonl(lower_32_bits(req->len)); trace_afs_make_fs_call(call, &vp->fid); afs_make_op_call(op, call, GFP_NOFS); } /* * deliver 
reply data to an FS.CreateFile or an FS.MakeDir */ static int afs_deliver_fs_create_vnode(struct afs_call *call) { struct afs_operation *op = call->op; struct afs_vnode_param *dvp = &op->file[0]; struct afs_vnode_param *vp = &op->file[1]; const __be32 *bp; int ret; ret = afs_transfer_reply(call); if (ret < 0) return ret; /* unmarshall the reply once we've received all of it */ bp = call->buffer; xdr_decode_AFSFid(&bp, &op->file[1].fid); xdr_decode_AFSFetchStatus(&bp, call, &vp->scb); xdr_decode_AFSFetchStatus(&bp, call, &dvp->scb); xdr_decode_AFSCallBack(&bp, call, &vp->scb); xdr_decode_AFSVolSync(&bp, &op->volsync); _leave(" = 0 [done]"); return 0; } /* * FS.CreateFile and FS.MakeDir operation type */ static const struct afs_call_type afs_RXFSCreateFile = { .name = "FS.CreateFile", .op = afs_FS_CreateFile, .deliver = afs_deliver_fs_create_vnode, .destructor = afs_flat_call_destructor, }; /* * Create a file. */ void afs_fs_create_file(struct afs_operation *op) { const struct qstr *name = &op->dentry->d_name; struct afs_vnode_param *dvp = &op->file[0]; struct afs_call *call; size_t namesz, reqsz, padsz; __be32 *bp; _enter(""); namesz = name->len; padsz = (4 - (namesz & 3)) & 3; reqsz = (5 * 4) + namesz + padsz + (6 * 4); call = afs_alloc_flat_call(op->net, &afs_RXFSCreateFile, reqsz, (3 + 21 + 21 + 3 + 6) * 4); if (!call) return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; *bp++ = htonl(FSCREATEFILE); *bp++ = htonl(dvp->fid.vid); *bp++ = htonl(dvp->fid.vnode); *bp++ = htonl(dvp->fid.unique); *bp++ = htonl(namesz); memcpy(bp, name->name, namesz); bp = (void *) bp + namesz; if (padsz > 0) { memset(bp, 0, padsz); bp = (void *) bp + padsz; } *bp++ = htonl(AFS_SET_MODE | AFS_SET_MTIME); *bp++ = htonl(op->mtime.tv_sec); /* mtime */ *bp++ = 0; /* owner */ *bp++ = 0; /* group */ *bp++ = htonl(op->create.mode & S_IALLUGO); /* unix mode */ *bp++ = 0; /* segment size */ trace_afs_make_fs_call1(call, &dvp->fid, name); afs_make_op_call(op, call, GFP_NOFS); } static const struct afs_call_type afs_RXFSMakeDir = { .name = "FS.MakeDir", .op = afs_FS_MakeDir, .deliver = afs_deliver_fs_create_vnode, .destructor = afs_flat_call_destructor, }; /* * Create a new directory */ void afs_fs_make_dir(struct afs_operation *op) { const struct qstr *name = &op->dentry->d_name; struct afs_vnode_param *dvp = &op->file[0]; struct afs_call *call; size_t namesz, reqsz, padsz; __be32 *bp; _enter(""); namesz = name->len; padsz = (4 - (namesz & 3)) & 3; reqsz = (5 * 4) + namesz + padsz + (6 * 4); call = afs_alloc_flat_call(op->net, &afs_RXFSMakeDir, reqsz, (3 + 21 + 21 + 3 + 6) * 4); if (!call) return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; *bp++ = htonl(FSMAKEDIR); *bp++ = htonl(dvp->fid.vid); *bp++ = htonl(dvp->fid.vnode); *bp++ = htonl(dvp->fid.unique); *bp++ = htonl(namesz); memcpy(bp, name->name, namesz); bp = (void *) bp + namesz; if (padsz > 0) { memset(bp, 0, padsz); bp = (void *) bp + padsz; } *bp++ = htonl(AFS_SET_MODE | AFS_SET_MTIME); *bp++ = htonl(op->mtime.tv_sec); /* mtime */ *bp++ = 0; /* owner */ *bp++ = 0; /* group */ *bp++ = htonl(op->create.mode & S_IALLUGO); /* unix mode */ *bp++ = 0; /* segment size */ trace_afs_make_fs_call1(call, &dvp->fid, name); afs_make_op_call(op, call, GFP_NOFS); } /* * Deliver reply data to any operation that returns status and volume sync. 
*/ static int afs_deliver_fs_file_status_and_vol(struct afs_call *call) { struct afs_operation *op = call->op; struct afs_vnode_param *vp = &op->file[0]; const __be32 *bp; int ret; ret = afs_transfer_reply(call); if (ret < 0) return ret; /* unmarshall the reply once we've received all of it */ bp = call->buffer; xdr_decode_AFSFetchStatus(&bp, call, &vp->scb); xdr_decode_AFSVolSync(&bp, &op->volsync); _leave(" = 0 [done]"); return 0; } /* * FS.RemoveFile operation type */ static const struct afs_call_type afs_RXFSRemoveFile = { .name = "FS.RemoveFile", .op = afs_FS_RemoveFile, .deliver = afs_deliver_fs_file_status_and_vol, .destructor = afs_flat_call_destructor, }; /* * Remove a file. */ void afs_fs_remove_file(struct afs_operation *op) { const struct qstr *name = &op->dentry->d_name; struct afs_vnode_param *dvp = &op->file[0]; struct afs_call *call; size_t namesz, reqsz, padsz; __be32 *bp; _enter(""); namesz = name->len; padsz = (4 - (namesz & 3)) & 3; reqsz = (5 * 4) + namesz + padsz; call = afs_alloc_flat_call(op->net, &afs_RXFSRemoveFile, reqsz, (21 + 6) * 4); if (!call) return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; *bp++ = htonl(FSREMOVEFILE); *bp++ = htonl(dvp->fid.vid); *bp++ = htonl(dvp->fid.vnode); *bp++ = htonl(dvp->fid.unique); *bp++ = htonl(namesz); memcpy(bp, name->name, namesz); bp = (void *) bp + namesz; if (padsz > 0) { memset(bp, 0, padsz); bp = (void *) bp + padsz; } trace_afs_make_fs_call1(call, &dvp->fid, name); afs_make_op_call(op, call, GFP_NOFS); } static const struct afs_call_type afs_RXFSRemoveDir = { .name = "FS.RemoveDir", .op = afs_FS_RemoveDir, .deliver = afs_deliver_fs_file_status_and_vol, .destructor = afs_flat_call_destructor, }; /* * Remove a directory. */ void afs_fs_remove_dir(struct afs_operation *op) { const struct qstr *name = &op->dentry->d_name; struct afs_vnode_param *dvp = &op->file[0]; struct afs_call *call; size_t namesz, reqsz, padsz; __be32 *bp; _enter(""); namesz = name->len; padsz = (4 - (namesz & 3)) & 3; reqsz = (5 * 4) + namesz + padsz; call = afs_alloc_flat_call(op->net, &afs_RXFSRemoveDir, reqsz, (21 + 6) * 4); if (!call) return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; *bp++ = htonl(FSREMOVEDIR); *bp++ = htonl(dvp->fid.vid); *bp++ = htonl(dvp->fid.vnode); *bp++ = htonl(dvp->fid.unique); *bp++ = htonl(namesz); memcpy(bp, name->name, namesz); bp = (void *) bp + namesz; if (padsz > 0) { memset(bp, 0, padsz); bp = (void *) bp + padsz; } trace_afs_make_fs_call1(call, &dvp->fid, name); afs_make_op_call(op, call, GFP_NOFS); } /* * deliver reply data to an FS.Link */ static int afs_deliver_fs_link(struct afs_call *call) { struct afs_operation *op = call->op; struct afs_vnode_param *dvp = &op->file[0]; struct afs_vnode_param *vp = &op->file[1]; const __be32 *bp; int ret; _enter("{%u}", call->unmarshall); ret = afs_transfer_reply(call); if (ret < 0) return ret; /* unmarshall the reply once we've received all of it */ bp = call->buffer; xdr_decode_AFSFetchStatus(&bp, call, &vp->scb); xdr_decode_AFSFetchStatus(&bp, call, &dvp->scb); xdr_decode_AFSVolSync(&bp, &op->volsync); _leave(" = 0 [done]"); return 0; } /* * FS.Link operation type */ static const struct afs_call_type afs_RXFSLink = { .name = "FS.Link", .op = afs_FS_Link, .deliver = afs_deliver_fs_link, .destructor = afs_flat_call_destructor, }; /* * make a hard link */ void afs_fs_link(struct afs_operation *op) { const struct qstr *name = &op->dentry->d_name; struct afs_vnode_param *dvp = &op->file[0]; struct afs_vnode_param *vp = 
&op->file[1]; struct afs_call *call; size_t namesz, reqsz, padsz; __be32 *bp; _enter(""); namesz = name->len; padsz = (4 - (namesz & 3)) & 3; reqsz = (5 * 4) + namesz + padsz + (3 * 4); call = afs_alloc_flat_call(op->net, &afs_RXFSLink, reqsz, (21 + 21 + 6) * 4); if (!call) return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; *bp++ = htonl(FSLINK); *bp++ = htonl(dvp->fid.vid); *bp++ = htonl(dvp->fid.vnode); *bp++ = htonl(dvp->fid.unique); *bp++ = htonl(namesz); memcpy(bp, name->name, namesz); bp = (void *) bp + namesz; if (padsz > 0) { memset(bp, 0, padsz); bp = (void *) bp + padsz; } *bp++ = htonl(vp->fid.vid); *bp++ = htonl(vp->fid.vnode); *bp++ = htonl(vp->fid.unique); trace_afs_make_fs_call1(call, &vp->fid, name); afs_make_op_call(op, call, GFP_NOFS); } /* * deliver reply data to an FS.Symlink */ static int afs_deliver_fs_symlink(struct afs_call *call) { struct afs_operation *op = call->op; struct afs_vnode_param *dvp = &op->file[0]; struct afs_vnode_param *vp = &op->file[1]; const __be32 *bp; int ret; _enter("{%u}", call->unmarshall); ret = afs_transfer_reply(call); if (ret < 0) return ret; /* unmarshall the reply once we've received all of it */ bp = call->buffer; xdr_decode_AFSFid(&bp, &vp->fid); xdr_decode_AFSFetchStatus(&bp, call, &vp->scb); xdr_decode_AFSFetchStatus(&bp, call, &dvp->scb); xdr_decode_AFSVolSync(&bp, &op->volsync); _leave(" = 0 [done]"); return 0; } /* * FS.Symlink operation type */ static const struct afs_call_type afs_RXFSSymlink = { .name = "FS.Symlink", .op = afs_FS_Symlink, .deliver = afs_deliver_fs_symlink, .destructor = afs_flat_call_destructor, }; /* * create a symbolic link */ void afs_fs_symlink(struct afs_operation *op) { const struct qstr *name = &op->dentry->d_name; struct afs_vnode_param *dvp = &op->file[0]; struct afs_call *call; size_t namesz, reqsz, padsz, c_namesz, c_padsz; __be32 *bp; _enter(""); namesz = name->len; padsz = (4 - (namesz & 3)) & 3; c_namesz = strlen(op->create.symlink); c_padsz = (4 - (c_namesz & 3)) & 3; reqsz = (6 * 4) + namesz + padsz + c_namesz + c_padsz + (6 * 4); call = afs_alloc_flat_call(op->net, &afs_RXFSSymlink, reqsz, (3 + 21 + 21 + 6) * 4); if (!call) return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; *bp++ = htonl(FSSYMLINK); *bp++ = htonl(dvp->fid.vid); *bp++ = htonl(dvp->fid.vnode); *bp++ = htonl(dvp->fid.unique); *bp++ = htonl(namesz); memcpy(bp, name->name, namesz); bp = (void *) bp + namesz; if (padsz > 0) { memset(bp, 0, padsz); bp = (void *) bp + padsz; } *bp++ = htonl(c_namesz); memcpy(bp, op->create.symlink, c_namesz); bp = (void *) bp + c_namesz; if (c_padsz > 0) { memset(bp, 0, c_padsz); bp = (void *) bp + c_padsz; } *bp++ = htonl(AFS_SET_MODE | AFS_SET_MTIME); *bp++ = htonl(op->mtime.tv_sec); /* mtime */ *bp++ = 0; /* owner */ *bp++ = 0; /* group */ *bp++ = htonl(S_IRWXUGO); /* unix mode */ *bp++ = 0; /* segment size */ trace_afs_make_fs_call1(call, &dvp->fid, name); afs_make_op_call(op, call, GFP_NOFS); } /* * deliver reply data to an FS.Rename */ static int afs_deliver_fs_rename(struct afs_call *call) { struct afs_operation *op = call->op; struct afs_vnode_param *orig_dvp = &op->file[0]; struct afs_vnode_param *new_dvp = &op->file[1]; const __be32 *bp; int ret; ret = afs_transfer_reply(call); if (ret < 0) return ret; bp = call->buffer; /* If the two dirs are the same, we have two copies of the same status * report, so we just decode it twice. 
*/ xdr_decode_AFSFetchStatus(&bp, call, &orig_dvp->scb); xdr_decode_AFSFetchStatus(&bp, call, &new_dvp->scb); xdr_decode_AFSVolSync(&bp, &op->volsync); _leave(" = 0 [done]"); return 0; } /* * FS.Rename operation type */ static const struct afs_call_type afs_RXFSRename = { .name = "FS.Rename", .op = afs_FS_Rename, .deliver = afs_deliver_fs_rename, .destructor = afs_flat_call_destructor, }; /* * Rename/move a file or directory. */ void afs_fs_rename(struct afs_operation *op) { struct afs_vnode_param *orig_dvp = &op->file[0]; struct afs_vnode_param *new_dvp = &op->file[1]; const struct qstr *orig_name = &op->dentry->d_name; const struct qstr *new_name = &op->dentry_2->d_name; struct afs_call *call; size_t reqsz, o_namesz, o_padsz, n_namesz, n_padsz; __be32 *bp; _enter(""); o_namesz = orig_name->len; o_padsz = (4 - (o_namesz & 3)) & 3; n_namesz = new_name->len; n_padsz = (4 - (n_namesz & 3)) & 3; reqsz = (4 * 4) + 4 + o_namesz + o_padsz + (3 * 4) + 4 + n_namesz + n_padsz; call = afs_alloc_flat_call(op->net, &afs_RXFSRename, reqsz, (21 + 21 + 6) * 4); if (!call) return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; *bp++ = htonl(FSRENAME); *bp++ = htonl(orig_dvp->fid.vid); *bp++ = htonl(orig_dvp->fid.vnode); *bp++ = htonl(orig_dvp->fid.unique); *bp++ = htonl(o_namesz); memcpy(bp, orig_name->name, o_namesz); bp = (void *) bp + o_namesz; if (o_padsz > 0) { memset(bp, 0, o_padsz); bp = (void *) bp + o_padsz; } *bp++ = htonl(new_dvp->fid.vid); *bp++ = htonl(new_dvp->fid.vnode); *bp++ = htonl(new_dvp->fid.unique); *bp++ = htonl(n_namesz); memcpy(bp, new_name->name, n_namesz); bp = (void *) bp + n_namesz; if (n_padsz > 0) { memset(bp, 0, n_padsz); bp = (void *) bp + n_padsz; } trace_afs_make_fs_call2(call, &orig_dvp->fid, orig_name, new_name); afs_make_op_call(op, call, GFP_NOFS); } /* * Deliver reply data to FS.StoreData or FS.StoreStatus */ static int afs_deliver_fs_store_data(struct afs_call *call) { struct afs_operation *op = call->op; struct afs_vnode_param *vp = &op->file[0]; const __be32 *bp; int ret; _enter(""); ret = afs_transfer_reply(call); if (ret < 0) return ret; /* unmarshall the reply once we've received all of it */ bp = call->buffer; xdr_decode_AFSFetchStatus(&bp, call, &vp->scb); xdr_decode_AFSVolSync(&bp, &op->volsync); _leave(" = 0 [done]"); return 0; } /* * FS.StoreData operation type */ static const struct afs_call_type afs_RXFSStoreData = { .name = "FS.StoreData", .op = afs_FS_StoreData, .deliver = afs_deliver_fs_store_data, .destructor = afs_flat_call_destructor, }; static const struct afs_call_type afs_RXFSStoreData64 = { .name = "FS.StoreData64", .op = afs_FS_StoreData64, .deliver = afs_deliver_fs_store_data, .destructor = afs_flat_call_destructor, }; /* * store a set of pages to a very large file */ static void afs_fs_store_data64(struct afs_operation *op) { struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; __be32 *bp; _enter(",%x,{%llx:%llu},,", key_serial(op->key), vp->fid.vid, vp->fid.vnode); call = afs_alloc_flat_call(op->net, &afs_RXFSStoreData64, (4 + 6 + 3 * 2) * 4, (21 + 6) * 4); if (!call) return afs_op_nomem(op); call->write_iter = op->store.write_iter; /* marshall the parameters */ bp = call->request; *bp++ = htonl(FSSTOREDATA64); *bp++ = htonl(vp->fid.vid); *bp++ = htonl(vp->fid.vnode); *bp++ = htonl(vp->fid.unique); *bp++ = htonl(AFS_SET_MTIME); /* mask */ *bp++ = htonl(op->mtime.tv_sec); /* mtime */ *bp++ = 0; /* owner */ *bp++ = 0; /* group */ *bp++ = 0; /* unix mode */ *bp++ = 0; /* segment size */ *bp++ = 
htonl(upper_32_bits(op->store.pos)); *bp++ = htonl(lower_32_bits(op->store.pos)); *bp++ = htonl(upper_32_bits(op->store.size)); *bp++ = htonl(lower_32_bits(op->store.size)); *bp++ = htonl(upper_32_bits(op->store.i_size)); *bp++ = htonl(lower_32_bits(op->store.i_size)); trace_afs_make_fs_call(call, &vp->fid); afs_make_op_call(op, call, GFP_NOFS); } /* * Write data to a file on the server. */ void afs_fs_store_data(struct afs_operation *op) { struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; __be32 *bp; _enter(",%x,{%llx:%llu},,", key_serial(op->key), vp->fid.vid, vp->fid.vnode); _debug("size %llx, at %llx, i_size %llx", (unsigned long long)op->store.size, (unsigned long long)op->store.pos, (unsigned long long)op->store.i_size); if (test_bit(AFS_SERVER_FL_HAS_FS64, &op->server->flags)) return afs_fs_store_data64(op); call = afs_alloc_flat_call(op->net, &afs_RXFSStoreData, (4 + 6 + 3) * 4, (21 + 6) * 4); if (!call) return afs_op_nomem(op); call->write_iter = op->store.write_iter; /* marshall the parameters */ bp = call->request; *bp++ = htonl(FSSTOREDATA); *bp++ = htonl(vp->fid.vid); *bp++ = htonl(vp->fid.vnode); *bp++ = htonl(vp->fid.unique); *bp++ = htonl(AFS_SET_MTIME); /* mask */ *bp++ = htonl(op->mtime.tv_sec); /* mtime */ *bp++ = 0; /* owner */ *bp++ = 0; /* group */ *bp++ = 0; /* unix mode */ *bp++ = 0; /* segment size */ *bp++ = htonl(lower_32_bits(op->store.pos)); *bp++ = htonl(lower_32_bits(op->store.size)); *bp++ = htonl(lower_32_bits(op->store.i_size)); trace_afs_make_fs_call(call, &vp->fid); afs_make_op_call(op, call, GFP_NOFS); } /* * FS.StoreStatus operation type */ static const struct afs_call_type afs_RXFSStoreStatus = { .name = "FS.StoreStatus", .op = afs_FS_StoreStatus, .deliver = afs_deliver_fs_store_data, .destructor = afs_flat_call_destructor, }; static const struct afs_call_type afs_RXFSStoreData_as_Status = { .name = "FS.StoreData", .op = afs_FS_StoreData, .deliver = afs_deliver_fs_store_data, .destructor = afs_flat_call_destructor, }; static const struct afs_call_type afs_RXFSStoreData64_as_Status = { .name = "FS.StoreData64", .op = afs_FS_StoreData64, .deliver = afs_deliver_fs_store_data, .destructor = afs_flat_call_destructor, }; /* * set the attributes on a very large file, using FS.StoreData rather than * FS.StoreStatus so as to alter the file size also */ static void afs_fs_setattr_size64(struct afs_operation *op) { struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; struct iattr *attr = op->setattr.attr; __be32 *bp; _enter(",%x,{%llx:%llu},,", key_serial(op->key), vp->fid.vid, vp->fid.vnode); ASSERT(attr->ia_valid & ATTR_SIZE); call = afs_alloc_flat_call(op->net, &afs_RXFSStoreData64_as_Status, (4 + 6 + 3 * 2) * 4, (21 + 6) * 4); if (!call) return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; *bp++ = htonl(FSSTOREDATA64); *bp++ = htonl(vp->fid.vid); *bp++ = htonl(vp->fid.vnode); *bp++ = htonl(vp->fid.unique); xdr_encode_AFS_StoreStatus(&bp, attr); *bp++ = htonl(upper_32_bits(attr->ia_size)); /* position of start of write */ *bp++ = htonl(lower_32_bits(attr->ia_size)); *bp++ = 0; /* size of write */ *bp++ = 0; *bp++ = htonl(upper_32_bits(attr->ia_size)); /* new file length */ *bp++ = htonl(lower_32_bits(attr->ia_size)); trace_afs_make_fs_call(call, &vp->fid); afs_make_op_call(op, call, GFP_NOFS); } /* * set the attributes on a file, using FS.StoreData rather than FS.StoreStatus * so as to alter the file size also */ static void afs_fs_setattr_size(struct afs_operation *op) { struct afs_vnode_param *vp = 
&op->file[0]; struct afs_call *call; struct iattr *attr = op->setattr.attr; __be32 *bp; _enter(",%x,{%llx:%llu},,", key_serial(op->key), vp->fid.vid, vp->fid.vnode); ASSERT(attr->ia_valid & ATTR_SIZE); if (test_bit(AFS_SERVER_FL_HAS_FS64, &op->server->flags)) return afs_fs_setattr_size64(op); call = afs_alloc_flat_call(op->net, &afs_RXFSStoreData_as_Status, (4 + 6 + 3) * 4, (21 + 6) * 4); if (!call) return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; *bp++ = htonl(FSSTOREDATA); *bp++ = htonl(vp->fid.vid); *bp++ = htonl(vp->fid.vnode); *bp++ = htonl(vp->fid.unique); xdr_encode_AFS_StoreStatus(&bp, attr); *bp++ = htonl(attr->ia_size); /* position of start of write */ *bp++ = 0; /* size of write */ *bp++ = htonl(attr->ia_size); /* new file length */ trace_afs_make_fs_call(call, &vp->fid); afs_make_op_call(op, call, GFP_NOFS); } /* * set the attributes on a file, using FS.StoreData if there's a change in file * size, and FS.StoreStatus otherwise */ void afs_fs_setattr(struct afs_operation *op) { struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; struct iattr *attr = op->setattr.attr; __be32 *bp; if (attr->ia_valid & ATTR_SIZE) return afs_fs_setattr_size(op); _enter(",%x,{%llx:%llu},,", key_serial(op->key), vp->fid.vid, vp->fid.vnode); call = afs_alloc_flat_call(op->net, &afs_RXFSStoreStatus, (4 + 6) * 4, (21 + 6) * 4); if (!call) return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; *bp++ = htonl(FSSTORESTATUS); *bp++ = htonl(vp->fid.vid); *bp++ = htonl(vp->fid.vnode); *bp++ = htonl(vp->fid.unique); xdr_encode_AFS_StoreStatus(&bp, op->setattr.attr); trace_afs_make_fs_call(call, &vp->fid); afs_make_op_call(op, call, GFP_NOFS); } /* * deliver reply data to an FS.GetVolumeStatus */ static int afs_deliver_fs_get_volume_status(struct afs_call *call) { struct afs_operation *op = call->op; const __be32 *bp; char *p; u32 size; int ret; _enter("{%u}", call->unmarshall); switch (call->unmarshall) { case 0: call->unmarshall++; afs_extract_to_buf(call, 12 * 4); fallthrough; /* extract the returned status record */ case 1: _debug("extract status"); ret = afs_extract_data(call, true); if (ret < 0) return ret; bp = call->buffer; xdr_decode_AFSFetchVolumeStatus(&bp, &op->volstatus.vs); call->unmarshall++; afs_extract_to_tmp(call); fallthrough; /* extract the volume name length */ case 2: ret = afs_extract_data(call, true); if (ret < 0) return ret; call->count = ntohl(call->tmp); _debug("volname length: %u", call->count); if (call->count >= AFSNAMEMAX) return afs_protocol_error(call, afs_eproto_volname_len); size = (call->count + 3) & ~3; /* It's padded */ afs_extract_to_buf(call, size); call->unmarshall++; fallthrough; /* extract the volume name */ case 3: _debug("extract volname"); ret = afs_extract_data(call, true); if (ret < 0) return ret; p = call->buffer; p[call->count] = 0; _debug("volname '%s'", p); afs_extract_to_tmp(call); call->unmarshall++; fallthrough; /* extract the offline message length */ case 4: ret = afs_extract_data(call, true); if (ret < 0) return ret; call->count = ntohl(call->tmp); _debug("offline msg length: %u", call->count); if (call->count >= AFSNAMEMAX) return afs_protocol_error(call, afs_eproto_offline_msg_len); size = (call->count + 3) & ~3; /* It's padded */ afs_extract_to_buf(call, size); call->unmarshall++; fallthrough; /* extract the offline message */ case 5: _debug("extract offline"); ret = afs_extract_data(call, true); if (ret < 0) return ret; p = call->buffer; p[call->count] = 0; _debug("offline '%s'", p); 
afs_extract_to_tmp(call); call->unmarshall++; fallthrough; /* extract the message of the day length */ case 6: ret = afs_extract_data(call, true); if (ret < 0) return ret; call->count = ntohl(call->tmp); _debug("motd length: %u", call->count); if (call->count >= AFSNAMEMAX) return afs_protocol_error(call, afs_eproto_motd_len); size = (call->count + 3) & ~3; /* It's padded */ afs_extract_to_buf(call, size); call->unmarshall++; fallthrough; /* extract the message of the day */ case 7: _debug("extract motd"); ret = afs_extract_data(call, false); if (ret < 0) return ret; p = call->buffer; p[call->count] = 0; _debug("motd '%s'", p); call->unmarshall++; fallthrough; case 8: break; } _leave(" = 0 [done]"); return 0; } /* * FS.GetVolumeStatus operation type */ static const struct afs_call_type afs_RXFSGetVolumeStatus = { .name = "FS.GetVolumeStatus", .op = afs_FS_GetVolumeStatus, .deliver = afs_deliver_fs_get_volume_status, .destructor = afs_flat_call_destructor, }; /* * fetch the status of a volume */ void afs_fs_get_volume_status(struct afs_operation *op) { struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; __be32 *bp; _enter(""); call = afs_alloc_flat_call(op->net, &afs_RXFSGetVolumeStatus, 2 * 4, max(12 * 4, AFSOPAQUEMAX + 1)); if (!call) return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp[0] = htonl(FSGETVOLUMESTATUS); bp[1] = htonl(vp->fid.vid); trace_afs_make_fs_call(call, &vp->fid); afs_make_op_call(op, call, GFP_NOFS); } /* * deliver reply data to an FS.SetLock, FS.ExtendLock or FS.ReleaseLock */ static int afs_deliver_fs_xxxx_lock(struct afs_call *call) { struct afs_operation *op = call->op; const __be32 *bp; int ret; _enter("{%u}", call->unmarshall); ret = afs_transfer_reply(call); if (ret < 0) return ret; /* unmarshall the reply once we've received all of it */ bp = call->buffer; xdr_decode_AFSVolSync(&bp, &op->volsync); _leave(" = 0 [done]"); return 0; } /* * FS.SetLock operation type */ static const struct afs_call_type afs_RXFSSetLock = { .name = "FS.SetLock", .op = afs_FS_SetLock, .deliver = afs_deliver_fs_xxxx_lock, .done = afs_lock_op_done, .destructor = afs_flat_call_destructor, }; /* * FS.ExtendLock operation type */ static const struct afs_call_type afs_RXFSExtendLock = { .name = "FS.ExtendLock", .op = afs_FS_ExtendLock, .deliver = afs_deliver_fs_xxxx_lock, .done = afs_lock_op_done, .destructor = afs_flat_call_destructor, }; /* * FS.ReleaseLock operation type */ static const struct afs_call_type afs_RXFSReleaseLock = { .name = "FS.ReleaseLock", .op = afs_FS_ReleaseLock, .deliver = afs_deliver_fs_xxxx_lock, .destructor = afs_flat_call_destructor, }; /* * Set a lock on a file */ void afs_fs_set_lock(struct afs_operation *op) { struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; __be32 *bp; _enter(""); call = afs_alloc_flat_call(op->net, &afs_RXFSSetLock, 5 * 4, 6 * 4); if (!call) return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; *bp++ = htonl(FSSETLOCK); *bp++ = htonl(vp->fid.vid); *bp++ = htonl(vp->fid.vnode); *bp++ = htonl(vp->fid.unique); *bp++ = htonl(op->lock.type); trace_afs_make_fs_calli(call, &vp->fid, op->lock.type); afs_make_op_call(op, call, GFP_NOFS); } /* * extend a lock on a file */ void afs_fs_extend_lock(struct afs_operation *op) { struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; __be32 *bp; _enter(""); call = afs_alloc_flat_call(op->net, &afs_RXFSExtendLock, 4 * 4, 6 * 4); if (!call) return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; *bp++ = 
htonl(FSEXTENDLOCK); *bp++ = htonl(vp->fid.vid); *bp++ = htonl(vp->fid.vnode); *bp++ = htonl(vp->fid.unique); trace_afs_make_fs_call(call, &vp->fid); afs_make_op_call(op, call, GFP_NOFS); } /* * release a lock on a file */ void afs_fs_release_lock(struct afs_operation *op) { struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; __be32 *bp; _enter(""); call = afs_alloc_flat_call(op->net, &afs_RXFSReleaseLock, 4 * 4, 6 * 4); if (!call) return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; *bp++ = htonl(FSRELEASELOCK); *bp++ = htonl(vp->fid.vid); *bp++ = htonl(vp->fid.vnode); *bp++ = htonl(vp->fid.unique); trace_afs_make_fs_call(call, &vp->fid); afs_make_op_call(op, call, GFP_NOFS); } /* * Deliver reply data to an FS.GiveUpAllCallBacks operation. */ static int afs_deliver_fs_give_up_all_callbacks(struct afs_call *call) { return afs_transfer_reply(call); } /* * FS.GiveUpAllCallBacks operation type */ static const struct afs_call_type afs_RXFSGiveUpAllCallBacks = { .name = "FS.GiveUpAllCallBacks", .op = afs_FS_GiveUpAllCallBacks, .deliver = afs_deliver_fs_give_up_all_callbacks, .destructor = afs_flat_call_destructor, }; /* * Flush all the callbacks we have on a server. */ int afs_fs_give_up_all_callbacks(struct afs_net *net, struct afs_server *server, struct afs_addr_cursor *ac, struct key *key) { struct afs_call *call; __be32 *bp; _enter(""); call = afs_alloc_flat_call(net, &afs_RXFSGiveUpAllCallBacks, 1 * 4, 0); if (!call) return -ENOMEM; call->key = key; /* marshall the parameters */ bp = call->request; *bp++ = htonl(FSGIVEUPALLCALLBACKS); call->server = afs_use_server(server, afs_server_trace_give_up_cb); afs_make_call(ac, call, GFP_NOFS); return afs_wait_for_call_to_complete(call, ac); } /* * Deliver reply data to an FS.GetCapabilities operation. */ static int afs_deliver_fs_get_capabilities(struct afs_call *call) { u32 count; int ret; _enter("{%u,%zu}", call->unmarshall, iov_iter_count(call->iter)); switch (call->unmarshall) { case 0: afs_extract_to_tmp(call); call->unmarshall++; fallthrough; /* Extract the capabilities word count */ case 1: ret = afs_extract_data(call, true); if (ret < 0) return ret; count = ntohl(call->tmp); call->count = count; call->count2 = count; if (count == 0) { call->unmarshall = 4; call->tmp = 0; break; } /* Extract the first word of the capabilities to call->tmp */ afs_extract_to_tmp(call); call->unmarshall++; fallthrough; case 2: ret = afs_extract_data(call, false); if (ret < 0) return ret; afs_extract_discard(call, (count - 1) * sizeof(__be32)); call->unmarshall++; fallthrough; /* Extract remaining capabilities words */ case 3: ret = afs_extract_data(call, false); if (ret < 0) return ret; call->unmarshall++; break; } _leave(" = 0 [done]"); return 0; } /* * FS.GetCapabilities operation type */ static const struct afs_call_type afs_RXFSGetCapabilities = { .name = "FS.GetCapabilities", .op = afs_FS_GetCapabilities, .deliver = afs_deliver_fs_get_capabilities, .done = afs_fileserver_probe_result, .destructor = afs_flat_call_destructor, }; /* * Probe a fileserver for the capabilities that it supports. This RPC can * reply with up to 196 words. The operation is asynchronous and if we managed * to allocate a call, true is returned the result is delivered through the * ->done() - otherwise we return false to indicate we didn't even try. 
*/ bool afs_fs_get_capabilities(struct afs_net *net, struct afs_server *server, struct afs_addr_cursor *ac, struct key *key) { struct afs_call *call; __be32 *bp; _enter(""); call = afs_alloc_flat_call(net, &afs_RXFSGetCapabilities, 1 * 4, 16 * 4); if (!call) return false; call->key = key; call->server = afs_use_server(server, afs_server_trace_get_caps); call->upgrade = true; call->async = true; call->max_lifespan = AFS_PROBE_MAX_LIFESPAN; /* marshall the parameters */ bp = call->request; *bp++ = htonl(FSGETCAPABILITIES); trace_afs_make_fs_call(call, NULL); afs_make_call(ac, call, GFP_NOFS); afs_put_call(call); return true; } /* * Deliver reply data to an FS.InlineBulkStatus call */ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call) { struct afs_operation *op = call->op; struct afs_status_cb *scb; const __be32 *bp; u32 tmp; int ret; _enter("{%u}", call->unmarshall); switch (call->unmarshall) { case 0: afs_extract_to_tmp(call); call->unmarshall++; fallthrough; /* Extract the file status count and array in two steps */ case 1: _debug("extract status count"); ret = afs_extract_data(call, true); if (ret < 0) return ret; tmp = ntohl(call->tmp); _debug("status count: %u/%u", tmp, op->nr_files); if (tmp != op->nr_files) return afs_protocol_error(call, afs_eproto_ibulkst_count); call->count = 0; call->unmarshall++; more_counts: afs_extract_to_buf(call, 21 * sizeof(__be32)); fallthrough; case 2: _debug("extract status array %u", call->count); ret = afs_extract_data(call, true); if (ret < 0) return ret; switch (call->count) { case 0: scb = &op->file[0].scb; break; case 1: scb = &op->file[1].scb; break; default: scb = &op->more_files[call->count - 2].scb; break; } bp = call->buffer; xdr_decode_AFSFetchStatus(&bp, call, scb); call->count++; if (call->count < op->nr_files) goto more_counts; call->count = 0; call->unmarshall++; afs_extract_to_tmp(call); fallthrough; /* Extract the callback count and array in two steps */ case 3: _debug("extract CB count"); ret = afs_extract_data(call, true); if (ret < 0) return ret; tmp = ntohl(call->tmp); _debug("CB count: %u", tmp); if (tmp != op->nr_files) return afs_protocol_error(call, afs_eproto_ibulkst_cb_count); call->count = 0; call->unmarshall++; more_cbs: afs_extract_to_buf(call, 3 * sizeof(__be32)); fallthrough; case 4: _debug("extract CB array"); ret = afs_extract_data(call, true); if (ret < 0) return ret; _debug("unmarshall CB array"); switch (call->count) { case 0: scb = &op->file[0].scb; break; case 1: scb = &op->file[1].scb; break; default: scb = &op->more_files[call->count - 2].scb; break; } bp = call->buffer; xdr_decode_AFSCallBack(&bp, call, scb); call->count++; if (call->count < op->nr_files) goto more_cbs; afs_extract_to_buf(call, 6 * sizeof(__be32)); call->unmarshall++; fallthrough; case 5: ret = afs_extract_data(call, false); if (ret < 0) return ret; bp = call->buffer; xdr_decode_AFSVolSync(&bp, &op->volsync); call->unmarshall++; fallthrough; case 6: break; } _leave(" = 0 [done]"); return 0; } static void afs_done_fs_inline_bulk_status(struct afs_call *call) { if (call->error == -ECONNABORTED && call->abort_code == RX_INVALID_OPERATION) { set_bit(AFS_SERVER_FL_NO_IBULK, &call->server->flags); if (call->op) set_bit(AFS_VOLUME_MAYBE_NO_IBULK, &call->op->volume->flags); } } /* * FS.InlineBulkStatus operation type */ static const struct afs_call_type afs_RXFSInlineBulkStatus = { .name = "FS.InlineBulkStatus", .op = afs_FS_InlineBulkStatus, .deliver = afs_deliver_fs_inline_bulk_status, .done = afs_done_fs_inline_bulk_status, 
.destructor = afs_flat_call_destructor, }; /* * Fetch the status information for up to 50 files */ void afs_fs_inline_bulk_status(struct afs_operation *op) { struct afs_vnode_param *dvp = &op->file[0]; struct afs_vnode_param *vp = &op->file[1]; struct afs_call *call; __be32 *bp; int i; if (test_bit(AFS_SERVER_FL_NO_IBULK, &op->server->flags)) { op->error = -ENOTSUPP; return; } _enter(",%x,{%llx:%llu},%u", key_serial(op->key), vp->fid.vid, vp->fid.vnode, op->nr_files); call = afs_alloc_flat_call(op->net, &afs_RXFSInlineBulkStatus, (2 + op->nr_files * 3) * 4, 21 * 4); if (!call) return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; *bp++ = htonl(FSINLINEBULKSTATUS); *bp++ = htonl(op->nr_files); *bp++ = htonl(dvp->fid.vid); *bp++ = htonl(dvp->fid.vnode); *bp++ = htonl(dvp->fid.unique); *bp++ = htonl(vp->fid.vid); *bp++ = htonl(vp->fid.vnode); *bp++ = htonl(vp->fid.unique); for (i = 0; i < op->nr_files - 2; i++) { *bp++ = htonl(op->more_files[i].fid.vid); *bp++ = htonl(op->more_files[i].fid.vnode); *bp++ = htonl(op->more_files[i].fid.unique); } trace_afs_make_fs_call(call, &vp->fid); afs_make_op_call(op, call, GFP_NOFS); } /* * deliver reply data to an FS.FetchACL */ static int afs_deliver_fs_fetch_acl(struct afs_call *call) { struct afs_operation *op = call->op; struct afs_vnode_param *vp = &op->file[0]; struct afs_acl *acl; const __be32 *bp; unsigned int size; int ret; _enter("{%u}", call->unmarshall); switch (call->unmarshall) { case 0: afs_extract_to_tmp(call); call->unmarshall++; fallthrough; /* extract the returned data length */ case 1: ret = afs_extract_data(call, true); if (ret < 0) return ret; size = call->count2 = ntohl(call->tmp); size = round_up(size, 4); acl = kmalloc(struct_size(acl, data, size), GFP_KERNEL); if (!acl) return -ENOMEM; op->acl = acl; acl->size = call->count2; afs_extract_begin(call, acl->data, size); call->unmarshall++; fallthrough; /* extract the returned data */ case 2: ret = afs_extract_data(call, true); if (ret < 0) return ret; afs_extract_to_buf(call, (21 + 6) * 4); call->unmarshall++; fallthrough; /* extract the metadata */ case 3: ret = afs_extract_data(call, false); if (ret < 0) return ret; bp = call->buffer; xdr_decode_AFSFetchStatus(&bp, call, &vp->scb); xdr_decode_AFSVolSync(&bp, &op->volsync); call->unmarshall++; fallthrough; case 4: break; } _leave(" = 0 [done]"); return 0; } /* * FS.FetchACL operation type */ static const struct afs_call_type afs_RXFSFetchACL = { .name = "FS.FetchACL", .op = afs_FS_FetchACL, .deliver = afs_deliver_fs_fetch_acl, }; /* * Fetch the ACL for a file. */ void afs_fs_fetch_acl(struct afs_operation *op) { struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; __be32 *bp; _enter(",%x,{%llx:%llu},,", key_serial(op->key), vp->fid.vid, vp->fid.vnode); call = afs_alloc_flat_call(op->net, &afs_RXFSFetchACL, 16, (21 + 6) * 4); if (!call) return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp[0] = htonl(FSFETCHACL); bp[1] = htonl(vp->fid.vid); bp[2] = htonl(vp->fid.vnode); bp[3] = htonl(vp->fid.unique); trace_afs_make_fs_call(call, &vp->fid); afs_make_op_call(op, call, GFP_KERNEL); } /* * FS.StoreACL operation type */ static const struct afs_call_type afs_RXFSStoreACL = { .name = "FS.StoreACL", .op = afs_FS_StoreACL, .deliver = afs_deliver_fs_file_status_and_vol, .destructor = afs_flat_call_destructor, }; /* * Store the ACL for a file. 
*/ void afs_fs_store_acl(struct afs_operation *op) { struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; const struct afs_acl *acl = op->acl; size_t size; __be32 *bp; _enter(",%x,{%llx:%llu},,", key_serial(op->key), vp->fid.vid, vp->fid.vnode); size = round_up(acl->size, 4); call = afs_alloc_flat_call(op->net, &afs_RXFSStoreACL, 5 * 4 + size, (21 + 6) * 4); if (!call) return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp[0] = htonl(FSSTOREACL); bp[1] = htonl(vp->fid.vid); bp[2] = htonl(vp->fid.vnode); bp[3] = htonl(vp->fid.unique); bp[4] = htonl(acl->size); memcpy(&bp[5], acl->data, acl->size); if (acl->size != size) memset((void *)&bp[5] + acl->size, 0, size - acl->size); trace_afs_make_fs_call(call, &vp->fid); afs_make_op_call(op, call, GFP_KERNEL); }
linux-master
fs/afs/fsclient.c
// SPDX-License-Identifier: GPL-2.0-or-later /* AFS fileserver list management. * * Copyright (C) 2017 Red Hat, Inc. All Rights Reserved. * Written by David Howells ([email protected]) */ #include <linux/kernel.h> #include <linux/slab.h> #include "internal.h" void afs_put_serverlist(struct afs_net *net, struct afs_server_list *slist) { int i; if (slist && refcount_dec_and_test(&slist->usage)) { for (i = 0; i < slist->nr_servers; i++) afs_unuse_server(net, slist->servers[i].server, afs_server_trace_put_slist); kfree(slist); } } /* * Build a server list from a VLDB record. */ struct afs_server_list *afs_alloc_server_list(struct afs_cell *cell, struct key *key, struct afs_vldb_entry *vldb, u8 type_mask) { struct afs_server_list *slist; struct afs_server *server; int ret = -ENOMEM, nr_servers = 0, i, j; for (i = 0; i < vldb->nr_servers; i++) if (vldb->fs_mask[i] & type_mask) nr_servers++; slist = kzalloc(struct_size(slist, servers, nr_servers), GFP_KERNEL); if (!slist) goto error; refcount_set(&slist->usage, 1); rwlock_init(&slist->lock); for (i = 0; i < AFS_MAXTYPES; i++) slist->vids[i] = vldb->vid[i]; /* Make sure a record exists for each server in the list. */ for (i = 0; i < vldb->nr_servers; i++) { if (!(vldb->fs_mask[i] & type_mask)) continue; server = afs_lookup_server(cell, key, &vldb->fs_server[i], vldb->addr_version[i]); if (IS_ERR(server)) { ret = PTR_ERR(server); if (ret == -ENOENT || ret == -ENOMEDIUM) continue; goto error_2; } /* Insertion-sort by UUID */ for (j = 0; j < slist->nr_servers; j++) if (memcmp(&slist->servers[j].server->uuid, &server->uuid, sizeof(server->uuid)) >= 0) break; if (j < slist->nr_servers) { if (slist->servers[j].server == server) { afs_put_server(cell->net, server, afs_server_trace_put_slist_isort); continue; } memmove(slist->servers + j + 1, slist->servers + j, (slist->nr_servers - j) * sizeof(struct afs_server_entry)); } slist->servers[j].server = server; slist->nr_servers++; } if (slist->nr_servers == 0) { ret = -EDESTADDRREQ; goto error_2; } return slist; error_2: afs_put_serverlist(cell->net, slist); error: return ERR_PTR(ret); } /* * Copy the annotations from an old server list to its potential replacement. */ bool afs_annotate_server_list(struct afs_server_list *new, struct afs_server_list *old) { struct afs_server *cur; int i, j; if (old->nr_servers != new->nr_servers) goto changed; for (i = 0; i < old->nr_servers; i++) if (old->servers[i].server != new->servers[i].server) goto changed; return false; changed: /* Maintain the same preferred server as before if possible. */ cur = old->servers[old->preferred].server; for (j = 0; j < new->nr_servers; j++) { if (new->servers[j].server == cur) { new->preferred = j; break; } } return true; }
linux-master
fs/afs/server_list.c
// SPDX-License-Identifier: GPL-2.0-or-later /* AFS file locking support * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells ([email protected]) */ #include "internal.h" #define AFS_LOCK_GRANTED 0 #define AFS_LOCK_PENDING 1 #define AFS_LOCK_YOUR_TRY 2 struct workqueue_struct *afs_lock_manager; static void afs_next_locker(struct afs_vnode *vnode, int error); static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl); static void afs_fl_release_private(struct file_lock *fl); static const struct file_lock_operations afs_lock_ops = { .fl_copy_lock = afs_fl_copy_lock, .fl_release_private = afs_fl_release_private, }; static inline void afs_set_lock_state(struct afs_vnode *vnode, enum afs_lock_state state) { _debug("STATE %u -> %u", vnode->lock_state, state); vnode->lock_state = state; } static atomic_t afs_file_lock_debug_id; /* * if the callback is broken on this vnode, then the lock may now be available */ void afs_lock_may_be_available(struct afs_vnode *vnode) { _enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode); spin_lock(&vnode->lock); if (vnode->lock_state == AFS_VNODE_LOCK_WAITING_FOR_CB) afs_next_locker(vnode, 0); trace_afs_flock_ev(vnode, NULL, afs_flock_callback_break, 0); spin_unlock(&vnode->lock); } /* * the lock will time out in 5 minutes unless we extend it, so schedule * extension in a bit less than that time */ static void afs_schedule_lock_extension(struct afs_vnode *vnode) { ktime_t expires_at, now, duration; u64 duration_j; expires_at = ktime_add_ms(vnode->locked_at, AFS_LOCKWAIT * 1000 / 2); now = ktime_get_real(); duration = ktime_sub(expires_at, now); if (duration <= 0) duration_j = 0; else duration_j = nsecs_to_jiffies(ktime_to_ns(duration)); queue_delayed_work(afs_lock_manager, &vnode->lock_work, duration_j); } /* * In the case of successful completion of a lock operation, record the time * the reply appeared and start the lock extension timer. */ void afs_lock_op_done(struct afs_call *call) { struct afs_operation *op = call->op; struct afs_vnode *vnode = op->file[0].vnode; if (call->error == 0) { spin_lock(&vnode->lock); trace_afs_flock_ev(vnode, NULL, afs_flock_timestamp, 0); vnode->locked_at = call->issue_time; afs_schedule_lock_extension(vnode); spin_unlock(&vnode->lock); } } /* * grant one or more locks (readlocks are allowed to jump the queue if the * first lock in the queue is itself a readlock) * - the caller must hold the vnode lock */ static void afs_grant_locks(struct afs_vnode *vnode) { struct file_lock *p, *_p; bool exclusive = (vnode->lock_type == AFS_LOCK_WRITE); list_for_each_entry_safe(p, _p, &vnode->pending_locks, fl_u.afs.link) { if (!exclusive && p->fl_type == F_WRLCK) continue; list_move_tail(&p->fl_u.afs.link, &vnode->granted_locks); p->fl_u.afs.state = AFS_LOCK_GRANTED; trace_afs_flock_op(vnode, p, afs_flock_op_grant); wake_up(&p->fl_wait); } } /* * If an error is specified, reject every pending lock that matches the * authentication and type of the lock we failed to get. If there are any * remaining lockers, try to wake up one of them to have a go. 
*/ static void afs_next_locker(struct afs_vnode *vnode, int error) { struct file_lock *p, *_p, *next = NULL; struct key *key = vnode->lock_key; unsigned int fl_type = F_RDLCK; _enter(""); if (vnode->lock_type == AFS_LOCK_WRITE) fl_type = F_WRLCK; list_for_each_entry_safe(p, _p, &vnode->pending_locks, fl_u.afs.link) { if (error && p->fl_type == fl_type && afs_file_key(p->fl_file) == key) { list_del_init(&p->fl_u.afs.link); p->fl_u.afs.state = error; wake_up(&p->fl_wait); } /* Select the next locker to hand off to. */ if (next && (next->fl_type == F_WRLCK || p->fl_type == F_RDLCK)) continue; next = p; } vnode->lock_key = NULL; key_put(key); if (next) { afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING); next->fl_u.afs.state = AFS_LOCK_YOUR_TRY; trace_afs_flock_op(vnode, next, afs_flock_op_wake); wake_up(&next->fl_wait); } else { afs_set_lock_state(vnode, AFS_VNODE_LOCK_NONE); trace_afs_flock_ev(vnode, NULL, afs_flock_no_lockers, 0); } _leave(""); } /* * Kill off all waiters in the the pending lock queue due to the vnode being * deleted. */ static void afs_kill_lockers_enoent(struct afs_vnode *vnode) { struct file_lock *p; afs_set_lock_state(vnode, AFS_VNODE_LOCK_DELETED); while (!list_empty(&vnode->pending_locks)) { p = list_entry(vnode->pending_locks.next, struct file_lock, fl_u.afs.link); list_del_init(&p->fl_u.afs.link); p->fl_u.afs.state = -ENOENT; wake_up(&p->fl_wait); } key_put(vnode->lock_key); vnode->lock_key = NULL; } static void afs_lock_success(struct afs_operation *op) { _enter("op=%08x", op->debug_id); afs_vnode_commit_status(op, &op->file[0]); } static const struct afs_operation_ops afs_set_lock_operation = { .issue_afs_rpc = afs_fs_set_lock, .issue_yfs_rpc = yfs_fs_set_lock, .success = afs_lock_success, .aborted = afs_check_for_remote_deletion, }; /* * Get a lock on a file */ static int afs_set_lock(struct afs_vnode *vnode, struct key *key, afs_lock_type_t type) { struct afs_operation *op; _enter("%s{%llx:%llu.%u},%x,%u", vnode->volume->name, vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique, key_serial(key), type); op = afs_alloc_operation(key, vnode->volume); if (IS_ERR(op)) return PTR_ERR(op); afs_op_set_vnode(op, 0, vnode); op->lock.type = type; op->ops = &afs_set_lock_operation; return afs_do_sync_operation(op); } static const struct afs_operation_ops afs_extend_lock_operation = { .issue_afs_rpc = afs_fs_extend_lock, .issue_yfs_rpc = yfs_fs_extend_lock, .success = afs_lock_success, }; /* * Extend a lock on a file */ static int afs_extend_lock(struct afs_vnode *vnode, struct key *key) { struct afs_operation *op; _enter("%s{%llx:%llu.%u},%x", vnode->volume->name, vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique, key_serial(key)); op = afs_alloc_operation(key, vnode->volume); if (IS_ERR(op)) return PTR_ERR(op); afs_op_set_vnode(op, 0, vnode); op->flags |= AFS_OPERATION_UNINTR; op->ops = &afs_extend_lock_operation; return afs_do_sync_operation(op); } static const struct afs_operation_ops afs_release_lock_operation = { .issue_afs_rpc = afs_fs_release_lock, .issue_yfs_rpc = yfs_fs_release_lock, .success = afs_lock_success, }; /* * Release a lock on a file */ static int afs_release_lock(struct afs_vnode *vnode, struct key *key) { struct afs_operation *op; _enter("%s{%llx:%llu.%u},%x", vnode->volume->name, vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique, key_serial(key)); op = afs_alloc_operation(key, vnode->volume); if (IS_ERR(op)) return PTR_ERR(op); afs_op_set_vnode(op, 0, vnode); op->flags |= AFS_OPERATION_UNINTR; op->ops = &afs_release_lock_operation; return 
afs_do_sync_operation(op); } /* * do work for a lock, including: * - probing for a lock we're waiting on but didn't get immediately * - extending a lock that's close to timing out */ void afs_lock_work(struct work_struct *work) { struct afs_vnode *vnode = container_of(work, struct afs_vnode, lock_work.work); struct key *key; int ret; _enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode); spin_lock(&vnode->lock); again: _debug("wstate %u for %p", vnode->lock_state, vnode); switch (vnode->lock_state) { case AFS_VNODE_LOCK_NEED_UNLOCK: afs_set_lock_state(vnode, AFS_VNODE_LOCK_UNLOCKING); trace_afs_flock_ev(vnode, NULL, afs_flock_work_unlocking, 0); spin_unlock(&vnode->lock); /* attempt to release the server lock; if it fails, we just * wait 5 minutes and it'll expire anyway */ ret = afs_release_lock(vnode, vnode->lock_key); if (ret < 0 && vnode->lock_state != AFS_VNODE_LOCK_DELETED) { trace_afs_flock_ev(vnode, NULL, afs_flock_release_fail, ret); printk(KERN_WARNING "AFS:" " Failed to release lock on {%llx:%llx} error %d\n", vnode->fid.vid, vnode->fid.vnode, ret); } spin_lock(&vnode->lock); if (ret == -ENOENT) afs_kill_lockers_enoent(vnode); else afs_next_locker(vnode, 0); spin_unlock(&vnode->lock); return; /* If we've already got a lock, then it must be time to extend that * lock as AFS locks time out after 5 minutes. */ case AFS_VNODE_LOCK_GRANTED: _debug("extend"); ASSERT(!list_empty(&vnode->granted_locks)); key = key_get(vnode->lock_key); afs_set_lock_state(vnode, AFS_VNODE_LOCK_EXTENDING); trace_afs_flock_ev(vnode, NULL, afs_flock_work_extending, 0); spin_unlock(&vnode->lock); ret = afs_extend_lock(vnode, key); /* RPC */ key_put(key); if (ret < 0) { trace_afs_flock_ev(vnode, NULL, afs_flock_extend_fail, ret); pr_warn("AFS: Failed to extend lock on {%llx:%llx} error %d\n", vnode->fid.vid, vnode->fid.vnode, ret); } spin_lock(&vnode->lock); if (ret == -ENOENT) { afs_kill_lockers_enoent(vnode); spin_unlock(&vnode->lock); return; } if (vnode->lock_state != AFS_VNODE_LOCK_EXTENDING) goto again; afs_set_lock_state(vnode, AFS_VNODE_LOCK_GRANTED); if (ret != 0) queue_delayed_work(afs_lock_manager, &vnode->lock_work, HZ * 10); spin_unlock(&vnode->lock); _leave(" [ext]"); return; /* If we're waiting for a callback to indicate lock release, we can't * actually rely on this, so need to recheck at regular intervals. The * problem is that the server might not notify us if the lock just * expires (say because a client died) rather than being explicitly * released. */ case AFS_VNODE_LOCK_WAITING_FOR_CB: _debug("retry"); afs_next_locker(vnode, 0); spin_unlock(&vnode->lock); return; case AFS_VNODE_LOCK_DELETED: afs_kill_lockers_enoent(vnode); spin_unlock(&vnode->lock); return; default: /* Looks like a lock request was withdrawn. 
*/ spin_unlock(&vnode->lock); _leave(" [no]"); return; } } /* * pass responsibility for the unlocking of a vnode on the server to the * manager thread, lest a pending signal in the calling thread interrupt * AF_RXRPC * - the caller must hold the vnode lock */ static void afs_defer_unlock(struct afs_vnode *vnode) { _enter("%u", vnode->lock_state); if (list_empty(&vnode->granted_locks) && (vnode->lock_state == AFS_VNODE_LOCK_GRANTED || vnode->lock_state == AFS_VNODE_LOCK_EXTENDING)) { cancel_delayed_work(&vnode->lock_work); afs_set_lock_state(vnode, AFS_VNODE_LOCK_NEED_UNLOCK); trace_afs_flock_ev(vnode, NULL, afs_flock_defer_unlock, 0); queue_delayed_work(afs_lock_manager, &vnode->lock_work, 0); } } /* * Check that our view of the file metadata is up to date and check to see * whether we think that we have a locking permit. */ static int afs_do_setlk_check(struct afs_vnode *vnode, struct key *key, enum afs_flock_mode mode, afs_lock_type_t type) { afs_access_t access; int ret; /* Make sure we've got a callback on this file and that our view of the * data version is up to date. */ ret = afs_validate(vnode, key); if (ret < 0) return ret; /* Check the permission set to see if we're actually going to be * allowed to get a lock on this file. */ ret = afs_check_permit(vnode, key, &access); if (ret < 0) return ret; /* At a rough estimation, you need LOCK, WRITE or INSERT perm to * read-lock a file and WRITE or INSERT perm to write-lock a file. * * We can't rely on the server to do this for us since if we want to * share a read lock that we already have, we won't go the server. */ if (type == AFS_LOCK_READ) { if (!(access & (AFS_ACE_INSERT | AFS_ACE_WRITE | AFS_ACE_LOCK))) return -EACCES; } else { if (!(access & (AFS_ACE_INSERT | AFS_ACE_WRITE))) return -EACCES; } return 0; } /* * request a lock on a file on the server */ static int afs_do_setlk(struct file *file, struct file_lock *fl) { struct inode *inode = file_inode(file); struct afs_vnode *vnode = AFS_FS_I(inode); enum afs_flock_mode mode = AFS_FS_S(inode->i_sb)->flock_mode; afs_lock_type_t type; struct key *key = afs_file_key(file); bool partial, no_server_lock = false; int ret; if (mode == afs_flock_mode_unset) mode = afs_flock_mode_openafs; _enter("{%llx:%llu},%llu-%llu,%u,%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_start, fl->fl_end, fl->fl_type, mode); fl->fl_ops = &afs_lock_ops; INIT_LIST_HEAD(&fl->fl_u.afs.link); fl->fl_u.afs.state = AFS_LOCK_PENDING; partial = (fl->fl_start != 0 || fl->fl_end != OFFSET_MAX); type = (fl->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE; if (mode == afs_flock_mode_write && partial) type = AFS_LOCK_WRITE; ret = afs_do_setlk_check(vnode, key, mode, type); if (ret < 0) return ret; trace_afs_flock_op(vnode, fl, afs_flock_op_set_lock); /* AFS3 protocol only supports full-file locks and doesn't provide any * method of upgrade/downgrade, so we need to emulate for partial-file * locks. * * The OpenAFS client only gets a server lock for a full-file lock and * keeps partial-file locks local. Allow this behaviour to be emulated * (as the default). */ if (mode == afs_flock_mode_local || (partial && mode == afs_flock_mode_openafs)) { no_server_lock = true; goto skip_server_lock; } spin_lock(&vnode->lock); list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks); ret = -ENOENT; if (vnode->lock_state == AFS_VNODE_LOCK_DELETED) goto error_unlock; /* If we've already got a lock on the server then try to move to having * the VFS grant the requested lock. Note that this means that other * clients may get starved out. 
*/ _debug("try %u", vnode->lock_state); if (vnode->lock_state == AFS_VNODE_LOCK_GRANTED) { if (type == AFS_LOCK_READ) { _debug("instant readlock"); list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks); fl->fl_u.afs.state = AFS_LOCK_GRANTED; goto vnode_is_locked_u; } if (vnode->lock_type == AFS_LOCK_WRITE) { _debug("instant writelock"); list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks); fl->fl_u.afs.state = AFS_LOCK_GRANTED; goto vnode_is_locked_u; } } if (vnode->lock_state == AFS_VNODE_LOCK_NONE && !(fl->fl_flags & FL_SLEEP)) { ret = -EAGAIN; if (type == AFS_LOCK_READ) { if (vnode->status.lock_count == -1) goto lock_is_contended; /* Write locked */ } else { if (vnode->status.lock_count != 0) goto lock_is_contended; /* Locked */ } } if (vnode->lock_state != AFS_VNODE_LOCK_NONE) goto need_to_wait; try_to_lock: /* We don't have a lock on this vnode and we aren't currently waiting * for one either, so ask the server for a lock. * * Note that we need to be careful if we get interrupted by a signal * after dispatching the request as we may still get the lock, even * though we don't wait for the reply (it's not too bad a problem - the * lock will expire in 5 mins anyway). */ trace_afs_flock_ev(vnode, fl, afs_flock_try_to_lock, 0); vnode->lock_key = key_get(key); vnode->lock_type = type; afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING); spin_unlock(&vnode->lock); ret = afs_set_lock(vnode, key, type); /* RPC */ spin_lock(&vnode->lock); switch (ret) { case -EKEYREJECTED: case -EKEYEXPIRED: case -EKEYREVOKED: case -EPERM: case -EACCES: fl->fl_u.afs.state = ret; trace_afs_flock_ev(vnode, fl, afs_flock_fail_perm, ret); list_del_init(&fl->fl_u.afs.link); afs_next_locker(vnode, ret); goto error_unlock; case -ENOENT: fl->fl_u.afs.state = ret; trace_afs_flock_ev(vnode, fl, afs_flock_fail_other, ret); list_del_init(&fl->fl_u.afs.link); afs_kill_lockers_enoent(vnode); goto error_unlock; default: fl->fl_u.afs.state = ret; trace_afs_flock_ev(vnode, fl, afs_flock_fail_other, ret); list_del_init(&fl->fl_u.afs.link); afs_next_locker(vnode, 0); goto error_unlock; case -EWOULDBLOCK: /* The server doesn't have a lock-waiting queue, so the client * will have to retry. The server will break the outstanding * callbacks on a file when a lock is released. */ ASSERT(list_empty(&vnode->granted_locks)); ASSERTCMP(vnode->pending_locks.next, ==, &fl->fl_u.afs.link); goto lock_is_contended; case 0: afs_set_lock_state(vnode, AFS_VNODE_LOCK_GRANTED); trace_afs_flock_ev(vnode, fl, afs_flock_acquired, type); afs_grant_locks(vnode); goto vnode_is_locked_u; } vnode_is_locked_u: spin_unlock(&vnode->lock); vnode_is_locked: /* the lock has been granted by the server... */ ASSERTCMP(fl->fl_u.afs.state, ==, AFS_LOCK_GRANTED); skip_server_lock: /* ... but the VFS still needs to distribute access on this client. */ trace_afs_flock_ev(vnode, fl, afs_flock_vfs_locking, 0); ret = locks_lock_file_wait(file, fl); trace_afs_flock_ev(vnode, fl, afs_flock_vfs_lock, ret); if (ret < 0) goto vfs_rejected_lock; /* Again, make sure we've got a callback on this file and, again, make * sure that our view of the data version is up to date (we ignore * errors incurred here and deal with the consequences elsewhere). 
*/ afs_validate(vnode, key); _leave(" = 0"); return 0; lock_is_contended: if (!(fl->fl_flags & FL_SLEEP)) { list_del_init(&fl->fl_u.afs.link); afs_next_locker(vnode, 0); ret = -EAGAIN; goto error_unlock; } afs_set_lock_state(vnode, AFS_VNODE_LOCK_WAITING_FOR_CB); trace_afs_flock_ev(vnode, fl, afs_flock_would_block, ret); queue_delayed_work(afs_lock_manager, &vnode->lock_work, HZ * 5); need_to_wait: /* We're going to have to wait. Either this client doesn't have a lock * on the server yet and we need to wait for a callback to occur, or * the client does have a lock on the server, but it's shared and we * need an exclusive lock. */ spin_unlock(&vnode->lock); trace_afs_flock_ev(vnode, fl, afs_flock_waiting, 0); ret = wait_event_interruptible(fl->fl_wait, fl->fl_u.afs.state != AFS_LOCK_PENDING); trace_afs_flock_ev(vnode, fl, afs_flock_waited, ret); if (fl->fl_u.afs.state >= 0 && fl->fl_u.afs.state != AFS_LOCK_GRANTED) { spin_lock(&vnode->lock); switch (fl->fl_u.afs.state) { case AFS_LOCK_YOUR_TRY: fl->fl_u.afs.state = AFS_LOCK_PENDING; goto try_to_lock; case AFS_LOCK_PENDING: if (ret > 0) { /* We need to retry the lock. We may not be * notified by the server if it just expired * rather than being released. */ ASSERTCMP(vnode->lock_state, ==, AFS_VNODE_LOCK_WAITING_FOR_CB); afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING); fl->fl_u.afs.state = AFS_LOCK_PENDING; goto try_to_lock; } goto error_unlock; case AFS_LOCK_GRANTED: default: break; } spin_unlock(&vnode->lock); } if (fl->fl_u.afs.state == AFS_LOCK_GRANTED) goto vnode_is_locked; ret = fl->fl_u.afs.state; goto error; vfs_rejected_lock: /* The VFS rejected the lock we just obtained, so we have to discard * what we just got. We defer this to the lock manager work item to * deal with. */ _debug("vfs refused %d", ret); if (no_server_lock) goto error; spin_lock(&vnode->lock); list_del_init(&fl->fl_u.afs.link); afs_defer_unlock(vnode); error_unlock: spin_unlock(&vnode->lock); error: _leave(" = %d", ret); return ret; } /* * unlock on a file on the server */ static int afs_do_unlk(struct file *file, struct file_lock *fl) { struct afs_vnode *vnode = AFS_FS_I(file_inode(file)); int ret; _enter("{%llx:%llu},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type); trace_afs_flock_op(vnode, fl, afs_flock_op_unlock); /* Flush all pending writes before doing anything with locks. 
*/ vfs_fsync(file, 0); ret = locks_lock_file_wait(file, fl); _leave(" = %d [%u]", ret, vnode->lock_state); return ret; } /* * return information about a lock we currently hold, if indeed we hold one */ static int afs_do_getlk(struct file *file, struct file_lock *fl) { struct afs_vnode *vnode = AFS_FS_I(file_inode(file)); struct key *key = afs_file_key(file); int ret, lock_count; _enter(""); if (vnode->lock_state == AFS_VNODE_LOCK_DELETED) return -ENOENT; fl->fl_type = F_UNLCK; /* check local lock records first */ posix_test_lock(file, fl); if (fl->fl_type == F_UNLCK) { /* no local locks; consult the server */ ret = afs_fetch_status(vnode, key, false, NULL); if (ret < 0) goto error; lock_count = READ_ONCE(vnode->status.lock_count); if (lock_count != 0) { if (lock_count > 0) fl->fl_type = F_RDLCK; else fl->fl_type = F_WRLCK; fl->fl_start = 0; fl->fl_end = OFFSET_MAX; fl->fl_pid = 0; } } ret = 0; error: _leave(" = %d [%hd]", ret, fl->fl_type); return ret; } /* * manage POSIX locks on a file */ int afs_lock(struct file *file, int cmd, struct file_lock *fl) { struct afs_vnode *vnode = AFS_FS_I(file_inode(file)); enum afs_flock_operation op; int ret; _enter("{%llx:%llu},%d,{t=%x,fl=%x,r=%Ld:%Ld}", vnode->fid.vid, vnode->fid.vnode, cmd, fl->fl_type, fl->fl_flags, (long long) fl->fl_start, (long long) fl->fl_end); if (IS_GETLK(cmd)) return afs_do_getlk(file, fl); fl->fl_u.afs.debug_id = atomic_inc_return(&afs_file_lock_debug_id); trace_afs_flock_op(vnode, fl, afs_flock_op_lock); if (fl->fl_type == F_UNLCK) ret = afs_do_unlk(file, fl); else ret = afs_do_setlk(file, fl); switch (ret) { case 0: op = afs_flock_op_return_ok; break; case -EAGAIN: op = afs_flock_op_return_eagain; break; case -EDEADLK: op = afs_flock_op_return_edeadlk; break; default: op = afs_flock_op_return_error; break; } trace_afs_flock_op(vnode, fl, op); return ret; } /* * manage FLOCK locks on a file */ int afs_flock(struct file *file, int cmd, struct file_lock *fl) { struct afs_vnode *vnode = AFS_FS_I(file_inode(file)); enum afs_flock_operation op; int ret; _enter("{%llx:%llu},%d,{t=%x,fl=%x}", vnode->fid.vid, vnode->fid.vnode, cmd, fl->fl_type, fl->fl_flags); /* * No BSD flocks over NFS allowed. * Note: we could try to fake a POSIX lock request here by * using ((u32) filp | 0x80000000) or some such as the pid. * Not sure whether that would be unique, though, or whether * that would break in other places. 
*/ if (!(fl->fl_flags & FL_FLOCK)) return -ENOLCK; fl->fl_u.afs.debug_id = atomic_inc_return(&afs_file_lock_debug_id); trace_afs_flock_op(vnode, fl, afs_flock_op_flock); /* we're simulating flock() locks using posix locks on the server */ if (fl->fl_type == F_UNLCK) ret = afs_do_unlk(file, fl); else ret = afs_do_setlk(file, fl); switch (ret) { case 0: op = afs_flock_op_return_ok; break; case -EAGAIN: op = afs_flock_op_return_eagain; break; case -EDEADLK: op = afs_flock_op_return_edeadlk; break; default: op = afs_flock_op_return_error; break; } trace_afs_flock_op(vnode, fl, op); return ret; } /* * the POSIX lock management core VFS code copies the lock record and adds the * copy into its own list, so we need to add that copy to the vnode's lock * queue in the same place as the original (which will be deleted shortly * after) */ static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl) { struct afs_vnode *vnode = AFS_FS_I(file_inode(fl->fl_file)); _enter(""); new->fl_u.afs.debug_id = atomic_inc_return(&afs_file_lock_debug_id); spin_lock(&vnode->lock); trace_afs_flock_op(vnode, new, afs_flock_op_copy_lock); list_add(&new->fl_u.afs.link, &fl->fl_u.afs.link); spin_unlock(&vnode->lock); } /* * need to remove this lock from the vnode queue when it's removed from the * VFS's list */ static void afs_fl_release_private(struct file_lock *fl) { struct afs_vnode *vnode = AFS_FS_I(file_inode(fl->fl_file)); _enter(""); spin_lock(&vnode->lock); trace_afs_flock_op(vnode, fl, afs_flock_op_release_lock); list_del_init(&fl->fl_u.afs.link); if (list_empty(&vnode->granted_locks)) afs_defer_unlock(vnode); _debug("state %u for %p", vnode->lock_state, vnode); spin_unlock(&vnode->lock); }
linux-master
fs/afs/flock.c
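The flock.c entry points above sit behind two different userspace APIs: fcntl() record locks go through afs_lock()/afs_do_setlk()/afs_do_getlk(), while flock() whole-file locks go through afs_flock() and are simulated with POSIX-style locks on the server. Below is a minimal userspace sketch of both paths; it is not part of the kernel tree, the file path is made up, and the behaviour shown is plain POSIX semantics.

#include <fcntl.h>
#include <stdio.h>
#include <sys/file.h>
#include <sys/wait.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "/tmp/lockdemo"; /* hypothetical */
	struct flock fl = {
		.l_type   = F_WRLCK,	/* exclusive record lock */
		.l_whence = SEEK_SET,
		.l_start  = 0,
		.l_len    = 0,		/* 0 means "to end of file" */
	};
	int fd = open(path, O_RDWR | O_CREAT, 0600);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* POSIX path: fcntl(F_SETLKW) is what afs_lock()/afs_do_setlk() serve. */
	if (fcntl(fd, F_SETLKW, &fl) < 0) {
		perror("F_SETLKW");
		return 1;
	}

	if (fork() == 0) {
		/* A child is a different lock owner, so F_GETLK (the
		 * afs_do_getlk() path) reports the parent's lock. */
		struct flock probe = { .l_type = F_WRLCK, .l_whence = SEEK_SET };

		if (fcntl(fd, F_GETLK, &probe) == 0 && probe.l_type != F_UNLCK)
			printf("write lock held by pid %ld\n", (long)probe.l_pid);
		_exit(0);
	}
	wait(NULL);

	/* Drop the POSIX lock, then take and release a BSD flock() lock,
	 * which is the afs_flock() path (simulated with POSIX locks on the
	 * server when the file lives on kAFS). */
	fl.l_type = F_UNLCK;
	fcntl(fd, F_SETLK, &fl);
	flock(fd, LOCK_EX);
	flock(fd, LOCK_UN);
	close(fd);
	return 0;
}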
// SPDX-License-Identifier: GPL-2.0-or-later /* AFS vlserver probing * * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved. * Written by David Howells ([email protected]) */ #include <linux/sched.h> #include <linux/slab.h> #include "afs_fs.h" #include "internal.h" #include "protocol_yfs.h" /* * Handle the completion of a set of probes. */ static void afs_finished_vl_probe(struct afs_vlserver *server) { if (!(server->probe.flags & AFS_VLSERVER_PROBE_RESPONDED)) { server->rtt = UINT_MAX; clear_bit(AFS_VLSERVER_FL_RESPONDING, &server->flags); } clear_bit_unlock(AFS_VLSERVER_FL_PROBING, &server->flags); wake_up_bit(&server->flags, AFS_VLSERVER_FL_PROBING); } /* * Handle the completion of a probe RPC call. */ static void afs_done_one_vl_probe(struct afs_vlserver *server, bool wake_up) { if (atomic_dec_and_test(&server->probe_outstanding)) { afs_finished_vl_probe(server); wake_up = true; } if (wake_up) wake_up_all(&server->probe_wq); } /* * Process the result of probing a vlserver. This is called after successful * or failed delivery of an VL.GetCapabilities operation. */ void afs_vlserver_probe_result(struct afs_call *call) { struct afs_addr_list *alist = call->alist; struct afs_vlserver *server = call->vlserver; unsigned int server_index = call->server_index; unsigned int rtt_us = 0; unsigned int index = call->addr_ix; bool have_result = false; int ret = call->error; _enter("%s,%u,%u,%d,%d", server->name, server_index, index, ret, call->abort_code); spin_lock(&server->probe_lock); switch (ret) { case 0: server->probe.error = 0; goto responded; case -ECONNABORTED: if (!(server->probe.flags & AFS_VLSERVER_PROBE_RESPONDED)) { server->probe.abort_code = call->abort_code; server->probe.error = ret; } goto responded; case -ENOMEM: case -ENONET: case -EKEYEXPIRED: case -EKEYREVOKED: case -EKEYREJECTED: server->probe.flags |= AFS_VLSERVER_PROBE_LOCAL_FAILURE; if (server->probe.error == 0) server->probe.error = ret; trace_afs_io_error(call->debug_id, ret, afs_io_error_vl_probe_fail); goto out; case -ECONNRESET: /* Responded, but call expired. */ case -ERFKILL: case -EADDRNOTAVAIL: case -ENETUNREACH: case -EHOSTUNREACH: case -EHOSTDOWN: case -ECONNREFUSED: case -ETIMEDOUT: case -ETIME: default: clear_bit(index, &alist->responded); set_bit(index, &alist->failed); if (!(server->probe.flags & AFS_VLSERVER_PROBE_RESPONDED) && (server->probe.error == 0 || server->probe.error == -ETIMEDOUT || server->probe.error == -ETIME)) server->probe.error = ret; trace_afs_io_error(call->debug_id, ret, afs_io_error_vl_probe_fail); goto out; } responded: set_bit(index, &alist->responded); clear_bit(index, &alist->failed); if (call->service_id == YFS_VL_SERVICE) { server->probe.flags |= AFS_VLSERVER_PROBE_IS_YFS; set_bit(AFS_VLSERVER_FL_IS_YFS, &server->flags); alist->addrs[index].srx_service = call->service_id; } else { server->probe.flags |= AFS_VLSERVER_PROBE_NOT_YFS; if (!(server->probe.flags & AFS_VLSERVER_PROBE_IS_YFS)) { clear_bit(AFS_VLSERVER_FL_IS_YFS, &server->flags); alist->addrs[index].srx_service = call->service_id; } } rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us); if (rtt_us < server->probe.rtt) { server->probe.rtt = rtt_us; server->rtt = rtt_us; alist->preferred = index; } smp_wmb(); /* Set rtt before responded. 
*/ server->probe.flags |= AFS_VLSERVER_PROBE_RESPONDED; set_bit(AFS_VLSERVER_FL_PROBED, &server->flags); set_bit(AFS_VLSERVER_FL_RESPONDING, &server->flags); have_result = true; out: spin_unlock(&server->probe_lock); _debug("probe [%u][%u] %pISpc rtt=%u ret=%d", server_index, index, &alist->addrs[index].transport, rtt_us, ret); afs_done_one_vl_probe(server, have_result); } /* * Probe all of a vlserver's addresses to find out the best route and to * query its capabilities. */ static bool afs_do_probe_vlserver(struct afs_net *net, struct afs_vlserver *server, struct key *key, unsigned int server_index, struct afs_error *_e) { struct afs_addr_cursor ac = { .index = 0, }; struct afs_call *call; bool in_progress = false; _enter("%s", server->name); read_lock(&server->lock); ac.alist = rcu_dereference_protected(server->addresses, lockdep_is_held(&server->lock)); read_unlock(&server->lock); atomic_set(&server->probe_outstanding, ac.alist->nr_addrs); memset(&server->probe, 0, sizeof(server->probe)); server->probe.rtt = UINT_MAX; for (ac.index = 0; ac.index < ac.alist->nr_addrs; ac.index++) { call = afs_vl_get_capabilities(net, &ac, key, server, server_index); if (!IS_ERR(call)) { afs_put_call(call); in_progress = true; } else { afs_prioritise_error(_e, PTR_ERR(call), ac.abort_code); afs_done_one_vl_probe(server, false); } } return in_progress; } /* * Send off probes to all unprobed servers. */ int afs_send_vl_probes(struct afs_net *net, struct key *key, struct afs_vlserver_list *vllist) { struct afs_vlserver *server; struct afs_error e; bool in_progress = false; int i; e.error = 0; e.responded = false; for (i = 0; i < vllist->nr_servers; i++) { server = vllist->servers[i].server; if (test_bit(AFS_VLSERVER_FL_PROBED, &server->flags)) continue; if (!test_and_set_bit_lock(AFS_VLSERVER_FL_PROBING, &server->flags) && afs_do_probe_vlserver(net, server, key, i, &e)) in_progress = true; } return in_progress ? 0 : e.error; } /* * Wait for the first as-yet untried server to respond. */ int afs_wait_for_vl_probes(struct afs_vlserver_list *vllist, unsigned long untried) { struct wait_queue_entry *waits; struct afs_vlserver *server; unsigned int rtt = UINT_MAX, rtt_s; bool have_responders = false; int pref = -1, i; _enter("%u,%lx", vllist->nr_servers, untried); /* Only wait for servers that have a probe outstanding. 
*/ for (i = 0; i < vllist->nr_servers; i++) { if (test_bit(i, &untried)) { server = vllist->servers[i].server; if (!test_bit(AFS_VLSERVER_FL_PROBING, &server->flags)) __clear_bit(i, &untried); if (server->probe.flags & AFS_VLSERVER_PROBE_RESPONDED) have_responders = true; } } if (have_responders || !untried) return 0; waits = kmalloc(array_size(vllist->nr_servers, sizeof(*waits)), GFP_KERNEL); if (!waits) return -ENOMEM; for (i = 0; i < vllist->nr_servers; i++) { if (test_bit(i, &untried)) { server = vllist->servers[i].server; init_waitqueue_entry(&waits[i], current); add_wait_queue(&server->probe_wq, &waits[i]); } } for (;;) { bool still_probing = false; set_current_state(TASK_INTERRUPTIBLE); for (i = 0; i < vllist->nr_servers; i++) { if (test_bit(i, &untried)) { server = vllist->servers[i].server; if (server->probe.flags & AFS_VLSERVER_PROBE_RESPONDED) goto stop; if (test_bit(AFS_VLSERVER_FL_PROBING, &server->flags)) still_probing = true; } } if (!still_probing || signal_pending(current)) goto stop; schedule(); } stop: set_current_state(TASK_RUNNING); for (i = 0; i < vllist->nr_servers; i++) { if (test_bit(i, &untried)) { server = vllist->servers[i].server; rtt_s = READ_ONCE(server->rtt); if (test_bit(AFS_VLSERVER_FL_RESPONDING, &server->flags) && rtt_s < rtt) { pref = i; rtt = rtt_s; } remove_wait_queue(&server->probe_wq, &waits[i]); } } kfree(waits); if (pref == -1 && signal_pending(current)) return -ERESTARTSYS; if (pref >= 0) vllist->preferred = pref; _leave(" = 0 [%u]", pref); return 0; }
linux-master
fs/afs/vl_probe.c
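afs_do_probe_vlserver() fires one probe per address and afs_done_one_vl_probe() lets whichever completion drops probe_outstanding to zero finish the set. The following is a hypothetical standalone C11 sketch of that dec-and-test completion pattern, for illustration only; it does not use the kernel's atomics or wait queues.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int probe_outstanding;	/* one count per address probed */

static void finished_probes(void)
{
	printf("last probe accounted for - waking waiters\n");
}

/* Mirrors the atomic_dec_and_test() use in afs_done_one_vl_probe():
 * whichever completion takes the counter to zero finalises the set. */
static bool done_one_probe(void)
{
	if (atomic_fetch_sub(&probe_outstanding, 1) == 1) {
		finished_probes();
		return true;
	}
	return false;
}

int main(void)
{
	int i, nr_addrs = 3;

	atomic_store(&probe_outstanding, nr_addrs);
	for (i = 0; i < nr_addrs; i++)
		printf("probe %d done, completed_set=%d\n", i, done_one_probe());
	return 0;
}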
// SPDX-License-Identifier: GPL-2.0-or-later /* Extended attribute handling for AFS. We use xattrs to get and set metadata * instead of providing pioctl(). * * Copyright (C) 2017 Red Hat, Inc. All Rights Reserved. * Written by David Howells ([email protected]) */ #include <linux/slab.h> #include <linux/fs.h> #include <linux/xattr.h> #include "internal.h" /* * Deal with the result of a successful fetch ACL operation. */ static void afs_acl_success(struct afs_operation *op) { afs_vnode_commit_status(op, &op->file[0]); } static void afs_acl_put(struct afs_operation *op) { kfree(op->acl); } static const struct afs_operation_ops afs_fetch_acl_operation = { .issue_afs_rpc = afs_fs_fetch_acl, .success = afs_acl_success, .put = afs_acl_put, }; /* * Get a file's ACL. */ static int afs_xattr_get_acl(const struct xattr_handler *handler, struct dentry *dentry, struct inode *inode, const char *name, void *buffer, size_t size) { struct afs_operation *op; struct afs_vnode *vnode = AFS_FS_I(inode); struct afs_acl *acl = NULL; int ret; op = afs_alloc_operation(NULL, vnode->volume); if (IS_ERR(op)) return -ENOMEM; afs_op_set_vnode(op, 0, vnode); op->ops = &afs_fetch_acl_operation; afs_begin_vnode_operation(op); afs_wait_for_operation(op); acl = op->acl; op->acl = NULL; ret = afs_put_operation(op); if (ret == 0) { ret = acl->size; if (size > 0) { if (acl->size <= size) memcpy(buffer, acl->data, acl->size); else ret = -ERANGE; } } kfree(acl); return ret; } static bool afs_make_acl(struct afs_operation *op, const void *buffer, size_t size) { struct afs_acl *acl; acl = kmalloc(sizeof(*acl) + size, GFP_KERNEL); if (!acl) { afs_op_nomem(op); return false; } acl->size = size; memcpy(acl->data, buffer, size); op->acl = acl; return true; } static const struct afs_operation_ops afs_store_acl_operation = { .issue_afs_rpc = afs_fs_store_acl, .success = afs_acl_success, .put = afs_acl_put, }; /* * Set a file's AFS3 ACL. */ static int afs_xattr_set_acl(const struct xattr_handler *handler, struct mnt_idmap *idmap, struct dentry *dentry, struct inode *inode, const char *name, const void *buffer, size_t size, int flags) { struct afs_operation *op; struct afs_vnode *vnode = AFS_FS_I(inode); if (flags == XATTR_CREATE) return -EINVAL; op = afs_alloc_operation(NULL, vnode->volume); if (IS_ERR(op)) return -ENOMEM; afs_op_set_vnode(op, 0, vnode); if (!afs_make_acl(op, buffer, size)) return afs_put_operation(op); op->ops = &afs_store_acl_operation; return afs_do_sync_operation(op); } static const struct xattr_handler afs_xattr_afs_acl_handler = { .name = "afs.acl", .get = afs_xattr_get_acl, .set = afs_xattr_set_acl, }; static const struct afs_operation_ops yfs_fetch_opaque_acl_operation = { .issue_yfs_rpc = yfs_fs_fetch_opaque_acl, .success = afs_acl_success, /* Don't free op->yacl in .put here */ }; /* * Get a file's YFS ACL. 
*/ static int afs_xattr_get_yfs(const struct xattr_handler *handler, struct dentry *dentry, struct inode *inode, const char *name, void *buffer, size_t size) { struct afs_operation *op; struct afs_vnode *vnode = AFS_FS_I(inode); struct yfs_acl *yacl = NULL; char buf[16], *data; int which = 0, dsize, ret = -ENOMEM; if (strcmp(name, "acl") == 0) which = 0; else if (strcmp(name, "acl_inherited") == 0) which = 1; else if (strcmp(name, "acl_num_cleaned") == 0) which = 2; else if (strcmp(name, "vol_acl") == 0) which = 3; else return -EOPNOTSUPP; yacl = kzalloc(sizeof(struct yfs_acl), GFP_KERNEL); if (!yacl) goto error; if (which == 0) yacl->flags |= YFS_ACL_WANT_ACL; else if (which == 3) yacl->flags |= YFS_ACL_WANT_VOL_ACL; op = afs_alloc_operation(NULL, vnode->volume); if (IS_ERR(op)) goto error_yacl; afs_op_set_vnode(op, 0, vnode); op->yacl = yacl; op->ops = &yfs_fetch_opaque_acl_operation; afs_begin_vnode_operation(op); afs_wait_for_operation(op); ret = afs_put_operation(op); if (ret == 0) { switch (which) { case 0: data = yacl->acl->data; dsize = yacl->acl->size; break; case 1: data = buf; dsize = scnprintf(buf, sizeof(buf), "%u", yacl->inherit_flag); break; case 2: data = buf; dsize = scnprintf(buf, sizeof(buf), "%u", yacl->num_cleaned); break; case 3: data = yacl->vol_acl->data; dsize = yacl->vol_acl->size; break; default: ret = -EOPNOTSUPP; goto error_yacl; } ret = dsize; if (size > 0) { if (dsize <= size) memcpy(buffer, data, dsize); else ret = -ERANGE; } } else if (ret == -ENOTSUPP) { ret = -ENODATA; } error_yacl: yfs_free_opaque_acl(yacl); error: return ret; } static const struct afs_operation_ops yfs_store_opaque_acl2_operation = { .issue_yfs_rpc = yfs_fs_store_opaque_acl2, .success = afs_acl_success, .put = afs_acl_put, }; /* * Set a file's YFS ACL. */ static int afs_xattr_set_yfs(const struct xattr_handler *handler, struct mnt_idmap *idmap, struct dentry *dentry, struct inode *inode, const char *name, const void *buffer, size_t size, int flags) { struct afs_operation *op; struct afs_vnode *vnode = AFS_FS_I(inode); int ret; if (flags == XATTR_CREATE || strcmp(name, "acl") != 0) return -EINVAL; op = afs_alloc_operation(NULL, vnode->volume); if (IS_ERR(op)) return -ENOMEM; afs_op_set_vnode(op, 0, vnode); if (!afs_make_acl(op, buffer, size)) return afs_put_operation(op); op->ops = &yfs_store_opaque_acl2_operation; ret = afs_do_sync_operation(op); if (ret == -ENOTSUPP) ret = -ENODATA; return ret; } static const struct xattr_handler afs_xattr_yfs_handler = { .prefix = "afs.yfs.", .get = afs_xattr_get_yfs, .set = afs_xattr_set_yfs, }; /* * Get the name of the cell on which a file resides. */ static int afs_xattr_get_cell(const struct xattr_handler *handler, struct dentry *dentry, struct inode *inode, const char *name, void *buffer, size_t size) { struct afs_vnode *vnode = AFS_FS_I(inode); struct afs_cell *cell = vnode->volume->cell; size_t namelen; namelen = cell->name_len; if (size == 0) return namelen; if (namelen > size) return -ERANGE; memcpy(buffer, cell->name, namelen); return namelen; } static const struct xattr_handler afs_xattr_afs_cell_handler = { .name = "afs.cell", .get = afs_xattr_get_cell, }; /* * Get the volume ID, vnode ID and vnode uniquifier of a file as a sequence of * hex numbers separated by colons. 
*/ static int afs_xattr_get_fid(const struct xattr_handler *handler, struct dentry *dentry, struct inode *inode, const char *name, void *buffer, size_t size) { struct afs_vnode *vnode = AFS_FS_I(inode); char text[16 + 1 + 24 + 1 + 8 + 1]; size_t len; /* The volume ID is 64-bit, the vnode ID is 96-bit and the * uniquifier is 32-bit. */ len = scnprintf(text, sizeof(text), "%llx:", vnode->fid.vid); if (vnode->fid.vnode_hi) len += scnprintf(text + len, sizeof(text) - len, "%x%016llx", vnode->fid.vnode_hi, vnode->fid.vnode); else len += scnprintf(text + len, sizeof(text) - len, "%llx", vnode->fid.vnode); len += scnprintf(text + len, sizeof(text) - len, ":%x", vnode->fid.unique); if (size == 0) return len; if (len > size) return -ERANGE; memcpy(buffer, text, len); return len; } static const struct xattr_handler afs_xattr_afs_fid_handler = { .name = "afs.fid", .get = afs_xattr_get_fid, }; /* * Get the name of the volume on which a file resides. */ static int afs_xattr_get_volume(const struct xattr_handler *handler, struct dentry *dentry, struct inode *inode, const char *name, void *buffer, size_t size) { struct afs_vnode *vnode = AFS_FS_I(inode); const char *volname = vnode->volume->name; size_t namelen; namelen = strlen(volname); if (size == 0) return namelen; if (namelen > size) return -ERANGE; memcpy(buffer, volname, namelen); return namelen; } static const struct xattr_handler afs_xattr_afs_volume_handler = { .name = "afs.volume", .get = afs_xattr_get_volume, }; const struct xattr_handler *afs_xattr_handlers[] = { &afs_xattr_afs_acl_handler, &afs_xattr_afs_cell_handler, &afs_xattr_afs_fid_handler, &afs_xattr_afs_volume_handler, &afs_xattr_yfs_handler, /* afs.yfs. prefix */ NULL };
linux-master
fs/afs/xattr.c
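The handlers above expose AFS metadata as virtual xattrs (afs.cell, afs.volume, afs.fid, afs.acl, afs.yfs.*), and they report the value length when called with a zero-sized buffer. Here is a small userspace sketch, not from the kernel tree, that reads three of them with the usual two-call getxattr() pattern; the mount point used is hypothetical.

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/xattr.h>

static void show(const char *path, const char *name)
{
	/* A zero-sized query returns the value length (see the handlers). */
	ssize_t len = getxattr(path, name, NULL, 0);
	char *buf;

	if (len < 0) {
		perror(name);
		return;
	}
	buf = malloc(len + 1);
	if (!buf)
		return;
	len = getxattr(path, name, buf, len);
	if (len >= 0) {
		buf[len] = '\0';	/* values are not NUL-terminated */
		printf("%-10s = %s\n", name, buf);
	}
	free(buf);
}

int main(int argc, char **argv)
{
	/* Hypothetical kAFS path; pass a real one as argv[1]. */
	const char *path = argc > 1 ? argv[1] : "/afs/example.org/somefile";

	show(path, "afs.cell");		/* cell name */
	show(path, "afs.volume");	/* volume name */
	show(path, "afs.fid");		/* volume:vnode:uniquifier in hex */
	return 0;
}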
// SPDX-License-Identifier: GPL-2.0-or-later /* AFS Cache Manager Service * * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. * Written by David Howells ([email protected]) */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/ip.h> #include "internal.h" #include "afs_cm.h" #include "protocol_yfs.h" #define RXRPC_TRACE_ONLY_DEFINE_ENUMS #include <trace/events/rxrpc.h> static int afs_deliver_cb_init_call_back_state(struct afs_call *); static int afs_deliver_cb_init_call_back_state3(struct afs_call *); static int afs_deliver_cb_probe(struct afs_call *); static int afs_deliver_cb_callback(struct afs_call *); static int afs_deliver_cb_probe_uuid(struct afs_call *); static int afs_deliver_cb_tell_me_about_yourself(struct afs_call *); static void afs_cm_destructor(struct afs_call *); static void SRXAFSCB_CallBack(struct work_struct *); static void SRXAFSCB_InitCallBackState(struct work_struct *); static void SRXAFSCB_Probe(struct work_struct *); static void SRXAFSCB_ProbeUuid(struct work_struct *); static void SRXAFSCB_TellMeAboutYourself(struct work_struct *); static int afs_deliver_yfs_cb_callback(struct afs_call *); /* * CB.CallBack operation type */ static const struct afs_call_type afs_SRXCBCallBack = { .name = "CB.CallBack", .deliver = afs_deliver_cb_callback, .destructor = afs_cm_destructor, .work = SRXAFSCB_CallBack, }; /* * CB.InitCallBackState operation type */ static const struct afs_call_type afs_SRXCBInitCallBackState = { .name = "CB.InitCallBackState", .deliver = afs_deliver_cb_init_call_back_state, .destructor = afs_cm_destructor, .work = SRXAFSCB_InitCallBackState, }; /* * CB.InitCallBackState3 operation type */ static const struct afs_call_type afs_SRXCBInitCallBackState3 = { .name = "CB.InitCallBackState3", .deliver = afs_deliver_cb_init_call_back_state3, .destructor = afs_cm_destructor, .work = SRXAFSCB_InitCallBackState, }; /* * CB.Probe operation type */ static const struct afs_call_type afs_SRXCBProbe = { .name = "CB.Probe", .deliver = afs_deliver_cb_probe, .destructor = afs_cm_destructor, .work = SRXAFSCB_Probe, }; /* * CB.ProbeUuid operation type */ static const struct afs_call_type afs_SRXCBProbeUuid = { .name = "CB.ProbeUuid", .deliver = afs_deliver_cb_probe_uuid, .destructor = afs_cm_destructor, .work = SRXAFSCB_ProbeUuid, }; /* * CB.TellMeAboutYourself operation type */ static const struct afs_call_type afs_SRXCBTellMeAboutYourself = { .name = "CB.TellMeAboutYourself", .deliver = afs_deliver_cb_tell_me_about_yourself, .destructor = afs_cm_destructor, .work = SRXAFSCB_TellMeAboutYourself, }; /* * YFS CB.CallBack operation type */ static const struct afs_call_type afs_SRXYFSCB_CallBack = { .name = "YFSCB.CallBack", .deliver = afs_deliver_yfs_cb_callback, .destructor = afs_cm_destructor, .work = SRXAFSCB_CallBack, }; /* * route an incoming cache manager call * - return T if supported, F if not */ bool afs_cm_incoming_call(struct afs_call *call) { _enter("{%u, CB.OP %u}", call->service_id, call->operation_ID); switch (call->operation_ID) { case CBCallBack: call->type = &afs_SRXCBCallBack; return true; case CBInitCallBackState: call->type = &afs_SRXCBInitCallBackState; return true; case CBInitCallBackState3: call->type = &afs_SRXCBInitCallBackState3; return true; case CBProbe: call->type = &afs_SRXCBProbe; return true; case CBProbeUuid: call->type = &afs_SRXCBProbeUuid; return true; case CBTellMeAboutYourself: call->type = &afs_SRXCBTellMeAboutYourself; return true; case YFSCBCallBack: if 
(call->service_id != YFS_CM_SERVICE) return false; call->type = &afs_SRXYFSCB_CallBack; return true; default: return false; } } /* * Find the server record by peer address and record a probe to the cache * manager from a server. */ static int afs_find_cm_server_by_peer(struct afs_call *call) { struct sockaddr_rxrpc srx; struct afs_server *server; rxrpc_kernel_get_peer(call->net->socket, call->rxcall, &srx); server = afs_find_server(call->net, &srx); if (!server) { trace_afs_cm_no_server(call, &srx); return 0; } call->server = server; return 0; } /* * Find the server record by server UUID and record a probe to the cache * manager from a server. */ static int afs_find_cm_server_by_uuid(struct afs_call *call, struct afs_uuid *uuid) { struct afs_server *server; rcu_read_lock(); server = afs_find_server_by_uuid(call->net, call->request); rcu_read_unlock(); if (!server) { trace_afs_cm_no_server_u(call, call->request); return 0; } call->server = server; return 0; } /* * Clean up a cache manager call. */ static void afs_cm_destructor(struct afs_call *call) { kfree(call->buffer); call->buffer = NULL; } /* * Abort a service call from within an action function. */ static void afs_abort_service_call(struct afs_call *call, u32 abort_code, int error, enum rxrpc_abort_reason why) { rxrpc_kernel_abort_call(call->net->socket, call->rxcall, abort_code, error, why); afs_set_call_complete(call, error, 0); } /* * The server supplied a list of callbacks that it wanted to break. */ static void SRXAFSCB_CallBack(struct work_struct *work) { struct afs_call *call = container_of(work, struct afs_call, work); _enter(""); /* We need to break the callbacks before sending the reply as the * server holds up change visibility till it receives our reply so as * to maintain cache coherency. 
*/ if (call->server) { trace_afs_server(call->server->debug_id, refcount_read(&call->server->ref), atomic_read(&call->server->active), afs_server_trace_callback); afs_break_callbacks(call->server, call->count, call->request); } afs_send_empty_reply(call); afs_put_call(call); _leave(""); } /* * deliver request data to a CB.CallBack call */ static int afs_deliver_cb_callback(struct afs_call *call) { struct afs_callback_break *cb; __be32 *bp; int ret, loop; _enter("{%u}", call->unmarshall); switch (call->unmarshall) { case 0: afs_extract_to_tmp(call); call->unmarshall++; /* extract the FID array and its count in two steps */ fallthrough; case 1: _debug("extract FID count"); ret = afs_extract_data(call, true); if (ret < 0) return ret; call->count = ntohl(call->tmp); _debug("FID count: %u", call->count); if (call->count > AFSCBMAX) return afs_protocol_error(call, afs_eproto_cb_fid_count); call->buffer = kmalloc(array3_size(call->count, 3, 4), GFP_KERNEL); if (!call->buffer) return -ENOMEM; afs_extract_to_buf(call, call->count * 3 * 4); call->unmarshall++; fallthrough; case 2: _debug("extract FID array"); ret = afs_extract_data(call, true); if (ret < 0) return ret; _debug("unmarshall FID array"); call->request = kcalloc(call->count, sizeof(struct afs_callback_break), GFP_KERNEL); if (!call->request) return -ENOMEM; cb = call->request; bp = call->buffer; for (loop = call->count; loop > 0; loop--, cb++) { cb->fid.vid = ntohl(*bp++); cb->fid.vnode = ntohl(*bp++); cb->fid.unique = ntohl(*bp++); } afs_extract_to_tmp(call); call->unmarshall++; /* extract the callback array and its count in two steps */ fallthrough; case 3: _debug("extract CB count"); ret = afs_extract_data(call, true); if (ret < 0) return ret; call->count2 = ntohl(call->tmp); _debug("CB count: %u", call->count2); if (call->count2 != call->count && call->count2 != 0) return afs_protocol_error(call, afs_eproto_cb_count); call->iter = &call->def_iter; iov_iter_discard(&call->def_iter, ITER_DEST, call->count2 * 3 * 4); call->unmarshall++; fallthrough; case 4: _debug("extract discard %zu/%u", iov_iter_count(call->iter), call->count2 * 3 * 4); ret = afs_extract_data(call, false); if (ret < 0) return ret; call->unmarshall++; fallthrough; case 5: break; } if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING)) return afs_io_error(call, afs_io_error_cm_reply); /* we'll need the file server record as that tells us which set of * vnodes to operate upon */ return afs_find_cm_server_by_peer(call); } /* * allow the fileserver to request callback state (re-)initialisation */ static void SRXAFSCB_InitCallBackState(struct work_struct *work) { struct afs_call *call = container_of(work, struct afs_call, work); _enter("{%p}", call->server); if (call->server) afs_init_callback_state(call->server); afs_send_empty_reply(call); afs_put_call(call); _leave(""); } /* * deliver request data to a CB.InitCallBackState call */ static int afs_deliver_cb_init_call_back_state(struct afs_call *call) { int ret; _enter(""); afs_extract_discard(call, 0); ret = afs_extract_data(call, false); if (ret < 0) return ret; /* we'll need the file server record as that tells us which set of * vnodes to operate upon */ return afs_find_cm_server_by_peer(call); } /* * deliver request data to a CB.InitCallBackState3 call */ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call) { struct afs_uuid *r; unsigned loop; __be32 *b; int ret; _enter(""); _enter("{%u}", call->unmarshall); switch (call->unmarshall) { case 0: call->buffer = kmalloc_array(11, sizeof(__be32), 
GFP_KERNEL); if (!call->buffer) return -ENOMEM; afs_extract_to_buf(call, 11 * sizeof(__be32)); call->unmarshall++; fallthrough; case 1: _debug("extract UUID"); ret = afs_extract_data(call, false); switch (ret) { case 0: break; case -EAGAIN: return 0; default: return ret; } _debug("unmarshall UUID"); call->request = kmalloc(sizeof(struct afs_uuid), GFP_KERNEL); if (!call->request) return -ENOMEM; b = call->buffer; r = call->request; r->time_low = b[0]; r->time_mid = htons(ntohl(b[1])); r->time_hi_and_version = htons(ntohl(b[2])); r->clock_seq_hi_and_reserved = ntohl(b[3]); r->clock_seq_low = ntohl(b[4]); for (loop = 0; loop < 6; loop++) r->node[loop] = ntohl(b[loop + 5]); call->unmarshall++; fallthrough; case 2: break; } if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING)) return afs_io_error(call, afs_io_error_cm_reply); /* we'll need the file server record as that tells us which set of * vnodes to operate upon */ return afs_find_cm_server_by_uuid(call, call->request); } /* * allow the fileserver to see if the cache manager is still alive */ static void SRXAFSCB_Probe(struct work_struct *work) { struct afs_call *call = container_of(work, struct afs_call, work); _enter(""); afs_send_empty_reply(call); afs_put_call(call); _leave(""); } /* * deliver request data to a CB.Probe call */ static int afs_deliver_cb_probe(struct afs_call *call) { int ret; _enter(""); afs_extract_discard(call, 0); ret = afs_extract_data(call, false); if (ret < 0) return ret; if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING)) return afs_io_error(call, afs_io_error_cm_reply); return afs_find_cm_server_by_peer(call); } /* * Allow the fileserver to quickly find out if the cache manager has been * rebooted. */ static void SRXAFSCB_ProbeUuid(struct work_struct *work) { struct afs_call *call = container_of(work, struct afs_call, work); struct afs_uuid *r = call->request; _enter(""); if (memcmp(r, &call->net->uuid, sizeof(call->net->uuid)) == 0) afs_send_empty_reply(call); else afs_abort_service_call(call, 1, 1, afs_abort_probeuuid_negative); afs_put_call(call); _leave(""); } /* * deliver request data to a CB.ProbeUuid call */ static int afs_deliver_cb_probe_uuid(struct afs_call *call) { struct afs_uuid *r; unsigned loop; __be32 *b; int ret; _enter("{%u}", call->unmarshall); switch (call->unmarshall) { case 0: call->buffer = kmalloc_array(11, sizeof(__be32), GFP_KERNEL); if (!call->buffer) return -ENOMEM; afs_extract_to_buf(call, 11 * sizeof(__be32)); call->unmarshall++; fallthrough; case 1: _debug("extract UUID"); ret = afs_extract_data(call, false); switch (ret) { case 0: break; case -EAGAIN: return 0; default: return ret; } _debug("unmarshall UUID"); call->request = kmalloc(sizeof(struct afs_uuid), GFP_KERNEL); if (!call->request) return -ENOMEM; b = call->buffer; r = call->request; r->time_low = b[0]; r->time_mid = htons(ntohl(b[1])); r->time_hi_and_version = htons(ntohl(b[2])); r->clock_seq_hi_and_reserved = ntohl(b[3]); r->clock_seq_low = ntohl(b[4]); for (loop = 0; loop < 6; loop++) r->node[loop] = ntohl(b[loop + 5]); call->unmarshall++; fallthrough; case 2: break; } if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING)) return afs_io_error(call, afs_io_error_cm_reply); return afs_find_cm_server_by_peer(call); } /* * allow the fileserver to ask about the cache manager's capabilities */ static void SRXAFSCB_TellMeAboutYourself(struct work_struct *work) { struct afs_call *call = container_of(work, struct afs_call, work); int loop; struct { struct /* InterfaceAddr */ { __be32 nifs; __be32 uuid[11]; __be32 
ifaddr[32]; __be32 netmask[32]; __be32 mtu[32]; } ia; struct /* Capabilities */ { __be32 capcount; __be32 caps[1]; } cap; } reply; _enter(""); memset(&reply, 0, sizeof(reply)); reply.ia.uuid[0] = call->net->uuid.time_low; reply.ia.uuid[1] = htonl(ntohs(call->net->uuid.time_mid)); reply.ia.uuid[2] = htonl(ntohs(call->net->uuid.time_hi_and_version)); reply.ia.uuid[3] = htonl((s8) call->net->uuid.clock_seq_hi_and_reserved); reply.ia.uuid[4] = htonl((s8) call->net->uuid.clock_seq_low); for (loop = 0; loop < 6; loop++) reply.ia.uuid[loop + 5] = htonl((s8) call->net->uuid.node[loop]); reply.cap.capcount = htonl(1); reply.cap.caps[0] = htonl(AFS_CAP_ERROR_TRANSLATION); afs_send_simple_reply(call, &reply, sizeof(reply)); afs_put_call(call); _leave(""); } /* * deliver request data to a CB.TellMeAboutYourself call */ static int afs_deliver_cb_tell_me_about_yourself(struct afs_call *call) { int ret; _enter(""); afs_extract_discard(call, 0); ret = afs_extract_data(call, false); if (ret < 0) return ret; if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING)) return afs_io_error(call, afs_io_error_cm_reply); return afs_find_cm_server_by_peer(call); } /* * deliver request data to a YFS CB.CallBack call */ static int afs_deliver_yfs_cb_callback(struct afs_call *call) { struct afs_callback_break *cb; struct yfs_xdr_YFSFid *bp; size_t size; int ret, loop; _enter("{%u}", call->unmarshall); switch (call->unmarshall) { case 0: afs_extract_to_tmp(call); call->unmarshall++; /* extract the FID array and its count in two steps */ fallthrough; case 1: _debug("extract FID count"); ret = afs_extract_data(call, true); if (ret < 0) return ret; call->count = ntohl(call->tmp); _debug("FID count: %u", call->count); if (call->count > YFSCBMAX) return afs_protocol_error(call, afs_eproto_cb_fid_count); size = array_size(call->count, sizeof(struct yfs_xdr_YFSFid)); call->buffer = kmalloc(size, GFP_KERNEL); if (!call->buffer) return -ENOMEM; afs_extract_to_buf(call, size); call->unmarshall++; fallthrough; case 2: _debug("extract FID array"); ret = afs_extract_data(call, false); if (ret < 0) return ret; _debug("unmarshall FID array"); call->request = kcalloc(call->count, sizeof(struct afs_callback_break), GFP_KERNEL); if (!call->request) return -ENOMEM; cb = call->request; bp = call->buffer; for (loop = call->count; loop > 0; loop--, cb++) { cb->fid.vid = xdr_to_u64(bp->volume); cb->fid.vnode = xdr_to_u64(bp->vnode.lo); cb->fid.vnode_hi = ntohl(bp->vnode.hi); cb->fid.unique = ntohl(bp->vnode.unique); bp++; } afs_extract_to_tmp(call); call->unmarshall++; fallthrough; case 3: break; } if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING)) return afs_io_error(call, afs_io_error_cm_reply); /* We'll need the file server record as that tells us which set of * vnodes to operate upon. */ return afs_find_cm_server_by_peer(call); }
linux-master
fs/afs/cmservice.c
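Both CB.InitCallBackState3 and CB.ProbeUuid deliver the cache manager UUID as eleven 32-bit big-endian words, which the code above unmarshals field by field. The sketch below is a simplified standalone decoder for illustration only: it converts everything to host order, whereas the in-kernel struct afs_uuid keeps some fields in a different representation, and the sample words are invented.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

struct demo_uuid {
	uint32_t time_low;
	uint16_t time_mid;
	uint16_t time_hi_and_version;
	uint8_t  clock_seq_hi_and_reserved;
	uint8_t  clock_seq_low;
	uint8_t  node[6];
};

/* Decode eleven big-endian 32-bit words into host-order fields. */
static void decode_uuid(const uint32_t wire[11], struct demo_uuid *u)
{
	int i;

	u->time_low		     = ntohl(wire[0]);
	u->time_mid		     = ntohl(wire[1]);
	u->time_hi_and_version	     = ntohl(wire[2]);
	u->clock_seq_hi_and_reserved = ntohl(wire[3]);
	u->clock_seq_low	     = ntohl(wire[4]);
	for (i = 0; i < 6; i++)
		u->node[i] = ntohl(wire[5 + i]);
}

int main(void)
{
	/* Invented sample words, as they would arrive off the wire. */
	uint32_t wire[11] = {
		htonl(0x12345678), htonl(0x9abc), htonl(0x1def), htonl(0x80),
		htonl(0x01), htonl(0xde), htonl(0xad), htonl(0xbe),
		htonl(0xef), htonl(0x00), htonl(0x42),
	};
	struct demo_uuid u;
	int i;

	decode_uuid(wire, &u);
	printf("%08x-%04x-%04x-%02x%02x-", u.time_low, u.time_mid,
	       u.time_hi_and_version, u.clock_seq_hi_and_reserved,
	       u.clock_seq_low);
	for (i = 0; i < 6; i++)
		printf("%02x", u.node[i]);
	printf("\n");
	return 0;
}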
// SPDX-License-Identifier: GPL-2.0-or-later /* Handle fileserver selection and rotation. * * Copyright (C) 2017 Red Hat, Inc. All Rights Reserved. * Written by David Howells ([email protected]) */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/sched.h> #include <linux/delay.h> #include <linux/sched/signal.h> #include "internal.h" #include "afs_fs.h" /* * Begin iteration through a server list, starting with the vnode's last used * server if possible, or the last recorded good server if not. */ static bool afs_start_fs_iteration(struct afs_operation *op, struct afs_vnode *vnode) { struct afs_server *server; void *cb_server; int i; read_lock(&op->volume->servers_lock); op->server_list = afs_get_serverlist( rcu_dereference_protected(op->volume->servers, lockdep_is_held(&op->volume->servers_lock))); read_unlock(&op->volume->servers_lock); op->untried = (1UL << op->server_list->nr_servers) - 1; op->index = READ_ONCE(op->server_list->preferred); cb_server = vnode->cb_server; if (cb_server) { /* See if the vnode's preferred record is still available */ for (i = 0; i < op->server_list->nr_servers; i++) { server = op->server_list->servers[i].server; if (server == cb_server) { op->index = i; goto found_interest; } } /* If we have a lock outstanding on a server that's no longer * serving this vnode, then we can't switch to another server * and have to return an error. */ if (op->flags & AFS_OPERATION_CUR_ONLY) { op->error = -ESTALE; return false; } /* Note that the callback promise is effectively broken */ write_seqlock(&vnode->cb_lock); ASSERTCMP(cb_server, ==, vnode->cb_server); vnode->cb_server = NULL; if (test_and_clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) vnode->cb_break++; write_sequnlock(&vnode->cb_lock); } found_interest: return true; } /* * Post volume busy note. */ static void afs_busy(struct afs_volume *volume, u32 abort_code) { const char *m; switch (abort_code) { case VOFFLINE: m = "offline"; break; case VRESTARTING: m = "restarting"; break; case VSALVAGING: m = "being salvaged"; break; default: m = "busy"; break; } pr_notice("kAFS: Volume %llu '%s' is %s\n", volume->vid, volume->name, m); } /* * Sleep and retry the operation to the same fileserver. */ static bool afs_sleep_and_retry(struct afs_operation *op) { if (!(op->flags & AFS_OPERATION_UNINTR)) { msleep_interruptible(1000); if (signal_pending(current)) { op->error = -ERESTARTSYS; return false; } } else { msleep(1000); } return true; } /* * Select the fileserver to use. May be called multiple times to rotate * through the fileservers. */ bool afs_select_fileserver(struct afs_operation *op) { struct afs_addr_list *alist; struct afs_server *server; struct afs_vnode *vnode = op->file[0].vnode; struct afs_error e; u32 rtt; int error = op->ac.error, i; _enter("%lx[%d],%lx[%d],%d,%d", op->untried, op->index, op->ac.tried, op->ac.index, error, op->ac.abort_code); if (op->flags & AFS_OPERATION_STOP) { _leave(" = f [stopped]"); return false; } op->nr_iterations++; /* Evaluate the result of the previous operation, if there was one. */ switch (error) { case SHRT_MAX: goto start; case 0: default: /* Success or local failure. Stop. */ op->error = error; op->flags |= AFS_OPERATION_STOP; _leave(" = f [okay/local %d]", error); return false; case -ECONNABORTED: /* The far side rejected the operation on some grounds. This * might involve the server being busy or the volume having been moved. */ switch (op->ac.abort_code) { case VNOVOL: /* This fileserver doesn't know about the volume. 
* - May indicate that the VL is wrong - retry once and compare * the results. * - May indicate that the fileserver couldn't attach to the vol. */ if (op->flags & AFS_OPERATION_VNOVOL) { op->error = -EREMOTEIO; goto next_server; } write_lock(&op->volume->servers_lock); op->server_list->vnovol_mask |= 1 << op->index; write_unlock(&op->volume->servers_lock); set_bit(AFS_VOLUME_NEEDS_UPDATE, &op->volume->flags); error = afs_check_volume_status(op->volume, op); if (error < 0) goto failed_set_error; if (test_bit(AFS_VOLUME_DELETED, &op->volume->flags)) { op->error = -ENOMEDIUM; goto failed; } /* If the server list didn't change, then assume that * it's the fileserver having trouble. */ if (rcu_access_pointer(op->volume->servers) == op->server_list) { op->error = -EREMOTEIO; goto next_server; } /* Try again */ op->flags |= AFS_OPERATION_VNOVOL; _leave(" = t [vnovol]"); return true; case VSALVAGE: /* TODO: Should this return an error or iterate? */ case VVOLEXISTS: case VNOSERVICE: case VONLINE: case VDISKFULL: case VOVERQUOTA: op->error = afs_abort_to_error(op->ac.abort_code); goto next_server; case VOFFLINE: if (!test_and_set_bit(AFS_VOLUME_OFFLINE, &op->volume->flags)) { afs_busy(op->volume, op->ac.abort_code); clear_bit(AFS_VOLUME_BUSY, &op->volume->flags); } if (op->flags & AFS_OPERATION_NO_VSLEEP) { op->error = -EADV; goto failed; } if (op->flags & AFS_OPERATION_CUR_ONLY) { op->error = -ESTALE; goto failed; } goto busy; case VSALVAGING: case VRESTARTING: case VBUSY: /* Retry after going round all the servers unless we * have a file lock we need to maintain. */ if (op->flags & AFS_OPERATION_NO_VSLEEP) { op->error = -EBUSY; goto failed; } if (!test_and_set_bit(AFS_VOLUME_BUSY, &op->volume->flags)) { afs_busy(op->volume, op->ac.abort_code); clear_bit(AFS_VOLUME_OFFLINE, &op->volume->flags); } busy: if (op->flags & AFS_OPERATION_CUR_ONLY) { if (!afs_sleep_and_retry(op)) goto failed; /* Retry with same server & address */ _leave(" = t [vbusy]"); return true; } op->flags |= AFS_OPERATION_VBUSY; goto next_server; case VMOVED: /* The volume migrated to another server. We consider * consider all locks and callbacks broken and request * an update from the VLDB. * * We also limit the number of VMOVED hops we will * honour, just in case someone sets up a loop. */ if (op->flags & AFS_OPERATION_VMOVED) { op->error = -EREMOTEIO; goto failed; } op->flags |= AFS_OPERATION_VMOVED; set_bit(AFS_VOLUME_WAIT, &op->volume->flags); set_bit(AFS_VOLUME_NEEDS_UPDATE, &op->volume->flags); error = afs_check_volume_status(op->volume, op); if (error < 0) goto failed_set_error; /* If the server list didn't change, then the VLDB is * out of sync with the fileservers. This is hopefully * a temporary condition, however, so we don't want to * permanently block access to the file. * * TODO: Try other fileservers if we can. * * TODO: Retry a few times with sleeps. */ if (rcu_access_pointer(op->volume->servers) == op->server_list) { op->error = -ENOMEDIUM; goto failed; } goto restart_from_beginning; default: clear_bit(AFS_VOLUME_OFFLINE, &op->volume->flags); clear_bit(AFS_VOLUME_BUSY, &op->volume->flags); op->error = afs_abort_to_error(op->ac.abort_code); goto failed; } case -ETIMEDOUT: case -ETIME: if (op->error != -EDESTADDRREQ) goto iterate_address; fallthrough; case -ERFKILL: case -EADDRNOTAVAIL: case -ENETUNREACH: case -EHOSTUNREACH: case -EHOSTDOWN: case -ECONNREFUSED: _debug("no conn"); op->error = error; goto iterate_address; case -ENETRESET: pr_warn("kAFS: Peer reset %s (op=%x)\n", op->type ? 
op->type->name : "???", op->debug_id); fallthrough; case -ECONNRESET: _debug("call reset"); op->error = error; goto failed; } restart_from_beginning: _debug("restart"); afs_end_cursor(&op->ac); op->server = NULL; afs_put_serverlist(op->net, op->server_list); op->server_list = NULL; start: _debug("start"); /* See if we need to do an update of the volume record. Note that the * volume may have moved or even have been deleted. */ error = afs_check_volume_status(op->volume, op); if (error < 0) goto failed_set_error; if (!afs_start_fs_iteration(op, vnode)) goto failed; _debug("__ VOL %llx __", op->volume->vid); pick_server: _debug("pick [%lx]", op->untried); error = afs_wait_for_fs_probes(op->server_list, op->untried); if (error < 0) goto failed_set_error; /* Pick the untried server with the lowest RTT. If we have outstanding * callbacks, we stick with the server we're already using if we can. */ if (op->server) { _debug("server %u", op->index); if (test_bit(op->index, &op->untried)) goto selected_server; op->server = NULL; _debug("no server"); } op->index = -1; rtt = U32_MAX; for (i = 0; i < op->server_list->nr_servers; i++) { struct afs_server *s = op->server_list->servers[i].server; if (!test_bit(i, &op->untried) || !test_bit(AFS_SERVER_FL_RESPONDING, &s->flags)) continue; if (s->probe.rtt < rtt) { op->index = i; rtt = s->probe.rtt; } } if (op->index == -1) goto no_more_servers; selected_server: _debug("use %d", op->index); __clear_bit(op->index, &op->untried); /* We're starting on a different fileserver from the list. We need to * check it, create a callback intercept, find its address list and * probe its capabilities before we use it. */ ASSERTCMP(op->ac.alist, ==, NULL); server = op->server_list->servers[op->index].server; if (!afs_check_server_record(op, server)) goto failed; _debug("USING SERVER: %pU", &server->uuid); op->flags |= AFS_OPERATION_RETRY_SERVER; op->server = server; if (vnode->cb_server != server) { vnode->cb_server = server; vnode->cb_s_break = server->cb_s_break; vnode->cb_fs_s_break = atomic_read(&server->cell->fs_s_break); vnode->cb_v_break = vnode->volume->cb_v_break; clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags); } read_lock(&server->fs_lock); alist = rcu_dereference_protected(server->addresses, lockdep_is_held(&server->fs_lock)); afs_get_addrlist(alist); read_unlock(&server->fs_lock); retry_server: memset(&op->ac, 0, sizeof(op->ac)); if (!op->ac.alist) op->ac.alist = alist; else afs_put_addrlist(alist); op->ac.index = -1; iterate_address: ASSERT(op->ac.alist); /* Iterate over the current server's address list to try and find an * address on which it will respond to us. */ if (!afs_iterate_addresses(&op->ac)) goto out_of_addresses; _debug("address [%u] %u/%u %pISp", op->index, op->ac.index, op->ac.alist->nr_addrs, &op->ac.alist->addrs[op->ac.index].transport); _leave(" = t"); return true; out_of_addresses: /* We've now had a failure to respond on all of a server's addresses - * immediately probe them again and consider retrying the server. */ afs_probe_fileserver(op->net, op->server); if (op->flags & AFS_OPERATION_RETRY_SERVER) { alist = op->ac.alist; error = afs_wait_for_one_fs_probe( op->server, !(op->flags & AFS_OPERATION_UNINTR)); switch (error) { case 0: op->flags &= ~AFS_OPERATION_RETRY_SERVER; goto retry_server; case -ERESTARTSYS: goto failed_set_error; case -ETIME: case -EDESTADDRREQ: goto next_server; } } next_server: _debug("next"); afs_end_cursor(&op->ac); goto pick_server; no_more_servers: /* That's all the servers poked to no good effect. 
Try again if some * of them were busy. */ if (op->flags & AFS_OPERATION_VBUSY) goto restart_from_beginning; e.error = -EDESTADDRREQ; e.responded = false; for (i = 0; i < op->server_list->nr_servers; i++) { struct afs_server *s = op->server_list->servers[i].server; afs_prioritise_error(&e, READ_ONCE(s->probe.error), s->probe.abort_code); } error = e.error; failed_set_error: op->error = error; failed: op->flags |= AFS_OPERATION_STOP; afs_end_cursor(&op->ac); _leave(" = f [failed %d]", op->error); return false; } /* * Dump cursor state in the case of the error being EDESTADDRREQ. */ void afs_dump_edestaddrreq(const struct afs_operation *op) { static int count; int i; if (!IS_ENABLED(CONFIG_AFS_DEBUG_CURSOR) || count > 3) return; count++; rcu_read_lock(); pr_notice("EDESTADDR occurred\n"); pr_notice("FC: cbb=%x cbb2=%x fl=%x err=%hd\n", op->file[0].cb_break_before, op->file[1].cb_break_before, op->flags, op->error); pr_notice("FC: ut=%lx ix=%d ni=%u\n", op->untried, op->index, op->nr_iterations); if (op->server_list) { const struct afs_server_list *sl = op->server_list; pr_notice("FC: SL nr=%u pr=%u vnov=%hx\n", sl->nr_servers, sl->preferred, sl->vnovol_mask); for (i = 0; i < sl->nr_servers; i++) { const struct afs_server *s = sl->servers[i].server; pr_notice("FC: server fl=%lx av=%u %pU\n", s->flags, s->addr_version, &s->uuid); if (s->addresses) { const struct afs_addr_list *a = rcu_dereference(s->addresses); pr_notice("FC: - av=%u nr=%u/%u/%u pr=%u\n", a->version, a->nr_ipv4, a->nr_addrs, a->max_addrs, a->preferred); pr_notice("FC: - R=%lx F=%lx\n", a->responded, a->failed); if (a == op->ac.alist) pr_notice("FC: - current\n"); } } } pr_notice("AC: t=%lx ax=%u ac=%d er=%d r=%u ni=%u\n", op->ac.tried, op->ac.index, op->ac.abort_code, op->ac.error, op->ac.responded, op->ac.nr_iterations); rcu_read_unlock(); }
linux-master
fs/afs/rotate.c
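The pick_server step in afs_select_fileserver() scans the servers still set in the untried bitmask and chooses the responding one with the lowest probe RTT. The following hypothetical standalone sketch shows just that selection step with invented structures; the real code also handles callback affinity, probing and volume status checks.

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_server {
	bool		responding;
	unsigned int	rtt;
};

/* Pick the lowest-RTT responding server still marked untried, clearing its
 * bit so it is not picked again on the next rotation. */
static int pick_next_server(const struct demo_server *s, int n,
			    unsigned long *untried)
{
	unsigned int best_rtt = UINT_MAX;
	int best = -1, i;

	for (i = 0; i < n; i++) {
		if (!(*untried & (1UL << i)) || !s[i].responding)
			continue;
		if (s[i].rtt < best_rtt) {
			best_rtt = s[i].rtt;
			best = i;
		}
	}
	if (best >= 0)
		*untried &= ~(1UL << best);
	return best;
}

int main(void)
{
	struct demo_server servers[] = {
		{ .responding = true,  .rtt = 900 },
		{ .responding = true,  .rtt = 300 },
		{ .responding = false, .rtt = 0 },
	};
	unsigned long untried = (1UL << 3) - 1;
	int idx;

	while ((idx = pick_next_server(servers, 3, &untried)) >= 0)
		printf("try server %d\n", idx);
	return 0;
}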
// SPDX-License-Identifier: GPL-2.0-or-later /* AFS volume management * * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells ([email protected]) */ #include <linux/kernel.h> #include <linux/slab.h> #include "internal.h" static unsigned __read_mostly afs_volume_record_life = 60 * 60; /* * Insert a volume into a cell. If there's an existing volume record, that is * returned instead with a ref held. */ static struct afs_volume *afs_insert_volume_into_cell(struct afs_cell *cell, struct afs_volume *volume) { struct afs_volume *p; struct rb_node *parent = NULL, **pp; write_seqlock(&cell->volume_lock); pp = &cell->volumes.rb_node; while (*pp) { parent = *pp; p = rb_entry(parent, struct afs_volume, cell_node); if (p->vid < volume->vid) { pp = &(*pp)->rb_left; } else if (p->vid > volume->vid) { pp = &(*pp)->rb_right; } else { volume = afs_get_volume(p, afs_volume_trace_get_cell_insert); goto found; } } rb_link_node_rcu(&volume->cell_node, parent, pp); rb_insert_color(&volume->cell_node, &cell->volumes); hlist_add_head_rcu(&volume->proc_link, &cell->proc_volumes); found: write_sequnlock(&cell->volume_lock); return volume; } static void afs_remove_volume_from_cell(struct afs_volume *volume) { struct afs_cell *cell = volume->cell; if (!hlist_unhashed(&volume->proc_link)) { trace_afs_volume(volume->vid, refcount_read(&cell->ref), afs_volume_trace_remove); write_seqlock(&cell->volume_lock); hlist_del_rcu(&volume->proc_link); rb_erase(&volume->cell_node, &cell->volumes); write_sequnlock(&cell->volume_lock); } } /* * Allocate a volume record and load it up from a vldb record. */ static struct afs_volume *afs_alloc_volume(struct afs_fs_context *params, struct afs_vldb_entry *vldb, unsigned long type_mask) { struct afs_server_list *slist; struct afs_volume *volume; int ret = -ENOMEM; volume = kzalloc(sizeof(struct afs_volume), GFP_KERNEL); if (!volume) goto error_0; volume->vid = vldb->vid[params->type]; volume->update_at = ktime_get_real_seconds() + afs_volume_record_life; volume->cell = afs_get_cell(params->cell, afs_cell_trace_get_vol); volume->type = params->type; volume->type_force = params->force; volume->name_len = vldb->name_len; refcount_set(&volume->ref, 1); INIT_HLIST_NODE(&volume->proc_link); rwlock_init(&volume->servers_lock); rwlock_init(&volume->cb_v_break_lock); memcpy(volume->name, vldb->name, vldb->name_len + 1); slist = afs_alloc_server_list(params->cell, params->key, vldb, type_mask); if (IS_ERR(slist)) { ret = PTR_ERR(slist); goto error_1; } refcount_set(&slist->usage, 1); rcu_assign_pointer(volume->servers, slist); trace_afs_volume(volume->vid, 1, afs_volume_trace_alloc); return volume; error_1: afs_put_cell(volume->cell, afs_cell_trace_put_vol); kfree(volume); error_0: return ERR_PTR(ret); } /* * Look up or allocate a volume record. */ static struct afs_volume *afs_lookup_volume(struct afs_fs_context *params, struct afs_vldb_entry *vldb, unsigned long type_mask) { struct afs_volume *candidate, *volume; candidate = afs_alloc_volume(params, vldb, type_mask); if (IS_ERR(candidate)) return candidate; volume = afs_insert_volume_into_cell(params->cell, candidate); if (volume != candidate) afs_put_volume(params->net, candidate, afs_volume_trace_put_cell_dup); return volume; } /* * Look up a VLDB record for a volume. 
*/ static struct afs_vldb_entry *afs_vl_lookup_vldb(struct afs_cell *cell, struct key *key, const char *volname, size_t volnamesz) { struct afs_vldb_entry *vldb = ERR_PTR(-EDESTADDRREQ); struct afs_vl_cursor vc; int ret; if (!afs_begin_vlserver_operation(&vc, cell, key)) return ERR_PTR(-ERESTARTSYS); while (afs_select_vlserver(&vc)) { vldb = afs_vl_get_entry_by_name_u(&vc, volname, volnamesz); } ret = afs_end_vlserver_operation(&vc); return ret < 0 ? ERR_PTR(ret) : vldb; } /* * Look up a volume in the VL server and create a candidate volume record for * it. * * The volume name can be one of the following: * "%[cell:]volume[.]" R/W volume * "#[cell:]volume[.]" R/O or R/W volume (rwparent=0), * or R/W (rwparent=1) volume * "%[cell:]volume.readonly" R/O volume * "#[cell:]volume.readonly" R/O volume * "%[cell:]volume.backup" Backup volume * "#[cell:]volume.backup" Backup volume * * The cell name is optional, and defaults to the current cell. * * See "The Rules of Mount Point Traversal" in Chapter 5 of the AFS SysAdmin * Guide * - Rule 1: Explicit type suffix forces access of that type or nothing * (no suffix, then use Rule 2 & 3) * - Rule 2: If parent volume is R/O, then mount R/O volume by preference, R/W * if not available * - Rule 3: If parent volume is R/W, then only mount R/W volume unless * explicitly told otherwise */ struct afs_volume *afs_create_volume(struct afs_fs_context *params) { struct afs_vldb_entry *vldb; struct afs_volume *volume; unsigned long type_mask = 1UL << params->type; vldb = afs_vl_lookup_vldb(params->cell, params->key, params->volname, params->volnamesz); if (IS_ERR(vldb)) return ERR_CAST(vldb); if (test_bit(AFS_VLDB_QUERY_ERROR, &vldb->flags)) { volume = ERR_PTR(vldb->error); goto error; } /* Make the final decision on the type we want */ volume = ERR_PTR(-ENOMEDIUM); if (params->force) { if (!(vldb->flags & type_mask)) goto error; } else if (test_bit(AFS_VLDB_HAS_RO, &vldb->flags)) { params->type = AFSVL_ROVOL; } else if (test_bit(AFS_VLDB_HAS_RW, &vldb->flags)) { params->type = AFSVL_RWVOL; } else { goto error; } type_mask = 1UL << params->type; volume = afs_lookup_volume(params, vldb, type_mask); error: kfree(vldb); return volume; } /* * Destroy a volume record */ static void afs_destroy_volume(struct afs_net *net, struct afs_volume *volume) { _enter("%p", volume); #ifdef CONFIG_AFS_FSCACHE ASSERTCMP(volume->cache, ==, NULL); #endif afs_remove_volume_from_cell(volume); afs_put_serverlist(net, rcu_access_pointer(volume->servers)); afs_put_cell(volume->cell, afs_cell_trace_put_vol); trace_afs_volume(volume->vid, refcount_read(&volume->ref), afs_volume_trace_free); kfree_rcu(volume, rcu); _leave(" [destroyed]"); } /* * Get a reference on a volume record. */ struct afs_volume *afs_get_volume(struct afs_volume *volume, enum afs_volume_trace reason) { if (volume) { int r; __refcount_inc(&volume->ref, &r); trace_afs_volume(volume->vid, r + 1, reason); } return volume; } /* * Drop a reference on a volume record. */ void afs_put_volume(struct afs_net *net, struct afs_volume *volume, enum afs_volume_trace reason) { if (volume) { afs_volid_t vid = volume->vid; bool zero; int r; zero = __refcount_dec_and_test(&volume->ref, &r); trace_afs_volume(vid, r - 1, reason); if (zero) afs_destroy_volume(net, volume); } } /* * Activate a volume. 
*/ int afs_activate_volume(struct afs_volume *volume) { #ifdef CONFIG_AFS_FSCACHE struct fscache_volume *vcookie; char *name; name = kasprintf(GFP_KERNEL, "afs,%s,%llx", volume->cell->name, volume->vid); if (!name) return -ENOMEM; vcookie = fscache_acquire_volume(name, NULL, NULL, 0); if (IS_ERR(vcookie)) { if (vcookie != ERR_PTR(-EBUSY)) { kfree(name); return PTR_ERR(vcookie); } pr_err("AFS: Cache volume key already in use (%s)\n", name); vcookie = NULL; } volume->cache = vcookie; kfree(name); #endif return 0; } /* * Deactivate a volume. */ void afs_deactivate_volume(struct afs_volume *volume) { _enter("%s", volume->name); #ifdef CONFIG_AFS_FSCACHE fscache_relinquish_volume(volume->cache, NULL, test_bit(AFS_VOLUME_DELETED, &volume->flags)); volume->cache = NULL; #endif _leave(""); } /* * Query the VL service to update the volume status. */ static int afs_update_volume_status(struct afs_volume *volume, struct key *key) { struct afs_server_list *new, *old, *discard; struct afs_vldb_entry *vldb; char idbuf[16]; int ret, idsz; _enter(""); /* We look up an ID by passing it as a decimal string in the * operation's name parameter. */ idsz = sprintf(idbuf, "%llu", volume->vid); vldb = afs_vl_lookup_vldb(volume->cell, key, idbuf, idsz); if (IS_ERR(vldb)) { ret = PTR_ERR(vldb); goto error; } /* See if the volume got renamed. */ if (vldb->name_len != volume->name_len || memcmp(vldb->name, volume->name, vldb->name_len) != 0) { /* TODO: Use RCU'd string. */ memcpy(volume->name, vldb->name, AFS_MAXVOLNAME); volume->name_len = vldb->name_len; } /* See if the volume's server list got updated. */ new = afs_alloc_server_list(volume->cell, key, vldb, (1 << volume->type)); if (IS_ERR(new)) { ret = PTR_ERR(new); goto error_vldb; } write_lock(&volume->servers_lock); discard = new; old = rcu_dereference_protected(volume->servers, lockdep_is_held(&volume->servers_lock)); if (afs_annotate_server_list(new, old)) { new->seq = volume->servers_seq + 1; rcu_assign_pointer(volume->servers, new); smp_wmb(); volume->servers_seq++; discard = old; } volume->update_at = ktime_get_real_seconds() + afs_volume_record_life; write_unlock(&volume->servers_lock); ret = 0; afs_put_serverlist(volume->cell->net, discard); error_vldb: kfree(vldb); error: _leave(" = %d", ret); return ret; } /* * Make sure the volume record is up to date. */ int afs_check_volume_status(struct afs_volume *volume, struct afs_operation *op) { int ret, retries = 0; _enter(""); retry: if (test_bit(AFS_VOLUME_WAIT, &volume->flags)) goto wait; if (volume->update_at <= ktime_get_real_seconds() || test_bit(AFS_VOLUME_NEEDS_UPDATE, &volume->flags)) goto update; _leave(" = 0"); return 0; update: if (!test_and_set_bit_lock(AFS_VOLUME_UPDATING, &volume->flags)) { clear_bit(AFS_VOLUME_NEEDS_UPDATE, &volume->flags); ret = afs_update_volume_status(volume, op->key); if (ret < 0) set_bit(AFS_VOLUME_NEEDS_UPDATE, &volume->flags); clear_bit_unlock(AFS_VOLUME_WAIT, &volume->flags); clear_bit_unlock(AFS_VOLUME_UPDATING, &volume->flags); wake_up_bit(&volume->flags, AFS_VOLUME_WAIT); _leave(" = %d", ret); return ret; } wait: if (!test_bit(AFS_VOLUME_WAIT, &volume->flags)) { _leave(" = 0 [no wait]"); return 0; } ret = wait_on_bit(&volume->flags, AFS_VOLUME_WAIT, (op->flags & AFS_OPERATION_UNINTR) ? TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE); if (ret == -ERESTARTSYS) { _leave(" = %d", ret); return ret; } retries++; if (retries == 4) { _leave(" = -ESTALE"); return -ESTALE; } goto retry; }
linux-master
fs/afs/volume.c
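afs_create_volume() decides which volume instance to mount: an explicitly forced type must exist in the VLDB entry, otherwise the read-only instance is preferred with read/write as the fallback. A hypothetical sketch of that decision follows; the type names and flags structure here are invented, the kernel uses AFSVL_* constants and AFS_VLDB_HAS_* bits.

#include <stdbool.h>
#include <stdio.h>

enum vol_type { VOL_RW, VOL_RO, VOL_BACKUP };

struct vldb_caps {
	bool has_rw, has_ro, has_backup;
};

/* Returns the chosen type, or -1 (the kernel returns -ENOMEDIUM) if the
 * requested or preferred instance is not available. */
static int choose_type(const struct vldb_caps *caps, bool force,
		       enum vol_type wanted)
{
	if (force) {
		switch (wanted) {
		case VOL_RW:	 return caps->has_rw ? VOL_RW : -1;
		case VOL_RO:	 return caps->has_ro ? VOL_RO : -1;
		case VOL_BACKUP: return caps->has_backup ? VOL_BACKUP : -1;
		}
		return -1;
	}
	if (caps->has_ro)
		return VOL_RO;		/* prefer the R/O instance */
	if (caps->has_rw)
		return VOL_RW;		/* fall back to R/W */
	return -1;
}

int main(void)
{
	struct vldb_caps caps = { .has_rw = true, .has_ro = true };

	printf("unforced  -> %d (RO preferred)\n", choose_type(&caps, false, VOL_RW));
	printf("forced RW -> %d\n", choose_type(&caps, true, VOL_RW));
	return 0;
}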
// SPDX-License-Identifier: GPL-2.0-or-later /* Fileserver-directed operation handling. * * Copyright (C) 2020 Red Hat, Inc. All Rights Reserved. * Written by David Howells ([email protected]) */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/fs.h> #include "internal.h" static atomic_t afs_operation_debug_counter; /* * Create an operation against a volume. */ struct afs_operation *afs_alloc_operation(struct key *key, struct afs_volume *volume) { struct afs_operation *op; _enter(""); op = kzalloc(sizeof(*op), GFP_KERNEL); if (!op) return ERR_PTR(-ENOMEM); if (!key) { key = afs_request_key(volume->cell); if (IS_ERR(key)) { kfree(op); return ERR_CAST(key); } } else { key_get(key); } op->key = key; op->volume = afs_get_volume(volume, afs_volume_trace_get_new_op); op->net = volume->cell->net; op->cb_v_break = volume->cb_v_break; op->debug_id = atomic_inc_return(&afs_operation_debug_counter); op->error = -EDESTADDRREQ; op->ac.error = SHRT_MAX; _leave(" = [op=%08x]", op->debug_id); return op; } /* * Lock the vnode(s) being operated upon. */ static bool afs_get_io_locks(struct afs_operation *op) { struct afs_vnode *vnode = op->file[0].vnode; struct afs_vnode *vnode2 = op->file[1].vnode; _enter(""); if (op->flags & AFS_OPERATION_UNINTR) { mutex_lock(&vnode->io_lock); op->flags |= AFS_OPERATION_LOCK_0; _leave(" = t [1]"); return true; } if (!vnode2 || !op->file[1].need_io_lock || vnode == vnode2) vnode2 = NULL; if (vnode2 > vnode) swap(vnode, vnode2); if (mutex_lock_interruptible(&vnode->io_lock) < 0) { op->error = -ERESTARTSYS; op->flags |= AFS_OPERATION_STOP; _leave(" = f [I 0]"); return false; } op->flags |= AFS_OPERATION_LOCK_0; if (vnode2) { if (mutex_lock_interruptible_nested(&vnode2->io_lock, 1) < 0) { op->error = -ERESTARTSYS; op->flags |= AFS_OPERATION_STOP; mutex_unlock(&vnode->io_lock); op->flags &= ~AFS_OPERATION_LOCK_0; _leave(" = f [I 1]"); return false; } op->flags |= AFS_OPERATION_LOCK_1; } _leave(" = t [2]"); return true; } static void afs_drop_io_locks(struct afs_operation *op) { struct afs_vnode *vnode = op->file[0].vnode; struct afs_vnode *vnode2 = op->file[1].vnode; _enter(""); if (op->flags & AFS_OPERATION_LOCK_1) mutex_unlock(&vnode2->io_lock); if (op->flags & AFS_OPERATION_LOCK_0) mutex_unlock(&vnode->io_lock); } static void afs_prepare_vnode(struct afs_operation *op, struct afs_vnode_param *vp, unsigned int index) { struct afs_vnode *vnode = vp->vnode; if (vnode) { vp->fid = vnode->fid; vp->dv_before = vnode->status.data_version; vp->cb_break_before = afs_calc_vnode_cb_break(vnode); if (vnode->lock_state != AFS_VNODE_LOCK_NONE) op->flags |= AFS_OPERATION_CUR_ONLY; if (vp->modification) set_bit(AFS_VNODE_MODIFYING, &vnode->flags); } if (vp->fid.vnode) _debug("PREP[%u] {%llx:%llu.%u}", index, vp->fid.vid, vp->fid.vnode, vp->fid.unique); } /* * Begin an operation on the fileserver. * * Fileserver operations are serialised on the server by vnode, so we serialise * them here also using the io_lock. */ bool afs_begin_vnode_operation(struct afs_operation *op) { struct afs_vnode *vnode = op->file[0].vnode; ASSERT(vnode); _enter(""); if (op->file[0].need_io_lock) if (!afs_get_io_locks(op)) return false; afs_prepare_vnode(op, &op->file[0], 0); afs_prepare_vnode(op, &op->file[1], 1); op->cb_v_break = op->volume->cb_v_break; _leave(" = true"); return true; } /* * Tidy up a filesystem cursor and unlock the vnode. 
*/ static void afs_end_vnode_operation(struct afs_operation *op) { _enter(""); if (op->error == -EDESTADDRREQ || op->error == -EADDRNOTAVAIL || op->error == -ENETUNREACH || op->error == -EHOSTUNREACH) afs_dump_edestaddrreq(op); afs_drop_io_locks(op); if (op->error == -ECONNABORTED) op->error = afs_abort_to_error(op->ac.abort_code); } /* * Wait for an in-progress operation to complete. */ void afs_wait_for_operation(struct afs_operation *op) { _enter(""); while (afs_select_fileserver(op)) { op->cb_s_break = op->server->cb_s_break; if (test_bit(AFS_SERVER_FL_IS_YFS, &op->server->flags) && op->ops->issue_yfs_rpc) op->ops->issue_yfs_rpc(op); else if (op->ops->issue_afs_rpc) op->ops->issue_afs_rpc(op); else op->ac.error = -ENOTSUPP; if (op->call) op->error = afs_wait_for_call_to_complete(op->call, &op->ac); } switch (op->error) { case 0: _debug("success"); op->ops->success(op); break; case -ECONNABORTED: if (op->ops->aborted) op->ops->aborted(op); fallthrough; default: if (op->ops->failed) op->ops->failed(op); break; } afs_end_vnode_operation(op); if (op->error == 0 && op->ops->edit_dir) { _debug("edit_dir"); op->ops->edit_dir(op); } _leave(""); } /* * Dispose of an operation. */ int afs_put_operation(struct afs_operation *op) { int i, ret = op->error; _enter("op=%08x,%d", op->debug_id, ret); if (op->ops && op->ops->put) op->ops->put(op); if (op->file[0].modification) clear_bit(AFS_VNODE_MODIFYING, &op->file[0].vnode->flags); if (op->file[1].modification && op->file[1].vnode != op->file[0].vnode) clear_bit(AFS_VNODE_MODIFYING, &op->file[1].vnode->flags); if (op->file[0].put_vnode) iput(&op->file[0].vnode->netfs.inode); if (op->file[1].put_vnode) iput(&op->file[1].vnode->netfs.inode); if (op->more_files) { for (i = 0; i < op->nr_files - 2; i++) if (op->more_files[i].put_vnode) iput(&op->more_files[i].vnode->netfs.inode); kfree(op->more_files); } afs_end_cursor(&op->ac); afs_put_serverlist(op->net, op->server_list); afs_put_volume(op->net, op->volume, afs_volume_trace_put_put_op); key_put(op->key); kfree(op); return ret; } int afs_do_sync_operation(struct afs_operation *op) { afs_begin_vnode_operation(op); afs_wait_for_operation(op); return afs_put_operation(op); }
linux-master
fs/afs/fs_operation.c
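A minimal sketch of how the lifecycle in fs/afs/fs_operation.c above is meant to be driven by a caller. This is not a function from the kernel tree: "example_sync_op" and "example_operation" are placeholder names, and the field assignments simply mirror the real callers (afs_mkdir(), afs_unlink()) that appear in fs/afs/dir.c further down in this dump; it assumes the fs/afs private headers.

/*
 * Illustrative caller of the operation API: allocate an op against the
 * directory's volume, attach the vnode as file[0], point op->ops at an
 * operation table, then let afs_do_sync_operation() run begin/wait/put
 * in sequence.
 */
#include "internal.h"	/* fs/afs private header: afs_operation, afs_vnode, ... */

static int example_sync_op(struct afs_vnode *dvnode, struct key *key,
			   const struct afs_operation_ops *example_operation)
{
	struct afs_operation *op;

	op = afs_alloc_operation(key, dvnode->volume);
	if (IS_ERR(op))
		return PTR_ERR(op);

	afs_op_set_vnode(op, 0, dvnode);	/* file[0] is the directory being changed */
	op->file[0].dv_delta = 1;		/* expect one data-version bump on success */
	op->file[0].modification = true;	/* sets/clears AFS_VNODE_MODIFYING around the op */
	op->ops = example_operation;		/* issue_*_rpc/success/edit_dir/put callbacks */

	/* Locks the vnode, issues the RPC, waits, then puts the op and returns op->error. */
	return afs_do_sync_operation(op);
}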
// SPDX-License-Identifier: GPL-2.0-or-later /* Server address list management * * Copyright (C) 2017 Red Hat, Inc. All Rights Reserved. * Written by David Howells ([email protected]) */ #include <linux/slab.h> #include <linux/ctype.h> #include <linux/dns_resolver.h> #include <linux/inet.h> #include <keys/rxrpc-type.h> #include "internal.h" #include "afs_fs.h" /* * Release an address list. */ void afs_put_addrlist(struct afs_addr_list *alist) { if (alist && refcount_dec_and_test(&alist->usage)) kfree_rcu(alist, rcu); } /* * Allocate an address list. */ struct afs_addr_list *afs_alloc_addrlist(unsigned int nr, unsigned short service, unsigned short port) { struct afs_addr_list *alist; unsigned int i; _enter("%u,%u,%u", nr, service, port); if (nr > AFS_MAX_ADDRESSES) nr = AFS_MAX_ADDRESSES; alist = kzalloc(struct_size(alist, addrs, nr), GFP_KERNEL); if (!alist) return NULL; refcount_set(&alist->usage, 1); alist->max_addrs = nr; for (i = 0; i < nr; i++) { struct sockaddr_rxrpc *srx = &alist->addrs[i]; srx->srx_family = AF_RXRPC; srx->srx_service = service; srx->transport_type = SOCK_DGRAM; srx->transport_len = sizeof(srx->transport.sin6); srx->transport.sin6.sin6_family = AF_INET6; srx->transport.sin6.sin6_port = htons(port); } return alist; } /* * Parse a text string consisting of delimited addresses. */ struct afs_vlserver_list *afs_parse_text_addrs(struct afs_net *net, const char *text, size_t len, char delim, unsigned short service, unsigned short port) { struct afs_vlserver_list *vllist; struct afs_addr_list *alist; const char *p, *end = text + len; const char *problem; unsigned int nr = 0; int ret = -ENOMEM; _enter("%*.*s,%c", (int)len, (int)len, text, delim); if (!len) { _leave(" = -EDESTADDRREQ [empty]"); return ERR_PTR(-EDESTADDRREQ); } if (delim == ':' && (memchr(text, ',', len) || !memchr(text, '.', len))) delim = ','; /* Count the addresses */ p = text; do { if (!*p) { problem = "nul"; goto inval; } if (*p == delim) continue; nr++; if (*p == '[') { p++; if (p == end) { problem = "brace1"; goto inval; } p = memchr(p, ']', end - p); if (!p) { problem = "brace2"; goto inval; } p++; if (p >= end) break; } p = memchr(p, delim, end - p); if (!p) break; p++; } while (p < end); _debug("%u/%u addresses", nr, AFS_MAX_ADDRESSES); vllist = afs_alloc_vlserver_list(1); if (!vllist) return ERR_PTR(-ENOMEM); vllist->nr_servers = 1; vllist->servers[0].server = afs_alloc_vlserver("<dummy>", 7, AFS_VL_PORT); if (!vllist->servers[0].server) goto error_vl; alist = afs_alloc_addrlist(nr, service, AFS_VL_PORT); if (!alist) goto error; /* Extract the addresses */ p = text; do { const char *q, *stop; unsigned int xport = port; __be32 x[4]; int family; if (*p == delim) { p++; continue; } if (*p == '[') { p++; q = memchr(p, ']', end - p); } else { for (q = p; q < end; q++) if (*q == '+' || *q == delim) break; } if (in4_pton(p, q - p, (u8 *)&x[0], -1, &stop)) { family = AF_INET; } else if (in6_pton(p, q - p, (u8 *)x, -1, &stop)) { family = AF_INET6; } else { problem = "family"; goto bad_address; } p = q; if (stop != p) { problem = "nostop"; goto bad_address; } if (q < end && *q == ']') p++; if (p < end) { if (*p == '+') { /* Port number specification "+1234" */ xport = 0; p++; if (p >= end || !isdigit(*p)) { problem = "port"; goto bad_address; } do { xport *= 10; xport += *p - '0'; if (xport > 65535) { problem = "pval"; goto bad_address; } p++; } while (p < end && isdigit(*p)); } else if (*p == delim) { p++; } else { problem = "weird"; goto bad_address; } } if (family == AF_INET) afs_merge_fs_addr4(alist, 
x[0], xport); else afs_merge_fs_addr6(alist, x, xport); } while (p < end); rcu_assign_pointer(vllist->servers[0].server->addresses, alist); _leave(" = [nr %u]", alist->nr_addrs); return vllist; inval: _leave(" = -EINVAL [%s %zu %*.*s]", problem, p - text, (int)len, (int)len, text); return ERR_PTR(-EINVAL); bad_address: _leave(" = -EINVAL [%s %zu %*.*s]", problem, p - text, (int)len, (int)len, text); ret = -EINVAL; error: afs_put_addrlist(alist); error_vl: afs_put_vlserverlist(net, vllist); return ERR_PTR(ret); } /* * Compare old and new address lists to see if there's been any change. * - How to do this in better than O(Nlog(N)) time? * - We don't really want to sort the address list, but would rather take the * list as we got it so as not to undo record rotation by the DNS server. */ #if 0 static int afs_cmp_addr_list(const struct afs_addr_list *a1, const struct afs_addr_list *a2) { } #endif /* * Perform a DNS query for VL servers and build a up an address list. */ struct afs_vlserver_list *afs_dns_query(struct afs_cell *cell, time64_t *_expiry) { struct afs_vlserver_list *vllist; char *result = NULL; int ret; _enter("%s", cell->name); ret = dns_query(cell->net->net, "afsdb", cell->name, cell->name_len, "srv=1", &result, _expiry, true); if (ret < 0) { _leave(" = %d [dns]", ret); return ERR_PTR(ret); } if (*_expiry == 0) *_expiry = ktime_get_real_seconds() + 60; if (ret > 1 && result[0] == 0) vllist = afs_extract_vlserver_list(cell, result, ret); else vllist = afs_parse_text_addrs(cell->net, result, ret, ',', VL_SERVICE, AFS_VL_PORT); kfree(result); if (IS_ERR(vllist) && vllist != ERR_PTR(-ENOMEM)) pr_err("Failed to parse DNS data %ld\n", PTR_ERR(vllist)); return vllist; } /* * Merge an IPv4 entry into a fileserver address list. */ void afs_merge_fs_addr4(struct afs_addr_list *alist, __be32 xdr, u16 port) { struct sockaddr_rxrpc *srx; u32 addr = ntohl(xdr); int i; if (alist->nr_addrs >= alist->max_addrs) return; for (i = 0; i < alist->nr_ipv4; i++) { struct sockaddr_in *a = &alist->addrs[i].transport.sin; u32 a_addr = ntohl(a->sin_addr.s_addr); u16 a_port = ntohs(a->sin_port); if (addr == a_addr && port == a_port) return; if (addr == a_addr && port < a_port) break; if (addr < a_addr) break; } if (i < alist->nr_addrs) memmove(alist->addrs + i + 1, alist->addrs + i, sizeof(alist->addrs[0]) * (alist->nr_addrs - i)); srx = &alist->addrs[i]; srx->srx_family = AF_RXRPC; srx->transport_type = SOCK_DGRAM; srx->transport_len = sizeof(srx->transport.sin); srx->transport.sin.sin_family = AF_INET; srx->transport.sin.sin_port = htons(port); srx->transport.sin.sin_addr.s_addr = xdr; alist->nr_ipv4++; alist->nr_addrs++; } /* * Merge an IPv6 entry into a fileserver address list. 
*/ void afs_merge_fs_addr6(struct afs_addr_list *alist, __be32 *xdr, u16 port) { struct sockaddr_rxrpc *srx; int i, diff; if (alist->nr_addrs >= alist->max_addrs) return; for (i = alist->nr_ipv4; i < alist->nr_addrs; i++) { struct sockaddr_in6 *a = &alist->addrs[i].transport.sin6; u16 a_port = ntohs(a->sin6_port); diff = memcmp(xdr, &a->sin6_addr, 16); if (diff == 0 && port == a_port) return; if (diff == 0 && port < a_port) break; if (diff < 0) break; } if (i < alist->nr_addrs) memmove(alist->addrs + i + 1, alist->addrs + i, sizeof(alist->addrs[0]) * (alist->nr_addrs - i)); srx = &alist->addrs[i]; srx->srx_family = AF_RXRPC; srx->transport_type = SOCK_DGRAM; srx->transport_len = sizeof(srx->transport.sin6); srx->transport.sin6.sin6_family = AF_INET6; srx->transport.sin6.sin6_port = htons(port); memcpy(&srx->transport.sin6.sin6_addr, xdr, 16); alist->nr_addrs++; } /* * Get an address to try. */ bool afs_iterate_addresses(struct afs_addr_cursor *ac) { unsigned long set, failed; int index; if (!ac->alist) return false; set = ac->alist->responded; failed = ac->alist->failed; _enter("%lx-%lx-%lx,%d", set, failed, ac->tried, ac->index); ac->nr_iterations++; set &= ~(failed | ac->tried); if (!set) return false; index = READ_ONCE(ac->alist->preferred); if (test_bit(index, &set)) goto selected; index = __ffs(set); selected: ac->index = index; set_bit(index, &ac->tried); ac->responded = false; return true; } /* * Release an address list cursor. */ int afs_end_cursor(struct afs_addr_cursor *ac) { struct afs_addr_list *alist; alist = ac->alist; if (alist) { if (ac->responded && ac->index != alist->preferred && test_bit(ac->alist->preferred, &ac->tried)) WRITE_ONCE(alist->preferred, ac->index); afs_put_addrlist(alist); ac->alist = NULL; } return ac->error; }
linux-master
fs/afs/addr_list.c
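The "+port" suffix handling inside afs_parse_text_addrs() above is easy to get wrong, so here is a stand-alone user-space analog of just that part: accumulate decimal digits after a '+', rejecting an empty suffix and anything over 65535, as the "port"/"pval" error paths do in the kernel function. parse_port_suffix() is a hypothetical helper written for illustration, not a kernel symbol.

#include <ctype.h>
#include <stdio.h>

/* Parse a "+NNNN" port suffix in [p, end); return 0 and store the port, or -1. */
static int parse_port_suffix(const char *p, const char *end, unsigned int *_port)
{
	unsigned int xport = 0;

	if (p >= end || *p != '+')
		return -1;			/* no suffix present */
	p++;
	if (p >= end || !isdigit((unsigned char)*p))
		return -1;			/* "port": '+' not followed by a digit */
	do {
		xport *= 10;
		xport += *p - '0';
		if (xport > 65535)
			return -1;		/* "pval": port out of range */
		p++;
	} while (p < end && isdigit((unsigned char)*p));

	*_port = xport;
	return 0;
}

int main(void)
{
	const char text[] = "+7001";
	unsigned int port;

	if (parse_port_suffix(text, text + sizeof(text) - 1, &port) == 0)
		printf("port %u\n", port);	/* prints "port 7001" */
	return 0;
}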
// SPDX-License-Identifier: GPL-2.0-or-later /* dir.c: AFS filesystem directory handling * * Copyright (C) 2002, 2018 Red Hat, Inc. All Rights Reserved. * Written by David Howells ([email protected]) */ #include <linux/kernel.h> #include <linux/fs.h> #include <linux/namei.h> #include <linux/pagemap.h> #include <linux/swap.h> #include <linux/ctype.h> #include <linux/sched.h> #include <linux/task_io_accounting_ops.h> #include "internal.h" #include "afs_fs.h" #include "xdr_fs.h" static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags); static int afs_dir_open(struct inode *inode, struct file *file); static int afs_readdir(struct file *file, struct dir_context *ctx); static int afs_d_revalidate(struct dentry *dentry, unsigned int flags); static int afs_d_delete(const struct dentry *dentry); static void afs_d_iput(struct dentry *dentry, struct inode *inode); static bool afs_lookup_one_filldir(struct dir_context *ctx, const char *name, int nlen, loff_t fpos, u64 ino, unsigned dtype); static bool afs_lookup_filldir(struct dir_context *ctx, const char *name, int nlen, loff_t fpos, u64 ino, unsigned dtype); static int afs_create(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode, bool excl); static int afs_mkdir(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode); static int afs_rmdir(struct inode *dir, struct dentry *dentry); static int afs_unlink(struct inode *dir, struct dentry *dentry); static int afs_link(struct dentry *from, struct inode *dir, struct dentry *dentry); static int afs_symlink(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, const char *content); static int afs_rename(struct mnt_idmap *idmap, struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags); static bool afs_dir_release_folio(struct folio *folio, gfp_t gfp_flags); static void afs_dir_invalidate_folio(struct folio *folio, size_t offset, size_t length); static bool afs_dir_dirty_folio(struct address_space *mapping, struct folio *folio) { BUG(); /* This should never happen. */ } const struct file_operations afs_dir_file_operations = { .open = afs_dir_open, .release = afs_release, .iterate_shared = afs_readdir, .lock = afs_lock, .llseek = generic_file_llseek, }; const struct inode_operations afs_dir_inode_operations = { .create = afs_create, .lookup = afs_lookup, .link = afs_link, .unlink = afs_unlink, .symlink = afs_symlink, .mkdir = afs_mkdir, .rmdir = afs_rmdir, .rename = afs_rename, .permission = afs_permission, .getattr = afs_getattr, .setattr = afs_setattr, }; const struct address_space_operations afs_dir_aops = { .dirty_folio = afs_dir_dirty_folio, .release_folio = afs_dir_release_folio, .invalidate_folio = afs_dir_invalidate_folio, .migrate_folio = filemap_migrate_folio, }; const struct dentry_operations afs_fs_dentry_operations = { .d_revalidate = afs_d_revalidate, .d_delete = afs_d_delete, .d_release = afs_d_release, .d_automount = afs_d_automount, .d_iput = afs_d_iput, }; struct afs_lookup_one_cookie { struct dir_context ctx; struct qstr name; bool found; struct afs_fid fid; }; struct afs_lookup_cookie { struct dir_context ctx; struct qstr name; bool found; bool one_only; unsigned short nr_fids; struct afs_fid fids[50]; }; /* * Drop the refs that we're holding on the folios we were reading into. We've * got refs on the first nr_pages pages. 
*/ static void afs_dir_read_cleanup(struct afs_read *req) { struct address_space *mapping = req->vnode->netfs.inode.i_mapping; struct folio *folio; pgoff_t last = req->nr_pages - 1; XA_STATE(xas, &mapping->i_pages, 0); if (unlikely(!req->nr_pages)) return; rcu_read_lock(); xas_for_each(&xas, folio, last) { if (xas_retry(&xas, folio)) continue; BUG_ON(xa_is_value(folio)); ASSERTCMP(folio_file_mapping(folio), ==, mapping); folio_put(folio); } rcu_read_unlock(); } /* * check that a directory folio is valid */ static bool afs_dir_check_folio(struct afs_vnode *dvnode, struct folio *folio, loff_t i_size) { union afs_xdr_dir_block *block; size_t offset, size; loff_t pos; /* Determine how many magic numbers there should be in this folio, but * we must take care because the directory may change size under us. */ pos = folio_pos(folio); if (i_size <= pos) goto checked; size = min_t(loff_t, folio_size(folio), i_size - pos); for (offset = 0; offset < size; offset += sizeof(*block)) { block = kmap_local_folio(folio, offset); if (block->hdr.magic != AFS_DIR_MAGIC) { printk("kAFS: %s(%lx): [%llx] bad magic %zx/%zx is %04hx\n", __func__, dvnode->netfs.inode.i_ino, pos, offset, size, ntohs(block->hdr.magic)); trace_afs_dir_check_failed(dvnode, pos + offset, i_size); kunmap_local(block); trace_afs_file_error(dvnode, -EIO, afs_file_error_dir_bad_magic); goto error; } /* Make sure each block is NUL terminated so we can reasonably * use string functions on it. The filenames in the folio * *should* be NUL-terminated anyway. */ ((u8 *)block)[AFS_DIR_BLOCK_SIZE - 1] = 0; kunmap_local(block); } checked: afs_stat_v(dvnode, n_read_dir); return true; error: return false; } /* * Dump the contents of a directory. */ static void afs_dir_dump(struct afs_vnode *dvnode, struct afs_read *req) { union afs_xdr_dir_block *block; struct address_space *mapping = dvnode->netfs.inode.i_mapping; struct folio *folio; pgoff_t last = req->nr_pages - 1; size_t offset, size; XA_STATE(xas, &mapping->i_pages, 0); pr_warn("DIR %llx:%llx f=%llx l=%llx al=%llx\n", dvnode->fid.vid, dvnode->fid.vnode, req->file_size, req->len, req->actual_len); pr_warn("DIR %llx %x %zx %zx\n", req->pos, req->nr_pages, req->iter->iov_offset, iov_iter_count(req->iter)); xas_for_each(&xas, folio, last) { if (xas_retry(&xas, folio)) continue; BUG_ON(folio_file_mapping(folio) != mapping); size = min_t(loff_t, folio_size(folio), req->actual_len - folio_pos(folio)); for (offset = 0; offset < size; offset += sizeof(*block)) { block = kmap_local_folio(folio, offset); pr_warn("[%02lx] %32phN\n", folio_index(folio) + offset, block); kunmap_local(block); } } } /* * Check all the blocks in a directory. All the folios are held pinned. 
*/ static int afs_dir_check(struct afs_vnode *dvnode, struct afs_read *req) { struct address_space *mapping = dvnode->netfs.inode.i_mapping; struct folio *folio; pgoff_t last = req->nr_pages - 1; int ret = 0; XA_STATE(xas, &mapping->i_pages, 0); if (unlikely(!req->nr_pages)) return 0; rcu_read_lock(); xas_for_each(&xas, folio, last) { if (xas_retry(&xas, folio)) continue; BUG_ON(folio_file_mapping(folio) != mapping); if (!afs_dir_check_folio(dvnode, folio, req->actual_len)) { afs_dir_dump(dvnode, req); ret = -EIO; break; } } rcu_read_unlock(); return ret; } /* * open an AFS directory file */ static int afs_dir_open(struct inode *inode, struct file *file) { _enter("{%lu}", inode->i_ino); BUILD_BUG_ON(sizeof(union afs_xdr_dir_block) != 2048); BUILD_BUG_ON(sizeof(union afs_xdr_dirent) != 32); if (test_bit(AFS_VNODE_DELETED, &AFS_FS_I(inode)->flags)) return -ENOENT; return afs_open(inode, file); } /* * Read the directory into the pagecache in one go, scrubbing the previous * contents. The list of folios is returned, pinning them so that they don't * get reclaimed during the iteration. */ static struct afs_read *afs_read_dir(struct afs_vnode *dvnode, struct key *key) __acquires(&dvnode->validate_lock) { struct address_space *mapping = dvnode->netfs.inode.i_mapping; struct afs_read *req; loff_t i_size; int nr_pages, i; int ret; loff_t remote_size = 0; _enter(""); req = kzalloc(sizeof(*req), GFP_KERNEL); if (!req) return ERR_PTR(-ENOMEM); refcount_set(&req->usage, 1); req->vnode = dvnode; req->key = key_get(key); req->cleanup = afs_dir_read_cleanup; expand: i_size = i_size_read(&dvnode->netfs.inode); if (i_size < remote_size) i_size = remote_size; if (i_size < 2048) { ret = afs_bad(dvnode, afs_file_error_dir_small); goto error; } if (i_size > 2048 * 1024) { trace_afs_file_error(dvnode, -EFBIG, afs_file_error_dir_big); ret = -EFBIG; goto error; } _enter("%llu", i_size); nr_pages = (i_size + PAGE_SIZE - 1) / PAGE_SIZE; req->actual_len = i_size; /* May change */ req->len = nr_pages * PAGE_SIZE; /* We can ask for more than there is */ req->data_version = dvnode->status.data_version; /* May change */ iov_iter_xarray(&req->def_iter, ITER_DEST, &dvnode->netfs.inode.i_mapping->i_pages, 0, i_size); req->iter = &req->def_iter; /* Fill in any gaps that we might find where the memory reclaimer has * been at work and pin all the folios. If there are any gaps, we will * need to reread the entire directory contents. */ i = req->nr_pages; while (i < nr_pages) { struct folio *folio; folio = filemap_get_folio(mapping, i); if (IS_ERR(folio)) { if (test_and_clear_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) afs_stat_v(dvnode, n_inval); folio = __filemap_get_folio(mapping, i, FGP_LOCK | FGP_CREAT, mapping->gfp_mask); if (IS_ERR(folio)) { ret = PTR_ERR(folio); goto error; } folio_attach_private(folio, (void *)1); folio_unlock(folio); } req->nr_pages += folio_nr_pages(folio); i += folio_nr_pages(folio); } /* If we're going to reload, we need to lock all the pages to prevent * races. */ ret = -ERESTARTSYS; if (down_read_killable(&dvnode->validate_lock) < 0) goto error; if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) goto success; up_read(&dvnode->validate_lock); if (down_write_killable(&dvnode->validate_lock) < 0) goto error; if (!test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) { trace_afs_reload_dir(dvnode); ret = afs_fetch_data(dvnode, req); if (ret < 0) goto error_unlock; task_io_account_read(PAGE_SIZE * req->nr_pages); if (req->len < req->file_size) { /* The content has grown, so we need to expand the * buffer. 
*/ up_write(&dvnode->validate_lock); remote_size = req->file_size; goto expand; } /* Validate the data we just read. */ ret = afs_dir_check(dvnode, req); if (ret < 0) goto error_unlock; // TODO: Trim excess pages set_bit(AFS_VNODE_DIR_VALID, &dvnode->flags); } downgrade_write(&dvnode->validate_lock); success: return req; error_unlock: up_write(&dvnode->validate_lock); error: afs_put_read(req); _leave(" = %d", ret); return ERR_PTR(ret); } /* * deal with one block in an AFS directory */ static int afs_dir_iterate_block(struct afs_vnode *dvnode, struct dir_context *ctx, union afs_xdr_dir_block *block, unsigned blkoff) { union afs_xdr_dirent *dire; unsigned offset, next, curr, nr_slots; size_t nlen; int tmp; _enter("%llx,%x", ctx->pos, blkoff); curr = (ctx->pos - blkoff) / sizeof(union afs_xdr_dirent); /* walk through the block, an entry at a time */ for (offset = (blkoff == 0 ? AFS_DIR_RESV_BLOCKS0 : AFS_DIR_RESV_BLOCKS); offset < AFS_DIR_SLOTS_PER_BLOCK; offset = next ) { /* skip entries marked unused in the bitmap */ if (!(block->hdr.bitmap[offset / 8] & (1 << (offset % 8)))) { _debug("ENT[%zu.%u]: unused", blkoff / sizeof(union afs_xdr_dir_block), offset); next = offset + 1; if (offset >= curr) ctx->pos = blkoff + next * sizeof(union afs_xdr_dirent); continue; } /* got a valid entry */ dire = &block->dirents[offset]; nlen = strnlen(dire->u.name, sizeof(*block) - offset * sizeof(union afs_xdr_dirent)); if (nlen > AFSNAMEMAX - 1) { _debug("ENT[%zu]: name too long (len %u/%zu)", blkoff / sizeof(union afs_xdr_dir_block), offset, nlen); return afs_bad(dvnode, afs_file_error_dir_name_too_long); } _debug("ENT[%zu.%u]: %s %zu \"%s\"", blkoff / sizeof(union afs_xdr_dir_block), offset, (offset < curr ? "skip" : "fill"), nlen, dire->u.name); nr_slots = afs_dir_calc_slots(nlen); next = offset + nr_slots; if (next > AFS_DIR_SLOTS_PER_BLOCK) { _debug("ENT[%zu.%u]:" " %u extends beyond end dir block" " (len %zu)", blkoff / sizeof(union afs_xdr_dir_block), offset, next, nlen); return afs_bad(dvnode, afs_file_error_dir_over_end); } /* Check that the name-extension dirents are all allocated */ for (tmp = 1; tmp < nr_slots; tmp++) { unsigned int ix = offset + tmp; if (!(block->hdr.bitmap[ix / 8] & (1 << (ix % 8)))) { _debug("ENT[%zu.u]:" " %u unmarked extension (%u/%u)", blkoff / sizeof(union afs_xdr_dir_block), offset, tmp, nr_slots); return afs_bad(dvnode, afs_file_error_dir_unmarked_ext); } } /* skip if starts before the current position */ if (offset < curr) { if (next > curr) ctx->pos = blkoff + next * sizeof(union afs_xdr_dirent); continue; } /* found the next entry */ if (!dir_emit(ctx, dire->u.name, nlen, ntohl(dire->u.vnode), (ctx->actor == afs_lookup_filldir || ctx->actor == afs_lookup_one_filldir)? 
ntohl(dire->u.unique) : DT_UNKNOWN)) { _leave(" = 0 [full]"); return 0; } ctx->pos = blkoff + next * sizeof(union afs_xdr_dirent); } _leave(" = 1 [more]"); return 1; } /* * iterate through the data blob that lists the contents of an AFS directory */ static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx, struct key *key, afs_dataversion_t *_dir_version) { struct afs_vnode *dvnode = AFS_FS_I(dir); union afs_xdr_dir_block *dblock; struct afs_read *req; struct folio *folio; unsigned offset, size; int ret; _enter("{%lu},%u,,", dir->i_ino, (unsigned)ctx->pos); if (test_bit(AFS_VNODE_DELETED, &AFS_FS_I(dir)->flags)) { _leave(" = -ESTALE"); return -ESTALE; } req = afs_read_dir(dvnode, key); if (IS_ERR(req)) return PTR_ERR(req); *_dir_version = req->data_version; /* round the file position up to the next entry boundary */ ctx->pos += sizeof(union afs_xdr_dirent) - 1; ctx->pos &= ~(sizeof(union afs_xdr_dirent) - 1); /* walk through the blocks in sequence */ ret = 0; while (ctx->pos < req->actual_len) { /* Fetch the appropriate folio from the directory and re-add it * to the LRU. We have all the pages pinned with an extra ref. */ folio = __filemap_get_folio(dir->i_mapping, ctx->pos / PAGE_SIZE, FGP_ACCESSED, 0); if (IS_ERR(folio)) { ret = afs_bad(dvnode, afs_file_error_dir_missing_page); break; } offset = round_down(ctx->pos, sizeof(*dblock)) - folio_file_pos(folio); size = min_t(loff_t, folio_size(folio), req->actual_len - folio_file_pos(folio)); do { dblock = kmap_local_folio(folio, offset); ret = afs_dir_iterate_block(dvnode, ctx, dblock, folio_file_pos(folio) + offset); kunmap_local(dblock); if (ret != 1) goto out; } while (offset += sizeof(*dblock), offset < size); ret = 0; } out: up_read(&dvnode->validate_lock); afs_put_read(req); _leave(" = %d", ret); return ret; } /* * read an AFS directory */ static int afs_readdir(struct file *file, struct dir_context *ctx) { afs_dataversion_t dir_version; return afs_dir_iterate(file_inode(file), ctx, afs_file_key(file), &dir_version); } /* * Search the directory for a single name * - if afs_dir_iterate_block() spots this function, it'll pass the FID * uniquifier through dtype */ static bool afs_lookup_one_filldir(struct dir_context *ctx, const char *name, int nlen, loff_t fpos, u64 ino, unsigned dtype) { struct afs_lookup_one_cookie *cookie = container_of(ctx, struct afs_lookup_one_cookie, ctx); _enter("{%s,%u},%s,%u,,%llu,%u", cookie->name.name, cookie->name.len, name, nlen, (unsigned long long) ino, dtype); /* insanity checks first */ BUILD_BUG_ON(sizeof(union afs_xdr_dir_block) != 2048); BUILD_BUG_ON(sizeof(union afs_xdr_dirent) != 32); if (cookie->name.len != nlen || memcmp(cookie->name.name, name, nlen) != 0) { _leave(" = true [keep looking]"); return true; } cookie->fid.vnode = ino; cookie->fid.unique = dtype; cookie->found = 1; _leave(" = false [found]"); return false; } /* * Do a lookup of a single name in a directory * - just returns the FID the dentry name maps to if found */ static int afs_do_lookup_one(struct inode *dir, struct dentry *dentry, struct afs_fid *fid, struct key *key, afs_dataversion_t *_dir_version) { struct afs_super_info *as = dir->i_sb->s_fs_info; struct afs_lookup_one_cookie cookie = { .ctx.actor = afs_lookup_one_filldir, .name = dentry->d_name, .fid.vid = as->volume->vid }; int ret; _enter("{%lu},%p{%pd},", dir->i_ino, dentry, dentry); /* search the directory */ ret = afs_dir_iterate(dir, &cookie.ctx, key, _dir_version); if (ret < 0) { _leave(" = %d [iter]", ret); return ret; } if (!cookie.found) { _leave(" = 
-ENOENT [not found]"); return -ENOENT; } *fid = cookie.fid; _leave(" = 0 { vn=%llu u=%u }", fid->vnode, fid->unique); return 0; } /* * search the directory for a name * - if afs_dir_iterate_block() spots this function, it'll pass the FID * uniquifier through dtype */ static bool afs_lookup_filldir(struct dir_context *ctx, const char *name, int nlen, loff_t fpos, u64 ino, unsigned dtype) { struct afs_lookup_cookie *cookie = container_of(ctx, struct afs_lookup_cookie, ctx); _enter("{%s,%u},%s,%u,,%llu,%u", cookie->name.name, cookie->name.len, name, nlen, (unsigned long long) ino, dtype); /* insanity checks first */ BUILD_BUG_ON(sizeof(union afs_xdr_dir_block) != 2048); BUILD_BUG_ON(sizeof(union afs_xdr_dirent) != 32); if (cookie->found) { if (cookie->nr_fids < 50) { cookie->fids[cookie->nr_fids].vnode = ino; cookie->fids[cookie->nr_fids].unique = dtype; cookie->nr_fids++; } } else if (cookie->name.len == nlen && memcmp(cookie->name.name, name, nlen) == 0) { cookie->fids[1].vnode = ino; cookie->fids[1].unique = dtype; cookie->found = 1; if (cookie->one_only) return false; } return cookie->nr_fids < 50; } /* * Deal with the result of a successful lookup operation. Turn all the files * into inodes and save the first one - which is the one we actually want. */ static void afs_do_lookup_success(struct afs_operation *op) { struct afs_vnode_param *vp; struct afs_vnode *vnode; struct inode *inode; u32 abort_code; int i; _enter(""); for (i = 0; i < op->nr_files; i++) { switch (i) { case 0: vp = &op->file[0]; abort_code = vp->scb.status.abort_code; if (abort_code != 0) { op->ac.abort_code = abort_code; op->error = afs_abort_to_error(abort_code); } break; case 1: vp = &op->file[1]; break; default: vp = &op->more_files[i - 2]; break; } if (!vp->scb.have_status && !vp->scb.have_error) continue; _debug("do [%u]", i); if (vp->vnode) { if (!test_bit(AFS_VNODE_UNSET, &vp->vnode->flags)) afs_vnode_commit_status(op, vp); } else if (vp->scb.status.abort_code == 0) { inode = afs_iget(op, vp); if (!IS_ERR(inode)) { vnode = AFS_FS_I(inode); afs_cache_permit(vnode, op->key, 0 /* Assume vnode->cb_break is 0 */ + op->cb_v_break, &vp->scb); vp->vnode = vnode; vp->put_vnode = true; } } else { _debug("- abort %d %llx:%llx.%x", vp->scb.status.abort_code, vp->fid.vid, vp->fid.vnode, vp->fid.unique); } } _leave(""); } static const struct afs_operation_ops afs_inline_bulk_status_operation = { .issue_afs_rpc = afs_fs_inline_bulk_status, .issue_yfs_rpc = yfs_fs_inline_bulk_status, .success = afs_do_lookup_success, }; static const struct afs_operation_ops afs_lookup_fetch_status_operation = { .issue_afs_rpc = afs_fs_fetch_status, .issue_yfs_rpc = yfs_fs_fetch_status, .success = afs_do_lookup_success, .aborted = afs_check_for_remote_deletion, }; /* * See if we know that the server we expect to use doesn't support * FS.InlineBulkStatus. */ static bool afs_server_supports_ibulk(struct afs_vnode *dvnode) { struct afs_server_list *slist; struct afs_volume *volume = dvnode->volume; struct afs_server *server; bool ret = true; int i; if (!test_bit(AFS_VOLUME_MAYBE_NO_IBULK, &volume->flags)) return true; rcu_read_lock(); slist = rcu_dereference(volume->servers); for (i = 0; i < slist->nr_servers; i++) { server = slist->servers[i].server; if (server == dvnode->cb_server) { if (test_bit(AFS_SERVER_FL_NO_IBULK, &server->flags)) ret = false; break; } } rcu_read_unlock(); return ret; } /* * Do a lookup in a directory. We make use of bulk lookup to query a slew of * files in one go and create inodes for them. 
The inode of the file we were * asked for is returned. */ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry, struct key *key) { struct afs_lookup_cookie *cookie; struct afs_vnode_param *vp; struct afs_operation *op; struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode; struct inode *inode = NULL, *ti; afs_dataversion_t data_version = READ_ONCE(dvnode->status.data_version); long ret; int i; _enter("{%lu},%p{%pd},", dir->i_ino, dentry, dentry); cookie = kzalloc(sizeof(struct afs_lookup_cookie), GFP_KERNEL); if (!cookie) return ERR_PTR(-ENOMEM); for (i = 0; i < ARRAY_SIZE(cookie->fids); i++) cookie->fids[i].vid = dvnode->fid.vid; cookie->ctx.actor = afs_lookup_filldir; cookie->name = dentry->d_name; cookie->nr_fids = 2; /* slot 0 is saved for the fid we actually want * and slot 1 for the directory */ if (!afs_server_supports_ibulk(dvnode)) cookie->one_only = true; /* search the directory */ ret = afs_dir_iterate(dir, &cookie->ctx, key, &data_version); if (ret < 0) goto out; dentry->d_fsdata = (void *)(unsigned long)data_version; ret = -ENOENT; if (!cookie->found) goto out; /* Check to see if we already have an inode for the primary fid. */ inode = ilookup5(dir->i_sb, cookie->fids[1].vnode, afs_ilookup5_test_by_fid, &cookie->fids[1]); if (inode) goto out; /* We do */ /* Okay, we didn't find it. We need to query the server - and whilst * we're doing that, we're going to attempt to look up a bunch of other * vnodes also. */ op = afs_alloc_operation(NULL, dvnode->volume); if (IS_ERR(op)) { ret = PTR_ERR(op); goto out; } afs_op_set_vnode(op, 0, dvnode); afs_op_set_fid(op, 1, &cookie->fids[1]); op->nr_files = cookie->nr_fids; _debug("nr_files %u", op->nr_files); /* Need space for examining all the selected files */ op->error = -ENOMEM; if (op->nr_files > 2) { op->more_files = kvcalloc(op->nr_files - 2, sizeof(struct afs_vnode_param), GFP_KERNEL); if (!op->more_files) goto out_op; for (i = 2; i < op->nr_files; i++) { vp = &op->more_files[i - 2]; vp->fid = cookie->fids[i]; /* Find any inodes that already exist and get their * callback counters. */ ti = ilookup5_nowait(dir->i_sb, vp->fid.vnode, afs_ilookup5_test_by_fid, &vp->fid); if (!IS_ERR_OR_NULL(ti)) { vnode = AFS_FS_I(ti); vp->dv_before = vnode->status.data_version; vp->cb_break_before = afs_calc_vnode_cb_break(vnode); vp->vnode = vnode; vp->put_vnode = true; vp->speculative = true; /* vnode not locked */ } } } /* Try FS.InlineBulkStatus first. Abort codes for the individual * lookups contained therein are stored in the reply without aborting * the whole operation. */ op->error = -ENOTSUPP; if (!cookie->one_only) { op->ops = &afs_inline_bulk_status_operation; afs_begin_vnode_operation(op); afs_wait_for_operation(op); } if (op->error == -ENOTSUPP) { /* We could try FS.BulkStatus next, but this aborts the entire * op if any of the lookups fails - so, for the moment, revert * to FS.FetchStatus for op->file[1]. */ op->fetch_status.which = 1; op->ops = &afs_lookup_fetch_status_operation; afs_begin_vnode_operation(op); afs_wait_for_operation(op); } inode = ERR_PTR(op->error); out_op: if (op->error == 0) { inode = &op->file[1].vnode->netfs.inode; op->file[1].vnode = NULL; } if (op->file[0].scb.have_status) dentry->d_fsdata = (void *)(unsigned long)op->file[0].scb.status.data_version; else dentry->d_fsdata = (void *)(unsigned long)op->file[0].dv_before; ret = afs_put_operation(op); out: kfree(cookie); _leave(""); return inode ?: ERR_PTR(ret); } /* * Look up an entry in a directory with @sys substitution. 
*/ static struct dentry *afs_lookup_atsys(struct inode *dir, struct dentry *dentry, struct key *key) { struct afs_sysnames *subs; struct afs_net *net = afs_i2net(dir); struct dentry *ret; char *buf, *p, *name; int len, i; _enter(""); ret = ERR_PTR(-ENOMEM); p = buf = kmalloc(AFSNAMEMAX, GFP_KERNEL); if (!buf) goto out_p; if (dentry->d_name.len > 4) { memcpy(p, dentry->d_name.name, dentry->d_name.len - 4); p += dentry->d_name.len - 4; } /* There is an ordered list of substitutes that we have to try. */ read_lock(&net->sysnames_lock); subs = net->sysnames; refcount_inc(&subs->usage); read_unlock(&net->sysnames_lock); for (i = 0; i < subs->nr; i++) { name = subs->subs[i]; len = dentry->d_name.len - 4 + strlen(name); if (len >= AFSNAMEMAX) { ret = ERR_PTR(-ENAMETOOLONG); goto out_s; } strcpy(p, name); ret = lookup_one_len(buf, dentry->d_parent, len); if (IS_ERR(ret) || d_is_positive(ret)) goto out_s; dput(ret); } /* We don't want to d_add() the @sys dentry here as we don't want to * the cached dentry to hide changes to the sysnames list. */ ret = NULL; out_s: afs_put_sysnames(subs); kfree(buf); out_p: key_put(key); return ret; } /* * look up an entry in a directory */ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { struct afs_vnode *dvnode = AFS_FS_I(dir); struct afs_fid fid = {}; struct inode *inode; struct dentry *d; struct key *key; int ret; _enter("{%llx:%llu},%p{%pd},", dvnode->fid.vid, dvnode->fid.vnode, dentry, dentry); ASSERTCMP(d_inode(dentry), ==, NULL); if (dentry->d_name.len >= AFSNAMEMAX) { _leave(" = -ENAMETOOLONG"); return ERR_PTR(-ENAMETOOLONG); } if (test_bit(AFS_VNODE_DELETED, &dvnode->flags)) { _leave(" = -ESTALE"); return ERR_PTR(-ESTALE); } key = afs_request_key(dvnode->volume->cell); if (IS_ERR(key)) { _leave(" = %ld [key]", PTR_ERR(key)); return ERR_CAST(key); } ret = afs_validate(dvnode, key); if (ret < 0) { key_put(key); _leave(" = %d [val]", ret); return ERR_PTR(ret); } if (dentry->d_name.len >= 4 && dentry->d_name.name[dentry->d_name.len - 4] == '@' && dentry->d_name.name[dentry->d_name.len - 3] == 's' && dentry->d_name.name[dentry->d_name.len - 2] == 'y' && dentry->d_name.name[dentry->d_name.len - 1] == 's') return afs_lookup_atsys(dir, dentry, key); afs_stat_v(dvnode, n_lookup); inode = afs_do_lookup(dir, dentry, key); key_put(key); if (inode == ERR_PTR(-ENOENT)) inode = afs_try_auto_mntpt(dentry, dir); if (!IS_ERR_OR_NULL(inode)) fid = AFS_FS_I(inode)->fid; _debug("splice %p", dentry->d_inode); d = d_splice_alias(inode, dentry); if (!IS_ERR_OR_NULL(d)) { d->d_fsdata = dentry->d_fsdata; trace_afs_lookup(dvnode, &d->d_name, &fid); } else { trace_afs_lookup(dvnode, &dentry->d_name, &fid); } _leave(""); return d; } /* * Check the validity of a dentry under RCU conditions. */ static int afs_d_revalidate_rcu(struct dentry *dentry) { struct afs_vnode *dvnode; struct dentry *parent; struct inode *dir; long dir_version, de_version; _enter("%p", dentry); /* Check the parent directory is still valid first. */ parent = READ_ONCE(dentry->d_parent); dir = d_inode_rcu(parent); if (!dir) return -ECHILD; dvnode = AFS_FS_I(dir); if (test_bit(AFS_VNODE_DELETED, &dvnode->flags)) return -ECHILD; if (!afs_check_validity(dvnode)) return -ECHILD; /* We only need to invalidate a dentry if the server's copy changed * behind our back. If we made the change, it's no problem. Note that * on a 32-bit system, we only have 32 bits in the dentry to store the * version. 
*/ dir_version = (long)READ_ONCE(dvnode->status.data_version); de_version = (long)READ_ONCE(dentry->d_fsdata); if (de_version != dir_version) { dir_version = (long)READ_ONCE(dvnode->invalid_before); if (de_version - dir_version < 0) return -ECHILD; } return 1; /* Still valid */ } /* * check that a dentry lookup hit has found a valid entry * - NOTE! the hit can be a negative hit too, so we can't assume we have an * inode */ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags) { struct afs_vnode *vnode, *dir; struct afs_fid fid; struct dentry *parent; struct inode *inode; struct key *key; afs_dataversion_t dir_version, invalid_before; long de_version; int ret; if (flags & LOOKUP_RCU) return afs_d_revalidate_rcu(dentry); if (d_really_is_positive(dentry)) { vnode = AFS_FS_I(d_inode(dentry)); _enter("{v={%llx:%llu} n=%pd fl=%lx},", vnode->fid.vid, vnode->fid.vnode, dentry, vnode->flags); } else { _enter("{neg n=%pd}", dentry); } key = afs_request_key(AFS_FS_S(dentry->d_sb)->volume->cell); if (IS_ERR(key)) key = NULL; /* Hold the parent dentry so we can peer at it */ parent = dget_parent(dentry); dir = AFS_FS_I(d_inode(parent)); /* validate the parent directory */ afs_validate(dir, key); if (test_bit(AFS_VNODE_DELETED, &dir->flags)) { _debug("%pd: parent dir deleted", dentry); goto not_found; } /* We only need to invalidate a dentry if the server's copy changed * behind our back. If we made the change, it's no problem. Note that * on a 32-bit system, we only have 32 bits in the dentry to store the * version. */ dir_version = dir->status.data_version; de_version = (long)dentry->d_fsdata; if (de_version == (long)dir_version) goto out_valid_noupdate; invalid_before = dir->invalid_before; if (de_version - (long)invalid_before >= 0) goto out_valid; _debug("dir modified"); afs_stat_v(dir, n_reval); /* search the directory for this vnode */ ret = afs_do_lookup_one(&dir->netfs.inode, dentry, &fid, key, &dir_version); switch (ret) { case 0: /* the filename maps to something */ if (d_really_is_negative(dentry)) goto not_found; inode = d_inode(dentry); if (is_bad_inode(inode)) { printk("kAFS: afs_d_revalidate: %pd2 has bad inode\n", dentry); goto not_found; } vnode = AFS_FS_I(inode); /* if the vnode ID has changed, then the dirent points to a * different file */ if (fid.vnode != vnode->fid.vnode) { _debug("%pd: dirent changed [%llu != %llu]", dentry, fid.vnode, vnode->fid.vnode); goto not_found; } /* if the vnode ID uniqifier has changed, then the file has * been deleted and replaced, and the original vnode ID has * been reused */ if (fid.unique != vnode->fid.unique) { _debug("%pd: file deleted (uq %u -> %u I:%u)", dentry, fid.unique, vnode->fid.unique, vnode->netfs.inode.i_generation); goto not_found; } goto out_valid; case -ENOENT: /* the filename is unknown */ _debug("%pd: dirent not found", dentry); if (d_really_is_positive(dentry)) goto not_found; goto out_valid; default: _debug("failed to iterate dir %pd: %d", parent, ret); goto not_found; } out_valid: dentry->d_fsdata = (void *)(unsigned long)dir_version; out_valid_noupdate: dput(parent); key_put(key); _leave(" = 1 [valid]"); return 1; not_found: _debug("dropping dentry %pd2", dentry); dput(parent); key_put(key); _leave(" = 0 [bad]"); return 0; } /* * allow the VFS to enquire as to whether a dentry should be unhashed (mustn't * sleep) * - called from dput() when d_count is going to 0. 
* - return 1 to request dentry be unhashed, 0 otherwise */ static int afs_d_delete(const struct dentry *dentry) { _enter("%pd", dentry); if (dentry->d_flags & DCACHE_NFSFS_RENAMED) goto zap; if (d_really_is_positive(dentry) && (test_bit(AFS_VNODE_DELETED, &AFS_FS_I(d_inode(dentry))->flags) || test_bit(AFS_VNODE_PSEUDODIR, &AFS_FS_I(d_inode(dentry))->flags))) goto zap; _leave(" = 0 [keep]"); return 0; zap: _leave(" = 1 [zap]"); return 1; } /* * Clean up sillyrename files on dentry removal. */ static void afs_d_iput(struct dentry *dentry, struct inode *inode) { if (dentry->d_flags & DCACHE_NFSFS_RENAMED) afs_silly_iput(dentry, inode); iput(inode); } /* * handle dentry release */ void afs_d_release(struct dentry *dentry) { _enter("%pd", dentry); } void afs_check_for_remote_deletion(struct afs_operation *op) { struct afs_vnode *vnode = op->file[0].vnode; switch (op->ac.abort_code) { case VNOVNODE: set_bit(AFS_VNODE_DELETED, &vnode->flags); afs_break_callback(vnode, afs_cb_break_for_deleted); } } /* * Create a new inode for create/mkdir/symlink */ static void afs_vnode_new_inode(struct afs_operation *op) { struct afs_vnode_param *vp = &op->file[1]; struct afs_vnode *vnode; struct inode *inode; _enter(""); ASSERTCMP(op->error, ==, 0); inode = afs_iget(op, vp); if (IS_ERR(inode)) { /* ENOMEM or EINTR at a really inconvenient time - just abandon * the new directory on the server. */ op->error = PTR_ERR(inode); return; } vnode = AFS_FS_I(inode); set_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags); if (!op->error) afs_cache_permit(vnode, op->key, vnode->cb_break, &vp->scb); d_instantiate(op->dentry, inode); } static void afs_create_success(struct afs_operation *op) { _enter("op=%08x", op->debug_id); op->ctime = op->file[0].scb.status.mtime_client; afs_vnode_commit_status(op, &op->file[0]); afs_update_dentry_version(op, &op->file[0], op->dentry); afs_vnode_new_inode(op); } static void afs_create_edit_dir(struct afs_operation *op) { struct afs_vnode_param *dvp = &op->file[0]; struct afs_vnode_param *vp = &op->file[1]; struct afs_vnode *dvnode = dvp->vnode; _enter("op=%08x", op->debug_id); down_write(&dvnode->validate_lock); if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) && dvnode->status.data_version == dvp->dv_before + dvp->dv_delta) afs_edit_dir_add(dvnode, &op->dentry->d_name, &vp->fid, op->create.reason); up_write(&dvnode->validate_lock); } static void afs_create_put(struct afs_operation *op) { _enter("op=%08x", op->debug_id); if (op->error) d_drop(op->dentry); } static const struct afs_operation_ops afs_mkdir_operation = { .issue_afs_rpc = afs_fs_make_dir, .issue_yfs_rpc = yfs_fs_make_dir, .success = afs_create_success, .aborted = afs_check_for_remote_deletion, .edit_dir = afs_create_edit_dir, .put = afs_create_put, }; /* * create a directory on an AFS filesystem */ static int afs_mkdir(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode) { struct afs_operation *op; struct afs_vnode *dvnode = AFS_FS_I(dir); _enter("{%llx:%llu},{%pd},%ho", dvnode->fid.vid, dvnode->fid.vnode, dentry, mode); op = afs_alloc_operation(NULL, dvnode->volume); if (IS_ERR(op)) { d_drop(dentry); return PTR_ERR(op); } afs_op_set_vnode(op, 0, dvnode); op->file[0].dv_delta = 1; op->file[0].modification = true; op->file[0].update_ctime = true; op->dentry = dentry; op->create.mode = S_IFDIR | mode; op->create.reason = afs_edit_dir_for_mkdir; op->mtime = current_time(dir); op->ops = &afs_mkdir_operation; return afs_do_sync_operation(op); } /* * Remove a subdir from a directory. 
*/ static void afs_dir_remove_subdir(struct dentry *dentry) { if (d_really_is_positive(dentry)) { struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry)); clear_nlink(&vnode->netfs.inode); set_bit(AFS_VNODE_DELETED, &vnode->flags); clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags); clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags); } } static void afs_rmdir_success(struct afs_operation *op) { _enter("op=%08x", op->debug_id); op->ctime = op->file[0].scb.status.mtime_client; afs_vnode_commit_status(op, &op->file[0]); afs_update_dentry_version(op, &op->file[0], op->dentry); } static void afs_rmdir_edit_dir(struct afs_operation *op) { struct afs_vnode_param *dvp = &op->file[0]; struct afs_vnode *dvnode = dvp->vnode; _enter("op=%08x", op->debug_id); afs_dir_remove_subdir(op->dentry); down_write(&dvnode->validate_lock); if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) && dvnode->status.data_version == dvp->dv_before + dvp->dv_delta) afs_edit_dir_remove(dvnode, &op->dentry->d_name, afs_edit_dir_for_rmdir); up_write(&dvnode->validate_lock); } static void afs_rmdir_put(struct afs_operation *op) { _enter("op=%08x", op->debug_id); if (op->file[1].vnode) up_write(&op->file[1].vnode->rmdir_lock); } static const struct afs_operation_ops afs_rmdir_operation = { .issue_afs_rpc = afs_fs_remove_dir, .issue_yfs_rpc = yfs_fs_remove_dir, .success = afs_rmdir_success, .aborted = afs_check_for_remote_deletion, .edit_dir = afs_rmdir_edit_dir, .put = afs_rmdir_put, }; /* * remove a directory from an AFS filesystem */ static int afs_rmdir(struct inode *dir, struct dentry *dentry) { struct afs_operation *op; struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode = NULL; int ret; _enter("{%llx:%llu},{%pd}", dvnode->fid.vid, dvnode->fid.vnode, dentry); op = afs_alloc_operation(NULL, dvnode->volume); if (IS_ERR(op)) return PTR_ERR(op); afs_op_set_vnode(op, 0, dvnode); op->file[0].dv_delta = 1; op->file[0].modification = true; op->file[0].update_ctime = true; op->dentry = dentry; op->ops = &afs_rmdir_operation; /* Try to make sure we have a callback promise on the victim. */ if (d_really_is_positive(dentry)) { vnode = AFS_FS_I(d_inode(dentry)); ret = afs_validate(vnode, op->key); if (ret < 0) goto error; } if (vnode) { ret = down_write_killable(&vnode->rmdir_lock); if (ret < 0) goto error; op->file[1].vnode = vnode; } return afs_do_sync_operation(op); error: return afs_put_operation(op); } /* * Remove a link to a file or symlink from a directory. * * If the file was not deleted due to excess hard links, the fileserver will * break the callback promise on the file - if it had one - before it returns * to us, and if it was deleted, it won't * * However, if we didn't have a callback promise outstanding, or it was * outstanding on a different server, then it won't break it either... 
*/ static void afs_dir_remove_link(struct afs_operation *op) { struct afs_vnode *dvnode = op->file[0].vnode; struct afs_vnode *vnode = op->file[1].vnode; struct dentry *dentry = op->dentry; int ret; if (op->error != 0 || (op->file[1].scb.have_status && op->file[1].scb.have_error)) return; if (d_really_is_positive(dentry)) return; if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) { /* Already done */ } else if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) { write_seqlock(&vnode->cb_lock); drop_nlink(&vnode->netfs.inode); if (vnode->netfs.inode.i_nlink == 0) { set_bit(AFS_VNODE_DELETED, &vnode->flags); __afs_break_callback(vnode, afs_cb_break_for_unlink); } write_sequnlock(&vnode->cb_lock); } else { afs_break_callback(vnode, afs_cb_break_for_unlink); if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) _debug("AFS_VNODE_DELETED"); ret = afs_validate(vnode, op->key); if (ret != -ESTALE) op->error = ret; } _debug("nlink %d [val %d]", vnode->netfs.inode.i_nlink, op->error); } static void afs_unlink_success(struct afs_operation *op) { _enter("op=%08x", op->debug_id); op->ctime = op->file[0].scb.status.mtime_client; afs_check_dir_conflict(op, &op->file[0]); afs_vnode_commit_status(op, &op->file[0]); afs_vnode_commit_status(op, &op->file[1]); afs_update_dentry_version(op, &op->file[0], op->dentry); afs_dir_remove_link(op); } static void afs_unlink_edit_dir(struct afs_operation *op) { struct afs_vnode_param *dvp = &op->file[0]; struct afs_vnode *dvnode = dvp->vnode; _enter("op=%08x", op->debug_id); down_write(&dvnode->validate_lock); if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) && dvnode->status.data_version == dvp->dv_before + dvp->dv_delta) afs_edit_dir_remove(dvnode, &op->dentry->d_name, afs_edit_dir_for_unlink); up_write(&dvnode->validate_lock); } static void afs_unlink_put(struct afs_operation *op) { _enter("op=%08x", op->debug_id); if (op->unlink.need_rehash && op->error < 0 && op->error != -ENOENT) d_rehash(op->dentry); } static const struct afs_operation_ops afs_unlink_operation = { .issue_afs_rpc = afs_fs_remove_file, .issue_yfs_rpc = yfs_fs_remove_file, .success = afs_unlink_success, .aborted = afs_check_for_remote_deletion, .edit_dir = afs_unlink_edit_dir, .put = afs_unlink_put, }; /* * Remove a file or symlink from an AFS filesystem. */ static int afs_unlink(struct inode *dir, struct dentry *dentry) { struct afs_operation *op; struct afs_vnode *dvnode = AFS_FS_I(dir); struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry)); int ret; _enter("{%llx:%llu},{%pd}", dvnode->fid.vid, dvnode->fid.vnode, dentry); if (dentry->d_name.len >= AFSNAMEMAX) return -ENAMETOOLONG; op = afs_alloc_operation(NULL, dvnode->volume); if (IS_ERR(op)) return PTR_ERR(op); afs_op_set_vnode(op, 0, dvnode); op->file[0].dv_delta = 1; op->file[0].modification = true; op->file[0].update_ctime = true; /* Try to make sure we have a callback promise on the victim. */ ret = afs_validate(vnode, op->key); if (ret < 0) { op->error = ret; goto error; } spin_lock(&dentry->d_lock); if (d_count(dentry) > 1) { spin_unlock(&dentry->d_lock); /* Start asynchronous writeout of the inode */ write_inode_now(d_inode(dentry), 0); op->error = afs_sillyrename(dvnode, vnode, dentry, op->key); goto error; } if (!d_unhashed(dentry)) { /* Prevent a race with RCU lookup. 
*/ __d_drop(dentry); op->unlink.need_rehash = true; } spin_unlock(&dentry->d_lock); op->file[1].vnode = vnode; op->file[1].update_ctime = true; op->file[1].op_unlinked = true; op->dentry = dentry; op->ops = &afs_unlink_operation; afs_begin_vnode_operation(op); afs_wait_for_operation(op); /* If there was a conflict with a third party, check the status of the * unlinked vnode. */ if (op->error == 0 && (op->flags & AFS_OPERATION_DIR_CONFLICT)) { op->file[1].update_ctime = false; op->fetch_status.which = 1; op->ops = &afs_fetch_status_operation; afs_begin_vnode_operation(op); afs_wait_for_operation(op); } return afs_put_operation(op); error: return afs_put_operation(op); } static const struct afs_operation_ops afs_create_operation = { .issue_afs_rpc = afs_fs_create_file, .issue_yfs_rpc = yfs_fs_create_file, .success = afs_create_success, .aborted = afs_check_for_remote_deletion, .edit_dir = afs_create_edit_dir, .put = afs_create_put, }; /* * create a regular file on an AFS filesystem */ static int afs_create(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) { struct afs_operation *op; struct afs_vnode *dvnode = AFS_FS_I(dir); int ret = -ENAMETOOLONG; _enter("{%llx:%llu},{%pd},%ho", dvnode->fid.vid, dvnode->fid.vnode, dentry, mode); if (dentry->d_name.len >= AFSNAMEMAX) goto error; op = afs_alloc_operation(NULL, dvnode->volume); if (IS_ERR(op)) { ret = PTR_ERR(op); goto error; } afs_op_set_vnode(op, 0, dvnode); op->file[0].dv_delta = 1; op->file[0].modification = true; op->file[0].update_ctime = true; op->dentry = dentry; op->create.mode = S_IFREG | mode; op->create.reason = afs_edit_dir_for_create; op->mtime = current_time(dir); op->ops = &afs_create_operation; return afs_do_sync_operation(op); error: d_drop(dentry); _leave(" = %d", ret); return ret; } static void afs_link_success(struct afs_operation *op) { struct afs_vnode_param *dvp = &op->file[0]; struct afs_vnode_param *vp = &op->file[1]; _enter("op=%08x", op->debug_id); op->ctime = dvp->scb.status.mtime_client; afs_vnode_commit_status(op, dvp); afs_vnode_commit_status(op, vp); afs_update_dentry_version(op, dvp, op->dentry); if (op->dentry_2->d_parent == op->dentry->d_parent) afs_update_dentry_version(op, dvp, op->dentry_2); ihold(&vp->vnode->netfs.inode); d_instantiate(op->dentry, &vp->vnode->netfs.inode); } static void afs_link_put(struct afs_operation *op) { _enter("op=%08x", op->debug_id); if (op->error) d_drop(op->dentry); } static const struct afs_operation_ops afs_link_operation = { .issue_afs_rpc = afs_fs_link, .issue_yfs_rpc = yfs_fs_link, .success = afs_link_success, .aborted = afs_check_for_remote_deletion, .edit_dir = afs_create_edit_dir, .put = afs_link_put, }; /* * create a hard link between files in an AFS filesystem */ static int afs_link(struct dentry *from, struct inode *dir, struct dentry *dentry) { struct afs_operation *op; struct afs_vnode *dvnode = AFS_FS_I(dir); struct afs_vnode *vnode = AFS_FS_I(d_inode(from)); int ret = -ENAMETOOLONG; _enter("{%llx:%llu},{%llx:%llu},{%pd}", vnode->fid.vid, vnode->fid.vnode, dvnode->fid.vid, dvnode->fid.vnode, dentry); if (dentry->d_name.len >= AFSNAMEMAX) goto error; op = afs_alloc_operation(NULL, dvnode->volume); if (IS_ERR(op)) { ret = PTR_ERR(op); goto error; } ret = afs_validate(vnode, op->key); if (ret < 0) goto error_op; afs_op_set_vnode(op, 0, dvnode); afs_op_set_vnode(op, 1, vnode); op->file[0].dv_delta = 1; op->file[0].modification = true; op->file[0].update_ctime = true; op->file[1].update_ctime = true; op->dentry = dentry; 
op->dentry_2 = from; op->ops = &afs_link_operation; op->create.reason = afs_edit_dir_for_link; return afs_do_sync_operation(op); error_op: afs_put_operation(op); error: d_drop(dentry); _leave(" = %d", ret); return ret; } static const struct afs_operation_ops afs_symlink_operation = { .issue_afs_rpc = afs_fs_symlink, .issue_yfs_rpc = yfs_fs_symlink, .success = afs_create_success, .aborted = afs_check_for_remote_deletion, .edit_dir = afs_create_edit_dir, .put = afs_create_put, }; /* * create a symlink in an AFS filesystem */ static int afs_symlink(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, const char *content) { struct afs_operation *op; struct afs_vnode *dvnode = AFS_FS_I(dir); int ret; _enter("{%llx:%llu},{%pd},%s", dvnode->fid.vid, dvnode->fid.vnode, dentry, content); ret = -ENAMETOOLONG; if (dentry->d_name.len >= AFSNAMEMAX) goto error; ret = -EINVAL; if (strlen(content) >= AFSPATHMAX) goto error; op = afs_alloc_operation(NULL, dvnode->volume); if (IS_ERR(op)) { ret = PTR_ERR(op); goto error; } afs_op_set_vnode(op, 0, dvnode); op->file[0].dv_delta = 1; op->dentry = dentry; op->ops = &afs_symlink_operation; op->create.reason = afs_edit_dir_for_symlink; op->create.symlink = content; op->mtime = current_time(dir); return afs_do_sync_operation(op); error: d_drop(dentry); _leave(" = %d", ret); return ret; } static void afs_rename_success(struct afs_operation *op) { _enter("op=%08x", op->debug_id); op->ctime = op->file[0].scb.status.mtime_client; afs_check_dir_conflict(op, &op->file[1]); afs_vnode_commit_status(op, &op->file[0]); if (op->file[1].vnode != op->file[0].vnode) { op->ctime = op->file[1].scb.status.mtime_client; afs_vnode_commit_status(op, &op->file[1]); } } static void afs_rename_edit_dir(struct afs_operation *op) { struct afs_vnode_param *orig_dvp = &op->file[0]; struct afs_vnode_param *new_dvp = &op->file[1]; struct afs_vnode *orig_dvnode = orig_dvp->vnode; struct afs_vnode *new_dvnode = new_dvp->vnode; struct afs_vnode *vnode = AFS_FS_I(d_inode(op->dentry)); struct dentry *old_dentry = op->dentry; struct dentry *new_dentry = op->dentry_2; struct inode *new_inode; _enter("op=%08x", op->debug_id); if (op->rename.rehash) { d_rehash(op->rename.rehash); op->rename.rehash = NULL; } down_write(&orig_dvnode->validate_lock); if (test_bit(AFS_VNODE_DIR_VALID, &orig_dvnode->flags) && orig_dvnode->status.data_version == orig_dvp->dv_before + orig_dvp->dv_delta) afs_edit_dir_remove(orig_dvnode, &old_dentry->d_name, afs_edit_dir_for_rename_0); if (new_dvnode != orig_dvnode) { up_write(&orig_dvnode->validate_lock); down_write(&new_dvnode->validate_lock); } if (test_bit(AFS_VNODE_DIR_VALID, &new_dvnode->flags) && new_dvnode->status.data_version == new_dvp->dv_before + new_dvp->dv_delta) { if (!op->rename.new_negative) afs_edit_dir_remove(new_dvnode, &new_dentry->d_name, afs_edit_dir_for_rename_1); afs_edit_dir_add(new_dvnode, &new_dentry->d_name, &vnode->fid, afs_edit_dir_for_rename_2); } new_inode = d_inode(new_dentry); if (new_inode) { spin_lock(&new_inode->i_lock); if (S_ISDIR(new_inode->i_mode)) clear_nlink(new_inode); else if (new_inode->i_nlink > 0) drop_nlink(new_inode); spin_unlock(&new_inode->i_lock); } /* Now we can update d_fsdata on the dentries to reflect their * new parent's data_version. * * Note that if we ever implement RENAME_EXCHANGE, we'll have * to update both dentries with opposing dir versions. 
*/ afs_update_dentry_version(op, new_dvp, op->dentry); afs_update_dentry_version(op, new_dvp, op->dentry_2); d_move(old_dentry, new_dentry); up_write(&new_dvnode->validate_lock); } static void afs_rename_put(struct afs_operation *op) { _enter("op=%08x", op->debug_id); if (op->rename.rehash) d_rehash(op->rename.rehash); dput(op->rename.tmp); if (op->error) d_rehash(op->dentry); } static const struct afs_operation_ops afs_rename_operation = { .issue_afs_rpc = afs_fs_rename, .issue_yfs_rpc = yfs_fs_rename, .success = afs_rename_success, .edit_dir = afs_rename_edit_dir, .put = afs_rename_put, }; /* * rename a file in an AFS filesystem and/or move it between directories */ static int afs_rename(struct mnt_idmap *idmap, struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) { struct afs_operation *op; struct afs_vnode *orig_dvnode, *new_dvnode, *vnode; int ret; if (flags) return -EINVAL; /* Don't allow silly-rename files be moved around. */ if (old_dentry->d_flags & DCACHE_NFSFS_RENAMED) return -EINVAL; vnode = AFS_FS_I(d_inode(old_dentry)); orig_dvnode = AFS_FS_I(old_dir); new_dvnode = AFS_FS_I(new_dir); _enter("{%llx:%llu},{%llx:%llu},{%llx:%llu},{%pd}", orig_dvnode->fid.vid, orig_dvnode->fid.vnode, vnode->fid.vid, vnode->fid.vnode, new_dvnode->fid.vid, new_dvnode->fid.vnode, new_dentry); op = afs_alloc_operation(NULL, orig_dvnode->volume); if (IS_ERR(op)) return PTR_ERR(op); ret = afs_validate(vnode, op->key); op->error = ret; if (ret < 0) goto error; afs_op_set_vnode(op, 0, orig_dvnode); afs_op_set_vnode(op, 1, new_dvnode); /* May be same as orig_dvnode */ op->file[0].dv_delta = 1; op->file[1].dv_delta = 1; op->file[0].modification = true; op->file[1].modification = true; op->file[0].update_ctime = true; op->file[1].update_ctime = true; op->dentry = old_dentry; op->dentry_2 = new_dentry; op->rename.new_negative = d_is_negative(new_dentry); op->ops = &afs_rename_operation; /* For non-directories, check whether the target is busy and if so, * make a copy of the dentry and then do a silly-rename. If the * silly-rename succeeds, the copied dentry is hashed and becomes the * new target. */ if (d_is_positive(new_dentry) && !d_is_dir(new_dentry)) { /* To prevent any new references to the target during the * rename, we unhash the dentry in advance. */ if (!d_unhashed(new_dentry)) { d_drop(new_dentry); op->rename.rehash = new_dentry; } if (d_count(new_dentry) > 2) { /* copy the target dentry's name */ op->rename.tmp = d_alloc(new_dentry->d_parent, &new_dentry->d_name); if (!op->rename.tmp) { op->error = -ENOMEM; goto error; } ret = afs_sillyrename(new_dvnode, AFS_FS_I(d_inode(new_dentry)), new_dentry, op->key); if (ret) { op->error = ret; goto error; } op->dentry_2 = op->rename.tmp; op->rename.rehash = NULL; op->rename.new_negative = true; } } /* This bit is potentially nasty as there's a potential race with * afs_d_revalidate{,_rcu}(). We have to change d_fsdata on the dentry * to reflect it's new parent's new data_version after the op, but * d_revalidate may see old_dentry between the op having taken place * and the version being updated. * * So drop the old_dentry for now to make other threads go through * lookup instead - which we hold a lock against. 
*/ d_drop(old_dentry); return afs_do_sync_operation(op); error: return afs_put_operation(op); } /* * Release a directory folio and clean up its private state if it's not busy * - return true if the folio can now be released, false if not */ static bool afs_dir_release_folio(struct folio *folio, gfp_t gfp_flags) { struct afs_vnode *dvnode = AFS_FS_I(folio_inode(folio)); _enter("{{%llx:%llu}[%lu]}", dvnode->fid.vid, dvnode->fid.vnode, folio_index(folio)); folio_detach_private(folio); /* The directory will need reloading. */ if (test_and_clear_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) afs_stat_v(dvnode, n_relpg); return true; } /* * Invalidate part or all of a folio. */ static void afs_dir_invalidate_folio(struct folio *folio, size_t offset, size_t length) { struct afs_vnode *dvnode = AFS_FS_I(folio_inode(folio)); _enter("{%lu},%zu,%zu", folio->index, offset, length); BUG_ON(!folio_test_locked(folio)); /* The directory will need reloading. */ if (test_and_clear_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) afs_stat_v(dvnode, n_inval); /* we clean up only if the entire folio is being invalidated */ if (offset == 0 && length == folio_size(folio)) folio_detach_private(folio); }
linux-master
fs/afs/dir.c
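The rename path in fs/afs/dir.c above only edits the cached directory image when the data version reported after the operation equals the version seen before the RPC plus the delta the operation was expected to apply (the `dv_before + dv_delta` check in afs_rename_edit_dir()); otherwise the cached copy is left invalid and reloaded later. Below is a minimal userspace sketch of that gate, assuming nothing about kernel internals; `dir_cache`, `dir_commit_edit` and the field names are hypothetical illustrations, not AFS APIs.

/*
 * Minimal sketch: apply a local edit to a cached directory only if the
 * post-op data version is exactly the pre-op version plus the expected
 * delta; otherwise mark the cache invalid so it gets reloaded.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct dir_cache {
	uint64_t data_version;	/* version of the locally cached contents */
	bool     valid;		/* cache still usable for local edits */
};

static void dir_commit_edit(struct dir_cache *dir, uint64_t dv_before,
			    uint64_t dv_delta, uint64_t dv_after)
{
	if (dir->valid && dv_after == dv_before + dv_delta) {
		dir->data_version = dv_after;	/* safe to edit the copy */
		printf("local edit applied, dv=%llu\n",
		       (unsigned long long)dir->data_version);
	} else {
		dir->valid = false;		/* force a reload from the server */
		printf("version jumped (%llu -> %llu), cache invalidated\n",
		       (unsigned long long)dv_before,
		       (unsigned long long)dv_after);
	}
}

int main(void)
{
	struct dir_cache dir = { .data_version = 41, .valid = true };

	dir_commit_edit(&dir, 41, 1, 42);	/* expected bump: edit locally */
	dir_commit_edit(&dir, 42, 1, 45);	/* unexpected jump: invalidate */
	return 0;
}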
// SPDX-License-Identifier: GPL-2.0-or-later /* AFS server record management * * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells ([email protected]) */ #include <linux/sched.h> #include <linux/slab.h> #include "afs_fs.h" #include "internal.h" #include "protocol_yfs.h" static unsigned afs_server_gc_delay = 10; /* Server record timeout in seconds */ static atomic_t afs_server_debug_id; static struct afs_server *afs_maybe_use_server(struct afs_server *, enum afs_server_trace); static void __afs_put_server(struct afs_net *, struct afs_server *); /* * Find a server by one of its addresses. */ struct afs_server *afs_find_server(struct afs_net *net, const struct sockaddr_rxrpc *srx) { const struct afs_addr_list *alist; struct afs_server *server = NULL; unsigned int i; int seq = 0, diff; rcu_read_lock(); do { if (server) afs_unuse_server_notime(net, server, afs_server_trace_put_find_rsq); server = NULL; read_seqbegin_or_lock(&net->fs_addr_lock, &seq); if (srx->transport.family == AF_INET6) { const struct sockaddr_in6 *a = &srx->transport.sin6, *b; hlist_for_each_entry_rcu(server, &net->fs_addresses6, addr6_link) { alist = rcu_dereference(server->addresses); for (i = alist->nr_ipv4; i < alist->nr_addrs; i++) { b = &alist->addrs[i].transport.sin6; diff = ((u16 __force)a->sin6_port - (u16 __force)b->sin6_port); if (diff == 0) diff = memcmp(&a->sin6_addr, &b->sin6_addr, sizeof(struct in6_addr)); if (diff == 0) goto found; } } } else { const struct sockaddr_in *a = &srx->transport.sin, *b; hlist_for_each_entry_rcu(server, &net->fs_addresses4, addr4_link) { alist = rcu_dereference(server->addresses); for (i = 0; i < alist->nr_ipv4; i++) { b = &alist->addrs[i].transport.sin; diff = ((u16 __force)a->sin_port - (u16 __force)b->sin_port); if (diff == 0) diff = ((u32 __force)a->sin_addr.s_addr - (u32 __force)b->sin_addr.s_addr); if (diff == 0) goto found; } } } server = NULL; continue; found: server = afs_maybe_use_server(server, afs_server_trace_get_by_addr); } while (need_seqretry(&net->fs_addr_lock, seq)); done_seqretry(&net->fs_addr_lock, seq); rcu_read_unlock(); return server; } /* * Look up a server by its UUID and mark it active. */ struct afs_server *afs_find_server_by_uuid(struct afs_net *net, const uuid_t *uuid) { struct afs_server *server = NULL; struct rb_node *p; int diff, seq = 0; _enter("%pU", uuid); do { /* Unfortunately, rbtree walking doesn't give reliable results * under just the RCU read lock, so we have to check for * changes. */ if (server) afs_unuse_server(net, server, afs_server_trace_put_uuid_rsq); server = NULL; read_seqbegin_or_lock(&net->fs_lock, &seq); p = net->fs_servers.rb_node; while (p) { server = rb_entry(p, struct afs_server, uuid_rb); diff = memcmp(uuid, &server->uuid, sizeof(*uuid)); if (diff < 0) { p = p->rb_left; } else if (diff > 0) { p = p->rb_right; } else { afs_use_server(server, afs_server_trace_get_by_uuid); break; } server = NULL; } } while (need_seqretry(&net->fs_lock, seq)); done_seqretry(&net->fs_lock, seq); _leave(" = %p", server); return server; } /* * Install a server record in the namespace tree. If there's a clash, we stick * it into a list anchored on whichever afs_server struct is actually in the * tree. 
*/ static struct afs_server *afs_install_server(struct afs_cell *cell, struct afs_server *candidate) { const struct afs_addr_list *alist; struct afs_server *server, *next; struct afs_net *net = cell->net; struct rb_node **pp, *p; int diff; _enter("%p", candidate); write_seqlock(&net->fs_lock); /* Firstly install the server in the UUID lookup tree */ pp = &net->fs_servers.rb_node; p = NULL; while (*pp) { p = *pp; _debug("- consider %p", p); server = rb_entry(p, struct afs_server, uuid_rb); diff = memcmp(&candidate->uuid, &server->uuid, sizeof(uuid_t)); if (diff < 0) { pp = &(*pp)->rb_left; } else if (diff > 0) { pp = &(*pp)->rb_right; } else { if (server->cell == cell) goto exists; /* We have the same UUID representing servers in * different cells. Append the new server to the list. */ for (;;) { next = rcu_dereference_protected( server->uuid_next, lockdep_is_held(&net->fs_lock.lock)); if (!next) break; server = next; } rcu_assign_pointer(server->uuid_next, candidate); candidate->uuid_prev = server; server = candidate; goto added_dup; } } server = candidate; rb_link_node(&server->uuid_rb, p, pp); rb_insert_color(&server->uuid_rb, &net->fs_servers); hlist_add_head_rcu(&server->proc_link, &net->fs_proc); added_dup: write_seqlock(&net->fs_addr_lock); alist = rcu_dereference_protected(server->addresses, lockdep_is_held(&net->fs_addr_lock.lock)); /* Secondly, if the server has any IPv4 and/or IPv6 addresses, install * it in the IPv4 and/or IPv6 reverse-map lists. * * TODO: For speed we want to use something other than a flat list * here; even sorting the list in terms of lowest address would help a * bit, but anything we might want to do gets messy and memory * intensive. */ if (alist->nr_ipv4 > 0) hlist_add_head_rcu(&server->addr4_link, &net->fs_addresses4); if (alist->nr_addrs > alist->nr_ipv4) hlist_add_head_rcu(&server->addr6_link, &net->fs_addresses6); write_sequnlock(&net->fs_addr_lock); exists: afs_get_server(server, afs_server_trace_get_install); write_sequnlock(&net->fs_lock); return server; } /* * Allocate a new server record and mark it active. 
*/ static struct afs_server *afs_alloc_server(struct afs_cell *cell, const uuid_t *uuid, struct afs_addr_list *alist) { struct afs_server *server; struct afs_net *net = cell->net; _enter(""); server = kzalloc(sizeof(struct afs_server), GFP_KERNEL); if (!server) goto enomem; refcount_set(&server->ref, 1); atomic_set(&server->active, 1); server->debug_id = atomic_inc_return(&afs_server_debug_id); RCU_INIT_POINTER(server->addresses, alist); server->addr_version = alist->version; server->uuid = *uuid; rwlock_init(&server->fs_lock); INIT_WORK(&server->initcb_work, afs_server_init_callback_work); init_waitqueue_head(&server->probe_wq); INIT_LIST_HEAD(&server->probe_link); spin_lock_init(&server->probe_lock); server->cell = cell; server->rtt = UINT_MAX; afs_inc_servers_outstanding(net); trace_afs_server(server->debug_id, 1, 1, afs_server_trace_alloc); _leave(" = %p", server); return server; enomem: _leave(" = NULL [nomem]"); return NULL; } /* * Look up an address record for a server */ static struct afs_addr_list *afs_vl_lookup_addrs(struct afs_cell *cell, struct key *key, const uuid_t *uuid) { struct afs_vl_cursor vc; struct afs_addr_list *alist = NULL; int ret; ret = -ERESTARTSYS; if (afs_begin_vlserver_operation(&vc, cell, key)) { while (afs_select_vlserver(&vc)) { if (test_bit(AFS_VLSERVER_FL_IS_YFS, &vc.server->flags)) alist = afs_yfsvl_get_endpoints(&vc, uuid); else alist = afs_vl_get_addrs_u(&vc, uuid); } ret = afs_end_vlserver_operation(&vc); } return ret < 0 ? ERR_PTR(ret) : alist; } /* * Get or create a fileserver record. */ struct afs_server *afs_lookup_server(struct afs_cell *cell, struct key *key, const uuid_t *uuid, u32 addr_version) { struct afs_addr_list *alist; struct afs_server *server, *candidate; _enter("%p,%pU", cell->net, uuid); server = afs_find_server_by_uuid(cell->net, uuid); if (server) { if (server->addr_version != addr_version) set_bit(AFS_SERVER_FL_NEEDS_UPDATE, &server->flags); return server; } alist = afs_vl_lookup_addrs(cell, key, uuid); if (IS_ERR(alist)) return ERR_CAST(alist); candidate = afs_alloc_server(cell, uuid, alist); if (!candidate) { afs_put_addrlist(alist); return ERR_PTR(-ENOMEM); } server = afs_install_server(cell, candidate); if (server != candidate) { afs_put_addrlist(alist); kfree(candidate); } else { /* Immediately dispatch an asynchronous probe to each interface * on the fileserver. This will make sure the repeat-probing * service is started. */ afs_fs_probe_fileserver(cell->net, server, key, true); } return server; } /* * Set the server timer to fire after a given delay, assuming it's not already * set for an earlier time. */ static void afs_set_server_timer(struct afs_net *net, time64_t delay) { if (net->live) { afs_inc_servers_outstanding(net); if (timer_reduce(&net->fs_timer, jiffies + delay * HZ)) afs_dec_servers_outstanding(net); } } /* * Server management timer. We have an increment on fs_outstanding that we * need to pass along to the work item. */ void afs_servers_timer(struct timer_list *timer) { struct afs_net *net = container_of(timer, struct afs_net, fs_timer); _enter(""); if (!queue_work(afs_wq, &net->fs_manager)) afs_dec_servers_outstanding(net); } /* * Get a reference on a server object. */ struct afs_server *afs_get_server(struct afs_server *server, enum afs_server_trace reason) { unsigned int a; int r; __refcount_inc(&server->ref, &r); a = atomic_read(&server->active); trace_afs_server(server->debug_id, r + 1, a, reason); return server; } /* * Try to get a reference on a server object. 
*/ static struct afs_server *afs_maybe_use_server(struct afs_server *server, enum afs_server_trace reason) { unsigned int a; int r; if (!__refcount_inc_not_zero(&server->ref, &r)) return NULL; a = atomic_inc_return(&server->active); trace_afs_server(server->debug_id, r + 1, a, reason); return server; } /* * Get an active count on a server object. */ struct afs_server *afs_use_server(struct afs_server *server, enum afs_server_trace reason) { unsigned int a; int r; __refcount_inc(&server->ref, &r); a = atomic_inc_return(&server->active); trace_afs_server(server->debug_id, r + 1, a, reason); return server; } /* * Release a reference on a server record. */ void afs_put_server(struct afs_net *net, struct afs_server *server, enum afs_server_trace reason) { unsigned int a, debug_id = server->debug_id; bool zero; int r; if (!server) return; a = atomic_read(&server->active); zero = __refcount_dec_and_test(&server->ref, &r); trace_afs_server(debug_id, r - 1, a, reason); if (unlikely(zero)) __afs_put_server(net, server); } /* * Drop an active count on a server object without updating the last-unused * time. */ void afs_unuse_server_notime(struct afs_net *net, struct afs_server *server, enum afs_server_trace reason) { if (server) { unsigned int active = atomic_dec_return(&server->active); if (active == 0) afs_set_server_timer(net, afs_server_gc_delay); afs_put_server(net, server, reason); } } /* * Drop an active count on a server object. */ void afs_unuse_server(struct afs_net *net, struct afs_server *server, enum afs_server_trace reason) { if (server) { server->unuse_time = ktime_get_real_seconds(); afs_unuse_server_notime(net, server, reason); } } static void afs_server_rcu(struct rcu_head *rcu) { struct afs_server *server = container_of(rcu, struct afs_server, rcu); trace_afs_server(server->debug_id, refcount_read(&server->ref), atomic_read(&server->active), afs_server_trace_free); afs_put_addrlist(rcu_access_pointer(server->addresses)); kfree(server); } static void __afs_put_server(struct afs_net *net, struct afs_server *server) { call_rcu(&server->rcu, afs_server_rcu); afs_dec_servers_outstanding(net); } static void afs_give_up_callbacks(struct afs_net *net, struct afs_server *server) { struct afs_addr_list *alist = rcu_access_pointer(server->addresses); struct afs_addr_cursor ac = { .alist = alist, .index = alist->preferred, .error = 0, }; afs_fs_give_up_all_callbacks(net, server, &ac, NULL); } /* * destroy a dead server */ static void afs_destroy_server(struct afs_net *net, struct afs_server *server) { if (test_bit(AFS_SERVER_FL_MAY_HAVE_CB, &server->flags)) afs_give_up_callbacks(net, server); flush_work(&server->initcb_work); afs_put_server(net, server, afs_server_trace_destroy); } /* * Garbage collect any expired servers. 
*/ static void afs_gc_servers(struct afs_net *net, struct afs_server *gc_list) { struct afs_server *server, *next, *prev; int active; while ((server = gc_list)) { gc_list = server->gc_next; write_seqlock(&net->fs_lock); active = atomic_read(&server->active); if (active == 0) { trace_afs_server(server->debug_id, refcount_read(&server->ref), active, afs_server_trace_gc); next = rcu_dereference_protected( server->uuid_next, lockdep_is_held(&net->fs_lock.lock)); prev = server->uuid_prev; if (!prev) { /* The one at the front is in the tree */ if (!next) { rb_erase(&server->uuid_rb, &net->fs_servers); } else { rb_replace_node_rcu(&server->uuid_rb, &next->uuid_rb, &net->fs_servers); next->uuid_prev = NULL; } } else { /* This server is not at the front */ rcu_assign_pointer(prev->uuid_next, next); if (next) next->uuid_prev = prev; } list_del(&server->probe_link); hlist_del_rcu(&server->proc_link); if (!hlist_unhashed(&server->addr4_link)) hlist_del_rcu(&server->addr4_link); if (!hlist_unhashed(&server->addr6_link)) hlist_del_rcu(&server->addr6_link); } write_sequnlock(&net->fs_lock); if (active == 0) afs_destroy_server(net, server); } } /* * Manage the records of servers known to be within a network namespace. This * includes garbage collecting unused servers. * * Note also that we were given an increment on net->servers_outstanding by * whoever queued us that we need to deal with before returning. */ void afs_manage_servers(struct work_struct *work) { struct afs_net *net = container_of(work, struct afs_net, fs_manager); struct afs_server *gc_list = NULL; struct rb_node *cursor; time64_t now = ktime_get_real_seconds(), next_manage = TIME64_MAX; bool purging = !net->live; _enter(""); /* Trawl the server list looking for servers that have expired from * lack of use. */ read_seqlock_excl(&net->fs_lock); for (cursor = rb_first(&net->fs_servers); cursor; cursor = rb_next(cursor)) { struct afs_server *server = rb_entry(cursor, struct afs_server, uuid_rb); int active = atomic_read(&server->active); _debug("manage %pU %u", &server->uuid, active); if (purging) { trace_afs_server(server->debug_id, refcount_read(&server->ref), active, afs_server_trace_purging); if (active != 0) pr_notice("Can't purge s=%08x\n", server->debug_id); } if (active == 0) { time64_t expire_at = server->unuse_time; if (!test_bit(AFS_SERVER_FL_VL_FAIL, &server->flags) && !test_bit(AFS_SERVER_FL_NOT_FOUND, &server->flags)) expire_at += afs_server_gc_delay; if (purging || expire_at <= now) { server->gc_next = gc_list; gc_list = server; } else if (expire_at < next_manage) { next_manage = expire_at; } } } read_sequnlock_excl(&net->fs_lock); /* Update the timer on the way out. We have to pass an increment on * servers_outstanding in the namespace that we are in to the timer or * the work scheduler. */ if (!purging && next_manage < TIME64_MAX) { now = ktime_get_real_seconds(); if (next_manage - now <= 0) { if (queue_work(afs_wq, &net->fs_manager)) afs_inc_servers_outstanding(net); } else { afs_set_server_timer(net, next_manage - now); } } afs_gc_servers(net, gc_list); afs_dec_servers_outstanding(net); _leave(" [%d]", atomic_read(&net->servers_outstanding)); } static void afs_queue_server_manager(struct afs_net *net) { afs_inc_servers_outstanding(net); if (!queue_work(afs_wq, &net->fs_manager)) afs_dec_servers_outstanding(net); } /* * Purge list of servers. 
*/ void afs_purge_servers(struct afs_net *net) { _enter(""); if (del_timer_sync(&net->fs_timer)) afs_dec_servers_outstanding(net); afs_queue_server_manager(net); _debug("wait"); atomic_dec(&net->servers_outstanding); wait_var_event(&net->servers_outstanding, !atomic_read(&net->servers_outstanding)); _leave(""); } /* * Get an update for a server's address list. */ static noinline bool afs_update_server_record(struct afs_operation *op, struct afs_server *server) { struct afs_addr_list *alist, *discard; _enter(""); trace_afs_server(server->debug_id, refcount_read(&server->ref), atomic_read(&server->active), afs_server_trace_update); alist = afs_vl_lookup_addrs(op->volume->cell, op->key, &server->uuid); if (IS_ERR(alist)) { if ((PTR_ERR(alist) == -ERESTARTSYS || PTR_ERR(alist) == -EINTR) && (op->flags & AFS_OPERATION_UNINTR) && server->addresses) { _leave(" = t [intr]"); return true; } op->error = PTR_ERR(alist); _leave(" = f [%d]", op->error); return false; } discard = alist; if (server->addr_version != alist->version) { write_lock(&server->fs_lock); discard = rcu_dereference_protected(server->addresses, lockdep_is_held(&server->fs_lock)); rcu_assign_pointer(server->addresses, alist); server->addr_version = alist->version; write_unlock(&server->fs_lock); } afs_put_addrlist(discard); _leave(" = t"); return true; } /* * See if a server's address list needs updating. */ bool afs_check_server_record(struct afs_operation *op, struct afs_server *server) { bool success; int ret, retries = 0; _enter(""); ASSERT(server); retry: if (test_bit(AFS_SERVER_FL_UPDATING, &server->flags)) goto wait; if (test_bit(AFS_SERVER_FL_NEEDS_UPDATE, &server->flags)) goto update; _leave(" = t [good]"); return true; update: if (!test_and_set_bit_lock(AFS_SERVER_FL_UPDATING, &server->flags)) { clear_bit(AFS_SERVER_FL_NEEDS_UPDATE, &server->flags); success = afs_update_server_record(op, server); clear_bit_unlock(AFS_SERVER_FL_UPDATING, &server->flags); wake_up_bit(&server->flags, AFS_SERVER_FL_UPDATING); _leave(" = %d", success); return success; } wait: ret = wait_on_bit(&server->flags, AFS_SERVER_FL_UPDATING, (op->flags & AFS_OPERATION_UNINTR) ? TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE); if (ret == -ERESTARTSYS) { op->error = ret; _leave(" = f [intr]"); return false; } retries++; if (retries == 4) { _leave(" = f [stale]"); ret = -ESTALE; return false; } goto retry; }
linux-master
fs/afs/server.c
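afs_find_server() and afs_find_server_by_uuid() in fs/afs/server.c walk their lookup structures under read_seqbegin_or_lock()/need_seqretry(), repeating the walk if a writer interfered. The following is a minimal seqcount-style sketch of that retry idea using C11 atomics; it is an assumption-laden userspace illustration (single writer, no fallback to taking the lock, simplified memory ordering), not the kernel primitive.

/*
 * Seqcount-style optimistic read: the writer makes the counter odd while
 * updating, readers copy the data and retry if the counter was odd or
 * changed across the copy.
 */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int seq;	/* even = stable, odd = write in progress */
static int shared_value;

static void writer_update(int v)
{
	atomic_fetch_add_explicit(&seq, 1, memory_order_release); /* now odd */
	shared_value = v;
	atomic_fetch_add_explicit(&seq, 1, memory_order_release); /* even again */
}

static int reader_read(void)
{
	unsigned int start;
	int v;

	for (;;) {
		start = atomic_load_explicit(&seq, memory_order_acquire);
		if (start & 1)
			continue;	/* writer active, sample again */
		v = shared_value;
		if (atomic_load_explicit(&seq, memory_order_acquire) == start)
			return v;	/* no writer ran in between */
	}
}

int main(void)
{
	writer_update(7);
	printf("read %d at seq %u\n", reader_read(), atomic_load(&seq));
	return 0;
}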
// SPDX-License-Identifier: GPL-2.0-or-later /* AFS cell alias detection * * Copyright (C) 2020 Red Hat, Inc. All Rights Reserved. * Written by David Howells ([email protected]) */ #include <linux/slab.h> #include <linux/sched.h> #include <linux/namei.h> #include <keys/rxrpc-type.h> #include "internal.h" /* * Sample a volume. */ static struct afs_volume *afs_sample_volume(struct afs_cell *cell, struct key *key, const char *name, unsigned int namelen) { struct afs_volume *volume; struct afs_fs_context fc = { .type = 0, /* Explicitly leave it to the VLDB */ .volnamesz = namelen, .volname = name, .net = cell->net, .cell = cell, .key = key, /* This might need to be something */ }; volume = afs_create_volume(&fc); _leave(" = %p", volume); return volume; } /* * Compare two addresses. */ static int afs_compare_addrs(const struct sockaddr_rxrpc *srx_a, const struct sockaddr_rxrpc *srx_b) { short port_a, port_b; int addr_a, addr_b, diff; diff = (short)srx_a->transport_type - (short)srx_b->transport_type; if (diff) goto out; switch (srx_a->transport_type) { case AF_INET: { const struct sockaddr_in *a = &srx_a->transport.sin; const struct sockaddr_in *b = &srx_b->transport.sin; addr_a = ntohl(a->sin_addr.s_addr); addr_b = ntohl(b->sin_addr.s_addr); diff = addr_a - addr_b; if (diff == 0) { port_a = ntohs(a->sin_port); port_b = ntohs(b->sin_port); diff = port_a - port_b; } break; } case AF_INET6: { const struct sockaddr_in6 *a = &srx_a->transport.sin6; const struct sockaddr_in6 *b = &srx_b->transport.sin6; diff = memcmp(&a->sin6_addr, &b->sin6_addr, 16); if (diff == 0) { port_a = ntohs(a->sin6_port); port_b = ntohs(b->sin6_port); diff = port_a - port_b; } break; } default: WARN_ON(1); diff = 1; } out: return diff; } /* * Compare the address lists of a pair of fileservers. */ static int afs_compare_fs_alists(const struct afs_server *server_a, const struct afs_server *server_b) { const struct afs_addr_list *la, *lb; int a = 0, b = 0, addr_matches = 0; la = rcu_dereference(server_a->addresses); lb = rcu_dereference(server_b->addresses); while (a < la->nr_addrs && b < lb->nr_addrs) { const struct sockaddr_rxrpc *srx_a = &la->addrs[a]; const struct sockaddr_rxrpc *srx_b = &lb->addrs[b]; int diff = afs_compare_addrs(srx_a, srx_b); if (diff < 0) { a++; } else if (diff > 0) { b++; } else { addr_matches++; a++; b++; } } return addr_matches; } /* * Compare the fileserver lists of two volumes. The server lists are sorted in * order of ascending UUID. */ static int afs_compare_volume_slists(const struct afs_volume *vol_a, const struct afs_volume *vol_b) { const struct afs_server_list *la, *lb; int i, a = 0, b = 0, uuid_matches = 0, addr_matches = 0; la = rcu_dereference(vol_a->servers); lb = rcu_dereference(vol_b->servers); for (i = 0; i < AFS_MAXTYPES; i++) if (la->vids[i] != lb->vids[i]) return 0; while (a < la->nr_servers && b < lb->nr_servers) { const struct afs_server *server_a = la->servers[a].server; const struct afs_server *server_b = lb->servers[b].server; int diff = memcmp(&server_a->uuid, &server_b->uuid, sizeof(uuid_t)); if (diff < 0) { a++; } else if (diff > 0) { b++; } else { uuid_matches++; addr_matches += afs_compare_fs_alists(server_a, server_b); a++; b++; } } _leave(" = %d [um %d]", addr_matches, uuid_matches); return addr_matches; } /* * Compare root.cell volumes. 
*/ static int afs_compare_cell_roots(struct afs_cell *cell) { struct afs_cell *p; _enter(""); rcu_read_lock(); hlist_for_each_entry_rcu(p, &cell->net->proc_cells, proc_link) { if (p == cell || p->alias_of) continue; if (!p->root_volume) continue; /* Ignore cells that don't have a root.cell volume. */ if (afs_compare_volume_slists(cell->root_volume, p->root_volume) != 0) goto is_alias; } rcu_read_unlock(); _leave(" = 0"); return 0; is_alias: rcu_read_unlock(); cell->alias_of = afs_use_cell(p, afs_cell_trace_use_alias); return 1; } /* * Query the new cell for a volume from a cell we're already using. */ static int afs_query_for_alias_one(struct afs_cell *cell, struct key *key, struct afs_cell *p) { struct afs_volume *volume, *pvol = NULL; int ret; /* Arbitrarily pick a volume from the list. */ read_seqlock_excl(&p->volume_lock); if (!RB_EMPTY_ROOT(&p->volumes)) pvol = afs_get_volume(rb_entry(p->volumes.rb_node, struct afs_volume, cell_node), afs_volume_trace_get_query_alias); read_sequnlock_excl(&p->volume_lock); if (!pvol) return 0; _enter("%s:%s", cell->name, pvol->name); /* And see if it's in the new cell. */ volume = afs_sample_volume(cell, key, pvol->name, pvol->name_len); if (IS_ERR(volume)) { afs_put_volume(cell->net, pvol, afs_volume_trace_put_query_alias); if (PTR_ERR(volume) != -ENOMEDIUM) return PTR_ERR(volume); /* That volume is not in the new cell, so not an alias */ return 0; } /* The new cell has a like-named volume also - compare volume ID, * server and address lists. */ ret = 0; if (pvol->vid == volume->vid) { rcu_read_lock(); if (afs_compare_volume_slists(volume, pvol)) ret = 1; rcu_read_unlock(); } afs_put_volume(cell->net, volume, afs_volume_trace_put_query_alias); afs_put_volume(cell->net, pvol, afs_volume_trace_put_query_alias); return ret; } /* * Query the new cell for volumes we know exist in cells we're already using. */ static int afs_query_for_alias(struct afs_cell *cell, struct key *key) { struct afs_cell *p; _enter("%s", cell->name); if (mutex_lock_interruptible(&cell->net->proc_cells_lock) < 0) return -ERESTARTSYS; hlist_for_each_entry(p, &cell->net->proc_cells, proc_link) { if (p == cell || p->alias_of) continue; if (RB_EMPTY_ROOT(&p->volumes)) continue; if (p->root_volume) continue; /* Ignore cells that have a root.cell volume. */ afs_use_cell(p, afs_cell_trace_use_check_alias); mutex_unlock(&cell->net->proc_cells_lock); if (afs_query_for_alias_one(cell, key, p) != 0) goto is_alias; if (mutex_lock_interruptible(&cell->net->proc_cells_lock) < 0) { afs_unuse_cell(cell->net, p, afs_cell_trace_unuse_check_alias); return -ERESTARTSYS; } afs_unuse_cell(cell->net, p, afs_cell_trace_unuse_check_alias); } mutex_unlock(&cell->net->proc_cells_lock); _leave(" = 0"); return 0; is_alias: cell->alias_of = p; /* Transfer our ref */ return 1; } /* * Look up a VLDB record for a volume. */ static char *afs_vl_get_cell_name(struct afs_cell *cell, struct key *key) { struct afs_vl_cursor vc; char *cell_name = ERR_PTR(-EDESTADDRREQ); bool skipped = false, not_skipped = false; int ret; if (!afs_begin_vlserver_operation(&vc, cell, key)) return ERR_PTR(-ERESTARTSYS); while (afs_select_vlserver(&vc)) { if (!test_bit(AFS_VLSERVER_FL_IS_YFS, &vc.server->flags)) { vc.ac.error = -EOPNOTSUPP; skipped = true; continue; } not_skipped = true; cell_name = afs_yfsvl_get_cell_name(&vc); } ret = afs_end_vlserver_operation(&vc); if (skipped && !not_skipped) ret = -EOPNOTSUPP; return ret < 0 ? 
ERR_PTR(ret) : cell_name; } static int yfs_check_canonical_cell_name(struct afs_cell *cell, struct key *key) { struct afs_cell *master; char *cell_name; cell_name = afs_vl_get_cell_name(cell, key); if (IS_ERR(cell_name)) return PTR_ERR(cell_name); if (strcmp(cell_name, cell->name) == 0) { kfree(cell_name); return 0; } master = afs_lookup_cell(cell->net, cell_name, strlen(cell_name), NULL, false); kfree(cell_name); if (IS_ERR(master)) return PTR_ERR(master); cell->alias_of = master; /* Transfer our ref */ return 1; } static int afs_do_cell_detect_alias(struct afs_cell *cell, struct key *key) { struct afs_volume *root_volume; int ret; _enter("%s", cell->name); ret = yfs_check_canonical_cell_name(cell, key); if (ret != -EOPNOTSUPP) return ret; /* Try and get the root.cell volume for comparison with other cells */ root_volume = afs_sample_volume(cell, key, "root.cell", 9); if (!IS_ERR(root_volume)) { cell->root_volume = root_volume; return afs_compare_cell_roots(cell); } if (PTR_ERR(root_volume) != -ENOMEDIUM) return PTR_ERR(root_volume); /* Okay, this cell doesn't have an root.cell volume. We need to * locate some other random volume and use that to check. */ return afs_query_for_alias(cell, key); } /* * Check to see if a new cell is an alias of a cell we already have. At this * point we have the cell's volume server list. * * Returns 0 if we didn't detect an alias, 1 if we found an alias and an error * if we had problems gathering the data required. In the case the we did * detect an alias, cell->alias_of is set to point to the assumed master. */ int afs_cell_detect_alias(struct afs_cell *cell, struct key *key) { struct afs_net *net = cell->net; int ret; if (mutex_lock_interruptible(&net->cells_alias_lock) < 0) return -ERESTARTSYS; if (test_bit(AFS_CELL_FL_CHECK_ALIAS, &cell->flags)) { ret = afs_do_cell_detect_alias(cell, key); if (ret >= 0) clear_bit_unlock(AFS_CELL_FL_CHECK_ALIAS, &cell->flags); } else { ret = cell->alias_of ? 1 : 0; } mutex_unlock(&net->cells_alias_lock); if (ret == 1) pr_notice("kAFS: Cell %s is an alias of %s\n", cell->name, cell->alias_of->name); return ret; }
linux-master
fs/afs/vl_alias.c
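afs_compare_volume_slists() and afs_compare_fs_alists() in fs/afs/vl_alias.c rely on both inputs being sorted, so a single two-cursor pass can count entries common to both lists. A small self-contained sketch of that walk, with plain integers standing in for the sorted server UUIDs (`count_common` is an illustrative name, not an AFS helper):

/* Merge-style intersection count over two sorted arrays. */
#include <stddef.h>
#include <stdio.h>

static int count_common(const int *a, size_t na, const int *b, size_t nb)
{
	size_t i = 0, j = 0;
	int matches = 0;

	while (i < na && j < nb) {
		if (a[i] < b[j]) {
			i++;			/* only in the first list */
		} else if (a[i] > b[j]) {
			j++;			/* only in the second list */
		} else {
			matches++;		/* present in both lists */
			i++;
			j++;
		}
	}
	return matches;
}

int main(void)
{
	int cell_a[] = { 1, 3, 5, 9 };
	int cell_b[] = { 3, 4, 5, 10 };

	printf("matches = %d\n", count_common(cell_a, 4, cell_b, 4)); /* 2 */
	return 0;
}

Because the lists are already sorted, the comparison stays linear in the combined list length rather than quadratic, which matters when the same volumes are probed repeatedly during alias detection.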
/* * Copyright (c) 2002, 2007 Red Hat, Inc. All rights reserved. * * This software may be freely redistributed under the terms of the * GNU General Public License. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Authors: David Woodhouse <[email protected]> * David Howells <[email protected]> * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/circ_buf.h> #include <linux/sched.h> #include "internal.h" /* * Handle invalidation of an mmap'd file. We invalidate all the PTEs referring * to the pages in this file's pagecache, forcing the kernel to go through * ->fault() or ->page_mkwrite() - at which point we can handle invalidation * more fully. */ void afs_invalidate_mmap_work(struct work_struct *work) { struct afs_vnode *vnode = container_of(work, struct afs_vnode, cb_work); unmap_mapping_pages(vnode->netfs.inode.i_mapping, 0, 0, false); } void afs_server_init_callback_work(struct work_struct *work) { struct afs_server *server = container_of(work, struct afs_server, initcb_work); struct afs_vnode *vnode; struct afs_cell *cell = server->cell; down_read(&cell->fs_open_mmaps_lock); list_for_each_entry(vnode, &cell->fs_open_mmaps, cb_mmap_link) { if (vnode->cb_server == server) { clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags); queue_work(system_unbound_wq, &vnode->cb_work); } } up_read(&cell->fs_open_mmaps_lock); } /* * Allow the fileserver to request callback state (re-)initialisation. * Unfortunately, UUIDs are not guaranteed unique. */ void afs_init_callback_state(struct afs_server *server) { rcu_read_lock(); do { server->cb_s_break++; atomic_inc(&server->cell->fs_s_break); if (!list_empty(&server->cell->fs_open_mmaps)) queue_work(system_unbound_wq, &server->initcb_work); } while ((server = rcu_dereference(server->uuid_next))); rcu_read_unlock(); } /* * actually break a callback */ void __afs_break_callback(struct afs_vnode *vnode, enum afs_cb_break_reason reason) { _enter(""); clear_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags); if (test_and_clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) { vnode->cb_break++; vnode->cb_v_break = vnode->volume->cb_v_break; afs_clear_permits(vnode); if (vnode->lock_state == AFS_VNODE_LOCK_WAITING_FOR_CB) afs_lock_may_be_available(vnode); if (reason != afs_cb_break_for_deleted && vnode->status.type == AFS_FTYPE_FILE && atomic_read(&vnode->cb_nr_mmap)) queue_work(system_unbound_wq, &vnode->cb_work); trace_afs_cb_break(&vnode->fid, vnode->cb_break, reason, true); } else { trace_afs_cb_break(&vnode->fid, vnode->cb_break, reason, false); } } void afs_break_callback(struct afs_vnode *vnode, enum afs_cb_break_reason reason) { write_seqlock(&vnode->cb_lock); __afs_break_callback(vnode, reason); write_sequnlock(&vnode->cb_lock); } /* * Look up a volume by volume ID under RCU conditions. */ static struct afs_volume *afs_lookup_volume_rcu(struct afs_cell *cell, afs_volid_t vid) { struct afs_volume *volume = NULL; struct rb_node *p; int seq = 0; do { /* Unfortunately, rbtree walking doesn't give reliable results * under just the RCU read lock, so we have to check for * changes. 
*/ read_seqbegin_or_lock(&cell->volume_lock, &seq); p = rcu_dereference_raw(cell->volumes.rb_node); while (p) { volume = rb_entry(p, struct afs_volume, cell_node); if (volume->vid < vid) p = rcu_dereference_raw(p->rb_left); else if (volume->vid > vid) p = rcu_dereference_raw(p->rb_right); else break; volume = NULL; } } while (need_seqretry(&cell->volume_lock, seq)); done_seqretry(&cell->volume_lock, seq); return volume; } /* * allow the fileserver to explicitly break one callback * - happens when * - the backing file is changed * - a lock is released */ static void afs_break_one_callback(struct afs_volume *volume, struct afs_fid *fid) { struct super_block *sb; struct afs_vnode *vnode; struct inode *inode; if (fid->vnode == 0 && fid->unique == 0) { /* The callback break applies to an entire volume. */ write_lock(&volume->cb_v_break_lock); volume->cb_v_break++; trace_afs_cb_break(fid, volume->cb_v_break, afs_cb_break_for_volume_callback, false); write_unlock(&volume->cb_v_break_lock); return; } /* See if we can find a matching inode - even an I_NEW inode needs to * be marked as it can have its callback broken before we finish * setting up the local inode. */ sb = rcu_dereference(volume->sb); if (!sb) return; inode = find_inode_rcu(sb, fid->vnode, afs_ilookup5_test_by_fid, fid); if (inode) { vnode = AFS_FS_I(inode); afs_break_callback(vnode, afs_cb_break_for_callback); } else { trace_afs_cb_miss(fid, afs_cb_break_for_callback); } } static void afs_break_some_callbacks(struct afs_server *server, struct afs_callback_break *cbb, size_t *_count) { struct afs_callback_break *residue = cbb; struct afs_volume *volume; afs_volid_t vid = cbb->fid.vid; size_t i; volume = afs_lookup_volume_rcu(server->cell, vid); /* TODO: Find all matching volumes if we couldn't match the server and * break them anyway. */ for (i = *_count; i > 0; cbb++, i--) { if (cbb->fid.vid == vid) { _debug("- Fid { vl=%08llx n=%llu u=%u }", cbb->fid.vid, cbb->fid.vnode, cbb->fid.unique); --*_count; if (volume) afs_break_one_callback(volume, &cbb->fid); } else { *residue++ = *cbb; } } } /* * allow the fileserver to break callback promises */ void afs_break_callbacks(struct afs_server *server, size_t count, struct afs_callback_break *callbacks) { _enter("%p,%zu,", server, count); ASSERT(server != NULL); rcu_read_lock(); while (count > 0) afs_break_some_callbacks(server, callbacks, &count); rcu_read_unlock(); return; }
linux-master
fs/afs/callback.c
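afs_break_some_callbacks() in fs/afs/callback.c consumes every entry that shares the first entry's volume ID and copies the leftovers forward over the consumed slots, shrinking the count so the caller can keep looping until it reaches zero. A hedged userspace sketch of that in-place compaction follows; the struct and names are made up for illustration and do not reflect the AFS wire format.

/*
 * Process all entries matching the first entry's volume ID, compact the
 * rest to the front of the array, and reduce the count accordingly.
 */
#include <stddef.h>
#include <stdio.h>

struct cb_break {
	unsigned long long vid;		/* volume the break applies to */
	unsigned long long vnode;
};

static void break_some(struct cb_break *cbb, size_t *count)
{
	struct cb_break *residue = cbb;
	unsigned long long vid = cbb->vid;
	size_t i;

	for (i = *count; i > 0; cbb++, i--) {
		if (cbb->vid == vid) {
			printf("break vid=%llu vnode=%llu\n", cbb->vid, cbb->vnode);
			--*count;		/* consumed */
		} else {
			*residue++ = *cbb;	/* keep for a later pass */
		}
	}
}

int main(void)
{
	struct cb_break cbs[] = {
		{ .vid = 7, .vnode = 1 },
		{ .vid = 9, .vnode = 2 },
		{ .vid = 7, .vnode = 3 },
	};
	size_t count = 3;

	while (count > 0)
		break_some(cbs, &count);	/* two passes: vid 7, then vid 9 */
	return 0;
}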
/* * Copyright (c) 2002 Red Hat, Inc. All rights reserved. * * This software may be freely redistributed under the terms of the * GNU General Public License. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Authors: David Woodhouse <[email protected]> * David Howells <[email protected]> * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/pagemap.h> #include <linux/sched.h> #include <linux/mount.h> #include <linux/namei.h> #include <linux/iversion.h> #include "internal.h" #include "afs_fs.h" static const struct inode_operations afs_symlink_inode_operations = { .get_link = page_get_link, }; static noinline void dump_vnode(struct afs_vnode *vnode, struct afs_vnode *parent_vnode) { static unsigned long once_only; pr_warn("kAFS: AFS vnode with undefined type %u\n", vnode->status.type); pr_warn("kAFS: A=%d m=%o s=%llx v=%llx\n", vnode->status.abort_code, vnode->status.mode, vnode->status.size, vnode->status.data_version); pr_warn("kAFS: vnode %llx:%llx:%x\n", vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique); if (parent_vnode) pr_warn("kAFS: dir %llx:%llx:%x\n", parent_vnode->fid.vid, parent_vnode->fid.vnode, parent_vnode->fid.unique); if (!test_and_set_bit(0, &once_only)) dump_stack(); } /* * Set parameters for the netfs library */ static void afs_set_netfs_context(struct afs_vnode *vnode) { netfs_inode_init(&vnode->netfs, &afs_req_ops); } /* * Initialise an inode from the vnode status. */ static int afs_inode_init_from_status(struct afs_operation *op, struct afs_vnode_param *vp, struct afs_vnode *vnode) { struct afs_file_status *status = &vp->scb.status; struct inode *inode = AFS_VNODE_TO_I(vnode); struct timespec64 t; _enter("{%llx:%llu.%u} %s", vp->fid.vid, vp->fid.vnode, vp->fid.unique, op->type ? op->type->name : "???"); _debug("FS: ft=%d lk=%d sz=%llu ver=%Lu mod=%hu", status->type, status->nlink, (unsigned long long) status->size, status->data_version, status->mode); write_seqlock(&vnode->cb_lock); vnode->cb_v_break = op->cb_v_break; vnode->cb_s_break = op->cb_s_break; vnode->status = *status; t = status->mtime_client; inode_set_ctime_to_ts(inode, t); inode->i_mtime = t; inode->i_atime = t; inode->i_flags |= S_NOATIME; inode->i_uid = make_kuid(&init_user_ns, status->owner); inode->i_gid = make_kgid(&init_user_ns, status->group); set_nlink(&vnode->netfs.inode, status->nlink); switch (status->type) { case AFS_FTYPE_FILE: inode->i_mode = S_IFREG | (status->mode & S_IALLUGO); inode->i_op = &afs_file_inode_operations; inode->i_fop = &afs_file_operations; inode->i_mapping->a_ops = &afs_file_aops; mapping_set_large_folios(inode->i_mapping); break; case AFS_FTYPE_DIR: inode->i_mode = S_IFDIR | (status->mode & S_IALLUGO); inode->i_op = &afs_dir_inode_operations; inode->i_fop = &afs_dir_file_operations; inode->i_mapping->a_ops = &afs_dir_aops; mapping_set_large_folios(inode->i_mapping); break; case AFS_FTYPE_SYMLINK: /* Symlinks with a mode of 0644 are actually mountpoints. 
*/ if ((status->mode & 0777) == 0644) { inode->i_flags |= S_AUTOMOUNT; set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags); inode->i_mode = S_IFDIR | 0555; inode->i_op = &afs_mntpt_inode_operations; inode->i_fop = &afs_mntpt_file_operations; inode->i_mapping->a_ops = &afs_symlink_aops; } else { inode->i_mode = S_IFLNK | status->mode; inode->i_op = &afs_symlink_inode_operations; inode->i_mapping->a_ops = &afs_symlink_aops; } inode_nohighmem(inode); break; default: dump_vnode(vnode, op->file[0].vnode != vnode ? op->file[0].vnode : NULL); write_sequnlock(&vnode->cb_lock); return afs_protocol_error(NULL, afs_eproto_file_type); } afs_set_i_size(vnode, status->size); afs_set_netfs_context(vnode); vnode->invalid_before = status->data_version; inode_set_iversion_raw(&vnode->netfs.inode, status->data_version); if (!vp->scb.have_cb) { /* it's a symlink we just created (the fileserver * didn't give us a callback) */ vnode->cb_expires_at = ktime_get_real_seconds(); } else { vnode->cb_expires_at = vp->scb.callback.expires_at; vnode->cb_server = op->server; set_bit(AFS_VNODE_CB_PROMISED, &vnode->flags); } write_sequnlock(&vnode->cb_lock); return 0; } /* * Update the core inode struct from a returned status record. */ static void afs_apply_status(struct afs_operation *op, struct afs_vnode_param *vp) { struct afs_file_status *status = &vp->scb.status; struct afs_vnode *vnode = vp->vnode; struct inode *inode = &vnode->netfs.inode; struct timespec64 t; umode_t mode; bool data_changed = false; bool change_size = vp->set_size; _enter("{%llx:%llu.%u} %s", vp->fid.vid, vp->fid.vnode, vp->fid.unique, op->type ? op->type->name : "???"); BUG_ON(test_bit(AFS_VNODE_UNSET, &vnode->flags)); if (status->type != vnode->status.type) { pr_warn("Vnode %llx:%llx:%x changed type %u to %u\n", vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique, status->type, vnode->status.type); afs_protocol_error(NULL, afs_eproto_bad_status); return; } if (status->nlink != vnode->status.nlink) set_nlink(inode, status->nlink); if (status->owner != vnode->status.owner) inode->i_uid = make_kuid(&init_user_ns, status->owner); if (status->group != vnode->status.group) inode->i_gid = make_kgid(&init_user_ns, status->group); if (status->mode != vnode->status.mode) { mode = inode->i_mode; mode &= ~S_IALLUGO; mode |= status->mode & S_IALLUGO; WRITE_ONCE(inode->i_mode, mode); } t = status->mtime_client; inode->i_mtime = t; if (vp->update_ctime) inode_set_ctime_to_ts(inode, op->ctime); if (vnode->status.data_version != status->data_version) data_changed = true; vnode->status = *status; if (vp->dv_before + vp->dv_delta != status->data_version) { if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) pr_warn("kAFS: vnode modified {%llx:%llu} %llx->%llx %s (op=%x)\n", vnode->fid.vid, vnode->fid.vnode, (unsigned long long)vp->dv_before + vp->dv_delta, (unsigned long long)status->data_version, op->type ? op->type->name : "???", op->debug_id); vnode->invalid_before = status->data_version; if (vnode->status.type == AFS_FTYPE_DIR) { if (test_and_clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags)) afs_stat_v(vnode, n_inval); } else { set_bit(AFS_VNODE_ZAP_DATA, &vnode->flags); } change_size = true; data_changed = true; } else if (vnode->status.type == AFS_FTYPE_DIR) { /* Expected directory change is handled elsewhere so * that we can locally edit the directory and save on a * download. 
*/ if (test_bit(AFS_VNODE_DIR_VALID, &vnode->flags)) data_changed = false; change_size = true; } if (data_changed) { inode_set_iversion_raw(inode, status->data_version); /* Only update the size if the data version jumped. If the * file is being modified locally, then we might have our own * idea of what the size should be that's not the same as * what's on the server. */ vnode->netfs.remote_i_size = status->size; if (change_size) { afs_set_i_size(vnode, status->size); inode_set_ctime_to_ts(inode, t); inode->i_atime = t; } } } /* * Apply a callback to a vnode. */ static void afs_apply_callback(struct afs_operation *op, struct afs_vnode_param *vp) { struct afs_callback *cb = &vp->scb.callback; struct afs_vnode *vnode = vp->vnode; if (!afs_cb_is_broken(vp->cb_break_before, vnode)) { vnode->cb_expires_at = cb->expires_at; vnode->cb_server = op->server; set_bit(AFS_VNODE_CB_PROMISED, &vnode->flags); } } /* * Apply the received status and callback to an inode all in the same critical * section to avoid races with afs_validate(). */ void afs_vnode_commit_status(struct afs_operation *op, struct afs_vnode_param *vp) { struct afs_vnode *vnode = vp->vnode; _enter(""); write_seqlock(&vnode->cb_lock); if (vp->scb.have_error) { /* A YFS server will return this from RemoveFile2 and AFS and * YFS will return this from InlineBulkStatus. */ if (vp->scb.status.abort_code == VNOVNODE) { set_bit(AFS_VNODE_DELETED, &vnode->flags); clear_nlink(&vnode->netfs.inode); __afs_break_callback(vnode, afs_cb_break_for_deleted); op->flags &= ~AFS_OPERATION_DIR_CONFLICT; } } else if (vp->scb.have_status) { if (vp->speculative && (test_bit(AFS_VNODE_MODIFYING, &vnode->flags) || vp->dv_before != vnode->status.data_version)) /* Ignore the result of a speculative bulk status fetch * if it splits around a modification op, thereby * appearing to regress the data version. */ goto out; afs_apply_status(op, vp); if (vp->scb.have_cb) afs_apply_callback(op, vp); } else if (vp->op_unlinked && !(op->flags & AFS_OPERATION_DIR_CONFLICT)) { drop_nlink(&vnode->netfs.inode); if (vnode->netfs.inode.i_nlink == 0) { set_bit(AFS_VNODE_DELETED, &vnode->flags); __afs_break_callback(vnode, afs_cb_break_for_deleted); } } out: write_sequnlock(&vnode->cb_lock); if (vp->scb.have_status) afs_cache_permit(vnode, op->key, vp->cb_break_before, &vp->scb); } static void afs_fetch_status_success(struct afs_operation *op) { struct afs_vnode_param *vp = &op->file[op->fetch_status.which]; struct afs_vnode *vnode = vp->vnode; int ret; if (vnode->netfs.inode.i_state & I_NEW) { ret = afs_inode_init_from_status(op, vp, vnode); op->error = ret; if (ret == 0) afs_cache_permit(vnode, op->key, vp->cb_break_before, &vp->scb); } else { afs_vnode_commit_status(op, vp); } } const struct afs_operation_ops afs_fetch_status_operation = { .issue_afs_rpc = afs_fs_fetch_status, .issue_yfs_rpc = yfs_fs_fetch_status, .success = afs_fetch_status_success, .aborted = afs_check_for_remote_deletion, }; /* * Fetch file status from the volume. 
*/ int afs_fetch_status(struct afs_vnode *vnode, struct key *key, bool is_new, afs_access_t *_caller_access) { struct afs_operation *op; _enter("%s,{%llx:%llu.%u,S=%lx}", vnode->volume->name, vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique, vnode->flags); op = afs_alloc_operation(key, vnode->volume); if (IS_ERR(op)) return PTR_ERR(op); afs_op_set_vnode(op, 0, vnode); op->nr_files = 1; op->ops = &afs_fetch_status_operation; afs_begin_vnode_operation(op); afs_wait_for_operation(op); if (_caller_access) *_caller_access = op->file[0].scb.status.caller_access; return afs_put_operation(op); } /* * ilookup() comparator */ int afs_ilookup5_test_by_fid(struct inode *inode, void *opaque) { struct afs_vnode *vnode = AFS_FS_I(inode); struct afs_fid *fid = opaque; return (fid->vnode == vnode->fid.vnode && fid->vnode_hi == vnode->fid.vnode_hi && fid->unique == vnode->fid.unique); } /* * iget5() comparator */ static int afs_iget5_test(struct inode *inode, void *opaque) { struct afs_vnode_param *vp = opaque; //struct afs_vnode *vnode = AFS_FS_I(inode); return afs_ilookup5_test_by_fid(inode, &vp->fid); } /* * iget5() inode initialiser */ static int afs_iget5_set(struct inode *inode, void *opaque) { struct afs_vnode_param *vp = opaque; struct afs_super_info *as = AFS_FS_S(inode->i_sb); struct afs_vnode *vnode = AFS_FS_I(inode); vnode->volume = as->volume; vnode->fid = vp->fid; /* YFS supports 96-bit vnode IDs, but Linux only supports * 64-bit inode numbers. */ inode->i_ino = vnode->fid.vnode; inode->i_generation = vnode->fid.unique; return 0; } /* * Get a cache cookie for an inode. */ static void afs_get_inode_cache(struct afs_vnode *vnode) { #ifdef CONFIG_AFS_FSCACHE struct { __be32 vnode_id; __be32 unique; __be32 vnode_id_ext[2]; /* Allow for a 96-bit key */ } __packed key; struct afs_vnode_cache_aux aux; if (vnode->status.type != AFS_FTYPE_FILE) { vnode->netfs.cache = NULL; return; } key.vnode_id = htonl(vnode->fid.vnode); key.unique = htonl(vnode->fid.unique); key.vnode_id_ext[0] = htonl(vnode->fid.vnode >> 32); key.vnode_id_ext[1] = htonl(vnode->fid.vnode_hi); afs_set_cache_aux(vnode, &aux); afs_vnode_set_cache(vnode, fscache_acquire_cookie( vnode->volume->cache, vnode->status.type == AFS_FTYPE_FILE ? 
0 : FSCACHE_ADV_SINGLE_CHUNK, &key, sizeof(key), &aux, sizeof(aux), i_size_read(&vnode->netfs.inode))); #endif } /* * inode retrieval */ struct inode *afs_iget(struct afs_operation *op, struct afs_vnode_param *vp) { struct afs_vnode_param *dvp = &op->file[0]; struct super_block *sb = dvp->vnode->netfs.inode.i_sb; struct afs_vnode *vnode; struct inode *inode; int ret; _enter(",{%llx:%llu.%u},,", vp->fid.vid, vp->fid.vnode, vp->fid.unique); inode = iget5_locked(sb, vp->fid.vnode, afs_iget5_test, afs_iget5_set, vp); if (!inode) { _leave(" = -ENOMEM"); return ERR_PTR(-ENOMEM); } vnode = AFS_FS_I(inode); _debug("GOT INODE %p { vl=%llx vn=%llx, u=%x }", inode, vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique); /* deal with an existing inode */ if (!(inode->i_state & I_NEW)) { _leave(" = %p", inode); return inode; } ret = afs_inode_init_from_status(op, vp, vnode); if (ret < 0) goto bad_inode; afs_get_inode_cache(vnode); /* success */ clear_bit(AFS_VNODE_UNSET, &vnode->flags); unlock_new_inode(inode); _leave(" = %p", inode); return inode; /* failure */ bad_inode: iget_failed(inode); _leave(" = %d [bad]", ret); return ERR_PTR(ret); } static int afs_iget5_set_root(struct inode *inode, void *opaque) { struct afs_super_info *as = AFS_FS_S(inode->i_sb); struct afs_vnode *vnode = AFS_FS_I(inode); vnode->volume = as->volume; vnode->fid.vid = as->volume->vid, vnode->fid.vnode = 1; vnode->fid.unique = 1; inode->i_ino = 1; inode->i_generation = 1; return 0; } /* * Set up the root inode for a volume. This is always vnode 1, unique 1 within * the volume. */ struct inode *afs_root_iget(struct super_block *sb, struct key *key) { struct afs_super_info *as = AFS_FS_S(sb); struct afs_operation *op; struct afs_vnode *vnode; struct inode *inode; int ret; _enter(",{%llx},,", as->volume->vid); inode = iget5_locked(sb, 1, NULL, afs_iget5_set_root, NULL); if (!inode) { _leave(" = -ENOMEM"); return ERR_PTR(-ENOMEM); } _debug("GOT ROOT INODE %p { vl=%llx }", inode, as->volume->vid); BUG_ON(!(inode->i_state & I_NEW)); vnode = AFS_FS_I(inode); vnode->cb_v_break = as->volume->cb_v_break, afs_set_netfs_context(vnode); op = afs_alloc_operation(key, as->volume); if (IS_ERR(op)) { ret = PTR_ERR(op); goto error; } afs_op_set_vnode(op, 0, vnode); op->nr_files = 1; op->ops = &afs_fetch_status_operation; ret = afs_do_sync_operation(op); if (ret < 0) goto error; afs_get_inode_cache(vnode); clear_bit(AFS_VNODE_UNSET, &vnode->flags); unlock_new_inode(inode); _leave(" = %p", inode); return inode; error: iget_failed(inode); _leave(" = %d [bad]", ret); return ERR_PTR(ret); } /* * mark the data attached to an inode as obsolete due to a write on the server * - might also want to ditch all the outstanding writes and dirty pages */ static void afs_zap_data(struct afs_vnode *vnode) { _enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode); afs_invalidate_cache(vnode, 0); /* nuke all the non-dirty pages that aren't locked, mapped or being * written back in a regular file and completely discard the pages in a * directory or symlink */ if (S_ISREG(vnode->netfs.inode.i_mode)) invalidate_remote_inode(&vnode->netfs.inode); else invalidate_inode_pages2(vnode->netfs.inode.i_mapping); } /* * Check to see if we have a server currently serving this volume and that it * hasn't been reinitialised or dropped from the list. 
*/ static bool afs_check_server_good(struct afs_vnode *vnode) { struct afs_server_list *slist; struct afs_server *server; bool good; int i; if (vnode->cb_fs_s_break == atomic_read(&vnode->volume->cell->fs_s_break)) return true; rcu_read_lock(); slist = rcu_dereference(vnode->volume->servers); for (i = 0; i < slist->nr_servers; i++) { server = slist->servers[i].server; if (server == vnode->cb_server) { good = (vnode->cb_s_break == server->cb_s_break); rcu_read_unlock(); return good; } } rcu_read_unlock(); return false; } /* * Check the validity of a vnode/inode. */ bool afs_check_validity(struct afs_vnode *vnode) { enum afs_cb_break_reason need_clear = afs_cb_break_no_break; time64_t now = ktime_get_real_seconds(); unsigned int cb_break; int seq = 0; do { read_seqbegin_or_lock(&vnode->cb_lock, &seq); cb_break = vnode->cb_break; if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) { if (vnode->cb_v_break != vnode->volume->cb_v_break) need_clear = afs_cb_break_for_v_break; else if (!afs_check_server_good(vnode)) need_clear = afs_cb_break_for_s_reinit; else if (test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) need_clear = afs_cb_break_for_zap; else if (vnode->cb_expires_at - 10 <= now) need_clear = afs_cb_break_for_lapsed; } else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) { ; } else { need_clear = afs_cb_break_no_promise; } } while (need_seqretry(&vnode->cb_lock, seq)); done_seqretry(&vnode->cb_lock, seq); if (need_clear == afs_cb_break_no_break) return true; write_seqlock(&vnode->cb_lock); if (need_clear == afs_cb_break_no_promise) vnode->cb_v_break = vnode->volume->cb_v_break; else if (cb_break == vnode->cb_break) __afs_break_callback(vnode, need_clear); else trace_afs_cb_miss(&vnode->fid, need_clear); write_sequnlock(&vnode->cb_lock); return false; } /* * Returns true if the pagecache is still valid. Does not sleep. 
*/ bool afs_pagecache_valid(struct afs_vnode *vnode) { if (unlikely(test_bit(AFS_VNODE_DELETED, &vnode->flags))) { if (vnode->netfs.inode.i_nlink) clear_nlink(&vnode->netfs.inode); return true; } if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags) && afs_check_validity(vnode)) return true; return false; } /* * validate a vnode/inode * - there are several things we need to check * - parent dir data changes (rm, rmdir, rename, mkdir, create, link, * symlink) * - parent dir metadata changed (security changes) * - dentry data changed (write, truncate) * - dentry metadata changed (security changes) */ int afs_validate(struct afs_vnode *vnode, struct key *key) { int ret; _enter("{v={%llx:%llu} fl=%lx},%x", vnode->fid.vid, vnode->fid.vnode, vnode->flags, key_serial(key)); if (afs_pagecache_valid(vnode)) goto valid; down_write(&vnode->validate_lock); /* if the promise has expired, we need to check the server again to get * a new promise - note that if the (parent) directory's metadata was * changed then the security may be different and we may no longer have * access */ if (!test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) { _debug("not promised"); ret = afs_fetch_status(vnode, key, false, NULL); if (ret < 0) { if (ret == -ENOENT) { set_bit(AFS_VNODE_DELETED, &vnode->flags); ret = -ESTALE; } goto error_unlock; } _debug("new promise [fl=%lx]", vnode->flags); } if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) { _debug("file already deleted"); ret = -ESTALE; goto error_unlock; } /* if the vnode's data version number changed then its contents are * different */ if (test_and_clear_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) afs_zap_data(vnode); up_write(&vnode->validate_lock); valid: _leave(" = 0"); return 0; error_unlock: up_write(&vnode->validate_lock); _leave(" = %d", ret); return ret; } /* * read the attributes of an inode */ int afs_getattr(struct mnt_idmap *idmap, const struct path *path, struct kstat *stat, u32 request_mask, unsigned int query_flags) { struct inode *inode = d_inode(path->dentry); struct afs_vnode *vnode = AFS_FS_I(inode); struct key *key; int ret, seq = 0; _enter("{ ino=%lu v=%u }", inode->i_ino, inode->i_generation); if (vnode->volume && !(query_flags & AT_STATX_DONT_SYNC) && !test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) { key = afs_request_key(vnode->volume->cell); if (IS_ERR(key)) return PTR_ERR(key); ret = afs_validate(vnode, key); key_put(key); if (ret < 0) return ret; } do { read_seqbegin_or_lock(&vnode->cb_lock, &seq); generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat); if (test_bit(AFS_VNODE_SILLY_DELETED, &vnode->flags) && stat->nlink > 0) stat->nlink -= 1; /* Lie about the size of directories. We maintain a locally * edited copy and may make different allocation decisions on * it, but we need to give userspace the server's size. 
*/ if (S_ISDIR(inode->i_mode)) stat->size = vnode->netfs.remote_i_size; } while (need_seqretry(&vnode->cb_lock, seq)); done_seqretry(&vnode->cb_lock, seq); return 0; } /* * discard an AFS inode */ int afs_drop_inode(struct inode *inode) { _enter(""); if (test_bit(AFS_VNODE_PSEUDODIR, &AFS_FS_I(inode)->flags)) return generic_delete_inode(inode); else return generic_drop_inode(inode); } /* * clear an AFS inode */ void afs_evict_inode(struct inode *inode) { struct afs_vnode_cache_aux aux; struct afs_vnode *vnode = AFS_FS_I(inode); _enter("{%llx:%llu.%d}", vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique); _debug("CLEAR INODE %p", inode); ASSERTCMP(inode->i_ino, ==, vnode->fid.vnode); truncate_inode_pages_final(&inode->i_data); afs_set_cache_aux(vnode, &aux); fscache_clear_inode_writeback(afs_vnode_cache(vnode), inode, &aux); clear_inode(inode); while (!list_empty(&vnode->wb_keys)) { struct afs_wb_key *wbk = list_entry(vnode->wb_keys.next, struct afs_wb_key, vnode_link); list_del(&wbk->vnode_link); afs_put_wb_key(wbk); } fscache_relinquish_cookie(afs_vnode_cache(vnode), test_bit(AFS_VNODE_DELETED, &vnode->flags)); afs_prune_wb_keys(vnode); afs_put_permits(rcu_access_pointer(vnode->permit_cache)); key_put(vnode->silly_key); vnode->silly_key = NULL; key_put(vnode->lock_key); vnode->lock_key = NULL; _leave(""); } static void afs_setattr_success(struct afs_operation *op) { struct afs_vnode_param *vp = &op->file[0]; struct inode *inode = &vp->vnode->netfs.inode; loff_t old_i_size = i_size_read(inode); op->setattr.old_i_size = old_i_size; afs_vnode_commit_status(op, vp); /* inode->i_size has now been changed. */ if (op->setattr.attr->ia_valid & ATTR_SIZE) { loff_t size = op->setattr.attr->ia_size; if (size > old_i_size) pagecache_isize_extended(inode, old_i_size, size); } } static void afs_setattr_edit_file(struct afs_operation *op) { struct afs_vnode_param *vp = &op->file[0]; struct inode *inode = &vp->vnode->netfs.inode; if (op->setattr.attr->ia_valid & ATTR_SIZE) { loff_t size = op->setattr.attr->ia_size; loff_t i_size = op->setattr.old_i_size; if (size < i_size) truncate_pagecache(inode, size); if (size != i_size) fscache_resize_cookie(afs_vnode_cache(vp->vnode), vp->scb.status.size); } } static const struct afs_operation_ops afs_setattr_operation = { .issue_afs_rpc = afs_fs_setattr, .issue_yfs_rpc = yfs_fs_setattr, .success = afs_setattr_success, .edit_dir = afs_setattr_edit_file, }; /* * set the attributes of an inode */ int afs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *attr) { const unsigned int supported = ATTR_SIZE | ATTR_MODE | ATTR_UID | ATTR_GID | ATTR_MTIME | ATTR_MTIME_SET | ATTR_TIMES_SET | ATTR_TOUCH; struct afs_operation *op; struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry)); struct inode *inode = &vnode->netfs.inode; loff_t i_size; int ret; _enter("{%llx:%llu},{n=%pd},%x", vnode->fid.vid, vnode->fid.vnode, dentry, attr->ia_valid); if (!(attr->ia_valid & supported)) { _leave(" = 0 [unsupported]"); return 0; } i_size = i_size_read(inode); if (attr->ia_valid & ATTR_SIZE) { if (!S_ISREG(inode->i_mode)) return -EISDIR; ret = inode_newsize_ok(inode, attr->ia_size); if (ret) return ret; if (attr->ia_size == i_size) attr->ia_valid &= ~ATTR_SIZE; } fscache_use_cookie(afs_vnode_cache(vnode), true); /* Prevent any new writebacks from starting whilst we do this. 
*/ down_write(&vnode->validate_lock); if ((attr->ia_valid & ATTR_SIZE) && S_ISREG(inode->i_mode)) { loff_t size = attr->ia_size; /* Wait for any outstanding writes to the server to complete */ loff_t from = min(size, i_size); loff_t to = max(size, i_size); ret = filemap_fdatawait_range(inode->i_mapping, from, to); if (ret < 0) goto out_unlock; /* Don't talk to the server if we're just shortening in-memory * writes that haven't gone to the server yet. */ if (!(attr->ia_valid & (supported & ~ATTR_SIZE & ~ATTR_MTIME)) && attr->ia_size < i_size && attr->ia_size > vnode->status.size) { truncate_pagecache(inode, attr->ia_size); fscache_resize_cookie(afs_vnode_cache(vnode), attr->ia_size); i_size_write(inode, attr->ia_size); ret = 0; goto out_unlock; } } op = afs_alloc_operation(((attr->ia_valid & ATTR_FILE) ? afs_file_key(attr->ia_file) : NULL), vnode->volume); if (IS_ERR(op)) { ret = PTR_ERR(op); goto out_unlock; } afs_op_set_vnode(op, 0, vnode); op->setattr.attr = attr; if (attr->ia_valid & ATTR_SIZE) { op->file[0].dv_delta = 1; op->file[0].set_size = true; } op->ctime = attr->ia_ctime; op->file[0].update_ctime = 1; op->file[0].modification = true; op->ops = &afs_setattr_operation; ret = afs_do_sync_operation(op); out_unlock: up_write(&vnode->validate_lock); fscache_unuse_cookie(afs_vnode_cache(vnode), NULL, NULL); _leave(" = %d", ret); return ret; }
linux-master
fs/afs/inode.c
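The getattr path at the top of fs/afs/inode.c samples the vnode status inside a read_seqbegin_or_lock()/need_seqretry() loop so a concurrent callback break cannot hand back a torn set of attributes. Below is a minimal userspace sketch of that reader-retry idea using C11 atomics; the struct and function names are purely illustrative, not the kernel API, and the memory-ordering details are deliberately simplified.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical shared record guarded by a sequence counter. */
struct shared_attr {
	_Atomic unsigned int seq;	/* even = stable, odd = writer active */
	uint64_t size;
	uint64_t mtime;
};

/* Writer: bump to odd, update the fields, bump back to even. */
static void attr_write(struct shared_attr *a, uint64_t size, uint64_t mtime)
{
	atomic_fetch_add_explicit(&a->seq, 1, memory_order_acquire);
	a->size = size;
	a->mtime = mtime;
	atomic_fetch_add_explicit(&a->seq, 1, memory_order_release);
}

/* Reader: retry while a writer was active or the counter moved underneath us. */
static void attr_read(struct shared_attr *a, uint64_t *size, uint64_t *mtime)
{
	unsigned int start;

	do {
		start = atomic_load_explicit(&a->seq, memory_order_acquire);
		*size = a->size;
		*mtime = a->mtime;
		atomic_thread_fence(memory_order_acquire);
	} while ((start & 1) ||
		 atomic_load_explicit(&a->seq, memory_order_relaxed) != start);
}

int main(void)
{
	struct shared_attr a = { .seq = 0, .size = 0, .mtime = 0 };
	uint64_t size, mtime;

	attr_write(&a, 4096, 1700000000);
	attr_read(&a, &size, &mtime);
	printf("size=%llu mtime=%llu\n",
	       (unsigned long long)size, (unsigned long long)mtime);
	return 0;
}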
// SPDX-License-Identifier: GPL-2.0-or-later /* AFS dynamic root handling * * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved. * Written by David Howells ([email protected]) */ #include <linux/fs.h> #include <linux/namei.h> #include <linux/dns_resolver.h> #include "internal.h" static atomic_t afs_autocell_ino; /* * iget5() comparator for inode created by autocell operations * * These pseudo inodes don't match anything. */ static int afs_iget5_pseudo_test(struct inode *inode, void *opaque) { return 0; } /* * iget5() inode initialiser */ static int afs_iget5_pseudo_set(struct inode *inode, void *opaque) { struct afs_super_info *as = AFS_FS_S(inode->i_sb); struct afs_vnode *vnode = AFS_FS_I(inode); struct afs_fid *fid = opaque; vnode->volume = as->volume; vnode->fid = *fid; inode->i_ino = fid->vnode; inode->i_generation = fid->unique; return 0; } /* * Create an inode for a dynamic root directory or an autocell dynamic * automount dir. */ struct inode *afs_iget_pseudo_dir(struct super_block *sb, bool root) { struct afs_super_info *as = AFS_FS_S(sb); struct afs_vnode *vnode; struct inode *inode; struct afs_fid fid = {}; _enter(""); if (as->volume) fid.vid = as->volume->vid; if (root) { fid.vnode = 1; fid.unique = 1; } else { fid.vnode = atomic_inc_return(&afs_autocell_ino); fid.unique = 0; } inode = iget5_locked(sb, fid.vnode, afs_iget5_pseudo_test, afs_iget5_pseudo_set, &fid); if (!inode) { _leave(" = -ENOMEM"); return ERR_PTR(-ENOMEM); } _debug("GOT INODE %p { ino=%lu, vl=%llx, vn=%llx, u=%x }", inode, inode->i_ino, fid.vid, fid.vnode, fid.unique); vnode = AFS_FS_I(inode); /* there shouldn't be an existing inode */ BUG_ON(!(inode->i_state & I_NEW)); netfs_inode_init(&vnode->netfs, NULL); inode->i_size = 0; inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO; if (root) { inode->i_op = &afs_dynroot_inode_operations; inode->i_fop = &simple_dir_operations; } else { inode->i_op = &afs_autocell_inode_operations; } set_nlink(inode, 2); inode->i_uid = GLOBAL_ROOT_UID; inode->i_gid = GLOBAL_ROOT_GID; inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode); inode->i_blocks = 0; inode->i_generation = 0; set_bit(AFS_VNODE_PSEUDODIR, &vnode->flags); if (!root) { set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags); inode->i_flags |= S_AUTOMOUNT; } inode->i_flags |= S_NOATIME; unlock_new_inode(inode); _leave(" = %p", inode); return inode; } /* * Probe to see if a cell may exist. This prevents positive dentries from * being created unnecessarily. */ static int afs_probe_cell_name(struct dentry *dentry) { struct afs_cell *cell; struct afs_net *net = afs_d2net(dentry); const char *name = dentry->d_name.name; size_t len = dentry->d_name.len; int ret; /* Names prefixed with a dot are R/W mounts. */ if (name[0] == '.') { if (len == 1) return -EINVAL; name++; len--; } cell = afs_find_cell(net, name, len, afs_cell_trace_use_probe); if (!IS_ERR(cell)) { afs_unuse_cell(net, cell, afs_cell_trace_unuse_probe); return 0; } ret = dns_query(net->net, "afsdb", name, len, "srv=1", NULL, NULL, false); if (ret == -ENODATA) ret = -EDESTADDRREQ; return ret; } /* * Try to auto mount the mountpoint with pseudo directory, if the autocell * operation is setted. 
*/ struct inode *afs_try_auto_mntpt(struct dentry *dentry, struct inode *dir) { struct afs_vnode *vnode = AFS_FS_I(dir); struct inode *inode; int ret = -ENOENT; _enter("%p{%pd}, {%llx:%llu}", dentry, dentry, vnode->fid.vid, vnode->fid.vnode); if (!test_bit(AFS_VNODE_AUTOCELL, &vnode->flags)) goto out; ret = afs_probe_cell_name(dentry); if (ret < 0) goto out; inode = afs_iget_pseudo_dir(dir->i_sb, false); if (IS_ERR(inode)) { ret = PTR_ERR(inode); goto out; } _leave("= %p", inode); return inode; out: _leave("= %d", ret); return ret == -ENOENT ? NULL : ERR_PTR(ret); } /* * Look up @cell in a dynroot directory. This is a substitution for the * local cell name for the net namespace. */ static struct dentry *afs_lookup_atcell(struct dentry *dentry) { struct afs_cell *cell; struct afs_net *net = afs_d2net(dentry); struct dentry *ret; char *name; int len; if (!net->ws_cell) return ERR_PTR(-ENOENT); ret = ERR_PTR(-ENOMEM); name = kmalloc(AFS_MAXCELLNAME + 1, GFP_KERNEL); if (!name) goto out_p; down_read(&net->cells_lock); cell = net->ws_cell; if (cell) { len = cell->name_len; memcpy(name, cell->name, len + 1); } up_read(&net->cells_lock); ret = ERR_PTR(-ENOENT); if (!cell) goto out_n; ret = lookup_one_len(name, dentry->d_parent, len); /* We don't want to d_add() the @cell dentry here as we don't want to * the cached dentry to hide changes to the local cell name. */ out_n: kfree(name); out_p: return ret; } /* * Look up an entry in a dynroot directory. */ static struct dentry *afs_dynroot_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { _enter("%pd", dentry); ASSERTCMP(d_inode(dentry), ==, NULL); if (flags & LOOKUP_CREATE) return ERR_PTR(-EOPNOTSUPP); if (dentry->d_name.len >= AFSNAMEMAX) { _leave(" = -ENAMETOOLONG"); return ERR_PTR(-ENAMETOOLONG); } if (dentry->d_name.len == 5 && memcmp(dentry->d_name.name, "@cell", 5) == 0) return afs_lookup_atcell(dentry); return d_splice_alias(afs_try_auto_mntpt(dentry, dir), dentry); } const struct inode_operations afs_dynroot_inode_operations = { .lookup = afs_dynroot_lookup, }; /* * Dirs in the dynamic root don't need revalidation. */ static int afs_dynroot_d_revalidate(struct dentry *dentry, unsigned int flags) { return 1; } /* * Allow the VFS to enquire as to whether a dentry should be unhashed (mustn't * sleep) * - called from dput() when d_count is going to 0. * - return 1 to request dentry be unhashed, 0 otherwise */ static int afs_dynroot_d_delete(const struct dentry *dentry) { return d_really_is_positive(dentry); } const struct dentry_operations afs_dynroot_dentry_operations = { .d_revalidate = afs_dynroot_d_revalidate, .d_delete = afs_dynroot_d_delete, .d_release = afs_d_release, .d_automount = afs_d_automount, }; /* * Create a manually added cell mount directory. * - The caller must hold net->proc_cells_lock */ int afs_dynroot_mkdir(struct afs_net *net, struct afs_cell *cell) { struct super_block *sb = net->dynroot_sb; struct dentry *root, *subdir; int ret; if (!sb || atomic_read(&sb->s_active) == 0) return 0; /* Let the ->lookup op do the creation */ root = sb->s_root; inode_lock(root->d_inode); subdir = lookup_one_len(cell->name, root, cell->name_len); if (IS_ERR(subdir)) { ret = PTR_ERR(subdir); goto unlock; } /* Note that we're retaining an extra ref on the dentry */ subdir->d_fsdata = (void *)1UL; ret = 0; unlock: inode_unlock(root->d_inode); return ret; } /* * Remove a manually added cell mount directory. 
* - The caller must hold net->proc_cells_lock */ void afs_dynroot_rmdir(struct afs_net *net, struct afs_cell *cell) { struct super_block *sb = net->dynroot_sb; struct dentry *root, *subdir; if (!sb || atomic_read(&sb->s_active) == 0) return; root = sb->s_root; inode_lock(root->d_inode); /* Don't want to trigger a lookup call, which will re-add the cell */ subdir = try_lookup_one_len(cell->name, root, cell->name_len); if (IS_ERR_OR_NULL(subdir)) { _debug("lookup %ld", PTR_ERR(subdir)); goto no_dentry; } _debug("rmdir %pd %u", subdir, d_count(subdir)); if (subdir->d_fsdata) { _debug("unpin %u", d_count(subdir)); subdir->d_fsdata = NULL; dput(subdir); } dput(subdir); no_dentry: inode_unlock(root->d_inode); _leave(""); } /* * Populate a newly created dynamic root with cell names. */ int afs_dynroot_populate(struct super_block *sb) { struct afs_cell *cell; struct afs_net *net = afs_sb2net(sb); int ret; mutex_lock(&net->proc_cells_lock); net->dynroot_sb = sb; hlist_for_each_entry(cell, &net->proc_cells, proc_link) { ret = afs_dynroot_mkdir(net, cell); if (ret < 0) goto error; } ret = 0; out: mutex_unlock(&net->proc_cells_lock); return ret; error: net->dynroot_sb = NULL; goto out; } /* * When a dynamic root that's in the process of being destroyed, depopulate it * of pinned directories. */ void afs_dynroot_depopulate(struct super_block *sb) { struct afs_net *net = afs_sb2net(sb); struct dentry *root = sb->s_root, *subdir, *tmp; /* Prevent more subdirs from being created */ mutex_lock(&net->proc_cells_lock); if (net->dynroot_sb == sb) net->dynroot_sb = NULL; mutex_unlock(&net->proc_cells_lock); if (root) { inode_lock(root->d_inode); /* Remove all the pins for dirs created for manually added cells */ list_for_each_entry_safe(subdir, tmp, &root->d_subdirs, d_child) { if (subdir->d_fsdata) { subdir->d_fsdata = NULL; dput(subdir); } } inode_unlock(root->d_inode); } }
linux-master
fs/afs/dynroot.c
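In fs/afs/dynroot.c, afs_probe_cell_name() treats a leading '.' in a dynroot name as the read/write-mount marker and strips it before the cell lookup and DNS probe. A small standalone sketch of just that normalisation step, assuming nothing beyond standard C (the helper name is made up for illustration):

#include <stdio.h>
#include <string.h>

/*
 * Normalise a dynroot dentry name: a leading '.' marks a read/write mount
 * and is not part of the cell name. Returns 0 on success, -1 if the name
 * is only ".".
 */
static int normalise_cell_name(const char *name, size_t len,
			       const char **cell, size_t *cell_len, int *rw)
{
	*rw = 0;
	if (len && name[0] == '.') {
		if (len == 1)
			return -1;
		name++;
		len--;
		*rw = 1;
	}
	*cell = name;
	*cell_len = len;
	return 0;
}

int main(void)
{
	const char *names[] = { "example.org", ".example.org", "." };

	for (size_t i = 0; i < 3; i++) {
		const char *cell;
		size_t cell_len;
		int rw;

		if (normalise_cell_name(names[i], strlen(names[i]),
					&cell, &cell_len, &rw) < 0) {
			printf("%-14s -> invalid\n", names[i]);
			continue;
		}
		printf("%-14s -> cell=%.*s rw=%d\n",
		       names[i], (int)cell_len, cell, rw);
	}
	return 0;
}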
// SPDX-License-Identifier: GPL-2.0-or-later /* AFS client file system * * Copyright (C) 2002,5 Red Hat, Inc. All Rights Reserved. * Written by David Howells ([email protected]) */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/completion.h> #include <linux/sched.h> #include <linux/random.h> #include <linux/proc_fs.h> #define CREATE_TRACE_POINTS #include "internal.h" MODULE_DESCRIPTION("AFS Client File System"); MODULE_AUTHOR("Red Hat, Inc."); MODULE_LICENSE("GPL"); unsigned afs_debug; module_param_named(debug, afs_debug, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(debug, "AFS debugging mask"); static char *rootcell; module_param(rootcell, charp, 0); MODULE_PARM_DESC(rootcell, "root AFS cell name and VL server IP addr list"); struct workqueue_struct *afs_wq; static struct proc_dir_entry *afs_proc_symlink; #if defined(CONFIG_ALPHA) const char afs_init_sysname[] = "alpha_linux26"; #elif defined(CONFIG_X86_64) const char afs_init_sysname[] = "amd64_linux26"; #elif defined(CONFIG_ARM) const char afs_init_sysname[] = "arm_linux26"; #elif defined(CONFIG_ARM64) const char afs_init_sysname[] = "aarch64_linux26"; #elif defined(CONFIG_X86_32) const char afs_init_sysname[] = "i386_linux26"; #elif defined(CONFIG_IA64) const char afs_init_sysname[] = "ia64_linux26"; #elif defined(CONFIG_PPC64) const char afs_init_sysname[] = "ppc64_linux26"; #elif defined(CONFIG_PPC32) const char afs_init_sysname[] = "ppc_linux26"; #elif defined(CONFIG_S390) #ifdef CONFIG_64BIT const char afs_init_sysname[] = "s390x_linux26"; #else const char afs_init_sysname[] = "s390_linux26"; #endif #elif defined(CONFIG_SPARC64) const char afs_init_sysname[] = "sparc64_linux26"; #elif defined(CONFIG_SPARC32) const char afs_init_sysname[] = "sparc_linux26"; #else const char afs_init_sysname[] = "unknown_linux26"; #endif /* * Initialise an AFS network namespace record. 
*/ static int __net_init afs_net_init(struct net *net_ns) { struct afs_sysnames *sysnames; struct afs_net *net = afs_net(net_ns); int ret; net->net = net_ns; net->live = true; generate_random_uuid((unsigned char *)&net->uuid); INIT_WORK(&net->charge_preallocation_work, afs_charge_preallocation); mutex_init(&net->socket_mutex); net->cells = RB_ROOT; init_rwsem(&net->cells_lock); INIT_WORK(&net->cells_manager, afs_manage_cells); timer_setup(&net->cells_timer, afs_cells_timer, 0); mutex_init(&net->cells_alias_lock); mutex_init(&net->proc_cells_lock); INIT_HLIST_HEAD(&net->proc_cells); seqlock_init(&net->fs_lock); net->fs_servers = RB_ROOT; INIT_LIST_HEAD(&net->fs_probe_fast); INIT_LIST_HEAD(&net->fs_probe_slow); INIT_HLIST_HEAD(&net->fs_proc); INIT_HLIST_HEAD(&net->fs_addresses4); INIT_HLIST_HEAD(&net->fs_addresses6); seqlock_init(&net->fs_addr_lock); INIT_WORK(&net->fs_manager, afs_manage_servers); timer_setup(&net->fs_timer, afs_servers_timer, 0); INIT_WORK(&net->fs_prober, afs_fs_probe_dispatcher); timer_setup(&net->fs_probe_timer, afs_fs_probe_timer, 0); atomic_set(&net->servers_outstanding, 1); ret = -ENOMEM; sysnames = kzalloc(sizeof(*sysnames), GFP_KERNEL); if (!sysnames) goto error_sysnames; sysnames->subs[0] = (char *)&afs_init_sysname; sysnames->nr = 1; refcount_set(&sysnames->usage, 1); net->sysnames = sysnames; rwlock_init(&net->sysnames_lock); /* Register the /proc stuff */ ret = afs_proc_init(net); if (ret < 0) goto error_proc; /* Initialise the cell DB */ ret = afs_cell_init(net, rootcell); if (ret < 0) goto error_cell_init; /* Create the RxRPC transport */ ret = afs_open_socket(net); if (ret < 0) goto error_open_socket; return 0; error_open_socket: net->live = false; afs_fs_probe_cleanup(net); afs_cell_purge(net); afs_purge_servers(net); error_cell_init: net->live = false; afs_proc_cleanup(net); error_proc: afs_put_sysnames(net->sysnames); error_sysnames: net->live = false; return ret; } /* * Clean up and destroy an AFS network namespace record. 
*/ static void __net_exit afs_net_exit(struct net *net_ns) { struct afs_net *net = afs_net(net_ns); net->live = false; afs_fs_probe_cleanup(net); afs_cell_purge(net); afs_purge_servers(net); afs_close_socket(net); afs_proc_cleanup(net); afs_put_sysnames(net->sysnames); } static struct pernet_operations afs_net_ops = { .init = afs_net_init, .exit = afs_net_exit, .id = &afs_net_id, .size = sizeof(struct afs_net), }; /* * initialise the AFS client FS module */ static int __init afs_init(void) { int ret = -ENOMEM; printk(KERN_INFO "kAFS: Red Hat AFS client v0.1 registering.\n"); afs_wq = alloc_workqueue("afs", 0, 0); if (!afs_wq) goto error_afs_wq; afs_async_calls = alloc_workqueue("kafsd", WQ_MEM_RECLAIM, 0); if (!afs_async_calls) goto error_async; afs_lock_manager = alloc_workqueue("kafs_lockd", WQ_MEM_RECLAIM, 0); if (!afs_lock_manager) goto error_lockmgr; ret = register_pernet_device(&afs_net_ops); if (ret < 0) goto error_net; /* register the filesystems */ ret = afs_fs_init(); if (ret < 0) goto error_fs; afs_proc_symlink = proc_symlink("fs/afs", NULL, "../self/net/afs"); if (!afs_proc_symlink) { ret = -ENOMEM; goto error_proc; } return ret; error_proc: afs_fs_exit(); error_fs: unregister_pernet_device(&afs_net_ops); error_net: destroy_workqueue(afs_lock_manager); error_lockmgr: destroy_workqueue(afs_async_calls); error_async: destroy_workqueue(afs_wq); error_afs_wq: rcu_barrier(); printk(KERN_ERR "kAFS: failed to register: %d\n", ret); return ret; } /* XXX late_initcall is kludgy, but the only alternative seems to create * a transport upon the first mount, which is worse. Or is it? */ late_initcall(afs_init); /* must be called after net/ to create socket */ /* * clean up on module removal */ static void __exit afs_exit(void) { printk(KERN_INFO "kAFS: Red Hat AFS client v0.1 unregistering.\n"); proc_remove(afs_proc_symlink); afs_fs_exit(); unregister_pernet_device(&afs_net_ops); destroy_workqueue(afs_lock_manager); destroy_workqueue(afs_async_calls); destroy_workqueue(afs_wq); afs_clean_up_permit_cache(); rcu_barrier(); } module_exit(afs_exit);
linux-master
fs/afs/main.c
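afs_net_init() in fs/afs/main.c follows the usual kernel pattern of multi-stage setup with goto-based unwinding: each failing stage jumps to a label that tears down everything already constructed, in reverse order. A toy userspace sketch of that control flow; the stage names are stand-ins for afs_proc_init(), afs_cell_init() and afs_open_socket(), not the real calls.

#include <stdio.h>

static int setup_proc(void)   { return 0; }
static int setup_cells(void)  { return 0; }
static int setup_socket(void) { return -1; }	/* pretend this stage fails */

static void teardown_cells(void) { puts("undo cells"); }
static void teardown_proc(void)  { puts("undo proc"); }

static int net_init(void)
{
	int ret;

	ret = setup_proc();
	if (ret < 0)
		goto error_proc;

	ret = setup_cells();
	if (ret < 0)
		goto error_cells;

	ret = setup_socket();
	if (ret < 0)
		goto error_socket;

	return 0;

	/* Unwind in reverse order of construction. */
error_socket:
	teardown_cells();
error_cells:
	teardown_proc();
error_proc:
	return ret;
}

int main(void)
{
	printf("net_init() = %d\n", net_init());
	return 0;
}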
// SPDX-License-Identifier: GPL-2.0-or-later /* AFS fileserver probing * * Copyright (C) 2018, 2020 Red Hat, Inc. All Rights Reserved. * Written by David Howells ([email protected]) */ #include <linux/sched.h> #include <linux/slab.h> #include "afs_fs.h" #include "internal.h" #include "protocol_afs.h" #include "protocol_yfs.h" static unsigned int afs_fs_probe_fast_poll_interval = 30 * HZ; static unsigned int afs_fs_probe_slow_poll_interval = 5 * 60 * HZ; /* * Start the probe polling timer. We have to supply it with an inc on the * outstanding server count. */ static void afs_schedule_fs_probe(struct afs_net *net, struct afs_server *server, bool fast) { unsigned long atj; if (!net->live) return; atj = server->probed_at; atj += fast ? afs_fs_probe_fast_poll_interval : afs_fs_probe_slow_poll_interval; afs_inc_servers_outstanding(net); if (timer_reduce(&net->fs_probe_timer, atj)) afs_dec_servers_outstanding(net); } /* * Handle the completion of a set of probes. */ static void afs_finished_fs_probe(struct afs_net *net, struct afs_server *server) { bool responded = server->probe.responded; write_seqlock(&net->fs_lock); if (responded) { list_add_tail(&server->probe_link, &net->fs_probe_slow); } else { server->rtt = UINT_MAX; clear_bit(AFS_SERVER_FL_RESPONDING, &server->flags); list_add_tail(&server->probe_link, &net->fs_probe_fast); } write_sequnlock(&net->fs_lock); afs_schedule_fs_probe(net, server, !responded); } /* * Handle the completion of a probe. */ static void afs_done_one_fs_probe(struct afs_net *net, struct afs_server *server) { _enter(""); if (atomic_dec_and_test(&server->probe_outstanding)) afs_finished_fs_probe(net, server); wake_up_all(&server->probe_wq); } /* * Handle inability to send a probe due to ENOMEM when trying to allocate a * call struct. */ static void afs_fs_probe_not_done(struct afs_net *net, struct afs_server *server, struct afs_addr_cursor *ac) { struct afs_addr_list *alist = ac->alist; unsigned int index = ac->index; _enter(""); trace_afs_io_error(0, -ENOMEM, afs_io_error_fs_probe_fail); spin_lock(&server->probe_lock); server->probe.local_failure = true; if (server->probe.error == 0) server->probe.error = -ENOMEM; set_bit(index, &alist->failed); spin_unlock(&server->probe_lock); return afs_done_one_fs_probe(net, server); } /* * Process the result of probing a fileserver. This is called after successful * or failed delivery of an FS.GetCapabilities operation. */ void afs_fileserver_probe_result(struct afs_call *call) { struct afs_addr_list *alist = call->alist; struct afs_server *server = call->server; unsigned int index = call->addr_ix; unsigned int rtt_us = 0, cap0; int ret = call->error; _enter("%pU,%u", &server->uuid, index); spin_lock(&server->probe_lock); switch (ret) { case 0: server->probe.error = 0; goto responded; case -ECONNABORTED: if (!server->probe.responded) { server->probe.abort_code = call->abort_code; server->probe.error = ret; } goto responded; case -ENOMEM: case -ENONET: clear_bit(index, &alist->responded); server->probe.local_failure = true; trace_afs_io_error(call->debug_id, ret, afs_io_error_fs_probe_fail); goto out; case -ECONNRESET: /* Responded, but call expired. 
*/ case -ERFKILL: case -EADDRNOTAVAIL: case -ENETUNREACH: case -EHOSTUNREACH: case -EHOSTDOWN: case -ECONNREFUSED: case -ETIMEDOUT: case -ETIME: default: clear_bit(index, &alist->responded); set_bit(index, &alist->failed); if (!server->probe.responded && (server->probe.error == 0 || server->probe.error == -ETIMEDOUT || server->probe.error == -ETIME)) server->probe.error = ret; trace_afs_io_error(call->debug_id, ret, afs_io_error_fs_probe_fail); goto out; } responded: clear_bit(index, &alist->failed); if (call->service_id == YFS_FS_SERVICE) { server->probe.is_yfs = true; set_bit(AFS_SERVER_FL_IS_YFS, &server->flags); alist->addrs[index].srx_service = call->service_id; } else { server->probe.not_yfs = true; if (!server->probe.is_yfs) { clear_bit(AFS_SERVER_FL_IS_YFS, &server->flags); alist->addrs[index].srx_service = call->service_id; } cap0 = ntohl(call->tmp); if (cap0 & AFS3_VICED_CAPABILITY_64BITFILES) set_bit(AFS_SERVER_FL_HAS_FS64, &server->flags); else clear_bit(AFS_SERVER_FL_HAS_FS64, &server->flags); } rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us); if (rtt_us < server->probe.rtt) { server->probe.rtt = rtt_us; server->rtt = rtt_us; alist->preferred = index; } smp_wmb(); /* Set rtt before responded. */ server->probe.responded = true; set_bit(index, &alist->responded); set_bit(AFS_SERVER_FL_RESPONDING, &server->flags); out: spin_unlock(&server->probe_lock); _debug("probe %pU [%u] %pISpc rtt=%u ret=%d", &server->uuid, index, &alist->addrs[index].transport, rtt_us, ret); return afs_done_one_fs_probe(call->net, server); } /* * Probe one or all of a fileserver's addresses to find out the best route and * to query its capabilities. */ void afs_fs_probe_fileserver(struct afs_net *net, struct afs_server *server, struct key *key, bool all) { struct afs_addr_cursor ac = { .index = 0, }; _enter("%pU", &server->uuid); read_lock(&server->fs_lock); ac.alist = rcu_dereference_protected(server->addresses, lockdep_is_held(&server->fs_lock)); afs_get_addrlist(ac.alist); read_unlock(&server->fs_lock); server->probed_at = jiffies; atomic_set(&server->probe_outstanding, all ? ac.alist->nr_addrs : 1); memset(&server->probe, 0, sizeof(server->probe)); server->probe.rtt = UINT_MAX; ac.index = ac.alist->preferred; if (ac.index < 0 || ac.index >= ac.alist->nr_addrs) all = true; if (all) { for (ac.index = 0; ac.index < ac.alist->nr_addrs; ac.index++) if (!afs_fs_get_capabilities(net, server, &ac, key)) afs_fs_probe_not_done(net, server, &ac); } else { if (!afs_fs_get_capabilities(net, server, &ac, key)) afs_fs_probe_not_done(net, server, &ac); } afs_put_addrlist(ac.alist); } /* * Wait for the first as-yet untried fileserver to respond. */ int afs_wait_for_fs_probes(struct afs_server_list *slist, unsigned long untried) { struct wait_queue_entry *waits; struct afs_server *server; unsigned int rtt = UINT_MAX, rtt_s; bool have_responders = false; int pref = -1, i; _enter("%u,%lx", slist->nr_servers, untried); /* Only wait for servers that have a probe outstanding. 
*/ for (i = 0; i < slist->nr_servers; i++) { if (test_bit(i, &untried)) { server = slist->servers[i].server; if (!atomic_read(&server->probe_outstanding)) __clear_bit(i, &untried); if (server->probe.responded) have_responders = true; } } if (have_responders || !untried) return 0; waits = kmalloc(array_size(slist->nr_servers, sizeof(*waits)), GFP_KERNEL); if (!waits) return -ENOMEM; for (i = 0; i < slist->nr_servers; i++) { if (test_bit(i, &untried)) { server = slist->servers[i].server; init_waitqueue_entry(&waits[i], current); add_wait_queue(&server->probe_wq, &waits[i]); } } for (;;) { bool still_probing = false; set_current_state(TASK_INTERRUPTIBLE); for (i = 0; i < slist->nr_servers; i++) { if (test_bit(i, &untried)) { server = slist->servers[i].server; if (server->probe.responded) goto stop; if (atomic_read(&server->probe_outstanding)) still_probing = true; } } if (!still_probing || signal_pending(current)) goto stop; schedule(); } stop: set_current_state(TASK_RUNNING); for (i = 0; i < slist->nr_servers; i++) { if (test_bit(i, &untried)) { server = slist->servers[i].server; rtt_s = READ_ONCE(server->rtt); if (test_bit(AFS_SERVER_FL_RESPONDING, &server->flags) && rtt_s < rtt) { pref = i; rtt = rtt_s; } remove_wait_queue(&server->probe_wq, &waits[i]); } } kfree(waits); if (pref == -1 && signal_pending(current)) return -ERESTARTSYS; if (pref >= 0) slist->preferred = pref; return 0; } /* * Probe timer. We have an increment on fs_outstanding that we need to pass * along to the work item. */ void afs_fs_probe_timer(struct timer_list *timer) { struct afs_net *net = container_of(timer, struct afs_net, fs_probe_timer); if (!net->live || !queue_work(afs_wq, &net->fs_prober)) afs_dec_servers_outstanding(net); } /* * Dispatch a probe to a server. */ static void afs_dispatch_fs_probe(struct afs_net *net, struct afs_server *server, bool all) __releases(&net->fs_lock) { struct key *key = NULL; /* We remove it from the queues here - it will be added back to * one of the queues on the completion of the probe. */ list_del_init(&server->probe_link); afs_get_server(server, afs_server_trace_get_probe); write_sequnlock(&net->fs_lock); afs_fs_probe_fileserver(net, server, key, all); afs_put_server(net, server, afs_server_trace_put_probe); } /* * Probe a server immediately without waiting for its due time to come * round. This is used when all of the addresses have been tried. */ void afs_probe_fileserver(struct afs_net *net, struct afs_server *server) { write_seqlock(&net->fs_lock); if (!list_empty(&server->probe_link)) return afs_dispatch_fs_probe(net, server, true); write_sequnlock(&net->fs_lock); } /* * Probe dispatcher to regularly dispatch probes to keep NAT alive. 
*/ void afs_fs_probe_dispatcher(struct work_struct *work) { struct afs_net *net = container_of(work, struct afs_net, fs_prober); struct afs_server *fast, *slow, *server; unsigned long nowj, timer_at, poll_at; bool first_pass = true, set_timer = false; if (!net->live) { afs_dec_servers_outstanding(net); return; } _enter(""); if (list_empty(&net->fs_probe_fast) && list_empty(&net->fs_probe_slow)) { afs_dec_servers_outstanding(net); _leave(" [none]"); return; } again: write_seqlock(&net->fs_lock); fast = slow = server = NULL; nowj = jiffies; timer_at = nowj + MAX_JIFFY_OFFSET; if (!list_empty(&net->fs_probe_fast)) { fast = list_first_entry(&net->fs_probe_fast, struct afs_server, probe_link); poll_at = fast->probed_at + afs_fs_probe_fast_poll_interval; if (time_before(nowj, poll_at)) { timer_at = poll_at; set_timer = true; fast = NULL; } } if (!list_empty(&net->fs_probe_slow)) { slow = list_first_entry(&net->fs_probe_slow, struct afs_server, probe_link); poll_at = slow->probed_at + afs_fs_probe_slow_poll_interval; if (time_before(nowj, poll_at)) { if (time_before(poll_at, timer_at)) timer_at = poll_at; set_timer = true; slow = NULL; } } server = fast ?: slow; if (server) _debug("probe %pU", &server->uuid); if (server && (first_pass || !need_resched())) { afs_dispatch_fs_probe(net, server, server == fast); first_pass = false; goto again; } write_sequnlock(&net->fs_lock); if (server) { if (!queue_work(afs_wq, &net->fs_prober)) afs_dec_servers_outstanding(net); _leave(" [requeue]"); } else if (set_timer) { if (timer_reduce(&net->fs_probe_timer, timer_at)) afs_dec_servers_outstanding(net); _leave(" [timer]"); } else { afs_dec_servers_outstanding(net); _leave(" [quiesce]"); } } /* * Wait for a probe on a particular fileserver to complete for 2s. */ int afs_wait_for_one_fs_probe(struct afs_server *server, bool is_intr) { struct wait_queue_entry wait; unsigned long timo = 2 * HZ; if (atomic_read(&server->probe_outstanding) == 0) goto dont_wait; init_wait_entry(&wait, 0); for (;;) { prepare_to_wait_event(&server->probe_wq, &wait, is_intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); if (timo == 0 || server->probe.responded || atomic_read(&server->probe_outstanding) == 0 || (is_intr && signal_pending(current))) break; timo = schedule_timeout(timo); } finish_wait(&server->probe_wq, &wait); dont_wait: if (server->probe.responded) return 0; if (is_intr && signal_pending(current)) return -ERESTARTSYS; if (timo == 0) return -ETIME; return -EDESTADDRREQ; } /* * Clean up the probing when the namespace is killed off. */ void afs_fs_probe_cleanup(struct afs_net *net) { if (del_timer_sync(&net->fs_probe_timer)) afs_dec_servers_outstanding(net); }
linux-master
fs/afs/fs_probe.c
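After the probes in fs/afs/fs_probe.c complete, afs_wait_for_fs_probes() prefers the responding server with the lowest measured RTT. The selection itself is a simple minimum scan, shown here as a self-contained sketch; the struct and field names only loosely mirror the kernel's probe results and are assumptions for illustration.

#include <stdio.h>
#include <limits.h>

struct probe_result {
	const char *name;
	int responded;
	unsigned int rtt_us;
};

/* Return the index of the responding server with the lowest RTT, or -1. */
static int pick_preferred(const struct probe_result *res, int n)
{
	unsigned int best_rtt = UINT_MAX;
	int pref = -1;

	for (int i = 0; i < n; i++) {
		if (res[i].responded && res[i].rtt_us < best_rtt) {
			best_rtt = res[i].rtt_us;
			pref = i;
		}
	}
	return pref;
}

int main(void)
{
	struct probe_result res[] = {
		{ "fs0", 1, 4200 },
		{ "fs1", 0, 0    },
		{ "fs2", 1, 1800 },
	};
	int pref = pick_preferred(res, 3);

	if (pref >= 0)
		printf("preferred server: %s (rtt=%uus)\n",
		       res[pref].name, res[pref].rtt_us);
	return 0;
}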
// SPDX-License-Identifier: GPL-2.0-or-later /* AFS silly rename handling * * Copyright (C) 2019 Red Hat, Inc. All Rights Reserved. * Written by David Howells ([email protected]) * - Derived from NFS's sillyrename. */ #include <linux/kernel.h> #include <linux/fs.h> #include <linux/namei.h> #include <linux/fsnotify.h> #include "internal.h" static void afs_silly_rename_success(struct afs_operation *op) { _enter("op=%08x", op->debug_id); afs_check_dir_conflict(op, &op->file[0]); afs_vnode_commit_status(op, &op->file[0]); } static void afs_silly_rename_edit_dir(struct afs_operation *op) { struct afs_vnode_param *dvp = &op->file[0]; struct afs_vnode *dvnode = dvp->vnode; struct afs_vnode *vnode = AFS_FS_I(d_inode(op->dentry)); struct dentry *old = op->dentry; struct dentry *new = op->dentry_2; spin_lock(&old->d_lock); old->d_flags |= DCACHE_NFSFS_RENAMED; spin_unlock(&old->d_lock); if (dvnode->silly_key != op->key) { key_put(dvnode->silly_key); dvnode->silly_key = key_get(op->key); } down_write(&dvnode->validate_lock); if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) && dvnode->status.data_version == dvp->dv_before + dvp->dv_delta) { afs_edit_dir_remove(dvnode, &old->d_name, afs_edit_dir_for_silly_0); afs_edit_dir_add(dvnode, &new->d_name, &vnode->fid, afs_edit_dir_for_silly_1); } up_write(&dvnode->validate_lock); } static const struct afs_operation_ops afs_silly_rename_operation = { .issue_afs_rpc = afs_fs_rename, .issue_yfs_rpc = yfs_fs_rename, .success = afs_silly_rename_success, .edit_dir = afs_silly_rename_edit_dir, }; /* * Actually perform the silly rename step. */ static int afs_do_silly_rename(struct afs_vnode *dvnode, struct afs_vnode *vnode, struct dentry *old, struct dentry *new, struct key *key) { struct afs_operation *op; _enter("%pd,%pd", old, new); op = afs_alloc_operation(key, dvnode->volume); if (IS_ERR(op)) return PTR_ERR(op); afs_op_set_vnode(op, 0, dvnode); afs_op_set_vnode(op, 1, dvnode); op->file[0].dv_delta = 1; op->file[1].dv_delta = 1; op->file[0].modification = true; op->file[1].modification = true; op->file[0].update_ctime = true; op->file[1].update_ctime = true; op->dentry = old; op->dentry_2 = new; op->ops = &afs_silly_rename_operation; trace_afs_silly_rename(vnode, false); return afs_do_sync_operation(op); } /* * Perform silly-rename of a dentry. * * AFS is stateless and the server doesn't know when the client is holding a * file open. To prevent application problems when a file is unlinked while * it's still open, the client performs a "silly-rename". That is, it renames * the file to a hidden file in the same directory, and only performs the * unlink once the last reference to it is put. * * The final cleanup is done during dentry_iput. */ int afs_sillyrename(struct afs_vnode *dvnode, struct afs_vnode *vnode, struct dentry *dentry, struct key *key) { static unsigned int sillycounter; struct dentry *sdentry = NULL; unsigned char silly[16]; int ret = -EBUSY; _enter(""); /* We don't allow a dentry to be silly-renamed twice. */ if (dentry->d_flags & DCACHE_NFSFS_RENAMED) return -EBUSY; sdentry = NULL; do { int slen; dput(sdentry); sillycounter++; /* Create a silly name. Note that the ".__afs" prefix is * understood by the salvager and must not be changed. */ slen = scnprintf(silly, sizeof(silly), ".__afs%04X", sillycounter); sdentry = lookup_one_len(silly, dentry->d_parent, slen); /* N.B. Better to return EBUSY here ... it could be dangerous * to delete the file while it's in use. 
*/ if (IS_ERR(sdentry)) goto out; } while (!d_is_negative(sdentry)); ihold(&vnode->netfs.inode); ret = afs_do_silly_rename(dvnode, vnode, dentry, sdentry, key); switch (ret) { case 0: /* The rename succeeded. */ set_bit(AFS_VNODE_SILLY_DELETED, &vnode->flags); d_move(dentry, sdentry); break; case -ERESTARTSYS: /* The result of the rename is unknown. Play it safe by forcing * a new lookup. */ d_drop(dentry); d_drop(sdentry); } iput(&vnode->netfs.inode); dput(sdentry); out: _leave(" = %d", ret); return ret; } static void afs_silly_unlink_success(struct afs_operation *op) { _enter("op=%08x", op->debug_id); afs_check_dir_conflict(op, &op->file[0]); afs_vnode_commit_status(op, &op->file[0]); afs_vnode_commit_status(op, &op->file[1]); afs_update_dentry_version(op, &op->file[0], op->dentry); } static void afs_silly_unlink_edit_dir(struct afs_operation *op) { struct afs_vnode_param *dvp = &op->file[0]; struct afs_vnode *dvnode = dvp->vnode; _enter("op=%08x", op->debug_id); down_write(&dvnode->validate_lock); if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) && dvnode->status.data_version == dvp->dv_before + dvp->dv_delta) afs_edit_dir_remove(dvnode, &op->dentry->d_name, afs_edit_dir_for_unlink); up_write(&dvnode->validate_lock); } static const struct afs_operation_ops afs_silly_unlink_operation = { .issue_afs_rpc = afs_fs_remove_file, .issue_yfs_rpc = yfs_fs_remove_file, .success = afs_silly_unlink_success, .aborted = afs_check_for_remote_deletion, .edit_dir = afs_silly_unlink_edit_dir, }; /* * Tell the server to remove a sillyrename file. */ static int afs_do_silly_unlink(struct afs_vnode *dvnode, struct afs_vnode *vnode, struct dentry *dentry, struct key *key) { struct afs_operation *op; _enter(""); op = afs_alloc_operation(NULL, dvnode->volume); if (IS_ERR(op)) return PTR_ERR(op); afs_op_set_vnode(op, 0, dvnode); afs_op_set_vnode(op, 1, vnode); op->file[0].dv_delta = 1; op->file[0].modification = true; op->file[0].update_ctime = true; op->file[1].op_unlinked = true; op->file[1].update_ctime = true; op->dentry = dentry; op->ops = &afs_silly_unlink_operation; trace_afs_silly_rename(vnode, true); afs_begin_vnode_operation(op); afs_wait_for_operation(op); /* If there was a conflict with a third party, check the status of the * unlinked vnode. */ if (op->error == 0 && (op->flags & AFS_OPERATION_DIR_CONFLICT)) { op->file[1].update_ctime = false; op->fetch_status.which = 1; op->ops = &afs_fetch_status_operation; afs_begin_vnode_operation(op); afs_wait_for_operation(op); } return afs_put_operation(op); } /* * Remove sillyrename file on iput. */ int afs_silly_iput(struct dentry *dentry, struct inode *inode) { struct afs_vnode *dvnode = AFS_FS_I(d_inode(dentry->d_parent)); struct afs_vnode *vnode = AFS_FS_I(inode); struct dentry *alias; int ret; DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); _enter("%p{%pd},%llx", dentry, dentry, vnode->fid.vnode); down_read(&dvnode->rmdir_lock); alias = d_alloc_parallel(dentry->d_parent, &dentry->d_name, &wq); if (IS_ERR(alias)) { up_read(&dvnode->rmdir_lock); return 0; } if (!d_in_lookup(alias)) { /* We raced with lookup... See if we need to transfer the * sillyrename information to the aliased dentry. */ ret = 0; spin_lock(&alias->d_lock); if (d_really_is_positive(alias) && !(alias->d_flags & DCACHE_NFSFS_RENAMED)) { alias->d_flags |= DCACHE_NFSFS_RENAMED; ret = 1; } spin_unlock(&alias->d_lock); up_read(&dvnode->rmdir_lock); dput(alias); return ret; } /* Stop lock-release from complaining. 
*/ spin_lock(&vnode->lock); vnode->lock_state = AFS_VNODE_LOCK_DELETED; trace_afs_flock_ev(vnode, NULL, afs_flock_silly_delete, 0); spin_unlock(&vnode->lock); afs_do_silly_unlink(dvnode, vnode, dentry, dvnode->silly_key); up_read(&dvnode->rmdir_lock); d_lookup_done(alias); dput(alias); return 1; }
linux-master
fs/afs/dir_silly.c
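afs_sillyrename() in fs/afs/dir_silly.c builds candidate names of the form ".__afs%04X" from an incrementing counter and retries until the lookup comes back negative. A userspace sketch of that naming loop, using access() as a stand-in for the negative-dentry check; the directory path and helper name are illustrative only.

#include <stdio.h>
#include <unistd.h>

/*
 * Pick a ".__afsXXXX" name that does not already exist in the directory.
 * Returns 0 and fills buf on success, -1 if every candidate is taken.
 */
static int pick_silly_name(const char *dir, char *buf, size_t buflen)
{
	static unsigned int sillycounter;
	char path[4096];

	for (int tries = 0; tries < 0x10000; tries++) {
		sillycounter++;
		snprintf(buf, buflen, ".__afs%04X", sillycounter & 0xffff);
		snprintf(path, sizeof(path), "%s/%s", dir, buf);
		if (access(path, F_OK) != 0)
			return 0;	/* name is free */
	}
	return -1;
}

int main(void)
{
	char name[32];

	if (pick_silly_name("/tmp", name, sizeof(name)) == 0)
		printf("would rename to %s\n", name);
	return 0;
}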
// SPDX-License-Identifier: GPL-2.0-or-later /* mountpoint management * * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. * Written by David Howells ([email protected]) */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/pagemap.h> #include <linux/mount.h> #include <linux/namei.h> #include <linux/gfp.h> #include <linux/fs_context.h> #include "internal.h" static struct dentry *afs_mntpt_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags); static int afs_mntpt_open(struct inode *inode, struct file *file); static void afs_mntpt_expiry_timed_out(struct work_struct *work); const struct file_operations afs_mntpt_file_operations = { .open = afs_mntpt_open, .llseek = noop_llseek, }; const struct inode_operations afs_mntpt_inode_operations = { .lookup = afs_mntpt_lookup, .readlink = page_readlink, .getattr = afs_getattr, }; const struct inode_operations afs_autocell_inode_operations = { .getattr = afs_getattr, }; static LIST_HEAD(afs_vfsmounts); static DECLARE_DELAYED_WORK(afs_mntpt_expiry_timer, afs_mntpt_expiry_timed_out); static unsigned long afs_mntpt_expiry_timeout = 10 * 60; static const char afs_root_volume[] = "root.cell"; /* * no valid lookup procedure on this sort of dir */ static struct dentry *afs_mntpt_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { _enter("%p,%p{%pd2}", dir, dentry, dentry); return ERR_PTR(-EREMOTE); } /* * no valid open procedure on this sort of dir */ static int afs_mntpt_open(struct inode *inode, struct file *file) { _enter("%p,%p{%pD2}", inode, file, file); return -EREMOTE; } /* * Set the parameters for the proposed superblock. */ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt) { struct afs_fs_context *ctx = fc->fs_private; struct afs_super_info *src_as = AFS_FS_S(mntpt->d_sb); struct afs_vnode *vnode = AFS_FS_I(d_inode(mntpt)); struct afs_cell *cell; const char *p; int ret; if (fc->net_ns != src_as->net_ns) { put_net(fc->net_ns); fc->net_ns = get_net(src_as->net_ns); } if (src_as->volume && src_as->volume->type == AFSVL_RWVOL) { ctx->type = AFSVL_RWVOL; ctx->force = true; } if (ctx->cell) { afs_unuse_cell(ctx->net, ctx->cell, afs_cell_trace_unuse_mntpt); ctx->cell = NULL; } if (test_bit(AFS_VNODE_PSEUDODIR, &vnode->flags)) { /* if the directory is a pseudo directory, use the d_name */ unsigned size = mntpt->d_name.len; if (size < 2) return -ENOENT; p = mntpt->d_name.name; if (mntpt->d_name.name[0] == '.') { size--; p++; ctx->type = AFSVL_RWVOL; ctx->force = true; } if (size > AFS_MAXCELLNAME) return -ENAMETOOLONG; cell = afs_lookup_cell(ctx->net, p, size, NULL, false); if (IS_ERR(cell)) { pr_err("kAFS: unable to lookup cell '%pd'\n", mntpt); return PTR_ERR(cell); } ctx->cell = cell; ctx->volname = afs_root_volume; ctx->volnamesz = sizeof(afs_root_volume) - 1; } else { /* read the contents of the AFS special symlink */ struct page *page; loff_t size = i_size_read(d_inode(mntpt)); char *buf; if (src_as->cell) ctx->cell = afs_use_cell(src_as->cell, afs_cell_trace_use_mntpt); if (size < 2 || size > PAGE_SIZE - 1) return -EINVAL; page = read_mapping_page(d_inode(mntpt)->i_mapping, 0, NULL); if (IS_ERR(page)) return PTR_ERR(page); buf = kmap(page); ret = -EINVAL; if (buf[size - 1] == '.') ret = vfs_parse_fs_string(fc, "source", buf, size - 1); kunmap(page); put_page(page); if (ret < 0) return ret; } return 0; } /* * create a vfsmount to be automounted */ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt) { struct 
fs_context *fc; struct vfsmount *mnt; int ret; BUG_ON(!d_inode(mntpt)); fc = fs_context_for_submount(&afs_fs_type, mntpt); if (IS_ERR(fc)) return ERR_CAST(fc); ret = afs_mntpt_set_params(fc, mntpt); if (!ret) mnt = fc_mount(fc); else mnt = ERR_PTR(ret); put_fs_context(fc); return mnt; } /* * handle an automount point */ struct vfsmount *afs_d_automount(struct path *path) { struct vfsmount *newmnt; _enter("{%pd}", path->dentry); newmnt = afs_mntpt_do_automount(path->dentry); if (IS_ERR(newmnt)) return newmnt; mntget(newmnt); /* prevent immediate expiration */ mnt_set_expiry(newmnt, &afs_vfsmounts); queue_delayed_work(afs_wq, &afs_mntpt_expiry_timer, afs_mntpt_expiry_timeout * HZ); _leave(" = %p", newmnt); return newmnt; } /* * handle mountpoint expiry timer going off */ static void afs_mntpt_expiry_timed_out(struct work_struct *work) { _enter(""); if (!list_empty(&afs_vfsmounts)) { mark_mounts_for_expiry(&afs_vfsmounts); queue_delayed_work(afs_wq, &afs_mntpt_expiry_timer, afs_mntpt_expiry_timeout * HZ); } _leave(""); } /* * kill the AFS mountpoint timer if it's still running */ void afs_mntpt_kill_timer(void) { _enter(""); ASSERT(list_empty(&afs_vfsmounts)); cancel_delayed_work_sync(&afs_mntpt_expiry_timer); }
linux-master
fs/afs/mntpt.c
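When the mountpoint is a real AFS symlink, afs_mntpt_set_params() in fs/afs/mntpt.c reads its body, requires it to be between 2 and PAGE_SIZE-1 bytes and to end in '.', and passes everything before the trailing dot as the mount source. A sketch of just that validation, assuming a 4096-byte page and a made-up sample string (the exact mountpoint syntax is not shown by this file and is only illustrative here).

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE_GUESS 4096	/* assumption for this sketch */

/* Validate a mountpoint body and copy it, minus the trailing '.', into out. */
static int parse_mntpt(const char *body, char *out, size_t outlen)
{
	size_t size = strlen(body);

	if (size < 2 || size > PAGE_SIZE_GUESS - 1)
		return -1;
	if (body[size - 1] != '.')
		return -1;
	if (size - 1 >= outlen)
		return -1;
	memcpy(out, body, size - 1);
	out[size - 1] = '\0';
	return 0;
}

int main(void)
{
	char source[256];
	const char *bodies[] = { "example.org:root.cell.", "bad-no-dot" };

	for (size_t i = 0; i < 2; i++) {
		if (parse_mntpt(bodies[i], source, sizeof(source)) == 0)
			printf("%-24s -> source \"%s\"\n", bodies[i], source);
		else
			printf("%-24s -> rejected\n", bodies[i]);
	}
	return 0;
}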
// SPDX-License-Identifier: GPL-2.0-or-later /* AFS Volume Location Service client * * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. * Written by David Howells ([email protected]) */ #include <linux/gfp.h> #include <linux/init.h> #include <linux/sched.h> #include "afs_fs.h" #include "internal.h" /* * Deliver reply data to a VL.GetEntryByNameU call. */ static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call) { struct afs_uvldbentry__xdr *uvldb; struct afs_vldb_entry *entry; bool new_only = false; u32 tmp, nr_servers, vlflags; int i, ret; _enter(""); ret = afs_transfer_reply(call); if (ret < 0) return ret; /* unmarshall the reply once we've received all of it */ uvldb = call->buffer; entry = call->ret_vldb; nr_servers = ntohl(uvldb->nServers); if (nr_servers > AFS_NMAXNSERVERS) nr_servers = AFS_NMAXNSERVERS; for (i = 0; i < ARRAY_SIZE(uvldb->name) - 1; i++) entry->name[i] = (u8)ntohl(uvldb->name[i]); entry->name[i] = 0; entry->name_len = strlen(entry->name); /* If there is a new replication site that we can use, ignore all the * sites that aren't marked as new. */ for (i = 0; i < nr_servers; i++) { tmp = ntohl(uvldb->serverFlags[i]); if (!(tmp & AFS_VLSF_DONTUSE) && (tmp & AFS_VLSF_NEWREPSITE)) new_only = true; } vlflags = ntohl(uvldb->flags); for (i = 0; i < nr_servers; i++) { struct afs_uuid__xdr *xdr; struct afs_uuid *uuid; int j; int n = entry->nr_servers; tmp = ntohl(uvldb->serverFlags[i]); if (tmp & AFS_VLSF_DONTUSE || (new_only && !(tmp & AFS_VLSF_NEWREPSITE))) continue; if (tmp & AFS_VLSF_RWVOL) { entry->fs_mask[n] |= AFS_VOL_VTM_RW; if (vlflags & AFS_VLF_BACKEXISTS) entry->fs_mask[n] |= AFS_VOL_VTM_BAK; } if (tmp & AFS_VLSF_ROVOL) entry->fs_mask[n] |= AFS_VOL_VTM_RO; if (!entry->fs_mask[n]) continue; xdr = &uvldb->serverNumber[i]; uuid = (struct afs_uuid *)&entry->fs_server[n]; uuid->time_low = xdr->time_low; uuid->time_mid = htons(ntohl(xdr->time_mid)); uuid->time_hi_and_version = htons(ntohl(xdr->time_hi_and_version)); uuid->clock_seq_hi_and_reserved = (u8)ntohl(xdr->clock_seq_hi_and_reserved); uuid->clock_seq_low = (u8)ntohl(xdr->clock_seq_low); for (j = 0; j < 6; j++) uuid->node[j] = (u8)ntohl(xdr->node[j]); entry->addr_version[n] = ntohl(uvldb->serverUnique[i]); entry->nr_servers++; } for (i = 0; i < AFS_MAXTYPES; i++) entry->vid[i] = ntohl(uvldb->volumeId[i]); if (vlflags & AFS_VLF_RWEXISTS) __set_bit(AFS_VLDB_HAS_RW, &entry->flags); if (vlflags & AFS_VLF_ROEXISTS) __set_bit(AFS_VLDB_HAS_RO, &entry->flags); if (vlflags & AFS_VLF_BACKEXISTS) __set_bit(AFS_VLDB_HAS_BAK, &entry->flags); if (!(vlflags & (AFS_VLF_RWEXISTS | AFS_VLF_ROEXISTS | AFS_VLF_BACKEXISTS))) { entry->error = -ENOMEDIUM; __set_bit(AFS_VLDB_QUERY_ERROR, &entry->flags); } __set_bit(AFS_VLDB_QUERY_VALID, &entry->flags); _leave(" = 0 [done]"); return 0; } static void afs_destroy_vl_get_entry_by_name_u(struct afs_call *call) { kfree(call->ret_vldb); afs_flat_call_destructor(call); } /* * VL.GetEntryByNameU operation type. */ static const struct afs_call_type afs_RXVLGetEntryByNameU = { .name = "VL.GetEntryByNameU", .op = afs_VL_GetEntryByNameU, .deliver = afs_deliver_vl_get_entry_by_name_u, .destructor = afs_destroy_vl_get_entry_by_name_u, }; /* * Dispatch a get volume entry by name or ID operation (uuid variant). If the * volname is a decimal number then it's a volume ID not a volume name. 
*/ struct afs_vldb_entry *afs_vl_get_entry_by_name_u(struct afs_vl_cursor *vc, const char *volname, int volnamesz) { struct afs_vldb_entry *entry; struct afs_call *call; struct afs_net *net = vc->cell->net; size_t reqsz, padsz; __be32 *bp; _enter(""); padsz = (4 - (volnamesz & 3)) & 3; reqsz = 8 + volnamesz + padsz; entry = kzalloc(sizeof(struct afs_vldb_entry), GFP_KERNEL); if (!entry) return ERR_PTR(-ENOMEM); call = afs_alloc_flat_call(net, &afs_RXVLGetEntryByNameU, reqsz, sizeof(struct afs_uvldbentry__xdr)); if (!call) { kfree(entry); return ERR_PTR(-ENOMEM); } call->key = vc->key; call->ret_vldb = entry; call->max_lifespan = AFS_VL_MAX_LIFESPAN; /* Marshall the parameters */ bp = call->request; *bp++ = htonl(VLGETENTRYBYNAMEU); *bp++ = htonl(volnamesz); memcpy(bp, volname, volnamesz); if (padsz > 0) memset((void *)bp + volnamesz, 0, padsz); trace_afs_make_vl_call(call); afs_make_call(&vc->ac, call, GFP_KERNEL); return (struct afs_vldb_entry *)afs_wait_for_call_to_complete(call, &vc->ac); } /* * Deliver reply data to a VL.GetAddrsU call. * * GetAddrsU(IN ListAddrByAttributes *inaddr, * OUT afsUUID *uuidp1, * OUT uint32_t *uniquifier, * OUT uint32_t *nentries, * OUT bulkaddrs *blkaddrs); */ static int afs_deliver_vl_get_addrs_u(struct afs_call *call) { struct afs_addr_list *alist; __be32 *bp; u32 uniquifier, nentries, count; int i, ret; _enter("{%u,%zu/%u}", call->unmarshall, iov_iter_count(call->iter), call->count); switch (call->unmarshall) { case 0: afs_extract_to_buf(call, sizeof(struct afs_uuid__xdr) + 3 * sizeof(__be32)); call->unmarshall++; /* Extract the returned uuid, uniquifier, nentries and * blkaddrs size */ fallthrough; case 1: ret = afs_extract_data(call, true); if (ret < 0) return ret; bp = call->buffer + sizeof(struct afs_uuid__xdr); uniquifier = ntohl(*bp++); nentries = ntohl(*bp++); count = ntohl(*bp); nentries = min(nentries, count); alist = afs_alloc_addrlist(nentries, FS_SERVICE, AFS_FS_PORT); if (!alist) return -ENOMEM; alist->version = uniquifier; call->ret_alist = alist; call->count = count; call->count2 = nentries; call->unmarshall++; more_entries: count = min(call->count, 4U); afs_extract_to_buf(call, count * sizeof(__be32)); fallthrough; /* and extract entries */ case 2: ret = afs_extract_data(call, call->count > 4); if (ret < 0) return ret; alist = call->ret_alist; bp = call->buffer; count = min(call->count, 4U); for (i = 0; i < count; i++) if (alist->nr_addrs < call->count2) afs_merge_fs_addr4(alist, *bp++, AFS_FS_PORT); call->count -= count; if (call->count > 0) goto more_entries; call->unmarshall++; break; } _leave(" = 0 [done]"); return 0; } static void afs_vl_get_addrs_u_destructor(struct afs_call *call) { afs_put_addrlist(call->ret_alist); return afs_flat_call_destructor(call); } /* * VL.GetAddrsU operation type. */ static const struct afs_call_type afs_RXVLGetAddrsU = { .name = "VL.GetAddrsU", .op = afs_VL_GetAddrsU, .deliver = afs_deliver_vl_get_addrs_u, .destructor = afs_vl_get_addrs_u_destructor, }; /* * Dispatch an operation to get the addresses for a server, where the server is * nominated by UUID. 
*/ struct afs_addr_list *afs_vl_get_addrs_u(struct afs_vl_cursor *vc, const uuid_t *uuid) { struct afs_ListAddrByAttributes__xdr *r; const struct afs_uuid *u = (const struct afs_uuid *)uuid; struct afs_call *call; struct afs_net *net = vc->cell->net; __be32 *bp; int i; _enter(""); call = afs_alloc_flat_call(net, &afs_RXVLGetAddrsU, sizeof(__be32) + sizeof(struct afs_ListAddrByAttributes__xdr), sizeof(struct afs_uuid__xdr) + 3 * sizeof(__be32)); if (!call) return ERR_PTR(-ENOMEM); call->key = vc->key; call->ret_alist = NULL; call->max_lifespan = AFS_VL_MAX_LIFESPAN; /* Marshall the parameters */ bp = call->request; *bp++ = htonl(VLGETADDRSU); r = (struct afs_ListAddrByAttributes__xdr *)bp; r->Mask = htonl(AFS_VLADDR_UUID); r->ipaddr = 0; r->index = 0; r->spare = 0; r->uuid.time_low = u->time_low; r->uuid.time_mid = htonl(ntohs(u->time_mid)); r->uuid.time_hi_and_version = htonl(ntohs(u->time_hi_and_version)); r->uuid.clock_seq_hi_and_reserved = htonl(u->clock_seq_hi_and_reserved); r->uuid.clock_seq_low = htonl(u->clock_seq_low); for (i = 0; i < 6; i++) r->uuid.node[i] = htonl(u->node[i]); trace_afs_make_vl_call(call); afs_make_call(&vc->ac, call, GFP_KERNEL); return (struct afs_addr_list *)afs_wait_for_call_to_complete(call, &vc->ac); } /* * Deliver reply data to an VL.GetCapabilities operation. */ static int afs_deliver_vl_get_capabilities(struct afs_call *call) { u32 count; int ret; _enter("{%u,%zu/%u}", call->unmarshall, iov_iter_count(call->iter), call->count); switch (call->unmarshall) { case 0: afs_extract_to_tmp(call); call->unmarshall++; fallthrough; /* and extract the capabilities word count */ case 1: ret = afs_extract_data(call, true); if (ret < 0) return ret; count = ntohl(call->tmp); call->count = count; call->count2 = count; call->unmarshall++; afs_extract_discard(call, count * sizeof(__be32)); fallthrough; /* and extract capabilities words */ case 2: ret = afs_extract_data(call, false); if (ret < 0) return ret; /* TODO: Examine capabilities */ call->unmarshall++; break; } _leave(" = 0 [done]"); return 0; } static void afs_destroy_vl_get_capabilities(struct afs_call *call) { afs_put_vlserver(call->net, call->vlserver); afs_flat_call_destructor(call); } /* * VL.GetCapabilities operation type */ static const struct afs_call_type afs_RXVLGetCapabilities = { .name = "VL.GetCapabilities", .op = afs_VL_GetCapabilities, .deliver = afs_deliver_vl_get_capabilities, .done = afs_vlserver_probe_result, .destructor = afs_destroy_vl_get_capabilities, }; /* * Probe a volume server for the capabilities that it supports. This can * return up to 196 words. * * We use this to probe for service upgrade to determine what the server at the * other end supports. */ struct afs_call *afs_vl_get_capabilities(struct afs_net *net, struct afs_addr_cursor *ac, struct key *key, struct afs_vlserver *server, unsigned int server_index) { struct afs_call *call; __be32 *bp; _enter(""); call = afs_alloc_flat_call(net, &afs_RXVLGetCapabilities, 1 * 4, 16 * 4); if (!call) return ERR_PTR(-ENOMEM); call->key = key; call->vlserver = afs_get_vlserver(server); call->server_index = server_index; call->upgrade = true; call->async = true; call->max_lifespan = AFS_PROBE_MAX_LIFESPAN; /* marshall the parameters */ bp = call->request; *bp++ = htonl(VLGETCAPABILITIES); /* Can't take a ref on server */ trace_afs_make_vl_call(call); afs_make_call(ac, call, GFP_KERNEL); return call; } /* * Deliver reply data to a YFSVL.GetEndpoints call. 
* * GetEndpoints(IN yfsServerAttributes *attr, * OUT opr_uuid *uuid, * OUT afs_int32 *uniquifier, * OUT endpoints *fsEndpoints, * OUT endpoints *volEndpoints) */ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call) { struct afs_addr_list *alist; __be32 *bp; u32 uniquifier, size; int ret; _enter("{%u,%zu,%u}", call->unmarshall, iov_iter_count(call->iter), call->count2); switch (call->unmarshall) { case 0: afs_extract_to_buf(call, sizeof(uuid_t) + 3 * sizeof(__be32)); call->unmarshall = 1; /* Extract the returned uuid, uniquifier, fsEndpoints count and * either the first fsEndpoint type or the volEndpoints * count if there are no fsEndpoints. */ fallthrough; case 1: ret = afs_extract_data(call, true); if (ret < 0) return ret; bp = call->buffer + sizeof(uuid_t); uniquifier = ntohl(*bp++); call->count = ntohl(*bp++); call->count2 = ntohl(*bp); /* Type or next count */ if (call->count > YFS_MAXENDPOINTS) return afs_protocol_error(call, afs_eproto_yvl_fsendpt_num); alist = afs_alloc_addrlist(call->count, FS_SERVICE, AFS_FS_PORT); if (!alist) return -ENOMEM; alist->version = uniquifier; call->ret_alist = alist; if (call->count == 0) goto extract_volendpoints; next_fsendpoint: switch (call->count2) { case YFS_ENDPOINT_IPV4: size = sizeof(__be32) * (1 + 1 + 1); break; case YFS_ENDPOINT_IPV6: size = sizeof(__be32) * (1 + 4 + 1); break; default: return afs_protocol_error(call, afs_eproto_yvl_fsendpt_type); } size += sizeof(__be32); afs_extract_to_buf(call, size); call->unmarshall = 2; fallthrough; /* and extract fsEndpoints[] entries */ case 2: ret = afs_extract_data(call, true); if (ret < 0) return ret; alist = call->ret_alist; bp = call->buffer; switch (call->count2) { case YFS_ENDPOINT_IPV4: if (ntohl(bp[0]) != sizeof(__be32) * 2) return afs_protocol_error( call, afs_eproto_yvl_fsendpt4_len); afs_merge_fs_addr4(alist, bp[1], ntohl(bp[2])); bp += 3; break; case YFS_ENDPOINT_IPV6: if (ntohl(bp[0]) != sizeof(__be32) * 5) return afs_protocol_error( call, afs_eproto_yvl_fsendpt6_len); afs_merge_fs_addr6(alist, bp + 1, ntohl(bp[5])); bp += 6; break; default: return afs_protocol_error(call, afs_eproto_yvl_fsendpt_type); } /* Got either the type of the next entry or the count of * volEndpoints if no more fsEndpoints. */ call->count2 = ntohl(*bp++); call->count--; if (call->count > 0) goto next_fsendpoint; extract_volendpoints: /* Extract the list of volEndpoints. */ call->count = call->count2; if (!call->count) goto end; if (call->count > YFS_MAXENDPOINTS) return afs_protocol_error(call, afs_eproto_yvl_vlendpt_type); afs_extract_to_buf(call, 1 * sizeof(__be32)); call->unmarshall = 3; /* Extract the type of volEndpoints[0]. Normally we would * extract the type of the next endpoint when we extract the * data of the current one, but this is the first... 
*/ fallthrough; case 3: ret = afs_extract_data(call, true); if (ret < 0) return ret; bp = call->buffer; next_volendpoint: call->count2 = ntohl(*bp++); switch (call->count2) { case YFS_ENDPOINT_IPV4: size = sizeof(__be32) * (1 + 1 + 1); break; case YFS_ENDPOINT_IPV6: size = sizeof(__be32) * (1 + 4 + 1); break; default: return afs_protocol_error(call, afs_eproto_yvl_vlendpt_type); } if (call->count > 1) size += sizeof(__be32); /* Get next type too */ afs_extract_to_buf(call, size); call->unmarshall = 4; fallthrough; /* and extract volEndpoints[] entries */ case 4: ret = afs_extract_data(call, true); if (ret < 0) return ret; bp = call->buffer; switch (call->count2) { case YFS_ENDPOINT_IPV4: if (ntohl(bp[0]) != sizeof(__be32) * 2) return afs_protocol_error( call, afs_eproto_yvl_vlendpt4_len); bp += 3; break; case YFS_ENDPOINT_IPV6: if (ntohl(bp[0]) != sizeof(__be32) * 5) return afs_protocol_error( call, afs_eproto_yvl_vlendpt6_len); bp += 6; break; default: return afs_protocol_error(call, afs_eproto_yvl_vlendpt_type); } /* Got either the type of the next entry or the count of * volEndpoints if no more fsEndpoints. */ call->count--; if (call->count > 0) goto next_volendpoint; end: afs_extract_discard(call, 0); call->unmarshall = 5; fallthrough; /* Done */ case 5: ret = afs_extract_data(call, false); if (ret < 0) return ret; call->unmarshall = 6; fallthrough; case 6: break; } _leave(" = 0 [done]"); return 0; } /* * YFSVL.GetEndpoints operation type. */ static const struct afs_call_type afs_YFSVLGetEndpoints = { .name = "YFSVL.GetEndpoints", .op = afs_YFSVL_GetEndpoints, .deliver = afs_deliver_yfsvl_get_endpoints, .destructor = afs_vl_get_addrs_u_destructor, }; /* * Dispatch an operation to get the addresses for a server, where the server is * nominated by UUID. */ struct afs_addr_list *afs_yfsvl_get_endpoints(struct afs_vl_cursor *vc, const uuid_t *uuid) { struct afs_call *call; struct afs_net *net = vc->cell->net; __be32 *bp; _enter(""); call = afs_alloc_flat_call(net, &afs_YFSVLGetEndpoints, sizeof(__be32) * 2 + sizeof(*uuid), sizeof(struct in6_addr) + sizeof(__be32) * 3); if (!call) return ERR_PTR(-ENOMEM); call->key = vc->key; call->ret_alist = NULL; call->max_lifespan = AFS_VL_MAX_LIFESPAN; /* Marshall the parameters */ bp = call->request; *bp++ = htonl(YVLGETENDPOINTS); *bp++ = htonl(YFS_SERVER_UUID); memcpy(bp, uuid, sizeof(*uuid)); /* Type opr_uuid */ trace_afs_make_vl_call(call); afs_make_call(&vc->ac, call, GFP_KERNEL); return (struct afs_addr_list *)afs_wait_for_call_to_complete(call, &vc->ac); } /* * Deliver reply data to a YFSVL.GetCellName operation. 
*/ static int afs_deliver_yfsvl_get_cell_name(struct afs_call *call) { char *cell_name; u32 namesz, paddedsz; int ret; _enter("{%u,%zu/%u}", call->unmarshall, iov_iter_count(call->iter), call->count); switch (call->unmarshall) { case 0: afs_extract_to_tmp(call); call->unmarshall++; fallthrough; /* and extract the cell name length */ case 1: ret = afs_extract_data(call, true); if (ret < 0) return ret; namesz = ntohl(call->tmp); if (namesz > AFS_MAXCELLNAME) return afs_protocol_error(call, afs_eproto_cellname_len); paddedsz = (namesz + 3) & ~3; call->count = namesz; call->count2 = paddedsz - namesz; cell_name = kmalloc(namesz + 1, GFP_KERNEL); if (!cell_name) return -ENOMEM; cell_name[namesz] = 0; call->ret_str = cell_name; afs_extract_begin(call, cell_name, namesz); call->unmarshall++; fallthrough; /* and extract cell name */ case 2: ret = afs_extract_data(call, true); if (ret < 0) return ret; afs_extract_discard(call, call->count2); call->unmarshall++; fallthrough; /* and extract padding */ case 3: ret = afs_extract_data(call, false); if (ret < 0) return ret; call->unmarshall++; break; } _leave(" = 0 [done]"); return 0; } static void afs_destroy_yfsvl_get_cell_name(struct afs_call *call) { kfree(call->ret_str); afs_flat_call_destructor(call); } /* * VL.GetCapabilities operation type */ static const struct afs_call_type afs_YFSVLGetCellName = { .name = "YFSVL.GetCellName", .op = afs_YFSVL_GetCellName, .deliver = afs_deliver_yfsvl_get_cell_name, .destructor = afs_destroy_yfsvl_get_cell_name, }; /* * Probe a volume server for the capabilities that it supports. This can * return up to 196 words. * * We use this to probe for service upgrade to determine what the server at the * other end supports. */ char *afs_yfsvl_get_cell_name(struct afs_vl_cursor *vc) { struct afs_call *call; struct afs_net *net = vc->cell->net; __be32 *bp; _enter(""); call = afs_alloc_flat_call(net, &afs_YFSVLGetCellName, 1 * 4, 0); if (!call) return ERR_PTR(-ENOMEM); call->key = vc->key; call->ret_str = NULL; call->max_lifespan = AFS_VL_MAX_LIFESPAN; /* marshall the parameters */ bp = call->request; *bp++ = htonl(YVLGETCELLNAME); /* Can't take a ref on server */ trace_afs_make_vl_call(call); afs_make_call(&vc->ac, call, GFP_KERNEL); return (char *)afs_wait_for_call_to_complete(call, &vc->ac); }
linux-master
fs/afs/vlclient.c
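The request marshalling in fs/afs/vlclient.c pads the volume name out to a 4-byte boundary with padsz = (4 - (volnamesz & 3)) & 3 before writing it after a big-endian length word. A self-contained sketch of that buffer layout; the RPC opcode and call setup are omitted and the helper name is invented for illustration.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Write a 32-bit big-endian length, the name bytes, then zero padding up to
 * the next 4-byte boundary. Returns the number of bytes written, or -1 if
 * the buffer is too small.
 */
static int xdr_put_string(uint8_t *buf, size_t buflen,
			  const char *name, uint32_t namesz)
{
	uint32_t padsz = (4 - (namesz & 3)) & 3;
	size_t need = 4 + namesz + padsz;
	uint32_t be_len = htonl(namesz);

	if (buflen < need)
		return -1;
	memcpy(buf, &be_len, 4);
	memcpy(buf + 4, name, namesz);
	memset(buf + 4 + namesz, 0, padsz);
	return (int)need;
}

int main(void)
{
	uint8_t buf[64];
	const char *volname = "root.cell";
	int n = xdr_put_string(buf, sizeof(buf), volname,
			       (uint32_t)strlen(volname));

	printf("marshalled %d bytes for \"%s\" (padding %d)\n",
	       n, volname, n - 4 - (int)strlen(volname));
	return 0;
}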
// SPDX-License-Identifier: GPL-2.0-or-later /* /proc interface for AFS * * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. * Written by David Howells ([email protected]) */ #include <linux/slab.h> #include <linux/module.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/sched.h> #include <linux/uaccess.h> #include "internal.h" struct afs_vl_seq_net_private { struct seq_net_private seq; /* Must be first */ struct afs_vlserver_list *vllist; }; static inline struct afs_net *afs_seq2net(struct seq_file *m) { return afs_net(seq_file_net(m)); } static inline struct afs_net *afs_seq2net_single(struct seq_file *m) { return afs_net(seq_file_single_net(m)); } /* * Display the list of cells known to the namespace. */ static int afs_proc_cells_show(struct seq_file *m, void *v) { struct afs_vlserver_list *vllist; struct afs_cell *cell; if (v == SEQ_START_TOKEN) { /* display header on line 1 */ seq_puts(m, "USE ACT TTL SV ST NAME\n"); return 0; } cell = list_entry(v, struct afs_cell, proc_link); vllist = rcu_dereference(cell->vl_servers); /* display one cell per line on subsequent lines */ seq_printf(m, "%3u %3u %6lld %2u %2u %s\n", refcount_read(&cell->ref), atomic_read(&cell->active), cell->dns_expiry - ktime_get_real_seconds(), vllist ? vllist->nr_servers : 0, cell->state, cell->name); return 0; } static void *afs_proc_cells_start(struct seq_file *m, loff_t *_pos) __acquires(rcu) { rcu_read_lock(); return seq_hlist_start_head_rcu(&afs_seq2net(m)->proc_cells, *_pos); } static void *afs_proc_cells_next(struct seq_file *m, void *v, loff_t *pos) { return seq_hlist_next_rcu(v, &afs_seq2net(m)->proc_cells, pos); } static void afs_proc_cells_stop(struct seq_file *m, void *v) __releases(rcu) { rcu_read_unlock(); } static const struct seq_operations afs_proc_cells_ops = { .start = afs_proc_cells_start, .next = afs_proc_cells_next, .stop = afs_proc_cells_stop, .show = afs_proc_cells_show, }; /* * handle writes to /proc/fs/afs/cells * - to add cells: echo "add <cellname> <IP>[:<IP>][:<IP>]" */ static int afs_proc_cells_write(struct file *file, char *buf, size_t size) { struct seq_file *m = file->private_data; struct afs_net *net = afs_seq2net(m); char *name, *args; int ret; /* trim to first NL */ name = memchr(buf, '\n', size); if (name) *name = 0; /* split into command, name and argslist */ name = strchr(buf, ' '); if (!name) goto inval; do { *name++ = 0; } while(*name == ' '); if (!*name) goto inval; args = strchr(name, ' '); if (args) { do { *args++ = 0; } while(*args == ' '); if (!*args) goto inval; } /* determine command to perform */ _debug("cmd=%s name=%s args=%s", buf, name, args); if (strcmp(buf, "add") == 0) { struct afs_cell *cell; cell = afs_lookup_cell(net, name, strlen(name), args, true); if (IS_ERR(cell)) { ret = PTR_ERR(cell); goto done; } if (test_and_set_bit(AFS_CELL_FL_NO_GC, &cell->flags)) afs_unuse_cell(net, cell, afs_cell_trace_unuse_no_pin); } else { goto inval; } ret = 0; done: _leave(" = %d", ret); return ret; inval: ret = -EINVAL; printk("kAFS: Invalid Command on /proc/fs/afs/cells file\n"); goto done; } /* * Display the name of the current workstation cell. */ static int afs_proc_rootcell_show(struct seq_file *m, void *v) { struct afs_cell *cell; struct afs_net *net; net = afs_seq2net_single(m); down_read(&net->cells_lock); cell = net->ws_cell; if (cell) seq_printf(m, "%s\n", cell->name); up_read(&net->cells_lock); return 0; } /* * Set the current workstation cell and optionally supply its list of volume * location servers. 
* * echo "cell.name:192.168.231.14" >/proc/fs/afs/rootcell */ static int afs_proc_rootcell_write(struct file *file, char *buf, size_t size) { struct seq_file *m = file->private_data; struct afs_net *net = afs_seq2net_single(m); char *s; int ret; ret = -EINVAL; if (buf[0] == '.') goto out; if (memchr(buf, '/', size)) goto out; /* trim to first NL */ s = memchr(buf, '\n', size); if (s) *s = 0; /* determine command to perform */ _debug("rootcell=%s", buf); ret = afs_cell_init(net, buf); out: _leave(" = %d", ret); return ret; } static const char afs_vol_types[3][3] = { [AFSVL_RWVOL] = "RW", [AFSVL_ROVOL] = "RO", [AFSVL_BACKVOL] = "BK", }; /* * Display the list of volumes known to a cell. */ static int afs_proc_cell_volumes_show(struct seq_file *m, void *v) { struct afs_volume *vol = hlist_entry(v, struct afs_volume, proc_link); /* Display header on line 1 */ if (v == SEQ_START_TOKEN) { seq_puts(m, "USE VID TY NAME\n"); return 0; } seq_printf(m, "%3d %08llx %s %s\n", refcount_read(&vol->ref), vol->vid, afs_vol_types[vol->type], vol->name); return 0; } static void *afs_proc_cell_volumes_start(struct seq_file *m, loff_t *_pos) __acquires(cell->proc_lock) { struct afs_cell *cell = pde_data(file_inode(m->file)); rcu_read_lock(); return seq_hlist_start_head_rcu(&cell->proc_volumes, *_pos); } static void *afs_proc_cell_volumes_next(struct seq_file *m, void *v, loff_t *_pos) { struct afs_cell *cell = pde_data(file_inode(m->file)); return seq_hlist_next_rcu(v, &cell->proc_volumes, _pos); } static void afs_proc_cell_volumes_stop(struct seq_file *m, void *v) __releases(cell->proc_lock) { rcu_read_unlock(); } static const struct seq_operations afs_proc_cell_volumes_ops = { .start = afs_proc_cell_volumes_start, .next = afs_proc_cell_volumes_next, .stop = afs_proc_cell_volumes_stop, .show = afs_proc_cell_volumes_show, }; static const char *const dns_record_sources[NR__dns_record_source + 1] = { [DNS_RECORD_UNAVAILABLE] = "unav", [DNS_RECORD_FROM_CONFIG] = "cfg", [DNS_RECORD_FROM_DNS_A] = "A", [DNS_RECORD_FROM_DNS_AFSDB] = "AFSDB", [DNS_RECORD_FROM_DNS_SRV] = "SRV", [DNS_RECORD_FROM_NSS] = "nss", [NR__dns_record_source] = "[weird]" }; static const char *const dns_lookup_statuses[NR__dns_lookup_status + 1] = { [DNS_LOOKUP_NOT_DONE] = "no-lookup", [DNS_LOOKUP_GOOD] = "good", [DNS_LOOKUP_GOOD_WITH_BAD] = "good/bad", [DNS_LOOKUP_BAD] = "bad", [DNS_LOOKUP_GOT_NOT_FOUND] = "not-found", [DNS_LOOKUP_GOT_LOCAL_FAILURE] = "local-failure", [DNS_LOOKUP_GOT_TEMP_FAILURE] = "temp-failure", [DNS_LOOKUP_GOT_NS_FAILURE] = "ns-failure", [NR__dns_lookup_status] = "[weird]" }; /* * Display the list of Volume Location servers we're using for a cell. */ static int afs_proc_cell_vlservers_show(struct seq_file *m, void *v) { const struct afs_vl_seq_net_private *priv = m->private; const struct afs_vlserver_list *vllist = priv->vllist; const struct afs_vlserver_entry *entry; const struct afs_vlserver *vlserver; const struct afs_addr_list *alist; int i; if (v == SEQ_START_TOKEN) { seq_printf(m, "# source %s, status %s\n", dns_record_sources[vllist ? vllist->source : 0], dns_lookup_statuses[vllist ? vllist->status : 0]); return 0; } entry = v; vlserver = entry->server; alist = rcu_dereference(vlserver->addresses); seq_printf(m, "%s [p=%hu w=%hu s=%s,%s]:\n", vlserver->name, entry->priority, entry->weight, dns_record_sources[alist ? alist->source : entry->source], dns_lookup_statuses[alist ? alist->status : entry->status]); if (alist) { for (i = 0; i < alist->nr_addrs; i++) seq_printf(m, " %c %pISpc\n", alist->preferred == i ? 
'>' : '-', &alist->addrs[i].transport); } seq_printf(m, " info: fl=%lx rtt=%d\n", vlserver->flags, vlserver->rtt); seq_printf(m, " probe: fl=%x e=%d ac=%d out=%d\n", vlserver->probe.flags, vlserver->probe.error, vlserver->probe.abort_code, atomic_read(&vlserver->probe_outstanding)); return 0; } static void *afs_proc_cell_vlservers_start(struct seq_file *m, loff_t *_pos) __acquires(rcu) { struct afs_vl_seq_net_private *priv = m->private; struct afs_vlserver_list *vllist; struct afs_cell *cell = pde_data(file_inode(m->file)); loff_t pos = *_pos; rcu_read_lock(); vllist = rcu_dereference(cell->vl_servers); priv->vllist = vllist; if (pos < 0) *_pos = pos = 0; if (pos == 0) return SEQ_START_TOKEN; if (pos - 1 >= vllist->nr_servers) return NULL; return &vllist->servers[pos - 1]; } static void *afs_proc_cell_vlservers_next(struct seq_file *m, void *v, loff_t *_pos) { struct afs_vl_seq_net_private *priv = m->private; struct afs_vlserver_list *vllist = priv->vllist; loff_t pos; pos = *_pos; pos++; *_pos = pos; if (!vllist || pos - 1 >= vllist->nr_servers) return NULL; return &vllist->servers[pos - 1]; } static void afs_proc_cell_vlservers_stop(struct seq_file *m, void *v) __releases(rcu) { rcu_read_unlock(); } static const struct seq_operations afs_proc_cell_vlservers_ops = { .start = afs_proc_cell_vlservers_start, .next = afs_proc_cell_vlservers_next, .stop = afs_proc_cell_vlservers_stop, .show = afs_proc_cell_vlservers_show, }; /* * Display the list of fileservers we're using within a namespace. */ static int afs_proc_servers_show(struct seq_file *m, void *v) { struct afs_server *server; struct afs_addr_list *alist; int i; if (v == SEQ_START_TOKEN) { seq_puts(m, "UUID REF ACT\n"); return 0; } server = list_entry(v, struct afs_server, proc_link); alist = rcu_dereference(server->addresses); seq_printf(m, "%pU %3d %3d\n", &server->uuid, refcount_read(&server->ref), atomic_read(&server->active)); seq_printf(m, " - info: fl=%lx rtt=%u brk=%x\n", server->flags, server->rtt, server->cb_s_break); seq_printf(m, " - probe: last=%d out=%d\n", (int)(jiffies - server->probed_at) / HZ, atomic_read(&server->probe_outstanding)); seq_printf(m, " - ALIST v=%u rsp=%lx f=%lx\n", alist->version, alist->responded, alist->failed); for (i = 0; i < alist->nr_addrs; i++) seq_printf(m, " [%x] %pISpc%s\n", i, &alist->addrs[i].transport, alist->preferred == i ? "*" : ""); return 0; } static void *afs_proc_servers_start(struct seq_file *m, loff_t *_pos) __acquires(rcu) { rcu_read_lock(); return seq_hlist_start_head_rcu(&afs_seq2net(m)->fs_proc, *_pos); } static void *afs_proc_servers_next(struct seq_file *m, void *v, loff_t *_pos) { return seq_hlist_next_rcu(v, &afs_seq2net(m)->fs_proc, _pos); } static void afs_proc_servers_stop(struct seq_file *m, void *v) __releases(rcu) { rcu_read_unlock(); } static const struct seq_operations afs_proc_servers_ops = { .start = afs_proc_servers_start, .next = afs_proc_servers_next, .stop = afs_proc_servers_stop, .show = afs_proc_servers_show, }; /* * Display the list of strings that may be substituted for the @sys pathname * macro. 
*/ static int afs_proc_sysname_show(struct seq_file *m, void *v) { struct afs_net *net = afs_seq2net(m); struct afs_sysnames *sysnames = net->sysnames; unsigned int i = (unsigned long)v - 1; if (i < sysnames->nr) seq_printf(m, "%s\n", sysnames->subs[i]); return 0; } static void *afs_proc_sysname_start(struct seq_file *m, loff_t *pos) __acquires(&net->sysnames_lock) { struct afs_net *net = afs_seq2net(m); struct afs_sysnames *names; read_lock(&net->sysnames_lock); names = net->sysnames; if (*pos >= names->nr) return NULL; return (void *)(unsigned long)(*pos + 1); } static void *afs_proc_sysname_next(struct seq_file *m, void *v, loff_t *pos) { struct afs_net *net = afs_seq2net(m); struct afs_sysnames *names = net->sysnames; *pos += 1; if (*pos >= names->nr) return NULL; return (void *)(unsigned long)(*pos + 1); } static void afs_proc_sysname_stop(struct seq_file *m, void *v) __releases(&net->sysnames_lock) { struct afs_net *net = afs_seq2net(m); read_unlock(&net->sysnames_lock); } static const struct seq_operations afs_proc_sysname_ops = { .start = afs_proc_sysname_start, .next = afs_proc_sysname_next, .stop = afs_proc_sysname_stop, .show = afs_proc_sysname_show, }; /* * Allow the @sys substitution to be configured. */ static int afs_proc_sysname_write(struct file *file, char *buf, size_t size) { struct afs_sysnames *sysnames, *kill; struct seq_file *m = file->private_data; struct afs_net *net = afs_seq2net(m); char *s, *p, *sub; int ret, len; sysnames = kzalloc(sizeof(*sysnames), GFP_KERNEL); if (!sysnames) return -ENOMEM; refcount_set(&sysnames->usage, 1); kill = sysnames; p = buf; while ((s = strsep(&p, " \t\n"))) { len = strlen(s); if (len == 0) continue; ret = -ENAMETOOLONG; if (len >= AFSNAMEMAX) goto error; if (len >= 4 && s[len - 4] == '@' && s[len - 3] == 's' && s[len - 2] == 'y' && s[len - 1] == 's') /* Protect against recursion */ goto invalid; if (s[0] == '.' 
&& (len < 2 || (len == 2 && s[1] == '.'))) goto invalid; if (memchr(s, '/', len)) goto invalid; ret = -EFBIG; if (sysnames->nr >= AFS_NR_SYSNAME) goto out; if (strcmp(s, afs_init_sysname) == 0) { sub = (char *)afs_init_sysname; } else { ret = -ENOMEM; sub = kmemdup(s, len + 1, GFP_KERNEL); if (!sub) goto out; } sysnames->subs[sysnames->nr] = sub; sysnames->nr++; } if (sysnames->nr == 0) { sysnames->subs[0] = sysnames->blank; sysnames->nr++; } write_lock(&net->sysnames_lock); kill = net->sysnames; net->sysnames = sysnames; write_unlock(&net->sysnames_lock); ret = 0; out: afs_put_sysnames(kill); return ret; invalid: ret = -EINVAL; error: goto out; } void afs_put_sysnames(struct afs_sysnames *sysnames) { int i; if (sysnames && refcount_dec_and_test(&sysnames->usage)) { for (i = 0; i < sysnames->nr; i++) if (sysnames->subs[i] != afs_init_sysname && sysnames->subs[i] != sysnames->blank) kfree(sysnames->subs[i]); kfree(sysnames); } } /* * Display general per-net namespace statistics */ static int afs_proc_stats_show(struct seq_file *m, void *v) { struct afs_net *net = afs_seq2net_single(m); seq_puts(m, "kAFS statistics\n"); seq_printf(m, "dir-mgmt: look=%u reval=%u inval=%u relpg=%u\n", atomic_read(&net->n_lookup), atomic_read(&net->n_reval), atomic_read(&net->n_inval), atomic_read(&net->n_relpg)); seq_printf(m, "dir-data: rdpg=%u\n", atomic_read(&net->n_read_dir)); seq_printf(m, "dir-edit: cr=%u rm=%u\n", atomic_read(&net->n_dir_cr), atomic_read(&net->n_dir_rm)); seq_printf(m, "file-rd : n=%u nb=%lu\n", atomic_read(&net->n_fetches), atomic_long_read(&net->n_fetch_bytes)); seq_printf(m, "file-wr : n=%u nb=%lu\n", atomic_read(&net->n_stores), atomic_long_read(&net->n_store_bytes)); return 0; } /* * initialise /proc/fs/afs/<cell>/ */ int afs_proc_cell_setup(struct afs_cell *cell) { struct proc_dir_entry *dir; struct afs_net *net = cell->net; _enter("%p{%s},%p", cell, cell->name, net->proc_afs); dir = proc_net_mkdir(net->net, cell->name, net->proc_afs); if (!dir) goto error_dir; if (!proc_create_net_data("vlservers", 0444, dir, &afs_proc_cell_vlservers_ops, sizeof(struct afs_vl_seq_net_private), cell) || !proc_create_net_data("volumes", 0444, dir, &afs_proc_cell_volumes_ops, sizeof(struct seq_net_private), cell)) goto error_tree; _leave(" = 0"); return 0; error_tree: remove_proc_subtree(cell->name, net->proc_afs); error_dir: _leave(" = -ENOMEM"); return -ENOMEM; } /* * remove /proc/fs/afs/<cell>/ */ void afs_proc_cell_remove(struct afs_cell *cell) { struct afs_net *net = cell->net; _enter(""); remove_proc_subtree(cell->name, net->proc_afs); _leave(""); } /* * initialise the /proc/fs/afs/ directory */ int afs_proc_init(struct afs_net *net) { struct proc_dir_entry *p; _enter(""); p = proc_net_mkdir(net->net, "afs", net->net->proc_net); if (!p) goto error_dir; if (!proc_create_net_data_write("cells", 0644, p, &afs_proc_cells_ops, afs_proc_cells_write, sizeof(struct seq_net_private), NULL) || !proc_create_net_single_write("rootcell", 0644, p, afs_proc_rootcell_show, afs_proc_rootcell_write, NULL) || !proc_create_net("servers", 0444, p, &afs_proc_servers_ops, sizeof(struct seq_net_private)) || !proc_create_net_single("stats", 0444, p, afs_proc_stats_show, NULL) || !proc_create_net_data_write("sysname", 0644, p, &afs_proc_sysname_ops, afs_proc_sysname_write, sizeof(struct seq_net_private), NULL)) goto error_tree; net->proc_afs = p; _leave(" = 0"); return 0; error_tree: proc_remove(p); error_dir: _leave(" = -ENOMEM"); return -ENOMEM; } /* * clean up the /proc/fs/afs/ directory */ void 
afs_proc_cleanup(struct afs_net *net) { proc_remove(net->proc_afs); net->proc_afs = NULL; }
linux-master
fs/afs/proc.c
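The afs_proc_cells_write() handler in the proc.c record above accepts lines of the form "add <cellname> <IP>[:<IP>][:<IP>]": it trims the buffer at the first newline, then splits it into a command, a cell name and an optional address list, collapsing runs of spaces, before handing the pieces to afs_lookup_cell(). Below is a minimal userspace sketch of that tokenising step only; it is plain standalone C, not kernel code, and the sample input string and parse_cells_cmd() name are invented for illustration.

/* Userspace sketch of the command/name/args splitting done by
 * afs_proc_cells_write(); standalone and illustrative only. */
#include <stdio.h>
#include <string.h>

static int parse_cells_cmd(char *buf, size_t size,
			   char **cmd, char **name, char **args)
{
	char *p;

	/* trim to first NL, as the kernel handler does */
	p = memchr(buf, '\n', size);
	if (p)
		*p = 0;

	/* split into command, name and optional argument list */
	*cmd = buf;
	p = strchr(buf, ' ');
	if (!p)
		return -1;
	do { *p++ = 0; } while (*p == ' ');
	if (!*p)
		return -1;
	*name = p;

	p = strchr(p, ' ');
	if (p) {
		do { *p++ = 0; } while (*p == ' ');
		if (!*p)
			return -1;
	}
	*args = p;
	return 0;
}

int main(void)
{
	char buf[] = "add example.org 192.168.231.14:10.0.0.1\n";
	char *cmd, *name, *args;

	if (parse_cells_cmd(buf, sizeof(buf) - 1, &cmd, &name, &args) < 0) {
		fprintf(stderr, "invalid command\n");
		return 1;
	}
	printf("cmd=%s name=%s args=%s\n", cmd, name, args ? args : "(none)");
	return 0;
}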
// SPDX-License-Identifier: GPL-2.0-or-later /* Maintain an RxRPC server socket to do AFS communications through * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells ([email protected]) */ #include <linux/slab.h> #include <linux/sched/signal.h> #include <net/sock.h> #include <net/af_rxrpc.h> #include "internal.h" #include "afs_cm.h" #include "protocol_yfs.h" #define RXRPC_TRACE_ONLY_DEFINE_ENUMS #include <trace/events/rxrpc.h> struct workqueue_struct *afs_async_calls; static void afs_wake_up_call_waiter(struct sock *, struct rxrpc_call *, unsigned long); static void afs_wake_up_async_call(struct sock *, struct rxrpc_call *, unsigned long); static void afs_process_async_call(struct work_struct *); static void afs_rx_new_call(struct sock *, struct rxrpc_call *, unsigned long); static void afs_rx_discard_new_call(struct rxrpc_call *, unsigned long); static int afs_deliver_cm_op_id(struct afs_call *); /* asynchronous incoming call initial processing */ static const struct afs_call_type afs_RXCMxxxx = { .name = "CB.xxxx", .deliver = afs_deliver_cm_op_id, }; /* * open an RxRPC socket and bind it to be a server for callback notifications * - the socket is left in blocking mode and non-blocking ops use MSG_DONTWAIT */ int afs_open_socket(struct afs_net *net) { struct sockaddr_rxrpc srx; struct socket *socket; int ret; _enter(""); ret = sock_create_kern(net->net, AF_RXRPC, SOCK_DGRAM, PF_INET6, &socket); if (ret < 0) goto error_1; socket->sk->sk_allocation = GFP_NOFS; /* bind the callback manager's address to make this a server socket */ memset(&srx, 0, sizeof(srx)); srx.srx_family = AF_RXRPC; srx.srx_service = CM_SERVICE; srx.transport_type = SOCK_DGRAM; srx.transport_len = sizeof(srx.transport.sin6); srx.transport.sin6.sin6_family = AF_INET6; srx.transport.sin6.sin6_port = htons(AFS_CM_PORT); ret = rxrpc_sock_set_min_security_level(socket->sk, RXRPC_SECURITY_ENCRYPT); if (ret < 0) goto error_2; ret = kernel_bind(socket, (struct sockaddr *) &srx, sizeof(srx)); if (ret == -EADDRINUSE) { srx.transport.sin6.sin6_port = 0; ret = kernel_bind(socket, (struct sockaddr *) &srx, sizeof(srx)); } if (ret < 0) goto error_2; srx.srx_service = YFS_CM_SERVICE; ret = kernel_bind(socket, (struct sockaddr *) &srx, sizeof(srx)); if (ret < 0) goto error_2; /* Ideally, we'd turn on service upgrade here, but we can't because * OpenAFS is buggy and leaks the userStatus field from packet to * packet and between FS packets and CB packets - so if we try to do an * upgrade on an FS packet, OpenAFS will leak that into the CB packet * it sends back to us. */ rxrpc_kernel_new_call_notification(socket, afs_rx_new_call, afs_rx_discard_new_call); ret = kernel_listen(socket, INT_MAX); if (ret < 0) goto error_2; net->socket = socket; afs_charge_preallocation(&net->charge_preallocation_work); _leave(" = 0"); return 0; error_2: sock_release(socket); error_1: _leave(" = %d", ret); return ret; } /* * close the RxRPC socket AFS was using */ void afs_close_socket(struct afs_net *net) { _enter(""); kernel_listen(net->socket, 0); flush_workqueue(afs_async_calls); if (net->spare_incoming_call) { afs_put_call(net->spare_incoming_call); net->spare_incoming_call = NULL; } _debug("outstanding %u", atomic_read(&net->nr_outstanding_calls)); wait_var_event(&net->nr_outstanding_calls, !atomic_read(&net->nr_outstanding_calls)); _debug("no outstanding calls"); kernel_sock_shutdown(net->socket, SHUT_RDWR); flush_workqueue(afs_async_calls); sock_release(net->socket); _debug("dework"); _leave(""); } /* * Allocate a call. 
*/ static struct afs_call *afs_alloc_call(struct afs_net *net, const struct afs_call_type *type, gfp_t gfp) { struct afs_call *call; int o; call = kzalloc(sizeof(*call), gfp); if (!call) return NULL; call->type = type; call->net = net; call->debug_id = atomic_inc_return(&rxrpc_debug_id); refcount_set(&call->ref, 1); INIT_WORK(&call->async_work, afs_process_async_call); init_waitqueue_head(&call->waitq); spin_lock_init(&call->state_lock); call->iter = &call->def_iter; o = atomic_inc_return(&net->nr_outstanding_calls); trace_afs_call(call->debug_id, afs_call_trace_alloc, 1, o, __builtin_return_address(0)); return call; } /* * Dispose of a reference on a call. */ void afs_put_call(struct afs_call *call) { struct afs_net *net = call->net; unsigned int debug_id = call->debug_id; bool zero; int r, o; zero = __refcount_dec_and_test(&call->ref, &r); o = atomic_read(&net->nr_outstanding_calls); trace_afs_call(debug_id, afs_call_trace_put, r - 1, o, __builtin_return_address(0)); if (zero) { ASSERT(!work_pending(&call->async_work)); ASSERT(call->type->name != NULL); if (call->rxcall) { rxrpc_kernel_shutdown_call(net->socket, call->rxcall); rxrpc_kernel_put_call(net->socket, call->rxcall); call->rxcall = NULL; } if (call->type->destructor) call->type->destructor(call); afs_unuse_server_notime(call->net, call->server, afs_server_trace_put_call); afs_put_addrlist(call->alist); kfree(call->request); trace_afs_call(call->debug_id, afs_call_trace_free, 0, o, __builtin_return_address(0)); kfree(call); o = atomic_dec_return(&net->nr_outstanding_calls); if (o == 0) wake_up_var(&net->nr_outstanding_calls); } } static struct afs_call *afs_get_call(struct afs_call *call, enum afs_call_trace why) { int r; __refcount_inc(&call->ref, &r); trace_afs_call(call->debug_id, why, r + 1, atomic_read(&call->net->nr_outstanding_calls), __builtin_return_address(0)); return call; } /* * Queue the call for actual work. */ static void afs_queue_call_work(struct afs_call *call) { if (call->type->work) { INIT_WORK(&call->work, call->type->work); afs_get_call(call, afs_call_trace_work); if (!queue_work(afs_wq, &call->work)) afs_put_call(call); } } /* * allocate a call with flat request and reply buffers */ struct afs_call *afs_alloc_flat_call(struct afs_net *net, const struct afs_call_type *type, size_t request_size, size_t reply_max) { struct afs_call *call; call = afs_alloc_call(net, type, GFP_NOFS); if (!call) goto nomem_call; if (request_size) { call->request_size = request_size; call->request = kmalloc(request_size, GFP_NOFS); if (!call->request) goto nomem_free; } if (reply_max) { call->reply_max = reply_max; call->buffer = kmalloc(reply_max, GFP_NOFS); if (!call->buffer) goto nomem_free; } afs_extract_to_buf(call, call->reply_max); call->operation_ID = type->op; init_waitqueue_head(&call->waitq); return call; nomem_free: afs_put_call(call); nomem_call: return NULL; } /* * clean up a call with flat buffer */ void afs_flat_call_destructor(struct afs_call *call) { _enter(""); kfree(call->request); call->request = NULL; kfree(call->buffer); call->buffer = NULL; } /* * Advance the AFS call state when the RxRPC call ends the transmit phase. */ static void afs_notify_end_request_tx(struct sock *sock, struct rxrpc_call *rxcall, unsigned long call_user_ID) { struct afs_call *call = (struct afs_call *)call_user_ID; afs_set_call_state(call, AFS_CALL_CL_REQUESTING, AFS_CALL_CL_AWAIT_REPLY); } /* * Initiate a call and synchronously queue up the parameters for dispatch. 
Any * error is stored into the call struct, which the caller must check for. */ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp) { struct sockaddr_rxrpc *srx = &ac->alist->addrs[ac->index]; struct rxrpc_call *rxcall; struct msghdr msg; struct kvec iov[1]; size_t len; s64 tx_total_len; int ret; _enter(",{%pISp},", &srx->transport); ASSERT(call->type != NULL); ASSERT(call->type->name != NULL); _debug("____MAKE %p{%s,%x} [%d]____", call, call->type->name, key_serial(call->key), atomic_read(&call->net->nr_outstanding_calls)); call->addr_ix = ac->index; call->alist = afs_get_addrlist(ac->alist); /* Work out the length we're going to transmit. This is awkward for * calls such as FS.StoreData where there's an extra injection of data * after the initial fixed part. */ tx_total_len = call->request_size; if (call->write_iter) tx_total_len += iov_iter_count(call->write_iter); /* If the call is going to be asynchronous, we need an extra ref for * the call to hold itself so the caller need not hang on to its ref. */ if (call->async) { afs_get_call(call, afs_call_trace_get); call->drop_ref = true; } /* create a call */ rxcall = rxrpc_kernel_begin_call(call->net->socket, srx, call->key, (unsigned long)call, tx_total_len, call->max_lifespan, gfp, (call->async ? afs_wake_up_async_call : afs_wake_up_call_waiter), call->upgrade, (call->intr ? RXRPC_PREINTERRUPTIBLE : RXRPC_UNINTERRUPTIBLE), call->debug_id); if (IS_ERR(rxcall)) { ret = PTR_ERR(rxcall); call->error = ret; goto error_kill_call; } call->rxcall = rxcall; call->issue_time = ktime_get_real(); /* send the request */ iov[0].iov_base = call->request; iov[0].iov_len = call->request_size; msg.msg_name = NULL; msg.msg_namelen = 0; iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, iov, 1, call->request_size); msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_flags = MSG_WAITALL | (call->write_iter ? MSG_MORE : 0); ret = rxrpc_kernel_send_data(call->net->socket, rxcall, &msg, call->request_size, afs_notify_end_request_tx); if (ret < 0) goto error_do_abort; if (call->write_iter) { msg.msg_iter = *call->write_iter; msg.msg_flags &= ~MSG_MORE; trace_afs_send_data(call, &msg); ret = rxrpc_kernel_send_data(call->net->socket, call->rxcall, &msg, iov_iter_count(&msg.msg_iter), afs_notify_end_request_tx); *call->write_iter = msg.msg_iter; trace_afs_sent_data(call, &msg, ret); if (ret < 0) goto error_do_abort; } /* Note that at this point, we may have received the reply or an abort * - and an asynchronous call may already have completed. * * afs_wait_for_call_to_complete(call, ac) * must be called to synchronously clean up. */ return; error_do_abort: if (ret != -ECONNABORTED) { rxrpc_kernel_abort_call(call->net->socket, rxcall, RX_USER_ABORT, ret, afs_abort_send_data_error); } else { len = 0; iov_iter_kvec(&msg.msg_iter, ITER_DEST, NULL, 0, 0); rxrpc_kernel_recv_data(call->net->socket, rxcall, &msg.msg_iter, &len, false, &call->abort_code, &call->service_id); ac->abort_code = call->abort_code; ac->responded = true; } call->error = ret; trace_afs_call_done(call); error_kill_call: if (call->type->done) call->type->done(call); /* We need to dispose of the extra ref we grabbed for an async call. * The call, however, might be queued on afs_async_calls and we need to * make sure we don't get any more notifications that might requeue it. 
*/ if (call->rxcall) rxrpc_kernel_shutdown_call(call->net->socket, call->rxcall); if (call->async) { if (cancel_work_sync(&call->async_work)) afs_put_call(call); afs_put_call(call); } ac->error = ret; call->state = AFS_CALL_COMPLETE; _leave(" = %d", ret); } /* * Log remote abort codes that indicate that we have a protocol disagreement * with the server. */ static void afs_log_error(struct afs_call *call, s32 remote_abort) { static int max = 0; const char *msg; int m; switch (remote_abort) { case RX_EOF: msg = "unexpected EOF"; break; case RXGEN_CC_MARSHAL: msg = "client marshalling"; break; case RXGEN_CC_UNMARSHAL: msg = "client unmarshalling"; break; case RXGEN_SS_MARSHAL: msg = "server marshalling"; break; case RXGEN_SS_UNMARSHAL: msg = "server unmarshalling"; break; case RXGEN_DECODE: msg = "opcode decode"; break; case RXGEN_SS_XDRFREE: msg = "server XDR cleanup"; break; case RXGEN_CC_XDRFREE: msg = "client XDR cleanup"; break; case -32: msg = "insufficient data"; break; default: return; } m = max; if (m < 3) { max = m + 1; pr_notice("kAFS: Peer reported %s failure on %s [%pISp]\n", msg, call->type->name, &call->alist->addrs[call->addr_ix].transport); } } /* * deliver messages to a call */ static void afs_deliver_to_call(struct afs_call *call) { enum afs_call_state state; size_t len; u32 abort_code, remote_abort = 0; int ret; _enter("%s", call->type->name); while (state = READ_ONCE(call->state), state == AFS_CALL_CL_AWAIT_REPLY || state == AFS_CALL_SV_AWAIT_OP_ID || state == AFS_CALL_SV_AWAIT_REQUEST || state == AFS_CALL_SV_AWAIT_ACK ) { if (state == AFS_CALL_SV_AWAIT_ACK) { len = 0; iov_iter_kvec(&call->def_iter, ITER_DEST, NULL, 0, 0); ret = rxrpc_kernel_recv_data(call->net->socket, call->rxcall, &call->def_iter, &len, false, &remote_abort, &call->service_id); trace_afs_receive_data(call, &call->def_iter, false, ret); if (ret == -EINPROGRESS || ret == -EAGAIN) return; if (ret < 0 || ret == 1) { if (ret == 1) ret = 0; goto call_complete; } return; } ret = call->type->deliver(call); state = READ_ONCE(call->state); if (ret == 0 && call->unmarshalling_error) ret = -EBADMSG; switch (ret) { case 0: afs_queue_call_work(call); if (state == AFS_CALL_CL_PROC_REPLY) { if (call->op) set_bit(AFS_SERVER_FL_MAY_HAVE_CB, &call->op->server->flags); goto call_complete; } ASSERTCMP(state, >, AFS_CALL_CL_PROC_REPLY); goto done; case -EINPROGRESS: case -EAGAIN: goto out; case -ECONNABORTED: ASSERTCMP(state, ==, AFS_CALL_COMPLETE); afs_log_error(call, call->abort_code); goto done; case -ENOTSUPP: abort_code = RXGEN_OPCODE; rxrpc_kernel_abort_call(call->net->socket, call->rxcall, abort_code, ret, afs_abort_op_not_supported); goto local_abort; case -EIO: pr_err("kAFS: Call %u in bad state %u\n", call->debug_id, state); fallthrough; case -ENODATA: case -EBADMSG: case -EMSGSIZE: case -ENOMEM: case -EFAULT: abort_code = RXGEN_CC_UNMARSHAL; if (state != AFS_CALL_CL_AWAIT_REPLY) abort_code = RXGEN_SS_UNMARSHAL; rxrpc_kernel_abort_call(call->net->socket, call->rxcall, abort_code, ret, afs_abort_unmarshal_error); goto local_abort; default: abort_code = RX_CALL_DEAD; rxrpc_kernel_abort_call(call->net->socket, call->rxcall, abort_code, ret, afs_abort_general_error); goto local_abort; } } done: if (call->type->done) call->type->done(call); out: _leave(""); return; local_abort: abort_code = 0; call_complete: afs_set_call_complete(call, ret, remote_abort); state = AFS_CALL_COMPLETE; goto done; } /* * Wait synchronously for a call to complete and clean up the call struct. 
*/ long afs_wait_for_call_to_complete(struct afs_call *call, struct afs_addr_cursor *ac) { long ret; bool rxrpc_complete = false; DECLARE_WAITQUEUE(myself, current); _enter(""); ret = call->error; if (ret < 0) goto out; add_wait_queue(&call->waitq, &myself); for (;;) { set_current_state(TASK_UNINTERRUPTIBLE); /* deliver any messages that are in the queue */ if (!afs_check_call_state(call, AFS_CALL_COMPLETE) && call->need_attention) { call->need_attention = false; __set_current_state(TASK_RUNNING); afs_deliver_to_call(call); continue; } if (afs_check_call_state(call, AFS_CALL_COMPLETE)) break; if (!rxrpc_kernel_check_life(call->net->socket, call->rxcall)) { /* rxrpc terminated the call. */ rxrpc_complete = true; break; } schedule(); } remove_wait_queue(&call->waitq, &myself); __set_current_state(TASK_RUNNING); if (!afs_check_call_state(call, AFS_CALL_COMPLETE)) { if (rxrpc_complete) { afs_set_call_complete(call, call->error, call->abort_code); } else { /* Kill off the call if it's still live. */ _debug("call interrupted"); if (rxrpc_kernel_abort_call(call->net->socket, call->rxcall, RX_USER_ABORT, -EINTR, afs_abort_interrupted)) afs_set_call_complete(call, -EINTR, 0); } } spin_lock_bh(&call->state_lock); ac->abort_code = call->abort_code; ac->error = call->error; spin_unlock_bh(&call->state_lock); ret = ac->error; switch (ret) { case 0: ret = call->ret0; call->ret0 = 0; fallthrough; case -ECONNABORTED: ac->responded = true; break; } out: _debug("call complete"); afs_put_call(call); _leave(" = %p", (void *)ret); return ret; } /* * wake up a waiting call */ static void afs_wake_up_call_waiter(struct sock *sk, struct rxrpc_call *rxcall, unsigned long call_user_ID) { struct afs_call *call = (struct afs_call *)call_user_ID; call->need_attention = true; wake_up(&call->waitq); } /* * wake up an asynchronous call */ static void afs_wake_up_async_call(struct sock *sk, struct rxrpc_call *rxcall, unsigned long call_user_ID) { struct afs_call *call = (struct afs_call *)call_user_ID; int r; trace_afs_notify_call(rxcall, call); call->need_attention = true; if (__refcount_inc_not_zero(&call->ref, &r)) { trace_afs_call(call->debug_id, afs_call_trace_wake, r + 1, atomic_read(&call->net->nr_outstanding_calls), __builtin_return_address(0)); if (!queue_work(afs_async_calls, &call->async_work)) afs_put_call(call); } } /* * Perform I/O processing on an asynchronous call. The work item carries a ref * to the call struct that we either need to release or to pass on. */ static void afs_process_async_call(struct work_struct *work) { struct afs_call *call = container_of(work, struct afs_call, async_work); _enter(""); if (call->state < AFS_CALL_COMPLETE && call->need_attention) { call->need_attention = false; afs_deliver_to_call(call); } afs_put_call(call); _leave(""); } static void afs_rx_attach(struct rxrpc_call *rxcall, unsigned long user_call_ID) { struct afs_call *call = (struct afs_call *)user_call_ID; call->rxcall = rxcall; } /* * Charge the incoming call preallocation. 
*/ void afs_charge_preallocation(struct work_struct *work) { struct afs_net *net = container_of(work, struct afs_net, charge_preallocation_work); struct afs_call *call = net->spare_incoming_call; for (;;) { if (!call) { call = afs_alloc_call(net, &afs_RXCMxxxx, GFP_KERNEL); if (!call) break; call->drop_ref = true; call->async = true; call->state = AFS_CALL_SV_AWAIT_OP_ID; init_waitqueue_head(&call->waitq); afs_extract_to_tmp(call); } if (rxrpc_kernel_charge_accept(net->socket, afs_wake_up_async_call, afs_rx_attach, (unsigned long)call, GFP_KERNEL, call->debug_id) < 0) break; call = NULL; } net->spare_incoming_call = call; } /* * Discard a preallocated call when a socket is shut down. */ static void afs_rx_discard_new_call(struct rxrpc_call *rxcall, unsigned long user_call_ID) { struct afs_call *call = (struct afs_call *)user_call_ID; call->rxcall = NULL; afs_put_call(call); } /* * Notification of an incoming call. */ static void afs_rx_new_call(struct sock *sk, struct rxrpc_call *rxcall, unsigned long user_call_ID) { struct afs_net *net = afs_sock2net(sk); queue_work(afs_wq, &net->charge_preallocation_work); } /* * Grab the operation ID from an incoming cache manager call. The socket * buffer is discarded on error or if we don't yet have sufficient data. */ static int afs_deliver_cm_op_id(struct afs_call *call) { int ret; _enter("{%zu}", iov_iter_count(call->iter)); /* the operation ID forms the first four bytes of the request data */ ret = afs_extract_data(call, true); if (ret < 0) return ret; call->operation_ID = ntohl(call->tmp); afs_set_call_state(call, AFS_CALL_SV_AWAIT_OP_ID, AFS_CALL_SV_AWAIT_REQUEST); /* ask the cache manager to route the call (it'll change the call type * if successful) */ if (!afs_cm_incoming_call(call)) return -ENOTSUPP; trace_afs_cb_call(call); /* pass responsibility for the remainder of this message off to the * cache manager op */ return call->type->deliver(call); } /* * Advance the AFS call state when an RxRPC service call ends the transmit * phase. 
*/ static void afs_notify_end_reply_tx(struct sock *sock, struct rxrpc_call *rxcall, unsigned long call_user_ID) { struct afs_call *call = (struct afs_call *)call_user_ID; afs_set_call_state(call, AFS_CALL_SV_REPLYING, AFS_CALL_SV_AWAIT_ACK); } /* * send an empty reply */ void afs_send_empty_reply(struct afs_call *call) { struct afs_net *net = call->net; struct msghdr msg; _enter(""); rxrpc_kernel_set_tx_length(net->socket, call->rxcall, 0); msg.msg_name = NULL; msg.msg_namelen = 0; iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, NULL, 0, 0); msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_flags = 0; switch (rxrpc_kernel_send_data(net->socket, call->rxcall, &msg, 0, afs_notify_end_reply_tx)) { case 0: _leave(" [replied]"); return; case -ENOMEM: _debug("oom"); rxrpc_kernel_abort_call(net->socket, call->rxcall, RXGEN_SS_MARSHAL, -ENOMEM, afs_abort_oom); fallthrough; default: _leave(" [error]"); return; } } /* * send a simple reply */ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len) { struct afs_net *net = call->net; struct msghdr msg; struct kvec iov[1]; int n; _enter(""); rxrpc_kernel_set_tx_length(net->socket, call->rxcall, len); iov[0].iov_base = (void *) buf; iov[0].iov_len = len; msg.msg_name = NULL; msg.msg_namelen = 0; iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, iov, 1, len); msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_flags = 0; n = rxrpc_kernel_send_data(net->socket, call->rxcall, &msg, len, afs_notify_end_reply_tx); if (n >= 0) { /* Success */ _leave(" [replied]"); return; } if (n == -ENOMEM) { _debug("oom"); rxrpc_kernel_abort_call(net->socket, call->rxcall, RXGEN_SS_MARSHAL, -ENOMEM, afs_abort_oom); } _leave(" [error]"); } /* * Extract a piece of data from the received data socket buffers. */ int afs_extract_data(struct afs_call *call, bool want_more) { struct afs_net *net = call->net; struct iov_iter *iter = call->iter; enum afs_call_state state; u32 remote_abort = 0; int ret; _enter("{%s,%zu,%zu},%d", call->type->name, call->iov_len, iov_iter_count(iter), want_more); ret = rxrpc_kernel_recv_data(net->socket, call->rxcall, iter, &call->iov_len, want_more, &remote_abort, &call->service_id); trace_afs_receive_data(call, call->iter, want_more, ret); if (ret == 0 || ret == -EAGAIN) return ret; state = READ_ONCE(call->state); if (ret == 1) { switch (state) { case AFS_CALL_CL_AWAIT_REPLY: afs_set_call_state(call, state, AFS_CALL_CL_PROC_REPLY); break; case AFS_CALL_SV_AWAIT_REQUEST: afs_set_call_state(call, state, AFS_CALL_SV_REPLYING); break; case AFS_CALL_COMPLETE: kdebug("prem complete %d", call->error); return afs_io_error(call, afs_io_error_extract); default: break; } return 0; } afs_set_call_complete(call, ret, remote_abort); return ret; } /* * Log protocol error production. */ noinline int afs_protocol_error(struct afs_call *call, enum afs_eproto_cause cause) { trace_afs_protocol_error(call, cause); if (call) call->unmarshalling_error = true; return -EBADMSG; }
linux-master
fs/afs/rxrpc.c
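The rxrpc.c record above leans on a simple reference-counting discipline: afs_alloc_call() starts a call with one ref and bumps net->nr_outstanding_calls, every asynchronous hand-off takes an extra ref via afs_get_call(), and the final afs_put_call() frees the call, drops the outstanding-call count and wakes anyone waiting for it to reach zero. The sketch below reproduces just that get/put/wake-on-zero pattern in standalone C11 with atomics; the demo_* names are made up, and none of the kernel's tracing or rxrpc handling is modelled.

/* Userspace sketch of the ref-count lifetime pattern used by
 * afs_get_call()/afs_put_call(); illustrative names only. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static atomic_int nr_outstanding_calls;

struct demo_call {
	atomic_int ref;
	int debug_id;
};

static struct demo_call *demo_alloc_call(int debug_id)
{
	struct demo_call *call = calloc(1, sizeof(*call));

	if (!call)
		return NULL;
	atomic_init(&call->ref, 1);
	call->debug_id = debug_id;
	atomic_fetch_add(&nr_outstanding_calls, 1);
	return call;
}

static struct demo_call *demo_get_call(struct demo_call *call)
{
	atomic_fetch_add(&call->ref, 1);
	return call;
}

static void demo_put_call(struct demo_call *call)
{
	/* fetch_sub returns the old value; 1 means we dropped the last ref */
	if (atomic_fetch_sub(&call->ref, 1) == 1) {
		printf("call %d freed\n", call->debug_id);
		free(call);
		if (atomic_fetch_sub(&nr_outstanding_calls, 1) == 1)
			printf("no outstanding calls - wake waiter\n");
	}
}

int main(void)
{
	struct demo_call *call = demo_alloc_call(1);

	if (!call)
		return 1;
	demo_get_call(call);	/* extra ref for an async completion path */
	demo_put_call(call);	/* async side drops its ref */
	demo_put_call(call);	/* caller drops the last ref, call is freed */
	return 0;
}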
// SPDX-License-Identifier: GPL-2.0-or-later /* AFS filesystem file handling * * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells ([email protected]) */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/pagemap.h> #include <linux/writeback.h> #include <linux/gfp.h> #include <linux/task_io_accounting_ops.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/netfs.h> #include "internal.h" static int afs_file_mmap(struct file *file, struct vm_area_struct *vma); static int afs_symlink_read_folio(struct file *file, struct folio *folio); static void afs_invalidate_folio(struct folio *folio, size_t offset, size_t length); static bool afs_release_folio(struct folio *folio, gfp_t gfp_flags); static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter); static ssize_t afs_file_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags); static void afs_vm_open(struct vm_area_struct *area); static void afs_vm_close(struct vm_area_struct *area); static vm_fault_t afs_vm_map_pages(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff); const struct file_operations afs_file_operations = { .open = afs_open, .release = afs_release, .llseek = generic_file_llseek, .read_iter = afs_file_read_iter, .write_iter = afs_file_write, .mmap = afs_file_mmap, .splice_read = afs_file_splice_read, .splice_write = iter_file_splice_write, .fsync = afs_fsync, .lock = afs_lock, .flock = afs_flock, }; const struct inode_operations afs_file_inode_operations = { .getattr = afs_getattr, .setattr = afs_setattr, .permission = afs_permission, }; const struct address_space_operations afs_file_aops = { .read_folio = netfs_read_folio, .readahead = netfs_readahead, .dirty_folio = afs_dirty_folio, .launder_folio = afs_launder_folio, .release_folio = afs_release_folio, .invalidate_folio = afs_invalidate_folio, .write_begin = afs_write_begin, .write_end = afs_write_end, .writepages = afs_writepages, .migrate_folio = filemap_migrate_folio, }; const struct address_space_operations afs_symlink_aops = { .read_folio = afs_symlink_read_folio, .release_folio = afs_release_folio, .invalidate_folio = afs_invalidate_folio, .migrate_folio = filemap_migrate_folio, }; static const struct vm_operations_struct afs_vm_ops = { .open = afs_vm_open, .close = afs_vm_close, .fault = filemap_fault, .map_pages = afs_vm_map_pages, .page_mkwrite = afs_page_mkwrite, }; /* * Discard a pin on a writeback key. */ void afs_put_wb_key(struct afs_wb_key *wbk) { if (wbk && refcount_dec_and_test(&wbk->usage)) { key_put(wbk->key); kfree(wbk); } } /* * Cache key for writeback. 
*/ int afs_cache_wb_key(struct afs_vnode *vnode, struct afs_file *af) { struct afs_wb_key *wbk, *p; wbk = kzalloc(sizeof(struct afs_wb_key), GFP_KERNEL); if (!wbk) return -ENOMEM; refcount_set(&wbk->usage, 2); wbk->key = af->key; spin_lock(&vnode->wb_lock); list_for_each_entry(p, &vnode->wb_keys, vnode_link) { if (p->key == wbk->key) goto found; } key_get(wbk->key); list_add_tail(&wbk->vnode_link, &vnode->wb_keys); spin_unlock(&vnode->wb_lock); af->wb = wbk; return 0; found: refcount_inc(&p->usage); spin_unlock(&vnode->wb_lock); af->wb = p; kfree(wbk); return 0; } /* * open an AFS file or directory and attach a key to it */ int afs_open(struct inode *inode, struct file *file) { struct afs_vnode *vnode = AFS_FS_I(inode); struct afs_file *af; struct key *key; int ret; _enter("{%llx:%llu},", vnode->fid.vid, vnode->fid.vnode); key = afs_request_key(vnode->volume->cell); if (IS_ERR(key)) { ret = PTR_ERR(key); goto error; } af = kzalloc(sizeof(*af), GFP_KERNEL); if (!af) { ret = -ENOMEM; goto error_key; } af->key = key; ret = afs_validate(vnode, key); if (ret < 0) goto error_af; if (file->f_mode & FMODE_WRITE) { ret = afs_cache_wb_key(vnode, af); if (ret < 0) goto error_af; } if (file->f_flags & O_TRUNC) set_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags); fscache_use_cookie(afs_vnode_cache(vnode), file->f_mode & FMODE_WRITE); file->private_data = af; _leave(" = 0"); return 0; error_af: kfree(af); error_key: key_put(key); error: _leave(" = %d", ret); return ret; } /* * release an AFS file or directory and discard its key */ int afs_release(struct inode *inode, struct file *file) { struct afs_vnode_cache_aux aux; struct afs_vnode *vnode = AFS_FS_I(inode); struct afs_file *af = file->private_data; loff_t i_size; int ret = 0; _enter("{%llx:%llu},", vnode->fid.vid, vnode->fid.vnode); if ((file->f_mode & FMODE_WRITE)) ret = vfs_fsync(file, 0); file->private_data = NULL; if (af->wb) afs_put_wb_key(af->wb); if ((file->f_mode & FMODE_WRITE)) { i_size = i_size_read(&vnode->netfs.inode); afs_set_cache_aux(vnode, &aux); fscache_unuse_cookie(afs_vnode_cache(vnode), &aux, &i_size); } else { fscache_unuse_cookie(afs_vnode_cache(vnode), NULL, NULL); } key_put(af->key); kfree(af); afs_prune_wb_keys(vnode); _leave(" = %d", ret); return ret; } /* * Allocate a new read record. */ struct afs_read *afs_alloc_read(gfp_t gfp) { struct afs_read *req; req = kzalloc(sizeof(struct afs_read), gfp); if (req) refcount_set(&req->usage, 1); return req; } /* * Dispose of a ref to a read record. 
*/ void afs_put_read(struct afs_read *req) { if (refcount_dec_and_test(&req->usage)) { if (req->cleanup) req->cleanup(req); key_put(req->key); kfree(req); } } static void afs_fetch_data_notify(struct afs_operation *op) { struct afs_read *req = op->fetch.req; struct netfs_io_subrequest *subreq = req->subreq; int error = op->error; if (error == -ECONNABORTED) error = afs_abort_to_error(op->ac.abort_code); req->error = error; if (subreq) { __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags); netfs_subreq_terminated(subreq, error ?: req->actual_len, false); req->subreq = NULL; } else if (req->done) { req->done(req); } } static void afs_fetch_data_success(struct afs_operation *op) { struct afs_vnode *vnode = op->file[0].vnode; _enter("op=%08x", op->debug_id); afs_vnode_commit_status(op, &op->file[0]); afs_stat_v(vnode, n_fetches); atomic_long_add(op->fetch.req->actual_len, &op->net->n_fetch_bytes); afs_fetch_data_notify(op); } static void afs_fetch_data_put(struct afs_operation *op) { op->fetch.req->error = op->error; afs_put_read(op->fetch.req); } static const struct afs_operation_ops afs_fetch_data_operation = { .issue_afs_rpc = afs_fs_fetch_data, .issue_yfs_rpc = yfs_fs_fetch_data, .success = afs_fetch_data_success, .aborted = afs_check_for_remote_deletion, .failed = afs_fetch_data_notify, .put = afs_fetch_data_put, }; /* * Fetch file data from the volume. */ int afs_fetch_data(struct afs_vnode *vnode, struct afs_read *req) { struct afs_operation *op; _enter("%s{%llx:%llu.%u},%x,,,", vnode->volume->name, vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique, key_serial(req->key)); op = afs_alloc_operation(req->key, vnode->volume); if (IS_ERR(op)) { if (req->subreq) netfs_subreq_terminated(req->subreq, PTR_ERR(op), false); return PTR_ERR(op); } afs_op_set_vnode(op, 0, vnode); op->fetch.req = afs_get_read(req); op->ops = &afs_fetch_data_operation; return afs_do_sync_operation(op); } static void afs_issue_read(struct netfs_io_subrequest *subreq) { struct afs_vnode *vnode = AFS_FS_I(subreq->rreq->inode); struct afs_read *fsreq; fsreq = afs_alloc_read(GFP_NOFS); if (!fsreq) return netfs_subreq_terminated(subreq, -ENOMEM, false); fsreq->subreq = subreq; fsreq->pos = subreq->start + subreq->transferred; fsreq->len = subreq->len - subreq->transferred; fsreq->key = key_get(subreq->rreq->netfs_priv); fsreq->vnode = vnode; fsreq->iter = &fsreq->def_iter; iov_iter_xarray(&fsreq->def_iter, ITER_DEST, &fsreq->vnode->netfs.inode.i_mapping->i_pages, fsreq->pos, fsreq->len); afs_fetch_data(fsreq->vnode, fsreq); afs_put_read(fsreq); } static int afs_symlink_read_folio(struct file *file, struct folio *folio) { struct afs_vnode *vnode = AFS_FS_I(folio->mapping->host); struct afs_read *fsreq; int ret; fsreq = afs_alloc_read(GFP_NOFS); if (!fsreq) return -ENOMEM; fsreq->pos = folio_pos(folio); fsreq->len = folio_size(folio); fsreq->vnode = vnode; fsreq->iter = &fsreq->def_iter; iov_iter_xarray(&fsreq->def_iter, ITER_DEST, &folio->mapping->i_pages, fsreq->pos, fsreq->len); ret = afs_fetch_data(fsreq->vnode, fsreq); if (ret == 0) folio_mark_uptodate(folio); folio_unlock(folio); return ret; } static int afs_init_request(struct netfs_io_request *rreq, struct file *file) { rreq->netfs_priv = key_get(afs_file_key(file)); return 0; } static int afs_begin_cache_operation(struct netfs_io_request *rreq) { #ifdef CONFIG_AFS_FSCACHE struct afs_vnode *vnode = AFS_FS_I(rreq->inode); return fscache_begin_read_operation(&rreq->cache_resources, afs_vnode_cache(vnode)); #else return -ENOBUFS; #endif } static int 
afs_check_write_begin(struct file *file, loff_t pos, unsigned len, struct folio **foliop, void **_fsdata) { struct afs_vnode *vnode = AFS_FS_I(file_inode(file)); return test_bit(AFS_VNODE_DELETED, &vnode->flags) ? -ESTALE : 0; } static void afs_free_request(struct netfs_io_request *rreq) { key_put(rreq->netfs_priv); } const struct netfs_request_ops afs_req_ops = { .init_request = afs_init_request, .free_request = afs_free_request, .begin_cache_operation = afs_begin_cache_operation, .check_write_begin = afs_check_write_begin, .issue_read = afs_issue_read, }; int afs_write_inode(struct inode *inode, struct writeback_control *wbc) { fscache_unpin_writeback(wbc, afs_vnode_cache(AFS_FS_I(inode))); return 0; } /* * Adjust the dirty region of the page on truncation or full invalidation, * getting rid of the markers altogether if the region is entirely invalidated. */ static void afs_invalidate_dirty(struct folio *folio, size_t offset, size_t length) { struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio)); unsigned long priv; unsigned int f, t, end = offset + length; priv = (unsigned long)folio_get_private(folio); /* we clean up only if the entire page is being invalidated */ if (offset == 0 && length == folio_size(folio)) goto full_invalidate; /* If the page was dirtied by page_mkwrite(), the PTE stays writable * and we don't get another notification to tell us to expand it * again. */ if (afs_is_folio_dirty_mmapped(priv)) return; /* We may need to shorten the dirty region */ f = afs_folio_dirty_from(folio, priv); t = afs_folio_dirty_to(folio, priv); if (t <= offset || f >= end) return; /* Doesn't overlap */ if (f < offset && t > end) return; /* Splits the dirty region - just absorb it */ if (f >= offset && t <= end) goto undirty; if (f < offset) t = offset; else f = end; if (f == t) goto undirty; priv = afs_folio_dirty(folio, f, t); folio_change_private(folio, (void *)priv); trace_afs_folio_dirty(vnode, tracepoint_string("trunc"), folio); return; undirty: trace_afs_folio_dirty(vnode, tracepoint_string("undirty"), folio); folio_clear_dirty_for_io(folio); full_invalidate: trace_afs_folio_dirty(vnode, tracepoint_string("inval"), folio); folio_detach_private(folio); } /* * invalidate part or all of a page * - release a page and clean up its private data if offset is 0 (indicating * the entire page) */ static void afs_invalidate_folio(struct folio *folio, size_t offset, size_t length) { _enter("{%lu},%zu,%zu", folio->index, offset, length); BUG_ON(!folio_test_locked(folio)); if (folio_get_private(folio)) afs_invalidate_dirty(folio, offset, length); folio_wait_fscache(folio); _leave(""); } /* * release a page and clean up its private state if it's not busy * - return true if the page can now be released, false if not */ static bool afs_release_folio(struct folio *folio, gfp_t gfp) { struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio)); _enter("{{%llx:%llu}[%lu],%lx},%x", vnode->fid.vid, vnode->fid.vnode, folio_index(folio), folio->flags, gfp); /* deny if folio is being written to the cache and the caller hasn't * elected to wait */ #ifdef CONFIG_AFS_FSCACHE if (folio_test_fscache(folio)) { if (current_is_kswapd() || !(gfp & __GFP_FS)) return false; folio_wait_fscache(folio); } fscache_note_page_release(afs_vnode_cache(vnode)); #endif if (folio_test_private(folio)) { trace_afs_folio_dirty(vnode, tracepoint_string("rel"), folio); folio_detach_private(folio); } /* Indicate that the folio can be released */ _leave(" = T"); return true; } static void afs_add_open_mmap(struct afs_vnode *vnode) { if 
(atomic_inc_return(&vnode->cb_nr_mmap) == 1) { down_write(&vnode->volume->cell->fs_open_mmaps_lock); if (list_empty(&vnode->cb_mmap_link)) list_add_tail(&vnode->cb_mmap_link, &vnode->volume->cell->fs_open_mmaps); up_write(&vnode->volume->cell->fs_open_mmaps_lock); } } static void afs_drop_open_mmap(struct afs_vnode *vnode) { if (!atomic_dec_and_test(&vnode->cb_nr_mmap)) return; down_write(&vnode->volume->cell->fs_open_mmaps_lock); if (atomic_read(&vnode->cb_nr_mmap) == 0) list_del_init(&vnode->cb_mmap_link); up_write(&vnode->volume->cell->fs_open_mmaps_lock); flush_work(&vnode->cb_work); } /* * Handle setting up a memory mapping on an AFS file. */ static int afs_file_mmap(struct file *file, struct vm_area_struct *vma) { struct afs_vnode *vnode = AFS_FS_I(file_inode(file)); int ret; afs_add_open_mmap(vnode); ret = generic_file_mmap(file, vma); if (ret == 0) vma->vm_ops = &afs_vm_ops; else afs_drop_open_mmap(vnode); return ret; } static void afs_vm_open(struct vm_area_struct *vma) { afs_add_open_mmap(AFS_FS_I(file_inode(vma->vm_file))); } static void afs_vm_close(struct vm_area_struct *vma) { afs_drop_open_mmap(AFS_FS_I(file_inode(vma->vm_file))); } static vm_fault_t afs_vm_map_pages(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff) { struct afs_vnode *vnode = AFS_FS_I(file_inode(vmf->vma->vm_file)); if (afs_pagecache_valid(vnode)) return filemap_map_pages(vmf, start_pgoff, end_pgoff); return 0; } static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter) { struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp)); struct afs_file *af = iocb->ki_filp->private_data; int ret; ret = afs_validate(vnode, af->key); if (ret < 0) return ret; return generic_file_read_iter(iocb, iter); } static ssize_t afs_file_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { struct afs_vnode *vnode = AFS_FS_I(file_inode(in)); struct afs_file *af = in->private_data; int ret; ret = afs_validate(vnode, af->key); if (ret < 0) return ret; return filemap_splice_read(in, ppos, pipe, len, flags); }
linux-master
fs/afs/file.c
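afs_invalidate_dirty() in the file.c record above reduces a folio's recorded dirty byte range [f, t) against an invalidated range [offset, offset+length): keep it if the two don't overlap or if the invalidation would split it, drop the marker if it is entirely covered, otherwise trim whichever end overlaps. The standalone sketch below isolates that interval arithmetic; trim_dirty() and its result enum are illustrative names, and no folio or private-data handling is shown.

/* Pure-arithmetic sketch of the dirty-range trimming performed by
 * afs_invalidate_dirty(); standalone C, not kernel code. */
#include <stdio.h>

enum trim_result { KEEP, SHORTENED, UNDIRTY };

static enum trim_result trim_dirty(unsigned int *f, unsigned int *t,
				   unsigned int off, unsigned int end)
{
	if (*t <= off || *f >= end)
		return KEEP;		/* doesn't overlap */
	if (*f < off && *t > end)
		return KEEP;		/* invalidation splits the region - absorb it */
	if (*f >= off && *t <= end)
		return UNDIRTY;		/* fully invalidated */
	if (*f < off)
		*t = off;		/* trim the tail */
	else
		*f = end;		/* trim the head */
	return (*f == *t) ? UNDIRTY : SHORTENED;
}

int main(void)
{
	unsigned int f = 100, t = 300;

	switch (trim_dirty(&f, &t, 200, 4096)) {
	case KEEP:	printf("kept [%u,%u)\n", f, t); break;
	case SHORTENED:	printf("shortened to [%u,%u)\n", f, t); break;
	case UNDIRTY:	printf("region undirtied\n"); break;
	}
	return 0;
}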
// SPDX-License-Identifier: GPL-2.0-or-later /* AFS filesystem directory editing * * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved. * Written by David Howells ([email protected]) */ #include <linux/kernel.h> #include <linux/fs.h> #include <linux/namei.h> #include <linux/pagemap.h> #include <linux/iversion.h> #include "internal.h" #include "xdr_fs.h" /* * Find a number of contiguous clear bits in a directory block bitmask. * * There are 64 slots, which means we can load the entire bitmap into a * variable. The first bit doesn't count as it corresponds to the block header * slot. nr_slots is between 1 and 9. */ static int afs_find_contig_bits(union afs_xdr_dir_block *block, unsigned int nr_slots) { u64 bitmap; u32 mask; int bit, n; bitmap = (u64)block->hdr.bitmap[0] << 0 * 8; bitmap |= (u64)block->hdr.bitmap[1] << 1 * 8; bitmap |= (u64)block->hdr.bitmap[2] << 2 * 8; bitmap |= (u64)block->hdr.bitmap[3] << 3 * 8; bitmap |= (u64)block->hdr.bitmap[4] << 4 * 8; bitmap |= (u64)block->hdr.bitmap[5] << 5 * 8; bitmap |= (u64)block->hdr.bitmap[6] << 6 * 8; bitmap |= (u64)block->hdr.bitmap[7] << 7 * 8; bitmap >>= 1; /* The first entry is metadata */ bit = 1; mask = (1 << nr_slots) - 1; do { if (sizeof(unsigned long) == 8) n = ffz(bitmap); else n = ((u32)bitmap) != 0 ? ffz((u32)bitmap) : ffz((u32)(bitmap >> 32)) + 32; bitmap >>= n; bit += n; if ((bitmap & mask) == 0) { if (bit > 64 - nr_slots) return -1; return bit; } n = __ffs(bitmap); bitmap >>= n; bit += n; } while (bitmap); return -1; } /* * Set a number of contiguous bits in the directory block bitmap. */ static void afs_set_contig_bits(union afs_xdr_dir_block *block, int bit, unsigned int nr_slots) { u64 mask; mask = (1 << nr_slots) - 1; mask <<= bit; block->hdr.bitmap[0] |= (u8)(mask >> 0 * 8); block->hdr.bitmap[1] |= (u8)(mask >> 1 * 8); block->hdr.bitmap[2] |= (u8)(mask >> 2 * 8); block->hdr.bitmap[3] |= (u8)(mask >> 3 * 8); block->hdr.bitmap[4] |= (u8)(mask >> 4 * 8); block->hdr.bitmap[5] |= (u8)(mask >> 5 * 8); block->hdr.bitmap[6] |= (u8)(mask >> 6 * 8); block->hdr.bitmap[7] |= (u8)(mask >> 7 * 8); } /* * Clear a number of contiguous bits in the directory block bitmap. */ static void afs_clear_contig_bits(union afs_xdr_dir_block *block, int bit, unsigned int nr_slots) { u64 mask; mask = (1 << nr_slots) - 1; mask <<= bit; block->hdr.bitmap[0] &= ~(u8)(mask >> 0 * 8); block->hdr.bitmap[1] &= ~(u8)(mask >> 1 * 8); block->hdr.bitmap[2] &= ~(u8)(mask >> 2 * 8); block->hdr.bitmap[3] &= ~(u8)(mask >> 3 * 8); block->hdr.bitmap[4] &= ~(u8)(mask >> 4 * 8); block->hdr.bitmap[5] &= ~(u8)(mask >> 5 * 8); block->hdr.bitmap[6] &= ~(u8)(mask >> 6 * 8); block->hdr.bitmap[7] &= ~(u8)(mask >> 7 * 8); } /* * Get a new directory folio. */ static struct folio *afs_dir_get_folio(struct afs_vnode *vnode, pgoff_t index) { struct address_space *mapping = vnode->netfs.inode.i_mapping; struct folio *folio; folio = __filemap_get_folio(mapping, index, FGP_LOCK | FGP_ACCESSED | FGP_CREAT, mapping->gfp_mask); if (IS_ERR(folio)) { clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags); return NULL; } if (!folio_test_private(folio)) folio_attach_private(folio, (void *)1); return folio; } /* * Scan a directory block looking for a dirent of the right name. 
*/ static int afs_dir_scan_block(union afs_xdr_dir_block *block, struct qstr *name, unsigned int blocknum) { union afs_xdr_dirent *de; u64 bitmap; int d, len, n; _enter(""); bitmap = (u64)block->hdr.bitmap[0] << 0 * 8; bitmap |= (u64)block->hdr.bitmap[1] << 1 * 8; bitmap |= (u64)block->hdr.bitmap[2] << 2 * 8; bitmap |= (u64)block->hdr.bitmap[3] << 3 * 8; bitmap |= (u64)block->hdr.bitmap[4] << 4 * 8; bitmap |= (u64)block->hdr.bitmap[5] << 5 * 8; bitmap |= (u64)block->hdr.bitmap[6] << 6 * 8; bitmap |= (u64)block->hdr.bitmap[7] << 7 * 8; for (d = (blocknum == 0 ? AFS_DIR_RESV_BLOCKS0 : AFS_DIR_RESV_BLOCKS); d < AFS_DIR_SLOTS_PER_BLOCK; d++) { if (!((bitmap >> d) & 1)) continue; de = &block->dirents[d]; if (de->u.valid != 1) continue; /* The block was NUL-terminated by afs_dir_check_page(). */ len = strlen(de->u.name); if (len == name->len && memcmp(de->u.name, name->name, name->len) == 0) return d; n = round_up(12 + len + 1 + 4, AFS_DIR_DIRENT_SIZE); n /= AFS_DIR_DIRENT_SIZE; d += n - 1; } return -1; } /* * Initialise a new directory block. Note that block 0 is special and contains * some extra metadata. */ static void afs_edit_init_block(union afs_xdr_dir_block *meta, union afs_xdr_dir_block *block, int block_num) { memset(block, 0, sizeof(*block)); block->hdr.npages = htons(1); block->hdr.magic = AFS_DIR_MAGIC; block->hdr.bitmap[0] = 1; if (block_num == 0) { block->hdr.bitmap[0] = 0xff; block->hdr.bitmap[1] = 0x1f; memset(block->meta.alloc_ctrs, AFS_DIR_SLOTS_PER_BLOCK, sizeof(block->meta.alloc_ctrs)); meta->meta.alloc_ctrs[0] = AFS_DIR_SLOTS_PER_BLOCK - AFS_DIR_RESV_BLOCKS0; } if (block_num < AFS_DIR_BLOCKS_WITH_CTR) meta->meta.alloc_ctrs[block_num] = AFS_DIR_SLOTS_PER_BLOCK - AFS_DIR_RESV_BLOCKS; } /* * Edit a directory's file data to add a new directory entry. Doing this after * create, mkdir, symlink, link or rename if the data version number is * incremented by exactly one avoids the need to re-download the entire * directory contents. * * The caller must hold the inode locked. */ void afs_edit_dir_add(struct afs_vnode *vnode, struct qstr *name, struct afs_fid *new_fid, enum afs_edit_dir_reason why) { union afs_xdr_dir_block *meta, *block; union afs_xdr_dirent *de; struct folio *folio0, *folio; unsigned int need_slots, nr_blocks, b; pgoff_t index; loff_t i_size; int slot; _enter(",,{%d,%s},", name->len, name->name); i_size = i_size_read(&vnode->netfs.inode); if (i_size > AFS_DIR_BLOCK_SIZE * AFS_DIR_MAX_BLOCKS || (i_size & (AFS_DIR_BLOCK_SIZE - 1))) { clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags); return; } folio0 = afs_dir_get_folio(vnode, 0); if (!folio0) { _leave(" [fgp]"); return; } /* Work out how many slots we're going to need. */ need_slots = afs_dir_calc_slots(name->len); meta = kmap_local_folio(folio0, 0); if (i_size == 0) goto new_directory; nr_blocks = i_size / AFS_DIR_BLOCK_SIZE; /* Find a block that has sufficient slots available. Each folio * contains two or more directory blocks. */ for (b = 0; b < nr_blocks + 1; b++) { /* If the directory extended into a new folio, then we need to * tack a new folio on the end. */ index = b / AFS_DIR_BLOCKS_PER_PAGE; if (nr_blocks >= AFS_DIR_MAX_BLOCKS) goto error; if (index >= folio_nr_pages(folio0)) { folio = afs_dir_get_folio(vnode, index); if (!folio) goto error; } else { folio = folio0; } block = kmap_local_folio(folio, b * AFS_DIR_BLOCK_SIZE - folio_file_pos(folio)); /* Abandon the edit if we got a callback break. 
*/ if (!test_bit(AFS_VNODE_DIR_VALID, &vnode->flags)) goto invalidated; _debug("block %u: %2u %3u %u", b, (b < AFS_DIR_BLOCKS_WITH_CTR) ? meta->meta.alloc_ctrs[b] : 99, ntohs(block->hdr.npages), ntohs(block->hdr.magic)); /* Initialise the block if necessary. */ if (b == nr_blocks) { _debug("init %u", b); afs_edit_init_block(meta, block, b); afs_set_i_size(vnode, (b + 1) * AFS_DIR_BLOCK_SIZE); } /* Only lower dir blocks have a counter in the header. */ if (b >= AFS_DIR_BLOCKS_WITH_CTR || meta->meta.alloc_ctrs[b] >= need_slots) { /* We need to try and find one or more consecutive * slots to hold the entry. */ slot = afs_find_contig_bits(block, need_slots); if (slot >= 0) { _debug("slot %u", slot); goto found_space; } } kunmap_local(block); if (folio != folio0) { folio_unlock(folio); folio_put(folio); } } /* There are no spare slots of sufficient size, yet the operation * succeeded. Download the directory again. */ trace_afs_edit_dir(vnode, why, afs_edit_dir_create_nospc, 0, 0, 0, 0, name->name); clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags); goto out_unmap; new_directory: afs_edit_init_block(meta, meta, 0); i_size = AFS_DIR_BLOCK_SIZE; afs_set_i_size(vnode, i_size); slot = AFS_DIR_RESV_BLOCKS0; folio = folio0; block = kmap_local_folio(folio, 0); nr_blocks = 1; b = 0; found_space: /* Set the dirent slot. */ trace_afs_edit_dir(vnode, why, afs_edit_dir_create, b, slot, new_fid->vnode, new_fid->unique, name->name); de = &block->dirents[slot]; de->u.valid = 1; de->u.unused[0] = 0; de->u.hash_next = 0; // TODO: Really need to maintain this de->u.vnode = htonl(new_fid->vnode); de->u.unique = htonl(new_fid->unique); memcpy(de->u.name, name->name, name->len + 1); de->u.name[name->len] = 0; /* Adjust the bitmap. */ afs_set_contig_bits(block, slot, need_slots); kunmap_local(block); if (folio != folio0) { folio_unlock(folio); folio_put(folio); } /* Adjust the allocation counter. */ if (b < AFS_DIR_BLOCKS_WITH_CTR) meta->meta.alloc_ctrs[b] -= need_slots; inode_inc_iversion_raw(&vnode->netfs.inode); afs_stat_v(vnode, n_dir_cr); _debug("Insert %s in %u[%u]", name->name, b, slot); out_unmap: kunmap_local(meta); folio_unlock(folio0); folio_put(folio0); _leave(""); return; invalidated: trace_afs_edit_dir(vnode, why, afs_edit_dir_create_inval, 0, 0, 0, 0, name->name); clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags); kunmap_local(block); if (folio != folio0) { folio_unlock(folio); folio_put(folio); } goto out_unmap; error: trace_afs_edit_dir(vnode, why, afs_edit_dir_create_error, 0, 0, 0, 0, name->name); clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags); goto out_unmap; } /* * Edit a directory's file data to remove a new directory entry. Doing this * after unlink, rmdir or rename if the data version number is incremented by * exactly one avoids the need to re-download the entire directory contents. * * The caller must hold the inode locked. 
*/ void afs_edit_dir_remove(struct afs_vnode *vnode, struct qstr *name, enum afs_edit_dir_reason why) { union afs_xdr_dir_block *meta, *block; union afs_xdr_dirent *de; struct folio *folio0, *folio; unsigned int need_slots, nr_blocks, b; pgoff_t index; loff_t i_size; int slot; _enter(",,{%d,%s},", name->len, name->name); i_size = i_size_read(&vnode->netfs.inode); if (i_size < AFS_DIR_BLOCK_SIZE || i_size > AFS_DIR_BLOCK_SIZE * AFS_DIR_MAX_BLOCKS || (i_size & (AFS_DIR_BLOCK_SIZE - 1))) { clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags); return; } nr_blocks = i_size / AFS_DIR_BLOCK_SIZE; folio0 = afs_dir_get_folio(vnode, 0); if (!folio0) { _leave(" [fgp]"); return; } /* Work out how many slots we're going to discard. */ need_slots = afs_dir_calc_slots(name->len); meta = kmap_local_folio(folio0, 0); /* Find a block that has sufficient slots available. Each folio * contains two or more directory blocks. */ for (b = 0; b < nr_blocks; b++) { index = b / AFS_DIR_BLOCKS_PER_PAGE; if (index >= folio_nr_pages(folio0)) { folio = afs_dir_get_folio(vnode, index); if (!folio) goto error; } else { folio = folio0; } block = kmap_local_folio(folio, b * AFS_DIR_BLOCK_SIZE - folio_file_pos(folio)); /* Abandon the edit if we got a callback break. */ if (!test_bit(AFS_VNODE_DIR_VALID, &vnode->flags)) goto invalidated; if (b > AFS_DIR_BLOCKS_WITH_CTR || meta->meta.alloc_ctrs[b] <= AFS_DIR_SLOTS_PER_BLOCK - 1 - need_slots) { slot = afs_dir_scan_block(block, name, b); if (slot >= 0) goto found_dirent; } kunmap_local(block); if (folio != folio0) { folio_unlock(folio); folio_put(folio); } } /* Didn't find the dirent to clobber. Download the directory again. */ trace_afs_edit_dir(vnode, why, afs_edit_dir_delete_noent, 0, 0, 0, 0, name->name); clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags); goto out_unmap; found_dirent: de = &block->dirents[slot]; trace_afs_edit_dir(vnode, why, afs_edit_dir_delete, b, slot, ntohl(de->u.vnode), ntohl(de->u.unique), name->name); memset(de, 0, sizeof(*de) * need_slots); /* Adjust the bitmap. */ afs_clear_contig_bits(block, slot, need_slots); kunmap_local(block); if (folio != folio0) { folio_unlock(folio); folio_put(folio); } /* Adjust the allocation counter. */ if (b < AFS_DIR_BLOCKS_WITH_CTR) meta->meta.alloc_ctrs[b] += need_slots; inode_set_iversion_raw(&vnode->netfs.inode, vnode->status.data_version); afs_stat_v(vnode, n_dir_rm); _debug("Remove %s from %u[%u]", name->name, b, slot); out_unmap: kunmap_local(meta); folio_unlock(folio0); folio_put(folio0); _leave(""); return; invalidated: trace_afs_edit_dir(vnode, why, afs_edit_dir_delete_inval, 0, 0, 0, 0, name->name); clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags); kunmap_local(block); if (folio != folio0) { folio_unlock(folio); folio_put(folio); } goto out_unmap; error: trace_afs_edit_dir(vnode, why, afs_edit_dir_delete_error, 0, 0, 0, 0, name->name); clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags); goto out_unmap; }
linux-master
fs/afs/dir_edit.c
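/*
 * A standalone illustrative sketch, not taken from fs/afs/dir_edit.c above:
 * how a name length maps onto directory entry slots, mirroring the
 * round_up(12 + len + 1 + 4, AFS_DIR_DIRENT_SIZE) computation used by
 * afs_dir_scan_block() when it skips over a multi-slot entry. The 32-byte
 * slot size is an assumption made for this example; example_calc_slots()
 * and the sample names are invented here.
 */
#include <stdio.h>
#include <string.h>

#define AFS_DIR_DIRENT_SIZE 32	/* assumed slot size for illustration */

static unsigned int example_calc_slots(size_t name_len)
{
	/* 12 bytes of fixed dirent header, the name, its NUL terminator and
	 * 4 bytes of slack, rounded up to whole slots. */
	size_t bytes = 12 + name_len + 1 + 4;

	return (unsigned int)((bytes + AFS_DIR_DIRENT_SIZE - 1) / AFS_DIR_DIRENT_SIZE);
}

int main(void)
{
	const char *names[] = { "a", "medium-length-name", "a-much-longer-directory-entry-name" };
	size_t i;

	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++)
		printf("%-40s -> %u slot(s)\n",
		       names[i], example_calc_slots(strlen(names[i])));
	return 0;
}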
// SPDX-License-Identifier: GPL-2.0-or-later /* AFS cell and server record management * * Copyright (C) 2002, 2017 Red Hat, Inc. All Rights Reserved. * Written by David Howells ([email protected]) */ #include <linux/slab.h> #include <linux/key.h> #include <linux/ctype.h> #include <linux/dns_resolver.h> #include <linux/sched.h> #include <linux/inet.h> #include <linux/namei.h> #include <keys/rxrpc-type.h> #include "internal.h" static unsigned __read_mostly afs_cell_gc_delay = 10; static unsigned __read_mostly afs_cell_min_ttl = 10 * 60; static unsigned __read_mostly afs_cell_max_ttl = 24 * 60 * 60; static atomic_t cell_debug_id; static void afs_queue_cell_manager(struct afs_net *); static void afs_manage_cell_work(struct work_struct *); static void afs_dec_cells_outstanding(struct afs_net *net) { if (atomic_dec_and_test(&net->cells_outstanding)) wake_up_var(&net->cells_outstanding); } /* * Set the cell timer to fire after a given delay, assuming it's not already * set for an earlier time. */ static void afs_set_cell_timer(struct afs_net *net, time64_t delay) { if (net->live) { atomic_inc(&net->cells_outstanding); if (timer_reduce(&net->cells_timer, jiffies + delay * HZ)) afs_dec_cells_outstanding(net); } else { afs_queue_cell_manager(net); } } /* * Look up and get an activation reference on a cell record. The caller must * hold net->cells_lock at least read-locked. */ static struct afs_cell *afs_find_cell_locked(struct afs_net *net, const char *name, unsigned int namesz, enum afs_cell_trace reason) { struct afs_cell *cell = NULL; struct rb_node *p; int n; _enter("%*.*s", namesz, namesz, name); if (name && namesz == 0) return ERR_PTR(-EINVAL); if (namesz > AFS_MAXCELLNAME) return ERR_PTR(-ENAMETOOLONG); if (!name) { cell = net->ws_cell; if (!cell) return ERR_PTR(-EDESTADDRREQ); goto found; } p = net->cells.rb_node; while (p) { cell = rb_entry(p, struct afs_cell, net_node); n = strncasecmp(cell->name, name, min_t(size_t, cell->name_len, namesz)); if (n == 0) n = cell->name_len - namesz; if (n < 0) p = p->rb_left; else if (n > 0) p = p->rb_right; else goto found; } return ERR_PTR(-ENOENT); found: return afs_use_cell(cell, reason); } /* * Look up and get an activation reference on a cell record. */ struct afs_cell *afs_find_cell(struct afs_net *net, const char *name, unsigned int namesz, enum afs_cell_trace reason) { struct afs_cell *cell; down_read(&net->cells_lock); cell = afs_find_cell_locked(net, name, namesz, reason); up_read(&net->cells_lock); return cell; } /* * Set up a cell record and fill in its name, VL server address list and * allocate an anonymous key */ static struct afs_cell *afs_alloc_cell(struct afs_net *net, const char *name, unsigned int namelen, const char *addresses) { struct afs_vlserver_list *vllist; struct afs_cell *cell; int i, ret; ASSERT(name); if (namelen == 0) return ERR_PTR(-EINVAL); if (namelen > AFS_MAXCELLNAME) { _leave(" = -ENAMETOOLONG"); return ERR_PTR(-ENAMETOOLONG); } /* Prohibit cell names that contain unprintable chars, '/' and '@' or * that begin with a dot. This also precludes "@cell". 
*/ if (name[0] == '.') return ERR_PTR(-EINVAL); for (i = 0; i < namelen; i++) { char ch = name[i]; if (!isprint(ch) || ch == '/' || ch == '@') return ERR_PTR(-EINVAL); } _enter("%*.*s,%s", namelen, namelen, name, addresses); cell = kzalloc(sizeof(struct afs_cell), GFP_KERNEL); if (!cell) { _leave(" = -ENOMEM"); return ERR_PTR(-ENOMEM); } cell->name = kmalloc(namelen + 1, GFP_KERNEL); if (!cell->name) { kfree(cell); return ERR_PTR(-ENOMEM); } cell->net = net; cell->name_len = namelen; for (i = 0; i < namelen; i++) cell->name[i] = tolower(name[i]); cell->name[i] = 0; refcount_set(&cell->ref, 1); atomic_set(&cell->active, 0); INIT_WORK(&cell->manager, afs_manage_cell_work); cell->volumes = RB_ROOT; INIT_HLIST_HEAD(&cell->proc_volumes); seqlock_init(&cell->volume_lock); cell->fs_servers = RB_ROOT; seqlock_init(&cell->fs_lock); INIT_LIST_HEAD(&cell->fs_open_mmaps); init_rwsem(&cell->fs_open_mmaps_lock); rwlock_init(&cell->vl_servers_lock); cell->flags = (1 << AFS_CELL_FL_CHECK_ALIAS); /* Provide a VL server list, filling it in if we were given a list of * addresses to use. */ if (addresses) { vllist = afs_parse_text_addrs(net, addresses, strlen(addresses), ':', VL_SERVICE, AFS_VL_PORT); if (IS_ERR(vllist)) { ret = PTR_ERR(vllist); goto parse_failed; } vllist->source = DNS_RECORD_FROM_CONFIG; vllist->status = DNS_LOOKUP_NOT_DONE; cell->dns_expiry = TIME64_MAX; } else { ret = -ENOMEM; vllist = afs_alloc_vlserver_list(0); if (!vllist) goto error; vllist->source = DNS_RECORD_UNAVAILABLE; vllist->status = DNS_LOOKUP_NOT_DONE; cell->dns_expiry = ktime_get_real_seconds(); } rcu_assign_pointer(cell->vl_servers, vllist); cell->dns_source = vllist->source; cell->dns_status = vllist->status; smp_store_release(&cell->dns_lookup_count, 1); /* vs source/status */ atomic_inc(&net->cells_outstanding); cell->debug_id = atomic_inc_return(&cell_debug_id); trace_afs_cell(cell->debug_id, 1, 0, afs_cell_trace_alloc); _leave(" = %p", cell); return cell; parse_failed: if (ret == -EINVAL) printk(KERN_ERR "kAFS: bad VL server IP address\n"); error: kfree(cell->name); kfree(cell); _leave(" = %d", ret); return ERR_PTR(ret); } /* * afs_lookup_cell - Look up or create a cell record. * @net: The network namespace * @name: The name of the cell. * @namesz: The strlen of the cell name. * @vllist: A colon/comma separated list of numeric IP addresses or NULL. * @excl: T if an error should be given if the cell name already exists. * * Look up a cell record by name and query the DNS for VL server addresses if * needed. Note that that actual DNS query is punted off to the manager thread * so that this function can return immediately if interrupted whilst allowing * cell records to be shared even if not yet fully constructed. */ struct afs_cell *afs_lookup_cell(struct afs_net *net, const char *name, unsigned int namesz, const char *vllist, bool excl) { struct afs_cell *cell, *candidate, *cursor; struct rb_node *parent, **pp; enum afs_cell_state state; int ret, n; _enter("%s,%s", name, vllist); if (!excl) { cell = afs_find_cell(net, name, namesz, afs_cell_trace_use_lookup); if (!IS_ERR(cell)) goto wait_for_cell; } /* Assume we're probably going to create a cell and preallocate and * mostly set up a candidate record. We can then use this to stash the * name, the net namespace and VL server addresses. * * We also want to do this before we hold any locks as it may involve * upcalling to userspace to make DNS queries. 
*/ candidate = afs_alloc_cell(net, name, namesz, vllist); if (IS_ERR(candidate)) { _leave(" = %ld", PTR_ERR(candidate)); return candidate; } /* Find the insertion point and check to see if someone else added a * cell whilst we were allocating. */ down_write(&net->cells_lock); pp = &net->cells.rb_node; parent = NULL; while (*pp) { parent = *pp; cursor = rb_entry(parent, struct afs_cell, net_node); n = strncasecmp(cursor->name, name, min_t(size_t, cursor->name_len, namesz)); if (n == 0) n = cursor->name_len - namesz; if (n < 0) pp = &(*pp)->rb_left; else if (n > 0) pp = &(*pp)->rb_right; else goto cell_already_exists; } cell = candidate; candidate = NULL; atomic_set(&cell->active, 2); trace_afs_cell(cell->debug_id, refcount_read(&cell->ref), 2, afs_cell_trace_insert); rb_link_node_rcu(&cell->net_node, parent, pp); rb_insert_color(&cell->net_node, &net->cells); up_write(&net->cells_lock); afs_queue_cell(cell, afs_cell_trace_get_queue_new); wait_for_cell: trace_afs_cell(cell->debug_id, refcount_read(&cell->ref), atomic_read(&cell->active), afs_cell_trace_wait); _debug("wait_for_cell"); wait_var_event(&cell->state, ({ state = smp_load_acquire(&cell->state); /* vs error */ state == AFS_CELL_ACTIVE || state == AFS_CELL_REMOVED; })); /* Check the state obtained from the wait check. */ if (state == AFS_CELL_REMOVED) { ret = cell->error; goto error; } _leave(" = %p [cell]", cell); return cell; cell_already_exists: _debug("cell exists"); cell = cursor; if (excl) { ret = -EEXIST; } else { afs_use_cell(cursor, afs_cell_trace_use_lookup); ret = 0; } up_write(&net->cells_lock); if (candidate) afs_put_cell(candidate, afs_cell_trace_put_candidate); if (ret == 0) goto wait_for_cell; goto error_noput; error: afs_unuse_cell(net, cell, afs_cell_trace_unuse_lookup); error_noput: _leave(" = %d [error]", ret); return ERR_PTR(ret); } /* * set the root cell information * - can be called with a module parameter string * - can be called from a write to /proc/fs/afs/rootcell */ int afs_cell_init(struct afs_net *net, const char *rootcell) { struct afs_cell *old_root, *new_root; const char *cp, *vllist; size_t len; _enter(""); if (!rootcell) { /* module is loaded with no parameters, or built statically. * - in the future we might initialize cell DB here. */ _leave(" = 0 [no root]"); return 0; } cp = strchr(rootcell, ':'); if (!cp) { _debug("kAFS: no VL server IP addresses specified"); vllist = NULL; len = strlen(rootcell); } else { vllist = cp + 1; len = cp - rootcell; } /* allocate a cell record for the root cell */ new_root = afs_lookup_cell(net, rootcell, len, vllist, false); if (IS_ERR(new_root)) { _leave(" = %ld", PTR_ERR(new_root)); return PTR_ERR(new_root); } if (!test_and_set_bit(AFS_CELL_FL_NO_GC, &new_root->flags)) afs_use_cell(new_root, afs_cell_trace_use_pin); /* install the new cell */ down_write(&net->cells_lock); afs_see_cell(new_root, afs_cell_trace_see_ws); old_root = net->ws_cell; net->ws_cell = new_root; up_write(&net->cells_lock); afs_unuse_cell(net, old_root, afs_cell_trace_unuse_ws); _leave(" = 0"); return 0; } /* * Update a cell's VL server address list from the DNS. 
*/ static int afs_update_cell(struct afs_cell *cell) { struct afs_vlserver_list *vllist, *old = NULL, *p; unsigned int min_ttl = READ_ONCE(afs_cell_min_ttl); unsigned int max_ttl = READ_ONCE(afs_cell_max_ttl); time64_t now, expiry = 0; int ret = 0; _enter("%s", cell->name); vllist = afs_dns_query(cell, &expiry); if (IS_ERR(vllist)) { ret = PTR_ERR(vllist); _debug("%s: fail %d", cell->name, ret); if (ret == -ENOMEM) goto out_wake; ret = -ENOMEM; vllist = afs_alloc_vlserver_list(0); if (!vllist) goto out_wake; switch (ret) { case -ENODATA: case -EDESTADDRREQ: vllist->status = DNS_LOOKUP_GOT_NOT_FOUND; break; case -EAGAIN: case -ECONNREFUSED: vllist->status = DNS_LOOKUP_GOT_TEMP_FAILURE; break; default: vllist->status = DNS_LOOKUP_GOT_LOCAL_FAILURE; break; } } _debug("%s: got list %d %d", cell->name, vllist->source, vllist->status); cell->dns_status = vllist->status; now = ktime_get_real_seconds(); if (min_ttl > max_ttl) max_ttl = min_ttl; if (expiry < now + min_ttl) expiry = now + min_ttl; else if (expiry > now + max_ttl) expiry = now + max_ttl; _debug("%s: status %d", cell->name, vllist->status); if (vllist->source == DNS_RECORD_UNAVAILABLE) { switch (vllist->status) { case DNS_LOOKUP_GOT_NOT_FOUND: /* The DNS said that the cell does not exist or there * weren't any addresses to be had. */ cell->dns_expiry = expiry; break; case DNS_LOOKUP_BAD: case DNS_LOOKUP_GOT_LOCAL_FAILURE: case DNS_LOOKUP_GOT_TEMP_FAILURE: case DNS_LOOKUP_GOT_NS_FAILURE: default: cell->dns_expiry = now + 10; break; } } else { cell->dns_expiry = expiry; } /* Replace the VL server list if the new record has servers or the old * record doesn't. */ write_lock(&cell->vl_servers_lock); p = rcu_dereference_protected(cell->vl_servers, true); if (vllist->nr_servers > 0 || p->nr_servers == 0) { rcu_assign_pointer(cell->vl_servers, vllist); cell->dns_source = vllist->source; old = p; } write_unlock(&cell->vl_servers_lock); afs_put_vlserverlist(cell->net, old); out_wake: smp_store_release(&cell->dns_lookup_count, cell->dns_lookup_count + 1); /* vs source/status */ wake_up_var(&cell->dns_lookup_count); _leave(" = %d", ret); return ret; } /* * Destroy a cell record */ static void afs_cell_destroy(struct rcu_head *rcu) { struct afs_cell *cell = container_of(rcu, struct afs_cell, rcu); struct afs_net *net = cell->net; int r; _enter("%p{%s}", cell, cell->name); r = refcount_read(&cell->ref); ASSERTCMP(r, ==, 0); trace_afs_cell(cell->debug_id, r, atomic_read(&cell->active), afs_cell_trace_free); afs_put_vlserverlist(net, rcu_access_pointer(cell->vl_servers)); afs_unuse_cell(net, cell->alias_of, afs_cell_trace_unuse_alias); key_put(cell->anonymous_key); kfree(cell->name); kfree(cell); afs_dec_cells_outstanding(net); _leave(" [destroyed]"); } /* * Queue the cell manager. */ static void afs_queue_cell_manager(struct afs_net *net) { int outstanding = atomic_inc_return(&net->cells_outstanding); _enter("%d", outstanding); if (!queue_work(afs_wq, &net->cells_manager)) afs_dec_cells_outstanding(net); } /* * Cell management timer. We have an increment on cells_outstanding that we * need to pass along to the work item. */ void afs_cells_timer(struct timer_list *timer) { struct afs_net *net = container_of(timer, struct afs_net, cells_timer); _enter(""); if (!queue_work(afs_wq, &net->cells_manager)) afs_dec_cells_outstanding(net); } /* * Get a reference on a cell record. 
*/ struct afs_cell *afs_get_cell(struct afs_cell *cell, enum afs_cell_trace reason) { int r; __refcount_inc(&cell->ref, &r); trace_afs_cell(cell->debug_id, r + 1, atomic_read(&cell->active), reason); return cell; } /* * Drop a reference on a cell record. */ void afs_put_cell(struct afs_cell *cell, enum afs_cell_trace reason) { if (cell) { unsigned int debug_id = cell->debug_id; unsigned int a; bool zero; int r; a = atomic_read(&cell->active); zero = __refcount_dec_and_test(&cell->ref, &r); trace_afs_cell(debug_id, r - 1, a, reason); if (zero) { a = atomic_read(&cell->active); WARN(a != 0, "Cell active count %u > 0\n", a); call_rcu(&cell->rcu, afs_cell_destroy); } } } /* * Note a cell becoming more active. */ struct afs_cell *afs_use_cell(struct afs_cell *cell, enum afs_cell_trace reason) { int r, a; r = refcount_read(&cell->ref); WARN_ON(r == 0); a = atomic_inc_return(&cell->active); trace_afs_cell(cell->debug_id, r, a, reason); return cell; } /* * Record a cell becoming less active. When the active counter reaches 1, it * is scheduled for destruction, but may get reactivated. */ void afs_unuse_cell(struct afs_net *net, struct afs_cell *cell, enum afs_cell_trace reason) { unsigned int debug_id; time64_t now, expire_delay; int r, a; if (!cell) return; _enter("%s", cell->name); now = ktime_get_real_seconds(); cell->last_inactive = now; expire_delay = 0; if (cell->vl_servers->nr_servers) expire_delay = afs_cell_gc_delay; debug_id = cell->debug_id; r = refcount_read(&cell->ref); a = atomic_dec_return(&cell->active); trace_afs_cell(debug_id, r, a, reason); WARN_ON(a == 0); if (a == 1) /* 'cell' may now be garbage collected. */ afs_set_cell_timer(net, expire_delay); } /* * Note that a cell has been seen. */ void afs_see_cell(struct afs_cell *cell, enum afs_cell_trace reason) { int r, a; r = refcount_read(&cell->ref); a = atomic_read(&cell->active); trace_afs_cell(cell->debug_id, r, a, reason); } /* * Queue a cell for management, giving the workqueue a ref to hold. */ void afs_queue_cell(struct afs_cell *cell, enum afs_cell_trace reason) { afs_get_cell(cell, reason); if (!queue_work(afs_wq, &cell->manager)) afs_put_cell(cell, afs_cell_trace_put_queue_fail); } /* * Allocate a key to use as a placeholder for anonymous user security. */ static int afs_alloc_anon_key(struct afs_cell *cell) { struct key *key; char keyname[4 + AFS_MAXCELLNAME + 1], *cp, *dp; /* Create a key to represent an anonymous user. */ memcpy(keyname, "afs@", 4); dp = keyname + 4; cp = cell->name; do { *dp++ = tolower(*cp); } while (*cp++); key = rxrpc_get_null_key(keyname); if (IS_ERR(key)) return PTR_ERR(key); cell->anonymous_key = key; _debug("anon key %p{%x}", cell->anonymous_key, key_serial(cell->anonymous_key)); return 0; } /* * Activate a cell. */ static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell) { struct hlist_node **p; struct afs_cell *pcell; int ret; if (!cell->anonymous_key) { ret = afs_alloc_anon_key(cell); if (ret < 0) return ret; } ret = afs_proc_cell_setup(cell); if (ret < 0) return ret; mutex_lock(&net->proc_cells_lock); for (p = &net->proc_cells.first; *p; p = &(*p)->next) { pcell = hlist_entry(*p, struct afs_cell, proc_link); if (strcmp(cell->name, pcell->name) < 0) break; } cell->proc_link.pprev = p; cell->proc_link.next = *p; rcu_assign_pointer(*p, &cell->proc_link.next); if (cell->proc_link.next) cell->proc_link.next->pprev = &cell->proc_link.next; afs_dynroot_mkdir(net, cell); mutex_unlock(&net->proc_cells_lock); return 0; } /* * Deactivate a cell. 
*/ static void afs_deactivate_cell(struct afs_net *net, struct afs_cell *cell) { _enter("%s", cell->name); afs_proc_cell_remove(cell); mutex_lock(&net->proc_cells_lock); hlist_del_rcu(&cell->proc_link); afs_dynroot_rmdir(net, cell); mutex_unlock(&net->proc_cells_lock); _leave(""); } /* * Manage a cell record, initialising and destroying it, maintaining its DNS * records. */ static void afs_manage_cell(struct afs_cell *cell) { struct afs_net *net = cell->net; int ret, active; _enter("%s", cell->name); again: _debug("state %u", cell->state); switch (cell->state) { case AFS_CELL_INACTIVE: case AFS_CELL_FAILED: down_write(&net->cells_lock); active = 1; if (atomic_try_cmpxchg_relaxed(&cell->active, &active, 0)) { rb_erase(&cell->net_node, &net->cells); trace_afs_cell(cell->debug_id, refcount_read(&cell->ref), 0, afs_cell_trace_unuse_delete); smp_store_release(&cell->state, AFS_CELL_REMOVED); } up_write(&net->cells_lock); if (cell->state == AFS_CELL_REMOVED) { wake_up_var(&cell->state); goto final_destruction; } if (cell->state == AFS_CELL_FAILED) goto done; smp_store_release(&cell->state, AFS_CELL_UNSET); wake_up_var(&cell->state); goto again; case AFS_CELL_UNSET: smp_store_release(&cell->state, AFS_CELL_ACTIVATING); wake_up_var(&cell->state); goto again; case AFS_CELL_ACTIVATING: ret = afs_activate_cell(net, cell); if (ret < 0) goto activation_failed; smp_store_release(&cell->state, AFS_CELL_ACTIVE); wake_up_var(&cell->state); goto again; case AFS_CELL_ACTIVE: if (atomic_read(&cell->active) > 1) { if (test_and_clear_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags)) { ret = afs_update_cell(cell); if (ret < 0) cell->error = ret; } goto done; } smp_store_release(&cell->state, AFS_CELL_DEACTIVATING); wake_up_var(&cell->state); goto again; case AFS_CELL_DEACTIVATING: if (atomic_read(&cell->active) > 1) goto reverse_deactivation; afs_deactivate_cell(net, cell); smp_store_release(&cell->state, AFS_CELL_INACTIVE); wake_up_var(&cell->state); goto again; case AFS_CELL_REMOVED: goto done; default: break; } _debug("bad state %u", cell->state); BUG(); /* Unhandled state */ activation_failed: cell->error = ret; afs_deactivate_cell(net, cell); smp_store_release(&cell->state, AFS_CELL_FAILED); /* vs error */ wake_up_var(&cell->state); goto again; reverse_deactivation: smp_store_release(&cell->state, AFS_CELL_ACTIVE); wake_up_var(&cell->state); _leave(" [deact->act]"); return; done: _leave(" [done %u]", cell->state); return; final_destruction: /* The root volume is pinning the cell */ afs_put_volume(cell->net, cell->root_volume, afs_volume_trace_put_cell_root); cell->root_volume = NULL; afs_put_cell(cell, afs_cell_trace_put_destroy); } static void afs_manage_cell_work(struct work_struct *work) { struct afs_cell *cell = container_of(work, struct afs_cell, manager); afs_manage_cell(cell); afs_put_cell(cell, afs_cell_trace_put_queue_work); } /* * Manage the records of cells known to a network namespace. This includes * updating the DNS records and garbage collecting unused cells that were * automatically added. * * Note that constructed cell records may only be removed from net->cells by * this work item, so it is safe for this work item to stash a cursor pointing * into the tree and then return to caller (provided it skips cells that are * still under construction). * * Note also that we were given an increment on net->cells_outstanding by * whoever queued us that we need to deal with before returning. 
*/ void afs_manage_cells(struct work_struct *work) { struct afs_net *net = container_of(work, struct afs_net, cells_manager); struct rb_node *cursor; time64_t now = ktime_get_real_seconds(), next_manage = TIME64_MAX; bool purging = !net->live; _enter(""); /* Trawl the cell database looking for cells that have expired from * lack of use and cells whose DNS results have expired and dispatch * their managers. */ down_read(&net->cells_lock); for (cursor = rb_first(&net->cells); cursor; cursor = rb_next(cursor)) { struct afs_cell *cell = rb_entry(cursor, struct afs_cell, net_node); unsigned active; bool sched_cell = false; active = atomic_read(&cell->active); trace_afs_cell(cell->debug_id, refcount_read(&cell->ref), active, afs_cell_trace_manage); ASSERTCMP(active, >=, 1); if (purging) { if (test_and_clear_bit(AFS_CELL_FL_NO_GC, &cell->flags)) { active = atomic_dec_return(&cell->active); trace_afs_cell(cell->debug_id, refcount_read(&cell->ref), active, afs_cell_trace_unuse_pin); } } if (active == 1) { struct afs_vlserver_list *vllist; time64_t expire_at = cell->last_inactive; read_lock(&cell->vl_servers_lock); vllist = rcu_dereference_protected( cell->vl_servers, lockdep_is_held(&cell->vl_servers_lock)); if (vllist->nr_servers > 0) expire_at += afs_cell_gc_delay; read_unlock(&cell->vl_servers_lock); if (purging || expire_at <= now) sched_cell = true; else if (expire_at < next_manage) next_manage = expire_at; } if (!purging) { if (test_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags)) sched_cell = true; } if (sched_cell) afs_queue_cell(cell, afs_cell_trace_get_queue_manage); } up_read(&net->cells_lock); /* Update the timer on the way out. We have to pass an increment on * cells_outstanding in the namespace that we are in to the timer or * the work scheduler. */ if (!purging && next_manage < TIME64_MAX) { now = ktime_get_real_seconds(); if (next_manage - now <= 0) { if (queue_work(afs_wq, &net->cells_manager)) atomic_inc(&net->cells_outstanding); } else { afs_set_cell_timer(net, next_manage - now); } } afs_dec_cells_outstanding(net); _leave(" [%d]", atomic_read(&net->cells_outstanding)); } /* * Purge in-memory cell database. */ void afs_cell_purge(struct afs_net *net) { struct afs_cell *ws; _enter(""); down_write(&net->cells_lock); ws = net->ws_cell; net->ws_cell = NULL; up_write(&net->cells_lock); afs_unuse_cell(net, ws, afs_cell_trace_unuse_ws); _debug("del timer"); if (del_timer_sync(&net->cells_timer)) atomic_dec(&net->cells_outstanding); _debug("kick mgr"); afs_queue_cell_manager(net); _debug("wait"); wait_var_event(&net->cells_outstanding, !atomic_read(&net->cells_outstanding)); _leave(""); }
linux-master
fs/afs/cell.c
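/*
 * A standalone illustrative sketch, not taken from fs/afs/cell.c above: the
 * ordering used when walking the cell rb-tree in afs_find_cell_locked() and
 * afs_lookup_cell() - a case-insensitive comparison over the shorter of the
 * two lengths, with the name length as the tie-breaker. example_cell_cmp()
 * and the sample names are invented for this demonstration.
 */
#include <stdio.h>
#include <string.h>
#include <strings.h>

static int example_cell_cmp(const char *a, size_t alen, const char *b, size_t blen)
{
	size_t min_len = alen < blen ? alen : blen;
	int n = strncasecmp(a, b, min_len);

	if (n == 0)
		n = (int)alen - (int)blen;	/* shorter name sorts first */
	return n;
}

int main(void)
{
	/* Same cell name, different case: compares equal. */
	printf("%d\n", example_cell_cmp("example.org", 11, "EXAMPLE.ORG", 11));
	/* Shared prefix: the shorter name sorts first. */
	printf("%d\n", example_cell_cmp("example.org", 11, "example.organisation", 20));
	return 0;
}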
// SPDX-License-Identifier: GPL-2.0-or-later /* miscellaneous bits * * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells ([email protected]) */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/errno.h> #include "internal.h" #include "afs_fs.h" #include "protocol_uae.h" /* * convert an AFS abort code to a Linux error number */ int afs_abort_to_error(u32 abort_code) { switch (abort_code) { /* Low errno codes inserted into abort namespace */ case 13: return -EACCES; case 27: return -EFBIG; case 30: return -EROFS; /* VICE "special error" codes; 101 - 111 */ case VSALVAGE: return -EIO; case VNOVNODE: return -ENOENT; case VNOVOL: return -ENOMEDIUM; case VVOLEXISTS: return -EEXIST; case VNOSERVICE: return -EIO; case VOFFLINE: return -ENOENT; case VONLINE: return -EEXIST; case VDISKFULL: return -ENOSPC; case VOVERQUOTA: return -EDQUOT; case VBUSY: return -EBUSY; case VMOVED: return -ENXIO; /* Volume Location server errors */ case AFSVL_IDEXIST: return -EEXIST; case AFSVL_IO: return -EREMOTEIO; case AFSVL_NAMEEXIST: return -EEXIST; case AFSVL_CREATEFAIL: return -EREMOTEIO; case AFSVL_NOENT: return -ENOMEDIUM; case AFSVL_EMPTY: return -ENOMEDIUM; case AFSVL_ENTDELETED: return -ENOMEDIUM; case AFSVL_BADNAME: return -EINVAL; case AFSVL_BADINDEX: return -EINVAL; case AFSVL_BADVOLTYPE: return -EINVAL; case AFSVL_BADSERVER: return -EINVAL; case AFSVL_BADPARTITION: return -EINVAL; case AFSVL_REPSFULL: return -EFBIG; case AFSVL_NOREPSERVER: return -ENOENT; case AFSVL_DUPREPSERVER: return -EEXIST; case AFSVL_RWNOTFOUND: return -ENOENT; case AFSVL_BADREFCOUNT: return -EINVAL; case AFSVL_SIZEEXCEEDED: return -EINVAL; case AFSVL_BADENTRY: return -EINVAL; case AFSVL_BADVOLIDBUMP: return -EINVAL; case AFSVL_IDALREADYHASHED: return -EINVAL; case AFSVL_ENTRYLOCKED: return -EBUSY; case AFSVL_BADVOLOPER: return -EBADRQC; case AFSVL_BADRELLOCKTYPE: return -EINVAL; case AFSVL_RERELEASE: return -EREMOTEIO; case AFSVL_BADSERVERFLAG: return -EINVAL; case AFSVL_PERM: return -EACCES; case AFSVL_NOMEM: return -EREMOTEIO; /* Unified AFS error table */ case UAEPERM: return -EPERM; case UAENOENT: return -ENOENT; case UAEAGAIN: return -EAGAIN; case UAEACCES: return -EACCES; case UAEBUSY: return -EBUSY; case UAEEXIST: return -EEXIST; case UAENOTDIR: return -ENOTDIR; case UAEISDIR: return -EISDIR; case UAEFBIG: return -EFBIG; case UAENOSPC: return -ENOSPC; case UAEROFS: return -EROFS; case UAEMLINK: return -EMLINK; case UAEDEADLK: return -EDEADLK; case UAENAMETOOLONG: return -ENAMETOOLONG; case UAENOLCK: return -ENOLCK; case UAENOTEMPTY: return -ENOTEMPTY; case UAELOOP: return -ELOOP; case UAEOVERFLOW: return -EOVERFLOW; case UAENOMEDIUM: return -ENOMEDIUM; case UAEDQUOT: return -EDQUOT; /* RXKAD abort codes; from include/rxrpc/packet.h. ET "RXK" == 0x1260B00 */ case RXKADINCONSISTENCY: return -EPROTO; case RXKADPACKETSHORT: return -EPROTO; case RXKADLEVELFAIL: return -EKEYREJECTED; case RXKADTICKETLEN: return -EKEYREJECTED; case RXKADOUTOFSEQUENCE: return -EPROTO; case RXKADNOAUTH: return -EKEYREJECTED; case RXKADBADKEY: return -EKEYREJECTED; case RXKADBADTICKET: return -EKEYREJECTED; case RXKADUNKNOWNKEY: return -EKEYREJECTED; case RXKADEXPIRED: return -EKEYEXPIRED; case RXKADSEALEDINCON: return -EKEYREJECTED; case RXKADDATALEN: return -EKEYREJECTED; case RXKADILLEGALLEVEL: return -EKEYREJECTED; case RXGEN_OPCODE: return -ENOTSUPP; default: return -EREMOTEIO; } } /* * Select the error to report from a set of errors. 
 */
void afs_prioritise_error(struct afs_error *e, int error, u32 abort_code)
{
	switch (error) {
	case 0:
		return;
	default:
		if (e->error == -ETIMEDOUT ||
		    e->error == -ETIME)
			return;
		fallthrough;
	case -ETIMEDOUT:
	case -ETIME:
		if (e->error == -ENOMEM ||
		    e->error == -ENONET)
			return;
		fallthrough;
	case -ENOMEM:
	case -ENONET:
		if (e->error == -ERFKILL)
			return;
		fallthrough;
	case -ERFKILL:
		if (e->error == -EADDRNOTAVAIL)
			return;
		fallthrough;
	case -EADDRNOTAVAIL:
		if (e->error == -ENETUNREACH)
			return;
		fallthrough;
	case -ENETUNREACH:
		if (e->error == -EHOSTUNREACH)
			return;
		fallthrough;
	case -EHOSTUNREACH:
		if (e->error == -EHOSTDOWN)
			return;
		fallthrough;
	case -EHOSTDOWN:
		if (e->error == -ECONNREFUSED)
			return;
		fallthrough;
	case -ECONNREFUSED:
		if (e->error == -ECONNRESET)
			return;
		fallthrough;
	case -ECONNRESET: /* Responded, but call expired. */
		if (e->responded)
			return;
		e->error = error;
		return;
	case -ECONNABORTED:
		error = afs_abort_to_error(abort_code);
		fallthrough;
	case -ENETRESET: /* Responded, but we seem to have changed address */
		e->responded = true;
		e->error = error;
		return;
	}
}
linux-master
fs/afs/misc.c
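/*
 * A standalone illustrative sketch, not taken from fs/afs/misc.c above: the
 * fallthrough chain in afs_prioritise_error() behaves like a fixed ranking in
 * which a more specific failure overrides a less specific one when results
 * from several servers are folded together. The table and helpers below are
 * a simplified rendering of that order; the real function also converts
 * -ECONNABORTED abort codes and tracks a "responded" flag, which this sketch
 * omits.
 */
#include <errno.h>
#include <stdio.h>

/* Weakest to strongest, mirroring the order of the fallthrough chain. */
static const int example_order[] = {
	-ETIMEDOUT, -ETIME,
	-ENOMEM, -ENONET,
	-ERFKILL,
	-EADDRNOTAVAIL,
	-ENETUNREACH,
	-EHOSTUNREACH,
	-EHOSTDOWN,
	-ECONNREFUSED,
	-ECONNRESET,
};

static int example_rank(int err)
{
	unsigned int i;

	for (i = 0; i < sizeof(example_order) / sizeof(example_order[0]); i++)
		if (example_order[i] == err)
			return (int)i;
	return -1;	/* unlisted errors rank lowest here */
}

/* Keep whichever of the two errors ranks higher. */
static void example_fold(int *best, int next)
{
	if (*best == 0 || example_rank(next) > example_rank(*best))
		*best = next;
}

int main(void)
{
	int results[] = { -ETIMEDOUT, -ENETUNREACH, -ECONNREFUSED };
	int best = 0;
	unsigned int i;

	for (i = 0; i < sizeof(results) / sizeof(results[0]); i++)
		example_fold(&best, results[i]);

	/* -ECONNREFUSED wins over the timeout and the unreachable network. */
	printf("folded error = %d (ECONNREFUSED = %d)\n", best, -ECONNREFUSED);
	return 0;
}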
// SPDX-License-Identifier: GPL-2.0-or-later /* Handle vlserver selection and rotation. * * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved. * Written by David Howells ([email protected]) */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/sched/signal.h> #include "internal.h" #include "afs_vl.h" /* * Begin an operation on a volume location server. */ bool afs_begin_vlserver_operation(struct afs_vl_cursor *vc, struct afs_cell *cell, struct key *key) { memset(vc, 0, sizeof(*vc)); vc->cell = cell; vc->key = key; vc->error = -EDESTADDRREQ; vc->ac.error = SHRT_MAX; if (signal_pending(current)) { vc->error = -EINTR; vc->flags |= AFS_VL_CURSOR_STOP; return false; } return true; } /* * Begin iteration through a server list, starting with the last used server if * possible, or the last recorded good server if not. */ static bool afs_start_vl_iteration(struct afs_vl_cursor *vc) { struct afs_cell *cell = vc->cell; unsigned int dns_lookup_count; if (cell->dns_source == DNS_RECORD_UNAVAILABLE || cell->dns_expiry <= ktime_get_real_seconds()) { dns_lookup_count = smp_load_acquire(&cell->dns_lookup_count); set_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags); afs_queue_cell(cell, afs_cell_trace_get_queue_dns); if (cell->dns_source == DNS_RECORD_UNAVAILABLE) { if (wait_var_event_interruptible( &cell->dns_lookup_count, smp_load_acquire(&cell->dns_lookup_count) != dns_lookup_count) < 0) { vc->error = -ERESTARTSYS; return false; } } /* Status load is ordered after lookup counter load */ if (cell->dns_source == DNS_RECORD_UNAVAILABLE) { vc->error = -EDESTADDRREQ; return false; } } read_lock(&cell->vl_servers_lock); vc->server_list = afs_get_vlserverlist( rcu_dereference_protected(cell->vl_servers, lockdep_is_held(&cell->vl_servers_lock))); read_unlock(&cell->vl_servers_lock); if (!vc->server_list->nr_servers) return false; vc->untried = (1UL << vc->server_list->nr_servers) - 1; vc->index = -1; return true; } /* * Select the vlserver to use. May be called multiple times to rotate * through the vlservers. */ bool afs_select_vlserver(struct afs_vl_cursor *vc) { struct afs_addr_list *alist; struct afs_vlserver *vlserver; struct afs_error e; u32 rtt; int error = vc->ac.error, i; _enter("%lx[%d],%lx[%d],%d,%d", vc->untried, vc->index, vc->ac.tried, vc->ac.index, error, vc->ac.abort_code); if (vc->flags & AFS_VL_CURSOR_STOP) { _leave(" = f [stopped]"); return false; } vc->nr_iterations++; /* Evaluate the result of the previous operation, if there was one. */ switch (error) { case SHRT_MAX: goto start; default: case 0: /* Success or local failure. Stop. */ vc->error = error; vc->flags |= AFS_VL_CURSOR_STOP; _leave(" = f [okay/local %d]", vc->ac.error); return false; case -ECONNABORTED: /* The far side rejected the operation on some grounds. This * might involve the server being busy or the volume having been moved. */ switch (vc->ac.abort_code) { case AFSVL_IO: case AFSVL_BADVOLOPER: case AFSVL_NOMEM: /* The server went weird. 
*/ vc->error = -EREMOTEIO; //write_lock(&vc->cell->vl_servers_lock); //vc->server_list->weird_mask |= 1 << vc->index; //write_unlock(&vc->cell->vl_servers_lock); goto next_server; default: vc->error = afs_abort_to_error(vc->ac.abort_code); goto failed; } case -ERFKILL: case -EADDRNOTAVAIL: case -ENETUNREACH: case -EHOSTUNREACH: case -EHOSTDOWN: case -ECONNREFUSED: case -ETIMEDOUT: case -ETIME: _debug("no conn %d", error); vc->error = error; goto iterate_address; case -ECONNRESET: _debug("call reset"); vc->error = error; vc->flags |= AFS_VL_CURSOR_RETRY; goto next_server; case -EOPNOTSUPP: _debug("notsupp"); goto next_server; } restart_from_beginning: _debug("restart"); afs_end_cursor(&vc->ac); afs_put_vlserverlist(vc->cell->net, vc->server_list); vc->server_list = NULL; if (vc->flags & AFS_VL_CURSOR_RETRIED) goto failed; vc->flags |= AFS_VL_CURSOR_RETRIED; start: _debug("start"); if (!afs_start_vl_iteration(vc)) goto failed; error = afs_send_vl_probes(vc->cell->net, vc->key, vc->server_list); if (error < 0) goto failed_set_error; pick_server: _debug("pick [%lx]", vc->untried); error = afs_wait_for_vl_probes(vc->server_list, vc->untried); if (error < 0) goto failed_set_error; /* Pick the untried server with the lowest RTT. */ vc->index = vc->server_list->preferred; if (test_bit(vc->index, &vc->untried)) goto selected_server; vc->index = -1; rtt = U32_MAX; for (i = 0; i < vc->server_list->nr_servers; i++) { struct afs_vlserver *s = vc->server_list->servers[i].server; if (!test_bit(i, &vc->untried) || !test_bit(AFS_VLSERVER_FL_RESPONDING, &s->flags)) continue; if (s->probe.rtt < rtt) { vc->index = i; rtt = s->probe.rtt; } } if (vc->index == -1) goto no_more_servers; selected_server: _debug("use %d", vc->index); __clear_bit(vc->index, &vc->untried); /* We're starting on a different vlserver from the list. We need to * check it, find its address list and probe its capabilities before we * use it. */ ASSERTCMP(vc->ac.alist, ==, NULL); vlserver = vc->server_list->servers[vc->index].server; vc->server = vlserver; _debug("USING VLSERVER: %s", vlserver->name); read_lock(&vlserver->lock); alist = rcu_dereference_protected(vlserver->addresses, lockdep_is_held(&vlserver->lock)); afs_get_addrlist(alist); read_unlock(&vlserver->lock); memset(&vc->ac, 0, sizeof(vc->ac)); if (!vc->ac.alist) vc->ac.alist = alist; else afs_put_addrlist(alist); vc->ac.index = -1; iterate_address: ASSERT(vc->ac.alist); /* Iterate over the current server's address list to try and find an * address on which it will respond to us. */ if (!afs_iterate_addresses(&vc->ac)) goto next_server; _debug("VL address %d/%d", vc->ac.index, vc->ac.alist->nr_addrs); _leave(" = t %pISpc", &vc->ac.alist->addrs[vc->ac.index].transport); return true; next_server: _debug("next"); afs_end_cursor(&vc->ac); goto pick_server; no_more_servers: /* That's all the servers poked to no good effect. Try again if some * of them were busy. */ if (vc->flags & AFS_VL_CURSOR_RETRY) goto restart_from_beginning; e.error = -EDESTADDRREQ; e.responded = false; for (i = 0; i < vc->server_list->nr_servers; i++) { struct afs_vlserver *s = vc->server_list->servers[i].server; if (test_bit(AFS_VLSERVER_FL_RESPONDING, &s->flags)) e.responded = true; afs_prioritise_error(&e, READ_ONCE(s->probe.error), s->probe.abort_code); } error = e.error; failed_set_error: vc->error = error; failed: vc->flags |= AFS_VL_CURSOR_STOP; afs_end_cursor(&vc->ac); _leave(" = f [failed %d]", vc->error); return false; } /* * Dump cursor state in the case of the error being EDESTADDRREQ. 
*/ static void afs_vl_dump_edestaddrreq(const struct afs_vl_cursor *vc) { static int count; int i; if (!IS_ENABLED(CONFIG_AFS_DEBUG_CURSOR) || count > 3) return; count++; rcu_read_lock(); pr_notice("EDESTADDR occurred\n"); pr_notice("VC: ut=%lx ix=%u ni=%hu fl=%hx err=%hd\n", vc->untried, vc->index, vc->nr_iterations, vc->flags, vc->error); if (vc->server_list) { const struct afs_vlserver_list *sl = vc->server_list; pr_notice("VC: SL nr=%u ix=%u\n", sl->nr_servers, sl->index); for (i = 0; i < sl->nr_servers; i++) { const struct afs_vlserver *s = sl->servers[i].server; pr_notice("VC: server %s+%hu fl=%lx E=%hd\n", s->name, s->port, s->flags, s->probe.error); if (s->addresses) { const struct afs_addr_list *a = rcu_dereference(s->addresses); pr_notice("VC: - nr=%u/%u/%u pf=%u\n", a->nr_ipv4, a->nr_addrs, a->max_addrs, a->preferred); pr_notice("VC: - R=%lx F=%lx\n", a->responded, a->failed); if (a == vc->ac.alist) pr_notice("VC: - current\n"); } } } pr_notice("AC: t=%lx ax=%u ac=%d er=%d r=%u ni=%u\n", vc->ac.tried, vc->ac.index, vc->ac.abort_code, vc->ac.error, vc->ac.responded, vc->ac.nr_iterations); rcu_read_unlock(); } /* * Tidy up a volume location server cursor and unlock the vnode. */ int afs_end_vlserver_operation(struct afs_vl_cursor *vc) { struct afs_net *net = vc->cell->net; if (vc->error == -EDESTADDRREQ || vc->error == -EADDRNOTAVAIL || vc->error == -ENETUNREACH || vc->error == -EHOSTUNREACH) afs_vl_dump_edestaddrreq(vc); afs_end_cursor(&vc->ac); afs_put_vlserverlist(net, vc->server_list); if (vc->error == -ECONNABORTED) vc->error = afs_abort_to_error(vc->ac.abort_code); return vc->error; }
linux-master
fs/afs/vl_rotate.c
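/*
 * A standalone illustrative sketch, not taken from fs/afs/vl_rotate.c above:
 * the server-selection step of afs_select_vlserver() - prefer the recorded
 * favourite if it is still untried, otherwise take the untried, responding
 * server with the lowest probe RTT, and clear its bit so it is not retried.
 * struct example_server, example_pick() and the sample data are invented for
 * this demonstration and are not the kernel structures.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct example_server {
	uint32_t rtt;		/* probe round-trip time */
	bool responding;	/* the probe got an answer */
};

static int example_pick(const struct example_server *s, int nr,
			unsigned long *untried, int preferred)
{
	uint32_t best_rtt = UINT32_MAX;
	int i, pick = -1;

	if (preferred >= 0 && (*untried & (1UL << preferred))) {
		pick = preferred;
	} else {
		for (i = 0; i < nr; i++) {
			if (!(*untried & (1UL << i)) || !s[i].responding)
				continue;
			if (s[i].rtt < best_rtt) {
				best_rtt = s[i].rtt;
				pick = i;
			}
		}
	}
	if (pick >= 0)
		*untried &= ~(1UL << pick);	/* don't pick it again */
	return pick;
}

int main(void)
{
	struct example_server servers[] = {
		{ .rtt = 90, .responding = true },
		{ .rtt = 20, .responding = true },
		{ .rtt = 10, .responding = false },	/* fast but silent */
	};
	unsigned long untried = (1UL << 3) - 1;

	printf("first pick  = %d\n", example_pick(servers, 3, &untried, -1));	/* 1 */
	printf("second pick = %d\n", example_pick(servers, 3, &untried, -1));	/* 0 */
	return 0;
}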
// SPDX-License-Identifier: GPL-2.0-or-later /* AFS security handling * * Copyright (C) 2007, 2017 Red Hat, Inc. All Rights Reserved. * Written by David Howells ([email protected]) */ #include <linux/init.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/ctype.h> #include <linux/sched.h> #include <linux/hashtable.h> #include <keys/rxrpc-type.h> #include "internal.h" static DEFINE_HASHTABLE(afs_permits_cache, 10); static DEFINE_SPINLOCK(afs_permits_lock); /* * get a key */ struct key *afs_request_key(struct afs_cell *cell) { struct key *key; _enter("{%x}", key_serial(cell->anonymous_key)); _debug("key %s", cell->anonymous_key->description); key = request_key_net(&key_type_rxrpc, cell->anonymous_key->description, cell->net->net, NULL); if (IS_ERR(key)) { if (PTR_ERR(key) != -ENOKEY) { _leave(" = %ld", PTR_ERR(key)); return key; } /* act as anonymous user */ _leave(" = {%x} [anon]", key_serial(cell->anonymous_key)); return key_get(cell->anonymous_key); } else { /* act as authorised user */ _leave(" = {%x} [auth]", key_serial(key)); return key; } } /* * Get a key when pathwalk is in rcuwalk mode. */ struct key *afs_request_key_rcu(struct afs_cell *cell) { struct key *key; _enter("{%x}", key_serial(cell->anonymous_key)); _debug("key %s", cell->anonymous_key->description); key = request_key_net_rcu(&key_type_rxrpc, cell->anonymous_key->description, cell->net->net); if (IS_ERR(key)) { if (PTR_ERR(key) != -ENOKEY) { _leave(" = %ld", PTR_ERR(key)); return key; } /* act as anonymous user */ _leave(" = {%x} [anon]", key_serial(cell->anonymous_key)); return key_get(cell->anonymous_key); } else { /* act as authorised user */ _leave(" = {%x} [auth]", key_serial(key)); return key; } } /* * Dispose of a list of permits. */ static void afs_permits_rcu(struct rcu_head *rcu) { struct afs_permits *permits = container_of(rcu, struct afs_permits, rcu); int i; for (i = 0; i < permits->nr_permits; i++) key_put(permits->permits[i].key); kfree(permits); } /* * Discard a permission cache. */ void afs_put_permits(struct afs_permits *permits) { if (permits && refcount_dec_and_test(&permits->usage)) { spin_lock(&afs_permits_lock); hash_del_rcu(&permits->hash_node); spin_unlock(&afs_permits_lock); call_rcu(&permits->rcu, afs_permits_rcu); } } /* * Clear a permit cache on callback break. */ void afs_clear_permits(struct afs_vnode *vnode) { struct afs_permits *permits; spin_lock(&vnode->lock); permits = rcu_dereference_protected(vnode->permit_cache, lockdep_is_held(&vnode->lock)); RCU_INIT_POINTER(vnode->permit_cache, NULL); spin_unlock(&vnode->lock); afs_put_permits(permits); } /* * Hash a list of permits. Use simple addition to make it easy to add an extra * one at an as-yet indeterminate position in the list. */ static void afs_hash_permits(struct afs_permits *permits) { unsigned long h = permits->nr_permits; int i; for (i = 0; i < permits->nr_permits; i++) { h += (unsigned long)permits->permits[i].key / sizeof(void *); h += permits->permits[i].access; } permits->h = h; } /* * Cache the CallerAccess result obtained from doing a fileserver operation * that returned a vnode status for a particular key. If a callback break * occurs whilst the operation was in progress then we have to ditch the cache * as the ACL *may* have changed. 
*/ void afs_cache_permit(struct afs_vnode *vnode, struct key *key, unsigned int cb_break, struct afs_status_cb *scb) { struct afs_permits *permits, *xpermits, *replacement, *zap, *new = NULL; afs_access_t caller_access = scb->status.caller_access; size_t size = 0; bool changed = false; int i, j; _enter("{%llx:%llu},%x,%x", vnode->fid.vid, vnode->fid.vnode, key_serial(key), caller_access); rcu_read_lock(); /* Check for the common case first: We got back the same access as last * time we tried and already have it recorded. */ permits = rcu_dereference(vnode->permit_cache); if (permits) { if (!permits->invalidated) { for (i = 0; i < permits->nr_permits; i++) { if (permits->permits[i].key < key) continue; if (permits->permits[i].key > key) break; if (permits->permits[i].access != caller_access) { changed = true; break; } if (afs_cb_is_broken(cb_break, vnode)) { changed = true; break; } /* The cache is still good. */ rcu_read_unlock(); return; } } changed |= permits->invalidated; size = permits->nr_permits; /* If this set of permits is now wrong, clear the permits * pointer so that no one tries to use the stale information. */ if (changed) { spin_lock(&vnode->lock); if (permits != rcu_access_pointer(vnode->permit_cache)) goto someone_else_changed_it_unlock; RCU_INIT_POINTER(vnode->permit_cache, NULL); spin_unlock(&vnode->lock); afs_put_permits(permits); permits = NULL; size = 0; } } if (afs_cb_is_broken(cb_break, vnode)) goto someone_else_changed_it; /* We need a ref on any permits list we want to copy as we'll have to * drop the lock to do memory allocation. */ if (permits && !refcount_inc_not_zero(&permits->usage)) goto someone_else_changed_it; rcu_read_unlock(); /* Speculatively create a new list with the revised permission set. We * discard this if we find an extant match already in the hash, but * it's easier to compare with memcmp this way. * * We fill in the key pointers at this time, but we don't get the refs * yet. 
*/ size++; new = kzalloc(struct_size(new, permits, size), GFP_NOFS); if (!new) goto out_put; refcount_set(&new->usage, 1); new->nr_permits = size; i = j = 0; if (permits) { for (i = 0; i < permits->nr_permits; i++) { if (j == i && permits->permits[i].key > key) { new->permits[j].key = key; new->permits[j].access = caller_access; j++; } new->permits[j].key = permits->permits[i].key; new->permits[j].access = permits->permits[i].access; j++; } } if (j == i) { new->permits[j].key = key; new->permits[j].access = caller_access; } afs_hash_permits(new); /* Now see if the permit list we want is actually already available */ spin_lock(&afs_permits_lock); hash_for_each_possible(afs_permits_cache, xpermits, hash_node, new->h) { if (xpermits->h != new->h || xpermits->invalidated || xpermits->nr_permits != new->nr_permits || memcmp(xpermits->permits, new->permits, new->nr_permits * sizeof(struct afs_permit)) != 0) continue; if (refcount_inc_not_zero(&xpermits->usage)) { replacement = xpermits; goto found; } break; } for (i = 0; i < new->nr_permits; i++) key_get(new->permits[i].key); hash_add_rcu(afs_permits_cache, &new->hash_node, new->h); replacement = new; new = NULL; found: spin_unlock(&afs_permits_lock); kfree(new); rcu_read_lock(); spin_lock(&vnode->lock); zap = rcu_access_pointer(vnode->permit_cache); if (!afs_cb_is_broken(cb_break, vnode) && zap == permits) rcu_assign_pointer(vnode->permit_cache, replacement); else zap = replacement; spin_unlock(&vnode->lock); rcu_read_unlock(); afs_put_permits(zap); out_put: afs_put_permits(permits); return; someone_else_changed_it_unlock: spin_unlock(&vnode->lock); someone_else_changed_it: /* Someone else changed the cache under us - don't recheck at this * time. */ rcu_read_unlock(); return; } static bool afs_check_permit_rcu(struct afs_vnode *vnode, struct key *key, afs_access_t *_access) { const struct afs_permits *permits; int i; _enter("{%llx:%llu},%x", vnode->fid.vid, vnode->fid.vnode, key_serial(key)); /* check the permits to see if we've got one yet */ if (key == vnode->volume->cell->anonymous_key) { *_access = vnode->status.anon_access; _leave(" = t [anon %x]", *_access); return true; } permits = rcu_dereference(vnode->permit_cache); if (permits) { for (i = 0; i < permits->nr_permits; i++) { if (permits->permits[i].key < key) continue; if (permits->permits[i].key > key) break; *_access = permits->permits[i].access; _leave(" = %u [perm %x]", !permits->invalidated, *_access); return !permits->invalidated; } } _leave(" = f"); return false; } /* * check with the fileserver to see if the directory or parent directory is * permitted to be accessed with this authorisation, and if so, what access it * is granted */ int afs_check_permit(struct afs_vnode *vnode, struct key *key, afs_access_t *_access) { struct afs_permits *permits; bool valid = false; int i, ret; _enter("{%llx:%llu},%x", vnode->fid.vid, vnode->fid.vnode, key_serial(key)); /* check the permits to see if we've got one yet */ if (key == vnode->volume->cell->anonymous_key) { _debug("anon"); *_access = vnode->status.anon_access; valid = true; } else { rcu_read_lock(); permits = rcu_dereference(vnode->permit_cache); if (permits) { for (i = 0; i < permits->nr_permits; i++) { if (permits->permits[i].key < key) continue; if (permits->permits[i].key > key) break; *_access = permits->permits[i].access; valid = !permits->invalidated; break; } } rcu_read_unlock(); } if (!valid) { /* Check the status on the file we're actually interested in * (the post-processing will cache the result). 
*/ _debug("no valid permit"); ret = afs_fetch_status(vnode, key, false, _access); if (ret < 0) { *_access = 0; _leave(" = %d", ret); return ret; } } _leave(" = 0 [access %x]", *_access); return 0; } /* * check the permissions on an AFS file * - AFS ACLs are attached to directories only, and a file is controlled by its * parent directory's ACL */ int afs_permission(struct mnt_idmap *idmap, struct inode *inode, int mask) { struct afs_vnode *vnode = AFS_FS_I(inode); afs_access_t access; struct key *key; int ret = 0; _enter("{{%llx:%llu},%lx},%x,", vnode->fid.vid, vnode->fid.vnode, vnode->flags, mask); if (mask & MAY_NOT_BLOCK) { key = afs_request_key_rcu(vnode->volume->cell); if (IS_ERR(key)) return -ECHILD; ret = -ECHILD; if (!afs_check_validity(vnode) || !afs_check_permit_rcu(vnode, key, &access)) goto error; } else { key = afs_request_key(vnode->volume->cell); if (IS_ERR(key)) { _leave(" = %ld [key]", PTR_ERR(key)); return PTR_ERR(key); } ret = afs_validate(vnode, key); if (ret < 0) goto error; /* check the permits to see if we've got one yet */ ret = afs_check_permit(vnode, key, &access); if (ret < 0) goto error; } /* interpret the access mask */ _debug("REQ %x ACC %x on %s", mask, access, S_ISDIR(inode->i_mode) ? "dir" : "file"); ret = 0; if (S_ISDIR(inode->i_mode)) { if (mask & (MAY_EXEC | MAY_READ | MAY_CHDIR)) { if (!(access & AFS_ACE_LOOKUP)) goto permission_denied; } if (mask & MAY_WRITE) { if (!(access & (AFS_ACE_DELETE | /* rmdir, unlink, rename from */ AFS_ACE_INSERT))) /* create, mkdir, symlink, rename to */ goto permission_denied; } } else { if (!(access & AFS_ACE_LOOKUP)) goto permission_denied; if ((mask & MAY_EXEC) && !(inode->i_mode & S_IXUSR)) goto permission_denied; if (mask & (MAY_EXEC | MAY_READ)) { if (!(access & AFS_ACE_READ)) goto permission_denied; if (!(inode->i_mode & S_IRUSR)) goto permission_denied; } else if (mask & MAY_WRITE) { if (!(access & AFS_ACE_WRITE)) goto permission_denied; if (!(inode->i_mode & S_IWUSR)) goto permission_denied; } } key_put(key); _leave(" = %d", ret); return ret; permission_denied: ret = -EACCES; error: key_put(key); _leave(" = %d", ret); return ret; } void __exit afs_clean_up_permit_cache(void) { int i; for (i = 0; i < HASH_SIZE(afs_permits_cache); i++) WARN_ON_ONCE(!hlist_empty(&afs_permits_cache[i])); }
linux-master
fs/afs/security.c
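/*
 * A standalone illustrative sketch, not taken from fs/afs/security.c above:
 * the order-independent hash used for the permit cache, as in
 * afs_hash_permits() - plain addition over the entries, so two lists that
 * differ only in where an extra permit was inserted still sum to the same
 * value and land in the same hash bucket. struct example_permit is a
 * simplified stand-in for the kernel types.
 */
#include <stdio.h>

struct example_permit {
	const void	*key;		/* stands in for struct key * */
	unsigned int	access;		/* stands in for afs_access_t */
};

static unsigned long example_hash_permits(const struct example_permit *p, int nr)
{
	unsigned long h = nr;
	int i;

	for (i = 0; i < nr; i++) {
		h += (unsigned long)p[i].key / sizeof(void *);
		h += p[i].access;
	}
	return h;
}

int main(void)
{
	int k1, k2, k3;
	struct example_permit a[] = { { &k1, 0x3f }, { &k2, 0x01 }, { &k3, 0x07 } };
	struct example_permit b[] = { { &k2, 0x01 }, { &k1, 0x3f }, { &k3, 0x07 } };

	/* Same entries in a different order: the hashes match. */
	printf("%d\n", example_hash_permits(a, 3) == example_hash_permits(b, 3));
	return 0;
}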
// SPDX-License-Identifier: GPL-2.0-or-later /* handling of writes to regular files and writing back to the server * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells ([email protected]) */ #include <linux/backing-dev.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/pagemap.h> #include <linux/writeback.h> #include <linux/pagevec.h> #include <linux/netfs.h> #include "internal.h" static int afs_writepages_region(struct address_space *mapping, struct writeback_control *wbc, loff_t start, loff_t end, loff_t *_next, bool max_one_loop); static void afs_write_to_cache(struct afs_vnode *vnode, loff_t start, size_t len, loff_t i_size, bool caching); #ifdef CONFIG_AFS_FSCACHE /* * Mark a page as having been made dirty and thus needing writeback. We also * need to pin the cache object to write back to. */ bool afs_dirty_folio(struct address_space *mapping, struct folio *folio) { return fscache_dirty_folio(mapping, folio, afs_vnode_cache(AFS_FS_I(mapping->host))); } static void afs_folio_start_fscache(bool caching, struct folio *folio) { if (caching) folio_start_fscache(folio); } #else static void afs_folio_start_fscache(bool caching, struct folio *folio) { } #endif /* * Flush out a conflicting write. This may extend the write to the surrounding * pages if also dirty and contiguous to the conflicting region.. */ static int afs_flush_conflicting_write(struct address_space *mapping, struct folio *folio) { struct writeback_control wbc = { .sync_mode = WB_SYNC_ALL, .nr_to_write = LONG_MAX, .range_start = folio_pos(folio), .range_end = LLONG_MAX, }; loff_t next; return afs_writepages_region(mapping, &wbc, folio_pos(folio), LLONG_MAX, &next, true); } /* * prepare to perform part of a write to a page */ int afs_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, struct page **_page, void **fsdata) { struct afs_vnode *vnode = AFS_FS_I(file_inode(file)); struct folio *folio; unsigned long priv; unsigned f, from; unsigned t, to; pgoff_t index; int ret; _enter("{%llx:%llu},%llx,%x", vnode->fid.vid, vnode->fid.vnode, pos, len); /* Prefetch area to be written into the cache if we're caching this * file. We need to do this before we get a lock on the page in case * there's more than one writer competing for the same cache block. */ ret = netfs_write_begin(&vnode->netfs, file, mapping, pos, len, &folio, fsdata); if (ret < 0) return ret; index = folio_index(folio); from = pos - index * PAGE_SIZE; to = from + len; try_again: /* See if this page is already partially written in a way that we can * merge the new write with. */ if (folio_test_private(folio)) { priv = (unsigned long)folio_get_private(folio); f = afs_folio_dirty_from(folio, priv); t = afs_folio_dirty_to(folio, priv); ASSERTCMP(f, <=, t); if (folio_test_writeback(folio)) { trace_afs_folio_dirty(vnode, tracepoint_string("alrdy"), folio); folio_unlock(folio); goto wait_for_writeback; } /* If the file is being filled locally, allow inter-write * spaces to be merged into writes. If it's not, only write * back what the user gives us. */ if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) && (to < f || from > t)) goto flush_conflicting_write; } *_page = folio_file_page(folio, pos / PAGE_SIZE); _leave(" = 0"); return 0; /* The previous write and this write aren't adjacent or overlapping, so * flush the page out. 
*/ flush_conflicting_write: trace_afs_folio_dirty(vnode, tracepoint_string("confl"), folio); folio_unlock(folio); ret = afs_flush_conflicting_write(mapping, folio); if (ret < 0) goto error; wait_for_writeback: ret = folio_wait_writeback_killable(folio); if (ret < 0) goto error; ret = folio_lock_killable(folio); if (ret < 0) goto error; goto try_again; error: folio_put(folio); _leave(" = %d", ret); return ret; } /* * finalise part of a write to a page */ int afs_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *subpage, void *fsdata) { struct folio *folio = page_folio(subpage); struct afs_vnode *vnode = AFS_FS_I(file_inode(file)); unsigned long priv; unsigned int f, from = offset_in_folio(folio, pos); unsigned int t, to = from + copied; loff_t i_size, write_end_pos; _enter("{%llx:%llu},{%lx}", vnode->fid.vid, vnode->fid.vnode, folio_index(folio)); if (!folio_test_uptodate(folio)) { if (copied < len) { copied = 0; goto out; } folio_mark_uptodate(folio); } if (copied == 0) goto out; write_end_pos = pos + copied; i_size = i_size_read(&vnode->netfs.inode); if (write_end_pos > i_size) { write_seqlock(&vnode->cb_lock); i_size = i_size_read(&vnode->netfs.inode); if (write_end_pos > i_size) afs_set_i_size(vnode, write_end_pos); write_sequnlock(&vnode->cb_lock); fscache_update_cookie(afs_vnode_cache(vnode), NULL, &write_end_pos); } if (folio_test_private(folio)) { priv = (unsigned long)folio_get_private(folio); f = afs_folio_dirty_from(folio, priv); t = afs_folio_dirty_to(folio, priv); if (from < f) f = from; if (to > t) t = to; priv = afs_folio_dirty(folio, f, t); folio_change_private(folio, (void *)priv); trace_afs_folio_dirty(vnode, tracepoint_string("dirty+"), folio); } else { priv = afs_folio_dirty(folio, from, to); folio_attach_private(folio, (void *)priv); trace_afs_folio_dirty(vnode, tracepoint_string("dirty"), folio); } if (folio_mark_dirty(folio)) _debug("dirtied %lx", folio_index(folio)); out: folio_unlock(folio); folio_put(folio); return copied; } /* * kill all the pages in the given range */ static void afs_kill_pages(struct address_space *mapping, loff_t start, loff_t len) { struct afs_vnode *vnode = AFS_FS_I(mapping->host); struct folio *folio; pgoff_t index = start / PAGE_SIZE; pgoff_t last = (start + len - 1) / PAGE_SIZE, next; _enter("{%llx:%llu},%llx @%llx", vnode->fid.vid, vnode->fid.vnode, len, start); do { _debug("kill %lx (to %lx)", index, last); folio = filemap_get_folio(mapping, index); if (IS_ERR(folio)) { next = index + 1; continue; } next = folio_next_index(folio); folio_clear_uptodate(folio); folio_end_writeback(folio); folio_lock(folio); generic_error_remove_page(mapping, &folio->page); folio_unlock(folio); folio_put(folio); } while (index = next, index <= last); _leave(""); } /* * Redirty all the pages in a given range. 
*/ static void afs_redirty_pages(struct writeback_control *wbc, struct address_space *mapping, loff_t start, loff_t len) { struct afs_vnode *vnode = AFS_FS_I(mapping->host); struct folio *folio; pgoff_t index = start / PAGE_SIZE; pgoff_t last = (start + len - 1) / PAGE_SIZE, next; _enter("{%llx:%llu},%llx @%llx", vnode->fid.vid, vnode->fid.vnode, len, start); do { _debug("redirty %llx @%llx", len, start); folio = filemap_get_folio(mapping, index); if (IS_ERR(folio)) { next = index + 1; continue; } next = index + folio_nr_pages(folio); folio_redirty_for_writepage(wbc, folio); folio_end_writeback(folio); folio_put(folio); } while (index = next, index <= last); _leave(""); } /* * completion of write to server */ static void afs_pages_written_back(struct afs_vnode *vnode, loff_t start, unsigned int len) { struct address_space *mapping = vnode->netfs.inode.i_mapping; struct folio *folio; pgoff_t end; XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE); _enter("{%llx:%llu},{%x @%llx}", vnode->fid.vid, vnode->fid.vnode, len, start); rcu_read_lock(); end = (start + len - 1) / PAGE_SIZE; xas_for_each(&xas, folio, end) { if (!folio_test_writeback(folio)) { kdebug("bad %x @%llx page %lx %lx", len, start, folio_index(folio), end); ASSERT(folio_test_writeback(folio)); } trace_afs_folio_dirty(vnode, tracepoint_string("clear"), folio); folio_detach_private(folio); folio_end_writeback(folio); } rcu_read_unlock(); afs_prune_wb_keys(vnode); _leave(""); } /* * Find a key to use for the writeback. We cached the keys used to author the * writes on the vnode. *_wbk will contain the last writeback key used or NULL * and we need to start from there if it's set. */ static int afs_get_writeback_key(struct afs_vnode *vnode, struct afs_wb_key **_wbk) { struct afs_wb_key *wbk = NULL; struct list_head *p; int ret = -ENOKEY, ret2; spin_lock(&vnode->wb_lock); if (*_wbk) p = (*_wbk)->vnode_link.next; else p = vnode->wb_keys.next; while (p != &vnode->wb_keys) { wbk = list_entry(p, struct afs_wb_key, vnode_link); _debug("wbk %u", key_serial(wbk->key)); ret2 = key_validate(wbk->key); if (ret2 == 0) { refcount_inc(&wbk->usage); _debug("USE WB KEY %u", key_serial(wbk->key)); break; } wbk = NULL; if (ret == -ENOKEY) ret = ret2; p = p->next; } spin_unlock(&vnode->wb_lock); if (*_wbk) afs_put_wb_key(*_wbk); *_wbk = wbk; return 0; } static void afs_store_data_success(struct afs_operation *op) { struct afs_vnode *vnode = op->file[0].vnode; op->ctime = op->file[0].scb.status.mtime_client; afs_vnode_commit_status(op, &op->file[0]); if (op->error == 0) { if (!op->store.laundering) afs_pages_written_back(vnode, op->store.pos, op->store.size); afs_stat_v(vnode, n_stores); atomic_long_add(op->store.size, &afs_v2net(vnode)->n_store_bytes); } } static const struct afs_operation_ops afs_store_data_operation = { .issue_afs_rpc = afs_fs_store_data, .issue_yfs_rpc = yfs_fs_store_data, .success = afs_store_data_success, }; /* * write to a file */ static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t pos, bool laundering) { struct afs_operation *op; struct afs_wb_key *wbk = NULL; loff_t size = iov_iter_count(iter); int ret = -ENOKEY; _enter("%s{%llx:%llu.%u},%llx,%llx", vnode->volume->name, vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique, size, pos); ret = afs_get_writeback_key(vnode, &wbk); if (ret) { _leave(" = %d [no keys]", ret); return ret; } op = afs_alloc_operation(wbk->key, vnode->volume); if (IS_ERR(op)) { afs_put_wb_key(wbk); return -ENOMEM; } afs_op_set_vnode(op, 0, vnode); op->file[0].dv_delta = 1; 
op->file[0].modification = true; op->store.pos = pos; op->store.size = size; op->store.laundering = laundering; op->flags |= AFS_OPERATION_UNINTR; op->ops = &afs_store_data_operation; try_next_key: afs_begin_vnode_operation(op); op->store.write_iter = iter; op->store.i_size = max(pos + size, vnode->netfs.remote_i_size); op->mtime = vnode->netfs.inode.i_mtime; afs_wait_for_operation(op); switch (op->error) { case -EACCES: case -EPERM: case -ENOKEY: case -EKEYEXPIRED: case -EKEYREJECTED: case -EKEYREVOKED: _debug("next"); ret = afs_get_writeback_key(vnode, &wbk); if (ret == 0) { key_put(op->key); op->key = key_get(wbk->key); goto try_next_key; } break; } afs_put_wb_key(wbk); _leave(" = %d", op->error); return afs_put_operation(op); } /* * Extend the region to be written back to include subsequent contiguously * dirty pages if possible, but don't sleep while doing so. * * If this page holds new content, then we can include filler zeros in the * writeback. */ static void afs_extend_writeback(struct address_space *mapping, struct afs_vnode *vnode, long *_count, loff_t start, loff_t max_len, bool new_content, bool caching, unsigned int *_len) { struct folio_batch fbatch; struct folio *folio; unsigned long priv; unsigned int psize, filler = 0; unsigned int f, t; loff_t len = *_len; pgoff_t index = (start + len) / PAGE_SIZE; bool stop = true; unsigned int i; XA_STATE(xas, &mapping->i_pages, index); folio_batch_init(&fbatch); do { /* Firstly, we gather up a batch of contiguous dirty pages * under the RCU read lock - but we can't clear the dirty flags * there if any of those pages are mapped. */ rcu_read_lock(); xas_for_each(&xas, folio, ULONG_MAX) { stop = true; if (xas_retry(&xas, folio)) continue; if (xa_is_value(folio)) break; if (folio_index(folio) != index) break; if (!folio_try_get_rcu(folio)) { xas_reset(&xas); continue; } /* Has the page moved or been split? */ if (unlikely(folio != xas_reload(&xas))) { folio_put(folio); break; } if (!folio_trylock(folio)) { folio_put(folio); break; } if (!folio_test_dirty(folio) || folio_test_writeback(folio) || folio_test_fscache(folio)) { folio_unlock(folio); folio_put(folio); break; } psize = folio_size(folio); priv = (unsigned long)folio_get_private(folio); f = afs_folio_dirty_from(folio, priv); t = afs_folio_dirty_to(folio, priv); if (f != 0 && !new_content) { folio_unlock(folio); folio_put(folio); break; } len += filler + t; filler = psize - t; if (len >= max_len || *_count <= 0) stop = true; else if (t == psize || new_content) stop = false; index += folio_nr_pages(folio); if (!folio_batch_add(&fbatch, folio)) break; if (stop) break; } if (!stop) xas_pause(&xas); rcu_read_unlock(); /* Now, if we obtained any folios, we can shift them to being * writable and mark them for caching. */ if (!folio_batch_count(&fbatch)) break; for (i = 0; i < folio_batch_count(&fbatch); i++) { folio = fbatch.folios[i]; trace_afs_folio_dirty(vnode, tracepoint_string("store+"), folio); if (!folio_clear_dirty_for_io(folio)) BUG(); if (folio_start_writeback(folio)) BUG(); afs_folio_start_fscache(caching, folio); *_count -= folio_nr_pages(folio); folio_unlock(folio); } folio_batch_release(&fbatch); cond_resched(); } while (!stop); *_len = len; } /* * Synchronously write back the locked page and any subsequent non-locked dirty * pages. 
*/ static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping, struct writeback_control *wbc, struct folio *folio, loff_t start, loff_t end) { struct afs_vnode *vnode = AFS_FS_I(mapping->host); struct iov_iter iter; unsigned long priv; unsigned int offset, to, len, max_len; loff_t i_size = i_size_read(&vnode->netfs.inode); bool new_content = test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags); bool caching = fscache_cookie_enabled(afs_vnode_cache(vnode)); long count = wbc->nr_to_write; int ret; _enter(",%lx,%llx-%llx", folio_index(folio), start, end); if (folio_start_writeback(folio)) BUG(); afs_folio_start_fscache(caching, folio); count -= folio_nr_pages(folio); /* Find all consecutive lockable dirty pages that have contiguous * written regions, stopping when we find a page that is not * immediately lockable, is not dirty or is missing, or we reach the * end of the range. */ priv = (unsigned long)folio_get_private(folio); offset = afs_folio_dirty_from(folio, priv); to = afs_folio_dirty_to(folio, priv); trace_afs_folio_dirty(vnode, tracepoint_string("store"), folio); len = to - offset; start += offset; if (start < i_size) { /* Trim the write to the EOF; the extra data is ignored. Also * put an upper limit on the size of a single storedata op. */ max_len = 65536 * 4096; max_len = min_t(unsigned long long, max_len, end - start + 1); max_len = min_t(unsigned long long, max_len, i_size - start); if (len < max_len && (to == folio_size(folio) || new_content)) afs_extend_writeback(mapping, vnode, &count, start, max_len, new_content, caching, &len); len = min_t(loff_t, len, max_len); } /* We now have a contiguous set of dirty pages, each with writeback * set; the first page is still locked at this point, but all the rest * have been unlocked. */ folio_unlock(folio); if (start < i_size) { _debug("write back %x @%llx [%llx]", len, start, i_size); /* Speculatively write to the cache. We have to fix this up * later if the store fails. */ afs_write_to_cache(vnode, start, len, i_size, caching); iov_iter_xarray(&iter, ITER_SOURCE, &mapping->i_pages, start, len); ret = afs_store_data(vnode, &iter, start, false); } else { _debug("write discard %x @%llx [%llx]", len, start, i_size); /* The dirty region was entirely beyond the EOF. 
*/ fscache_clear_page_bits(mapping, start, len, caching); afs_pages_written_back(vnode, start, len); ret = 0; } switch (ret) { case 0: wbc->nr_to_write = count; ret = len; break; default: pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret); fallthrough; case -EACCES: case -EPERM: case -ENOKEY: case -EKEYEXPIRED: case -EKEYREJECTED: case -EKEYREVOKED: case -ENETRESET: afs_redirty_pages(wbc, mapping, start, len); mapping_set_error(mapping, ret); break; case -EDQUOT: case -ENOSPC: afs_redirty_pages(wbc, mapping, start, len); mapping_set_error(mapping, -ENOSPC); break; case -EROFS: case -EIO: case -EREMOTEIO: case -EFBIG: case -ENOENT: case -ENOMEDIUM: case -ENXIO: trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail); afs_kill_pages(mapping, start, len); mapping_set_error(mapping, ret); break; } _leave(" = %d", ret); return ret; } /* * write a region of pages back to the server */ static int afs_writepages_region(struct address_space *mapping, struct writeback_control *wbc, loff_t start, loff_t end, loff_t *_next, bool max_one_loop) { struct folio *folio; struct folio_batch fbatch; ssize_t ret; unsigned int i; int n, skips = 0; _enter("%llx,%llx,", start, end); folio_batch_init(&fbatch); do { pgoff_t index = start / PAGE_SIZE; n = filemap_get_folios_tag(mapping, &index, end / PAGE_SIZE, PAGECACHE_TAG_DIRTY, &fbatch); if (!n) break; for (i = 0; i < n; i++) { folio = fbatch.folios[i]; start = folio_pos(folio); /* May regress with THPs */ _debug("wback %lx", folio_index(folio)); /* At this point we hold neither the i_pages lock nor the * page lock: the page may be truncated or invalidated * (changing page->mapping to NULL), or even swizzled * back from swapper_space to tmpfs file mapping */ try_again: if (wbc->sync_mode != WB_SYNC_NONE) { ret = folio_lock_killable(folio); if (ret < 0) { folio_batch_release(&fbatch); return ret; } } else { if (!folio_trylock(folio)) continue; } if (folio->mapping != mapping || !folio_test_dirty(folio)) { start += folio_size(folio); folio_unlock(folio); continue; } if (folio_test_writeback(folio) || folio_test_fscache(folio)) { folio_unlock(folio); if (wbc->sync_mode != WB_SYNC_NONE) { folio_wait_writeback(folio); #ifdef CONFIG_AFS_FSCACHE folio_wait_fscache(folio); #endif goto try_again; } start += folio_size(folio); if (wbc->sync_mode == WB_SYNC_NONE) { if (skips >= 5 || need_resched()) { *_next = start; folio_batch_release(&fbatch); _leave(" = 0 [%llx]", *_next); return 0; } skips++; } continue; } if (!folio_clear_dirty_for_io(folio)) BUG(); ret = afs_write_back_from_locked_folio(mapping, wbc, folio, start, end); if (ret < 0) { _leave(" = %zd", ret); folio_batch_release(&fbatch); return ret; } start += ret; } folio_batch_release(&fbatch); cond_resched(); } while (wbc->nr_to_write > 0); *_next = start; _leave(" = 0 [%llx]", *_next); return 0; } /* * write some of the pending data back to the server */ int afs_writepages(struct address_space *mapping, struct writeback_control *wbc) { struct afs_vnode *vnode = AFS_FS_I(mapping->host); loff_t start, next; int ret; _enter(""); /* We have to be careful as we can end up racing with setattr() * truncating the pagecache since the caller doesn't take a lock here * to prevent it. 
*/ if (wbc->sync_mode == WB_SYNC_ALL) down_read(&vnode->validate_lock); else if (!down_read_trylock(&vnode->validate_lock)) return 0; if (wbc->range_cyclic) { start = mapping->writeback_index * PAGE_SIZE; ret = afs_writepages_region(mapping, wbc, start, LLONG_MAX, &next, false); if (ret == 0) { mapping->writeback_index = next / PAGE_SIZE; if (start > 0 && wbc->nr_to_write > 0) { ret = afs_writepages_region(mapping, wbc, 0, start, &next, false); if (ret == 0) mapping->writeback_index = next / PAGE_SIZE; } } } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) { ret = afs_writepages_region(mapping, wbc, 0, LLONG_MAX, &next, false); if (wbc->nr_to_write > 0 && ret == 0) mapping->writeback_index = next / PAGE_SIZE; } else { ret = afs_writepages_region(mapping, wbc, wbc->range_start, wbc->range_end, &next, false); } up_read(&vnode->validate_lock); _leave(" = %d", ret); return ret; } /* * write to an AFS file */ ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from) { struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp)); struct afs_file *af = iocb->ki_filp->private_data; ssize_t result; size_t count = iov_iter_count(from); _enter("{%llx:%llu},{%zu},", vnode->fid.vid, vnode->fid.vnode, count); if (IS_SWAPFILE(&vnode->netfs.inode)) { printk(KERN_INFO "AFS: Attempt to write to active swap file!\n"); return -EBUSY; } if (!count) return 0; result = afs_validate(vnode, af->key); if (result < 0) return result; result = generic_file_write_iter(iocb, from); _leave(" = %zd", result); return result; } /* * flush any dirty pages for this process, and check for write errors. * - the return status from this call provides a reliable indication of * whether any write errors occurred for this process. */ int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync) { struct afs_vnode *vnode = AFS_FS_I(file_inode(file)); struct afs_file *af = file->private_data; int ret; _enter("{%llx:%llu},{n=%pD},%d", vnode->fid.vid, vnode->fid.vnode, file, datasync); ret = afs_validate(vnode, af->key); if (ret < 0) return ret; return file_write_and_wait_range(file, start, end); } /* * notification that a previously read-only page is about to become writable * - if it returns an error, the caller will deliver a bus error signal */ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf) { struct folio *folio = page_folio(vmf->page); struct file *file = vmf->vma->vm_file; struct inode *inode = file_inode(file); struct afs_vnode *vnode = AFS_FS_I(inode); struct afs_file *af = file->private_data; unsigned long priv; vm_fault_t ret = VM_FAULT_RETRY; _enter("{{%llx:%llu}},{%lx}", vnode->fid.vid, vnode->fid.vnode, folio_index(folio)); afs_validate(vnode, af->key); sb_start_pagefault(inode->i_sb); /* Wait for the page to be written to the cache before we allow it to * be modified. We then assume the entire page will need writing back. */ #ifdef CONFIG_AFS_FSCACHE if (folio_test_fscache(folio) && folio_wait_fscache_killable(folio) < 0) goto out; #endif if (folio_wait_writeback_killable(folio)) goto out; if (folio_lock_killable(folio) < 0) goto out; /* We mustn't change folio->private until writeback is complete as that * details the portion of the page we need to write back and we might * need to redirty the page if there's a problem. 
*/ if (folio_wait_writeback_killable(folio) < 0) { folio_unlock(folio); goto out; } priv = afs_folio_dirty(folio, 0, folio_size(folio)); priv = afs_folio_dirty_mmapped(priv); if (folio_test_private(folio)) { folio_change_private(folio, (void *)priv); trace_afs_folio_dirty(vnode, tracepoint_string("mkwrite+"), folio); } else { folio_attach_private(folio, (void *)priv); trace_afs_folio_dirty(vnode, tracepoint_string("mkwrite"), folio); } file_update_time(file); ret = VM_FAULT_LOCKED; out: sb_end_pagefault(inode->i_sb); return ret; } /* * Prune the keys cached for writeback. The caller must hold vnode->wb_lock. */ void afs_prune_wb_keys(struct afs_vnode *vnode) { LIST_HEAD(graveyard); struct afs_wb_key *wbk, *tmp; /* Discard unused keys */ spin_lock(&vnode->wb_lock); if (!mapping_tagged(&vnode->netfs.inode.i_data, PAGECACHE_TAG_WRITEBACK) && !mapping_tagged(&vnode->netfs.inode.i_data, PAGECACHE_TAG_DIRTY)) { list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) { if (refcount_read(&wbk->usage) == 1) list_move(&wbk->vnode_link, &graveyard); } } spin_unlock(&vnode->wb_lock); while (!list_empty(&graveyard)) { wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link); list_del(&wbk->vnode_link); afs_put_wb_key(wbk); } } /* * Clean up a page during invalidation. */ int afs_launder_folio(struct folio *folio) { struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio)); struct iov_iter iter; struct bio_vec bv; unsigned long priv; unsigned int f, t; int ret = 0; _enter("{%lx}", folio->index); priv = (unsigned long)folio_get_private(folio); if (folio_clear_dirty_for_io(folio)) { f = 0; t = folio_size(folio); if (folio_test_private(folio)) { f = afs_folio_dirty_from(folio, priv); t = afs_folio_dirty_to(folio, priv); } bvec_set_folio(&bv, folio, t - f, f); iov_iter_bvec(&iter, ITER_SOURCE, &bv, 1, bv.bv_len); trace_afs_folio_dirty(vnode, tracepoint_string("launder"), folio); ret = afs_store_data(vnode, &iter, folio_pos(folio) + f, true); } trace_afs_folio_dirty(vnode, tracepoint_string("laundered"), folio); folio_detach_private(folio); folio_wait_fscache(folio); return ret; } /* * Deal with the completion of writing the data to the cache. */ static void afs_write_to_cache_done(void *priv, ssize_t transferred_or_error, bool was_async) { struct afs_vnode *vnode = priv; if (IS_ERR_VALUE(transferred_or_error) && transferred_or_error != -ENOBUFS) afs_invalidate_cache(vnode, 0); } /* * Save the write to the cache also. */ static void afs_write_to_cache(struct afs_vnode *vnode, loff_t start, size_t len, loff_t i_size, bool caching) { fscache_write_to_cache(afs_vnode_cache(vnode), vnode->netfs.inode.i_mapping, start, len, i_size, afs_write_to_cache_done, vnode, caching); }
linux-master
fs/afs/write.c
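Note on the dirty-range bookkeeping in the file above: afs_write_end() merges the newly written [from, to) span with whatever afs_folio_dirty_from()/afs_folio_dirty_to() recover from folio->private before re-packing it with afs_folio_dirty(). The sketch below is a minimal userspace demo, not kernel code: the 16/16-bit packing and the demo_* helper names are assumptions made for the example (the real encoding lives in fs/afs/internal.h and depends on the folio size), but the merge step is the same one afs_write_end() performs before folio_change_private().

/*
 * Illustrative userspace sketch (not kernel code): packing a dirty byte
 * range into one unsigned long and merging it with a new write, the way
 * afs_write_end() does.  The 16-bit field split is an assumption for the
 * demo only.
 */
#include <stdio.h>

/* assumed demo layout: low 16 bits = "from", next 16 bits = "to" */
static unsigned long demo_dirty(unsigned int from, unsigned int to)
{
	return ((unsigned long)to << 16) | from;
}

static unsigned int demo_dirty_from(unsigned long priv) { return priv & 0xffff; }
static unsigned int demo_dirty_to(unsigned long priv)   { return (priv >> 16) & 0xffff; }

int main(void)
{
	/* existing dirty region recorded in folio->private */
	unsigned long priv = demo_dirty(512, 1024);
	/* a new write covering bytes 900..2048 of the folio */
	unsigned int from = 900, to = 2048;
	unsigned int f = demo_dirty_from(priv), t = demo_dirty_to(priv);

	/* the same widen-both-ends merge afs_write_end() performs */
	if (from < f)
		f = from;
	if (to > t)
		t = to;
	priv = demo_dirty(f, t);

	printf("merged dirty range: %u..%u\n",
	       demo_dirty_from(priv), demo_dirty_to(priv));
	return 0;
}

afs_launder_folio() above reads the same packed bounds back when it has to write a single folio out during invalidation.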
// SPDX-License-Identifier: GPL-2.0-or-later /* YFS File Server client stubs * * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved. * Written by David Howells ([email protected]) */ #include <linux/init.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/circ_buf.h> #include <linux/iversion.h> #include "internal.h" #include "afs_fs.h" #include "xdr_fs.h" #include "protocol_yfs.h" #define xdr_size(x) (sizeof(*x) / sizeof(__be32)) static void xdr_decode_YFSFid(const __be32 **_bp, struct afs_fid *fid) { const struct yfs_xdr_YFSFid *x = (const void *)*_bp; fid->vid = xdr_to_u64(x->volume); fid->vnode = xdr_to_u64(x->vnode.lo); fid->vnode_hi = ntohl(x->vnode.hi); fid->unique = ntohl(x->vnode.unique); *_bp += xdr_size(x); } static __be32 *xdr_encode_u32(__be32 *bp, u32 n) { *bp++ = htonl(n); return bp; } static __be32 *xdr_encode_u64(__be32 *bp, u64 n) { struct yfs_xdr_u64 *x = (void *)bp; *x = u64_to_xdr(n); return bp + xdr_size(x); } static __be32 *xdr_encode_YFSFid(__be32 *bp, struct afs_fid *fid) { struct yfs_xdr_YFSFid *x = (void *)bp; x->volume = u64_to_xdr(fid->vid); x->vnode.lo = u64_to_xdr(fid->vnode); x->vnode.hi = htonl(fid->vnode_hi); x->vnode.unique = htonl(fid->unique); return bp + xdr_size(x); } static size_t xdr_strlen(unsigned int len) { return sizeof(__be32) + round_up(len, sizeof(__be32)); } static __be32 *xdr_encode_string(__be32 *bp, const char *p, unsigned int len) { bp = xdr_encode_u32(bp, len); bp = memcpy(bp, p, len); if (len & 3) { unsigned int pad = 4 - (len & 3); memset((u8 *)bp + len, 0, pad); len += pad; } return bp + len / sizeof(__be32); } static __be32 *xdr_encode_name(__be32 *bp, const struct qstr *p) { return xdr_encode_string(bp, p->name, p->len); } static s64 linux_to_yfs_time(const struct timespec64 *t) { /* Convert to 100ns intervals. */ return (u64)t->tv_sec * 10000000 + t->tv_nsec/100; } static __be32 *xdr_encode_YFSStoreStatus(__be32 *bp, mode_t *mode, const struct timespec64 *t) { struct yfs_xdr_YFSStoreStatus *x = (void *)bp; mode_t masked_mode = mode ? *mode & S_IALLUGO : 0; s64 mtime = linux_to_yfs_time(t); u32 mask = AFS_SET_MTIME; mask |= mode ? AFS_SET_MODE : 0; x->mask = htonl(mask); x->mode = htonl(masked_mode); x->mtime_client = u64_to_xdr(mtime); x->owner = u64_to_xdr(0); x->group = u64_to_xdr(0); return bp + xdr_size(x); } /* * Convert a signed 100ns-resolution 64-bit time into a timespec. */ static struct timespec64 yfs_time_to_linux(s64 t) { struct timespec64 ts; u64 abs_t; /* * Unfortunately can not use normal 64 bit division on 32 bit arch, but * the alternative, do_div, does not work with negative numbers so have * to special case them */ if (t < 0) { abs_t = -t; ts.tv_nsec = (time64_t)(do_div(abs_t, 10000000) * 100); ts.tv_nsec = -ts.tv_nsec; ts.tv_sec = -abs_t; } else { abs_t = t; ts.tv_nsec = (time64_t)do_div(abs_t, 10000000) * 100; ts.tv_sec = abs_t; } return ts; } static struct timespec64 xdr_to_time(const struct yfs_xdr_u64 xdr) { s64 t = xdr_to_u64(xdr); return yfs_time_to_linux(t); } static void yfs_check_req(struct afs_call *call, __be32 *bp) { size_t len = (void *)bp - call->request; if (len > call->request_size) pr_err("kAFS: %s: Request buffer overflow (%zu>%u)\n", call->type->name, len, call->request_size); else if (len < call->request_size) pr_warn("kAFS: %s: Request buffer underflow (%zu<%u)\n", call->type->name, len, call->request_size); } /* * Dump a bad file status record. 
*/ static void xdr_dump_bad(const __be32 *bp) { __be32 x[4]; int i; pr_notice("YFS XDR: Bad status record\n"); for (i = 0; i < 6 * 4 * 4; i += 16) { memcpy(x, bp, 16); bp += 4; pr_notice("%03x: %08x %08x %08x %08x\n", i, ntohl(x[0]), ntohl(x[1]), ntohl(x[2]), ntohl(x[3])); } memcpy(x, bp, 8); pr_notice("0x60: %08x %08x\n", ntohl(x[0]), ntohl(x[1])); } /* * Decode a YFSFetchStatus block */ static void xdr_decode_YFSFetchStatus(const __be32 **_bp, struct afs_call *call, struct afs_status_cb *scb) { const struct yfs_xdr_YFSFetchStatus *xdr = (const void *)*_bp; struct afs_file_status *status = &scb->status; u32 type; status->abort_code = ntohl(xdr->abort_code); if (status->abort_code != 0) { if (status->abort_code == VNOVNODE) status->nlink = 0; scb->have_error = true; goto advance; } type = ntohl(xdr->type); switch (type) { case AFS_FTYPE_FILE: case AFS_FTYPE_DIR: case AFS_FTYPE_SYMLINK: status->type = type; break; default: goto bad; } status->nlink = ntohl(xdr->nlink); status->author = xdr_to_u64(xdr->author); status->owner = xdr_to_u64(xdr->owner); status->caller_access = ntohl(xdr->caller_access); /* Ticket dependent */ status->anon_access = ntohl(xdr->anon_access); status->mode = ntohl(xdr->mode) & S_IALLUGO; status->group = xdr_to_u64(xdr->group); status->lock_count = ntohl(xdr->lock_count); status->mtime_client = xdr_to_time(xdr->mtime_client); status->mtime_server = xdr_to_time(xdr->mtime_server); status->size = xdr_to_u64(xdr->size); status->data_version = xdr_to_u64(xdr->data_version); scb->have_status = true; advance: *_bp += xdr_size(xdr); return; bad: xdr_dump_bad(*_bp); afs_protocol_error(call, afs_eproto_bad_status); goto advance; } /* * Decode a YFSCallBack block */ static void xdr_decode_YFSCallBack(const __be32 **_bp, struct afs_call *call, struct afs_status_cb *scb) { struct yfs_xdr_YFSCallBack *x = (void *)*_bp; struct afs_callback *cb = &scb->callback; ktime_t cb_expiry; cb_expiry = ktime_add(call->issue_time, xdr_to_u64(x->expiration_time) * 100); cb->expires_at = ktime_divns(cb_expiry, NSEC_PER_SEC); scb->have_cb = true; *_bp += xdr_size(x); } /* * Decode a YFSVolSync block */ static void xdr_decode_YFSVolSync(const __be32 **_bp, struct afs_volsync *volsync) { struct yfs_xdr_YFSVolSync *x = (void *)*_bp; u64 creation; if (volsync) { creation = xdr_to_u64(x->vol_creation_date); do_div(creation, 10 * 1000 * 1000); volsync->creation = creation; } *_bp += xdr_size(x); } /* * Encode the requested attributes into a YFSStoreStatus block */ static __be32 *xdr_encode_YFS_StoreStatus(__be32 *bp, struct iattr *attr) { struct yfs_xdr_YFSStoreStatus *x = (void *)bp; s64 mtime = 0, owner = 0, group = 0; u32 mask = 0, mode = 0; mask = 0; if (attr->ia_valid & ATTR_MTIME) { mask |= AFS_SET_MTIME; mtime = linux_to_yfs_time(&attr->ia_mtime); } if (attr->ia_valid & ATTR_UID) { mask |= AFS_SET_OWNER; owner = from_kuid(&init_user_ns, attr->ia_uid); } if (attr->ia_valid & ATTR_GID) { mask |= AFS_SET_GROUP; group = from_kgid(&init_user_ns, attr->ia_gid); } if (attr->ia_valid & ATTR_MODE) { mask |= AFS_SET_MODE; mode = attr->ia_mode & S_IALLUGO; } x->mask = htonl(mask); x->mode = htonl(mode); x->mtime_client = u64_to_xdr(mtime); x->owner = u64_to_xdr(owner); x->group = u64_to_xdr(group); return bp + xdr_size(x); } /* * Decode a YFSFetchVolumeStatus block. 
*/ static void xdr_decode_YFSFetchVolumeStatus(const __be32 **_bp, struct afs_volume_status *vs) { const struct yfs_xdr_YFSFetchVolumeStatus *x = (const void *)*_bp; u32 flags; vs->vid = xdr_to_u64(x->vid); vs->parent_id = xdr_to_u64(x->parent_id); flags = ntohl(x->flags); vs->online = flags & yfs_FVSOnline; vs->in_service = flags & yfs_FVSInservice; vs->blessed = flags & yfs_FVSBlessed; vs->needs_salvage = flags & yfs_FVSNeedsSalvage; vs->type = ntohl(x->type); vs->min_quota = 0; vs->max_quota = xdr_to_u64(x->max_quota); vs->blocks_in_use = xdr_to_u64(x->blocks_in_use); vs->part_blocks_avail = xdr_to_u64(x->part_blocks_avail); vs->part_max_blocks = xdr_to_u64(x->part_max_blocks); vs->vol_copy_date = xdr_to_u64(x->vol_copy_date); vs->vol_backup_date = xdr_to_u64(x->vol_backup_date); *_bp += sizeof(*x) / sizeof(__be32); } /* * Deliver reply data to operations that just return a file status and a volume * sync record. */ static int yfs_deliver_status_and_volsync(struct afs_call *call) { struct afs_operation *op = call->op; const __be32 *bp; int ret; ret = afs_transfer_reply(call); if (ret < 0) return ret; bp = call->buffer; xdr_decode_YFSFetchStatus(&bp, call, &op->file[0].scb); xdr_decode_YFSVolSync(&bp, &op->volsync); _leave(" = 0 [done]"); return 0; } /* * Deliver reply data to an YFS.FetchData64. */ static int yfs_deliver_fs_fetch_data64(struct afs_call *call) { struct afs_operation *op = call->op; struct afs_vnode_param *vp = &op->file[0]; struct afs_read *req = op->fetch.req; const __be32 *bp; int ret; _enter("{%u,%zu, %zu/%llu}", call->unmarshall, call->iov_len, iov_iter_count(call->iter), req->actual_len); switch (call->unmarshall) { case 0: req->actual_len = 0; afs_extract_to_tmp64(call); call->unmarshall++; fallthrough; /* Extract the returned data length into ->actual_len. This * may indicate more or less data than was requested will be * returned. 
*/ case 1: _debug("extract data length"); ret = afs_extract_data(call, true); if (ret < 0) return ret; req->actual_len = be64_to_cpu(call->tmp64); _debug("DATA length: %llu", req->actual_len); if (req->actual_len == 0) goto no_more_data; call->iter = req->iter; call->iov_len = min(req->actual_len, req->len); call->unmarshall++; fallthrough; /* extract the returned data */ case 2: _debug("extract data %zu/%llu", iov_iter_count(call->iter), req->actual_len); ret = afs_extract_data(call, true); if (ret < 0) return ret; call->iter = &call->def_iter; if (req->actual_len <= req->len) goto no_more_data; /* Discard any excess data the server gave us */ afs_extract_discard(call, req->actual_len - req->len); call->unmarshall = 3; fallthrough; case 3: _debug("extract discard %zu/%llu", iov_iter_count(call->iter), req->actual_len - req->len); ret = afs_extract_data(call, true); if (ret < 0) return ret; no_more_data: call->unmarshall = 4; afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSCallBack) + sizeof(struct yfs_xdr_YFSVolSync)); fallthrough; /* extract the metadata */ case 4: ret = afs_extract_data(call, false); if (ret < 0) return ret; bp = call->buffer; xdr_decode_YFSFetchStatus(&bp, call, &vp->scb); xdr_decode_YFSCallBack(&bp, call, &vp->scb); xdr_decode_YFSVolSync(&bp, &op->volsync); req->data_version = vp->scb.status.data_version; req->file_size = vp->scb.status.size; call->unmarshall++; fallthrough; case 5: break; } _leave(" = 0 [done]"); return 0; } /* * YFS.FetchData64 operation type */ static const struct afs_call_type yfs_RXYFSFetchData64 = { .name = "YFS.FetchData64", .op = yfs_FS_FetchData64, .deliver = yfs_deliver_fs_fetch_data64, .destructor = afs_flat_call_destructor, }; /* * Fetch data from a file. */ void yfs_fs_fetch_data(struct afs_operation *op) { struct afs_vnode_param *vp = &op->file[0]; struct afs_read *req = op->fetch.req; struct afs_call *call; __be32 *bp; _enter(",%x,{%llx:%llu},%llx,%llx", key_serial(op->key), vp->fid.vid, vp->fid.vnode, req->pos, req->len); call = afs_alloc_flat_call(op->net, &yfs_RXYFSFetchData64, sizeof(__be32) * 2 + sizeof(struct yfs_xdr_YFSFid) + sizeof(struct yfs_xdr_u64) * 2, sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSCallBack) + sizeof(struct yfs_xdr_YFSVolSync)); if (!call) return afs_op_nomem(op); req->call_debug_id = call->debug_id; /* marshall the parameters */ bp = call->request; bp = xdr_encode_u32(bp, YFSFETCHDATA64); bp = xdr_encode_u32(bp, 0); /* RPC flags */ bp = xdr_encode_YFSFid(bp, &vp->fid); bp = xdr_encode_u64(bp, req->pos); bp = xdr_encode_u64(bp, req->len); yfs_check_req(call, bp); trace_afs_make_fs_call(call, &vp->fid); afs_make_op_call(op, call, GFP_NOFS); } /* * Deliver reply data for YFS.CreateFile or YFS.MakeDir. 
*/ static int yfs_deliver_fs_create_vnode(struct afs_call *call) { struct afs_operation *op = call->op; struct afs_vnode_param *dvp = &op->file[0]; struct afs_vnode_param *vp = &op->file[1]; const __be32 *bp; int ret; _enter("{%u}", call->unmarshall); ret = afs_transfer_reply(call); if (ret < 0) return ret; /* unmarshall the reply once we've received all of it */ bp = call->buffer; xdr_decode_YFSFid(&bp, &op->file[1].fid); xdr_decode_YFSFetchStatus(&bp, call, &vp->scb); xdr_decode_YFSFetchStatus(&bp, call, &dvp->scb); xdr_decode_YFSCallBack(&bp, call, &vp->scb); xdr_decode_YFSVolSync(&bp, &op->volsync); _leave(" = 0 [done]"); return 0; } /* * FS.CreateFile and FS.MakeDir operation type */ static const struct afs_call_type afs_RXFSCreateFile = { .name = "YFS.CreateFile", .op = yfs_FS_CreateFile, .deliver = yfs_deliver_fs_create_vnode, .destructor = afs_flat_call_destructor, }; /* * Create a file. */ void yfs_fs_create_file(struct afs_operation *op) { const struct qstr *name = &op->dentry->d_name; struct afs_vnode_param *dvp = &op->file[0]; struct afs_call *call; size_t reqsz, rplsz; __be32 *bp; _enter(""); reqsz = (sizeof(__be32) + sizeof(__be32) + sizeof(struct yfs_xdr_YFSFid) + xdr_strlen(name->len) + sizeof(struct yfs_xdr_YFSStoreStatus) + sizeof(__be32)); rplsz = (sizeof(struct yfs_xdr_YFSFid) + sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSCallBack) + sizeof(struct yfs_xdr_YFSVolSync)); call = afs_alloc_flat_call(op->net, &afs_RXFSCreateFile, reqsz, rplsz); if (!call) return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp = xdr_encode_u32(bp, YFSCREATEFILE); bp = xdr_encode_u32(bp, 0); /* RPC flags */ bp = xdr_encode_YFSFid(bp, &dvp->fid); bp = xdr_encode_name(bp, name); bp = xdr_encode_YFSStoreStatus(bp, &op->create.mode, &op->mtime); bp = xdr_encode_u32(bp, yfs_LockNone); /* ViceLockType */ yfs_check_req(call, bp); trace_afs_make_fs_call1(call, &dvp->fid, name); afs_make_op_call(op, call, GFP_NOFS); } static const struct afs_call_type yfs_RXFSMakeDir = { .name = "YFS.MakeDir", .op = yfs_FS_MakeDir, .deliver = yfs_deliver_fs_create_vnode, .destructor = afs_flat_call_destructor, }; /* * Make a directory. */ void yfs_fs_make_dir(struct afs_operation *op) { const struct qstr *name = &op->dentry->d_name; struct afs_vnode_param *dvp = &op->file[0]; struct afs_call *call; size_t reqsz, rplsz; __be32 *bp; _enter(""); reqsz = (sizeof(__be32) + sizeof(struct yfs_xdr_RPCFlags) + sizeof(struct yfs_xdr_YFSFid) + xdr_strlen(name->len) + sizeof(struct yfs_xdr_YFSStoreStatus)); rplsz = (sizeof(struct yfs_xdr_YFSFid) + sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSCallBack) + sizeof(struct yfs_xdr_YFSVolSync)); call = afs_alloc_flat_call(op->net, &yfs_RXFSMakeDir, reqsz, rplsz); if (!call) return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp = xdr_encode_u32(bp, YFSMAKEDIR); bp = xdr_encode_u32(bp, 0); /* RPC flags */ bp = xdr_encode_YFSFid(bp, &dvp->fid); bp = xdr_encode_name(bp, name); bp = xdr_encode_YFSStoreStatus(bp, &op->create.mode, &op->mtime); yfs_check_req(call, bp); trace_afs_make_fs_call1(call, &dvp->fid, name); afs_make_op_call(op, call, GFP_NOFS); } /* * Deliver reply data to a YFS.RemoveFile2 operation. 
*/ static int yfs_deliver_fs_remove_file2(struct afs_call *call) { struct afs_operation *op = call->op; struct afs_vnode_param *dvp = &op->file[0]; struct afs_vnode_param *vp = &op->file[1]; struct afs_fid fid; const __be32 *bp; int ret; _enter("{%u}", call->unmarshall); ret = afs_transfer_reply(call); if (ret < 0) return ret; bp = call->buffer; xdr_decode_YFSFetchStatus(&bp, call, &dvp->scb); xdr_decode_YFSFid(&bp, &fid); xdr_decode_YFSFetchStatus(&bp, call, &vp->scb); /* Was deleted if vnode->status.abort_code == VNOVNODE. */ xdr_decode_YFSVolSync(&bp, &op->volsync); return 0; } static void yfs_done_fs_remove_file2(struct afs_call *call) { if (call->error == -ECONNABORTED && call->abort_code == RX_INVALID_OPERATION) { set_bit(AFS_SERVER_FL_NO_RM2, &call->server->flags); call->op->flags |= AFS_OPERATION_DOWNGRADE; } } /* * YFS.RemoveFile2 operation type. */ static const struct afs_call_type yfs_RXYFSRemoveFile2 = { .name = "YFS.RemoveFile2", .op = yfs_FS_RemoveFile2, .deliver = yfs_deliver_fs_remove_file2, .done = yfs_done_fs_remove_file2, .destructor = afs_flat_call_destructor, }; /* * Remove a file and retrieve new file status. */ void yfs_fs_remove_file2(struct afs_operation *op) { struct afs_vnode_param *dvp = &op->file[0]; const struct qstr *name = &op->dentry->d_name; struct afs_call *call; __be32 *bp; _enter(""); call = afs_alloc_flat_call(op->net, &yfs_RXYFSRemoveFile2, sizeof(__be32) + sizeof(struct yfs_xdr_RPCFlags) + sizeof(struct yfs_xdr_YFSFid) + xdr_strlen(name->len), sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSFid) + sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSVolSync)); if (!call) return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp = xdr_encode_u32(bp, YFSREMOVEFILE2); bp = xdr_encode_u32(bp, 0); /* RPC flags */ bp = xdr_encode_YFSFid(bp, &dvp->fid); bp = xdr_encode_name(bp, name); yfs_check_req(call, bp); trace_afs_make_fs_call1(call, &dvp->fid, name); afs_make_op_call(op, call, GFP_NOFS); } /* * Deliver reply data to a YFS.RemoveFile or YFS.RemoveDir operation. */ static int yfs_deliver_fs_remove(struct afs_call *call) { struct afs_operation *op = call->op; struct afs_vnode_param *dvp = &op->file[0]; const __be32 *bp; int ret; _enter("{%u}", call->unmarshall); ret = afs_transfer_reply(call); if (ret < 0) return ret; bp = call->buffer; xdr_decode_YFSFetchStatus(&bp, call, &dvp->scb); xdr_decode_YFSVolSync(&bp, &op->volsync); return 0; } /* * FS.RemoveDir and FS.RemoveFile operation types. */ static const struct afs_call_type yfs_RXYFSRemoveFile = { .name = "YFS.RemoveFile", .op = yfs_FS_RemoveFile, .deliver = yfs_deliver_fs_remove, .destructor = afs_flat_call_destructor, }; /* * Remove a file. 
*/ void yfs_fs_remove_file(struct afs_operation *op) { const struct qstr *name = &op->dentry->d_name; struct afs_vnode_param *dvp = &op->file[0]; struct afs_call *call; __be32 *bp; _enter(""); if (!test_bit(AFS_SERVER_FL_NO_RM2, &op->server->flags)) return yfs_fs_remove_file2(op); call = afs_alloc_flat_call(op->net, &yfs_RXYFSRemoveFile, sizeof(__be32) + sizeof(struct yfs_xdr_RPCFlags) + sizeof(struct yfs_xdr_YFSFid) + xdr_strlen(name->len), sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSVolSync)); if (!call) return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp = xdr_encode_u32(bp, YFSREMOVEFILE); bp = xdr_encode_u32(bp, 0); /* RPC flags */ bp = xdr_encode_YFSFid(bp, &dvp->fid); bp = xdr_encode_name(bp, name); yfs_check_req(call, bp); trace_afs_make_fs_call1(call, &dvp->fid, name); afs_make_op_call(op, call, GFP_NOFS); } static const struct afs_call_type yfs_RXYFSRemoveDir = { .name = "YFS.RemoveDir", .op = yfs_FS_RemoveDir, .deliver = yfs_deliver_fs_remove, .destructor = afs_flat_call_destructor, }; /* * Remove a directory. */ void yfs_fs_remove_dir(struct afs_operation *op) { const struct qstr *name = &op->dentry->d_name; struct afs_vnode_param *dvp = &op->file[0]; struct afs_call *call; __be32 *bp; _enter(""); call = afs_alloc_flat_call(op->net, &yfs_RXYFSRemoveDir, sizeof(__be32) + sizeof(struct yfs_xdr_RPCFlags) + sizeof(struct yfs_xdr_YFSFid) + xdr_strlen(name->len), sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSVolSync)); if (!call) return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp = xdr_encode_u32(bp, YFSREMOVEDIR); bp = xdr_encode_u32(bp, 0); /* RPC flags */ bp = xdr_encode_YFSFid(bp, &dvp->fid); bp = xdr_encode_name(bp, name); yfs_check_req(call, bp); trace_afs_make_fs_call1(call, &dvp->fid, name); afs_make_op_call(op, call, GFP_NOFS); } /* * Deliver reply data to a YFS.Link operation. */ static int yfs_deliver_fs_link(struct afs_call *call) { struct afs_operation *op = call->op; struct afs_vnode_param *dvp = &op->file[0]; struct afs_vnode_param *vp = &op->file[1]; const __be32 *bp; int ret; _enter("{%u}", call->unmarshall); ret = afs_transfer_reply(call); if (ret < 0) return ret; bp = call->buffer; xdr_decode_YFSFetchStatus(&bp, call, &vp->scb); xdr_decode_YFSFetchStatus(&bp, call, &dvp->scb); xdr_decode_YFSVolSync(&bp, &op->volsync); _leave(" = 0 [done]"); return 0; } /* * YFS.Link operation type. */ static const struct afs_call_type yfs_RXYFSLink = { .name = "YFS.Link", .op = yfs_FS_Link, .deliver = yfs_deliver_fs_link, .destructor = afs_flat_call_destructor, }; /* * Make a hard link. 
*/ void yfs_fs_link(struct afs_operation *op) { const struct qstr *name = &op->dentry->d_name; struct afs_vnode_param *dvp = &op->file[0]; struct afs_vnode_param *vp = &op->file[1]; struct afs_call *call; __be32 *bp; _enter(""); call = afs_alloc_flat_call(op->net, &yfs_RXYFSLink, sizeof(__be32) + sizeof(struct yfs_xdr_RPCFlags) + sizeof(struct yfs_xdr_YFSFid) + xdr_strlen(name->len) + sizeof(struct yfs_xdr_YFSFid), sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSVolSync)); if (!call) return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp = xdr_encode_u32(bp, YFSLINK); bp = xdr_encode_u32(bp, 0); /* RPC flags */ bp = xdr_encode_YFSFid(bp, &dvp->fid); bp = xdr_encode_name(bp, name); bp = xdr_encode_YFSFid(bp, &vp->fid); yfs_check_req(call, bp); trace_afs_make_fs_call1(call, &vp->fid, name); afs_make_op_call(op, call, GFP_NOFS); } /* * Deliver reply data to a YFS.Symlink operation. */ static int yfs_deliver_fs_symlink(struct afs_call *call) { struct afs_operation *op = call->op; struct afs_vnode_param *dvp = &op->file[0]; struct afs_vnode_param *vp = &op->file[1]; const __be32 *bp; int ret; _enter("{%u}", call->unmarshall); ret = afs_transfer_reply(call); if (ret < 0) return ret; /* unmarshall the reply once we've received all of it */ bp = call->buffer; xdr_decode_YFSFid(&bp, &vp->fid); xdr_decode_YFSFetchStatus(&bp, call, &vp->scb); xdr_decode_YFSFetchStatus(&bp, call, &dvp->scb); xdr_decode_YFSVolSync(&bp, &op->volsync); _leave(" = 0 [done]"); return 0; } /* * YFS.Symlink operation type */ static const struct afs_call_type yfs_RXYFSSymlink = { .name = "YFS.Symlink", .op = yfs_FS_Symlink, .deliver = yfs_deliver_fs_symlink, .destructor = afs_flat_call_destructor, }; /* * Create a symbolic link. */ void yfs_fs_symlink(struct afs_operation *op) { const struct qstr *name = &op->dentry->d_name; struct afs_vnode_param *dvp = &op->file[0]; struct afs_call *call; size_t contents_sz; mode_t mode = 0777; __be32 *bp; _enter(""); contents_sz = strlen(op->create.symlink); call = afs_alloc_flat_call(op->net, &yfs_RXYFSSymlink, sizeof(__be32) + sizeof(struct yfs_xdr_RPCFlags) + sizeof(struct yfs_xdr_YFSFid) + xdr_strlen(name->len) + xdr_strlen(contents_sz) + sizeof(struct yfs_xdr_YFSStoreStatus), sizeof(struct yfs_xdr_YFSFid) + sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSVolSync)); if (!call) return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp = xdr_encode_u32(bp, YFSSYMLINK); bp = xdr_encode_u32(bp, 0); /* RPC flags */ bp = xdr_encode_YFSFid(bp, &dvp->fid); bp = xdr_encode_name(bp, name); bp = xdr_encode_string(bp, op->create.symlink, contents_sz); bp = xdr_encode_YFSStoreStatus(bp, &mode, &op->mtime); yfs_check_req(call, bp); trace_afs_make_fs_call1(call, &dvp->fid, name); afs_make_op_call(op, call, GFP_NOFS); } /* * Deliver reply data to a YFS.Rename operation. */ static int yfs_deliver_fs_rename(struct afs_call *call) { struct afs_operation *op = call->op; struct afs_vnode_param *orig_dvp = &op->file[0]; struct afs_vnode_param *new_dvp = &op->file[1]; const __be32 *bp; int ret; _enter("{%u}", call->unmarshall); ret = afs_transfer_reply(call); if (ret < 0) return ret; bp = call->buffer; /* If the two dirs are the same, we have two copies of the same status * report, so we just decode it twice. 
*/ xdr_decode_YFSFetchStatus(&bp, call, &orig_dvp->scb); xdr_decode_YFSFetchStatus(&bp, call, &new_dvp->scb); xdr_decode_YFSVolSync(&bp, &op->volsync); _leave(" = 0 [done]"); return 0; } /* * YFS.Rename operation type */ static const struct afs_call_type yfs_RXYFSRename = { .name = "FS.Rename", .op = yfs_FS_Rename, .deliver = yfs_deliver_fs_rename, .destructor = afs_flat_call_destructor, }; /* * Rename a file or directory. */ void yfs_fs_rename(struct afs_operation *op) { struct afs_vnode_param *orig_dvp = &op->file[0]; struct afs_vnode_param *new_dvp = &op->file[1]; const struct qstr *orig_name = &op->dentry->d_name; const struct qstr *new_name = &op->dentry_2->d_name; struct afs_call *call; __be32 *bp; _enter(""); call = afs_alloc_flat_call(op->net, &yfs_RXYFSRename, sizeof(__be32) + sizeof(struct yfs_xdr_RPCFlags) + sizeof(struct yfs_xdr_YFSFid) + xdr_strlen(orig_name->len) + sizeof(struct yfs_xdr_YFSFid) + xdr_strlen(new_name->len), sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSVolSync)); if (!call) return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp = xdr_encode_u32(bp, YFSRENAME); bp = xdr_encode_u32(bp, 0); /* RPC flags */ bp = xdr_encode_YFSFid(bp, &orig_dvp->fid); bp = xdr_encode_name(bp, orig_name); bp = xdr_encode_YFSFid(bp, &new_dvp->fid); bp = xdr_encode_name(bp, new_name); yfs_check_req(call, bp); trace_afs_make_fs_call2(call, &orig_dvp->fid, orig_name, new_name); afs_make_op_call(op, call, GFP_NOFS); } /* * YFS.StoreData64 operation type. */ static const struct afs_call_type yfs_RXYFSStoreData64 = { .name = "YFS.StoreData64", .op = yfs_FS_StoreData64, .deliver = yfs_deliver_status_and_volsync, .destructor = afs_flat_call_destructor, }; /* * Store a set of pages to a large file. */ void yfs_fs_store_data(struct afs_operation *op) { struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; __be32 *bp; _enter(",%x,{%llx:%llu},,", key_serial(op->key), vp->fid.vid, vp->fid.vnode); _debug("size %llx, at %llx, i_size %llx", (unsigned long long)op->store.size, (unsigned long long)op->store.pos, (unsigned long long)op->store.i_size); call = afs_alloc_flat_call(op->net, &yfs_RXYFSStoreData64, sizeof(__be32) + sizeof(__be32) + sizeof(struct yfs_xdr_YFSFid) + sizeof(struct yfs_xdr_YFSStoreStatus) + sizeof(struct yfs_xdr_u64) * 3, sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSVolSync)); if (!call) return afs_op_nomem(op); call->write_iter = op->store.write_iter; /* marshall the parameters */ bp = call->request; bp = xdr_encode_u32(bp, YFSSTOREDATA64); bp = xdr_encode_u32(bp, 0); /* RPC flags */ bp = xdr_encode_YFSFid(bp, &vp->fid); bp = xdr_encode_YFSStoreStatus(bp, NULL, &op->mtime); bp = xdr_encode_u64(bp, op->store.pos); bp = xdr_encode_u64(bp, op->store.size); bp = xdr_encode_u64(bp, op->store.i_size); yfs_check_req(call, bp); trace_afs_make_fs_call(call, &vp->fid); afs_make_op_call(op, call, GFP_NOFS); } /* * YFS.StoreStatus operation type */ static const struct afs_call_type yfs_RXYFSStoreStatus = { .name = "YFS.StoreStatus", .op = yfs_FS_StoreStatus, .deliver = yfs_deliver_status_and_volsync, .destructor = afs_flat_call_destructor, }; static const struct afs_call_type yfs_RXYFSStoreData64_as_Status = { .name = "YFS.StoreData64", .op = yfs_FS_StoreData64, .deliver = yfs_deliver_status_and_volsync, .destructor = afs_flat_call_destructor, }; /* * Set the attributes on a file, using YFS.StoreData64 rather than * YFS.StoreStatus so as to alter the file size also. 
*/ static void yfs_fs_setattr_size(struct afs_operation *op) { struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; struct iattr *attr = op->setattr.attr; __be32 *bp; _enter(",%x,{%llx:%llu},,", key_serial(op->key), vp->fid.vid, vp->fid.vnode); call = afs_alloc_flat_call(op->net, &yfs_RXYFSStoreData64_as_Status, sizeof(__be32) * 2 + sizeof(struct yfs_xdr_YFSFid) + sizeof(struct yfs_xdr_YFSStoreStatus) + sizeof(struct yfs_xdr_u64) * 3, sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSVolSync)); if (!call) return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp = xdr_encode_u32(bp, YFSSTOREDATA64); bp = xdr_encode_u32(bp, 0); /* RPC flags */ bp = xdr_encode_YFSFid(bp, &vp->fid); bp = xdr_encode_YFS_StoreStatus(bp, attr); bp = xdr_encode_u64(bp, attr->ia_size); /* position of start of write */ bp = xdr_encode_u64(bp, 0); /* size of write */ bp = xdr_encode_u64(bp, attr->ia_size); /* new file length */ yfs_check_req(call, bp); trace_afs_make_fs_call(call, &vp->fid); afs_make_op_call(op, call, GFP_NOFS); } /* * Set the attributes on a file, using YFS.StoreData64 if there's a change in * file size, and YFS.StoreStatus otherwise. */ void yfs_fs_setattr(struct afs_operation *op) { struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; struct iattr *attr = op->setattr.attr; __be32 *bp; if (attr->ia_valid & ATTR_SIZE) return yfs_fs_setattr_size(op); _enter(",%x,{%llx:%llu},,", key_serial(op->key), vp->fid.vid, vp->fid.vnode); call = afs_alloc_flat_call(op->net, &yfs_RXYFSStoreStatus, sizeof(__be32) * 2 + sizeof(struct yfs_xdr_YFSFid) + sizeof(struct yfs_xdr_YFSStoreStatus), sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSVolSync)); if (!call) return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp = xdr_encode_u32(bp, YFSSTORESTATUS); bp = xdr_encode_u32(bp, 0); /* RPC flags */ bp = xdr_encode_YFSFid(bp, &vp->fid); bp = xdr_encode_YFS_StoreStatus(bp, attr); yfs_check_req(call, bp); trace_afs_make_fs_call(call, &vp->fid); afs_make_op_call(op, call, GFP_NOFS); } /* * Deliver reply data to a YFS.GetVolumeStatus operation. 
*/ static int yfs_deliver_fs_get_volume_status(struct afs_call *call) { struct afs_operation *op = call->op; const __be32 *bp; char *p; u32 size; int ret; _enter("{%u}", call->unmarshall); switch (call->unmarshall) { case 0: call->unmarshall++; afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSFetchVolumeStatus)); fallthrough; /* extract the returned status record */ case 1: _debug("extract status"); ret = afs_extract_data(call, true); if (ret < 0) return ret; bp = call->buffer; xdr_decode_YFSFetchVolumeStatus(&bp, &op->volstatus.vs); call->unmarshall++; afs_extract_to_tmp(call); fallthrough; /* extract the volume name length */ case 2: ret = afs_extract_data(call, true); if (ret < 0) return ret; call->count = ntohl(call->tmp); _debug("volname length: %u", call->count); if (call->count >= AFSNAMEMAX) return afs_protocol_error(call, afs_eproto_volname_len); size = (call->count + 3) & ~3; /* It's padded */ afs_extract_to_buf(call, size); call->unmarshall++; fallthrough; /* extract the volume name */ case 3: _debug("extract volname"); ret = afs_extract_data(call, true); if (ret < 0) return ret; p = call->buffer; p[call->count] = 0; _debug("volname '%s'", p); afs_extract_to_tmp(call); call->unmarshall++; fallthrough; /* extract the offline message length */ case 4: ret = afs_extract_data(call, true); if (ret < 0) return ret; call->count = ntohl(call->tmp); _debug("offline msg length: %u", call->count); if (call->count >= AFSNAMEMAX) return afs_protocol_error(call, afs_eproto_offline_msg_len); size = (call->count + 3) & ~3; /* It's padded */ afs_extract_to_buf(call, size); call->unmarshall++; fallthrough; /* extract the offline message */ case 5: _debug("extract offline"); ret = afs_extract_data(call, true); if (ret < 0) return ret; p = call->buffer; p[call->count] = 0; _debug("offline '%s'", p); afs_extract_to_tmp(call); call->unmarshall++; fallthrough; /* extract the message of the day length */ case 6: ret = afs_extract_data(call, true); if (ret < 0) return ret; call->count = ntohl(call->tmp); _debug("motd length: %u", call->count); if (call->count >= AFSNAMEMAX) return afs_protocol_error(call, afs_eproto_motd_len); size = (call->count + 3) & ~3; /* It's padded */ afs_extract_to_buf(call, size); call->unmarshall++; fallthrough; /* extract the message of the day */ case 7: _debug("extract motd"); ret = afs_extract_data(call, false); if (ret < 0) return ret; p = call->buffer; p[call->count] = 0; _debug("motd '%s'", p); call->unmarshall++; fallthrough; case 8: break; } _leave(" = 0 [done]"); return 0; } /* * YFS.GetVolumeStatus operation type */ static const struct afs_call_type yfs_RXYFSGetVolumeStatus = { .name = "YFS.GetVolumeStatus", .op = yfs_FS_GetVolumeStatus, .deliver = yfs_deliver_fs_get_volume_status, .destructor = afs_flat_call_destructor, }; /* * fetch the status of a volume */ void yfs_fs_get_volume_status(struct afs_operation *op) { struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; __be32 *bp; _enter(""); call = afs_alloc_flat_call(op->net, &yfs_RXYFSGetVolumeStatus, sizeof(__be32) * 2 + sizeof(struct yfs_xdr_u64), max_t(size_t, sizeof(struct yfs_xdr_YFSFetchVolumeStatus) + sizeof(__be32), AFSOPAQUEMAX + 1)); if (!call) return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp = xdr_encode_u32(bp, YFSGETVOLUMESTATUS); bp = xdr_encode_u32(bp, 0); /* RPC flags */ bp = xdr_encode_u64(bp, vp->fid.vid); yfs_check_req(call, bp); trace_afs_make_fs_call(call, &vp->fid); afs_make_op_call(op, call, GFP_NOFS); } /* * YFS.SetLock operation type */ static 
const struct afs_call_type yfs_RXYFSSetLock = { .name = "YFS.SetLock", .op = yfs_FS_SetLock, .deliver = yfs_deliver_status_and_volsync, .done = afs_lock_op_done, .destructor = afs_flat_call_destructor, }; /* * YFS.ExtendLock operation type */ static const struct afs_call_type yfs_RXYFSExtendLock = { .name = "YFS.ExtendLock", .op = yfs_FS_ExtendLock, .deliver = yfs_deliver_status_and_volsync, .done = afs_lock_op_done, .destructor = afs_flat_call_destructor, }; /* * YFS.ReleaseLock operation type */ static const struct afs_call_type yfs_RXYFSReleaseLock = { .name = "YFS.ReleaseLock", .op = yfs_FS_ReleaseLock, .deliver = yfs_deliver_status_and_volsync, .destructor = afs_flat_call_destructor, }; /* * Set a lock on a file */ void yfs_fs_set_lock(struct afs_operation *op) { struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; __be32 *bp; _enter(""); call = afs_alloc_flat_call(op->net, &yfs_RXYFSSetLock, sizeof(__be32) * 2 + sizeof(struct yfs_xdr_YFSFid) + sizeof(__be32), sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSVolSync)); if (!call) return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp = xdr_encode_u32(bp, YFSSETLOCK); bp = xdr_encode_u32(bp, 0); /* RPC flags */ bp = xdr_encode_YFSFid(bp, &vp->fid); bp = xdr_encode_u32(bp, op->lock.type); yfs_check_req(call, bp); trace_afs_make_fs_calli(call, &vp->fid, op->lock.type); afs_make_op_call(op, call, GFP_NOFS); } /* * extend a lock on a file */ void yfs_fs_extend_lock(struct afs_operation *op) { struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; __be32 *bp; _enter(""); call = afs_alloc_flat_call(op->net, &yfs_RXYFSExtendLock, sizeof(__be32) * 2 + sizeof(struct yfs_xdr_YFSFid), sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSVolSync)); if (!call) return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp = xdr_encode_u32(bp, YFSEXTENDLOCK); bp = xdr_encode_u32(bp, 0); /* RPC flags */ bp = xdr_encode_YFSFid(bp, &vp->fid); yfs_check_req(call, bp); trace_afs_make_fs_call(call, &vp->fid); afs_make_op_call(op, call, GFP_NOFS); } /* * release a lock on a file */ void yfs_fs_release_lock(struct afs_operation *op) { struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; __be32 *bp; _enter(""); call = afs_alloc_flat_call(op->net, &yfs_RXYFSReleaseLock, sizeof(__be32) * 2 + sizeof(struct yfs_xdr_YFSFid), sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSVolSync)); if (!call) return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp = xdr_encode_u32(bp, YFSRELEASELOCK); bp = xdr_encode_u32(bp, 0); /* RPC flags */ bp = xdr_encode_YFSFid(bp, &vp->fid); yfs_check_req(call, bp); trace_afs_make_fs_call(call, &vp->fid); afs_make_op_call(op, call, GFP_NOFS); } /* * Deliver a reply to YFS.FetchStatus */ static int yfs_deliver_fs_fetch_status(struct afs_call *call) { struct afs_operation *op = call->op; struct afs_vnode_param *vp = &op->file[op->fetch_status.which]; const __be32 *bp; int ret; ret = afs_transfer_reply(call); if (ret < 0) return ret; /* unmarshall the reply once we've received all of it */ bp = call->buffer; xdr_decode_YFSFetchStatus(&bp, call, &vp->scb); xdr_decode_YFSCallBack(&bp, call, &vp->scb); xdr_decode_YFSVolSync(&bp, &op->volsync); _leave(" = 0 [done]"); return 0; } /* * YFS.FetchStatus operation type */ static const struct afs_call_type yfs_RXYFSFetchStatus = { .name = "YFS.FetchStatus", .op = yfs_FS_FetchStatus, .deliver = yfs_deliver_fs_fetch_status, .destructor = afs_flat_call_destructor, 
}; /* * Fetch the status information for a fid without needing a vnode handle. */ void yfs_fs_fetch_status(struct afs_operation *op) { struct afs_vnode_param *vp = &op->file[op->fetch_status.which]; struct afs_call *call; __be32 *bp; _enter(",%x,{%llx:%llu},,", key_serial(op->key), vp->fid.vid, vp->fid.vnode); call = afs_alloc_flat_call(op->net, &yfs_RXYFSFetchStatus, sizeof(__be32) * 2 + sizeof(struct yfs_xdr_YFSFid), sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSCallBack) + sizeof(struct yfs_xdr_YFSVolSync)); if (!call) return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp = xdr_encode_u32(bp, YFSFETCHSTATUS); bp = xdr_encode_u32(bp, 0); /* RPC flags */ bp = xdr_encode_YFSFid(bp, &vp->fid); yfs_check_req(call, bp); trace_afs_make_fs_call(call, &vp->fid); afs_make_op_call(op, call, GFP_NOFS); } /* * Deliver reply data to an YFS.InlineBulkStatus call */ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call) { struct afs_operation *op = call->op; struct afs_status_cb *scb; const __be32 *bp; u32 tmp; int ret; _enter("{%u}", call->unmarshall); switch (call->unmarshall) { case 0: afs_extract_to_tmp(call); call->unmarshall++; fallthrough; /* Extract the file status count and array in two steps */ case 1: _debug("extract status count"); ret = afs_extract_data(call, true); if (ret < 0) return ret; tmp = ntohl(call->tmp); _debug("status count: %u/%u", tmp, op->nr_files); if (tmp != op->nr_files) return afs_protocol_error(call, afs_eproto_ibulkst_count); call->count = 0; call->unmarshall++; more_counts: afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSFetchStatus)); fallthrough; case 2: _debug("extract status array %u", call->count); ret = afs_extract_data(call, true); if (ret < 0) return ret; switch (call->count) { case 0: scb = &op->file[0].scb; break; case 1: scb = &op->file[1].scb; break; default: scb = &op->more_files[call->count - 2].scb; break; } bp = call->buffer; xdr_decode_YFSFetchStatus(&bp, call, scb); call->count++; if (call->count < op->nr_files) goto more_counts; call->count = 0; call->unmarshall++; afs_extract_to_tmp(call); fallthrough; /* Extract the callback count and array in two steps */ case 3: _debug("extract CB count"); ret = afs_extract_data(call, true); if (ret < 0) return ret; tmp = ntohl(call->tmp); _debug("CB count: %u", tmp); if (tmp != op->nr_files) return afs_protocol_error(call, afs_eproto_ibulkst_cb_count); call->count = 0; call->unmarshall++; more_cbs: afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSCallBack)); fallthrough; case 4: _debug("extract CB array"); ret = afs_extract_data(call, true); if (ret < 0) return ret; _debug("unmarshall CB array"); switch (call->count) { case 0: scb = &op->file[0].scb; break; case 1: scb = &op->file[1].scb; break; default: scb = &op->more_files[call->count - 2].scb; break; } bp = call->buffer; xdr_decode_YFSCallBack(&bp, call, scb); call->count++; if (call->count < op->nr_files) goto more_cbs; afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSVolSync)); call->unmarshall++; fallthrough; case 5: ret = afs_extract_data(call, false); if (ret < 0) return ret; bp = call->buffer; xdr_decode_YFSVolSync(&bp, &op->volsync); call->unmarshall++; fallthrough; case 6: break; } _leave(" = 0 [done]"); return 0; } /* * FS.InlineBulkStatus operation type */ static const struct afs_call_type yfs_RXYFSInlineBulkStatus = { .name = "YFS.InlineBulkStatus", .op = yfs_FS_InlineBulkStatus, .deliver = yfs_deliver_fs_inline_bulk_status, .destructor = afs_flat_call_destructor, }; /* * Fetch the status 
information for up to 1024 files */ void yfs_fs_inline_bulk_status(struct afs_operation *op) { struct afs_vnode_param *dvp = &op->file[0]; struct afs_vnode_param *vp = &op->file[1]; struct afs_call *call; __be32 *bp; int i; _enter(",%x,{%llx:%llu},%u", key_serial(op->key), vp->fid.vid, vp->fid.vnode, op->nr_files); call = afs_alloc_flat_call(op->net, &yfs_RXYFSInlineBulkStatus, sizeof(__be32) + sizeof(__be32) + sizeof(__be32) + sizeof(struct yfs_xdr_YFSFid) * op->nr_files, sizeof(struct yfs_xdr_YFSFetchStatus)); if (!call) return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp = xdr_encode_u32(bp, YFSINLINEBULKSTATUS); bp = xdr_encode_u32(bp, 0); /* RPCFlags */ bp = xdr_encode_u32(bp, op->nr_files); bp = xdr_encode_YFSFid(bp, &dvp->fid); bp = xdr_encode_YFSFid(bp, &vp->fid); for (i = 0; i < op->nr_files - 2; i++) bp = xdr_encode_YFSFid(bp, &op->more_files[i].fid); yfs_check_req(call, bp); trace_afs_make_fs_call(call, &vp->fid); afs_make_op_call(op, call, GFP_NOFS); } /* * Deliver reply data to an YFS.FetchOpaqueACL. */ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call) { struct afs_operation *op = call->op; struct afs_vnode_param *vp = &op->file[0]; struct yfs_acl *yacl = op->yacl; struct afs_acl *acl; const __be32 *bp; unsigned int size; int ret; _enter("{%u}", call->unmarshall); switch (call->unmarshall) { case 0: afs_extract_to_tmp(call); call->unmarshall++; fallthrough; /* Extract the file ACL length */ case 1: ret = afs_extract_data(call, true); if (ret < 0) return ret; size = call->count2 = ntohl(call->tmp); size = round_up(size, 4); if (yacl->flags & YFS_ACL_WANT_ACL) { acl = kmalloc(struct_size(acl, data, size), GFP_KERNEL); if (!acl) return -ENOMEM; yacl->acl = acl; acl->size = call->count2; afs_extract_begin(call, acl->data, size); } else { afs_extract_discard(call, size); } call->unmarshall++; fallthrough; /* Extract the file ACL */ case 2: ret = afs_extract_data(call, true); if (ret < 0) return ret; afs_extract_to_tmp(call); call->unmarshall++; fallthrough; /* Extract the volume ACL length */ case 3: ret = afs_extract_data(call, true); if (ret < 0) return ret; size = call->count2 = ntohl(call->tmp); size = round_up(size, 4); if (yacl->flags & YFS_ACL_WANT_VOL_ACL) { acl = kmalloc(struct_size(acl, data, size), GFP_KERNEL); if (!acl) return -ENOMEM; yacl->vol_acl = acl; acl->size = call->count2; afs_extract_begin(call, acl->data, size); } else { afs_extract_discard(call, size); } call->unmarshall++; fallthrough; /* Extract the volume ACL */ case 4: ret = afs_extract_data(call, true); if (ret < 0) return ret; afs_extract_to_buf(call, sizeof(__be32) * 2 + sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSVolSync)); call->unmarshall++; fallthrough; /* extract the metadata */ case 5: ret = afs_extract_data(call, false); if (ret < 0) return ret; bp = call->buffer; yacl->inherit_flag = ntohl(*bp++); yacl->num_cleaned = ntohl(*bp++); xdr_decode_YFSFetchStatus(&bp, call, &vp->scb); xdr_decode_YFSVolSync(&bp, &op->volsync); call->unmarshall++; fallthrough; case 6: break; } _leave(" = 0 [done]"); return 0; } void yfs_free_opaque_acl(struct yfs_acl *yacl) { if (yacl) { kfree(yacl->acl); kfree(yacl->vol_acl); kfree(yacl); } } /* * YFS.FetchOpaqueACL operation type */ static const struct afs_call_type yfs_RXYFSFetchOpaqueACL = { .name = "YFS.FetchOpaqueACL", .op = yfs_FS_FetchOpaqueACL, .deliver = yfs_deliver_fs_fetch_opaque_acl, .destructor = afs_flat_call_destructor, }; /* * Fetch the YFS advanced ACLs for a file. 
*/ void yfs_fs_fetch_opaque_acl(struct afs_operation *op) { struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; __be32 *bp; _enter(",%x,{%llx:%llu},,", key_serial(op->key), vp->fid.vid, vp->fid.vnode); call = afs_alloc_flat_call(op->net, &yfs_RXYFSFetchOpaqueACL, sizeof(__be32) * 2 + sizeof(struct yfs_xdr_YFSFid), sizeof(__be32) * 2 + sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSVolSync)); if (!call) return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp = xdr_encode_u32(bp, YFSFETCHOPAQUEACL); bp = xdr_encode_u32(bp, 0); /* RPC flags */ bp = xdr_encode_YFSFid(bp, &vp->fid); yfs_check_req(call, bp); trace_afs_make_fs_call(call, &vp->fid); afs_make_op_call(op, call, GFP_KERNEL); } /* * YFS.StoreOpaqueACL2 operation type */ static const struct afs_call_type yfs_RXYFSStoreOpaqueACL2 = { .name = "YFS.StoreOpaqueACL2", .op = yfs_FS_StoreOpaqueACL2, .deliver = yfs_deliver_status_and_volsync, .destructor = afs_flat_call_destructor, }; /* * Fetch the YFS ACL for a file. */ void yfs_fs_store_opaque_acl2(struct afs_operation *op) { struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; struct afs_acl *acl = op->acl; size_t size; __be32 *bp; _enter(",%x,{%llx:%llu},,", key_serial(op->key), vp->fid.vid, vp->fid.vnode); size = round_up(acl->size, 4); call = afs_alloc_flat_call(op->net, &yfs_RXYFSStoreOpaqueACL2, sizeof(__be32) * 2 + sizeof(struct yfs_xdr_YFSFid) + sizeof(__be32) + size, sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSVolSync)); if (!call) return afs_op_nomem(op); /* marshall the parameters */ bp = call->request; bp = xdr_encode_u32(bp, YFSSTOREOPAQUEACL2); bp = xdr_encode_u32(bp, 0); /* RPC flags */ bp = xdr_encode_YFSFid(bp, &vp->fid); bp = xdr_encode_u32(bp, acl->size); memcpy(bp, acl->data, acl->size); if (acl->size != size) memset((void *)bp + acl->size, 0, size - acl->size); bp += size / sizeof(__be32); yfs_check_req(call, bp); trace_afs_make_fs_call(call, &vp->fid); afs_make_op_call(op, call, GFP_KERNEL); }
linux-master
fs/afs/yfsclient.c
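The request marshalling in the file above repeatedly uses an encode-and-advance pattern (xdr_encode_u32 and friends) to lay big-endian XDR words into a flat buffer. Below is a minimal userspace sketch of that same pattern; the helper name and the opcode value are assumptions for illustration, not the kernel's implementation or the real YFSFETCHSTATUS constant.

/*
 * Illustrative only: a userspace analogue of the encode-and-advance pattern
 * used by the YFS marshalling code. Names and values here are placeholders.
 */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* htonl() */

/* Store a host-order u32 as a big-endian XDR word and return the next slot. */
static uint32_t *encode_u32(uint32_t *bp, uint32_t n)
{
	*bp = htonl(n);
	return bp + 1;
}

int main(void)
{
	uint32_t buf[4];
	uint32_t *bp = buf;

	bp = encode_u32(bp, 0x1234);	/* placeholder operation id */
	bp = encode_u32(bp, 0);		/* RPC flags word, as in the requests above */

	printf("encoded %zu bytes\n", (size_t)((char *)bp - (char *)buf));
	return 0;
}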
// SPDX-License-Identifier: GPL-2.0-or-later /* * An implementation of a loadable kernel mode driver providing * multiple kernel/user space bidirectional communications links. * * Author: Alan Cox <[email protected]> * * Adapted to become the Linux 2.0 Coda pseudo device * Peter Braam <[email protected]> * Michael Callahan <[email protected]> * * Changes for Linux 2.1 * Copyright (c) 1997 Carnegie-Mellon University */ #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/major.h> #include <linux/time.h> #include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/ioport.h> #include <linux/fcntl.h> #include <linux/delay.h> #include <linux/skbuff.h> #include <linux/proc_fs.h> #include <linux/vmalloc.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/poll.h> #include <linux/init.h> #include <linux/list.h> #include <linux/mutex.h> #include <linux/device.h> #include <linux/pid_namespace.h> #include <asm/io.h> #include <linux/uaccess.h> #include <linux/coda.h> #include "coda_psdev.h" #include "coda_linux.h" #include "coda_int.h" /* statistics */ int coda_hard; /* allows signals during upcalls */ unsigned long coda_timeout = 30; /* .. secs, then signals will dequeue */ struct venus_comm coda_comms[MAX_CODADEVS]; static struct class *coda_psdev_class; /* * Device operations */ static __poll_t coda_psdev_poll(struct file *file, poll_table * wait) { struct venus_comm *vcp = (struct venus_comm *) file->private_data; __poll_t mask = EPOLLOUT | EPOLLWRNORM; poll_wait(file, &vcp->vc_waitq, wait); mutex_lock(&vcp->vc_mutex); if (!list_empty(&vcp->vc_pending)) mask |= EPOLLIN | EPOLLRDNORM; mutex_unlock(&vcp->vc_mutex); return mask; } static long coda_psdev_ioctl(struct file * filp, unsigned int cmd, unsigned long arg) { unsigned int data; switch(cmd) { case CIOC_KERNEL_VERSION: data = CODA_KERNEL_VERSION; return put_user(data, (int __user *) arg); default: return -ENOTTY; } return 0; } /* * Receive a message written by Venus to the psdev */ static ssize_t coda_psdev_write(struct file *file, const char __user *buf, size_t nbytes, loff_t *off) { struct venus_comm *vcp = (struct venus_comm *) file->private_data; struct upc_req *req = NULL; struct upc_req *tmp; struct list_head *lh; struct coda_in_hdr hdr; ssize_t retval = 0, count = 0; int error; /* make sure there is enough to copy out the (opcode, unique) values */ if (nbytes < (2 * sizeof(u_int32_t))) return -EINVAL; /* Peek at the opcode, uniquefier */ if (copy_from_user(&hdr, buf, 2 * sizeof(u_int32_t))) return -EFAULT; if (DOWNCALL(hdr.opcode)) { union outputArgs *dcbuf; int size = sizeof(*dcbuf); if ( nbytes < sizeof(struct coda_out_hdr) ) { pr_warn("coda_downcall opc %d uniq %d, not enough!\n", hdr.opcode, hdr.unique); count = nbytes; goto out; } if ( nbytes > size ) { pr_warn("downcall opc %d, uniq %d, too much!", hdr.opcode, hdr.unique); nbytes = size; } dcbuf = vmemdup_user(buf, nbytes); if (IS_ERR(dcbuf)) { retval = PTR_ERR(dcbuf); goto out; } /* what downcall errors does Venus handle ? */ error = coda_downcall(vcp, hdr.opcode, dcbuf, nbytes); kvfree(dcbuf); if (error) { pr_warn("%s: coda_downcall error: %d\n", __func__, error); retval = error; goto out; } count = nbytes; goto out; } /* Look for the message on the processing queue. 
*/ mutex_lock(&vcp->vc_mutex); list_for_each(lh, &vcp->vc_processing) { tmp = list_entry(lh, struct upc_req , uc_chain); if (tmp->uc_unique == hdr.unique) { req = tmp; list_del(&req->uc_chain); break; } } mutex_unlock(&vcp->vc_mutex); if (!req) { pr_warn("%s: msg (%d, %d) not found\n", __func__, hdr.opcode, hdr.unique); retval = -ESRCH; goto out; } /* move data into response buffer. */ if (req->uc_outSize < nbytes) { pr_warn("%s: too much cnt: %d, cnt: %ld, opc: %d, uniq: %d.\n", __func__, req->uc_outSize, (long)nbytes, hdr.opcode, hdr.unique); nbytes = req->uc_outSize; /* don't have more space! */ } if (copy_from_user(req->uc_data, buf, nbytes)) { req->uc_flags |= CODA_REQ_ABORT; wake_up(&req->uc_sleep); retval = -EFAULT; goto out; } /* adjust outsize. is this useful ?? */ req->uc_outSize = nbytes; req->uc_flags |= CODA_REQ_WRITE; count = nbytes; /* Convert filedescriptor into a file handle */ if (req->uc_opcode == CODA_OPEN_BY_FD) { struct coda_open_by_fd_out *outp = (struct coda_open_by_fd_out *)req->uc_data; if (!outp->oh.result) { outp->fh = fget(outp->fd); if (!outp->fh) return -EBADF; } } wake_up(&req->uc_sleep); out: return(count ? count : retval); } /* * Read a message from the kernel to Venus */ static ssize_t coda_psdev_read(struct file * file, char __user * buf, size_t nbytes, loff_t *off) { DECLARE_WAITQUEUE(wait, current); struct venus_comm *vcp = (struct venus_comm *) file->private_data; struct upc_req *req; ssize_t retval = 0, count = 0; if (nbytes == 0) return 0; mutex_lock(&vcp->vc_mutex); add_wait_queue(&vcp->vc_waitq, &wait); set_current_state(TASK_INTERRUPTIBLE); while (list_empty(&vcp->vc_pending)) { if (file->f_flags & O_NONBLOCK) { retval = -EAGAIN; break; } if (signal_pending(current)) { retval = -ERESTARTSYS; break; } mutex_unlock(&vcp->vc_mutex); schedule(); mutex_lock(&vcp->vc_mutex); } set_current_state(TASK_RUNNING); remove_wait_queue(&vcp->vc_waitq, &wait); if (retval) goto out; req = list_entry(vcp->vc_pending.next, struct upc_req,uc_chain); list_del(&req->uc_chain); /* Move the input args into userspace */ count = req->uc_inSize; if (nbytes < req->uc_inSize) { pr_warn("%s: Venus read %ld bytes of %d in message\n", __func__, (long)nbytes, req->uc_inSize); count = nbytes; } if (copy_to_user(buf, req->uc_data, count)) retval = -EFAULT; /* If request was not a signal, enqueue and don't free */ if (!(req->uc_flags & CODA_REQ_ASYNC)) { req->uc_flags |= CODA_REQ_READ; list_add_tail(&(req->uc_chain), &vcp->vc_processing); goto out; } kvfree(req->uc_data); kfree(req); out: mutex_unlock(&vcp->vc_mutex); return (count ? 
count : retval); } static int coda_psdev_open(struct inode * inode, struct file * file) { struct venus_comm *vcp; int idx, err; if (task_active_pid_ns(current) != &init_pid_ns) return -EINVAL; if (current_user_ns() != &init_user_ns) return -EINVAL; idx = iminor(inode); if (idx < 0 || idx >= MAX_CODADEVS) return -ENODEV; err = -EBUSY; vcp = &coda_comms[idx]; mutex_lock(&vcp->vc_mutex); if (!vcp->vc_inuse) { vcp->vc_inuse++; INIT_LIST_HEAD(&vcp->vc_pending); INIT_LIST_HEAD(&vcp->vc_processing); init_waitqueue_head(&vcp->vc_waitq); vcp->vc_sb = NULL; vcp->vc_seq = 0; file->private_data = vcp; err = 0; } mutex_unlock(&vcp->vc_mutex); return err; } static int coda_psdev_release(struct inode * inode, struct file * file) { struct venus_comm *vcp = (struct venus_comm *) file->private_data; struct upc_req *req, *tmp; if (!vcp || !vcp->vc_inuse ) { pr_warn("%s: Not open.\n", __func__); return -1; } mutex_lock(&vcp->vc_mutex); /* Wakeup clients so they can return. */ list_for_each_entry_safe(req, tmp, &vcp->vc_pending, uc_chain) { list_del(&req->uc_chain); /* Async requests need to be freed here */ if (req->uc_flags & CODA_REQ_ASYNC) { kvfree(req->uc_data); kfree(req); continue; } req->uc_flags |= CODA_REQ_ABORT; wake_up(&req->uc_sleep); } list_for_each_entry_safe(req, tmp, &vcp->vc_processing, uc_chain) { list_del(&req->uc_chain); req->uc_flags |= CODA_REQ_ABORT; wake_up(&req->uc_sleep); } file->private_data = NULL; vcp->vc_inuse--; mutex_unlock(&vcp->vc_mutex); return 0; } static const struct file_operations coda_psdev_fops = { .owner = THIS_MODULE, .read = coda_psdev_read, .write = coda_psdev_write, .poll = coda_psdev_poll, .unlocked_ioctl = coda_psdev_ioctl, .open = coda_psdev_open, .release = coda_psdev_release, .llseek = noop_llseek, }; static int __init init_coda_psdev(void) { int i, err = 0; if (register_chrdev(CODA_PSDEV_MAJOR, "coda", &coda_psdev_fops)) { pr_err("%s: unable to get major %d\n", __func__, CODA_PSDEV_MAJOR); return -EIO; } coda_psdev_class = class_create("coda"); if (IS_ERR(coda_psdev_class)) { err = PTR_ERR(coda_psdev_class); goto out_chrdev; } for (i = 0; i < MAX_CODADEVS; i++) { mutex_init(&(&coda_comms[i])->vc_mutex); device_create(coda_psdev_class, NULL, MKDEV(CODA_PSDEV_MAJOR, i), NULL, "cfs%d", i); } coda_sysctl_init(); goto out; out_chrdev: unregister_chrdev(CODA_PSDEV_MAJOR, "coda"); out: return err; } MODULE_AUTHOR("Jan Harkes, Peter J. 
Braam"); MODULE_DESCRIPTION("Coda Distributed File System VFS interface"); MODULE_ALIAS_CHARDEV_MAJOR(CODA_PSDEV_MAJOR); MODULE_LICENSE("GPL"); MODULE_VERSION("7.2"); static int __init init_coda(void) { int status; int i; status = coda_init_inodecache(); if (status) goto out2; status = init_coda_psdev(); if ( status ) { pr_warn("Problem (%d) in init_coda_psdev\n", status); goto out1; } status = register_filesystem(&coda_fs_type); if (status) { pr_warn("failed to register filesystem!\n"); goto out; } return 0; out: for (i = 0; i < MAX_CODADEVS; i++) device_destroy(coda_psdev_class, MKDEV(CODA_PSDEV_MAJOR, i)); class_destroy(coda_psdev_class); unregister_chrdev(CODA_PSDEV_MAJOR, "coda"); coda_sysctl_clean(); out1: coda_destroy_inodecache(); out2: return status; } static void __exit exit_coda(void) { int err, i; err = unregister_filesystem(&coda_fs_type); if (err != 0) pr_warn("failed to unregister filesystem\n"); for (i = 0; i < MAX_CODADEVS; i++) device_destroy(coda_psdev_class, MKDEV(CODA_PSDEV_MAJOR, i)); class_destroy(coda_psdev_class); unregister_chrdev(CODA_PSDEV_MAJOR, "coda"); coda_sysctl_clean(); coda_destroy_inodecache(); } module_init(init_coda); module_exit(exit_coda);
linux-master
fs/coda/psdev.c
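The pseudo device above queues upcalls on vc_pending and signals readers through vc_waitq, so a userspace cache manager waits with poll() and then read()s one message at a time. A minimal sketch of that loop follows; the device path (/dev/cfs0, following the "cfs%d" name created by the driver, though the actual node depends on udev) and the buffer size are assumptions for illustration only.

/*
 * Minimal userspace sketch of waiting for a Coda upcall on the pseudo device.
 * Device path and buffer size are assumptions, not part of the driver.
 */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];			/* assumed large enough for one request */
	struct pollfd pfd;
	ssize_t n;

	pfd.fd = open("/dev/cfs0", O_RDWR);
	if (pfd.fd < 0) {
		perror("open");
		return 1;
	}
	pfd.events = POLLIN;

	/* Block until the kernel queues a request, then read one message. */
	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
		n = read(pfd.fd, buf, sizeof(buf));
		if (n > 0)
			printf("received %zd byte upcall\n", n);
	}

	close(pfd.fd);
	return 0;
}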
// SPDX-License-Identifier: GPL-2.0 /* * Sysctl operations for Coda filesystem * Original version: (C) 1996 P. Braam and M. Callahan * Rewritten for Linux 2.1. (C) 1997 Carnegie Mellon University * * Carnegie Mellon encourages users to contribute improvements to * the Coda project. Contact Peter Braam ([email protected]). */ #include <linux/sysctl.h> #include "coda_int.h" static struct ctl_table_header *fs_table_header; static struct ctl_table coda_table[] = { { .procname = "timeout", .data = &coda_timeout, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .procname = "hard", .data = &coda_hard, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .procname = "fake_statfs", .data = &coda_fake_statfs, .maxlen = sizeof(int), .mode = 0600, .proc_handler = proc_dointvec }, {} }; void coda_sysctl_init(void) { if ( !fs_table_header ) fs_table_header = register_sysctl("coda", coda_table); } void coda_sysctl_clean(void) { if ( fs_table_header ) { unregister_sysctl_table(fs_table_header); fs_table_header = NULL; } }
linux-master
fs/coda/sysctl.c
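The table above registers the "timeout", "hard" and "fake_statfs" knobs under a "coda" sysctl directory, which on a standard /proc mount appears as /proc/sys/coda/. A small check of the timeout value from userspace, shown here purely as an illustration:

/*
 * Read the coda upcall timeout through procfs. Assumes a standard /proc
 * mount; the path mirrors register_sysctl("coda", ...) and the "timeout"
 * entry registered above.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/coda/timeout", "r");
	int timeout;

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%d", &timeout) == 1)
		printf("coda upcall timeout: %d seconds\n", timeout);
	fclose(f);
	return 0;
}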
// SPDX-License-Identifier: GPL-2.0 /* * Mostly platform independent upcall operations to Venus: * -- upcalls * -- upcall routines * * Linux 2.0 version * Copyright (C) 1996 Peter J. Braam <[email protected]>, * Michael Callahan <[email protected]> * * Redone for Linux 2.1 * Copyright (C) 1997 Carnegie Mellon University * * Carnegie Mellon University encourages users of this code to contribute * improvements to the Coda project. Contact Peter Braam <[email protected]>. */ #include <linux/signal.h> #include <linux/sched/signal.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/time.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/stat.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/uaccess.h> #include <linux/vmalloc.h> #include <linux/vfs.h> #include <linux/coda.h> #include "coda_psdev.h" #include "coda_linux.h" #include "coda_cache.h" #include "coda_int.h" static int coda_upcall(struct venus_comm *vc, int inSize, int *outSize, union inputArgs *buffer); static void *alloc_upcall(int opcode, int size) { union inputArgs *inp; inp = kvzalloc(size, GFP_KERNEL); if (!inp) return ERR_PTR(-ENOMEM); inp->ih.opcode = opcode; inp->ih.pid = task_pid_nr_ns(current, &init_pid_ns); inp->ih.pgid = task_pgrp_nr_ns(current, &init_pid_ns); inp->ih.uid = from_kuid(&init_user_ns, current_fsuid()); return (void*)inp; } #define UPARG(op)\ do {\ inp = (union inputArgs *)alloc_upcall(op, insize); \ if (IS_ERR(inp)) { return PTR_ERR(inp); }\ outp = (union outputArgs *)(inp); \ outsize = insize; \ } while (0) #define INSIZE(tag) sizeof(struct coda_ ## tag ## _in) #define OUTSIZE(tag) sizeof(struct coda_ ## tag ## _out) #define SIZE(tag) max_t(unsigned int, INSIZE(tag), OUTSIZE(tag)) /* the upcalls */ int venus_rootfid(struct super_block *sb, struct CodaFid *fidp) { union inputArgs *inp; union outputArgs *outp; int insize, outsize, error; insize = SIZE(root); UPARG(CODA_ROOT); error = coda_upcall(coda_vcp(sb), insize, &outsize, inp); if (!error) *fidp = outp->coda_root.VFid; kvfree(inp); return error; } int venus_getattr(struct super_block *sb, struct CodaFid *fid, struct coda_vattr *attr) { union inputArgs *inp; union outputArgs *outp; int insize, outsize, error; insize = SIZE(getattr); UPARG(CODA_GETATTR); inp->coda_getattr.VFid = *fid; error = coda_upcall(coda_vcp(sb), insize, &outsize, inp); if (!error) *attr = outp->coda_getattr.attr; kvfree(inp); return error; } int venus_setattr(struct super_block *sb, struct CodaFid *fid, struct coda_vattr *vattr) { union inputArgs *inp; union outputArgs *outp; int insize, outsize, error; insize = SIZE(setattr); UPARG(CODA_SETATTR); inp->coda_setattr.VFid = *fid; inp->coda_setattr.attr = *vattr; error = coda_upcall(coda_vcp(sb), insize, &outsize, inp); kvfree(inp); return error; } int venus_lookup(struct super_block *sb, struct CodaFid *fid, const char *name, int length, int * type, struct CodaFid *resfid) { union inputArgs *inp; union outputArgs *outp; int insize, outsize, error; int offset; offset = INSIZE(lookup); insize = max_t(unsigned int, offset + length +1, OUTSIZE(lookup)); UPARG(CODA_LOOKUP); inp->coda_lookup.VFid = *fid; inp->coda_lookup.name = offset; inp->coda_lookup.flags = CLU_CASE_SENSITIVE; /* send Venus a null terminated string */ memcpy((char *)(inp) + offset, name, length); *((char *)inp + offset + length) = '\0'; error = coda_upcall(coda_vcp(sb), insize, &outsize, inp); if (!error) { *resfid = outp->coda_lookup.VFid; *type = 
outp->coda_lookup.vtype; } kvfree(inp); return error; } int venus_close(struct super_block *sb, struct CodaFid *fid, int flags, kuid_t uid) { union inputArgs *inp; union outputArgs *outp; int insize, outsize, error; insize = SIZE(release); UPARG(CODA_CLOSE); inp->ih.uid = from_kuid(&init_user_ns, uid); inp->coda_close.VFid = *fid; inp->coda_close.flags = flags; error = coda_upcall(coda_vcp(sb), insize, &outsize, inp); kvfree(inp); return error; } int venus_open(struct super_block *sb, struct CodaFid *fid, int flags, struct file **fh) { union inputArgs *inp; union outputArgs *outp; int insize, outsize, error; insize = SIZE(open_by_fd); UPARG(CODA_OPEN_BY_FD); inp->coda_open_by_fd.VFid = *fid; inp->coda_open_by_fd.flags = flags; error = coda_upcall(coda_vcp(sb), insize, &outsize, inp); if (!error) *fh = outp->coda_open_by_fd.fh; kvfree(inp); return error; } int venus_mkdir(struct super_block *sb, struct CodaFid *dirfid, const char *name, int length, struct CodaFid *newfid, struct coda_vattr *attrs) { union inputArgs *inp; union outputArgs *outp; int insize, outsize, error; int offset; offset = INSIZE(mkdir); insize = max_t(unsigned int, offset + length + 1, OUTSIZE(mkdir)); UPARG(CODA_MKDIR); inp->coda_mkdir.VFid = *dirfid; inp->coda_mkdir.attr = *attrs; inp->coda_mkdir.name = offset; /* Venus must get null terminated string */ memcpy((char *)(inp) + offset, name, length); *((char *)inp + offset + length) = '\0'; error = coda_upcall(coda_vcp(sb), insize, &outsize, inp); if (!error) { *attrs = outp->coda_mkdir.attr; *newfid = outp->coda_mkdir.VFid; } kvfree(inp); return error; } int venus_rename(struct super_block *sb, struct CodaFid *old_fid, struct CodaFid *new_fid, size_t old_length, size_t new_length, const char *old_name, const char *new_name) { union inputArgs *inp; union outputArgs *outp; int insize, outsize, error; int offset, s; offset = INSIZE(rename); insize = max_t(unsigned int, offset + new_length + old_length + 8, OUTSIZE(rename)); UPARG(CODA_RENAME); inp->coda_rename.sourceFid = *old_fid; inp->coda_rename.destFid = *new_fid; inp->coda_rename.srcname = offset; /* Venus must receive an null terminated string */ s = ( old_length & ~0x3) +4; /* round up to word boundary */ memcpy((char *)(inp) + offset, old_name, old_length); *((char *)inp + offset + old_length) = '\0'; /* another null terminated string for Venus */ offset += s; inp->coda_rename.destname = offset; s = ( new_length & ~0x3) +4; /* round up to word boundary */ memcpy((char *)(inp) + offset, new_name, new_length); *((char *)inp + offset + new_length) = '\0'; error = coda_upcall(coda_vcp(sb), insize, &outsize, inp); kvfree(inp); return error; } int venus_create(struct super_block *sb, struct CodaFid *dirfid, const char *name, int length, int excl, int mode, struct CodaFid *newfid, struct coda_vattr *attrs) { union inputArgs *inp; union outputArgs *outp; int insize, outsize, error; int offset; offset = INSIZE(create); insize = max_t(unsigned int, offset + length + 1, OUTSIZE(create)); UPARG(CODA_CREATE); inp->coda_create.VFid = *dirfid; inp->coda_create.attr.va_mode = mode; inp->coda_create.excl = excl; inp->coda_create.mode = mode; inp->coda_create.name = offset; /* Venus must get null terminated string */ memcpy((char *)(inp) + offset, name, length); *((char *)inp + offset + length) = '\0'; error = coda_upcall(coda_vcp(sb), insize, &outsize, inp); if (!error) { *attrs = outp->coda_create.attr; *newfid = outp->coda_create.VFid; } kvfree(inp); return error; } int venus_rmdir(struct super_block *sb, struct CodaFid 
*dirfid, const char *name, int length) { union inputArgs *inp; union outputArgs *outp; int insize, outsize, error; int offset; offset = INSIZE(rmdir); insize = max_t(unsigned int, offset + length + 1, OUTSIZE(rmdir)); UPARG(CODA_RMDIR); inp->coda_rmdir.VFid = *dirfid; inp->coda_rmdir.name = offset; memcpy((char *)(inp) + offset, name, length); *((char *)inp + offset + length) = '\0'; error = coda_upcall(coda_vcp(sb), insize, &outsize, inp); kvfree(inp); return error; } int venus_remove(struct super_block *sb, struct CodaFid *dirfid, const char *name, int length) { union inputArgs *inp; union outputArgs *outp; int error=0, insize, outsize, offset; offset = INSIZE(remove); insize = max_t(unsigned int, offset + length + 1, OUTSIZE(remove)); UPARG(CODA_REMOVE); inp->coda_remove.VFid = *dirfid; inp->coda_remove.name = offset; memcpy((char *)(inp) + offset, name, length); *((char *)inp + offset + length) = '\0'; error = coda_upcall(coda_vcp(sb), insize, &outsize, inp); kvfree(inp); return error; } int venus_readlink(struct super_block *sb, struct CodaFid *fid, char *buffer, int *length) { union inputArgs *inp; union outputArgs *outp; int insize, outsize, error; int retlen; char *result; insize = max_t(unsigned int, INSIZE(readlink), OUTSIZE(readlink)+ *length); UPARG(CODA_READLINK); inp->coda_readlink.VFid = *fid; error = coda_upcall(coda_vcp(sb), insize, &outsize, inp); if (!error) { retlen = outp->coda_readlink.count; if (retlen >= *length) retlen = *length - 1; *length = retlen; result = (char *)outp + (long)outp->coda_readlink.data; memcpy(buffer, result, retlen); *(buffer + retlen) = '\0'; } kvfree(inp); return error; } int venus_link(struct super_block *sb, struct CodaFid *fid, struct CodaFid *dirfid, const char *name, int len ) { union inputArgs *inp; union outputArgs *outp; int insize, outsize, error; int offset; offset = INSIZE(link); insize = max_t(unsigned int, offset + len + 1, OUTSIZE(link)); UPARG(CODA_LINK); inp->coda_link.sourceFid = *fid; inp->coda_link.destFid = *dirfid; inp->coda_link.tname = offset; /* make sure strings are null terminated */ memcpy((char *)(inp) + offset, name, len); *((char *)inp + offset + len) = '\0'; error = coda_upcall(coda_vcp(sb), insize, &outsize, inp); kvfree(inp); return error; } int venus_symlink(struct super_block *sb, struct CodaFid *fid, const char *name, int len, const char *symname, int symlen) { union inputArgs *inp; union outputArgs *outp; int insize, outsize, error; int offset, s; offset = INSIZE(symlink); insize = max_t(unsigned int, offset + len + symlen + 8, OUTSIZE(symlink)); UPARG(CODA_SYMLINK); /* inp->coda_symlink.attr = *tva; XXXXXX */ inp->coda_symlink.VFid = *fid; /* Round up to word boundary and null terminate */ inp->coda_symlink.srcname = offset; s = ( symlen & ~0x3 ) + 4; memcpy((char *)(inp) + offset, symname, symlen); *((char *)inp + offset + symlen) = '\0'; /* Round up to word boundary and null terminate */ offset += s; inp->coda_symlink.tname = offset; s = (len & ~0x3) + 4; memcpy((char *)(inp) + offset, name, len); *((char *)inp + offset + len) = '\0'; error = coda_upcall(coda_vcp(sb), insize, &outsize, inp); kvfree(inp); return error; } int venus_fsync(struct super_block *sb, struct CodaFid *fid) { union inputArgs *inp; union outputArgs *outp; int insize, outsize, error; insize=SIZE(fsync); UPARG(CODA_FSYNC); inp->coda_fsync.VFid = *fid; error = coda_upcall(coda_vcp(sb), insize, &outsize, inp); kvfree(inp); return error; } int venus_access(struct super_block *sb, struct CodaFid *fid, int mask) { union inputArgs *inp; 
union outputArgs *outp; int insize, outsize, error; insize = SIZE(access); UPARG(CODA_ACCESS); inp->coda_access.VFid = *fid; inp->coda_access.flags = mask; error = coda_upcall(coda_vcp(sb), insize, &outsize, inp); kvfree(inp); return error; } int venus_pioctl(struct super_block *sb, struct CodaFid *fid, unsigned int cmd, struct PioctlData *data) { union inputArgs *inp; union outputArgs *outp; int insize, outsize, error; int iocsize; insize = VC_MAXMSGSIZE; UPARG(CODA_IOCTL); /* build packet for Venus */ if (data->vi.in_size > VC_MAXDATASIZE) { error = -EINVAL; goto exit; } if (data->vi.out_size > VC_MAXDATASIZE) { error = -EINVAL; goto exit; } inp->coda_ioctl.VFid = *fid; /* the cmd field was mutated by increasing its size field to * reflect the path and follow args. We need to subtract that * out before sending the command to Venus. */ inp->coda_ioctl.cmd = (cmd & ~(PIOCPARM_MASK << 16)); iocsize = ((cmd >> 16) & PIOCPARM_MASK) - sizeof(char *) - sizeof(int); inp->coda_ioctl.cmd |= (iocsize & PIOCPARM_MASK) << 16; /* in->coda_ioctl.rwflag = flag; */ inp->coda_ioctl.len = data->vi.in_size; inp->coda_ioctl.data = (char *)(INSIZE(ioctl)); /* get the data out of user space */ if (copy_from_user((char *)inp + (long)inp->coda_ioctl.data, data->vi.in, data->vi.in_size)) { error = -EINVAL; goto exit; } error = coda_upcall(coda_vcp(sb), SIZE(ioctl) + data->vi.in_size, &outsize, inp); if (error) { pr_warn("%s: Venus returns: %d for %s\n", __func__, error, coda_f2s(fid)); goto exit; } if (outsize < (long)outp->coda_ioctl.data + outp->coda_ioctl.len) { error = -EINVAL; goto exit; } /* Copy out the OUT buffer. */ if (outp->coda_ioctl.len > data->vi.out_size) { error = -EINVAL; goto exit; } /* Copy out the OUT buffer. */ if (copy_to_user(data->vi.out, (char *)outp + (long)outp->coda_ioctl.data, outp->coda_ioctl.len)) { error = -EFAULT; goto exit; } exit: kvfree(inp); return error; } int venus_statfs(struct dentry *dentry, struct kstatfs *sfs) { union inputArgs *inp; union outputArgs *outp; int insize, outsize, error; insize = SIZE(statfs); UPARG(CODA_STATFS); error = coda_upcall(coda_vcp(dentry->d_sb), insize, &outsize, inp); if (!error) { sfs->f_blocks = outp->coda_statfs.stat.f_blocks; sfs->f_bfree = outp->coda_statfs.stat.f_bfree; sfs->f_bavail = outp->coda_statfs.stat.f_bavail; sfs->f_files = outp->coda_statfs.stat.f_files; sfs->f_ffree = outp->coda_statfs.stat.f_ffree; } kvfree(inp); return error; } int venus_access_intent(struct super_block *sb, struct CodaFid *fid, bool *access_intent_supported, size_t count, loff_t ppos, int type) { union inputArgs *inp; union outputArgs *outp; int insize, outsize, error; bool finalizer = type == CODA_ACCESS_TYPE_READ_FINISH || type == CODA_ACCESS_TYPE_WRITE_FINISH; if (!*access_intent_supported && !finalizer) return 0; insize = SIZE(access_intent); UPARG(CODA_ACCESS_INTENT); inp->coda_access_intent.VFid = *fid; inp->coda_access_intent.count = count; inp->coda_access_intent.pos = ppos; inp->coda_access_intent.type = type; error = coda_upcall(coda_vcp(sb), insize, finalizer ? NULL : &outsize, inp); /* * we have to free the request buffer for synchronous upcalls * or when asynchronous upcalls fail, but not when asynchronous * upcalls succeed */ if (!finalizer || error) kvfree(inp); /* Chunked access is not supported or an old Coda client */ if (error == -EOPNOTSUPP) { *access_intent_supported = false; error = 0; } return error; } /* * coda_upcall and coda_downcall routines. 
*/ static void coda_block_signals(sigset_t *old) { spin_lock_irq(&current->sighand->siglock); *old = current->blocked; sigfillset(&current->blocked); sigdelset(&current->blocked, SIGKILL); sigdelset(&current->blocked, SIGSTOP); sigdelset(&current->blocked, SIGINT); recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); } static void coda_unblock_signals(sigset_t *old) { spin_lock_irq(&current->sighand->siglock); current->blocked = *old; recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); } /* Don't allow signals to interrupt the following upcalls before venus * has seen them, * - CODA_CLOSE or CODA_RELEASE upcall (to avoid reference count problems) * - CODA_STORE (to avoid data loss) * - CODA_ACCESS_INTENT (to avoid reference count problems) */ #define CODA_INTERRUPTIBLE(r) (!coda_hard && \ (((r)->uc_opcode != CODA_CLOSE && \ (r)->uc_opcode != CODA_STORE && \ (r)->uc_opcode != CODA_ACCESS_INTENT && \ (r)->uc_opcode != CODA_RELEASE) || \ (r)->uc_flags & CODA_REQ_READ)) static inline void coda_waitfor_upcall(struct venus_comm *vcp, struct upc_req *req) { DECLARE_WAITQUEUE(wait, current); unsigned long timeout = jiffies + coda_timeout * HZ; sigset_t old; int blocked; coda_block_signals(&old); blocked = 1; add_wait_queue(&req->uc_sleep, &wait); for (;;) { if (CODA_INTERRUPTIBLE(req)) set_current_state(TASK_INTERRUPTIBLE); else set_current_state(TASK_UNINTERRUPTIBLE); /* got a reply */ if (req->uc_flags & (CODA_REQ_WRITE | CODA_REQ_ABORT)) break; if (blocked && time_after(jiffies, timeout) && CODA_INTERRUPTIBLE(req)) { coda_unblock_signals(&old); blocked = 0; } if (signal_pending(current)) { list_del(&req->uc_chain); break; } mutex_unlock(&vcp->vc_mutex); if (blocked) schedule_timeout(HZ); else schedule(); mutex_lock(&vcp->vc_mutex); } if (blocked) coda_unblock_signals(&old); remove_wait_queue(&req->uc_sleep, &wait); set_current_state(TASK_RUNNING); } /* * coda_upcall will return an error in the case of * failed communication with Venus _or_ will peek at Venus * reply and return Venus' error. * * As venus has 2 types of errors, normal errors (positive) and internal * errors (negative), normal errors are negated, while internal errors * are all mapped to -EINTR, while showing a nice warning message. (jh) */ static int coda_upcall(struct venus_comm *vcp, int inSize, int *outSize, union inputArgs *buffer) { union outputArgs *out; union inputArgs *sig_inputArgs; struct upc_req *req = NULL, *sig_req; int error; mutex_lock(&vcp->vc_mutex); if (!vcp->vc_inuse) { pr_notice("Venus dead, not sending upcall\n"); error = -ENXIO; goto exit; } /* Format the request message. */ req = kmalloc(sizeof(struct upc_req), GFP_KERNEL); if (!req) { error = -ENOMEM; goto exit; } buffer->ih.unique = ++vcp->vc_seq; req->uc_data = (void *)buffer; req->uc_flags = outSize ? 0 : CODA_REQ_ASYNC; req->uc_inSize = inSize; req->uc_outSize = (outSize && *outSize) ? *outSize : inSize; req->uc_opcode = buffer->ih.opcode; req->uc_unique = buffer->ih.unique; init_waitqueue_head(&req->uc_sleep); /* Append msg to pending queue and poke Venus. */ list_add_tail(&req->uc_chain, &vcp->vc_pending); wake_up_interruptible(&vcp->vc_waitq); /* We can return early on asynchronous requests */ if (outSize == NULL) { mutex_unlock(&vcp->vc_mutex); return 0; } /* We can be interrupted while we wait for Venus to process * our request. If the interrupt occurs before Venus has read * the request, we dequeue and return. If it occurs after the * read but before the reply, we dequeue, send a signal * message, and return. 
If it occurs after the reply we ignore * it. In no case do we want to restart the syscall. If it * was interrupted by a venus shutdown (psdev_close), return * ENODEV. */ /* Go to sleep. Wake up on signals only after the timeout. */ coda_waitfor_upcall(vcp, req); /* Op went through, interrupt or not... */ if (req->uc_flags & CODA_REQ_WRITE) { out = (union outputArgs *)req->uc_data; /* here we map positive Venus errors to kernel errors */ error = -out->oh.result; *outSize = req->uc_outSize; goto exit; } error = -EINTR; if ((req->uc_flags & CODA_REQ_ABORT) || !signal_pending(current)) { pr_warn("Unexpected interruption.\n"); goto exit; } /* Interrupted before venus read it. */ if (!(req->uc_flags & CODA_REQ_READ)) goto exit; /* Venus saw the upcall, make sure we can send interrupt signal */ if (!vcp->vc_inuse) { pr_info("Venus dead, not sending signal.\n"); goto exit; } error = -ENOMEM; sig_req = kmalloc(sizeof(struct upc_req), GFP_KERNEL); if (!sig_req) goto exit; sig_inputArgs = kvzalloc(sizeof(*sig_inputArgs), GFP_KERNEL); if (!sig_inputArgs) { kfree(sig_req); goto exit; } error = -EINTR; sig_inputArgs->ih.opcode = CODA_SIGNAL; sig_inputArgs->ih.unique = req->uc_unique; sig_req->uc_flags = CODA_REQ_ASYNC; sig_req->uc_opcode = sig_inputArgs->ih.opcode; sig_req->uc_unique = sig_inputArgs->ih.unique; sig_req->uc_data = (void *)sig_inputArgs; sig_req->uc_inSize = sizeof(struct coda_in_hdr); sig_req->uc_outSize = sizeof(struct coda_in_hdr); /* insert at head of queue! */ list_add(&(sig_req->uc_chain), &vcp->vc_pending); wake_up_interruptible(&vcp->vc_waitq); exit: kfree(req); mutex_unlock(&vcp->vc_mutex); return error; } /* The statements below are part of the Coda opportunistic programming -- taken from the Mach/BSD kernel code for Coda. You don't get correct semantics by stating what needs to be done without guaranteeing the invariants needed for it to happen. When will be have time to find out what exactly is going on? (pjb) */ /* * There are 7 cases where cache invalidations occur. The semantics * of each is listed here: * * CODA_FLUSH -- flush all entries from the name cache and the cnode cache. * CODA_PURGEUSER -- flush all entries from the name cache for a specific user * This call is a result of token expiration. * * The next arise as the result of callbacks on a file or directory. * CODA_ZAPFILE -- flush the cached attributes for a file. * CODA_ZAPDIR -- flush the attributes for the dir and * force a new lookup for all the children of this dir. * * The next is a result of Venus detecting an inconsistent file. * CODA_PURGEFID -- flush the attribute for the file * purge it and its children from the dcache * * The last allows Venus to replace local fids with global ones * during reintegration. 
* * CODA_REPLACE -- replace one CodaFid with another throughout the name cache */ int coda_downcall(struct venus_comm *vcp, int opcode, union outputArgs *out, size_t nbytes) { struct inode *inode = NULL; struct CodaFid *fid = NULL, *newfid; struct super_block *sb; /* * Make sure we have received enough data from the cache * manager to populate the necessary fields in the buffer */ switch (opcode) { case CODA_PURGEUSER: if (nbytes < sizeof(struct coda_purgeuser_out)) return -EINVAL; break; case CODA_ZAPDIR: if (nbytes < sizeof(struct coda_zapdir_out)) return -EINVAL; break; case CODA_ZAPFILE: if (nbytes < sizeof(struct coda_zapfile_out)) return -EINVAL; break; case CODA_PURGEFID: if (nbytes < sizeof(struct coda_purgefid_out)) return -EINVAL; break; case CODA_REPLACE: if (nbytes < sizeof(struct coda_replace_out)) return -EINVAL; break; } /* Handle invalidation requests. */ mutex_lock(&vcp->vc_mutex); sb = vcp->vc_sb; if (!sb || !sb->s_root) goto unlock_out; switch (opcode) { case CODA_FLUSH: coda_cache_clear_all(sb); shrink_dcache_sb(sb); if (d_really_is_positive(sb->s_root)) coda_flag_inode(d_inode(sb->s_root), C_FLUSH); break; case CODA_PURGEUSER: coda_cache_clear_all(sb); break; case CODA_ZAPDIR: fid = &out->coda_zapdir.CodaFid; break; case CODA_ZAPFILE: fid = &out->coda_zapfile.CodaFid; break; case CODA_PURGEFID: fid = &out->coda_purgefid.CodaFid; break; case CODA_REPLACE: fid = &out->coda_replace.OldFid; break; } if (fid) inode = coda_fid_to_inode(fid, sb); unlock_out: mutex_unlock(&vcp->vc_mutex); if (!inode) return 0; switch (opcode) { case CODA_ZAPDIR: coda_flag_inode_children(inode, C_PURGE); coda_flag_inode(inode, C_VATTR); break; case CODA_ZAPFILE: coda_flag_inode(inode, C_VATTR); break; case CODA_PURGEFID: coda_flag_inode_children(inode, C_PURGE); /* catch the dentries later if some are still busy */ coda_flag_inode(inode, C_PURGE); d_prune_aliases(inode); break; case CODA_REPLACE: newfid = &out->coda_replace.NewFid; coda_replace_fid(inode, fid, newfid); break; } iput(inode); return 0; }
linux-master
fs/coda/upcall.c
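The name-carrying upcalls above (venus_lookup, venus_mkdir, venus_remove, ...) all build their request the same way: a fixed input structure records the byte offset of a NUL-terminated name that is copied immediately after it. The sketch below reproduces that layout with a simplified stand-in structure; it is not the real coda_*_in ABI, just an illustration of the offset-plus-string packing.

/*
 * Sketch of the "fixed header + name at recorded offset" layout used by the
 * Coda upcalls. The struct is a hypothetical stand-in, not the kernel ABI.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fake_lookup_in {		/* simplified stand-in for illustration */
	uint32_t opcode;
	uint32_t unique;
	uint32_t name;		/* offset of the name within the buffer */
};

int main(void)
{
	const char *name = "example.txt";
	size_t offset = sizeof(struct fake_lookup_in);
	size_t len = strlen(name);
	char *buf = calloc(1, offset + len + 1);
	struct fake_lookup_in *in = (struct fake_lookup_in *)buf;

	if (!buf)
		return 1;
	in->name = offset;			/* record where the string lives */
	memcpy(buf + offset, name, len);	/* copy the name ... */
	buf[offset + len] = '\0';		/* ... and NUL-terminate it */

	printf("name at offset %u: %s\n", (unsigned)in->name, buf + in->name);
	free(buf);
	return 0;
}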
// SPDX-License-Identifier: GPL-2.0 /* cnode related routines for the coda kernel code (C) 1996 Peter Braam */ #include <linux/types.h> #include <linux/string.h> #include <linux/time.h> #include <linux/coda.h> #include <linux/pagemap.h> #include "coda_psdev.h" #include "coda_linux.h" static inline int coda_fideq(struct CodaFid *fid1, struct CodaFid *fid2) { return memcmp(fid1, fid2, sizeof(*fid1)) == 0; } static const struct inode_operations coda_symlink_inode_operations = { .get_link = page_get_link, .setattr = coda_setattr, }; /* cnode.c */ static void coda_fill_inode(struct inode *inode, struct coda_vattr *attr) { coda_vattr_to_iattr(inode, attr); if (S_ISREG(inode->i_mode)) { inode->i_op = &coda_file_inode_operations; inode->i_fop = &coda_file_operations; } else if (S_ISDIR(inode->i_mode)) { inode->i_op = &coda_dir_inode_operations; inode->i_fop = &coda_dir_operations; } else if (S_ISLNK(inode->i_mode)) { inode->i_op = &coda_symlink_inode_operations; inode_nohighmem(inode); inode->i_data.a_ops = &coda_symlink_aops; inode->i_mapping = &inode->i_data; } else init_special_inode(inode, inode->i_mode, huge_decode_dev(attr->va_rdev)); } static int coda_test_inode(struct inode *inode, void *data) { struct CodaFid *fid = (struct CodaFid *)data; struct coda_inode_info *cii = ITOC(inode); return coda_fideq(&cii->c_fid, fid); } static int coda_set_inode(struct inode *inode, void *data) { struct CodaFid *fid = (struct CodaFid *)data; struct coda_inode_info *cii = ITOC(inode); cii->c_fid = *fid; return 0; } struct inode * coda_iget(struct super_block * sb, struct CodaFid * fid, struct coda_vattr * attr) { struct inode *inode; struct coda_inode_info *cii; unsigned long hash = coda_f2i(fid); umode_t inode_type = coda_inode_type(attr); retry: inode = iget5_locked(sb, hash, coda_test_inode, coda_set_inode, fid); if (!inode) return ERR_PTR(-ENOMEM); if (inode->i_state & I_NEW) { cii = ITOC(inode); /* we still need to set i_ino for things like stat(2) */ inode->i_ino = hash; /* inode is locked and unique, no need to grab cii->c_lock */ cii->c_mapcount = 0; coda_fill_inode(inode, attr); unlock_new_inode(inode); } else if ((inode->i_mode & S_IFMT) != inode_type) { /* Inode has changed type, mark bad and grab a new one */ remove_inode_hash(inode); coda_flag_inode(inode, C_PURGE); iput(inode); goto retry; } return inode; } /* this is effectively coda_iget: - get attributes (might be cached) - get the inode for the fid using vfs iget - link the two up if this is needed - fill in the attributes */ struct inode *coda_cnode_make(struct CodaFid *fid, struct super_block *sb) { struct coda_vattr attr; struct inode *inode; int error; /* We get inode numbers from Venus -- see venus source */ error = venus_getattr(sb, fid, &attr); if (error) return ERR_PTR(error); inode = coda_iget(sb, fid, &attr); if (IS_ERR(inode)) pr_warn("%s: coda_iget failed\n", __func__); return inode; } /* Although we treat Coda file identifiers as immutable, there is one * special case for files created during a disconnection where they may * not be globally unique. When an identifier collision is detected we * first try to flush the cached inode from the kernel and finally * resort to renaming/rehashing in-place. Userspace remembers both old * and new values of the identifier to handle any in-flight upcalls. * The real solution is to use globally unique UUIDs as identifiers, but * retrofitting the existing userspace code for this is non-trivial. 
*/ void coda_replace_fid(struct inode *inode, struct CodaFid *oldfid, struct CodaFid *newfid) { struct coda_inode_info *cii = ITOC(inode); unsigned long hash = coda_f2i(newfid); BUG_ON(!coda_fideq(&cii->c_fid, oldfid)); /* replace fid and rehash inode */ /* XXX we probably need to hold some lock here! */ remove_inode_hash(inode); cii->c_fid = *newfid; inode->i_ino = hash; __insert_inode_hash(inode, hash); } /* convert a fid to an inode. */ struct inode *coda_fid_to_inode(struct CodaFid *fid, struct super_block *sb) { struct inode *inode; unsigned long hash = coda_f2i(fid); inode = ilookup5(sb, hash, coda_test_inode, fid); if ( !inode ) return NULL; /* we should never see newly created inodes because we intentionally * fail in the initialization callback */ BUG_ON(inode->i_state & I_NEW); return inode; } struct coda_file_info *coda_ftoc(struct file *file) { struct coda_file_info *cfi = file->private_data; BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC); return cfi; } /* the CONTROL inode is made without asking attributes from Venus */ struct inode *coda_cnode_makectl(struct super_block *sb) { struct inode *inode = new_inode(sb); if (inode) { inode->i_ino = CTL_INO; inode->i_op = &coda_ioctl_inode_operations; inode->i_fop = &coda_ioctl_operations; inode->i_mode = 0444; return inode; } return ERR_PTR(-ENOMEM); }
linux-master
fs/coda/cnode.c
// SPDX-License-Identifier: GPL-2.0 /* * Directory operations for Coda filesystem * Original version: (C) 1996 P. Braam and M. Callahan * Rewritten for Linux 2.1. (C) 1997 Carnegie Mellon University * * Carnegie Mellon encourages users to contribute improvements to * the Coda project. Contact Peter Braam ([email protected]). */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/time.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/file.h> #include <linux/stat.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/spinlock.h> #include <linux/namei.h> #include <linux/uaccess.h> #include <linux/coda.h> #include "coda_psdev.h" #include "coda_linux.h" #include "coda_cache.h" #include "coda_int.h" /* same as fs/bad_inode.c */ static int coda_return_EIO(void) { return -EIO; } #define CODA_EIO_ERROR ((void *) (coda_return_EIO)) /* inode operations for directories */ /* access routines: lookup, readlink, permission */ static struct dentry *coda_lookup(struct inode *dir, struct dentry *entry, unsigned int flags) { struct super_block *sb = dir->i_sb; const char *name = entry->d_name.name; size_t length = entry->d_name.len; struct inode *inode; int type = 0; if (length > CODA_MAXNAMLEN) { pr_err("name too long: lookup, %s %zu\n", coda_i2s(dir), length); return ERR_PTR(-ENAMETOOLONG); } /* control object, create inode on the fly */ if (is_root_inode(dir) && coda_iscontrol(name, length)) { inode = coda_cnode_makectl(sb); type = CODA_NOCACHE; } else { struct CodaFid fid = { { 0, } }; int error = venus_lookup(sb, coda_i2f(dir), name, length, &type, &fid); inode = !error ? coda_cnode_make(&fid, sb) : ERR_PTR(error); } if (!IS_ERR(inode) && (type & CODA_NOCACHE)) coda_flag_inode(inode, C_VATTR | C_PURGE); if (inode == ERR_PTR(-ENOENT)) inode = NULL; return d_splice_alias(inode, entry); } int coda_permission(struct mnt_idmap *idmap, struct inode *inode, int mask) { int error; if (mask & MAY_NOT_BLOCK) return -ECHILD; mask &= MAY_READ | MAY_WRITE | MAY_EXEC; if (!mask) return 0; if ((mask & MAY_EXEC) && !execute_ok(inode)) return -EACCES; if (coda_cache_check(inode, mask)) return 0; error = venus_access(inode->i_sb, coda_i2f(inode), mask); if (!error) coda_cache_enter(inode, mask); return error; } static inline void coda_dir_update_mtime(struct inode *dir) { #ifdef REQUERY_VENUS_FOR_MTIME /* invalidate the directory cnode's attributes so we refetch the * attributes from venus next time the inode is referenced */ coda_flag_inode(dir, C_VATTR); #else /* optimistically we can also act as if our nose bleeds. The * granularity of the mtime is coarse anyways so we might actually be * right most of the time. Note: we only do this for directories. */ dir->i_mtime = inode_set_ctime_current(dir); #endif } /* we have to wrap inc_nlink/drop_nlink because sometimes userspace uses a * trick to fool GNU find's optimizations. If we can't be sure of the link * (because of volume mount points) we set i_nlink to 1 which forces find * to consider every child as a possible directory. 
We should also never * see an increment or decrement for deleted directories where i_nlink == 0 */ static inline void coda_dir_inc_nlink(struct inode *dir) { if (dir->i_nlink >= 2) inc_nlink(dir); } static inline void coda_dir_drop_nlink(struct inode *dir) { if (dir->i_nlink > 2) drop_nlink(dir); } /* creation routines: create, mknod, mkdir, link, symlink */ static int coda_create(struct mnt_idmap *idmap, struct inode *dir, struct dentry *de, umode_t mode, bool excl) { int error; const char *name=de->d_name.name; int length=de->d_name.len; struct inode *inode; struct CodaFid newfid; struct coda_vattr attrs; if (is_root_inode(dir) && coda_iscontrol(name, length)) return -EPERM; error = venus_create(dir->i_sb, coda_i2f(dir), name, length, 0, mode, &newfid, &attrs); if (error) goto err_out; inode = coda_iget(dir->i_sb, &newfid, &attrs); if (IS_ERR(inode)) { error = PTR_ERR(inode); goto err_out; } /* invalidate the directory cnode's attributes */ coda_dir_update_mtime(dir); d_instantiate(de, inode); return 0; err_out: d_drop(de); return error; } static int coda_mkdir(struct mnt_idmap *idmap, struct inode *dir, struct dentry *de, umode_t mode) { struct inode *inode; struct coda_vattr attrs; const char *name = de->d_name.name; int len = de->d_name.len; int error; struct CodaFid newfid; if (is_root_inode(dir) && coda_iscontrol(name, len)) return -EPERM; attrs.va_mode = mode; error = venus_mkdir(dir->i_sb, coda_i2f(dir), name, len, &newfid, &attrs); if (error) goto err_out; inode = coda_iget(dir->i_sb, &newfid, &attrs); if (IS_ERR(inode)) { error = PTR_ERR(inode); goto err_out; } /* invalidate the directory cnode's attributes */ coda_dir_inc_nlink(dir); coda_dir_update_mtime(dir); d_instantiate(de, inode); return 0; err_out: d_drop(de); return error; } /* try to make de an entry in dir_inodde linked to source_de */ static int coda_link(struct dentry *source_de, struct inode *dir_inode, struct dentry *de) { struct inode *inode = d_inode(source_de); const char * name = de->d_name.name; int len = de->d_name.len; int error; if (is_root_inode(dir_inode) && coda_iscontrol(name, len)) return -EPERM; error = venus_link(dir_inode->i_sb, coda_i2f(inode), coda_i2f(dir_inode), (const char *)name, len); if (error) { d_drop(de); return error; } coda_dir_update_mtime(dir_inode); ihold(inode); d_instantiate(de, inode); inc_nlink(inode); return 0; } static int coda_symlink(struct mnt_idmap *idmap, struct inode *dir_inode, struct dentry *de, const char *symname) { const char *name = de->d_name.name; int len = de->d_name.len; int symlen; int error; if (is_root_inode(dir_inode) && coda_iscontrol(name, len)) return -EPERM; symlen = strlen(symname); if (symlen > CODA_MAXPATHLEN) return -ENAMETOOLONG; /* * This entry is now negative. Since we do not create * an inode for the entry we have to drop it. 
*/ d_drop(de); error = venus_symlink(dir_inode->i_sb, coda_i2f(dir_inode), name, len, symname, symlen); /* mtime is no good anymore */ if (!error) coda_dir_update_mtime(dir_inode); return error; } /* destruction routines: unlink, rmdir */ static int coda_unlink(struct inode *dir, struct dentry *de) { int error; const char *name = de->d_name.name; int len = de->d_name.len; error = venus_remove(dir->i_sb, coda_i2f(dir), name, len); if (error) return error; coda_dir_update_mtime(dir); drop_nlink(d_inode(de)); return 0; } static int coda_rmdir(struct inode *dir, struct dentry *de) { const char *name = de->d_name.name; int len = de->d_name.len; int error; error = venus_rmdir(dir->i_sb, coda_i2f(dir), name, len); if (!error) { /* VFS may delete the child */ if (d_really_is_positive(de)) clear_nlink(d_inode(de)); /* fix the link count of the parent */ coda_dir_drop_nlink(dir); coda_dir_update_mtime(dir); } return error; } /* rename */ static int coda_rename(struct mnt_idmap *idmap, struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) { const char *old_name = old_dentry->d_name.name; const char *new_name = new_dentry->d_name.name; int old_length = old_dentry->d_name.len; int new_length = new_dentry->d_name.len; int error; if (flags) return -EINVAL; error = venus_rename(old_dir->i_sb, coda_i2f(old_dir), coda_i2f(new_dir), old_length, new_length, (const char *) old_name, (const char *)new_name); if (!error) { if (d_really_is_positive(new_dentry)) { if (d_is_dir(new_dentry)) { coda_dir_drop_nlink(old_dir); coda_dir_inc_nlink(new_dir); } coda_flag_inode(d_inode(new_dentry), C_VATTR); } coda_dir_update_mtime(old_dir); coda_dir_update_mtime(new_dir); } return error; } static inline unsigned int CDT2DT(unsigned char cdt) { unsigned int dt; switch(cdt) { case CDT_UNKNOWN: dt = DT_UNKNOWN; break; case CDT_FIFO: dt = DT_FIFO; break; case CDT_CHR: dt = DT_CHR; break; case CDT_DIR: dt = DT_DIR; break; case CDT_BLK: dt = DT_BLK; break; case CDT_REG: dt = DT_REG; break; case CDT_LNK: dt = DT_LNK; break; case CDT_SOCK: dt = DT_SOCK; break; case CDT_WHT: dt = DT_WHT; break; default: dt = DT_UNKNOWN; break; } return dt; } /* support routines */ static int coda_venus_readdir(struct file *coda_file, struct dir_context *ctx) { struct coda_file_info *cfi; struct coda_inode_info *cii; struct file *host_file; struct venus_dirent *vdir; unsigned long vdir_size = offsetof(struct venus_dirent, d_name); unsigned int type; struct qstr name; ino_t ino; int ret; cfi = coda_ftoc(coda_file); host_file = cfi->cfi_container; cii = ITOC(file_inode(coda_file)); vdir = kmalloc(sizeof(*vdir), GFP_KERNEL); if (!vdir) return -ENOMEM; if (!dir_emit_dots(coda_file, ctx)) goto out; while (1) { loff_t pos = ctx->pos - 2; /* read entries from the directory file */ ret = kernel_read(host_file, vdir, sizeof(*vdir), &pos); if (ret < 0) { pr_err("%s: read dir %s failed %d\n", __func__, coda_f2s(&cii->c_fid), ret); break; } if (ret == 0) break; /* end of directory file reached */ /* catch truncated reads */ if (ret < vdir_size || ret < vdir_size + vdir->d_namlen) { pr_err("%s: short read on %s\n", __func__, coda_f2s(&cii->c_fid)); ret = -EBADF; break; } /* validate whether the directory file actually makes sense */ if (vdir->d_reclen < vdir_size + vdir->d_namlen) { pr_err("%s: invalid dir %s\n", __func__, coda_f2s(&cii->c_fid)); ret = -EBADF; break; } name.len = vdir->d_namlen; name.name = vdir->d_name; /* Make sure we skip '.' 
and '..', we already got those */ if (name.name[0] == '.' && (name.len == 1 || (name.name[1] == '.' && name.len == 2))) vdir->d_fileno = name.len = 0; /* skip null entries */ if (vdir->d_fileno && name.len) { ino = vdir->d_fileno; type = CDT2DT(vdir->d_type); if (!dir_emit(ctx, name.name, name.len, ino, type)) break; } /* we'll always have progress because d_reclen is unsigned and * we've already established it is non-zero. */ ctx->pos += vdir->d_reclen; } out: kfree(vdir); return 0; } /* file operations for directories */ static int coda_readdir(struct file *coda_file, struct dir_context *ctx) { struct coda_file_info *cfi; struct file *host_file; int ret; cfi = coda_ftoc(coda_file); host_file = cfi->cfi_container; if (host_file->f_op->iterate_shared) { struct inode *host_inode = file_inode(host_file); ret = -ENOENT; if (!IS_DEADDIR(host_inode)) { inode_lock_shared(host_inode); ret = host_file->f_op->iterate_shared(host_file, ctx); file_accessed(host_file); inode_unlock_shared(host_inode); } return ret; } /* Venus: we must read Venus dirents from a file */ return coda_venus_readdir(coda_file, ctx); } /* called when a cache lookup succeeds */ static int coda_dentry_revalidate(struct dentry *de, unsigned int flags) { struct inode *inode; struct coda_inode_info *cii; if (flags & LOOKUP_RCU) return -ECHILD; inode = d_inode(de); if (!inode || is_root_inode(inode)) goto out; if (is_bad_inode(inode)) goto bad; cii = ITOC(d_inode(de)); if (!(cii->c_flags & (C_PURGE | C_FLUSH))) goto out; shrink_dcache_parent(de); /* propagate for a flush */ if (cii->c_flags & C_FLUSH) coda_flag_inode_children(inode, C_FLUSH); if (d_count(de) > 1) /* pretend it's valid, but don't change the flags */ goto out; /* clear the flags. */ spin_lock(&cii->c_lock); cii->c_flags &= ~(C_VATTR | C_PURGE | C_FLUSH); spin_unlock(&cii->c_lock); bad: return 0; out: return 1; } /* * This is the callback from dput() when d_count is going to 0. * We use this to unhash dentries with bad inodes. */ static int coda_dentry_delete(const struct dentry * dentry) { struct inode *inode; struct coda_inode_info *cii; if (d_really_is_negative(dentry)) return 0; inode = d_inode(dentry); if (!inode || is_bad_inode(inode)) return 1; cii = ITOC(inode); if (cii->c_flags & C_PURGE) return 1; return 0; } /* * This is called when we want to check if the inode has * changed on the server. Coda makes this easy since the * cache manager Venus issues a downcall to the kernel when this * happens */ int coda_revalidate_inode(struct inode *inode) { struct coda_vattr attr; int error; int old_mode; ino_t old_ino; struct coda_inode_info *cii = ITOC(inode); if (!cii->c_flags) return 0; if (cii->c_flags & (C_VATTR | C_PURGE | C_FLUSH)) { error = venus_getattr(inode->i_sb, &(cii->c_fid), &attr); if (error) return -EIO; /* this inode may be lost if: - it's ino changed - type changes must be permitted for repair and missing mount points. 
*/ old_mode = inode->i_mode; old_ino = inode->i_ino; coda_vattr_to_iattr(inode, &attr); if ((old_mode & S_IFMT) != (inode->i_mode & S_IFMT)) { pr_warn("inode %ld, fid %s changed type!\n", inode->i_ino, coda_f2s(&(cii->c_fid))); } /* the following can happen when a local fid is replaced with a global one, here we lose and declare the inode bad */ if (inode->i_ino != old_ino) return -EIO; coda_flag_inode_children(inode, C_FLUSH); spin_lock(&cii->c_lock); cii->c_flags &= ~(C_VATTR | C_PURGE | C_FLUSH); spin_unlock(&cii->c_lock); } return 0; } const struct dentry_operations coda_dentry_operations = { .d_revalidate = coda_dentry_revalidate, .d_delete = coda_dentry_delete, }; const struct inode_operations coda_dir_inode_operations = { .create = coda_create, .lookup = coda_lookup, .link = coda_link, .unlink = coda_unlink, .symlink = coda_symlink, .mkdir = coda_mkdir, .rmdir = coda_rmdir, .mknod = CODA_EIO_ERROR, .rename = coda_rename, .permission = coda_permission, .getattr = coda_getattr, .setattr = coda_setattr, }; WRAP_DIR_ITER(coda_readdir) // FIXME! const struct file_operations coda_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, .iterate_shared = shared_coda_readdir, .open = coda_open, .release = coda_release, .fsync = coda_fsync, };
linux-master
fs/coda/dir.c
// SPDX-License-Identifier: GPL-2.0 /* * Inode operations for Coda filesystem * Original version: (C) 1996 P. Braam and M. Callahan * Rewritten for Linux 2.1. (C) 1997 Carnegie Mellon University * * Carnegie Mellon encourages users to contribute improvements to * the Coda project. Contact Peter Braam ([email protected]). */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/time.h> #include <linux/fs.h> #include <linux/stat.h> #include <linux/errno.h> #include <linux/uaccess.h> #include <linux/string.h> #include <linux/coda.h> #include "coda_psdev.h" #include "coda_linux.h" /* initialize the debugging variables */ int coda_fake_statfs; /* print a fid */ char * coda_f2s(struct CodaFid *f) { static char s[60]; sprintf(s, "(%08x.%08x.%08x.%08x)", f->opaque[0], f->opaque[1], f->opaque[2], f->opaque[3]); return s; } /* recognize special .CONTROL name */ int coda_iscontrol(const char *name, size_t length) { return ((CODA_CONTROLLEN == length) && (strncmp(name, CODA_CONTROL, CODA_CONTROLLEN) == 0)); } unsigned short coda_flags_to_cflags(unsigned short flags) { unsigned short coda_flags = 0; if ((flags & O_ACCMODE) == O_RDONLY) coda_flags |= C_O_READ; if ((flags & O_ACCMODE) == O_RDWR) coda_flags |= C_O_READ | C_O_WRITE; if ((flags & O_ACCMODE) == O_WRONLY) coda_flags |= C_O_WRITE; if (flags & O_TRUNC) coda_flags |= C_O_TRUNC; if (flags & O_CREAT) coda_flags |= C_O_CREAT; if (flags & O_EXCL) coda_flags |= C_O_EXCL; return coda_flags; } static struct timespec64 coda_to_timespec64(struct coda_timespec ts) { struct timespec64 ts64 = { .tv_sec = ts.tv_sec, .tv_nsec = ts.tv_nsec, }; return ts64; } static struct coda_timespec timespec64_to_coda(struct timespec64 ts64) { struct coda_timespec ts = { .tv_sec = ts64.tv_sec, .tv_nsec = ts64.tv_nsec, }; return ts; } /* utility functions below */ umode_t coda_inode_type(struct coda_vattr *attr) { switch (attr->va_type) { case C_VREG: return S_IFREG; case C_VDIR: return S_IFDIR; case C_VLNK: return S_IFLNK; case C_VNON: default: return 0; } } void coda_vattr_to_iattr(struct inode *inode, struct coda_vattr *attr) { /* inode's i_flags, i_ino are set by iget * XXX: is this all we need ?? */ umode_t inode_type = coda_inode_type(attr); inode->i_mode |= inode_type; if (attr->va_mode != (u_short) -1) inode->i_mode = attr->va_mode | inode_type; if (attr->va_uid != -1) inode->i_uid = make_kuid(&init_user_ns, (uid_t) attr->va_uid); if (attr->va_gid != -1) inode->i_gid = make_kgid(&init_user_ns, (gid_t) attr->va_gid); if (attr->va_nlink != -1) set_nlink(inode, attr->va_nlink); if (attr->va_size != -1) inode->i_size = attr->va_size; if (attr->va_size != -1) inode->i_blocks = (attr->va_size + 511) >> 9; if (attr->va_atime.tv_sec != -1) inode->i_atime = coda_to_timespec64(attr->va_atime); if (attr->va_mtime.tv_sec != -1) inode->i_mtime = coda_to_timespec64(attr->va_mtime); if (attr->va_ctime.tv_sec != -1) inode_set_ctime_to_ts(inode, coda_to_timespec64(attr->va_ctime)); } /* * BSD sets attributes that need not be modified to -1. * Linux uses the valid field to indicate what should be * looked at. The BSD type field needs to be deduced from linux * mode. * So we have to do some translations here. 
*/ void coda_iattr_to_vattr(struct iattr *iattr, struct coda_vattr *vattr) { unsigned int valid; /* clean out */ vattr->va_mode = -1; vattr->va_uid = (vuid_t) -1; vattr->va_gid = (vgid_t) -1; vattr->va_size = (off_t) -1; vattr->va_atime.tv_sec = (int64_t) -1; vattr->va_atime.tv_nsec = (long) -1; vattr->va_mtime.tv_sec = (int64_t) -1; vattr->va_mtime.tv_nsec = (long) -1; vattr->va_ctime.tv_sec = (int64_t) -1; vattr->va_ctime.tv_nsec = (long) -1; vattr->va_type = C_VNON; vattr->va_fileid = -1; vattr->va_gen = -1; vattr->va_bytes = -1; vattr->va_nlink = -1; vattr->va_blocksize = -1; vattr->va_rdev = -1; vattr->va_flags = 0; /* determine the type */ #if 0 mode = iattr->ia_mode; if ( S_ISDIR(mode) ) { vattr->va_type = C_VDIR; } else if ( S_ISREG(mode) ) { vattr->va_type = C_VREG; } else if ( S_ISLNK(mode) ) { vattr->va_type = C_VLNK; } else { /* don't do others */ vattr->va_type = C_VNON; } #endif /* set those vattrs that need change */ valid = iattr->ia_valid; if ( valid & ATTR_MODE ) { vattr->va_mode = iattr->ia_mode; } if ( valid & ATTR_UID ) { vattr->va_uid = (vuid_t) from_kuid(&init_user_ns, iattr->ia_uid); } if ( valid & ATTR_GID ) { vattr->va_gid = (vgid_t) from_kgid(&init_user_ns, iattr->ia_gid); } if ( valid & ATTR_SIZE ) { vattr->va_size = iattr->ia_size; } if ( valid & ATTR_ATIME ) { vattr->va_atime = timespec64_to_coda(iattr->ia_atime); } if ( valid & ATTR_MTIME ) { vattr->va_mtime = timespec64_to_coda(iattr->ia_mtime); } if ( valid & ATTR_CTIME ) { vattr->va_ctime = timespec64_to_coda(iattr->ia_ctime); } }
linux-master
fs/coda/coda_linux.c
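The helpers in coda_linux.c above translate VFS open flags into Coda's C_O_* bits via coda_flags_to_cflags(). A minimal userspace sketch of that mapping follows; the C_O_* values are illustrative placeholders, not the real constants from the Coda headers.

/* Standalone sketch of the open-flag translation done by
 * coda_flags_to_cflags() above.  The C_O_* values here are
 * placeholders for illustration only. */
#include <fcntl.h>
#include <stdio.h>

#define C_O_READ	0x001	/* placeholder value */
#define C_O_WRITE	0x002	/* placeholder value */
#define C_O_TRUNC	0x010	/* placeholder value */
#define C_O_EXCL	0x100	/* placeholder value */
#define C_O_CREAT	0x200	/* placeholder value */

static unsigned short to_cflags(unsigned short flags)
{
	unsigned short coda_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		coda_flags |= C_O_READ;
	if ((flags & O_ACCMODE) == O_RDWR)
		coda_flags |= C_O_READ | C_O_WRITE;
	if ((flags & O_ACCMODE) == O_WRONLY)
		coda_flags |= C_O_WRITE;
	if (flags & O_TRUNC)
		coda_flags |= C_O_TRUNC;
	if (flags & O_CREAT)
		coda_flags |= C_O_CREAT;
	if (flags & O_EXCL)
		coda_flags |= C_O_EXCL;
	return coda_flags;
}

int main(void)
{
	/* O_RDWR|O_CREAT maps to read + write + create bits */
	printf("O_RDWR|O_CREAT -> 0x%x\n", to_cflags(O_RDWR | O_CREAT));
	return 0;
}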
// SPDX-License-Identifier: GPL-2.0 /* * Super block/filesystem wide operations * * Copyright (C) 1996 Peter J. Braam <[email protected]> and * Michael Callahan <[email protected]> * * Rewritten for Linux 2.1. Peter Braam <[email protected]> * Copyright (C) Carnegie Mellon University */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/string.h> #include <linux/stat.h> #include <linux/errno.h> #include <linux/unistd.h> #include <linux/mutex.h> #include <linux/spinlock.h> #include <linux/file.h> #include <linux/vfs.h> #include <linux/slab.h> #include <linux/pid_namespace.h> #include <linux/uaccess.h> #include <linux/fs.h> #include <linux/vmalloc.h> #include <linux/coda.h> #include "coda_psdev.h" #include "coda_linux.h" #include "coda_cache.h" #include "coda_int.h" /* VFS super_block ops */ static void coda_evict_inode(struct inode *); static void coda_put_super(struct super_block *); static int coda_statfs(struct dentry *dentry, struct kstatfs *buf); static struct kmem_cache * coda_inode_cachep; static struct inode *coda_alloc_inode(struct super_block *sb) { struct coda_inode_info *ei; ei = alloc_inode_sb(sb, coda_inode_cachep, GFP_KERNEL); if (!ei) return NULL; memset(&ei->c_fid, 0, sizeof(struct CodaFid)); ei->c_flags = 0; ei->c_uid = GLOBAL_ROOT_UID; ei->c_cached_perm = 0; spin_lock_init(&ei->c_lock); return &ei->vfs_inode; } static void coda_free_inode(struct inode *inode) { kmem_cache_free(coda_inode_cachep, ITOC(inode)); } static void init_once(void *foo) { struct coda_inode_info *ei = (struct coda_inode_info *) foo; inode_init_once(&ei->vfs_inode); } int __init coda_init_inodecache(void) { coda_inode_cachep = kmem_cache_create("coda_inode_cache", sizeof(struct coda_inode_info), 0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD| SLAB_ACCOUNT, init_once); if (coda_inode_cachep == NULL) return -ENOMEM; return 0; } void coda_destroy_inodecache(void) { /* * Make sure all delayed rcu free inodes are flushed before we * destroy cache. 
*/ rcu_barrier(); kmem_cache_destroy(coda_inode_cachep); } static int coda_remount(struct super_block *sb, int *flags, char *data) { sync_filesystem(sb); *flags |= SB_NOATIME; return 0; } /* exported operations */ static const struct super_operations coda_super_operations = { .alloc_inode = coda_alloc_inode, .free_inode = coda_free_inode, .evict_inode = coda_evict_inode, .put_super = coda_put_super, .statfs = coda_statfs, .remount_fs = coda_remount, }; static int get_device_index(struct coda_mount_data *data) { struct fd f; struct inode *inode; int idx; if (data == NULL) { pr_warn("%s: Bad mount data\n", __func__); return -1; } if (data->version != CODA_MOUNT_VERSION) { pr_warn("%s: Bad mount version\n", __func__); return -1; } f = fdget(data->fd); if (!f.file) goto Ebadf; inode = file_inode(f.file); if (!S_ISCHR(inode->i_mode) || imajor(inode) != CODA_PSDEV_MAJOR) { fdput(f); goto Ebadf; } idx = iminor(inode); fdput(f); if (idx < 0 || idx >= MAX_CODADEVS) { pr_warn("%s: Bad minor number\n", __func__); return -1; } return idx; Ebadf: pr_warn("%s: Bad file\n", __func__); return -1; } static int coda_fill_super(struct super_block *sb, void *data, int silent) { struct inode *root = NULL; struct venus_comm *vc; struct CodaFid fid; int error; int idx; if (task_active_pid_ns(current) != &init_pid_ns) return -EINVAL; idx = get_device_index((struct coda_mount_data *) data); /* Ignore errors in data, for backward compatibility */ if(idx == -1) idx = 0; pr_info("%s: device index: %i\n", __func__, idx); vc = &coda_comms[idx]; mutex_lock(&vc->vc_mutex); if (!vc->vc_inuse) { pr_warn("%s: No pseudo device\n", __func__); error = -EINVAL; goto unlock_out; } if (vc->vc_sb) { pr_warn("%s: Device already mounted\n", __func__); error = -EBUSY; goto unlock_out; } vc->vc_sb = sb; mutex_unlock(&vc->vc_mutex); sb->s_fs_info = vc; sb->s_flags |= SB_NOATIME; sb->s_blocksize = 4096; /* XXXXX what do we put here?? 
*/ sb->s_blocksize_bits = 12; sb->s_magic = CODA_SUPER_MAGIC; sb->s_op = &coda_super_operations; sb->s_d_op = &coda_dentry_operations; sb->s_time_gran = 1; sb->s_time_min = S64_MIN; sb->s_time_max = S64_MAX; error = super_setup_bdi(sb); if (error) goto error; /* get root fid from Venus: this needs the root inode */ error = venus_rootfid(sb, &fid); if ( error ) { pr_warn("%s: coda_get_rootfid failed with %d\n", __func__, error); goto error; } pr_info("%s: rootfid is %s\n", __func__, coda_f2s(&fid)); /* make root inode */ root = coda_cnode_make(&fid, sb); if (IS_ERR(root)) { error = PTR_ERR(root); pr_warn("Failure of coda_cnode_make for root: error %d\n", error); goto error; } pr_info("%s: rootinode is %ld dev %s\n", __func__, root->i_ino, root->i_sb->s_id); sb->s_root = d_make_root(root); if (!sb->s_root) { error = -EINVAL; goto error; } return 0; error: mutex_lock(&vc->vc_mutex); vc->vc_sb = NULL; sb->s_fs_info = NULL; unlock_out: mutex_unlock(&vc->vc_mutex); return error; } static void coda_put_super(struct super_block *sb) { struct venus_comm *vcp = coda_vcp(sb); mutex_lock(&vcp->vc_mutex); vcp->vc_sb = NULL; sb->s_fs_info = NULL; mutex_unlock(&vcp->vc_mutex); mutex_destroy(&vcp->vc_mutex); pr_info("Bye bye.\n"); } static void coda_evict_inode(struct inode *inode) { truncate_inode_pages_final(&inode->i_data); clear_inode(inode); coda_cache_clear_inode(inode); } int coda_getattr(struct mnt_idmap *idmap, const struct path *path, struct kstat *stat, u32 request_mask, unsigned int flags) { int err = coda_revalidate_inode(d_inode(path->dentry)); if (!err) generic_fillattr(&nop_mnt_idmap, request_mask, d_inode(path->dentry), stat); return err; } int coda_setattr(struct mnt_idmap *idmap, struct dentry *de, struct iattr *iattr) { struct inode *inode = d_inode(de); struct coda_vattr vattr; int error; memset(&vattr, 0, sizeof(vattr)); inode_set_ctime_current(inode); coda_iattr_to_vattr(iattr, &vattr); vattr.va_type = C_VNON; /* cannot set type */ /* Venus is responsible for truncating the container-file!!! */ error = venus_setattr(inode->i_sb, coda_i2f(inode), &vattr); if (!error) { coda_vattr_to_iattr(inode, &vattr); coda_cache_clear_inode(inode); } return error; } const struct inode_operations coda_file_inode_operations = { .permission = coda_permission, .getattr = coda_getattr, .setattr = coda_setattr, }; static int coda_statfs(struct dentry *dentry, struct kstatfs *buf) { int error; error = venus_statfs(dentry, buf); if (error) { /* fake something like AFS does */ buf->f_blocks = 9000000; buf->f_bfree = 9000000; buf->f_bavail = 9000000; buf->f_files = 9000000; buf->f_ffree = 9000000; } /* and fill in the rest */ buf->f_type = CODA_SUPER_MAGIC; buf->f_bsize = 4096; buf->f_namelen = CODA_MAXNAMLEN; return 0; } /* init_coda: used by filesystems.c to register coda */ static struct dentry *coda_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_nodev(fs_type, flags, data, coda_fill_super); } struct file_system_type coda_fs_type = { .owner = THIS_MODULE, .name = "coda", .mount = coda_mount, .kill_sb = kill_anon_super, .fs_flags = FS_BINARY_MOUNTDATA, }; MODULE_ALIAS_FS("coda");
linux-master
fs/coda/inode.c
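coda_fill_super()/get_device_index() above expect binary mount data carrying the file descriptor of an already-open Coda pseudo device. A hedged userspace sketch of handing that descriptor to mount(2) follows; the struct mirrors only the two fields the kernel code reads (version and fd), and the device node path and version value are assumptions.

/* Minimal sketch, not a complete Coda client: open the pseudo
 * device and pass its fd as binary mount data, the shape that
 * get_device_index() above expects.  The /dev path and the
 * version value are assumptions. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mount.h>
#include <unistd.h>

struct coda_mount_data {	/* mirrors the fields used above */
	int version;
	int fd;
};

int main(void)
{
	struct coda_mount_data md;
	int fd = open("/dev/cfs0", O_RDWR);	/* assumed device node */

	if (fd < 0) {
		perror("open /dev/cfs0");
		return 1;
	}
	md.version = 1;		/* assumed CODA_MOUNT_VERSION */
	md.fd = fd;
	if (mount("coda", "/coda", "coda", MS_NOATIME, &md) < 0) {
		perror("mount");
		close(fd);
		return 1;
	}
	return 0;
}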
// SPDX-License-Identifier: GPL-2.0 /* * Pioctl operations for Coda. * Original version: (C) 1996 Peter Braam * Rewritten for Linux 2.1: (C) 1997 Carnegie Mellon University * * Carnegie Mellon encourages users of this code to contribute improvements * to the Coda project. Contact Peter Braam <[email protected]>. */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/time.h> #include <linux/fs.h> #include <linux/stat.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/namei.h> #include <linux/module.h> #include <linux/uaccess.h> #include <linux/coda.h> #include "coda_psdev.h" #include "coda_linux.h" /* pioctl ops */ static int coda_ioctl_permission(struct mnt_idmap *idmap, struct inode *inode, int mask); static long coda_pioctl(struct file *filp, unsigned int cmd, unsigned long user_data); /* exported from this file */ const struct inode_operations coda_ioctl_inode_operations = { .permission = coda_ioctl_permission, .setattr = coda_setattr, }; const struct file_operations coda_ioctl_operations = { .unlocked_ioctl = coda_pioctl, .llseek = noop_llseek, }; /* the coda pioctl inode ops */ static int coda_ioctl_permission(struct mnt_idmap *idmap, struct inode *inode, int mask) { return (mask & MAY_EXEC) ? -EACCES : 0; } static long coda_pioctl(struct file *filp, unsigned int cmd, unsigned long user_data) { struct path path; int error; struct PioctlData data; struct inode *inode = file_inode(filp); struct inode *target_inode = NULL; struct coda_inode_info *cnp; /* get the Pioctl data arguments from user space */ if (copy_from_user(&data, (void __user *)user_data, sizeof(data))) return -EINVAL; /* * Look up the pathname. Note that the pathname is in * user memory, and namei takes care of this */ error = user_path_at(AT_FDCWD, data.path, data.follow ? LOOKUP_FOLLOW : 0, &path); if (error) return error; target_inode = d_inode(path.dentry); /* return if it is not a Coda inode */ if (target_inode->i_sb != inode->i_sb) { error = -EINVAL; goto out; } /* now proceed to make the upcall */ cnp = ITOC(target_inode); error = venus_pioctl(inode->i_sb, &(cnp->c_fid), cmd, &data); out: path_put(&path); return error; }
linux-master
fs/coda/pioctl.c
// SPDX-License-Identifier: GPL-2.0 /* * Symlink inode operations for Coda filesystem * Original version: (C) 1996 P. Braam and M. Callahan * Rewritten for Linux 2.1. (C) 1997 Carnegie Mellon University * * Carnegie Mellon encourages users to contribute improvements to * the Coda project. Contact Peter Braam ([email protected]). */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/time.h> #include <linux/fs.h> #include <linux/stat.h> #include <linux/errno.h> #include <linux/pagemap.h> #include <linux/coda.h> #include "coda_psdev.h" #include "coda_linux.h" static int coda_symlink_filler(struct file *file, struct folio *folio) { struct inode *inode = folio->mapping->host; int error; struct coda_inode_info *cii; unsigned int len = PAGE_SIZE; char *p = folio_address(folio); cii = ITOC(inode); error = venus_readlink(inode->i_sb, &cii->c_fid, p, &len); if (error) goto fail; folio_mark_uptodate(folio); folio_unlock(folio); return 0; fail: folio_set_error(folio); folio_unlock(folio); return error; } const struct address_space_operations coda_symlink_aops = { .read_folio = coda_symlink_filler, };
linux-master
fs/coda/symlink.c
// SPDX-License-Identifier: GPL-2.0 /* * Cache operations for Coda. * For Linux 2.1: (C) 1997 Carnegie Mellon University * For Linux 2.3: (C) 2000 Carnegie Mellon University * * Carnegie Mellon encourages users of this code to contribute improvements * to the Coda project http://www.coda.cs.cmu.edu/ <[email protected]>. */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/time.h> #include <linux/fs.h> #include <linux/stat.h> #include <linux/errno.h> #include <linux/uaccess.h> #include <linux/string.h> #include <linux/list.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/coda.h> #include "coda_psdev.h" #include "coda_linux.h" #include "coda_cache.h" static atomic_t permission_epoch = ATOMIC_INIT(0); /* replace or extend an acl cache hit */ void coda_cache_enter(struct inode *inode, int mask) { struct coda_inode_info *cii = ITOC(inode); spin_lock(&cii->c_lock); cii->c_cached_epoch = atomic_read(&permission_epoch); if (!uid_eq(cii->c_uid, current_fsuid())) { cii->c_uid = current_fsuid(); cii->c_cached_perm = mask; } else cii->c_cached_perm |= mask; spin_unlock(&cii->c_lock); } /* remove cached acl from an inode */ void coda_cache_clear_inode(struct inode *inode) { struct coda_inode_info *cii = ITOC(inode); spin_lock(&cii->c_lock); cii->c_cached_epoch = atomic_read(&permission_epoch) - 1; spin_unlock(&cii->c_lock); } /* remove all acl caches */ void coda_cache_clear_all(struct super_block *sb) { atomic_inc(&permission_epoch); } /* check if the mask has been matched against the acl already */ int coda_cache_check(struct inode *inode, int mask) { struct coda_inode_info *cii = ITOC(inode); int hit; spin_lock(&cii->c_lock); hit = (mask & cii->c_cached_perm) == mask && uid_eq(cii->c_uid, current_fsuid()) && cii->c_cached_epoch == atomic_read(&permission_epoch); spin_unlock(&cii->c_lock); return hit; } /* Purging dentries and children */ /* The following routines drop dentries which are not in use and flag dentries which are in use to be zapped later. The flags are detected by: - coda_dentry_revalidate (for lookups) if the flag is C_PURGE - coda_dentry_delete: to remove dentry from the cache when d_count falls to zero - an inode method coda_revalidate (for attributes) if the flag is C_VATTR */ /* this won't do any harm: just flag all children */ static void coda_flag_children(struct dentry *parent, int flag) { struct dentry *de; spin_lock(&parent->d_lock); list_for_each_entry(de, &parent->d_subdirs, d_child) { /* don't know what to do with negative dentries */ if (d_inode(de) ) coda_flag_inode(d_inode(de), flag); } spin_unlock(&parent->d_lock); return; } void coda_flag_inode_children(struct inode *inode, int flag) { struct dentry *alias_de; if ( !inode || !S_ISDIR(inode->i_mode)) return; alias_de = d_find_alias(inode); if (!alias_de) return; coda_flag_children(alias_de, flag); shrink_dcache_parent(alias_de); dput(alias_de); }
linux-master
fs/coda/cache.c
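The ACL cache in cache.c above invalidates every cached permission at once by bumping a global epoch rather than walking each inode. A freestanding sketch of that epoch pattern (not the kernel API) is shown below.

/* Userspace sketch of the epoch-based invalidation used by the
 * Coda ACL cache above: a cached entry is valid only while its
 * recorded epoch matches the global one, so a single atomic
 * increment invalidates every cached entry at once. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int permission_epoch;

struct cached_perm {
	int epoch;	/* epoch when the entry was filled */
	int mask;	/* cached permission bits */
};

static void cache_enter(struct cached_perm *c, int mask)
{
	c->epoch = atomic_load(&permission_epoch);
	c->mask |= mask;
}

static bool cache_check(const struct cached_perm *c, int mask)
{
	return (c->mask & mask) == mask &&
	       c->epoch == atomic_load(&permission_epoch);
}

static void cache_clear_all(void)
{
	atomic_fetch_add(&permission_epoch, 1);	/* all entries go stale */
}

int main(void)
{
	struct cached_perm c = { 0, 0 };

	cache_enter(&c, 4);
	printf("hit before flush: %d\n", cache_check(&c, 4));	/* 1 */
	cache_clear_all();
	printf("hit after flush:  %d\n", cache_check(&c, 4));	/* 0 */
	return 0;
}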
// SPDX-License-Identifier: GPL-2.0 /* * File operations for Coda. * Original version: (C) 1996 Peter Braam * Rewritten for Linux 2.1: (C) 1997 Carnegie Mellon University * * Carnegie Mellon encourages users of this code to contribute improvements * to the Coda project. Contact Peter Braam <[email protected]>. */ #include <linux/refcount.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/time.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/pagemap.h> #include <linux/stat.h> #include <linux/cred.h> #include <linux/errno.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/uio.h> #include <linux/splice.h> #include <linux/coda.h> #include "coda_psdev.h" #include "coda_linux.h" #include "coda_int.h" struct coda_vm_ops { refcount_t refcnt; struct file *coda_file; const struct vm_operations_struct *host_vm_ops; struct vm_operations_struct vm_ops; }; static ssize_t coda_file_read_iter(struct kiocb *iocb, struct iov_iter *to) { struct file *coda_file = iocb->ki_filp; struct inode *coda_inode = file_inode(coda_file); struct coda_file_info *cfi = coda_ftoc(coda_file); loff_t ki_pos = iocb->ki_pos; size_t count = iov_iter_count(to); ssize_t ret; ret = venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode), &cfi->cfi_access_intent, count, ki_pos, CODA_ACCESS_TYPE_READ); if (ret) goto finish_read; ret = vfs_iter_read(cfi->cfi_container, to, &iocb->ki_pos, 0); finish_read: venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode), &cfi->cfi_access_intent, count, ki_pos, CODA_ACCESS_TYPE_READ_FINISH); return ret; } static ssize_t coda_file_write_iter(struct kiocb *iocb, struct iov_iter *to) { struct file *coda_file = iocb->ki_filp; struct inode *coda_inode = file_inode(coda_file); struct coda_file_info *cfi = coda_ftoc(coda_file); struct file *host_file = cfi->cfi_container; loff_t ki_pos = iocb->ki_pos; size_t count = iov_iter_count(to); ssize_t ret; ret = venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode), &cfi->cfi_access_intent, count, ki_pos, CODA_ACCESS_TYPE_WRITE); if (ret) goto finish_write; file_start_write(host_file); inode_lock(coda_inode); ret = vfs_iter_write(cfi->cfi_container, to, &iocb->ki_pos, 0); coda_inode->i_size = file_inode(host_file)->i_size; coda_inode->i_blocks = (coda_inode->i_size + 511) >> 9; coda_inode->i_mtime = inode_set_ctime_current(coda_inode); inode_unlock(coda_inode); file_end_write(host_file); finish_write: venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode), &cfi->cfi_access_intent, count, ki_pos, CODA_ACCESS_TYPE_WRITE_FINISH); return ret; } static ssize_t coda_file_splice_read(struct file *coda_file, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { struct inode *coda_inode = file_inode(coda_file); struct coda_file_info *cfi = coda_ftoc(coda_file); struct file *in = cfi->cfi_container; loff_t ki_pos = *ppos; ssize_t ret; ret = venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode), &cfi->cfi_access_intent, len, ki_pos, CODA_ACCESS_TYPE_READ); if (ret) goto finish_read; ret = vfs_splice_read(in, ppos, pipe, len, flags); finish_read: venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode), &cfi->cfi_access_intent, len, ki_pos, CODA_ACCESS_TYPE_READ_FINISH); return ret; } static void coda_vm_open(struct vm_area_struct *vma) { struct coda_vm_ops *cvm_ops = container_of(vma->vm_ops, struct coda_vm_ops, vm_ops); refcount_inc(&cvm_ops->refcnt); if (cvm_ops->host_vm_ops && cvm_ops->host_vm_ops->open) 
cvm_ops->host_vm_ops->open(vma); } static void coda_vm_close(struct vm_area_struct *vma) { struct coda_vm_ops *cvm_ops = container_of(vma->vm_ops, struct coda_vm_ops, vm_ops); if (cvm_ops->host_vm_ops && cvm_ops->host_vm_ops->close) cvm_ops->host_vm_ops->close(vma); if (refcount_dec_and_test(&cvm_ops->refcnt)) { vma->vm_ops = cvm_ops->host_vm_ops; fput(cvm_ops->coda_file); kfree(cvm_ops); } } static int coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma) { struct inode *coda_inode = file_inode(coda_file); struct coda_file_info *cfi = coda_ftoc(coda_file); struct file *host_file = cfi->cfi_container; struct inode *host_inode = file_inode(host_file); struct coda_inode_info *cii; struct coda_vm_ops *cvm_ops; loff_t ppos; size_t count; int ret; if (!host_file->f_op->mmap) return -ENODEV; if (WARN_ON(coda_file != vma->vm_file)) return -EIO; count = vma->vm_end - vma->vm_start; ppos = vma->vm_pgoff * PAGE_SIZE; ret = venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode), &cfi->cfi_access_intent, count, ppos, CODA_ACCESS_TYPE_MMAP); if (ret) return ret; cvm_ops = kmalloc(sizeof(struct coda_vm_ops), GFP_KERNEL); if (!cvm_ops) return -ENOMEM; cii = ITOC(coda_inode); spin_lock(&cii->c_lock); coda_file->f_mapping = host_file->f_mapping; if (coda_inode->i_mapping == &coda_inode->i_data) coda_inode->i_mapping = host_inode->i_mapping; /* only allow additional mmaps as long as userspace isn't changing * the container file on us! */ else if (coda_inode->i_mapping != host_inode->i_mapping) { spin_unlock(&cii->c_lock); kfree(cvm_ops); return -EBUSY; } /* keep track of how often the coda_inode/host_file has been mmapped */ cii->c_mapcount++; cfi->cfi_mapcount++; spin_unlock(&cii->c_lock); vma->vm_file = get_file(host_file); ret = call_mmap(vma->vm_file, vma); if (ret) { /* if call_mmap fails, our caller will put host_file so we * should drop the reference to the coda_file that we got. 
*/ fput(coda_file); kfree(cvm_ops); } else { /* here we add redirects for the open/close vm_operations */ cvm_ops->host_vm_ops = vma->vm_ops; if (vma->vm_ops) cvm_ops->vm_ops = *vma->vm_ops; cvm_ops->vm_ops.open = coda_vm_open; cvm_ops->vm_ops.close = coda_vm_close; cvm_ops->coda_file = coda_file; refcount_set(&cvm_ops->refcnt, 1); vma->vm_ops = &cvm_ops->vm_ops; } return ret; } int coda_open(struct inode *coda_inode, struct file *coda_file) { struct file *host_file = NULL; int error; unsigned short flags = coda_file->f_flags & (~O_EXCL); unsigned short coda_flags = coda_flags_to_cflags(flags); struct coda_file_info *cfi; cfi = kmalloc(sizeof(struct coda_file_info), GFP_KERNEL); if (!cfi) return -ENOMEM; error = venus_open(coda_inode->i_sb, coda_i2f(coda_inode), coda_flags, &host_file); if (!host_file) error = -EIO; if (error) { kfree(cfi); return error; } host_file->f_flags |= coda_file->f_flags & (O_APPEND | O_SYNC); cfi->cfi_magic = CODA_MAGIC; cfi->cfi_mapcount = 0; cfi->cfi_container = host_file; /* assume access intents are supported unless we hear otherwise */ cfi->cfi_access_intent = true; BUG_ON(coda_file->private_data != NULL); coda_file->private_data = cfi; return 0; } int coda_release(struct inode *coda_inode, struct file *coda_file) { unsigned short flags = (coda_file->f_flags) & (~O_EXCL); unsigned short coda_flags = coda_flags_to_cflags(flags); struct coda_file_info *cfi; struct coda_inode_info *cii; struct inode *host_inode; cfi = coda_ftoc(coda_file); venus_close(coda_inode->i_sb, coda_i2f(coda_inode), coda_flags, coda_file->f_cred->fsuid); host_inode = file_inode(cfi->cfi_container); cii = ITOC(coda_inode); /* did we mmap this file? */ spin_lock(&cii->c_lock); if (coda_inode->i_mapping == &host_inode->i_data) { cii->c_mapcount -= cfi->cfi_mapcount; if (!cii->c_mapcount) coda_inode->i_mapping = &coda_inode->i_data; } spin_unlock(&cii->c_lock); fput(cfi->cfi_container); kfree(coda_file->private_data); coda_file->private_data = NULL; /* VFS fput ignores the return value from file_operations->release, so * there is no use returning an error here */ return 0; } int coda_fsync(struct file *coda_file, loff_t start, loff_t end, int datasync) { struct file *host_file; struct inode *coda_inode = file_inode(coda_file); struct coda_file_info *cfi; int err; if (!(S_ISREG(coda_inode->i_mode) || S_ISDIR(coda_inode->i_mode) || S_ISLNK(coda_inode->i_mode))) return -EINVAL; err = filemap_write_and_wait_range(coda_inode->i_mapping, start, end); if (err) return err; inode_lock(coda_inode); cfi = coda_ftoc(coda_file); host_file = cfi->cfi_container; err = vfs_fsync(host_file, datasync); if (!err && !datasync) err = venus_fsync(coda_inode->i_sb, coda_i2f(coda_inode)); inode_unlock(coda_inode); return err; } const struct file_operations coda_file_operations = { .llseek = generic_file_llseek, .read_iter = coda_file_read_iter, .write_iter = coda_file_write_iter, .mmap = coda_file_mmap, .open = coda_open, .release = coda_release, .fsync = coda_fsync, .splice_read = coda_file_splice_read, };
linux-master
fs/coda/file.c
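coda_file_mmap() above interposes on the host file's vm_operations by copying the ops table, overriding open/close, chaining to the originals and refcounting the wrapper. The same wrap-and-redirect idea, stripped of the VFS types, is sketched below.

/* Freestanding sketch of the wrap-and-redirect trick used by
 * coda_file_mmap()/coda_vm_open()/coda_vm_close() above: copy the
 * callee's ops table, override the entry points you care about,
 * keep the original pointer so the overrides can chain to it, and
 * refcount the wrapper so it is freed on the last close. */
#include <stdio.h>
#include <stdlib.h>

struct ops {
	void (*open)(void *obj);
	void (*close)(void *obj);
};

struct wrapper {
	int refcnt;
	const struct ops *host_ops;	/* original table */
	struct ops ops;			/* patched copy installed in its place */
};

static void wrapped_open(void *obj)
{
	struct wrapper *w = obj;

	w->refcnt++;
	if (w->host_ops && w->host_ops->open)
		w->host_ops->open(obj);
}

static void wrapped_close(void *obj)
{
	struct wrapper *w = obj;

	if (w->host_ops && w->host_ops->close)
		w->host_ops->close(obj);
	if (--w->refcnt == 0) {
		printf("last user gone, freeing wrapper\n");
		free(w);
	}
}

static void host_open(void *obj)  { (void)obj; printf("host open\n"); }
static void host_close(void *obj) { (void)obj; printf("host close\n"); }

int main(void)
{
	static const struct ops host = { host_open, host_close };
	struct wrapper *w = malloc(sizeof(*w));

	w->refcnt = 1;
	w->host_ops = &host;
	w->ops = host;			/* copy, then patch */
	w->ops.open = wrapped_open;
	w->ops.close = wrapped_close;

	w->ops.open(w);			/* refcnt 2, chains to host_open */
	w->ops.close(w);		/* refcnt 1 */
	w->ops.close(w);		/* refcnt 0, frees the wrapper */
	return 0;
}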
// SPDX-License-Identifier: GPL-2.0 /* * super.c * * Copyright (c) 1999 Al Smith * * Portions derived from work (c) 1995,1996 Christian Vogelgsang. */ #include <linux/init.h> #include <linux/module.h> #include <linux/exportfs.h> #include <linux/slab.h> #include <linux/buffer_head.h> #include <linux/vfs.h> #include <linux/blkdev.h> #include "efs.h" #include <linux/efs_vh.h> #include <linux/efs_fs_sb.h> static int efs_statfs(struct dentry *dentry, struct kstatfs *buf); static int efs_fill_super(struct super_block *s, void *d, int silent); static struct dentry *efs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_bdev(fs_type, flags, dev_name, data, efs_fill_super); } static void efs_kill_sb(struct super_block *s) { struct efs_sb_info *sbi = SUPER_INFO(s); kill_block_super(s); kfree(sbi); } static struct file_system_type efs_fs_type = { .owner = THIS_MODULE, .name = "efs", .mount = efs_mount, .kill_sb = efs_kill_sb, .fs_flags = FS_REQUIRES_DEV, }; MODULE_ALIAS_FS("efs"); static struct pt_types sgi_pt_types[] = { {0x00, "SGI vh"}, {0x01, "SGI trkrepl"}, {0x02, "SGI secrepl"}, {0x03, "SGI raw"}, {0x04, "SGI bsd"}, {SGI_SYSV, "SGI sysv"}, {0x06, "SGI vol"}, {SGI_EFS, "SGI efs"}, {0x08, "SGI lv"}, {0x09, "SGI rlv"}, {0x0A, "SGI xfs"}, {0x0B, "SGI xfslog"}, {0x0C, "SGI xlv"}, {0x82, "Linux swap"}, {0x83, "Linux native"}, {0, NULL} }; static struct kmem_cache * efs_inode_cachep; static struct inode *efs_alloc_inode(struct super_block *sb) { struct efs_inode_info *ei; ei = alloc_inode_sb(sb, efs_inode_cachep, GFP_KERNEL); if (!ei) return NULL; return &ei->vfs_inode; } static void efs_free_inode(struct inode *inode) { kmem_cache_free(efs_inode_cachep, INODE_INFO(inode)); } static void init_once(void *foo) { struct efs_inode_info *ei = (struct efs_inode_info *) foo; inode_init_once(&ei->vfs_inode); } static int __init init_inodecache(void) { efs_inode_cachep = kmem_cache_create("efs_inode_cache", sizeof(struct efs_inode_info), 0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD| SLAB_ACCOUNT, init_once); if (efs_inode_cachep == NULL) return -ENOMEM; return 0; } static void destroy_inodecache(void) { /* * Make sure all delayed rcu free inodes are flushed before we * destroy cache. */ rcu_barrier(); kmem_cache_destroy(efs_inode_cachep); } static int efs_remount(struct super_block *sb, int *flags, char *data) { sync_filesystem(sb); *flags |= SB_RDONLY; return 0; } static const struct super_operations efs_superblock_operations = { .alloc_inode = efs_alloc_inode, .free_inode = efs_free_inode, .statfs = efs_statfs, .remount_fs = efs_remount, }; static const struct export_operations efs_export_ops = { .fh_to_dentry = efs_fh_to_dentry, .fh_to_parent = efs_fh_to_parent, .get_parent = efs_get_parent, }; static int __init init_efs_fs(void) { int err; pr_info(EFS_VERSION" - http://aeschi.ch.eu.org/efs/\n"); err = init_inodecache(); if (err) goto out1; err = register_filesystem(&efs_fs_type); if (err) goto out; return 0; out: destroy_inodecache(); out1: return err; } static void __exit exit_efs_fs(void) { unregister_filesystem(&efs_fs_type); destroy_inodecache(); } module_init(init_efs_fs) module_exit(exit_efs_fs) static efs_block_t efs_validate_vh(struct volume_header *vh) { int i; __be32 cs, *ui; int csum; efs_block_t sblock = 0; /* shuts up gcc */ struct pt_types *pt_entry; int pt_type, slice = -1; if (be32_to_cpu(vh->vh_magic) != VHMAGIC) { /* * assume that we're dealing with a partition and allow * read_super() to try and detect a valid superblock * on the next block. 
*/ return 0; } ui = ((__be32 *) (vh + 1)) - 1; for(csum = 0; ui >= ((__be32 *) vh);) { cs = *ui--; csum += be32_to_cpu(cs); } if (csum) { pr_warn("SGI disklabel: checksum bad, label corrupted\n"); return 0; } #ifdef DEBUG pr_debug("bf: \"%16s\"\n", vh->vh_bootfile); for(i = 0; i < NVDIR; i++) { int j; char name[VDNAMESIZE+1]; for(j = 0; j < VDNAMESIZE; j++) { name[j] = vh->vh_vd[i].vd_name[j]; } name[j] = (char) 0; if (name[0]) { pr_debug("vh: %8s block: 0x%08x size: 0x%08x\n", name, (int) be32_to_cpu(vh->vh_vd[i].vd_lbn), (int) be32_to_cpu(vh->vh_vd[i].vd_nbytes)); } } #endif for(i = 0; i < NPARTAB; i++) { pt_type = (int) be32_to_cpu(vh->vh_pt[i].pt_type); for(pt_entry = sgi_pt_types; pt_entry->pt_name; pt_entry++) { if (pt_type == pt_entry->pt_type) break; } #ifdef DEBUG if (be32_to_cpu(vh->vh_pt[i].pt_nblks)) { pr_debug("pt %2d: start: %08d size: %08d type: 0x%02x (%s)\n", i, (int)be32_to_cpu(vh->vh_pt[i].pt_firstlbn), (int)be32_to_cpu(vh->vh_pt[i].pt_nblks), pt_type, (pt_entry->pt_name) ? pt_entry->pt_name : "unknown"); } #endif if (IS_EFS(pt_type)) { sblock = be32_to_cpu(vh->vh_pt[i].pt_firstlbn); slice = i; } } if (slice == -1) { pr_notice("partition table contained no EFS partitions\n"); #ifdef DEBUG } else { pr_info("using slice %d (type %s, offset 0x%x)\n", slice, (pt_entry->pt_name) ? pt_entry->pt_name : "unknown", sblock); #endif } return sblock; } static int efs_validate_super(struct efs_sb_info *sb, struct efs_super *super) { if (!IS_EFS_MAGIC(be32_to_cpu(super->fs_magic))) return -1; sb->fs_magic = be32_to_cpu(super->fs_magic); sb->total_blocks = be32_to_cpu(super->fs_size); sb->first_block = be32_to_cpu(super->fs_firstcg); sb->group_size = be32_to_cpu(super->fs_cgfsize); sb->data_free = be32_to_cpu(super->fs_tfree); sb->inode_free = be32_to_cpu(super->fs_tinode); sb->inode_blocks = be16_to_cpu(super->fs_cgisize); sb->total_groups = be16_to_cpu(super->fs_ncg); return 0; } static int efs_fill_super(struct super_block *s, void *d, int silent) { struct efs_sb_info *sb; struct buffer_head *bh; struct inode *root; sb = kzalloc(sizeof(struct efs_sb_info), GFP_KERNEL); if (!sb) return -ENOMEM; s->s_fs_info = sb; s->s_time_min = 0; s->s_time_max = U32_MAX; s->s_magic = EFS_SUPER_MAGIC; if (!sb_set_blocksize(s, EFS_BLOCKSIZE)) { pr_err("device does not support %d byte blocks\n", EFS_BLOCKSIZE); return -EINVAL; } /* read the vh (volume header) block */ bh = sb_bread(s, 0); if (!bh) { pr_err("cannot read volume header\n"); return -EIO; } /* * if this returns zero then we didn't find any partition table. * this isn't (yet) an error - just assume for the moment that * the device is valid and go on to search for a superblock. 
*/ sb->fs_start = efs_validate_vh((struct volume_header *) bh->b_data); brelse(bh); if (sb->fs_start == -1) { return -EINVAL; } bh = sb_bread(s, sb->fs_start + EFS_SUPER); if (!bh) { pr_err("cannot read superblock\n"); return -EIO; } if (efs_validate_super(sb, (struct efs_super *) bh->b_data)) { #ifdef DEBUG pr_warn("invalid superblock at block %u\n", sb->fs_start + EFS_SUPER); #endif brelse(bh); return -EINVAL; } brelse(bh); if (!sb_rdonly(s)) { #ifdef DEBUG pr_info("forcing read-only mode\n"); #endif s->s_flags |= SB_RDONLY; } s->s_op = &efs_superblock_operations; s->s_export_op = &efs_export_ops; root = efs_iget(s, EFS_ROOTINODE); if (IS_ERR(root)) { pr_err("get root inode failed\n"); return PTR_ERR(root); } s->s_root = d_make_root(root); if (!(s->s_root)) { pr_err("get root dentry failed\n"); return -ENOMEM; } return 0; } static int efs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; struct efs_sb_info *sbi = SUPER_INFO(sb); u64 id = huge_encode_dev(sb->s_bdev->bd_dev); buf->f_type = EFS_SUPER_MAGIC; /* efs magic number */ buf->f_bsize = EFS_BLOCKSIZE; /* blocksize */ buf->f_blocks = sbi->total_groups * /* total data blocks */ (sbi->group_size - sbi->inode_blocks); buf->f_bfree = sbi->data_free; /* free data blocks */ buf->f_bavail = sbi->data_free; /* free blocks for non-root */ buf->f_files = sbi->total_groups * /* total inodes */ sbi->inode_blocks * (EFS_BLOCKSIZE / sizeof(struct efs_dinode)); buf->f_ffree = sbi->inode_free; /* free inodes */ buf->f_fsid = u64_to_fsid(id); buf->f_namelen = EFS_MAXNAMELEN; /* max filename length */ return 0; }
linux-master
fs/efs/super.c
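efs_validate_vh() above accepts an SGI disk label only when the big-endian 32-bit words of the volume header sum to zero. A userspace check of that invariant could look like the sketch below; the 512-byte header size and the block-0 location are assumptions drawn from the code above.

/* Sketch of the SGI volume-header checksum rule enforced by
 * efs_validate_vh() above: the sum of all big-endian 32-bit words
 * in the label block must be zero modulo 2^32.  The 512-byte size
 * is an assumption about sizeof(struct volume_header). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>		/* ntohl() for big-endian words */

#define VH_SIZE 512		/* assumed volume header size */

static int vh_checksum_ok(const unsigned char *blk)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i < VH_SIZE; i += 4) {
		uint32_t w;

		memcpy(&w, blk + i, sizeof(w));
		sum += ntohl(w);
	}
	return sum == 0;
}

int main(int argc, char **argv)
{
	unsigned char blk[VH_SIZE];
	FILE *f;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <device-or-image>\n", argv[0]);
		return 1;
	}
	f = fopen(argv[1], "rb");
	if (!f) {
		perror(argv[1]);
		return 1;
	}
	if (fread(blk, 1, VH_SIZE, f) != VH_SIZE) {
		fprintf(stderr, "%s: short read\n", argv[1]);
		fclose(f);
		return 1;
	}
	printf("volume header checksum %s\n",
	       vh_checksum_ok(blk) ? "ok" : "bad");
	fclose(f);
	return 0;
}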
// SPDX-License-Identifier: GPL-2.0 /* * dir.c * * Copyright (c) 1999 Al Smith */ #include <linux/buffer_head.h> #include "efs.h" static int efs_readdir(struct file *, struct dir_context *); const struct file_operations efs_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, .iterate_shared = efs_readdir, }; const struct inode_operations efs_dir_inode_operations = { .lookup = efs_lookup, }; static int efs_readdir(struct file *file, struct dir_context *ctx) { struct inode *inode = file_inode(file); efs_block_t block; int slot; if (inode->i_size & (EFS_DIRBSIZE-1)) pr_warn("%s(): directory size not a multiple of EFS_DIRBSIZE\n", __func__); /* work out where this entry can be found */ block = ctx->pos >> EFS_DIRBSIZE_BITS; /* each block contains at most 256 slots */ slot = ctx->pos & 0xff; /* look at all blocks */ while (block < inode->i_blocks) { struct efs_dir *dirblock; struct buffer_head *bh; /* read the dir block */ bh = sb_bread(inode->i_sb, efs_bmap(inode, block)); if (!bh) { pr_err("%s(): failed to read dir block %d\n", __func__, block); break; } dirblock = (struct efs_dir *) bh->b_data; if (be16_to_cpu(dirblock->magic) != EFS_DIRBLK_MAGIC) { pr_err("%s(): invalid directory block\n", __func__); brelse(bh); break; } for (; slot < dirblock->slots; slot++) { struct efs_dentry *dirslot; efs_ino_t inodenum; const char *nameptr; int namelen; if (dirblock->space[slot] == 0) continue; dirslot = (struct efs_dentry *) (((char *) bh->b_data) + EFS_SLOTAT(dirblock, slot)); inodenum = be32_to_cpu(dirslot->inode); namelen = dirslot->namelen; nameptr = dirslot->name; pr_debug("%s(): block %d slot %d/%d: inode %u, name \"%s\", namelen %u\n", __func__, block, slot, dirblock->slots-1, inodenum, nameptr, namelen); if (!namelen) continue; /* found the next entry */ ctx->pos = (block << EFS_DIRBSIZE_BITS) | slot; /* sanity check */ if (nameptr - (char *) dirblock + namelen > EFS_DIRBSIZE) { pr_warn("directory entry %d exceeds directory block\n", slot); continue; } /* copy filename and data in dirslot */ if (!dir_emit(ctx, nameptr, namelen, inodenum, DT_UNKNOWN)) { brelse(bh); return 0; } } brelse(bh); slot = 0; block++; } ctx->pos = (block << EFS_DIRBSIZE_BITS) | slot; return 0; }
linux-master
fs/efs/dir.c
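efs_readdir() above encodes the directory cursor as (block << EFS_DIRBSIZE_BITS) | slot, with the slot limited to the low 8 bits. The encode/decode arithmetic is illustrated below; EFS_DIRBSIZE_BITS = 9 is an assumption consistent with a 512-byte directory block.

/* Sketch of the f_pos encoding used by efs_readdir() above:
 * high bits select the directory block, the low 8 bits select
 * the slot inside it. */
#include <stdio.h>

#define EFS_DIRBSIZE_BITS 9	/* assumed: log2(512) */

static long long encode_pos(unsigned long block, unsigned int slot)
{
	return ((long long)block << EFS_DIRBSIZE_BITS) | (slot & 0xff);
}

int main(void)
{
	long long pos = encode_pos(3, 17);

	printf("pos=%lld -> block=%lld slot=%lld\n",
	       pos, pos >> EFS_DIRBSIZE_BITS, pos & 0xff);
	return 0;
}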
// SPDX-License-Identifier: GPL-2.0-only /* * inode.c * * Copyright (c) 1999 Al Smith * * Portions derived from work (c) 1995,1996 Christian Vogelgsang, * and from work (c) 1998 Mike Shaver. */ #include <linux/buffer_head.h> #include <linux/module.h> #include <linux/fs.h> #include "efs.h" #include <linux/efs_fs_sb.h> static int efs_read_folio(struct file *file, struct folio *folio) { return block_read_full_folio(folio, efs_get_block); } static sector_t _efs_bmap(struct address_space *mapping, sector_t block) { return generic_block_bmap(mapping,block,efs_get_block); } static const struct address_space_operations efs_aops = { .read_folio = efs_read_folio, .bmap = _efs_bmap }; static inline void extent_copy(efs_extent *src, efs_extent *dst) { /* * this is slightly evil. it doesn't just copy * efs_extent from src to dst, it also mangles * the bits so that dst ends up in cpu byte-order. */ dst->cooked.ex_magic = (unsigned int) src->raw[0]; dst->cooked.ex_bn = ((unsigned int) src->raw[1] << 16) | ((unsigned int) src->raw[2] << 8) | ((unsigned int) src->raw[3] << 0); dst->cooked.ex_length = (unsigned int) src->raw[4]; dst->cooked.ex_offset = ((unsigned int) src->raw[5] << 16) | ((unsigned int) src->raw[6] << 8) | ((unsigned int) src->raw[7] << 0); return; } struct inode *efs_iget(struct super_block *super, unsigned long ino) { int i, inode_index; dev_t device; u32 rdev; struct buffer_head *bh; struct efs_sb_info *sb = SUPER_INFO(super); struct efs_inode_info *in; efs_block_t block, offset; struct efs_dinode *efs_inode; struct inode *inode; inode = iget_locked(super, ino); if (!inode) return ERR_PTR(-ENOMEM); if (!(inode->i_state & I_NEW)) return inode; in = INODE_INFO(inode); /* ** EFS layout: ** ** | cylinder group | cylinder group | cylinder group ..etc ** |inodes|data |inodes|data |inodes|data ..etc ** ** work out the inode block index, (considering initially that the ** inodes are stored as consecutive blocks). then work out the block ** number of that inode given the above layout, and finally the ** offset of the inode within that block. 
*/ inode_index = inode->i_ino / (EFS_BLOCKSIZE / sizeof(struct efs_dinode)); block = sb->fs_start + sb->first_block + (sb->group_size * (inode_index / sb->inode_blocks)) + (inode_index % sb->inode_blocks); offset = (inode->i_ino % (EFS_BLOCKSIZE / sizeof(struct efs_dinode))) * sizeof(struct efs_dinode); bh = sb_bread(inode->i_sb, block); if (!bh) { pr_warn("%s() failed at block %d\n", __func__, block); goto read_inode_error; } efs_inode = (struct efs_dinode *) (bh->b_data + offset); inode->i_mode = be16_to_cpu(efs_inode->di_mode); set_nlink(inode, be16_to_cpu(efs_inode->di_nlink)); i_uid_write(inode, (uid_t)be16_to_cpu(efs_inode->di_uid)); i_gid_write(inode, (gid_t)be16_to_cpu(efs_inode->di_gid)); inode->i_size = be32_to_cpu(efs_inode->di_size); inode->i_atime.tv_sec = be32_to_cpu(efs_inode->di_atime); inode->i_mtime.tv_sec = be32_to_cpu(efs_inode->di_mtime); inode_set_ctime(inode, be32_to_cpu(efs_inode->di_ctime), 0); inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = 0; /* this is the number of blocks in the file */ if (inode->i_size == 0) { inode->i_blocks = 0; } else { inode->i_blocks = ((inode->i_size - 1) >> EFS_BLOCKSIZE_BITS) + 1; } rdev = be16_to_cpu(efs_inode->di_u.di_dev.odev); if (rdev == 0xffff) { rdev = be32_to_cpu(efs_inode->di_u.di_dev.ndev); if (sysv_major(rdev) > 0xfff) device = 0; else device = MKDEV(sysv_major(rdev), sysv_minor(rdev)); } else device = old_decode_dev(rdev); /* get the number of extents for this object */ in->numextents = be16_to_cpu(efs_inode->di_numextents); in->lastextent = 0; /* copy the extents contained within the inode to memory */ for(i = 0; i < EFS_DIRECTEXTENTS; i++) { extent_copy(&(efs_inode->di_u.di_extents[i]), &(in->extents[i])); if (i < in->numextents && in->extents[i].cooked.ex_magic != 0) { pr_warn("extent %d has bad magic number in inode %lu\n", i, inode->i_ino); brelse(bh); goto read_inode_error; } } brelse(bh); pr_debug("efs_iget(): inode %lu, extents %d, mode %o\n", inode->i_ino, in->numextents, inode->i_mode); switch (inode->i_mode & S_IFMT) { case S_IFDIR: inode->i_op = &efs_dir_inode_operations; inode->i_fop = &efs_dir_operations; break; case S_IFREG: inode->i_fop = &generic_ro_fops; inode->i_data.a_ops = &efs_aops; break; case S_IFLNK: inode->i_op = &page_symlink_inode_operations; inode_nohighmem(inode); inode->i_data.a_ops = &efs_symlink_aops; break; case S_IFCHR: case S_IFBLK: case S_IFIFO: init_special_inode(inode, inode->i_mode, device); break; default: pr_warn("unsupported inode mode %o\n", inode->i_mode); goto read_inode_error; break; } unlock_new_inode(inode); return inode; read_inode_error: pr_warn("failed to read inode %lu\n", inode->i_ino); iget_failed(inode); return ERR_PTR(-EIO); } static inline efs_block_t efs_extent_check(efs_extent *ptr, efs_block_t block, struct efs_sb_info *sb) { efs_block_t start; efs_block_t length; efs_block_t offset; /* * given an extent and a logical block within a file, * can this block be found within this extent ? 
*/ start = ptr->cooked.ex_bn; length = ptr->cooked.ex_length; offset = ptr->cooked.ex_offset; if ((block >= offset) && (block < offset+length)) { return(sb->fs_start + start + block - offset); } else { return 0; } } efs_block_t efs_map_block(struct inode *inode, efs_block_t block) { struct efs_sb_info *sb = SUPER_INFO(inode->i_sb); struct efs_inode_info *in = INODE_INFO(inode); struct buffer_head *bh = NULL; int cur, last, first = 1; int ibase, ioffset, dirext, direxts, indext, indexts; efs_block_t iblock, result = 0, lastblock = 0; efs_extent ext, *exts; last = in->lastextent; if (in->numextents <= EFS_DIRECTEXTENTS) { /* first check the last extent we returned */ if ((result = efs_extent_check(&in->extents[last], block, sb))) return result; /* if we only have one extent then nothing can be found */ if (in->numextents == 1) { pr_err("%s() failed to map (1 extent)\n", __func__); return 0; } direxts = in->numextents; /* * check the stored extents in the inode * start with next extent and check forwards */ for(dirext = 1; dirext < direxts; dirext++) { cur = (last + dirext) % in->numextents; if ((result = efs_extent_check(&in->extents[cur], block, sb))) { in->lastextent = cur; return result; } } pr_err("%s() failed to map block %u (dir)\n", __func__, block); return 0; } pr_debug("%s(): indirect search for logical block %u\n", __func__, block); direxts = in->extents[0].cooked.ex_offset; indexts = in->numextents; for(indext = 0; indext < indexts; indext++) { cur = (last + indext) % indexts; /* * work out which direct extent contains `cur'. * * also compute ibase: i.e. the number of the first * indirect extent contained within direct extent `cur'. * */ ibase = 0; for(dirext = 0; cur < ibase && dirext < direxts; dirext++) { ibase += in->extents[dirext].cooked.ex_length * (EFS_BLOCKSIZE / sizeof(efs_extent)); } if (dirext == direxts) { /* should never happen */ pr_err("couldn't find direct extent for indirect extent %d (block %u)\n", cur, block); if (bh) brelse(bh); return 0; } /* work out block number and offset of this indirect extent */ iblock = sb->fs_start + in->extents[dirext].cooked.ex_bn + (cur - ibase) / (EFS_BLOCKSIZE / sizeof(efs_extent)); ioffset = (cur - ibase) % (EFS_BLOCKSIZE / sizeof(efs_extent)); if (first || lastblock != iblock) { if (bh) brelse(bh); bh = sb_bread(inode->i_sb, iblock); if (!bh) { pr_err("%s() failed at block %d\n", __func__, iblock); return 0; } pr_debug("%s(): read indirect extent block %d\n", __func__, iblock); first = 0; lastblock = iblock; } exts = (efs_extent *) bh->b_data; extent_copy(&(exts[ioffset]), &ext); if (ext.cooked.ex_magic != 0) { pr_err("extent %d has bad magic number in block %d\n", cur, iblock); if (bh) brelse(bh); return 0; } if ((result = efs_extent_check(&ext, block, sb))) { if (bh) brelse(bh); in->lastextent = cur; return result; } } if (bh) brelse(bh); pr_err("%s() failed to map block %u (indir)\n", __func__, block); return 0; } MODULE_LICENSE("GPL");
linux-master
fs/efs/inode.c
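efs_iget() above locates an on-disk inode by turning the inode number into an inode-block index and then into a device block plus byte offset, following the cylinder-group layout described in its comment. The sketch below replays that arithmetic with made-up superblock values; the 512-byte block and 128-byte dinode sizes are assumptions.

/* Sketch of the inode-location arithmetic performed by efs_iget()
 * above.  The superblock numbers (fs_start, first_block,
 * group_size, inode_blocks) are illustrative, not read from a
 * real filesystem. */
#include <stdio.h>

#define BLOCKSIZE   512		/* assumed EFS_BLOCKSIZE */
#define DINODE_SIZE 128		/* assumed sizeof(struct efs_dinode) */

int main(void)
{
	unsigned long ino = 12345;	/* example inode number */
	unsigned long fs_start = 10, first_block = 2;
	unsigned long group_size = 2048, inode_blocks = 64;	/* per group */
	unsigned long per_block = BLOCKSIZE / DINODE_SIZE;	/* inodes per block */

	unsigned long inode_index = ino / per_block;
	unsigned long block = fs_start + first_block +
		group_size * (inode_index / inode_blocks) +
		inode_index % inode_blocks;
	unsigned long offset = (ino % per_block) * DINODE_SIZE;

	printf("inode %lu -> device block %lu, offset %lu\n",
	       ino, block, offset);
	return 0;
}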
// SPDX-License-Identifier: GPL-2.0 /* * namei.c * * Copyright (c) 1999 Al Smith * * Portions derived from work (c) 1995,1996 Christian Vogelgsang. */ #include <linux/buffer_head.h> #include <linux/string.h> #include <linux/exportfs.h> #include "efs.h" static efs_ino_t efs_find_entry(struct inode *inode, const char *name, int len) { struct buffer_head *bh; int slot, namelen; char *nameptr; struct efs_dir *dirblock; struct efs_dentry *dirslot; efs_ino_t inodenum; efs_block_t block; if (inode->i_size & (EFS_DIRBSIZE-1)) pr_warn("%s(): directory size not a multiple of EFS_DIRBSIZE\n", __func__); for(block = 0; block < inode->i_blocks; block++) { bh = sb_bread(inode->i_sb, efs_bmap(inode, block)); if (!bh) { pr_err("%s(): failed to read dir block %d\n", __func__, block); return 0; } dirblock = (struct efs_dir *) bh->b_data; if (be16_to_cpu(dirblock->magic) != EFS_DIRBLK_MAGIC) { pr_err("%s(): invalid directory block\n", __func__); brelse(bh); return 0; } for (slot = 0; slot < dirblock->slots; slot++) { dirslot = (struct efs_dentry *) (((char *) bh->b_data) + EFS_SLOTAT(dirblock, slot)); namelen = dirslot->namelen; nameptr = dirslot->name; if ((namelen == len) && (!memcmp(name, nameptr, len))) { inodenum = be32_to_cpu(dirslot->inode); brelse(bh); return inodenum; } } brelse(bh); } return 0; } struct dentry *efs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { efs_ino_t inodenum; struct inode *inode = NULL; inodenum = efs_find_entry(dir, dentry->d_name.name, dentry->d_name.len); if (inodenum) inode = efs_iget(dir->i_sb, inodenum); return d_splice_alias(inode, dentry); } static struct inode *efs_nfs_get_inode(struct super_block *sb, u64 ino, u32 generation) { struct inode *inode; if (ino == 0) return ERR_PTR(-ESTALE); inode = efs_iget(sb, ino); if (IS_ERR(inode)) return ERR_CAST(inode); if (generation && inode->i_generation != generation) { iput(inode); return ERR_PTR(-ESTALE); } return inode; } struct dentry *efs_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { return generic_fh_to_dentry(sb, fid, fh_len, fh_type, efs_nfs_get_inode); } struct dentry *efs_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { return generic_fh_to_parent(sb, fid, fh_len, fh_type, efs_nfs_get_inode); } struct dentry *efs_get_parent(struct dentry *child) { struct dentry *parent = ERR_PTR(-ENOENT); efs_ino_t ino; ino = efs_find_entry(d_inode(child), "..", 2); if (ino) parent = d_obtain_alias(efs_iget(child->d_sb, ino)); return parent; }
linux-master
fs/efs/namei.c
// SPDX-License-Identifier: GPL-2.0 /* * symlink.c * * Copyright (c) 1999 Al Smith * * Portions derived from work (c) 1995,1996 Christian Vogelgsang. */ #include <linux/string.h> #include <linux/pagemap.h> #include <linux/buffer_head.h> #include "efs.h" static int efs_symlink_read_folio(struct file *file, struct folio *folio) { struct page *page = &folio->page; char *link = page_address(page); struct buffer_head * bh; struct inode * inode = page->mapping->host; efs_block_t size = inode->i_size; int err; err = -ENAMETOOLONG; if (size > 2 * EFS_BLOCKSIZE) goto fail; /* read first 512 bytes of link target */ err = -EIO; bh = sb_bread(inode->i_sb, efs_bmap(inode, 0)); if (!bh) goto fail; memcpy(link, bh->b_data, (size > EFS_BLOCKSIZE) ? EFS_BLOCKSIZE : size); brelse(bh); if (size > EFS_BLOCKSIZE) { bh = sb_bread(inode->i_sb, efs_bmap(inode, 1)); if (!bh) goto fail; memcpy(link + EFS_BLOCKSIZE, bh->b_data, size - EFS_BLOCKSIZE); brelse(bh); } link[size] = '\0'; SetPageUptodate(page); unlock_page(page); return 0; fail: SetPageError(page); unlock_page(page); return err; } const struct address_space_operations efs_symlink_aops = { .read_folio = efs_symlink_read_folio };
linux-master
fs/efs/symlink.c
// SPDX-License-Identifier: GPL-2.0
/*
 * file.c
 *
 * Copyright (c) 1999 Al Smith
 *
 * Portions derived from work (c) 1995,1996 Christian Vogelgsang.
 */

#include <linux/buffer_head.h>
#include "efs.h"

int efs_get_block(struct inode *inode, sector_t iblock,
		  struct buffer_head *bh_result, int create)
{
	int error = -EROFS;
	long phys;

	if (create)
		return error;
	if (iblock >= inode->i_blocks) {
#ifdef DEBUG
		/*
		 * i have no idea why this happens as often as it does
		 */
		pr_warn("%s(): block %llu >= %llu (filesize %lld)\n",
			__func__, (unsigned long long)iblock,
			(unsigned long long)inode->i_blocks,
			(long long)inode->i_size);
#endif
		return 0;
	}
	phys = efs_map_block(inode, iblock);
	if (phys)
		map_bh(bh_result, inode->i_sb, phys);
	return 0;
}

int efs_bmap(struct inode *inode, efs_block_t block)
{
	if (block < 0) {
		pr_warn("%s(): block < 0\n", __func__);
		return 0;
	}

	/* are we about to read past the end of a file ? */
	if (!(block < inode->i_blocks)) {
#ifdef DEBUG
		/*
		 * i have no idea why this happens as often as it does
		 */
		pr_warn("%s(): block %d >= %llu (filesize %lld)\n",
			__func__, block,
			(unsigned long long)inode->i_blocks,
			(long long)inode->i_size);
#endif
		return 0;
	}

	return efs_map_block(inode, block);
}
linux-master
fs/efs/file.c